From 9b5ad50d63b76ad66879a2e96692d5cfbf028f06 Mon Sep 17 00:00:00 2001 From: wangguolei <wangguolei@hh-medic.com> Date: Tue, 21 Jun 2022 10:51:45 +0800 Subject: [PATCH] 3.4.0.06211051 --- HHVDoctorSDK.podspec | 2 +- HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMBadResView.nib/objects-11.0+.nib | Bin 0 -> 6918 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMBadResView.nib/runtime.nib | Bin 0 -> 6255 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMGoodResView.nib/objects-11.0+.nib | Bin 0 -> 5529 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMGoodResView.nib/runtime.nib | Bin 0 -> 4861 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMStarView.nib/objects-11.0+.nib | Bin 0 -> 9119 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMStarView.nib/runtime.nib | Bin 0 -> 8451 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTouSuView.nib/objects-11.0+.nib | Bin 0 -> 8319 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTouSuView.nib/runtime.nib | Bin 0 -> 7652 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTousuResView.nib/objects-11.0+.nib | Bin 0 -> 6391 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTousuResView.nib/runtime.nib | Bin 0 -> 5728 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/ControlView.nib | Bin 0 -> 12539 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/ExpandView.nib | Bin 0 -> 5264 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputNewView.nib/objects-11.0+.nib | Bin 0 -> 10528 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputNewView.nib/runtime.nib | Bin 0 -> 9860 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputView.nib/objects-11.0+.nib | Bin 0 -> 20435 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputView.nib/runtime.nib | Bin 0 -> 19774 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoGuide.nib/objects-11.0+.nib | Bin 0 -> 6370 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoGuide.nib/runtime.nib | Bin 0 -> 5706 bytes 
HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoPermissionAlert.nib/objects-11.0+.nib | Bin 0 -> 5771 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoPermissionAlert.nib/runtime.nib | Bin 0 -> 5104 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/Info.plist | Bin 0 -> 258 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/UIViewController-jgq-dG-SDa.nib/objects-12.3+.nib | Bin 0 -> 1048 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/UIViewController-jgq-dG-SDa.nib/runtime.nib | Bin 0 -> 1048 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/jgq-dG-SDa-view-Xv5-uR-Wlf.nib/objects-12.3+.nib | Bin 0 -> 7809 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/jgq-dG-SDa-view-Xv5-uR-Wlf.nib/runtime.nib | Bin 0 -> 7619 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/WaitingView.nib | Bin 0 -> 10690 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/Info.plist | Bin 0 -> 276 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/UINavigationController-0Si-xm-B2v.nib/objects-11.0+.nib | Bin 0 -> 1742 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/UINavigationController-0Si-xm-B2v.nib/runtime.nib | Bin 0 -> 1742 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/wzQ-GF-Pq0-view-cCD-m9-X4Z.nib/objects-11.0+.nib | Bin 0 -> 7075 bytes HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/wzQ-GF-Pq0-view-cCD-m9-X4Z.nib/runtime.nib | Bin 0 -> 7191 bytes HHVDoctorSDK/HHSDKVideo.framework/CMQuesCell.nib/objects-12.3+.nib | Bin 0 -> 3413 bytes HHVDoctorSDK/HHSDKVideo.framework/CMQuesCell.nib/runtime.nib | Bin 0 -> 3177 bytes HHVDoctorSDK/HHSDKVideo.framework/DoctorIntroView.nib/objects-11.0+.nib | Bin 0 -> 4208 bytes HHVDoctorSDK/HHSDKVideo.framework/DoctorIntroView.nib/runtime.nib | Bin 0 -> 3532 bytes HHVDoctorSDK/HHSDKVideo.framework/HHCodeCellView.nib | Bin 0 -> 2839 bytes 
HHVDoctorSDK/HHSDKVideo.framework/HHCodeView.nib | Bin 0 -> 2963 bytes HHVDoctorSDK/HHSDKVideo.framework/HHMemLoadView.nib | Bin 0 -> 3843 bytes HHVDoctorSDK/HHSDKVideo.framework/HHMultyVideoView.nib | Bin 0 -> 996 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/de.lproj/Localizable.strings | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/en.lproj/Localizable.strings | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/es-419.lproj/Localizable.strings | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/fr.lproj/Localizable.strings | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/id.lproj/Localizable.strings | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/it.lproj/Localizable.strings | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ja-US.lproj/Localizable.strings | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ko.lproj/Localizable.strings | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ms.lproj/Localizable.strings | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/pt-BR.lproj/Localizable.strings | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ru.lproj/Localizable.strings | 70 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/tr.lproj/Localizable.strings | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/vi.lproj/Localizable.strings | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zh-Hans.lproj/Localizable.strings | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zh-Hant.lproj/Localizable.strings | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_addPhoto@2x.png | Bin 0 -> 1733 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_addPhoto@3x.png | Bin 0 -> 2851 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_albumSelect@2x.png | Bin 0 -> 555 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_albumSelect@3x.png | Bin 0 -> 705 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_arrow_down@2x.png | Bin 0 -> 1651 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_arrow_down@3x.png | Bin 0 -> 2787 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_circle@2x.png | Bin 0 -> 450 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_circle@3x.png | Bin 0 -> 636 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_circle@2x.png | Bin 0 -> 329 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_circle@3x.png | Bin 0 -> 459 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_selected@2x.png | Bin 0 -> 416 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_selected@3x.png | Bin 0 -> 544 bytes 
HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_selected@2x.png | Bin 0 -> 548 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_selected@3x.png | Bin 0 -> 709 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_unselected@2x.png | Bin 0 -> 632 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_unselected@3x.png | Bin 0 -> 944 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_close@2x.png | Bin 0 -> 460 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_close@3x.png | Bin 0 -> 621 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_downArrow@2x.png | Bin 0 -> 644 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_downArrow@3x.png | Bin 0 -> 940 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navBack@2x.png | Bin 0 -> 186 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navBack@3x.png | Bin 0 -> 171 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navClose@2x.png | Bin 0 -> 1283 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navClose@3x.png | Bin 0 -> 2111 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_retake@2x.png | Bin 0 -> 825 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_retake@3x.png | Bin 0 -> 1152 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right@2x.png | Bin 0 -> 351 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right@3x.png | Bin 0 -> 581 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right_arrow@2x.png | Bin 0 -> 314 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right_arrow@3x.png | Bin 0 -> 361 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_shadow@2x.png | Bin 0 -> 427 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_shadow@3x.png | Bin 0 -> 637 bytes 
HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_warning@2x.png | Bin 0 -> 640 bytes HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_warning@3x.png | Bin 0 -> 919 bytes HHVDoctorSDK/HHSDKVideo.framework/HHSDKVideo | Bin 0 -> 28475528 bytes HHVDoctorSDK/HHSDKVideo.framework/HHSelectMemCell.nib | Bin 0 -> 3049 bytes HHVDoctorSDK/HHSDKVideo.framework/HHSelectMemView.nib/objects-11.0+.nib | Bin 0 -> 8224 bytes HHVDoctorSDK/HHSDKVideo.framework/HHSelectMemView.nib/runtime.nib | Bin 0 -> 7678 bytes HHVDoctorSDK/HHSDKVideo.framework/HHVideoView.nib | Bin 0 -> 991 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/SDKConfig.plist | 8 ++++++++ HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/addr_close@2x.png | Bin 0 -> 269 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/addr_close@3x.png | Bin 0 -> 403 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/address@2x.png | Bin 0 -> 758 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/address@3x.png | Bin 0 -> 1242 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/back_app@3x.png | Bin 0 -> 788 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/buyVip@2x.png | Bin 0 -> 18609 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/buyVip@3x.png | Bin 0 -> 33696 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/call_default@3x.png | Bin 0 -> 117168 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/camera_disable@3x.png | Bin 0 -> 7329 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/camera_flash_close@3x.png | Bin 0 -> 1036 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/camera_flash_normal@3x.png | Bin 0 -> 721 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/card_logo_gold@2x.png | Bin 0 -> 1846 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/card_logo_gold@3x.png | Bin 0 -> 2516 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat9left.png | Bin 0 -> 526176 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_add_rights@3x.png | Bin 0 -> 18745 bytes 
HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_arrow.png | Bin 0 -> 1298 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_arrow@2x.png | Bin 0 -> 1175 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_arrow@3x.png | Bin 0 -> 1298 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_mask_b@3x.png | Bin 0 -> 223 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_mask_t@3x.png | Bin 0 -> 925 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_default@2x.png | Bin 0 -> 1018 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_default@3x.png | Bin 0 -> 1018 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_right@2x.png | Bin 0 -> 985 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_right@3x.png | Bin 0 -> 837 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/close_member@3x.png | Bin 0 -> 429 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_arrow_right@2x.png | Bin 0 -> 466 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_arrow_right@3x.png | Bin 0 -> 984 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_feedback@2x.png | Bin 0 -> 1296 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_feedback@3x.png | Bin 0 -> 2377 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/coment_unhappy_s@2x.png | Bin 0 -> 799 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/coment_unhappy_s@3x.png | Bin 0 -> 1685 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy@2x.png | Bin 0 -> 866 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy@3x.png | Bin 0 -> 1826 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy_s@2x.png | Bin 0 -> 802 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy_s@3x.png | Bin 0 -> 1669 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_unhappy@2x.png | Bin 0 -> 843 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_unhappy@3x.png | Bin 0 -> 1843 bytes 
HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/counter_back@2x.png | Bin 0 -> 576 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/counter_back@3x.png | Bin 0 -> 633 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cus_shape@3x.png | Bin 0 -> 2057 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/default_icon@3x.png | Bin 0 -> 1781 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/delivery_logo2x.png | Bin 0 -> 29811 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/delivery_logo@3x.png | Bin 0 -> 56082 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/doctor_license@3x.png | Bin 0 -> 1206 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/doctor_license_highlight@3x.png | Bin 0 -> 1134 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_accept_highlight@3x.png | Bin 0 -> 2669 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_accept_normal@3x.png | Bin 0 -> 2729 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_call_mask@3x.png | Bin 0 -> 24207 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_camera_close@3x.png | Bin 0 -> 2213 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_camera_open@3x.png | Bin 0 -> 2032 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_camera_highlight@3x.png | Bin 0 -> 7329 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_camera_normal@3x.png | Bin 0 -> 6999 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_doctor@3x.png | Bin 0 -> 9044 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_doctor_blue@3x.png | Bin 0 -> 3046 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_voice_highlight@3x.png | Bin 0 -> 7987 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_voice_normal@3x.png | Bin 0 -> 7719 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_eveluate_close@2x.png | Bin 0 -> 945 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_eveluate_close@3x.png | Bin 0 -> 1156 bytes 
HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_hangup_highlight@3x.png | Bin 0 -> 7583 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_hangup_normal@3x.png | Bin 0 -> 7467 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_icon_album@2x.png | Bin 0 -> 1238 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_local_default@3x.png | Bin 0 -> 11979 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_medic_graytip@3x.png | Bin 0 -> 5608 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_medic_medic_creat@3x.png | Bin 0 -> 50722 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_medic_redtip@3x.png | Bin 0 -> 5405 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_submitPhone@2x.png | Bin 0 -> 821 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_submitPhone@3x.png | Bin 0 -> 1196 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hhicon_buy_vip@2x.png | Bin 0 -> 1011 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hhicon_buy_vip@3x.png | Bin 0 -> 1931 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hp_card_video_icon.png | Bin 0 -> 769 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_photo_premission@2x.png | Bin 0 -> 55225 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_photo_premission@3x.png | Bin 0 -> 161019 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_photo_premission_en@3x.png | Bin 0 -> 35579 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_address@3x.png | Bin 0 -> 3268 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_document@3x.png | Bin 0 -> 719 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_invitecode@3x.png | Bin 0 -> 2178 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_member@3x.png | Bin 0 -> 2684 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_order@3x.png | Bin 0 -> 1434 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_video@3x.png | Bin 0 -> 1722 bytes 
HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/licence_normal@3x.png | Bin 0 -> 2621 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/license_default@3x.png | Bin 0 -> 2293 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/me_arrow@3x.png | Bin 0 -> 1469 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/meet_disconnect@3x.png | Bin 0 -> 825 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/member@2x.png | Bin 0 -> 1555 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/member@3x.png | Bin 0 -> 2697 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/nim_expand_down@3x.png | Bin 0 -> 6422 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/nim_expand_up@3x.png | Bin 0 -> 6145 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/photo_picker-en.gif | Bin 0 -> 64275 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/photo_picker.gif | Bin 0 -> 82084 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_normal@2x.png | Bin 0 -> 1797 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_normal@3x.png | Bin 0 -> 3353 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_select@2x.png | Bin 0 -> 1173 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_select@3x.png | Bin 0 -> 2142 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/real_name_close@3x.png | Bin 0 -> 2692 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/real_name_pass_add@3x.png | Bin 0 -> 2935 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/upload_fail@3x.png | Bin 0 -> 690 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/voice_volume_1@3x.png | Bin 0 -> 762 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/voice_volume_2@3x.png | Bin 0 -> 925 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/voice_volume_3@3x.png | Bin 0 -> 1102 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wait_one@3x.png | Bin 0 -> 459 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wait_three@3x.png | Bin 0 -> 482 bytes 
HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wait_two@3x.png | Bin 0 -> 495 bytes HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wb_loadfail@3x.png | Bin 0 -> 2583 bytes HHVDoctorSDK/HHSDKVideo.framework/Headers/CGGeometry+RSKImageCropper.h | 95 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHAnimatedImageRep.h | 20 ++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHFaceAuthBridge.h | 34 ++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHImageCache.h | 295 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHImageCacheConfig.h | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHMBProgressHUD.h | 444 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHPhotoPicker.h | 14 ++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHSDKVideo-Swift.h | 2156 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHSDKVideo-umbrella.h | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCoder.h | 119 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCoderHelper.h | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCodersManager.h | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCompat.h | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageDownloader.h | 271 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageDownloaderOperation.h | 125 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageFrame.h | 34 ++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageGIFCoder.h | 23 +++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageImageIOCoder.h | 30 ++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageManager.h | 328 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageOperation.h | 15 +++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImagePrefetcher.h | 112 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageTransition.h | 98 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/NSData+hhImageContentType.h | 42 ++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/NSImage+hhWebCache.h | 23 +++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/ObjectMapper.h | 40 ++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/RSKImageCropVC.h | 262 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/RSKImageCropViewController+Protected.h | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKCameraImageModel.h | 24 ++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKCameraUtil.h | 82 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKImageCropper.h | 29 +++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKImageScrollView.h | 57 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKPHAssetManager.h | 26 ++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKTouchView.h | 31 +++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/UIApplication+RSKImageCropper.h | 39 +++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/UIButton+hhWebCache.h | 255 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+RSKImageCropper.h | 35 +++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+hhForceDecode.h | 17 +++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+hhGIF.h | 25 +++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+hhMultiFormat.h | 30 ++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImageView+hhHighlightedWebCache.h | 94 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImageView+hhWebCache.h | 167 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/UITextView+Placeholder.h | 38 ++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/UIView+hhWebCache.h | 140 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Headers/UIView+hhWebCacheOperation.h | 43 +++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Info.plist | Bin 0 -> 790 
bytes HHVDoctorSDK/HHSDKVideo.framework/MedicItemView.nib/objects-11.0+.nib | Bin 0 -> 4760 bytes HHVDoctorSDK/HHSDKVideo.framework/MedicItemView.nib/runtime.nib | Bin 0 -> 4089 bytes HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/Info.plist | Bin 0 -> 258 bytes HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/LXC-Q0-bne-view-uRS-pr-z6i.nib/objects-11.0+.nib | Bin 0 -> 5313 bytes HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/LXC-Q0-bne-view-uRS-pr-z6i.nib/runtime.nib | Bin 0 -> 5440 bytes HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/UIViewController-LXC-Q0-bne.nib/objects-11.0+.nib | Bin 0 -> 1004 bytes HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/UIViewController-LXC-Q0-bne.nib/runtime.nib | Bin 0 -> 1004 bytes HHVDoctorSDK/HHSDKVideo.framework/MemberListView.nib/objects-11.0+.nib | Bin 0 -> 6835 bytes HHVDoctorSDK/HHSDKVideo.framework/MemberListView.nib/runtime.nib | Bin 0 -> 6900 bytes HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/arm64-apple-ios.swiftsourceinfo | Bin 0 -> 530596 bytes HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/arm64.swiftsourceinfo | Bin 0 -> 530596 bytes HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/x86_64-apple-ios-simulator.swiftsourceinfo | Bin 0 -> 530608 bytes HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/x86_64.swiftsourceinfo | Bin 0 -> 530608 bytes HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64-apple-ios.swiftdoc | Bin 0 -> 249348 bytes HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64-apple-ios.swiftinterface | 5650 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64-apple-ios.swiftmodule | Bin 0 -> 3449176 bytes HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64.swiftdoc | Bin 0 -> 249348 bytes HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64.swiftinterface | 5650 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64.swiftmodule | Bin 0 -> 3449176 bytes HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64-apple-ios-simulator.swiftdoc | Bin 0 -> 249360 bytes HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64-apple-ios-simulator.swiftinterface | 5650 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64-apple-ios-simulator.swiftmodule | Bin 0 -> 3449288 bytes HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64.swiftdoc | Bin 0 -> 249360 bytes HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64.swiftinterface | 5650 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64.swiftmodule | Bin 0 -> 3449288 bytes HHVDoctorSDK/HHSDKVideo.framework/Modules/module.modulemap | 11 +++++++++++ HHVDoctorSDK/HHSDKVideo.framework/ProductTipView.nib/objects-11.0+.nib | Bin 0 -> 3420 bytes HHVDoctorSDK/HHSDKVideo.framework/ProductTipView.nib/runtime.nib | Bin 
0 -> 2755 bytes HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/Info.plist | Bin 0 -> 185 bytes HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/inP-8t-qNB-view-NmI-lS-I7Y.nib/objects-11.0+.nib | Bin 0 -> 13917 bytes HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/inP-8t-qNB-view-NmI-lS-I7Y.nib/runtime.nib | Bin 0 -> 13792 bytes HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/realName.nib/objects-11.0+.nib | Bin 0 -> 1074 bytes HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/realName.nib/runtime.nib | Bin 0 -> 1074 bytes HHVDoctorSDK/HHSDKVideo.framework/RealNameView.nib | Bin 0 -> 903 bytes HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/1yM-In-WzS-view-hCB-L7-FyM.nib | Bin 0 -> 1846 bytes HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/Info.plist | Bin 0 -> 296 bytes HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/UINavigationController-nJR-FK-Nsn.nib | Bin 0 -> 1708 bytes HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/blocked.nib | Bin 0 -> 1012 bytes HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/eYM-0S-bXl-view-yXU-C3-IV4.nib | Bin 0 -> 3444 bytes HHVDoctorSDK/HHSDKVideo.framework/WebBrowser.storyboardc/Info.plist | Bin 0 -> 189 bytes HHVDoctorSDK/HHSDKVideo.framework/WebBrowser.storyboardc/R4r-RW-Ik2-view-kp3-lk-DkN.nib | Bin 0 -> 2040 bytes HHVDoctorSDK/HHSDKVideo.framework/WebBrowser.storyboardc/webbrowser.nib | Bin 0 -> 1007 bytes HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMBadResView.strings | 15 +++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMGoodResView.strings | 6 ++++++ HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMStarView.strings | 12 ++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMTouSuView.strings | 9 +++++++++ HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMTousuResView.strings | 12 ++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/ControlView.strings | 30 ++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/ExpandView.strings | 6 ++++++ 
HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/HHRealNameInputNewView.strings | 27 +++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/Localizable.strings | Bin 0 -> 6122 bytes HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/PhotoGuide.strings | 12 ++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/PhotoPermissionAlert.strings | 6 ++++++ HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/Upload.strings | 3 +++ HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/WaitingView.strings | 15 +++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/comment.strings | 9 +++++++++ HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/HD1-cR-dpx-view-feX-1D-1rH.nib/objects-11.0+.nib | Bin 0 -> 9437 bytes HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/HD1-cR-dpx-view-feX-1D-1rH.nib/runtime.nib | Bin 0 -> 9700 bytes HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/Info.plist | Bin 0 -> 191 bytes HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/Wvh-CQ-zS2-view-abQ-Zf-EZJ.nib/objects-11.0+.nib | Bin 0 -> 5431 bytes HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/Wvh-CQ-zS2-view-abQ-Zf-EZJ.nib/runtime.nib | Bin 0 -> 5429 bytes HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatHome.nib/objects-11.0+.nib | Bin 0 -> 1120 bytes HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatHome.nib/runtime.nib | Bin 0 -> 1120 bytes HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatSetting.nib/objects-11.0+.nib | Bin 0 -> 1048 bytes HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatSetting.nib/runtime.nib | Bin 0 -> 1048 bytes HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/inviteCode.nib/objects-11.0+.nib | Bin 0 -> 1081 bytes HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/inviteCode.nib/runtime.nib | Bin 0 -> 1081 bytes HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/owg-fH-2cD-view-Cej-rh-xMJ.nib/objects-11.0+.nib | Bin 0 -> 74813 bytes HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/owg-fH-2cD-view-Cej-rh-xMJ.nib/runtime.nib | Bin 0 -> 75045 bytes 
HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMBadResView.strings | 15 +++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMGoodResView.strings | 6 ++++++ HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMStarView.strings | 12 ++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMTouSuView.strings | 9 +++++++++ HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMTousuResView.strings | 12 ++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/en.lproj/ControlView.strings | 30 ++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/en.lproj/ExpandView.strings | 6 ++++++ HHVDoctorSDK/HHSDKVideo.framework/en.lproj/HHRealNameInputNewView.strings | 27 +++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/en.lproj/HHRealNameInputView.strings | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/en.lproj/Localizable.strings | Bin 0 -> 8500 bytes HHVDoctorSDK/HHSDKVideo.framework/en.lproj/PhotoGuide.strings | 12 ++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/en.lproj/PhotoPermissionAlert.strings | 6 ++++++ HHVDoctorSDK/HHSDKVideo.framework/en.lproj/Upload.strings | 3 +++ HHVDoctorSDK/HHSDKVideo.framework/en.lproj/WaitingView.strings | 15 +++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/en.lproj/comment.strings | 9 +++++++++ HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/Info.plist | Bin 0 -> 205 bytes HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/addMember.nib | Bin 0 -> 1191 bytes HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/profit.nib | Bin 0 -> 1239 bytes HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/xsQ-4A-MAW-view-Qcf-gy-1uQ.nib | Bin 0 -> 11283 bytes HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMBadResView.strings | 15 +++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMGoodResView.strings | 6 ++++++ HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMStarView.strings | 12 ++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMTouSuView.strings | 9 +++++++++ 
HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMTousuResView.strings | 12 ++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/ControlView.strings | 31 +++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/ExpandView.strings | 6 ++++++ HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/HHRealNameInputNewView.strings | 27 +++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/HHRealNameInputView.strings | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/Localizable.strings | Bin 0 -> 6109 bytes HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/PhotoGuide.strings | 12 ++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/PhotoPermissionAlert.strings | 6 ++++++ HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/Upload.strings | 3 +++ HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/WaitingView.strings | 15 +++++++++++++++ HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/comment.strings | 9 +++++++++ HHVDoctorSDK/SecurityKit.framework/.DS_Store | Bin 6148 -> 0 bytes HHVDoctorSDK/TXFFmpeg.xcframework/.DS_Store | Bin 0 -> 6148 bytes HHVDoctorSDK/TXFFmpeg.xcframework/Info.plist | 40 ++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/.DS_Store | Bin 0 -> 6148 bytes HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/.DS_Store | Bin 0 -> 6148 bytes HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/TXFFmpeg.h | 123 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/ffmpeg_rename_defines.h | 3462 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/ac3_parser.h | 37 +++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/adts_parser.h | 38 ++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/ass_split.h | 331 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/avcodec.h | 6630 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/avdct.h | 85 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/avfft.h | 119 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/bytestream.h | 376 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/d3d11va.h | 113 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/dirac.h | 132 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/dv_profile.h | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/dxva2.h | 93 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/jni.h | 47 +++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/mediacodec.h | 102 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/qsv.h | 108 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/vaapi.h | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/vdpau.h | 177 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/version.h | 146 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/videotoolbox.h | 128 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/vorbis_parser.h | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/xvmc.h | 171 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/avfilter.h | 1169 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/buffersink.h | 166 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/buffersrc.h | 210 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/version.h | 65 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/avc.h | 38 ++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/avformat.h | 3108 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/avio.h | 868 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/internal.h | 807 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/os_support.h | 248 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/url.h | 345 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/version.h | 111 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/adler32.h | 61 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/aes.h | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/aes_ctr.h | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/attributes.h | 167 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/audio_fifo.h | 188 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avassert.h | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avconfig.h | 16 ++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avstring.h | 408 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avutil.h | 366 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/base64.h | 73 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/blowfish.h | 83 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/bprint.h | 220 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/bswap.h | 109 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/buffer.h | 292 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/camellia.h | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/cast5.h | 81 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/channel_layout.h | 233 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/common.h | 561 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/cpu.h | 131 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/crc.h | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/des.h | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/dict.h | 201 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/display.h | 115 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/dovi_meta.h | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/downmix_info.h | 116 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/encryption_info.h | 206 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/error.h | 134 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/eval.h | 114 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/ffversion.h | 5 +++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/fifo.h | 180 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/file.h | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/frame.h | 902 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hash.h | 270 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hmac.h | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext.h | 585 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_cuda.h | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_d3d11va.h | 170 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_drm.h | 169 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_dxva2.h | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_mediacodec.h | 36 ++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_qsv.h | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_vaapi.h | 118 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_vdpau.h | 44 ++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_videotoolbox.h | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/imgutils.h | 278 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/intfloat.h | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/intreadwrite.h | 629 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/lfg.h | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/log.h | 411 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/lzo.h | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/macros.h | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/mastering_display_metadata.h | 129 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/mathematics.h | 243 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/md5.h | 99 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/mem.h | 701 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/motion_vector.h | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/murmur3.h | 121 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/opt.h | 867 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/parseutils.h | 194 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/pixdesc.h | 441 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/pixfmt.h | 542 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/pthread_helper.h | 39 +++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/random_seed.h | 44 ++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/rational.h | 215 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/rc4.h | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/replaygain.h | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/ripemd.h | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/samplefmt.h | 273 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/sha.h | 96 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/sha512.h | 98 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/spherical.h | 233 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/stereo3d.h | 234 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/tea.h | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/threadmessage.h | 116 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/time.h | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/timecode.h 
| 141 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/timestamp.h | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/tree.h | 139 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/twofish.h | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/tx.h | 82 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/version.h | 139 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/xtea.h | 95 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswresample/swresample.h | 581 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswresample/version.h | 45 +++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswscale/swscale.h | 337 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswscale/version.h | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Info.plist | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Modules/module.modulemap | 6 ++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/TXFFmpeg | Bin 0 -> 3090344 bytes HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/.DS_Store | Bin 0 -> 6148 bytes HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/.DS_Store | Bin 0 -> 6148 bytes HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/TXFFmpeg.h | 123 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/ffmpeg_rename_defines.h | 3462 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/ac3_parser.h | 37 +++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/adts_parser.h | 38 ++++++++++++++++++++++++++++++++++++++ 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/ass_split.h | 331 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/avcodec.h | 6630 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/avdct.h | 85 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/avfft.h | 119 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/bytestream.h | 376 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/d3d11va.h | 113 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/dirac.h | 132 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/dv_profile.h | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/dxva2.h | 93 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/jni.h | 47 +++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/mediacodec.h | 102 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/qsv.h | 108 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/vaapi.h | 86 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/vdpau.h | 177 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/version.h | 146 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/videotoolbox.h | 128 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/vorbis_parser.h | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/xvmc.h | 171 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/avfilter.h | 1169 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/buffersink.h | 166 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/buffersrc.h | 210 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/version.h | 65 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/avc.h | 38 ++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/avformat.h | 3108 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/avio.h | 868 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/internal.h | 807 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/os_support.h | 248 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/url.h | 345 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/version.h | 111 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/adler32.h | 61 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/aes.h | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/aes_ctr.h | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/attributes.h | 167 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/audio_fifo.h | 188 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avassert.h | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avconfig.h | 16 ++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avstring.h | 408 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avutil.h | 366 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/base64.h | 73 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/blowfish.h | 83 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/bprint.h | 220 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/bswap.h | 109 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/buffer.h | 292 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/camellia.h | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/cast5.h | 81 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/channel_layout.h | 233 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/common.h | 561 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/cpu.h | 131 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/crc.h | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/des.h | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/dict.h | 201 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/display.h | 115 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/dovi_meta.h | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/downmix_info.h | 116 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/encryption_info.h | 206 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/error.h | 134 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/eval.h | 114 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/ffversion.h | 5 +++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/fifo.h | 180 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/file.h | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/frame.h | 902 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hash.h | 270 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hmac.h | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext.h | 585 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_cuda.h | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_d3d11va.h | 170 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_drm.h | 169 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_dxva2.h | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_mediacodec.h | 36 ++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_qsv.h | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_vaapi.h | 118 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_vdpau.h | 44 ++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_videotoolbox.h | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/imgutils.h | 278 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/intfloat.h | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/intreadwrite.h | 629 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/lfg.h | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/log.h | 411 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/lzo.h | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/macros.h | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/mastering_display_metadata.h | 129 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/mathematics.h | 243 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/md5.h | 99 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/mem.h | 701 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/motion_vector.h | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/murmur3.h | 121 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/opt.h | 867 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/parseutils.h | 194 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/pixdesc.h | 441 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/pixfmt.h | 542 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/pthread_helper.h | 39 +++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/random_seed.h | 44 ++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/rational.h | 215 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/rc4.h | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/replaygain.h | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/ripemd.h | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/samplefmt.h | 273 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/sha.h | 96 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/sha512.h | 98 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/spherical.h | 233 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/stereo3d.h | 234 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/tea.h | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/threadmessage.h | 116 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/time.h | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/timecode.h | 141 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/timestamp.h | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/tree.h | 139 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/twofish.h | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/tx.h | 82 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/version.h | 139 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/xtea.h | 95 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswresample/swresample.h | 581 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswresample/version.h | 45 +++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswscale/swscale.h | 337 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswscale/version.h | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Info.plist | Bin 0 -> 788 bytes HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Modules/module.modulemap | 6 ++++++ HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/TXFFmpeg | Bin 0 -> 1737224 bytes HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/.DS_Store | Bin 0 -> 6148 bytes HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/ITRTCAudioPacketListener.h | 34 ++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloud.h | 2442 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloudDef.h | 2616 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloudDelegate.h | 800 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCStatistics.h | 240 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------------------------------------------------------------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXAudioCustomProcessDelegate.h | 19 ++++++++++--------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXAudioEffectManager.h | 385 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------------------------------------------------------------------------------------------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXAudioRawDataDelegate.h | 8 +------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXBeautyManager.h | 302 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------------------------------------------------------------------------------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXDeviceManager.h | 390 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------------------------------------------------------------------------------------------------------------------------ HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVBuffer.h | 86 +++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVCode.h | 476 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVEncodedDataProcessingListener.h | 97 ++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVSDK.h | 47 ++++++++++++++++++++++++++--------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVSymbolExport.h | 11 +++++++++++ HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveAudioSessionDelegate.h | 21 ++++++++++++++++++--- 
HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveBase.h | 114 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------ HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayConfig.h | 38 +++++++++++++++++++------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayListener.h | 56 +++++++++++++++++++++++++------------------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayer.h | 92 ++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------------ HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveRecordListener.h | 11 +++++------ HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveRecordTypeDef.h | 38 +++++++++++++++++--------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveSDKEventDef.h | 290 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------------------------------------------------------------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveSDKTypeDef.h | 747 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 
HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXVideoCustomProcessDelegate.h | 11 +++-------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveCode.h | 124 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveDef.h | 603 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePlayer.h | 232 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePlayerObserver.h | 144 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePremier.h | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveProperty.h | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITRTCCloud.h | 2919 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITRTCStatistics.h | 225 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITXAudioEffectManager.h | 368 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------------------------------------------------------------------------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITXDeviceManager.h | 617 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TRTCCloudCallback.h | 1106 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TRTCTypeDef.h | 2019 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TXLiteAVCode.h | 476 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Info.plist | Bin 722 -> 0 bytes HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Modules/module.modulemap | 14 ++++---------- HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/TXLiteAVSDK_TRTC | Bin 86850160 -> 0 bytes HHVDoctorSDK/TXSoundTouch.xcframework/.DS_Store | Bin 0 -> 6148 bytes HHVDoctorSDK/TXSoundTouch.xcframework/Info.plist | 40 ++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/.DS_Store | Bin 0 -> 6148 bytes HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/.DS_Store | Bin 0 -> 6148 bytes HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/BPMDetect.h | 205 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/FIFOSampleBuffer.h | 180 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/FIFOSamplePipe.h | 231 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/STTypes.h | 190 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/SoundTouch.h | 349 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/TXSoundTouch.h | 11 +++++++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/soundtouch_config.h | 105 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Info.plist | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Modules/module.modulemap | 6 ++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/TXSoundTouch | Bin 0 -> 151648 bytes HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/.DS_Store | Bin 0 -> 6148 bytes HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/.DS_Store | Bin 0 -> 6148 bytes HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/BPMDetect.h | 
205 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/FIFOSampleBuffer.h | 180 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/FIFOSamplePipe.h | 231 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/STTypes.h | 190 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/SoundTouch.h | 349 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/TXSoundTouch.h | 11 +++++++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/soundtouch_config.h | 105 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 
HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Info.plist | Bin 0 -> 796 bytes HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Modules/module.modulemap | 6 ++++++ HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/TXSoundTouch | Bin 0 -> 69728 bytes HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/arm64-apple-ios.swiftsourceinfo | Bin 21952 -> 0 bytes HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/arm64.swiftsourceinfo | Bin 21952 -> 0 bytes HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/x86_64-apple-ios-simulator.swiftsourceinfo | Bin 21964 -> 0 bytes HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/x86_64.swiftsourceinfo | Bin 21964 -> 0 bytes HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/arm64-apple-ios.swiftmodule | Bin 112164 -> 0 bytes HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/arm64.swiftmodule | Bin 112164 -> 0 bytes HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/x86_64-apple-ios-simulator.swiftmodule | Bin 112288 -> 0 bytes HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/x86_64.swiftmodule | Bin 112288 -> 0 bytes HHVDoctorSDK/hhVDoctorSDK.framework/hhVDoctorSDK | Bin 1725024 -> 0 bytes 674 files changed, 114729 insertions(+), 7798 deletions(-) create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMBadResView.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMBadResView.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMGoodResView.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMGoodResView.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMStarView.nib/objects-11.0+.nib create mode 100644 
HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMStarView.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTouSuView.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTouSuView.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTousuResView.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTousuResView.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/ControlView.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/ExpandView.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputNewView.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputNewView.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputView.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputView.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoGuide.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoGuide.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoPermissionAlert.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoPermissionAlert.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/Info.plist create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/UIViewController-jgq-dG-SDa.nib/objects-12.3+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/UIViewController-jgq-dG-SDa.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/jgq-dG-SDa-view-Xv5-uR-Wlf.nib/objects-12.3+.nib create mode 100644 
HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/jgq-dG-SDa-view-Xv5-uR-Wlf.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/WaitingView.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/Info.plist create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/UINavigationController-0Si-xm-B2v.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/UINavigationController-0Si-xm-B2v.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/wzQ-GF-Pq0-view-cCD-m9-X4Z.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/wzQ-GF-Pq0-view-cCD-m9-X4Z.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/CMQuesCell.nib/objects-12.3+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/CMQuesCell.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/DoctorIntroView.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/DoctorIntroView.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHCodeCellView.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHCodeView.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHMemLoadView.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHMultyVideoView.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/de.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/en.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/es-419.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/fr.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/id.lproj/Localizable.strings create mode 100644 
HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/it.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ja-US.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ko.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ms.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/pt-BR.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ru.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/tr.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/vi.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zh-Hans.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zh-Hant.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_addPhoto@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_addPhoto@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_albumSelect@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_albumSelect@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_arrow_down@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_arrow_down@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_circle@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_circle@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_circle@2x.png create mode 100644 
HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_circle@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_selected@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_selected@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_selected@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_selected@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_unselected@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_unselected@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_close@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_close@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_downArrow@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_downArrow@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navBack@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navBack@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navClose@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navClose@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_retake@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_retake@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right_arrow@2x.png create mode 100644 
HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right_arrow@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_shadow@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_shadow@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_warning@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_warning@3x.png create mode 100755 HHVDoctorSDK/HHSDKVideo.framework/HHSDKVideo create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHSelectMemCell.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHSelectMemView.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHSelectMemView.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HHVideoView.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/SDKConfig.plist create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/addr_close@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/addr_close@3x.png create mode 100755 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/address@2x.png create mode 100755 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/address@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/back_app@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/buyVip@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/buyVip@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/call_default@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/camera_disable@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/camera_flash_close@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/camera_flash_normal@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/card_logo_gold@2x.png create mode 100644 
HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/card_logo_gold@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat9left.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_add_rights@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_arrow.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_arrow@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_arrow@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_mask_b@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_mask_t@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_default@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_default@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_right@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_right@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/close_member@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_arrow_right@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_arrow_right@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_feedback@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_feedback@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/coment_unhappy_s@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/coment_unhappy_s@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy_s@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy_s@3x.png create mode 100644 
HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_unhappy@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_unhappy@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/counter_back@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/counter_back@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cus_shape@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/default_icon@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/delivery_logo2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/delivery_logo@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/doctor_license@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/doctor_license_highlight@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_accept_highlight@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_accept_normal@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_call_mask@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_camera_close@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_camera_open@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_camera_highlight@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_camera_normal@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_doctor@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_doctor_blue@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_voice_highlight@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_voice_normal@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_eveluate_close@2x.png create mode 100644 
HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_eveluate_close@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_hangup_highlight@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_hangup_normal@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_icon_album@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_local_default@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_medic_graytip@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_medic_medic_creat@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_medic_redtip@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_submitPhone@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_submitPhone@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hhicon_buy_vip@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hhicon_buy_vip@3x.png create mode 100755 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hp_card_video_icon.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_photo_premission@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_photo_premission@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_photo_premission_en@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_address@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_document@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_invitecode@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_member@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_order@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_video@3x.png create 
mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/licence_normal@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/license_default@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/me_arrow@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/meet_disconnect@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/member@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/member@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/nim_expand_down@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/nim_expand_up@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/photo_picker-en.gif create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/photo_picker.gif create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_normal@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_normal@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_select@2x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_select@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/real_name_close@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/real_name_pass_add@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/upload_fail@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/voice_volume_1@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/voice_volume_2@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/voice_volume_3@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wait_one@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wait_three@3x.png create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wait_two@3x.png create mode 100644 
HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wb_loadfail@3x.png create mode 100755 HHVDoctorSDK/HHSDKVideo.framework/Headers/CGGeometry+RSKImageCropper.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHAnimatedImageRep.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHFaceAuthBridge.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHImageCache.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHImageCacheConfig.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHMBProgressHUD.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHPhotoPicker.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHSDKVideo-Swift.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHSDKVideo-umbrella.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCoder.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCoderHelper.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCodersManager.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCompat.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageDownloader.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageDownloaderOperation.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageFrame.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageGIFCoder.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageImageIOCoder.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageManager.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageOperation.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImagePrefetcher.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageTransition.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/NSData+hhImageContentType.h create mode 100644 
HHVDoctorSDK/HHSDKVideo.framework/Headers/NSImage+hhWebCache.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/ObjectMapper.h create mode 100755 HHVDoctorSDK/HHSDKVideo.framework/Headers/RSKImageCropVC.h create mode 100755 HHVDoctorSDK/HHSDKVideo.framework/Headers/RSKImageCropViewController+Protected.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKCameraImageModel.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKCameraUtil.h create mode 100755 HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKImageCropper.h create mode 100755 HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKImageScrollView.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKPHAssetManager.h create mode 100755 HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKTouchView.h create mode 100755 HHVDoctorSDK/HHSDKVideo.framework/Headers/UIApplication+RSKImageCropper.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/UIButton+hhWebCache.h create mode 100755 HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+RSKImageCropper.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+hhForceDecode.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+hhGIF.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+hhMultiFormat.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImageView+hhHighlightedWebCache.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImageView+hhWebCache.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/UITextView+Placeholder.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/UIView+hhWebCache.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Headers/UIView+hhWebCacheOperation.h create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Info.plist create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/MedicItemView.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/MedicItemView.nib/runtime.nib create mode 100644 
HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/Info.plist create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/LXC-Q0-bne-view-uRS-pr-z6i.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/LXC-Q0-bne-view-uRS-pr-z6i.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/UIViewController-LXC-Q0-bne.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/UIViewController-LXC-Q0-bne.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/MemberListView.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/MemberListView.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/arm64-apple-ios.swiftsourceinfo create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/arm64.swiftsourceinfo create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/x86_64-apple-ios-simulator.swiftsourceinfo create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/x86_64.swiftsourceinfo create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64-apple-ios.swiftdoc create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64-apple-ios.swiftinterface create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64-apple-ios.swiftmodule create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64.swiftdoc create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64.swiftinterface create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64.swiftmodule create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64-apple-ios-simulator.swiftdoc create mode 
100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64-apple-ios-simulator.swiftinterface create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64-apple-ios-simulator.swiftmodule create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64.swiftdoc create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64.swiftinterface create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64.swiftmodule create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Modules/module.modulemap create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/ProductTipView.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/ProductTipView.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/Info.plist create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/inP-8t-qNB-view-NmI-lS-I7Y.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/inP-8t-qNB-view-NmI-lS-I7Y.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/realName.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/realName.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/RealNameView.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/1yM-In-WzS-view-hCB-L7-FyM.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/Info.plist create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/UINavigationController-nJR-FK-Nsn.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/blocked.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/eYM-0S-bXl-view-yXU-C3-IV4.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/WebBrowser.storyboardc/Info.plist create mode 100644 
HHVDoctorSDK/HHSDKVideo.framework/WebBrowser.storyboardc/R4r-RW-Ik2-view-kp3-lk-DkN.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/WebBrowser.storyboardc/webbrowser.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMBadResView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMGoodResView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMStarView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMTouSuView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMTousuResView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/ControlView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/ExpandView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/HHRealNameInputNewView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/PhotoGuide.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/PhotoPermissionAlert.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/Upload.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/WaitingView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/comment.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/HD1-cR-dpx-view-feX-1D-1rH.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/HD1-cR-dpx-view-feX-1D-1rH.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/Info.plist create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/Wvh-CQ-zS2-view-abQ-Zf-EZJ.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/Wvh-CQ-zS2-view-abQ-Zf-EZJ.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatHome.nib/objects-11.0+.nib create mode 
100644 HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatHome.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatSetting.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatSetting.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/inviteCode.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/inviteCode.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/owg-fH-2cD-view-Cej-rh-xMJ.nib/objects-11.0+.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/owg-fH-2cD-view-Cej-rh-xMJ.nib/runtime.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMBadResView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMGoodResView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMStarView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMTouSuView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMTousuResView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/en.lproj/ControlView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/en.lproj/ExpandView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/en.lproj/HHRealNameInputNewView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/en.lproj/HHRealNameInputView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/en.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/en.lproj/PhotoGuide.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/en.lproj/PhotoPermissionAlert.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/en.lproj/Upload.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/en.lproj/WaitingView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/en.lproj/comment.strings create mode 100644 
HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/Info.plist create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/addMember.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/profit.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/xsQ-4A-MAW-view-Qcf-gy-1uQ.nib create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMBadResView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMGoodResView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMStarView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMTouSuView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMTousuResView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/ControlView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/ExpandView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/HHRealNameInputNewView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/HHRealNameInputView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/Localizable.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/PhotoGuide.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/PhotoPermissionAlert.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/Upload.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/WaitingView.strings create mode 100644 HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/comment.strings create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/.DS_Store create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/Info.plist create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/.DS_Store create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/.DS_Store create mode 100644 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/TXFFmpeg.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/ffmpeg_rename_defines.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/ac3_parser.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/adts_parser.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/ass_split.h create mode 100755 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/avcodec.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/avdct.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/avfft.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/bytestream.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/d3d11va.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/dirac.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/dv_profile.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/dxva2.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/jni.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/mediacodec.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/qsv.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/vaapi.h create mode 100644 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/vdpau.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/version.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/videotoolbox.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/vorbis_parser.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/xvmc.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/avfilter.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/buffersink.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/buffersrc.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/version.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/avc.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/avformat.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/avio.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/internal.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/os_support.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/url.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/version.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/adler32.h create mode 
100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/aes.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/aes_ctr.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/attributes.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/audio_fifo.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avassert.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avconfig.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avstring.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avutil.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/base64.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/blowfish.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/bprint.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/bswap.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/buffer.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/camellia.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/cast5.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/channel_layout.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/common.h create mode 100644 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/cpu.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/crc.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/des.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/dict.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/display.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/dovi_meta.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/downmix_info.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/encryption_info.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/error.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/eval.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/ffversion.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/fifo.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/file.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/frame.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hash.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hmac.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext.h create mode 100644 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_cuda.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_d3d11va.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_drm.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_dxva2.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_mediacodec.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_qsv.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_vaapi.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_vdpau.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_videotoolbox.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/imgutils.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/intfloat.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/intreadwrite.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/lfg.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/log.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/lzo.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/macros.h create mode 100644 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/mastering_display_metadata.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/mathematics.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/md5.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/mem.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/motion_vector.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/murmur3.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/opt.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/parseutils.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/pixdesc.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/pixfmt.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/pthread_helper.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/random_seed.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/rational.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/rc4.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/replaygain.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/ripemd.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/samplefmt.h create mode 
100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/sha.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/sha512.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/spherical.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/stereo3d.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/tea.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/threadmessage.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/time.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/timecode.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/timestamp.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/tree.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/twofish.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/tx.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/version.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/xtea.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswresample/swresample.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswresample/version.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswscale/swscale.h create mode 100644 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswscale/version.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Info.plist create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Modules/module.modulemap create mode 100755 HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/TXFFmpeg create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/.DS_Store create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/.DS_Store create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/TXFFmpeg.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/ffmpeg_rename_defines.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/ac3_parser.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/adts_parser.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/ass_split.h create mode 100755 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/avcodec.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/avdct.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/avfft.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/bytestream.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/d3d11va.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/dirac.h create mode 100644 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/dv_profile.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/dxva2.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/jni.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/mediacodec.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/qsv.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/vaapi.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/vdpau.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/version.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/videotoolbox.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/vorbis_parser.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/xvmc.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/avfilter.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/buffersink.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/buffersrc.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/version.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/avc.h create mode 100644 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/avformat.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/avio.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/internal.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/os_support.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/url.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/version.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/adler32.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/aes.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/aes_ctr.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/attributes.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/audio_fifo.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avassert.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avconfig.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avstring.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avutil.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/base64.h create mode 100644 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/blowfish.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/bprint.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/bswap.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/buffer.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/camellia.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/cast5.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/channel_layout.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/common.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/cpu.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/crc.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/des.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/dict.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/display.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/dovi_meta.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/downmix_info.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/encryption_info.h create mode 100644 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/error.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/eval.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/ffversion.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/fifo.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/file.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/frame.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hash.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hmac.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_cuda.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_d3d11va.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_drm.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_dxva2.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_mediacodec.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_qsv.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_vaapi.h create mode 100644 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_vdpau.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_videotoolbox.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/imgutils.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/intfloat.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/intreadwrite.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/lfg.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/log.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/lzo.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/macros.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/mastering_display_metadata.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/mathematics.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/md5.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/mem.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/motion_vector.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/murmur3.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/opt.h create mode 100644 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/parseutils.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/pixdesc.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/pixfmt.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/pthread_helper.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/random_seed.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/rational.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/rc4.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/replaygain.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/ripemd.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/samplefmt.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/sha.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/sha512.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/spherical.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/stereo3d.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/tea.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/threadmessage.h create mode 100644 
HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/time.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/timecode.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/timestamp.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/tree.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/twofish.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/tx.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/version.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/xtea.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswresample/swresample.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswresample/version.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswscale/swscale.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswscale/version.h create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Info.plist create mode 100644 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Modules/module.modulemap create mode 100755 HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/TXFFmpeg create mode 100644 HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/.DS_Store create mode 100644 HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/ITRTCAudioPacketListener.h create mode 100644 
HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVSymbolExport.h create mode 100644 HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveCode.h create mode 100644 HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveDef.h create mode 100644 HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePlayer.h create mode 100644 HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePlayerObserver.h create mode 100644 HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePremier.h create mode 100644 HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveProperty.h create mode 100644 HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITRTCStatistics.h create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/.DS_Store create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/Info.plist create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/.DS_Store create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/.DS_Store create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/BPMDetect.h create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/FIFOSampleBuffer.h create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/FIFOSamplePipe.h create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/STTypes.h create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/SoundTouch.h create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/TXSoundTouch.h create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/soundtouch_config.h create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Info.plist create mode 100644 
HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Modules/module.modulemap create mode 100755 HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/TXSoundTouch create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/.DS_Store create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/.DS_Store create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/BPMDetect.h create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/FIFOSampleBuffer.h create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/FIFOSamplePipe.h create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/STTypes.h create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/SoundTouch.h create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/TXSoundTouch.h create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/soundtouch_config.h create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Info.plist create mode 100644 HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Modules/module.modulemap create mode 100755 HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/TXSoundTouch diff --git a/HHVDoctorSDK.podspec b/HHVDoctorSDK.podspec index 3e3ddae..bc7dfea 100644 --- a/HHVDoctorSDK.podspec +++ b/HHVDoctorSDK.podspec @@ -1,6 +1,6 @@ Pod::Spec.new do |s| s.name = "HHVDoctorSDK" - s.version = "3.4.0.06201858" + s.version = "3.4.0.06211051" s.summary = "和缓视频医生 SDK" s.description = <<-DESC diff --git 
a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMBadResView.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMBadResView.nib/objects-11.0+.nib new file mode 100644 index 0000000..fa3fcb8 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMBadResView.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMBadResView.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMBadResView.nib/runtime.nib new file mode 100644 index 0000000..601d6ff Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMBadResView.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMGoodResView.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMGoodResView.nib/objects-11.0+.nib new file mode 100644 index 0000000..33fc218 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMGoodResView.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMGoodResView.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMGoodResView.nib/runtime.nib new file mode 100644 index 0000000..daaae4c Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMGoodResView.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMStarView.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMStarView.nib/objects-11.0+.nib new file mode 100644 index 0000000..bdeb8a2 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMStarView.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMStarView.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMStarView.nib/runtime.nib new file mode 100644 index 0000000..9911ab0 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMStarView.nib/runtime.nib differ diff --git 
a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTouSuView.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTouSuView.nib/objects-11.0+.nib new file mode 100644 index 0000000..a1ad02c Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTouSuView.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTouSuView.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTouSuView.nib/runtime.nib new file mode 100644 index 0000000..a08f410 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTouSuView.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTousuResView.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTousuResView.nib/objects-11.0+.nib new file mode 100644 index 0000000..c1d7693 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTousuResView.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTousuResView.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTousuResView.nib/runtime.nib new file mode 100644 index 0000000..dae3b8b Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/CMTousuResView.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/ControlView.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/ControlView.nib new file mode 100644 index 0000000..bb63695 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/ControlView.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/ExpandView.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/ExpandView.nib new file mode 100644 index 0000000..2ac2783 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/ExpandView.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputNewView.nib/objects-11.0+.nib 
b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputNewView.nib/objects-11.0+.nib new file mode 100644 index 0000000..333d153 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputNewView.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputNewView.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputNewView.nib/runtime.nib new file mode 100644 index 0000000..01d579c Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputNewView.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputView.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputView.nib/objects-11.0+.nib new file mode 100644 index 0000000..288cea5 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputView.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputView.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputView.nib/runtime.nib new file mode 100644 index 0000000..8bf84bc Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/HHRealNameInputView.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoGuide.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoGuide.nib/objects-11.0+.nib new file mode 100644 index 0000000..baaf656 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoGuide.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoGuide.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoGuide.nib/runtime.nib new file mode 100644 index 0000000..7deb0b5 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoGuide.nib/runtime.nib differ diff --git 
a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoPermissionAlert.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoPermissionAlert.nib/objects-11.0+.nib new file mode 100644 index 0000000..9fe10ae Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoPermissionAlert.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoPermissionAlert.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoPermissionAlert.nib/runtime.nib new file mode 100644 index 0000000..a6ede70 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/PhotoPermissionAlert.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/Info.plist b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/Info.plist new file mode 100644 index 0000000..51d58e6 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/Info.plist differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/UIViewController-jgq-dG-SDa.nib/objects-12.3+.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/UIViewController-jgq-dG-SDa.nib/objects-12.3+.nib new file mode 100644 index 0000000..230fb10 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/UIViewController-jgq-dG-SDa.nib/objects-12.3+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/UIViewController-jgq-dG-SDa.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/UIViewController-jgq-dG-SDa.nib/runtime.nib new file mode 100644 index 0000000..0f471a1 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/UIViewController-jgq-dG-SDa.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/jgq-dG-SDa-view-Xv5-uR-Wlf.nib/objects-12.3+.nib 
b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/jgq-dG-SDa-view-Xv5-uR-Wlf.nib/objects-12.3+.nib new file mode 100644 index 0000000..3acacbb Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/jgq-dG-SDa-view-Xv5-uR-Wlf.nib/objects-12.3+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/jgq-dG-SDa-view-Xv5-uR-Wlf.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/jgq-dG-SDa-view-Xv5-uR-Wlf.nib/runtime.nib new file mode 100644 index 0000000..2979322 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/Upload.storyboardc/jgq-dG-SDa-view-Xv5-uR-Wlf.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/WaitingView.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/WaitingView.nib new file mode 100644 index 0000000..a2fb200 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/WaitingView.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/Info.plist b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/Info.plist new file mode 100644 index 0000000..9d81ebb Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/Info.plist differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/UINavigationController-0Si-xm-B2v.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/UINavigationController-0Si-xm-B2v.nib/objects-11.0+.nib new file mode 100644 index 0000000..ee25e57 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/UINavigationController-0Si-xm-B2v.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/UINavigationController-0Si-xm-B2v.nib/runtime.nib 
b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/UINavigationController-0Si-xm-B2v.nib/runtime.nib new file mode 100644 index 0000000..6269bd3 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/UINavigationController-0Si-xm-B2v.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/wzQ-GF-Pq0-view-cCD-m9-X4Z.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/wzQ-GF-Pq0-view-cCD-m9-X4Z.nib/objects-11.0+.nib new file mode 100644 index 0000000..dde2e7b Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/wzQ-GF-Pq0-view-cCD-m9-X4Z.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/wzQ-GF-Pq0-view-cCD-m9-X4Z.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/wzQ-GF-Pq0-view-cCD-m9-X4Z.nib/runtime.nib new file mode 100644 index 0000000..3953781 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Base.lproj/comment.storyboardc/wzQ-GF-Pq0-view-cCD-m9-X4Z.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/CMQuesCell.nib/objects-12.3+.nib b/HHVDoctorSDK/HHSDKVideo.framework/CMQuesCell.nib/objects-12.3+.nib new file mode 100644 index 0000000..6cf5441 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/CMQuesCell.nib/objects-12.3+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/CMQuesCell.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/CMQuesCell.nib/runtime.nib new file mode 100644 index 0000000..04e89da Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/CMQuesCell.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/DoctorIntroView.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/DoctorIntroView.nib/objects-11.0+.nib new file mode 100644 index 0000000..73701c7 Binary files /dev/null and 
b/HHVDoctorSDK/HHSDKVideo.framework/DoctorIntroView.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/DoctorIntroView.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/DoctorIntroView.nib/runtime.nib new file mode 100644 index 0000000..3295ac3 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/DoctorIntroView.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHCodeCellView.nib b/HHVDoctorSDK/HHSDKVideo.framework/HHCodeCellView.nib new file mode 100644 index 0000000..259b7aa Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHCodeCellView.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHCodeView.nib b/HHVDoctorSDK/HHSDKVideo.framework/HHCodeView.nib new file mode 100644 index 0000000..706e223 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHCodeView.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHMemLoadView.nib b/HHVDoctorSDK/HHSDKVideo.framework/HHMemLoadView.nib new file mode 100644 index 0000000..abfe4b5 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHMemLoadView.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHMultyVideoView.nib b/HHVDoctorSDK/HHSDKVideo.framework/HHMultyVideoView.nib new file mode 100644 index 0000000..9cb8f8f Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHMultyVideoView.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/de.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/de.lproj/Localizable.strings new file mode 100644 index 0000000..d396f37 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/de.lproj/Localizable.strings @@ -0,0 +1,70 @@ +"previewCamera" = "Kamera"; +"previewCameraRecord" = "Aufzeichnung"; +"previewAlbum" = "Album"; +"cancel" = "Stornieren"; + +"originalPhoto" = "Vollbild"; +"done" = "Erledigt"; +"ok" = "in Ordnung"; +"editFinish" = "Fertig"; + +"back" = "Zurück"; +"edit" = 
"Bearbeiten"; +"revert" = "Rückgängig"; +"brightness" = "Helligkeit"; +"contrast" = "Kontrast"; +"saturation" = "Sättigung"; + +"photo" = "Fotos"; +"preview" = "Vorschau"; + +"noPhotoTips" = "Keine Fotos"; +"notAllowMixSelect" = "Video kann nicht ausgewählt werden"; + +"loading" = "Laden, bitte warten"; +"hudLoading" = "warten..."; + +"exceededMaxSelectCount" = "Maximale Auswahlanzahl: %ld"; +"longerThanMaxVideoDuration" = "Video mit einer Dauer von mehr als %lds kann nicht ausgewählt werden"; +"shorterThanMaxVideoDuration" = "Video mit einer Dauer von weniger als %lds kann nicht ausgewählt werden"; +"exceededMaxVideoSelectCount" = "Video max Auswahlanzahl: %ld"; +"lessThanMinVideoSelectCount" = "Video min Auswahlanzahl: %ld"; + +"noCameraAuthority" = "Bitte erlauben Sie %@, auf die Kamera Ihres Geräts unter \"Einstellungen\" > \"Datenschutz\" > \"Kamera\" zuzugreifen"; +"noPhotoLibratyAuthority" = "Bitte erlauben Sie %@, auf Ihr Album unter \"Einstellungen\" > \"Datenschutz\" > \"Fotos\" zuzugreifen"; +"noMicrophoneAuthority" = "Audio kann nicht aufgenommen werden. 
Gehen Sie zu \"Einstellungen\" > \"%@\" und aktivieren Sie den Mikrofonzugriff."; +"cameraUnavailable" = "Kamera ist nicht verfügbar"; +"keepRecording" = "Aufnahme behalten"; +"gotoSettings" = "Zu Einstellungen wechseln"; + +"iCloudVideoLoadFaild" = "Synchronisierung von iCloud nicht möglich"; +"imageLoadFailed" = "Laden fehlgeschlagen"; + +"save" = "Sparen"; +"saveImageError" = "Das Bild konnte nicht gespeichert werden"; +"saveVideoError" = "Das Video konnte nicht gespeichert werden"; +"timeout" = "Zeitüberschreitung der Anforderung"; + +"customCameraTips" = "Tippen, um Fotos aufzunehmen und halten, um ein Video aufzunehmen"; +"customCameraTakePhotoTips" = "Tippen, um ein Foto aufzunehmen"; +"customCameraRecordVideoTips" = "Halten Sie gedrückt, um ein Video aufzunehmen"; +"minRecordTimeTips" = "Nehmen Sie mindestens %lds auf"; + +"cameraRoll" = "Letzte"; +"panoramas" = "Panoramen"; +"videos" = "Videos"; +"favorites" = "Favoriten"; +"timelapses" = "Zeitraffer"; +"recentlyAdded" = "Kürzlich hinzugefügt"; +"bursts" = "Serien"; +"slomoVideos" = "Slo-Mo"; +"selfPortraits" = "Selfies"; +"screenshots" = "Bildschirmfotos"; +"depthEffect" = "Porträt"; +"livePhotos" = "Live Photos"; +"animated" = "Animiert"; +"myPhotoStream" = "Mein Fotostream"; + +"noTitleAlbumListPlaceholder" = "Alle Fotos"; +"unableToAccessAllPhotos" = "Zugriff auf alle Fotos im Album nicht möglich.\nZugriff auf \"Alle Fotos\" unter \"Fotos\" zulassen."; +"textStickerRemoveTips" = "Zum Entfernen hierher ziehen"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/en.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/en.lproj/Localizable.strings new file mode 100644 index 0000000..e91575a --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/en.lproj/Localizable.strings @@ -0,0 +1,70 @@ +"previewCamera" = "Camera"; +"previewCameraRecord" = "Record"; +"previewAlbum" = "Album"; +"cancel" = "Cancel"; + +"originalPhoto" = "Full Image"; 
+"done" = "Done"; +"ok" = "OK"; +"editFinish" = "Done"; + +"back" = "Back"; +"edit" = "Edit"; +"revert" = "Undo"; +"brightness" = "Brightness"; +"contrast" = "Contrast"; +"saturation" = "Saturation"; + +"photo" = "Photos"; +"preview" = "Preview"; + +"noPhotoTips" = "No Photos"; +"notAllowMixSelect" = "Unable to select video"; + +"loading" = "loading, waiting please"; +"hudLoading" = "waiting..."; + +"exceededMaxSelectCount" = "Max count for selection: %ld"; +"longerThanMaxVideoDuration" = "Unable to select video with a duration longer than %lds"; +"shorterThanMaxVideoDuration" = "Unable to select video with a duration shorter than %lds"; +"exceededMaxVideoSelectCount" = "Max count for video selection: %ld"; +"lessThanMinVideoSelectCount" = "Min count for video selection: %ld"; + +"noCameraAuthority" = "Please allow %@ to access your device's camera in \"Settings\" > \"Privacy\" > \"Camera\""; +"noPhotoLibratyAuthority" = "Please allow %@ to access your album in \"Settings\" > \"Privacy\" > \"Photos\""; +"noMicrophoneAuthority" = "Unable to record audio. 
Go to \"Settings\" > \"%@\" and enable microphone access."; +"cameraUnavailable" = "Camera is unavailable"; +"keepRecording" = "Keep Recording"; +"gotoSettings" = "Go to Settings"; + +"iCloudVideoLoadFaild" = "Unable to sync from iCloud"; +"imageLoadFailed" = "loading failed"; + +"save" = "Save"; +"saveImageError" = "Failed to save the image"; +"saveVideoError" = "Failed to save the video"; +"timeout" = "Request timed out"; + +"customCameraTips" = "Tap to take photo and hold to record video"; +"customCameraTakePhotoTips" = "Tap to take photo"; +"customCameraRecordVideoTips" = "Hold to record video"; +"minRecordTimeTips" = "Record at least %lds"; + +"cameraRoll" = "Recents"; +"panoramas" = "Panoramas"; +"videos" = "Videos"; +"favorites" = "Favorites"; +"timelapses" = "Time-Lapse"; +"recentlyAdded" = "Recently Added"; +"bursts" = "Bursts"; +"slomoVideos" = "Slo-mo"; +"selfPortraits" = "Selfies"; +"screenshots" = "Screenshots"; +"depthEffect" = "Portrait"; +"livePhotos" = "Live Photos"; +"animated" = "Animated"; +"myPhotoStream" = "My Photo Stream"; + +"noTitleAlbumListPlaceholder" = "All Photos"; +"unableToAccessAllPhotos" = "Unable to access all photos in the album.\nAllow access to \"All Photos\" in \"Photos\"."; +"textStickerRemoveTips" = "Drag here to remove"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/es-419.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/es-419.lproj/Localizable.strings new file mode 100644 index 0000000..d3047a2 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/es-419.lproj/Localizable.strings @@ -0,0 +1,70 @@ +"previewCamera" = "Cámara"; +"previewCameraRecord" = "Grabar"; +"previewAlbum" = "Álbum"; +"cancel" = "Cancelar"; + +"originalPhoto" = "Imagen completa"; +"done" = "Hecho"; +"ok" = "OK"; +"editFinish" = "Hecho"; + +"back" = "Volver"; +"edit" = "Editar"; +"revert" = "Deshacer"; +"brightness" = "Brillo"; +"contrast" = "Contraste"; +"saturation" = 
"Saturación"; + +"photo" = "Fotos"; +"preview" = "Vista previa"; + +"noPhotoTips" = "No hay fotos"; +"notAllowMixSelect" = "No se puede seleccionar el vídeo"; + +"loading" = "cargando, por favor espera"; +"hudLoading" = "cargando..."; + +"exceededMaxSelectCount" = "Número máximo para la selección: %ld"; +"longerThanMaxVideoDuration" = "No se puede seleccionar un vídeo con una duración superior a %lds"; +"shorterThanMaxVideoDuration" = "No se puede seleccionar un vídeo con una duración inferior a %lds"; +"exceededMaxVideoSelectCount" = "Número máximo para la selección de vídeos: %ld"; +"lessThanMinVideoSelectCount" = "Número mínimo para la selección de vídeos: %ld"; + +"noCameraAuthority" = "Permite que %@ acceda a la cámara de tu dispositivo en \"Ajustes\" > \"Privacidad\" > \"Cámara\""; +"noPhotoLibratyAuthority" = "Permita que %@ acceda a su álbum en \"Configuración\" > \"Privacidad\" > \"Fotos\""; +"noMicrophoneAuthority" = "No se puede grabar audio. Ve a \"Ajustes\" > \"%@\" y activa el acceso al micrófono."; +"cameraUnavailable" = "La cámara no está disponible"; +"keepRecording" = "Continuar Grabando"; +"gotoSettings" = "Ir a Ajustes"; + +"iCloudVideoLoadFaild" = "No se puede sincronizar desde iCloud"; +"imageLoadFailed" = "carga fallida"; + +"save" = "Guardar"; +"saveImageError" = "No se ha podido guardar la imagen"; +"saveVideoError" = "No se ha podido guardar el vídeo"; +"timeout" = "La solicitud se ha vencido"; + +"customCameraTips" = "Toca para tomar una foto y mantén pulsado para grabar un vídeo"; +"customCameraTakePhotoTips" = "Toca para tomar una foto"; +"customCameraRecordVideoTips" = "Mantén pulsado para grabar vídeo"; +"minRecordTimeTips" = "Grabar al menos %lds"; + +"cameraRoll" = "Recientes"; +"panoramas" = "Panoramas"; +"videos" = "Vídeos"; +"favorites" = "Favoritos"; +"timelapses" = "Lapso de Tiempo"; +"recentlyAdded" = "Añadido recientemente"; +"bursts" = "Ráfagas"; +"slomoVideos" = "Cámara lenta"; +"selfPortraits" = "Selfies"; +"screenshots" = 
"Capturas de pantalla"; +"depthEffect" = "Retrato"; +"livePhotos" = "Fotos en vivo"; +"animated" = "Animado"; +"myPhotoStream" = "Mi flujo de fotos"; + +"noTitleAlbumListPlaceholder" = "Todas las fotos"; +"unableToAccessAllPhotos" = "No se puede acceder a todas las fotos del álbum.\nPermite el acceso a \"Todas las fotos\" en \"Fotos\"."; +"textStickerRemoveTips" = "Arrastra aquí para eliminar"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/fr.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/fr.lproj/Localizable.strings new file mode 100644 index 0000000..0e22731 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/fr.lproj/Localizable.strings @@ -0,0 +1,70 @@ +"previewCamera" = "Caméra"; +"previewCameraRecord" = "Record"; +"previewAlbum" = "Album"; +"cancel" = "Annuler"; + +"originalPhoto" = "image complète"; +"done" = "Terminé"; +"ok" = "D'accord"; +"editFinish" = "Terminé"; + +"back" = "Retour"; +"edit" = "Modifier"; +"revert" = "Annuler"; +"brightness" = "Luminosité"; +"contrast" = "Contraste"; +"saturation" = "Saturation"; + +"photo" = "Photos"; +"preview" = "Aperçu"; + +"noPhotoTips" = "Pas de photos"; +"notAllowMixSelect" = "Impossible de sélectionner la vidéo"; + +"loading" = "chargement, attente s'il vous plaît"; +"hudLoading" = "attendre..."; + +"exceededMaxSelectCount" = "Nombre maximal de sélections: %ld"; +"longerThanMaxVideoDuration" = "Impossible de sélectionner une vidéo d'une durée supérieure à %lds"; +"shorterThanMaxVideoDuration" = "Impossible de sélectionner une vidéo d'une durée inférieure à %lds"; +"exceededMaxVideoSelectCount" = "Nombre maximal de sélections vidéo: %ld"; +"lessThanMinVideoSelectCount" = "Nombre minimal de sélection de vidéo: %ld"; + +"noCameraAuthority" = "Veuillez autoriser %@ à accéder à la caméra de votre appareil dans \"Paramètres\" > \"Confidentialité\" > \"Caméra\""; +"noPhotoLibratyAuthority" = "Veuillez autoriser %@ à accéder à votre album 
dans \"Paramètres\" > \"Confidentialité\" > \"Photos\""; +"noMicrophoneAuthority" = "Impossible d'enregistrer le son. Rendez-vous dans « Paramètres >> > << %@ » et activez l'accès au microphone."; +"cameraUnavailable" = "La caméra n'est pas disponible"; +"keepRecording" = "Continuer à enregistrer"; +"gotoSettings" = "Accéder à Paramètres"; + +"iCloudVideoLoadFaild" = "Impossible de synchroniser depuis iCloud"; +"imageLoadFailed" = "chargement échoué"; + +"save" = "Enregistrer"; +"saveImageError" = "Échec de l'enregistrement de l'image"; +"saveVideoError" = "Échec de l'enregistrement de la vidéo"; +"timeout" = "La demande a expiré"; + +"customCameraTips" = "Maintenez la pression sur pour enregistrer"; +"customCameraTakePhotoTips" = "Appuyez pour prendre une photo"; +"customCameraRecordVideoTips" = "Maintenez enfoncé pour enregistrer une vidéo"; +"minRecordTimeTips" = "Enregistrez au moins %lds"; + +"cameraRoll" = "Récents"; +"panoramas" = "Panoramas"; +"videos" = "Vidéos"; +"favorites" = "Favorites"; +"timelapses" = "Accéléré"; +"recentlyAdded" = "Récemment ajouté"; +"bursts" = "Rafales"; +"slomoVideos" = "Ralentis"; +"selfPortraits" = "Selfies"; +"screenshots" = "Captures d'écran"; +"depthEffect" = "Portrait"; +"livePhotos" = "Live Photos"; +"animated" = "Animations"; +"myPhotoStream" = "Mon flux de photos"; + +"noTitleAlbumListPlaceholder" = "Toutes les photos"; +"unableToAccessAllPhotos" = "Impossible d'accéder à toutes les photos de l'album.\nAutorisez l'accès à « Toutes les photos » dans « Photos »."; +"textStickerRemoveTips" = "Faites glisser ici pour supprimer"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/id.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/id.lproj/Localizable.strings new file mode 100644 index 0000000..4dad6ef --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/id.lproj/Localizable.strings @@ -0,0 +1,70 @@ +"previewCamera" = "Kamera"; +"previewCameraRecord" = 
"Merekam"; +"previewAlbum" = "Album"; +"cancel" = "Membatalkan"; + +"originalPhoto" = "Gambar Penuh"; +"done" = "Selesai"; +"ok" = "Oke"; +"editFinish" = "Selesai"; + +"back" = "Kembali"; +"edit" = "Edit"; +"revert" = "Batalkan"; +"brightness" = "Kecerahan"; +"contrast" = "Kontras"; +"saturation" = "Saturasi"; + +"photo" = "Foto"; +"preview" = "Pratinjau"; + +"noPhotoTips" = "Tidak ada fotos"; +"notAllowMixSelect" = "Tidak dapat memilih video"; + +"loading" = "sedang memuat, tolong tunggu"; +"hudLoading" = "menunggu..."; + +"exceededMaxSelectCount" = "Jumlah maksimum untuk seleksi: %ld"; +"longerThanMaxVideoDuration" = "Tidak dapat memilih video dengan durasi lebih dari %ld detik"; +"shorterThanMaxVideoDuration" = "Tidak dapat memilih video dengan durasi lebih pendek dari %ld detik"; +"exceededMaxVideoSelectCount" = "Jumlah maksimum untuk pemilihan video: %ld"; +"lessThanMinVideoSelectCount" = "Jumlah minimum untuk pemilihan video: %ld"; + +"noCameraAuthority" = "Izinkan akses kamera di \"Pengaturan\" > \"%@\" iPhone Anda."; +"noPhotoLibratyAuthority" = "Izinkan %@ mengakses album Anda di \"Setelan\" > \"Privasi\" > \"Foto\""; +"noMicrophoneAuthority" = "Tidak dapat merekam audio. 
Buka \"Setelan\" > \"%@\" dan aktifkan akses mikrofon."; +"cameraUnavailable" = "Kamera tidak tersedia"; +"keepRecording" = "Terus Merekam"; +"gotoSettings" = "Pergi ke pengaturan"; + +"iCloudVideoLoadFaild" = "Tidak dapat menyinkronkan dari iCloud"; +"imageLoadFailed" = "Gagal Memuat"; + +"save" = "Menghemat"; +"saveImageError" = "Gagal menyimpan gambar"; +"saveVideoError" = "Gagal menyimpan video"; +"timeout" = "Waktu permintaan habis"; + +"customCameraTips" = "Ketuk untuk mengambil video dan tahan untuk merekam"; +"customCameraTakePhotoTips" = "Ketuk untuk mengambil foto"; +"customCameraRecordVideoTips" = "Tahan untuk merekam video"; +"minRecordTimeTips" = "Merekam setidaknya %ld detik"; + +"cameraRoll" = "Terbaru"; +"panoramas" = "Panorama"; +"videos" = "Video"; +"favorites" = "Favorit"; +"timelapses" = "Selang Waktu"; +"recentlyAdded" = "Terkini"; +"bursts" = "Foto Beruntun"; +"slomoVideos" = "Slo-mo"; +"selfPortraits" = "Selfie"; +"screenshots" = "Jepretan Layer"; +"depthEffect" = "Potret"; +"livePhotos" = "Live Photos"; +"animated" = "Animasi"; +"myPhotoStream" = "Aliran Foto Saya"; + +"noTitleAlbumListPlaceholder" = "Semua Foto"; +"unableToAccessAllPhotos" = "Tidak dapat mengakses semua foto dalam album.\nIzinkan akses ke \"Semua Foto\" di \"Foto\"."; +"textStickerRemoveTips" = "Seret ke sini untuk menghapus"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/it.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/it.lproj/Localizable.strings new file mode 100644 index 0000000..50a41ef --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/it.lproj/Localizable.strings @@ -0,0 +1,70 @@ +"previewCamera" = "Fotocamera"; +"previewCameraRecord" = "Disco"; +"previewAlbum" = "Immagini"; +"cancel" = "Annulla"; + +"originalPhoto" = "Immagine completa"; +"done" = "Fine"; +"ok" = "OK"; +"editFinish" = "Fine"; + +"back" = "Indietro"; +"edit" = "Modifica"; +"revert" = "Annulla"; +"brightness" = 
"Luminosità"; +"contrast" = "Contrasto"; +"saturation" = "Saturazione"; + +"photo" = "Fotografie"; +"preview" = "Anteprima"; + +"noPhotoTips" = "Niente fotos"; +"notAllowMixSelect" = "Impossibile selezionare il video"; + +"loading" = "Caricamento, in attesa per favore"; +"hudLoading" = "in attesa..."; + +"exceededMaxSelectCount" = "Conteggio massimo per la selezione: %ld"; +"longerThanMaxVideoDuration" = "Impossibile selezionare video con una durata superiore a %lds"; +"shorterThanMaxVideoDuration" = "Impossibile selezionare video con una durata inferiore a %lds"; +"exceededMaxVideoSelectCount" = "Conteggio massimo per la selezione dei video: %ld"; +"lessThanMinVideoSelectCount" = "Conteggio minimo per la selezione del video: %ld"; + +"noCameraAuthority" = "Consenti a %@ di accedere alla fotocamera del tuo dispositivo in \"Impostazioni\" > \"Privacy\" > \"Fotocamera\""; +"noPhotoLibratyAuthority" = "Consenti a %@ di accedere al tuo album in \"Impostazioni\" > \"Privacy\" > \"Foto\""; +"noMicrophoneAuthority" = "Impossibile registrare I'audio. 
Vai a \"Impostazioni\" > \"%@\" e attiva I'accesso al microfono"; +"cameraUnavailable" = "La fotocamera non è disponibile"; +"keepRecording" = "Continua a registrare"; +"gotoSettings" = "Vai a lmpostazioni"; + +"iCloudVideoLoadFaild" = "Impossibile sincronizzare da iCloud"; +"imageLoadFailed" = "Caricamento fallito"; + +"save" = "Salva"; +"saveImageError" = "Impossibile salvare l'immagine"; +"saveVideoError" = "Impossibile salvare il video"; +"timeout" = "Tempo scaduto per la richiesta"; + +"customCameraTips" = "Toccare per scattare e tiene premuto per registrare"; +"customCameraTakePhotoTips" = "Tocca per scattare una foto"; +"customCameraRecordVideoTips" = "Tieni premuto per registrare il video"; +"minRecordTimeTips" = "Registra almeno %lds"; + +"cameraRoll" = "Recenti"; +"panoramas" = "Panoramiche"; +"videos" = "Video"; +"favorites" = "Preferiti"; +"timelapses" = "Time-lapse"; +"recentlyAdded" = "Aggiunto recentemente"; +"bursts" = "Sequenze"; +"slomoVideos" = "Slow motion"; +"selfPortraits" = "Selfie"; +"screenshots" = "Istantanee"; +"depthEffect" = "Ritratti"; +"livePhotos" = "Live Photo"; +"animated" = "Animazioni"; +"myPhotoStream" = "II mio streaming foto"; + +"noTitleAlbumListPlaceholder" = "Tutte le foto"; +"unableToAccessAllPhotos" = "Impossibile accedere a tutte le foto nell'album.\nConsenti l'accesso a \"Tutte le foto\" in \"Foto\""; +"textStickerRemoveTips" = "Trascina qui per rimuovere"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ja-US.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ja-US.lproj/Localizable.strings new file mode 100644 index 0000000..a34b74c --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ja-US.lproj/Localizable.strings @@ -0,0 +1,70 @@ +"previewCamera" = "カメラ"; +"previewCameraRecord" = "撮影"; +"previewAlbum" = "アルバム"; +"cancel" = "キャンセル"; + +"originalPhoto" = "完全な画像"; +"done" = "確定"; +"ok" = "確定"; +"editFinish" = "完了"; + +"back" = "戻る"; +"edit" = 
"編集"; +"revert" = "元に戻す"; +"brightness" = "明るさ"; +"contrast" = "コントラスト"; +"saturation" = "飽和"; + +"photo" = "写真"; +"preview" = "プレビュー"; + +"noPhotoTips" = "写真でない"; +"notAllowMixSelect" = "ビデオを選択できません"; + +"loading" = "ロード中,お待ち下さい"; +"hudLoading" = "後ほど..."; + +"exceededMaxSelectCount" = "最大選択数: %ld"; +"longerThanMaxVideoDuration" = "%ldsより長い動画は選択できません"; +"shorterThanMaxVideoDuration" = "%ldsより短い動画は選択できません"; +"exceededMaxVideoSelectCount" = "動画の最大選択数: %ld"; +"lessThanMinVideoSelectCount" = "動画の最小選択数: %ld"; + +"noCameraAuthority" = "「設定」>「プライパシー」>「カメラ」から、%@があなたのデバイスのカメラにアクセスする許可をしてください"; +"noPhotoLibratyAuthority" = "%@があなたのアルバムにアクセスするには「設定」>「プライバシー」>「写真」"; +"noMicrophoneAuthority" = "音声を録音できません。「設定」 >「%@」に移動し、マイクへのアクセスを有効にしてください。"; +"cameraUnavailable" = "カメラは利用できません"; +"keepRecording" = "撮影を続ける"; +"gotoSettings" = "設定に移動"; + +"iCloudVideoLoadFaild" = "iCloudから同期できません"; +"imageLoadFailed" = "ロード失敗"; + +"save" = "セーブ"; +"saveImageError" = "画像の保存に失敗しました"; +"saveVideoError" = "ビデオの保存に失敗しました"; +"timeout" = "要求タイムアウト"; + +"customCameraTips" = "タップして撮影、長押しで記録"; +"customCameraTakePhotoTips" = "タップして撮影"; +"customCameraRecordVideoTips" = "長押しで記録"; +"minRecordTimeTips" = "%lds以上記録する"; + +"cameraRoll" = "最近の項目"; +"panoramas" = "パノラマ"; +"videos" = "ビデオ"; +"favorites" = "お気に入り"; +"timelapses" = "タイムラプス"; +"recentlyAdded" = "最後に追加した項目"; +"bursts" = "バースト"; +"slomoVideos" = "スローモーション"; +"selfPortraits" = "セルフイー"; +"screenshots" = "スクリーンショット"; +"depthEffect" = "ポートレート"; +"livePhotos" = "Live Photos"; +"animated" = "アニメーション"; +"myPhotoStream" = "マイフォトストリーム"; + +"noTitleAlbumListPlaceholder" = "画像すべて"; +"unableToAccessAllPhotos" = "アルバム内のすべての写真にアクセスできません。\n「写真」内の「すべての写真」 へのアクセスを許可してください。"; +"textStickerRemoveTips" = "ここにドラッグして削除します"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ko.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ko.lproj/Localizable.strings new file mode 100644 index 0000000..d6254f6 --- /dev/null +++ 
b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ko.lproj/Localizable.strings @@ -0,0 +1,70 @@ +"previewCamera" = "카메라"; +"previewCameraRecord" = "기록"; +"previewAlbum" = "이미지"; +"cancel" = "취소"; + +"originalPhoto" = "전체 이미지"; +"done" = "확인"; +"ok" = "확인"; +"editFinish" = "완료"; + +"back" = "뒤"; +"edit" = "편집"; +"revert" = "실행 취소"; +"brightness" = "밝기"; +"contrast" = "대비"; +"saturation" = "채도"; + +"photo" = "사진"; +"preview" = "미리 보기"; + +"noPhotoTips" = "사진 없음"; +"notAllowMixSelect" = "비디오를 선택할 수 없습니다"; + +"loading" = "로딩 중... 기다려주세요"; +"hudLoading" = "기다리는 중..."; + +"exceededMaxSelectCount" = "최대 선택 수: %ld"; +"longerThanMaxVideoDuration" = "길이가 %ld 초 보다 긴 동영상을 선택할 수 없습니다"; +"shorterThanMaxVideoDuration" = "기간이 %ld 초 보다 짧은 비디오를 선택할 수 없습니다"; +"exceededMaxVideoSelectCount" = "동영상 최대 선택 수: %ld"; +"lessThanMinVideoSelectCount" = "동영상 최소 선택 횟수: %ld"; + +"noCameraAuthority" = "%@ 에서 장치의 카메라에 액세스하도록 허용하십시오 에서 \"설정\" > \"개인 정보\" > \"카메라\""; +"noPhotoLibratyAuthority" = "%@ 이 \"설정\" > \"개인 정보\" > \"사진\"에서 앨범에 액세스하도록 허용하세요"; +"noMicrophoneAuthority" = "오디오를 녹음할 수 없습니다. 
\"설정\" > \"%@\"으로 이동하여 마이크 액세스를 사용으로 설정하십시오."; +"cameraUnavailable" = "카메라를 사용할 수 없습니다"; +"keepRecording" = "계속 촬영"; +"gotoSettings" = "설정으로 이동"; + +"iCloudVideoLoadFaild" = "iCloud에서 동기화 할 수 없습니다"; +"imageLoadFailed" = "로드 실패"; + +"save" = "저장"; +"saveImageError" = "이미지를 저장하지 못했습니다"; +"saveVideoError" = "비디오를 저장하지 못했습니다"; +"timeout" = "요청 시간이 초과되었습니다"; + +"customCameraTips" = "눌러서 촬영 및 길게 눌러서 기록"; +"customCameraTakePhotoTips" = "눌러서 촬영"; +"customCameraRecordVideoTips" = "길게 눌러서 기록"; +"minRecordTimeTips" = "%ld 초 이상 녹화"; + +"cameraRoll" = "최근 항목"; +"panoramas" = "파노라마"; +"videos" = "비디오"; +"favorites" = "즐겨 찾기"; +"timelapses" = "타임랩스"; +"recentlyAdded" = "최근에 추가"; +"bursts" = "고속 연사 촬영"; +"slomoVideos" = "슬로 모션"; +"selfPortraits" = "셀카"; +"screenshots" = "스크린샷"; +"depthEffect" = "인물 사진"; +"livePhotos" = "Live Photos"; +"animated" = "움직이는 항목"; +"myPhotoStream" = "나의 사진 스트림"; + +"noTitleAlbumListPlaceholder" = "모든 사진"; +"unableToAccessAllPhotos" = "앨범 사진에 접근할 수 없습니다.\n\"사진\"에서 \"모든 사진\"에 대한 접근을 허용합니다."; +"textStickerRemoveTips" = "제거하려면 여기로 드래그하세요"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ms.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ms.lproj/Localizable.strings new file mode 100644 index 0000000..ca0fe22 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ms.lproj/Localizable.strings @@ -0,0 +1,70 @@ +"previewCamera" = "Kamera"; +"previewCameraRecord" = "Rekod"; +"previewAlbum" = "Imej"; +"cancel" = "Batal"; + +"originalPhoto" = "Imej Penuh"; +"done" = "Selesai"; +"ok" = "Okey"; +"editFinish" = "Selesai"; + +"back" = "Belakang"; +"edit" = "Edit"; +"revert" = "Buat asal"; +"brightness" = "Kecerahan"; +"contrast" = "Contrast"; +"saturation" = "Ketepuan"; + +"photo" = "Gambar"; +"preview" = "Pratonton"; + +"noPhotoTips" = "Tiada Foto"; +"notAllowMixSelect" = "Tidak dapat memilih video"; + +"loading" = "Memuatkan, tunggu"; +"hudLoading" = "menunggu..."; + 
+"exceededMaxSelectCount" = "Kiraan maksimum untuk pemilihan: %ld"; +"longerThanMaxVideoDuration" = "Tidak dapat memilih video dengan jangka masa lebih lama daripada %lds"; +"shorterThanMaxVideoDuration" = "Tidak dapat memilih video dengan jangka masa lebih pendek daripada %lds"; +"exceededMaxVideoSelectCount" = "Jumlah maksimum untuk pemilihan video: %ld"; +"lessThanMinVideoSelectCount" = "Kiraan minimum untuk pemilihan video: %ld"; + +"noCameraAuthority" = "Izinkan %@ mengakses kamera peranti anda di \"Tetapan\" > \"Privasi\" > \"Kamera\""; +"noPhotoLibratyAuthority" = "Izinkan %@ mengakses album anda di \"Tetapan\" > \"Privasi\" > \"Foto\""; +"noMicrophoneAuthority" = "Tidak dapat merakam audio. Pergi Ke \"Tetapan\" > \"%@\" dan dayakan akses mikrofon."; +"cameraUnavailable" = "Kamera tidak tersedia"; +"keepRecording" = "Teruskan Perakaman"; +"gotoSettings" = "Pergi ke Tetapan"; + +"iCloudVideoLoadFaild" = "Tidak dapat menyegerakkan dari iCloud"; +"imageLoadFailed" = "pemuatan gagal"; + +"save" = "Berjimat"; +"saveImageError" = "Gagal menyimpan gambar"; +"saveVideoError" = "Gagal menyimpan video"; +"timeout" = "Permintaan tamat"; + +"customCameraTips" = "Ketik untuk menangkap dan tahan untuk merakam"; +"customCameraTakePhotoTips" = "Ketik untuk menangkap"; +"customCameraRecordVideoTips" = "Tahan untuk merakam"; +"minRecordTimeTips" = "Rakam sekurang-kurangnya %lds"; + +"cameraRoll" = "Terbaru"; +"panoramas" = "Panorama"; +"videos" = "Video"; +"favorites" = "Kegemaran"; +"timelapses" = "Selang Masa"; +"recentlyAdded" = "Ditambah Terkini"; +"bursts" = "Jujukan"; +"slomoVideos" = "Slo-mo"; +"selfPortraits" = "Swafoto"; +"screenshots" = "Gambar Skrin"; +"depthEffect" = "Potret"; +"livePhotos" = "Live Photos"; +"animated" = "Beranimasi"; +"myPhotoStream" = "Strim Foto Saya"; + +"noTitleAlbumListPlaceholder" = "Semua Foto"; +"unableToAccessAllPhotos" = "Tidak dapat mengakses semua foto dalam album.\nBenarkan akses kepada \"Semua Foto\" dalam \"Foto\"."; 
+"textStickerRemoveTips" = "Seret ke sini untuk mengalih keluar"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/pt-BR.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/pt-BR.lproj/Localizable.strings new file mode 100644 index 0000000..f48edd3 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/pt-BR.lproj/Localizable.strings @@ -0,0 +1,70 @@ +"previewCamera" = "Câmara"; +"previewCameraRecord" = "Recorde"; +"previewAlbum" = "Álbum"; +"cancel" = "Cancelar"; + +"originalPhoto" = "Imagem completa"; +"done" = "Feito"; +"ok" = "OK"; +"editFinish" = "Feito"; + +"back" = "Voltar"; +"edit" = "Editar"; +"revert" = "Desfazer"; +"brightness" = "Brilho"; +"contrast" = "Contraste"; +"saturation" = "Saturação"; + +"photo" = "Fotos"; +"preview" = "Pré-visualização"; + +"noPhotoTips" = "Sem Fotos"; +"notAllowMixSelect" = "Não foi possível selecionar o vídeo"; + +"loading" = "carregamento, esperando por favor"; +"hudLoading" = "à espera..."; + +"exceededMaxSelectCount" = "Contagem máxima para seleção: %ld"; +"longerThanMaxVideoDuration" = "Incapaz de selecionar vídeos com duração superior a %lds"; +"shorterThanMaxVideoDuration" = "Incapaz de selecionar vídeos com duração inferior a %lds"; +"exceededMaxVideoSelectCount" = "Contagem máxima para seleção de vídeo: %ld"; +"lessThanMinVideoSelectCount" = "Contagem mínima para seleção de vídeo: %ld"; + +"noCameraAuthority" = "Por favor, permita a %@ aceder à câmara do seu dispositivo em \"Definições\" > \"Privacidade\" > \"Câmara\"."; +"noPhotoLibratyAuthority" = "Por favor, permita que %@ acesse seu álbum em \"Configurações\" > \"Privacidade\" > \"Fotos\"."; +"noMicrophoneAuthority" = "Incapaz de gravar áudio. 
Vá para \"Configurações\" > \"%@\" e habilite o acesso ao microfone."; +"cameraUnavailable" = "A câmara não está disponível"; +"keepRecording" = "Continuar Gravando"; +"gotoSettings" = "Ir para Configurações"; + +"iCloudVideoLoadFaild" = "Incapaz de sincronizar a partir do iCloud"; +"imageLoadFailed" = "carregamento fracassado"; + +"save" = "Salvar"; +"saveImageError" = "Falha em salvar a imagem"; +"saveVideoError" = "Falha ao salvar o vídeo"; +"timeout" = "Pedidos com tempo limite"; + +"customCameraTips" = "Toque para tirar foto e segure para gravar vídeo"; +"customCameraTakePhotoTips" = "Toque para tirar foto"; +"customCameraRecordVideoTips" = "Segure para gravar vídeo"; +"minRecordTimeTips" = "Registre pelo menos %lds"; + +"cameraRoll" = "Recentes"; +"panoramas" = "Panoramas"; +"videos" = "Vídeos"; +"favorites" = "Favoritos"; +"timelapses" = "Prazo"; +"recentlyAdded" = "Adicionado recentemente"; +"bursts" = "Rebentamentos"; +"slomoVideos" = "Slo-mo"; +"selfPortraits" = "Selfies"; +"screenshots" = "Imagens de tela"; +"depthEffect" = "Retrato"; +"livePhotos" = "Fotos ao vivo"; +"animated" = "Animado"; +"myPhotoStream" = "Meu fluxo de fotos"; + +"noTitleAlbumListPlaceholder" = "Todas as fotos"; +"unableToAccessAllPhotos" = "Incapaz de aceder a todas as fotografias do álbum.\nPermitir o acesso a \"Todas as fotos\" em \"Fotos\"."; +"textStickerRemoveTips" = "Arraste aqui para remover"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ru.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ru.lproj/Localizable.strings new file mode 100644 index 0000000..3bb88ac --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/ru.lproj/Localizable.strings @@ -0,0 +1,70 @@ +"previewCamera" = "Камера"; +"previewCameraRecord" = "изображения"; +"previewAlbum" = "Альбом"; +"cancel" = "Отмена"; + +"originalPhoto" = "Полный формат"; +"done" = "Готово"; +"ok" = "в порядке"; +"editFinish" = "Готово"; + +"back" = 
"Назад"; +"edit" = "Pед"; +"revert" = "Отменить"; +"brightness" = "Яркость"; +"contrast" = "Контраст"; +"saturation" = "Насыщенность"; + +"photo" = "Фото"; +"preview" = "Предпросмотр"; + +"noPhotoTips" = "Нет фотографии"; +"notAllowMixSelect" = "Невозможно выбрать видео"; + +"loading" = "загрузка, подождите пожалуйста"; +"hudLoading" = "ожидание..."; + +"exceededMaxSelectCount" = "Максимальное количество выбранных: %ld"; +"longerThanMaxVideoDuration" = "Невозможно выбрать видео длительностью более %ld с"; +"shorterThanMaxVideoDuration" = "Невозможно выбрать видео продолжительностью менее 2 с"; +"exceededMaxVideoSelectCount" = "Максимальное количество выбранных видео: %ld"; +"lessThanMinVideoSelectCount" = "Количество мин. Выбора видео: %ld"; + +"noCameraAuthority" = "Разрешите %@ доступ к камере вашего устройства в \"Настройки\" > \"Конфиденциальность\" > \"Камера\""; +"noPhotoLibratyAuthority" = "Разрешите %@ доступ к вашему альбому в \"Настройки\" > \"Конфиденциальность\" > \"Фото\""; +"noMicrophoneAuthority" = "Не удалось записать звук. 
Перейдите в меню \"Настройки\" > \"%@\" и включите доступ к микрофону."; +"cameraUnavailable" = "Камера недоступна"; +"keepRecording" = "Продолжить запись"; +"gotoSettings" = "Перейти в настройки"; + +"iCloudVideoLoadFaild" = "Невозможно синхронизировать из iCloud"; +"imageLoadFailed" = "загрузка не удалась"; + +"save" = "Сохранить"; +"saveImageError" = "Не удалось сохранить изображение"; +"saveVideoError" = "Не удалось сохранить видео"; +"timeout" = "Истекло время запроса"; + +"customCameraTips" = "Нажмите для съемки, удерживайте для записи"; +"customCameraTakePhotoTips" = "Нажмите для съeмки"; +"customCameraRecordVideoTips" = "Удерживайте для записи"; +"minRecordTimeTips" = "Запишите не менее 2 с"; + +"cameraRoll" = "Недавние"; +"panoramas" = "Панорамы"; +"videos" = "Видео"; +"favorites" = "Избранное"; +"timelapses" = "Tаймлапс"; +"recentlyAdded" = "Недавно добавленный"; +"bursts" = "Cepии"; +"slomoVideos" = "Замедленное"; +"selfPortraits" = "Селфи"; +"screenshots" = "Cнимки зкрана"; +"depthEffect" = "Портреты"; +"livePhotos" = "Live Photos"; +"animated" = "Анимированные"; +"myPhotoStream" = "Мой фотопоток"; + +"noTitleAlbumListPlaceholder" = "Все фотографии"; +"unableToAccessAllPhotos" = "Невозможно получить доступ к фотографиям в альбоме.\nРазрешить доступ ко \"Всем фотографиям\" в \"Фото\"."; +"textStickerRemoveTips" = "Перетащите сюда, чтобы удалить"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/tr.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/tr.lproj/Localizable.strings new file mode 100644 index 0000000..a58d94d --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/tr.lproj/Localizable.strings @@ -0,0 +1,70 @@ +"previewCamera" = "Kamera"; +"previewCameraRecord" = "Kayıt Et"; +"previewAlbum" = "Albüm"; +"cancel" = "İptal"; + +"originalPhoto" = "Orijinal Resim"; +"done" = "Bitti"; +"ok" = "OK"; +"editFinish" = "Bitti"; + +"back" = "Geri"; +"edit" = "Düzenle"; +"revert" = 
"Geri Al"; +"brightness" = "Parlaklık"; +"contrast" = "Kontrast"; +"saturation" = "Canlılık"; + +"photo" = "Fotoğrafşar"; +"preview" = "Önizle"; + +"noPhotoTips" = "Fotoğraf yok"; +"notAllowMixSelect" = "Video seçilemedi"; + +"loading" = "yükleniyor, lütfen bekleyin..."; +"hudLoading" = "bekleyin..."; + +"exceededMaxSelectCount" = "Maksimum seçim adeti: %ld"; +"longerThanMaxVideoDuration" = "%lds'dan uzun süreli videolar seçilemiyor."; +"shorterThanMaxVideoDuration" = "%lds'dan kıza süreli videolar seçilemiyor."; +"exceededMaxVideoSelectCount" = "Maksimum video seçim adeti: %ld"; +"lessThanMinVideoSelectCount" = "Minimum video seçim adeti: %ld"; + +"noCameraAuthority" = "Lütfen %@'nin \"Ayarlar\" > \"Gizlilik\" > \"Kamera\" bölümünden cihazınızın kamerasına erişmesine izin verin"; +"noPhotoLibratyAuthority" = "Lütfen %@'nin \"Ayarlar\" > \"Gizlilik\" > \"Fotoğraflar\" bölümünde albümünüze erişmesine izin verin"; +"noMicrophoneAuthority" = "Ses kaydedilemiyor. \"Ayarlar\" > \"%@\" seçeneğine gidin ve mikrofon erişimini etkinleştirin."; +"cameraUnavailable" = "Kamera kullanılamıyor"; +"keepRecording" = "Kayda Devam Et"; +"gotoSettings" = "Ayarlara git"; + +"iCloudVideoLoadFaild" = "iCloud'dan senkronize edilemiyor"; +"imageLoadFailed" = "Yüklenemedi!"; + +"save" = "Kaydet"; +"saveImageError" = "Resim kaydedilemedi!"; +"saveVideoError" = "Video kaydedilemedi!"; +"timeout" = "İstek zaman aşımına uğradı"; + +"customCameraTips" = "Fotoğraf çekmek için dokunun ve video kaydetmek için basılı tutun"; +"customCameraTakePhotoTips" = "Fotoğraf çekmek için dokunun"; +"customCameraRecordVideoTips" = "Video çekmek için basılı tutun"; +"minRecordTimeTips" = "En az %lds kaydedin"; + +"cameraRoll" = "Son Çekimler"; +"panoramas" = "Panoramalar"; +"videos" = "Videolar"; +"favorites" = "Favoriler"; +"timelapses" = "Hızlandırılmışlar"; +"recentlyAdded" = "Yeni Eklenenler"; +"bursts" = "Bursts"; +"slomoVideos" = "Yavaş Çekimler"; +"selfPortraits" = "Selfie'ler"; +"screenshots" = "Ekran 
Görüntüleri"; +"depthEffect" = "Portreler"; +"livePhotos" = "Live Photo'lar"; +"animated" = "Hareketli"; +"myPhotoStream" = "Fotoğraf Akışım"; + +"noTitleAlbumListPlaceholder" = "Tüm Fotoğraflar"; +"unableToAccessAllPhotos" = "Albümdeki tüm fotoğraflara erişilemiyor.\n\"Fotoğraflar\"da \"Tüm Fotoğraflar\"a erişime izin verin."; +"textStickerRemoveTips" = "Kaldırmak için buraya sürükleyin"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/vi.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/vi.lproj/Localizable.strings new file mode 100644 index 0000000..df420c9 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/vi.lproj/Localizable.strings @@ -0,0 +1,70 @@ +"previewCamera" = "Camera"; +"previewCameraRecord" = "Ghi lại"; +"previewAlbum" = "Hình ảnh"; +"cancel" = "Huỷ"; + +"originalPhoto" = "Toàn bộ hình ảnh"; +"done" = "Xong"; +"ok" = "đồng ý"; +"editFinish" = "Xong"; + +"back" = "Trở lại"; +"edit" = "Chỉnh sửa"; +"revert" = "Hoàn tác"; +"brightness" = "độ sáng"; +"contrast" = "Sự tương phản"; +"saturation" = "Bão hòa"; + +"photo" = "Ảnh"; +"preview" = "Xem trước"; + +"noPhotoTips" = "Không có ảnh"; +"notAllowMixSelect" = "Không thể chọn video"; + +"loading" = "đang tải, vui lòng đợi"; +"hudLoading" = "đang chờ đợi..."; + +"exceededMaxSelectCount" = "Số lượng lựa chọn tối đa: %ld"; +"longerThanMaxVideoDuration" = "Không thể chọn video có thời lượng dài hơn %ld giây"; +"shorterThanMaxVideoDuration" = "Không thể chọn video có thời lượng ngắn hơn %ld giây"; +"exceededMaxVideoSelectCount" = "Số lượng lựa chọn tối đa của video: %ld"; +"lessThanMinVideoSelectCount" = "Số phút chọn tối thiểu của video: %ld"; + +"noCameraAuthority" = "Vui lòng cho phép %@ truy cập máy ảnh trên thiết bị của bạn trong \"Cài đặt\" > \"Quyền riêng tư\" > \"Máy ảnh\""; +"noPhotoLibratyAuthority" = "Vui lòng cho phép %@ truy cập anbom của bạn trong \"Cài đặt\" > \"Bảo mật\" > \"Ảnh\""; +"noMicrophoneAuthority" = "Không 
thểghi hình. Đi tới \"Cài đặt\" > \"%@\" và bật quyên truy cập mic."; +"cameraUnavailable" = "Máy ảnh không khả dụng"; +"keepRecording" = "Tiếp tục ghi hình"; +"gotoSettings" = "Đi đến Cài đặt"; + +"iCloudVideoLoadFaild" = "Không thể đồng bộ hóa từ iCloud"; +"imageLoadFailed" = "tải không thành công"; + +"save" = "Tiết kiệm"; +"saveImageError" = "Lưu ảnh không thành công"; +"saveVideoError" = "Lưu video không thành công"; +"timeout" = "Yêu cầu đã hết thời gian chờ"; + +"customCameraTips" = "Nhấn để chụp và giữ để ghi"; +"customCameraTakePhotoTips" = "Nhấn để chụp"; +"customCameraRecordVideoTips" = "Giữ để quay video"; +"minRecordTimeTips" = "Ghi ít nhất %ld giây"; + +"cameraRoll" = "Gần đây"; +"panoramas" = "Ảnh toàn cảnh"; +"videos" = "Video"; +"favorites" = "Mục ưa thích"; +"timelapses" = "Ảnh time-lapse"; +"recentlyAdded" = "Đã thêm gần đây"; +"bursts" = "Chụp liên hình"; +"slomoVideos" = "Quay chậm"; +"selfPortraits" = "Ảnh selfie"; +"screenshots" = "Ảnh màn hình"; +"depthEffect" = "Chân dung"; +"livePhotos" = "Live Photos"; +"animated" = "Hình động"; +"myPhotoStream" = "Kho ảnh của tôi"; + +"noTitleAlbumListPlaceholder" = "Tất cả ảnh"; +"unableToAccessAllPhotos" = "Không thể truy cập tất cả ảnh trong album.\nCho phép truy cập vào \"Tất cả ảnh\" trong \"Ành\"."; +"textStickerRemoveTips" = "Kéo vào đây để xóa"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zh-Hans.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zh-Hans.lproj/Localizable.strings new file mode 100644 index 0000000..906f891 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zh-Hans.lproj/Localizable.strings @@ -0,0 +1,70 @@ +"previewCamera" = "拍照"; +"previewCameraRecord" = "拍摄"; +"previewAlbum" = "相册"; +"cancel" = "取消"; + +"originalPhoto" = "原图"; +"done" = "确定"; +"ok" = "确定"; +"editFinish" = "完成"; + +"back" = "返回"; +"edit" = "编辑"; +"revert" = "还原"; +"brightness" = "亮度"; +"contrast" = "对比度"; +"saturation" = "饱和度"; + 
+"photo" = "照片"; +"preview" = "预览"; + +"noPhotoTips" = "无照片"; +"notAllowMixSelect" = "不能同时选择照片和视频"; + +"loading" = "加载中,请稍后"; +"hudLoading" = "正在处理..."; + +"exceededMaxSelectCount" = "最多只能选择%ld张图片"; +"longerThanMaxVideoDuration" = "不能选择超过%ld秒的视频"; +"shorterThanMaxVideoDuration" = "不能选择低于%ld秒的视频"; +"exceededMaxVideoSelectCount" = "最多只能选择%ld个视频"; +"lessThanMinVideoSelectCount" = "最少选择%ld个视频"; + +"noCameraAuthority" = "请在iPhone的\"设置 > 隐私 > 相机\"选项中,允许%@访问你的相机"; +"noPhotoLibratyAuthority" = "请在iPhone的\"设置 > 隐私 >照片\"选项中,允许%@访问你的照片"; +"noMicrophoneAuthority" = "无法录制声音,前往\"设置 > %@\"中打开麦克风权限"; +"cameraUnavailable" = "相机不可用"; +"keepRecording" = "继续拍摄"; +"gotoSettings" = "前往设置"; + +"iCloudVideoLoadFaild" = "iCloud无法同步"; +"imageLoadFailed" = "图片加载失败"; + +"save" = "保存"; +"saveImageError" = "图片保存失败"; +"saveVideoError" = "视频保存失败"; +"timeout" = "请求超时"; + +"customCameraTips" = "轻触拍照,按住摄像"; +"customCameraTakePhotoTips" = "轻触拍照"; +"customCameraRecordVideoTips" = "按住摄像"; +"minRecordTimeTips" = "至少录制%ld秒"; + +"cameraRoll" = "最近项目"; +"panoramas" = "全景照片"; +"videos" = "视频"; +"favorites" = "个人收藏"; +"timelapses" = "延时摄影"; +"recentlyAdded" = "最近添加"; +"bursts" = "连拍快照"; +"slomoVideos" = "慢动作"; +"selfPortraits" = "自拍"; +"screenshots" = "屏幕快照"; +"depthEffect" = "人像"; +"livePhotos" = "Live Photos"; +"animated" = "动图"; +"myPhotoStream" = "我的照片流"; + +"noTitleAlbumListPlaceholder" = "所有照片"; +"unableToAccessAllPhotos" = "无法访问相册中所有照片,\n请允许访问「照片」中的「所有照片」。"; +"textStickerRemoveTips" = "拖到此处删除"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zh-Hant.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zh-Hant.lproj/Localizable.strings new file mode 100644 index 0000000..fe2e943 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zh-Hant.lproj/Localizable.strings @@ -0,0 +1,70 @@ +"previewCamera" = "拍照"; +"previewCameraRecord" = "拍攝"; +"previewAlbum" = "相冊"; +"cancel" = "取消"; + +"originalPhoto" = "原圖"; +"done" = "確定"; +"ok" = 
"確定"; +"editFinish" = "完成"; + +"back" = "返回"; +"edit" = "編輯"; +"revert" = "還原"; +"brightness" = "亮度"; +"contrast" = "對比度"; +"saturation" = "飽和度"; + +"photo" = "照片"; +"preview" = "預覽"; + +"noPhotoTips" = "無照片"; +"notAllowMixSelect" = "不能同時選擇照片和視頻"; + +"loading" = "加載中,請稍後"; +"hudLoading" = "正在處理..."; + +"exceededMaxSelectCount" = "最多只能選擇%ld張圖片"; +"longerThanMaxVideoDuration" = "不能選擇超過%ld秒的視頻"; +"shorterThanMaxVideoDuration" = "不能選擇低於%ld秒的視頻"; +"exceededMaxVideoSelectCount" = "最多只能選擇%ld個視頻"; +"lessThanMinVideoSelectCount" = "最少選擇%ld個視頻"; + +"noCameraAuthority" = "請在iPhone的\"設置 > 隱私 > 相機\"選項中,允許%@訪問你的相機"; +"noPhotoLibratyAuthority" = "請在iPhone的\"設置 > 隱私 > 相冊\"選項中,允許%@訪問你的照片"; +"noMicrophoneAuthority" = "無法錄製聲音,前往\"設置 > %@\"中打開麥克風權限"; +"cameraUnavailable" = "相機不可用"; +"keepRecording" = "繼續拍攝"; +"gotoSettings" = "前往設置"; + +"iCloudVideoLoadFaild" = "iCloud無法同步"; +"imageLoadFailed" = "圖片加載失敗"; + +"save" = "保存"; +"saveImageError" = "圖片保存失敗"; +"saveVideoError" = "視頻保存失敗"; +"timeout" = "請求超時"; + +"customCameraTips" = "輕觸拍照,按住攝像"; +"customCameraTakePhotoTips" = "輕觸拍照"; +"customCameraRecordVideoTips" = "按住攝像"; +"minRecordTimeTips" = "至少錄制%ld秒"; + +"cameraRoll" = "最近項目"; +"panoramas" = "全景照片"; +"videos" = "視頻"; +"favorites" = "個人收藏"; +"timelapses" = "延時攝影"; +"recentlyAdded" = "最近添加"; +"bursts" = "連拍快照"; +"slomoVideos" = "慢動作"; +"selfPortraits" = "自拍"; +"screenshots" = "屏幕快照"; +"depthEffect" = "人像"; +"livePhotos" = "Live Photos"; +"animated" = "動圖"; +"myPhotoStream" = "我的照片流"; + +"noTitleAlbumListPlaceholder" = "所有照片"; +"unableToAccessAllPhotos" = "無法訪問相冊中所有照片,\n請允許訪問「照片」中的「所有照片」。"; +"textStickerRemoveTips" = "拖到此處刪除"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_addPhoto@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_addPhoto@2x.png new file mode 100644 index 0000000..44be859 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_addPhoto@2x.png differ diff --git 
a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_addPhoto@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_addPhoto@3x.png new file mode 100644 index 0000000..63912b2 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_addPhoto@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_albumSelect@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_albumSelect@2x.png new file mode 100644 index 0000000..f2c6378 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_albumSelect@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_albumSelect@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_albumSelect@3x.png new file mode 100644 index 0000000..5ba0e5c Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_albumSelect@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_arrow_down@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_arrow_down@2x.png new file mode 100644 index 0000000..61b5ee9 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_arrow_down@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_arrow_down@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_arrow_down@3x.png new file mode 100644 index 0000000..7bd78b7 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_arrow_down@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_circle@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_circle@2x.png new file mode 100644 index 0000000..963c561 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_circle@2x.png differ diff --git 
a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_circle@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_circle@3x.png new file mode 100644 index 0000000..87dc148 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_circle@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_circle@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_circle@2x.png new file mode 100644 index 0000000..531e6ed Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_circle@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_circle@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_circle@3x.png new file mode 100644 index 0000000..a81e1d8 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_circle@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_selected@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_selected@2x.png new file mode 100644 index 0000000..1fccc9f Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_selected@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_selected@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_selected@3x.png new file mode 100644 index 0000000..95c7fc2 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_original_selected@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_selected@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_selected@2x.png new file mode 100644 index 0000000..e799a98 Binary files /dev/null and 
b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_selected@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_selected@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_selected@3x.png new file mode 100644 index 0000000..63e3dec Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_selected@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_unselected@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_unselected@2x.png new file mode 100644 index 0000000..ad48454 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_unselected@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_unselected@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_unselected@3x.png new file mode 100644 index 0000000..605645f Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_btn_unselected@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_close@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_close@2x.png new file mode 100644 index 0000000..ee2e295 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_close@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_close@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_close@3x.png new file mode 100644 index 0000000..870fe18 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_close@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_downArrow@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_downArrow@2x.png new file mode 100644 index 0000000..51eb831 Binary files /dev/null and 
b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_downArrow@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_downArrow@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_downArrow@3x.png new file mode 100644 index 0000000..8992648 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_downArrow@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navBack@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navBack@2x.png new file mode 100644 index 0000000..d480ce7 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navBack@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navBack@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navBack@3x.png new file mode 100644 index 0000000..c19cb5d Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navBack@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navClose@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navClose@2x.png new file mode 100644 index 0000000..9f97c52 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navClose@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navClose@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navClose@3x.png new file mode 100644 index 0000000..bec46b7 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_navClose@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_retake@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_retake@2x.png new file mode 100644 index 0000000..f38409b Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_retake@2x.png differ 
diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_retake@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_retake@3x.png new file mode 100644 index 0000000..0d1ab03 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_retake@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right@2x.png new file mode 100644 index 0000000..4c458fd Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right@3x.png new file mode 100644 index 0000000..0b1aeb9 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right_arrow@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right_arrow@2x.png new file mode 100644 index 0000000..7d097fb Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right_arrow@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right_arrow@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right_arrow@3x.png new file mode 100644 index 0000000..3af669f Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_right_arrow@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_shadow@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_shadow@2x.png new file mode 100644 index 0000000..b139d50 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_shadow@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_shadow@3x.png 
b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_shadow@3x.png new file mode 100644 index 0000000..431346c Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_shadow@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_warning@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_warning@2x.png new file mode 100644 index 0000000..62e5950 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_warning@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_warning@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_warning@3x.png new file mode 100644 index 0000000..3e59abe Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHPhotoBrowser.bundle/zl_warning@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHSDKVideo b/HHVDoctorSDK/HHSDKVideo.framework/HHSDKVideo new file mode 100755 index 0000000..1a4d162 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHSDKVideo differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHSelectMemCell.nib b/HHVDoctorSDK/HHSDKVideo.framework/HHSelectMemCell.nib new file mode 100644 index 0000000..d366214 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHSelectMemCell.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHSelectMemView.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/HHSelectMemView.nib/objects-11.0+.nib new file mode 100644 index 0000000..2f6970f Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHSelectMemView.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HHSelectMemView.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/HHSelectMemView.nib/runtime.nib new file mode 100644 index 0000000..5b22aba Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHSelectMemView.nib/runtime.nib differ diff --git 
a/HHVDoctorSDK/HHSDKVideo.framework/HHVideoView.nib b/HHVDoctorSDK/HHSDKVideo.framework/HHVideoView.nib new file mode 100644 index 0000000..46a905c Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HHVideoView.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/SDKConfig.plist b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/SDKConfig.plist new file mode 100644 index 0000000..15fc108 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/SDKConfig.plist @@ -0,0 +1,8 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>HHBundleVersion</key> + <string>$(CURRENT_PROJECT_VERSION)</string> +</dict> +</plist> diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/addr_close@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/addr_close@2x.png new file mode 100644 index 0000000..7f6bad6 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/addr_close@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/addr_close@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/addr_close@3x.png new file mode 100644 index 0000000..ef7443e Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/addr_close@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/address@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/address@2x.png new file mode 100755 index 0000000..520dd4b Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/address@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/address@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/address@3x.png new file mode 100755 index 0000000..8fcd456 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/address@3x.png differ diff --git 
a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/back_app@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/back_app@3x.png new file mode 100644 index 0000000..74a12d7 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/back_app@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/buyVip@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/buyVip@2x.png new file mode 100644 index 0000000..cea8375 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/buyVip@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/buyVip@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/buyVip@3x.png new file mode 100644 index 0000000..8207ff5 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/buyVip@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/call_default@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/call_default@3x.png new file mode 100644 index 0000000..bb423ff Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/call_default@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/camera_disable@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/camera_disable@3x.png new file mode 100644 index 0000000..cafefa4 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/camera_disable@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/camera_flash_close@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/camera_flash_close@3x.png new file mode 100644 index 0000000..8a6cbdf Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/camera_flash_close@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/camera_flash_normal@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/camera_flash_normal@3x.png new file mode 100644 index 0000000..ecb6891 Binary files /dev/null and 
b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/camera_flash_normal@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/card_logo_gold@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/card_logo_gold@2x.png new file mode 100644 index 0000000..1c03043 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/card_logo_gold@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/card_logo_gold@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/card_logo_gold@3x.png new file mode 100644 index 0000000..97640df Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/card_logo_gold@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat9left.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat9left.png new file mode 100644 index 0000000..4edf73b Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat9left.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_add_rights@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_add_rights@3x.png new file mode 100644 index 0000000..7e8952e Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_add_rights@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_arrow.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_arrow.png new file mode 100644 index 0000000..641e8d1 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_arrow.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_arrow@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_arrow@2x.png new file mode 100644 index 0000000..1728621 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_arrow@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_arrow@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_arrow@3x.png new file mode 
100644 index 0000000..641e8d1 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_arrow@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_mask_b@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_mask_b@3x.png new file mode 100644 index 0000000..160a188 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_mask_b@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_mask_t@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_mask_t@3x.png new file mode 100644 index 0000000..d64a6f3 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/chat_mask_t@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_default@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_default@2x.png new file mode 100644 index 0000000..a9eca34 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_default@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_default@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_default@3x.png new file mode 100644 index 0000000..a9eca34 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_default@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_right@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_right@2x.png new file mode 100644 index 0000000..1b5be96 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_right@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_right@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_right@3x.png new file mode 100644 index 0000000..dca75cc Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/check_box_right@3x.png differ diff --git 
a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/close_member@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/close_member@3x.png new file mode 100644 index 0000000..3bdf8a9 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/close_member@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_arrow_right@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_arrow_right@2x.png new file mode 100644 index 0000000..d610769 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_arrow_right@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_arrow_right@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_arrow_right@3x.png new file mode 100644 index 0000000..3c264bd Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_arrow_right@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_feedback@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_feedback@2x.png new file mode 100644 index 0000000..a3ab813 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_feedback@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_feedback@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_feedback@3x.png new file mode 100644 index 0000000..40c6a30 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cm_feedback@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/coment_unhappy_s@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/coment_unhappy_s@2x.png new file mode 100644 index 0000000..172d63a Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/coment_unhappy_s@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/coment_unhappy_s@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/coment_unhappy_s@3x.png new file mode 100644 index 0000000..ff7f49e Binary files 
/dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/coment_unhappy_s@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy@2x.png new file mode 100644 index 0000000..d2fd36b Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy@3x.png new file mode 100644 index 0000000..73f0dc5 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy_s@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy_s@2x.png new file mode 100644 index 0000000..48f3de3 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy_s@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy_s@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy_s@3x.png new file mode 100644 index 0000000..db7d190 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_happy_s@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_unhappy@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_unhappy@2x.png new file mode 100644 index 0000000..d001748 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_unhappy@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_unhappy@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_unhappy@3x.png new file mode 100644 index 0000000..11a189b Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/comment_unhappy@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/counter_back@2x.png 
b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/counter_back@2x.png new file mode 100644 index 0000000..85e06cf Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/counter_back@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/counter_back@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/counter_back@3x.png new file mode 100644 index 0000000..e0388eb Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/counter_back@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cus_shape@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cus_shape@3x.png new file mode 100644 index 0000000..ce8c1f6 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/cus_shape@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/default_icon@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/default_icon@3x.png new file mode 100644 index 0000000..f441dd2 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/default_icon@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/delivery_logo2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/delivery_logo2x.png new file mode 100644 index 0000000..7e16385 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/delivery_logo2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/delivery_logo@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/delivery_logo@3x.png new file mode 100644 index 0000000..384752b Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/delivery_logo@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/doctor_license@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/doctor_license@3x.png new file mode 100644 index 0000000..cac677f Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/doctor_license@3x.png differ diff 
--git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/doctor_license_highlight@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/doctor_license_highlight@3x.png new file mode 100644 index 0000000..a865a31 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/doctor_license_highlight@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_accept_highlight@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_accept_highlight@3x.png new file mode 100644 index 0000000..c0916ca Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_accept_highlight@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_accept_normal@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_accept_normal@3x.png new file mode 100644 index 0000000..a6ca57a Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_accept_normal@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_call_mask@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_call_mask@3x.png new file mode 100644 index 0000000..dd929a1 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_call_mask@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_camera_close@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_camera_close@3x.png new file mode 100644 index 0000000..eb06ea2 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_camera_close@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_camera_open@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_camera_open@3x.png new file mode 100644 index 0000000..ac9fb14 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_camera_open@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_camera_highlight@3x.png 
b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_camera_highlight@3x.png new file mode 100644 index 0000000..cafefa4 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_camera_highlight@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_camera_normal@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_camera_normal@3x.png new file mode 100644 index 0000000..a43d9b6 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_camera_normal@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_doctor@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_doctor@3x.png new file mode 100644 index 0000000..5328644 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_doctor@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_doctor_blue@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_doctor_blue@3x.png new file mode 100644 index 0000000..c42855b Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_doctor_blue@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_voice_highlight@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_voice_highlight@3x.png new file mode 100644 index 0000000..d3ca7e5 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_voice_highlight@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_voice_normal@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_voice_normal@3x.png new file mode 100644 index 0000000..d2019eb Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_change_voice_normal@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_eveluate_close@2x.png 
b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_eveluate_close@2x.png new file mode 100644 index 0000000..2695abb Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_eveluate_close@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_eveluate_close@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_eveluate_close@3x.png new file mode 100644 index 0000000..a15e80d Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_eveluate_close@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_hangup_highlight@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_hangup_highlight@3x.png new file mode 100644 index 0000000..ee21b25 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_hangup_highlight@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_hangup_normal@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_hangup_normal@3x.png new file mode 100644 index 0000000..538ff00 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_hangup_normal@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_icon_album@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_icon_album@2x.png new file mode 100644 index 0000000..9effb57 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_icon_album@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_local_default@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_local_default@3x.png new file mode 100644 index 0000000..91d300e Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_local_default@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_medic_graytip@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_medic_graytip@3x.png new file mode 100644 index 0000000..4a99126 Binary files /dev/null and 
b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_medic_graytip@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_medic_medic_creat@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_medic_medic_creat@3x.png new file mode 100644 index 0000000..48b639a Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_medic_medic_creat@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_medic_redtip@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_medic_redtip@3x.png new file mode 100644 index 0000000..929e38c Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_medic_redtip@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_submitPhone@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_submitPhone@2x.png new file mode 100644 index 0000000..64197b7 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_submitPhone@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_submitPhone@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_submitPhone@3x.png new file mode 100644 index 0000000..a00d15b Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hh_submitPhone@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hhicon_buy_vip@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hhicon_buy_vip@2x.png new file mode 100644 index 0000000..21574fb Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hhicon_buy_vip@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hhicon_buy_vip@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hhicon_buy_vip@3x.png new file mode 100644 index 0000000..9a586aa Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hhicon_buy_vip@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hp_card_video_icon.png 
b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hp_card_video_icon.png new file mode 100755 index 0000000..4b5c684 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/hp_card_video_icon.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_photo_premission@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_photo_premission@2x.png new file mode 100644 index 0000000..dc3e0e2 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_photo_premission@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_photo_premission@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_photo_premission@3x.png new file mode 100644 index 0000000..2cb8d85 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_photo_premission@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_photo_premission_en@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_photo_premission_en@3x.png new file mode 100644 index 0000000..f2ba2ee Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_photo_premission_en@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_address@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_address@3x.png new file mode 100644 index 0000000..6d5bdf7 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_address@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_document@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_document@3x.png new file mode 100644 index 0000000..4c3670b Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_document@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_invitecode@3x.png 
b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_invitecode@3x.png new file mode 100644 index 0000000..e870fe5 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_invitecode@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_member@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_member@3x.png new file mode 100644 index 0000000..8155944 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_member@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_order@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_order@3x.png new file mode 100644 index 0000000..cddab78 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_setting_order@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_video@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_video@3x.png new file mode 100644 index 0000000..ce13154 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/icon_video@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/licence_normal@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/licence_normal@3x.png new file mode 100644 index 0000000..7461909 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/licence_normal@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/license_default@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/license_default@3x.png new file mode 100644 index 0000000..0061e3e Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/license_default@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/me_arrow@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/me_arrow@3x.png new file mode 100644 index 0000000..d59148a Binary files /dev/null and 
b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/me_arrow@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/meet_disconnect@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/meet_disconnect@3x.png new file mode 100644 index 0000000..74e2dbf Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/meet_disconnect@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/member@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/member@2x.png new file mode 100644 index 0000000..d2c6551 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/member@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/member@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/member@3x.png new file mode 100644 index 0000000..c5ee6e5 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/member@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/nim_expand_down@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/nim_expand_down@3x.png new file mode 100644 index 0000000..68b0792 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/nim_expand_down@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/nim_expand_up@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/nim_expand_up@3x.png new file mode 100644 index 0000000..29aa986 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/nim_expand_up@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/photo_picker-en.gif b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/photo_picker-en.gif new file mode 100644 index 0000000..a153086 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/photo_picker-en.gif differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/photo_picker.gif b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/photo_picker.gif new file mode 100644 
index 0000000..2442047 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/photo_picker.gif differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_normal@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_normal@2x.png new file mode 100644 index 0000000..f01ec07 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_normal@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_normal@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_normal@3x.png new file mode 100644 index 0000000..8607d45 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_normal@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_select@2x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_select@2x.png new file mode 100644 index 0000000..1563be4 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_select@2x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_select@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_select@3x.png new file mode 100644 index 0000000..f83eb13 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/rate_select@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/real_name_close@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/real_name_close@3x.png new file mode 100644 index 0000000..4521250 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/real_name_close@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/real_name_pass_add@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/real_name_pass_add@3x.png new file mode 100644 index 0000000..90e4539 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/real_name_pass_add@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/upload_fail@3x.png 
b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/upload_fail@3x.png new file mode 100644 index 0000000..c392efd Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/upload_fail@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/voice_volume_1@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/voice_volume_1@3x.png new file mode 100644 index 0000000..635db7c Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/voice_volume_1@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/voice_volume_2@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/voice_volume_2@3x.png new file mode 100644 index 0000000..f642f4c Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/voice_volume_2@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/voice_volume_3@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/voice_volume_3@3x.png new file mode 100644 index 0000000..d16b5c5 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/voice_volume_3@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wait_one@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wait_one@3x.png new file mode 100644 index 0000000..b8aa95a Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wait_one@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wait_three@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wait_three@3x.png new file mode 100644 index 0000000..ab78406 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wait_three@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wait_two@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wait_two@3x.png new file mode 100644 index 0000000..f663348 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wait_two@3x.png differ diff --git 
a/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wb_loadfail@3x.png b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wb_loadfail@3x.png new file mode 100644 index 0000000..1904c81 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/HMSDK.bundle/wb_loadfail@3x.png differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/CGGeometry+RSKImageCropper.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/CGGeometry+RSKImageCropper.h new file mode 100755 index 0000000..5004058 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/CGGeometry+RSKImageCropper.h @@ -0,0 +1,95 @@ +// +// CGGeometry+RSKImageCropper.h +// +// Copyright (c) 2015 Ruslan Skorb, http://ruslanskorb.com/ +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +// + +#import <CoreGraphics/CoreGraphics.h> +#import <tgmath.h> + +// tgmath functions aren't used on iOS when modules are enabled. 
+// Open Radar - http://www.openradar.me/16744288 +// Work around this by redeclaring things here. + +#undef cos +#define cos(__x) __tg_cos(__tg_promote1((__x))(__x)) + +#undef sin +#define sin(__x) __tg_sin(__tg_promote1((__x))(__x)) + +#undef atan2 +#define atan2(__x, __y) __tg_atan2(__tg_promote2((__x), (__y))(__x), \ +__tg_promote2((__x), (__y))(__y)) + +#undef pow +#define pow(__x, __y) __tg_pow(__tg_promote2((__x), (__y))(__x), \ +__tg_promote2((__x), (__y))(__y)) + +#undef sqrt +#define sqrt(__x) __tg_sqrt(__tg_promote1((__x))(__x)) + +#undef fabs +#define fabs(__x) __tg_fabs(__tg_promote1((__x))(__x)) + +#undef ceil +#define ceil(__x) __tg_ceil(__tg_promote1((__x))(__x)) + +#ifdef CGFLOAT_IS_DOUBLE + #define RSK_EPSILON DBL_EPSILON +#else + #define RSK_EPSILON FLT_EPSILON +#endif + +// Line segments. +struct RSKLineSegment { + CGPoint start; + CGPoint end; +}; +typedef struct RSKLineSegment RSKLineSegment; + +// The "empty" point. This is the point returned when, for example, we +// intersect two disjoint line segments. Note that the null point is not the +// same as the zero point. +CG_EXTERN const CGPoint RSKPointNull; + +// Returns the exact center point of the given rectangle. +CGPoint RSKRectCenterPoint(CGRect rect); + +// Returns the `rect` scaled around the `point` by `sx` and `sy`. +CGRect RSKRectScaleAroundPoint(CGRect rect, CGPoint point, CGFloat sx, CGFloat sy); + +// Returns true if `point' is the null point, false otherwise. +bool RSKPointIsNull(CGPoint point); + +// Returns the `point` rotated around the `pivot` by `angle`. +CGPoint RSKPointRotateAroundPoint(CGPoint point, CGPoint pivot, CGFloat angle); + +// Returns the distance between two points. +CGFloat RSKPointDistance(CGPoint p1, CGPoint p2); + +// Make a line segment from two points `start` and `end`. +RSKLineSegment RSKLineSegmentMake(CGPoint start, CGPoint end); + +// Returns the line segment rotated around the `pivot` by `angle`. 
+RSKLineSegment RSKLineSegmentRotateAroundPoint(RSKLineSegment lineSegment, CGPoint pivot, CGFloat angle); + +// Returns the intersection of `ls1' and `ls2'. This may return a null point. +CGPoint RSKLineSegmentIntersection(RSKLineSegment ls1, RSKLineSegment ls2); diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHAnimatedImageRep.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHAnimatedImageRep.h new file mode 100644 index 0000000..1931e8a --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHAnimatedImageRep.h @@ -0,0 +1,20 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import "HHWebImageCompat.h" + +#if HH_MAC + +// A subclass of `NSBitmapImageRep` to fix that GIF loop count issue because `NSBitmapImageRep` will reset `NSImageCurrentFrameDuration` by using `kCGImagePropertyGIFDelayTime` but not `kCGImagePropertyGIFUnclampedDelayTime`. +// Built in GIF coder use this instead of `NSBitmapImageRep` for better GIF rendering. If you do not want this, only enable `HHWebImageImageIOCoder`, which just call `NSImage` API and actually use `NSBitmapImageRep` for GIF image. + +@interface HHAnimatedImageRep : NSBitmapImageRep + +@end + +#endif diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHFaceAuthBridge.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHFaceAuthBridge.h new file mode 100644 index 0000000..a8e61bd --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHFaceAuthBridge.h @@ -0,0 +1,34 @@ +// +// HHFaceAuthBridge.h +// hhVDoctorSDK +// +// Created by 程言方 on 2022/2/8. 
+// + +#import <Foundation/Foundation.h> + +typedef NS_ENUM(NSUInteger, HHFaceAuthStatus) { + HHAuthStatusSuccess = 0, + HHAuthStatusIdentifyFail = 1, + HHAuthStatusAuthenFail = 2, +}; + +typedef void(^AuthFaceCallBack)(HHFaceAuthStatus code, NSString* _Nullable errorMsg); +typedef void(^AuthFaceInitBack)(BOOL success); + + +NS_ASSUME_NONNULL_BEGIN + +@interface HHFaceAuthBridge : NSObject + ++ (void)initWihtVC: (UIViewController*)vc callBack: (AuthFaceInitBack) callBack; + ++ (void)startFaceWithVC:(UIViewController *)vc dict: (NSDictionary *)dict callBack: (AuthFaceCallBack) callBack; + ++ (BOOL)canFaceAuth; + ++ (void)unInitSDK; + +@end + +NS_ASSUME_NONNULL_END diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHImageCache.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHImageCache.h new file mode 100644 index 0000000..0a60153 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHImageCache.h @@ -0,0 +1,295 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import <Foundation/Foundation.h> +#import "HHWebImageCompat.h" +#import "HHImageCacheConfig.h" + +typedef NS_ENUM(NSInteger, HHImageCacheType) { + /** + * The image wasn't available the HHWebImage caches, but was downloaded from the web. + */ + HHImageCacheTypeNone, + /** + * The image was obtained from the disk cache. + */ + HHImageCacheTypeDisk, + /** + * The image was obtained from the memory cache. + */ + HHImageCacheTypeMemory +}; + +typedef NS_OPTIONS(NSUInteger, HHImageCacheOptions) { + /** + * By default, we do not query disk data when the image is cached in memory. This mask can force to query disk data at the same time. + */ + HHImageCacheQueryDataWhenInMemory = 1 << 0, + /** + * By default, we query the memory cache synchronously, disk cache asynchronously. 
This mask can force to query disk cache synchronously. + */ + HHImageCacheQueryDiskSync = 1 << 1 +}; + +typedef void(^HHCacheQueryCompletedBlock)(UIImage * _Nullable image, NSData * _Nullable data, HHImageCacheType cacheType); + +typedef void(^HHWebImageCheckCacheCompletionBlock)(BOOL isInCache); + +typedef void(^HHWebImageCalculateSizeBlock)(NSUInteger fileCount, NSUInteger totalSize); + + +/** + * HHImageCache maintains a memory cache and an optional disk cache. Disk cache write operations are performed + * asynchronous so it doesn’t add unnecessary latency to the UI. + */ +@interface HHImageCache : NSObject + +#pragma mark - Properties + +/** + * Cache Config object - storing all kind of settings + */ +@property (nonatomic, nonnull, readonly) HHImageCacheConfig *config; + +/** + * The maximum "total cost" of the in-memory image cache. The cost function is the number of pixels held in memory. + */ +@property (assign, nonatomic) NSUInteger maxMemoryCost; + +/** + * The maximum number of objects the cache should hold. 
+ */ +@property (assign, nonatomic) NSUInteger maxMemoryCountLimit; + +#pragma mark - Singleton and initialization + +/** + * Returns global shared cache instance + * + * @return HHImageCache global instance + */ ++ (nonnull instancetype)sharedImageCache; + +/** + * Init a new cache store with a specific namespace + * + * @param ns The namespace to use for this cache store + */ +- (nonnull instancetype)initWithNamespace:(nonnull NSString *)ns; + +/** + * Init a new cache store with a specific namespace and directory + * + * @param ns The namespace to use for this cache store + * @param directory Directory to cache disk images in + */ +- (nonnull instancetype)initWithNamespace:(nonnull NSString *)ns + diskCacheDirectory:(nonnull NSString *)directory NS_DESIGNATED_INITIALIZER; + +#pragma mark - Cache paths + +- (nullable NSString *)makeDiskCachePath:(nonnull NSString*)fullNamespace; + +/** + * Add a read-only cache path to search for images pre-cached by HHImageCache + * Useful if you want to bundle pre-loaded images with your app + * + * @param path The path to use for this read-only cache path + */ +- (void)addReadOnlyCachePath:(nonnull NSString *)path; + +#pragma mark - Store Ops + +/** + * Asynchronously store an image into memory and disk cache at the given key. + * + * @param image The image to store + * @param key The unique image cache key, usually it's image absolute URL + * @param completionBlock A block executed after the operation is finished + */ +- (void)storeImage:(nullable UIImage *)image + forKey:(nullable NSString *)key + completion:(nullable HHWebImageNoParamsBlock)completionBlock; + +/** + * Asynchronously store an image into memory and disk cache at the given key. 
+ * + * @param image The image to store + * @param key The unique image cache key, usually it's image absolute URL + * @param toDisk Store the image to disk cache if YES + * @param completionBlock A block executed after the operation is finished + */ +- (void)storeImage:(nullable UIImage *)image + forKey:(nullable NSString *)key + toDisk:(BOOL)toDisk + completion:(nullable HHWebImageNoParamsBlock)completionBlock; + +/** + * Asynchronously store an image into memory and disk cache at the given key. + * + * @param image The image to store + * @param imageData The image data as returned by the server, this representation will be used for disk storage + * instead of converting the given image object into a storable/compressed image format in order + * to save quality and CPU + * @param key The unique image cache key, usually it's image absolute URL + * @param toDisk Store the image to disk cache if YES + * @param completionBlock A block executed after the operation is finished + */ +- (void)storeImage:(nullable UIImage *)image + imageData:(nullable NSData *)imageData + forKey:(nullable NSString *)key + toDisk:(BOOL)toDisk + completion:(nullable HHWebImageNoParamsBlock)completionBlock; + +/** + * Synchronously store image NSData into disk cache at the given key. + * + * @warning This method is synchronous, make sure to call it from the ioQueue + * + * @param imageData The image data to store + * @param key The unique image cache key, usually it's image absolute URL + */ +- (void)storeImageDataToDisk:(nullable NSData *)imageData forKey:(nullable NSString *)key; + +#pragma mark - Query and Retrieve Ops + +/** + * Async check if image exists in disk cache already (does not load the image) + * + * @param key the key describing the url + * @param completionBlock the block to be executed when the check is done. 
+ * @note the completion block will be always executed on the main queue + */ +- (void)diskImageExistsWithKey:(nullable NSString *)key completion:(nullable HHWebImageCheckCacheCompletionBlock)completionBlock; + +/** + * Sync check if image data exists in disk cache already (does not load the image) + * + * @param key the key describing the url + */ +- (BOOL)diskImageDataExistsWithKey:(nullable NSString *)key; + +/** + * Operation that queries the cache asynchronously and call the completion when done. + * + * @param key The unique key used to store the wanted image + * @param doneBlock The completion block. Will not get called if the operation is cancelled + * + * @return a NSOperation instance containing the cache op + */ +- (nullable NSOperation *)queryCacheOperationForKey:(nullable NSString *)key done:(nullable HHCacheQueryCompletedBlock)doneBlock; + +/** + * Operation that queries the cache asynchronously and call the completion when done. + * + * @param key The unique key used to store the wanted image + * @param options A mask to specify options to use for this cache query + * @param doneBlock The completion block. Will not get called if the operation is cancelled + * + * @return a NSOperation instance containing the cache op + */ +- (nullable NSOperation *)queryCacheOperationForKey:(nullable NSString *)key options:(HHImageCacheOptions)options done:(nullable HHCacheQueryCompletedBlock)doneBlock; + +/** + * Query the memory cache synchronously. + * + * @param key The unique key used to store the image + */ +- (nullable UIImage *)imageFromMemoryCacheForKey:(nullable NSString *)key; + +/** + * Query the disk cache synchronously. + * + * @param key The unique key used to store the image + */ +- (nullable UIImage *)imageFromDiskCacheForKey:(nullable NSString *)key; + +/** + * Query the cache (memory and or disk) synchronously after checking the memory cache. 
+ * + * @param key The unique key used to store the image + */ +- (nullable UIImage *)imageFromCacheForKey:(nullable NSString *)key; + +#pragma mark - Remove Ops + +/** + * Remove the image from memory and disk cache asynchronously + * + * @param key The unique image cache key + * @param completion A block that should be executed after the image has been removed (optional) + */ +- (void)removeImageForKey:(nullable NSString *)key withCompletion:(nullable HHWebImageNoParamsBlock)completion; + +/** + * Remove the image from memory and optionally disk cache asynchronously + * + * @param key The unique image cache key + * @param fromDisk Also remove cache entry from disk if YES + * @param completion A block that should be executed after the image has been removed (optional) + */ +- (void)removeImageForKey:(nullable NSString *)key fromDisk:(BOOL)fromDisk withCompletion:(nullable HHWebImageNoParamsBlock)completion; + +#pragma mark - Cache clean Ops + +/** + * Clear all memory cached images + */ +- (void)clearMemory; + +/** + * Async clear all disk cached images. Non-blocking method - returns immediately. + * @param completion A block that should be executed after cache expiration completes (optional) + */ +- (void)clearDiskOnCompletion:(nullable HHWebImageNoParamsBlock)completion; + +/** + * Async remove all expired cached image from disk. Non-blocking method - returns immediately. + * @param completionBlock A block that should be executed after cache expiration completes (optional) + */ +- (void)deleteOldFilesWithCompletionBlock:(nullable HHWebImageNoParamsBlock)completionBlock; + +#pragma mark - Cache Info + +/** + * Get the size used by the disk cache + */ +- (NSUInteger)getSize; + +/** + * Get the number of images in the disk cache + */ +- (NSUInteger)getDiskCount; + +/** + * Asynchronously calculate the disk cache's size. 
+ */ +- (void)calculateSizeWithCompletionBlock:(nullable HHWebImageCalculateSizeBlock)completionBlock; + +#pragma mark - Cache Paths + +/** + * Get the cache path for a certain key (needs the cache path root folder) + * + * @param key the key (can be obtained from url using cacheKeyForURL) + * @param path the cache path root folder + * + * @return the cache path + */ +- (nullable NSString *)cachePathForKey:(nullable NSString *)key inPath:(nonnull NSString *)path; + +/** + * Get the default cache path for a certain key + * + * @param key the key (can be obtained from url using cacheKeyForURL) + * + * @return the default cache path + */ +- (nullable NSString *)defaultCachePathForKey:(nullable NSString *)key; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHImageCacheConfig.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHImageCacheConfig.h new file mode 100644 index 0000000..8d77fb7 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHImageCacheConfig.h @@ -0,0 +1,52 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import <Foundation/Foundation.h> +#import "HHWebImageCompat.h" + +@interface HHImageCacheConfig : NSObject + +/** + * Decompressing images that are downloaded and cached can improve performance but can consume lot of memory. + * Defaults to YES. Set this to NO if you are experiencing a crash due to excessive memory consumption. + */ +@property (assign, nonatomic) BOOL shouldDecompressImages; + +/** + * disable iCloud backup [defaults to YES] + */ +@property (assign, nonatomic) BOOL shouldDisableiCloud; + +/** + * use memory cache [defaults to YES] + */ +@property (assign, nonatomic) BOOL shouldCacheImagesInMemory; + +/** + * The reading options while reading cache from disk. + * Defaults to 0. 
You can set this to `NHHataReadingMappedIfSafe` to improve performance. + */ +@property (assign, nonatomic) NSDataReadingOptions diskCacheReadingOptions; + +/** + * The writing options while writing cache to disk. + * Defaults to `NSDataWritingAtomic`. You can set this to `NSDataWritingWithoutOverwriting` to prevent overwriting an existing file. + */ +@property (assign, nonatomic) NSDataWritingOptions diskCacheWritingOptions; + +/** + * The maximum length of time to keep an image in the cache, in seconds. + */ +@property (assign, nonatomic) NSInteger maxCacheAge; + +/** + * The maximum size of the cache, in bytes. + */ +@property (assign, nonatomic) NSUInteger maxCacheSize; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHMBProgressHUD.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHMBProgressHUD.h new file mode 100644 index 0000000..6bd922b --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHMBProgressHUD.h @@ -0,0 +1,444 @@ +// +// MBProgressHUD.h +// Version 1.1.0 +// Created by Matej Bukovinski on 2.4.09. +// + +// This code is distributed under the terms and conditions of the MIT license. + +// Copyright © 2009-2016 Matej Bukovinski +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import <Foundation/Foundation.h> +#import <UIKit/UIKit.h> +#import <CoreGraphics/CoreGraphics.h> + +@class HH_MBBackgroundView; +@protocol HH_MBProgressHUDDelegate; + + +extern CGFloat const HH_MBProgressMaxOffset; + +typedef NS_ENUM(NSInteger, HH_MBProgressHUDMode) { + /// UIActivityIndicatorView. + MBProgressHUDModeIndeterminate, + /// A round, pie-chart like, progress view. + MBProgressHUDModeDeterminate, + /// Horizontal progress bar. + MBProgressHUDModeDeterminateHorizontalBar, + /// Ring-shaped progress view. + MBProgressHUDModeAnnularDeterminate, + /// Shows a custom view. + MBProgressHUDModeCustomView, + /// Shows only labels. + MBProgressHUDModeText +}; + +typedef NS_ENUM(NSInteger, HH_MBProgressHUDAnimation) { + /// Opacity animation + MBProgressHUDAnimationFade, + /// Opacity + scale animation (zoom in when appearing zoom out when disappearing) + MBProgressHUDAnimationZoom, + /// Opacity + scale animation (zoom out style) + MBProgressHUDAnimationZoomOut, + /// Opacity + scale animation (zoom in style) + MBProgressHUDAnimationZoomIn +}; + +typedef NS_ENUM(NSInteger, HH_MBProgressHUDBackgroundStyle) { + /// Solid color background + MBProgressHUDBackgroundStyleSolidColor, + /// UIVisualEffectView or UIToolbar.layer background view + MBProgressHUDBackgroundStyleBlur +}; + +typedef void (^HH_MBProgressHUDCompletionBlock)(void); + + +NS_ASSUME_NONNULL_BEGIN + + +/** + * Displays a simple HUD window containing a progress indicator and two optional labels for short messages. 
+ * + * This is a simple drop-in class for displaying a progress HUD view similar to Apple's private UIProgressHUD class. + * The MBProgressHUD window spans over the entire space given to it by the initWithFrame: constructor and catches all + * user input on this region, thereby preventing the user operations on components below the view. + * + * @note To still allow touches to pass through the HUD, you can set hud.userInteractionEnabled = NO. + * @attention MBProgressHUD is a UI class and should therefore only be accessed on the main thread. + */ +@interface HH_MBProgressHUD : UIView + +/** + * Creates a new HUD, adds it to provided view and shows it. The counterpart to this method is hideHUDForView:animated:. + * + * @note This method sets removeFromSuperViewOnHide. The HUD will automatically be removed from the view hierarchy when hidden. + * + * @param view The view that the HUD will be added to + * @param animated If set to YES the HUD will appear using the current animationType. If set to NO the HUD will not use + * animations while appearing. + * @return A reference to the created HUD. + * + * @see hideHUDForView:animated: + * @see animationType + */ ++ (instancetype)showHUDAddedTo:(UIView *)view animated:(BOOL)animated; + +/// @name Showing and hiding + +/** + * Finds the top-most HUD subview that hasn't finished and hides it. The counterpart to this method is showHUDAddedTo:animated:. + * + * @note This method sets removeFromSuperViewOnHide. The HUD will automatically be removed from the view hierarchy when hidden. + * + * @param view The view that is going to be searched for a HUD subview. + * @param animated If set to YES the HUD will disappear using the current animationType. If set to NO the HUD will not use + * animations while disappearing. + * @return YES if a HUD was found and removed, NO otherwise. 
+ * + * @see showHUDAddedTo:animated: + * @see animationType + */ ++ (BOOL)hideHUDForView:(UIView *)view animated:(BOOL)animated; + +/** + * Finds the top-most HUD subview that hasn't finished and returns it. + * + * @param view The view that is going to be searched. + * @return A reference to the last HUD subview discovered. + */ ++ (nullable HH_MBProgressHUD *)HUDForView:(UIView *)view; + +/** + * A convenience constructor that initializes the HUD with the view's bounds. Calls the designated constructor with + * view.bounds as the parameter. + * + * @param view The view instance that will provide the bounds for the HUD. Should be the same instance as + * the HUD's superview (i.e., the view that the HUD will be added to). + */ +- (instancetype)initWithView:(UIView *)view; + +/** + * Displays the HUD. + * + * @note You need to make sure that the main thread completes its run loop soon after this method call so that + * the user interface can be updated. Call this method when your task is already set up to be executed in a new thread + * (e.g., when using something like NSOperation or making an asynchronous call like NSURLRequest). + * + * @param animated If set to YES the HUD will appear using the current animationType. If set to NO the HUD will not use + * animations while appearing. + * + * @see animationType + */ +- (void)showAnimated:(BOOL)animated; + +/** + * Hides the HUD. This still calls the hudWasHidden: delegate. This is the counterpart of the show: method. Use it to + * hide the HUD when your task completes. + * + * @param animated If set to YES the HUD will disappear using the current animationType. If set to NO the HUD will not use + * animations while disappearing. + * + * @see animationType + */ +- (void)hideAnimated:(BOOL)animated; + +/** + * Hides the HUD after a delay. This still calls the hudWasHidden: delegate. This is the counterpart of the show: method. Use it to + * hide the HUD when your task completes. 
+ * + * @param animated If set to YES the HUD will disappear using the current animationType. If set to NO the HUD will not use + * animations while disappearing. + * @param delay Delay in seconds until the HUD is hidden. + * + * @see animationType + */ +- (void)hideAnimated:(BOOL)animated afterDelay:(NSTimeInterval)delay; + +/** + * The HUD delegate object. Receives HUD state notifications. + */ +@property (weak, nonatomic) id<HH_MBProgressHUDDelegate> delegate; + +/** + * Called after the HUD is hiden. + */ +@property (copy, nullable) HH_MBProgressHUDCompletionBlock completionBlock; + +/* + * Grace period is the time (in seconds) that the invoked method may be run without + * showing the HUD. If the task finishes before the grace time runs out, the HUD will + * not be shown at all. + * This may be used to prevent HUD display for very short tasks. + * Defaults to 0 (no grace time). + */ +@property (assign, nonatomic) NSTimeInterval graceTime; + +/** + * The minimum time (in seconds) that the HUD is shown. + * This avoids the problem of the HUD being shown and than instantly hidden. + * Defaults to 0 (no minimum show time). + */ +@property (assign, nonatomic) NSTimeInterval minShowTime; + +/** + * Removes the HUD from its parent view when hidden. + * Defaults to NO. + */ +@property (assign, nonatomic) BOOL removeFromSuperViewOnHide; + +/// @name Appearance + +/** + * MBProgressHUD operation mode. The default is MBProgressHUDModeIndeterminate. + */ +@property (assign, nonatomic) HH_MBProgressHUDMode mode; + +/** + * A color that gets forwarded to all labels and supported indicators. Also sets the tintColor + * for custom views on iOS 7+. Set to nil to manage color individually. + * Defaults to semi-translucent black on iOS 7 and later and white on earlier iOS versions. + */ +@property (strong, nonatomic, nullable) UIColor *contentColor UI_APPEARANCE_SELECTOR; + +/** + * The animation type that should be used when the HUD is shown and hidden. 
+ */ +@property (assign, nonatomic) HH_MBProgressHUDAnimation animationType UI_APPEARANCE_SELECTOR; + +/** + * The bezel offset relative to the center of the view. You can use MBProgressMaxOffset + * and -MBProgressMaxOffset to move the HUD all the way to the screen edge in each direction. + * E.g., CGPointMake(0.f, MBProgressMaxOffset) would position the HUD centered on the bottom edge. + */ +@property (assign, nonatomic) CGPoint offset UI_APPEARANCE_SELECTOR; + +/** + * The amount of space between the HUD edge and the HUD elements (labels, indicators or custom views). + * This also represents the minimum bezel distance to the edge of the HUD view. + * Defaults to 20.f + */ +@property (assign, nonatomic) CGFloat margin UI_APPEARANCE_SELECTOR; + +/** + * The minimum size of the HUD bezel. Defaults to CGSizeZero (no minimum size). + */ +@property (assign, nonatomic) CGSize minSize UI_APPEARANCE_SELECTOR; + +/** + * Force the HUD dimensions to be equal if possible. + */ +@property (assign, nonatomic, getter = isSquare) BOOL square UI_APPEARANCE_SELECTOR; + +/** + * When enabled, the bezel center gets slightly affected by the device accelerometer data. + * Has no effect on iOS < 7.0. Defaults to YES. + */ +@property (assign, nonatomic, getter=areDefaultMotionEffectsEnabled) BOOL defaultMotionEffectsEnabled UI_APPEARANCE_SELECTOR; + +/// @name Progress + +/** + * The progress of the progress indicator, from 0.0 to 1.0. Defaults to 0.0. + */ +@property (assign, nonatomic) float progress; + +/// @name ProgressObject + +/** + * The NSProgress object feeding the progress information to the progress indicator. + */ +@property (strong, nonatomic, nullable) NSProgress *progressObject; + +/// @name Views + +/** + * The view containing the labels and indicator (or customView). + */ +@property (strong, nonatomic, readonly) HH_MBBackgroundView *bezelView; + +/** + * View covering the entire HUD area, placed behind bezelView. 
+ */ +@property (strong, nonatomic, readonly) HH_MBBackgroundView *backgroundView; + +/** + * The UIView (e.g., a UIImageView) to be shown when the HUD is in MBProgressHUDModeCustomView. + * The view should implement intrinsicContentSize for proper sizing. For best results use approximately 37 by 37 pixels. + */ +@property (strong, nonatomic, nullable) UIView *customView; + +/** + * A label that holds an optional short message to be displayed below the activity indicator. The HUD is automatically resized to fit + * the entire text. + */ +@property (strong, nonatomic, readonly) UILabel *label; + +/** + * A label that holds an optional details message displayed below the labelText message. The details text can span multiple lines. + */ +@property (strong, nonatomic, readonly) UILabel *detailsLabel; + +/** + * A button that is placed below the labels. Visible only if a target / action is added. + */ +//@property (strong, nonatomic, readonly) UIButton *button; + +@end + + +@protocol HH_MBProgressHUDDelegate <NSObject> + +@optional + +/** + * Called after the HUD was fully hidden from the screen. + */ +- (void)hudWasHidden:(HH_MBProgressHUD *)hud; + +@end + + +/** + * A progress view for showing definite progress by filling up a circle (pie chart). + */ +@interface HH_MBRoundProgressView : UIView + +/** + * Progress (0.0 to 1.0) + */ +@property (nonatomic, assign) float progress; + +/** + * Indicator progress color. + * Defaults to white [UIColor whiteColor]. + */ +@property (nonatomic, strong) UIColor *progressTintColor; + +/** + * Indicator background (non-progress) color. + * Only applicable on iOS versions older than iOS 7. + * Defaults to translucent white (alpha 0.1). + */ +@property (nonatomic, strong) UIColor *backgroundTintColor; + +/* + * Display mode - NO = round or YES = annular. Defaults to round. + */ +@property (nonatomic, assign, getter = isAnnular) BOOL annular; + +@end + + +/** + * A flat bar progress view. 
+ */ +@interface HH_MBBarProgressView : UIView + +/** + * Progress (0.0 to 1.0) + */ +@property (nonatomic, assign) float progress; + +/** + * Bar border line color. + * Defaults to white [UIColor whiteColor]. + */ +@property (nonatomic, strong) UIColor *lineColor; + +/** + * Bar background color. + * Defaults to clear [UIColor clearColor]; + */ +@property (nonatomic, strong) UIColor *progressRemainingColor; + +/** + * Bar progress color. + * Defaults to white [UIColor whiteColor]. + */ +@property (nonatomic, strong) UIColor *progressColor; + +@end + + +@interface HH_MBBackgroundView : UIView + +/** + * The background style. + * Defaults to MBProgressHUDBackgroundStyleBlur on iOS 7 or later and MBProgressHUDBackgroundStyleSolidColor otherwise. + * @note Due to iOS 7 not supporting UIVisualEffectView, the blur effect differs slightly between iOS 7 and later versions. + */ +@property (nonatomic) HH_MBProgressHUDBackgroundStyle style; + +#if __IPHONE_OS_VERSION_MAX_ALLOWED >= 80000 || TARGET_OS_TV +/** + * The blur effect style, when using MBProgressHUDBackgroundStyleBlur. + * Defaults to UIBlurEffectStyleLight. + */ +@property (nonatomic) UIBlurEffectStyle blurEffectStyle; +#endif + +/** + * The background color or the blur tint color. + * @note Due to iOS 7 not supporting UIVisualEffectView, the blur effect differs slightly between iOS 7 and later versions. 
+ */ +@property (nonatomic, strong) UIColor *color; + +@end + + +@interface HH_MBProgressHUD (Deprecated) + ++ (NSArray *)allHUDsForView:(UIView *)view __attribute__((deprecated("Store references when using more than one HUD per view."))); ++ (NSUInteger)hideAllHUDsForView:(UIView *)view animated:(BOOL)animated __attribute__((deprecated("Store references when using more than one HUD per view."))); + +- (id)initWithWindow:(UIWindow *)window __attribute__((deprecated("Use initWithView: instead."))); + +- (void)show:(BOOL)animated __attribute__((deprecated("Use showAnimated: instead."))); +- (void)hide:(BOOL)animated __attribute__((deprecated("Use hideAnimated: instead."))); +- (void)hide:(BOOL)animated afterDelay:(NSTimeInterval)delay __attribute__((deprecated("Use hideAnimated:afterDelay: instead."))); + +- (void)showWhileExecuting:(SEL)method onTarget:(id)target withObject:(id)object animated:(BOOL)animated __attribute__((deprecated("Use GCD directly."))); +- (void)showAnimated:(BOOL)animated whileExecutingBlock:(dispatch_block_t)block __attribute__((deprecated("Use GCD directly."))); +- (void)showAnimated:(BOOL)animated whileExecutingBlock:(dispatch_block_t)block completionBlock:(nullable HH_MBProgressHUDCompletionBlock)completion __attribute__((deprecated("Use GCD directly."))); +- (void)showAnimated:(BOOL)animated whileExecutingBlock:(dispatch_block_t)block onQueue:(dispatch_queue_t)queue __attribute__((deprecated("Use GCD directly."))); +- (void)showAnimated:(BOOL)animated whileExecutingBlock:(dispatch_block_t)block onQueue:(dispatch_queue_t)queue + completionBlock:(nullable HH_MBProgressHUDCompletionBlock)completion __attribute__((deprecated("Use GCD directly."))); +@property (assign) BOOL taskInProgress __attribute__((deprecated("No longer needed."))); + +@property (nonatomic, copy) NSString *labelText __attribute__((deprecated("Use label.text instead."))); +@property (nonatomic, strong) UIFont *labelFont __attribute__((deprecated("Use label.font 
instead."))); +@property (nonatomic, strong) UIColor *labelColor __attribute__((deprecated("Use label.textColor instead."))); +@property (nonatomic, copy) NSString *detailsLabelText __attribute__((deprecated("Use detailsLabel.text instead."))); +@property (nonatomic, strong) UIFont *detailsLabelFont __attribute__((deprecated("Use detailsLabel.font instead."))); +@property (nonatomic, strong) UIColor *detailsLabelColor __attribute__((deprecated("Use detailsLabel.textColor instead."))); +@property (assign, nonatomic) CGFloat opacity __attribute__((deprecated("Customize bezelView properties instead."))); +@property (strong, nonatomic) UIColor *color __attribute__((deprecated("Customize the bezelView color instead."))); +@property (assign, nonatomic) CGFloat xOffset __attribute__((deprecated("Set offset.x instead."))); +@property (assign, nonatomic) CGFloat yOffset __attribute__((deprecated("Set offset.y instead."))); +@property (assign, nonatomic) CGFloat cornerRadius __attribute__((deprecated("Set bezelView.layer.cornerRadius instead."))); +@property (assign, nonatomic) BOOL dimBackground __attribute__((deprecated("Customize HUD background properties instead."))); +@property (strong, nonatomic) UIColor *activityIndicatorColor __attribute__((deprecated("Use UIAppearance to customize UIActivityIndicatorView. E.g.: [UIActivityIndicatorView appearanceWhenContainedIn:[MBProgressHUD class], nil].color = [UIColor redColor];"))); +@property (atomic, assign, readonly) CGSize size __attribute__((deprecated("Get the bezelView.frame.size instead."))); + +@end + +NS_ASSUME_NONNULL_END diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHPhotoPicker.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHPhotoPicker.h new file mode 100644 index 0000000..4f76222 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHPhotoPicker.h @@ -0,0 +1,14 @@ +// +// PhotoPicker.h +// PhotoPicker +// +// Created by Shi Jian on 2018/1/25. +// Copyright © 2018年 AshenDever. 
All rights reserved. +// + +#ifndef PhotoPicker_h +#define PhotoPicker_h + +#import "SDKCameraImageModel.h" + +#endif /* PhotoPicker_h */ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHSDKVideo-Swift.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHSDKVideo-Swift.h new file mode 100644 index 0000000..3dc9c30 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHSDKVideo-Swift.h @@ -0,0 +1,2156 @@ +// Generated by Apple Swift version 5.4.2 (swiftlang-1205.0.28.2 clang-1205.0.19.57) +#ifndef HHSDKVIDEO_SWIFT_H +#define HHSDKVIDEO_SWIFT_H +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wgcc-compat" + +#if !defined(__has_include) +# define __has_include(x) 0 +#endif +#if !defined(__has_attribute) +# define __has_attribute(x) 0 +#endif +#if !defined(__has_feature) +# define __has_feature(x) 0 +#endif +#if !defined(__has_warning) +# define __has_warning(x) 0 +#endif + +#if __has_include(<swift/objc-prologue.h>) +# include <swift/objc-prologue.h> +#endif + +#pragma clang diagnostic ignored "-Wauto-import" +#include <Foundation/Foundation.h> +#include <stdint.h> +#include <stddef.h> +#include <stdbool.h> + +#if !defined(SWIFT_TYPEDEFS) +# define SWIFT_TYPEDEFS 1 +# if __has_include(<uchar.h>) +# include <uchar.h> +# elif !defined(__cplusplus) +typedef uint_least16_t char16_t; +typedef uint_least32_t char32_t; +# endif +typedef float swift_float2 __attribute__((__ext_vector_type__(2))); +typedef float swift_float3 __attribute__((__ext_vector_type__(3))); +typedef float swift_float4 __attribute__((__ext_vector_type__(4))); +typedef double swift_double2 __attribute__((__ext_vector_type__(2))); +typedef double swift_double3 __attribute__((__ext_vector_type__(3))); +typedef double swift_double4 __attribute__((__ext_vector_type__(4))); +typedef int swift_int2 __attribute__((__ext_vector_type__(2))); +typedef int swift_int3 __attribute__((__ext_vector_type__(3))); +typedef int swift_int4 __attribute__((__ext_vector_type__(4))); +typedef 
unsigned int swift_uint2 __attribute__((__ext_vector_type__(2))); +typedef unsigned int swift_uint3 __attribute__((__ext_vector_type__(3))); +typedef unsigned int swift_uint4 __attribute__((__ext_vector_type__(4))); +#endif + +#if !defined(SWIFT_PASTE) +# define SWIFT_PASTE_HELPER(x, y) x##y +# define SWIFT_PASTE(x, y) SWIFT_PASTE_HELPER(x, y) +#endif +#if !defined(SWIFT_METATYPE) +# define SWIFT_METATYPE(X) Class +#endif +#if !defined(SWIFT_CLASS_PROPERTY) +# if __has_feature(objc_class_property) +# define SWIFT_CLASS_PROPERTY(...) __VA_ARGS__ +# else +# define SWIFT_CLASS_PROPERTY(...) +# endif +#endif + +#if __has_attribute(objc_runtime_name) +# define SWIFT_RUNTIME_NAME(X) __attribute__((objc_runtime_name(X))) +#else +# define SWIFT_RUNTIME_NAME(X) +#endif +#if __has_attribute(swift_name) +# define SWIFT_COMPILE_NAME(X) __attribute__((swift_name(X))) +#else +# define SWIFT_COMPILE_NAME(X) +#endif +#if __has_attribute(objc_method_family) +# define SWIFT_METHOD_FAMILY(X) __attribute__((objc_method_family(X))) +#else +# define SWIFT_METHOD_FAMILY(X) +#endif +#if __has_attribute(noescape) +# define SWIFT_NOESCAPE __attribute__((noescape)) +#else +# define SWIFT_NOESCAPE +#endif +#if __has_attribute(ns_consumed) +# define SWIFT_RELEASES_ARGUMENT __attribute__((ns_consumed)) +#else +# define SWIFT_RELEASES_ARGUMENT +#endif +#if __has_attribute(warn_unused_result) +# define SWIFT_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) +#else +# define SWIFT_WARN_UNUSED_RESULT +#endif +#if __has_attribute(noreturn) +# define SWIFT_NORETURN __attribute__((noreturn)) +#else +# define SWIFT_NORETURN +#endif +#if !defined(SWIFT_CLASS_EXTRA) +# define SWIFT_CLASS_EXTRA +#endif +#if !defined(SWIFT_PROTOCOL_EXTRA) +# define SWIFT_PROTOCOL_EXTRA +#endif +#if !defined(SWIFT_ENUM_EXTRA) +# define SWIFT_ENUM_EXTRA +#endif +#if !defined(SWIFT_CLASS) +# if __has_attribute(objc_subclassing_restricted) +# define SWIFT_CLASS(SWIFT_NAME) SWIFT_RUNTIME_NAME(SWIFT_NAME) 
__attribute__((objc_subclassing_restricted)) SWIFT_CLASS_EXTRA +# define SWIFT_CLASS_NAMED(SWIFT_NAME) __attribute__((objc_subclassing_restricted)) SWIFT_COMPILE_NAME(SWIFT_NAME) SWIFT_CLASS_EXTRA +# else +# define SWIFT_CLASS(SWIFT_NAME) SWIFT_RUNTIME_NAME(SWIFT_NAME) SWIFT_CLASS_EXTRA +# define SWIFT_CLASS_NAMED(SWIFT_NAME) SWIFT_COMPILE_NAME(SWIFT_NAME) SWIFT_CLASS_EXTRA +# endif +#endif +#if !defined(SWIFT_RESILIENT_CLASS) +# if __has_attribute(objc_class_stub) +# define SWIFT_RESILIENT_CLASS(SWIFT_NAME) SWIFT_CLASS(SWIFT_NAME) __attribute__((objc_class_stub)) +# define SWIFT_RESILIENT_CLASS_NAMED(SWIFT_NAME) __attribute__((objc_class_stub)) SWIFT_CLASS_NAMED(SWIFT_NAME) +# else +# define SWIFT_RESILIENT_CLASS(SWIFT_NAME) SWIFT_CLASS(SWIFT_NAME) +# define SWIFT_RESILIENT_CLASS_NAMED(SWIFT_NAME) SWIFT_CLASS_NAMED(SWIFT_NAME) +# endif +#endif + +#if !defined(SWIFT_PROTOCOL) +# define SWIFT_PROTOCOL(SWIFT_NAME) SWIFT_RUNTIME_NAME(SWIFT_NAME) SWIFT_PROTOCOL_EXTRA +# define SWIFT_PROTOCOL_NAMED(SWIFT_NAME) SWIFT_COMPILE_NAME(SWIFT_NAME) SWIFT_PROTOCOL_EXTRA +#endif + +#if !defined(SWIFT_EXTENSION) +# define SWIFT_EXTENSION(M) SWIFT_PASTE(M##_Swift_, __LINE__) +#endif + +#if !defined(OBJC_DESIGNATED_INITIALIZER) +# if __has_attribute(objc_designated_initializer) +# define OBJC_DESIGNATED_INITIALIZER __attribute__((objc_designated_initializer)) +# else +# define OBJC_DESIGNATED_INITIALIZER +# endif +#endif +#if !defined(SWIFT_ENUM_ATTR) +# if defined(__has_attribute) && __has_attribute(enum_extensibility) +# define SWIFT_ENUM_ATTR(_extensibility) __attribute__((enum_extensibility(_extensibility))) +# else +# define SWIFT_ENUM_ATTR(_extensibility) +# endif +#endif +#if !defined(SWIFT_ENUM) +# define SWIFT_ENUM(_type, _name, _extensibility) enum _name : _type _name; enum SWIFT_ENUM_ATTR(_extensibility) SWIFT_ENUM_EXTRA _name : _type +# if __has_feature(generalized_swift_name) +# define SWIFT_ENUM_NAMED(_type, _name, SWIFT_NAME, _extensibility) enum _name : _type _name 
SWIFT_COMPILE_NAME(SWIFT_NAME); enum SWIFT_COMPILE_NAME(SWIFT_NAME) SWIFT_ENUM_ATTR(_extensibility) SWIFT_ENUM_EXTRA _name : _type +# else +# define SWIFT_ENUM_NAMED(_type, _name, SWIFT_NAME, _extensibility) SWIFT_ENUM(_type, _name, _extensibility) +# endif +#endif +#if !defined(SWIFT_UNAVAILABLE) +# define SWIFT_UNAVAILABLE __attribute__((unavailable)) +#endif +#if !defined(SWIFT_UNAVAILABLE_MSG) +# define SWIFT_UNAVAILABLE_MSG(msg) __attribute__((unavailable(msg))) +#endif +#if !defined(SWIFT_AVAILABILITY) +# define SWIFT_AVAILABILITY(plat, ...) __attribute__((availability(plat, __VA_ARGS__))) +#endif +#if !defined(SWIFT_WEAK_IMPORT) +# define SWIFT_WEAK_IMPORT __attribute__((weak_import)) +#endif +#if !defined(SWIFT_DEPRECATED) +# define SWIFT_DEPRECATED __attribute__((deprecated)) +#endif +#if !defined(SWIFT_DEPRECATED_MSG) +# define SWIFT_DEPRECATED_MSG(...) __attribute__((deprecated(__VA_ARGS__))) +#endif +#if __has_feature(attribute_diagnose_if_objc) +# define SWIFT_DEPRECATED_OBJC(Msg) __attribute__((diagnose_if(1, Msg, "warning"))) +#else +# define SWIFT_DEPRECATED_OBJC(Msg) SWIFT_DEPRECATED_MSG(Msg) +#endif +#if !defined(IBSegueAction) +# define IBSegueAction +#endif +#if __has_feature(modules) +#if __has_warning("-Watimport-in-framework-header") +#pragma clang diagnostic ignored "-Watimport-in-framework-header" +#endif +@import AVFoundation; +@import CoreFoundation; +@import CoreGraphics; +@import CoreLocation; +@import CoreMedia; +@import Foundation; +@import ObjectiveC; +@import Photos; +@import QuartzCore; +@import UIKit; +#endif + +#pragma clang diagnostic ignored "-Wproperty-attribute-mismatch" +#pragma clang diagnostic ignored "-Wduplicate-method-arg" +#if __has_warning("-Wpragma-clang-attribute") +# pragma clang diagnostic ignored "-Wpragma-clang-attribute" +#endif +#pragma clang diagnostic ignored "-Wunknown-pragmas" +#pragma clang diagnostic ignored "-Wnullability" + +#if __has_attribute(external_source_symbol) +# pragma push_macro("any") +# 
undef any +# pragma clang attribute push(__attribute__((external_source_symbol(language="Swift", defined_in="HHSDKVideo",generated_declaration))), apply_to=any(function,enum,objc_interface,objc_category,objc_protocol)) +# pragma pop_macro("any") +#endif + + + + + + +@class NSNumber; +@class NSString; +@class NSBundle; +@class NSCoder; + +SWIFT_CLASS("_TtC10HHSDKVideo13CommentBaseVC") +@interface CommentBaseVC : UIViewController +- (void)viewWillAppear:(BOOL)animated; +- (nonnull instancetype)initWithNibName:(NSString * _Nullable)nibNameOrNil bundle:(NSBundle * _Nullable)nibBundleOrNil OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)coder OBJC_DESIGNATED_INITIALIZER; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo9CommentVC") +@interface CommentVC : CommentBaseVC +- (void)viewDidLoad; +- (nonnull instancetype)initWithNibName:(NSString * _Nullable)nibNameOrNil bundle:(NSBundle * _Nullable)nibBundleOrNil OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)coder OBJC_DESIGNATED_INITIALIZER; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo33DGElasticPullToRefreshLoadingView") +@interface DGElasticPullToRefreshLoadingView : UIView +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +- (nonnull instancetype)initWithFrame:(CGRect)frame OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo39DGElasticPullToRefreshLoadingViewCircle") +@interface DGElasticPullToRefreshLoadingViewCircle : DGElasticPullToRefreshLoadingView +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +- (void)tintColorDidChange; +- (void)layoutSubviews; +- (nonnull instancetype)initWithFrame:(CGRect)frame SWIFT_UNAVAILABLE; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo26DGElasticPullToRefreshView") +@interface 
DGElasticPullToRefreshView : UIView +- (nonnull instancetype)init SWIFT_UNAVAILABLE; ++ (nonnull instancetype)new SWIFT_UNAVAILABLE_MSG("-init is unavailable"); +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +- (void)observeValueForKeyPath:(NSString * _Nullable)keyPath ofObject:(id _Nullable)object change:(NSDictionary<NSKeyValueChangeKey, id> * _Nullable)change context:(void * _Nullable)context; +- (void)layoutSubviews; +- (nonnull instancetype)initWithFrame:(CGRect)frame SWIFT_UNAVAILABLE; +@end + + + + +SWIFT_CLASS("_TtC10HHSDKVideo26EKAccessoryNoteMessageView") +@interface EKAccessoryNoteMessageView : UIView +- (nonnull instancetype)initWithFrame:(CGRect)frame OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)coder OBJC_DESIGNATED_INITIALIZER; +@end + +@class UITraitCollection; + +SWIFT_CLASS("_TtC10HHSDKVideo19EKSimpleMessageView") +@interface EKSimpleMessageView : UIView +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +- (void)traitCollectionDidChange:(UITraitCollection * _Nullable)previousTraitCollection; +- (nonnull instancetype)initWithFrame:(CGRect)frame SWIFT_UNAVAILABLE; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo18EKAlertMessageView") +@interface EKAlertMessageView : EKSimpleMessageView +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +@end + + +/// Dynamic button bar view +/// Buttons are set according to the received content. 
+/// 1-2 buttons spread horizontally +/// 3 or more buttons spread vertically +SWIFT_CLASS("_TtC10HHSDKVideo15EKButtonBarView") +@interface EKButtonBarView : UIView +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +- (void)layoutSubviews; +- (void)traitCollectionDidChange:(UITraitCollection * _Nullable)previousTraitCollection; +- (nonnull instancetype)initWithFrame:(CGRect)frame SWIFT_UNAVAILABLE; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo17EKFormMessageView") +@interface EKFormMessageView : UIView +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +- (void)traitCollectionDidChange:(UITraitCollection * _Nullable)previousTraitCollection; +- (nonnull instancetype)initWithFrame:(CGRect)frame SWIFT_UNAVAILABLE; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo22EKImageNoteMessageView") +@interface EKImageNoteMessageView : EKAccessoryNoteMessageView +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +- (nonnull instancetype)initWithFrame:(CGRect)frame SWIFT_UNAVAILABLE; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo20EKMessageContentView") +@interface EKMessageContentView : UIView +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +- (void)traitCollectionDidChange:(UITraitCollection * _Nullable)previousTraitCollection; +- (nonnull instancetype)initWithFrame:(CGRect)frame SWIFT_UNAVAILABLE; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo17EKNoteMessageView") +@interface EKNoteMessageView : UIView +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +- (nonnull instancetype)initWithFrame:(CGRect)frame SWIFT_UNAVAILABLE; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo25EKNotificationMessageView") +@interface EKNotificationMessageView : EKSimpleMessageView +- (nullable instancetype)initWithCoder:(NSCoder * 
_Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +- (void)traitCollectionDidChange:(UITraitCollection * _Nullable)previousTraitCollection; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo18EKPopUpMessageView") +@interface EKPopUpMessageView : UIView +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +- (void)traitCollectionDidChange:(UITraitCollection * _Nullable)previousTraitCollection; +- (nonnull instancetype)initWithFrame:(CGRect)frame SWIFT_UNAVAILABLE; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo27EKProcessingNoteMessageView") +@interface EKProcessingNoteMessageView : EKAccessoryNoteMessageView +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +- (nonnull instancetype)initWithFrame:(CGRect)frame SWIFT_UNAVAILABLE; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo19EKRatingMessageView") +@interface EKRatingMessageView : UIView +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +- (nonnull instancetype)initWithFrame:(CGRect)frame SWIFT_UNAVAILABLE; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo18EKRatingSymbolView") +@interface EKRatingSymbolView : UIView +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +- (nonnull instancetype)initWithFrame:(CGRect)frame SWIFT_UNAVAILABLE; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo28EKRatingSymbolsContainerView") +@interface EKRatingSymbolsContainerView : UIView +- (nonnull instancetype)initWithFrame:(CGRect)frame OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)coder OBJC_DESIGNATED_INITIALIZER; +@end + + + +SWIFT_CLASS("_TtC10HHSDKVideo11EKTextField") +@interface EKTextField : UIView +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +- (void)traitCollectionDidChange:(UITraitCollection * _Nullable)previousTraitCollection; +- (nonnull instancetype)initWithFrame:(CGRect)frame 
SWIFT_UNAVAILABLE; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo23EKXStatusBarMessageView") +@interface EKXStatusBarMessageView : UIView +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +- (nonnull instancetype)initWithFrame:(CGRect)frame SWIFT_UNAVAILABLE; +@end + + + +SWIFT_CLASS("_TtC10HHSDKVideo18HHAppProtocolCheck") +@interface HHAppProtocolCheck : NSObject +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + +@class UITextView; +@class NSURL; + +@interface HHAppProtocolCheck (SWIFT_EXTENSION(HHSDKVideo)) <UITextViewDelegate> +- (BOOL)textView:(UITextView * _Nonnull)textView shouldInteractWithURL:(NSURL * _Nonnull)URL inRange:(NSRange)characterRange interaction:(UITextItemInteraction)interaction SWIFT_WARN_UNUSED_RESULT; +@end + +/// 接通前的呼叫状态 +/// <ul> +/// <li> +/// onStart: 呼叫开始 +/// </li> +/// <li> +/// waitingDoctor: 等待空闲的医生 +/// </li> +/// <li> +/// callFreeDoctor: 有医生空闲,开始呼叫 +/// </li> +/// <li> +/// callConnect: 建立呼叫 +/// </li> +/// <li> +/// didRing: 医生响铃,等待接受 +/// </li> +/// </ul> +typedef SWIFT_ENUM(NSInteger, HHBaseCallingState, open) { + HHBaseCallingStateOnStart = 0, + HHBaseCallingStateWaitingDoctor = 1, + HHBaseCallingStateCallFreeDoctor = 2, + HHBaseCallingStateCallConnect = 3, + HHBaseCallingStateDidRing = 4, +}; + +typedef SWIFT_ENUM(NSInteger, HHBasePermissionType, open) { + HHBasePermissionTypeLocationAlways = 0, + HHBasePermissionTypeLocationWhenInUse = 1, + HHBasePermissionTypeMicrophone = 2, + HHBasePermissionTypeCamera = 3, + HHBasePermissionTypePhotos = 4, +}; + + +SWIFT_CLASS("_TtC10HHSDKVideo9HHBaseSDK") +@interface HHBaseSDK : NSObject +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong, getter=default) HHBaseSDK * _Nonnull default_;) ++ (HHBaseSDK * _Nonnull)default SWIFT_WARN_UNUSED_RESULT; +/// SDK init +- (void)start; +/// 登录账户 +/// <ul> +/// <li> +/// Parameters: +/// </li> +/// <li> +/// userToken: 用户的唯一标志 +/// </li> +/// <li> +/// completion: 完成的回调 +/// 
</li> +/// </ul> +- (void)loginWithUserToken:(NSString * _Nonnull)userToken completion:(void (^ _Nonnull)(NSString * _Nullable))completion; +/// 登出 +/// \param callback 登出回调(字符串为空表示成功) +/// +- (void)logout:(void (^ _Nullable)(NSString * _Nullable))callback; +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + + +/// 视频管理器代理 +SWIFT_PROTOCOL("_TtP10HHSDKVideo19HHBaseVideoDelegate_") +@protocol HHBaseVideoDelegate <NSObject> +/// 主动视频时的呼叫状态变化 +/// \param state 当前呼叫状态 +/// +- (void)callStateChange:(enum HHBaseCallingState)state; +@optional +/// \param orderId 当前订单ID +/// +- (void)onStartWithOrderId:(NSString * _Nullable)orderId; +@required +/// 通话已接通 +- (void)callDidEstablish; +/// 视频页面回调 +- (void)getChatParentView:(UIView * _Nonnull)view; +/// 呼叫失败(业务服务报错) +- (void)callFailWithCode:(NSInteger)code error:(NSString * _Nonnull)error; +/// 呼叫失败(音视频服务报错) +/// \param error 错误信息 +/// +- (void)onFail:(NSInteger)errorCode errrorStr:(NSString * _Nullable)errrorStr; +- (void)onCancel; +/// 呼叫时获取到订单信息 +- (void)receivedOrder:(NSString * _Nonnull)orderId; +/// 通话已结束 (接通之后才有结束) +- (void)callDidFinish; +/// 转呼医生 +- (void)onExtensionDoctor; +/// 接收到呼叫(被呼叫方) +/// \param callID 呼叫的 id +/// +- (void)onReceive:(NSString * _Nonnull)callID; +/// 收到视频呼入时的操作(被呼叫方) +/// \param accept 接受或者拒接 +/// +- (void)onResponse:(BOOL)accept; +/// 缺少必要权限 +/// \param type 缺少的权限类型 +/// +- (void)onLeakPermission:(enum HHBasePermissionType)type; +@optional +/// 强制下线 +- (void)onForceOffline; +@end + + +SWIFT_PROTOCOL("_TtP10HHSDKVideo14HHCallDelegate_") +@protocol HHCallDelegate <NSObject> +@optional +/// 呼叫状态 +/// \param error 错误信息(为空表示呼叫成功) +/// +- (void)onCallStatus:(NSError * _Nullable)error; +/// 呼叫成功 +- (void)onCallSuccess; +/// 呼叫完成 +- (void)callFinished; +@end + +/// 呼叫类型 +/// <ul> +/// <li> +/// child: 儿童 +/// </li> +/// <li> +/// adult: 成人 +/// </li> +/// </ul> +typedef SWIFT_ENUM(NSInteger, HHCallType, open) { + HHCallTypeChild = 600000, + HHCallTypeAdult = 600002, +}; + + +/// 用户信息 
+SWIFT_CLASS("_TtC10HHSDKVideo12HHCallerInfo") +@interface HHCallerInfo : NSObject +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo11HHDateUtils") +@interface HHDateUtils : NSObject +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + + + + + +SWIFT_CLASS("_TtC10HHSDKVideo15HHDeviceManager") +@interface HHDeviceManager : NSObject +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo18HHFileCacheManager") +@interface HHFileCacheManager : NSObject +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + + + + +/// 加载进度协议 +SWIFT_PROTOCOL("_TtP10HHSDKVideo9HHHUDable_") +@protocol HHHUDable +@optional +/// 自动消失时间(默认1.5秒) +@property (nonatomic, readonly) NSTimeInterval autoDismissDuration; +@required +/// 显示加载中 +- (void)showHUD; +/// 隐藏菊花 +- (void)dismissHUD; +/// 正确提示 +- (void)showSuccess:(NSString * _Nullable)message; +/// 错误提示 +- (void)showError:(NSString * _Nullable)messgae; +@optional +/// 自动 dismiss 时间 +/// \param duraion 持续时间 +/// +- (void)setDismissDuration:(NSTimeInterval)duraion; +@end + + +SWIFT_PROTOCOL("_TtP10HHSDKVideo4HHIM_") +@protocol HHIM +- (void)register:(NSString * _Nullable)cerName; +- (void)login:(void (^ _Nullable)(NSString * _Nullable))completion; +- (void)autoLogin:(void (^ _Nullable)(NSString * _Nullable))completion; +- (void)logout:(void (^ _Nullable)(NSString * _Nullable))callback; +- (BOOL)canVideo SWIFT_WARN_UNUSED_RESULT; +@end + +typedef SWIFT_ENUM(NSInteger, HHLogMode, open) { +/// nothing show + HHLogModeError = 0, + HHLogModeWarn = 1, +/// only information + HHLogModeInfo = 2, +/// network info + HHLogModeDebug = 3, +/// debug mode + HHLogModeVerbose = 4, +}; + + +SWIFT_CLASS("_TtC10HHSDKVideo23HHMediaStatusCheckUtils") +@interface HHMediaStatusCheckUtils : NSObject +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo18HHMedicNetObserver") +@interface HHMedicNetObserver 
: NSObject +- (nonnull instancetype)init SWIFT_UNAVAILABLE; ++ (nonnull instancetype)new SWIFT_UNAVAILABLE_MSG("-init is unavailable"); +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo18HHNeedRealNameView") +@interface HHNeedRealNameView : UIView +- (nonnull instancetype)initWithFrame:(CGRect)frame SWIFT_UNAVAILABLE; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)coder SWIFT_UNAVAILABLE; +@end + + +@class UIWindow; +@class UICollectionView; +@class NSIndexPath; +@class UICollectionViewCell; +@class UIScrollView; + +IB_DESIGNABLE +SWIFT_CLASS("_TtC10HHSDKVideo11HHPagerView") +@interface HHPagerView : UIView <UICollectionViewDataSource, UICollectionViewDelegate> +- (nonnull instancetype)initWithFrame:(CGRect)frame SWIFT_UNAVAILABLE; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +- (void)layoutSubviews; +- (void)willMoveToWindow:(UIWindow * _Nullable)newWindow; +- (void)prepareForInterfaceBuilder; +- (NSInteger)numberOfSectionsInCollectionView:(UICollectionView * _Nonnull)collectionView SWIFT_WARN_UNUSED_RESULT; +- (NSInteger)collectionView:(UICollectionView * _Nonnull)collectionView numberOfItemsInSection:(NSInteger)section SWIFT_WARN_UNUSED_RESULT; +- (UICollectionViewCell * _Nonnull)collectionView:(UICollectionView * _Nonnull)collectionView cellForItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath SWIFT_WARN_UNUSED_RESULT; +- (BOOL)collectionView:(UICollectionView * _Nonnull)collectionView shouldHighlightItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath SWIFT_WARN_UNUSED_RESULT; +- (void)collectionView:(UICollectionView * _Nonnull)collectionView didHighlightItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath; +- (BOOL)collectionView:(UICollectionView * _Nonnull)collectionView shouldSelectItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath SWIFT_WARN_UNUSED_RESULT; +- (void)collectionView:(UICollectionView * _Nonnull)collectionView didSelectItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath; +- 
(void)collectionView:(UICollectionView * _Nonnull)collectionView willDisplayCell:(UICollectionViewCell * _Nonnull)cell forItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath; +- (void)collectionView:(UICollectionView * _Nonnull)collectionView didEndDisplayingCell:(UICollectionViewCell * _Nonnull)cell forItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath; +- (void)scrollViewDidScroll:(UIScrollView * _Nonnull)scrollView; +- (void)scrollViewWillBeginDragging:(UIScrollView * _Nonnull)scrollView; +- (void)scrollViewWillEndDragging:(UIScrollView * _Nonnull)scrollView withVelocity:(CGPoint)velocity targetContentOffset:(CGPoint * _Nonnull)targetContentOffset; +- (void)scrollViewDidEndDecelerating:(UIScrollView * _Nonnull)scrollView; +- (void)scrollViewDidEndScrollingAnimation:(UIScrollView * _Nonnull)scrollView; +@end + +typedef SWIFT_ENUM(NSInteger, HHPagerViewTransformerType, open) { + HHPagerViewTransformerTypeCrossFading = 0, + HHPagerViewTransformerTypeZoomOut = 1, + HHPagerViewTransformerTypeDepth = 2, + HHPagerViewTransformerTypeOverlap = 3, + HHPagerViewTransformerTypeLinear = 4, + HHPagerViewTransformerTypeCoverFlow = 5, + HHPagerViewTransformerTypeFerrisWheel = 6, + HHPagerViewTransformerTypeInvertedFerrisWheel = 7, + HHPagerViewTransformerTypeCubic = 8, +}; + + +SWIFT_CLASS("_TtC10HHSDKVideo12HHPermission") +@interface HHPermission : NSObject +- (nonnull instancetype)init SWIFT_UNAVAILABLE; ++ (nonnull instancetype)new SWIFT_UNAVAILABLE_MSG("-init is unavailable"); +@end + +@class CLLocationManager; + +@interface HHPermission (SWIFT_EXTENSION(HHSDKVideo)) <CLLocationManagerDelegate> +- (void)locationManager:(CLLocationManager * _Nonnull)manager didChangeAuthorizationStatus:(CLAuthorizationStatus)status; +@end + + + + + + + +@interface HHPermission (SWIFT_EXTENSION(HHSDKVideo)) +/// The textual representation of self. +@property (nonatomic, readonly, copy) NSString * _Nonnull description; +/// A textual representation of this instance, suitable for debugging. 
+@property (nonatomic, readonly, copy) NSString * _Nonnull debugDescription; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo18HHPhotoConfigModel") +@interface HHPhotoConfigModel : NSObject +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo23HHPhotoPickerController") +@interface HHPhotoPickerController : UINavigationController +- (void)viewDidLoad; +- (nonnull instancetype)initWithNavigationBarClass:(Class _Nullable)navigationBarClass toolbarClass:(Class _Nullable)toolbarClass OBJC_DESIGNATED_INITIALIZER SWIFT_AVAILABILITY(ios,introduced=5.0); +- (nonnull instancetype)initWithRootViewController:(UIViewController * _Nonnull)rootViewController OBJC_DESIGNATED_INITIALIZER; +- (nonnull instancetype)initWithNibName:(NSString * _Nullable)nibNameOrNil bundle:(NSBundle * _Nullable)nibBundleOrNil OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)aDecoder OBJC_DESIGNATED_INITIALIZER; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo20HHPhotoPickerManager") +@interface HHPhotoPickerManager : NSObject +- (nonnull instancetype)init SWIFT_UNAVAILABLE; ++ (nonnull instancetype)new SWIFT_UNAVAILABLE_MSG("-init is unavailable"); +@end + +@class UIColor; + +SWIFT_CLASS("_TtC10HHSDKVideo20HHPhotoUIConfigModel") +@interface HHPhotoUIConfigModel : NSObject +@property (nonatomic, strong) UIColor * _Nonnull indexLabelBgColor; +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + +@class UIImage; + +/// 音视频回调 +SWIFT_PROTOCOL("_TtP10HHSDKVideo5HHRTC_") +@protocol HHRTC +@optional +- (void)setOrderIdWithOrderId:(NSString * _Nonnull)orderId; +/// 开始呼叫 +- (void)startCallWithCallee:(NSString * _Nonnull)callee orderId:(NSString * _Nullable)orderId; +/// 进房 +- (void)enterRoomWithOrderId:(NSString * _Nonnull)orderId; +/// 切换本地音频采集 +- (void)switchLocalAudio:(BOOL)isOpen; +/// 切换本地音频采集 +- (void)switchLocalVideo:(BOOL)isOpen localView:(UIView * _Nonnull)localView; +/// 开启医生视频 +- 
(void)openDoctorViewWithUserId:(NSString * _Nonnull)userId view:(UIView * _Nonnull)view; +/// 关闭医生视频 +- (void)closeDoctorViewWithUserId:(NSString * _Nonnull)userId; +/// 切换摄像头 +- (void)switchCamera:(BOOL)isFront; +/// 设置闪光灯 +- (void)switchCameraFlash:(BOOL)isOpen; +/// 发送房间消息或p2p消息 +- (void)sendMsgWithIsSignal:(BOOL)isSignal cmd:(NSString * _Nonnull)cmd to:(NSString * _Nonnull)to complete:(void (^ _Nullable)(NSString * _Nullable))complete; +/// 退房 +- (void)leaveRoom; +/// 挂断 +- (void)hangUpWithCallId:(uint64_t)callId; +/// 开始响铃 +- (void)startRingWithAudioId:(NSInteger)audioId; +/// 结束响铃 +- (void)stopRing; +- (void)snapshotVideoWithUserId:(NSString * _Nullable)userId imageBack:(void (^ _Nonnull)(UIImage * _Nonnull))imageBack; +@end + +@class UITextField; + +SWIFT_CLASS("_TtC10HHSDKVideo22HHRealNameInputNewView") +@interface HHRealNameInputNewView : UIView +@property (nonatomic, weak) IBOutlet UITextField * _Null_unspecified idCardTF; +- (void)awakeFromNib; +- (nonnull instancetype)initWithFrame:(CGRect)frame OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)coder OBJC_DESIGNATED_INITIALIZER; +@end + + +@interface HHRealNameInputNewView (SWIFT_EXTENSION(HHSDKVideo)) <UITextFieldDelegate> +- (BOOL)textField:(UITextField * _Nonnull)textField shouldChangeCharactersInRange:(NSRange)range replacementString:(NSString * _Nonnull)string SWIFT_WARN_UNUSED_RESULT; +- (void)textFieldDidBeginEditing:(UITextField * _Nonnull)textField; +@end + + + +SWIFT_CLASS("_TtC10HHSDKVideo19HHRealNameInputView") +@interface HHRealNameInputView : UIView +- (void)awakeFromNib; +- (nonnull instancetype)initWithFrame:(CGRect)frame OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)coder OBJC_DESIGNATED_INITIALIZER; +@end + + + +@interface HHRealNameInputView (SWIFT_EXTENSION(HHSDKVideo)) <UITextFieldDelegate> +- (BOOL)textField:(UITextField * _Nonnull)textField shouldChangeCharactersInRange:(NSRange)range 
replacementString:(NSString * _Nonnull)string SWIFT_WARN_UNUSED_RESULT; +@end + + +typedef SWIFT_ENUM(NSInteger, HHRealNameType, open) { + HHRealNameTypeNormal = 0, + HHRealNameTypeBuyMedic = 1, +}; + + +/// 基础全局配置 +SWIFT_CLASS("_TtC10HHSDKVideo16HHSDKBaseOptions") +@interface HHSDKBaseOptions : NSObject +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong, getter=default) HHSDKBaseOptions * _Nonnull default_;) ++ (HHSDKBaseOptions * _Nonnull)default SWIFT_WARN_UNUSED_RESULT; +/// 调试模式(打印日志) +@property (nonatomic) BOOL isDebug; +/// 是否开启测试服 +@property (nonatomic) BOOL isDevelopment; +/// 是否是SDK +@property (nonatomic) BOOL isSDK; +/// 是否是腾讯 +@property (nonatomic) BOOL isTRTC; +/// 产品ID +@property (nonatomic, copy) NSString * _Nonnull sdkProductId; +/// app version +@property (nonatomic, copy) NSString * _Nonnull appVersion; +/// 是否配置DNS +@property (nonatomic) BOOL needDNS; +/// 脚本修改 +@property (nonatomic, copy) NSString * _Nonnull sdkVersion; +/// hud +@property (nonatomic) NSTimeInterval hudDisTime; +- (void)setConfig:(NSString * _Nonnull)sdkProductId isDebug:(BOOL)isDebug isDevelopment:(BOOL)isDevelopment isTrtc:(BOOL)isTrtc needDNS:(BOOL)needDNS; +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + + +/// HH 定位 +SWIFT_CLASS("_TtC10HHSDKVideo15HHVideoLocation") +@interface HHVideoLocation : NSObject +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong, getter=default) HHVideoLocation * _Nonnull default_;) ++ (HHVideoLocation * _Nonnull)default SWIFT_WARN_UNUSED_RESULT; +/// 启用定位 +/// \param lng 经度 +/// +/// \param lat 纬度 +/// +- (void)startLocationWithLng:(NSString * _Nonnull)lng lat:(NSString * _Nonnull)lat; +/// 关闭定位 +- (void)closeLocation; +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + +@protocol OptionProtocal; + +SWIFT_CLASS("_TtC10HHSDKVideo10HHVideoSDK") +@interface HHVideoSDK : NSObject +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong, getter=default) HHVideoSDK * _Nonnull 
default_;) ++ (HHVideoSDK * _Nonnull)default SWIFT_WARN_UNUSED_RESULT; +@property (nonatomic, copy) void (^ _Nullable photosPreview)(NSArray<NSString *> * _Nonnull); +/// 初始化 SDK +/// \param option 可选的 SDK 配置 +/// +- (void)startWithOption:(id <OptionProtocal> _Nonnull)option im:(id <HHIM> _Nonnull)im rtc:(id <HHRTC> _Nonnull)rtc; +/// 登录账户 +/// <ul> +/// <li> +/// Parameters: +/// </li> +/// <li> +/// userToken: 用户的唯一标志 +/// </li> +/// <li> +/// completion: 完成的回调 +/// </li> +/// </ul> +- (void)loginWithUserToken:(NSString * _Nonnull)userToken completion:(void (^ _Nonnull)(NSString * _Nullable))completion; +/// 自动登录(如果已经保存了用户帐号和令牌,建议使用这个登录方式) +/// \param uuid 自动登录参数 +/// +- (void)autoLoginWithUuid:(NSInteger)uuid completion:(void (^ _Nonnull)(NSString * _Nullable))completion; +/// 登出 +/// \param callback 登出回调(字符串为空表示成功) +/// +- (void)logout:(void (^ _Nullable)(NSString * _Nullable))callback; +- (void)terminate; +/// 设置支付宝支付回调 +- (void)setAlipayHookWithAlipayCallback:(BOOL (^ _Nonnull)(NSString * _Nonnull, NSString * _Nonnull, void (^ _Nonnull)(NSDictionary<NSString *, id> * _Nonnull)))alipayCallback; +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + + + +@interface HHVideoSDK (SWIFT_EXTENSION(HHSDKVideo)) +- (void)onKickedOffline; +@end + + + +@interface HHVideoSDK (SWIFT_EXTENSION(HHSDKVideo)) +/// 三方登录 +/// \param userToken 三方信息 +/// +/// \param completion 完成的回调 +/// +- (void)loginForThirdId:(NSDictionary<NSString *, id> * _Nonnull)thirdInfo completion:(void (^ _Nonnull)(NSString * _Nullable))completion; +@end + + + +@interface HHVideoSDK (SWIFT_EXTENSION(HHSDKVideo)) +/// 获取病历详情 +/// \param userToken 当前人的唯一标志 +/// +/// \param medicId 病历id +/// +/// +/// returns: +/// url +- (NSString * _Nonnull)getMedicDetailWithUserToken:(NSString * _Nonnull)userToken medicId:(NSString * _Nonnull)medicId SWIFT_WARN_UNUSED_RESULT; +/// 获取病历列表 +/// \param userToken 当前人的唯一标志 +/// +/// +/// returns: +/// url +- (NSString * 
_Nonnull)getMedicListWithUserToken:(NSString * _Nonnull)userToken SWIFT_WARN_UNUSED_RESULT; +/// 获取所有病历列表 +/// \param userToken 主账户的唯一标志 +/// +/// +/// returns: +/// url +- (NSString * _Nonnull)getAllMedicsWithUserToken:(NSString * _Nonnull)userToken SWIFT_WARN_UNUSED_RESULT; +@end + + +@interface HHVideoSDK (SWIFT_EXTENSION(HHSDKVideo)) +/// 跳转信息流首页 +- (void)skipChatHomeWithIsByPresent:(BOOL)isByPresent vc:(UIViewController * _Nullable)vc; +- (void)skipChatHome:(UINavigationController * _Nonnull)nav; +- (UIViewController * _Nullable)chatHomeVC SWIFT_WARN_UNUSED_RESULT; +@end + + +@interface HHVideoSDK (SWIFT_EXTENSION(HHSDKVideo)) +/// 指定人发起呼叫(带 UI) +/// <ul> +/// <li> +/// Parameters: +/// </li> +/// </ul> +- (void)startMemberCallWithNeedSelectMember:(BOOL)needSelectMember; +@end + + +@interface HHVideoSDK (SWIFT_EXTENSION(HHSDKVideo)) +/// 指定人呼叫 +/// <ul> +/// <li> +/// Parameters: +/// </li> +/// <li> +/// userToken: 呼叫人userToken +/// </li> +/// </ul> +- (void)call:(NSString * _Nonnull)memberToken scene:(NSString * _Nullable)scene; +@end + + +@interface HHVideoSDK (SWIFT_EXTENSION(HHSDKVideo)) +/// 主动发起多人通话 +/// \param type 呼叫类型 +/// +/// \param callee 被呼叫人的信息 +/// +- (void)startTeamCall:(enum HHCallType)type callee:(HHCallerInfo * _Nonnull)callee callDelegate:(id <HHCallDelegate> _Nullable)callDelegate; +@end + + +@interface HHVideoSDK (SWIFT_EXTENSION(HHSDKVideo)) +/// 呼叫 +/// <ul> +/// <li> +/// Parameters: +/// </li> +/// <li> +/// type: 呼叫类型 +/// </li> +/// <li> +/// callDelegate: 呼叫状态回调 +/// </li> +/// </ul> +- (void)startCall:(enum HHCallType)type scene:(NSString * _Nullable)scene callDelegate:(id <HHCallDelegate> _Nullable)callDelegate; +- (void)startNewCall:(NSInteger)uuid type:(enum HHCallType)type callDelegate:(id <HHCallDelegate> _Nullable)callDelegate; +/// 指定人呼叫 +/// <ul> +/// <li> +/// Parameters: +/// </li> +/// <li> +/// uuid: 呼叫人Uuid +/// </li> +/// <li> +/// callDelegate: 呼叫状态回调 +/// </li> +/// </ul> +- (void)startCall:(NSInteger)uuid 
scene:(NSString * _Nullable)scene type:(enum HHCallType)type callDelegate:(id <HHCallDelegate> _Nullable)callDelegate; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo14MessageOptions") +@interface MessageOptions : NSObject +/// 是否是Present显示 +@property (nonatomic) BOOL isByPresent; +/// 信息流是否过滤医生总结 +@property (nonatomic) BOOL isFilterSummary; +/// 信息流是否过滤药卡 +@property (nonatomic) BOOL isFilterMedicinal; +/// 信息流默认头像 +@property (nonatomic, copy) NSString * _Nonnull defaultDocHeader; +/// 信息流默认昵称 +@property (nonatomic, copy) NSString * _Nonnull defaultDocName; +/// 信息流默认标题 +@property (nonatomic, copy) NSString * _Nonnull messageTitle; +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + + + + + + +@class VideoOptions; +@class UsercenterOptions; + +SWIFT_PROTOCOL("_TtP10HHSDKVideo14OptionProtocal_") +@protocol OptionProtocal +@property (nonatomic) NSTimeInterval hudDisTime; +@property (nonatomic) BOOL isDebug; +@property (nonatomic) BOOL isDevelopment; +/// 加载进度自定义 +@property (nonatomic, strong) id <HHHUDable> _Nonnull hudManager; +@property (nonatomic, copy) NSString * _Nonnull productId; +@property (nonatomic, copy) NSString * _Nullable cerName; +@property (nonatomic) enum HHLogMode logLevel; +@property (nonatomic, copy) NSString * _Nonnull mExtension; +@property (nonatomic) NSInteger changeDoctorTime; +/// 日志回调(仅测试有用) +@property (nonatomic, copy) void (^ _Nullable logCallback)(NSString * _Nonnull); +@property (nonatomic, strong) VideoOptions * _Nonnull mVideoOptions; +@property (nonatomic, strong) MessageOptions * _Nonnull mMessageOptions; +@property (nonatomic, strong) UsercenterOptions * _Nonnull mUserCenterOptions; +@property (nonatomic, copy) NSString * _Nonnull sdkVersion; +@property (nonatomic, copy) NSString * _Nonnull appVersion; +@property (nonatomic) BOOL isTRTC; +@property (nonatomic) BOOL needDNS; +@property (nonatomic) BOOL shouldWaingCall; +@end + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@class 
UITapGestureRecognizer; +@class NSTimer; + +@interface UIView (SWIFT_EXTENSION(HHSDKVideo)) +- (void)hhhandleToastTapped:(UITapGestureRecognizer * _Nonnull)recognizer; +- (void)hhtoastTimerDidFinish:(NSTimer * _Nonnull)timer; +@end + + + + +@interface UIViewController (SWIFT_EXTENSION(HHSDKVideo)) +- (void)hhCloseThisController; +- (void)hhPopThisController; +@end + + + + +SWIFT_CLASS("_TtC10HHSDKVideo17UsercenterOptions") +@interface UsercenterOptions : NSObject +/// 是否隐藏会员信息 +@property (nonatomic) BOOL enableVipInfo; +/// 是否隐藏个人中心入口 +@property (nonatomic) BOOL hideUserCenter; +/// 是否展示激活码入口 +@property (nonatomic) BOOL enableActivate; +/// 是否展示档案库入口 +@property (nonatomic) BOOL enableMedical; +/// 档案库是否可以增加成员 +@property (nonatomic) BOOL enableAddMemberInDoc; +/// 是否显示购买VIP入口 +@property (nonatomic) BOOL enableBuyService; +/// 是否显示昵称 +@property (nonatomic) BOOL hideNickName; +/// 是否允许弹出实名认证 +@property (nonatomic) BOOL enablePopRealName; +/// 是否隐藏权益列表 +@property (nonatomic) BOOL isCloseMoreFunc; +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo9VCManager") +@interface VCManager : NSObject +- (nonnull instancetype)init SWIFT_UNAVAILABLE; ++ (nonnull instancetype)new SWIFT_UNAVAILABLE_MSG("-init is unavailable"); +@end + + + + + +SWIFT_CLASS("_TtC10HHSDKVideo12VideoOptions") +@interface VideoOptions : NSObject +/// 是否开启美颜 +@property (nonatomic) BOOL allowBeauty; +/// 允许视频完成后评价 +@property (nonatomic) BOOL allowEvaluate; +/// 选人页面是否显示增加成员入口 +@property (nonatomic) BOOL allowAddMember; +/// 选人页面是否可以选择多人视频 +@property (nonatomic) BOOL allowMulti; +/// 是否显示医生信息 +@property (nonatomic) BOOL isShowDocInfo; +/// 可以关闭摄像头 +@property (nonatomic) BOOL enableCloseCamera; +/// 不可关闭摄像头 +@property (nonatomic) BOOL isCloseCameraCall; +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo16ZLAlbumListModel") +@interface ZLAlbumListModel : NSObject +- (nonnull instancetype)init SWIFT_UNAVAILABLE; 
++ (nonnull instancetype)new SWIFT_UNAVAILABLE_MSG("-init is unavailable"); +@end + + +enum CaptureSessionPreset : NSInteger; +enum FocusMode : NSInteger; +enum ExposureMode : NSInteger; +enum FlashMode : NSInteger; +enum VideoExportType : NSInteger; + +SWIFT_CLASS("_TtC10HHSDKVideo21ZLCameraConfiguration") +@interface ZLCameraConfiguration : NSObject +/// Video resolution. Defaults to hd1280x720. +@property (nonatomic) enum CaptureSessionPreset sessionPreset; +/// Camera focus mode. Defaults to continuousAutoFocus +@property (nonatomic) enum FocusMode focusMode; +/// Camera exposure mode. Defaults to continuousAutoExposure +@property (nonatomic) enum ExposureMode exposureMode; +/// Camera flahs mode. Default is off. Defaults to off. +@property (nonatomic) enum FlashMode flashMode; +/// Video export format for recording video and editing video. Defaults to mov. +@property (nonatomic) enum VideoExportType videoExportType; +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + +typedef SWIFT_ENUM(NSInteger, CaptureSessionPreset, open) { + CaptureSessionPresetCif352x288 = 0, + CaptureSessionPresetVga640x480 = 1, + CaptureSessionPresetHd1280x720 = 2, + CaptureSessionPresetHd1920x1080 = 3, + CaptureSessionPresetHd4K3840x2160 = 4, +}; + +typedef SWIFT_ENUM(NSInteger, FocusMode, open) { + FocusModeAutoFocus = 0, + FocusModeContinuousAutoFocus = 1, +}; + +typedef SWIFT_ENUM(NSInteger, ExposureMode, open) { + ExposureModeAutoExpose = 0, + ExposureModeContinuousAutoExposure = 1, +}; + +typedef SWIFT_ENUM(NSInteger, FlashMode, open) { + FlashModeAuto = 0, + FlashModeOn = 1, + FlashModeOff = 2, +}; + +typedef SWIFT_ENUM(NSInteger, VideoExportType, open) { + VideoExportTypeMov = 0, + VideoExportTypeMp4 = 1, +}; + + +@class CAAnimation; + +SWIFT_CLASS("_TtC10HHSDKVideo14ZLCustomCamera") +@interface ZLCustomCamera : UIViewController <CAAnimationDelegate> +@property (nonatomic, copy) void (^ _Nullable takeDoneBlock)(UIImage * _Nullable, NSURL * _Nullable); +@property 
(nonatomic, copy) void (^ _Nullable cancelBlock)(void); +@property (nonatomic, readonly) UIInterfaceOrientationMask supportedInterfaceOrientations; +@property (nonatomic, readonly) BOOL prefersStatusBarHidden; +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)coder OBJC_DESIGNATED_INITIALIZER; +- (void)viewDidLoad; +- (void)viewWillAppear:(BOOL)animated; +- (void)viewDidAppear:(BOOL)animated; +- (void)viewWillDisappear:(BOOL)animated; +- (void)viewDidDisappear:(BOOL)animated; +- (void)viewDidLayoutSubviews; +- (void)animationDidStop:(CAAnimation * _Nonnull)anim finished:(BOOL)flag; +- (nonnull instancetype)initWithNibName:(NSString * _Nullable)nibNameOrNil bundle:(NSBundle * _Nullable)nibBundleOrNil SWIFT_UNAVAILABLE; +@end + +@class UIGestureRecognizer; + +@interface ZLCustomCamera (SWIFT_EXTENSION(HHSDKVideo)) <UIGestureRecognizerDelegate> +- (BOOL)gestureRecognizer:(UIGestureRecognizer * _Nonnull)gestureRecognizer shouldRecognizeSimultaneouslyWithGestureRecognizer:(UIGestureRecognizer * _Nonnull)otherGestureRecognizer SWIFT_WARN_UNUSED_RESULT; +@end + +@class AVCapturePhotoOutput; +@class AVCaptureResolvedPhotoSettings; +@class AVCaptureBracketedStillImageSettings; + +@interface ZLCustomCamera (SWIFT_EXTENSION(HHSDKVideo)) <AVCapturePhotoCaptureDelegate> +- (void)captureOutput:(AVCapturePhotoOutput * _Nonnull)output willCapturePhotoForResolvedSettings:(AVCaptureResolvedPhotoSettings * _Nonnull)resolvedSettings; +- (void)captureOutput:(AVCapturePhotoOutput * _Nonnull)output didFinishProcessingPhotoSampleBuffer:(CMSampleBufferRef _Nullable)photoSampleBuffer previewPhotoSampleBuffer:(CMSampleBufferRef _Nullable)previewPhotoSampleBuffer resolvedSettings:(AVCaptureResolvedPhotoSettings * _Nonnull)resolvedSettings bracketSettings:(AVCaptureBracketedStillImageSettings * _Nullable)bracketSettings error:(NSError * _Nullable)error; +@end + +@class AVCaptureFileOutput; +@class AVCaptureConnection; + 
+@interface ZLCustomCamera (SWIFT_EXTENSION(HHSDKVideo)) <AVCaptureFileOutputRecordingDelegate> +- (void)captureOutput:(AVCaptureFileOutput * _Nonnull)output didStartRecordingToOutputFileAtURL:(NSURL * _Nonnull)fileURL fromConnections:(NSArray<AVCaptureConnection *> * _Nonnull)connections; +- (void)captureOutput:(AVCaptureFileOutput * _Nonnull)output didFinishRecordingToOutputFileAtURL:(NSURL * _Nonnull)outputFileURL fromConnections:(NSArray<AVCaptureConnection *> * _Nonnull)connections error:(NSError * _Nullable)error; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo10ZLDrawPath") +@interface ZLDrawPath : NSObject +- (nonnull instancetype)init SWIFT_UNAVAILABLE; ++ (nonnull instancetype)new SWIFT_UNAVAILABLE_MSG("-init is unavailable"); +@end + +@class ZLImageClipRatio; +@class ZLFilter; +@protocol ZLImageStickerContainerDelegate; + +SWIFT_CLASS("_TtC10HHSDKVideo24ZLEditImageConfiguration") +@interface ZLEditImageConfiguration : NSObject +/// Edit image tools. (This property is only for objc). +/// warning: +/// If you want to use the image sticker feature, you must provide a view that implements ZLImageStickerContainerDelegate. +@property (nonatomic, copy) NSArray<NSNumber *> * _Nonnull tools_objc; +/// Draw colors for image editor. +@property (nonatomic, copy) NSArray<UIColor *> * _Nonnull drawColors; +/// The default draw color. If this color not in editImageDrawColors, will pick the first color in editImageDrawColors as the default. +@property (nonatomic, strong) UIColor * _Nonnull defaultDrawColor; +/// Edit ratios for image editor. +@property (nonatomic, copy) NSArray<ZLImageClipRatio *> * _Nonnull clipRatios; +/// Text sticker colors for image editor. +@property (nonatomic, copy) NSArray<UIColor *> * _Nonnull textStickerTextColors; +/// The default text sticker color. If this color not in textStickerTextColors, will pick the first color in textStickerTextColors as the default. 
+@property (nonatomic, strong) UIColor * _Nonnull textStickerDefaultTextColor; +/// Filters for image editor. +@property (nonatomic, copy) NSArray<ZLFilter *> * _Nonnull filters; +@property (nonatomic, strong) UIView <ZLImageStickerContainerDelegate> * _Nullable imageStickerContainerView; +/// Adjust image tools. (This property is only for objc). +/// Valid when the tools contain EditTool.adjust +@property (nonatomic, copy) NSArray<NSNumber *> * _Nonnull adjustTools_objc; +/// Give an impact feedback when the adjust slider value is zero. Defaults to true. +@property (nonatomic) BOOL impactFeedbackWhenAdjustSliderValueIsZero; +/// Impact feedback style. Defaults to .medium +@property (nonatomic) UIImpactFeedbackStyle impactFeedbackStyle; +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + +typedef SWIFT_ENUM(NSInteger, EditTool, open) { + EditToolDraw = 0, + EditToolClip = 1, + EditToolImageSticker = 2, + EditToolTextSticker = 3, + EditToolMosaic = 4, + EditToolFilter = 5, + EditToolAdjust = 6, +}; + +typedef SWIFT_ENUM(NSInteger, AdjustTool, open) { + AdjustToolBrightness = 0, + AdjustToolContrast = 1, + AdjustToolSaturation = 2, +}; + + + +SWIFT_CLASS("_TtC10HHSDKVideo16ZLEditImageModel") +@interface ZLEditImageModel : NSObject +- (nonnull instancetype)init SWIFT_UNAVAILABLE; ++ (nonnull instancetype)new SWIFT_UNAVAILABLE_MSG("-init is unavailable"); +@end + +@class ZLEnlargeButton; +@class CAGradientLayer; +@class UIButton; +@class UIImageView; + +SWIFT_CLASS("_TtC10HHSDKVideo25ZLEditImageViewController") +@interface ZLEditImageViewController : UIViewController +@property (nonatomic) CGFloat drawColViewH; +@property (nonatomic) CGFloat filterColViewH; +@property (nonatomic) CGFloat adjustColViewH; +@property (nonatomic, strong) UIColor * _Nonnull ashbinNormalBgColor; +@property (nonatomic, strong) ZLEnlargeButton * _Nonnull cancelBtn; +@property (nonatomic, strong) UIScrollView * _Nonnull mainScrollView; +@property (nonatomic, strong) UIView * 
_Nonnull topShadowView; +@property (nonatomic, strong) CAGradientLayer * _Nonnull topShadowLayer; +@property (nonatomic, strong) UIView * _Nonnull bottomShadowView; +@property (nonatomic, strong) CAGradientLayer * _Nonnull bottomShadowLayer; +@property (nonatomic, strong) UIButton * _Nonnull doneBtn; +@property (nonatomic, strong) UIButton * _Nonnull revokeBtn; +@property (nonatomic, strong) UIView * _Nonnull ashbinView; +@property (nonatomic, strong) UIImageView * _Nonnull ashbinImgView; +@property (nonatomic) CGFloat drawLineWidth; +@property (nonatomic) CGFloat mosaicLineWidth; +@property (nonatomic, copy) void (^ _Nullable editFinishBlock)(UIImage * _Nonnull, ZLEditImageModel * _Nullable); +@property (nonatomic, copy) void (^ _Nullable cancelEditBlock)(void); +@property (nonatomic, readonly) BOOL prefersStatusBarHidden; +@property (nonatomic, readonly) UIInterfaceOrientationMask supportedInterfaceOrientations; ++ (void)showEditImageVCWithParentVC:(UIViewController * _Nullable)parentVC animate:(BOOL)animate image:(UIImage * _Nonnull)image editModel:(ZLEditImageModel * _Nullable)editModel cancel:(void (^ _Nullable)(void))cancel completion:(void (^ _Nullable)(UIImage * _Nonnull, ZLEditImageModel * _Nullable))completion; +- (nonnull instancetype)initWithImage:(UIImage * _Nonnull)image editModel:(ZLEditImageModel * _Nullable)editModel OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)coder OBJC_DESIGNATED_INITIALIZER; +- (void)viewDidLoad; +- (void)viewDidLayoutSubviews; +- (nonnull instancetype)initWithNibName:(NSString * _Nullable)nibNameOrNil bundle:(NSBundle * _Nullable)nibBundleOrNil SWIFT_UNAVAILABLE; +@end + + +@interface ZLEditImageViewController (SWIFT_EXTENSION(HHSDKVideo)) <UIGestureRecognizerDelegate> +- (BOOL)gestureRecognizerShouldBegin:(UIGestureRecognizer * _Nonnull)gestureRecognizer SWIFT_WARN_UNUSED_RESULT; +@end + + +@interface ZLEditImageViewController (SWIFT_EXTENSION(HHSDKVideo)) 
<UICollectionViewDataSource, UICollectionViewDelegate> +- (NSInteger)collectionView:(UICollectionView * _Nonnull)collectionView numberOfItemsInSection:(NSInteger)section SWIFT_WARN_UNUSED_RESULT; +- (UICollectionViewCell * _Nonnull)collectionView:(UICollectionView * _Nonnull)collectionView cellForItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath SWIFT_WARN_UNUSED_RESULT; +- (void)collectionView:(UICollectionView * _Nonnull)collectionView didSelectItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath; +@end + + + +@interface ZLEditImageViewController (SWIFT_EXTENSION(HHSDKVideo)) <UIScrollViewDelegate> +- (UIView * _Nullable)viewForZoomingInScrollView:(UIScrollView * _Nonnull)scrollView SWIFT_WARN_UNUSED_RESULT; +- (void)scrollViewDidZoom:(UIScrollView * _Nonnull)scrollView; +- (void)scrollViewDidEndZooming:(UIScrollView * _Nonnull)scrollView withView:(UIView * _Nullable)view atScale:(CGFloat)scale; +- (void)scrollViewDidScroll:(UIScrollView * _Nonnull)scrollView; +- (void)scrollViewDidEndDragging:(UIScrollView * _Nonnull)scrollView willDecelerate:(BOOL)decelerate; +- (void)scrollViewDidEndDecelerating:(UIScrollView * _Nonnull)scrollView; +- (void)scrollViewDidEndScrollingAnimation:(UIScrollView * _Nonnull)scrollView; +@end + +@class AVAsset; + +SWIFT_CLASS("_TtC10HHSDKVideo25ZLEditVideoViewController") +@interface ZLEditVideoViewController : UIViewController +@property (nonatomic, copy) void (^ _Nullable editFinishBlock)(NSURL * _Nullable); +@property (nonatomic, readonly) BOOL prefersStatusBarHidden; +@property (nonatomic, readonly) UIInterfaceOrientationMask supportedInterfaceOrientations; +/// initialize +/// \param avAsset AVAsset对象,需要传入本地视频,网络视频不支持 +/// +/// \param animateDismiss 退出界面时是否显示dismiss动画 +/// +- (nonnull instancetype)initWithAvAsset:(AVAsset * _Nonnull)avAsset animateDismiss:(BOOL)animateDismiss OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)coder SWIFT_UNAVAILABLE; +- (void)viewDidLoad; +- 
(void)viewDidAppear:(BOOL)animated; +- (void)viewDidLayoutSubviews; +- (nonnull instancetype)initWithNibName:(NSString * _Nullable)nibNameOrNil bundle:(NSBundle * _Nullable)nibBundleOrNil SWIFT_UNAVAILABLE; +@end + + +@interface ZLEditVideoViewController (SWIFT_EXTENSION(HHSDKVideo)) <UIGestureRecognizerDelegate> +- (BOOL)gestureRecognizerShouldBegin:(UIGestureRecognizer * _Nonnull)gestureRecognizer SWIFT_WARN_UNUSED_RESULT; +@end + +@class UICollectionViewLayout; + +@interface ZLEditVideoViewController (SWIFT_EXTENSION(HHSDKVideo)) <UICollectionViewDataSource, UICollectionViewDelegateFlowLayout> +- (void)scrollViewDidScroll:(UIScrollView * _Nonnull)scrollView; +- (void)scrollViewDidEndDragging:(UIScrollView * _Nonnull)scrollView willDecelerate:(BOOL)decelerate; +- (void)scrollViewDidEndDecelerating:(UIScrollView * _Nonnull)scrollView; +- (UIEdgeInsets)collectionView:(UICollectionView * _Nonnull)collectionView layout:(UICollectionViewLayout * _Nonnull)collectionViewLayout insetForSectionAtIndex:(NSInteger)section SWIFT_WARN_UNUSED_RESULT; +- (NSInteger)collectionView:(UICollectionView * _Nonnull)collectionView numberOfItemsInSection:(NSInteger)section SWIFT_WARN_UNUSED_RESULT; +- (UICollectionViewCell * _Nonnull)collectionView:(UICollectionView * _Nonnull)collectionView cellForItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath SWIFT_WARN_UNUSED_RESULT; +- (void)collectionView:(UICollectionView * _Nonnull)collectionView willDisplayCell:(UICollectionViewCell * _Nonnull)cell forItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath; +@end + +@class UIEvent; + +SWIFT_CLASS("_TtC10HHSDKVideo15ZLEnlargeButton") +@interface ZLEnlargeButton : UIButton +- (BOOL)pointInside:(CGPoint)point withEvent:(UIEvent * _Nullable)event SWIFT_WARN_UNUSED_RESULT; +- (nonnull instancetype)initWithFrame:(CGRect)frame OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)coder OBJC_DESIGNATED_INITIALIZER; +@end + +enum ZLFilterType : NSInteger; + 
+SWIFT_CLASS("_TtC10HHSDKVideo8ZLFilter") +@interface ZLFilter : NSObject +- (nonnull instancetype)initWithName:(NSString * _Nonnull)name filterType:(enum ZLFilterType)filterType OBJC_DESIGNATED_INITIALIZER; +/// 可传入 applier 自定义滤镜 +- (nonnull instancetype)initWithName:(NSString * _Nonnull)name applier:(UIImage * _Nonnull (^ _Nullable)(UIImage * _Nonnull))applier OBJC_DESIGNATED_INITIALIZER; +- (nonnull instancetype)init SWIFT_UNAVAILABLE; ++ (nonnull instancetype)new SWIFT_UNAVAILABLE_MSG("-init is unavailable"); +@end + + + +@interface ZLFilter (SWIFT_EXTENSION(HHSDKVideo)) +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, copy) NSArray<ZLFilter *> * _Nonnull all;) ++ (NSArray<ZLFilter *> * _Nonnull)all SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLFilter * _Nonnull normal;) ++ (ZLFilter * _Nonnull)normal SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLFilter * _Nonnull clarendon;) ++ (ZLFilter * _Nonnull)clarendon SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLFilter * _Nonnull nashville;) ++ (ZLFilter * _Nonnull)nashville SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLFilter * _Nonnull apply1977;) ++ (ZLFilter * _Nonnull)apply1977 SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLFilter * _Nonnull toaster;) ++ (ZLFilter * _Nonnull)toaster SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLFilter * _Nonnull chrome;) ++ (ZLFilter * _Nonnull)chrome SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLFilter * _Nonnull fade;) ++ (ZLFilter * _Nonnull)fade SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLFilter * _Nonnull instant;) ++ (ZLFilter * _Nonnull)instant 
SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLFilter * _Nonnull process;) ++ (ZLFilter * _Nonnull)process SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLFilter * _Nonnull transfer;) ++ (ZLFilter * _Nonnull)transfer SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLFilter * _Nonnull tone;) ++ (ZLFilter * _Nonnull)tone SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLFilter * _Nonnull linear;) ++ (ZLFilter * _Nonnull)linear SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLFilter * _Nonnull sepia;) ++ (ZLFilter * _Nonnull)sepia SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLFilter * _Nonnull mono;) ++ (ZLFilter * _Nonnull)mono SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLFilter * _Nonnull noir;) ++ (ZLFilter * _Nonnull)noir SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLFilter * _Nonnull tonal;) ++ (ZLFilter * _Nonnull)tonal SWIFT_WARN_UNUSED_RESULT; +@end + +typedef SWIFT_ENUM(NSInteger, ZLFilterType, open) { + ZLFilterTypeNormal = 0, + ZLFilterTypeChrome = 1, + ZLFilterTypeFade = 2, + ZLFilterTypeInstant = 3, + ZLFilterTypeProcess = 4, + ZLFilterTypeTransfer = 5, + ZLFilterTypeTone = 6, + ZLFilterTypeLinear = 7, + ZLFilterTypeSepia = 8, + ZLFilterTypeMono = 9, + ZLFilterTypeNoir = 10, + ZLFilterTypeTonal = 11, +}; + + +SWIFT_CLASS("_TtC10HHSDKVideo16ZLImageClipRatio") +@interface ZLImageClipRatio : NSObject +- (nonnull instancetype)initWithTitle:(NSString * _Nonnull)title whRatio:(CGFloat)whRatio isCircle:(BOOL)isCircle OBJC_DESIGNATED_INITIALIZER; +- (nonnull instancetype)init SWIFT_UNAVAILABLE; ++ (nonnull instancetype)new SWIFT_UNAVAILABLE_MSG("-init is unavailable"); +@end 
+ + + +@interface ZLImageClipRatio (SWIFT_EXTENSION(HHSDKVideo)) +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLImageClipRatio * _Nonnull custom;) ++ (ZLImageClipRatio * _Nonnull)custom SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLImageClipRatio * _Nonnull circle;) ++ (ZLImageClipRatio * _Nonnull)circle SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLImageClipRatio * _Nonnull wh1x1;) ++ (ZLImageClipRatio * _Nonnull)wh1x1 SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLImageClipRatio * _Nonnull wh3x4;) ++ (ZLImageClipRatio * _Nonnull)wh3x4 SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLImageClipRatio * _Nonnull wh4x3;) ++ (ZLImageClipRatio * _Nonnull)wh4x3 SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLImageClipRatio * _Nonnull wh2x3;) ++ (ZLImageClipRatio * _Nonnull)wh2x3 SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLImageClipRatio * _Nonnull wh3x2;) ++ (ZLImageClipRatio * _Nonnull)wh3x2 SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLImageClipRatio * _Nonnull wh9x16;) ++ (ZLImageClipRatio * _Nonnull)wh9x16 SWIFT_WARN_UNUSED_RESULT; +SWIFT_CLASS_PROPERTY(@property (nonatomic, class, readonly, strong) ZLImageClipRatio * _Nonnull wh16x9;) ++ (ZLImageClipRatio * _Nonnull)wh16x9 SWIFT_WARN_UNUSED_RESULT; +@end + +enum ZLURLType : NSInteger; + +SWIFT_CLASS("_TtC10HHSDKVideo24ZLImagePreviewController") +@interface ZLImagePreviewController : UIViewController +@property (nonatomic, copy) void (^ _Nullable longPressBlock)(ZLImagePreviewController * _Nullable, UIImage * _Nullable, NSInteger); +@property (nonatomic, copy) void (^ _Nullable doneBlock)(NSArray * _Nonnull); +@property (nonatomic, copy) 
NSDictionary<NSString *, id> * _Nullable videoHttpHeader; +@property (nonatomic, readonly) BOOL prefersStatusBarHidden; +@property (nonatomic, readonly) UIStatusBarStyle preferredStatusBarStyle; +/// \param datas Must be one of PHAsset, UIImage and URL, will filter others in init function. +/// +/// \param showBottomView If showSelectBtn is true, showBottomView is always true. +/// +/// \param index Index for first display. +/// +/// \param urlType Tell me the url is image or video. +/// +/// \param urlImageLoader Called when cell will display, cell will layout after callback when image load finish. The first block is progress callback, second is load finish callback. +/// +- (nonnull instancetype)initWithDatas:(NSArray * _Nonnull)datas index:(NSInteger)index showSelectBtn:(BOOL)showSelectBtn showBottomView:(BOOL)showBottomView urlType:(enum ZLURLType (^ _Nullable)(NSURL * _Nonnull))urlType urlImageLoader:(void (^ _Nullable)(NSURL * _Nonnull, UIImageView * _Nonnull, void (^ _Nonnull)(CGFloat), void (^ _Nonnull)(void)))urlImageLoader OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)coder SWIFT_UNAVAILABLE; +- (void)viewDidLoad; +- (void)viewWillAppear:(BOOL)animated; +- (void)viewDidAppear:(BOOL)animated; +- (void)viewDidLayoutSubviews; +- (nonnull instancetype)initWithNibName:(NSString * _Nullable)nibNameOrNil bundle:(NSBundle * _Nullable)nibBundleOrNil SWIFT_UNAVAILABLE; +@end + + +@interface ZLImagePreviewController (SWIFT_EXTENSION(HHSDKVideo)) +- (void)scrollViewDidScroll:(UIScrollView * _Nonnull)scrollView; +- (void)scrollViewDidEndDecelerating:(UIScrollView * _Nonnull)scrollView; +@end + + +@interface ZLImagePreviewController (SWIFT_EXTENSION(HHSDKVideo)) <UICollectionViewDataSource, UICollectionViewDelegateFlowLayout> +- (CGFloat)collectionView:(UICollectionView * _Nonnull)collectionView layout:(UICollectionViewLayout * _Nonnull)collectionViewLayout minimumInteritemSpacingForSectionAtIndex:(NSInteger)section 
SWIFT_WARN_UNUSED_RESULT; +- (CGFloat)collectionView:(UICollectionView * _Nonnull)collectionView layout:(UICollectionViewLayout * _Nonnull)collectionViewLayout minimumLineSpacingForSectionAtIndex:(NSInteger)section SWIFT_WARN_UNUSED_RESULT; +- (UIEdgeInsets)collectionView:(UICollectionView * _Nonnull)collectionView layout:(UICollectionViewLayout * _Nonnull)collectionViewLayout insetForSectionAtIndex:(NSInteger)section SWIFT_WARN_UNUSED_RESULT; +- (CGSize)collectionView:(UICollectionView * _Nonnull)collectionView layout:(UICollectionViewLayout * _Nonnull)collectionViewLayout sizeForItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath SWIFT_WARN_UNUSED_RESULT; +- (NSInteger)collectionView:(UICollectionView * _Nonnull)collectionView numberOfItemsInSection:(NSInteger)section SWIFT_WARN_UNUSED_RESULT; +- (UICollectionViewCell * _Nonnull)collectionView:(UICollectionView * _Nonnull)collectionView cellForItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath SWIFT_WARN_UNUSED_RESULT; +- (void)collectionView:(UICollectionView * _Nonnull)collectionView didEndDisplayingCell:(UICollectionViewCell * _Nonnull)cell forItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath; +@end + + +/// Provide an image sticker container view that conform to this protocol must be a subclass of UIView +/// 必须是UIView的子类遵循这个协议 +SWIFT_PROTOCOL("_TtP10HHSDKVideo31ZLImageStickerContainerDelegate_") +@protocol ZLImageStickerContainerDelegate +@property (nonatomic, copy) void (^ _Nullable selectImageBlock)(UIImage * _Nonnull); +@property (nonatomic, copy) void (^ _Nullable hideBlock)(void); +- (void)showIn:(UIView * _Nonnull)view; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo19ZLImageStickerState") +@interface ZLImageStickerState : NSObject +- (nonnull instancetype)init SWIFT_UNAVAILABLE; ++ (nonnull instancetype)new SWIFT_UNAVAILABLE_MSG("-init is unavailable"); +@end + +typedef SWIFT_ENUM(NSInteger, ZLLanguageType, open) { + ZLLanguageTypeSystem = 0, + ZLLanguageTypeChineseSimplified = 1, + 
ZLLanguageTypeChineseTraditional = 2, + ZLLanguageTypeEnglish = 3, + ZLLanguageTypeJapanese = 4, + ZLLanguageTypeFrench = 5, + ZLLanguageTypeGerman = 6, + ZLLanguageTypeRussian = 7, + ZLLanguageTypeVietnamese = 8, + ZLLanguageTypeKorean = 9, + ZLLanguageTypeMalay = 10, + ZLLanguageTypeItalian = 11, + ZLLanguageTypeIndonesian = 12, + ZLLanguageTypePortuguese = 13, + ZLLanguageTypeSpanish = 14, + ZLLanguageTypeTurkish = 15, +}; + + +SWIFT_CLASS("_TtC10HHSDKVideo12ZLMosaicPath") +@interface ZLMosaicPath : NSObject +- (nonnull instancetype)init SWIFT_UNAVAILABLE; ++ (nonnull instancetype)new SWIFT_UNAVAILABLE_MSG("-init is unavailable"); +@end + +typedef SWIFT_ENUM(NSInteger, ZLNoAuthorityType, open) { + ZLNoAuthorityTypeLibrary = 0, + ZLNoAuthorityTypeCamera = 1, + ZLNoAuthorityTypeMicrophone = 2, +}; + +typedef SWIFT_ENUM(NSInteger, ZLPhotoBrowserStyle, open) { +/// The album list is embedded in the navigation of the thumbnail interface, click the drop-down display. + ZLPhotoBrowserStyleEmbedAlbumList = 0, +/// The display relationship between the album list and the thumbnail interface is push. + ZLPhotoBrowserStyleExternalAlbumList = 1, +}; + +enum HUDStyle : NSInteger; +@class PHAsset; + +SWIFT_CLASS("_TtC10HHSDKVideo20ZLPhotoConfiguration") +@interface ZLPhotoConfiguration : NSObject ++ (ZLPhotoConfiguration * _Nonnull)default SWIFT_WARN_UNUSED_RESULT; ++ (void)resetConfiguration; +/// Photo sorting method, the preview interface is not affected by this parameter. Defaults to true. +@property (nonatomic) BOOL sortAscending; +/// Anything superior than 1 will enable the multiple selection feature. Defaults to 9. +@property (nonatomic) NSInteger maxSelectCount; +/// A count for video max selection. Defaults to 0. +/// warning: +/// Only valid in mix selection mode. (i.e. allowMixSelect = true) +@property (nonatomic) NSInteger maxVideoSelectCount; +/// A count for video min selection. Defaults to 0. +/// warning: +/// Only valid in mix selection mode. (i.e. 
allowMixSelect = true) +@property (nonatomic) NSInteger minVideoSelectCount; +/// Whether photos and videos can be selected together. Defaults to true. +/// If set to false, only one video can be selected. Defaults to true. +@property (nonatomic) BOOL allowMixSelect; +/// Preview selection max preview count, if the value is zero, only show <code>Camera</code>, <code>Album</code>, <code>Cancel</code> buttons. Defaults to 20. +@property (nonatomic) NSInteger maxPreviewCount; +@property (nonatomic) CGFloat cellCornerRadio; +/// If set to false, gif and livephoto cannot be selected either. Defaults to true. +@property (nonatomic) BOOL allowSelectImage; +@property (nonatomic) BOOL allowSelectVideo; +/// Allow select Gif, it only controls whether it is displayed in Gif form. +/// If value is false, the Gif logo is not displayed. Defaults to true. +@property (nonatomic) BOOL allowSelectGif; +/// Allow select LivePhoto, it only controls whether it is displayed in LivePhoto form. +/// If value is false, the LivePhoto logo is not displayed. Defaults to false. +@property (nonatomic) BOOL allowSelectLivePhoto; +/// Allow take photos in the album. Defaults to true. +/// warning: +/// If allowTakePhoto and allowRecordVideo are both false, it will not be displayed. +@property (nonatomic) BOOL allowTakePhotoInLibrary; +@property (nonatomic) BOOL allowEditImage; +@property (nonatomic) BOOL allowEditVideo; +/// Control whether to display the selection button animation when selecting. Defaults to true. +@property (nonatomic) BOOL animateSelectBtnWhenSelect; +/// Animation duration for select button +@property (nonatomic) CFTimeInterval selectBtnAnimationDuration; +/// After selecting a image/video in the thumbnail interface, enter the editing interface directly. Defaults to false. +/// <ul> +/// <li> +/// discussion: Editing image is only valid when allowEditImage is true and maxSelectCount is 1. +/// Editing video is only valid when allowEditVideo is true and maxSelectCount is 1. 
+/// </li> +/// </ul> +@property (nonatomic) BOOL editAfterSelectThumbnailImage; +/// Only valid when allowMixSelect is false and allowEditVideo is true. Defaults to true. +/// Just like the Wechat-Timeline selection style. If you want to crop the video after select thumbnail under allowMixSelect = true, please use <em>editAfterSelectThumbnailImage</em>. +@property (nonatomic) BOOL cropVideoAfterSelectThumbnail; +/// If image edit tools only has clip and this property is true. When you click edit, the cropping interface (i.e. ZLClipImageViewController) will be displayed. Defaults to false. +@property (nonatomic) BOOL showClipDirectlyIfOnlyHasClipTool; +/// Save the edited image to the album after editing. Defaults to true. +@property (nonatomic) BOOL saveNewImageAfterEdit; +/// If true, you can slide select photos in album. Defaults to true. +@property (nonatomic) BOOL allowSlideSelect; +/// When slide select is active, will auto scroll to top or bottom when your finger at the top or bottom. Defaults to true. +@property (nonatomic) BOOL autoScrollWhenSlideSelectIsActive; +/// The max speed (pt/s) of auto scroll. Defaults to 600. +@property (nonatomic) CGFloat autoScrollMaxSpeed; +/// If true, you can drag select photo when preview selection style. Defaults to false. +@property (nonatomic) BOOL allowDragSelect; +/// Allow select full image. Defaults to true. +@property (nonatomic) BOOL allowSelectOriginal; +/// Allow access to the preview large image interface (That is, whether to allow access to the large image interface after clicking the thumbnail image). Defaults to true. +@property (nonatomic) BOOL allowPreviewPhotos; +/// Whether to show the preview button (i.e. the preview button in the lower left corner of the thumbnail interface). Defaults to true. +@property (nonatomic) BOOL showPreviewButtonInAlbum; +/// Whether to display the selected count on the button. Defaults to true. 
+@property (nonatomic) BOOL showSelectCountOnDoneBtn; +/// The column count when iPhone is in portait mode. Minimum is 2, maximum is 6. Defaults to 4. +/// \code +/// iPhone landscape mode: columnCount += 2. +/// iPad portait mode: columnCount += 2. +/// iPad landscape mode: columnCount += 4. +/// +/// \endcode +@property (nonatomic) NSInteger columnCount; +/// Maximum cropping time when editing video, unit: second. Defaults to 10. +@property (nonatomic) NSInteger maxEditVideoTime; +/// Allow to choose the maximum duration of the video. Defaults to 120. +@property (nonatomic) NSInteger maxSelectVideoDuration; +/// Allow to choose the minimum duration of the video. Defaults to 0. +@property (nonatomic) NSInteger minSelectVideoDuration; +/// Image editor configuration. +@property (nonatomic, strong) ZLEditImageConfiguration * _Nonnull editImageConfiguration; +/// Show the image captured by the camera is displayed on the camera button inside the album. Defaults to false. +@property (nonatomic) BOOL showCaptureImageOnTakePhotoBtn; +/// In single selection mode, whether to display the selection button. Defaults to false. +@property (nonatomic) BOOL showSelectBtnWhenSingleSelect; +/// Overlay a mask layer on top of the selected photos. Defaults to true. +@property (nonatomic) BOOL showSelectedMask; +/// Display a border on the selected photos cell. Defaults to false. +@property (nonatomic) BOOL showSelectedBorder; +/// Overlay a mask layer above the cells that cannot be selected. Defaults to true. +@property (nonatomic) BOOL showInvalidMask; +/// Display the index of the selected photos. Defaults to true. +@property (nonatomic) BOOL showSelectedIndex; +/// Display the selected photos at the bottom of the preview large photos interface. Defaults to true. +@property (nonatomic) BOOL showSelectedPhotoPreview; +/// Allow framework fetch photos when callback. Defaults to true. +@property (nonatomic) BOOL shouldAnialysisAsset; +/// Timeout for image parsing. Defaults to 20. 
+@property (nonatomic) NSTimeInterval timeout; +/// Language for framework. +@property (nonatomic) enum ZLLanguageType languageType; +/// Whether to use custom camera. Defaults to true. +@property (nonatomic) BOOL useCustomCamera; +/// Allow taking photos in the camera (Need allowSelectImage to be true). Defaults to true. +@property (nonatomic) BOOL allowTakePhoto; +/// Allow recording in the camera (Need allowSelectVideo to be true). Defaults to true. +@property (nonatomic) BOOL allowRecordVideo; +/// Minimum recording duration. Defaults to 0. +@property (nonatomic) NSInteger minRecordDuration; +/// Maximum recording duration. Defaults to 10, minimum is 1. +@property (nonatomic) NSInteger maxRecordDuration; +/// The configuration for camera. +@property (nonatomic, strong) ZLCameraConfiguration * _Nonnull cameraConfiguration; +/// Hud style. Defaults to lightBlur. +@property (nonatomic) enum HUDStyle hudStyle; +/// This block will be called before selecting an image, the developer can first determine whether the asset is allowed to be selected. +/// Only control whether it is allowed to be selected, and will not affect the selection logic in the framework. +/// <ul> +/// <li> +/// Tips: If the choice is not allowed, the developer can toast prompt the user for relevant information. +/// </li> +/// </ul> +@property (nonatomic, copy) BOOL (^ _Nullable canSelectAsset)(PHAsset * _Nonnull); +/// If user choose limited Photo mode, a button with ‘+’ will be added to the ZLThumbnailViewController. It will call PHPhotoLibrary.shared().presentLimitedLibraryPicker(from:) to add photo. Defaults to true. +/// E.g., Sina Weibo’s ImagePicker +@property (nonatomic) BOOL showAddPhotoButton; +/// iOS14 limited Photo mode, will show collection footer view in ZLThumbnailViewController. +/// Will go to system setting if clicked. Defaults to true. +@property (nonatomic) BOOL showEnterSettingTips; +/// Callback after the no authority alert dismiss. 
+@property (nonatomic, copy) void (^ _Nullable noAuthorityCallback)(enum ZLNoAuthorityType); +/// Allow user to do something before select photo result callback. +/// And you must call the second parameter of this block to continue the photos selection. +/// The first parameter is the current controller. +/// The second parameter is the block that needs to be called after the user completes the operation. +@property (nonatomic, copy) void (^ _Nullable operateBeforeDoneAction)(UIViewController * _Nonnull, void (^ _Nonnull)(void)); +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + + +@interface ZLPhotoConfiguration (SWIFT_EXTENSION(HHSDKVideo)) +- (ZLPhotoConfiguration * _Nonnull)allowSelectVideo:(BOOL)value; +- (ZLPhotoConfiguration * _Nonnull)canSelectAsset:(BOOL (^ _Nullable)(PHAsset * _Nonnull))block; +- (ZLPhotoConfiguration * _Nonnull)showAddPhotoButton:(BOOL)value; +- (ZLPhotoConfiguration * _Nonnull)showEnterSettingTips:(BOOL)value; +- (ZLPhotoConfiguration * _Nonnull)noAuthorityCallback:(void (^ _Nullable)(enum ZLNoAuthorityType))callback; +- (ZLPhotoConfiguration * _Nonnull)operateBeforeDoneAction:(void (^ _Nullable)(UIViewController * _Nonnull, void (^ _Nonnull)(void)))block; +@end + +@class ZLPhotoModel; +@class NSData; +@class PHLivePhoto; +@class AVPlayerItem; + +SWIFT_CLASS("_TtC10HHSDKVideo14ZLPhotoManager") +@interface ZLPhotoManager : NSObject +/// Save image to album. ++ (void)saveImageToAlbumWithImage:(UIImage * _Nonnull)image completion:(void (^ _Nullable)(BOOL, PHAsset * _Nullable))completion; +/// Save video to album. ++ (void)saveVideoToAlbumWithUrl:(NSURL * _Nonnull)url completion:(void (^ _Nullable)(BOOL, PHAsset * _Nullable))completion; +/// Fetch photos from result. 
++ (NSArray<ZLPhotoModel *> * _Nonnull)fetchPhotoIn:(PHFetchResult<PHAsset *> * _Nonnull)result ascending:(BOOL)ascending allowSelectImage:(BOOL)allowSelectImage allowSelectVideo:(BOOL)allowSelectVideo limitCount:(NSInteger)limitCount SWIFT_WARN_UNUSED_RESULT; +/// Fetch all album list. ++ (void)getPhotoAlbumListWithAscending:(BOOL)ascending allowSelectImage:(BOOL)allowSelectImage allowSelectVideo:(BOOL)allowSelectVideo completion:(SWIFT_NOESCAPE void (^ _Nonnull)(NSArray<ZLAlbumListModel *> * _Nonnull))completion; +/// Fetch camera roll album. ++ (void)getCameraRollAlbumWithAllowSelectImage:(BOOL)allowSelectImage allowSelectVideo:(BOOL)allowSelectVideo completion:(void (^ _Nonnull)(ZLAlbumListModel * _Nonnull))completion; ++ (PHImageRequestID)fetchImageFor:(PHAsset * _Nonnull)asset size:(CGSize)size progress:(void (^ _Nullable)(CGFloat, NSError * _Nullable, BOOL * _Nonnull, NSDictionary * _Nullable))progress completion:(void (^ _Nonnull)(UIImage * _Nullable, BOOL))completion; ++ (PHImageRequestID)fetchOriginalImageFor:(PHAsset * _Nonnull)asset progress:(void (^ _Nullable)(CGFloat, NSError * _Nullable, BOOL * _Nonnull, NSDictionary * _Nullable))progress completion:(void (^ _Nonnull)(UIImage * _Nullable, BOOL))completion; +/// Fetch asset data. 
++ (PHImageRequestID)fetchOriginalImageDataFor:(PHAsset * _Nonnull)asset progress:(void (^ _Nullable)(CGFloat, NSError * _Nullable, BOOL * _Nonnull, NSDictionary * _Nullable))progress completion:(void (^ _Nonnull)(NSData * _Nonnull, NSDictionary * _Nullable, BOOL))completion; ++ (PHImageRequestID)fetchLivePhotoFor:(PHAsset * _Nonnull)asset completion:(void (^ _Nonnull)(PHLivePhoto * _Nullable, NSDictionary * _Nullable, BOOL))completion SWIFT_WARN_UNUSED_RESULT; ++ (PHImageRequestID)fetchVideoFor:(PHAsset * _Nonnull)asset progress:(void (^ _Nullable)(CGFloat, NSError * _Nullable, BOOL * _Nonnull, NSDictionary * _Nullable))progress completion:(void (^ _Nonnull)(AVPlayerItem * _Nullable, NSDictionary * _Nullable, BOOL))completion SWIFT_WARN_UNUSED_RESULT; ++ (PHImageRequestID)fetchAVAssetForVideo:(PHAsset * _Nonnull)asset completion:(void (^ _Nonnull)(AVAsset * _Nullable, NSDictionary * _Nullable))completion SWIFT_WARN_UNUSED_RESULT; +/// Fetch asset local file path. ++ (void)fetchAssetFilePathWithAsset:(PHAsset * _Nonnull)asset completion:(void (^ _Nonnull)(NSString * _Nullable))completion; +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + + +@interface ZLPhotoManager (SWIFT_EXTENSION(HHSDKVideo)) ++ (BOOL)hasPhotoLibratyAuthority SWIFT_WARN_UNUSED_RESULT; ++ (BOOL)hasCameraAuthority SWIFT_WARN_UNUSED_RESULT; ++ (BOOL)hasMicrophoneAuthority SWIFT_WARN_UNUSED_RESULT; +@end + + +SWIFT_CLASS("_TtC10HHSDKVideo12ZLPhotoModel") +@interface ZLPhotoModel : NSObject +- (nonnull instancetype)init SWIFT_UNAVAILABLE; ++ (nonnull instancetype)new SWIFT_UNAVAILABLE_MSG("-init is unavailable"); +@end + + + + +SWIFT_CLASS("_TtC10HHSDKVideo19ZLPhotoPreviewSheet") +@interface ZLPhotoPreviewSheet : UIView +/// Success callback +/// block params +/// <ul> +/// <li> +/// params1: images for asset. 
+/// </li> +/// <li> +/// params2: selected assets +/// </li> +/// <li> +/// params3: is full image +/// </li> +/// </ul> +@property (nonatomic, copy) void (^ _Nullable selectImageBlock)(NSArray<UIImage *> * _Nonnull, NSArray<PHAsset *> * _Nonnull, BOOL); +/// Callback for photos that failed to parse +/// block params +/// <ul> +/// <li> +/// params1: failed assets. +/// </li> +/// <li> +/// params2: index for asset +/// </li> +/// </ul> +@property (nonatomic, copy) void (^ _Nullable selectImageRequestErrorBlock)(NSArray<PHAsset *> * _Nonnull, NSArray<NSNumber *> * _Nonnull); +@property (nonatomic, copy) void (^ _Nullable cancelBlock)(void); +- (nonnull instancetype)initWithFrame:(CGRect)frame; +/// \param selectedAssets preselected assets +/// +- (nonnull instancetype)initWithSelectedAssets:(NSArray<PHAsset *> * _Nullable)selectedAssets OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)coder OBJC_DESIGNATED_INITIALIZER; +- (void)layoutSubviews; +- (void)showPreviewWithAnimate:(BOOL)animate sender:(UIViewController * _Nonnull)sender; +- (void)showPhotoLibraryWithSender:(UIViewController * _Nonnull)sender; +/// 传入已选择的assets,并预览 +- (void)previewAssetsWithSender:(UIViewController * _Nonnull)sender assets:(NSArray<PHAsset *> * _Nonnull)assets index:(NSInteger)index isOriginal:(BOOL)isOriginal showBottomViewAndSelectBtn:(BOOL)showBottomViewAndSelectBtn; +@end + + +@interface ZLPhotoPreviewSheet (SWIFT_EXTENSION(HHSDKVideo)) <UIGestureRecognizerDelegate> +- (BOOL)gestureRecognizerShouldBegin:(UIGestureRecognizer * _Nonnull)gestureRecognizer SWIFT_WARN_UNUSED_RESULT; +@end + +@class PHChange; + +@interface ZLPhotoPreviewSheet (SWIFT_EXTENSION(HHSDKVideo)) <PHPhotoLibraryChangeObserver> +- (void)photoLibraryDidChange:(PHChange * _Nonnull)changeInstance; +@end + +@class UIImagePickerController; + +@interface ZLPhotoPreviewSheet (SWIFT_EXTENSION(HHSDKVideo)) <UIImagePickerControllerDelegate, UINavigationControllerDelegate> +- 
(void)imagePickerController:(UIImagePickerController * _Nonnull)picker didFinishPickingMediaWithInfo:(NSDictionary<UIImagePickerControllerInfoKey, id> * _Nonnull)info; +@end + + +@interface ZLPhotoPreviewSheet (SWIFT_EXTENSION(HHSDKVideo)) <UICollectionViewDataSource, UICollectionViewDelegateFlowLayout> +- (CGSize)collectionView:(UICollectionView * _Nonnull)collectionView layout:(UICollectionViewLayout * _Nonnull)collectionViewLayout sizeForItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath SWIFT_WARN_UNUSED_RESULT; +- (NSInteger)collectionView:(UICollectionView * _Nonnull)collectionView numberOfItemsInSection:(NSInteger)section SWIFT_WARN_UNUSED_RESULT; +- (UICollectionViewCell * _Nonnull)collectionView:(UICollectionView * _Nonnull)collectionView cellForItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath SWIFT_WARN_UNUSED_RESULT; +- (void)collectionView:(UICollectionView * _Nonnull)collectionView willDisplayCell:(UICollectionViewCell * _Nonnull)cell forItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath; +- (void)collectionView:(UICollectionView * _Nonnull)collectionView didSelectItemAtIndexPath:(NSIndexPath * _Nonnull)indexPath; +@end + +enum CancelButtonStyle : NSInteger; +@class UIBlurEffect; + +/// Custom UI configuration (include colors, images, text, font) +SWIFT_CLASS("_TtC10HHSDKVideo22ZLPhotoUIConfiguration") +@interface ZLPhotoUIConfiguration : NSObject ++ (ZLPhotoUIConfiguration * _Nonnull)default SWIFT_WARN_UNUSED_RESULT; ++ (void)resetConfiguration; +@property (nonatomic) enum ZLPhotoBrowserStyle style; +@property (nonatomic) UIStatusBarStyle statusBarStyle; +/// text: Cancel. image: ‘x’. Default to image. +@property (nonatomic) enum CancelButtonStyle navCancelButtonStyle; +/// Whether to show the status bar when previewing photos. Defaults to false. 
+@property (nonatomic) BOOL showStatusBarInPreviewInterface; +/// The blur effect of the navigation bar in the album list +@property (nonatomic, strong) UIBlurEffect * _Nullable navViewBlurEffectOfAlbumList; +/// The blur effect of the navigation bar in the preview interface +@property (nonatomic, strong) UIBlurEffect * _Nullable navViewBlurEffectOfPreview; +/// The blur effect of the bottom tool bar in the album list +@property (nonatomic, strong) UIBlurEffect * _Nullable bottomViewBlurEffectOfAlbumList; +/// The blur effect of the bottom tool bar in the preview interface +@property (nonatomic, strong) UIBlurEffect * _Nullable bottomViewBlurEffectOfPreview; +/// Developers can customize images, but the name of the custom image resource must be consistent with the image name in the replaced bundle. +/// <ul> +/// <li> +/// example: Developers need to replace the selected and unselected image resources, and the array that needs to be passed in is +/// [“zl_btn_selected”, “zl_btn_unselected”]. +/// </li> +/// </ul> +@property (nonatomic, copy) NSArray<NSString *> * _Nonnull customImageNames; +/// Developers can customize images, but the name of the custom image resource must be consistent with the image name in the replaced bundle. +/// <ul> +/// <li> +/// example: Developers need to replace the selected and unselected image resources, and the array that needs to be passed in is +/// [“zl_btn_selected”: selectedImage, “zl_btn_unselected”: unselectedImage]. +/// </li> +/// </ul> +@property (nonatomic, copy) NSDictionary<NSString *, UIImage *> * _Nonnull customImageForKey_objc; +/// Developers can customize languages (This property is only for objc). +/// warning: +/// Please pay attention to the placeholders contained in languages when changing, such as %ld, %@. 
+/// <ul> +/// <li> +/// example: If you needs to replace +/// key: @“loading”, value: @“loading, waiting please” language, +/// The dictionary that needs to be passed in is @[@“loading”: @“text to be replaced”]. +/// </li> +/// </ul> +@property (nonatomic, copy) NSDictionary<NSString *, NSString *> * _Nonnull customLanguageKeyValue_objc; +/// Font name. +@property (nonatomic, copy) NSString * _Nullable themeFontName; +/// Preview selection mode, translucent background color above. +/// 预览快速选择模式下,上方透明区域背景色 +@property (nonatomic, strong) UIColor * _Nonnull sheetTranslucentColor; +/// Preview selection mode, a background color for <code>Camera</code>, <code>Album</code>, <code>Cancel</code> buttons. +/// 预览快速选择模式下,按钮背景颜色 +@property (nonatomic, strong) UIColor * _Nonnull sheetBtnBgColor; +/// Preview selection mode, a text color for <code>Camera</code>, <code>Album</code>, <code>Cancel</code> buttons. +/// 预览快速选择模式下,按钮标题颜色 +@property (nonatomic, strong) UIColor * _Nonnull sheetBtnTitleColor; +/// Preview selection mode, cancel button title color when the selection amount is superior than 0. +/// 预览快速选择模式下,按钮标题高亮颜色 +@property (nonatomic, strong) UIColor * _Nonnull sheetBtnTitleTintColor; +/// A color for navigation bar. +/// 相册列表及小图界面导航条背景色 +@property (nonatomic, strong) UIColor * _Nonnull navBarColor; +/// A color for navigation bar in preview interface. +/// 预览大图界面的导航条背景色 +@property (nonatomic, strong) UIColor * _Nonnull navBarColorOfPreviewVC; +/// A color for Navigation bar text. +/// 相册列表及小图界面导航栏标题颜色 +@property (nonatomic, strong) UIColor * _Nonnull navTitleColor; +/// A color for Navigation bar text of preview vc. +/// 预览大图界面导航栏标题颜色 +@property (nonatomic, strong) UIColor * _Nonnull navTitleColorOfPreviewVC; +/// The background color of the title view when the frame style is embedAlbumList. +/// 下拉选择相册列表模式下,选择区域的背景色 +@property (nonatomic, strong) UIColor * _Nonnull navEmbedTitleViewBgColor; +/// A color for background in album list. 
+/// 相册列表背景色 +@property (nonatomic, strong) UIColor * _Nonnull albumListBgColor; +/// A color of the translucent area below the embed album list. +/// 嵌入式相册列表下方透明区域颜色 +@property (nonatomic, strong) UIColor * _Nonnull embedAlbumListTranslucentColor; +/// A color for album list title label. +/// 相册列表标题颜色 +@property (nonatomic, strong) UIColor * _Nonnull albumListTitleColor; +/// A color for album list count label. +/// 相册列表数量label的颜色 +@property (nonatomic, strong) UIColor * _Nonnull albumListCountColor; +/// A color for album list separator. +/// 相册列表分割线颜色 +@property (nonatomic, strong) UIColor * _Nonnull separatorColor; +/// A color for background in thumbnail interface. +/// 相册小图界面背景色 +@property (nonatomic, strong) UIColor * _Nonnull thumbnailBgColor; +/// A color for background in preview interface.. +/// 预览大图界面背景色 +@property (nonatomic, strong) UIColor * _Nonnull previewVCBgColor; +/// A color for background in bottom tool view. +/// 相册小图界面底部工具条背景色 +@property (nonatomic, strong) UIColor * _Nonnull bottomToolViewBgColor; +/// A color for background in bottom tool view in preview interface. +/// 预览大图界面底部工具条背景色 +@property (nonatomic, strong) UIColor * _Nonnull bottomToolViewBgColorOfPreviewVC; +/// The normal state title color of bottom tool view buttons. Without done button. +/// 相册小图界面底部按钮可交互状态下标题颜色,不包括 <code>完成</code> 按钮 +@property (nonatomic, strong) UIColor * _Nonnull bottomToolViewBtnNormalTitleColor; +/// The normal state title color of bottom tool view done button. +/// 相册小图界面底部 <code>完成</code> 按钮可交互状态下标题颜色 +@property (nonatomic, strong) UIColor * _Nonnull bottomToolViewDoneBtnNormalTitleColor; +/// The normal state title color of bottom tool view buttons in preview interface. Without done button. +/// 预览大图界面底部按钮可交互状态下标题颜色,不包括 <code>完成</code> 按钮 +@property (nonatomic, strong) UIColor * _Nonnull bottomToolViewBtnNormalTitleColorOfPreviewVC; +/// The normal state title color of bottom tool view done button. 
+/// 预览大图界面底部 <code>完成</code> 按钮可交互状态下标题颜色 +@property (nonatomic, strong) UIColor * _Nonnull bottomToolViewDoneBtnNormalTitleColorOfPreviewVC; +/// The disable state title color of bottom tool view buttons. Without done button. +/// 相册小图界面底部按钮不可交互状态下标题颜色,不包括 <code>完成</code> 按钮 +@property (nonatomic, strong) UIColor * _Nonnull bottomToolViewBtnDisableTitleColor; +/// The disable state title color of bottom tool view done button. +/// 相册小图界面底部 <code>完成</code> 按钮不可交互状态下标题颜色 +@property (nonatomic, strong) UIColor * _Nonnull bottomToolViewDoneBtnDisableTitleColor; +/// The disable state title color of bottom tool view buttons in preview interface. Without done button. +/// 预览大图界面底部按钮不可交互状态下标题颜色,不包括 <code>完成</code> 按钮 +@property (nonatomic, strong) UIColor * _Nonnull bottomToolViewBtnDisableTitleColorOfPreviewVC; +/// The disable state title color of bottom tool view done button in preview interface. +/// 预览大图界面底部 <code>完成</code> 按钮不可交互状态下标题颜色 +@property (nonatomic, strong) UIColor * _Nonnull bottomToolViewDoneBtnDisableTitleColorOfPreviewVC; +/// The normal state background color of bottom tool view buttons. +/// 相册小图界面底部按钮可交互状态下背景色 +@property (nonatomic, strong) UIColor * _Nonnull bottomToolViewBtnNormalBgColor; +/// The normal state background color of bottom tool view buttons in preview interface. +/// 预览大图界面底部按钮可交互状态下背景色 +@property (nonatomic, strong) UIColor * _Nonnull bottomToolViewBtnNormalBgColorOfPreviewVC; +/// The disable state background color of bottom tool view buttons. +/// 相册小图界面底部按钮不可交互状态下背景色 +@property (nonatomic, strong) UIColor * _Nonnull bottomToolViewBtnDisableBgColor; +/// The disable state background color of bottom tool view buttons in preview interface. +/// 预览大图界面底部按钮不可交互状态下背景色 +@property (nonatomic, strong) UIColor * _Nonnull bottomToolViewBtnDisableBgColorOfPreviewVC; +/// With iOS14 limited authority, a color for select more photos at the bottom of the thumbnail interface. 
+/// iOS14 limited权限下,下方提示选择更多图片信息文字的颜色 +@property (nonatomic, strong) UIColor * _Nonnull selectMorePhotoWhenAuthIsLismitedTitleColor; +/// The record progress color of custom camera. +/// 自定义相机录制视频时进度条颜色 +@property (nonatomic, strong) UIColor * _Nonnull cameraRecodeProgressColor; +/// Mask layer color of selected cell. +/// 已选择照片上方遮罩阴影颜色 +@property (nonatomic, strong) UIColor * _Nonnull selectedMaskColor; +/// Border color of selected cell. +/// 已选择照片border颜色 +@property (nonatomic, strong) UIColor * _Nonnull selectedBorderColor; +/// Mask layer color of the cell that cannot be selected. +/// 不可选的照片上方遮罩阴影颜色 +@property (nonatomic, strong) UIColor * _Nonnull invalidMaskColor; +/// The text color of selected cell index label. +/// 已选照片右上角序号label背景色 +@property (nonatomic, strong) UIColor * _Nonnull indexLabelTextColor; +/// The background color of selected cell index label. +/// 已选照片右上角序号label背景色 +@property (nonatomic, strong) UIColor * _Nonnull indexLabelBgColor; +/// The background color of camera cell inside album. +/// 相册小图界面拍照按钮背景色 +@property (nonatomic, strong) UIColor * _Nonnull cameraCellBgColor; +/// The normal color of adjust slider. +/// 编辑图片,调整饱和度、对比度、亮度时,右侧slider背景色 +@property (nonatomic, strong) UIColor * _Nonnull adjustSliderNormalColor; +/// The tint color of adjust slider. 
+/// 编辑图片,调整饱和度、对比度、亮度时,右侧slider背景高亮色 +@property (nonatomic, strong) UIColor * _Nonnull adjustSliderTintColor; +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + +typedef SWIFT_ENUM(NSInteger, CancelButtonStyle, open) { + CancelButtonStyleText = 0, + CancelButtonStyleImage = 1, +}; + + + +SWIFT_CLASS("_TtC10HHSDKVideo13ZLProgressHUD") +@interface ZLProgressHUD : UIView +- (nonnull instancetype)initWithStyle:(enum HUDStyle)style OBJC_DESIGNATED_INITIALIZER; +- (nullable instancetype)initWithCoder:(NSCoder * _Nonnull)coder SWIFT_UNAVAILABLE; +- (void)showWithTimeout:(NSTimeInterval)timeout; +- (void)hide; +- (nonnull instancetype)initWithFrame:(CGRect)frame SWIFT_UNAVAILABLE; +@end + +typedef SWIFT_ENUM(NSInteger, HUDStyle, open) { + HUDStyleLight = 0, + HUDStyleLightBlur = 1, + HUDStyleDark = 2, + HUDStyleDarkBlur = 3, +}; + + +SWIFT_CLASS("_TtC10HHSDKVideo18ZLTextStickerState") +@interface ZLTextStickerState : NSObject +- (nonnull instancetype)init SWIFT_UNAVAILABLE; ++ (nonnull instancetype)new SWIFT_UNAVAILABLE_MSG("-init is unavailable"); +@end + +typedef SWIFT_ENUM(NSInteger, ZLURLType, open) { + ZLURLTypeImage = 0, + ZLURLTypeVideo = 1, +}; + + +SWIFT_CLASS("_TtC10HHSDKVideo14ZLVideoManager") +@interface ZLVideoManager : NSObject +/// 没有针对不同分辨率视频做处理,仅用于处理相机拍照的视频 ++ (void)mergeVideosWithFileUrls:(NSArray<NSURL *> * _Nonnull)fileUrls completion:(void (^ _Nonnull)(NSURL * _Nullable, NSError * _Nullable))completion; +- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER; +@end + + +@interface ZLVideoManager (SWIFT_EXTENSION(HHSDKVideo)) +@end + +typedef SWIFT_ENUM(NSInteger, ExportType, open) { + ExportTypeMov = 0, + ExportTypeMp4 = 1, +}; + + +@interface ZLVideoManager (SWIFT_EXTENSION(HHSDKVideo)) ++ (void)exportVideoFor:(PHAsset * _Nonnull)asset exportType:(enum ExportType)exportType presetName:(NSString * _Nonnull)presetName complete:(void (^ _Nonnull)(NSURL * _Nullable, NSError * _Nullable))complete; ++ (void)exportVideoFor:(AVAsset * 
_Nonnull)asset range:(CMTimeRange)range exportType:(enum ExportType)exportType presetName:(NSString * _Nonnull)presetName complete:(void (^ _Nonnull)(NSURL * _Nullable, NSError * _Nullable))complete; +@end + +#if __has_attribute(external_source_symbol) +# pragma clang attribute pop +#endif +#pragma clang diagnostic pop +#endif diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHSDKVideo-umbrella.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHSDKVideo-umbrella.h new file mode 100644 index 0000000..1ca3141 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHSDKVideo-umbrella.h @@ -0,0 +1,58 @@ +#ifdef __OBJC__ +#import <UIKit/UIKit.h> +#else +#ifndef FOUNDATION_EXPORT +#if defined(__cplusplus) +#define FOUNDATION_EXPORT extern "C" +#else +#define FOUNDATION_EXPORT extern +#endif +#endif +#endif + +#import "HHFaceAuthBridge.h" +#import "HHMBProgressHUD.h" +#import "ObjectMapper.h" +#import "HHAnimatedImageRep.h" +#import "HHImageCache.h" +#import "HHImageCacheConfig.h" +#import "HHWebImageCoder.h" +#import "HHWebImageCoderHelper.h" +#import "HHWebImageCodersManager.h" +#import "HHWebImageCompat.h" +#import "HHWebImageDownloader.h" +#import "HHWebImageDownloaderOperation.h" +#import "HHWebImageFrame.h" +#import "HHWebImageGIFCoder.h" +#import "HHWebImageImageIOCoder.h" +#import "HHWebImageManager.h" +#import "HHWebImageOperation.h" +#import "HHWebImagePrefetcher.h" +#import "HHWebImageTransition.h" +#import "NSData+hhImageContentType.h" +#import "NSImage+hhWebCache.h" +#import "UIButton+hhWebCache.h" +#import "UIImage+hhForceDecode.h" +#import "UIImage+hhGIF.h" +#import "UIImage+hhMultiFormat.h" +#import "UIImageView+hhHighlightedWebCache.h" +#import "UIImageView+hhWebCache.h" +#import "UIView+hhWebCache.h" +#import "UIView+hhWebCacheOperation.h" +#import "HHPhotoPicker.h" +#import "CGGeometry+RSKImageCropper.h" +#import "RSKImageCropVC.h" +#import "RSKImageCropViewController+Protected.h" +#import "SDKImageCropper.h" +#import "SDKImageScrollView.h" 
+#import "SDKTouchView.h" +#import "UIApplication+RSKImageCropper.h" +#import "UIImage+RSKImageCropper.h" +#import "SDKCameraImageModel.h" +#import "SDKCameraUtil.h" +#import "SDKPHAssetManager.h" +#import "UITextView+Placeholder.h" + +FOUNDATION_EXPORT double HHSDKVideoVersionNumber; +FOUNDATION_EXPORT const unsigned char HHSDKVideoVersionString[]; + diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCoder.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCoder.h new file mode 100644 index 0000000..5839570 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCoder.h @@ -0,0 +1,119 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import <Foundation/Foundation.h> +#import "HHWebImageCompat.h" +#import "NSData+hhImageContentType.h" + +/** + A Boolean value indicating whether to scale down large images during decompressing. (NSNumber) + */ +FOUNDATION_EXPORT NSString * _Nonnull const HHWebImageCoderScaleDownLargeImagesKey; + +/** + Return the shared device-dependent RGB color space created with CGColorSpaceCreateDeviceRGB. + + @return The device-dependent RGB color space + */ +CG_EXTERN CGColorSpaceRef _Nonnull HHCGColorSpaceGetDeviceRGB(void); + +/** + Check whether CGImageRef contains alpha channel. + + @param imageRef The CGImageRef + @return Return YES if CGImageRef contains alpha channel, otherwise return NO + */ +CG_EXTERN BOOL HHCGImageRefContainsAlpha(_Nullable CGImageRef imageRef); + + +/** + This is the image coder protocol to provide custom image decoding/encoding. + These methods are all required to implement. + @note Pay attention that these methods are not called from main queue. + */ +@protocol HHWebImageCoder <NSObject> + +@required +#pragma mark - Decoding +/** + Returns YES if this coder can decode some data. 
Otherwise, the data should be passed to another coder. + + @param data The image data so we can look at it + @return YES if this coder can decode the data, NO otherwise + */ +- (BOOL)canDecodeFromData:(nullable NSData *)data; + +/** + Decode the image data to image. + + @param data The image data to be decoded + @return The decoded image from data + */ +- (nullable UIImage *)decodedImageWithData:(nullable NSData *)data; + +/** + Decompress the image with original image and image data. + + @param image The original image to be decompressed + @param data The pointer to original image data. The pointer itself is nonnull but image data can be null. This data will set to cache if needed. If you do not need to modify data at the sametime, ignore this param. + @param optionsDict A dictionary containing any decompressing options. Pass {HHWebImageCoderScaleDownLargeImagesKey: @(YES)} to scale down large images + @return The decompressed image + */ +- (nullable UIImage *)decompressedImageWithImage:(nullable UIImage *)image + data:(NSData * _Nullable * _Nonnull)data + options:(nullable NSDictionary<NSString*, NSObject*>*)optionsDict; + +#pragma mark - Encoding + +/** + Returns YES if this coder can encode some image. Otherwise, it should be passed to another coder. + + @param format The image format + @return YES if this coder can encode the image, NO otherwise + */ +- (BOOL)canEncodeToFormat:(HHImageFormat)format; + +/** + Encode the image to image data. + + @param image The image to be encoded + @param format The image format to encode, you should note `HHImageFormatUndefined` format is also possible + @return The encoded image data + */ +- (nullable NSData *)encodedDataWithImage:(nullable UIImage *)image format:(HHImageFormat)format; + +@end + + +/** + This is the image coder protocol to provide custom progressive image decoding. + These methods are all required to implement. + @note Pay attention that these methods are not called from main queue. 
+ */ +@protocol HHWebImageProgressiveCoder <HHWebImageCoder> + +@required +/** + Returns YES if this coder can incremental decode some data. Otherwise, it should be passed to another coder. + + @param data The image data so we can look at it + @return YES if this coder can decode the data, NO otherwise + */ +- (BOOL)canIncrementallyDecodeFromData:(nullable NSData *)data; + +/** + Incremental decode the image data to image. + + @param data The image data has been downloaded so far + @param finished Whether the download has finished + @warning because incremental decoding need to keep the decoded context, we will alloc a new instance with the same class for each download operation to avoid conflicts + @return The decoded image from data + */ +- (nullable UIImage *)incrementallyDecodedImageWithData:(nullable NSData *)data finished:(BOOL)finished; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCoderHelper.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCoderHelper.h new file mode 100644 index 0000000..4826e08 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCoderHelper.h @@ -0,0 +1,52 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import <Foundation/Foundation.h> +#import "HHWebImageCompat.h" +#import "HHWebImageFrame.h" + +@interface HHWebImageCoderHelper : NSObject + +/** + Return an animated image with frames array. + For UIKit, this will apply the patch and then create animated UIImage. The patch is because that `+[UIImage animatedImageWithImages:duration:]` just use the average of duration for each image. So it will not work if different frame has different duration. Therefore we repeat the specify frame for specify times to let it work. + For AppKit, NSImage does not support animates other than GIF. 
This will try to encode the frames to GIF format and then create an animated NSImage for rendering. Attention the animated image may loss some detail if the input frames contain full alpha channel because GIF only supports 1 bit alpha channel. (For 1 pixel, either transparent or not) + + @param frames The frames array. If no frames or frames is empty, return nil + @return A animated image for rendering on UIImageView(UIKit) or NSImageView(AppKit) + */ ++ (UIImage * _Nullable)animatedImageWithFrames:(NSArray<HHWebImageFrame *> * _Nullable)frames; + +/** + Return frames array from an animated image. + For UIKit, this will unapply the patch for the description above and then create frames array. This will also work for normal animated UIImage. + For AppKit, NSImage does not support animates other than GIF. This will try to decode the GIF imageRep and then create frames array. + + @param animatedImage A animated image. If it's not animated, return nil + @return The frames array + */ ++ (NSArray<HHWebImageFrame *> * _Nullable)framesFromAnimatedImage:(UIImage * _Nullable)animatedImage; + +#if HH_UIKIT || HH_WATCH +/** + Convert an EXIF image orientation to an iOS one. + + @param exifOrientation EXIF orientation + @return iOS orientation + */ ++ (UIImageOrientation)imageOrientationFromEXIFOrientation:(NSInteger)exifOrientation; +/** + Convert an iOS orientation to an EXIF image orientation. + + @param imageOrientation iOS orientation + @return EXIF orientation + */ ++ (NSInteger)exifOrientationFromImageOrientation:(UIImageOrientation)imageOrientation; +#endif + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCodersManager.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCodersManager.h new file mode 100644 index 0000000..7ce9661 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCodersManager.h @@ -0,0 +1,58 @@ +/* + * This file is part of the HHWebImage package. 
+ * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import <Foundation/Foundation.h> +#import "HHWebImageCoder.h" + +/** + Global object holding the array of coders, so that we avoid passing them from object to object. + Uses a priority queue behind scenes, which means the latest added coders have the highest priority. + This is done so when encoding/decoding something, we go through the list and ask each coder if they can handle the current data. + That way, users can add their custom coders while preserving our existing prebuilt ones + + Note: the `coders` getter will return the coders in their reversed order + Example: + - by default we internally set coders = `IOCoder`, `WebPCoder`. (`GIFCoder` is not recommended to add only if you want to get GIF support without `FLAnimatedImage`) + - calling `coders` will return `@[WebPCoder, IOCoder]` + - call `[addCoder:[MyCrazyCoder new]]` + - calling `coders` now returns `@[MyCrazyCoder, WebPCoder, IOCoder]` + + Coders + ------ + A coder must conform to the `HHWebImageCoder` protocol or even to `HHWebImageProgressiveCoder` if it supports progressive decoding + Conformance is important because that way, they will implement `canDecodeFromData` or `canEncodeToFormat` + Those methods are called on each coder in the array (using the priority order) until one of them returns YES. + That means that coder can decode that data / encode to that format + */ +@interface HHWebImageCodersManager : NSObject<HHWebImageCoder> + +/** + Shared reusable instance + */ ++ (nonnull instancetype)sharedInstance; + +/** + All coders in coders manager. The coders array is a priority queue, which means the later added coder will have the highest priority + */ +@property (nonatomic, strong, readwrite, nullable) NSArray<HHWebImageCoder>* coders; + +/** + Add a new coder to the end of coders array. 
Which has the highest priority. + + @param coder coder + */ +- (void)addCoder:(nonnull id<HHWebImageCoder>)coder; + +/** + Remove a coder in the coders array. + + @param coder coder + */ +- (void)removeCoder:(nonnull id<HHWebImageCoder>)coder; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCompat.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCompat.h new file mode 100644 index 0000000..7aba26d --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageCompat.h @@ -0,0 +1,101 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * (c) Jamie Pinkham + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import <TargetConditionals.h> + +#ifdef __OBJC_GC__ + #error HHWebImage does not support Objective-C Garbage Collection +#endif + +// Apple's defines from TargetConditionals.h are a bit weird. +// Seems like TARGET_OS_MAC is always defined (on all platforms). 
+// To determine if we are running on OSX, we can only rely on TARGET_OS_IPHONE=0 and all the other platforms +#if !TARGET_OS_IPHONE && !TARGET_OS_IOS && !TARGET_OS_TV && !TARGET_OS_WATCH + #define HH_MAC 1 +#else + #define HH_MAC 0 +#endif + +// iOS and tvOS are very similar, UIKit exists on both platforms +// Note: watchOS also has UIKit, but it's very limited +#if TARGET_OS_IOS || TARGET_OS_TV + #define HH_UIKIT 1 +#else + #define HH_UIKIT 0 +#endif + +#if TARGET_OS_IOS + #define HH_IOS 1 +#else + #define HH_IOS 0 +#endif + +#if TARGET_OS_TV + #define HH_TV 1 +#else + #define HH_TV 0 +#endif + +#if TARGET_OS_WATCH + #define HH_WATCH 1 +#else + #define HH_WATCH 0 +#endif + + +#if HH_MAC + #import <AppKit/AppKit.h> + #ifndef UIImage + #define UIImage NSImage + #endif + #ifndef UIImageView + #define UIImageView NSImageView + #endif + #ifndef UIView + #define UIView NSView + #endif +#else + #if __IPHONE_OS_VERSION_MIN_REQUIRED != 20000 && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_5_0 + #error HHWebImage doesn't support Deployment Target version < 5.0 + #endif + + #if HH_UIKIT + #import <UIKit/UIKit.h> + #endif + #if HH_WATCH + #import <WatchKit/WatchKit.h> + #endif +#endif + +#ifndef NS_ENUM +#define NS_ENUM(_type, _name) enum _name : _type _name; enum _name : _type +#endif + +#ifndef NS_OPTIONS +#define NS_OPTIONS(_type, _name) enum _name : _type _name; enum _name : _type +#endif + +FOUNDATION_EXPORT UIImage *HHScaledImageForKey(NSString *key, UIImage *image); + +typedef void(^HHWebImageNoParamsBlock)(void); + +FOUNDATION_EXPORT NSString *const HHWebImageErrorDomain; + +#ifndef dispatch_queue_async_safe +#define dispatch_queue_async_safe(queue, block)\ + if (strcmp(dispatch_queue_get_label(DISPATCH_CURRENT_QUEUE_LABEL), dispatch_queue_get_label(queue)) == 0) {\ + block();\ + } else {\ + dispatch_async(queue, block);\ + } +#endif + +#ifndef dispatch_main_async_safe +#define dispatch_main_async_safe(block) dispatch_queue_async_safe(dispatch_get_main_queue(), block) 
+#endif diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageDownloader.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageDownloader.h new file mode 100644 index 0000000..5439743 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageDownloader.h @@ -0,0 +1,271 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import <Foundation/Foundation.h> +#import "HHWebImageCompat.h" +#import "HHWebImageOperation.h" + +typedef NS_OPTIONS(NSUInteger, HHWebImageDownloaderOptions) { + /** + * Put the download in the low queue priority and task priority. + */ + HHWebImageDownloaderLowPriority = 1 << 0, + + /** + * This flag enables progressive download, the image is displayed progressively during download as a browser would do. + */ + HHWebImageDownloaderProgressiveDownload = 1 << 1, + + /** + * By default, request prevent the use of NSURLCache. With this flag, NSURLCache + * is used with default policies. + */ + HHWebImageDownloaderUseNSURLCache = 1 << 2, + + /** + * Call completion block with nil image/imageData if the image was read from NSURLCache + * (to be combined with `HHWebImageDownloaderUseNSURLCache`). + */ + HHWebImageDownloaderIgnoreCachedResponse = 1 << 3, + + /** + * In iOS 4+, continue the download of the image if the app goes to background. This is achieved by asking the system for + * extra time in background to let the request finish. If the background task expires the operation will be cancelled. + */ + HHWebImageDownloaderContinueInBackground = 1 << 4, + + /** + * Handles cookies stored in NSHTTPCookieStore by setting + * NSMutableURLRequest.HTTPShouldHandleCookies = YES; + */ + HHWebImageDownloaderHandleCookies = 1 << 5, + + /** + * Enable to allow untrusted SSL certificates. + * Useful for testing purposes. 
Use with caution in production. + */ + HHWebImageDownloaderAllowInvalidSSLCertificates = 1 << 6, + + /** + * Put the download in the high queue priority and task priority. + */ + HHWebImageDownloaderHighPriority = 1 << 7, + + /** + * Scale down the image + */ + HHWebImageDownloaderScaleDownLargeImages = 1 << 8, +}; + +typedef NS_ENUM(NSInteger, HHWebImageDownloaderExecutionOrder) { + /** + * Default value. All download operations will execute in queue style (first-in-first-out). + */ + HHWebImageDownloaderFIFOExecutionOrder, + + /** + * All download operations will execute in stack style (last-in-first-out). + */ + HHWebImageDownloaderLIFOExecutionOrder +}; + +FOUNDATION_EXPORT NSString * _Nonnull const HHWebImageDownloadStartNotification; +FOUNDATION_EXPORT NSString * _Nonnull const HHWebImageDownloadStopNotification; + +typedef void(^HHWebImageDownloaderProgressBlock)(NSInteger receivedSize, NSInteger expectedSize, NSURL * _Nullable targetURL); + +typedef void(^HHWebImageDownloaderCompletedBlock)(UIImage * _Nullable image, NSData * _Nullable data, NSError * _Nullable error, BOOL finished); + +typedef NSDictionary<NSString *, NSString *> HHHTTPHeadersDictionary; +typedef NSMutableDictionary<NSString *, NSString *> HHHTTPHeadersMutableDictionary; + +typedef HHHTTPHeadersDictionary * _Nullable (^HHWebImageDownloaderHeadersFilterBlock)(NSURL * _Nullable url, HHHTTPHeadersDictionary * _Nullable headers); + +/** + * A token associated with each download. Can be used to cancel a download + */ +@interface HHWebImageDownloadToken : NSObject <HHWebImageOperation> + +/** + The download's URL. This should be readonly and you should not modify + */ +@property (nonatomic, strong, nullable) NSURL *url; +/** + The cancel token taken from `addHandlersForProgress:completed`. 
This should be readonly and you should not modify + @note use `-[HHWebImageDownloadToken cancel]` to cancel the token + */ +@property (nonatomic, strong, nullable) id downloadOperationCancelToken; + +@end + + +/** + * Asynchronous downloader dedicated and optimized for image loading. + */ +@interface HHWebImageDownloader : NSObject + +/** + * Decompressing images that are downloaded and cached can improve performance but can consume lot of memory. + * Defaults to YES. Set this to NO if you are experiencing a crash due to excessive memory consumption. + */ +@property (assign, nonatomic) BOOL shouldDecompressImages; + +/** + * The maximum number of concurrent downloads + */ +@property (assign, nonatomic) NSInteger maxConcurrentDownloads; + +/** + * Shows the current amount of downloads that still need to be downloaded + */ +@property (readonly, nonatomic) NSUInteger currentDownloadCount; + +/** + * The timeout value (in seconds) for the download operation. Default: 15.0. + */ +@property (assign, nonatomic) NSTimeInterval downloadTimeout; + +/** + * The configuration in use by the internal NSURLSession. + * Mutating this object directly has no effect. + * + * @see createNewSessionWithConfiguration: + */ +@property (readonly, nonatomic, nonnull) NSURLSessionConfiguration *sessionConfiguration; + + +/** + * Changes download operations execution order. Default value is `HHWebImageDownloaderFIFOExecutionOrder`. + */ +@property (assign, nonatomic) HHWebImageDownloaderExecutionOrder executionOrder; + +/** + * Singleton method, returns the shared instance + * + * @return global shared instance of downloader class + */ ++ (nonnull instancetype)sharedDownloader; + +/** + * Set the default URL credential to be set for request operations. 
+ */ +@property (strong, nonatomic, nullable) NSURLCredential *urlCredential; + +/** + * Set username + */ +@property (strong, nonatomic, nullable) NSString *username; + +/** + * Set password + */ +@property (strong, nonatomic, nullable) NSString *password; + +/** + * Set filter to pick headers for downloading image HTTP request. + * + * This block will be invoked for each downloading image request, returned + * NHHictionary will be used as headers in corresponding HTTP request. + */ +@property (nonatomic, copy, nullable) HHWebImageDownloaderHeadersFilterBlock headersFilter; + +/** + * Creates an instance of a downloader with specified session configuration. + * @note `timeoutIntervalForRequest` is going to be overwritten. + * @return new instance of downloader class + */ +- (nonnull instancetype)initWithSessionConfiguration:(nullable NSURLSessionConfiguration *)sessionConfiguration NS_DESIGNATED_INITIALIZER; + +/** + * Set a value for a HTTP header to be appended to each download HTTP request. + * + * @param value The value for the header field. Use `nil` value to remove the header. + * @param field The name of the header field to set. + */ +- (void)setValue:(nullable NSString *)value forHTTPHeaderField:(nullable NSString *)field; + +/** + * Returns the value of the specified HTTP header field. + * + * @return The value associated with the header field field, or `nil` if there is no corresponding header field. + */ +- (nullable NSString *)valueForHTTPHeaderField:(nullable NSString *)field; + +/** + * Sets a subclass of `HHWebImageDownloaderOperation` as the default + * `NSOperation` to be used each time HHWebImage constructs a request + * operation to download an image. + * + * @param operationClass The subclass of `HHWebImageDownloaderOperation` to set + * as default. Passing `nil` will revert to `HHWebImageDownloaderOperation`. 
+ */ +- (void)setOperationClass:(nullable Class)operationClass; + +/** + * Creates a HHWebImageDownloader async downloader instance with a given URL + * + * The delegate will be informed when the image is finish downloaded or an error has happen. + * + * @see HHWebImageDownloaderDelegate + * + * @param url The URL to the image to download + * @param options The options to be used for this download + * @param progressBlock A block called repeatedly while the image is downloading + * @note the progress block is executed on a background queue + * @param completedBlock A block called once the download is completed. + * If the download succeeded, the image parameter is set, in case of error, + * error parameter is set with the error. The last parameter is always YES + * if HHWebImageDownloaderProgressiveDownload isn't use. With the + * HHWebImageDownloaderProgressiveDownload option, this block is called + * repeatedly with the partial image object and the finished argument set to NO + * before to be called a last time with the full image and finished argument + * set to YES. In case of error, the finished argument is always YES. + * + * @return A token (HHWebImageDownloadToken) that can be passed to -cancel: to cancel this operation + */ +- (nullable HHWebImageDownloadToken *)downloadImageWithURL:(nullable NSURL *)url + options:(HHWebImageDownloaderOptions)options + progress:(nullable HHWebImageDownloaderProgressBlock)progressBlock + completed:(nullable HHWebImageDownloaderCompletedBlock)completedBlock; + +/** + * Cancels a download that was previously queued using -downloadImageWithURL:options:progress:completed: + * + * @param token The token received from -downloadImageWithURL:options:progress:completed: that should be canceled. 
+ */ +- (void)cancel:(nullable HHWebImageDownloadToken *)token; + +/** + * Sets the download queue suspension state + */ +- (void)setSuspended:(BOOL)suspended; + +/** + * Cancels all download operations in the queue + */ +- (void)cancelAllDownloads; + +/** + * Forces HHWebImageDownloader to create and use a new NSURLSession that is + * initialized with the given configuration. + * @note All existing download operations in the queue will be cancelled. + * @note `timeoutIntervalForRequest` is going to be overwritten. + * + * @param sessionConfiguration The configuration to use for the new NSURLSession + */ +- (void)createNewSessionWithConfiguration:(nonnull NSURLSessionConfiguration *)sessionConfiguration; + +/** + * Invalidates the managed session, optionally canceling pending operations. + * @note If you use custom downloader instead of the shared downloader, you need call this method when you do not use it to avoid memory leak + * @param cancelPendingOperations Whether or not to cancel pending operations. + * @note Calling this method on the shared downloader has no effect. + */ +- (void)invalidateSessionAndCancel:(BOOL)cancelPendingOperations; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageDownloaderOperation.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageDownloaderOperation.h new file mode 100644 index 0000000..a9f8acf --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageDownloaderOperation.h @@ -0,0 +1,125 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. 
+ */ + +#import <Foundation/Foundation.h> +#import "HHWebImageDownloader.h" +#import "HHWebImageOperation.h" + +FOUNDATION_EXPORT NSString * _Nonnull const HHWebImageDownloadStartNotification; +FOUNDATION_EXPORT NSString * _Nonnull const HHWebImageDownloadReceiveResponseNotification; +FOUNDATION_EXPORT NSString * _Nonnull const HHWebImageDownloadStopNotification; +FOUNDATION_EXPORT NSString * _Nonnull const HHWebImageDownloadFinishNotification; + + + +/** + Describes a downloader operation. If one wants to use a custom downloader op, it needs to inherit from `NSOperation` and conform to this protocol + For the description about these methods, see `HHWebImageDownloaderOperation` + */ +@protocol HHWebImageDownloaderOperationInterface<NSObject> + +- (nonnull instancetype)initWithRequest:(nullable NSURLRequest *)request + inSession:(nullable NSURLSession *)session + options:(HHWebImageDownloaderOptions)options; + +- (nullable id)addHandlersForProgress:(nullable HHWebImageDownloaderProgressBlock)progressBlock + completed:(nullable HHWebImageDownloaderCompletedBlock)completedBlock; + +- (BOOL)shouldDecompressImages; +- (void)setShouldDecompressImages:(BOOL)value; + +- (nullable NSURLCredential *)credential; +- (void)setCredential:(nullable NSURLCredential *)value; + +- (BOOL)cancel:(nullable id)token; + +@end + + +@interface HHWebImageDownloaderOperation : NSOperation <HHWebImageDownloaderOperationInterface, HHWebImageOperation, NSURLSessionTaskDelegate, NSURLSessionDataDelegate> + +/** + * The request used by the operation's task. + */ +@property (strong, nonatomic, readonly, nullable) NSURLRequest *request; + +/** + * The operation's task + */ +@property (strong, nonatomic, readonly, nullable) NSURLSessionTask *dataTask; + + +@property (assign, nonatomic) BOOL shouldDecompressImages; + +/** + * Was used to determine whether the URL connection should consult the credential storage for authenticating the connection. 
+ * @deprecated Not used for a couple of versions + */ +@property (nonatomic, assign) BOOL shouldUseCredentialStorage __deprecated_msg("Property deprecated. Does nothing. Kept only for backwards compatibility"); + +/** + * The credential used for authentication challenges in `-URLSession:task:didReceiveChallenge:completionHandler:`. + * + * This will be overridden by any shared credentials that exist for the username or password of the request URL, if present. + */ +@property (nonatomic, strong, nullable) NSURLCredential *credential; + +/** + * The HHWebImageDownloaderOptions for the receiver. + */ +@property (assign, nonatomic, readonly) HHWebImageDownloaderOptions options; + +/** + * The expected size of data. + */ +@property (assign, nonatomic) NSInteger expectedSize; + +/** + * The response returned by the operation's task. + */ +@property (strong, nonatomic, nullable) NSURLResponse *response; + +/** + * Initializes a `HHWebImageDownloaderOperation` object + * + * @see HHWebImageDownloaderOperation + * + * @param request the URL request + * @param session the URL session in which this operation will run + * @param options downloader options + * + * @return the initialized instance + */ +- (nonnull instancetype)initWithRequest:(nullable NSURLRequest *)request + inSession:(nullable NSURLSession *)session + options:(HHWebImageDownloaderOptions)options NS_DESIGNATED_INITIALIZER; + +/** + * Adds handlers for progress and completion. Returns a tokent that can be passed to -cancel: to cancel this set of + * callbacks. + * + * @param progressBlock the block executed when a new chunk of data arrives. + * @note the progress block is executed on a background queue + * @param completedBlock the block executed when the download is done. + * @note the completed block is executed on the main queue for success. 
If errors are found, there is a chance the block will be executed on a background queue + * + * @return the token to use to cancel this set of handlers + */ +- (nullable id)addHandlersForProgress:(nullable HHWebImageDownloaderProgressBlock)progressBlock + completed:(nullable HHWebImageDownloaderCompletedBlock)completedBlock; + +/** + * Cancels a set of callbacks. Once all callbacks are canceled, the operation is cancelled. + * + * @param token the token representing a set of callbacks to cancel + * + * @return YES if the operation was stopped because this was the last token to be canceled. NO otherwise. + */ +- (BOOL)cancel:(nullable id)token; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageFrame.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageFrame.h new file mode 100644 index 0000000..0a32056 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageFrame.h @@ -0,0 +1,34 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import <Foundation/Foundation.h> +#import "HHWebImageCompat.h" + +@interface HHWebImageFrame : NSObject + +// This class is used for creating animated images via `animatedImageWithFrames` in `HHWebImageCoderHelper`. Attention if you need to specify animated images loop count, use `sd_imageLoopCount` property in `UIImage+MultiFormat`. + +/** + The image of current frame. You should not set an animated image. + */ +@property (nonatomic, strong, readonly, nonnull) UIImage *image; +/** + The duration of current frame to be displayed. The number is seconds but not milliseconds. You should not set this to zero. 
+ */ +@property (nonatomic, readonly, assign) NSTimeInterval duration; + +/** + Create a frame instance with specify image and duration + + @param image current frame's image + @param duration current frame's duration + @return frame instance + */ ++ (instancetype _Nonnull)frameWithImage:(UIImage * _Nonnull)image duration:(NSTimeInterval)duration; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageGIFCoder.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageGIFCoder.h new file mode 100644 index 0000000..f494c20 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageGIFCoder.h @@ -0,0 +1,23 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import <Foundation/Foundation.h> +#import "HHWebImageCoder.h" + +/** + Built in coder using ImageIO that supports GIF encoding/decoding + @note `HHWebImageIOCoder` supports GIF but only as static (will use the 1st frame). + @note Use `HHWebImageGIFCoder` for fully animated GIFs - less performant than `FLAnimatedImage` + @note If you decide to make all `UIImageView`(including `FLAnimatedImageView`) instance support GIF. You should add this coder to `HHWebImageCodersManager` and make sure that it has a higher priority than `HHWebImageIOCoder` + @note The recommended approach for animated GIFs is using `FLAnimatedImage`. 
It's more performant than `UIImageView` for GIF displaying + */ +@interface HHWebImageGIFCoder : NSObject <HHWebImageCoder> + ++ (nonnull instancetype)sharedCoder; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageImageIOCoder.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageImageIOCoder.h new file mode 100644 index 0000000..050ed4d --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageImageIOCoder.h @@ -0,0 +1,30 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import <Foundation/Foundation.h> +#import "HHWebImageCoder.h" + +/** + Built in coder that supports PNG, JPEG, TIFF, includes support for progressive decoding. + + GIF + Also supports static GIF (meaning will only handle the 1st frame). + For a full GIF support, we recommend `FLAnimatedImage` or our less performant `HHWebImageGIFCoder` + + HEIC + This coder also supports HEIC format because ImageIO supports it natively. 
But it depends on the system capabilities, so it won't work on all devices, see: https://devstreaming-cdn.apple.com/videos/wwdc/2017/511tj33587vdhds/511/511_working_with_heif_and_hevc.pdf + Decode(Software): !Simulator && (iOS 11 || tvOS 11 || macOS 10.13) + Decode(Hardware): !Simulator && ((iOS 11 && A9Chip) || (macOS 10.13 && 6thGenerationIntelCPU)) + Encode(Software): macOS 10.13 + Encode(Hardware): !Simulator && ((iOS 11 && A10FusionChip) || (macOS 10.13 && 6thGenerationIntelCPU)) + */ +@interface HHWebImageImageIOCoder : NSObject <HHWebImageProgressiveCoder> + ++ (nonnull instancetype)sharedCoder; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageManager.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageManager.h new file mode 100644 index 0000000..04d5658 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageManager.h @@ -0,0 +1,328 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import "HHWebImageCompat.h" +#import "HHWebImageOperation.h" +#import "HHWebImageDownloader.h" +#import "HHImageCache.h" + +typedef NS_OPTIONS(NSUInteger, HHWebImageOptions) { + /** + * By default, when a URL fail to be downloaded, the URL is blacklisted so the library won't keep trying. + * This flag disable this blacklisting. + */ + HHWebImageRetryFailed = 1 << 0, + + /** + * By default, image downloads are started during UI interactions, this flags disable this feature, + * leading to delayed download on UIScrollView deceleration for instance. 
+ */ + HHWebImageLowPriority = 1 << 1, + + /** + * This flag disables on-disk caching after the download finished, only cache in memory + */ + HHWebImageCacheMemoryOnly = 1 << 2, + + /** + * This flag enables progressive download, the image is displayed progressively during download as a browser would do. + * By default, the image is only displayed once completely downloaded. + */ + HHWebImageProgressiveDownload = 1 << 3, + + /** + * Even if the image is cached, respect the HTTP response cache control, and refresh the image from remote location if needed. + * The disk caching will be handled by NSURLCache instead of HHWebImage leading to slight performance degradation. + * This option helps deal with images changing behind the same request URL, e.g. Facebook graph api profile pics. + * If a cached image is refreshed, the completion block is called once with the cached image and again with the final image. + * + * Use this flag only if you can't make your URLs static with embedded cache busting parameter. + */ + HHWebImageRefreshCached = 1 << 4, + + /** + * In iOS 4+, continue the download of the image if the app goes to background. This is achieved by asking the system for + * extra time in background to let the request finish. If the background task expires the operation will be cancelled. + */ + HHWebImageContinueInBackground = 1 << 5, + + /** + * Handles cookies stored in NSHTTPCookieStore by setting + * NSMutableURLRequest.HTTPShouldHandleCookies = YES; + */ + HHWebImageHandleCookies = 1 << 6, + + /** + * Enable to allow untrusted SSL certificates. + * Useful for testing purposes. Use with caution in production. + */ + HHWebImageAllowInvalidSSLCertificates = 1 << 7, + + /** + * By default, images are loaded in the order in which they were queued. This flag moves them to + * the front of the queue. + */ + HHWebImageHighPriority = 1 << 8, + + /** + * By default, placeholder images are loaded while the image is loading. 
This flag will delay the loading + * of the placeholder image until after the image has finished loading. + */ + HHWebImageDelayPlaceholder = 1 << 9, + + /** + * We usually don't call transformDownloadedImage delegate method on animated images, + * as most transformation code would mangle it. + * Use this flag to transform them anyway. + */ + HHWebImageTransformAnimatedImage = 1 << 10, + + /** + * By default, image is added to the imageView after download. But in some cases, we want to + * have the hand before setting the image (apply a filter or add it with cross-fade animation for instance) + * Use this flag if you want to manually set the image in the completion when success + */ + HHWebImageAvoidAutoSetImage = 1 << 11, + + /** + * By default, images are decoded respecting their original size. On iOS, this flag will scale down the + * images to a size compatible with the constrained memory of devices. + * If `HHWebImageProgressiveDownload` flag is set the scale down is deactivated. + */ + HHWebImageScaleDownLargeImages = 1 << 12, + + /** + * By default, we do not query disk data when the image is cached in memory. This mask can force to query disk data at the same time. + * This flag is recommend to be used with `HHWebImageQueryDiskSync` to ensure the image is loaded in the same runloop. + */ + HHWebImageQueryDataWhenInMemory = 1 << 13, + + /** + * By default, we query the memory cache synchronously, disk cache asynchronously. This mask can force to query disk cache synchronously to ensure that image is loaded in the same runloop. + * This flag can avoid flashing during cell reuse if you disable memory cache or in some other cases. + */ + HHWebImageQueryDiskSync = 1 << 14, + + /** + * By default, when the cache missed, the image is download from the network. This flag can prevent network to load from cache only. 
+ */ + HHWebImageFromCacheOnly = 1 << 15, + /** + * By default, when you use `HHWebImageTransition` to do some view transition after the image load finished, this transition is only applied for image download from the network. This mask can force to apply view transition for memory and disk cache as well. + */ + HHWebImageForceTransition = 1 << 16 +}; + +typedef void(^HHExternalCompletionBlock)(UIImage * _Nullable image, NSError * _Nullable error, HHImageCacheType cacheType, NSURL * _Nullable imageURL); + +typedef void(^HHInternalCompletionBlock)(UIImage * _Nullable image, NSData * _Nullable data, NSError * _Nullable error, HHImageCacheType cacheType, BOOL finished, NSURL * _Nullable imageURL); + +typedef NSString * _Nullable(^HHWebImageCacheKeyFilterBlock)(NSURL * _Nullable url); + +typedef NSData * _Nullable(^HHWebImageCacheSerializerBlock)(UIImage * _Nonnull image, NSData * _Nullable data, NSURL * _Nullable imageURL); + + +@class HHWebImageManager; + +@protocol HHWebImageManagerDelegate <NSObject> + +@optional + +/** + * Controls which image should be downloaded when the image is not found in the cache. + * + * @param imageManager The current `HHWebImageManager` + * @param imageURL The url of the image to be downloaded + * + * @return Return NO to prevent the downloading of the image on cache misses. If not implemented, YES is implied. + */ +- (BOOL)imageManager:(nonnull HHWebImageManager *)imageManager shouldDownloadImageForURL:(nullable NSURL *)imageURL; + +/** + * Controls the complicated logic to mark as failed URLs when download error occur. + * If the delegate implement this method, we will not use the built-in way to mark URL as failed based on error code; + @param imageManager The current `HHWebImageManager` + @param imageURL The url of the image + @param error The download error for the url + @return Whether to block this url or not. Return YES to mark this URL as failed. 
+ */ +- (BOOL)imageManager:(nonnull HHWebImageManager *)imageManager shouldBlockFailedURL:(nonnull NSURL *)imageURL withError:(nonnull NSError *)error; + +/** + * Allows to transform the image immediately after it has been downloaded and just before to cache it on disk and memory. + * NOTE: This method is called from a global queue in order to not to block the main thread. + * + * @param imageManager The current `HHWebImageManager` + * @param image The image to transform + * @param imageURL The url of the image to transform + * + * @return The transformed image object. + */ +- (nullable UIImage *)imageManager:(nonnull HHWebImageManager *)imageManager transformDownloadedImage:(nullable UIImage *)image withURL:(nullable NSURL *)imageURL; + +@end + +/** + * The HHWebImageManager is the class behind the UIImageView+WebCache category and likes. + * It ties the asynchronous downloader (HHWebImageDownloader) with the image cache store (HHImageCache). + * You can use this class directly to benefit from web image downloading with caching in another context than + * a UIView. + * + * Here is a simple example of how to use HHWebImageManager: + * + * @code + +HHWebImageManager *manager = [HHWebImageManager sharedManager]; +[manager loadImageWithURL:imageURL + options:0 + progress:nil + completed:^(UIImage *image, NSError *error, HHImageCacheType cacheType, BOOL finished, NSURL *imageURL) { + if (image) { + // do something with image + } + }]; + + * @endcode + */ +@interface HHWebImageManager : NSObject + +@property (weak, nonatomic, nullable) id <HHWebImageManagerDelegate> delegate; + +@property (strong, nonatomic, readonly, nullable) HHImageCache *imageCache; +@property (strong, nonatomic, readonly, nullable) HHWebImageDownloader *imageDownloader; + +/** + * The cache filter is a block used each time HHWebImageManager need to convert an URL into a cache key. This can + * be used to remove dynamic part of an image URL. 
+ * + * The following example sets a filter in the application delegate that will remove any query-string from the + * URL before to use it as a cache key: + * + * @code + +HHWebImageManager.sharedManager.cacheKeyFilter = ^(NSURL * _Nullable url) { + url = [[NSURL alloc] initWithScheme:url.scheme host:url.host path:url.path]; + return [url absoluteString]; +}; + + * @endcode + */ +@property (nonatomic, copy, nullable) HHWebImageCacheKeyFilterBlock cacheKeyFilter; + +/** + * The cache serializer is a block used to convert the decoded image, the source downloaded data, to the actual data used for storing to the disk cache. If you return nil, means to generate the data from the image instance, see `HHImageCache`. + * For example, if you are using WebP images and facing the slow decoding time issue when later retrieving from disk cache again. You can try to encode the decoded image to JPEG/PNG format to disk cache instead of source downloaded data. + * @note The `image` arg is nonnull, but when you also provide an image transformer and the image is transformed, the `data` arg may be nil, take attention to this case. + * @note This method is called from a global queue in order to not to block the main thread. + * @code + HHWebImageManager.sharedManager.cacheSerializer = ^NSData * _Nullable(UIImage * _Nonnull image, NSData * _Nullable data, NSURL * _Nullable imageURL) { + HHImageFormat format = [NSData HH_imageFormatForImageData:data]; + switch (format) { + case HHImageFormatWebP: + return image.images ? data : nil; + default: + return data; + } + }; + * @endcode + * The default value is nil. Means we just store the source downloaded data to disk cache. + */ +@property (nonatomic, copy, nullable) HHWebImageCacheSerializerBlock cacheSerializer; + +/** + * Returns global HHWebImageManager instance.
+ * + * @return HHWebImageManager shared instance + */ ++ (nonnull instancetype)sharedManager; + +/** + * Allows to specify instance of cache and image downloader used with image manager. + * @return new instance of `HHWebImageManager` with specified cache and downloader. + */ +- (nonnull instancetype)initWithCache:(nonnull HHImageCache *)cache downloader:(nonnull HHWebImageDownloader *)downloader NS_DESIGNATED_INITIALIZER; + +/** + * Downloads the image at the given URL if not present in cache or return the cached version otherwise. + * + * @param url The URL to the image + * @param options A mask to specify options to use for this request + * @param progressBlock A block called while image is downloading + * @note the progress block is executed on a background queue + * @param completedBlock A block called when operation has been completed. + * + * This parameter is required. + * + * This block has no return value and takes the requested UIImage as first parameter and the NSData representation as second parameter. + * In case of error the image parameter is nil and the third parameter may contain an NSError. + * + * The fourth parameter is an `HHImageCacheType` enum indicating if the image was retrieved from the local cache + * or from the memory cache or from the network. + * + * The fifth parameter is set to NO when the HHWebImageProgressiveDownload option is used and the image is + * downloading. This block is thus called repeatedly with a partial image. When image is fully downloaded, the + * block is called a last time with the full image and the last parameter set to YES. + * + * The last parameter is the original image URL + * + * @return Returns an NSObject conforming to HHWebImageOperation.
Should be an instance of SDWebImageDownloaderOperation + */ +- (nullable id <HHWebImageOperation>)loadImageWithURL:(nullable NSURL *)url + options:(HHWebImageOptions)options + progress:(nullable HHWebImageDownloaderProgressBlock)progressBlock + completed:(nullable HHInternalCompletionBlock)completedBlock; + +/** + * Saves image to cache for given URL + * + * @param image The image to cache + * @param url The URL to the image + * + */ + +- (void)saveImageToCache:(nullable UIImage *)image forURL:(nullable NSURL *)url; + +/** + * Cancel all current operations + */ +- (void)cancelAll; + +/** + * Check one or more operations running + */ +- (BOOL)isRunning; + +/** + * Async check if image has already been cached + * + * @param url image url + * @param completionBlock the block to be executed when the check is finished + * + * @note the completion block is always executed on the main queue + */ +- (void)cachedImageExistsForURL:(nullable NSURL *)url + completion:(nullable HHWebImageCheckCacheCompletionBlock)completionBlock; + +/** + * Async check if image has already been cached on disk only + * + * @param url image url + * @param completionBlock the block to be executed when the check is finished + * + * @note the completion block is always executed on the main queue + */ +- (void)diskImageExistsForURL:(nullable NSURL *)url + completion:(nullable HHWebImageCheckCacheCompletionBlock)completionBlock; + + +/** + *Return the cache key for a given URL + */ +- (nullable NSString *)cacheKeyForURL:(nullable NSURL *)url; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageOperation.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageOperation.h new file mode 100644 index 0000000..5134ec2 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageOperation.h @@ -0,0 +1,15 @@ +/* + * This file is part of the HHWebImage package. 
+ * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import <Foundation/Foundation.h> + +@protocol HHWebImageOperation <NSObject> + +- (void)cancel; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImagePrefetcher.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImagePrefetcher.h new file mode 100644 index 0000000..f4628f0 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImagePrefetcher.h @@ -0,0 +1,112 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import <Foundation/Foundation.h> +#import "HHWebImageManager.h" + +@class HHWebImagePrefetcher; + +@protocol HHWebImagePrefetcherDelegate <NSObject> + +@optional + +/** + * Called when an image was prefetched. + * + * @param imagePrefetcher The current image prefetcher + * @param imageURL The image url that was prefetched + * @param finishedCount The total number of images that were prefetched (successful or not) + * @param totalCount The total number of images that were to be prefetched + */ +- (void)imagePrefetcher:(nonnull HHWebImagePrefetcher *)imagePrefetcher didPrefetchURL:(nullable NSURL *)imageURL finishedCount:(NSUInteger)finishedCount totalCount:(NSUInteger)totalCount; + +/** + * Called when all images are prefetched. 
+ * @param imagePrefetcher The current image prefetcher + * @param totalCount The total number of images that were prefetched (whether successful or not) + * @param skippedCount The total number of images that were skipped + */ +- (void)imagePrefetcher:(nonnull HHWebImagePrefetcher *)imagePrefetcher didFinishWithTotalCount:(NSUInteger)totalCount skippedCount:(NSUInteger)skippedCount; + +@end + +typedef void(^HHWebImagePrefetcherProgressBlock)(NSUInteger noOfFinishedUrls, NSUInteger noOfTotalUrls); +typedef void(^HHWebImagePrefetcherCompletionBlock)(NSUInteger noOfFinishedUrls, NSUInteger noOfSkippedUrls); + +/** + * Prefetch some URLs in the cache for future use. Images are downloaded in low priority. + */ +@interface HHWebImagePrefetcher : NSObject + +/** + * The web image manager + */ +@property (strong, nonatomic, readonly, nonnull) HHWebImageManager *manager; + +/** + * Maximum number of URLs to prefetch at the same time. Defaults to 3. + */ +@property (nonatomic, assign) NSUInteger maxConcurrentDownloads; + +/** + * HHWebImageOptions for prefetcher. Defaults to HHWebImageLowPriority. + */ +@property (nonatomic, assign) HHWebImageOptions options; + +/** + * Queue options for Prefetcher. Defaults to Main Queue. + */ +@property (strong, nonatomic, nonnull) dispatch_queue_t prefetcherQueue; + +@property (weak, nonatomic, nullable) id <HHWebImagePrefetcherDelegate> delegate; + +/** + * Return the global image prefetcher instance. + */ ++ (nonnull instancetype)sharedImagePrefetcher; + +/** + * Allows you to instantiate a prefetcher with any arbitrary image manager. + */ +- (nonnull instancetype)initWithImageManager:(nonnull HHWebImageManager *)manager NS_DESIGNATED_INITIALIZER; + +/** + * Assign list of URLs to let HHWebImagePrefetcher to queue the prefetching, + * currently one image is downloaded at a time, + * and skips images for failed downloads and proceed to the next image in the list. + * Any previously-running prefetch operations are canceled. 
+ * + * @param urls list of URLs to prefetch + */ +- (void)prefetchURLs:(nullable NSArray<NSURL *> *)urls; + +/** + * Assign list of URLs to let HHWebImagePrefetcher to queue the prefetching, + * currently one image is downloaded at a time, + * and skips images for failed downloads and proceed to the next image in the list. + * Any previously-running prefetch operations are canceled. + * + * @param urls list of URLs to prefetch + * @param progressBlock block to be called when progress updates; + * first parameter is the number of completed (successful or not) requests, + * second parameter is the total number of images originally requested to be prefetched + * @param completionBlock block to be called when prefetching is completed + * first param is the number of completed (successful or not) requests, + * second parameter is the number of skipped requests + */ +- (void)prefetchURLs:(nullable NSArray<NSURL *> *)urls + progress:(nullable HHWebImagePrefetcherProgressBlock)progressBlock + completed:(nullable HHWebImagePrefetcherCompletionBlock)completionBlock; + +/** + * Remove and cancel queued list + */ +- (void)cancelPrefetching; + + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageTransition.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageTransition.h new file mode 100644 index 0000000..de34c2e --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/HHWebImageTransition.h @@ -0,0 +1,98 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import "HHWebImageCompat.h" + +#if HH_UIKIT || HH_MAC +#import "HHImageCache.h" + +// This class is used to provide a transition animation after the view category load image finished. 
Use this on `sd_imageTransition` in UIView+WebCache.h +// for UIKit(iOS & tvOS), we use `+[UIView transitionWithView:duration:options:animations:completion]` for transition animation. +// for AppKit(macOS), we use `+[NSAnimationContext runAnimationGroup:completionHandler:]` for transition animation. You can call `+[NSAnimationContext currentContext]` to grab the context during animations block. +// These transitions are provided for basic usage. If you need complicated animation, consider to directly use Core Animation or use `HHWebImageAvoidAutoSetImage` and implement your own after image load finished. + +#if HH_UIKIT +typedef UIViewAnimationOptions HHWebImageAnimationOptions; +#else +typedef NS_OPTIONS(NSUInteger, HHWebImageAnimationOptions) { + HHWebImageAnimationOptionAllowsImplicitAnimation = 1 << 0, // specify `allowsImplicitAnimation` for the `NSAnimationContext` +}; +#endif + +typedef void (^HHWebImageTransitionPreparesBlock)(__kindof UIView * _Nonnull view, UIImage * _Nullable image, NSData * _Nullable imageData, HHImageCacheType cacheType, NSURL * _Nullable imageURL); +typedef void (^HHWebImageTransitionAnimationsBlock)(__kindof UIView * _Nonnull view, UIImage * _Nullable image); +typedef void (^HHWebImageTransitionCompletionBlock)(BOOL finished); + +@interface HHWebImageTransition : NSObject + +/** + By default, we set the image to the view at the beginning of the animations. You can disable this and provide custom set image process + */ +@property (nonatomic, assign) BOOL avoidAutoSetImage; +/** + The duration of the transition animation, measured in seconds. Defaults to 0.5. + */ +@property (nonatomic, assign) NSTimeInterval duration; +/** + The timing function used for all animations within this transition animation (macOS). + */ +@property (nonatomic, strong, nullable) CAMediaTimingFunction *timingFunction NS_AVAILABLE_MAC(10_7); +/** + A mask of options indicating how you want to perform the animations.
+ */ +@property (nonatomic, assign) HHWebImageAnimationOptions animationOptions; +/** + A block object to be executed before the animation sequence starts. + */ +@property (nonatomic, copy, nullable) HHWebImageTransitionPreparesBlock prepares; +/** + A block object that contains the changes you want to make to the specified view. + */ +@property (nonatomic, copy, nullable) HHWebImageTransitionAnimationsBlock animations; +/** + A block object to be executed when the animation sequence ends. + */ +@property (nonatomic, copy, nullable) HHWebImageTransitionCompletionBlock completion; + +@end + +// Convenience way to create transition. Remember to specify the duration if needed. +// for UIKit, these transition just use the correspond `animationOptions` +// for AppKit, these transition use Core Animation in `animations`. So your view must be layer-backed. Set `wantsLayer = YES` before you apply it. + +@interface HHWebImageTransition (Conveniences) + +// class property is available in Xcode 8. We will drop the Xcode 7.3 support in 5.x +#if __has_feature(objc_class_property) +/// Fade transition. +@property (nonatomic, class, nonnull, readonly) HHWebImageTransition *fadeTransition; +/// Flip from left transition. +@property (nonatomic, class, nonnull, readonly) HHWebImageTransition *flipFromLeftTransition; +/// Flip from right transition. +@property (nonatomic, class, nonnull, readonly) HHWebImageTransition *flipFromRightTransition; +/// Flip from top transition. +@property (nonatomic, class, nonnull, readonly) HHWebImageTransition *flipFromTopTransition; +/// Flip from bottom transition. +@property (nonatomic, class, nonnull, readonly) HHWebImageTransition *flipFromBottomTransition; +/// Curl up transition. +@property (nonatomic, class, nonnull, readonly) HHWebImageTransition *curlUpTransition; +/// Curl down transition. 
+@property (nonatomic, class, nonnull, readonly) HHWebImageTransition *curlDownTransition; +#else ++ (nonnull instancetype)fadeTransition; ++ (nonnull instancetype)flipFromLeftTransition; ++ (nonnull instancetype)flipFromRightTransition; ++ (nonnull instancetype)flipFromTopTransition; ++ (nonnull instancetype)flipFromBottomTransition; ++ (nonnull instancetype)curlUpTransition; ++ (nonnull instancetype)curlDownTransition; +#endif + +@end + +#endif diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/NSData+hhImageContentType.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/NSData+hhImageContentType.h new file mode 100644 index 0000000..02380b6 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/NSData+hhImageContentType.h @@ -0,0 +1,42 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * (c) Fabrice Aneche + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. 
+ */ + +#import <Foundation/Foundation.h> +#import "HHWebImageCompat.h" + +typedef NS_ENUM(NSInteger, HHImageFormat) { + HHImageFormatUndefined = -1, + HHImageFormatJPEG = 0, + HHImageFormatPNG, + HHImageFormatGIF, + HHImageFormatTIFF, + HHImageFormatWebP, + HHImageFormatHEIC +}; + +@interface NSData (hhImageContentType) + +/** + * Return image format + * + * @param data the input image data + * + * @return the image format as `HHImageFormat` (enum) + */ ++ (HHImageFormat)HH_imageFormatForImageData:(nullable NSData *)data; + +/** + Convert HHImageFormat to UTType + + @param format Format as HHImageFormat + @return The UTType as CFStringRef + */ ++ (nonnull CFStringRef)HH_UTTypeFromHHImageFormat:(HHImageFormat)format; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/NSImage+hhWebCache.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/NSImage+hhWebCache.h new file mode 100644 index 0000000..c34426d --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/NSImage+hhWebCache.h @@ -0,0 +1,23 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import "HHWebImageCompat.h" + +#if HH_MAC + +#import <Cocoa/Cocoa.h> + +@interface NSImage (hhWebCache) + +- (CGImageRef)CGImage; +- (NSArray<NSImage *> *)images; +- (BOOL)isGIF; + +@end + +#endif diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/ObjectMapper.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/ObjectMapper.h new file mode 100644 index 0000000..83b2ad1 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/ObjectMapper.h @@ -0,0 +1,40 @@ +// +// ObjectMapper.h +// ObjectMapper +// +// Created by Tristan Himmelman on 2014-10-16. 
+// +// The MIT License (MIT) +// +// Copyright (c) 2014-2018 Tristan Himmelman +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + + +#import <Foundation/Foundation.h> + +//! Project version number for ObjectMapper. +FOUNDATION_EXPORT double ObjectMapperVersionNumber; + +//! Project version string for ObjectMapper. 
+FOUNDATION_EXPORT const unsigned char ObjectMapperVersionString[]; + +// In this header, you should import all the public headers of your framework using statements like #import <ObjectMapper/PublicHeader.h> + + diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/RSKImageCropVC.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/RSKImageCropVC.h new file mode 100755 index 0000000..d1baf70 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/RSKImageCropVC.h @@ -0,0 +1,262 @@ +// +// RSKImageCropVC.h +// +// Copyright (c) 2014-present Ruslan Skorb, http://ruslanskorb.com/ +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +// + +#import <UIKit/UIKit.h> + +@protocol RSKImageCropVCDataSource; +@protocol RSKImageCropVCDelegate; + +/** + Types of supported crop modes. 
+ */ +typedef NS_ENUM(NSUInteger, RSKImageCropMode) { + RSKImageCropModeCircle, + RSKImageCropModeSquare, + RSKImageCropModeCustom, + RSKImageCropModeLicense,//执照 + RSKImageCropModeIdCard,// + RSKImageCropModeIdCardFull,//两页展开资格证 + +}; + +@interface RSKImageCropVC : UIViewController + +/** + Designated initializer. Initializes and returns a newly allocated view controller object with the specified image. + + @param originalImage The image for cropping. + */ +- (instancetype)initWithImage:(UIImage *)originalImage; + +/** + Initializes and returns a newly allocated view controller object with the specified image and the specified crop mode. + + @param originalImage The image for cropping. + @param cropMode The mode for cropping. + */ +- (instancetype)initWithImage:(UIImage *)originalImage cropMode:(RSKImageCropMode)cropMode; + +///----------------------------- +/// @name Accessing the Delegate +///----------------------------- + +/** + The receiver's delegate. + + @discussion A `RSKImageCropVCDelegate` delegate responds to messages sent by completing / canceling crop the image in the image crop view controller. + */ +@property (weak, nonatomic) id<RSKImageCropVCDelegate> delegate; + +/** + The receiver's data source. + + @discussion A `RSKImageCropVCDataSource` data source provides a custom rect and a custom path for the mask. + */ +@property (weak, nonatomic) id<RSKImageCropVCDataSource> dataSource; + +///-------------------------- +/// @name Accessing the Image +///-------------------------- + +/** + The image for cropping. + */ +@property (strong, nonatomic) UIImage *originalImage; + +/// ----------------------------------- +/// @name Accessing the Mask Attributes +/// ----------------------------------- + +/** + The color of the layer with the mask. Default value is [UIColor colorWithRed:0.0f green:0.0f blue:0.0f alpha:0.7f]. + */ +@property (strong, nonatomic) UIColor *maskLayerColor; + +/** + The rect of the mask. 
+ + @discussion Updating each time before the crop view lays out its subviews. + */ +@property (assign, readonly, nonatomic) CGRect maskRect; + +/** + The path of the mask. + + @discussion Updating each time before the crop view lays out its subviews. + */ +@property (strong, readonly, nonatomic) UIBezierPath *maskPath; + +/// ----------------------------------- +/// @name Accessing the Crop Attributes +/// ----------------------------------- + +/** + The mode for cropping. Default value is `RSKImageCropModeCircle`. + */ +@property (assign, nonatomic) RSKImageCropMode cropMode; + +/** + The crop rectangle. + + @discussion The value is calculated at run time. + */ +@property (readonly, nonatomic) CGRect cropRect; + +/** + A value that specifies the current rotation angle of the image in radians. + + @discussion The value is calculated at run time. + */ +@property (readonly, nonatomic) CGFloat rotationAngle; + +/** + A floating-point value that specifies the current scale factor applied to the image. + + @discussion The value is calculated at run time. + */ +@property (readonly, nonatomic) CGFloat zoomScale; + +/** + A Boolean value that determines whether the image will always fill the mask space. Default value is `NO`. + */ +@property (assign, nonatomic) BOOL avoidEmptySpaceAroundImage; + +/** + A Boolean value that determines whether the mask applies to the image after cropping. Default value is `NO`. + */ +@property (assign, nonatomic) BOOL applyMaskToCroppedImage; + +/** + A Boolean value that controls whether the rotation gesture is enabled. Default value is `NO`. + + @discussion To support the rotation when `cropMode` is `RSKImageCropModeCustom` you must implement the data source method `imageCropViewControllerCustomMovementRect:`. + */ +@property (assign, getter=isRotationEnabled, nonatomic) BOOL rotationEnabled; + +/// ------------------------------- +/// @name Accessing the UI Elements +/// ------------------------------- + +/** + The Title Label.
+ */ +@property (strong, nonatomic, readonly) UILabel *moveAndScaleLabel; + +/** + The Cancel Button. + */ +@property (strong, nonatomic, readonly) UIButton *cancelButton; + +/** + The Choose Button. + */ +@property (strong, nonatomic, readonly) UIButton *chooseButton; + + + +@property(nonatomic,strong) NSString *mTipsStr; + +/// ------------------------------------------- +/// @name Checking of the Interface Orientation +/// ------------------------------------------- + +/** + Returns a Boolean value indicating whether the user interface is currently presented in a portrait orientation. + + @return YES if the interface orientation is portrait, otherwise returns NO. + */ +- (BOOL)isPortraitInterfaceOrientation; + +@end + +/** + The `RSKImageCropVCDataSource` protocol is adopted by an object that provides a custom rect and a custom path for the mask. + */ +@protocol RSKImageCropVCDataSource <NSObject> + +/** + Asks the data source a custom rect for the mask. + + @param controller The crop view controller object to whom a rect is provided. + + @return A custom rect for the mask. + + @discussion Only valid if `cropMode` is `RSKImageCropModeCustom`. + */ +- (CGRect)imageCropViewControllerCustomMaskRect:(RSKImageCropVC *)controller; + +/** + Asks the data source a custom path for the mask. + + @param controller The crop view controller object to whom a path is provided. + + @return A custom path for the mask. + + @discussion Only valid if `cropMode` is `RSKImageCropModeCustom`. + */ +- (UIBezierPath *)imageCropViewControllerCustomMaskPath:(RSKImageCropVC *)controller; + +@optional + +/** + Asks the data source a custom rect in which the image can be moved. + + @param controller The crop view controller object to whom a rect is provided. + + @return A custom rect in which the image can be moved. + + @discussion Only valid if `cropMode` is `RSKImageCropModeCustom`. If you want to support the rotation when `cropMode` is `RSKImageCropModeCustom` you must implement it. 
Will be marked as `required` in version `2.0.0`. + */ +- (CGRect)imageCropViewControllerCustomMovementRect:(RSKImageCropVC *)controller; + +@end + +/** + The `RSKImageCropVCDelegate` protocol defines messages sent to a image crop view controller delegate when crop image was canceled or the original image was cropped. + */ +@protocol RSKImageCropVCDelegate <NSObject> + +@optional + +/** + Tells the delegate that crop image has been canceled. + */ +- (void)imageCropViewControllerDidCancelCrop:(RSKImageCropVC *)controller; + +/** + Tells the delegate that the original image will be cropped. + */ +- (void)imageCropViewController:(RSKImageCropVC *)controller willCropImage:(UIImage *)originalImage; + +/** + Tells the delegate that the original image has been cropped. Additionally provides a crop rect used to produce image. + */ +- (void)imageCropViewController:(RSKImageCropVC *)controller didCropImage:(UIImage *)croppedImage usingCropRect:(CGRect)cropRect; + +/** + Tells the delegate that the original image has been cropped. Additionally provides a crop rect and a rotation angle used to produce image. 
+ */ +- (void)imageCropViewController:(RSKImageCropVC *)controller didCropImage:(UIImage *)croppedImage usingCropRect:(CGRect)cropRect rotationAngle:(CGFloat)rotationAngle; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/RSKImageCropViewController+Protected.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/RSKImageCropViewController+Protected.h new file mode 100755 index 0000000..046641b --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/RSKImageCropViewController+Protected.h @@ -0,0 +1,49 @@ +// +// RSKImageCropVC+Protected.h +// +// Copyright (c) 2014-present Ruslan Skorb, http://ruslanskorb.com/ +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +// + +/** + The methods in the RSKImageCropVCProtectedMethods category + typically should only be called by subclasses which are implementing new + image crop view controllers. They may be overridden but must call super. 
+ */ +@interface RSKImageCropVC (RSKImageCropVCProtectedMethods) + +/** + Asynchronously crops the original image in accordance with the current settings and tells the delegate that the original image will be / has been cropped. + */ +- (void)cropImage; + +/** + Tells the delegate that the crop has been canceled. + */ +- (void)cancelCrop; + +/** + Resets the rotation angle, the position and the zoom scale of the original image to the default values. + + @param animated Set this value to YES to animate the reset. + */ +- (void)reset:(BOOL)animated; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKCameraImageModel.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKCameraImageModel.h new file mode 100644 index 0000000..4b483d8 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKCameraImageModel.h @@ -0,0 +1,24 @@ +// +// HHCameraImage.h +// camera_Demo +// +// Created by shmily on 15/10/20. +// Copyright © 2015年 shmilyAshen. All rights reserved. +// + +#import <Foundation/Foundation.h> +#import <UIKit/UIKit.h> + +@interface SDKCameraImageModel : NSObject + +/// 全尺寸图像 +@property(nonatomic,strong)NSString *fullPath; + +@property(nonatomic,strong)NSString *scalledPath; + ++ (instancetype)cameraImageWithFullPath:(NSString *)fullPath scalledPath:(NSString *)scalledPath; + +- (bool)isMp4; + + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKCameraUtil.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKCameraUtil.h new file mode 100644 index 0000000..160475b --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKCameraUtil.h @@ -0,0 +1,82 @@ +// +// SDKCameraUtil.h +// CameraLibrary +// +// Created by shmily on 16/5/25. +// Copyright © 2016年 HHPacs. All rights reserved. 
+// + +#import <Foundation/Foundation.h> +#import <UIKit/UIKit.h> + +// 图片是否压缩临界点 +#define HHImgReduceSize 1000000 + +@class AVAssetExportSession; + +typedef enum : NSUInteger { + ProTypePacs, + ProTypeUser +} ProType; + + +@interface SDKCameraUtil : NSObject + +/// 是否应该被压缩 +@property(nonatomic,assign)BOOL shouldReduce; + +/// 家庭医生使用 +@property(nonatomic, assign)ProType type; + +/// 单例 ++ (instancetype)shareInstance; + +/// 获取图片bundle +//+ (NSBundle *)getBundle; + +/// 根据图片名获取图片 +//+ (UIImage *)getImage:(NSString *)imgName; + +/// 获取视频的缩略图 +/// @param videoURL 视频的URL +/// @param time 截图时间 ++ (UIImage *)thumbnailImageForVideo:(NSURL *)videoURL atTime:(NSTimeInterval)time; + +/// 根据视频路径获取对应缩略图 +//+ (UIImage *)thumImageForVideo:(NSString *)videoPath; + +/// 能否被压缩(小于200k不被压缩) ++ (BOOL)isCanReduce:(UIImage *)image; + ++ (BOOL)isCanReduceFile:(NSString *)imgPath; + +/// 压缩图像后覆盖原图 ++ (BOOL)reduceImage:(UIImage *)img path:(NSString *)path; + ++ (BOOL)isImage:(NSString *)path; + +/// 创建图片的本地路径 +/// +/// @param prefix 文件头 ++ (NSString *)createDocumentPath:(NSString *)prefix; + +// 获取图片(视频)缩略图路径 ++ (NSString *)getScallPath:(NSString *)fullPath; + +/// 写入文件 ++ (NSString *)writeImageToFile:(UIImage *)image; + ++ (NSString *)writeImageToFile:(UIImage *)image scale:(CGSize)size fullPath:(NSString *)fullPath; + ++ (NSString *)writeScaledImg:(UIImage *)image scale:(CGSize)size fullPath:(NSString *)fullPath; + +/// 压缩图片 ++ (void)zipImages:(NSArray<NSString *> *)imgPaths; + ++ (UIImage *)fixOrientation:(UIImage *)aImag; + ++ (void)videoFixOrientation: (NSURL *)url path:(NSString *)path finished:(void (^)(AVAssetExportSession *))finishBlock; + ++ (Boolean)isIPad; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKImageCropper.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKImageCropper.h new file mode 100755 index 0000000..e3c0116 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKImageCropper.h @@ -0,0 +1,29 @@ +// +// RSKImageCropper.h +// +// Copyright 
(c) 2014-present Ruslan Skorb, http://ruslanskorb.com/ +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +// + +/** + `RSKImageCropper` is an image cropper for iOS like in the Contacts app with support for landscape orientation. + */ + +#import "RSKImageCropVC.h" diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKImageScrollView.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKImageScrollView.h new file mode 100755 index 0000000..a810dd3 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKImageScrollView.h @@ -0,0 +1,57 @@ +/* + File: SDKImageScrollView.h + Abstract: Centers image within the scroll view and configures image sizing and display. + Version: 1.3 modified by Ruslan Skorb on 8/24/14. + + Disclaimer: IMPORTANT: This Apple software is supplied to you by Apple + Inc. 
("Apple") in consideration of your agreement to the following + terms, and your use, installation, modification or redistribution of + this Apple software constitutes acceptance of these terms. If you do + not agree with these terms, please do not use, install, modify or + redistribute this Apple software. + + In consideration of your agreement to abide by the following terms, and + subject to these terms, Apple grants you a personal, non-exclusive + license, under Apple's copyrights in this original Apple software (the + "Apple Software"), to use, reproduce, modify and redistribute the Apple + Software, with or without modifications, in source and/or binary forms; + provided that if you redistribute the Apple Software in its entirety and + without modifications, you must retain this notice and the following + text and disclaimers in all such redistributions of the Apple Software. + Neither the name, trademarks, service marks or logos of Apple Inc. may + be used to endorse or promote products derived from the Apple Software + without specific prior written permission from Apple. Except as + expressly stated in this notice, no other rights or licenses, express or + implied, are granted by Apple herein, including but not limited to any + patent rights that may be infringed by your derivative works or by other + works in which the Apple Software may be incorporated. + + The Apple Software is provided by Apple on an "AS IS" basis. APPLE + MAKES NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION + THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS + FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND + OPERATION ALONE OR IN COMBINATION WITH YOUR PRODUCTS. 
+ + IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL + OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, + MODIFICATION AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED + AND WHETHER UNDER THEORY OF CONTRACT, TORT (INCLUDING NEGLIGENCE), + STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + Copyright (C) 2012 Apple Inc. All Rights Reserved. + + */ + +#import <UIKit/UIKit.h> + +@interface SDKImageScrollView : UIScrollView + +@property (nonatomic, strong) UIImageView *zoomView; +@property (nonatomic, assign) BOOL aspectFill; + +- (void)displayImage:(UIImage *)image; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKPHAssetManager.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKPHAssetManager.h new file mode 100644 index 0000000..7c28ab4 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKPHAssetManager.h @@ -0,0 +1,26 @@ +// +// SDKPHAssetManager.h +// CameraLibrary +// +// Created by shmily on 16/3/16. +// Copyright © 2016年 HHPacs. All rights reserved. 
+// + +#import <UIKit/UIKit.h> +#import <Photos/Photos.h> + +@interface SDKPHAssetManager : NSObject + +- (void)tranformImage:(PHAsset *)asset finished:(void (^)(NSData *fullData,NSData *scaledData))finishBlock; + +// 返回填充的缩略图 ++ (UIImage *)image:(UIImage *)image fillSize: (CGSize)viewsize; + ++ (NSString*)createFilePath:(NSString *)aFileName; + +/// 是否有缓存 ++ (BOOL)isWriteCache:(NSString *)aPath setData:(NSData *)aData; + ++ (instancetype)shareManager; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKTouchView.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKTouchView.h new file mode 100755 index 0000000..6571c5b --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/SDKTouchView.h @@ -0,0 +1,31 @@ +// +// SDKTouchView.h +// +// Copyright (c) 2014-present Ruslan Skorb, http://ruslanskorb.com/ +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+// + +#import <UIKit/UIKit.h> + +@interface SDKTouchView : UIView + +@property (weak, nonatomic) UIView *receiver; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIApplication+RSKImageCropper.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIApplication+RSKImageCropper.h new file mode 100755 index 0000000..55de003 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIApplication+RSKImageCropper.h @@ -0,0 +1,39 @@ +// +// UIApplication+RSKImageCropper.h +// +// Copyright (c) 2015 Ruslan Skorb, http://ruslanskorb.com/ +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +// + +#import <UIKit/UIKit.h> + +/** + The category `RSKImageCropper` of the class `UIApplication` provides the method `rsk_sharedApplication` which returns `nil` in an application extension, otherwise it returns the singleton app instance. 
+ */ +@interface UIApplication (RSKImageCropper) + +/** + Returns `nil` in an application extension, otherwise returns the singleton app instance. + + @return `nil` in an application extension, otherwise the app instance is created in the `UIApplicationMain` function. + */ ++ (UIApplication *)rsk_sharedApplication; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIButton+hhWebCache.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIButton+hhWebCache.h new file mode 100644 index 0000000..f54c541 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIButton+hhWebCache.h @@ -0,0 +1,255 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import "HHWebImageCompat.h" + +#if HH_UIKIT + +#import "HHWebImageManager.h" + +/** + * Integrates HHWebImage async downloading and caching of remote images with UIButtonView. + */ +@interface UIButton (hhWebCache) + +#pragma mark - Image + +/** + * Get the current image URL. + */ +- (nullable NSURL *)HH_currentImageURL; + +/** + * Get the image URL for a control state. + * + * @param state Which state you want to know the URL for. The values are described in UIControlState. + */ +- (nullable NSURL *)HH_imageURLForState:(UIControlState)state; + +/** + * Set the imageView `image` with an `url`. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param state The state that uses the specified title. The values are described in UIControlState. + */ +- (void)HH_setImageWithURL:(nullable NSURL *)url + forState:(UIControlState)state NS_REFINED_FOR_SWIFT; + +/** + * Set the imageView `image` with an `url` and a placeholder. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param state The state that uses the specified title. 
The values are described in UIControlState. + * @param placeholder The image to be set initially, until the image request finishes. + * @see HH_setImageWithURL:placeholderImage:options: + */ +- (void)HH_setImageWithURL:(nullable NSURL *)url + forState:(UIControlState)state + placeholderImage:(nullable UIImage *)placeholder NS_REFINED_FOR_SWIFT; + +/** + * Set the imageView `image` with an `url`, placeholder and custom options. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param state The state that uses the specified title. The values are described in UIControlState. + * @param placeholder The image to be set initially, until the image request finishes. + * @param options The options to use when downloading the image. @see HHWebImageOptions for the possible values. + */ +- (void)HH_setImageWithURL:(nullable NSURL *)url + forState:(UIControlState)state + placeholderImage:(nullable UIImage *)placeholder + options:(HHWebImageOptions)options NS_REFINED_FOR_SWIFT; + +/** + * Set the imageView `image` with an `url`. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param state The state that uses the specified title. The values are described in UIControlState. + * @param completedBlock A block called when operation has been completed. This block has no return value + * and takes the requested UIImage as first parameter. In case of error the image parameter + * is nil and the second parameter may contain an NSError. The third parameter is a Boolean + * indicating if the image was retrieved from the local cache or from the network. + * The fourth parameter is the original image url. + */ +- (void)HH_setImageWithURL:(nullable NSURL *)url + forState:(UIControlState)state + completed:(nullable HHExternalCompletionBlock)completedBlock; + +/** + * Set the imageView `image` with an `url`, placeholder. + * + * The download is asynchronous and cached. 
+ * + * @param url The url for the image. + * @param state The state that uses the specified title. The values are described in UIControlState. + * @param placeholder The image to be set initially, until the image request finishes. + * @param completedBlock A block called when operation has been completed. This block has no return value + * and takes the requested UIImage as first parameter. In case of error the image parameter + * is nil and the second parameter may contain an NSError. The third parameter is a Boolean + * indicating if the image was retrieved from the local cache or from the network. + * The fourth parameter is the original image url. + */ +- (void)HH_setImageWithURL:(nullable NSURL *)url + forState:(UIControlState)state + placeholderImage:(nullable UIImage *)placeholder + completed:(nullable HHExternalCompletionBlock)completedBlock NS_REFINED_FOR_SWIFT; + +/** + * Set the imageView `image` with an `url`, placeholder and custom options. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param state The state that uses the specified title. The values are described in UIControlState. + * @param placeholder The image to be set initially, until the image request finishes. + * @param options The options to use when downloading the image. @see HHWebImageOptions for the possible values. + * @param completedBlock A block called when operation has been completed. This block has no return value + * and takes the requested UIImage as first parameter. In case of error the image parameter + * is nil and the second parameter may contain an NSError. The third parameter is a Boolean + * indicating if the image was retrieved from the local cache or from the network. + * The fourth parameter is the original image url. 
+ */ +- (void)HH_setImageWithURL:(nullable NSURL *)url + forState:(UIControlState)state + placeholderImage:(nullable UIImage *)placeholder + options:(HHWebImageOptions)options + completed:(nullable HHExternalCompletionBlock)completedBlock; + +#pragma mark - Background Image + +/** + * Get the current background image URL. + */ +- (nullable NSURL *)HH_currentBackgroundImageURL; + +/** + * Get the background image URL for a control state. + * + * @param state Which state you want to know the URL for. The values are described in UIControlState. + */ +- (nullable NSURL *)HH_backgroundImageURLForState:(UIControlState)state; + +/** + * Set the backgroundImageView `image` with an `url`. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param state The state that uses the specified title. The values are described in UIControlState. + */ +- (void)HH_setBackgroundImageWithURL:(nullable NSURL *)url + forState:(UIControlState)state NS_REFINED_FOR_SWIFT; + +/** + * Set the backgroundImageView `image` with an `url` and a placeholder. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param state The state that uses the specified title. The values are described in UIControlState. + * @param placeholder The image to be set initially, until the image request finishes. + * @see HH_setImageWithURL:placeholderImage:options: + */ +- (void)HH_setBackgroundImageWithURL:(nullable NSURL *)url + forState:(UIControlState)state + placeholderImage:(nullable UIImage *)placeholder NS_REFINED_FOR_SWIFT; + +/** + * Set the backgroundImageView `image` with an `url`, placeholder and custom options. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param state The state that uses the specified title. The values are described in UIControlState. + * @param placeholder The image to be set initially, until the image request finishes. 
+ * @param options The options to use when downloading the image. @see HHWebImageOptions for the possible values. + */ +- (void)HH_setBackgroundImageWithURL:(nullable NSURL *)url + forState:(UIControlState)state + placeholderImage:(nullable UIImage *)placeholder + options:(HHWebImageOptions)options NS_REFINED_FOR_SWIFT; + +/** + * Set the backgroundImageView `image` with an `url`. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param state The state that uses the specified title. The values are described in UIControlState. + * @param completedBlock A block called when operation has been completed. This block has no return value + * and takes the requested UIImage as first parameter. In case of error the image parameter + * is nil and the second parameter may contain an NSError. The third parameter is a Boolean + * indicating if the image was retrieved from the local cache or from the network. + * The fourth parameter is the original image url. + */ +- (void)HH_setBackgroundImageWithURL:(nullable NSURL *)url + forState:(UIControlState)state + completed:(nullable HHExternalCompletionBlock)completedBlock; + +/** + * Set the backgroundImageView `image` with an `url`, placeholder. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param state The state that uses the specified title. The values are described in UIControlState. + * @param placeholder The image to be set initially, until the image request finishes. + * @param completedBlock A block called when operation has been completed. This block has no return value + * and takes the requested UIImage as first parameter. In case of error the image parameter + * is nil and the second parameter may contain an NSError. The third parameter is a Boolean + * indicating if the image was retrieved from the local cache or from the network. + * The fourth parameter is the original image url. 
+ */ +- (void)HH_setBackgroundImageWithURL:(nullable NSURL *)url + forState:(UIControlState)state + placeholderImage:(nullable UIImage *)placeholder + completed:(nullable HHExternalCompletionBlock)completedBlock NS_REFINED_FOR_SWIFT; + +/** + * Set the backgroundImageView `image` with an `url`, placeholder and custom options. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param placeholder The image to be set initially, until the image request finishes. + * @param options The options to use when downloading the image. @see HHWebImageOptions for the possible values. + * @param completedBlock A block called when operation has been completed. This block has no return value + * and takes the requested UIImage as first parameter. In case of error the image parameter + * is nil and the second parameter may contain an NSError. The third parameter is a Boolean + * indicating if the image was retrieved from the local cache or from the network. + * The fourth parameter is the original image url. 
+ */ +- (void)HH_setBackgroundImageWithURL:(nullable NSURL *)url + forState:(UIControlState)state + placeholderImage:(nullable UIImage *)placeholder + options:(HHWebImageOptions)options + completed:(nullable HHExternalCompletionBlock)completedBlock; + +#pragma mark - Cancel + +/** + * Cancel the current image download + */ +- (void)HH_cancelImageLoadForState:(UIControlState)state; + +/** + * Cancel the current backgroundImage download + */ +- (void)HH_cancelBackgroundImageLoadForState:(UIControlState)state; + +@end + +#endif diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+RSKImageCropper.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+RSKImageCropper.h new file mode 100755 index 0000000..6079dab --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+RSKImageCropper.h @@ -0,0 +1,35 @@ +// +// UIImage+RSKImageCropper.h +// +// Copyright (c) 2014-present Ruslan Skorb, http://ruslanskorb.com/ +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +// + +#import <UIKit/UIKit.h> + +@interface UIImage (RSKImageCropper) + +// Fix the orientation of the image. +- (UIImage *)fixOrientation; + +// Rotate the image clockwise around the center by the angle, in radians. +- (UIImage *)rotateByAngle:(CGFloat)angleInRadians; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+hhForceDecode.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+hhForceDecode.h new file mode 100644 index 0000000..7e52334 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+hhForceDecode.h @@ -0,0 +1,17 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import "HHWebImageCompat.h" + +@interface UIImage (hhForceDecode) + ++ (nullable UIImage *)decodedImageWithImage:(nullable UIImage *)image; + ++ (nullable UIImage *)decodedAndScaledDownImageWithImage:(nullable UIImage *)image; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+hhGIF.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+hhGIF.h new file mode 100644 index 0000000..59765dc --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+hhGIF.h @@ -0,0 +1,25 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * (c) Laurin Brandner + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import "HHWebImageCompat.h" + +@interface UIImage (hhGIF) + +/** + * Creates an animated UIImage from an NSData. 
+ * For static GIF, will create an UIImage with `images` array set to nil. For animated GIF, will create an UIImage with valid `images` array. + */ ++ (UIImage *)HH_animatedGIFWithData:(NSData *)data; + +/** + * Checks if an UIImage instance is a GIF. Will use the `images` array. + */ +- (BOOL)isGIF; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+hhMultiFormat.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+hhMultiFormat.h new file mode 100644 index 0000000..5fba0b0 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImage+hhMultiFormat.h @@ -0,0 +1,30 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import "HHWebImageCompat.h" +#import "NSData+hhImageContentType.h" + +@interface UIImage (hhMultiFormat) + +/** + * UIKit: + * For static image format, this value is always 0. + * For animated image format, 0 means infinite looping. + * Note that because of the limitations of categories this property can get out of sync if you create another instance with CGImage or other methods. + * AppKit: + * NSImage currently only support animated via GIF imageRep unlike UIImage. 
+ * The getter of this property will get the loop count from GIF imageRep + * The setter of this property will set the loop count from GIF imageRep + */ +@property (nonatomic, assign) NSUInteger HH_imageLoopCount; + ++ (nullable UIImage *)HH_imageWithData:(nullable NSData *)data; +- (nullable NSData *)HH_imageData; +- (nullable NSData *)HH_imageDataAsFormat:(HHImageFormat)imageFormat; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImageView+hhHighlightedWebCache.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImageView+hhHighlightedWebCache.h new file mode 100644 index 0000000..9102fbf --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImageView+hhHighlightedWebCache.h @@ -0,0 +1,94 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import "HHWebImageCompat.h" + +#if HH_UIKIT + +#import "HHWebImageManager.h" + +/** + * Integrates HHWebImage async downloading and caching of remote images with UIImageView for highlighted state. + */ +@interface UIImageView (hhHighlightedWebCache) + +/** + * Set the imageView `highlightedImage` with an `url`. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + */ +- (void)HH_setHighlightedImageWithURL:(nullable NSURL *)url NS_REFINED_FOR_SWIFT; + +/** + * Set the imageView `highlightedImage` with an `url` and custom options. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param options The options to use when downloading the image. @see HHWebImageOptions for the possible values. + */ +- (void)HH_setHighlightedImageWithURL:(nullable NSURL *)url + options:(HHWebImageOptions)options NS_REFINED_FOR_SWIFT; + +/** + * Set the imageView `highlightedImage` with an `url`. + * + * The download is asynchronous and cached. 
+ * + * @param url The url for the image. + * @param completedBlock A block called when operation has been completed. This block has no return value + * and takes the requested UIImage as first parameter. In case of error the image parameter + * is nil and the second parameter may contain an NSError. The third parameter is a Boolean + * indicating if the image was retrieved from the local cache or from the network. + * The fourth parameter is the original image url. + */ +- (void)HH_setHighlightedImageWithURL:(nullable NSURL *)url + completed:(nullable HHExternalCompletionBlock)completedBlock NS_REFINED_FOR_SWIFT; + +/** + * Set the imageView `highlightedImage` with an `url` and custom options. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param options The options to use when downloading the image. @see HHWebImageOptions for the possible values. + * @param completedBlock A block called when operation has been completed. This block has no return value + * and takes the requested UIImage as first parameter. In case of error the image parameter + * is nil and the second parameter may contain an NSError. The third parameter is a Boolean + * indicating if the image was retrieved from the local cache or from the network. + * The fourth parameter is the original image url. + */ +- (void)HH_setHighlightedImageWithURL:(nullable NSURL *)url + options:(HHWebImageOptions)options + completed:(nullable HHExternalCompletionBlock)completedBlock; + +/** + * Set the imageView `highlightedImage` with an `url` and custom options. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param options The options to use when downloading the image. @see HHWebImageOptions for the possible values. + * @param progressBlock A block called while image is downloading + * @note the progress block is executed on a background queue + * @param completedBlock A block called when operation has been completed. 
This block has no return value + * and takes the requested UIImage as first parameter. In case of error the image parameter + * is nil and the second parameter may contain an NSError. The third parameter is a Boolean + * indicating if the image was retrieved from the local cache or from the network. + * The fourth parameter is the original image url. + */ +- (void)HH_setHighlightedImageWithURL:(nullable NSURL *)url + options:(HHWebImageOptions)options + progress:(nullable HHWebImageDownloaderProgressBlock)progressBlock + completed:(nullable HHExternalCompletionBlock)completedBlock; + +@end + +#endif diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImageView+hhWebCache.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImageView+hhWebCache.h new file mode 100644 index 0000000..d076cfc --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIImageView+hhWebCache.h @@ -0,0 +1,167 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import "HHWebImageCompat.h" + +#if HH_UIKIT || HH_MAC + +#import "HHWebImageManager.h" + + +@interface UIImageView (hhWebCache) + +/** + * Set the imageView `image` with an `url`. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + */ +- (void)HH_setImageWithURL:(nullable NSURL *)url NS_REFINED_FOR_SWIFT; + +/** + * Set the imageView `image` with an `url` and a placeholder. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param placeholder The image to be set initially, until the image request finishes. 
+ * @see HH_setImageWithURL:placeholderImage:options: + */ +- (void)HH_setImageWithURL:(nullable NSURL *)url + placeholderImage:(nullable UIImage *)placeholder NS_REFINED_FOR_SWIFT; + +/** + * Set the imageView `image` with an `url`, placeholder and custom options. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param placeholder The image to be set initially, until the image request finishes. + * @param options The options to use when downloading the image. @see HHWebImageOptions for the possible values. + */ +- (void)HH_setImageWithURL:(nullable NSURL *)url + placeholderImage:(nullable UIImage *)placeholder + options:(HHWebImageOptions)options NS_REFINED_FOR_SWIFT; + +/** + * Set the imageView `image` with an `url`. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param completedBlock A block called when operation has been completed. This block has no return value + * and takes the requested UIImage as first parameter. In case of error the image parameter + * is nil and the second parameter may contain an NSError. The third parameter is a Boolean + * indicating if the image was retrieved from the local cache or from the network. + * The fourth parameter is the original image url. + */ +- (void)HH_setImageWithURL:(nullable NSURL *)url + completed:(nullable HHExternalCompletionBlock)completedBlock; + +/** + * Set the imageView `image` with an `url`, placeholder. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param placeholder The image to be set initially, until the image request finishes. + * @param completedBlock A block called when operation has been completed. This block has no return value + * and takes the requested UIImage as first parameter. In case of error the image parameter + * is nil and the second parameter may contain an NSError. 
The third parameter is a Boolean + * indicating if the image was retrieved from the local cache or from the network. + * The fourth parameter is the original image url. + */ +- (void)HH_setImageWithURL:(nullable NSURL *)url + placeholderImage:(nullable UIImage *)placeholder + completed:(nullable HHExternalCompletionBlock)completedBlock NS_REFINED_FOR_SWIFT; + +/** + * Set the imageView `image` with an `url`, placeholder and custom options. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param placeholder The image to be set initially, until the image request finishes. + * @param options The options to use when downloading the image. @see HHWebImageOptions for the possible values. + * @param completedBlock A block called when operation has been completed. This block has no return value + * and takes the requested UIImage as first parameter. In case of error the image parameter + * is nil and the second parameter may contain an NSError. The third parameter is a Boolean + * indicating if the image was retrieved from the local cache or from the network. + * The fourth parameter is the original image url. + */ +- (void)HH_setImageWithURL:(nullable NSURL *)url + placeholderImage:(nullable UIImage *)placeholder + options:(HHWebImageOptions)options + completed:(nullable HHExternalCompletionBlock)completedBlock; + +/** + * Set the imageView `image` with an `url`, placeholder and custom options. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param placeholder The image to be set initially, until the image request finishes. + * @param options The options to use when downloading the image. @see HHWebImageOptions for the possible values. + * @param progressBlock A block called while image is downloading + * @note the progress block is executed on a background queue + * @param completedBlock A block called when operation has been completed. 
This block has no return value + * and takes the requested UIImage as first parameter. In case of error the image parameter + * is nil and the second parameter may contain an NSError. The third parameter is a Boolean + * indicating if the image was retrieved from the local cache or from the network. + * The fourth parameter is the original image url. + */ +- (void)HH_setImageWithURL:(nullable NSURL *)url + placeholderImage:(nullable UIImage *)placeholder + options:(HHWebImageOptions)options + progress:(nullable HHWebImageDownloaderProgressBlock)progressBlock + completed:(nullable HHExternalCompletionBlock)completedBlock; + +/** + * Set the imageView `image` with an `url` and custom options. The placeholder image is from previous cached image and will use the provided one instead if the query failed. + * This method was designed to ensure that placeholder and query cache process happened in the same runloop to avoid flashing on cell during two `setImage:` call. But it's really misunderstanding and deprecated. + * This can be done by using `HH_setImageWithURL:` with `HHWebImageQueryDiskSync`. But take care that if the memory cache missed, query disk cache synchronously may reduce the frame rate + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param placeholder The image to be set initially, until the image request finishes. + * @param options The options to use when downloading the image. @see HHWebImageOptions for the possible values. + * @param progressBlock A block called while image is downloading + * @note the progress block is executed on a background queue + * @param completedBlock A block called when operation has been completed. This block has no return value + * and takes the requested UIImage as first parameter. In case of error the image parameter + * is nil and the second parameter may contain an NSError. 
The third parameter is a Boolean + * indicating if the image was retrieved from the local cache or from the network. + * The fourth parameter is the original image url. + * @deprecated consider using `HHWebImageQueryDiskSync` options with `HH_setImageWithURL:` instead + */ +- (void)HH_setImageWithPreviousCachedImageWithURL:(nullable NSURL *)url + placeholderImage:(nullable UIImage *)placeholder + options:(HHWebImageOptions)options + progress:(nullable HHWebImageDownloaderProgressBlock)progressBlock + completed:(nullable HHExternalCompletionBlock)completedBlock __deprecated_msg("This method is misunderstanding and deprecated, consider using `HHWebImageQueryDiskSync` options with `HH_setImageWithURL:` instead"); + +#if HH_UIKIT + +#pragma mark - Animation of multiple images + +/** + * Download an array of images and starts them in an animation loop + * + * @param arrayOfURLs An array of NSURL + */ +- (void)HH_setAnimationImagesWithURLs:(nonnull NSArray<NSURL *> *)arrayOfURLs; + +- (void)HH_cancelCurrentAnimationImagesLoad; + +#endif + +@end + +#endif diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/UITextView+Placeholder.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UITextView+Placeholder.h new file mode 100644 index 0000000..466886d --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UITextView+Placeholder.h @@ -0,0 +1,38 @@ +// The MIT License (MIT) +// +// Copyright (c) 2014 Suyeol Jeon (http:xoul.kr) +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// 
copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +@import UIKit; + +FOUNDATION_EXPORT double UITextView_PlaceholderVersionNumber; +FOUNDATION_EXPORT const unsigned char UITextView_PlaceholderVersionString[]; + +@interface UITextView (Placeholder) + +@property (nonatomic, readonly) UILabel *placeholderLabel; + +@property (nonatomic, strong) IBInspectable NSString *placeholder; +@property (nonatomic, strong) NSAttributedString *attributedPlaceholder; +@property (nonatomic, strong) IBInspectable UIColor *placeholderColor; + ++ (UIColor *)defaultPlaceholderColor; + +@end diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIView+hhWebCache.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIView+hhWebCache.h new file mode 100644 index 0000000..f1a20a0 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIView+hhWebCache.h @@ -0,0 +1,140 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import "HHWebImageCompat.h" + +#if HH_UIKIT || HH_MAC + +#import "HHWebImageManager.h" +#import "HHWebImageTransition.h" + +/** + A Dispatch group to maintain setImageBlock and completionBlock. This key should be used only internally and may be changed in the future. 
(dispatch_group_t) + */ +FOUNDATION_EXPORT NSString * _Nonnull const HHWebImageInternalSetImageGroupKey; +/** + A HHWebImageManager instance to control the image download and cache process using in UIImageView+WebCache category and likes. If not provided, use the shared manager (HHWebImageManager) + */ +FOUNDATION_EXPORT NSString * _Nonnull const HHWebImageExternalCustomManagerKey; +/** + The value specify that the image progress unit count cannot be determined because the progressBlock is not been called. + */ +FOUNDATION_EXPORT const int64_t HHWebImageProgressUnitCountUnknown; /* 1LL */ + +typedef void(^HHSetImageBlock)(UIImage * _Nullable image, NSData * _Nullable imageData); + +@interface UIView (hhWebCache) + +/** + * Get the current image URL. + * + * @note Note that because of the limitations of categories this property can get out of sync if you use setImage: directly. + */ +- (nullable NSURL *)HH_imageURL; + +/** + * The current image loading progress associated to the view. The unit count is the received size and excepted size of download. + * The `totalUnitCount` and `completedUnitCount` will be reset to 0 after a new image loading start (change from current queue). And they will be set to `HHWebImageProgressUnitCountUnknown` if the progressBlock not been called but the image loading success to mark the progress finished (change from main queue). + * @note You can use Key-Value Observing on the progress, but you should take care that the change to progress is from a background queue during download(the same as progressBlock). If you want to using KVO and update the UI, make sure to dispatch on the main queue. And it's recommand to use some KVO libs like KVOController because it's more safe and easy to use. + * @note The getter will create a progress instance if the value is nil. 
You can also set a custom progress instance and let it been updated during image loading + * @note Note that because of the limitations of categories this property can get out of sync if you update the progress directly. + */ +@property (nonatomic, strong, null_resettable) NSProgress *HH_imageProgress; + +/** + * Set the imageView `image` with an `url` and optionally a placeholder image. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. + * @param placeholder The image to be set initially, until the image request finishes. + * @param options The options to use when downloading the image. @see HHWebImageOptions for the possible values. + * @param operationKey A string to be used as the operation key. If nil, will use the class name + * @param setImageBlock Block used for custom set image code + * @param progressBlock A block called while image is downloading + * @note the progress block is executed on a background queue + * @param completedBlock A block called when operation has been completed. This block has no return value + * and takes the requested UIImage as first parameter. In case of error the image parameter + * is nil and the second parameter may contain an NSError. The third parameter is a Boolean + * indicating if the image was retrieved from the local cache or from the network. + * The fourth parameter is the original image url. + */ +- (void)HH_internalSetImageWithURL:(nullable NSURL *)url + placeholderImage:(nullable UIImage *)placeholder + options:(HHWebImageOptions)options + operationKey:(nullable NSString *)operationKey + setImageBlock:(nullable HHSetImageBlock)setImageBlock + progress:(nullable HHWebImageDownloaderProgressBlock)progressBlock + completed:(nullable HHExternalCompletionBlock)completedBlock; + +/** + * Set the imageView `image` with an `url` and optionally a placeholder image. + * + * The download is asynchronous and cached. + * + * @param url The url for the image. 
+ * @param placeholder The image to be set initially, until the image request finishes. + * @param options The options to use when downloading the image. @see HHWebImageOptions for the possible values. + * @param operationKey A string to be used as the operation key. If nil, will use the class name + * @param setImageBlock Block used for custom set image code + * @param progressBlock A block called while image is downloading + * @note the progress block is executed on a background queue + * @param completedBlock A block called when operation has been completed. This block has no return value + * and takes the requested UIImage as first parameter. In case of error the image parameter + * is nil and the second parameter may contain an NSError. The third parameter is a Boolean + * indicating if the image was retrieved from the local cache or from the network. + * The fourth parameter is the original image url. + * @param context A context with extra information to perform specify changes or processes. + */ +- (void)HH_internalSetImageWithURL:(nullable NSURL *)url + placeholderImage:(nullable UIImage *)placeholder + options:(HHWebImageOptions)options + operationKey:(nullable NSString *)operationKey + setImageBlock:(nullable HHSetImageBlock)setImageBlock + progress:(nullable HHWebImageDownloaderProgressBlock)progressBlock + completed:(nullable HHExternalCompletionBlock)completedBlock + context:(nullable NSDictionary<NSString *, id> *)context; + +/** + * Cancel the current image load + */ +- (void)HH_cancelCurrentImageLoad; + +#pragma mark - Image Transition + +/** + The image transition when image load finished. See `HHWebImageTransition`. + If you specify nil, do not do transition. Defautls to nil. 
+ */ +@property (nonatomic, strong, nullable) HHWebImageTransition *HH_imageTransition; + +#if HH_UIKIT + +#pragma mark - Activity indicator + +/** + * Show activity UIActivityIndicatorView + */ +- (void)HH_setShowActivityIndicatorView:(BOOL)show; + +/** + * set desired UIActivityIndicatorViewStyle + * + * @param style The style of the UIActivityIndicatorView + */ +- (void)HH_setIndicatorStyle:(UIActivityIndicatorViewStyle)style; + +- (BOOL)HH_showActivityIndicatorView; +- (void)HH_addActivityIndicator; +- (void)HH_removeActivityIndicator; + +#endif + +@end + +#endif diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIView+hhWebCacheOperation.h b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIView+hhWebCacheOperation.h new file mode 100644 index 0000000..a808d9b --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Headers/UIView+hhWebCacheOperation.h @@ -0,0 +1,43 @@ +/* + * This file is part of the HHWebImage package. + * (c) Olivier Poitrey <rs@dailymotion.com> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +#import "HHWebImageCompat.h" + +#if HH_UIKIT || HH_MAC + +#import "HHWebImageManager.h" + +// These methods are used to support canceling for UIView image loading, it's designed to be used internal but not external. +// All the stored operations are weak, so it will be dalloced after image loading finished. If you need to store operations, use your own class to keep a strong reference for them. 
+@interface UIView (hhWebCacheOperation) + +/** + * Set the image load operation (storage in a UIView based weak map table) + * + * @param operation the operation + * @param key key for storing the operation + */ +- (void)HH_setImageLoadOperation:(nullable id<HHWebImageOperation>)operation forKey:(nullable NSString *)key; + +/** + * Cancel all operations for the current UIView and key + * + * @param key key for identifying the operations + */ +- (void)HH_cancelImageLoadOperationWithKey:(nullable NSString *)key; + +/** + * Just remove the operations corresponding to the current UIView and key without cancelling them + * + * @param key key for identifying the operations + */ +- (void)HH_removeImageLoadOperationWithKey:(nullable NSString *)key; + +@end + +#endif diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Info.plist b/HHVDoctorSDK/HHSDKVideo.framework/Info.plist new file mode 100644 index 0000000..11ed8b9 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Info.plist differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/MedicItemView.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/MedicItemView.nib/objects-11.0+.nib new file mode 100644 index 0000000..d7e2941 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/MedicItemView.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/MedicItemView.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/MedicItemView.nib/runtime.nib new file mode 100644 index 0000000..2aac78c Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/MedicItemView.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/Info.plist b/HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/Info.plist new file mode 100644 index 0000000..b6e6b81 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/Info.plist differ diff --git 
a/HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/LXC-Q0-bne-view-uRS-pr-z6i.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/LXC-Q0-bne-view-uRS-pr-z6i.nib/objects-11.0+.nib new file mode 100644 index 0000000..086df50 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/LXC-Q0-bne-view-uRS-pr-z6i.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/LXC-Q0-bne-view-uRS-pr-z6i.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/LXC-Q0-bne-view-uRS-pr-z6i.nib/runtime.nib new file mode 100644 index 0000000..1a5e199 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/LXC-Q0-bne-view-uRS-pr-z6i.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/UIViewController-LXC-Q0-bne.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/UIViewController-LXC-Q0-bne.nib/objects-11.0+.nib new file mode 100644 index 0000000..4196bec Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/UIViewController-LXC-Q0-bne.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/UIViewController-LXC-Q0-bne.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/UIViewController-LXC-Q0-bne.nib/runtime.nib new file mode 100644 index 0000000..90e8f9b Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/MedicStatus.storyboardc/UIViewController-LXC-Q0-bne.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/MemberListView.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/MemberListView.nib/objects-11.0+.nib new file mode 100644 index 0000000..a5e77a8 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/MemberListView.nib/objects-11.0+.nib differ diff --git 
a/HHVDoctorSDK/HHSDKVideo.framework/MemberListView.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/MemberListView.nib/runtime.nib new file mode 100644 index 0000000..830ca4f Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/MemberListView.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/arm64-apple-ios.swiftsourceinfo b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/arm64-apple-ios.swiftsourceinfo new file mode 100644 index 0000000..f4548ac Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/arm64-apple-ios.swiftsourceinfo differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/arm64.swiftsourceinfo b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/arm64.swiftsourceinfo new file mode 100644 index 0000000..f4548ac Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/arm64.swiftsourceinfo differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/x86_64-apple-ios-simulator.swiftsourceinfo b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/x86_64-apple-ios-simulator.swiftsourceinfo new file mode 100644 index 0000000..7e1741c Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/x86_64-apple-ios-simulator.swiftsourceinfo differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/x86_64.swiftsourceinfo b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/x86_64.swiftsourceinfo new file mode 100644 index 0000000..7e1741c Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/Project/x86_64.swiftsourceinfo differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64-apple-ios.swiftdoc 
b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64-apple-ios.swiftdoc new file mode 100644 index 0000000..34fba8f Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64-apple-ios.swiftdoc differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64-apple-ios.swiftinterface b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64-apple-ios.swiftinterface new file mode 100644 index 0000000..bd0c3c8 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64-apple-ios.swiftinterface @@ -0,0 +1,5650 @@ +// swift-interface-format-version: 1.0 +// swift-compiler-version: Apple Swift version 5.4.2 (swiftlang-1205.0.28.2 clang-1205.0.19.57) +// swift-module-flags: -target arm64-apple-ios10.0 -enable-objc-interop -enable-library-evolution -swift-version 5 -enforce-exclusivity=checked -O -module-name HHSDKVideo +import AVFoundation +import AVKit +import Accelerate +import CoreGraphics +import CoreLocation +import CoreMotion +import CoreTelephony +import Darwin +import Dispatch +import Foundation +@_exported import HHSDKVideo +import ImageIO +import LocalAuthentication +import MobileCoreServices +import ObjectiveC +import Photos +import PhotosUI +import Security +import SecurityKit +import Swift +import SystemConfiguration +import UIKit +import UserNotifications +import WebKit +public protocol AEAD { + static var kLen: Swift.Int { get } + static var ivRange: Swift.Range<Swift.Int> { get } +} +@_hasMissingDesignatedInitializers final public class AEADChaCha20Poly1305 : HHSDKVideo.AEAD { + public static let kLen: Swift.Int + public static var ivRange: Swift.Range<Swift.Int> + public static func encrypt(_ plainText: Swift.Array<Swift.UInt8>, key: Swift.Array<Swift.UInt8>, iv: Swift.Array<Swift.UInt8>, authenticationHeader: Swift.Array<Swift.UInt8>) throws -> (cipherText: Swift.Array<Swift.UInt8>, authenticationTag: 
Swift.Array<Swift.UInt8>) + public static func decrypt(_ cipherText: Swift.Array<Swift.UInt8>, key: Swift.Array<Swift.UInt8>, iv: Swift.Array<Swift.UInt8>, authenticationHeader: Swift.Array<Swift.UInt8>, authenticationTag: Swift.Array<Swift.UInt8>) throws -> (plainText: Swift.Array<Swift.UInt8>, success: Swift.Bool) + @objc deinit +} +final public class AES { + public enum Error : Swift.Error { + case invalidKeySize + case dataPaddingRequired + case invalidData + public static func == (a: HHSDKVideo.AES.Error, b: HHSDKVideo.AES.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public enum Variant : Swift.Int { + case aes128, aes192, aes256 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + public static let blockSize: Swift.Int + final public let keySize: Swift.Int + final public let variant: HHSDKVideo.AES.Variant + public init(key: Swift.Array<Swift.UInt8>, blockMode: HHSDKVideo.BlockMode, padding: HHSDKVideo.Padding = .pkcs7) throws + @objc deinit +} +extension AES : HHSDKVideo.Cipher { + final public func encrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + final public func decrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +extension AES { + convenience public init(key: Swift.String, iv: Swift.String, padding: HHSDKVideo.Padding = .pkcs7) throws +} +extension AES : HHSDKVideo.Cryptors { + final public func makeEncryptor() throws -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable + final public func makeDecryptor() throws -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable +} +extension Array where Element == Swift.UInt8 { + public init(hex: Swift.String) + public func toHexString() -> Swift.String +} +extension Array where Element == Swift.UInt8 { + @available(*, deprecated) + public func chunks(size chunksize: Swift.Int) -> Swift.Array<Swift.Array<Element>> 
+ public func md5() -> [Element] + public func sha1() -> [Element] + public func sha224() -> [Element] + public func sha256() -> [Element] + public func sha384() -> [Element] + public func sha512() -> [Element] + public func sha2(_ variant: HHSDKVideo.SHA2.Variant) -> [Element] + public func sha3(_ variant: HHSDKVideo.SHA3.Variant) -> [Element] + public func crc32(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.UInt32 + public func crc32c(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.UInt32 + public func crc16(seed: Swift.UInt16? = nil) -> Swift.UInt16 + public func encrypt(cipher: HHSDKVideo.Cipher) throws -> [Element] + public func decrypt(cipher: HHSDKVideo.Cipher) throws -> [Element] + public func authenticate<A>(with authenticator: A) throws -> [Element] where A : HHSDKVideo.CryptoAuthenticator +} +extension Array where Element == Swift.UInt8 { + public func toBase64() -> Swift.String? + public init(base64: Swift.String) +} +public protocol CryptoAuthenticator { + func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +public enum Bit : Swift.Int { + case zero + case one + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@_hasMissingDesignatedInitializers public class BlockDecryptor : HHSDKVideo.Cryptor, HHSDKVideo.Updatable { + public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> + public func seek(to position: Swift.Int) throws + @objc deinit +} +public typealias CipherOperationOnBlock = (Swift.ArraySlice<Swift.UInt8>) -> Swift.Array<Swift.UInt8>? 
+public protocol BlockMode { + var options: HHSDKVideo.BlockModeOption { get } + func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +public struct BlockModeOption : Swift.OptionSet { + public let rawValue: Swift.Int + public init(rawValue: Swift.Int) + public typealias ArrayLiteralElement = HHSDKVideo.BlockModeOption + public typealias Element = HHSDKVideo.BlockModeOption + public typealias RawValue = Swift.Int +} +final public class Blowfish { + public enum Error : Swift.Error { + case dataPaddingRequired + case invalidKeyOrInitializationVector + case invalidInitializationVector + case invalidBlockMode + public static func == (a: HHSDKVideo.Blowfish.Error, b: HHSDKVideo.Blowfish.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public static let blockSize: Swift.Int + final public let keySize: Swift.Int + public init(key: Swift.Array<Swift.UInt8>, blockMode: HHSDKVideo.BlockMode = CBC(iv: Array<UInt8>(repeating: 0, count: Blowfish.blockSize)), padding: HHSDKVideo.Padding) throws + @objc deinit +} +extension Blowfish : HHSDKVideo.Cipher { + final public func encrypt<C>(_ bytes: C) throws -> Swift.Array<Swift.UInt8> where C : Swift.Collection, C.Element == Swift.UInt8, C.Index == Swift.Int + final public func decrypt<C>(_ bytes: C) throws -> Swift.Array<Swift.UInt8> where C : Swift.Collection, C.Element == Swift.UInt8, C.Index == Swift.Int +} +extension Blowfish { + convenience public init(key: Swift.String, iv: Swift.String, padding: HHSDKVideo.Padding = .pkcs7) throws +} +@_hasMissingDesignatedInitializers public class BusyPics { + public static let `default`: HHSDKVideo.BusyPics + public func cacheImgs() + public func getImgs() -> [Foundation.URL?] 
+ @objc deinit +} +public struct CallDoctorModel : HHSDKVideo.Mappable { + public var doctor: HHSDKVideo.HHDoctorModel? + public var order: HHSDKVideo.OrderModel? + public var appoint: Swift.String? + public var pushFlowUrl: Swift.String? + public var realPatientUuid: Swift.Int? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct CBC : HHSDKVideo.BlockMode { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.CBC.Error, b: HHSDKVideo.CBC.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +@_inheritsConvenienceInitializers final public class CBCMAC : HHSDKVideo.CMAC { + override final public func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + override public init(key: Swift.Array<Swift.UInt8>) throws + @objc deinit +} +public struct CCM { + public enum Error : Swift.Error { + case invalidInitializationVector + case invalidParameter + case fail + public static func == (a: HHSDKVideo.CCM.Error, b: HHSDKVideo.CCM.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public var authenticationTag: Swift.Array<Swift.UInt8>? + public init(iv: Swift.Array<Swift.UInt8>, tagLength: Swift.Int, messageLength: Swift.Int, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? 
= nil) + public init(iv: Swift.Array<Swift.UInt8>, tagLength: Swift.Int, messageLength: Swift.Int, authenticationTag: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? = nil) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +public struct CFB : HHSDKVideo.BlockMode { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.CFB.Error, b: HHSDKVideo.CFB.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +final public class ChaCha20 { + public enum Error : Swift.Error { + case invalidKeyOrInitializationVector + case notSupported + public static func == (a: HHSDKVideo.ChaCha20.Error, b: HHSDKVideo.ChaCha20.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public static let blockSize: Swift.Int + final public let keySize: Swift.Int + public init(key: Swift.Array<Swift.UInt8>, iv nonce: Swift.Array<Swift.UInt8>) throws + @objc deinit +} +extension ChaCha20 : HHSDKVideo.Cipher { + final public func encrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + final public func decrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +extension ChaCha20 { + public struct ChaChaEncryptor : HHSDKVideo.Cryptor, HHSDKVideo.Updatable { + public mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) 
throws -> Swift.Array<Swift.UInt8> + public func seek(to: Swift.Int) throws + } +} +extension ChaCha20 { + public struct ChaChaDecryptor : HHSDKVideo.Cryptor, HHSDKVideo.Updatable { + public mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = true) throws -> Swift.Array<Swift.UInt8> + public func seek(to: Swift.Int) throws + } +} +extension ChaCha20 : HHSDKVideo.Cryptors { + final public func makeEncryptor() -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable + final public func makeDecryptor() -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable +} +extension ChaCha20 { + convenience public init(key: Swift.String, iv: Swift.String) throws +} +public struct ChatApi { +} +@_hasMissingDesignatedInitializers final public class Checksum { + @objc deinit +} +extension Checksum { + public static func crc32(_ message: Swift.Array<Swift.UInt8>, seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.UInt32 + public static func crc32c(_ message: Swift.Array<Swift.UInt8>, seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.UInt32 + public static func crc16(_ message: Swift.Array<Swift.UInt8>, seed: Swift.UInt16? 
= nil) -> Swift.UInt16 +} +public enum CipherError : Swift.Error { + case encrypt + case decrypt + public static func == (a: HHSDKVideo.CipherError, b: HHSDKVideo.CipherError) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +public protocol Cipher : AnyObject { + var keySize: Swift.Int { get } + func encrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + func encrypt(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + func decrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + func decrypt(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +extension Cipher { + public func encrypt(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + public func decrypt(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +public protocol CipherModeWorker { + var cipherOperation: HHSDKVideo.CipherOperationOnBlock { get } + var additionalBufferSize: Swift.Int { get } + mutating func encrypt(block plaintext: Swift.ArraySlice<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + mutating func decrypt(block ciphertext: Swift.ArraySlice<Swift.UInt8>) -> Swift.Array<Swift.UInt8> +} +public protocol BlockModeWorker : HHSDKVideo.CipherModeWorker { + var blockSize: Swift.Int { get } +} +public protocol CounterModeWorker : HHSDKVideo.CipherModeWorker { + associatedtype Counter + var counter: Self.Counter { get set } +} +public protocol SeekableModeWorker : HHSDKVideo.CipherModeWorker { + mutating func seek(to position: Swift.Int) throws +} +public protocol StreamModeWorker : HHSDKVideo.CipherModeWorker { +} +public protocol FinalizingEncryptModeWorker : HHSDKVideo.CipherModeWorker { + mutating func finalize(encrypt ciphertext: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.ArraySlice<Swift.UInt8> +} +public protocol FinalizingDecryptModeWorker : HHSDKVideo.CipherModeWorker { + @discardableResult + 
mutating func willDecryptLast(bytes ciphertext: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.ArraySlice<Swift.UInt8> + mutating func didDecryptLast(bytes plaintext: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.ArraySlice<Swift.UInt8> + mutating func finalize(decrypt plaintext: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.ArraySlice<Swift.UInt8> +} +public class CMAC : HHSDKVideo.CryptoAuthenticator { + public enum Error : Swift.Error { + case wrongKeyLength + public static func == (a: HHSDKVideo.CMAC.Error, b: HHSDKVideo.CMAC.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(key: Swift.Array<Swift.UInt8>) throws + public func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + public func authenticate(_ bytes: Swift.Array<Swift.UInt8>, cipher: HHSDKVideo.Cipher) throws -> Swift.Array<Swift.UInt8> + @objc deinit +} +open class CodableTransform<T> : HHSDKVideo.TransformType where T : Swift.Decodable, T : Swift.Encodable { + public typealias Object = T + public typealias JSON = Any + public init() + open func transformFromJSON(_ value: Any?) -> HHSDKVideo.CodableTransform<T>.Object? + open func transformToJSON(_ value: T?) -> HHSDKVideo.CodableTransform<T>.JSON? + @objc deinit +} +public struct CommentApi { +} +@objc @_inheritsConvenienceInitializers public class CommentBaseVC : UIKit.UIViewController { + @objc override dynamic public func viewWillAppear(_ animated: Swift.Bool) + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) 
+ @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class CommentVC : HHSDKVideo.CommentBaseVC { + @objc override dynamic public func viewDidLoad() + public static func show(_ orderId: Swift.String, docId: Swift.String, uuid: Swift.Int?, type: HHSDKVideo.HHCallType?, _ model: HHSDKVideo.HHGetQuesetionModel?) + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +public protocol Cryptor { + mutating func seek(to: Swift.Int) throws +} +public protocol Cryptors : AnyObject { + func makeEncryptor() throws -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable + func makeDecryptor() throws -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable + static func randomIV(_ blockSize: Swift.Int) -> Swift.Array<Swift.UInt8> +} +extension Cryptors { + public static func randomIV(_ count: Swift.Int) -> Swift.Array<Swift.UInt8> +} +public struct CTR { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.CTR.Error, b: HHSDKVideo.CTR.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>, counter: Swift.Int = 0) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +open class CustomDateFormatTransform : HHSDKVideo.DateFormatterTransform { + public init(formatString: Swift.String) + override public init(dateFormatter: Foundation.DateFormatter) + @objc deinit +} +extension Data { + public func checksum() -> Swift.UInt16 + public func md5() -> Foundation.Data + public func sha1() -> Foundation.Data + public func 
sha224() -> Foundation.Data + public func sha256() -> Foundation.Data + public func sha384() -> Foundation.Data + public func sha512() -> Foundation.Data + public func sha3(_ variant: HHSDKVideo.SHA3.Variant) -> Foundation.Data + public func crc32(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Foundation.Data + public func crc32c(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Foundation.Data + public func crc16(seed: Swift.UInt16? = nil) -> Foundation.Data + public func encrypt(cipher: HHSDKVideo.Cipher) throws -> Foundation.Data + public func decrypt(cipher: HHSDKVideo.Cipher) throws -> Foundation.Data + public func authenticate(with authenticator: HHSDKVideo.CryptoAuthenticator) throws -> Foundation.Data +} +extension Data { + public init(hex: Swift.String) + public var bytes: Swift.Array<Swift.UInt8> { + get + } + public func toHexString() -> Swift.String +} +open class DataTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.Data + public typealias JSON = Swift.String + public init() + open func transformFromJSON(_ value: Any?) -> Foundation.Data? + open func transformToJSON(_ value: Foundation.Data?) -> Swift.String? + @objc deinit +} +open class DateFormatterTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.Date + public typealias JSON = Swift.String + final public let dateFormatter: Foundation.DateFormatter + public init(dateFormatter: Foundation.DateFormatter) + open func transformFromJSON(_ value: Any?) -> Foundation.Date? + open func transformToJSON(_ value: Foundation.Date?) -> Swift.String? 
+ @objc deinit +} +open class DateTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.Date + public typealias JSON = Swift.Double + public enum Unit : Foundation.TimeInterval { + case seconds + case milliseconds + public init?(rawValue: Foundation.TimeInterval) + public typealias RawValue = Foundation.TimeInterval + public var rawValue: Foundation.TimeInterval { + get + } + } + public init(unit: HHSDKVideo.DateTransform.Unit = .seconds) + open func transformFromJSON(_ value: Any?) -> Foundation.Date? + open func transformToJSON(_ value: Foundation.Date?) -> Swift.Double? + @objc deinit +} +public struct DGElasticPullToRefreshConstants { + public static var WaveMaxHeight: CoreGraphics.CGFloat + public static var MinOffsetToPull: CoreGraphics.CGFloat + public static var LoadingContentInset: CoreGraphics.CGFloat + public static var LoadingViewSize: CoreGraphics.CGFloat +} +extension NSObject { + public func dg_addObserver(_ observer: ObjectiveC.NSObject, forKeyPath keyPath: Swift.String) + public func dg_removeObserver(_ observer: ObjectiveC.NSObject, forKeyPath keyPath: Swift.String) +} +extension UIScrollView { + public func dg_addPullToRefreshWithActionHandler(_ actionHandler: @escaping () -> Swift.Void, loadingView: HHSDKVideo.DGElasticPullToRefreshLoadingView?) 
+ public func dg_removePullToRefresh() + public func dg_setPullToRefreshBackgroundColor(_ color: UIKit.UIColor) + public func dg_setPullToRefreshFillColor(_ color: UIKit.UIColor) + public func dg_stopLoading() + public func dg_startLoading() +} +extension UIView { + public func dg_center(_ usePresentationLayerIfPossible: Swift.Bool) -> CoreGraphics.CGPoint +} +extension UIPanGestureRecognizer { + public func dg_resign() +} +extension UIGestureRecognizer.State { + public func dg_isAnyOf(_ values: [UIKit.UIGestureRecognizer.State]) -> Swift.Bool +} +@objc @_inheritsConvenienceInitializers open class DGElasticPullToRefreshLoadingView : UIKit.UIView { + @objc dynamic public init() + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + open func setPullProgress(_ progress: CoreGraphics.CGFloat) + open func startAnimating() + open func stopLoading() + @objc deinit +} +extension CGFloat { + public func toRadians() -> CoreGraphics.CGFloat + public func toDegrees() -> CoreGraphics.CGFloat +} +@objc open class DGElasticPullToRefreshLoadingViewCircle : HHSDKVideo.DGElasticPullToRefreshLoadingView { + @objc override dynamic public init() + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + override open func setPullProgress(_ progress: CoreGraphics.CGFloat) + override open func startAnimating() + override open func stopLoading() + @objc override dynamic open func tintColorDidChange() + @objc override dynamic open func layoutSubviews() + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public enum DGElasticPullToRefreshState : Swift.Int { + case stopped + case dragging + case animatingBounce + case loading + case animatingToStopped + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc @_hasMissingDesignatedInitializers open class DGElasticPullToRefreshView : 
UIKit.UIView { + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc deinit + @objc override dynamic open func observeValue(forKeyPath keyPath: Swift.String?, of object: Any?, change: [Foundation.NSKeyValueChangeKey : Any]?, context: Swift.UnsafeMutableRawPointer?) + @objc override dynamic open func layoutSubviews() + @objc override dynamic public init(frame: CoreGraphics.CGRect) +} +public struct DictionaryTransform<Key, Value> : HHSDKVideo.TransformType where Key : Swift.Hashable, Key : Swift.RawRepresentable, Value : HHSDKVideo.Mappable, Key.RawValue == Swift.String { + public init() + public func transformFromJSON(_ value: Any?) -> [Key : Value]? + public func transformToJSON(_ value: [Key : Value]?) -> Any? + public typealias JSON = Any + public typealias Object = Swift.Dictionary<Key, Value> +} +@available(*, renamed: "Digest") +public typealias Hash = HHSDKVideo.Digest +public struct Digest { + public static func md5(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha1(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha224(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha256(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha384(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha512(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha2(_ bytes: Swift.Array<Swift.UInt8>, variant: HHSDKVideo.SHA2.Variant) -> Swift.Array<Swift.UInt8> + public static func sha3(_ bytes: Swift.Array<Swift.UInt8>, variant: HHSDKVideo.SHA3.Variant) -> Swift.Array<Swift.UInt8> +} +public struct ECB : HHSDKVideo.BlockMode { + public let options: HHSDKVideo.BlockModeOption + public init() + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping 
HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +@objc @_inheritsConvenienceInitializers public class EKAccessoryNoteMessageView : UIKit.UIView { + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +public struct EKAlertMessage { + public enum ImagePosition { + case top + case left + public static func == (a: HHSDKVideo.EKAlertMessage.ImagePosition, b: HHSDKVideo.EKAlertMessage.ImagePosition) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let imagePosition: HHSDKVideo.EKAlertMessage.ImagePosition + public let simpleMessage: HHSDKVideo.EKSimpleMessage + public let buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent + public init(simpleMessage: HHSDKVideo.EKSimpleMessage, imagePosition: HHSDKVideo.EKAlertMessage.ImagePosition = .top, buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent) +} +@objc @_hasMissingDesignatedInitializers final public class EKAlertMessageView : HHSDKVideo.EKSimpleMessageView { + public init(with message: HHSDKVideo.EKAlertMessage) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc deinit +} +public struct EKAttributes { + public var name: Swift.String? 
+ public var windowLevel: HHSDKVideo.EKAttributes.WindowLevel + public var position: HHSDKVideo.EKAttributes.Position + public var precedence: HHSDKVideo.EKAttributes.Precedence + public var displayDuration: Swift.Double + public var positionConstraints: HHSDKVideo.EKAttributes.PositionConstraints + public var screenInteraction: HHSDKVideo.EKAttributes.UserInteraction + public var entryInteraction: HHSDKVideo.EKAttributes.UserInteraction + public var scroll: HHSDKVideo.EKAttributes.Scroll + public var hapticFeedbackType: HHSDKVideo.EKAttributes.NotificationHapticFeedback + public var lifecycleEvents: HHSDKVideo.EKAttributes.LifecycleEvents + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public var entryBackground: HHSDKVideo.EKAttributes.BackgroundStyle + public var screenBackground: HHSDKVideo.EKAttributes.BackgroundStyle + public var shadow: HHSDKVideo.EKAttributes.Shadow + public var roundCorners: HHSDKVideo.EKAttributes.RoundCorners + public var border: HHSDKVideo.EKAttributes.Border + public var statusBar: HHSDKVideo.EKAttributes.StatusBar + public var entranceAnimation: HHSDKVideo.EKAttributes.Animation + public var exitAnimation: HHSDKVideo.EKAttributes.Animation + public var popBehavior: HHSDKVideo.EKAttributes.PopBehavior { + get + set + } + public init() +} +extension EKAttributes { + public struct Animation : Swift.Equatable { + public struct Spring : Swift.Equatable { + public var damping: CoreGraphics.CGFloat + public var initialVelocity: CoreGraphics.CGFloat + public init(damping: CoreGraphics.CGFloat, initialVelocity: CoreGraphics.CGFloat) + public static func == (a: HHSDKVideo.EKAttributes.Animation.Spring, b: HHSDKVideo.EKAttributes.Animation.Spring) -> Swift.Bool + } + public struct RangeAnimation : Swift.Equatable { + public var duration: Foundation.TimeInterval + public var delay: Foundation.TimeInterval + public var start: CoreGraphics.CGFloat + public var end: CoreGraphics.CGFloat + public var spring: 
HHSDKVideo.EKAttributes.Animation.Spring? + public init(from start: CoreGraphics.CGFloat, to end: CoreGraphics.CGFloat, duration: Foundation.TimeInterval, delay: Foundation.TimeInterval = 0, spring: HHSDKVideo.EKAttributes.Animation.Spring? = nil) + public static func == (a: HHSDKVideo.EKAttributes.Animation.RangeAnimation, b: HHSDKVideo.EKAttributes.Animation.RangeAnimation) -> Swift.Bool + } + public struct Translate : Swift.Equatable { + public enum AnchorPosition : Swift.Equatable { + case top + case bottom + case automatic + public func hash(into hasher: inout Swift.Hasher) + public static func == (a: HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition, b: HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition) -> Swift.Bool + public var hashValue: Swift.Int { + get + } + } + public var duration: Foundation.TimeInterval + public var delay: Foundation.TimeInterval + public var anchorPosition: HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition + public var spring: HHSDKVideo.EKAttributes.Animation.Spring? + public init(duration: Foundation.TimeInterval, anchorPosition: HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition = .automatic, delay: Foundation.TimeInterval = 0, spring: HHSDKVideo.EKAttributes.Animation.Spring? = nil) + public static func == (a: HHSDKVideo.EKAttributes.Animation.Translate, b: HHSDKVideo.EKAttributes.Animation.Translate) -> Swift.Bool + } + public var translate: HHSDKVideo.EKAttributes.Animation.Translate? + public var scale: HHSDKVideo.EKAttributes.Animation.RangeAnimation? + public var fade: HHSDKVideo.EKAttributes.Animation.RangeAnimation? 
+ public var containsTranslation: Swift.Bool { + get + } + public var containsScale: Swift.Bool { + get + } + public var containsFade: Swift.Bool { + get + } + public var containsAnimation: Swift.Bool { + get + } + public var maxDelay: Foundation.TimeInterval { + get + } + public var maxDuration: Foundation.TimeInterval { + get + } + public var totalDuration: Foundation.TimeInterval { + get + } + public static var translation: HHSDKVideo.EKAttributes.Animation { + get + } + public static var none: HHSDKVideo.EKAttributes.Animation { + get + } + public init(translate: HHSDKVideo.EKAttributes.Animation.Translate? = nil, scale: HHSDKVideo.EKAttributes.Animation.RangeAnimation? = nil, fade: HHSDKVideo.EKAttributes.Animation.RangeAnimation? = nil) + public static func == (a: HHSDKVideo.EKAttributes.Animation, b: HHSDKVideo.EKAttributes.Animation) -> Swift.Bool + } +} +extension EKAttributes { + public enum BackgroundStyle : Swift.Equatable { + public struct BlurStyle : Swift.Equatable { + public static var extra: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle { + get + } + public static var standard: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle { + get + } + @available(iOS 10.0, *) + public static var prominent: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle { + get + } + public static var dark: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle { + get + } + public init(style: UIKit.UIBlurEffect.Style) + public init(light: UIKit.UIBlurEffect.Style, dark: UIKit.UIBlurEffect.Style) + public func blurStyle(for traits: UIKit.UITraitCollection, mode: HHSDKVideo.EKAttributes.DisplayMode) -> UIKit.UIBlurEffect.Style + public func blurEffect(for traits: UIKit.UITraitCollection, mode: HHSDKVideo.EKAttributes.DisplayMode) -> UIKit.UIBlurEffect + public static func == (a: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle, b: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle) -> Swift.Bool + } + public struct Gradient { + public var colors: [HHSDKVideo.EKColor] + public 
var startPoint: CoreGraphics.CGPoint + public var endPoint: CoreGraphics.CGPoint + public init(colors: [HHSDKVideo.EKColor], startPoint: CoreGraphics.CGPoint, endPoint: CoreGraphics.CGPoint) + } + case visualEffect(style: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle) + case color(color: HHSDKVideo.EKColor) + case gradient(gradient: HHSDKVideo.EKAttributes.BackgroundStyle.Gradient) + case image(image: UIKit.UIImage) + case clear + public static func == (lhs: HHSDKVideo.EKAttributes.BackgroundStyle, rhs: HHSDKVideo.EKAttributes.BackgroundStyle) -> Swift.Bool + } +} +extension EKAttributes { + public enum DisplayMode { + case inferred + case light + case dark + public static func == (a: HHSDKVideo.EKAttributes.DisplayMode, b: HHSDKVideo.EKAttributes.DisplayMode) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } +} +extension EKAttributes { + public typealias DisplayDuration = Foundation.TimeInterval +} +extension EKAttributes { + public enum RoundCorners { + case none + case all(radius: CoreGraphics.CGFloat) + case top(radius: CoreGraphics.CGFloat) + case bottom(radius: CoreGraphics.CGFloat) + } + public enum Border { + case none + case value(color: UIKit.UIColor, width: CoreGraphics.CGFloat) + } +} +extension EKAttributes { + public enum NotificationHapticFeedback { + case success + case warning + case error + case none + public static func == (a: HHSDKVideo.EKAttributes.NotificationHapticFeedback, b: HHSDKVideo.EKAttributes.NotificationHapticFeedback) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } +} +extension EKAttributes { + public struct LifecycleEvents { + public typealias Event = () -> Swift.Void + public var willAppear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? + public var didAppear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? + public var willDisappear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? 
+ public var didDisappear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? + public init(willAppear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? = nil, didAppear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? = nil, willDisappear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? = nil, didDisappear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? = nil) + } +} +extension EKAttributes { + public enum PopBehavior { + case overridden + case animated(animation: HHSDKVideo.EKAttributes.Animation) + public var isOverriden: Swift.Bool { + get + } + } +} +extension EKAttributes { + public enum Position { + case top + case bottom + case center + public var isTop: Swift.Bool { + get + } + public var isCenter: Swift.Bool { + get + } + public var isBottom: Swift.Bool { + get + } + public static func == (a: HHSDKVideo.EKAttributes.Position, b: HHSDKVideo.EKAttributes.Position) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } +} +extension EKAttributes { + public struct PositionConstraints { + public enum SafeArea { + case overridden + case empty(fillSafeArea: Swift.Bool) + public var isOverridden: Swift.Bool { + get + } + } + public enum Edge { + case ratio(value: CoreGraphics.CGFloat) + case offset(value: CoreGraphics.CGFloat) + case constant(value: CoreGraphics.CGFloat) + case intrinsic + public static var fill: HHSDKVideo.EKAttributes.PositionConstraints.Edge { + get + } + } + public struct Size { + public var width: HHSDKVideo.EKAttributes.PositionConstraints.Edge + public var height: HHSDKVideo.EKAttributes.PositionConstraints.Edge + public init(width: HHSDKVideo.EKAttributes.PositionConstraints.Edge, height: HHSDKVideo.EKAttributes.PositionConstraints.Edge) + public static var intrinsic: HHSDKVideo.EKAttributes.PositionConstraints.Size { + get + } + public static var sizeToWidth: HHSDKVideo.EKAttributes.PositionConstraints.Size { + get + } + public static var screen: 
HHSDKVideo.EKAttributes.PositionConstraints.Size { + get + } + } + public enum KeyboardRelation { + public struct Offset { + public var bottom: CoreGraphics.CGFloat + public var screenEdgeResistance: CoreGraphics.CGFloat? + public init(bottom: CoreGraphics.CGFloat = 0, screenEdgeResistance: CoreGraphics.CGFloat? = nil) + public static var none: HHSDKVideo.EKAttributes.PositionConstraints.KeyboardRelation.Offset { + get + } + } + case bind(offset: HHSDKVideo.EKAttributes.PositionConstraints.KeyboardRelation.Offset) + case unbind + public var isBound: Swift.Bool { + get + } + } + public struct Rotation { + public enum SupportedInterfaceOrientation { + case standard + case all + public static func == (a: HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation, b: HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public var isEnabled: Swift.Bool + public var supportedInterfaceOrientations: HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation + public init() + } + public var rotation: HHSDKVideo.EKAttributes.PositionConstraints.Rotation + public var keyboardRelation: HHSDKVideo.EKAttributes.PositionConstraints.KeyboardRelation + public var size: HHSDKVideo.EKAttributes.PositionConstraints.Size + public var maxSize: HHSDKVideo.EKAttributes.PositionConstraints.Size + public var verticalOffset: CoreGraphics.CGFloat + public var safeArea: HHSDKVideo.EKAttributes.PositionConstraints.SafeArea + public var hasVerticalOffset: Swift.Bool { + get + } + public static var float: HHSDKVideo.EKAttributes.PositionConstraints { + get + } + public static var fullWidth: HHSDKVideo.EKAttributes.PositionConstraints { + get + } + public static var fullScreen: HHSDKVideo.EKAttributes.PositionConstraints { + get + } + public init(verticalOffset: CoreGraphics.CGFloat = 0, size: 
HHSDKVideo.EKAttributes.PositionConstraints.Size = .sizeToWidth, maxSize: HHSDKVideo.EKAttributes.PositionConstraints.Size = .intrinsic) + } +} +extension EKAttributes { + public enum Precedence { + public struct Priority : Swift.Hashable, Swift.Equatable, Swift.RawRepresentable, Swift.Comparable { + public var rawValue: Swift.Int + public var hashValue: Swift.Int { + get + } + public init(_ rawValue: Swift.Int) + public init(rawValue: Swift.Int) + public static func == (lhs: HHSDKVideo.EKAttributes.Precedence.Priority, rhs: HHSDKVideo.EKAttributes.Precedence.Priority) -> Swift.Bool + public static func < (lhs: HHSDKVideo.EKAttributes.Precedence.Priority, rhs: HHSDKVideo.EKAttributes.Precedence.Priority) -> Swift.Bool + public typealias RawValue = Swift.Int + } + public enum QueueingHeuristic { + public static var value: HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic + case chronological + case priority + public static func == (a: HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic, b: HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + case override(priority: HHSDKVideo.EKAttributes.Precedence.Priority, dropEnqueuedEntries: Swift.Bool) + case enqueue(priority: HHSDKVideo.EKAttributes.Precedence.Priority) + public var priority: HHSDKVideo.EKAttributes.Precedence.Priority { + get + set + } + } +} +extension EKAttributes.Precedence.Priority { + public static let maxRawValue: Swift.Int + public static let highRawValue: Swift.Int + public static let normalRawValue: Swift.Int + public static let lowRawValue: Swift.Int + public static let minRawValue: Swift.Int + public static let max: HHSDKVideo.EKAttributes.Precedence.Priority + public static let high: HHSDKVideo.EKAttributes.Precedence.Priority + public static let normal: HHSDKVideo.EKAttributes.Precedence.Priority + public static let low: HHSDKVideo.EKAttributes.Precedence.Priority + public 
static let min: HHSDKVideo.EKAttributes.Precedence.Priority +} +extension EKAttributes { + public static var `default`: HHSDKVideo.EKAttributes + public static var toast: HHSDKVideo.EKAttributes { + get + } + public static var float: HHSDKVideo.EKAttributes { + get + } + public static var topFloat: HHSDKVideo.EKAttributes { + get + } + public static var bottomFloat: HHSDKVideo.EKAttributes { + get + } + public static var centerFloat: HHSDKVideo.EKAttributes { + get + } + public static var bottomToast: HHSDKVideo.EKAttributes { + get + } + public static var topToast: HHSDKVideo.EKAttributes { + get + } + public static var topNote: HHSDKVideo.EKAttributes { + get + } + public static var bottomNote: HHSDKVideo.EKAttributes { + get + } + public static var statusBar: HHSDKVideo.EKAttributes { + get + } +} +extension EKAttributes { + public enum Scroll { + public struct PullbackAnimation { + public var duration: Foundation.TimeInterval + public var damping: CoreGraphics.CGFloat + public var initialSpringVelocity: CoreGraphics.CGFloat + public init(duration: Foundation.TimeInterval, damping: CoreGraphics.CGFloat, initialSpringVelocity: CoreGraphics.CGFloat) + public static var jolt: HHSDKVideo.EKAttributes.Scroll.PullbackAnimation { + get + } + public static var easeOut: HHSDKVideo.EKAttributes.Scroll.PullbackAnimation { + get + } + } + case disabled + case edgeCrossingDisabled(swipeable: Swift.Bool) + case enabled(swipeable: Swift.Bool, pullbackAnimation: HHSDKVideo.EKAttributes.Scroll.PullbackAnimation) + } +} +extension EKAttributes { + public enum Shadow { + case none + case active(with: HHSDKVideo.EKAttributes.Shadow.Value) + public struct Value { + public let radius: CoreGraphics.CGFloat + public let opacity: Swift.Float + public let color: HHSDKVideo.EKColor + public let offset: CoreGraphics.CGSize + public init(color: HHSDKVideo.EKColor = .black, opacity: Swift.Float, radius: CoreGraphics.CGFloat, offset: CoreGraphics.CGSize = .zero) + } + } +} +extension 
EKAttributes { + public enum StatusBar { + public typealias Appearance = (visible: Swift.Bool, style: UIKit.UIStatusBarStyle) + case ignored + case hidden + case dark + case light + case inferred + public var appearance: HHSDKVideo.EKAttributes.StatusBar.Appearance { + get + } + public static func statusBar(by appearance: HHSDKVideo.EKAttributes.StatusBar.Appearance) -> HHSDKVideo.EKAttributes.StatusBar + public static var currentAppearance: HHSDKVideo.EKAttributes.StatusBar.Appearance { + get + } + public static var currentStatusBar: HHSDKVideo.EKAttributes.StatusBar { + get + } + public static func == (a: HHSDKVideo.EKAttributes.StatusBar, b: HHSDKVideo.EKAttributes.StatusBar) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } +} +extension EKAttributes { + public struct UserInteraction { + public typealias Action = () -> () + public enum Default { + case absorbTouches + case delayExit(by: Foundation.TimeInterval) + case dismissEntry + case forward + } + public var defaultAction: HHSDKVideo.EKAttributes.UserInteraction.Default + public var customTapActions: [HHSDKVideo.EKAttributes.UserInteraction.Action] + public init(defaultAction: HHSDKVideo.EKAttributes.UserInteraction.Default = .absorbTouches, customTapActions: [HHSDKVideo.EKAttributes.UserInteraction.Action] = []) + public static var dismiss: HHSDKVideo.EKAttributes.UserInteraction { + get + } + public static var forward: HHSDKVideo.EKAttributes.UserInteraction { + get + } + public static var absorbTouches: HHSDKVideo.EKAttributes.UserInteraction { + get + } + public static func delayExit(by delay: Foundation.TimeInterval) -> HHSDKVideo.EKAttributes.UserInteraction + } +} +extension EKAttributes { + public enum WindowLevel { + case alerts + case statusBar + case normal + case custom(level: UIKit.UIWindow.Level) + public var value: UIKit.UIWindow.Level { + get + } + } +} +@objc final public class EKButtonBarView : UIKit.UIView { + @objc 
required dynamic public init?(coder aDecoder: Foundation.NSCoder) + public init(with buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent) + @objc override final public func layoutSubviews() + final public func expand() + final public func compress() + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public struct EKColor : Swift.Equatable { + public var dark: UIKit.UIColor { + get + } + public var light: UIKit.UIColor { + get + } + public init(light: UIKit.UIColor, dark: UIKit.UIColor) + public init(_ unified: UIKit.UIColor) + public init(rgb: Swift.Int) + public init(red: Swift.Int, green: Swift.Int, blue: Swift.Int) + public func color(for traits: UIKit.UITraitCollection, mode: HHSDKVideo.EKAttributes.DisplayMode) -> UIKit.UIColor + public static func == (a: HHSDKVideo.EKColor, b: HHSDKVideo.EKColor) -> Swift.Bool +} +extension EKColor { + public var inverted: HHSDKVideo.EKColor { + get + } + public func with(alpha: CoreGraphics.CGFloat) -> HHSDKVideo.EKColor + public static var white: HHSDKVideo.EKColor { + get + } + public static var black: HHSDKVideo.EKColor { + get + } + public static var clear: HHSDKVideo.EKColor { + get + } + public static var standardBackground: HHSDKVideo.EKColor { + get + } + public static var standardContent: HHSDKVideo.EKColor { + get + } +} +@objc final public class EKFormMessageView : UIKit.UIView { + public init(with title: HHSDKVideo.EKProperty.LabelContent, textFieldsContent: [HHSDKVideo.EKProperty.TextFieldContent], buttonContent: HHSDKVideo.EKProperty.ButtonContent) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + final public func becomeFirstResponder(with textFieldIndex: Swift.Int) + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKImageNoteMessageView : HHSDKVideo.EKAccessoryNoteMessageView { + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + public init(with content: HHSDKVideo.EKProperty.LabelContent, imageContent: HHSDKVideo.EKProperty.ImageContent) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKMessageContentView : UIKit.UIView { + public var titleContent: HHSDKVideo.EKProperty.LabelContent! { + get + set + } + public var subtitleContent: HHSDKVideo.EKProperty.LabelContent! { + get + set + } + public var titleAttributes: HHSDKVideo.EKProperty.LabelStyle! { + get + set + } + public var subtitleAttributes: HHSDKVideo.EKProperty.LabelStyle! { + get + set + } + public var title: Swift.String! { + get + set + } + public var subtitle: Swift.String! { + get + set + } + public var verticalMargins: CoreGraphics.CGFloat { + get + set + } + public var horizontalMargins: CoreGraphics.CGFloat { + get + set + } + public var labelsOffset: CoreGraphics.CGFloat { + get + set + } + @objc dynamic public init() + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKNoteMessageView : UIKit.UIView { + public var horizontalOffset: CoreGraphics.CGFloat { + get + set + } + public var verticalOffset: CoreGraphics.CGFloat { + get + set + } + public init(with content: HHSDKVideo.EKProperty.LabelContent) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public struct EKNotificationMessage { + public struct Insets { + public var contentInsets: UIKit.UIEdgeInsets + public var titleToDescription: CoreGraphics.CGFloat + public static var `default`: HHSDKVideo.EKNotificationMessage.Insets + } + public let simpleMessage: HHSDKVideo.EKSimpleMessage + public let auxiliary: HHSDKVideo.EKProperty.LabelContent? + public let insets: HHSDKVideo.EKNotificationMessage.Insets + public init(simpleMessage: HHSDKVideo.EKSimpleMessage, auxiliary: HHSDKVideo.EKProperty.LabelContent? = nil, insets: HHSDKVideo.EKNotificationMessage.Insets = .default) +} +@objc @_hasMissingDesignatedInitializers final public class EKNotificationMessageView : HHSDKVideo.EKSimpleMessageView { + public init(with message: HHSDKVideo.EKNotificationMessage) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc deinit +} +public struct EKPopUpMessage { + public typealias EKPopUpMessageAction = () -> () + public struct ThemeImage { + public enum Position { + case topToTop(offset: CoreGraphics.CGFloat) + case centerToTop(offset: CoreGraphics.CGFloat) + } + public var image: HHSDKVideo.EKProperty.ImageContent + public var position: HHSDKVideo.EKPopUpMessage.ThemeImage.Position + public init(image: HHSDKVideo.EKProperty.ImageContent, position: HHSDKVideo.EKPopUpMessage.ThemeImage.Position = .topToTop(offset: 40)) + } + public var themeImage: HHSDKVideo.EKPopUpMessage.ThemeImage? + public var title: HHSDKVideo.EKProperty.LabelContent + public var description: HHSDKVideo.EKProperty.LabelContent + public var button: HHSDKVideo.EKProperty.ButtonContent + public var action: HHSDKVideo.EKPopUpMessage.EKPopUpMessageAction + public init(themeImage: HHSDKVideo.EKPopUpMessage.ThemeImage? = nil, title: HHSDKVideo.EKProperty.LabelContent, description: HHSDKVideo.EKProperty.LabelContent, button: HHSDKVideo.EKProperty.ButtonContent, action: @escaping HHSDKVideo.EKPopUpMessage.EKPopUpMessageAction) +} +@objc final public class EKPopUpMessageView : UIKit.UIView { + public init(with message: HHSDKVideo.EKPopUpMessage) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKProcessingNoteMessageView : HHSDKVideo.EKAccessoryNoteMessageView { + public var isProcessing: Swift.Bool { + get + set + } + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + public init(with content: HHSDKVideo.EKProperty.LabelContent, activityIndicator: UIKit.UIActivityIndicatorView.Style) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public struct EKProperty { + public struct ButtonContent { + public typealias Action = () -> () + public var label: HHSDKVideo.EKProperty.LabelContent + public var backgroundColor: HHSDKVideo.EKColor + public var highlightedBackgroundColor: HHSDKVideo.EKColor + public var contentEdgeInset: CoreGraphics.CGFloat + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public var accessibilityIdentifier: Swift.String? + public var action: HHSDKVideo.EKProperty.ButtonContent.Action? + public init(label: HHSDKVideo.EKProperty.LabelContent, backgroundColor: HHSDKVideo.EKColor, highlightedBackgroundColor: HHSDKVideo.EKColor, contentEdgeInset: CoreGraphics.CGFloat = 5, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, accessibilityIdentifier: Swift.String? = nil, action: @escaping HHSDKVideo.EKProperty.ButtonContent.Action = {}) + public func backgroundColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + public func highlightedBackgroundColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + public func highlighedLabelColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + } + public struct LabelContent { + public var text: Swift.String + public var style: HHSDKVideo.EKProperty.LabelStyle + public var accessibilityIdentifier: Swift.String? + public init(text: Swift.String, style: HHSDKVideo.EKProperty.LabelStyle, accessibilityIdentifier: Swift.String? 
= nil) + } + public struct LabelStyle { + public var font: UIKit.UIFont + public var color: HHSDKVideo.EKColor + public var alignment: UIKit.NSTextAlignment + public var numberOfLines: Swift.Int + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public init(font: UIKit.UIFont, color: HHSDKVideo.EKColor, alignment: UIKit.NSTextAlignment = .left, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, numberOfLines: Swift.Int = 0) + public func color(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + } + public struct ImageContent { + public enum TransformAnimation { + case animate(duration: Foundation.TimeInterval, options: UIKit.UIView.AnimationOptions, transform: CoreGraphics.CGAffineTransform) + case none + } + public var tint: HHSDKVideo.EKColor? + public var images: [UIKit.UIImage] + public var imageSequenceAnimationDuration: Foundation.TimeInterval + public var size: CoreGraphics.CGSize? + public var contentMode: UIKit.UIView.ContentMode + public var makesRound: Swift.Bool + public var animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public var accessibilityIdentifier: Swift.String? + public init(imageName: Swift.String, animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation = .none, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, size: CoreGraphics.CGSize? = nil, contentMode: UIKit.UIView.ContentMode = .scaleToFill, tint: HHSDKVideo.EKColor? = nil, makesRound: Swift.Bool = false, accessibilityIdentifier: Swift.String? = nil) + public init(image: UIKit.UIImage, animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation = .none, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, size: CoreGraphics.CGSize? = nil, tint: HHSDKVideo.EKColor? = nil, contentMode: UIKit.UIView.ContentMode = .scaleToFill, makesRound: Swift.Bool = false, accessibilityIdentifier: Swift.String? 
= nil) + public init(images: [UIKit.UIImage], imageSequenceAnimationDuration: Foundation.TimeInterval = 1, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation = .none, size: CoreGraphics.CGSize? = nil, tint: HHSDKVideo.EKColor? = nil, contentMode: UIKit.UIView.ContentMode = .scaleToFill, makesRound: Swift.Bool = false, accessibilityIdentifier: Swift.String? = nil) + public init(imagesNames: [Swift.String], imageSequenceAnimationDuration: Foundation.TimeInterval = 1, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation = .none, size: CoreGraphics.CGSize? = nil, tint: HHSDKVideo.EKColor? = nil, contentMode: UIKit.UIView.ContentMode = .scaleToFill, makesRound: Swift.Bool = false, accessibilityIdentifier: Swift.String? = nil) + public static func thumb(with image: UIKit.UIImage, edgeSize: CoreGraphics.CGFloat) -> HHSDKVideo.EKProperty.ImageContent + public static func thumb(with imageName: Swift.String, edgeSize: CoreGraphics.CGFloat) -> HHSDKVideo.EKProperty.ImageContent + public func tintColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor? + } + public struct TextFieldContent { + weak public var delegate: UIKit.UITextFieldDelegate? + public var keyboardType: UIKit.UIKeyboardType + public var isSecure: Swift.Bool + public var leadingImage: UIKit.UIImage! + public var placeholder: HHSDKVideo.EKProperty.LabelContent + public var textStyle: HHSDKVideo.EKProperty.LabelStyle + public var tintColor: HHSDKVideo.EKColor! + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public var bottomBorderColor: HHSDKVideo.EKColor + public var accessibilityIdentifier: Swift.String? + public var textContent: Swift.String { + get + set + } + public init(delegate: UIKit.UITextFieldDelegate? 
= nil, keyboardType: UIKit.UIKeyboardType = .default, placeholder: HHSDKVideo.EKProperty.LabelContent, tintColor: HHSDKVideo.EKColor? = nil, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, textStyle: HHSDKVideo.EKProperty.LabelStyle, isSecure: Swift.Bool = false, leadingImage: UIKit.UIImage? = nil, bottomBorderColor: HHSDKVideo.EKColor = .clear, accessibilityIdentifier: Swift.String? = nil) + public func tintColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor? + public func bottomBorderColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor? + } + public struct ButtonBarContent { + public var content: [HHSDKVideo.EKProperty.ButtonContent] + public var separatorColor: HHSDKVideo.EKColor + public var horizontalDistributionThreshold: Swift.Int + public var expandAnimatedly: Swift.Bool + public var buttonHeight: CoreGraphics.CGFloat + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public init(with buttonContents: HHSDKVideo.EKProperty.ButtonContent..., separatorColor: HHSDKVideo.EKColor, horizontalDistributionThreshold: Swift.Int = 2, buttonHeight: CoreGraphics.CGFloat = 50, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, expandAnimatedly: Swift.Bool) + public init(with buttonContents: [HHSDKVideo.EKProperty.ButtonContent], separatorColor: HHSDKVideo.EKColor, horizontalDistributionThreshold: Swift.Int = 2, buttonHeight: CoreGraphics.CGFloat = 50, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, expandAnimatedly: Swift.Bool) + public func separatorColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + } + public struct EKRatingItemContent { + public var title: HHSDKVideo.EKProperty.LabelContent + public var description: HHSDKVideo.EKProperty.LabelContent + public var unselectedImage: HHSDKVideo.EKProperty.ImageContent + public var selectedImage: HHSDKVideo.EKProperty.ImageContent + public var size: CoreGraphics.CGSize + public init(title: 
HHSDKVideo.EKProperty.LabelContent, description: HHSDKVideo.EKProperty.LabelContent, unselectedImage: HHSDKVideo.EKProperty.ImageContent, selectedImage: HHSDKVideo.EKProperty.ImageContent, size: CoreGraphics.CGSize = CGSize(width: 50, height: 50)) + } +} +public struct EKRatingMessage { + public typealias Selection = (Swift.Int) -> Swift.Void + public var initialTitle: HHSDKVideo.EKProperty.LabelContent + public var initialDescription: HHSDKVideo.EKProperty.LabelContent + public var ratingItems: [HHSDKVideo.EKProperty.EKRatingItemContent] + public var buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent + public var selection: HHSDKVideo.EKRatingMessage.Selection! + public var selectedIndex: Swift.Int? { + get + set + } + public init(initialTitle: HHSDKVideo.EKProperty.LabelContent, initialDescription: HHSDKVideo.EKProperty.LabelContent, ratingItems: [HHSDKVideo.EKProperty.EKRatingItemContent], buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent, selection: HHSDKVideo.EKRatingMessage.Selection? 
= nil) +} +@objc final public class EKRatingMessageView : UIKit.UIView { + public init(with message: HHSDKVideo.EKRatingMessage) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc @_inheritsConvenienceInitializers final public class EKRatingSymbolsContainerView : UIKit.UIView { + final public func setup(with message: HHSDKVideo.EKRatingMessage, externalSelection: @escaping HHSDKVideo.EKRatingMessage.Selection) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +@objc final public class EKRatingSymbolView : UIKit.UIView { + final public var isSelected: Swift.Bool { + get + set + } + public init(unselectedImage: HHSDKVideo.EKProperty.ImageContent, selectedImage: HHSDKVideo.EKProperty.ImageContent, selection: @escaping HHSDKVideo.EKRatingMessage.Selection) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public struct EKSimpleMessage { + public let image: HHSDKVideo.EKProperty.ImageContent? + public let title: HHSDKVideo.EKProperty.LabelContent + public let description: HHSDKVideo.EKProperty.LabelContent + public init(image: HHSDKVideo.EKProperty.ImageContent? = nil, title: HHSDKVideo.EKProperty.LabelContent, description: HHSDKVideo.EKProperty.LabelContent) +} +@objc @_hasMissingDesignatedInitializers public class EKSimpleMessageView : UIKit.UIView { + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc final public class EKTextField : UIKit.UIView { + final public var text: Swift.String { + get + set + } + public init(with content: HHSDKVideo.EKProperty.TextFieldContent) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + final public func makeFirstResponder() + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKXStatusBarMessageView : UIKit.UIView { + public init(leading: HHSDKVideo.EKProperty.LabelContent, trailing: HHSDKVideo.EKProperty.LabelContent) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public func <- <T>(left: inout T, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: T, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: T?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout [T], right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: [T], right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout [T]?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: [T]?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout [Swift.String : T], right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: [Swift.String : T], right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout [Swift.String : T]?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: [Swift.String : T]?, right: 
HHSDKVideo.Map) where T : Swift.RawRepresentable +open class EnumTransform<T> : HHSDKVideo.TransformType where T : Swift.RawRepresentable { + public typealias Object = T + public typealias JSON = T.RawValue + public init() + open func transformFromJSON(_ value: Any?) -> T? + open func transformToJSON(_ value: T?) -> T.RawValue? + @objc deinit +} +final public class GCM : HHSDKVideo.BlockMode { + public enum Mode { + case combined + case detached + public static func == (a: HHSDKVideo.GCM.Mode, b: HHSDKVideo.GCM.Mode) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + final public let options: HHSDKVideo.BlockModeOption + public enum Error : Swift.Error { + case invalidInitializationVector + case fail + public static func == (a: HHSDKVideo.GCM.Error, b: HHSDKVideo.GCM.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + final public var authenticationTag: Swift.Array<Swift.UInt8>? + public init(iv: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? = nil, tagLength: Swift.Int = 16, mode: HHSDKVideo.GCM.Mode = .detached) + convenience public init(iv: Swift.Array<Swift.UInt8>, authenticationTag: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? = nil, mode: HHSDKVideo.GCM.Mode = .detached) + final public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker + @objc deinit +} +open class HexColorTransform : HHSDKVideo.TransformType { + public typealias Object = UIKit.UIColor + public typealias JSON = Swift.String + public init(prefixToJSON: Swift.Bool = false, alphaToJSON: Swift.Bool = false) + open func transformFromJSON(_ value: Any?) -> HHSDKVideo.HexColorTransform.Object? 
+ open func transformToJSON(_ value: HHSDKVideo.HexColorTransform.Object?) -> HHSDKVideo.HexColorTransform.JSON? + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHAppProtocolCheck : ObjectiveC.NSObject { + public static let instance: HHSDKVideo.HHAppProtocolCheck + @objc override dynamic public init() + public func showPrivacyDialog(content: Swift.String, userDoc: Swift.String, privateDoc: Swift.String, _ agreeBlock: ((Swift.Bool) -> Swift.Void)?) + @objc deinit +} +extension HHAppProtocolCheck : UIKit.UITextViewDelegate { + @objc dynamic public func textView(_ textView: UIKit.UITextView, shouldInteractWith URL: Foundation.URL, in characterRange: Foundation.NSRange, interaction: UIKit.UITextItemInteraction) -> Swift.Bool +} +extension Array { + public subscript(safe index: Swift.Int) -> Element? { + get + } +} +public struct HHBaseApi { +} +@propertyWrapper public struct ApiConfig { + public var wrappedValue: HHSDKVideo.HHBaseApi { + get + } + public init(path: Swift.String, method: HHSDKVideo.HHRequestMethod = .post, host: Swift.String = HHUrl.baseUrl(), domain: Swift.String = HHUrl.urlForFamily(), needUserInfo: Swift.Bool = true, needEncrypt: Swift.Bool = true, needDNS: Swift.Bool = true) +} +public typealias HHLoginHandler = ((Swift.String?) -> Swift.Void) +public var HMHudManager: HHSDKVideo.HHHUDable { + get +} +@_inheritsConvenienceInitializers @objc public class HHBaseSDK : ObjectiveC.NSObject { + @objc public static let `default`: HHSDKVideo.HHBaseSDK + public var dnsCallback: HHSDKVideo.HHDNSProtocal? + @objc public func start() + @objc public func login(userToken: Swift.String, completion: @escaping HHSDKVideo.HHLoginHandler) + @objc public func logout(_ callback: ((Swift.String?) -> Swift.Void)? 
= nil) + @objc override dynamic public init() + @objc deinit +} +@objc public enum HHBaseCallingState : Swift.Int { + case onStart = 0 + case waitingDoctor + case callFreeDoctor + case callConnect + case didRing + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc public protocol HHBaseVideoDelegate : ObjectiveC.NSObjectProtocol { + @objc func callStateChange(_ state: HHSDKVideo.HHBaseCallingState) + @objc optional func onStart(orderId: Swift.String?) + @objc func callDidEstablish() + @objc func getChatParentView(_ view: UIKit.UIView) + @objc func callFail(code: Swift.Int, error: Swift.String) + @objc func onFail(_ errorCode: Swift.Int, errrorStr: Swift.String?) + @objc func onCancel() + @objc func receivedOrder(_ orderId: Swift.String) + @objc func callDidFinish() + @objc func onExtensionDoctor() + @objc func onReceive(_ callID: Swift.String) + @objc func onResponse(_ accept: Swift.Bool) + @objc func onLeakPermission(_ type: HHSDKVideo.HHBasePermissionType) + @objc optional func onForceOffline() +} +@objc public protocol HHCallDelegate : ObjectiveC.NSObjectProtocol { + @objc optional func onCallStatus(_ error: Swift.Error?) + @objc optional func onCallSuccess() + @objc optional func callFinished() +} +@_inheritsConvenienceInitializers @objc public class HHCallerInfo : ObjectiveC.NSObject, HHSDKVideo.Mappable { + public var name: Swift.String? + public var photourl: Swift.String? + public var uuid: Swift.Int? + public var userToken: Swift.String? + @objc override dynamic public init() + required public init?(map: HHSDKVideo.Map) + public func mapping(map: HHSDKVideo.Map) + @objc deinit +} +public class HHCameraConfig { + weak public var sender: UIKit.UIViewController! + public var mediaType: HHSDKVideo.HHMediaType + public var isGrayCam: Swift.Bool + public var canReduce: Swift.Bool + public var autoUpload: Swift.Bool + public var maxCount: Swift.Int? 
+ public var crop: HHSDKVideo.onCropFinish? + public var canceled: HHSDKVideo.onCanceled? + public init() + public func build(_ block: (inout HHSDKVideo.HHCameraConfig) -> Swift.Void) -> HHSDKVideo.HHCameraConfig + @objc deinit +} +public let HHSDKScreenWidth: CoreGraphics.CGFloat +public let HHSDKScreenHeight: CoreGraphics.CGFloat +public let China_Flag: Swift.String +public struct HHDimens { + public static func isPad() -> Swift.Bool + public static func isPlus() -> Swift.Bool +} +public func HHColor(_ red: CoreGraphics.CGFloat, green: CoreGraphics.CGFloat, blue: CoreGraphics.CGFloat, alpha: CoreGraphics.CGFloat = 1.0) -> UIKit.UIColor +public func HHUISingleColor(_ value: CoreGraphics.CGFloat, alpha: CoreGraphics.CGFloat = 1.0) -> UIKit.UIColor +public func visibleWindow() -> UIKit.UIWindow? +public func imageWithColor(color: UIKit.UIColor) -> UIKit.UIImage? +public func delayFunc(_ time: Swift.Double, block: @escaping () -> Swift.Void) +public func appLanguage() -> Swift.String +public func isChina() -> Swift.Bool +@_hasMissingDesignatedInitializers public class HHDevice { + public static func isIphoneX() -> Swift.Bool + public static func botOffset() -> CoreGraphics.CGFloat + public static func tOffset() -> CoreGraphics.CGFloat + public class func isSml() -> Swift.Bool + public class func isMid() -> Swift.Bool + public class func isPlus() -> Swift.Bool + public class func isX() -> Swift.Bool + public static func iphoneType() -> Swift.String + @objc deinit +} +public typealias HHFetchBlock = (UIKit.UIImage?, [Swift.AnyHashable : Any]?) -> Swift.Void +public typealias onCanceled = (() -> Swift.Void) +public typealias onCapFinished = (([HHSDKVideo.SDKCameraImageModel]?) -> Swift.Void) +public typealias onCropFinish = (UIKit.UIImage, Swift.String?) 
-> Swift.Void +public enum HHMediaType : Swift.Int { + case cusCamera + case sysCamera + case cusVideo + case sysVideo + case photoImage + case photoVideo + case cusPhoto + case sysCrop + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +open class HHDataController<T> where T : HHSDKVideo.Mappable { + open var mData: T? + public init() + open func request(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) + open func emptyRequest(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) + open func noDataRequest(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) + open func request<E>(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: ((E) -> Swift.Void)?, fail: HHSDKVideo.HHNetError? = nil) where E : HHSDKVideo.Mappable + @objc deinit +} +extension Date { + public static func currentDate() -> Foundation.Date +} +public enum DateFormat : Swift.String { + case Full + case SingleDate + case Single + case WithoutSecond + case WithoutYearAndSecond + case HourMinute + case CN_Month_Day + case CN_Hour_Minute + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +@objc @_inheritsConvenienceInitializers public class HHDateUtils : ObjectiveC.NSObject { + public class func getDateForChinaStr() -> Swift.String + public static func stringWithDurationFromSeconds(_ seconds: Foundation.TimeInterval) -> Swift.String + public static func component(_ date: Foundation.Date) -> Foundation.DateComponents + @objc override dynamic public init() + @objc deinit +} +extension HHDateUtils { + public class func date2String(_ date: Foundation.Date, format: Swift.String) -> Swift.String + public class func date2String(_ date: Foundation.Date, format: HHSDKVideo.DateFormat) -> Swift.String +} 
+extension HHDateUtils { + public class func string2Date(_ str: Swift.String, format: HHSDKVideo.DateFormat) -> Foundation.Date? + public class func string2Date(_ str: Swift.String, format: Swift.String) -> Foundation.Date? +} +extension HHDateUtils { + public static func dateStringFromNow(_ date: Swift.Int) -> Swift.String + public static func dateStringFromInt(_ date: Swift.Int) -> Swift.String + public static func dateYearStringFromInt(_ date: Swift.Int) -> Swift.String +} +@objc @_inheritsConvenienceInitializers open class HHDeviceManager : ObjectiveC.NSObject { + public static func jailBrokend() -> Swift.Bool + @objc override dynamic public init() + @objc deinit +} +public protocol HHDNSProtocal { + func changeHost(_ hostDomain: Swift.String) -> Swift.String + func requestHost(_ host: Swift.String, challenge: Foundation.URLAuthenticationChallenge, completion: @escaping (Foundation.URLSession.AuthChallengeDisposition, Foundation.URLCredential?) -> Swift.Void) +} +public typealias HHPriceInfo = (priceAttri: Foundation.NSMutableAttributedString, disPriceWidth: CoreGraphics.CGFloat?) +public struct HHDoctorModel : HHSDKVideo.Mappable { + public init?(map: HHSDKVideo.Map) + public var agentUuid: Swift.String? + public var createtime: Swift.String? + public var department: Swift.String? + public var deptid: Swift.Int? + public var disease: Swift.String? + public var diseaseList: [Swift.String]? + public var doctorid: Swift.String? + public var expertStatus: Swift.String? + public var expertVideoTime: Swift.String? + public var famExpertVideoPrice: Swift.Float? + public var famServices: Swift.Int? + public var famprovidetypes: Swift.String? + public var hhTitle: Swift.String? + public var hospital: Swift.String? + public var hospitalid: Swift.Int? + public var introduction: Swift.String? + public var isTest: Swift.String? + public var login: HHSDKVideo.LoginModel? + public var workyear: Swift.Int? + public var name: Swift.String? + public var photourl: Swift.String? 
+ public var price: Swift.Float? + public var providetype: Swift.String? + public var province: Swift.String? + public var service: Swift.String? + public var serviceTypeStatus: Swift.String? + public var speciality: Swift.String? + public var standardDeptid: Swift.Int? + public var standardDeptname: Swift.String? + public var standardid: Swift.Int? + public var subdept: Swift.String? + public var subdeptids: Swift.String? + public var title: Swift.String? + public var titleid: Swift.Int? + public var vedioTimeList: Swift.String? + public var videoprice: Swift.Float? + public var license: Swift.String? + public init() + public mutating func mapping(map: HHSDKVideo.Map) + public func isJianzhi() -> Swift.Bool + public func supportType(type: HHSDKVideo.HHConsType) -> Swift.Bool + public func getPrice() -> HHSDKVideo.HHPriceInfo? + public func isZhuanke() -> Swift.Bool +} +public struct LoginModel : HHSDKVideo.Mappable { + public var actionSource: Swift.String? + public var loginname: Swift.String? + public var name: Swift.String? + public var photourl: Swift.String? + public var uuid: Swift.Int? + public var videoToken: Swift.String? + public var phoneno: Swift.String? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +public enum HHConsType : Swift.String { + case normal + case expert_video + case feiDao + case video + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +public struct HHEmptyModel : HHSDKVideo.Mappable { + public init() + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +@_hasMissingDesignatedInitializers public class HHEncryptUtils { + public static func encrypto(key: Swift.String, content: Swift.String) -> Swift.String? + public static func decrypto(key: Swift.String, content: Swift.String) -> Swift.String? 
+ public static func encrypto(key: Swift.String, content: Foundation.Data) -> Foundation.Data? + public static func decrypto(key: Swift.String, content: Foundation.Data) -> Foundation.Data? + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHFileCacheManager : ObjectiveC.NSObject { + public enum HHAssetPathType { + case image + case video + case sound + case dicom + case fb + case other + case dataBase + public static func == (a: HHSDKVideo.HHFileCacheManager.HHAssetPathType, b: HHSDKVideo.HHFileCacheManager.HHAssetPathType) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public enum HHFileFormat : Swift.String { + case Jpg + case Png + case Jpeg + case webp + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } + } + @objc override dynamic public init() + @objc deinit +} +extension HHFileCacheManager { + public class func getFileFormat(_ name: Swift.String) -> HHSDKVideo.HHFileCacheManager.HHAssetPathType + public class func createSoundFilePath(_ aPath: Swift.String) -> Swift.String + public class func createDBPath(_ aPath: Swift.String) -> Swift.String + public class func assetsCachePath(_ pathType: HHSDKVideo.HHFileCacheManager.HHAssetPathType) -> Swift.String + public class func createImageFilePath(_ format: HHSDKVideo.HHFileCacheManager.HHFileFormat = .Jpg) -> Swift.String + public class func createVideoFilePath() -> Swift.String + public class func isWriteCache(_ path: Swift.String?, data: Foundation.Data?) -> Swift.Bool + public class func isWriteCache(_ path: Swift.String?, image: UIKit.UIImage, quality: CoreGraphics.CGFloat = 1.0) -> Swift.Bool + public class func getFilePath(_ name: Swift.String) -> Swift.String? 
+} +extension HHFileCacheManager { + public static func saveString2File(_ string: Swift.String?, fileName: Swift.String) + public static func stringFromFile(_ fileName: Swift.String) -> Swift.String? +} +extension FileManager { + public func addSkipBackupAttributeToItemAtURL(_ url: Foundation.URL) -> Swift.Bool +} +public var uploadManager: HHSDKVideo.UploadQueue { + get +} +@_hasMissingDesignatedInitializers public class UploadQueue { + @discardableResult + public func upload(files: [Swift.String], config: HHSDKVideo.SDKUploadConfig) -> HHSDKVideo.HHFileUploadManager + public func cancelAll(_ finished: (() -> Swift.Void)? = nil) + @objc deinit +} +public class HHFileUploadManager { + public var mFileQueue: [Swift.String] + public var config: HHSDKVideo.SDKUploadConfig! + public var mTransFile: Swift.String? + public var isUploading: Swift.Bool + public init(files: [Swift.String], config: HHSDKVideo.SDKUploadConfig) + public func uploadFile(_ file: [Swift.String]) + public func cancalFiles(_ files: [Swift.String], cancelFinish: ((Swift.String) -> Swift.Void)? = nil) + public func cancelAll(_ finished: (() -> Swift.Void)? = nil) + @objc deinit +} +@objc public protocol HHHUDable { + @objc optional var autoDismissDuration: Foundation.TimeInterval { get } + @objc func showHUD() + @objc func dismissHUD() + @objc func showSuccess(_ message: Swift.String?) + @objc func showError(_ messgae: Swift.String?) + @objc optional func setDismissDuration(_ duraion: Foundation.TimeInterval) +} +extension HHHUDable { + public var autoDismissDuration: Foundation.TimeInterval { + get + } + public func setDismissDuration(_ duraion: Foundation.TimeInterval) +} +@objc public protocol HHIM { + @objc func register(_ cerName: Swift.String?) + @objc func login(_ completion: ((Swift.String?) -> Swift.Void)?) + @objc func autoLogin(_ completion: ((Swift.String?) -> Swift.Void)?) + @objc func logout(_ callback: ((Swift.String?) -> Swift.Void)?) 
+ @objc func canVideo() -> Swift.Bool +} +public struct HHInviteDocModel : HHSDKVideo.Mappable { + public var orderId: Swift.String? + public var channelId: Swift.UInt64? + public var doctorId: Swift.String? + public var imageUrl: Swift.String? + public var signalingType: Swift.String? + public var width: CoreGraphics.CGFloat + public var height: CoreGraphics.CGFloat + public init?(map: HHSDKVideo.Map) + public init(_ info: HHSDKVideo.HHNetCallChatInfo, meetId: Swift.UInt64?) + public func isWhiteBoard() -> Swift.Bool + public func isMultyCall() -> Swift.Bool + public mutating func mapping(map: HHSDKVideo.Map) +} +@objc public enum HHLogMode : Swift.Int { + case error = 0 + case warn = 1 + case info = 2 + case debug = 3 + case verbose = 4 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +public func logging(type: HHSDKVideo.HHLogMode = .info, _ tip: Swift.String) +@objc @_inheritsConvenienceInitializers open class HHMediaStatusCheckUtils : ObjectiveC.NSObject { + open class func checkCameraAccess() -> Swift.Bool + open class func checkCameraVideoPermission() -> Swift.Bool + open class func checkAlbumAccess() -> Swift.Bool + open class func checkAudioAccess() -> Swift.Bool + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers @_hasMissingDesignatedInitializers open class HHMedicNetObserver : ObjectiveC.NSObject { + public static let sharedInstance: HHSDKVideo.HHMedicNetObserver + open func createReachability() + open func currentInWifi() -> Swift.Bool + open func haveNetWork() -> Swift.Bool + @objc deinit +} +@_hasMissingDesignatedInitializers public class HHMedicPhotoPicker { + public static func openCamera(config: HHSDKVideo.HHCameraConfig, capFinished: HHSDKVideo.onCapFinished? 
= nil) + public static func reduceImages(paths: [Swift.String], finished: @escaping (([Swift.String]) -> Swift.Void)) + public class func changeAvatar(vc: UIKit.UIViewController, reference: UIKit.UIView? = nil, uuid: Swift.Int, imgClosure: @escaping (UIKit.UIImage) -> Swift.Void, keyClosure: @escaping (Swift.String) -> Swift.Void) + @objc deinit +} +extension HHMedicPhotoPicker { + public static func checkPermisstion(_ type: HHSDKVideo.HHBasePermissionType, authorized: (() -> Swift.Void)?, others: ((HHSDKVideo.HHBasePermissionType) -> Swift.Void)?) + public static func converSize(_ size: CoreGraphics.CGSize) -> CoreGraphics.CGSize +} +extension HHMedicPhotoPicker : HHSDKVideo.HHPhotoPickerManagerDelegate { + public func selectImage(_ selectedImages: [UIKit.UIImage]) + public func cancelImage() + public func selectImageRequestError(_ errorAssets: [Photos.PHAsset], errorIndexs: [Swift.Int]) +} +@objc @_inheritsConvenienceInitializers @_hasMissingDesignatedInitializers public class HHNeedRealNameView : UIKit.UIView { + public var realNameLinkClourse: (() -> ())? 
+ @objc deinit +} +@_hasMissingDesignatedInitializers public class HHNetCallChatInfo { + public init() + @objc deinit +} +@objc public enum HHCallType : Swift.Int { + case child = 600000 + case adult = 600002 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +public enum HHServerType { + case pay + case pacs + case weixin + public static func == (a: HHSDKVideo.HHServerType, b: HHSDKVideo.HHServerType) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +public enum HHRequestMethod { + case get + case post + public static func == (a: HHSDKVideo.HHRequestMethod, b: HHSDKVideo.HHRequestMethod) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +public let HH_RELOGIN_NOTIFICATION_STR: Swift.String +public struct HHRequestData { + public init(body: [Swift.String : Any] = ["default_sw":"default"], param: [Swift.String : Any] = ["default_sw":"default"]) + public var mHttpBody: [Swift.String : Any] + public var mParameters: [Swift.String : Any] +} +@_hasMissingDesignatedInitializers public class HHNetFetch { + public static func request<E>(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: ((E) -> Swift.Void)?, fail: HHSDKVideo.HHNetError? = nil) where E : HHSDKVideo.Mappable + public static func requestArray<E>(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: (([E]?) -> Swift.Void)?, fail: HHSDKVideo.HHNetError? = nil) where E : HHSDKVideo.Mappable + public static func noDataRequest(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) + public static func emptyRequest(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) 
+ @objc deinit +} +extension UIControl.State : Swift.Hashable { + public var hashValue: Swift.Int { + get + } +} +@objc @_inheritsConvenienceInitializers @_hasMissingDesignatedInitializers @IBDesignable public class HHPagerView : UIKit.UIView, UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegate { + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public func layoutSubviews() + @objc override dynamic public func willMove(toWindow newWindow: UIKit.UIWindow?) + @objc override dynamic public func prepareForInterfaceBuilder() + @objc deinit + @objc public func numberOfSections(in collectionView: UIKit.UICollectionView) -> Swift.Int + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, shouldHighlightItemAt indexPath: Foundation.IndexPath) -> Swift.Bool + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, didHighlightItemAt indexPath: Foundation.IndexPath) + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, shouldSelectItemAt indexPath: Foundation.IndexPath) -> Swift.Bool + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, didSelectItemAt indexPath: Foundation.IndexPath) + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, willDisplay cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, didEndDisplaying cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) + @objc public func scrollViewDidScroll(_ scrollView: UIKit.UIScrollView) + @objc public func scrollViewWillBeginDragging(_ scrollView: 
UIKit.UIScrollView) + @objc public func scrollViewWillEndDragging(_ scrollView: UIKit.UIScrollView, withVelocity velocity: CoreGraphics.CGPoint, targetContentOffset: Swift.UnsafeMutablePointer<CoreGraphics.CGPoint>) + @objc public func scrollViewDidEndDecelerating(_ scrollView: UIKit.UIScrollView) + @objc public func scrollViewDidEndScrollingAnimation(_ scrollView: UIKit.UIScrollView) +} +@objc public enum HHPagerViewTransformerType : Swift.Int { + case crossFading + case zoomOut + case depth + case overlap + case linear + case coverFlow + case ferrisWheel + case invertedFerrisWheel + case cubic + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@_hasMissingDesignatedInitializers public class UrlParams { + public static func addUserParams(_ parameters: [Swift.String : Any]?) -> [Swift.String : Any]? + public static func addCommon(_ param: [Swift.String : Any]?) -> [Swift.String : Any] + public static func param2String(param: [Swift.String : Any]? = nil) -> Swift.String + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHPhotoPickerController : UIKit.UINavigationController { + @objc override dynamic public func viewDidLoad() + convenience public init(localPath: Swift.String? = nil, deleteMode: Swift.Bool = false, finish: (([HHSDKVideo.SDKCameraImageModel]?) -> Swift.Void)? = nil) + @objc deinit + @available(iOS 5.0, *) + @objc override dynamic public init(navigationBarClass: Swift.AnyClass?, toolbarClass: Swift.AnyClass?) + @objc override dynamic public init(rootViewController: UIKit.UIViewController) + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) 
+ @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) +} +public protocol HHPhotoPickerManagerDelegate { + func selectImage(_ selectedImages: [UIKit.UIImage]) + func cancelImage() + func selectImageRequestError(_ errorAssets: [Photos.PHAsset], errorIndexs: [Swift.Int]) +} +@objc public class HHPhotoPickerManager : ObjectiveC.NSObject { + public var viewDelegate: HHSDKVideo.HHPhotoPickerManagerDelegate? + public var photoConfigModel: HHSDKVideo.HHPhotoConfigModel + public var photoUIConfigModel: HHSDKVideo.HHPhotoUIConfigModel + required public init(showVC: UIKit.UIViewController) + public func showImagePicker() + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHPhotoConfigModel : ObjectiveC.NSObject { + public var maxPreviewCount: Swift.Int + public var maxSelectCount: Swift.Int { + get + set + } + public var minVideoSelectCount: Swift.Int { + get + set + } + public var maxVideoSelectCount: Swift.Int { + get + set + } + public var minSelectVideoDuration: Swift.Int + public var maxSelectVideoDuration: Swift.Int + public var cellCornerRadio: CoreGraphics.CGFloat + public var languageType: HHSDKVideo.ZLLanguageType { + get + set + } + public var columnCount: Swift.Int { + get + set + } + public var sortAscending: Swift.Bool + public var allowSelectImage: Swift.Bool + public var allowTakePhotoInLibrary: Swift.Bool + public var allowSelectOriginal: Swift.Bool + public var allowSelectGif: Swift.Bool + public var allowSelectVideo: Swift.Bool + public var allowSelectLivePhoto: Swift.Bool + public var allowEditImage: Swift.Bool + public var allowMixSelect: Swift.Bool + public var allowPreviewPhotos: Swift.Bool + public var editImageWithDraw: Swift.Bool + public var editImageWithClip: Swift.Bool + public var editImageWithImageSticker: Swift.Bool + public var editImageWithTextSticker: Swift.Bool + public var editImageWithMosaic: Swift.Bool + public var editImageWithFilter: Swift.Bool + public 
var editImageWithAdjust: Swift.Bool + public var editImageWitAdjustBrightness: Swift.Bool + public var editImageWitAdjustContrast: Swift.Bool + public var editImageWitAdjustSaturation: Swift.Bool + public var shouldAnialysisAsset: Swift.Bool + public var allowEditVideo: Swift.Bool { + get + set + } + public var saveNewImageAfterEdit: Swift.Bool + public var allowDragSelect: Swift.Bool + public var allowSlideSelect: Swift.Bool + public var autoScrollWhenSlideSelectIsActive: Swift.Bool + public var autoScrollMaxSpeed: CoreGraphics.CGFloat + public var showCaptureImageOnTakePhotoBtn: Swift.Bool + public var showSelectedIndex: Swift.Bool + public var showSelectedMask: Swift.Bool + public var showSelectedBorder: Swift.Bool + public var showInvalidMask: Swift.Bool + public var useCustomCamera: Swift.Bool + public var flashMode: HHSDKVideo.ZLCameraConfiguration.FlashMode + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHPhotoUIConfigModel : ObjectiveC.NSObject { + public var style: HHSDKVideo.ZLPhotoBrowserStyle + public var bottomToolViewBtnNormalBgColor: UIKit.UIColor + public var bottomToolViewBtnNormalBgColorOfPreviewVC: UIKit.UIColor + @objc public var indexLabelBgColor: UIKit.UIColor + @objc override dynamic public init() + @objc deinit +} +public class HHProgressHUD : HHSDKVideo.HHHUDable { + public init() + @objc public func showHUD() + @objc public func dismissHUD() + @objc public func showError(_ messgae: Swift.String?) + @objc public func showSuccess(_ message: Swift.String?) + public func hhMessageTips(message: Swift.String?) + @objc deinit +} +public struct HHGetQuesetionModel : HHSDKVideo.Mappable { + public var question: HHSDKVideo.HHQuesetionModel? + public var rate: [HHSDKVideo.rateModel]? 
+ public init?(map: HHSDKVideo.Map) + public init() + public mutating func mapping(map: HHSDKVideo.Map) + public func isHaveQ() -> Swift.Bool +} +public struct HHQuesetionModel : HHSDKVideo.Mappable { + public var answerOne: Swift.String? + public var answerTwo: Swift.String? + public var content: Swift.String? + public var id: Swift.Int? + public init?(map: HHSDKVideo.Map) + public init() + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct rateModel : HHSDKVideo.Mappable { + public var createTime: Swift.Int? + public var content: Swift.String? + public var state: Swift.Int? + public var id: Swift.Int? + public var answerOne: Swift.String? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +@objc public enum HHRealNameType : Swift.Int { + case normal, buyMedic + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc @_inheritsConvenienceInitializers public class HHRealNameInputNewView : UIKit.UIView { + @objc @IBOutlet weak public var idCardTF: UIKit.UITextField! + public class func createRealNameInputNewView(realNameType: HHSDKVideo.HHRealNameType, hideNickName: Swift.Bool = false) -> HHSDKVideo.HHRealNameInputNewView + public func showErroTip(tip: Swift.String) + public func getInpuValues() -> [Swift.String : Swift.String]? + @objc override dynamic public func awakeFromNib() + public func load(userModel: HHSDKVideo.HHUserModel?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +extension HHRealNameInputNewView : UIKit.UITextFieldDelegate { + @objc dynamic public func textField(_ textField: UIKit.UITextField, shouldChangeCharactersIn range: Foundation.NSRange, replacementString string: Swift.String) -> Swift.Bool + @objc dynamic public func textFieldDidBeginEditing(_ textField: UIKit.UITextField) +} +@_inheritsConvenienceInitializers @objc public class HHRealNameInputView : UIKit.UIView { + public var nickName: Swift.String { + get + set + } + public class func createRealNameInputView(realNameType: HHSDKVideo.HHRealNameType) -> HHSDKVideo.HHRealNameInputView + public var showPassPort: Swift.Bool { + get + set + } + public func showErroTip(tip: Swift.String) + public func getInpuValues() -> [Swift.String : Swift.String]? + @objc override dynamic public func awakeFromNib() + public func load(userModel: HHSDKVideo.HHUserModel?) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +extension HHRealNameInputView : UIKit.UITextFieldDelegate { + @objc dynamic public func textField(_ textField: UIKit.UITextField, shouldChangeCharactersIn range: Foundation.NSRange, replacementString string: Swift.String) -> Swift.Bool +} +public let KeyNetErrorStr: Swift.String +public let KeyNoNetErrorStr: Swift.String +public typealias HHNetError = ((Swift.String) -> (Swift.Void)) +public typealias HHNetSuccessNoData = () -> Swift.Void +public typealias NetResult = (Swift.Bool, Swift.String) -> (Swift.Void) +public class HHRequest<T> where T : HHSDKVideo.Mappable { + public typealias HHNetSuccess = (T) -> Swift.Void + public typealias HHNetSuccessForArray = ([T]?) -> Swift.Void + public var mRequestFail: HHSDKVideo.HHNetError? + public var mRequestSuccess: HHSDKVideo.HHRequest<T>.HHNetSuccess? 
+ public var mRequestSuccessNoData: HHSDKVideo.HHNetSuccessNoData? + public var mRequestSuccessForArray: HHSDKVideo.HHRequest<T>.HHNetSuccessForArray? + public var errorCode: Swift.Int? + public var mApi: HHSDKVideo.HHBaseApi? + required public init(api: HHSDKVideo.HHBaseApi, requestData: HHSDKVideo.HHRequestData? = nil, postData: Foundation.Data? = nil) + public func start() + public func cancel() + @objc deinit +} +extension HHRequest { + public func startForArray(_ successCallBack: @escaping HHSDKVideo.HHRequest<T>.HHNetSuccessForArray, failCallBack: @escaping HHSDKVideo.HHNetError) +} +@objc public protocol HHRTC { + @objc optional func setOrderId(orderId: Swift.String) + @objc optional func startCall(callee: Swift.String, orderId: Swift.String?) + @objc optional func enterRoom(orderId: Swift.String) + @objc optional func switchLocalAudio(_ isOpen: Swift.Bool) + @objc optional func switchLocalVideo(_ isOpen: Swift.Bool, localView: UIKit.UIView) + @objc optional func openDoctorView(userId: Swift.String, view: UIKit.UIView) + @objc optional func closeDoctorView(userId: Swift.String) + @objc optional func switchCamera(_ isFront: Swift.Bool) + @objc optional func switchCameraFlash(_ isOpen: Swift.Bool) + @objc optional func sendMsg(isSignal: Swift.Bool, cmd: Swift.String, to: Swift.String, complete: ((Swift.String?) -> Swift.Void)?) 
+ @objc optional func leaveRoom() + @objc optional func hangUp(callId: Swift.UInt64) + @objc optional func startRing(audioId: Swift.Int) + @objc optional func stopRing() + @objc optional func snapshotVideo(userId: Swift.String?, imageBack: @escaping (UIKit.UIImage) -> ()) +} +public protocol HHRTCDelegate : ObjectiveC.NSObject { + func onEnterRoom() + func checkHasAccept(_ isCmd: Swift.Bool, volumn: Swift.Int) + func switchVideo(_ isToAudio: Swift.Bool) + func onOtherViewAvailable(_ availableUserId: Swift.String, isAvailable: Swift.Bool) + func onRemoteUserEnterRoom(_ userId: Swift.String) + func onRemoteUserLeaveRoom(_ userId: Swift.String) + func sendRTCLog(action: HHSDKVideo.TrtcLog, ex: Swift.String) + func esdablishByRTC(error: HHSDKVideo.TrtcError, reason: Swift.String) + func processMsg(cmd: HHSDKVideo.HHIMCmd, orderId: Swift.String, uuid: Swift.String) + func waitingChanged(_ waitingInfo: HHSDKVideo.HHWaitDoctorModel) + func waitingSuccess(_ doctorInfo: HHSDKVideo.HHDoctorModel, orderId: Swift.String) + func onTransform(_ transInfo: HHSDKVideo.HHWaitDoctorModel) + func onExitRoom() + func hangup() + func getDoctorUserId() -> Swift.String? 
+ func resumeRemote() + func onFirstVideoFrame(_ userId: Swift.String?, width: Swift.Int32, height: Swift.Int32) +} +public enum TrtcLog : Swift.String { + case waitingRecall + case missMessage + case ignoreCall + case enterError + case doctorJoinRoom + case micDidReady + case netQuality + case signalError + case killEror + case netDown + case joinSuccess + case schedule + case noSchedule + case video_busy + case permit_error + case transform + case camera_close + case camera_open + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +public enum TrtcError : Swift.String { + case callTimeOut + case rtcError + case enterRoomFail + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +@_inheritsConvenienceInitializers @objc public class HHSDKBaseOptions : ObjectiveC.NSObject { + @objc public static let `default`: HHSDKVideo.HHSDKBaseOptions + @objc public var isDebug: Swift.Bool + @objc public var isDevelopment: Swift.Bool + @objc public var isSDK: Swift.Bool + @objc public var isTRTC: Swift.Bool + @objc public var sdkProductId: Swift.String + @objc public var appVersion: Swift.String + @objc public var needDNS: Swift.Bool + public var hudManager: HHSDKVideo.HHHUDable + @objc public var sdkVersion: Swift.String + @objc public var hudDisTime: Swift.Double { + @objc get + @objc set + } + @objc public func setConfig(_ sdkProductId: Swift.String, isDebug: Swift.Bool, isDevelopment: Swift.Bool, isTrtc: Swift.Bool, needDNS: Swift.Bool = false) + @objc override dynamic public init() + @objc deinit +} +@objc public protocol OptionProtocal { + @objc var hudDisTime: Foundation.TimeInterval { get set } + @objc var isDebug: Swift.Bool { get set } + @objc var isDevelopment: Swift.Bool { get set } + @objc var hudManager: HHSDKVideo.HHHUDable { get set } + @objc var productId: Swift.String { get set } + @objc var cerName: 
Swift.String? { get set } + @objc var logLevel: HHSDKVideo.HHLogMode { get set } + @objc var mExtension: Swift.String { get set } + @objc var changeDoctorTime: Swift.Int { get set } + @objc var logCallback: ((Swift.String) -> Swift.Void)? { get set } + @objc var mVideoOptions: HHSDKVideo.VideoOptions { get set } + @objc var mMessageOptions: HHSDKVideo.MessageOptions { get set } + @objc var mUserCenterOptions: HHSDKVideo.UsercenterOptions { get set } + @objc var sdkVersion: Swift.String { get set } + @objc var appVersion: Swift.String { get set } + @objc var isTRTC: Swift.Bool { get set } + @objc var needDNS: Swift.Bool { get set } + @objc var shouldWaingCall: Swift.Bool { get set } +} +public var HMDefaultOpt: HHSDKVideo.OptionProtocal { + get +} +@_inheritsConvenienceInitializers @objc public class VideoOptions : ObjectiveC.NSObject { + public var filterCallerInfo: Swift.Bool + @objc public var allowBeauty: Swift.Bool + @objc public var allowEvaluate: Swift.Bool + @objc public var allowAddMember: Swift.Bool + @objc public var allowMulti: Swift.Bool + public var mCallExtension: Swift.String + @objc public var isShowDocInfo: Swift.Bool + @objc public var enableCloseCamera: Swift.Bool + @objc public var isCloseCameraCall: Swift.Bool + @objc override dynamic public init() + @objc deinit +} +@_inheritsConvenienceInitializers @objc public class MessageOptions : ObjectiveC.NSObject { + @objc public var isByPresent: Swift.Bool + @objc public var isFilterSummary: Swift.Bool + @objc public var isFilterMedicinal: Swift.Bool + @objc public var defaultDocHeader: Swift.String + @objc public var defaultDocName: Swift.String + @objc public var messageTitle: Swift.String + @objc override dynamic public init() + @objc deinit +} +@_inheritsConvenienceInitializers @objc public class UsercenterOptions : ObjectiveC.NSObject { + @objc public var enableVipInfo: Swift.Bool + @objc public var hideUserCenter: Swift.Bool + @objc public var enableActivate: Swift.Bool + @objc public var 
enableMedical: Swift.Bool + @objc public var enableAddMemberInDoc: Swift.Bool + @objc public var enableBuyService: Swift.Bool + @objc public var hideNickName: Swift.Bool + @objc public var enablePopRealName: Swift.Bool + @objc public var isCloseMoreFunc: Swift.Bool + @objc override dynamic public init() + @objc deinit +} +@_hasMissingDesignatedInitializers public class HHStatics { + public static let `default`: HHSDKVideo.HHStatics + public func send(params: [Swift.String : Any]) + @objc deinit +} +public struct CommonApi { +} +extension String { + public func subFrom(_ index: Swift.Int) -> Swift.String + public func subTo(_ index: Swift.Int) -> Swift.String +} +extension String { + public func urlEncode() -> Swift.String + public func stringByAppendingPathComponent(_ pathComponent: Swift.String) -> Swift.String + public func hh_sha1() -> Swift.String + public func string2base64String() -> Swift.String + public func base64String2String() -> Swift.String + public var lastPathComponent: Swift.String { + get + } + public var pathExtension: Swift.String { + get + } +} +public enum hhToastPosition { + case top + case center + case bottom + public static func == (a: HHSDKVideo.hhToastPosition, b: HHSDKVideo.hhToastPosition) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +extension UIView { + public func hhmakeToast(_ message: Swift.String) + public func hhmakeToast(_ message: Swift.String, duration: Foundation.TimeInterval, position: HHSDKVideo.hhToastPosition) + public func hhmakeToast(_ message: Swift.String, duration: Foundation.TimeInterval, position: CoreGraphics.CGPoint) + public func hhmakeToast(_ message: Swift.String, duration: Foundation.TimeInterval, position: HHSDKVideo.hhToastPosition, style: HHSDKVideo.hhToastStyle?) + public func hhmakeToast(_ message: Swift.String, duration: Foundation.TimeInterval, position: CoreGraphics.CGPoint, style: HHSDKVideo.hhToastStyle?) 
+ public func hhmakeToast(_ message: Swift.String?, duration: Foundation.TimeInterval, position: HHSDKVideo.hhToastPosition, title: Swift.String?, image: UIKit.UIImage?, style: HHSDKVideo.hhToastStyle?, completion: ((Swift.Bool) -> Swift.Void)?) + public func hhmakeToast(_ message: Swift.String?, duration: Foundation.TimeInterval, position: CoreGraphics.CGPoint, title: Swift.String?, image: UIKit.UIImage?, style: HHSDKVideo.hhToastStyle?, completion: ((Swift.Bool) -> Swift.Void)?) + public func hhshowToast(_ toast: UIKit.UIView) + public func hhshowToast(_ toast: UIKit.UIView, duration: Foundation.TimeInterval, position: HHSDKVideo.hhToastPosition, completion: ((Swift.Bool) -> Swift.Void)?) + public func hhshowToast(_ toast: UIKit.UIView, duration: Foundation.TimeInterval, position: CoreGraphics.CGPoint, completion: ((Swift.Bool) -> Swift.Void)?) + public func hhmakeToastActivity(_ position: HHSDKVideo.hhToastPosition) + public func hhmakeToastActivity(_ position: CoreGraphics.CGPoint) + public func hhhideToastActivity() + @objc dynamic public func hhhandleToastTapped(_ recognizer: UIKit.UITapGestureRecognizer) + @objc dynamic public func hhtoastTimerDidFinish(_ timer: Foundation.Timer) + public func hhtoastViewForMessage(_ message: Swift.String?, title: Swift.String?, image: UIKit.UIImage?, style: HHSDKVideo.hhToastStyle) throws -> UIKit.UIView +} +public struct hhToastStyle { + public init() + public var backgroundColor: UIKit.UIColor + public var titleColor: UIKit.UIColor + public var messageColor: UIKit.UIColor + public var maxWidthPercentage: CoreGraphics.CGFloat { + get + set + } + public var maxHeightPercentage: CoreGraphics.CGFloat { + get + set + } + public var horizontalPadding: CoreGraphics.CGFloat + public var verticalPadding: CoreGraphics.CGFloat + public var cornerRadius: CoreGraphics.CGFloat + public var titleFont: UIKit.UIFont + public var messageFont: UIKit.UIFont + public var titleAlignment: UIKit.NSTextAlignment + public var messageAlignment: 
UIKit.NSTextAlignment + public var titleNumberOfLines: Swift.Int + public var messageNumberOfLines: Swift.Int + public var displayShadow: Swift.Bool + public var shadowColor: UIKit.UIColor + public var shadowOpacity: Swift.Float { + get + set + } + public var shadowRadius: CoreGraphics.CGFloat + public var shadowOffset: CoreGraphics.CGSize + public var imageSize: CoreGraphics.CGSize + public var activitySize: CoreGraphics.CGSize + public var fadeDuration: Swift.Double +} +extension UIAlertController { + public func showAlter() + public func present(animated: Swift.Bool, completion: (() -> Swift.Void)?) + public func addAlterActions(_ actions: [UIKit.UIAlertAction]) + public func alterMessageStyle(_ fonsize: CoreGraphics.CGFloat = (HHDimens.isPad()) ? 18 : 16) + public static func closeAlert(_ title: Swift.String = "", msg: Swift.String = "", keyString: Swift.String = "取消", closeBlock: (() -> Swift.Void)? = nil) -> UIKit.UIAlertController +} +extension UIButton { + public func centerImageTitleVertically(spacing: CoreGraphics.CGFloat = 2) + public func imageTitleHorizonal(spacing: CoreGraphics.CGFloat = 2) +} +extension UIImage { + public func rotatedBy(_ degrees: CoreGraphics.CGFloat) -> UIKit.UIImage +} +extension UIImageView { + public func hh_image(url: Foundation.URL?) + public func hh_image(url: Foundation.URL?, complete: ((UIKit.UIImage?, Foundation.URL?) -> Swift.Void)?) + public func hh_image(url: Foundation.URL?, placeHolder: UIKit.UIImage?) + public func hh_image(url: Foundation.URL?, placeHolder: UIKit.UIImage?, progresses: ((CoreGraphics.CGFloat) -> Swift.Void)?, complete: ((UIKit.UIImage?, Foundation.URL?) -> Swift.Void)?) +} +public func hh_downloadImg(_ url: Foundation.URL?, finish: @escaping ((UIKit.UIImage?, Foundation.Data?, Swift.Error?) -> Swift.Void)) +extension UIViewController { + public func hhAddCloseBtn(_ atLeft: Swift.Bool? 
= nil, isDismiss: Swift.Bool = true, title: Swift.String = "关闭") + @objc dynamic public func hhCloseThisController() + @objc dynamic public func hhPopThisController() + public func setNavigationTheme() + public func setNaviBackImg(navi: UIKit.UINavigationController?, color: UIKit.UIColor) + public func imageFromColor(color: UIKit.UIColor, size: CoreGraphics.CGSize) -> UIKit.UIImage +} +extension UIView { + public var sj_width: CoreGraphics.CGFloat { + get + set + } + public var sj_height: CoreGraphics.CGFloat { + get + set + } + public var sj_size: CoreGraphics.CGSize { + get + set + } + public var sj_origin: CoreGraphics.CGPoint { + get + set + } + public var sj_x: CoreGraphics.CGFloat { + get + set + } + public var sj_y: CoreGraphics.CGFloat { + get + set + } + public var sj_centerX: CoreGraphics.CGFloat { + get + set + } + public var sj_centerY: CoreGraphics.CGFloat { + get + set + } + public var sj_top: CoreGraphics.CGFloat { + get + set + } + public var sj_bottom: CoreGraphics.CGFloat { + get + set + } + public var sj_right: CoreGraphics.CGFloat { + get + set + } + public var sj_left: CoreGraphics.CGFloat { + get + set + } +} +extension UIView { + public class func viewFromNib<T>(_ aClass: T.Type, frameworkPath: Swift.String) -> T +} +public typealias onSDKProgress = ((CoreGraphics.CGFloat, Swift.String) -> Swift.Void) +public typealias onSDKUploadOnce = ((Swift.Bool, HHSDKVideo.SDKUploadModel) -> Swift.Void) +public typealias onSDKFinished = (() -> Swift.Void) +public class SDKUploadConfig { + public var progress: HHSDKVideo.onSDKProgress? + public var uploadOnce: HHSDKVideo.onSDKUploadOnce? + public var finished: HHSDKVideo.onSDKFinished? + public var orderId: Swift.String? + public init() + @objc deinit +} +public class SDKUploadModel { + public var clouldKey: Swift.String? + public var filePath: Swift.String? { + get + set + } + public var smallImage: Swift.String + public var state: HHSDKVideo.SDKUploadState? 
+ public init() + public init(full: Swift.String?, scale: Swift.String) + public init(clouldKey: Swift.String?, filePath: Swift.String?, uploadTime: Foundation.TimeInterval?, name: Swift.String?, smallImage: Swift.String) + @objc deinit +} +@_hasMissingDesignatedInitializers public class SDKUploadState { + public var file: Swift.String? + public var isSelect: Swift.Bool + public var changed: (() -> Swift.Void)? + public var progress: Swift.Float { + get + set + } + public func isSuccess() -> Swift.Bool + public func isFail() -> Swift.Bool + @objc deinit +} +@_hasMissingDesignatedInitializers public class HHUrl { + public static func domains() -> [Swift.String] + public static var timeOffset: Swift.Double + public static func urlForPay() -> Swift.String + public static func urlForFamily() -> Swift.String + public static func urlForWeixin() -> Swift.String + public static func baseUrl() -> Swift.String + public static func basePayUrl() -> Swift.String + public static func baseMedicUrl() -> Swift.String + public static func baseSecUrl() -> Swift.String + public static func testURL() -> Swift.String + public static func fileLogUrl(_ name: Swift.String, orderId: Swift.String) -> Foundation.URL + public static func expertDetailUrl(expertId: Swift.String) -> Swift.String + public static func buyVIPUrl() -> Swift.String + public static func productRightUrl() -> Swift.String + @objc deinit +} +extension HHUrl { + public static func headers(host: Swift.String) -> [Swift.String : Swift.String] +} +public func languagePrefix() -> Swift.String +@_hasMissingDesignatedInitializers public class HHUserDefaults { + public class func setString(_ str: Swift.String, key: Swift.String) + public class func stringValue(_ key: Swift.String) -> Swift.String? + public class func setArray(_ array: [Swift.AnyObject], key: Swift.String) + public class func arrayForKey(_ key: Swift.String) -> [Swift.AnyObject]? 
+ public class func setImage(_ image: UIKit.UIImage, key: Swift.String) + public class func imageForKey(_ key: Swift.String) -> UIKit.UIImage? + @objc deinit +} +extension HHUserDefaults { + public class func setBool(_ flag: Swift.Bool, key: Swift.String) + public class func boolForKey(_ key: Swift.String) -> Swift.Bool + public class func setObject(_ obj: Swift.AnyObject, key: Swift.String) + public class func objectForKey(_ key: Swift.String) -> Swift.AnyObject? + public class func removeObject(_ key: Swift.String) +} +extension HHUserDefaults { + public class func setData(_ data: Foundation.Data?, key: Swift.String) + public class func dataForKey(_ key: Swift.String) -> Foundation.Data? + public class func userDefaults() -> Foundation.UserDefaults + public class func synchronize() + public class func encryptkey(_ key: Swift.String) -> Swift.String +} +public struct HHMemberInfoModel : HHSDKVideo.Mappable { + public var productStatusDescn: Swift.String? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +public class HHUserModel : HHSDKVideo.Mappable { + public var age: Swift.String? + public var companyLogo: Swift.String? + public var birthday: Swift.Int64? + public var loginname: Swift.String? + public var name: Swift.String? + public var photourl: Swift.String? + public var pid: Swift.Int? + public var product: HHSDKVideo.HHMemberInfoModel? + public var relation: Swift.String? + public var sex: Swift.String? + public var uuid: Swift.Int? + public var userToken: Swift.String? + public var videoToken: Swift.String? + public var auth: Swift.Bool? + public var isMember: Swift.Bool? + public var isAccount: Swift.Bool? + public var license: Swift.String? + public var userSig: Swift.String? + public var phoneNum: Swift.String? 
+ required public init?(map: HHSDKVideo.Map) + public init() + public func mapping(map: HHSDKVideo.Map) + @objc deinit +} +public struct HHUserProtocolModel : HHSDKVideo.Mappable { + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +open class HHValueObservable<T> { + public typealias Observer = (T) -> Swift.Void + open var observer: HHSDKVideo.HHValueObservable<T>.Observer? + open func observe(_ observer: HHSDKVideo.HHValueObservable<T>.Observer?) + open var value: T { + get + set + } + public init(_ v: T) + @objc deinit +} +@_inheritsConvenienceInitializers @objc public class HHVideoLocation : ObjectiveC.NSObject { + @objc public static let `default`: HHSDKVideo.HHVideoLocation + @objc public func startLocation(lng: Swift.String, lat: Swift.String) + @objc public func closeLocation() + @objc override dynamic public init() + @objc deinit +} +@_inheritsConvenienceInitializers @objc public class HHVideoSDK : ObjectiveC.NSObject { + @objc public static let `default`: HHSDKVideo.HHVideoSDK + public var mHHRTC: HHSDKVideo.HHRTC? + public var mSDKOption: HHSDKVideo.OptionProtocal? + weak public var mCallDelegate: HHSDKVideo.HHCallDelegate? + weak public var mHHRTCDelegate: HHSDKVideo.HHRTCDelegate? + weak public var videoManager: HHSDKVideo.HHBaseVideoDelegate? + public var expertVideoCallback: (() -> Swift.Void)? + public var autoLoginCheck: (() -> Swift.Void)? + public var onReceiveNewMsg: (([Swift.String : Any]) -> Swift.Void)? + public var userProtocolModel: HHSDKVideo.HHUserProtocolModel? + @objc public var photosPreview: ((Swift.Array<Swift.String>) -> Swift.Void)? + @objc public func start(option: HHSDKVideo.OptionProtocal, im: HHSDKVideo.HHIM, rtc: HHSDKVideo.HHRTC) + @objc public func login(userToken: Swift.String, completion: @escaping HHSDKVideo.HHLoginHandler) + @objc public func autoLogin(uuid: Swift.Int, completion: @escaping HHSDKVideo.HHLoginHandler) + @objc public func logout(_ callback: ((Swift.String?) 
-> Swift.Void)? = nil) + @objc public func terminate() + @objc public func setAlipayHook(alipayCallback: @escaping (Swift.String, Swift.String, @escaping (([Swift.String : Any]) -> Swift.Void)) -> Swift.Bool) + @objc override dynamic public init() + @objc deinit +} +extension HHVideoSDK { + @objc dynamic public func startCall(_ type: HHSDKVideo.HHCallType = .adult, scene: Swift.String? = nil, callDelegate: HHSDKVideo.HHCallDelegate? = nil) + @objc dynamic public func startNewCall(_ uuid: Swift.Int, type: HHSDKVideo.HHCallType = .adult, callDelegate: HHSDKVideo.HHCallDelegate? = nil) + @objc dynamic public func startCall(_ uuid: Swift.Int, scene: Swift.String? = nil, type: HHSDKVideo.HHCallType = .adult, callDelegate: HHSDKVideo.HHCallDelegate? = nil) +} +extension HHVideoSDK { + @objc dynamic public func startTeamCall(_ type: HHSDKVideo.HHCallType, callee: HHSDKVideo.HHCallerInfo, callDelegate: HHSDKVideo.HHCallDelegate? = nil) +} +extension HHVideoSDK { + @objc dynamic public func call(_ memberToken: Swift.String, scene: Swift.String? = nil) +} +extension HHVideoSDK { + public func waitExpert(userToken: Swift.String, callOrderId: Swift.String) +} +extension HHVideoSDK { + @objc dynamic public func startMemberCall(needSelectMember: Swift.Bool = true) +} +extension HHVideoSDK { + @objc dynamic public func skipChatHome(isByPresent: Swift.Bool = false, vc: UIKit.UIViewController? = nil) + @objc dynamic public func skipChatHome(_ nav: UIKit.UINavigationController) + @objc dynamic public func chatHomeVC() -> UIKit.UIViewController? +} +extension HHVideoSDK { + public func sendBaseLog(ex: [Swift.String : Swift.String]? = nil, action: [Swift.String : Swift.String]? = nil) +} +public func topviewController() -> UIKit.UIViewController? 
+extension HHVideoSDK { + @objc dynamic public func loginForThirdId(_ thirdInfo: [Swift.String : Any], completion: @escaping HHSDKVideo.HHLoginHandler) +} +extension HHVideoSDK { + public func checkProtocolUpdate(agreeBlock: ((Swift.Bool) -> Swift.Void)?) +} +extension HHVideoSDK { + @objc dynamic public func getMedicDetail(userToken: Swift.String, medicId: Swift.String) -> Swift.String + @objc dynamic public func getMedicList(userToken: Swift.String) -> Swift.String + @objc dynamic public func getAllMedics(userToken: Swift.String) -> Swift.String +} +extension HHVideoSDK { + @objc dynamic public func onKickedOffline() +} +public struct HHWaitDoctorModel : HHSDKVideo.Mappable { + public var isNormalTrans: Swift.Bool + public var deptId: Swift.String? + public var uuid: Swift.Int? + public var transUuid: Swift.Int? + public init?(map: HHSDKVideo.Map) + public func isWaiting() -> Swift.Bool + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct HHWaitingCallModel : HHSDKVideo.Mappable { + public init?(map: HHSDKVideo.Map) + public func isCall() -> Swift.Bool + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct HHAgentCallModel : HHSDKVideo.Mappable { + public init?(map: HHSDKVideo.Map) + public func isAgent() -> Swift.Bool + public func isTransform() -> Swift.Bool + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct HKDF { + public enum Error : Swift.Error { + case invalidInput + case derivedKeyTooLong + public static func == (a: HHSDKVideo.HKDF.Error, b: HHSDKVideo.HKDF.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(password: Swift.Array<Swift.UInt8>, salt: Swift.Array<Swift.UInt8>? = nil, info: Swift.Array<Swift.UInt8>? = nil, keyLength: Swift.Int? 
= nil, variant: HHSDKVideo.HMAC.Variant = .sha256) throws + public func calculate() throws -> Swift.Array<Swift.UInt8> +} +final public class HMAC : HHSDKVideo.CryptoAuthenticator { + public enum Error : Swift.Error { + case authenticateError + case invalidInput + public static func == (a: HHSDKVideo.HMAC.Error, b: HHSDKVideo.HMAC.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public enum Variant { + case sha1, sha256, sha384, sha512, md5 + public static func == (a: HHSDKVideo.HMAC.Variant, b: HHSDKVideo.HMAC.Variant) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(key: Swift.Array<Swift.UInt8>, variant: HHSDKVideo.HMAC.Variant = .md5) + final public func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension HMAC { + convenience public init(key: Swift.String, variant: HHSDKVideo.HMAC.Variant = .md5) throws +} +public protocol ImmutableMappable : HHSDKVideo.BaseMappable { + init(map: HHSDKVideo.Map) throws +} +extension ImmutableMappable { + public func mapping(map: HHSDKVideo.Map) + public init(JSONString: Swift.String, context: HHSDKVideo.MapContext? = nil) throws + public init(JSON: [Swift.String : Any], context: HHSDKVideo.MapContext? = nil) throws + public init(JSONObject: Any, context: HHSDKVideo.MapContext? = nil) throws +} +extension Map { + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T + final public func value<Transform>(_ key: Swift.String, nested: Swift.Bool? 
= nil, delimiter: Swift.String = ".", using transform: Transform, file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> Transform.Object where Transform : HHSDKVideo.TransformType + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T where T : Swift.RawRepresentable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T? where T : Swift.RawRepresentable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [T] where T : Swift.RawRepresentable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [T]? where T : Swift.RawRepresentable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T? where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? 
= nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [T] where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [T]? where T : HHSDKVideo.BaseMappable + final public func value<Transform>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", using transform: Transform, file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [Transform.Object] where Transform : HHSDKVideo.TransformType + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [Swift.String : T] where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [Swift.String : T]? where T : HHSDKVideo.BaseMappable + final public func value<Transform>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", using transform: Transform, file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [Swift.String : Transform.Object] where Transform : HHSDKVideo.TransformType + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [[T]]? where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? 
= nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [[T]] where T : HHSDKVideo.BaseMappable + final public func value<Transform>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", using transform: Transform, file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [[Transform.Object]] where Transform : HHSDKVideo.TransformType +} +extension Mapper where N : HHSDKVideo.ImmutableMappable { + final public func map(JSON: [Swift.String : Any]) throws -> N + final public func map(JSONString: Swift.String) throws -> N + final public func map(JSONObject: Any) throws -> N + final public func mapArray(JSONArray: [[Swift.String : Any]]) throws -> [N] + final public func mapArray(JSONString: Swift.String) throws -> [N] + final public func mapArray(JSONObject: Any) throws -> [N] + final public func mapDictionary(JSONString: Swift.String) throws -> [Swift.String : N] + final public func mapDictionary(JSONObject: Any?) throws -> [Swift.String : N] + final public func mapDictionary(JSON: [Swift.String : [Swift.String : Any]]) throws -> [Swift.String : N] + final public func mapDictionaryOfArrays(JSONObject: Any?) throws -> [Swift.String : [N]] + final public func mapDictionaryOfArrays(JSON: [Swift.String : [[Swift.String : Any]]]) throws -> [Swift.String : [N]] + final public func mapArrayOfArrays(JSONObject: Any?) 
throws -> [[N]] +} +public func <- <T>(left: inout T, right: HHSDKVideo.Map) where T : Swift.SignedInteger +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) where T : Swift.SignedInteger +public func <- <T>(left: inout T, right: HHSDKVideo.Map) where T : Swift.UnsignedInteger +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) where T : Swift.UnsignedInteger +extension DateFormatter { + convenience public init(withFormat format: Swift.String, locale: Swift.String) +} +open class ISO8601DateTransform : HHSDKVideo.DateFormatterTransform { + public init() + override public init(dateFormatter: Foundation.DateFormatter) + @objc deinit +} +public let KeychainAccessErrorDomain: Swift.String +public enum ItemClass { + case genericPassword + case internetPassword +} +public enum ProtocolType { + case ftp + case ftpAccount + case http + case irc + case nntp + case pop3 + case smtp + case socks + case imap + case ldap + case appleTalk + case afp + case telnet + case ssh + case ftps + case https + case httpProxy + case httpsProxy + case ftpProxy + case smb + case rtsp + case rtspProxy + case daap + case eppc + case ipp + case nntps + case ldaps + case telnetS + case imaps + case ircs + case pop3S +} +public enum AuthenticationType { + case ntlm + case msn + case dpa + case rpa + case httpBasic + case httpDigest + case htmlForm + case `default` +} +public enum Accessibility { + case whenUnlocked + case afterFirstUnlock + case always + @available(iOS 8.0, macOS 10.10, *) + case whenPasscodeSetThisDeviceOnly + case whenUnlockedThisDeviceOnly + case afterFirstUnlockThisDeviceOnly + case alwaysThisDeviceOnly +} +public struct AuthenticationPolicy : Swift.OptionSet { + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + public static let userPresence: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let touchIDAny: HHSDKVideo.AuthenticationPolicy + 
@available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let touchIDCurrentSet: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, macOS 10.11, *) + @available(watchOS, unavailable) + public static let devicePasscode: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let or: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let and: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let privateKeyUsage: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let applicationPassword: HHSDKVideo.AuthenticationPolicy + public let rawValue: Swift.UInt + public init(rawValue: Swift.UInt) + public typealias ArrayLiteralElement = HHSDKVideo.AuthenticationPolicy + public typealias Element = HHSDKVideo.AuthenticationPolicy + public typealias RawValue = Swift.UInt +} +public struct Attributes { + public var `class`: Swift.String? { + get + } + public var data: Foundation.Data? { + get + } + public var ref: Foundation.Data? { + get + } + public var persistentRef: Foundation.Data? { + get + } + public var accessible: Swift.String? { + get + } + public var accessControl: Security.SecAccessControl? { + get + } + public var accessGroup: Swift.String? { + get + } + public var synchronizable: Swift.Bool? { + get + } + public var creationDate: Foundation.Date? { + get + } + public var modificationDate: Foundation.Date? { + get + } + public var attributeDescription: Swift.String? { + get + } + public var comment: Swift.String? { + get + } + public var creator: Swift.String? { + get + } + public var type: Swift.String? { + get + } + public var label: Swift.String? 
{ + get + } + public var isInvisible: Swift.Bool? { + get + } + public var isNegative: Swift.Bool? { + get + } + public var account: Swift.String? { + get + } + public var service: Swift.String? { + get + } + public var generic: Foundation.Data? { + get + } + public var securityDomain: Swift.String? { + get + } + public var server: Swift.String? { + get + } + public var `protocol`: Swift.String? { + get + } + public var authenticationType: Swift.String? { + get + } + public var port: Swift.Int? { + get + } + public var path: Swift.String? { + get + } + public subscript(key: Swift.String) -> Any? { + get + } +} +@_hasMissingDesignatedInitializers final public class Keychain { + final public var itemClass: HHSDKVideo.ItemClass { + get + } + final public var service: Swift.String { + get + } + final public var accessGroup: Swift.String? { + get + } + final public var server: Foundation.URL { + get + } + final public var protocolType: HHSDKVideo.ProtocolType { + get + } + final public var authenticationType: HHSDKVideo.AuthenticationType { + get + } + final public var accessibility: HHSDKVideo.Accessibility { + get + } + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + final public var authenticationPolicy: HHSDKVideo.AuthenticationPolicy? { + get + } + final public var synchronizable: Swift.Bool { + get + } + final public var label: Swift.String? { + get + } + final public var comment: Swift.String? { + get + } + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + final public var authenticationPrompt: Swift.String? { + get + } + @available(iOS 9.0, macOS 10.11, *) + final public var authenticationContext: LocalAuthentication.LAContext? 
{ + get + } + convenience public init() + convenience public init(service: Swift.String) + convenience public init(accessGroup: Swift.String) + convenience public init(service: Swift.String, accessGroup: Swift.String) + convenience public init(server: Swift.String, protocolType: HHSDKVideo.ProtocolType, authenticationType: HHSDKVideo.AuthenticationType = .default) + convenience public init(server: Foundation.URL, protocolType: HHSDKVideo.ProtocolType, authenticationType: HHSDKVideo.AuthenticationType = .default) + final public func accessibility(_ accessibility: HHSDKVideo.Accessibility) -> HHSDKVideo.Keychain + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + final public func accessibility(_ accessibility: HHSDKVideo.Accessibility, authenticationPolicy: HHSDKVideo.AuthenticationPolicy) -> HHSDKVideo.Keychain + final public func synchronizable(_ synchronizable: Swift.Bool) -> HHSDKVideo.Keychain + final public func label(_ label: Swift.String) -> HHSDKVideo.Keychain + final public func comment(_ comment: Swift.String) -> HHSDKVideo.Keychain + final public func attributes(_ attributes: [Swift.String : Any]) -> HHSDKVideo.Keychain + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + final public func authenticationPrompt(_ authenticationPrompt: Swift.String) -> HHSDKVideo.Keychain + @available(iOS 9.0, macOS 10.11, *) + final public func authenticationContext(_ authenticationContext: LocalAuthentication.LAContext) -> HHSDKVideo.Keychain + final public func get(_ key: Swift.String) throws -> Swift.String? + final public func getString(_ key: Swift.String) throws -> Swift.String? + final public func getData(_ key: Swift.String) throws -> Foundation.Data? + final public func get<T>(_ key: Swift.String, handler: (HHSDKVideo.Attributes?) 
-> T) throws -> T + final public func set(_ value: Swift.String, key: Swift.String) throws + final public func set(_ value: Foundation.Data, key: Swift.String) throws + final public subscript(key: Swift.String) -> Swift.String? { + get + set + } + final public subscript(string key: Swift.String) -> Swift.String? { + get + set + } + final public subscript(data key: Swift.String) -> Foundation.Data? { + get + set + } + final public subscript(attributes key: Swift.String) -> HHSDKVideo.Attributes? { + get + } + final public func remove(_ key: Swift.String) throws + final public func removeAll() throws + final public func contains(_ key: Swift.String) throws -> Swift.Bool + final public class func allKeys(_ itemClass: HHSDKVideo.ItemClass) -> [(Swift.String, Swift.String)] + final public func allKeys() -> [Swift.String] + final public class func allItems(_ itemClass: HHSDKVideo.ItemClass) -> [[Swift.String : Any]] + final public func allItems() -> [[Swift.String : Any]] + @available(iOS 8.0, *) + final public func getSharedPassword(_ completion: @escaping (Swift.String?, Swift.String?, Swift.Error?) -> () = { account, password, error -> () in }) + @available(iOS 8.0, *) + final public func getSharedPassword(_ account: Swift.String, completion: @escaping (Swift.String?, Swift.Error?) -> () = { password, error -> () in }) + @available(iOS 8.0, *) + final public func setSharedPassword(_ password: Swift.String, account: Swift.String, completion: @escaping (Swift.Error?) -> () = { e -> () in }) + @available(iOS 8.0, *) + final public func removeSharedPassword(_ account: Swift.String, completion: @escaping (Swift.Error?) -> () = { e -> () in }) + @available(iOS 8.0, *) + final public class func requestSharedWebCredential(_ completion: @escaping ([[Swift.String : Swift.String]], Swift.Error?) 
-> () = { credentials, error -> () in }) + @available(iOS 8.0, *) + final public class func requestSharedWebCredential(domain: Swift.String, completion: @escaping ([[Swift.String : Swift.String]], Swift.Error?) -> () = { credentials, error -> () in }) + @available(iOS 8.0, *) + final public class func requestSharedWebCredential(domain: Swift.String, account: Swift.String, completion: @escaping ([[Swift.String : Swift.String]], Swift.Error?) -> () = { credentials, error -> () in }) + @available(iOS 8.0, *) + final public class func generatePassword() -> Swift.String + @objc deinit +} +extension Keychain : Swift.CustomStringConvertible, Swift.CustomDebugStringConvertible { + final public var description: Swift.String { + get + } + final public var debugDescription: Swift.String { + get + } +} +extension Attributes : Swift.CustomStringConvertible, Swift.CustomDebugStringConvertible { + public var description: Swift.String { + get + } + public var debugDescription: Swift.String { + get + } +} +extension ItemClass : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init?(rawValue: Swift.String) + public var rawValue: Swift.String { + get + } + public var description: Swift.String { + get + } + public typealias RawValue = Swift.String +} +extension ProtocolType : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init?(rawValue: Swift.String) + public var rawValue: Swift.String { + get + } + public var description: Swift.String { + get + } + public typealias RawValue = Swift.String +} +extension AuthenticationType : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init?(rawValue: Swift.String) + public var rawValue: Swift.String { + get + } + public var description: Swift.String { + get + } + public typealias RawValue = Swift.String +} +extension Accessibility : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init?(rawValue: Swift.String) + public var rawValue: Swift.String { + get + } + public var 
description: Swift.String { + get + } + public typealias RawValue = Swift.String +} +public enum Status : Darwin.OSStatus, Swift.Error { + case success + case unimplemented + case diskFull + case io + case opWr + case param + case wrPerm + case allocate + case userCanceled + case badReq + case internalComponent + case notAvailable + case readOnly + case authFailed + case noSuchKeychain + case invalidKeychain + case duplicateKeychain + case duplicateCallback + case invalidCallback + case duplicateItem + case itemNotFound + case bufferTooSmall + case dataTooLarge + case noSuchAttr + case invalidItemRef + case invalidSearchRef + case noSuchClass + case noDefaultKeychain + case interactionNotAllowed + case readOnlyAttr + case wrongSecVersion + case keySizeNotAllowed + case noStorageModule + case noCertificateModule + case noPolicyModule + case interactionRequired + case dataNotAvailable + case dataNotModifiable + case createChainFailed + case invalidPrefsDomain + case inDarkWake + case aclNotSimple + case policyNotFound + case invalidTrustSetting + case noAccessForItem + case invalidOwnerEdit + case trustNotAvailable + case unsupportedFormat + case unknownFormat + case keyIsSensitive + case multiplePrivKeys + case passphraseRequired + case invalidPasswordRef + case invalidTrustSettings + case noTrustSettings + case pkcs12VerifyFailure + case invalidCertificate + case notSigner + case policyDenied + case invalidKey + case decode + case `internal` + case unsupportedAlgorithm + case unsupportedOperation + case unsupportedPadding + case itemInvalidKey + case itemInvalidKeyType + case itemInvalidValue + case itemClassMissing + case itemMatchUnsupported + case useItemListUnsupported + case useKeychainUnsupported + case useKeychainListUnsupported + case returnDataUnsupported + case returnAttributesUnsupported + case returnRefUnsupported + case returnPersitentRefUnsupported + case valueRefUnsupported + case valuePersistentRefUnsupported + case returnMissingPointer + case 
matchLimitUnsupported + case itemIllegalQuery + case waitForCallback + case missingEntitlement + case upgradePending + case mpSignatureInvalid + case otrTooOld + case otrIDTooNew + case serviceNotAvailable + case insufficientClientID + case deviceReset + case deviceFailed + case appleAddAppACLSubject + case applePublicKeyIncomplete + case appleSignatureMismatch + case appleInvalidKeyStartDate + case appleInvalidKeyEndDate + case conversionError + case appleSSLv2Rollback + case quotaExceeded + case fileTooBig + case invalidDatabaseBlob + case invalidKeyBlob + case incompatibleDatabaseBlob + case incompatibleKeyBlob + case hostNameMismatch + case unknownCriticalExtensionFlag + case noBasicConstraints + case noBasicConstraintsCA + case invalidAuthorityKeyID + case invalidSubjectKeyID + case invalidKeyUsageForPolicy + case invalidExtendedKeyUsage + case invalidIDLinkage + case pathLengthConstraintExceeded + case invalidRoot + case crlExpired + case crlNotValidYet + case crlNotFound + case crlServerDown + case crlBadURI + case unknownCertExtension + case unknownCRLExtension + case crlNotTrusted + case crlPolicyFailed + case idpFailure + case smimeEmailAddressesNotFound + case smimeBadExtendedKeyUsage + case smimeBadKeyUsage + case smimeKeyUsageNotCritical + case smimeNoEmailAddress + case smimeSubjAltNameNotCritical + case sslBadExtendedKeyUsage + case ocspBadResponse + case ocspBadRequest + case ocspUnavailable + case ocspStatusUnrecognized + case endOfData + case incompleteCertRevocationCheck + case networkFailure + case ocspNotTrustedToAnchor + case recordModified + case ocspSignatureError + case ocspNoSigner + case ocspResponderMalformedReq + case ocspResponderInternalError + case ocspResponderTryLater + case ocspResponderSignatureRequired + case ocspResponderUnauthorized + case ocspResponseNonceMismatch + case codeSigningBadCertChainLength + case codeSigningNoBasicConstraints + case codeSigningBadPathLengthConstraint + case codeSigningNoExtendedKeyUsage + case 
codeSigningDevelopment + case resourceSignBadCertChainLength + case resourceSignBadExtKeyUsage + case trustSettingDeny + case invalidSubjectName + case unknownQualifiedCertStatement + case mobileMeRequestQueued + case mobileMeRequestRedirected + case mobileMeServerError + case mobileMeServerNotAvailable + case mobileMeServerAlreadyExists + case mobileMeServerServiceErr + case mobileMeRequestAlreadyPending + case mobileMeNoRequestPending + case mobileMeCSRVerifyFailure + case mobileMeFailedConsistencyCheck + case notInitialized + case invalidHandleUsage + case pvcReferentNotFound + case functionIntegrityFail + case internalError + case memoryError + case invalidData + case mdsError + case invalidPointer + case selfCheckFailed + case functionFailed + case moduleManifestVerifyFailed + case invalidGUID + case invalidHandle + case invalidDBList + case invalidPassthroughID + case invalidNetworkAddress + case crlAlreadySigned + case invalidNumberOfFields + case verificationFailure + case unknownTag + case invalidSignature + case invalidName + case invalidCertificateRef + case invalidCertificateGroup + case tagNotFound + case invalidQuery + case invalidValue + case callbackFailed + case aclDeleteFailed + case aclReplaceFailed + case aclAddFailed + case aclChangeFailed + case invalidAccessCredentials + case invalidRecord + case invalidACL + case invalidSampleValue + case incompatibleVersion + case privilegeNotGranted + case invalidScope + case pvcAlreadyConfigured + case invalidPVC + case emmLoadFailed + case emmUnloadFailed + case addinLoadFailed + case invalidKeyRef + case invalidKeyHierarchy + case addinUnloadFailed + case libraryReferenceNotFound + case invalidAddinFunctionTable + case invalidServiceMask + case moduleNotLoaded + case invalidSubServiceID + case attributeNotInContext + case moduleManagerInitializeFailed + case moduleManagerNotFound + case eventNotificationCallbackNotFound + case inputLengthError + case outputLengthError + case privilegeNotSupported + case 
deviceError + case attachHandleBusy + case notLoggedIn + case algorithmMismatch + case keyUsageIncorrect + case keyBlobTypeIncorrect + case keyHeaderInconsistent + case unsupportedKeyFormat + case unsupportedKeySize + case invalidKeyUsageMask + case unsupportedKeyUsageMask + case invalidKeyAttributeMask + case unsupportedKeyAttributeMask + case invalidKeyLabel + case unsupportedKeyLabel + case invalidKeyFormat + case unsupportedVectorOfBuffers + case invalidInputVector + case invalidOutputVector + case invalidContext + case invalidAlgorithm + case invalidAttributeKey + case missingAttributeKey + case invalidAttributeInitVector + case missingAttributeInitVector + case invalidAttributeSalt + case missingAttributeSalt + case invalidAttributePadding + case missingAttributePadding + case invalidAttributeRandom + case missingAttributeRandom + case invalidAttributeSeed + case missingAttributeSeed + case invalidAttributePassphrase + case missingAttributePassphrase + case invalidAttributeKeyLength + case missingAttributeKeyLength + case invalidAttributeBlockSize + case missingAttributeBlockSize + case invalidAttributeOutputSize + case missingAttributeOutputSize + case invalidAttributeRounds + case missingAttributeRounds + case invalidAlgorithmParms + case missingAlgorithmParms + case invalidAttributeLabel + case missingAttributeLabel + case invalidAttributeKeyType + case missingAttributeKeyType + case invalidAttributeMode + case missingAttributeMode + case invalidAttributeEffectiveBits + case missingAttributeEffectiveBits + case invalidAttributeStartDate + case missingAttributeStartDate + case invalidAttributeEndDate + case missingAttributeEndDate + case invalidAttributeVersion + case missingAttributeVersion + case invalidAttributePrime + case missingAttributePrime + case invalidAttributeBase + case missingAttributeBase + case invalidAttributeSubprime + case missingAttributeSubprime + case invalidAttributeIterationCount + case missingAttributeIterationCount + case 
invalidAttributeDLDBHandle + case missingAttributeDLDBHandle + case invalidAttributeAccessCredentials + case missingAttributeAccessCredentials + case invalidAttributePublicKeyFormat + case missingAttributePublicKeyFormat + case invalidAttributePrivateKeyFormat + case missingAttributePrivateKeyFormat + case invalidAttributeSymmetricKeyFormat + case missingAttributeSymmetricKeyFormat + case invalidAttributeWrappedKeyFormat + case missingAttributeWrappedKeyFormat + case stagedOperationInProgress + case stagedOperationNotStarted + case verifyFailed + case querySizeUnknown + case blockSizeMismatch + case publicKeyInconsistent + case deviceVerifyFailed + case invalidLoginName + case alreadyLoggedIn + case invalidDigestAlgorithm + case invalidCRLGroup + case certificateCannotOperate + case certificateExpired + case certificateNotValidYet + case certificateRevoked + case certificateSuspended + case insufficientCredentials + case invalidAction + case invalidAuthority + case verifyActionFailed + case invalidCertAuthority + case invaldCRLAuthority + case invalidCRLEncoding + case invalidCRLType + case invalidCRL + case invalidFormType + case invalidID + case invalidIdentifier + case invalidIndex + case invalidPolicyIdentifiers + case invalidTimeString + case invalidReason + case invalidRequestInputs + case invalidResponseVector + case invalidStopOnPolicy + case invalidTuple + case multipleValuesUnsupported + case notTrusted + case noDefaultAuthority + case rejectedForm + case requestLost + case requestRejected + case unsupportedAddressType + case unsupportedService + case invalidTupleGroup + case invalidBaseACLs + case invalidTupleCredendtials + case invalidEncoding + case invalidValidityPeriod + case invalidRequestor + case requestDescriptor + case invalidBundleInfo + case invalidCRLIndex + case noFieldValues + case unsupportedFieldFormat + case unsupportedIndexInfo + case unsupportedLocality + case unsupportedNumAttributes + case unsupportedNumIndexes + case 
unsupportedNumRecordTypes + case fieldSpecifiedMultiple + case incompatibleFieldFormat + case invalidParsingModule + case databaseLocked + case datastoreIsOpen + case missingValue + case unsupportedQueryLimits + case unsupportedNumSelectionPreds + case unsupportedOperator + case invalidDBLocation + case invalidAccessRequest + case invalidIndexInfo + case invalidNewOwner + case invalidModifyMode + case missingRequiredExtension + case extendedKeyUsageNotCritical + case timestampMissing + case timestampInvalid + case timestampNotTrusted + case timestampServiceNotAvailable + case timestampBadAlg + case timestampBadRequest + case timestampBadDataFormat + case timestampTimeNotAvailable + case timestampUnacceptedPolicy + case timestampUnacceptedExtension + case timestampAddInfoNotAvailable + case timestampSystemFailure + case signingTimeMissing + case timestampRejection + case timestampWaiting + case timestampRevocationWarning + case timestampRevocationNotification + case unexpectedError +} +extension Status : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init(status: Darwin.OSStatus) + public var description: Swift.String { + get + } + public init?(rawValue: Darwin.OSStatus) + public typealias RawValue = Darwin.OSStatus + public var rawValue: Darwin.OSStatus { + get + } +} +extension Status : Foundation.CustomNSError { + public static let errorDomain: Swift.String + public var errorCode: Swift.Int { + get + } + public var errorUserInfo: [Swift.String : Any] { + get + } +} +extension HHPermission : CoreLocation.CLLocationManagerDelegate { + @objc dynamic public func locationManager(_ manager: CoreLocation.CLLocationManager, didChangeAuthorization status: CoreLocation.CLAuthorizationStatus) +} +public let HHUUID: Swift.String +public let HHUserToken: Swift.String +@_hasMissingDesignatedInitializers public class LoginManager { + public static let `default`: HHSDKVideo.LoginManager + public var mUUID: Swift.Int? 
+ public var mUserInfo: HHSDKVideo.HHUserModel? + public func loadCache() + public func removeCache() + public func getUserInfo(token: Swift.String, success: ((Swift.String?) -> Swift.Void)? = nil, fail: ((Swift.String) -> Swift.Void)? = nil) + public func getUserInfoRequest(success: ((Swift.String?) -> Swift.Void)? = nil, fail: ((Swift.String) -> Swift.Void)? = nil) + public func convert2Model() -> Swift.String? + public func getUserInfo() -> HHSDKVideo.HHUserModel? + public func getCacheUserInfo() -> HHSDKVideo.HHUserModel? + public func hasLoginData() -> Swift.Bool + public func getUUID() -> Swift.Int? + public func setUUID(uuid: Swift.Int) + public func getToken() -> Swift.String? + public func uuidStr() -> Swift.String? + public func isMemeber() -> Swift.Bool + public func isVIP() -> Swift.Bool + public func getUpgradeVIPTips() -> Swift.String? + public func isBuyProduct() -> Swift.Bool + public func getMemberDes() -> Swift.String? + public func isPhoneAccount() -> Swift.Bool + @objc deinit +} +public protocol MapContext { +} +final public class Map { + final public let mappingType: HHSDKVideo.MappingType + final public var JSON: [Swift.String : Any] { + get + } + final public var isKeyPresent: Swift.Bool { + get + } + final public var currentValue: Any? { + get + } + final public var currentKey: Swift.String? { + get + } + final public var nestedKeyDelimiter: Swift.String { + get + } + final public var context: HHSDKVideo.MapContext? + final public var shouldIncludeNilValues: Swift.Bool + final public let toObject: Swift.Bool + public init(mappingType: HHSDKVideo.MappingType, JSON: [Swift.String : Any], toObject: Swift.Bool = false, context: HHSDKVideo.MapContext? 
= nil, shouldIncludeNilValues: Swift.Bool = false) + final public subscript(key: Swift.String) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, delimiter delimiter: Swift.String) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, nested nested: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, nested nested: Swift.Bool, delimiter delimiter: Swift.String) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, ignoreNil ignoreNil: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, delimiter delimiter: Swift.String, ignoreNil ignoreNil: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, nested nested: Swift.Bool, ignoreNil ignoreNil: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, nested nested: Swift.Bool?, delimiter delimiter: Swift.String, ignoreNil ignoreNil: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public func value<T>() -> T? + @objc deinit +} +extension Map { + final public func value<T>(_ key: Swift.String, default: T.Object, using transform: T) throws -> T.Object where T : HHSDKVideo.TransformType + final public func value<T>(_ key: Swift.String, default: T) throws -> T + final public func value<T>(_ key: Swift.String, default: [T]) -> [T] where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, default: T) throws -> T where T : HHSDKVideo.BaseMappable +} +public struct MapError : Swift.Error { + public var key: Swift.String? + public var currentValue: Any? + public var reason: Swift.String? + public var file: Swift.StaticString? + public var function: Swift.StaticString? + public var line: Swift.UInt? + public init(key: Swift.String?, currentValue: Any?, reason: Swift.String?, file: Swift.StaticString? = nil, function: Swift.StaticString? = nil, line: Swift.UInt? 
= nil) +} +extension MapError : Swift.CustomStringConvertible { + public var description: Swift.String { + get + } +} +public protocol BaseMappable { + mutating func mapping(map: HHSDKVideo.Map) +} +public protocol Mappable : HHSDKVideo.BaseMappable { + init?(map: HHSDKVideo.Map) +} +public protocol StaticMappable : HHSDKVideo.BaseMappable { + static func objectForMapping(map: HHSDKVideo.Map) -> HHSDKVideo.BaseMappable? +} +extension Mappable { + public init?(JSONString: Swift.String, context: HHSDKVideo.MapContext? = nil) + public init?(JSON: [Swift.String : Any], context: HHSDKVideo.MapContext? = nil) +} +extension BaseMappable { + public func toJSON() -> [Swift.String : Any] + public func toJSONString(prettyPrint: Swift.Bool = false) -> Swift.String? +} +extension Array where Element : HHSDKVideo.BaseMappable { + public init?(JSONString: Swift.String, context: HHSDKVideo.MapContext? = nil) + public init(JSONArray: [[Swift.String : Any]], context: HHSDKVideo.MapContext? = nil) + public func toJSON() -> [[Swift.String : Any]] + public func toJSONString(prettyPrint: Swift.Bool = false) -> Swift.String? +} +extension Set where Element : HHSDKVideo.BaseMappable { + public init?(JSONString: Swift.String, context: HHSDKVideo.MapContext? = nil) + public init?(JSONArray: [[Swift.String : Any]], context: HHSDKVideo.MapContext? = nil) + public func toJSON() -> [[Swift.String : Any]] + public func toJSONString(prettyPrint: Swift.Bool = false) -> Swift.String? +} +public enum MappingType { + case fromJSON + case toJSON + public static func == (a: HHSDKVideo.MappingType, b: HHSDKVideo.MappingType) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +final public class Mapper<N> where N : HHSDKVideo.BaseMappable { + final public var context: HHSDKVideo.MapContext? + final public var shouldIncludeNilValues: Swift.Bool + public init(context: HHSDKVideo.MapContext? 
= nil, shouldIncludeNilValues: Swift.Bool = false) + final public func map(JSONObject: Any?, toObject object: N) -> N + final public func map(JSONString: Swift.String, toObject object: N) -> N + final public func map(JSON: [Swift.String : Any], toObject object: N) -> N + final public func map(JSONString: Swift.String) -> N? + final public func map(JSONObject: Any?) -> N? + final public func map(JSON: [Swift.String : Any]) -> N? + final public func mapArray(JSONString: Swift.String) -> [N]? + final public func mapArray(JSONObject: Any?) -> [N]? + final public func mapArray(JSONArray: [[Swift.String : Any]]) -> [N] + final public func mapDictionary(JSONString: Swift.String) -> [Swift.String : N]? + final public func mapDictionary(JSONObject: Any?) -> [Swift.String : N]? + final public func mapDictionary(JSON: [Swift.String : [Swift.String : Any]]) -> [Swift.String : N]? + final public func mapDictionary(JSONObject: Any?, toDictionary dictionary: [Swift.String : N]) -> [Swift.String : N] + final public func mapDictionary(JSON: [Swift.String : [Swift.String : Any]], toDictionary dictionary: [Swift.String : N]) -> [Swift.String : N] + final public func mapDictionaryOfArrays(JSONObject: Any?) -> [Swift.String : [N]]? + final public func mapDictionaryOfArrays(JSON: [Swift.String : [[Swift.String : Any]]]) -> [Swift.String : [N]]? + final public func mapArrayOfArrays(JSONObject: Any?) -> [[N]]? + public static func parseJSONStringIntoDictionary(JSONString: Swift.String) -> [Swift.String : Any]? + public static func parseJSONString(JSONString: Swift.String) -> Any? + @objc deinit +} +extension Mapper { + final public func map(JSONfile: Swift.String) -> N? + final public func mapArray(JSONfile: Swift.String) -> [N]? 
+} +extension Mapper { + final public func toJSON(_ object: N) -> [Swift.String : Any] + final public func toJSONArray(_ array: [N]) -> [[Swift.String : Any]] + final public func toJSONDictionary(_ dictionary: [Swift.String : N]) -> [Swift.String : [Swift.String : Any]] + final public func toJSONDictionaryOfArrays(_ dictionary: [Swift.String : [N]]) -> [Swift.String : [[Swift.String : Any]]] + final public func toJSONString(_ object: N, prettyPrint: Swift.Bool = false) -> Swift.String? + final public func toJSONString(_ array: [N], prettyPrint: Swift.Bool = false) -> Swift.String? + public static func toJSONString(_ JSONObject: Any, prettyPrint: Swift.Bool) -> Swift.String? + public static func toJSONData(_ JSONObject: Any, options: Foundation.JSONSerialization.WritingOptions) -> Foundation.Data? +} +extension Mapper where N : Swift.Hashable { + final public func mapSet(JSONString: Swift.String) -> Swift.Set<N>? + final public func mapSet(JSONObject: Any?) -> Swift.Set<N>? + final public func mapSet(JSONArray: [[Swift.String : Any]]) -> Swift.Set<N> + final public func toJSONSet(_ set: Swift.Set<N>) -> [[Swift.String : Any]] + final public func toJSONString(_ set: Swift.Set<N>, prettyPrint: Swift.Bool = false) -> Swift.String? +} +final public class MD5 { + public init() + final public func calculate(for bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension MD5 : HHSDKVideo.Updatable { + final public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> +} +public struct NotifyInfo { + public init() + public var fromAccountId: Swift.String? + public var requestId: Swift.String? + public var channelId: Swift.String? + public var customInfo: Swift.String? 
+} +open class NSDecimalNumberTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.NSDecimalNumber + public typealias JSON = Swift.String + public init() + open func transformFromJSON(_ value: Any?) -> Foundation.NSDecimalNumber? + open func transformToJSON(_ value: Foundation.NSDecimalNumber?) -> Swift.String? + @objc deinit +} +final public class OCB : HHSDKVideo.BlockMode { + public enum Mode { + case combined + case detached + public static func == (a: HHSDKVideo.OCB.Mode, b: HHSDKVideo.OCB.Mode) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + final public let options: HHSDKVideo.BlockModeOption + public enum Error : Swift.Error { + case invalidNonce + case fail + public static func == (a: HHSDKVideo.OCB.Error, b: HHSDKVideo.OCB.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + final public var authenticationTag: Swift.Array<Swift.UInt8>? + public init(nonce N: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? = nil, tagLength: Swift.Int = 16, mode: HHSDKVideo.OCB.Mode = .detached) + convenience public init(nonce N: Swift.Array<Swift.UInt8>, authenticationTag: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? 
= nil, mode: HHSDKVideo.OCB.Mode = .detached) + final public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker + @objc deinit +} +public struct OFB : HHSDKVideo.BlockMode { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.OFB.Error, b: HHSDKVideo.OFB.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +infix operator <- : DefaultPrecedence +infix operator >>> : DefaultPrecedence +public func <- <T>(left: inout T, right: HHSDKVideo.Map) +public func >>> <T>(left: T, right: HHSDKVideo.Map) +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) +public func >>> <T>(left: T?, right: HHSDKVideo.Map) +public func <- <T>(left: inout T, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: T, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: T?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Dictionary<Swift.String, T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Dictionary<Swift.String, T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Dictionary<Swift.String, T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Dictionary<Swift.String, T>?, right: 
HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Dictionary<Swift.String, [T]>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Dictionary<Swift.String, [T]>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Dictionary<Swift.String, [T]>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Dictionary<Swift.String, [T]>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Array<T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Array<T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Array<T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Array<T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Array<Swift.Array<T>>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Array<Swift.Array<T>>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Array<Swift.Array<T>>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Array<Swift.Array<T>>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Set<T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable, T : Swift.Hashable +public func >>> <T>(left: Swift.Set<T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable, T : Swift.Hashable +public func <- <T>(left: inout Swift.Set<T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable, T : Swift.Hashable +public func >>> <T>(left: Swift.Set<T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable, T : Swift.Hashable +public struct OrderModel : HHSDKVideo.Mappable { + public var orderid: Swift.String? 
+ public var price: Swift.Float? + public var buyServiceName: Swift.String? + public var expertId: Swift.String? + public var expertName: Swift.String? + public var patientName: Swift.String? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +public protocol PaddingProtocol { + func add(to: Swift.Array<Swift.UInt8>, blockSize: Swift.Int) -> Swift.Array<Swift.UInt8> + func remove(from: Swift.Array<Swift.UInt8>, blockSize: Swift.Int?) -> Swift.Array<Swift.UInt8> +} +public enum Padding : HHSDKVideo.PaddingProtocol { + case noPadding, zeroPadding, pkcs7, pkcs5, iso78164 + public func add(to: Swift.Array<Swift.UInt8>, blockSize: Swift.Int) -> Swift.Array<Swift.UInt8> + public func remove(from: Swift.Array<Swift.UInt8>, blockSize: Swift.Int?) -> Swift.Array<Swift.UInt8> + public static func == (a: HHSDKVideo.Padding, b: HHSDKVideo.Padding) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +extension PKCS5 { + public struct PBKDF1 { + public enum Error : Swift.Error { + case invalidInput + case derivedKeyTooLong + public static func == (a: HHSDKVideo.PKCS5.PBKDF1.Error, b: HHSDKVideo.PKCS5.PBKDF1.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public enum Variant { + case md5, sha1 + public static func == (a: HHSDKVideo.PKCS5.PBKDF1.Variant, b: HHSDKVideo.PKCS5.PBKDF1.Variant) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(password: Swift.Array<Swift.UInt8>, salt: Swift.Array<Swift.UInt8>, variant: HHSDKVideo.PKCS5.PBKDF1.Variant = .sha1, iterations: Swift.Int = 4096, keyLength: Swift.Int? 
= nil) throws + public func calculate() -> Swift.Array<Swift.UInt8> + } +} +extension PKCS5 { + public struct PBKDF2 { + public enum Error : Swift.Error { + case invalidInput + case derivedKeyTooLong + public static func == (a: HHSDKVideo.PKCS5.PBKDF2.Error, b: HHSDKVideo.PKCS5.PBKDF2.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(password: Swift.Array<Swift.UInt8>, salt: Swift.Array<Swift.UInt8>, iterations: Swift.Int = 4096, keyLength: Swift.Int? = nil, variant: HHSDKVideo.HMAC.Variant = .sha256) throws + public func calculate() throws -> Swift.Array<Swift.UInt8> + } +} +public struct PCBC : HHSDKVideo.BlockMode { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.PCBC.Error, b: HHSDKVideo.PCBC.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +@objc @_hasMissingDesignatedInitializers public class HHPermission : ObjectiveC.NSObject { + public static let locationAlways: HHSDKVideo.HHPermission + public static let locationWhenInUse: HHSDKVideo.HHPermission + public static let microphone: HHSDKVideo.HHPermission + public static let camera: HHSDKVideo.HHPermission + public static let photos: HHSDKVideo.HHPermission + final public let type: HHSDKVideo.HHBasePermissionType + public var status: HHSDKVideo.PermissionStatus { + get + } + public var presentPrePermissionAlert: Swift.Bool + public var prePermissionAlert: HHSDKVideo.PermissionAlert { + get + set + } + public var presentDeniedAlert: Swift.Bool + @objc override dynamic public init() + @objc 
deinit +} +extension HHPermission { + @objc override dynamic public var description: Swift.String { + @objc get + } + @objc override dynamic public var debugDescription: Swift.String { + @objc get + } +} +@_hasMissingDesignatedInitializers public class PermissionAlert { + @objc deinit +} +public enum PermissionStatus : Swift.String { + case authorized + case denied + case disabled + case notDetermined + case limited + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +extension PermissionStatus : Swift.CustomStringConvertible { + public var description: Swift.String { + get + } +} +@objc public enum HHBasePermissionType : Swift.Int { + case locationAlways + case locationWhenInUse + case microphone + case camera + case photos + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +extension HHBasePermissionType : Swift.CustomStringConvertible { + public var description: Swift.String { + get + } +} +@_hasMissingDesignatedInitializers public class PhotoPickerConfig { + public static let `default`: HHSDKVideo.PhotoPickerConfig + public var miniPicTip: Swift.Bool + public var mMaxSelectCount: Swift.Int + public var mDetailColumnCount: Swift.Int + @objc deinit +} +public enum PKCS5 { +} +public enum PKCS7 { +} +final public class Poly1305 : HHSDKVideo.CryptoAuthenticator { + public enum Error : Swift.Error { + case authenticateError + public static func == (a: HHSDKVideo.Poly1305.Error, b: HHSDKVideo.Poly1305.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public static let blockSize: Swift.Int + public init(key: Swift.Array<Swift.UInt8>) + final public func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + @objc deinit +} +@_hasMissingDesignatedInitializers public class PostBodyEncoding { + @objc deinit +} +final 
public class Rabbit { + public enum Error : Swift.Error { + case invalidKeyOrInitializationVector + public static func == (a: HHSDKVideo.Rabbit.Error, b: HHSDKVideo.Rabbit.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public static let ivSize: Swift.Int + public static let keySize: Swift.Int + public static let blockSize: Swift.Int + final public var keySize: Swift.Int { + get + } + convenience public init(key: Swift.Array<Swift.UInt8>) throws + public init(key: Swift.Array<Swift.UInt8>, iv: Swift.Array<Swift.UInt8>?) throws + @objc deinit +} +extension Rabbit : HHSDKVideo.Cipher { + final public func encrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + final public func decrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +extension Rabbit { + convenience public init(key: Swift.String) throws + convenience public init(key: Swift.String, iv: Swift.String) throws +} +public enum ReachabilityError : Swift.Error { + case FailedToCreateWithAddress(Darwin.sockaddr_in) + case FailedToCreateWithHostname(Swift.String) + case UnableToSetCallback + case UnableToSetDispatchQueue +} +public let ReachabilityChangedNotification: Foundation.NSNotification.Name +public class Reachability { + public typealias NetworkReachable = (HHSDKVideo.Reachability) -> () + public typealias NetworkUnreachable = (HHSDKVideo.Reachability) -> () + public enum NetworkStatus : Swift.CustomStringConvertible { + case notReachable, reachableViaWiFi, reachableViaWWAN + public var description: Swift.String { + get + } + public static func == (a: HHSDKVideo.Reachability.NetworkStatus, b: HHSDKVideo.Reachability.NetworkStatus) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public var whenReachable: HHSDKVideo.Reachability.NetworkReachable? 
+ public var whenUnreachable: HHSDKVideo.Reachability.NetworkUnreachable? + public var reachableOnWWAN: Swift.Bool + public var currentReachabilityString: Swift.String { + get + } + public var currentReachabilityStatus: HHSDKVideo.Reachability.NetworkStatus { + get + } + required public init(reachabilityRef: SystemConfiguration.SCNetworkReachability) + convenience public init?(hostname: Swift.String) + convenience public init?() + @objc deinit +} +extension Reachability { + public func startNotifier() throws + public func stopNotifier() + public var isReachable: Swift.Bool { + get + } + public var isReachableViaWWAN: Swift.Bool { + get + } + public var isReachableViaWiFi: Swift.Bool { + get + } + public var description: Swift.String { + get + } +} +public enum RecordImgType : Swift.Int { + case medic + case check + case yingXiang + case video + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +public struct RemoteData : HHSDKVideo.Mappable { + public var changeDoctorTime: Swift.Int + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +final public class Scrypt { + public init(password: Swift.Array<Swift.UInt8>, salt: Swift.Array<Swift.UInt8>, dkLen: Swift.Int, N: Swift.Int, r: Swift.Int, p: Swift.Int) throws + final public func calculate() throws -> [Swift.UInt8] + @objc deinit +} +public struct SDKConfigModel : HHSDKVideo.Mappable { + public var cardIdActiveShow: Swift.Int + public var changeDoctorTime: Swift.Int? 
+ public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +final public class SHA1 { + public init() + final public func calculate(for bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension SHA1 : HHSDKVideo.Updatable { + @discardableResult + final public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> +} +final public class SHA2 { + public enum Variant : Swift.RawRepresentable { + case sha224, sha256, sha384, sha512 + public var digestLength: Swift.Int { + get + } + public var blockSize: Swift.Int { + get + } + public typealias RawValue = Swift.Int + public var rawValue: HHSDKVideo.SHA2.Variant.RawValue { + get + } + public init?(rawValue: HHSDKVideo.SHA2.Variant.RawValue) + } + public init(variant: HHSDKVideo.SHA2.Variant) + final public func calculate(for bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension SHA2 : HHSDKVideo.Updatable { + final public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> +} +final public class SHA3 { + final public let blockSize: Swift.Int + final public let digestLength: Swift.Int + final public let markByte: Swift.UInt8 + public enum Variant { + case sha224, sha256, sha384, sha512, keccak224, keccak256, keccak384, keccak512 + public var outputLength: Swift.Int { + get + } + public static func == (a: HHSDKVideo.SHA3.Variant, b: HHSDKVideo.SHA3.Variant) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(variant: HHSDKVideo.SHA3.Variant) + final public func calculate(for bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension SHA3 : HHSDKVideo.Updatable { + final public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> 
Swift.Array<Swift.UInt8> +} +extension String { + public var bytes: Swift.Array<Swift.UInt8> { + get + } + public func md5() -> Swift.String + public func sha1() -> Swift.String + public func sha224() -> Swift.String + public func sha256() -> Swift.String + public func sha384() -> Swift.String + public func sha512() -> Swift.String + public func sha3(_ variant: HHSDKVideo.SHA3.Variant) -> Swift.String + public func crc32(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.String + public func crc32c(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.String + public func crc16(seed: Swift.UInt16? = nil) -> Swift.String + public func encrypt(cipher: HHSDKVideo.Cipher) throws -> Swift.String + public func encryptToBase64(cipher: HHSDKVideo.Cipher) throws -> Swift.String? + public func authenticate<A>(with authenticator: A) throws -> Swift.String where A : HHSDKVideo.CryptoAuthenticator +} +extension String { + public func decryptBase64ToString(cipher: HHSDKVideo.Cipher) throws -> Swift.String + public func decryptBase64(cipher: HHSDKVideo.Cipher) throws -> Swift.Array<Swift.UInt8> +} +@_hasMissingDesignatedInitializers final public class SwiftEntryKit { + public enum EntryDismissalDescriptor { + case specific(entryName: Swift.String) + case prioritizedLowerOrEqualTo(priority: HHSDKVideo.EKAttributes.Precedence.Priority) + case enqueued + case all + case displayed + } + public enum RollbackWindow { + case main + case custom(window: UIKit.UIWindow) + } + public typealias DismissCompletionHandler = () -> Swift.Void + final public class var window: UIKit.UIWindow? { + get + } + final public class var isCurrentlyDisplaying: Swift.Bool { + get + } + final public class func isCurrentlyDisplaying(entryNamed name: Swift.String? = nil) -> Swift.Bool + final public class var isQueueEmpty: Swift.Bool { + get + } + final public class func queueContains(entryNamed name: Swift.String? 
= nil) -> Swift.Bool + final public class func display(entry view: UIKit.UIView, using attributes: HHSDKVideo.EKAttributes, presentInsideKeyWindow: Swift.Bool = false, rollbackWindow: HHSDKVideo.SwiftEntryKit.RollbackWindow = .main) + final public class func display(entry viewController: UIKit.UIViewController, using attributes: HHSDKVideo.EKAttributes, presentInsideKeyWindow: Swift.Bool = false, rollbackWindow: HHSDKVideo.SwiftEntryKit.RollbackWindow = .main) + final public class func transform(to view: UIKit.UIView) + final public class func dismiss(_ descriptor: HHSDKVideo.SwiftEntryKit.EntryDismissalDescriptor = .displayed, with completion: HHSDKVideo.SwiftEntryKit.DismissCompletionHandler? = nil) + final public class func layoutIfNeeded() + @objc deinit +} +open class TransformOf<ObjectType, JSONType> : HHSDKVideo.TransformType { + public typealias Object = ObjectType + public typealias JSON = JSONType + public init(fromJSON: @escaping (JSONType?) -> ObjectType?, toJSON: @escaping (ObjectType?) -> JSONType?) + open func transformFromJSON(_ value: Any?) -> ObjectType? + open func transformToJSON(_ value: ObjectType?) -> JSONType? 
+ @objc deinit +} +public func <- <Transform>(left: inout Transform.Object, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: Transform.Object, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout Transform.Object?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: Transform.Object?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [Transform.Object], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [Transform.Object], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [Transform.Object]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [Transform.Object]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [Swift.String : Transform.Object], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [Swift.String : Transform.Object], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [Swift.String : Transform.Object]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [Swift.String : Transform.Object]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout Transform.Object, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Transform.Object, right: (HHSDKVideo.Map, Transform)) where Transform 
: HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Transform.Object?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Transform.Object?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Dictionary<Swift.String, Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Dictionary<Swift.String, Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Dictionary<Swift.String, Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Dictionary<Swift.String, Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Dictionary<Swift.String, [Transform.Object]>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Dictionary<Swift.String, [Transform.Object]>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Dictionary<Swift.String, [Transform.Object]>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Dictionary<Swift.String, 
[Transform.Object]>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Array<Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Array<Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Array<Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Array<Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout [[Transform.Object]], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [[Transform.Object]], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [[Transform.Object]]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [[Transform.Object]]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout Swift.Set<Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable, Transform.Object : Swift.Hashable +public func >>> <Transform>(left: Swift.Set<Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable, Transform.Object : Swift.Hashable +public func <- <Transform>(left: inout Swift.Set<Transform.Object>?, right: 
(HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable, Transform.Object : Swift.Hashable +public func >>> <Transform>(left: Swift.Set<Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable, Transform.Object : Swift.Hashable +public protocol TransformType { + associatedtype Object + associatedtype JSON + func transformFromJSON(_ value: Any?) -> Self.Object? + func transformToJSON(_ value: Self.Object?) -> Self.JSON? +} +extension UIImage { + public class func gifImageWithData(_ data: Foundation.Data) -> UIKit.UIImage? + public class func gifImageWithURL(_ gifUrl: Swift.String) -> UIKit.UIImage? + public class func gifImageWithName(_ name: Swift.String) -> UIKit.UIImage? +} +public protocol _UInt8Type { +} +extension UInt8 : HHSDKVideo._UInt8Type { +} +extension UInt8 { + public func bits() -> [HHSDKVideo.Bit] + public func bits() -> Swift.String +} +public protocol Updatable { + mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool) throws -> Swift.Array<Swift.UInt8> + mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws +} +extension Updatable { + public mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws + public mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> + public mutating func update(withBytes bytes: Swift.Array<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> + public mutating func update(withBytes bytes: Swift.Array<Swift.UInt8>, isLast: Swift.Bool = false, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws + public mutating func finish(withBytes bytes: 
Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + public mutating func finish(withBytes bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + public mutating func finish() throws -> Swift.Array<Swift.UInt8> + public mutating func finish(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws + public mutating func finish(withBytes bytes: Swift.Array<Swift.UInt8>, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws + public mutating func finish(output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws +} +open class URLTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.URL + public typealias JSON = Swift.String + public init(shouldEncodeURLString: Swift.Bool = false, allowedCharacterSet: Foundation.CharacterSet = .urlQueryAllowed) + open func transformFromJSON(_ value: Any?) -> Foundation.URL? + open func transformToJSON(_ value: Foundation.URL?) -> Swift.String? + @objc deinit +} +public struct UserApi { +} +@objc @_inheritsConvenienceInitializers @_hasMissingDesignatedInitializers public class VCManager : ObjectiveC.NSObject { + public static let `default`: HHSDKVideo.VCManager + public var waitModel: HHSDKVideo.HHWaitDoctorModel? 
+ @objc deinit +} +extension VCManager { + public func onReceiveCall(callee: Swift.String, caller: Swift.String, orderId: Swift.String) + public func isInBusy() -> Swift.Bool +} +extension VCManager { + public func onReceiveInvite(docModel: HHSDKVideo.HHInviteDocModel) + public static func onUserReject(_ fromUuid: Swift.String) + public static func onCancelInvite(_ fromUuid: Swift.String) + public static func changeVideo(_ isVoice: Swift.Bool) +} +extension VCManager { + public func showEduBoard(groupId: Swift.String, orderId: Swift.String) + public func closeEduBoard() +} +public struct VideoApi { +} +public enum HHIMCmd : Swift.String { + case audio + case video + case closeVideo + case openVideo + case transfor + case accept + case call + case reject + case cancelCall + case pcCancel + case phoneCall + case busy + case waiting + case waitingTip + case agentTrans + case web_transform + case callWeb + case SWITCH_TO_CAMERA_wmp + case cancelCallWeb + case call_invite + case reject_invite + case cancel_invite + case exit_camera + case enter_camera + case conference_begin + case conference_end + case user_certification + case cancel_user_certification + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +public class WeakArray<T> { + public func add(_ delegate: T) + public func remove(_ delegate: T) + public func excute(_ block: @escaping ((T?) -> Swift.Void)) + public init() + @objc deinit +} +@objc public class ZLAlbumListModel : ObjectiveC.NSObject { + final public let title: Swift.String + public var count: Swift.Int { + get + } + public var result: Photos.PHFetchResult<Photos.PHAsset> + final public let collection: Photos.PHAssetCollection + final public let option: Photos.PHFetchOptions + final public let isCameraRoll: Swift.Bool + public var headImageAsset: Photos.PHAsset? 
{ + get + } + public var models: [HHSDKVideo.ZLPhotoModel] + public init(title: Swift.String, result: Photos.PHFetchResult<Photos.PHAsset>, collection: Photos.PHAssetCollection, option: Photos.PHFetchOptions, isCameraRoll: Swift.Bool) + public func refetchPhotos() + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class ZLCameraConfiguration : ObjectiveC.NSObject { + @objc public enum CaptureSessionPreset : Swift.Int { + case cif352x288 + case vga640x480 + case hd1280x720 + case hd1920x1080 + case hd4K3840x2160 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public enum FocusMode : Swift.Int { + case autoFocus + case continuousAutoFocus + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public enum ExposureMode : Swift.Int { + case autoExpose + case continuousAutoExposure + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public enum FlashMode : Swift.Int { + case auto + case on + case off + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public enum VideoExportType : Swift.Int { + case mov + case mp4 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public var sessionPreset: HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset + @objc public var focusMode: HHSDKVideo.ZLCameraConfiguration.FocusMode + @objc public var exposureMode: HHSDKVideo.ZLCameraConfiguration.ExposureMode + @objc public var flashMode: HHSDKVideo.ZLCameraConfiguration.FlashMode + @objc public var videoExportType: HHSDKVideo.ZLCameraConfiguration.VideoExportType + @objc override dynamic public init() + @objc deinit +} +extension 
ZLCameraConfiguration { + @discardableResult + public func sessionPreset(_ sessionPreset: HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset) -> HHSDKVideo.ZLCameraConfiguration + @discardableResult + public func focusMode(_ mode: HHSDKVideo.ZLCameraConfiguration.FocusMode) -> HHSDKVideo.ZLCameraConfiguration + @discardableResult + public func exposureMode(_ mode: HHSDKVideo.ZLCameraConfiguration.ExposureMode) -> HHSDKVideo.ZLCameraConfiguration + @discardableResult + public func flashMode(_ mode: HHSDKVideo.ZLCameraConfiguration.FlashMode) -> HHSDKVideo.ZLCameraConfiguration + @discardableResult + public func videoExportType(_ type: HHSDKVideo.ZLCameraConfiguration.VideoExportType) -> HHSDKVideo.ZLCameraConfiguration +} +@objc open class ZLCustomCamera : UIKit.UIViewController, QuartzCore.CAAnimationDelegate { + @objc public var takeDoneBlock: ((UIKit.UIImage?, Foundation.URL?) -> Swift.Void)? + @objc public var cancelBlock: (() -> Swift.Void)? + public var tipsLabel: UIKit.UILabel { + get + set + } + public var bottomView: UIKit.UIView { + get + set + } + public var largeCircleView: UIKit.UIVisualEffectView { + get + set + } + public var smallCircleView: UIKit.UIView { + get + set + } + public var animateLayer: QuartzCore.CAShapeLayer { + get + set + } + public var retakeBtn: HHSDKVideo.ZLEnlargeButton { + get + set + } + public var doneBtn: UIKit.UIButton { + get + set + } + public var dismissBtn: HHSDKVideo.ZLEnlargeButton { + get + set + } + public var switchCameraBtn: HHSDKVideo.ZLEnlargeButton { + get + set + } + public var focusCursorView: UIKit.UIImageView { + get + set + } + public var takedImageView: UIKit.UIImageView { + get + set + } + @objc override dynamic public var supportedInterfaceOrientations: UIKit.UIInterfaceOrientationMask { + @objc get + } + @objc override dynamic public var prefersStatusBarHidden: Swift.Bool { + @objc get + } + @objc deinit + @objc dynamic public init() + @objc required dynamic public init?(coder: Foundation.NSCoder) + 
@objc override dynamic open func viewDidLoad() + @objc override dynamic open func viewWillAppear(_ animated: Swift.Bool) + @objc override dynamic open func viewDidAppear(_ animated: Swift.Bool) + @objc override dynamic open func viewWillDisappear(_ animated: Swift.Bool) + @objc override dynamic open func viewDidDisappear(_ animated: Swift.Bool) + @objc override dynamic open func viewDidLayoutSubviews() + @objc public func animationDidStop(_ anim: QuartzCore.CAAnimation, finished flag: Swift.Bool) + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) +} +extension ZLCustomCamera : AVFoundation.AVCapturePhotoCaptureDelegate { + @objc dynamic public func photoOutput(_ output: AVFoundation.AVCapturePhotoOutput, willCapturePhotoFor resolvedSettings: AVFoundation.AVCaptureResolvedPhotoSettings) + @objc dynamic public func photoOutput(_ output: AVFoundation.AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CoreMedia.CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CoreMedia.CMSampleBuffer?, resolvedSettings: AVFoundation.AVCaptureResolvedPhotoSettings, bracketSettings: AVFoundation.AVCaptureBracketedStillImageSettings?, error: Swift.Error?) +} +extension ZLCustomCamera : AVFoundation.AVCaptureFileOutputRecordingDelegate { + @objc dynamic public func fileOutput(_ output: AVFoundation.AVCaptureFileOutput, didStartRecordingTo fileURL: Foundation.URL, from connections: [AVFoundation.AVCaptureConnection]) + @objc dynamic public func fileOutput(_ output: AVFoundation.AVCaptureFileOutput, didFinishRecordingTo outputFileURL: Foundation.URL, from connections: [AVFoundation.AVCaptureConnection], error: Swift.Error?) 
+} +extension ZLCustomCamera : UIKit.UIGestureRecognizerDelegate { + @objc dynamic public func gestureRecognizer(_ gestureRecognizer: UIKit.UIGestureRecognizer, shouldRecognizeSimultaneouslyWith otherGestureRecognizer: UIKit.UIGestureRecognizer) -> Swift.Bool +} +@objc public protocol ZLImageStickerContainerDelegate { + @objc var selectImageBlock: ((UIKit.UIImage) -> Swift.Void)? { get set } + @objc var hideBlock: (() -> Swift.Void)? { get set } + @objc func show(in view: UIKit.UIView) +} +@objc @_inheritsConvenienceInitializers public class ZLEditImageConfiguration : ObjectiveC.NSObject { + @objc public enum EditTool : Swift.Int, Swift.CaseIterable { + case draw + case clip + case imageSticker + case textSticker + case mosaic + case filter + case adjust + public init?(rawValue: Swift.Int) + public typealias AllCases = [HHSDKVideo.ZLEditImageConfiguration.EditTool] + public typealias RawValue = Swift.Int + public static var allCases: [HHSDKVideo.ZLEditImageConfiguration.EditTool] { + get + } + public var rawValue: Swift.Int { + get + } + } + @objc public enum AdjustTool : Swift.Int, Swift.CaseIterable { + case brightness + case contrast + case saturation + public init?(rawValue: Swift.Int) + public typealias AllCases = [HHSDKVideo.ZLEditImageConfiguration.AdjustTool] + public typealias RawValue = Swift.Int + public static var allCases: [HHSDKVideo.ZLEditImageConfiguration.AdjustTool] { + get + } + public var rawValue: Swift.Int { + get + } + } + public var tools: [HHSDKVideo.ZLEditImageConfiguration.EditTool] { + get + set + } + @objc public var tools_objc: [Swift.Int] { + @objc get + @objc set + } + @objc public var drawColors: [UIKit.UIColor] { + @objc get + @objc set + } + @objc public var defaultDrawColor: UIKit.UIColor + @objc public var clipRatios: [HHSDKVideo.ZLImageClipRatio] { + @objc get + @objc set + } + @objc public var textStickerTextColors: [UIKit.UIColor] { + @objc get + @objc set + } + @objc public var textStickerDefaultTextColor: UIKit.UIColor + 
@objc public var filters: [HHSDKVideo.ZLFilter] { + @objc get + @objc set + } + @objc public var imageStickerContainerView: (UIKit.UIView & HHSDKVideo.ZLImageStickerContainerDelegate)? + public var adjustTools: [HHSDKVideo.ZLEditImageConfiguration.AdjustTool] { + get + set + } + @objc public var adjustTools_objc: [Swift.Int] { + @objc get + @objc set + } + @objc public var impactFeedbackWhenAdjustSliderValueIsZero: Swift.Bool + @objc public var impactFeedbackStyle: UIKit.UIImpactFeedbackGenerator.FeedbackStyle + @objc override dynamic public init() + @objc deinit +} +extension ZLEditImageConfiguration { + @discardableResult + public func tools(_ tools: [HHSDKVideo.ZLEditImageConfiguration.EditTool]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func drawColors(_ colors: [UIKit.UIColor]) -> HHSDKVideo.ZLEditImageConfiguration + public func defaultDrawColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func clipRatios(_ ratios: [HHSDKVideo.ZLImageClipRatio]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func textStickerTextColors(_ colors: [UIKit.UIColor]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func textStickerDefaultTextColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func filters(_ filters: [HHSDKVideo.ZLFilter]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func imageStickerContainerView(_ view: (UIKit.UIView & HHSDKVideo.ZLImageStickerContainerDelegate)?) 
-> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func adjustTools(_ tools: [HHSDKVideo.ZLEditImageConfiguration.AdjustTool]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func impactFeedbackWhenAdjustSliderValueIsZero(_ value: Swift.Bool) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func impactFeedbackStyle(_ style: UIKit.UIImpactFeedbackGenerator.FeedbackStyle) -> HHSDKVideo.ZLEditImageConfiguration +} +@objc public class ZLImageClipRatio : ObjectiveC.NSObject { + public var title: Swift.String + final public let whRatio: CoreGraphics.CGFloat + @objc public init(title: Swift.String, whRatio: CoreGraphics.CGFloat, isCircle: Swift.Bool = false) + @objc override dynamic public init() + @objc deinit +} +extension ZLImageClipRatio { + @objc public static let custom: HHSDKVideo.ZLImageClipRatio + @objc public static let circle: HHSDKVideo.ZLImageClipRatio + @objc public static let wh1x1: HHSDKVideo.ZLImageClipRatio + @objc public static let wh3x4: HHSDKVideo.ZLImageClipRatio + @objc public static let wh4x3: HHSDKVideo.ZLImageClipRatio + @objc public static let wh2x3: HHSDKVideo.ZLImageClipRatio + @objc public static let wh3x2: HHSDKVideo.ZLImageClipRatio + @objc public static let wh9x16: HHSDKVideo.ZLImageClipRatio + @objc public static let wh16x9: HHSDKVideo.ZLImageClipRatio +} +@objc public class ZLEditImageModel : ObjectiveC.NSObject { + final public let drawPaths: [HHSDKVideo.ZLDrawPath] + final public let mosaicPaths: [HHSDKVideo.ZLMosaicPath] + final public let editRect: CoreGraphics.CGRect? + final public let angle: CoreGraphics.CGFloat + final public let brightness: Swift.Float + final public let contrast: Swift.Float + final public let saturation: Swift.Float + final public let selectRatio: HHSDKVideo.ZLImageClipRatio? + final public let selectFilter: HHSDKVideo.ZLFilter? + final public let textStickers: [(state: HHSDKVideo.ZLTextStickerState, index: Swift.Int)]? 
+ final public let imageStickers: [(state: HHSDKVideo.ZLImageStickerState, index: Swift.Int)]? + public init(drawPaths: [HHSDKVideo.ZLDrawPath], mosaicPaths: [HHSDKVideo.ZLMosaicPath], editRect: CoreGraphics.CGRect?, angle: CoreGraphics.CGFloat, brightness: Swift.Float, contrast: Swift.Float, saturation: Swift.Float, selectRatio: HHSDKVideo.ZLImageClipRatio?, selectFilter: HHSDKVideo.ZLFilter, textStickers: [(state: HHSDKVideo.ZLTextStickerState, index: Swift.Int)]?, imageStickers: [(state: HHSDKVideo.ZLImageStickerState, index: Swift.Int)]?) + @objc override dynamic public init() + @objc deinit +} +@objc open class ZLEditImageViewController : UIKit.UIViewController { + @objc public var drawColViewH: CoreGraphics.CGFloat + @objc public var filterColViewH: CoreGraphics.CGFloat + @objc public var adjustColViewH: CoreGraphics.CGFloat + @objc public var ashbinNormalBgColor: UIKit.UIColor + @objc public var cancelBtn: HHSDKVideo.ZLEnlargeButton { + @objc get + @objc set + } + @objc public var mainScrollView: UIKit.UIScrollView { + @objc get + @objc set + } + @objc public var topShadowView: UIKit.UIView { + @objc get + @objc set + } + @objc public var topShadowLayer: QuartzCore.CAGradientLayer { + @objc get + @objc set + } + @objc public var bottomShadowView: UIKit.UIView + @objc public var bottomShadowLayer: QuartzCore.CAGradientLayer + @objc public var doneBtn: UIKit.UIButton + @objc public var revokeBtn: UIKit.UIButton + @objc public var ashbinView: UIKit.UIView { + @objc get + @objc set + } + @objc public var ashbinImgView: UIKit.UIImageView { + @objc get + @objc set + } + @objc public var drawLineWidth: CoreGraphics.CGFloat + @objc public var mosaicLineWidth: CoreGraphics.CGFloat + @objc public var editFinishBlock: ((UIKit.UIImage, HHSDKVideo.ZLEditImageModel?) -> Swift.Void)? + @objc public var cancelEditBlock: (() -> Swift.Void)? 
+ @objc override dynamic public var prefersStatusBarHidden: Swift.Bool { + @objc get + } + @objc override dynamic public var supportedInterfaceOrientations: UIKit.UIInterfaceOrientationMask { + @objc get + } + @objc deinit + @objc public class func showEditImageVC(parentVC: UIKit.UIViewController?, animate: Swift.Bool = false, image: UIKit.UIImage, editModel: HHSDKVideo.ZLEditImageModel? = nil, cancel: (() -> Swift.Void)? = nil, completion: ((UIKit.UIImage, HHSDKVideo.ZLEditImageModel?) -> Swift.Void)?) + @objc public init(image: UIKit.UIImage, editModel: HHSDKVideo.ZLEditImageModel? = nil) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc override dynamic open func viewDidLoad() + @objc override dynamic open func viewDidLayoutSubviews() + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) +} +extension ZLEditImageViewController : UIKit.UIGestureRecognizerDelegate { + @objc dynamic public func gestureRecognizerShouldBegin(_ gestureRecognizer: UIKit.UIGestureRecognizer) -> Swift.Bool +} +extension ZLEditImageViewController : UIKit.UIScrollViewDelegate { + @objc dynamic public func viewForZooming(in scrollView: UIKit.UIScrollView) -> UIKit.UIView? 
+ @objc dynamic public func scrollViewDidZoom(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndZooming(_ scrollView: UIKit.UIScrollView, with view: UIKit.UIView?, atScale scale: CoreGraphics.CGFloat) + @objc dynamic public func scrollViewDidScroll(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndDragging(_ scrollView: UIKit.UIScrollView, willDecelerate decelerate: Swift.Bool) + @objc dynamic public func scrollViewDidEndDecelerating(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndScrollingAnimation(_ scrollView: UIKit.UIScrollView) +} +extension ZLEditImageViewController : UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegate { + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, didSelectItemAt indexPath: Foundation.IndexPath) +} +@objc @_hasMissingDesignatedInitializers public class ZLDrawPath : ObjectiveC.NSObject { + @objc override dynamic public init() + @objc deinit +} +@objc @_hasMissingDesignatedInitializers public class ZLMosaicPath : ObjectiveC.NSObject { + @objc override dynamic public init() + @objc deinit +} +@objc @_hasMissingDesignatedInitializers public class ZLEditVideoViewController : UIKit.UIViewController { + @objc public var editFinishBlock: ((Foundation.URL?) -> Swift.Void)? 
+ @objc override dynamic public var prefersStatusBarHidden: Swift.Bool { + @objc get + } + @objc override dynamic public var supportedInterfaceOrientations: UIKit.UIInterfaceOrientationMask { + @objc get + } + @objc deinit + @objc public init(avAsset: AVFoundation.AVAsset, animateDismiss: Swift.Bool = false) + @objc override dynamic public func viewDidLoad() + @objc override dynamic public func viewDidAppear(_ animated: Swift.Bool) + @objc override dynamic public func viewDidLayoutSubviews() + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) +} +extension ZLEditVideoViewController : UIKit.UIGestureRecognizerDelegate { + @objc dynamic public func gestureRecognizerShouldBegin(_ gestureRecognizer: UIKit.UIGestureRecognizer) -> Swift.Bool +} +extension ZLEditVideoViewController : UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegateFlowLayout { + @objc dynamic public func scrollViewDidScroll(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndDragging(_ scrollView: UIKit.UIScrollView, willDecelerate decelerate: Swift.Bool) + @objc dynamic public func scrollViewDidEndDecelerating(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, insetForSectionAt section: Swift.Int) -> UIKit.UIEdgeInsets + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, willDisplay cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) +} +@objc @_inheritsConvenienceInitializers public class ZLEnlargeButton : UIKit.UIButton 
{ + public var enlargeInsets: UIKit.UIEdgeInsets + public var enlargeInset: CoreGraphics.CGFloat { + get + set + } + @objc override dynamic public func point(inside point: CoreGraphics.CGPoint, with event: UIKit.UIEvent?) -> Swift.Bool + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +public typealias ZLFilterApplierType = ((UIKit.UIImage) -> UIKit.UIImage) +@objc public enum ZLFilterType : Swift.Int { + case normal + case chrome + case fade + case instant + case process + case transfer + case tone + case linear + case sepia + case mono + case noir + case tonal + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc public class ZLFilter : ObjectiveC.NSObject { + public var name: Swift.String + @objc public init(name: Swift.String, filterType: HHSDKVideo.ZLFilterType) + @objc public init(name: Swift.String, applier: HHSDKVideo.ZLFilterApplierType?) 
+ @objc override dynamic public init() + @objc deinit +} +extension ZLFilter { + @objc public static let all: [HHSDKVideo.ZLFilter] + @objc public static let normal: HHSDKVideo.ZLFilter + @objc public static let clarendon: HHSDKVideo.ZLFilter + @objc public static let nashville: HHSDKVideo.ZLFilter + @objc public static let apply1977: HHSDKVideo.ZLFilter + @objc public static let toaster: HHSDKVideo.ZLFilter + @objc public static let chrome: HHSDKVideo.ZLFilter + @objc public static let fade: HHSDKVideo.ZLFilter + @objc public static let instant: HHSDKVideo.ZLFilter + @objc public static let process: HHSDKVideo.ZLFilter + @objc public static let transfer: HHSDKVideo.ZLFilter + @objc public static let tone: HHSDKVideo.ZLFilter + @objc public static let linear: HHSDKVideo.ZLFilter + @objc public static let sepia: HHSDKVideo.ZLFilter + @objc public static let mono: HHSDKVideo.ZLFilter + @objc public static let noir: HHSDKVideo.ZLFilter + @objc public static let tonal: HHSDKVideo.ZLFilter +} +@objc public enum ZLURLType : Swift.Int { + case image + case video + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc @_hasMissingDesignatedInitializers public class ZLImagePreviewController : UIKit.UIViewController { + @objc public var longPressBlock: ((HHSDKVideo.ZLImagePreviewController?, UIKit.UIImage?, Swift.Int) -> Swift.Void)? + @objc public var doneBlock: (([Any]) -> Swift.Void)? + @objc public var videoHttpHeader: [Swift.String : Any]? + @objc override dynamic public var prefersStatusBarHidden: Swift.Bool { + @objc get + } + @objc override dynamic public var preferredStatusBarStyle: UIKit.UIStatusBarStyle { + @objc get + } + @objc public init(datas: [Any], index: Swift.Int = 0, showSelectBtn: Swift.Bool = true, showBottomView: Swift.Bool = true, urlType: ((Foundation.URL) -> HHSDKVideo.ZLURLType)? 
= nil, urlImageLoader: ((Foundation.URL, UIKit.UIImageView, @escaping (CoreGraphics.CGFloat) -> Swift.Void, @escaping () -> Swift.Void) -> Swift.Void)? = nil) + @objc override dynamic public func viewDidLoad() + @objc override dynamic public func viewWillAppear(_ animated: Swift.Bool) + @objc override dynamic public func viewDidAppear(_ animated: Swift.Bool) + @objc override dynamic public func viewDidLayoutSubviews() + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) + @objc deinit +} +extension ZLImagePreviewController { + @objc dynamic public func scrollViewDidScroll(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndDecelerating(_ scrollView: UIKit.UIScrollView) +} +extension ZLImagePreviewController : UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegateFlowLayout { + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, minimumInteritemSpacingForSectionAt section: Swift.Int) -> CoreGraphics.CGFloat + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, minimumLineSpacingForSectionAt section: Swift.Int) -> CoreGraphics.CGFloat + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, insetForSectionAt section: Swift.Int) -> UIKit.UIEdgeInsets + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, sizeForItemAt indexPath: Foundation.IndexPath) -> CoreGraphics.CGSize + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: 
Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, didEndDisplaying cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) +} +@objc @_hasMissingDesignatedInitializers public class ZLImageStickerState : ObjectiveC.NSObject { + @objc override dynamic public init() + @objc deinit +} +@objc public enum ZLLanguageType : Swift.Int { + case system + case chineseSimplified + case chineseTraditional + case english + case japanese + case french + case german + case russian + case vietnamese + case korean + case malay + case italian + case indonesian + case portuguese + case spanish + case turkish + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +public struct ZLLocalLanguageKey : Swift.Hashable { + public let rawValue: Swift.String + public init(rawValue: Swift.String) + public static let previewCamera: HHSDKVideo.ZLLocalLanguageKey + public static let previewCameraRecord: HHSDKVideo.ZLLocalLanguageKey + public static let previewAlbum: HHSDKVideo.ZLLocalLanguageKey + public static let cancel: HHSDKVideo.ZLLocalLanguageKey + public static let noPhotoTips: HHSDKVideo.ZLLocalLanguageKey + public static let loading: HHSDKVideo.ZLLocalLanguageKey + public static let hudLoading: HHSDKVideo.ZLLocalLanguageKey + public static let done: HHSDKVideo.ZLLocalLanguageKey + public static let ok: HHSDKVideo.ZLLocalLanguageKey + public static let timeout: HHSDKVideo.ZLLocalLanguageKey + public static let noPhotoLibratyAuthority: HHSDKVideo.ZLLocalLanguageKey + public static let noCameraAuthority: HHSDKVideo.ZLLocalLanguageKey + public static let noMicrophoneAuthority: HHSDKVideo.ZLLocalLanguageKey + public static let cameraUnavailable: HHSDKVideo.ZLLocalLanguageKey + public static let keepRecording: HHSDKVideo.ZLLocalLanguageKey + public static let gotoSettings: HHSDKVideo.ZLLocalLanguageKey + public static let 
photo: HHSDKVideo.ZLLocalLanguageKey + public static let originalPhoto: HHSDKVideo.ZLLocalLanguageKey + public static let back: HHSDKVideo.ZLLocalLanguageKey + public static let edit: HHSDKVideo.ZLLocalLanguageKey + public static let editFinish: HHSDKVideo.ZLLocalLanguageKey + public static let revert: HHSDKVideo.ZLLocalLanguageKey + public static let brightness: HHSDKVideo.ZLLocalLanguageKey + public static let contrast: HHSDKVideo.ZLLocalLanguageKey + public static let saturation: HHSDKVideo.ZLLocalLanguageKey + public static let preview: HHSDKVideo.ZLLocalLanguageKey + public static let notAllowMixSelect: HHSDKVideo.ZLLocalLanguageKey + public static let save: HHSDKVideo.ZLLocalLanguageKey + public static let saveImageError: HHSDKVideo.ZLLocalLanguageKey + public static let saveVideoError: HHSDKVideo.ZLLocalLanguageKey + public static let exceededMaxSelectCount: HHSDKVideo.ZLLocalLanguageKey + public static let exceededMaxVideoSelectCount: HHSDKVideo.ZLLocalLanguageKey + public static let lessThanMinVideoSelectCount: HHSDKVideo.ZLLocalLanguageKey + public static let longerThanMaxVideoDuration: HHSDKVideo.ZLLocalLanguageKey + public static let shorterThanMaxVideoDuration: HHSDKVideo.ZLLocalLanguageKey + public static let iCloudVideoLoadFaild: HHSDKVideo.ZLLocalLanguageKey + public static let imageLoadFailed: HHSDKVideo.ZLLocalLanguageKey + public static let customCameraTips: HHSDKVideo.ZLLocalLanguageKey + public static let customCameraTakePhotoTips: HHSDKVideo.ZLLocalLanguageKey + public static let customCameraRecordVideoTips: HHSDKVideo.ZLLocalLanguageKey + public static let minRecordTimeTips: HHSDKVideo.ZLLocalLanguageKey + public static let cameraRoll: HHSDKVideo.ZLLocalLanguageKey + public static let panoramas: HHSDKVideo.ZLLocalLanguageKey + public static let videos: HHSDKVideo.ZLLocalLanguageKey + public static let favorites: HHSDKVideo.ZLLocalLanguageKey + public static let timelapses: HHSDKVideo.ZLLocalLanguageKey + public static let recentlyAdded: 
HHSDKVideo.ZLLocalLanguageKey + public static let bursts: HHSDKVideo.ZLLocalLanguageKey + public static let slomoVideos: HHSDKVideo.ZLLocalLanguageKey + public static let selfPortraits: HHSDKVideo.ZLLocalLanguageKey + public static let screenshots: HHSDKVideo.ZLLocalLanguageKey + public static let depthEffect: HHSDKVideo.ZLLocalLanguageKey + public static let livePhotos: HHSDKVideo.ZLLocalLanguageKey + public static let animated: HHSDKVideo.ZLLocalLanguageKey + public static let myPhotoStream: HHSDKVideo.ZLLocalLanguageKey + public static let noTitleAlbumListPlaceholder: HHSDKVideo.ZLLocalLanguageKey + public static let unableToAccessAllPhotos: HHSDKVideo.ZLLocalLanguageKey + public static let textStickerRemoveTips: HHSDKVideo.ZLLocalLanguageKey + public func hash(into hasher: inout Swift.Hasher) + public static func == (a: HHSDKVideo.ZLLocalLanguageKey, b: HHSDKVideo.ZLLocalLanguageKey) -> Swift.Bool + public var hashValue: Swift.Int { + get + } +} +public typealias Second = Swift.Int +@objc @_inheritsConvenienceInitializers public class ZLPhotoConfiguration : ObjectiveC.NSObject { + @objc public class func `default`() -> HHSDKVideo.ZLPhotoConfiguration + @objc public class func resetConfiguration() + @objc public var sortAscending: Swift.Bool + @objc public var maxSelectCount: Swift.Int { + @objc get + @objc set + } + @objc public var maxVideoSelectCount: Swift.Int { + @objc get + @objc set + } + @objc public var minVideoSelectCount: Swift.Int { + @objc get + @objc set + } + @objc public var allowMixSelect: Swift.Bool + @objc public var maxPreviewCount: Swift.Int + @objc public var cellCornerRadio: CoreGraphics.CGFloat + @objc public var allowSelectImage: Swift.Bool + @objc public var allowSelectVideo: Swift.Bool + @objc public var allowSelectGif: Swift.Bool + @objc public var allowSelectLivePhoto: Swift.Bool + @objc public var allowTakePhotoInLibrary: Swift.Bool { + @objc get + @objc set + } + @objc public var allowEditImage: Swift.Bool { + @objc get + @objc set 
+ } + @objc public var allowEditVideo: Swift.Bool { + @objc get + @objc set + } + @objc public var animateSelectBtnWhenSelect: Swift.Bool + @objc public var selectBtnAnimationDuration: Swift.Double + @objc public var editAfterSelectThumbnailImage: Swift.Bool + @objc public var cropVideoAfterSelectThumbnail: Swift.Bool + @objc public var showClipDirectlyIfOnlyHasClipTool: Swift.Bool + @objc public var saveNewImageAfterEdit: Swift.Bool + @objc public var allowSlideSelect: Swift.Bool + @objc public var autoScrollWhenSlideSelectIsActive: Swift.Bool + @objc public var autoScrollMaxSpeed: CoreGraphics.CGFloat + @objc public var allowDragSelect: Swift.Bool + @objc public var allowSelectOriginal: Swift.Bool + @objc public var allowPreviewPhotos: Swift.Bool + @objc public var showPreviewButtonInAlbum: Swift.Bool + @objc public var showSelectCountOnDoneBtn: Swift.Bool + @objc public var columnCount: Swift.Int { + @objc get + @objc set + } + @objc public var maxEditVideoTime: Swift.Int + @objc public var maxSelectVideoDuration: Swift.Int + @objc public var minSelectVideoDuration: Swift.Int + @objc public var editImageConfiguration: HHSDKVideo.ZLEditImageConfiguration + @objc public var showCaptureImageOnTakePhotoBtn: Swift.Bool + @objc public var showSelectBtnWhenSingleSelect: Swift.Bool + @objc public var showSelectedMask: Swift.Bool + @objc public var showSelectedBorder: Swift.Bool + @objc public var showInvalidMask: Swift.Bool + @objc public var showSelectedIndex: Swift.Bool + @objc public var showSelectedPhotoPreview: Swift.Bool + @objc public var shouldAnialysisAsset: Swift.Bool + @objc public var timeout: Swift.Double + @objc public var languageType: HHSDKVideo.ZLLanguageType { + @objc get + @objc set + } + @objc public var useCustomCamera: Swift.Bool + @objc public var allowTakePhoto: Swift.Bool { + @objc get + @objc set + } + @objc public var allowRecordVideo: Swift.Bool { + @objc get + @objc set + } + @objc public var minRecordDuration: HHSDKVideo.Second { + @objc 
get + @objc set + } + @objc public var maxRecordDuration: HHSDKVideo.Second { + @objc get + @objc set + } + @objc public var cameraConfiguration: HHSDKVideo.ZLCameraConfiguration + @objc public var hudStyle: HHSDKVideo.ZLProgressHUD.HUDStyle + @objc public var canSelectAsset: ((Photos.PHAsset) -> Swift.Bool)? + @objc public var showAddPhotoButton: Swift.Bool + @objc public var showEnterSettingTips: Swift.Bool + @objc public var noAuthorityCallback: ((HHSDKVideo.ZLNoAuthorityType) -> Swift.Void)? + @objc public var operateBeforeDoneAction: ((UIKit.UIViewController, @escaping () -> Swift.Void) -> Swift.Void)? + @objc override dynamic public init() + @objc deinit +} +@objc public enum ZLNoAuthorityType : Swift.Int { + case library + case camera + case microphone + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +extension ZLPhotoConfiguration { + @discardableResult + public func sortAscending(_ ascending: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxSelectCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxVideoSelectCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func minVideoSelectCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowMixSelect(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxPreviewCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func cellCornerRadio(_ cornerRadio: CoreGraphics.CGFloat) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSelectImage(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func allowSelectVideo(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSelectGif(_ value: Swift.Bool) 
-> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSelectLivePhoto(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowTakePhotoInLibrary(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowEditImage(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowEditVideo(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func animateSelectBtnWhenSelect(_ animate: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func selectBtnAnimationDuration(_ duration: CoreFoundation.CFTimeInterval) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func editAfterSelectThumbnailImage(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func cropVideoAfterSelectThumbnail(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showClipDirectlyIfOnlyHasClipTool(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func saveNewImageAfterEdit(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSlideSelect(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func autoScrollWhenSlideSelectIsActive(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func autoScrollMaxSpeed(_ speed: CoreGraphics.CGFloat) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowDragSelect(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSelectOriginal(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowPreviewPhotos(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showPreviewButtonInAlbum(_ value: Swift.Bool) -> 
HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectCountOnDoneBtn(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func columnCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxEditVideoTime(_ second: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxSelectVideoDuration(_ duration: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func minSelectVideoDuration(_ duration: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func editImageConfiguration(_ configuration: HHSDKVideo.ZLEditImageConfiguration) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showCaptureImageOnTakePhotoBtn(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectBtnWhenSingleSelect(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectedMask(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectedBorder(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showInvalidMask(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectedIndex(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectedPhotoPreview(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func shouldAnialysisAsset(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func timeout(_ timeout: Foundation.TimeInterval) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func languageType(_ type: HHSDKVideo.ZLLanguageType) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func useCustomCamera(_ value: Swift.Bool) -> 
HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowTakePhoto(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowRecordVideo(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func minRecordDuration(_ duration: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxRecordDuration(_ duration: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func cameraConfiguration(_ configuration: HHSDKVideo.ZLCameraConfiguration) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func hudStyle(_ style: HHSDKVideo.ZLProgressHUD.HUDStyle) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func canSelectAsset(_ block: ((Photos.PHAsset) -> Swift.Bool)?) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func showAddPhotoButton(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func showEnterSettingTips(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func noAuthorityCallback(_ callback: ((HHSDKVideo.ZLNoAuthorityType) -> Swift.Void)?) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func operateBeforeDoneAction(_ block: ((UIKit.UIViewController, @escaping () -> Swift.Void) -> Swift.Void)?) -> HHSDKVideo.ZLPhotoConfiguration +} +@objc @_inheritsConvenienceInitializers public class ZLPhotoManager : ObjectiveC.NSObject { + @objc public class func saveImageToAlbum(image: UIKit.UIImage, completion: ((Swift.Bool, Photos.PHAsset?) -> Swift.Void)?) + @objc public class func saveVideoToAlbum(url: Foundation.URL, completion: ((Swift.Bool, Photos.PHAsset?) -> Swift.Void)?) 
+ @objc public class func fetchPhoto(in result: Photos.PHFetchResult<Photos.PHAsset>, ascending: Swift.Bool, allowSelectImage: Swift.Bool, allowSelectVideo: Swift.Bool, limitCount: Swift.Int = .max) -> [HHSDKVideo.ZLPhotoModel] + @objc public class func getPhotoAlbumList(ascending: Swift.Bool, allowSelectImage: Swift.Bool, allowSelectVideo: Swift.Bool, completion: ([HHSDKVideo.ZLAlbumListModel]) -> Swift.Void) + @objc public class func getCameraRollAlbum(allowSelectImage: Swift.Bool, allowSelectVideo: Swift.Bool, completion: @escaping (HHSDKVideo.ZLAlbumListModel) -> Swift.Void) + @discardableResult + @objc public class func fetchImage(for asset: Photos.PHAsset, size: CoreGraphics.CGSize, progress: ((CoreGraphics.CGFloat, Swift.Error?, Swift.UnsafeMutablePointer<ObjectiveC.ObjCBool>, [Swift.AnyHashable : Any]?) -> Swift.Void)? = nil, completion: @escaping (UIKit.UIImage?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @discardableResult + @objc public class func fetchOriginalImage(for asset: Photos.PHAsset, progress: ((CoreGraphics.CGFloat, Swift.Error?, Swift.UnsafeMutablePointer<ObjectiveC.ObjCBool>, [Swift.AnyHashable : Any]?) -> Swift.Void)? = nil, completion: @escaping (UIKit.UIImage?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @discardableResult + @objc public class func fetchOriginalImageData(for asset: Photos.PHAsset, progress: ((CoreGraphics.CGFloat, Swift.Error?, Swift.UnsafeMutablePointer<ObjectiveC.ObjCBool>, [Swift.AnyHashable : Any]?) -> Swift.Void)? 
= nil, completion: @escaping (Foundation.Data, [Swift.AnyHashable : Any]?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @objc public class func fetchLivePhoto(for asset: Photos.PHAsset, completion: @escaping (Photos.PHLivePhoto?, [Swift.AnyHashable : Any]?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @objc public class func fetchVideo(for asset: Photos.PHAsset, progress: ((CoreGraphics.CGFloat, Swift.Error?, Swift.UnsafeMutablePointer<ObjectiveC.ObjCBool>, [Swift.AnyHashable : Any]?) -> Swift.Void)? = nil, completion: @escaping (AVFoundation.AVPlayerItem?, [Swift.AnyHashable : Any]?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @objc public class func fetchAVAsset(forVideo asset: Photos.PHAsset, completion: @escaping (AVFoundation.AVAsset?, [Swift.AnyHashable : Any]?) -> Swift.Void) -> Photos.PHImageRequestID + @objc public class func fetchAssetFilePath(asset: Photos.PHAsset, completion: @escaping (Swift.String?) -> Swift.Void) + @objc override dynamic public init() + @objc deinit +} +extension ZLPhotoManager { + @objc dynamic public class func hasPhotoLibratyAuthority() -> Swift.Bool + @objc dynamic public class func hasCameraAuthority() -> Swift.Bool + @objc dynamic public class func hasMicrophoneAuthority() -> Swift.Bool +} +extension ZLPhotoModel { + public enum MediaType : Swift.Int { + case unknown + case image + case gif + case livePhoto + case video + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } +} +@objc public class ZLPhotoModel : ObjectiveC.NSObject { + final public let ident: Swift.String + final public let asset: Photos.PHAsset + public var type: HHSDKVideo.ZLPhotoModel.MediaType + public var duration: Swift.String + public var isSelected: Swift.Bool + public var editImage: UIKit.UIImage? 
{ + get + set + } + public var second: HHSDKVideo.Second { + get + } + public var whRatio: CoreGraphics.CGFloat { + get + } + public var previewSize: CoreGraphics.CGSize { + get + } + public var editImageModel: HHSDKVideo.ZLEditImageModel? + public init(asset: Photos.PHAsset) + public func transformAssetType(for asset: Photos.PHAsset) -> HHSDKVideo.ZLPhotoModel.MediaType + public func transformDuration(for asset: Photos.PHAsset) -> Swift.String + @objc override dynamic public init() + @objc deinit +} +extension ZLPhotoModel { + public static func == (lhs: HHSDKVideo.ZLPhotoModel, rhs: HHSDKVideo.ZLPhotoModel) -> Swift.Bool +} +@objc @_inheritsConvenienceInitializers public class ZLPhotoPreviewSheet : UIKit.UIView { + @objc public var selectImageBlock: (([UIKit.UIImage], [Photos.PHAsset], Swift.Bool) -> Swift.Void)? + @objc public var selectImageRequestErrorBlock: (([Photos.PHAsset], [Swift.Int]) -> Swift.Void)? + @objc public var cancelBlock: (() -> Swift.Void)? + @objc deinit + @objc convenience override dynamic public init(frame: CoreGraphics.CGRect) + @objc public init(selectedAssets: [Photos.PHAsset]? 
= nil) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc override dynamic public func layoutSubviews() + @objc public func showPreview(animate: Swift.Bool = true, sender: UIKit.UIViewController) + @objc public func showPhotoLibrary(sender: UIKit.UIViewController) + @objc public func previewAssets(sender: UIKit.UIViewController, assets: [Photos.PHAsset], index: Swift.Int, isOriginal: Swift.Bool, showBottomViewAndSelectBtn: Swift.Bool = true) +} +extension ZLPhotoPreviewSheet : UIKit.UIGestureRecognizerDelegate { + @objc override dynamic public func gestureRecognizerShouldBegin(_ gestureRecognizer: UIKit.UIGestureRecognizer) -> Swift.Bool +} +extension ZLPhotoPreviewSheet : UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegateFlowLayout { + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, sizeForItemAt indexPath: Foundation.IndexPath) -> CoreGraphics.CGSize + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, willDisplay cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, didSelectItemAt indexPath: Foundation.IndexPath) +} +extension ZLPhotoPreviewSheet : UIKit.UIImagePickerControllerDelegate, UIKit.UINavigationControllerDelegate { + @objc dynamic public func imagePickerController(_ picker: UIKit.UIImagePickerController, didFinishPickingMediaWithInfo info: [UIKit.UIImagePickerController.InfoKey : Any]) +} +extension ZLPhotoPreviewSheet : Photos.PHPhotoLibraryChangeObserver { + @objc dynamic public func 
photoLibraryDidChange(_ changeInstance: Photos.PHChange) +} +@objc @_inheritsConvenienceInitializers public class ZLPhotoUIConfiguration : ObjectiveC.NSObject { + @objc public enum CancelButtonStyle : Swift.Int { + case text + case image + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public class func `default`() -> HHSDKVideo.ZLPhotoUIConfiguration + @objc public class func resetConfiguration() + @objc public var style: HHSDKVideo.ZLPhotoBrowserStyle + @objc public var statusBarStyle: UIKit.UIStatusBarStyle + @objc public var navCancelButtonStyle: HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle + @objc public var showStatusBarInPreviewInterface: Swift.Bool + @objc public var navViewBlurEffectOfAlbumList: UIKit.UIBlurEffect? + @objc public var navViewBlurEffectOfPreview: UIKit.UIBlurEffect? + @objc public var bottomViewBlurEffectOfAlbumList: UIKit.UIBlurEffect? + @objc public var bottomViewBlurEffectOfPreview: UIKit.UIBlurEffect? + @objc public var customImageNames: [Swift.String] { + @objc get + @objc set + } + public var customImageForKey: [Swift.String : UIKit.UIImage?] { + get + set + } + @objc public var customImageForKey_objc: [Swift.String : UIKit.UIImage] { + @objc get + @objc set + } + public var customLanguageKeyValue: [HHSDKVideo.ZLLocalLanguageKey : Swift.String] { + get + set + } + @objc public var customLanguageKeyValue_objc: [Swift.String : Swift.String] { + @objc get + @objc set + } + @objc public var themeFontName: Swift.String? 
{ + @objc get + @objc set + } + @objc public var sheetTranslucentColor: UIKit.UIColor + @objc public var sheetBtnBgColor: UIKit.UIColor + @objc public var sheetBtnTitleColor: UIKit.UIColor + @objc public var sheetBtnTitleTintColor: UIKit.UIColor + @objc public var navBarColor: UIKit.UIColor + @objc public var navBarColorOfPreviewVC: UIKit.UIColor + @objc public var navTitleColor: UIKit.UIColor + @objc public var navTitleColorOfPreviewVC: UIKit.UIColor + @objc public var navEmbedTitleViewBgColor: UIKit.UIColor + @objc public var albumListBgColor: UIKit.UIColor + @objc public var embedAlbumListTranslucentColor: UIKit.UIColor + @objc public var albumListTitleColor: UIKit.UIColor + @objc public var albumListCountColor: UIKit.UIColor + @objc public var separatorColor: UIKit.UIColor + @objc public var thumbnailBgColor: UIKit.UIColor + @objc public var previewVCBgColor: UIKit.UIColor + @objc public var bottomToolViewBgColor: UIKit.UIColor + @objc public var bottomToolViewBgColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewBtnNormalTitleColor: UIKit.UIColor + @objc public var bottomToolViewDoneBtnNormalTitleColor: UIKit.UIColor + @objc public var bottomToolViewBtnNormalTitleColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewDoneBtnNormalTitleColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewBtnDisableTitleColor: UIKit.UIColor + @objc public var bottomToolViewDoneBtnDisableTitleColor: UIKit.UIColor + @objc public var bottomToolViewBtnDisableTitleColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewDoneBtnDisableTitleColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewBtnNormalBgColor: UIKit.UIColor + @objc public var bottomToolViewBtnNormalBgColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewBtnDisableBgColor: UIKit.UIColor + @objc public var bottomToolViewBtnDisableBgColorOfPreviewVC: UIKit.UIColor + @objc public var selectMorePhotoWhenAuthIsLismitedTitleColor: UIKit.UIColor + @objc public 
var cameraRecodeProgressColor: UIKit.UIColor + @objc public var selectedMaskColor: UIKit.UIColor + @objc public var selectedBorderColor: UIKit.UIColor + @objc public var invalidMaskColor: UIKit.UIColor + @objc public var indexLabelTextColor: UIKit.UIColor + @objc public var indexLabelBgColor: UIKit.UIColor + @objc public var cameraCellBgColor: UIKit.UIColor + @objc public var adjustSliderNormalColor: UIKit.UIColor + @objc public var adjustSliderTintColor: UIKit.UIColor + @objc override dynamic public init() + @objc deinit +} +@objc public enum ZLPhotoBrowserStyle : Swift.Int { + case embedAlbumList + case externalAlbumList + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +extension ZLPhotoUIConfiguration { + @discardableResult + public func style(_ style: HHSDKVideo.ZLPhotoBrowserStyle) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func statusBarStyle(_ statusBarStyle: UIKit.UIStatusBarStyle) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navCancelButtonStyle(_ style: HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func showStatusBarInPreviewInterface(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navViewBlurEffectOfAlbumList(_ effect: UIKit.UIBlurEffect?) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navViewBlurEffectOfPreview(_ effect: UIKit.UIBlurEffect?) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomViewBlurEffectOfAlbumList(_ effect: UIKit.UIBlurEffect?) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomViewBlurEffectOfPreview(_ effect: UIKit.UIBlurEffect?) 
-> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func customImageNames(_ names: [Swift.String]) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func customImageForKey(_ map: [Swift.String : UIKit.UIImage?]) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func customLanguageKeyValue(_ map: [HHSDKVideo.ZLLocalLanguageKey : Swift.String]) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func themeFontName(_ name: Swift.String) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func sheetTranslucentColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func sheetBtnBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func sheetBtnTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func sheetBtnTitleTintColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navBarColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navBarColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navEmbedTitleViewBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func albumListBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func embedAlbumListTranslucentColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func albumListTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func albumListCountColor(_ 
color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func separatorColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func thumbnailBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func previewVCBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBgColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnNormalTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewDoneBtnNormalTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnNormalTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewDoneBtnNormalTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnDisableTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewDoneBtnDisableTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnDisableTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewDoneBtnDisableTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnNormalBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnNormalBgColorOfPreviewVC(_ color: UIKit.UIColor) -> 
HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnDisableBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnDisableBgColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func selectMorePhotoWhenAuthIsLismitedTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func cameraRecodeProgressColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func selectedMaskColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func selectedBorderColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func invalidMaskColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func indexLabelTextColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func indexLabelBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func cameraCellBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func adjustSliderNormalColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func adjustSliderTintColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration +} +@objc @_hasMissingDesignatedInitializers public class ZLProgressHUD : UIKit.UIView { + @objc public enum HUDStyle : Swift.Int { + case light + case lightBlur + case dark + case darkBlur + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc deinit + @objc public init(style: HHSDKVideo.ZLProgressHUD.HUDStyle) + @objc public func show(timeout: Foundation.TimeInterval = 100) + @objc public func hide() + @objc override dynamic 
public init(frame: CoreGraphics.CGRect) +} +@objc @_hasMissingDesignatedInitializers public class ZLTextStickerState : ObjectiveC.NSObject { + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class ZLVideoManager : ObjectiveC.NSObject { + @objc public class func mergeVideos(fileUrls: [Foundation.URL], completion: @escaping ((Foundation.URL?, Swift.Error?) -> Swift.Void)) + @objc override dynamic public init() + @objc deinit +} +extension ZLVideoManager { + @objc dynamic public class func exportVideo(for asset: Photos.PHAsset, exportType: HHSDKVideo.ZLVideoManager.ExportType = .mov, presetName: Swift.String = AVAssetExportPresetMediumQuality, complete: @escaping ((Foundation.URL?, Swift.Error?) -> Swift.Void)) + @objc dynamic public class func exportVideo(for asset: AVFoundation.AVAsset, range: CoreMedia.CMTimeRange = CMTimeRange(start: .zero, duration: .positiveInfinity), exportType: HHSDKVideo.ZLVideoManager.ExportType = .mov, presetName: Swift.String = AVAssetExportPresetMediumQuality, complete: @escaping ((Foundation.URL?, Swift.Error?) 
-> Swift.Void)) +} +extension ZLVideoManager { + @objc public enum ExportType : Swift.Int { + case mov + case mp4 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } +} +extension HHSDKVideo.AES.Error : Swift.Equatable {} +extension HHSDKVideo.AES.Error : Swift.Hashable {} +extension HHSDKVideo.AES.Variant : Swift.Equatable {} +extension HHSDKVideo.AES.Variant : Swift.Hashable {} +extension HHSDKVideo.AES.Variant : Swift.RawRepresentable {} +extension HHSDKVideo.Bit : Swift.Equatable {} +extension HHSDKVideo.Bit : Swift.Hashable {} +extension HHSDKVideo.Bit : Swift.RawRepresentable {} +extension HHSDKVideo.Blowfish.Error : Swift.Equatable {} +extension HHSDKVideo.Blowfish.Error : Swift.Hashable {} +extension HHSDKVideo.CBC.Error : Swift.Equatable {} +extension HHSDKVideo.CBC.Error : Swift.Hashable {} +extension HHSDKVideo.CCM : HHSDKVideo.BlockMode {} +extension HHSDKVideo.CCM.Error : Swift.Equatable {} +extension HHSDKVideo.CCM.Error : Swift.Hashable {} +extension HHSDKVideo.CFB.Error : Swift.Equatable {} +extension HHSDKVideo.CFB.Error : Swift.Hashable {} +extension HHSDKVideo.ChaCha20.Error : Swift.Equatable {} +extension HHSDKVideo.ChaCha20.Error : Swift.Hashable {} +extension HHSDKVideo.CipherError : Swift.Equatable {} +extension HHSDKVideo.CipherError : Swift.Hashable {} +extension HHSDKVideo.CMAC.Error : Swift.Equatable {} +extension HHSDKVideo.CMAC.Error : Swift.Hashable {} +extension HHSDKVideo.CTR : HHSDKVideo.BlockMode {} +extension HHSDKVideo.CTR.Error : Swift.Equatable {} +extension HHSDKVideo.CTR.Error : Swift.Hashable {} +extension HHSDKVideo.DateTransform.Unit : Swift.Equatable {} +extension HHSDKVideo.DateTransform.Unit : Swift.Hashable {} +extension HHSDKVideo.DateTransform.Unit : Swift.RawRepresentable {} +extension HHSDKVideo.DGElasticPullToRefreshState : Swift.Equatable {} +extension HHSDKVideo.DGElasticPullToRefreshState : Swift.Hashable {} +extension 
HHSDKVideo.DGElasticPullToRefreshState : Swift.RawRepresentable {} +extension HHSDKVideo.EKAlertMessage.ImagePosition : Swift.Equatable {} +extension HHSDKVideo.EKAlertMessage.ImagePosition : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.DisplayMode : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.DisplayMode : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.NotificationHapticFeedback : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.NotificationHapticFeedback : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.Position : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.Position : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.StatusBar : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.StatusBar : Swift.Hashable {} +extension HHSDKVideo.GCM.Mode : Swift.Equatable {} +extension HHSDKVideo.GCM.Mode : Swift.Hashable {} +extension HHSDKVideo.GCM.Error : Swift.Equatable {} +extension HHSDKVideo.GCM.Error : Swift.Hashable {} +extension HHSDKVideo.HHBaseCallingState : Swift.Equatable {} +extension HHSDKVideo.HHBaseCallingState : Swift.Hashable {} +extension HHSDKVideo.HHBaseCallingState : Swift.RawRepresentable {} +extension HHSDKVideo.HHMediaType : Swift.Equatable {} +extension HHSDKVideo.HHMediaType : Swift.Hashable {} +extension HHSDKVideo.HHMediaType : Swift.RawRepresentable {} +extension HHSDKVideo.DateFormat : Swift.Equatable {} +extension HHSDKVideo.DateFormat : Swift.Hashable {} +extension HHSDKVideo.DateFormat : Swift.RawRepresentable {} 
+extension HHSDKVideo.HHConsType : Swift.Equatable {} +extension HHSDKVideo.HHConsType : Swift.Hashable {} +extension HHSDKVideo.HHConsType : Swift.RawRepresentable {} +extension HHSDKVideo.HHFileCacheManager.HHAssetPathType : Swift.Equatable {} +extension HHSDKVideo.HHFileCacheManager.HHAssetPathType : Swift.Hashable {} +extension HHSDKVideo.HHFileCacheManager.HHFileFormat : Swift.Equatable {} +extension HHSDKVideo.HHFileCacheManager.HHFileFormat : Swift.Hashable {} +extension HHSDKVideo.HHFileCacheManager.HHFileFormat : Swift.RawRepresentable {} +extension HHSDKVideo.HHLogMode : Swift.Equatable {} +extension HHSDKVideo.HHLogMode : Swift.Hashable {} +extension HHSDKVideo.HHLogMode : Swift.RawRepresentable {} +extension HHSDKVideo.HHCallType : Swift.Equatable {} +extension HHSDKVideo.HHCallType : Swift.Hashable {} +extension HHSDKVideo.HHCallType : Swift.RawRepresentable {} +extension HHSDKVideo.HHServerType : Swift.Equatable {} +extension HHSDKVideo.HHServerType : Swift.Hashable {} +extension HHSDKVideo.HHRequestMethod : Swift.Equatable {} +extension HHSDKVideo.HHRequestMethod : Swift.Hashable {} +extension HHSDKVideo.HHPagerViewTransformerType : Swift.Equatable {} +extension HHSDKVideo.HHPagerViewTransformerType : Swift.Hashable {} +extension HHSDKVideo.HHPagerViewTransformerType : Swift.RawRepresentable {} +extension HHSDKVideo.HHRealNameType : Swift.Equatable {} +extension HHSDKVideo.HHRealNameType : Swift.Hashable {} +extension HHSDKVideo.HHRealNameType : Swift.RawRepresentable {} +extension HHSDKVideo.TrtcLog : Swift.Equatable {} +extension HHSDKVideo.TrtcLog : Swift.Hashable {} +extension HHSDKVideo.TrtcLog : Swift.RawRepresentable {} +extension HHSDKVideo.TrtcError : Swift.Equatable {} +extension HHSDKVideo.TrtcError : Swift.Hashable {} +extension HHSDKVideo.TrtcError : Swift.RawRepresentable {} +extension HHSDKVideo.hhToastPosition : Swift.Equatable {} +extension HHSDKVideo.hhToastPosition : Swift.Hashable {} +extension HHSDKVideo.HKDF.Error : 
Swift.Equatable {} +extension HHSDKVideo.HKDF.Error : Swift.Hashable {} +extension HHSDKVideo.HMAC.Error : Swift.Equatable {} +extension HHSDKVideo.HMAC.Error : Swift.Hashable {} +extension HHSDKVideo.HMAC.Variant : Swift.Equatable {} +extension HHSDKVideo.HMAC.Variant : Swift.Hashable {} +extension HHSDKVideo.ItemClass : Swift.Equatable {} +extension HHSDKVideo.ItemClass : Swift.Hashable {} +extension HHSDKVideo.ProtocolType : Swift.Equatable {} +extension HHSDKVideo.ProtocolType : Swift.Hashable {} +extension HHSDKVideo.AuthenticationType : Swift.Equatable {} +extension HHSDKVideo.AuthenticationType : Swift.Hashable {} +extension HHSDKVideo.Accessibility : Swift.Equatable {} +extension HHSDKVideo.Accessibility : Swift.Hashable {} +extension HHSDKVideo.Status : Swift.Equatable {} +extension HHSDKVideo.Status : Swift.Hashable {} +extension HHSDKVideo.MappingType : Swift.Equatable {} +extension HHSDKVideo.MappingType : Swift.Hashable {} +extension HHSDKVideo.OCB.Mode : Swift.Equatable {} +extension HHSDKVideo.OCB.Mode : Swift.Hashable {} +extension HHSDKVideo.OCB.Error : Swift.Equatable {} +extension HHSDKVideo.OCB.Error : Swift.Hashable {} +extension HHSDKVideo.OFB.Error : Swift.Equatable {} +extension HHSDKVideo.OFB.Error : Swift.Hashable {} +extension HHSDKVideo.Padding : Swift.Equatable {} +extension HHSDKVideo.Padding : Swift.Hashable {} +extension HHSDKVideo.PKCS5.PBKDF1.Error : Swift.Equatable {} +extension HHSDKVideo.PKCS5.PBKDF1.Error : Swift.Hashable {} +extension HHSDKVideo.PKCS5.PBKDF1.Variant : Swift.Equatable {} +extension HHSDKVideo.PKCS5.PBKDF1.Variant : Swift.Hashable {} +extension HHSDKVideo.PKCS5.PBKDF2.Error : Swift.Equatable {} +extension HHSDKVideo.PKCS5.PBKDF2.Error : Swift.Hashable {} +extension HHSDKVideo.PCBC.Error : Swift.Equatable {} +extension HHSDKVideo.PCBC.Error : Swift.Hashable {} +extension HHSDKVideo.PermissionStatus : Swift.Equatable {} +extension HHSDKVideo.PermissionStatus : Swift.Hashable {} +extension 
HHSDKVideo.PermissionStatus : Swift.RawRepresentable {} +extension HHSDKVideo.HHBasePermissionType : Swift.Equatable {} +extension HHSDKVideo.HHBasePermissionType : Swift.Hashable {} +extension HHSDKVideo.HHBasePermissionType : Swift.RawRepresentable {} +extension HHSDKVideo.Poly1305.Error : Swift.Equatable {} +extension HHSDKVideo.Poly1305.Error : Swift.Hashable {} +extension HHSDKVideo.Rabbit.Error : Swift.Equatable {} +extension HHSDKVideo.Rabbit.Error : Swift.Hashable {} +extension HHSDKVideo.Reachability.NetworkStatus : Swift.Equatable {} +extension HHSDKVideo.Reachability.NetworkStatus : Swift.Hashable {} +extension HHSDKVideo.RecordImgType : Swift.Equatable {} +extension HHSDKVideo.RecordImgType : Swift.Hashable {} +extension HHSDKVideo.RecordImgType : Swift.RawRepresentable {} +extension HHSDKVideo.SHA2.Variant : Swift.Equatable {} +extension HHSDKVideo.SHA2.Variant : Swift.Hashable {} +extension HHSDKVideo.SHA3.Variant : Swift.Equatable {} +extension HHSDKVideo.SHA3.Variant : Swift.Hashable {} +extension HHSDKVideo.HHIMCmd : Swift.Equatable {} +extension HHSDKVideo.HHIMCmd : Swift.Hashable {} +extension HHSDKVideo.HHIMCmd : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset : Swift.Equatable {} +extension HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.FocusMode : Swift.Equatable {} +extension HHSDKVideo.ZLCameraConfiguration.FocusMode : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.FocusMode : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.ExposureMode : Swift.Equatable {} +extension HHSDKVideo.ZLCameraConfiguration.ExposureMode : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.ExposureMode : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.FlashMode : Swift.Equatable {} +extension 
HHSDKVideo.ZLCameraConfiguration.FlashMode : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.FlashMode : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.VideoExportType : Swift.Equatable {} +extension HHSDKVideo.ZLCameraConfiguration.VideoExportType : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.VideoExportType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLEditImageConfiguration.EditTool : Swift.Equatable {} +extension HHSDKVideo.ZLEditImageConfiguration.EditTool : Swift.Hashable {} +extension HHSDKVideo.ZLEditImageConfiguration.EditTool : Swift.RawRepresentable {} +extension HHSDKVideo.ZLEditImageConfiguration.AdjustTool : Swift.Equatable {} +extension HHSDKVideo.ZLEditImageConfiguration.AdjustTool : Swift.Hashable {} +extension HHSDKVideo.ZLEditImageConfiguration.AdjustTool : Swift.RawRepresentable {} +extension HHSDKVideo.ZLFilterType : Swift.Equatable {} +extension HHSDKVideo.ZLFilterType : Swift.Hashable {} +extension HHSDKVideo.ZLFilterType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLURLType : Swift.Equatable {} +extension HHSDKVideo.ZLURLType : Swift.Hashable {} +extension HHSDKVideo.ZLURLType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLLanguageType : Swift.Equatable {} +extension HHSDKVideo.ZLLanguageType : Swift.Hashable {} +extension HHSDKVideo.ZLLanguageType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLNoAuthorityType : Swift.Equatable {} +extension HHSDKVideo.ZLNoAuthorityType : Swift.Hashable {} +extension HHSDKVideo.ZLNoAuthorityType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLPhotoModel.MediaType : Swift.Equatable {} +extension HHSDKVideo.ZLPhotoModel.MediaType : Swift.Hashable {} +extension HHSDKVideo.ZLPhotoModel.MediaType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle : Swift.Equatable {} +extension HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle : Swift.Hashable {} +extension 
HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle : Swift.RawRepresentable {} +extension HHSDKVideo.ZLPhotoBrowserStyle : Swift.Equatable {} +extension HHSDKVideo.ZLPhotoBrowserStyle : Swift.Hashable {} +extension HHSDKVideo.ZLPhotoBrowserStyle : Swift.RawRepresentable {} +extension HHSDKVideo.ZLProgressHUD.HUDStyle : Swift.Equatable {} +extension HHSDKVideo.ZLProgressHUD.HUDStyle : Swift.Hashable {} +extension HHSDKVideo.ZLProgressHUD.HUDStyle : Swift.RawRepresentable {} +extension HHSDKVideo.ZLVideoManager.ExportType : Swift.Equatable {} +extension HHSDKVideo.ZLVideoManager.ExportType : Swift.Hashable {} +extension HHSDKVideo.ZLVideoManager.ExportType : Swift.RawRepresentable {} diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64-apple-ios.swiftmodule b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64-apple-ios.swiftmodule new file mode 100644 index 0000000..53e7940 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64-apple-ios.swiftmodule differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64.swiftdoc b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64.swiftdoc new file mode 100644 index 0000000..34fba8f Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64.swiftdoc differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64.swiftinterface b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64.swiftinterface new file mode 100644 index 0000000..bd0c3c8 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64.swiftinterface @@ -0,0 +1,5650 @@ +// swift-interface-format-version: 1.0 +// swift-compiler-version: Apple Swift version 5.4.2 (swiftlang-1205.0.28.2 clang-1205.0.19.57) +// swift-module-flags: -target arm64-apple-ios10.0 -enable-objc-interop -enable-library-evolution 
-swift-version 5 -enforce-exclusivity=checked -O -module-name HHSDKVideo +import AVFoundation +import AVKit +import Accelerate +import CoreGraphics +import CoreLocation +import CoreMotion +import CoreTelephony +import Darwin +import Dispatch +import Foundation +@_exported import HHSDKVideo +import ImageIO +import LocalAuthentication +import MobileCoreServices +import ObjectiveC +import Photos +import PhotosUI +import Security +import SecurityKit +import Swift +import SystemConfiguration +import UIKit +import UserNotifications +import WebKit +public protocol AEAD { + static var kLen: Swift.Int { get } + static var ivRange: Swift.Range<Swift.Int> { get } +} +@_hasMissingDesignatedInitializers final public class AEADChaCha20Poly1305 : HHSDKVideo.AEAD { + public static let kLen: Swift.Int + public static var ivRange: Swift.Range<Swift.Int> + public static func encrypt(_ plainText: Swift.Array<Swift.UInt8>, key: Swift.Array<Swift.UInt8>, iv: Swift.Array<Swift.UInt8>, authenticationHeader: Swift.Array<Swift.UInt8>) throws -> (cipherText: Swift.Array<Swift.UInt8>, authenticationTag: Swift.Array<Swift.UInt8>) + public static func decrypt(_ cipherText: Swift.Array<Swift.UInt8>, key: Swift.Array<Swift.UInt8>, iv: Swift.Array<Swift.UInt8>, authenticationHeader: Swift.Array<Swift.UInt8>, authenticationTag: Swift.Array<Swift.UInt8>) throws -> (plainText: Swift.Array<Swift.UInt8>, success: Swift.Bool) + @objc deinit +} +final public class AES { + public enum Error : Swift.Error { + case invalidKeySize + case dataPaddingRequired + case invalidData + public static func == (a: HHSDKVideo.AES.Error, b: HHSDKVideo.AES.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public enum Variant : Swift.Int { + case aes128, aes192, aes256 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + public static let blockSize: Swift.Int + final public 
let keySize: Swift.Int + final public let variant: HHSDKVideo.AES.Variant + public init(key: Swift.Array<Swift.UInt8>, blockMode: HHSDKVideo.BlockMode, padding: HHSDKVideo.Padding = .pkcs7) throws + @objc deinit +} +extension AES : HHSDKVideo.Cipher { + final public func encrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + final public func decrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +extension AES { + convenience public init(key: Swift.String, iv: Swift.String, padding: HHSDKVideo.Padding = .pkcs7) throws +} +extension AES : HHSDKVideo.Cryptors { + final public func makeEncryptor() throws -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable + final public func makeDecryptor() throws -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable +} +extension Array where Element == Swift.UInt8 { + public init(hex: Swift.String) + public func toHexString() -> Swift.String +} +extension Array where Element == Swift.UInt8 { + @available(*, deprecated) + public func chunks(size chunksize: Swift.Int) -> Swift.Array<Swift.Array<Element>> + public func md5() -> [Element] + public func sha1() -> [Element] + public func sha224() -> [Element] + public func sha256() -> [Element] + public func sha384() -> [Element] + public func sha512() -> [Element] + public func sha2(_ variant: HHSDKVideo.SHA2.Variant) -> [Element] + public func sha3(_ variant: HHSDKVideo.SHA3.Variant) -> [Element] + public func crc32(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.UInt32 + public func crc32c(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.UInt32 + public func crc16(seed: Swift.UInt16? 
= nil) -> Swift.UInt16 + public func encrypt(cipher: HHSDKVideo.Cipher) throws -> [Element] + public func decrypt(cipher: HHSDKVideo.Cipher) throws -> [Element] + public func authenticate<A>(with authenticator: A) throws -> [Element] where A : HHSDKVideo.CryptoAuthenticator +} +extension Array where Element == Swift.UInt8 { + public func toBase64() -> Swift.String? + public init(base64: Swift.String) +} +public protocol CryptoAuthenticator { + func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +public enum Bit : Swift.Int { + case zero + case one + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@_hasMissingDesignatedInitializers public class BlockDecryptor : HHSDKVideo.Cryptor, HHSDKVideo.Updatable { + public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> + public func seek(to position: Swift.Int) throws + @objc deinit +} +public typealias CipherOperationOnBlock = (Swift.ArraySlice<Swift.UInt8>) -> Swift.Array<Swift.UInt8>? 
+public protocol BlockMode { + var options: HHSDKVideo.BlockModeOption { get } + func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +public struct BlockModeOption : Swift.OptionSet { + public let rawValue: Swift.Int + public init(rawValue: Swift.Int) + public typealias ArrayLiteralElement = HHSDKVideo.BlockModeOption + public typealias Element = HHSDKVideo.BlockModeOption + public typealias RawValue = Swift.Int +} +final public class Blowfish { + public enum Error : Swift.Error { + case dataPaddingRequired + case invalidKeyOrInitializationVector + case invalidInitializationVector + case invalidBlockMode + public static func == (a: HHSDKVideo.Blowfish.Error, b: HHSDKVideo.Blowfish.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public static let blockSize: Swift.Int + final public let keySize: Swift.Int + public init(key: Swift.Array<Swift.UInt8>, blockMode: HHSDKVideo.BlockMode = CBC(iv: Array<UInt8>(repeating: 0, count: Blowfish.blockSize)), padding: HHSDKVideo.Padding) throws + @objc deinit +} +extension Blowfish : HHSDKVideo.Cipher { + final public func encrypt<C>(_ bytes: C) throws -> Swift.Array<Swift.UInt8> where C : Swift.Collection, C.Element == Swift.UInt8, C.Index == Swift.Int + final public func decrypt<C>(_ bytes: C) throws -> Swift.Array<Swift.UInt8> where C : Swift.Collection, C.Element == Swift.UInt8, C.Index == Swift.Int +} +extension Blowfish { + convenience public init(key: Swift.String, iv: Swift.String, padding: HHSDKVideo.Padding = .pkcs7) throws +} +@_hasMissingDesignatedInitializers public class BusyPics { + public static let `default`: HHSDKVideo.BusyPics + public func cacheImgs() + public func getImgs() -> [Foundation.URL?] 
+ @objc deinit +} +public struct CallDoctorModel : HHSDKVideo.Mappable { + public var doctor: HHSDKVideo.HHDoctorModel? + public var order: HHSDKVideo.OrderModel? + public var appoint: Swift.String? + public var pushFlowUrl: Swift.String? + public var realPatientUuid: Swift.Int? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct CBC : HHSDKVideo.BlockMode { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.CBC.Error, b: HHSDKVideo.CBC.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +@_inheritsConvenienceInitializers final public class CBCMAC : HHSDKVideo.CMAC { + override final public func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + override public init(key: Swift.Array<Swift.UInt8>) throws + @objc deinit +} +public struct CCM { + public enum Error : Swift.Error { + case invalidInitializationVector + case invalidParameter + case fail + public static func == (a: HHSDKVideo.CCM.Error, b: HHSDKVideo.CCM.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public var authenticationTag: Swift.Array<Swift.UInt8>? + public init(iv: Swift.Array<Swift.UInt8>, tagLength: Swift.Int, messageLength: Swift.Int, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? 
= nil) + public init(iv: Swift.Array<Swift.UInt8>, tagLength: Swift.Int, messageLength: Swift.Int, authenticationTag: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? = nil) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +public struct CFB : HHSDKVideo.BlockMode { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.CFB.Error, b: HHSDKVideo.CFB.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +final public class ChaCha20 { + public enum Error : Swift.Error { + case invalidKeyOrInitializationVector + case notSupported + public static func == (a: HHSDKVideo.ChaCha20.Error, b: HHSDKVideo.ChaCha20.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public static let blockSize: Swift.Int + final public let keySize: Swift.Int + public init(key: Swift.Array<Swift.UInt8>, iv nonce: Swift.Array<Swift.UInt8>) throws + @objc deinit +} +extension ChaCha20 : HHSDKVideo.Cipher { + final public func encrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + final public func decrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +extension ChaCha20 { + public struct ChaChaEncryptor : HHSDKVideo.Cryptor, HHSDKVideo.Updatable { + public mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) 
throws -> Swift.Array<Swift.UInt8> + public func seek(to: Swift.Int) throws + } +} +extension ChaCha20 { + public struct ChaChaDecryptor : HHSDKVideo.Cryptor, HHSDKVideo.Updatable { + public mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = true) throws -> Swift.Array<Swift.UInt8> + public func seek(to: Swift.Int) throws + } +} +extension ChaCha20 : HHSDKVideo.Cryptors { + final public func makeEncryptor() -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable + final public func makeDecryptor() -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable +} +extension ChaCha20 { + convenience public init(key: Swift.String, iv: Swift.String) throws +} +public struct ChatApi { +} +@_hasMissingDesignatedInitializers final public class Checksum { + @objc deinit +} +extension Checksum { + public static func crc32(_ message: Swift.Array<Swift.UInt8>, seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.UInt32 + public static func crc32c(_ message: Swift.Array<Swift.UInt8>, seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.UInt32 + public static func crc16(_ message: Swift.Array<Swift.UInt8>, seed: Swift.UInt16? 
= nil) -> Swift.UInt16 +} +public enum CipherError : Swift.Error { + case encrypt + case decrypt + public static func == (a: HHSDKVideo.CipherError, b: HHSDKVideo.CipherError) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +public protocol Cipher : AnyObject { + var keySize: Swift.Int { get } + func encrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + func encrypt(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + func decrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + func decrypt(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +extension Cipher { + public func encrypt(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + public func decrypt(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +public protocol CipherModeWorker { + var cipherOperation: HHSDKVideo.CipherOperationOnBlock { get } + var additionalBufferSize: Swift.Int { get } + mutating func encrypt(block plaintext: Swift.ArraySlice<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + mutating func decrypt(block ciphertext: Swift.ArraySlice<Swift.UInt8>) -> Swift.Array<Swift.UInt8> +} +public protocol BlockModeWorker : HHSDKVideo.CipherModeWorker { + var blockSize: Swift.Int { get } +} +public protocol CounterModeWorker : HHSDKVideo.CipherModeWorker { + associatedtype Counter + var counter: Self.Counter { get set } +} +public protocol SeekableModeWorker : HHSDKVideo.CipherModeWorker { + mutating func seek(to position: Swift.Int) throws +} +public protocol StreamModeWorker : HHSDKVideo.CipherModeWorker { +} +public protocol FinalizingEncryptModeWorker : HHSDKVideo.CipherModeWorker { + mutating func finalize(encrypt ciphertext: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.ArraySlice<Swift.UInt8> +} +public protocol FinalizingDecryptModeWorker : HHSDKVideo.CipherModeWorker { + @discardableResult + 
mutating func willDecryptLast(bytes ciphertext: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.ArraySlice<Swift.UInt8> + mutating func didDecryptLast(bytes plaintext: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.ArraySlice<Swift.UInt8> + mutating func finalize(decrypt plaintext: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.ArraySlice<Swift.UInt8> +} +public class CMAC : HHSDKVideo.CryptoAuthenticator { + public enum Error : Swift.Error { + case wrongKeyLength + public static func == (a: HHSDKVideo.CMAC.Error, b: HHSDKVideo.CMAC.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(key: Swift.Array<Swift.UInt8>) throws + public func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + public func authenticate(_ bytes: Swift.Array<Swift.UInt8>, cipher: HHSDKVideo.Cipher) throws -> Swift.Array<Swift.UInt8> + @objc deinit +} +open class CodableTransform<T> : HHSDKVideo.TransformType where T : Swift.Decodable, T : Swift.Encodable { + public typealias Object = T + public typealias JSON = Any + public init() + open func transformFromJSON(_ value: Any?) -> HHSDKVideo.CodableTransform<T>.Object? + open func transformToJSON(_ value: T?) -> HHSDKVideo.CodableTransform<T>.JSON? + @objc deinit +} +public struct CommentApi { +} +@objc @_inheritsConvenienceInitializers public class CommentBaseVC : UIKit.UIViewController { + @objc override dynamic public func viewWillAppear(_ animated: Swift.Bool) + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) 
+ @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class CommentVC : HHSDKVideo.CommentBaseVC { + @objc override dynamic public func viewDidLoad() + public static func show(_ orderId: Swift.String, docId: Swift.String, uuid: Swift.Int?, type: HHSDKVideo.HHCallType?, _ model: HHSDKVideo.HHGetQuesetionModel?) + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +public protocol Cryptor { + mutating func seek(to: Swift.Int) throws +} +public protocol Cryptors : AnyObject { + func makeEncryptor() throws -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable + func makeDecryptor() throws -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable + static func randomIV(_ blockSize: Swift.Int) -> Swift.Array<Swift.UInt8> +} +extension Cryptors { + public static func randomIV(_ count: Swift.Int) -> Swift.Array<Swift.UInt8> +} +public struct CTR { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.CTR.Error, b: HHSDKVideo.CTR.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>, counter: Swift.Int = 0) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +open class CustomDateFormatTransform : HHSDKVideo.DateFormatterTransform { + public init(formatString: Swift.String) + override public init(dateFormatter: Foundation.DateFormatter) + @objc deinit +} +extension Data { + public func checksum() -> Swift.UInt16 + public func md5() -> Foundation.Data + public func sha1() -> Foundation.Data + public func 
sha224() -> Foundation.Data + public func sha256() -> Foundation.Data + public func sha384() -> Foundation.Data + public func sha512() -> Foundation.Data + public func sha3(_ variant: HHSDKVideo.SHA3.Variant) -> Foundation.Data + public func crc32(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Foundation.Data + public func crc32c(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Foundation.Data + public func crc16(seed: Swift.UInt16? = nil) -> Foundation.Data + public func encrypt(cipher: HHSDKVideo.Cipher) throws -> Foundation.Data + public func decrypt(cipher: HHSDKVideo.Cipher) throws -> Foundation.Data + public func authenticate(with authenticator: HHSDKVideo.CryptoAuthenticator) throws -> Foundation.Data +} +extension Data { + public init(hex: Swift.String) + public var bytes: Swift.Array<Swift.UInt8> { + get + } + public func toHexString() -> Swift.String +} +open class DataTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.Data + public typealias JSON = Swift.String + public init() + open func transformFromJSON(_ value: Any?) -> Foundation.Data? + open func transformToJSON(_ value: Foundation.Data?) -> Swift.String? + @objc deinit +} +open class DateFormatterTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.Date + public typealias JSON = Swift.String + final public let dateFormatter: Foundation.DateFormatter + public init(dateFormatter: Foundation.DateFormatter) + open func transformFromJSON(_ value: Any?) -> Foundation.Date? + open func transformToJSON(_ value: Foundation.Date?) -> Swift.String? 
+ @objc deinit +} +open class DateTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.Date + public typealias JSON = Swift.Double + public enum Unit : Foundation.TimeInterval { + case seconds + case milliseconds + public init?(rawValue: Foundation.TimeInterval) + public typealias RawValue = Foundation.TimeInterval + public var rawValue: Foundation.TimeInterval { + get + } + } + public init(unit: HHSDKVideo.DateTransform.Unit = .seconds) + open func transformFromJSON(_ value: Any?) -> Foundation.Date? + open func transformToJSON(_ value: Foundation.Date?) -> Swift.Double? + @objc deinit +} +public struct DGElasticPullToRefreshConstants { + public static var WaveMaxHeight: CoreGraphics.CGFloat + public static var MinOffsetToPull: CoreGraphics.CGFloat + public static var LoadingContentInset: CoreGraphics.CGFloat + public static var LoadingViewSize: CoreGraphics.CGFloat +} +extension NSObject { + public func dg_addObserver(_ observer: ObjectiveC.NSObject, forKeyPath keyPath: Swift.String) + public func dg_removeObserver(_ observer: ObjectiveC.NSObject, forKeyPath keyPath: Swift.String) +} +extension UIScrollView { + public func dg_addPullToRefreshWithActionHandler(_ actionHandler: @escaping () -> Swift.Void, loadingView: HHSDKVideo.DGElasticPullToRefreshLoadingView?) 
+ public func dg_removePullToRefresh() + public func dg_setPullToRefreshBackgroundColor(_ color: UIKit.UIColor) + public func dg_setPullToRefreshFillColor(_ color: UIKit.UIColor) + public func dg_stopLoading() + public func dg_startLoading() +} +extension UIView { + public func dg_center(_ usePresentationLayerIfPossible: Swift.Bool) -> CoreGraphics.CGPoint +} +extension UIPanGestureRecognizer { + public func dg_resign() +} +extension UIGestureRecognizer.State { + public func dg_isAnyOf(_ values: [UIKit.UIGestureRecognizer.State]) -> Swift.Bool +} +@objc @_inheritsConvenienceInitializers open class DGElasticPullToRefreshLoadingView : UIKit.UIView { + @objc dynamic public init() + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + open func setPullProgress(_ progress: CoreGraphics.CGFloat) + open func startAnimating() + open func stopLoading() + @objc deinit +} +extension CGFloat { + public func toRadians() -> CoreGraphics.CGFloat + public func toDegrees() -> CoreGraphics.CGFloat +} +@objc open class DGElasticPullToRefreshLoadingViewCircle : HHSDKVideo.DGElasticPullToRefreshLoadingView { + @objc override dynamic public init() + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + override open func setPullProgress(_ progress: CoreGraphics.CGFloat) + override open func startAnimating() + override open func stopLoading() + @objc override dynamic open func tintColorDidChange() + @objc override dynamic open func layoutSubviews() + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public enum DGElasticPullToRefreshState : Swift.Int { + case stopped + case dragging + case animatingBounce + case loading + case animatingToStopped + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc @_hasMissingDesignatedInitializers open class DGElasticPullToRefreshView : 
UIKit.UIView { + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc deinit + @objc override dynamic open func observeValue(forKeyPath keyPath: Swift.String?, of object: Any?, change: [Foundation.NSKeyValueChangeKey : Any]?, context: Swift.UnsafeMutableRawPointer?) + @objc override dynamic open func layoutSubviews() + @objc override dynamic public init(frame: CoreGraphics.CGRect) +} +public struct DictionaryTransform<Key, Value> : HHSDKVideo.TransformType where Key : Swift.Hashable, Key : Swift.RawRepresentable, Value : HHSDKVideo.Mappable, Key.RawValue == Swift.String { + public init() + public func transformFromJSON(_ value: Any?) -> [Key : Value]? + public func transformToJSON(_ value: [Key : Value]?) -> Any? + public typealias JSON = Any + public typealias Object = Swift.Dictionary<Key, Value> +} +@available(*, renamed: "Digest") +public typealias Hash = HHSDKVideo.Digest +public struct Digest { + public static func md5(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha1(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha224(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha256(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha384(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha512(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha2(_ bytes: Swift.Array<Swift.UInt8>, variant: HHSDKVideo.SHA2.Variant) -> Swift.Array<Swift.UInt8> + public static func sha3(_ bytes: Swift.Array<Swift.UInt8>, variant: HHSDKVideo.SHA3.Variant) -> Swift.Array<Swift.UInt8> +} +public struct ECB : HHSDKVideo.BlockMode { + public let options: HHSDKVideo.BlockModeOption + public init() + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping 
HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +@objc @_inheritsConvenienceInitializers public class EKAccessoryNoteMessageView : UIKit.UIView { + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +public struct EKAlertMessage { + public enum ImagePosition { + case top + case left + public static func == (a: HHSDKVideo.EKAlertMessage.ImagePosition, b: HHSDKVideo.EKAlertMessage.ImagePosition) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let imagePosition: HHSDKVideo.EKAlertMessage.ImagePosition + public let simpleMessage: HHSDKVideo.EKSimpleMessage + public let buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent + public init(simpleMessage: HHSDKVideo.EKSimpleMessage, imagePosition: HHSDKVideo.EKAlertMessage.ImagePosition = .top, buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent) +} +@objc @_hasMissingDesignatedInitializers final public class EKAlertMessageView : HHSDKVideo.EKSimpleMessageView { + public init(with message: HHSDKVideo.EKAlertMessage) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc deinit +} +public struct EKAttributes { + public var name: Swift.String? 
+ public var windowLevel: HHSDKVideo.EKAttributes.WindowLevel + public var position: HHSDKVideo.EKAttributes.Position + public var precedence: HHSDKVideo.EKAttributes.Precedence + public var displayDuration: Swift.Double + public var positionConstraints: HHSDKVideo.EKAttributes.PositionConstraints + public var screenInteraction: HHSDKVideo.EKAttributes.UserInteraction + public var entryInteraction: HHSDKVideo.EKAttributes.UserInteraction + public var scroll: HHSDKVideo.EKAttributes.Scroll + public var hapticFeedbackType: HHSDKVideo.EKAttributes.NotificationHapticFeedback + public var lifecycleEvents: HHSDKVideo.EKAttributes.LifecycleEvents + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public var entryBackground: HHSDKVideo.EKAttributes.BackgroundStyle + public var screenBackground: HHSDKVideo.EKAttributes.BackgroundStyle + public var shadow: HHSDKVideo.EKAttributes.Shadow + public var roundCorners: HHSDKVideo.EKAttributes.RoundCorners + public var border: HHSDKVideo.EKAttributes.Border + public var statusBar: HHSDKVideo.EKAttributes.StatusBar + public var entranceAnimation: HHSDKVideo.EKAttributes.Animation + public var exitAnimation: HHSDKVideo.EKAttributes.Animation + public var popBehavior: HHSDKVideo.EKAttributes.PopBehavior { + get + set + } + public init() +} +extension EKAttributes { + public struct Animation : Swift.Equatable { + public struct Spring : Swift.Equatable { + public var damping: CoreGraphics.CGFloat + public var initialVelocity: CoreGraphics.CGFloat + public init(damping: CoreGraphics.CGFloat, initialVelocity: CoreGraphics.CGFloat) + public static func == (a: HHSDKVideo.EKAttributes.Animation.Spring, b: HHSDKVideo.EKAttributes.Animation.Spring) -> Swift.Bool + } + public struct RangeAnimation : Swift.Equatable { + public var duration: Foundation.TimeInterval + public var delay: Foundation.TimeInterval + public var start: CoreGraphics.CGFloat + public var end: CoreGraphics.CGFloat + public var spring: 
HHSDKVideo.EKAttributes.Animation.Spring? + public init(from start: CoreGraphics.CGFloat, to end: CoreGraphics.CGFloat, duration: Foundation.TimeInterval, delay: Foundation.TimeInterval = 0, spring: HHSDKVideo.EKAttributes.Animation.Spring? = nil) + public static func == (a: HHSDKVideo.EKAttributes.Animation.RangeAnimation, b: HHSDKVideo.EKAttributes.Animation.RangeAnimation) -> Swift.Bool + } + public struct Translate : Swift.Equatable { + public enum AnchorPosition : Swift.Equatable { + case top + case bottom + case automatic + public func hash(into hasher: inout Swift.Hasher) + public static func == (a: HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition, b: HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition) -> Swift.Bool + public var hashValue: Swift.Int { + get + } + } + public var duration: Foundation.TimeInterval + public var delay: Foundation.TimeInterval + public var anchorPosition: HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition + public var spring: HHSDKVideo.EKAttributes.Animation.Spring? + public init(duration: Foundation.TimeInterval, anchorPosition: HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition = .automatic, delay: Foundation.TimeInterval = 0, spring: HHSDKVideo.EKAttributes.Animation.Spring? = nil) + public static func == (a: HHSDKVideo.EKAttributes.Animation.Translate, b: HHSDKVideo.EKAttributes.Animation.Translate) -> Swift.Bool + } + public var translate: HHSDKVideo.EKAttributes.Animation.Translate? + public var scale: HHSDKVideo.EKAttributes.Animation.RangeAnimation? + public var fade: HHSDKVideo.EKAttributes.Animation.RangeAnimation? 
+ public var containsTranslation: Swift.Bool { + get + } + public var containsScale: Swift.Bool { + get + } + public var containsFade: Swift.Bool { + get + } + public var containsAnimation: Swift.Bool { + get + } + public var maxDelay: Foundation.TimeInterval { + get + } + public var maxDuration: Foundation.TimeInterval { + get + } + public var totalDuration: Foundation.TimeInterval { + get + } + public static var translation: HHSDKVideo.EKAttributes.Animation { + get + } + public static var none: HHSDKVideo.EKAttributes.Animation { + get + } + public init(translate: HHSDKVideo.EKAttributes.Animation.Translate? = nil, scale: HHSDKVideo.EKAttributes.Animation.RangeAnimation? = nil, fade: HHSDKVideo.EKAttributes.Animation.RangeAnimation? = nil) + public static func == (a: HHSDKVideo.EKAttributes.Animation, b: HHSDKVideo.EKAttributes.Animation) -> Swift.Bool + } +} +extension EKAttributes { + public enum BackgroundStyle : Swift.Equatable { + public struct BlurStyle : Swift.Equatable { + public static var extra: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle { + get + } + public static var standard: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle { + get + } + @available(iOS 10.0, *) + public static var prominent: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle { + get + } + public static var dark: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle { + get + } + public init(style: UIKit.UIBlurEffect.Style) + public init(light: UIKit.UIBlurEffect.Style, dark: UIKit.UIBlurEffect.Style) + public func blurStyle(for traits: UIKit.UITraitCollection, mode: HHSDKVideo.EKAttributes.DisplayMode) -> UIKit.UIBlurEffect.Style + public func blurEffect(for traits: UIKit.UITraitCollection, mode: HHSDKVideo.EKAttributes.DisplayMode) -> UIKit.UIBlurEffect + public static func == (a: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle, b: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle) -> Swift.Bool + } + public struct Gradient { + public var colors: [HHSDKVideo.EKColor] + public 
var startPoint: CoreGraphics.CGPoint + public var endPoint: CoreGraphics.CGPoint + public init(colors: [HHSDKVideo.EKColor], startPoint: CoreGraphics.CGPoint, endPoint: CoreGraphics.CGPoint) + } + case visualEffect(style: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle) + case color(color: HHSDKVideo.EKColor) + case gradient(gradient: HHSDKVideo.EKAttributes.BackgroundStyle.Gradient) + case image(image: UIKit.UIImage) + case clear + public static func == (lhs: HHSDKVideo.EKAttributes.BackgroundStyle, rhs: HHSDKVideo.EKAttributes.BackgroundStyle) -> Swift.Bool + } +} +extension EKAttributes { + public enum DisplayMode { + case inferred + case light + case dark + public static func == (a: HHSDKVideo.EKAttributes.DisplayMode, b: HHSDKVideo.EKAttributes.DisplayMode) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } +} +extension EKAttributes { + public typealias DisplayDuration = Foundation.TimeInterval +} +extension EKAttributes { + public enum RoundCorners { + case none + case all(radius: CoreGraphics.CGFloat) + case top(radius: CoreGraphics.CGFloat) + case bottom(radius: CoreGraphics.CGFloat) + } + public enum Border { + case none + case value(color: UIKit.UIColor, width: CoreGraphics.CGFloat) + } +} +extension EKAttributes { + public enum NotificationHapticFeedback { + case success + case warning + case error + case none + public static func == (a: HHSDKVideo.EKAttributes.NotificationHapticFeedback, b: HHSDKVideo.EKAttributes.NotificationHapticFeedback) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } +} +extension EKAttributes { + public struct LifecycleEvents { + public typealias Event = () -> Swift.Void + public var willAppear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? + public var didAppear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? + public var willDisappear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? 
+ public var didDisappear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? + public init(willAppear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? = nil, didAppear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? = nil, willDisappear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? = nil, didDisappear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? = nil) + } +} +extension EKAttributes { + public enum PopBehavior { + case overridden + case animated(animation: HHSDKVideo.EKAttributes.Animation) + public var isOverriden: Swift.Bool { + get + } + } +} +extension EKAttributes { + public enum Position { + case top + case bottom + case center + public var isTop: Swift.Bool { + get + } + public var isCenter: Swift.Bool { + get + } + public var isBottom: Swift.Bool { + get + } + public static func == (a: HHSDKVideo.EKAttributes.Position, b: HHSDKVideo.EKAttributes.Position) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } +} +extension EKAttributes { + public struct PositionConstraints { + public enum SafeArea { + case overridden + case empty(fillSafeArea: Swift.Bool) + public var isOverridden: Swift.Bool { + get + } + } + public enum Edge { + case ratio(value: CoreGraphics.CGFloat) + case offset(value: CoreGraphics.CGFloat) + case constant(value: CoreGraphics.CGFloat) + case intrinsic + public static var fill: HHSDKVideo.EKAttributes.PositionConstraints.Edge { + get + } + } + public struct Size { + public var width: HHSDKVideo.EKAttributes.PositionConstraints.Edge + public var height: HHSDKVideo.EKAttributes.PositionConstraints.Edge + public init(width: HHSDKVideo.EKAttributes.PositionConstraints.Edge, height: HHSDKVideo.EKAttributes.PositionConstraints.Edge) + public static var intrinsic: HHSDKVideo.EKAttributes.PositionConstraints.Size { + get + } + public static var sizeToWidth: HHSDKVideo.EKAttributes.PositionConstraints.Size { + get + } + public static var screen: 
HHSDKVideo.EKAttributes.PositionConstraints.Size { + get + } + } + public enum KeyboardRelation { + public struct Offset { + public var bottom: CoreGraphics.CGFloat + public var screenEdgeResistance: CoreGraphics.CGFloat? + public init(bottom: CoreGraphics.CGFloat = 0, screenEdgeResistance: CoreGraphics.CGFloat? = nil) + public static var none: HHSDKVideo.EKAttributes.PositionConstraints.KeyboardRelation.Offset { + get + } + } + case bind(offset: HHSDKVideo.EKAttributes.PositionConstraints.KeyboardRelation.Offset) + case unbind + public var isBound: Swift.Bool { + get + } + } + public struct Rotation { + public enum SupportedInterfaceOrientation { + case standard + case all + public static func == (a: HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation, b: HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public var isEnabled: Swift.Bool + public var supportedInterfaceOrientations: HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation + public init() + } + public var rotation: HHSDKVideo.EKAttributes.PositionConstraints.Rotation + public var keyboardRelation: HHSDKVideo.EKAttributes.PositionConstraints.KeyboardRelation + public var size: HHSDKVideo.EKAttributes.PositionConstraints.Size + public var maxSize: HHSDKVideo.EKAttributes.PositionConstraints.Size + public var verticalOffset: CoreGraphics.CGFloat + public var safeArea: HHSDKVideo.EKAttributes.PositionConstraints.SafeArea + public var hasVerticalOffset: Swift.Bool { + get + } + public static var float: HHSDKVideo.EKAttributes.PositionConstraints { + get + } + public static var fullWidth: HHSDKVideo.EKAttributes.PositionConstraints { + get + } + public static var fullScreen: HHSDKVideo.EKAttributes.PositionConstraints { + get + } + public init(verticalOffset: CoreGraphics.CGFloat = 0, size: 
HHSDKVideo.EKAttributes.PositionConstraints.Size = .sizeToWidth, maxSize: HHSDKVideo.EKAttributes.PositionConstraints.Size = .intrinsic) + } +} +extension EKAttributes { + public enum Precedence { + public struct Priority : Swift.Hashable, Swift.Equatable, Swift.RawRepresentable, Swift.Comparable { + public var rawValue: Swift.Int + public var hashValue: Swift.Int { + get + } + public init(_ rawValue: Swift.Int) + public init(rawValue: Swift.Int) + public static func == (lhs: HHSDKVideo.EKAttributes.Precedence.Priority, rhs: HHSDKVideo.EKAttributes.Precedence.Priority) -> Swift.Bool + public static func < (lhs: HHSDKVideo.EKAttributes.Precedence.Priority, rhs: HHSDKVideo.EKAttributes.Precedence.Priority) -> Swift.Bool + public typealias RawValue = Swift.Int + } + public enum QueueingHeuristic { + public static var value: HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic + case chronological + case priority + public static func == (a: HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic, b: HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + case override(priority: HHSDKVideo.EKAttributes.Precedence.Priority, dropEnqueuedEntries: Swift.Bool) + case enqueue(priority: HHSDKVideo.EKAttributes.Precedence.Priority) + public var priority: HHSDKVideo.EKAttributes.Precedence.Priority { + get + set + } + } +} +extension EKAttributes.Precedence.Priority { + public static let maxRawValue: Swift.Int + public static let highRawValue: Swift.Int + public static let normalRawValue: Swift.Int + public static let lowRawValue: Swift.Int + public static let minRawValue: Swift.Int + public static let max: HHSDKVideo.EKAttributes.Precedence.Priority + public static let high: HHSDKVideo.EKAttributes.Precedence.Priority + public static let normal: HHSDKVideo.EKAttributes.Precedence.Priority + public static let low: HHSDKVideo.EKAttributes.Precedence.Priority + public 
static let min: HHSDKVideo.EKAttributes.Precedence.Priority +} +extension EKAttributes { + public static var `default`: HHSDKVideo.EKAttributes + public static var toast: HHSDKVideo.EKAttributes { + get + } + public static var float: HHSDKVideo.EKAttributes { + get + } + public static var topFloat: HHSDKVideo.EKAttributes { + get + } + public static var bottomFloat: HHSDKVideo.EKAttributes { + get + } + public static var centerFloat: HHSDKVideo.EKAttributes { + get + } + public static var bottomToast: HHSDKVideo.EKAttributes { + get + } + public static var topToast: HHSDKVideo.EKAttributes { + get + } + public static var topNote: HHSDKVideo.EKAttributes { + get + } + public static var bottomNote: HHSDKVideo.EKAttributes { + get + } + public static var statusBar: HHSDKVideo.EKAttributes { + get + } +} +extension EKAttributes { + public enum Scroll { + public struct PullbackAnimation { + public var duration: Foundation.TimeInterval + public var damping: CoreGraphics.CGFloat + public var initialSpringVelocity: CoreGraphics.CGFloat + public init(duration: Foundation.TimeInterval, damping: CoreGraphics.CGFloat, initialSpringVelocity: CoreGraphics.CGFloat) + public static var jolt: HHSDKVideo.EKAttributes.Scroll.PullbackAnimation { + get + } + public static var easeOut: HHSDKVideo.EKAttributes.Scroll.PullbackAnimation { + get + } + } + case disabled + case edgeCrossingDisabled(swipeable: Swift.Bool) + case enabled(swipeable: Swift.Bool, pullbackAnimation: HHSDKVideo.EKAttributes.Scroll.PullbackAnimation) + } +} +extension EKAttributes { + public enum Shadow { + case none + case active(with: HHSDKVideo.EKAttributes.Shadow.Value) + public struct Value { + public let radius: CoreGraphics.CGFloat + public let opacity: Swift.Float + public let color: HHSDKVideo.EKColor + public let offset: CoreGraphics.CGSize + public init(color: HHSDKVideo.EKColor = .black, opacity: Swift.Float, radius: CoreGraphics.CGFloat, offset: CoreGraphics.CGSize = .zero) + } + } +} +extension 
EKAttributes { + public enum StatusBar { + public typealias Appearance = (visible: Swift.Bool, style: UIKit.UIStatusBarStyle) + case ignored + case hidden + case dark + case light + case inferred + public var appearance: HHSDKVideo.EKAttributes.StatusBar.Appearance { + get + } + public static func statusBar(by appearance: HHSDKVideo.EKAttributes.StatusBar.Appearance) -> HHSDKVideo.EKAttributes.StatusBar + public static var currentAppearance: HHSDKVideo.EKAttributes.StatusBar.Appearance { + get + } + public static var currentStatusBar: HHSDKVideo.EKAttributes.StatusBar { + get + } + public static func == (a: HHSDKVideo.EKAttributes.StatusBar, b: HHSDKVideo.EKAttributes.StatusBar) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } +} +extension EKAttributes { + public struct UserInteraction { + public typealias Action = () -> () + public enum Default { + case absorbTouches + case delayExit(by: Foundation.TimeInterval) + case dismissEntry + case forward + } + public var defaultAction: HHSDKVideo.EKAttributes.UserInteraction.Default + public var customTapActions: [HHSDKVideo.EKAttributes.UserInteraction.Action] + public init(defaultAction: HHSDKVideo.EKAttributes.UserInteraction.Default = .absorbTouches, customTapActions: [HHSDKVideo.EKAttributes.UserInteraction.Action] = []) + public static var dismiss: HHSDKVideo.EKAttributes.UserInteraction { + get + } + public static var forward: HHSDKVideo.EKAttributes.UserInteraction { + get + } + public static var absorbTouches: HHSDKVideo.EKAttributes.UserInteraction { + get + } + public static func delayExit(by delay: Foundation.TimeInterval) -> HHSDKVideo.EKAttributes.UserInteraction + } +} +extension EKAttributes { + public enum WindowLevel { + case alerts + case statusBar + case normal + case custom(level: UIKit.UIWindow.Level) + public var value: UIKit.UIWindow.Level { + get + } + } +} +@objc final public class EKButtonBarView : UIKit.UIView { + @objc 
required dynamic public init?(coder aDecoder: Foundation.NSCoder) + public init(with buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent) + @objc override final public func layoutSubviews() + final public func expand() + final public func compress() + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public struct EKColor : Swift.Equatable { + public var dark: UIKit.UIColor { + get + } + public var light: UIKit.UIColor { + get + } + public init(light: UIKit.UIColor, dark: UIKit.UIColor) + public init(_ unified: UIKit.UIColor) + public init(rgb: Swift.Int) + public init(red: Swift.Int, green: Swift.Int, blue: Swift.Int) + public func color(for traits: UIKit.UITraitCollection, mode: HHSDKVideo.EKAttributes.DisplayMode) -> UIKit.UIColor + public static func == (a: HHSDKVideo.EKColor, b: HHSDKVideo.EKColor) -> Swift.Bool +} +extension EKColor { + public var inverted: HHSDKVideo.EKColor { + get + } + public func with(alpha: CoreGraphics.CGFloat) -> HHSDKVideo.EKColor + public static var white: HHSDKVideo.EKColor { + get + } + public static var black: HHSDKVideo.EKColor { + get + } + public static var clear: HHSDKVideo.EKColor { + get + } + public static var standardBackground: HHSDKVideo.EKColor { + get + } + public static var standardContent: HHSDKVideo.EKColor { + get + } +} +@objc final public class EKFormMessageView : UIKit.UIView { + public init(with title: HHSDKVideo.EKProperty.LabelContent, textFieldsContent: [HHSDKVideo.EKProperty.TextFieldContent], buttonContent: HHSDKVideo.EKProperty.ButtonContent) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + final public func becomeFirstResponder(with textFieldIndex: Swift.Int) + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKImageNoteMessageView : HHSDKVideo.EKAccessoryNoteMessageView { + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + public init(with content: HHSDKVideo.EKProperty.LabelContent, imageContent: HHSDKVideo.EKProperty.ImageContent) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKMessageContentView : UIKit.UIView { + public var titleContent: HHSDKVideo.EKProperty.LabelContent! { + get + set + } + public var subtitleContent: HHSDKVideo.EKProperty.LabelContent! { + get + set + } + public var titleAttributes: HHSDKVideo.EKProperty.LabelStyle! { + get + set + } + public var subtitleAttributes: HHSDKVideo.EKProperty.LabelStyle! { + get + set + } + public var title: Swift.String! { + get + set + } + public var subtitle: Swift.String! { + get + set + } + public var verticalMargins: CoreGraphics.CGFloat { + get + set + } + public var horizontalMargins: CoreGraphics.CGFloat { + get + set + } + public var labelsOffset: CoreGraphics.CGFloat { + get + set + } + @objc dynamic public init() + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKNoteMessageView : UIKit.UIView { + public var horizontalOffset: CoreGraphics.CGFloat { + get + set + } + public var verticalOffset: CoreGraphics.CGFloat { + get + set + } + public init(with content: HHSDKVideo.EKProperty.LabelContent) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public struct EKNotificationMessage { + public struct Insets { + public var contentInsets: UIKit.UIEdgeInsets + public var titleToDescription: CoreGraphics.CGFloat + public static var `default`: HHSDKVideo.EKNotificationMessage.Insets + } + public let simpleMessage: HHSDKVideo.EKSimpleMessage + public let auxiliary: HHSDKVideo.EKProperty.LabelContent? + public let insets: HHSDKVideo.EKNotificationMessage.Insets + public init(simpleMessage: HHSDKVideo.EKSimpleMessage, auxiliary: HHSDKVideo.EKProperty.LabelContent? = nil, insets: HHSDKVideo.EKNotificationMessage.Insets = .default) +} +@objc @_hasMissingDesignatedInitializers final public class EKNotificationMessageView : HHSDKVideo.EKSimpleMessageView { + public init(with message: HHSDKVideo.EKNotificationMessage) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc deinit +} +public struct EKPopUpMessage { + public typealias EKPopUpMessageAction = () -> () + public struct ThemeImage { + public enum Position { + case topToTop(offset: CoreGraphics.CGFloat) + case centerToTop(offset: CoreGraphics.CGFloat) + } + public var image: HHSDKVideo.EKProperty.ImageContent + public var position: HHSDKVideo.EKPopUpMessage.ThemeImage.Position + public init(image: HHSDKVideo.EKProperty.ImageContent, position: HHSDKVideo.EKPopUpMessage.ThemeImage.Position = .topToTop(offset: 40)) + } + public var themeImage: HHSDKVideo.EKPopUpMessage.ThemeImage? + public var title: HHSDKVideo.EKProperty.LabelContent + public var description: HHSDKVideo.EKProperty.LabelContent + public var button: HHSDKVideo.EKProperty.ButtonContent + public var action: HHSDKVideo.EKPopUpMessage.EKPopUpMessageAction + public init(themeImage: HHSDKVideo.EKPopUpMessage.ThemeImage? = nil, title: HHSDKVideo.EKProperty.LabelContent, description: HHSDKVideo.EKProperty.LabelContent, button: HHSDKVideo.EKProperty.ButtonContent, action: @escaping HHSDKVideo.EKPopUpMessage.EKPopUpMessageAction) +} +@objc final public class EKPopUpMessageView : UIKit.UIView { + public init(with message: HHSDKVideo.EKPopUpMessage) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKProcessingNoteMessageView : HHSDKVideo.EKAccessoryNoteMessageView { + public var isProcessing: Swift.Bool { + get + set + } + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + public init(with content: HHSDKVideo.EKProperty.LabelContent, activityIndicator: UIKit.UIActivityIndicatorView.Style) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public struct EKProperty { + public struct ButtonContent { + public typealias Action = () -> () + public var label: HHSDKVideo.EKProperty.LabelContent + public var backgroundColor: HHSDKVideo.EKColor + public var highlightedBackgroundColor: HHSDKVideo.EKColor + public var contentEdgeInset: CoreGraphics.CGFloat + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public var accessibilityIdentifier: Swift.String? + public var action: HHSDKVideo.EKProperty.ButtonContent.Action? + public init(label: HHSDKVideo.EKProperty.LabelContent, backgroundColor: HHSDKVideo.EKColor, highlightedBackgroundColor: HHSDKVideo.EKColor, contentEdgeInset: CoreGraphics.CGFloat = 5, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, accessibilityIdentifier: Swift.String? = nil, action: @escaping HHSDKVideo.EKProperty.ButtonContent.Action = {}) + public func backgroundColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + public func highlightedBackgroundColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + public func highlighedLabelColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + } + public struct LabelContent { + public var text: Swift.String + public var style: HHSDKVideo.EKProperty.LabelStyle + public var accessibilityIdentifier: Swift.String? + public init(text: Swift.String, style: HHSDKVideo.EKProperty.LabelStyle, accessibilityIdentifier: Swift.String? 
= nil) + } + public struct LabelStyle { + public var font: UIKit.UIFont + public var color: HHSDKVideo.EKColor + public var alignment: UIKit.NSTextAlignment + public var numberOfLines: Swift.Int + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public init(font: UIKit.UIFont, color: HHSDKVideo.EKColor, alignment: UIKit.NSTextAlignment = .left, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, numberOfLines: Swift.Int = 0) + public func color(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + } + public struct ImageContent { + public enum TransformAnimation { + case animate(duration: Foundation.TimeInterval, options: UIKit.UIView.AnimationOptions, transform: CoreGraphics.CGAffineTransform) + case none + } + public var tint: HHSDKVideo.EKColor? + public var images: [UIKit.UIImage] + public var imageSequenceAnimationDuration: Foundation.TimeInterval + public var size: CoreGraphics.CGSize? + public var contentMode: UIKit.UIView.ContentMode + public var makesRound: Swift.Bool + public var animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public var accessibilityIdentifier: Swift.String? + public init(imageName: Swift.String, animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation = .none, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, size: CoreGraphics.CGSize? = nil, contentMode: UIKit.UIView.ContentMode = .scaleToFill, tint: HHSDKVideo.EKColor? = nil, makesRound: Swift.Bool = false, accessibilityIdentifier: Swift.String? = nil) + public init(image: UIKit.UIImage, animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation = .none, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, size: CoreGraphics.CGSize? = nil, tint: HHSDKVideo.EKColor? = nil, contentMode: UIKit.UIView.ContentMode = .scaleToFill, makesRound: Swift.Bool = false, accessibilityIdentifier: Swift.String? 
= nil) + public init(images: [UIKit.UIImage], imageSequenceAnimationDuration: Foundation.TimeInterval = 1, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation = .none, size: CoreGraphics.CGSize? = nil, tint: HHSDKVideo.EKColor? = nil, contentMode: UIKit.UIView.ContentMode = .scaleToFill, makesRound: Swift.Bool = false, accessibilityIdentifier: Swift.String? = nil) + public init(imagesNames: [Swift.String], imageSequenceAnimationDuration: Foundation.TimeInterval = 1, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation = .none, size: CoreGraphics.CGSize? = nil, tint: HHSDKVideo.EKColor? = nil, contentMode: UIKit.UIView.ContentMode = .scaleToFill, makesRound: Swift.Bool = false, accessibilityIdentifier: Swift.String? = nil) + public static func thumb(with image: UIKit.UIImage, edgeSize: CoreGraphics.CGFloat) -> HHSDKVideo.EKProperty.ImageContent + public static func thumb(with imageName: Swift.String, edgeSize: CoreGraphics.CGFloat) -> HHSDKVideo.EKProperty.ImageContent + public func tintColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor? + } + public struct TextFieldContent { + weak public var delegate: UIKit.UITextFieldDelegate? + public var keyboardType: UIKit.UIKeyboardType + public var isSecure: Swift.Bool + public var leadingImage: UIKit.UIImage! + public var placeholder: HHSDKVideo.EKProperty.LabelContent + public var textStyle: HHSDKVideo.EKProperty.LabelStyle + public var tintColor: HHSDKVideo.EKColor! + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public var bottomBorderColor: HHSDKVideo.EKColor + public var accessibilityIdentifier: Swift.String? + public var textContent: Swift.String { + get + set + } + public init(delegate: UIKit.UITextFieldDelegate? 
= nil, keyboardType: UIKit.UIKeyboardType = .default, placeholder: HHSDKVideo.EKProperty.LabelContent, tintColor: HHSDKVideo.EKColor? = nil, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, textStyle: HHSDKVideo.EKProperty.LabelStyle, isSecure: Swift.Bool = false, leadingImage: UIKit.UIImage? = nil, bottomBorderColor: HHSDKVideo.EKColor = .clear, accessibilityIdentifier: Swift.String? = nil) + public func tintColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor? + public func bottomBorderColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor? + } + public struct ButtonBarContent { + public var content: [HHSDKVideo.EKProperty.ButtonContent] + public var separatorColor: HHSDKVideo.EKColor + public var horizontalDistributionThreshold: Swift.Int + public var expandAnimatedly: Swift.Bool + public var buttonHeight: CoreGraphics.CGFloat + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public init(with buttonContents: HHSDKVideo.EKProperty.ButtonContent..., separatorColor: HHSDKVideo.EKColor, horizontalDistributionThreshold: Swift.Int = 2, buttonHeight: CoreGraphics.CGFloat = 50, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, expandAnimatedly: Swift.Bool) + public init(with buttonContents: [HHSDKVideo.EKProperty.ButtonContent], separatorColor: HHSDKVideo.EKColor, horizontalDistributionThreshold: Swift.Int = 2, buttonHeight: CoreGraphics.CGFloat = 50, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, expandAnimatedly: Swift.Bool) + public func separatorColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + } + public struct EKRatingItemContent { + public var title: HHSDKVideo.EKProperty.LabelContent + public var description: HHSDKVideo.EKProperty.LabelContent + public var unselectedImage: HHSDKVideo.EKProperty.ImageContent + public var selectedImage: HHSDKVideo.EKProperty.ImageContent + public var size: CoreGraphics.CGSize + public init(title: 
HHSDKVideo.EKProperty.LabelContent, description: HHSDKVideo.EKProperty.LabelContent, unselectedImage: HHSDKVideo.EKProperty.ImageContent, selectedImage: HHSDKVideo.EKProperty.ImageContent, size: CoreGraphics.CGSize = CGSize(width: 50, height: 50)) + } +} +public struct EKRatingMessage { + public typealias Selection = (Swift.Int) -> Swift.Void + public var initialTitle: HHSDKVideo.EKProperty.LabelContent + public var initialDescription: HHSDKVideo.EKProperty.LabelContent + public var ratingItems: [HHSDKVideo.EKProperty.EKRatingItemContent] + public var buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent + public var selection: HHSDKVideo.EKRatingMessage.Selection! + public var selectedIndex: Swift.Int? { + get + set + } + public init(initialTitle: HHSDKVideo.EKProperty.LabelContent, initialDescription: HHSDKVideo.EKProperty.LabelContent, ratingItems: [HHSDKVideo.EKProperty.EKRatingItemContent], buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent, selection: HHSDKVideo.EKRatingMessage.Selection? 
= nil) +} +@objc final public class EKRatingMessageView : UIKit.UIView { + public init(with message: HHSDKVideo.EKRatingMessage) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc @_inheritsConvenienceInitializers final public class EKRatingSymbolsContainerView : UIKit.UIView { + final public func setup(with message: HHSDKVideo.EKRatingMessage, externalSelection: @escaping HHSDKVideo.EKRatingMessage.Selection) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +@objc final public class EKRatingSymbolView : UIKit.UIView { + final public var isSelected: Swift.Bool { + get + set + } + public init(unselectedImage: HHSDKVideo.EKProperty.ImageContent, selectedImage: HHSDKVideo.EKProperty.ImageContent, selection: @escaping HHSDKVideo.EKRatingMessage.Selection) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public struct EKSimpleMessage { + public let image: HHSDKVideo.EKProperty.ImageContent? + public let title: HHSDKVideo.EKProperty.LabelContent + public let description: HHSDKVideo.EKProperty.LabelContent + public init(image: HHSDKVideo.EKProperty.ImageContent? = nil, title: HHSDKVideo.EKProperty.LabelContent, description: HHSDKVideo.EKProperty.LabelContent) +} +@objc @_hasMissingDesignatedInitializers public class EKSimpleMessageView : UIKit.UIView { + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc final public class EKTextField : UIKit.UIView { + final public var text: Swift.String { + get + set + } + public init(with content: HHSDKVideo.EKProperty.TextFieldContent) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + final public func makeFirstResponder() + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKXStatusBarMessageView : UIKit.UIView { + public init(leading: HHSDKVideo.EKProperty.LabelContent, trailing: HHSDKVideo.EKProperty.LabelContent) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public func <- <T>(left: inout T, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: T, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: T?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout [T], right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: [T], right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout [T]?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: [T]?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout [Swift.String : T], right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: [Swift.String : T], right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout [Swift.String : T]?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: [Swift.String : T]?, right: 
HHSDKVideo.Map) where T : Swift.RawRepresentable +open class EnumTransform<T> : HHSDKVideo.TransformType where T : Swift.RawRepresentable { + public typealias Object = T + public typealias JSON = T.RawValue + public init() + open func transformFromJSON(_ value: Any?) -> T? + open func transformToJSON(_ value: T?) -> T.RawValue? + @objc deinit +} +final public class GCM : HHSDKVideo.BlockMode { + public enum Mode { + case combined + case detached + public static func == (a: HHSDKVideo.GCM.Mode, b: HHSDKVideo.GCM.Mode) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + final public let options: HHSDKVideo.BlockModeOption + public enum Error : Swift.Error { + case invalidInitializationVector + case fail + public static func == (a: HHSDKVideo.GCM.Error, b: HHSDKVideo.GCM.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + final public var authenticationTag: Swift.Array<Swift.UInt8>? + public init(iv: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? = nil, tagLength: Swift.Int = 16, mode: HHSDKVideo.GCM.Mode = .detached) + convenience public init(iv: Swift.Array<Swift.UInt8>, authenticationTag: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? = nil, mode: HHSDKVideo.GCM.Mode = .detached) + final public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker + @objc deinit +} +open class HexColorTransform : HHSDKVideo.TransformType { + public typealias Object = UIKit.UIColor + public typealias JSON = Swift.String + public init(prefixToJSON: Swift.Bool = false, alphaToJSON: Swift.Bool = false) + open func transformFromJSON(_ value: Any?) -> HHSDKVideo.HexColorTransform.Object? 
+ open func transformToJSON(_ value: HHSDKVideo.HexColorTransform.Object?) -> HHSDKVideo.HexColorTransform.JSON? + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHAppProtocolCheck : ObjectiveC.NSObject { + public static let instance: HHSDKVideo.HHAppProtocolCheck + @objc override dynamic public init() + public func showPrivacyDialog(content: Swift.String, userDoc: Swift.String, privateDoc: Swift.String, _ agreeBlock: ((Swift.Bool) -> Swift.Void)?) + @objc deinit +} +extension HHAppProtocolCheck : UIKit.UITextViewDelegate { + @objc dynamic public func textView(_ textView: UIKit.UITextView, shouldInteractWith URL: Foundation.URL, in characterRange: Foundation.NSRange, interaction: UIKit.UITextItemInteraction) -> Swift.Bool +} +extension Array { + public subscript(safe index: Swift.Int) -> Element? { + get + } +} +public struct HHBaseApi { +} +@propertyWrapper public struct ApiConfig { + public var wrappedValue: HHSDKVideo.HHBaseApi { + get + } + public init(path: Swift.String, method: HHSDKVideo.HHRequestMethod = .post, host: Swift.String = HHUrl.baseUrl(), domain: Swift.String = HHUrl.urlForFamily(), needUserInfo: Swift.Bool = true, needEncrypt: Swift.Bool = true, needDNS: Swift.Bool = true) +} +public typealias HHLoginHandler = ((Swift.String?) -> Swift.Void) +public var HMHudManager: HHSDKVideo.HHHUDable { + get +} +@_inheritsConvenienceInitializers @objc public class HHBaseSDK : ObjectiveC.NSObject { + @objc public static let `default`: HHSDKVideo.HHBaseSDK + public var dnsCallback: HHSDKVideo.HHDNSProtocal? + @objc public func start() + @objc public func login(userToken: Swift.String, completion: @escaping HHSDKVideo.HHLoginHandler) + @objc public func logout(_ callback: ((Swift.String?) -> Swift.Void)? 
= nil) + @objc override dynamic public init() + @objc deinit +} +@objc public enum HHBaseCallingState : Swift.Int { + case onStart = 0 + case waitingDoctor + case callFreeDoctor + case callConnect + case didRing + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc public protocol HHBaseVideoDelegate : ObjectiveC.NSObjectProtocol { + @objc func callStateChange(_ state: HHSDKVideo.HHBaseCallingState) + @objc optional func onStart(orderId: Swift.String?) + @objc func callDidEstablish() + @objc func getChatParentView(_ view: UIKit.UIView) + @objc func callFail(code: Swift.Int, error: Swift.String) + @objc func onFail(_ errorCode: Swift.Int, errrorStr: Swift.String?) + @objc func onCancel() + @objc func receivedOrder(_ orderId: Swift.String) + @objc func callDidFinish() + @objc func onExtensionDoctor() + @objc func onReceive(_ callID: Swift.String) + @objc func onResponse(_ accept: Swift.Bool) + @objc func onLeakPermission(_ type: HHSDKVideo.HHBasePermissionType) + @objc optional func onForceOffline() +} +@objc public protocol HHCallDelegate : ObjectiveC.NSObjectProtocol { + @objc optional func onCallStatus(_ error: Swift.Error?) + @objc optional func onCallSuccess() + @objc optional func callFinished() +} +@_inheritsConvenienceInitializers @objc public class HHCallerInfo : ObjectiveC.NSObject, HHSDKVideo.Mappable { + public var name: Swift.String? + public var photourl: Swift.String? + public var uuid: Swift.Int? + public var userToken: Swift.String? + @objc override dynamic public init() + required public init?(map: HHSDKVideo.Map) + public func mapping(map: HHSDKVideo.Map) + @objc deinit +} +public class HHCameraConfig { + weak public var sender: UIKit.UIViewController! + public var mediaType: HHSDKVideo.HHMediaType + public var isGrayCam: Swift.Bool + public var canReduce: Swift.Bool + public var autoUpload: Swift.Bool + public var maxCount: Swift.Int? 
+ public var crop: HHSDKVideo.onCropFinish? + public var canceled: HHSDKVideo.onCanceled? + public init() + public func build(_ block: (inout HHSDKVideo.HHCameraConfig) -> Swift.Void) -> HHSDKVideo.HHCameraConfig + @objc deinit +} +public let HHSDKScreenWidth: CoreGraphics.CGFloat +public let HHSDKScreenHeight: CoreGraphics.CGFloat +public let China_Flag: Swift.String +public struct HHDimens { + public static func isPad() -> Swift.Bool + public static func isPlus() -> Swift.Bool +} +public func HHColor(_ red: CoreGraphics.CGFloat, green: CoreGraphics.CGFloat, blue: CoreGraphics.CGFloat, alpha: CoreGraphics.CGFloat = 1.0) -> UIKit.UIColor +public func HHUISingleColor(_ value: CoreGraphics.CGFloat, alpha: CoreGraphics.CGFloat = 1.0) -> UIKit.UIColor +public func visibleWindow() -> UIKit.UIWindow? +public func imageWithColor(color: UIKit.UIColor) -> UIKit.UIImage? +public func delayFunc(_ time: Swift.Double, block: @escaping () -> Swift.Void) +public func appLanguage() -> Swift.String +public func isChina() -> Swift.Bool +@_hasMissingDesignatedInitializers public class HHDevice { + public static func isIphoneX() -> Swift.Bool + public static func botOffset() -> CoreGraphics.CGFloat + public static func tOffset() -> CoreGraphics.CGFloat + public class func isSml() -> Swift.Bool + public class func isMid() -> Swift.Bool + public class func isPlus() -> Swift.Bool + public class func isX() -> Swift.Bool + public static func iphoneType() -> Swift.String + @objc deinit +} +public typealias HHFetchBlock = (UIKit.UIImage?, [Swift.AnyHashable : Any]?) -> Swift.Void +public typealias onCanceled = (() -> Swift.Void) +public typealias onCapFinished = (([HHSDKVideo.SDKCameraImageModel]?) -> Swift.Void) +public typealias onCropFinish = (UIKit.UIImage, Swift.String?) 
-> Swift.Void +public enum HHMediaType : Swift.Int { + case cusCamera + case sysCamera + case cusVideo + case sysVideo + case photoImage + case photoVideo + case cusPhoto + case sysCrop + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +open class HHDataController<T> where T : HHSDKVideo.Mappable { + open var mData: T? + public init() + open func request(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) + open func emptyRequest(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) + open func noDataRequest(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) + open func request<E>(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: ((E) -> Swift.Void)?, fail: HHSDKVideo.HHNetError? = nil) where E : HHSDKVideo.Mappable + @objc deinit +} +extension Date { + public static func currentDate() -> Foundation.Date +} +public enum DateFormat : Swift.String { + case Full + case SingleDate + case Single + case WithoutSecond + case WithoutYearAndSecond + case HourMinute + case CN_Month_Day + case CN_Hour_Minute + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +@objc @_inheritsConvenienceInitializers public class HHDateUtils : ObjectiveC.NSObject { + public class func getDateForChinaStr() -> Swift.String + public static func stringWithDurationFromSeconds(_ seconds: Foundation.TimeInterval) -> Swift.String + public static func component(_ date: Foundation.Date) -> Foundation.DateComponents + @objc override dynamic public init() + @objc deinit +} +extension HHDateUtils { + public class func date2String(_ date: Foundation.Date, format: Swift.String) -> Swift.String + public class func date2String(_ date: Foundation.Date, format: HHSDKVideo.DateFormat) -> Swift.String +} 
+extension HHDateUtils { + public class func string2Date(_ str: Swift.String, format: HHSDKVideo.DateFormat) -> Foundation.Date? + public class func string2Date(_ str: Swift.String, format: Swift.String) -> Foundation.Date? +} +extension HHDateUtils { + public static func dateStringFromNow(_ date: Swift.Int) -> Swift.String + public static func dateStringFromInt(_ date: Swift.Int) -> Swift.String + public static func dateYearStringFromInt(_ date: Swift.Int) -> Swift.String +} +@objc @_inheritsConvenienceInitializers open class HHDeviceManager : ObjectiveC.NSObject { + public static func jailBrokend() -> Swift.Bool + @objc override dynamic public init() + @objc deinit +} +public protocol HHDNSProtocal { + func changeHost(_ hostDomain: Swift.String) -> Swift.String + func requestHost(_ host: Swift.String, challenge: Foundation.URLAuthenticationChallenge, completion: @escaping (Foundation.URLSession.AuthChallengeDisposition, Foundation.URLCredential?) -> Swift.Void) +} +public typealias HHPriceInfo = (priceAttri: Foundation.NSMutableAttributedString, disPriceWidth: CoreGraphics.CGFloat?) +public struct HHDoctorModel : HHSDKVideo.Mappable { + public init?(map: HHSDKVideo.Map) + public var agentUuid: Swift.String? + public var createtime: Swift.String? + public var department: Swift.String? + public var deptid: Swift.Int? + public var disease: Swift.String? + public var diseaseList: [Swift.String]? + public var doctorid: Swift.String? + public var expertStatus: Swift.String? + public var expertVideoTime: Swift.String? + public var famExpertVideoPrice: Swift.Float? + public var famServices: Swift.Int? + public var famprovidetypes: Swift.String? + public var hhTitle: Swift.String? + public var hospital: Swift.String? + public var hospitalid: Swift.Int? + public var introduction: Swift.String? + public var isTest: Swift.String? + public var login: HHSDKVideo.LoginModel? + public var workyear: Swift.Int? + public var name: Swift.String? + public var photourl: Swift.String? 
+ public var price: Swift.Float? + public var providetype: Swift.String? + public var province: Swift.String? + public var service: Swift.String? + public var serviceTypeStatus: Swift.String? + public var speciality: Swift.String? + public var standardDeptid: Swift.Int? + public var standardDeptname: Swift.String? + public var standardid: Swift.Int? + public var subdept: Swift.String? + public var subdeptids: Swift.String? + public var title: Swift.String? + public var titleid: Swift.Int? + public var vedioTimeList: Swift.String? + public var videoprice: Swift.Float? + public var license: Swift.String? + public init() + public mutating func mapping(map: HHSDKVideo.Map) + public func isJianzhi() -> Swift.Bool + public func supportType(type: HHSDKVideo.HHConsType) -> Swift.Bool + public func getPrice() -> HHSDKVideo.HHPriceInfo? + public func isZhuanke() -> Swift.Bool +} +public struct LoginModel : HHSDKVideo.Mappable { + public var actionSource: Swift.String? + public var loginname: Swift.String? + public var name: Swift.String? + public var photourl: Swift.String? + public var uuid: Swift.Int? + public var videoToken: Swift.String? + public var phoneno: Swift.String? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +public enum HHConsType : Swift.String { + case normal + case expert_video + case feiDao + case video + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +public struct HHEmptyModel : HHSDKVideo.Mappable { + public init() + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +@_hasMissingDesignatedInitializers public class HHEncryptUtils { + public static func encrypto(key: Swift.String, content: Swift.String) -> Swift.String? + public static func decrypto(key: Swift.String, content: Swift.String) -> Swift.String? 
+ public static func encrypto(key: Swift.String, content: Foundation.Data) -> Foundation.Data? + public static func decrypto(key: Swift.String, content: Foundation.Data) -> Foundation.Data? + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHFileCacheManager : ObjectiveC.NSObject { + public enum HHAssetPathType { + case image + case video + case sound + case dicom + case fb + case other + case dataBase + public static func == (a: HHSDKVideo.HHFileCacheManager.HHAssetPathType, b: HHSDKVideo.HHFileCacheManager.HHAssetPathType) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public enum HHFileFormat : Swift.String { + case Jpg + case Png + case Jpeg + case webp + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } + } + @objc override dynamic public init() + @objc deinit +} +extension HHFileCacheManager { + public class func getFileFormat(_ name: Swift.String) -> HHSDKVideo.HHFileCacheManager.HHAssetPathType + public class func createSoundFilePath(_ aPath: Swift.String) -> Swift.String + public class func createDBPath(_ aPath: Swift.String) -> Swift.String + public class func assetsCachePath(_ pathType: HHSDKVideo.HHFileCacheManager.HHAssetPathType) -> Swift.String + public class func createImageFilePath(_ format: HHSDKVideo.HHFileCacheManager.HHFileFormat = .Jpg) -> Swift.String + public class func createVideoFilePath() -> Swift.String + public class func isWriteCache(_ path: Swift.String?, data: Foundation.Data?) -> Swift.Bool + public class func isWriteCache(_ path: Swift.String?, image: UIKit.UIImage, quality: CoreGraphics.CGFloat = 1.0) -> Swift.Bool + public class func getFilePath(_ name: Swift.String) -> Swift.String? 
+} +extension HHFileCacheManager { + public static func saveString2File(_ string: Swift.String?, fileName: Swift.String) + public static func stringFromFile(_ fileName: Swift.String) -> Swift.String? +} +extension FileManager { + public func addSkipBackupAttributeToItemAtURL(_ url: Foundation.URL) -> Swift.Bool +} +public var uploadManager: HHSDKVideo.UploadQueue { + get +} +@_hasMissingDesignatedInitializers public class UploadQueue { + @discardableResult + public func upload(files: [Swift.String], config: HHSDKVideo.SDKUploadConfig) -> HHSDKVideo.HHFileUploadManager + public func cancelAll(_ finished: (() -> Swift.Void)? = nil) + @objc deinit +} +public class HHFileUploadManager { + public var mFileQueue: [Swift.String] + public var config: HHSDKVideo.SDKUploadConfig! + public var mTransFile: Swift.String? + public var isUploading: Swift.Bool + public init(files: [Swift.String], config: HHSDKVideo.SDKUploadConfig) + public func uploadFile(_ file: [Swift.String]) + public func cancalFiles(_ files: [Swift.String], cancelFinish: ((Swift.String) -> Swift.Void)? = nil) + public func cancelAll(_ finished: (() -> Swift.Void)? = nil) + @objc deinit +} +@objc public protocol HHHUDable { + @objc optional var autoDismissDuration: Foundation.TimeInterval { get } + @objc func showHUD() + @objc func dismissHUD() + @objc func showSuccess(_ message: Swift.String?) + @objc func showError(_ messgae: Swift.String?) + @objc optional func setDismissDuration(_ duraion: Foundation.TimeInterval) +} +extension HHHUDable { + public var autoDismissDuration: Foundation.TimeInterval { + get + } + public func setDismissDuration(_ duraion: Foundation.TimeInterval) +} +@objc public protocol HHIM { + @objc func register(_ cerName: Swift.String?) + @objc func login(_ completion: ((Swift.String?) -> Swift.Void)?) + @objc func autoLogin(_ completion: ((Swift.String?) -> Swift.Void)?) + @objc func logout(_ callback: ((Swift.String?) -> Swift.Void)?) 
+ @objc func canVideo() -> Swift.Bool +} +public struct HHInviteDocModel : HHSDKVideo.Mappable { + public var orderId: Swift.String? + public var channelId: Swift.UInt64? + public var doctorId: Swift.String? + public var imageUrl: Swift.String? + public var signalingType: Swift.String? + public var width: CoreGraphics.CGFloat + public var height: CoreGraphics.CGFloat + public init?(map: HHSDKVideo.Map) + public init(_ info: HHSDKVideo.HHNetCallChatInfo, meetId: Swift.UInt64?) + public func isWhiteBoard() -> Swift.Bool + public func isMultyCall() -> Swift.Bool + public mutating func mapping(map: HHSDKVideo.Map) +} +@objc public enum HHLogMode : Swift.Int { + case error = 0 + case warn = 1 + case info = 2 + case debug = 3 + case verbose = 4 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +public func logging(type: HHSDKVideo.HHLogMode = .info, _ tip: Swift.String) +@objc @_inheritsConvenienceInitializers open class HHMediaStatusCheckUtils : ObjectiveC.NSObject { + open class func checkCameraAccess() -> Swift.Bool + open class func checkCameraVideoPermission() -> Swift.Bool + open class func checkAlbumAccess() -> Swift.Bool + open class func checkAudioAccess() -> Swift.Bool + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers @_hasMissingDesignatedInitializers open class HHMedicNetObserver : ObjectiveC.NSObject { + public static let sharedInstance: HHSDKVideo.HHMedicNetObserver + open func createReachability() + open func currentInWifi() -> Swift.Bool + open func haveNetWork() -> Swift.Bool + @objc deinit +} +@_hasMissingDesignatedInitializers public class HHMedicPhotoPicker { + public static func openCamera(config: HHSDKVideo.HHCameraConfig, capFinished: HHSDKVideo.onCapFinished? 
= nil) + public static func reduceImages(paths: [Swift.String], finished: @escaping (([Swift.String]) -> Swift.Void)) + public class func changeAvatar(vc: UIKit.UIViewController, reference: UIKit.UIView? = nil, uuid: Swift.Int, imgClosure: @escaping (UIKit.UIImage) -> Swift.Void, keyClosure: @escaping (Swift.String) -> Swift.Void) + @objc deinit +} +extension HHMedicPhotoPicker { + public static func checkPermisstion(_ type: HHSDKVideo.HHBasePermissionType, authorized: (() -> Swift.Void)?, others: ((HHSDKVideo.HHBasePermissionType) -> Swift.Void)?) + public static func converSize(_ size: CoreGraphics.CGSize) -> CoreGraphics.CGSize +} +extension HHMedicPhotoPicker : HHSDKVideo.HHPhotoPickerManagerDelegate { + public func selectImage(_ selectedImages: [UIKit.UIImage]) + public func cancelImage() + public func selectImageRequestError(_ errorAssets: [Photos.PHAsset], errorIndexs: [Swift.Int]) +} +@objc @_inheritsConvenienceInitializers @_hasMissingDesignatedInitializers public class HHNeedRealNameView : UIKit.UIView { + public var realNameLinkClourse: (() -> ())? 
+ @objc deinit +} +@_hasMissingDesignatedInitializers public class HHNetCallChatInfo { + public init() + @objc deinit +} +@objc public enum HHCallType : Swift.Int { + case child = 600000 + case adult = 600002 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +public enum HHServerType { + case pay + case pacs + case weixin + public static func == (a: HHSDKVideo.HHServerType, b: HHSDKVideo.HHServerType) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +public enum HHRequestMethod { + case get + case post + public static func == (a: HHSDKVideo.HHRequestMethod, b: HHSDKVideo.HHRequestMethod) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +public let HH_RELOGIN_NOTIFICATION_STR: Swift.String +public struct HHRequestData { + public init(body: [Swift.String : Any] = ["default_sw":"default"], param: [Swift.String : Any] = ["default_sw":"default"]) + public var mHttpBody: [Swift.String : Any] + public var mParameters: [Swift.String : Any] +} +@_hasMissingDesignatedInitializers public class HHNetFetch { + public static func request<E>(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: ((E) -> Swift.Void)?, fail: HHSDKVideo.HHNetError? = nil) where E : HHSDKVideo.Mappable + public static func requestArray<E>(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: (([E]?) -> Swift.Void)?, fail: HHSDKVideo.HHNetError? = nil) where E : HHSDKVideo.Mappable + public static func noDataRequest(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) + public static func emptyRequest(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) 
+ @objc deinit +} +extension UIControl.State : Swift.Hashable { + public var hashValue: Swift.Int { + get + } +} +@objc @_inheritsConvenienceInitializers @_hasMissingDesignatedInitializers @IBDesignable public class HHPagerView : UIKit.UIView, UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegate { + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public func layoutSubviews() + @objc override dynamic public func willMove(toWindow newWindow: UIKit.UIWindow?) + @objc override dynamic public func prepareForInterfaceBuilder() + @objc deinit + @objc public func numberOfSections(in collectionView: UIKit.UICollectionView) -> Swift.Int + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, shouldHighlightItemAt indexPath: Foundation.IndexPath) -> Swift.Bool + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, didHighlightItemAt indexPath: Foundation.IndexPath) + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, shouldSelectItemAt indexPath: Foundation.IndexPath) -> Swift.Bool + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, didSelectItemAt indexPath: Foundation.IndexPath) + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, willDisplay cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, didEndDisplaying cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) + @objc public func scrollViewDidScroll(_ scrollView: UIKit.UIScrollView) + @objc public func scrollViewWillBeginDragging(_ scrollView: 
UIKit.UIScrollView) + @objc public func scrollViewWillEndDragging(_ scrollView: UIKit.UIScrollView, withVelocity velocity: CoreGraphics.CGPoint, targetContentOffset: Swift.UnsafeMutablePointer<CoreGraphics.CGPoint>) + @objc public func scrollViewDidEndDecelerating(_ scrollView: UIKit.UIScrollView) + @objc public func scrollViewDidEndScrollingAnimation(_ scrollView: UIKit.UIScrollView) +} +@objc public enum HHPagerViewTransformerType : Swift.Int { + case crossFading + case zoomOut + case depth + case overlap + case linear + case coverFlow + case ferrisWheel + case invertedFerrisWheel + case cubic + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@_hasMissingDesignatedInitializers public class UrlParams { + public static func addUserParams(_ parameters: [Swift.String : Any]?) -> [Swift.String : Any]? + public static func addCommon(_ param: [Swift.String : Any]?) -> [Swift.String : Any] + public static func param2String(param: [Swift.String : Any]? = nil) -> Swift.String + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHPhotoPickerController : UIKit.UINavigationController { + @objc override dynamic public func viewDidLoad() + convenience public init(localPath: Swift.String? = nil, deleteMode: Swift.Bool = false, finish: (([HHSDKVideo.SDKCameraImageModel]?) -> Swift.Void)? = nil) + @objc deinit + @available(iOS 5.0, *) + @objc override dynamic public init(navigationBarClass: Swift.AnyClass?, toolbarClass: Swift.AnyClass?) + @objc override dynamic public init(rootViewController: UIKit.UIViewController) + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) 
+ @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) +} +public protocol HHPhotoPickerManagerDelegate { + func selectImage(_ selectedImages: [UIKit.UIImage]) + func cancelImage() + func selectImageRequestError(_ errorAssets: [Photos.PHAsset], errorIndexs: [Swift.Int]) +} +@objc public class HHPhotoPickerManager : ObjectiveC.NSObject { + public var viewDelegate: HHSDKVideo.HHPhotoPickerManagerDelegate? + public var photoConfigModel: HHSDKVideo.HHPhotoConfigModel + public var photoUIConfigModel: HHSDKVideo.HHPhotoUIConfigModel + required public init(showVC: UIKit.UIViewController) + public func showImagePicker() + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHPhotoConfigModel : ObjectiveC.NSObject { + public var maxPreviewCount: Swift.Int + public var maxSelectCount: Swift.Int { + get + set + } + public var minVideoSelectCount: Swift.Int { + get + set + } + public var maxVideoSelectCount: Swift.Int { + get + set + } + public var minSelectVideoDuration: Swift.Int + public var maxSelectVideoDuration: Swift.Int + public var cellCornerRadio: CoreGraphics.CGFloat + public var languageType: HHSDKVideo.ZLLanguageType { + get + set + } + public var columnCount: Swift.Int { + get + set + } + public var sortAscending: Swift.Bool + public var allowSelectImage: Swift.Bool + public var allowTakePhotoInLibrary: Swift.Bool + public var allowSelectOriginal: Swift.Bool + public var allowSelectGif: Swift.Bool + public var allowSelectVideo: Swift.Bool + public var allowSelectLivePhoto: Swift.Bool + public var allowEditImage: Swift.Bool + public var allowMixSelect: Swift.Bool + public var allowPreviewPhotos: Swift.Bool + public var editImageWithDraw: Swift.Bool + public var editImageWithClip: Swift.Bool + public var editImageWithImageSticker: Swift.Bool + public var editImageWithTextSticker: Swift.Bool + public var editImageWithMosaic: Swift.Bool + public var editImageWithFilter: Swift.Bool + public 
var editImageWithAdjust: Swift.Bool + public var editImageWitAdjustBrightness: Swift.Bool + public var editImageWitAdjustContrast: Swift.Bool + public var editImageWitAdjustSaturation: Swift.Bool + public var shouldAnialysisAsset: Swift.Bool + public var allowEditVideo: Swift.Bool { + get + set + } + public var saveNewImageAfterEdit: Swift.Bool + public var allowDragSelect: Swift.Bool + public var allowSlideSelect: Swift.Bool + public var autoScrollWhenSlideSelectIsActive: Swift.Bool + public var autoScrollMaxSpeed: CoreGraphics.CGFloat + public var showCaptureImageOnTakePhotoBtn: Swift.Bool + public var showSelectedIndex: Swift.Bool + public var showSelectedMask: Swift.Bool + public var showSelectedBorder: Swift.Bool + public var showInvalidMask: Swift.Bool + public var useCustomCamera: Swift.Bool + public var flashMode: HHSDKVideo.ZLCameraConfiguration.FlashMode + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHPhotoUIConfigModel : ObjectiveC.NSObject { + public var style: HHSDKVideo.ZLPhotoBrowserStyle + public var bottomToolViewBtnNormalBgColor: UIKit.UIColor + public var bottomToolViewBtnNormalBgColorOfPreviewVC: UIKit.UIColor + @objc public var indexLabelBgColor: UIKit.UIColor + @objc override dynamic public init() + @objc deinit +} +public class HHProgressHUD : HHSDKVideo.HHHUDable { + public init() + @objc public func showHUD() + @objc public func dismissHUD() + @objc public func showError(_ messgae: Swift.String?) + @objc public func showSuccess(_ message: Swift.String?) + public func hhMessageTips(message: Swift.String?) + @objc deinit +} +public struct HHGetQuesetionModel : HHSDKVideo.Mappable { + public var question: HHSDKVideo.HHQuesetionModel? + public var rate: [HHSDKVideo.rateModel]? 
+ public init?(map: HHSDKVideo.Map) + public init() + public mutating func mapping(map: HHSDKVideo.Map) + public func isHaveQ() -> Swift.Bool +} +public struct HHQuesetionModel : HHSDKVideo.Mappable { + public var answerOne: Swift.String? + public var answerTwo: Swift.String? + public var content: Swift.String? + public var id: Swift.Int? + public init?(map: HHSDKVideo.Map) + public init() + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct rateModel : HHSDKVideo.Mappable { + public var createTime: Swift.Int? + public var content: Swift.String? + public var state: Swift.Int? + public var id: Swift.Int? + public var answerOne: Swift.String? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +@objc public enum HHRealNameType : Swift.Int { + case normal, buyMedic + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc @_inheritsConvenienceInitializers public class HHRealNameInputNewView : UIKit.UIView { + @objc @IBOutlet weak public var idCardTF: UIKit.UITextField! + public class func createRealNameInputNewView(realNameType: HHSDKVideo.HHRealNameType, hideNickName: Swift.Bool = false) -> HHSDKVideo.HHRealNameInputNewView + public func showErroTip(tip: Swift.String) + public func getInpuValues() -> [Swift.String : Swift.String]? + @objc override dynamic public func awakeFromNib() + public func load(userModel: HHSDKVideo.HHUserModel?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +extension HHRealNameInputNewView : UIKit.UITextFieldDelegate { + @objc dynamic public func textField(_ textField: UIKit.UITextField, shouldChangeCharactersIn range: Foundation.NSRange, replacementString string: Swift.String) -> Swift.Bool + @objc dynamic public func textFieldDidBeginEditing(_ textField: UIKit.UITextField) +} +@_inheritsConvenienceInitializers @objc public class HHRealNameInputView : UIKit.UIView { + public var nickName: Swift.String { + get + set + } + public class func createRealNameInputView(realNameType: HHSDKVideo.HHRealNameType) -> HHSDKVideo.HHRealNameInputView + public var showPassPort: Swift.Bool { + get + set + } + public func showErroTip(tip: Swift.String) + public func getInpuValues() -> [Swift.String : Swift.String]? + @objc override dynamic public func awakeFromNib() + public func load(userModel: HHSDKVideo.HHUserModel?) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +extension HHRealNameInputView : UIKit.UITextFieldDelegate { + @objc dynamic public func textField(_ textField: UIKit.UITextField, shouldChangeCharactersIn range: Foundation.NSRange, replacementString string: Swift.String) -> Swift.Bool +} +public let KeyNetErrorStr: Swift.String +public let KeyNoNetErrorStr: Swift.String +public typealias HHNetError = ((Swift.String) -> (Swift.Void)) +public typealias HHNetSuccessNoData = () -> Swift.Void +public typealias NetResult = (Swift.Bool, Swift.String) -> (Swift.Void) +public class HHRequest<T> where T : HHSDKVideo.Mappable { + public typealias HHNetSuccess = (T) -> Swift.Void + public typealias HHNetSuccessForArray = ([T]?) -> Swift.Void + public var mRequestFail: HHSDKVideo.HHNetError? + public var mRequestSuccess: HHSDKVideo.HHRequest<T>.HHNetSuccess? 
+ public var mRequestSuccessNoData: HHSDKVideo.HHNetSuccessNoData? + public var mRequestSuccessForArray: HHSDKVideo.HHRequest<T>.HHNetSuccessForArray? + public var errorCode: Swift.Int? + public var mApi: HHSDKVideo.HHBaseApi? + required public init(api: HHSDKVideo.HHBaseApi, requestData: HHSDKVideo.HHRequestData? = nil, postData: Foundation.Data? = nil) + public func start() + public func cancel() + @objc deinit +} +extension HHRequest { + public func startForArray(_ successCallBack: @escaping HHSDKVideo.HHRequest<T>.HHNetSuccessForArray, failCallBack: @escaping HHSDKVideo.HHNetError) +} +@objc public protocol HHRTC { + @objc optional func setOrderId(orderId: Swift.String) + @objc optional func startCall(callee: Swift.String, orderId: Swift.String?) + @objc optional func enterRoom(orderId: Swift.String) + @objc optional func switchLocalAudio(_ isOpen: Swift.Bool) + @objc optional func switchLocalVideo(_ isOpen: Swift.Bool, localView: UIKit.UIView) + @objc optional func openDoctorView(userId: Swift.String, view: UIKit.UIView) + @objc optional func closeDoctorView(userId: Swift.String) + @objc optional func switchCamera(_ isFront: Swift.Bool) + @objc optional func switchCameraFlash(_ isOpen: Swift.Bool) + @objc optional func sendMsg(isSignal: Swift.Bool, cmd: Swift.String, to: Swift.String, complete: ((Swift.String?) -> Swift.Void)?) 
+ @objc optional func leaveRoom() + @objc optional func hangUp(callId: Swift.UInt64) + @objc optional func startRing(audioId: Swift.Int) + @objc optional func stopRing() + @objc optional func snapshotVideo(userId: Swift.String?, imageBack: @escaping (UIKit.UIImage) -> ()) +} +public protocol HHRTCDelegate : ObjectiveC.NSObject { + func onEnterRoom() + func checkHasAccept(_ isCmd: Swift.Bool, volumn: Swift.Int) + func switchVideo(_ isToAudio: Swift.Bool) + func onOtherViewAvailable(_ availableUserId: Swift.String, isAvailable: Swift.Bool) + func onRemoteUserEnterRoom(_ userId: Swift.String) + func onRemoteUserLeaveRoom(_ userId: Swift.String) + func sendRTCLog(action: HHSDKVideo.TrtcLog, ex: Swift.String) + func esdablishByRTC(error: HHSDKVideo.TrtcError, reason: Swift.String) + func processMsg(cmd: HHSDKVideo.HHIMCmd, orderId: Swift.String, uuid: Swift.String) + func waitingChanged(_ waitingInfo: HHSDKVideo.HHWaitDoctorModel) + func waitingSuccess(_ doctorInfo: HHSDKVideo.HHDoctorModel, orderId: Swift.String) + func onTransform(_ transInfo: HHSDKVideo.HHWaitDoctorModel) + func onExitRoom() + func hangup() + func getDoctorUserId() -> Swift.String? 
+ func resumeRemote() + func onFirstVideoFrame(_ userId: Swift.String?, width: Swift.Int32, height: Swift.Int32) +} +public enum TrtcLog : Swift.String { + case waitingRecall + case missMessage + case ignoreCall + case enterError + case doctorJoinRoom + case micDidReady + case netQuality + case signalError + case killEror + case netDown + case joinSuccess + case schedule + case noSchedule + case video_busy + case permit_error + case transform + case camera_close + case camera_open + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +public enum TrtcError : Swift.String { + case callTimeOut + case rtcError + case enterRoomFail + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +@_inheritsConvenienceInitializers @objc public class HHSDKBaseOptions : ObjectiveC.NSObject { + @objc public static let `default`: HHSDKVideo.HHSDKBaseOptions + @objc public var isDebug: Swift.Bool + @objc public var isDevelopment: Swift.Bool + @objc public var isSDK: Swift.Bool + @objc public var isTRTC: Swift.Bool + @objc public var sdkProductId: Swift.String + @objc public var appVersion: Swift.String + @objc public var needDNS: Swift.Bool + public var hudManager: HHSDKVideo.HHHUDable + @objc public var sdkVersion: Swift.String + @objc public var hudDisTime: Swift.Double { + @objc get + @objc set + } + @objc public func setConfig(_ sdkProductId: Swift.String, isDebug: Swift.Bool, isDevelopment: Swift.Bool, isTrtc: Swift.Bool, needDNS: Swift.Bool = false) + @objc override dynamic public init() + @objc deinit +} +@objc public protocol OptionProtocal { + @objc var hudDisTime: Foundation.TimeInterval { get set } + @objc var isDebug: Swift.Bool { get set } + @objc var isDevelopment: Swift.Bool { get set } + @objc var hudManager: HHSDKVideo.HHHUDable { get set } + @objc var productId: Swift.String { get set } + @objc var cerName: 
Swift.String? { get set } + @objc var logLevel: HHSDKVideo.HHLogMode { get set } + @objc var mExtension: Swift.String { get set } + @objc var changeDoctorTime: Swift.Int { get set } + @objc var logCallback: ((Swift.String) -> Swift.Void)? { get set } + @objc var mVideoOptions: HHSDKVideo.VideoOptions { get set } + @objc var mMessageOptions: HHSDKVideo.MessageOptions { get set } + @objc var mUserCenterOptions: HHSDKVideo.UsercenterOptions { get set } + @objc var sdkVersion: Swift.String { get set } + @objc var appVersion: Swift.String { get set } + @objc var isTRTC: Swift.Bool { get set } + @objc var needDNS: Swift.Bool { get set } + @objc var shouldWaingCall: Swift.Bool { get set } +} +public var HMDefaultOpt: HHSDKVideo.OptionProtocal { + get +} +@_inheritsConvenienceInitializers @objc public class VideoOptions : ObjectiveC.NSObject { + public var filterCallerInfo: Swift.Bool + @objc public var allowBeauty: Swift.Bool + @objc public var allowEvaluate: Swift.Bool + @objc public var allowAddMember: Swift.Bool + @objc public var allowMulti: Swift.Bool + public var mCallExtension: Swift.String + @objc public var isShowDocInfo: Swift.Bool + @objc public var enableCloseCamera: Swift.Bool + @objc public var isCloseCameraCall: Swift.Bool + @objc override dynamic public init() + @objc deinit +} +@_inheritsConvenienceInitializers @objc public class MessageOptions : ObjectiveC.NSObject { + @objc public var isByPresent: Swift.Bool + @objc public var isFilterSummary: Swift.Bool + @objc public var isFilterMedicinal: Swift.Bool + @objc public var defaultDocHeader: Swift.String + @objc public var defaultDocName: Swift.String + @objc public var messageTitle: Swift.String + @objc override dynamic public init() + @objc deinit +} +@_inheritsConvenienceInitializers @objc public class UsercenterOptions : ObjectiveC.NSObject { + @objc public var enableVipInfo: Swift.Bool + @objc public var hideUserCenter: Swift.Bool + @objc public var enableActivate: Swift.Bool + @objc public var 
enableMedical: Swift.Bool + @objc public var enableAddMemberInDoc: Swift.Bool + @objc public var enableBuyService: Swift.Bool + @objc public var hideNickName: Swift.Bool + @objc public var enablePopRealName: Swift.Bool + @objc public var isCloseMoreFunc: Swift.Bool + @objc override dynamic public init() + @objc deinit +} +@_hasMissingDesignatedInitializers public class HHStatics { + public static let `default`: HHSDKVideo.HHStatics + public func send(params: [Swift.String : Any]) + @objc deinit +} +public struct CommonApi { +} +extension String { + public func subFrom(_ index: Swift.Int) -> Swift.String + public func subTo(_ index: Swift.Int) -> Swift.String +} +extension String { + public func urlEncode() -> Swift.String + public func stringByAppendingPathComponent(_ pathComponent: Swift.String) -> Swift.String + public func hh_sha1() -> Swift.String + public func string2base64String() -> Swift.String + public func base64String2String() -> Swift.String + public var lastPathComponent: Swift.String { + get + } + public var pathExtension: Swift.String { + get + } +} +public enum hhToastPosition { + case top + case center + case bottom + public static func == (a: HHSDKVideo.hhToastPosition, b: HHSDKVideo.hhToastPosition) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +extension UIView { + public func hhmakeToast(_ message: Swift.String) + public func hhmakeToast(_ message: Swift.String, duration: Foundation.TimeInterval, position: HHSDKVideo.hhToastPosition) + public func hhmakeToast(_ message: Swift.String, duration: Foundation.TimeInterval, position: CoreGraphics.CGPoint) + public func hhmakeToast(_ message: Swift.String, duration: Foundation.TimeInterval, position: HHSDKVideo.hhToastPosition, style: HHSDKVideo.hhToastStyle?) + public func hhmakeToast(_ message: Swift.String, duration: Foundation.TimeInterval, position: CoreGraphics.CGPoint, style: HHSDKVideo.hhToastStyle?) 
+ public func hhmakeToast(_ message: Swift.String?, duration: Foundation.TimeInterval, position: HHSDKVideo.hhToastPosition, title: Swift.String?, image: UIKit.UIImage?, style: HHSDKVideo.hhToastStyle?, completion: ((Swift.Bool) -> Swift.Void)?) + public func hhmakeToast(_ message: Swift.String?, duration: Foundation.TimeInterval, position: CoreGraphics.CGPoint, title: Swift.String?, image: UIKit.UIImage?, style: HHSDKVideo.hhToastStyle?, completion: ((Swift.Bool) -> Swift.Void)?) + public func hhshowToast(_ toast: UIKit.UIView) + public func hhshowToast(_ toast: UIKit.UIView, duration: Foundation.TimeInterval, position: HHSDKVideo.hhToastPosition, completion: ((Swift.Bool) -> Swift.Void)?) + public func hhshowToast(_ toast: UIKit.UIView, duration: Foundation.TimeInterval, position: CoreGraphics.CGPoint, completion: ((Swift.Bool) -> Swift.Void)?) + public func hhmakeToastActivity(_ position: HHSDKVideo.hhToastPosition) + public func hhmakeToastActivity(_ position: CoreGraphics.CGPoint) + public func hhhideToastActivity() + @objc dynamic public func hhhandleToastTapped(_ recognizer: UIKit.UITapGestureRecognizer) + @objc dynamic public func hhtoastTimerDidFinish(_ timer: Foundation.Timer) + public func hhtoastViewForMessage(_ message: Swift.String?, title: Swift.String?, image: UIKit.UIImage?, style: HHSDKVideo.hhToastStyle) throws -> UIKit.UIView +} +public struct hhToastStyle { + public init() + public var backgroundColor: UIKit.UIColor + public var titleColor: UIKit.UIColor + public var messageColor: UIKit.UIColor + public var maxWidthPercentage: CoreGraphics.CGFloat { + get + set + } + public var maxHeightPercentage: CoreGraphics.CGFloat { + get + set + } + public var horizontalPadding: CoreGraphics.CGFloat + public var verticalPadding: CoreGraphics.CGFloat + public var cornerRadius: CoreGraphics.CGFloat + public var titleFont: UIKit.UIFont + public var messageFont: UIKit.UIFont + public var titleAlignment: UIKit.NSTextAlignment + public var messageAlignment: 
UIKit.NSTextAlignment + public var titleNumberOfLines: Swift.Int + public var messageNumberOfLines: Swift.Int + public var displayShadow: Swift.Bool + public var shadowColor: UIKit.UIColor + public var shadowOpacity: Swift.Float { + get + set + } + public var shadowRadius: CoreGraphics.CGFloat + public var shadowOffset: CoreGraphics.CGSize + public var imageSize: CoreGraphics.CGSize + public var activitySize: CoreGraphics.CGSize + public var fadeDuration: Swift.Double +} +extension UIAlertController { + public func showAlter() + public func present(animated: Swift.Bool, completion: (() -> Swift.Void)?) + public func addAlterActions(_ actions: [UIKit.UIAlertAction]) + public func alterMessageStyle(_ fonsize: CoreGraphics.CGFloat = (HHDimens.isPad()) ? 18 : 16) + public static func closeAlert(_ title: Swift.String = "", msg: Swift.String = "", keyString: Swift.String = "取消", closeBlock: (() -> Swift.Void)? = nil) -> UIKit.UIAlertController +} +extension UIButton { + public func centerImageTitleVertically(spacing: CoreGraphics.CGFloat = 2) + public func imageTitleHorizonal(spacing: CoreGraphics.CGFloat = 2) +} +extension UIImage { + public func rotatedBy(_ degrees: CoreGraphics.CGFloat) -> UIKit.UIImage +} +extension UIImageView { + public func hh_image(url: Foundation.URL?) + public func hh_image(url: Foundation.URL?, complete: ((UIKit.UIImage?, Foundation.URL?) -> Swift.Void)?) + public func hh_image(url: Foundation.URL?, placeHolder: UIKit.UIImage?) + public func hh_image(url: Foundation.URL?, placeHolder: UIKit.UIImage?, progresses: ((CoreGraphics.CGFloat) -> Swift.Void)?, complete: ((UIKit.UIImage?, Foundation.URL?) -> Swift.Void)?) +} +public func hh_downloadImg(_ url: Foundation.URL?, finish: @escaping ((UIKit.UIImage?, Foundation.Data?, Swift.Error?) -> Swift.Void)) +extension UIViewController { + public func hhAddCloseBtn(_ atLeft: Swift.Bool? 
= nil, isDismiss: Swift.Bool = true, title: Swift.String = "关闭") + @objc dynamic public func hhCloseThisController() + @objc dynamic public func hhPopThisController() + public func setNavigationTheme() + public func setNaviBackImg(navi: UIKit.UINavigationController?, color: UIKit.UIColor) + public func imageFromColor(color: UIKit.UIColor, size: CoreGraphics.CGSize) -> UIKit.UIImage +} +extension UIView { + public var sj_width: CoreGraphics.CGFloat { + get + set + } + public var sj_height: CoreGraphics.CGFloat { + get + set + } + public var sj_size: CoreGraphics.CGSize { + get + set + } + public var sj_origin: CoreGraphics.CGPoint { + get + set + } + public var sj_x: CoreGraphics.CGFloat { + get + set + } + public var sj_y: CoreGraphics.CGFloat { + get + set + } + public var sj_centerX: CoreGraphics.CGFloat { + get + set + } + public var sj_centerY: CoreGraphics.CGFloat { + get + set + } + public var sj_top: CoreGraphics.CGFloat { + get + set + } + public var sj_bottom: CoreGraphics.CGFloat { + get + set + } + public var sj_right: CoreGraphics.CGFloat { + get + set + } + public var sj_left: CoreGraphics.CGFloat { + get + set + } +} +extension UIView { + public class func viewFromNib<T>(_ aClass: T.Type, frameworkPath: Swift.String) -> T +} +public typealias onSDKProgress = ((CoreGraphics.CGFloat, Swift.String) -> Swift.Void) +public typealias onSDKUploadOnce = ((Swift.Bool, HHSDKVideo.SDKUploadModel) -> Swift.Void) +public typealias onSDKFinished = (() -> Swift.Void) +public class SDKUploadConfig { + public var progress: HHSDKVideo.onSDKProgress? + public var uploadOnce: HHSDKVideo.onSDKUploadOnce? + public var finished: HHSDKVideo.onSDKFinished? + public var orderId: Swift.String? + public init() + @objc deinit +} +public class SDKUploadModel { + public var clouldKey: Swift.String? + public var filePath: Swift.String? { + get + set + } + public var smallImage: Swift.String + public var state: HHSDKVideo.SDKUploadState? 
+ public init() + public init(full: Swift.String?, scale: Swift.String) + public init(clouldKey: Swift.String?, filePath: Swift.String?, uploadTime: Foundation.TimeInterval?, name: Swift.String?, smallImage: Swift.String) + @objc deinit +} +@_hasMissingDesignatedInitializers public class SDKUploadState { + public var file: Swift.String? + public var isSelect: Swift.Bool + public var changed: (() -> Swift.Void)? + public var progress: Swift.Float { + get + set + } + public func isSuccess() -> Swift.Bool + public func isFail() -> Swift.Bool + @objc deinit +} +@_hasMissingDesignatedInitializers public class HHUrl { + public static func domains() -> [Swift.String] + public static var timeOffset: Swift.Double + public static func urlForPay() -> Swift.String + public static func urlForFamily() -> Swift.String + public static func urlForWeixin() -> Swift.String + public static func baseUrl() -> Swift.String + public static func basePayUrl() -> Swift.String + public static func baseMedicUrl() -> Swift.String + public static func baseSecUrl() -> Swift.String + public static func testURL() -> Swift.String + public static func fileLogUrl(_ name: Swift.String, orderId: Swift.String) -> Foundation.URL + public static func expertDetailUrl(expertId: Swift.String) -> Swift.String + public static func buyVIPUrl() -> Swift.String + public static func productRightUrl() -> Swift.String + @objc deinit +} +extension HHUrl { + public static func headers(host: Swift.String) -> [Swift.String : Swift.String] +} +public func languagePrefix() -> Swift.String +@_hasMissingDesignatedInitializers public class HHUserDefaults { + public class func setString(_ str: Swift.String, key: Swift.String) + public class func stringValue(_ key: Swift.String) -> Swift.String? + public class func setArray(_ array: [Swift.AnyObject], key: Swift.String) + public class func arrayForKey(_ key: Swift.String) -> [Swift.AnyObject]? 
+ public class func setImage(_ image: UIKit.UIImage, key: Swift.String) + public class func imageForKey(_ key: Swift.String) -> UIKit.UIImage? + @objc deinit +} +extension HHUserDefaults { + public class func setBool(_ flag: Swift.Bool, key: Swift.String) + public class func boolForKey(_ key: Swift.String) -> Swift.Bool + public class func setObject(_ obj: Swift.AnyObject, key: Swift.String) + public class func objectForKey(_ key: Swift.String) -> Swift.AnyObject? + public class func removeObject(_ key: Swift.String) +} +extension HHUserDefaults { + public class func setData(_ data: Foundation.Data?, key: Swift.String) + public class func dataForKey(_ key: Swift.String) -> Foundation.Data? + public class func userDefaults() -> Foundation.UserDefaults + public class func synchronize() + public class func encryptkey(_ key: Swift.String) -> Swift.String +} +public struct HHMemberInfoModel : HHSDKVideo.Mappable { + public var productStatusDescn: Swift.String? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +public class HHUserModel : HHSDKVideo.Mappable { + public var age: Swift.String? + public var companyLogo: Swift.String? + public var birthday: Swift.Int64? + public var loginname: Swift.String? + public var name: Swift.String? + public var photourl: Swift.String? + public var pid: Swift.Int? + public var product: HHSDKVideo.HHMemberInfoModel? + public var relation: Swift.String? + public var sex: Swift.String? + public var uuid: Swift.Int? + public var userToken: Swift.String? + public var videoToken: Swift.String? + public var auth: Swift.Bool? + public var isMember: Swift.Bool? + public var isAccount: Swift.Bool? + public var license: Swift.String? + public var userSig: Swift.String? + public var phoneNum: Swift.String? 
+ required public init?(map: HHSDKVideo.Map) + public init() + public func mapping(map: HHSDKVideo.Map) + @objc deinit +} +public struct HHUserProtocolModel : HHSDKVideo.Mappable { + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +open class HHValueObservable<T> { + public typealias Observer = (T) -> Swift.Void + open var observer: HHSDKVideo.HHValueObservable<T>.Observer? + open func observe(_ observer: HHSDKVideo.HHValueObservable<T>.Observer?) + open var value: T { + get + set + } + public init(_ v: T) + @objc deinit +} +@_inheritsConvenienceInitializers @objc public class HHVideoLocation : ObjectiveC.NSObject { + @objc public static let `default`: HHSDKVideo.HHVideoLocation + @objc public func startLocation(lng: Swift.String, lat: Swift.String) + @objc public func closeLocation() + @objc override dynamic public init() + @objc deinit +} +@_inheritsConvenienceInitializers @objc public class HHVideoSDK : ObjectiveC.NSObject { + @objc public static let `default`: HHSDKVideo.HHVideoSDK + public var mHHRTC: HHSDKVideo.HHRTC? + public var mSDKOption: HHSDKVideo.OptionProtocal? + weak public var mCallDelegate: HHSDKVideo.HHCallDelegate? + weak public var mHHRTCDelegate: HHSDKVideo.HHRTCDelegate? + weak public var videoManager: HHSDKVideo.HHBaseVideoDelegate? + public var expertVideoCallback: (() -> Swift.Void)? + public var autoLoginCheck: (() -> Swift.Void)? + public var onReceiveNewMsg: (([Swift.String : Any]) -> Swift.Void)? + public var userProtocolModel: HHSDKVideo.HHUserProtocolModel? + @objc public var photosPreview: ((Swift.Array<Swift.String>) -> Swift.Void)? + @objc public func start(option: HHSDKVideo.OptionProtocal, im: HHSDKVideo.HHIM, rtc: HHSDKVideo.HHRTC) + @objc public func login(userToken: Swift.String, completion: @escaping HHSDKVideo.HHLoginHandler) + @objc public func autoLogin(uuid: Swift.Int, completion: @escaping HHSDKVideo.HHLoginHandler) + @objc public func logout(_ callback: ((Swift.String?) 
-> Swift.Void)? = nil) + @objc public func terminate() + @objc public func setAlipayHook(alipayCallback: @escaping (Swift.String, Swift.String, @escaping (([Swift.String : Any]) -> Swift.Void)) -> Swift.Bool) + @objc override dynamic public init() + @objc deinit +} +extension HHVideoSDK { + @objc dynamic public func startCall(_ type: HHSDKVideo.HHCallType = .adult, scene: Swift.String? = nil, callDelegate: HHSDKVideo.HHCallDelegate? = nil) + @objc dynamic public func startNewCall(_ uuid: Swift.Int, type: HHSDKVideo.HHCallType = .adult, callDelegate: HHSDKVideo.HHCallDelegate? = nil) + @objc dynamic public func startCall(_ uuid: Swift.Int, scene: Swift.String? = nil, type: HHSDKVideo.HHCallType = .adult, callDelegate: HHSDKVideo.HHCallDelegate? = nil) +} +extension HHVideoSDK { + @objc dynamic public func startTeamCall(_ type: HHSDKVideo.HHCallType, callee: HHSDKVideo.HHCallerInfo, callDelegate: HHSDKVideo.HHCallDelegate? = nil) +} +extension HHVideoSDK { + @objc dynamic public func call(_ memberToken: Swift.String, scene: Swift.String? = nil) +} +extension HHVideoSDK { + public func waitExpert(userToken: Swift.String, callOrderId: Swift.String) +} +extension HHVideoSDK { + @objc dynamic public func startMemberCall(needSelectMember: Swift.Bool = true) +} +extension HHVideoSDK { + @objc dynamic public func skipChatHome(isByPresent: Swift.Bool = false, vc: UIKit.UIViewController? = nil) + @objc dynamic public func skipChatHome(_ nav: UIKit.UINavigationController) + @objc dynamic public func chatHomeVC() -> UIKit.UIViewController? +} +extension HHVideoSDK { + public func sendBaseLog(ex: [Swift.String : Swift.String]? = nil, action: [Swift.String : Swift.String]? = nil) +} +public func topviewController() -> UIKit.UIViewController? 
+extension HHVideoSDK { + @objc dynamic public func loginForThirdId(_ thirdInfo: [Swift.String : Any], completion: @escaping HHSDKVideo.HHLoginHandler) +} +extension HHVideoSDK { + public func checkProtocolUpdate(agreeBlock: ((Swift.Bool) -> Swift.Void)?) +} +extension HHVideoSDK { + @objc dynamic public func getMedicDetail(userToken: Swift.String, medicId: Swift.String) -> Swift.String + @objc dynamic public func getMedicList(userToken: Swift.String) -> Swift.String + @objc dynamic public func getAllMedics(userToken: Swift.String) -> Swift.String +} +extension HHVideoSDK { + @objc dynamic public func onKickedOffline() +} +public struct HHWaitDoctorModel : HHSDKVideo.Mappable { + public var isNormalTrans: Swift.Bool + public var deptId: Swift.String? + public var uuid: Swift.Int? + public var transUuid: Swift.Int? + public init?(map: HHSDKVideo.Map) + public func isWaiting() -> Swift.Bool + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct HHWaitingCallModel : HHSDKVideo.Mappable { + public init?(map: HHSDKVideo.Map) + public func isCall() -> Swift.Bool + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct HHAgentCallModel : HHSDKVideo.Mappable { + public init?(map: HHSDKVideo.Map) + public func isAgent() -> Swift.Bool + public func isTransform() -> Swift.Bool + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct HKDF { + public enum Error : Swift.Error { + case invalidInput + case derivedKeyTooLong + public static func == (a: HHSDKVideo.HKDF.Error, b: HHSDKVideo.HKDF.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(password: Swift.Array<Swift.UInt8>, salt: Swift.Array<Swift.UInt8>? = nil, info: Swift.Array<Swift.UInt8>? = nil, keyLength: Swift.Int? 
= nil, variant: HHSDKVideo.HMAC.Variant = .sha256) throws + public func calculate() throws -> Swift.Array<Swift.UInt8> +} +final public class HMAC : HHSDKVideo.CryptoAuthenticator { + public enum Error : Swift.Error { + case authenticateError + case invalidInput + public static func == (a: HHSDKVideo.HMAC.Error, b: HHSDKVideo.HMAC.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public enum Variant { + case sha1, sha256, sha384, sha512, md5 + public static func == (a: HHSDKVideo.HMAC.Variant, b: HHSDKVideo.HMAC.Variant) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(key: Swift.Array<Swift.UInt8>, variant: HHSDKVideo.HMAC.Variant = .md5) + final public func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension HMAC { + convenience public init(key: Swift.String, variant: HHSDKVideo.HMAC.Variant = .md5) throws +} +public protocol ImmutableMappable : HHSDKVideo.BaseMappable { + init(map: HHSDKVideo.Map) throws +} +extension ImmutableMappable { + public func mapping(map: HHSDKVideo.Map) + public init(JSONString: Swift.String, context: HHSDKVideo.MapContext? = nil) throws + public init(JSON: [Swift.String : Any], context: HHSDKVideo.MapContext? = nil) throws + public init(JSONObject: Any, context: HHSDKVideo.MapContext? = nil) throws +} +extension Map { + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T + final public func value<Transform>(_ key: Swift.String, nested: Swift.Bool? 
= nil, delimiter: Swift.String = ".", using transform: Transform, file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> Transform.Object where Transform : HHSDKVideo.TransformType + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T where T : Swift.RawRepresentable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T? where T : Swift.RawRepresentable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [T] where T : Swift.RawRepresentable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [T]? where T : Swift.RawRepresentable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T? where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? 
= nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [T] where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [T]? where T : HHSDKVideo.BaseMappable + final public func value<Transform>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", using transform: Transform, file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [Transform.Object] where Transform : HHSDKVideo.TransformType + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [Swift.String : T] where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [Swift.String : T]? where T : HHSDKVideo.BaseMappable + final public func value<Transform>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", using transform: Transform, file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [Swift.String : Transform.Object] where Transform : HHSDKVideo.TransformType + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [[T]]? where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? 
= nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [[T]] where T : HHSDKVideo.BaseMappable + final public func value<Transform>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", using transform: Transform, file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [[Transform.Object]] where Transform : HHSDKVideo.TransformType +} +extension Mapper where N : HHSDKVideo.ImmutableMappable { + final public func map(JSON: [Swift.String : Any]) throws -> N + final public func map(JSONString: Swift.String) throws -> N + final public func map(JSONObject: Any) throws -> N + final public func mapArray(JSONArray: [[Swift.String : Any]]) throws -> [N] + final public func mapArray(JSONString: Swift.String) throws -> [N] + final public func mapArray(JSONObject: Any) throws -> [N] + final public func mapDictionary(JSONString: Swift.String) throws -> [Swift.String : N] + final public func mapDictionary(JSONObject: Any?) throws -> [Swift.String : N] + final public func mapDictionary(JSON: [Swift.String : [Swift.String : Any]]) throws -> [Swift.String : N] + final public func mapDictionaryOfArrays(JSONObject: Any?) throws -> [Swift.String : [N]] + final public func mapDictionaryOfArrays(JSON: [Swift.String : [[Swift.String : Any]]]) throws -> [Swift.String : [N]] + final public func mapArrayOfArrays(JSONObject: Any?) 
throws -> [[N]] +} +public func <- <T>(left: inout T, right: HHSDKVideo.Map) where T : Swift.SignedInteger +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) where T : Swift.SignedInteger +public func <- <T>(left: inout T, right: HHSDKVideo.Map) where T : Swift.UnsignedInteger +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) where T : Swift.UnsignedInteger +extension DateFormatter { + convenience public init(withFormat format: Swift.String, locale: Swift.String) +} +open class ISO8601DateTransform : HHSDKVideo.DateFormatterTransform { + public init() + override public init(dateFormatter: Foundation.DateFormatter) + @objc deinit +} +public let KeychainAccessErrorDomain: Swift.String +public enum ItemClass { + case genericPassword + case internetPassword +} +public enum ProtocolType { + case ftp + case ftpAccount + case http + case irc + case nntp + case pop3 + case smtp + case socks + case imap + case ldap + case appleTalk + case afp + case telnet + case ssh + case ftps + case https + case httpProxy + case httpsProxy + case ftpProxy + case smb + case rtsp + case rtspProxy + case daap + case eppc + case ipp + case nntps + case ldaps + case telnetS + case imaps + case ircs + case pop3S +} +public enum AuthenticationType { + case ntlm + case msn + case dpa + case rpa + case httpBasic + case httpDigest + case htmlForm + case `default` +} +public enum Accessibility { + case whenUnlocked + case afterFirstUnlock + case always + @available(iOS 8.0, macOS 10.10, *) + case whenPasscodeSetThisDeviceOnly + case whenUnlockedThisDeviceOnly + case afterFirstUnlockThisDeviceOnly + case alwaysThisDeviceOnly +} +public struct AuthenticationPolicy : Swift.OptionSet { + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + public static let userPresence: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let touchIDAny: HHSDKVideo.AuthenticationPolicy + 
@available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let touchIDCurrentSet: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, macOS 10.11, *) + @available(watchOS, unavailable) + public static let devicePasscode: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let or: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let and: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let privateKeyUsage: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let applicationPassword: HHSDKVideo.AuthenticationPolicy + public let rawValue: Swift.UInt + public init(rawValue: Swift.UInt) + public typealias ArrayLiteralElement = HHSDKVideo.AuthenticationPolicy + public typealias Element = HHSDKVideo.AuthenticationPolicy + public typealias RawValue = Swift.UInt +} +public struct Attributes { + public var `class`: Swift.String? { + get + } + public var data: Foundation.Data? { + get + } + public var ref: Foundation.Data? { + get + } + public var persistentRef: Foundation.Data? { + get + } + public var accessible: Swift.String? { + get + } + public var accessControl: Security.SecAccessControl? { + get + } + public var accessGroup: Swift.String? { + get + } + public var synchronizable: Swift.Bool? { + get + } + public var creationDate: Foundation.Date? { + get + } + public var modificationDate: Foundation.Date? { + get + } + public var attributeDescription: Swift.String? { + get + } + public var comment: Swift.String? { + get + } + public var creator: Swift.String? { + get + } + public var type: Swift.String? { + get + } + public var label: Swift.String? 
{ + get + } + public var isInvisible: Swift.Bool? { + get + } + public var isNegative: Swift.Bool? { + get + } + public var account: Swift.String? { + get + } + public var service: Swift.String? { + get + } + public var generic: Foundation.Data? { + get + } + public var securityDomain: Swift.String? { + get + } + public var server: Swift.String? { + get + } + public var `protocol`: Swift.String? { + get + } + public var authenticationType: Swift.String? { + get + } + public var port: Swift.Int? { + get + } + public var path: Swift.String? { + get + } + public subscript(key: Swift.String) -> Any? { + get + } +} +@_hasMissingDesignatedInitializers final public class Keychain { + final public var itemClass: HHSDKVideo.ItemClass { + get + } + final public var service: Swift.String { + get + } + final public var accessGroup: Swift.String? { + get + } + final public var server: Foundation.URL { + get + } + final public var protocolType: HHSDKVideo.ProtocolType { + get + } + final public var authenticationType: HHSDKVideo.AuthenticationType { + get + } + final public var accessibility: HHSDKVideo.Accessibility { + get + } + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + final public var authenticationPolicy: HHSDKVideo.AuthenticationPolicy? { + get + } + final public var synchronizable: Swift.Bool { + get + } + final public var label: Swift.String? { + get + } + final public var comment: Swift.String? { + get + } + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + final public var authenticationPrompt: Swift.String? { + get + } + @available(iOS 9.0, macOS 10.11, *) + final public var authenticationContext: LocalAuthentication.LAContext? 
{ + get + } + convenience public init() + convenience public init(service: Swift.String) + convenience public init(accessGroup: Swift.String) + convenience public init(service: Swift.String, accessGroup: Swift.String) + convenience public init(server: Swift.String, protocolType: HHSDKVideo.ProtocolType, authenticationType: HHSDKVideo.AuthenticationType = .default) + convenience public init(server: Foundation.URL, protocolType: HHSDKVideo.ProtocolType, authenticationType: HHSDKVideo.AuthenticationType = .default) + final public func accessibility(_ accessibility: HHSDKVideo.Accessibility) -> HHSDKVideo.Keychain + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + final public func accessibility(_ accessibility: HHSDKVideo.Accessibility, authenticationPolicy: HHSDKVideo.AuthenticationPolicy) -> HHSDKVideo.Keychain + final public func synchronizable(_ synchronizable: Swift.Bool) -> HHSDKVideo.Keychain + final public func label(_ label: Swift.String) -> HHSDKVideo.Keychain + final public func comment(_ comment: Swift.String) -> HHSDKVideo.Keychain + final public func attributes(_ attributes: [Swift.String : Any]) -> HHSDKVideo.Keychain + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + final public func authenticationPrompt(_ authenticationPrompt: Swift.String) -> HHSDKVideo.Keychain + @available(iOS 9.0, macOS 10.11, *) + final public func authenticationContext(_ authenticationContext: LocalAuthentication.LAContext) -> HHSDKVideo.Keychain + final public func get(_ key: Swift.String) throws -> Swift.String? + final public func getString(_ key: Swift.String) throws -> Swift.String? + final public func getData(_ key: Swift.String) throws -> Foundation.Data? + final public func get<T>(_ key: Swift.String, handler: (HHSDKVideo.Attributes?) 
-> T) throws -> T + final public func set(_ value: Swift.String, key: Swift.String) throws + final public func set(_ value: Foundation.Data, key: Swift.String) throws + final public subscript(key: Swift.String) -> Swift.String? { + get + set + } + final public subscript(string key: Swift.String) -> Swift.String? { + get + set + } + final public subscript(data key: Swift.String) -> Foundation.Data? { + get + set + } + final public subscript(attributes key: Swift.String) -> HHSDKVideo.Attributes? { + get + } + final public func remove(_ key: Swift.String) throws + final public func removeAll() throws + final public func contains(_ key: Swift.String) throws -> Swift.Bool + final public class func allKeys(_ itemClass: HHSDKVideo.ItemClass) -> [(Swift.String, Swift.String)] + final public func allKeys() -> [Swift.String] + final public class func allItems(_ itemClass: HHSDKVideo.ItemClass) -> [[Swift.String : Any]] + final public func allItems() -> [[Swift.String : Any]] + @available(iOS 8.0, *) + final public func getSharedPassword(_ completion: @escaping (Swift.String?, Swift.String?, Swift.Error?) -> () = { account, password, error -> () in }) + @available(iOS 8.0, *) + final public func getSharedPassword(_ account: Swift.String, completion: @escaping (Swift.String?, Swift.Error?) -> () = { password, error -> () in }) + @available(iOS 8.0, *) + final public func setSharedPassword(_ password: Swift.String, account: Swift.String, completion: @escaping (Swift.Error?) -> () = { e -> () in }) + @available(iOS 8.0, *) + final public func removeSharedPassword(_ account: Swift.String, completion: @escaping (Swift.Error?) -> () = { e -> () in }) + @available(iOS 8.0, *) + final public class func requestSharedWebCredential(_ completion: @escaping ([[Swift.String : Swift.String]], Swift.Error?) 
-> () = { credentials, error -> () in }) + @available(iOS 8.0, *) + final public class func requestSharedWebCredential(domain: Swift.String, completion: @escaping ([[Swift.String : Swift.String]], Swift.Error?) -> () = { credentials, error -> () in }) + @available(iOS 8.0, *) + final public class func requestSharedWebCredential(domain: Swift.String, account: Swift.String, completion: @escaping ([[Swift.String : Swift.String]], Swift.Error?) -> () = { credentials, error -> () in }) + @available(iOS 8.0, *) + final public class func generatePassword() -> Swift.String + @objc deinit +} +extension Keychain : Swift.CustomStringConvertible, Swift.CustomDebugStringConvertible { + final public var description: Swift.String { + get + } + final public var debugDescription: Swift.String { + get + } +} +extension Attributes : Swift.CustomStringConvertible, Swift.CustomDebugStringConvertible { + public var description: Swift.String { + get + } + public var debugDescription: Swift.String { + get + } +} +extension ItemClass : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init?(rawValue: Swift.String) + public var rawValue: Swift.String { + get + } + public var description: Swift.String { + get + } + public typealias RawValue = Swift.String +} +extension ProtocolType : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init?(rawValue: Swift.String) + public var rawValue: Swift.String { + get + } + public var description: Swift.String { + get + } + public typealias RawValue = Swift.String +} +extension AuthenticationType : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init?(rawValue: Swift.String) + public var rawValue: Swift.String { + get + } + public var description: Swift.String { + get + } + public typealias RawValue = Swift.String +} +extension Accessibility : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init?(rawValue: Swift.String) + public var rawValue: Swift.String { + get + } + public var 
description: Swift.String { + get + } + public typealias RawValue = Swift.String +} +public enum Status : Darwin.OSStatus, Swift.Error { + case success + case unimplemented + case diskFull + case io + case opWr + case param + case wrPerm + case allocate + case userCanceled + case badReq + case internalComponent + case notAvailable + case readOnly + case authFailed + case noSuchKeychain + case invalidKeychain + case duplicateKeychain + case duplicateCallback + case invalidCallback + case duplicateItem + case itemNotFound + case bufferTooSmall + case dataTooLarge + case noSuchAttr + case invalidItemRef + case invalidSearchRef + case noSuchClass + case noDefaultKeychain + case interactionNotAllowed + case readOnlyAttr + case wrongSecVersion + case keySizeNotAllowed + case noStorageModule + case noCertificateModule + case noPolicyModule + case interactionRequired + case dataNotAvailable + case dataNotModifiable + case createChainFailed + case invalidPrefsDomain + case inDarkWake + case aclNotSimple + case policyNotFound + case invalidTrustSetting + case noAccessForItem + case invalidOwnerEdit + case trustNotAvailable + case unsupportedFormat + case unknownFormat + case keyIsSensitive + case multiplePrivKeys + case passphraseRequired + case invalidPasswordRef + case invalidTrustSettings + case noTrustSettings + case pkcs12VerifyFailure + case invalidCertificate + case notSigner + case policyDenied + case invalidKey + case decode + case `internal` + case unsupportedAlgorithm + case unsupportedOperation + case unsupportedPadding + case itemInvalidKey + case itemInvalidKeyType + case itemInvalidValue + case itemClassMissing + case itemMatchUnsupported + case useItemListUnsupported + case useKeychainUnsupported + case useKeychainListUnsupported + case returnDataUnsupported + case returnAttributesUnsupported + case returnRefUnsupported + case returnPersitentRefUnsupported + case valueRefUnsupported + case valuePersistentRefUnsupported + case returnMissingPointer + case 
matchLimitUnsupported + case itemIllegalQuery + case waitForCallback + case missingEntitlement + case upgradePending + case mpSignatureInvalid + case otrTooOld + case otrIDTooNew + case serviceNotAvailable + case insufficientClientID + case deviceReset + case deviceFailed + case appleAddAppACLSubject + case applePublicKeyIncomplete + case appleSignatureMismatch + case appleInvalidKeyStartDate + case appleInvalidKeyEndDate + case conversionError + case appleSSLv2Rollback + case quotaExceeded + case fileTooBig + case invalidDatabaseBlob + case invalidKeyBlob + case incompatibleDatabaseBlob + case incompatibleKeyBlob + case hostNameMismatch + case unknownCriticalExtensionFlag + case noBasicConstraints + case noBasicConstraintsCA + case invalidAuthorityKeyID + case invalidSubjectKeyID + case invalidKeyUsageForPolicy + case invalidExtendedKeyUsage + case invalidIDLinkage + case pathLengthConstraintExceeded + case invalidRoot + case crlExpired + case crlNotValidYet + case crlNotFound + case crlServerDown + case crlBadURI + case unknownCertExtension + case unknownCRLExtension + case crlNotTrusted + case crlPolicyFailed + case idpFailure + case smimeEmailAddressesNotFound + case smimeBadExtendedKeyUsage + case smimeBadKeyUsage + case smimeKeyUsageNotCritical + case smimeNoEmailAddress + case smimeSubjAltNameNotCritical + case sslBadExtendedKeyUsage + case ocspBadResponse + case ocspBadRequest + case ocspUnavailable + case ocspStatusUnrecognized + case endOfData + case incompleteCertRevocationCheck + case networkFailure + case ocspNotTrustedToAnchor + case recordModified + case ocspSignatureError + case ocspNoSigner + case ocspResponderMalformedReq + case ocspResponderInternalError + case ocspResponderTryLater + case ocspResponderSignatureRequired + case ocspResponderUnauthorized + case ocspResponseNonceMismatch + case codeSigningBadCertChainLength + case codeSigningNoBasicConstraints + case codeSigningBadPathLengthConstraint + case codeSigningNoExtendedKeyUsage + case 
codeSigningDevelopment + case resourceSignBadCertChainLength + case resourceSignBadExtKeyUsage + case trustSettingDeny + case invalidSubjectName + case unknownQualifiedCertStatement + case mobileMeRequestQueued + case mobileMeRequestRedirected + case mobileMeServerError + case mobileMeServerNotAvailable + case mobileMeServerAlreadyExists + case mobileMeServerServiceErr + case mobileMeRequestAlreadyPending + case mobileMeNoRequestPending + case mobileMeCSRVerifyFailure + case mobileMeFailedConsistencyCheck + case notInitialized + case invalidHandleUsage + case pvcReferentNotFound + case functionIntegrityFail + case internalError + case memoryError + case invalidData + case mdsError + case invalidPointer + case selfCheckFailed + case functionFailed + case moduleManifestVerifyFailed + case invalidGUID + case invalidHandle + case invalidDBList + case invalidPassthroughID + case invalidNetworkAddress + case crlAlreadySigned + case invalidNumberOfFields + case verificationFailure + case unknownTag + case invalidSignature + case invalidName + case invalidCertificateRef + case invalidCertificateGroup + case tagNotFound + case invalidQuery + case invalidValue + case callbackFailed + case aclDeleteFailed + case aclReplaceFailed + case aclAddFailed + case aclChangeFailed + case invalidAccessCredentials + case invalidRecord + case invalidACL + case invalidSampleValue + case incompatibleVersion + case privilegeNotGranted + case invalidScope + case pvcAlreadyConfigured + case invalidPVC + case emmLoadFailed + case emmUnloadFailed + case addinLoadFailed + case invalidKeyRef + case invalidKeyHierarchy + case addinUnloadFailed + case libraryReferenceNotFound + case invalidAddinFunctionTable + case invalidServiceMask + case moduleNotLoaded + case invalidSubServiceID + case attributeNotInContext + case moduleManagerInitializeFailed + case moduleManagerNotFound + case eventNotificationCallbackNotFound + case inputLengthError + case outputLengthError + case privilegeNotSupported + case 
deviceError + case attachHandleBusy + case notLoggedIn + case algorithmMismatch + case keyUsageIncorrect + case keyBlobTypeIncorrect + case keyHeaderInconsistent + case unsupportedKeyFormat + case unsupportedKeySize + case invalidKeyUsageMask + case unsupportedKeyUsageMask + case invalidKeyAttributeMask + case unsupportedKeyAttributeMask + case invalidKeyLabel + case unsupportedKeyLabel + case invalidKeyFormat + case unsupportedVectorOfBuffers + case invalidInputVector + case invalidOutputVector + case invalidContext + case invalidAlgorithm + case invalidAttributeKey + case missingAttributeKey + case invalidAttributeInitVector + case missingAttributeInitVector + case invalidAttributeSalt + case missingAttributeSalt + case invalidAttributePadding + case missingAttributePadding + case invalidAttributeRandom + case missingAttributeRandom + case invalidAttributeSeed + case missingAttributeSeed + case invalidAttributePassphrase + case missingAttributePassphrase + case invalidAttributeKeyLength + case missingAttributeKeyLength + case invalidAttributeBlockSize + case missingAttributeBlockSize + case invalidAttributeOutputSize + case missingAttributeOutputSize + case invalidAttributeRounds + case missingAttributeRounds + case invalidAlgorithmParms + case missingAlgorithmParms + case invalidAttributeLabel + case missingAttributeLabel + case invalidAttributeKeyType + case missingAttributeKeyType + case invalidAttributeMode + case missingAttributeMode + case invalidAttributeEffectiveBits + case missingAttributeEffectiveBits + case invalidAttributeStartDate + case missingAttributeStartDate + case invalidAttributeEndDate + case missingAttributeEndDate + case invalidAttributeVersion + case missingAttributeVersion + case invalidAttributePrime + case missingAttributePrime + case invalidAttributeBase + case missingAttributeBase + case invalidAttributeSubprime + case missingAttributeSubprime + case invalidAttributeIterationCount + case missingAttributeIterationCount + case 
invalidAttributeDLDBHandle + case missingAttributeDLDBHandle + case invalidAttributeAccessCredentials + case missingAttributeAccessCredentials + case invalidAttributePublicKeyFormat + case missingAttributePublicKeyFormat + case invalidAttributePrivateKeyFormat + case missingAttributePrivateKeyFormat + case invalidAttributeSymmetricKeyFormat + case missingAttributeSymmetricKeyFormat + case invalidAttributeWrappedKeyFormat + case missingAttributeWrappedKeyFormat + case stagedOperationInProgress + case stagedOperationNotStarted + case verifyFailed + case querySizeUnknown + case blockSizeMismatch + case publicKeyInconsistent + case deviceVerifyFailed + case invalidLoginName + case alreadyLoggedIn + case invalidDigestAlgorithm + case invalidCRLGroup + case certificateCannotOperate + case certificateExpired + case certificateNotValidYet + case certificateRevoked + case certificateSuspended + case insufficientCredentials + case invalidAction + case invalidAuthority + case verifyActionFailed + case invalidCertAuthority + case invaldCRLAuthority + case invalidCRLEncoding + case invalidCRLType + case invalidCRL + case invalidFormType + case invalidID + case invalidIdentifier + case invalidIndex + case invalidPolicyIdentifiers + case invalidTimeString + case invalidReason + case invalidRequestInputs + case invalidResponseVector + case invalidStopOnPolicy + case invalidTuple + case multipleValuesUnsupported + case notTrusted + case noDefaultAuthority + case rejectedForm + case requestLost + case requestRejected + case unsupportedAddressType + case unsupportedService + case invalidTupleGroup + case invalidBaseACLs + case invalidTupleCredendtials + case invalidEncoding + case invalidValidityPeriod + case invalidRequestor + case requestDescriptor + case invalidBundleInfo + case invalidCRLIndex + case noFieldValues + case unsupportedFieldFormat + case unsupportedIndexInfo + case unsupportedLocality + case unsupportedNumAttributes + case unsupportedNumIndexes + case 
unsupportedNumRecordTypes + case fieldSpecifiedMultiple + case incompatibleFieldFormat + case invalidParsingModule + case databaseLocked + case datastoreIsOpen + case missingValue + case unsupportedQueryLimits + case unsupportedNumSelectionPreds + case unsupportedOperator + case invalidDBLocation + case invalidAccessRequest + case invalidIndexInfo + case invalidNewOwner + case invalidModifyMode + case missingRequiredExtension + case extendedKeyUsageNotCritical + case timestampMissing + case timestampInvalid + case timestampNotTrusted + case timestampServiceNotAvailable + case timestampBadAlg + case timestampBadRequest + case timestampBadDataFormat + case timestampTimeNotAvailable + case timestampUnacceptedPolicy + case timestampUnacceptedExtension + case timestampAddInfoNotAvailable + case timestampSystemFailure + case signingTimeMissing + case timestampRejection + case timestampWaiting + case timestampRevocationWarning + case timestampRevocationNotification + case unexpectedError +} +extension Status : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init(status: Darwin.OSStatus) + public var description: Swift.String { + get + } + public init?(rawValue: Darwin.OSStatus) + public typealias RawValue = Darwin.OSStatus + public var rawValue: Darwin.OSStatus { + get + } +} +extension Status : Foundation.CustomNSError { + public static let errorDomain: Swift.String + public var errorCode: Swift.Int { + get + } + public var errorUserInfo: [Swift.String : Any] { + get + } +} +extension HHPermission : CoreLocation.CLLocationManagerDelegate { + @objc dynamic public func locationManager(_ manager: CoreLocation.CLLocationManager, didChangeAuthorization status: CoreLocation.CLAuthorizationStatus) +} +public let HHUUID: Swift.String +public let HHUserToken: Swift.String +@_hasMissingDesignatedInitializers public class LoginManager { + public static let `default`: HHSDKVideo.LoginManager + public var mUUID: Swift.Int? 
+ public var mUserInfo: HHSDKVideo.HHUserModel? + public func loadCache() + public func removeCache() + public func getUserInfo(token: Swift.String, success: ((Swift.String?) -> Swift.Void)? = nil, fail: ((Swift.String) -> Swift.Void)? = nil) + public func getUserInfoRequest(success: ((Swift.String?) -> Swift.Void)? = nil, fail: ((Swift.String) -> Swift.Void)? = nil) + public func convert2Model() -> Swift.String? + public func getUserInfo() -> HHSDKVideo.HHUserModel? + public func getCacheUserInfo() -> HHSDKVideo.HHUserModel? + public func hasLoginData() -> Swift.Bool + public func getUUID() -> Swift.Int? + public func setUUID(uuid: Swift.Int) + public func getToken() -> Swift.String? + public func uuidStr() -> Swift.String? + public func isMemeber() -> Swift.Bool + public func isVIP() -> Swift.Bool + public func getUpgradeVIPTips() -> Swift.String? + public func isBuyProduct() -> Swift.Bool + public func getMemberDes() -> Swift.String? + public func isPhoneAccount() -> Swift.Bool + @objc deinit +} +public protocol MapContext { +} +final public class Map { + final public let mappingType: HHSDKVideo.MappingType + final public var JSON: [Swift.String : Any] { + get + } + final public var isKeyPresent: Swift.Bool { + get + } + final public var currentValue: Any? { + get + } + final public var currentKey: Swift.String? { + get + } + final public var nestedKeyDelimiter: Swift.String { + get + } + final public var context: HHSDKVideo.MapContext? + final public var shouldIncludeNilValues: Swift.Bool + final public let toObject: Swift.Bool + public init(mappingType: HHSDKVideo.MappingType, JSON: [Swift.String : Any], toObject: Swift.Bool = false, context: HHSDKVideo.MapContext? 
= nil, shouldIncludeNilValues: Swift.Bool = false) + final public subscript(key: Swift.String) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, delimiter delimiter: Swift.String) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, nested nested: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, nested nested: Swift.Bool, delimiter delimiter: Swift.String) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, ignoreNil ignoreNil: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, delimiter delimiter: Swift.String, ignoreNil ignoreNil: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, nested nested: Swift.Bool, ignoreNil ignoreNil: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, nested nested: Swift.Bool?, delimiter delimiter: Swift.String, ignoreNil ignoreNil: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public func value<T>() -> T? + @objc deinit +} +extension Map { + final public func value<T>(_ key: Swift.String, default: T.Object, using transform: T) throws -> T.Object where T : HHSDKVideo.TransformType + final public func value<T>(_ key: Swift.String, default: T) throws -> T + final public func value<T>(_ key: Swift.String, default: [T]) -> [T] where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, default: T) throws -> T where T : HHSDKVideo.BaseMappable +} +public struct MapError : Swift.Error { + public var key: Swift.String? + public var currentValue: Any? + public var reason: Swift.String? + public var file: Swift.StaticString? + public var function: Swift.StaticString? + public var line: Swift.UInt? + public init(key: Swift.String?, currentValue: Any?, reason: Swift.String?, file: Swift.StaticString? = nil, function: Swift.StaticString? = nil, line: Swift.UInt? 
= nil) +} +extension MapError : Swift.CustomStringConvertible { + public var description: Swift.String { + get + } +} +public protocol BaseMappable { + mutating func mapping(map: HHSDKVideo.Map) +} +public protocol Mappable : HHSDKVideo.BaseMappable { + init?(map: HHSDKVideo.Map) +} +public protocol StaticMappable : HHSDKVideo.BaseMappable { + static func objectForMapping(map: HHSDKVideo.Map) -> HHSDKVideo.BaseMappable? +} +extension Mappable { + public init?(JSONString: Swift.String, context: HHSDKVideo.MapContext? = nil) + public init?(JSON: [Swift.String : Any], context: HHSDKVideo.MapContext? = nil) +} +extension BaseMappable { + public func toJSON() -> [Swift.String : Any] + public func toJSONString(prettyPrint: Swift.Bool = false) -> Swift.String? +} +extension Array where Element : HHSDKVideo.BaseMappable { + public init?(JSONString: Swift.String, context: HHSDKVideo.MapContext? = nil) + public init(JSONArray: [[Swift.String : Any]], context: HHSDKVideo.MapContext? = nil) + public func toJSON() -> [[Swift.String : Any]] + public func toJSONString(prettyPrint: Swift.Bool = false) -> Swift.String? +} +extension Set where Element : HHSDKVideo.BaseMappable { + public init?(JSONString: Swift.String, context: HHSDKVideo.MapContext? = nil) + public init?(JSONArray: [[Swift.String : Any]], context: HHSDKVideo.MapContext? = nil) + public func toJSON() -> [[Swift.String : Any]] + public func toJSONString(prettyPrint: Swift.Bool = false) -> Swift.String? +} +public enum MappingType { + case fromJSON + case toJSON + public static func == (a: HHSDKVideo.MappingType, b: HHSDKVideo.MappingType) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +final public class Mapper<N> where N : HHSDKVideo.BaseMappable { + final public var context: HHSDKVideo.MapContext? + final public var shouldIncludeNilValues: Swift.Bool + public init(context: HHSDKVideo.MapContext? 
= nil, shouldIncludeNilValues: Swift.Bool = false) + final public func map(JSONObject: Any?, toObject object: N) -> N + final public func map(JSONString: Swift.String, toObject object: N) -> N + final public func map(JSON: [Swift.String : Any], toObject object: N) -> N + final public func map(JSONString: Swift.String) -> N? + final public func map(JSONObject: Any?) -> N? + final public func map(JSON: [Swift.String : Any]) -> N? + final public func mapArray(JSONString: Swift.String) -> [N]? + final public func mapArray(JSONObject: Any?) -> [N]? + final public func mapArray(JSONArray: [[Swift.String : Any]]) -> [N] + final public func mapDictionary(JSONString: Swift.String) -> [Swift.String : N]? + final public func mapDictionary(JSONObject: Any?) -> [Swift.String : N]? + final public func mapDictionary(JSON: [Swift.String : [Swift.String : Any]]) -> [Swift.String : N]? + final public func mapDictionary(JSONObject: Any?, toDictionary dictionary: [Swift.String : N]) -> [Swift.String : N] + final public func mapDictionary(JSON: [Swift.String : [Swift.String : Any]], toDictionary dictionary: [Swift.String : N]) -> [Swift.String : N] + final public func mapDictionaryOfArrays(JSONObject: Any?) -> [Swift.String : [N]]? + final public func mapDictionaryOfArrays(JSON: [Swift.String : [[Swift.String : Any]]]) -> [Swift.String : [N]]? + final public func mapArrayOfArrays(JSONObject: Any?) -> [[N]]? + public static func parseJSONStringIntoDictionary(JSONString: Swift.String) -> [Swift.String : Any]? + public static func parseJSONString(JSONString: Swift.String) -> Any? + @objc deinit +} +extension Mapper { + final public func map(JSONfile: Swift.String) -> N? + final public func mapArray(JSONfile: Swift.String) -> [N]? 
+} +extension Mapper { + final public func toJSON(_ object: N) -> [Swift.String : Any] + final public func toJSONArray(_ array: [N]) -> [[Swift.String : Any]] + final public func toJSONDictionary(_ dictionary: [Swift.String : N]) -> [Swift.String : [Swift.String : Any]] + final public func toJSONDictionaryOfArrays(_ dictionary: [Swift.String : [N]]) -> [Swift.String : [[Swift.String : Any]]] + final public func toJSONString(_ object: N, prettyPrint: Swift.Bool = false) -> Swift.String? + final public func toJSONString(_ array: [N], prettyPrint: Swift.Bool = false) -> Swift.String? + public static func toJSONString(_ JSONObject: Any, prettyPrint: Swift.Bool) -> Swift.String? + public static func toJSONData(_ JSONObject: Any, options: Foundation.JSONSerialization.WritingOptions) -> Foundation.Data? +} +extension Mapper where N : Swift.Hashable { + final public func mapSet(JSONString: Swift.String) -> Swift.Set<N>? + final public func mapSet(JSONObject: Any?) -> Swift.Set<N>? + final public func mapSet(JSONArray: [[Swift.String : Any]]) -> Swift.Set<N> + final public func toJSONSet(_ set: Swift.Set<N>) -> [[Swift.String : Any]] + final public func toJSONString(_ set: Swift.Set<N>, prettyPrint: Swift.Bool = false) -> Swift.String? +} +final public class MD5 { + public init() + final public func calculate(for bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension MD5 : HHSDKVideo.Updatable { + final public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> +} +public struct NotifyInfo { + public init() + public var fromAccountId: Swift.String? + public var requestId: Swift.String? + public var channelId: Swift.String? + public var customInfo: Swift.String? 
+} +open class NSDecimalNumberTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.NSDecimalNumber + public typealias JSON = Swift.String + public init() + open func transformFromJSON(_ value: Any?) -> Foundation.NSDecimalNumber? + open func transformToJSON(_ value: Foundation.NSDecimalNumber?) -> Swift.String? + @objc deinit +} +final public class OCB : HHSDKVideo.BlockMode { + public enum Mode { + case combined + case detached + public static func == (a: HHSDKVideo.OCB.Mode, b: HHSDKVideo.OCB.Mode) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + final public let options: HHSDKVideo.BlockModeOption + public enum Error : Swift.Error { + case invalidNonce + case fail + public static func == (a: HHSDKVideo.OCB.Error, b: HHSDKVideo.OCB.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + final public var authenticationTag: Swift.Array<Swift.UInt8>? + public init(nonce N: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? = nil, tagLength: Swift.Int = 16, mode: HHSDKVideo.OCB.Mode = .detached) + convenience public init(nonce N: Swift.Array<Swift.UInt8>, authenticationTag: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? 
= nil, mode: HHSDKVideo.OCB.Mode = .detached) + final public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker + @objc deinit +} +public struct OFB : HHSDKVideo.BlockMode { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.OFB.Error, b: HHSDKVideo.OFB.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +infix operator <- : DefaultPrecedence +infix operator >>> : DefaultPrecedence +public func <- <T>(left: inout T, right: HHSDKVideo.Map) +public func >>> <T>(left: T, right: HHSDKVideo.Map) +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) +public func >>> <T>(left: T?, right: HHSDKVideo.Map) +public func <- <T>(left: inout T, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: T, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: T?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Dictionary<Swift.String, T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Dictionary<Swift.String, T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Dictionary<Swift.String, T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Dictionary<Swift.String, T>?, right: 
HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Dictionary<Swift.String, [T]>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Dictionary<Swift.String, [T]>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Dictionary<Swift.String, [T]>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Dictionary<Swift.String, [T]>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Array<T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Array<T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Array<T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Array<T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Array<Swift.Array<T>>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Array<Swift.Array<T>>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Array<Swift.Array<T>>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Array<Swift.Array<T>>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Set<T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable, T : Swift.Hashable +public func >>> <T>(left: Swift.Set<T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable, T : Swift.Hashable +public func <- <T>(left: inout Swift.Set<T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable, T : Swift.Hashable +public func >>> <T>(left: Swift.Set<T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable, T : Swift.Hashable +public struct OrderModel : HHSDKVideo.Mappable { + public var orderid: Swift.String? 
+ public var price: Swift.Float? + public var buyServiceName: Swift.String? + public var expertId: Swift.String? + public var expertName: Swift.String? + public var patientName: Swift.String? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +public protocol PaddingProtocol { + func add(to: Swift.Array<Swift.UInt8>, blockSize: Swift.Int) -> Swift.Array<Swift.UInt8> + func remove(from: Swift.Array<Swift.UInt8>, blockSize: Swift.Int?) -> Swift.Array<Swift.UInt8> +} +public enum Padding : HHSDKVideo.PaddingProtocol { + case noPadding, zeroPadding, pkcs7, pkcs5, iso78164 + public func add(to: Swift.Array<Swift.UInt8>, blockSize: Swift.Int) -> Swift.Array<Swift.UInt8> + public func remove(from: Swift.Array<Swift.UInt8>, blockSize: Swift.Int?) -> Swift.Array<Swift.UInt8> + public static func == (a: HHSDKVideo.Padding, b: HHSDKVideo.Padding) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +extension PKCS5 { + public struct PBKDF1 { + public enum Error : Swift.Error { + case invalidInput + case derivedKeyTooLong + public static func == (a: HHSDKVideo.PKCS5.PBKDF1.Error, b: HHSDKVideo.PKCS5.PBKDF1.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public enum Variant { + case md5, sha1 + public static func == (a: HHSDKVideo.PKCS5.PBKDF1.Variant, b: HHSDKVideo.PKCS5.PBKDF1.Variant) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(password: Swift.Array<Swift.UInt8>, salt: Swift.Array<Swift.UInt8>, variant: HHSDKVideo.PKCS5.PBKDF1.Variant = .sha1, iterations: Swift.Int = 4096, keyLength: Swift.Int? 
= nil) throws + public func calculate() -> Swift.Array<Swift.UInt8> + } +} +extension PKCS5 { + public struct PBKDF2 { + public enum Error : Swift.Error { + case invalidInput + case derivedKeyTooLong + public static func == (a: HHSDKVideo.PKCS5.PBKDF2.Error, b: HHSDKVideo.PKCS5.PBKDF2.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(password: Swift.Array<Swift.UInt8>, salt: Swift.Array<Swift.UInt8>, iterations: Swift.Int = 4096, keyLength: Swift.Int? = nil, variant: HHSDKVideo.HMAC.Variant = .sha256) throws + public func calculate() throws -> Swift.Array<Swift.UInt8> + } +} +public struct PCBC : HHSDKVideo.BlockMode { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.PCBC.Error, b: HHSDKVideo.PCBC.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +@objc @_hasMissingDesignatedInitializers public class HHPermission : ObjectiveC.NSObject { + public static let locationAlways: HHSDKVideo.HHPermission + public static let locationWhenInUse: HHSDKVideo.HHPermission + public static let microphone: HHSDKVideo.HHPermission + public static let camera: HHSDKVideo.HHPermission + public static let photos: HHSDKVideo.HHPermission + final public let type: HHSDKVideo.HHBasePermissionType + public var status: HHSDKVideo.PermissionStatus { + get + } + public var presentPrePermissionAlert: Swift.Bool + public var prePermissionAlert: HHSDKVideo.PermissionAlert { + get + set + } + public var presentDeniedAlert: Swift.Bool + @objc override dynamic public init() + @objc 
deinit +} +extension HHPermission { + @objc override dynamic public var description: Swift.String { + @objc get + } + @objc override dynamic public var debugDescription: Swift.String { + @objc get + } +} +@_hasMissingDesignatedInitializers public class PermissionAlert { + @objc deinit +} +public enum PermissionStatus : Swift.String { + case authorized + case denied + case disabled + case notDetermined + case limited + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +extension PermissionStatus : Swift.CustomStringConvertible { + public var description: Swift.String { + get + } +} +@objc public enum HHBasePermissionType : Swift.Int { + case locationAlways + case locationWhenInUse + case microphone + case camera + case photos + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +extension HHBasePermissionType : Swift.CustomStringConvertible { + public var description: Swift.String { + get + } +} +@_hasMissingDesignatedInitializers public class PhotoPickerConfig { + public static let `default`: HHSDKVideo.PhotoPickerConfig + public var miniPicTip: Swift.Bool + public var mMaxSelectCount: Swift.Int + public var mDetailColumnCount: Swift.Int + @objc deinit +} +public enum PKCS5 { +} +public enum PKCS7 { +} +final public class Poly1305 : HHSDKVideo.CryptoAuthenticator { + public enum Error : Swift.Error { + case authenticateError + public static func == (a: HHSDKVideo.Poly1305.Error, b: HHSDKVideo.Poly1305.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public static let blockSize: Swift.Int + public init(key: Swift.Array<Swift.UInt8>) + final public func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + @objc deinit +} +@_hasMissingDesignatedInitializers public class PostBodyEncoding { + @objc deinit +} +final 
public class Rabbit { + public enum Error : Swift.Error { + case invalidKeyOrInitializationVector + public static func == (a: HHSDKVideo.Rabbit.Error, b: HHSDKVideo.Rabbit.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public static let ivSize: Swift.Int + public static let keySize: Swift.Int + public static let blockSize: Swift.Int + final public var keySize: Swift.Int { + get + } + convenience public init(key: Swift.Array<Swift.UInt8>) throws + public init(key: Swift.Array<Swift.UInt8>, iv: Swift.Array<Swift.UInt8>?) throws + @objc deinit +} +extension Rabbit : HHSDKVideo.Cipher { + final public func encrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + final public func decrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +extension Rabbit { + convenience public init(key: Swift.String) throws + convenience public init(key: Swift.String, iv: Swift.String) throws +} +public enum ReachabilityError : Swift.Error { + case FailedToCreateWithAddress(Darwin.sockaddr_in) + case FailedToCreateWithHostname(Swift.String) + case UnableToSetCallback + case UnableToSetDispatchQueue +} +public let ReachabilityChangedNotification: Foundation.NSNotification.Name +public class Reachability { + public typealias NetworkReachable = (HHSDKVideo.Reachability) -> () + public typealias NetworkUnreachable = (HHSDKVideo.Reachability) -> () + public enum NetworkStatus : Swift.CustomStringConvertible { + case notReachable, reachableViaWiFi, reachableViaWWAN + public var description: Swift.String { + get + } + public static func == (a: HHSDKVideo.Reachability.NetworkStatus, b: HHSDKVideo.Reachability.NetworkStatus) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public var whenReachable: HHSDKVideo.Reachability.NetworkReachable? 
+ public var whenUnreachable: HHSDKVideo.Reachability.NetworkUnreachable? + public var reachableOnWWAN: Swift.Bool + public var currentReachabilityString: Swift.String { + get + } + public var currentReachabilityStatus: HHSDKVideo.Reachability.NetworkStatus { + get + } + required public init(reachabilityRef: SystemConfiguration.SCNetworkReachability) + convenience public init?(hostname: Swift.String) + convenience public init?() + @objc deinit +} +extension Reachability { + public func startNotifier() throws + public func stopNotifier() + public var isReachable: Swift.Bool { + get + } + public var isReachableViaWWAN: Swift.Bool { + get + } + public var isReachableViaWiFi: Swift.Bool { + get + } + public var description: Swift.String { + get + } +} +public enum RecordImgType : Swift.Int { + case medic + case check + case yingXiang + case video + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +public struct RemoteData : HHSDKVideo.Mappable { + public var changeDoctorTime: Swift.Int + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +final public class Scrypt { + public init(password: Swift.Array<Swift.UInt8>, salt: Swift.Array<Swift.UInt8>, dkLen: Swift.Int, N: Swift.Int, r: Swift.Int, p: Swift.Int) throws + final public func calculate() throws -> [Swift.UInt8] + @objc deinit +} +public struct SDKConfigModel : HHSDKVideo.Mappable { + public var cardIdActiveShow: Swift.Int + public var changeDoctorTime: Swift.Int? 
+ public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +final public class SHA1 { + public init() + final public func calculate(for bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension SHA1 : HHSDKVideo.Updatable { + @discardableResult + final public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> +} +final public class SHA2 { + public enum Variant : Swift.RawRepresentable { + case sha224, sha256, sha384, sha512 + public var digestLength: Swift.Int { + get + } + public var blockSize: Swift.Int { + get + } + public typealias RawValue = Swift.Int + public var rawValue: HHSDKVideo.SHA2.Variant.RawValue { + get + } + public init?(rawValue: HHSDKVideo.SHA2.Variant.RawValue) + } + public init(variant: HHSDKVideo.SHA2.Variant) + final public func calculate(for bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension SHA2 : HHSDKVideo.Updatable { + final public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> +} +final public class SHA3 { + final public let blockSize: Swift.Int + final public let digestLength: Swift.Int + final public let markByte: Swift.UInt8 + public enum Variant { + case sha224, sha256, sha384, sha512, keccak224, keccak256, keccak384, keccak512 + public var outputLength: Swift.Int { + get + } + public static func == (a: HHSDKVideo.SHA3.Variant, b: HHSDKVideo.SHA3.Variant) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(variant: HHSDKVideo.SHA3.Variant) + final public func calculate(for bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension SHA3 : HHSDKVideo.Updatable { + final public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> 
Swift.Array<Swift.UInt8> +} +extension String { + public var bytes: Swift.Array<Swift.UInt8> { + get + } + public func md5() -> Swift.String + public func sha1() -> Swift.String + public func sha224() -> Swift.String + public func sha256() -> Swift.String + public func sha384() -> Swift.String + public func sha512() -> Swift.String + public func sha3(_ variant: HHSDKVideo.SHA3.Variant) -> Swift.String + public func crc32(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.String + public func crc32c(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.String + public func crc16(seed: Swift.UInt16? = nil) -> Swift.String + public func encrypt(cipher: HHSDKVideo.Cipher) throws -> Swift.String + public func encryptToBase64(cipher: HHSDKVideo.Cipher) throws -> Swift.String? + public func authenticate<A>(with authenticator: A) throws -> Swift.String where A : HHSDKVideo.CryptoAuthenticator +} +extension String { + public func decryptBase64ToString(cipher: HHSDKVideo.Cipher) throws -> Swift.String + public func decryptBase64(cipher: HHSDKVideo.Cipher) throws -> Swift.Array<Swift.UInt8> +} +@_hasMissingDesignatedInitializers final public class SwiftEntryKit { + public enum EntryDismissalDescriptor { + case specific(entryName: Swift.String) + case prioritizedLowerOrEqualTo(priority: HHSDKVideo.EKAttributes.Precedence.Priority) + case enqueued + case all + case displayed + } + public enum RollbackWindow { + case main + case custom(window: UIKit.UIWindow) + } + public typealias DismissCompletionHandler = () -> Swift.Void + final public class var window: UIKit.UIWindow? { + get + } + final public class var isCurrentlyDisplaying: Swift.Bool { + get + } + final public class func isCurrentlyDisplaying(entryNamed name: Swift.String? = nil) -> Swift.Bool + final public class var isQueueEmpty: Swift.Bool { + get + } + final public class func queueContains(entryNamed name: Swift.String? 
= nil) -> Swift.Bool + final public class func display(entry view: UIKit.UIView, using attributes: HHSDKVideo.EKAttributes, presentInsideKeyWindow: Swift.Bool = false, rollbackWindow: HHSDKVideo.SwiftEntryKit.RollbackWindow = .main) + final public class func display(entry viewController: UIKit.UIViewController, using attributes: HHSDKVideo.EKAttributes, presentInsideKeyWindow: Swift.Bool = false, rollbackWindow: HHSDKVideo.SwiftEntryKit.RollbackWindow = .main) + final public class func transform(to view: UIKit.UIView) + final public class func dismiss(_ descriptor: HHSDKVideo.SwiftEntryKit.EntryDismissalDescriptor = .displayed, with completion: HHSDKVideo.SwiftEntryKit.DismissCompletionHandler? = nil) + final public class func layoutIfNeeded() + @objc deinit +} +open class TransformOf<ObjectType, JSONType> : HHSDKVideo.TransformType { + public typealias Object = ObjectType + public typealias JSON = JSONType + public init(fromJSON: @escaping (JSONType?) -> ObjectType?, toJSON: @escaping (ObjectType?) -> JSONType?) + open func transformFromJSON(_ value: Any?) -> ObjectType? + open func transformToJSON(_ value: ObjectType?) -> JSONType? 
+ @objc deinit +} +public func <- <Transform>(left: inout Transform.Object, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: Transform.Object, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout Transform.Object?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: Transform.Object?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [Transform.Object], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [Transform.Object], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [Transform.Object]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [Transform.Object]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [Swift.String : Transform.Object], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [Swift.String : Transform.Object], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [Swift.String : Transform.Object]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [Swift.String : Transform.Object]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout Transform.Object, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Transform.Object, right: (HHSDKVideo.Map, Transform)) where Transform 
: HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Transform.Object?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Transform.Object?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Dictionary<Swift.String, Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Dictionary<Swift.String, Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Dictionary<Swift.String, Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Dictionary<Swift.String, Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Dictionary<Swift.String, [Transform.Object]>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Dictionary<Swift.String, [Transform.Object]>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Dictionary<Swift.String, [Transform.Object]>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Dictionary<Swift.String, 
[Transform.Object]>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Array<Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Array<Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Array<Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Array<Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout [[Transform.Object]], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [[Transform.Object]], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [[Transform.Object]]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [[Transform.Object]]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout Swift.Set<Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable, Transform.Object : Swift.Hashable +public func >>> <Transform>(left: Swift.Set<Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable, Transform.Object : Swift.Hashable +public func <- <Transform>(left: inout Swift.Set<Transform.Object>?, right: 
(HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable, Transform.Object : Swift.Hashable +public func >>> <Transform>(left: Swift.Set<Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable, Transform.Object : Swift.Hashable +public protocol TransformType { + associatedtype Object + associatedtype JSON + func transformFromJSON(_ value: Any?) -> Self.Object? + func transformToJSON(_ value: Self.Object?) -> Self.JSON? +} +extension UIImage { + public class func gifImageWithData(_ data: Foundation.Data) -> UIKit.UIImage? + public class func gifImageWithURL(_ gifUrl: Swift.String) -> UIKit.UIImage? + public class func gifImageWithName(_ name: Swift.String) -> UIKit.UIImage? +} +public protocol _UInt8Type { +} +extension UInt8 : HHSDKVideo._UInt8Type { +} +extension UInt8 { + public func bits() -> [HHSDKVideo.Bit] + public func bits() -> Swift.String +} +public protocol Updatable { + mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool) throws -> Swift.Array<Swift.UInt8> + mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws +} +extension Updatable { + public mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws + public mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> + public mutating func update(withBytes bytes: Swift.Array<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> + public mutating func update(withBytes bytes: Swift.Array<Swift.UInt8>, isLast: Swift.Bool = false, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws + public mutating func finish(withBytes bytes: 
Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + public mutating func finish(withBytes bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + public mutating func finish() throws -> Swift.Array<Swift.UInt8> + public mutating func finish(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws + public mutating func finish(withBytes bytes: Swift.Array<Swift.UInt8>, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws + public mutating func finish(output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws +} +open class URLTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.URL + public typealias JSON = Swift.String + public init(shouldEncodeURLString: Swift.Bool = false, allowedCharacterSet: Foundation.CharacterSet = .urlQueryAllowed) + open func transformFromJSON(_ value: Any?) -> Foundation.URL? + open func transformToJSON(_ value: Foundation.URL?) -> Swift.String? + @objc deinit +} +public struct UserApi { +} +@objc @_inheritsConvenienceInitializers @_hasMissingDesignatedInitializers public class VCManager : ObjectiveC.NSObject { + public static let `default`: HHSDKVideo.VCManager + public var waitModel: HHSDKVideo.HHWaitDoctorModel? 
+ @objc deinit +} +extension VCManager { + public func onReceiveCall(callee: Swift.String, caller: Swift.String, orderId: Swift.String) + public func isInBusy() -> Swift.Bool +} +extension VCManager { + public func onReceiveInvite(docModel: HHSDKVideo.HHInviteDocModel) + public static func onUserReject(_ fromUuid: Swift.String) + public static func onCancelInvite(_ fromUuid: Swift.String) + public static func changeVideo(_ isVoice: Swift.Bool) +} +extension VCManager { + public func showEduBoard(groupId: Swift.String, orderId: Swift.String) + public func closeEduBoard() +} +public struct VideoApi { +} +public enum HHIMCmd : Swift.String { + case audio + case video + case closeVideo + case openVideo + case transfor + case accept + case call + case reject + case cancelCall + case pcCancel + case phoneCall + case busy + case waiting + case waitingTip + case agentTrans + case web_transform + case callWeb + case SWITCH_TO_CAMERA_wmp + case cancelCallWeb + case call_invite + case reject_invite + case cancel_invite + case exit_camera + case enter_camera + case conference_begin + case conference_end + case user_certification + case cancel_user_certification + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +public class WeakArray<T> { + public func add(_ delegate: T) + public func remove(_ delegate: T) + public func excute(_ block: @escaping ((T?) -> Swift.Void)) + public init() + @objc deinit +} +@objc public class ZLAlbumListModel : ObjectiveC.NSObject { + final public let title: Swift.String + public var count: Swift.Int { + get + } + public var result: Photos.PHFetchResult<Photos.PHAsset> + final public let collection: Photos.PHAssetCollection + final public let option: Photos.PHFetchOptions + final public let isCameraRoll: Swift.Bool + public var headImageAsset: Photos.PHAsset? 
{ + get + } + public var models: [HHSDKVideo.ZLPhotoModel] + public init(title: Swift.String, result: Photos.PHFetchResult<Photos.PHAsset>, collection: Photos.PHAssetCollection, option: Photos.PHFetchOptions, isCameraRoll: Swift.Bool) + public func refetchPhotos() + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class ZLCameraConfiguration : ObjectiveC.NSObject { + @objc public enum CaptureSessionPreset : Swift.Int { + case cif352x288 + case vga640x480 + case hd1280x720 + case hd1920x1080 + case hd4K3840x2160 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public enum FocusMode : Swift.Int { + case autoFocus + case continuousAutoFocus + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public enum ExposureMode : Swift.Int { + case autoExpose + case continuousAutoExposure + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public enum FlashMode : Swift.Int { + case auto + case on + case off + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public enum VideoExportType : Swift.Int { + case mov + case mp4 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public var sessionPreset: HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset + @objc public var focusMode: HHSDKVideo.ZLCameraConfiguration.FocusMode + @objc public var exposureMode: HHSDKVideo.ZLCameraConfiguration.ExposureMode + @objc public var flashMode: HHSDKVideo.ZLCameraConfiguration.FlashMode + @objc public var videoExportType: HHSDKVideo.ZLCameraConfiguration.VideoExportType + @objc override dynamic public init() + @objc deinit +} +extension 
ZLCameraConfiguration { + @discardableResult + public func sessionPreset(_ sessionPreset: HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset) -> HHSDKVideo.ZLCameraConfiguration + @discardableResult + public func focusMode(_ mode: HHSDKVideo.ZLCameraConfiguration.FocusMode) -> HHSDKVideo.ZLCameraConfiguration + @discardableResult + public func exposureMode(_ mode: HHSDKVideo.ZLCameraConfiguration.ExposureMode) -> HHSDKVideo.ZLCameraConfiguration + @discardableResult + public func flashMode(_ mode: HHSDKVideo.ZLCameraConfiguration.FlashMode) -> HHSDKVideo.ZLCameraConfiguration + @discardableResult + public func videoExportType(_ type: HHSDKVideo.ZLCameraConfiguration.VideoExportType) -> HHSDKVideo.ZLCameraConfiguration +} +@objc open class ZLCustomCamera : UIKit.UIViewController, QuartzCore.CAAnimationDelegate { + @objc public var takeDoneBlock: ((UIKit.UIImage?, Foundation.URL?) -> Swift.Void)? + @objc public var cancelBlock: (() -> Swift.Void)? + public var tipsLabel: UIKit.UILabel { + get + set + } + public var bottomView: UIKit.UIView { + get + set + } + public var largeCircleView: UIKit.UIVisualEffectView { + get + set + } + public var smallCircleView: UIKit.UIView { + get + set + } + public var animateLayer: QuartzCore.CAShapeLayer { + get + set + } + public var retakeBtn: HHSDKVideo.ZLEnlargeButton { + get + set + } + public var doneBtn: UIKit.UIButton { + get + set + } + public var dismissBtn: HHSDKVideo.ZLEnlargeButton { + get + set + } + public var switchCameraBtn: HHSDKVideo.ZLEnlargeButton { + get + set + } + public var focusCursorView: UIKit.UIImageView { + get + set + } + public var takedImageView: UIKit.UIImageView { + get + set + } + @objc override dynamic public var supportedInterfaceOrientations: UIKit.UIInterfaceOrientationMask { + @objc get + } + @objc override dynamic public var prefersStatusBarHidden: Swift.Bool { + @objc get + } + @objc deinit + @objc dynamic public init() + @objc required dynamic public init?(coder: Foundation.NSCoder) + 
@objc override dynamic open func viewDidLoad() + @objc override dynamic open func viewWillAppear(_ animated: Swift.Bool) + @objc override dynamic open func viewDidAppear(_ animated: Swift.Bool) + @objc override dynamic open func viewWillDisappear(_ animated: Swift.Bool) + @objc override dynamic open func viewDidDisappear(_ animated: Swift.Bool) + @objc override dynamic open func viewDidLayoutSubviews() + @objc public func animationDidStop(_ anim: QuartzCore.CAAnimation, finished flag: Swift.Bool) + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) +} +extension ZLCustomCamera : AVFoundation.AVCapturePhotoCaptureDelegate { + @objc dynamic public func photoOutput(_ output: AVFoundation.AVCapturePhotoOutput, willCapturePhotoFor resolvedSettings: AVFoundation.AVCaptureResolvedPhotoSettings) + @objc dynamic public func photoOutput(_ output: AVFoundation.AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CoreMedia.CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CoreMedia.CMSampleBuffer?, resolvedSettings: AVFoundation.AVCaptureResolvedPhotoSettings, bracketSettings: AVFoundation.AVCaptureBracketedStillImageSettings?, error: Swift.Error?) +} +extension ZLCustomCamera : AVFoundation.AVCaptureFileOutputRecordingDelegate { + @objc dynamic public func fileOutput(_ output: AVFoundation.AVCaptureFileOutput, didStartRecordingTo fileURL: Foundation.URL, from connections: [AVFoundation.AVCaptureConnection]) + @objc dynamic public func fileOutput(_ output: AVFoundation.AVCaptureFileOutput, didFinishRecordingTo outputFileURL: Foundation.URL, from connections: [AVFoundation.AVCaptureConnection], error: Swift.Error?) 
+} +extension ZLCustomCamera : UIKit.UIGestureRecognizerDelegate { + @objc dynamic public func gestureRecognizer(_ gestureRecognizer: UIKit.UIGestureRecognizer, shouldRecognizeSimultaneouslyWith otherGestureRecognizer: UIKit.UIGestureRecognizer) -> Swift.Bool +} +@objc public protocol ZLImageStickerContainerDelegate { + @objc var selectImageBlock: ((UIKit.UIImage) -> Swift.Void)? { get set } + @objc var hideBlock: (() -> Swift.Void)? { get set } + @objc func show(in view: UIKit.UIView) +} +@objc @_inheritsConvenienceInitializers public class ZLEditImageConfiguration : ObjectiveC.NSObject { + @objc public enum EditTool : Swift.Int, Swift.CaseIterable { + case draw + case clip + case imageSticker + case textSticker + case mosaic + case filter + case adjust + public init?(rawValue: Swift.Int) + public typealias AllCases = [HHSDKVideo.ZLEditImageConfiguration.EditTool] + public typealias RawValue = Swift.Int + public static var allCases: [HHSDKVideo.ZLEditImageConfiguration.EditTool] { + get + } + public var rawValue: Swift.Int { + get + } + } + @objc public enum AdjustTool : Swift.Int, Swift.CaseIterable { + case brightness + case contrast + case saturation + public init?(rawValue: Swift.Int) + public typealias AllCases = [HHSDKVideo.ZLEditImageConfiguration.AdjustTool] + public typealias RawValue = Swift.Int + public static var allCases: [HHSDKVideo.ZLEditImageConfiguration.AdjustTool] { + get + } + public var rawValue: Swift.Int { + get + } + } + public var tools: [HHSDKVideo.ZLEditImageConfiguration.EditTool] { + get + set + } + @objc public var tools_objc: [Swift.Int] { + @objc get + @objc set + } + @objc public var drawColors: [UIKit.UIColor] { + @objc get + @objc set + } + @objc public var defaultDrawColor: UIKit.UIColor + @objc public var clipRatios: [HHSDKVideo.ZLImageClipRatio] { + @objc get + @objc set + } + @objc public var textStickerTextColors: [UIKit.UIColor] { + @objc get + @objc set + } + @objc public var textStickerDefaultTextColor: UIKit.UIColor + 
@objc public var filters: [HHSDKVideo.ZLFilter] { + @objc get + @objc set + } + @objc public var imageStickerContainerView: (UIKit.UIView & HHSDKVideo.ZLImageStickerContainerDelegate)? + public var adjustTools: [HHSDKVideo.ZLEditImageConfiguration.AdjustTool] { + get + set + } + @objc public var adjustTools_objc: [Swift.Int] { + @objc get + @objc set + } + @objc public var impactFeedbackWhenAdjustSliderValueIsZero: Swift.Bool + @objc public var impactFeedbackStyle: UIKit.UIImpactFeedbackGenerator.FeedbackStyle + @objc override dynamic public init() + @objc deinit +} +extension ZLEditImageConfiguration { + @discardableResult + public func tools(_ tools: [HHSDKVideo.ZLEditImageConfiguration.EditTool]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func drawColors(_ colors: [UIKit.UIColor]) -> HHSDKVideo.ZLEditImageConfiguration + public func defaultDrawColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func clipRatios(_ ratios: [HHSDKVideo.ZLImageClipRatio]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func textStickerTextColors(_ colors: [UIKit.UIColor]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func textStickerDefaultTextColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func filters(_ filters: [HHSDKVideo.ZLFilter]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func imageStickerContainerView(_ view: (UIKit.UIView & HHSDKVideo.ZLImageStickerContainerDelegate)?) 
-> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func adjustTools(_ tools: [HHSDKVideo.ZLEditImageConfiguration.AdjustTool]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func impactFeedbackWhenAdjustSliderValueIsZero(_ value: Swift.Bool) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func impactFeedbackStyle(_ style: UIKit.UIImpactFeedbackGenerator.FeedbackStyle) -> HHSDKVideo.ZLEditImageConfiguration +} +@objc public class ZLImageClipRatio : ObjectiveC.NSObject { + public var title: Swift.String + final public let whRatio: CoreGraphics.CGFloat + @objc public init(title: Swift.String, whRatio: CoreGraphics.CGFloat, isCircle: Swift.Bool = false) + @objc override dynamic public init() + @objc deinit +} +extension ZLImageClipRatio { + @objc public static let custom: HHSDKVideo.ZLImageClipRatio + @objc public static let circle: HHSDKVideo.ZLImageClipRatio + @objc public static let wh1x1: HHSDKVideo.ZLImageClipRatio + @objc public static let wh3x4: HHSDKVideo.ZLImageClipRatio + @objc public static let wh4x3: HHSDKVideo.ZLImageClipRatio + @objc public static let wh2x3: HHSDKVideo.ZLImageClipRatio + @objc public static let wh3x2: HHSDKVideo.ZLImageClipRatio + @objc public static let wh9x16: HHSDKVideo.ZLImageClipRatio + @objc public static let wh16x9: HHSDKVideo.ZLImageClipRatio +} +@objc public class ZLEditImageModel : ObjectiveC.NSObject { + final public let drawPaths: [HHSDKVideo.ZLDrawPath] + final public let mosaicPaths: [HHSDKVideo.ZLMosaicPath] + final public let editRect: CoreGraphics.CGRect? + final public let angle: CoreGraphics.CGFloat + final public let brightness: Swift.Float + final public let contrast: Swift.Float + final public let saturation: Swift.Float + final public let selectRatio: HHSDKVideo.ZLImageClipRatio? + final public let selectFilter: HHSDKVideo.ZLFilter? + final public let textStickers: [(state: HHSDKVideo.ZLTextStickerState, index: Swift.Int)]? 
+ final public let imageStickers: [(state: HHSDKVideo.ZLImageStickerState, index: Swift.Int)]? + public init(drawPaths: [HHSDKVideo.ZLDrawPath], mosaicPaths: [HHSDKVideo.ZLMosaicPath], editRect: CoreGraphics.CGRect?, angle: CoreGraphics.CGFloat, brightness: Swift.Float, contrast: Swift.Float, saturation: Swift.Float, selectRatio: HHSDKVideo.ZLImageClipRatio?, selectFilter: HHSDKVideo.ZLFilter, textStickers: [(state: HHSDKVideo.ZLTextStickerState, index: Swift.Int)]?, imageStickers: [(state: HHSDKVideo.ZLImageStickerState, index: Swift.Int)]?) + @objc override dynamic public init() + @objc deinit +} +@objc open class ZLEditImageViewController : UIKit.UIViewController { + @objc public var drawColViewH: CoreGraphics.CGFloat + @objc public var filterColViewH: CoreGraphics.CGFloat + @objc public var adjustColViewH: CoreGraphics.CGFloat + @objc public var ashbinNormalBgColor: UIKit.UIColor + @objc public var cancelBtn: HHSDKVideo.ZLEnlargeButton { + @objc get + @objc set + } + @objc public var mainScrollView: UIKit.UIScrollView { + @objc get + @objc set + } + @objc public var topShadowView: UIKit.UIView { + @objc get + @objc set + } + @objc public var topShadowLayer: QuartzCore.CAGradientLayer { + @objc get + @objc set + } + @objc public var bottomShadowView: UIKit.UIView + @objc public var bottomShadowLayer: QuartzCore.CAGradientLayer + @objc public var doneBtn: UIKit.UIButton + @objc public var revokeBtn: UIKit.UIButton + @objc public var ashbinView: UIKit.UIView { + @objc get + @objc set + } + @objc public var ashbinImgView: UIKit.UIImageView { + @objc get + @objc set + } + @objc public var drawLineWidth: CoreGraphics.CGFloat + @objc public var mosaicLineWidth: CoreGraphics.CGFloat + @objc public var editFinishBlock: ((UIKit.UIImage, HHSDKVideo.ZLEditImageModel?) -> Swift.Void)? + @objc public var cancelEditBlock: (() -> Swift.Void)? 
+ @objc override dynamic public var prefersStatusBarHidden: Swift.Bool { + @objc get + } + @objc override dynamic public var supportedInterfaceOrientations: UIKit.UIInterfaceOrientationMask { + @objc get + } + @objc deinit + @objc public class func showEditImageVC(parentVC: UIKit.UIViewController?, animate: Swift.Bool = false, image: UIKit.UIImage, editModel: HHSDKVideo.ZLEditImageModel? = nil, cancel: (() -> Swift.Void)? = nil, completion: ((UIKit.UIImage, HHSDKVideo.ZLEditImageModel?) -> Swift.Void)?) + @objc public init(image: UIKit.UIImage, editModel: HHSDKVideo.ZLEditImageModel? = nil) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc override dynamic open func viewDidLoad() + @objc override dynamic open func viewDidLayoutSubviews() + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) +} +extension ZLEditImageViewController : UIKit.UIGestureRecognizerDelegate { + @objc dynamic public func gestureRecognizerShouldBegin(_ gestureRecognizer: UIKit.UIGestureRecognizer) -> Swift.Bool +} +extension ZLEditImageViewController : UIKit.UIScrollViewDelegate { + @objc dynamic public func viewForZooming(in scrollView: UIKit.UIScrollView) -> UIKit.UIView? 
+ @objc dynamic public func scrollViewDidZoom(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndZooming(_ scrollView: UIKit.UIScrollView, with view: UIKit.UIView?, atScale scale: CoreGraphics.CGFloat) + @objc dynamic public func scrollViewDidScroll(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndDragging(_ scrollView: UIKit.UIScrollView, willDecelerate decelerate: Swift.Bool) + @objc dynamic public func scrollViewDidEndDecelerating(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndScrollingAnimation(_ scrollView: UIKit.UIScrollView) +} +extension ZLEditImageViewController : UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegate { + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, didSelectItemAt indexPath: Foundation.IndexPath) +} +@objc @_hasMissingDesignatedInitializers public class ZLDrawPath : ObjectiveC.NSObject { + @objc override dynamic public init() + @objc deinit +} +@objc @_hasMissingDesignatedInitializers public class ZLMosaicPath : ObjectiveC.NSObject { + @objc override dynamic public init() + @objc deinit +} +@objc @_hasMissingDesignatedInitializers public class ZLEditVideoViewController : UIKit.UIViewController { + @objc public var editFinishBlock: ((Foundation.URL?) -> Swift.Void)? 
+ @objc override dynamic public var prefersStatusBarHidden: Swift.Bool { + @objc get + } + @objc override dynamic public var supportedInterfaceOrientations: UIKit.UIInterfaceOrientationMask { + @objc get + } + @objc deinit + @objc public init(avAsset: AVFoundation.AVAsset, animateDismiss: Swift.Bool = false) + @objc override dynamic public func viewDidLoad() + @objc override dynamic public func viewDidAppear(_ animated: Swift.Bool) + @objc override dynamic public func viewDidLayoutSubviews() + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) +} +extension ZLEditVideoViewController : UIKit.UIGestureRecognizerDelegate { + @objc dynamic public func gestureRecognizerShouldBegin(_ gestureRecognizer: UIKit.UIGestureRecognizer) -> Swift.Bool +} +extension ZLEditVideoViewController : UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegateFlowLayout { + @objc dynamic public func scrollViewDidScroll(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndDragging(_ scrollView: UIKit.UIScrollView, willDecelerate decelerate: Swift.Bool) + @objc dynamic public func scrollViewDidEndDecelerating(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, insetForSectionAt section: Swift.Int) -> UIKit.UIEdgeInsets + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, willDisplay cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) +} +@objc @_inheritsConvenienceInitializers public class ZLEnlargeButton : UIKit.UIButton 
{ + public var enlargeInsets: UIKit.UIEdgeInsets + public var enlargeInset: CoreGraphics.CGFloat { + get + set + } + @objc override dynamic public func point(inside point: CoreGraphics.CGPoint, with event: UIKit.UIEvent?) -> Swift.Bool + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +public typealias ZLFilterApplierType = ((UIKit.UIImage) -> UIKit.UIImage) +@objc public enum ZLFilterType : Swift.Int { + case normal + case chrome + case fade + case instant + case process + case transfer + case tone + case linear + case sepia + case mono + case noir + case tonal + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc public class ZLFilter : ObjectiveC.NSObject { + public var name: Swift.String + @objc public init(name: Swift.String, filterType: HHSDKVideo.ZLFilterType) + @objc public init(name: Swift.String, applier: HHSDKVideo.ZLFilterApplierType?) 
+ @objc override dynamic public init() + @objc deinit +} +extension ZLFilter { + @objc public static let all: [HHSDKVideo.ZLFilter] + @objc public static let normal: HHSDKVideo.ZLFilter + @objc public static let clarendon: HHSDKVideo.ZLFilter + @objc public static let nashville: HHSDKVideo.ZLFilter + @objc public static let apply1977: HHSDKVideo.ZLFilter + @objc public static let toaster: HHSDKVideo.ZLFilter + @objc public static let chrome: HHSDKVideo.ZLFilter + @objc public static let fade: HHSDKVideo.ZLFilter + @objc public static let instant: HHSDKVideo.ZLFilter + @objc public static let process: HHSDKVideo.ZLFilter + @objc public static let transfer: HHSDKVideo.ZLFilter + @objc public static let tone: HHSDKVideo.ZLFilter + @objc public static let linear: HHSDKVideo.ZLFilter + @objc public static let sepia: HHSDKVideo.ZLFilter + @objc public static let mono: HHSDKVideo.ZLFilter + @objc public static let noir: HHSDKVideo.ZLFilter + @objc public static let tonal: HHSDKVideo.ZLFilter +} +@objc public enum ZLURLType : Swift.Int { + case image + case video + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc @_hasMissingDesignatedInitializers public class ZLImagePreviewController : UIKit.UIViewController { + @objc public var longPressBlock: ((HHSDKVideo.ZLImagePreviewController?, UIKit.UIImage?, Swift.Int) -> Swift.Void)? + @objc public var doneBlock: (([Any]) -> Swift.Void)? + @objc public var videoHttpHeader: [Swift.String : Any]? + @objc override dynamic public var prefersStatusBarHidden: Swift.Bool { + @objc get + } + @objc override dynamic public var preferredStatusBarStyle: UIKit.UIStatusBarStyle { + @objc get + } + @objc public init(datas: [Any], index: Swift.Int = 0, showSelectBtn: Swift.Bool = true, showBottomView: Swift.Bool = true, urlType: ((Foundation.URL) -> HHSDKVideo.ZLURLType)? 
= nil, urlImageLoader: ((Foundation.URL, UIKit.UIImageView, @escaping (CoreGraphics.CGFloat) -> Swift.Void, @escaping () -> Swift.Void) -> Swift.Void)? = nil) + @objc override dynamic public func viewDidLoad() + @objc override dynamic public func viewWillAppear(_ animated: Swift.Bool) + @objc override dynamic public func viewDidAppear(_ animated: Swift.Bool) + @objc override dynamic public func viewDidLayoutSubviews() + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) + @objc deinit +} +extension ZLImagePreviewController { + @objc dynamic public func scrollViewDidScroll(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndDecelerating(_ scrollView: UIKit.UIScrollView) +} +extension ZLImagePreviewController : UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegateFlowLayout { + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, minimumInteritemSpacingForSectionAt section: Swift.Int) -> CoreGraphics.CGFloat + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, minimumLineSpacingForSectionAt section: Swift.Int) -> CoreGraphics.CGFloat + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, insetForSectionAt section: Swift.Int) -> UIKit.UIEdgeInsets + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, sizeForItemAt indexPath: Foundation.IndexPath) -> CoreGraphics.CGSize + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: 
Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, didEndDisplaying cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) +} +@objc @_hasMissingDesignatedInitializers public class ZLImageStickerState : ObjectiveC.NSObject { + @objc override dynamic public init() + @objc deinit +} +@objc public enum ZLLanguageType : Swift.Int { + case system + case chineseSimplified + case chineseTraditional + case english + case japanese + case french + case german + case russian + case vietnamese + case korean + case malay + case italian + case indonesian + case portuguese + case spanish + case turkish + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +public struct ZLLocalLanguageKey : Swift.Hashable { + public let rawValue: Swift.String + public init(rawValue: Swift.String) + public static let previewCamera: HHSDKVideo.ZLLocalLanguageKey + public static let previewCameraRecord: HHSDKVideo.ZLLocalLanguageKey + public static let previewAlbum: HHSDKVideo.ZLLocalLanguageKey + public static let cancel: HHSDKVideo.ZLLocalLanguageKey + public static let noPhotoTips: HHSDKVideo.ZLLocalLanguageKey + public static let loading: HHSDKVideo.ZLLocalLanguageKey + public static let hudLoading: HHSDKVideo.ZLLocalLanguageKey + public static let done: HHSDKVideo.ZLLocalLanguageKey + public static let ok: HHSDKVideo.ZLLocalLanguageKey + public static let timeout: HHSDKVideo.ZLLocalLanguageKey + public static let noPhotoLibratyAuthority: HHSDKVideo.ZLLocalLanguageKey + public static let noCameraAuthority: HHSDKVideo.ZLLocalLanguageKey + public static let noMicrophoneAuthority: HHSDKVideo.ZLLocalLanguageKey + public static let cameraUnavailable: HHSDKVideo.ZLLocalLanguageKey + public static let keepRecording: HHSDKVideo.ZLLocalLanguageKey + public static let gotoSettings: HHSDKVideo.ZLLocalLanguageKey + public static let 
photo: HHSDKVideo.ZLLocalLanguageKey + public static let originalPhoto: HHSDKVideo.ZLLocalLanguageKey + public static let back: HHSDKVideo.ZLLocalLanguageKey + public static let edit: HHSDKVideo.ZLLocalLanguageKey + public static let editFinish: HHSDKVideo.ZLLocalLanguageKey + public static let revert: HHSDKVideo.ZLLocalLanguageKey + public static let brightness: HHSDKVideo.ZLLocalLanguageKey + public static let contrast: HHSDKVideo.ZLLocalLanguageKey + public static let saturation: HHSDKVideo.ZLLocalLanguageKey + public static let preview: HHSDKVideo.ZLLocalLanguageKey + public static let notAllowMixSelect: HHSDKVideo.ZLLocalLanguageKey + public static let save: HHSDKVideo.ZLLocalLanguageKey + public static let saveImageError: HHSDKVideo.ZLLocalLanguageKey + public static let saveVideoError: HHSDKVideo.ZLLocalLanguageKey + public static let exceededMaxSelectCount: HHSDKVideo.ZLLocalLanguageKey + public static let exceededMaxVideoSelectCount: HHSDKVideo.ZLLocalLanguageKey + public static let lessThanMinVideoSelectCount: HHSDKVideo.ZLLocalLanguageKey + public static let longerThanMaxVideoDuration: HHSDKVideo.ZLLocalLanguageKey + public static let shorterThanMaxVideoDuration: HHSDKVideo.ZLLocalLanguageKey + public static let iCloudVideoLoadFaild: HHSDKVideo.ZLLocalLanguageKey + public static let imageLoadFailed: HHSDKVideo.ZLLocalLanguageKey + public static let customCameraTips: HHSDKVideo.ZLLocalLanguageKey + public static let customCameraTakePhotoTips: HHSDKVideo.ZLLocalLanguageKey + public static let customCameraRecordVideoTips: HHSDKVideo.ZLLocalLanguageKey + public static let minRecordTimeTips: HHSDKVideo.ZLLocalLanguageKey + public static let cameraRoll: HHSDKVideo.ZLLocalLanguageKey + public static let panoramas: HHSDKVideo.ZLLocalLanguageKey + public static let videos: HHSDKVideo.ZLLocalLanguageKey + public static let favorites: HHSDKVideo.ZLLocalLanguageKey + public static let timelapses: HHSDKVideo.ZLLocalLanguageKey + public static let recentlyAdded: 
HHSDKVideo.ZLLocalLanguageKey + public static let bursts: HHSDKVideo.ZLLocalLanguageKey + public static let slomoVideos: HHSDKVideo.ZLLocalLanguageKey + public static let selfPortraits: HHSDKVideo.ZLLocalLanguageKey + public static let screenshots: HHSDKVideo.ZLLocalLanguageKey + public static let depthEffect: HHSDKVideo.ZLLocalLanguageKey + public static let livePhotos: HHSDKVideo.ZLLocalLanguageKey + public static let animated: HHSDKVideo.ZLLocalLanguageKey + public static let myPhotoStream: HHSDKVideo.ZLLocalLanguageKey + public static let noTitleAlbumListPlaceholder: HHSDKVideo.ZLLocalLanguageKey + public static let unableToAccessAllPhotos: HHSDKVideo.ZLLocalLanguageKey + public static let textStickerRemoveTips: HHSDKVideo.ZLLocalLanguageKey + public func hash(into hasher: inout Swift.Hasher) + public static func == (a: HHSDKVideo.ZLLocalLanguageKey, b: HHSDKVideo.ZLLocalLanguageKey) -> Swift.Bool + public var hashValue: Swift.Int { + get + } +} +public typealias Second = Swift.Int +@objc @_inheritsConvenienceInitializers public class ZLPhotoConfiguration : ObjectiveC.NSObject { + @objc public class func `default`() -> HHSDKVideo.ZLPhotoConfiguration + @objc public class func resetConfiguration() + @objc public var sortAscending: Swift.Bool + @objc public var maxSelectCount: Swift.Int { + @objc get + @objc set + } + @objc public var maxVideoSelectCount: Swift.Int { + @objc get + @objc set + } + @objc public var minVideoSelectCount: Swift.Int { + @objc get + @objc set + } + @objc public var allowMixSelect: Swift.Bool + @objc public var maxPreviewCount: Swift.Int + @objc public var cellCornerRadio: CoreGraphics.CGFloat + @objc public var allowSelectImage: Swift.Bool + @objc public var allowSelectVideo: Swift.Bool + @objc public var allowSelectGif: Swift.Bool + @objc public var allowSelectLivePhoto: Swift.Bool + @objc public var allowTakePhotoInLibrary: Swift.Bool { + @objc get + @objc set + } + @objc public var allowEditImage: Swift.Bool { + @objc get + @objc set 
+ } + @objc public var allowEditVideo: Swift.Bool { + @objc get + @objc set + } + @objc public var animateSelectBtnWhenSelect: Swift.Bool + @objc public var selectBtnAnimationDuration: Swift.Double + @objc public var editAfterSelectThumbnailImage: Swift.Bool + @objc public var cropVideoAfterSelectThumbnail: Swift.Bool + @objc public var showClipDirectlyIfOnlyHasClipTool: Swift.Bool + @objc public var saveNewImageAfterEdit: Swift.Bool + @objc public var allowSlideSelect: Swift.Bool + @objc public var autoScrollWhenSlideSelectIsActive: Swift.Bool + @objc public var autoScrollMaxSpeed: CoreGraphics.CGFloat + @objc public var allowDragSelect: Swift.Bool + @objc public var allowSelectOriginal: Swift.Bool + @objc public var allowPreviewPhotos: Swift.Bool + @objc public var showPreviewButtonInAlbum: Swift.Bool + @objc public var showSelectCountOnDoneBtn: Swift.Bool + @objc public var columnCount: Swift.Int { + @objc get + @objc set + } + @objc public var maxEditVideoTime: Swift.Int + @objc public var maxSelectVideoDuration: Swift.Int + @objc public var minSelectVideoDuration: Swift.Int + @objc public var editImageConfiguration: HHSDKVideo.ZLEditImageConfiguration + @objc public var showCaptureImageOnTakePhotoBtn: Swift.Bool + @objc public var showSelectBtnWhenSingleSelect: Swift.Bool + @objc public var showSelectedMask: Swift.Bool + @objc public var showSelectedBorder: Swift.Bool + @objc public var showInvalidMask: Swift.Bool + @objc public var showSelectedIndex: Swift.Bool + @objc public var showSelectedPhotoPreview: Swift.Bool + @objc public var shouldAnialysisAsset: Swift.Bool + @objc public var timeout: Swift.Double + @objc public var languageType: HHSDKVideo.ZLLanguageType { + @objc get + @objc set + } + @objc public var useCustomCamera: Swift.Bool + @objc public var allowTakePhoto: Swift.Bool { + @objc get + @objc set + } + @objc public var allowRecordVideo: Swift.Bool { + @objc get + @objc set + } + @objc public var minRecordDuration: HHSDKVideo.Second { + @objc 
get + @objc set + } + @objc public var maxRecordDuration: HHSDKVideo.Second { + @objc get + @objc set + } + @objc public var cameraConfiguration: HHSDKVideo.ZLCameraConfiguration + @objc public var hudStyle: HHSDKVideo.ZLProgressHUD.HUDStyle + @objc public var canSelectAsset: ((Photos.PHAsset) -> Swift.Bool)? + @objc public var showAddPhotoButton: Swift.Bool + @objc public var showEnterSettingTips: Swift.Bool + @objc public var noAuthorityCallback: ((HHSDKVideo.ZLNoAuthorityType) -> Swift.Void)? + @objc public var operateBeforeDoneAction: ((UIKit.UIViewController, @escaping () -> Swift.Void) -> Swift.Void)? + @objc override dynamic public init() + @objc deinit +} +@objc public enum ZLNoAuthorityType : Swift.Int { + case library + case camera + case microphone + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +extension ZLPhotoConfiguration { + @discardableResult + public func sortAscending(_ ascending: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxSelectCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxVideoSelectCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func minVideoSelectCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowMixSelect(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxPreviewCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func cellCornerRadio(_ cornerRadio: CoreGraphics.CGFloat) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSelectImage(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func allowSelectVideo(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSelectGif(_ value: Swift.Bool) 
-> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSelectLivePhoto(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowTakePhotoInLibrary(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowEditImage(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowEditVideo(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func animateSelectBtnWhenSelect(_ animate: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func selectBtnAnimationDuration(_ duration: CoreFoundation.CFTimeInterval) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func editAfterSelectThumbnailImage(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func cropVideoAfterSelectThumbnail(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showClipDirectlyIfOnlyHasClipTool(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func saveNewImageAfterEdit(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSlideSelect(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func autoScrollWhenSlideSelectIsActive(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func autoScrollMaxSpeed(_ speed: CoreGraphics.CGFloat) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowDragSelect(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSelectOriginal(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowPreviewPhotos(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showPreviewButtonInAlbum(_ value: Swift.Bool) -> 
HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectCountOnDoneBtn(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func columnCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxEditVideoTime(_ second: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxSelectVideoDuration(_ duration: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func minSelectVideoDuration(_ duration: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func editImageConfiguration(_ configuration: HHSDKVideo.ZLEditImageConfiguration) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showCaptureImageOnTakePhotoBtn(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectBtnWhenSingleSelect(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectedMask(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectedBorder(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showInvalidMask(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectedIndex(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectedPhotoPreview(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func shouldAnialysisAsset(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func timeout(_ timeout: Foundation.TimeInterval) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func languageType(_ type: HHSDKVideo.ZLLanguageType) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func useCustomCamera(_ value: Swift.Bool) -> 
HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowTakePhoto(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowRecordVideo(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func minRecordDuration(_ duration: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxRecordDuration(_ duration: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func cameraConfiguration(_ configuration: HHSDKVideo.ZLCameraConfiguration) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func hudStyle(_ style: HHSDKVideo.ZLProgressHUD.HUDStyle) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func canSelectAsset(_ block: ((Photos.PHAsset) -> Swift.Bool)?) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func showAddPhotoButton(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func showEnterSettingTips(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func noAuthorityCallback(_ callback: ((HHSDKVideo.ZLNoAuthorityType) -> Swift.Void)?) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func operateBeforeDoneAction(_ block: ((UIKit.UIViewController, @escaping () -> Swift.Void) -> Swift.Void)?) -> HHSDKVideo.ZLPhotoConfiguration +} +@objc @_inheritsConvenienceInitializers public class ZLPhotoManager : ObjectiveC.NSObject { + @objc public class func saveImageToAlbum(image: UIKit.UIImage, completion: ((Swift.Bool, Photos.PHAsset?) -> Swift.Void)?) + @objc public class func saveVideoToAlbum(url: Foundation.URL, completion: ((Swift.Bool, Photos.PHAsset?) -> Swift.Void)?) 
+ @objc public class func fetchPhoto(in result: Photos.PHFetchResult<Photos.PHAsset>, ascending: Swift.Bool, allowSelectImage: Swift.Bool, allowSelectVideo: Swift.Bool, limitCount: Swift.Int = .max) -> [HHSDKVideo.ZLPhotoModel] + @objc public class func getPhotoAlbumList(ascending: Swift.Bool, allowSelectImage: Swift.Bool, allowSelectVideo: Swift.Bool, completion: ([HHSDKVideo.ZLAlbumListModel]) -> Swift.Void) + @objc public class func getCameraRollAlbum(allowSelectImage: Swift.Bool, allowSelectVideo: Swift.Bool, completion: @escaping (HHSDKVideo.ZLAlbumListModel) -> Swift.Void) + @discardableResult + @objc public class func fetchImage(for asset: Photos.PHAsset, size: CoreGraphics.CGSize, progress: ((CoreGraphics.CGFloat, Swift.Error?, Swift.UnsafeMutablePointer<ObjectiveC.ObjCBool>, [Swift.AnyHashable : Any]?) -> Swift.Void)? = nil, completion: @escaping (UIKit.UIImage?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @discardableResult + @objc public class func fetchOriginalImage(for asset: Photos.PHAsset, progress: ((CoreGraphics.CGFloat, Swift.Error?, Swift.UnsafeMutablePointer<ObjectiveC.ObjCBool>, [Swift.AnyHashable : Any]?) -> Swift.Void)? = nil, completion: @escaping (UIKit.UIImage?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @discardableResult + @objc public class func fetchOriginalImageData(for asset: Photos.PHAsset, progress: ((CoreGraphics.CGFloat, Swift.Error?, Swift.UnsafeMutablePointer<ObjectiveC.ObjCBool>, [Swift.AnyHashable : Any]?) -> Swift.Void)? 
= nil, completion: @escaping (Foundation.Data, [Swift.AnyHashable : Any]?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @objc public class func fetchLivePhoto(for asset: Photos.PHAsset, completion: @escaping (Photos.PHLivePhoto?, [Swift.AnyHashable : Any]?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @objc public class func fetchVideo(for asset: Photos.PHAsset, progress: ((CoreGraphics.CGFloat, Swift.Error?, Swift.UnsafeMutablePointer<ObjectiveC.ObjCBool>, [Swift.AnyHashable : Any]?) -> Swift.Void)? = nil, completion: @escaping (AVFoundation.AVPlayerItem?, [Swift.AnyHashable : Any]?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @objc public class func fetchAVAsset(forVideo asset: Photos.PHAsset, completion: @escaping (AVFoundation.AVAsset?, [Swift.AnyHashable : Any]?) -> Swift.Void) -> Photos.PHImageRequestID + @objc public class func fetchAssetFilePath(asset: Photos.PHAsset, completion: @escaping (Swift.String?) -> Swift.Void) + @objc override dynamic public init() + @objc deinit +} +extension ZLPhotoManager { + @objc dynamic public class func hasPhotoLibratyAuthority() -> Swift.Bool + @objc dynamic public class func hasCameraAuthority() -> Swift.Bool + @objc dynamic public class func hasMicrophoneAuthority() -> Swift.Bool +} +extension ZLPhotoModel { + public enum MediaType : Swift.Int { + case unknown + case image + case gif + case livePhoto + case video + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } +} +@objc public class ZLPhotoModel : ObjectiveC.NSObject { + final public let ident: Swift.String + final public let asset: Photos.PHAsset + public var type: HHSDKVideo.ZLPhotoModel.MediaType + public var duration: Swift.String + public var isSelected: Swift.Bool + public var editImage: UIKit.UIImage? 
{ + get + set + } + public var second: HHSDKVideo.Second { + get + } + public var whRatio: CoreGraphics.CGFloat { + get + } + public var previewSize: CoreGraphics.CGSize { + get + } + public var editImageModel: HHSDKVideo.ZLEditImageModel? + public init(asset: Photos.PHAsset) + public func transformAssetType(for asset: Photos.PHAsset) -> HHSDKVideo.ZLPhotoModel.MediaType + public func transformDuration(for asset: Photos.PHAsset) -> Swift.String + @objc override dynamic public init() + @objc deinit +} +extension ZLPhotoModel { + public static func == (lhs: HHSDKVideo.ZLPhotoModel, rhs: HHSDKVideo.ZLPhotoModel) -> Swift.Bool +} +@objc @_inheritsConvenienceInitializers public class ZLPhotoPreviewSheet : UIKit.UIView { + @objc public var selectImageBlock: (([UIKit.UIImage], [Photos.PHAsset], Swift.Bool) -> Swift.Void)? + @objc public var selectImageRequestErrorBlock: (([Photos.PHAsset], [Swift.Int]) -> Swift.Void)? + @objc public var cancelBlock: (() -> Swift.Void)? + @objc deinit + @objc convenience override dynamic public init(frame: CoreGraphics.CGRect) + @objc public init(selectedAssets: [Photos.PHAsset]? 
= nil) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc override dynamic public func layoutSubviews() + @objc public func showPreview(animate: Swift.Bool = true, sender: UIKit.UIViewController) + @objc public func showPhotoLibrary(sender: UIKit.UIViewController) + @objc public func previewAssets(sender: UIKit.UIViewController, assets: [Photos.PHAsset], index: Swift.Int, isOriginal: Swift.Bool, showBottomViewAndSelectBtn: Swift.Bool = true) +} +extension ZLPhotoPreviewSheet : UIKit.UIGestureRecognizerDelegate { + @objc override dynamic public func gestureRecognizerShouldBegin(_ gestureRecognizer: UIKit.UIGestureRecognizer) -> Swift.Bool +} +extension ZLPhotoPreviewSheet : UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegateFlowLayout { + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, sizeForItemAt indexPath: Foundation.IndexPath) -> CoreGraphics.CGSize + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, willDisplay cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, didSelectItemAt indexPath: Foundation.IndexPath) +} +extension ZLPhotoPreviewSheet : UIKit.UIImagePickerControllerDelegate, UIKit.UINavigationControllerDelegate { + @objc dynamic public func imagePickerController(_ picker: UIKit.UIImagePickerController, didFinishPickingMediaWithInfo info: [UIKit.UIImagePickerController.InfoKey : Any]) +} +extension ZLPhotoPreviewSheet : Photos.PHPhotoLibraryChangeObserver { + @objc dynamic public func 
photoLibraryDidChange(_ changeInstance: Photos.PHChange) +} +@objc @_inheritsConvenienceInitializers public class ZLPhotoUIConfiguration : ObjectiveC.NSObject { + @objc public enum CancelButtonStyle : Swift.Int { + case text + case image + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public class func `default`() -> HHSDKVideo.ZLPhotoUIConfiguration + @objc public class func resetConfiguration() + @objc public var style: HHSDKVideo.ZLPhotoBrowserStyle + @objc public var statusBarStyle: UIKit.UIStatusBarStyle + @objc public var navCancelButtonStyle: HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle + @objc public var showStatusBarInPreviewInterface: Swift.Bool + @objc public var navViewBlurEffectOfAlbumList: UIKit.UIBlurEffect? + @objc public var navViewBlurEffectOfPreview: UIKit.UIBlurEffect? + @objc public var bottomViewBlurEffectOfAlbumList: UIKit.UIBlurEffect? + @objc public var bottomViewBlurEffectOfPreview: UIKit.UIBlurEffect? + @objc public var customImageNames: [Swift.String] { + @objc get + @objc set + } + public var customImageForKey: [Swift.String : UIKit.UIImage?] { + get + set + } + @objc public var customImageForKey_objc: [Swift.String : UIKit.UIImage] { + @objc get + @objc set + } + public var customLanguageKeyValue: [HHSDKVideo.ZLLocalLanguageKey : Swift.String] { + get + set + } + @objc public var customLanguageKeyValue_objc: [Swift.String : Swift.String] { + @objc get + @objc set + } + @objc public var themeFontName: Swift.String? 
{ + @objc get + @objc set + } + @objc public var sheetTranslucentColor: UIKit.UIColor + @objc public var sheetBtnBgColor: UIKit.UIColor + @objc public var sheetBtnTitleColor: UIKit.UIColor + @objc public var sheetBtnTitleTintColor: UIKit.UIColor + @objc public var navBarColor: UIKit.UIColor + @objc public var navBarColorOfPreviewVC: UIKit.UIColor + @objc public var navTitleColor: UIKit.UIColor + @objc public var navTitleColorOfPreviewVC: UIKit.UIColor + @objc public var navEmbedTitleViewBgColor: UIKit.UIColor + @objc public var albumListBgColor: UIKit.UIColor + @objc public var embedAlbumListTranslucentColor: UIKit.UIColor + @objc public var albumListTitleColor: UIKit.UIColor + @objc public var albumListCountColor: UIKit.UIColor + @objc public var separatorColor: UIKit.UIColor + @objc public var thumbnailBgColor: UIKit.UIColor + @objc public var previewVCBgColor: UIKit.UIColor + @objc public var bottomToolViewBgColor: UIKit.UIColor + @objc public var bottomToolViewBgColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewBtnNormalTitleColor: UIKit.UIColor + @objc public var bottomToolViewDoneBtnNormalTitleColor: UIKit.UIColor + @objc public var bottomToolViewBtnNormalTitleColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewDoneBtnNormalTitleColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewBtnDisableTitleColor: UIKit.UIColor + @objc public var bottomToolViewDoneBtnDisableTitleColor: UIKit.UIColor + @objc public var bottomToolViewBtnDisableTitleColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewDoneBtnDisableTitleColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewBtnNormalBgColor: UIKit.UIColor + @objc public var bottomToolViewBtnNormalBgColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewBtnDisableBgColor: UIKit.UIColor + @objc public var bottomToolViewBtnDisableBgColorOfPreviewVC: UIKit.UIColor + @objc public var selectMorePhotoWhenAuthIsLismitedTitleColor: UIKit.UIColor + @objc public 
var cameraRecodeProgressColor: UIKit.UIColor + @objc public var selectedMaskColor: UIKit.UIColor + @objc public var selectedBorderColor: UIKit.UIColor + @objc public var invalidMaskColor: UIKit.UIColor + @objc public var indexLabelTextColor: UIKit.UIColor + @objc public var indexLabelBgColor: UIKit.UIColor + @objc public var cameraCellBgColor: UIKit.UIColor + @objc public var adjustSliderNormalColor: UIKit.UIColor + @objc public var adjustSliderTintColor: UIKit.UIColor + @objc override dynamic public init() + @objc deinit +} +@objc public enum ZLPhotoBrowserStyle : Swift.Int { + case embedAlbumList + case externalAlbumList + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +extension ZLPhotoUIConfiguration { + @discardableResult + public func style(_ style: HHSDKVideo.ZLPhotoBrowserStyle) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func statusBarStyle(_ statusBarStyle: UIKit.UIStatusBarStyle) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navCancelButtonStyle(_ style: HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func showStatusBarInPreviewInterface(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navViewBlurEffectOfAlbumList(_ effect: UIKit.UIBlurEffect?) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navViewBlurEffectOfPreview(_ effect: UIKit.UIBlurEffect?) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomViewBlurEffectOfAlbumList(_ effect: UIKit.UIBlurEffect?) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomViewBlurEffectOfPreview(_ effect: UIKit.UIBlurEffect?) 
-> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func customImageNames(_ names: [Swift.String]) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func customImageForKey(_ map: [Swift.String : UIKit.UIImage?]) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func customLanguageKeyValue(_ map: [HHSDKVideo.ZLLocalLanguageKey : Swift.String]) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func themeFontName(_ name: Swift.String) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func sheetTranslucentColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func sheetBtnBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func sheetBtnTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func sheetBtnTitleTintColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navBarColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navBarColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navEmbedTitleViewBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func albumListBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func embedAlbumListTranslucentColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func albumListTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func albumListCountColor(_ 
color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func separatorColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func thumbnailBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func previewVCBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBgColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnNormalTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewDoneBtnNormalTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnNormalTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewDoneBtnNormalTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnDisableTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewDoneBtnDisableTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnDisableTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewDoneBtnDisableTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnNormalBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnNormalBgColorOfPreviewVC(_ color: UIKit.UIColor) -> 
HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnDisableBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnDisableBgColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func selectMorePhotoWhenAuthIsLismitedTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func cameraRecodeProgressColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func selectedMaskColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func selectedBorderColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func invalidMaskColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func indexLabelTextColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func indexLabelBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func cameraCellBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func adjustSliderNormalColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func adjustSliderTintColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration +} +@objc @_hasMissingDesignatedInitializers public class ZLProgressHUD : UIKit.UIView { + @objc public enum HUDStyle : Swift.Int { + case light + case lightBlur + case dark + case darkBlur + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc deinit + @objc public init(style: HHSDKVideo.ZLProgressHUD.HUDStyle) + @objc public func show(timeout: Foundation.TimeInterval = 100) + @objc public func hide() + @objc override dynamic 
public init(frame: CoreGraphics.CGRect) +} +@objc @_hasMissingDesignatedInitializers public class ZLTextStickerState : ObjectiveC.NSObject { + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class ZLVideoManager : ObjectiveC.NSObject { + @objc public class func mergeVideos(fileUrls: [Foundation.URL], completion: @escaping ((Foundation.URL?, Swift.Error?) -> Swift.Void)) + @objc override dynamic public init() + @objc deinit +} +extension ZLVideoManager { + @objc dynamic public class func exportVideo(for asset: Photos.PHAsset, exportType: HHSDKVideo.ZLVideoManager.ExportType = .mov, presetName: Swift.String = AVAssetExportPresetMediumQuality, complete: @escaping ((Foundation.URL?, Swift.Error?) -> Swift.Void)) + @objc dynamic public class func exportVideo(for asset: AVFoundation.AVAsset, range: CoreMedia.CMTimeRange = CMTimeRange(start: .zero, duration: .positiveInfinity), exportType: HHSDKVideo.ZLVideoManager.ExportType = .mov, presetName: Swift.String = AVAssetExportPresetMediumQuality, complete: @escaping ((Foundation.URL?, Swift.Error?) 
-> Swift.Void)) +} +extension ZLVideoManager { + @objc public enum ExportType : Swift.Int { + case mov + case mp4 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } +} +extension HHSDKVideo.AES.Error : Swift.Equatable {} +extension HHSDKVideo.AES.Error : Swift.Hashable {} +extension HHSDKVideo.AES.Variant : Swift.Equatable {} +extension HHSDKVideo.AES.Variant : Swift.Hashable {} +extension HHSDKVideo.AES.Variant : Swift.RawRepresentable {} +extension HHSDKVideo.Bit : Swift.Equatable {} +extension HHSDKVideo.Bit : Swift.Hashable {} +extension HHSDKVideo.Bit : Swift.RawRepresentable {} +extension HHSDKVideo.Blowfish.Error : Swift.Equatable {} +extension HHSDKVideo.Blowfish.Error : Swift.Hashable {} +extension HHSDKVideo.CBC.Error : Swift.Equatable {} +extension HHSDKVideo.CBC.Error : Swift.Hashable {} +extension HHSDKVideo.CCM : HHSDKVideo.BlockMode {} +extension HHSDKVideo.CCM.Error : Swift.Equatable {} +extension HHSDKVideo.CCM.Error : Swift.Hashable {} +extension HHSDKVideo.CFB.Error : Swift.Equatable {} +extension HHSDKVideo.CFB.Error : Swift.Hashable {} +extension HHSDKVideo.ChaCha20.Error : Swift.Equatable {} +extension HHSDKVideo.ChaCha20.Error : Swift.Hashable {} +extension HHSDKVideo.CipherError : Swift.Equatable {} +extension HHSDKVideo.CipherError : Swift.Hashable {} +extension HHSDKVideo.CMAC.Error : Swift.Equatable {} +extension HHSDKVideo.CMAC.Error : Swift.Hashable {} +extension HHSDKVideo.CTR : HHSDKVideo.BlockMode {} +extension HHSDKVideo.CTR.Error : Swift.Equatable {} +extension HHSDKVideo.CTR.Error : Swift.Hashable {} +extension HHSDKVideo.DateTransform.Unit : Swift.Equatable {} +extension HHSDKVideo.DateTransform.Unit : Swift.Hashable {} +extension HHSDKVideo.DateTransform.Unit : Swift.RawRepresentable {} +extension HHSDKVideo.DGElasticPullToRefreshState : Swift.Equatable {} +extension HHSDKVideo.DGElasticPullToRefreshState : Swift.Hashable {} +extension 
HHSDKVideo.DGElasticPullToRefreshState : Swift.RawRepresentable {} +extension HHSDKVideo.EKAlertMessage.ImagePosition : Swift.Equatable {} +extension HHSDKVideo.EKAlertMessage.ImagePosition : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.DisplayMode : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.DisplayMode : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.NotificationHapticFeedback : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.NotificationHapticFeedback : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.Position : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.Position : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.StatusBar : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.StatusBar : Swift.Hashable {} +extension HHSDKVideo.GCM.Mode : Swift.Equatable {} +extension HHSDKVideo.GCM.Mode : Swift.Hashable {} +extension HHSDKVideo.GCM.Error : Swift.Equatable {} +extension HHSDKVideo.GCM.Error : Swift.Hashable {} +extension HHSDKVideo.HHBaseCallingState : Swift.Equatable {} +extension HHSDKVideo.HHBaseCallingState : Swift.Hashable {} +extension HHSDKVideo.HHBaseCallingState : Swift.RawRepresentable {} +extension HHSDKVideo.HHMediaType : Swift.Equatable {} +extension HHSDKVideo.HHMediaType : Swift.Hashable {} +extension HHSDKVideo.HHMediaType : Swift.RawRepresentable {} +extension HHSDKVideo.DateFormat : Swift.Equatable {} +extension HHSDKVideo.DateFormat : Swift.Hashable {} +extension HHSDKVideo.DateFormat : Swift.RawRepresentable {} 
+extension HHSDKVideo.HHConsType : Swift.Equatable {} +extension HHSDKVideo.HHConsType : Swift.Hashable {} +extension HHSDKVideo.HHConsType : Swift.RawRepresentable {} +extension HHSDKVideo.HHFileCacheManager.HHAssetPathType : Swift.Equatable {} +extension HHSDKVideo.HHFileCacheManager.HHAssetPathType : Swift.Hashable {} +extension HHSDKVideo.HHFileCacheManager.HHFileFormat : Swift.Equatable {} +extension HHSDKVideo.HHFileCacheManager.HHFileFormat : Swift.Hashable {} +extension HHSDKVideo.HHFileCacheManager.HHFileFormat : Swift.RawRepresentable {} +extension HHSDKVideo.HHLogMode : Swift.Equatable {} +extension HHSDKVideo.HHLogMode : Swift.Hashable {} +extension HHSDKVideo.HHLogMode : Swift.RawRepresentable {} +extension HHSDKVideo.HHCallType : Swift.Equatable {} +extension HHSDKVideo.HHCallType : Swift.Hashable {} +extension HHSDKVideo.HHCallType : Swift.RawRepresentable {} +extension HHSDKVideo.HHServerType : Swift.Equatable {} +extension HHSDKVideo.HHServerType : Swift.Hashable {} +extension HHSDKVideo.HHRequestMethod : Swift.Equatable {} +extension HHSDKVideo.HHRequestMethod : Swift.Hashable {} +extension HHSDKVideo.HHPagerViewTransformerType : Swift.Equatable {} +extension HHSDKVideo.HHPagerViewTransformerType : Swift.Hashable {} +extension HHSDKVideo.HHPagerViewTransformerType : Swift.RawRepresentable {} +extension HHSDKVideo.HHRealNameType : Swift.Equatable {} +extension HHSDKVideo.HHRealNameType : Swift.Hashable {} +extension HHSDKVideo.HHRealNameType : Swift.RawRepresentable {} +extension HHSDKVideo.TrtcLog : Swift.Equatable {} +extension HHSDKVideo.TrtcLog : Swift.Hashable {} +extension HHSDKVideo.TrtcLog : Swift.RawRepresentable {} +extension HHSDKVideo.TrtcError : Swift.Equatable {} +extension HHSDKVideo.TrtcError : Swift.Hashable {} +extension HHSDKVideo.TrtcError : Swift.RawRepresentable {} +extension HHSDKVideo.hhToastPosition : Swift.Equatable {} +extension HHSDKVideo.hhToastPosition : Swift.Hashable {} +extension HHSDKVideo.HKDF.Error : 
Swift.Equatable {} +extension HHSDKVideo.HKDF.Error : Swift.Hashable {} +extension HHSDKVideo.HMAC.Error : Swift.Equatable {} +extension HHSDKVideo.HMAC.Error : Swift.Hashable {} +extension HHSDKVideo.HMAC.Variant : Swift.Equatable {} +extension HHSDKVideo.HMAC.Variant : Swift.Hashable {} +extension HHSDKVideo.ItemClass : Swift.Equatable {} +extension HHSDKVideo.ItemClass : Swift.Hashable {} +extension HHSDKVideo.ProtocolType : Swift.Equatable {} +extension HHSDKVideo.ProtocolType : Swift.Hashable {} +extension HHSDKVideo.AuthenticationType : Swift.Equatable {} +extension HHSDKVideo.AuthenticationType : Swift.Hashable {} +extension HHSDKVideo.Accessibility : Swift.Equatable {} +extension HHSDKVideo.Accessibility : Swift.Hashable {} +extension HHSDKVideo.Status : Swift.Equatable {} +extension HHSDKVideo.Status : Swift.Hashable {} +extension HHSDKVideo.MappingType : Swift.Equatable {} +extension HHSDKVideo.MappingType : Swift.Hashable {} +extension HHSDKVideo.OCB.Mode : Swift.Equatable {} +extension HHSDKVideo.OCB.Mode : Swift.Hashable {} +extension HHSDKVideo.OCB.Error : Swift.Equatable {} +extension HHSDKVideo.OCB.Error : Swift.Hashable {} +extension HHSDKVideo.OFB.Error : Swift.Equatable {} +extension HHSDKVideo.OFB.Error : Swift.Hashable {} +extension HHSDKVideo.Padding : Swift.Equatable {} +extension HHSDKVideo.Padding : Swift.Hashable {} +extension HHSDKVideo.PKCS5.PBKDF1.Error : Swift.Equatable {} +extension HHSDKVideo.PKCS5.PBKDF1.Error : Swift.Hashable {} +extension HHSDKVideo.PKCS5.PBKDF1.Variant : Swift.Equatable {} +extension HHSDKVideo.PKCS5.PBKDF1.Variant : Swift.Hashable {} +extension HHSDKVideo.PKCS5.PBKDF2.Error : Swift.Equatable {} +extension HHSDKVideo.PKCS5.PBKDF2.Error : Swift.Hashable {} +extension HHSDKVideo.PCBC.Error : Swift.Equatable {} +extension HHSDKVideo.PCBC.Error : Swift.Hashable {} +extension HHSDKVideo.PermissionStatus : Swift.Equatable {} +extension HHSDKVideo.PermissionStatus : Swift.Hashable {} +extension 
HHSDKVideo.PermissionStatus : Swift.RawRepresentable {} +extension HHSDKVideo.HHBasePermissionType : Swift.Equatable {} +extension HHSDKVideo.HHBasePermissionType : Swift.Hashable {} +extension HHSDKVideo.HHBasePermissionType : Swift.RawRepresentable {} +extension HHSDKVideo.Poly1305.Error : Swift.Equatable {} +extension HHSDKVideo.Poly1305.Error : Swift.Hashable {} +extension HHSDKVideo.Rabbit.Error : Swift.Equatable {} +extension HHSDKVideo.Rabbit.Error : Swift.Hashable {} +extension HHSDKVideo.Reachability.NetworkStatus : Swift.Equatable {} +extension HHSDKVideo.Reachability.NetworkStatus : Swift.Hashable {} +extension HHSDKVideo.RecordImgType : Swift.Equatable {} +extension HHSDKVideo.RecordImgType : Swift.Hashable {} +extension HHSDKVideo.RecordImgType : Swift.RawRepresentable {} +extension HHSDKVideo.SHA2.Variant : Swift.Equatable {} +extension HHSDKVideo.SHA2.Variant : Swift.Hashable {} +extension HHSDKVideo.SHA3.Variant : Swift.Equatable {} +extension HHSDKVideo.SHA3.Variant : Swift.Hashable {} +extension HHSDKVideo.HHIMCmd : Swift.Equatable {} +extension HHSDKVideo.HHIMCmd : Swift.Hashable {} +extension HHSDKVideo.HHIMCmd : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset : Swift.Equatable {} +extension HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.FocusMode : Swift.Equatable {} +extension HHSDKVideo.ZLCameraConfiguration.FocusMode : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.FocusMode : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.ExposureMode : Swift.Equatable {} +extension HHSDKVideo.ZLCameraConfiguration.ExposureMode : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.ExposureMode : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.FlashMode : Swift.Equatable {} +extension 
HHSDKVideo.ZLCameraConfiguration.FlashMode : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.FlashMode : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.VideoExportType : Swift.Equatable {} +extension HHSDKVideo.ZLCameraConfiguration.VideoExportType : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.VideoExportType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLEditImageConfiguration.EditTool : Swift.Equatable {} +extension HHSDKVideo.ZLEditImageConfiguration.EditTool : Swift.Hashable {} +extension HHSDKVideo.ZLEditImageConfiguration.EditTool : Swift.RawRepresentable {} +extension HHSDKVideo.ZLEditImageConfiguration.AdjustTool : Swift.Equatable {} +extension HHSDKVideo.ZLEditImageConfiguration.AdjustTool : Swift.Hashable {} +extension HHSDKVideo.ZLEditImageConfiguration.AdjustTool : Swift.RawRepresentable {} +extension HHSDKVideo.ZLFilterType : Swift.Equatable {} +extension HHSDKVideo.ZLFilterType : Swift.Hashable {} +extension HHSDKVideo.ZLFilterType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLURLType : Swift.Equatable {} +extension HHSDKVideo.ZLURLType : Swift.Hashable {} +extension HHSDKVideo.ZLURLType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLLanguageType : Swift.Equatable {} +extension HHSDKVideo.ZLLanguageType : Swift.Hashable {} +extension HHSDKVideo.ZLLanguageType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLNoAuthorityType : Swift.Equatable {} +extension HHSDKVideo.ZLNoAuthorityType : Swift.Hashable {} +extension HHSDKVideo.ZLNoAuthorityType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLPhotoModel.MediaType : Swift.Equatable {} +extension HHSDKVideo.ZLPhotoModel.MediaType : Swift.Hashable {} +extension HHSDKVideo.ZLPhotoModel.MediaType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle : Swift.Equatable {} +extension HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle : Swift.Hashable {} +extension 
HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle : Swift.RawRepresentable {} +extension HHSDKVideo.ZLPhotoBrowserStyle : Swift.Equatable {} +extension HHSDKVideo.ZLPhotoBrowserStyle : Swift.Hashable {} +extension HHSDKVideo.ZLPhotoBrowserStyle : Swift.RawRepresentable {} +extension HHSDKVideo.ZLProgressHUD.HUDStyle : Swift.Equatable {} +extension HHSDKVideo.ZLProgressHUD.HUDStyle : Swift.Hashable {} +extension HHSDKVideo.ZLProgressHUD.HUDStyle : Swift.RawRepresentable {} +extension HHSDKVideo.ZLVideoManager.ExportType : Swift.Equatable {} +extension HHSDKVideo.ZLVideoManager.ExportType : Swift.Hashable {} +extension HHSDKVideo.ZLVideoManager.ExportType : Swift.RawRepresentable {} diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64.swiftmodule b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64.swiftmodule new file mode 100644 index 0000000..53e7940 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/arm64.swiftmodule differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64-apple-ios-simulator.swiftdoc b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64-apple-ios-simulator.swiftdoc new file mode 100644 index 0000000..b8ddb67 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64-apple-ios-simulator.swiftdoc differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64-apple-ios-simulator.swiftinterface b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64-apple-ios-simulator.swiftinterface new file mode 100644 index 0000000..b95ec3b --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64-apple-ios-simulator.swiftinterface @@ -0,0 +1,5650 @@ +// swift-interface-format-version: 1.0 +// swift-compiler-version: Apple Swift version 5.4.2 (swiftlang-1205.0.28.2 clang-1205.0.19.57) +// 
swift-module-flags: -target x86_64-apple-ios10.0-simulator -enable-objc-interop -enable-library-evolution -swift-version 5 -enforce-exclusivity=checked -O -module-name HHSDKVideo +import AVFoundation +import AVKit +import Accelerate +import CoreGraphics +import CoreLocation +import CoreMotion +import CoreTelephony +import Darwin +import Dispatch +import Foundation +@_exported import HHSDKVideo +import ImageIO +import LocalAuthentication +import MobileCoreServices +import ObjectiveC +import Photos +import PhotosUI +import Security +import SecurityKit +import Swift +import SystemConfiguration +import UIKit +import UserNotifications +import WebKit +public protocol AEAD { + static var kLen: Swift.Int { get } + static var ivRange: Swift.Range<Swift.Int> { get } +} +@_hasMissingDesignatedInitializers final public class AEADChaCha20Poly1305 : HHSDKVideo.AEAD { + public static let kLen: Swift.Int + public static var ivRange: Swift.Range<Swift.Int> + public static func encrypt(_ plainText: Swift.Array<Swift.UInt8>, key: Swift.Array<Swift.UInt8>, iv: Swift.Array<Swift.UInt8>, authenticationHeader: Swift.Array<Swift.UInt8>) throws -> (cipherText: Swift.Array<Swift.UInt8>, authenticationTag: Swift.Array<Swift.UInt8>) + public static func decrypt(_ cipherText: Swift.Array<Swift.UInt8>, key: Swift.Array<Swift.UInt8>, iv: Swift.Array<Swift.UInt8>, authenticationHeader: Swift.Array<Swift.UInt8>, authenticationTag: Swift.Array<Swift.UInt8>) throws -> (plainText: Swift.Array<Swift.UInt8>, success: Swift.Bool) + @objc deinit +} +final public class AES { + public enum Error : Swift.Error { + case invalidKeySize + case dataPaddingRequired + case invalidData + public static func == (a: HHSDKVideo.AES.Error, b: HHSDKVideo.AES.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public enum Variant : Swift.Int { + case aes128, aes192, aes256 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + 
public var rawValue: Swift.Int { + get + } + } + public static let blockSize: Swift.Int + final public let keySize: Swift.Int + final public let variant: HHSDKVideo.AES.Variant + public init(key: Swift.Array<Swift.UInt8>, blockMode: HHSDKVideo.BlockMode, padding: HHSDKVideo.Padding = .pkcs7) throws + @objc deinit +} +extension AES : HHSDKVideo.Cipher { + final public func encrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + final public func decrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +extension AES { + convenience public init(key: Swift.String, iv: Swift.String, padding: HHSDKVideo.Padding = .pkcs7) throws +} +extension AES : HHSDKVideo.Cryptors { + final public func makeEncryptor() throws -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable + final public func makeDecryptor() throws -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable +} +extension Array where Element == Swift.UInt8 { + public init(hex: Swift.String) + public func toHexString() -> Swift.String +} +extension Array where Element == Swift.UInt8 { + @available(*, deprecated) + public func chunks(size chunksize: Swift.Int) -> Swift.Array<Swift.Array<Element>> + public func md5() -> [Element] + public func sha1() -> [Element] + public func sha224() -> [Element] + public func sha256() -> [Element] + public func sha384() -> [Element] + public func sha512() -> [Element] + public func sha2(_ variant: HHSDKVideo.SHA2.Variant) -> [Element] + public func sha3(_ variant: HHSDKVideo.SHA3.Variant) -> [Element] + public func crc32(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.UInt32 + public func crc32c(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.UInt32 + public func crc16(seed: Swift.UInt16? 
= nil) -> Swift.UInt16 + public func encrypt(cipher: HHSDKVideo.Cipher) throws -> [Element] + public func decrypt(cipher: HHSDKVideo.Cipher) throws -> [Element] + public func authenticate<A>(with authenticator: A) throws -> [Element] where A : HHSDKVideo.CryptoAuthenticator +} +extension Array where Element == Swift.UInt8 { + public func toBase64() -> Swift.String? + public init(base64: Swift.String) +} +public protocol CryptoAuthenticator { + func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +public enum Bit : Swift.Int { + case zero + case one + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@_hasMissingDesignatedInitializers public class BlockDecryptor : HHSDKVideo.Cryptor, HHSDKVideo.Updatable { + public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> + public func seek(to position: Swift.Int) throws + @objc deinit +} +public typealias CipherOperationOnBlock = (Swift.ArraySlice<Swift.UInt8>) -> Swift.Array<Swift.UInt8>? 
+public protocol BlockMode { + var options: HHSDKVideo.BlockModeOption { get } + func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +public struct BlockModeOption : Swift.OptionSet { + public let rawValue: Swift.Int + public init(rawValue: Swift.Int) + public typealias ArrayLiteralElement = HHSDKVideo.BlockModeOption + public typealias Element = HHSDKVideo.BlockModeOption + public typealias RawValue = Swift.Int +} +final public class Blowfish { + public enum Error : Swift.Error { + case dataPaddingRequired + case invalidKeyOrInitializationVector + case invalidInitializationVector + case invalidBlockMode + public static func == (a: HHSDKVideo.Blowfish.Error, b: HHSDKVideo.Blowfish.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public static let blockSize: Swift.Int + final public let keySize: Swift.Int + public init(key: Swift.Array<Swift.UInt8>, blockMode: HHSDKVideo.BlockMode = CBC(iv: Array<UInt8>(repeating: 0, count: Blowfish.blockSize)), padding: HHSDKVideo.Padding) throws + @objc deinit +} +extension Blowfish : HHSDKVideo.Cipher { + final public func encrypt<C>(_ bytes: C) throws -> Swift.Array<Swift.UInt8> where C : Swift.Collection, C.Element == Swift.UInt8, C.Index == Swift.Int + final public func decrypt<C>(_ bytes: C) throws -> Swift.Array<Swift.UInt8> where C : Swift.Collection, C.Element == Swift.UInt8, C.Index == Swift.Int +} +extension Blowfish { + convenience public init(key: Swift.String, iv: Swift.String, padding: HHSDKVideo.Padding = .pkcs7) throws +} +@_hasMissingDesignatedInitializers public class BusyPics { + public static let `default`: HHSDKVideo.BusyPics + public func cacheImgs() + public func getImgs() -> [Foundation.URL?] 
+ @objc deinit +} +public struct CallDoctorModel : HHSDKVideo.Mappable { + public var doctor: HHSDKVideo.HHDoctorModel? + public var order: HHSDKVideo.OrderModel? + public var appoint: Swift.String? + public var pushFlowUrl: Swift.String? + public var realPatientUuid: Swift.Int? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct CBC : HHSDKVideo.BlockMode { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.CBC.Error, b: HHSDKVideo.CBC.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +@_inheritsConvenienceInitializers final public class CBCMAC : HHSDKVideo.CMAC { + override final public func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + override public init(key: Swift.Array<Swift.UInt8>) throws + @objc deinit +} +public struct CCM { + public enum Error : Swift.Error { + case invalidInitializationVector + case invalidParameter + case fail + public static func == (a: HHSDKVideo.CCM.Error, b: HHSDKVideo.CCM.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public var authenticationTag: Swift.Array<Swift.UInt8>? + public init(iv: Swift.Array<Swift.UInt8>, tagLength: Swift.Int, messageLength: Swift.Int, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? 
= nil) + public init(iv: Swift.Array<Swift.UInt8>, tagLength: Swift.Int, messageLength: Swift.Int, authenticationTag: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? = nil) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +public struct CFB : HHSDKVideo.BlockMode { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.CFB.Error, b: HHSDKVideo.CFB.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +final public class ChaCha20 { + public enum Error : Swift.Error { + case invalidKeyOrInitializationVector + case notSupported + public static func == (a: HHSDKVideo.ChaCha20.Error, b: HHSDKVideo.ChaCha20.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public static let blockSize: Swift.Int + final public let keySize: Swift.Int + public init(key: Swift.Array<Swift.UInt8>, iv nonce: Swift.Array<Swift.UInt8>) throws + @objc deinit +} +extension ChaCha20 : HHSDKVideo.Cipher { + final public func encrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + final public func decrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +extension ChaCha20 { + public struct ChaChaEncryptor : HHSDKVideo.Cryptor, HHSDKVideo.Updatable { + public mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) 
throws -> Swift.Array<Swift.UInt8> + public func seek(to: Swift.Int) throws + } +} +extension ChaCha20 { + public struct ChaChaDecryptor : HHSDKVideo.Cryptor, HHSDKVideo.Updatable { + public mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = true) throws -> Swift.Array<Swift.UInt8> + public func seek(to: Swift.Int) throws + } +} +extension ChaCha20 : HHSDKVideo.Cryptors { + final public func makeEncryptor() -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable + final public func makeDecryptor() -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable +} +extension ChaCha20 { + convenience public init(key: Swift.String, iv: Swift.String) throws +} +public struct ChatApi { +} +@_hasMissingDesignatedInitializers final public class Checksum { + @objc deinit +} +extension Checksum { + public static func crc32(_ message: Swift.Array<Swift.UInt8>, seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.UInt32 + public static func crc32c(_ message: Swift.Array<Swift.UInt8>, seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.UInt32 + public static func crc16(_ message: Swift.Array<Swift.UInt8>, seed: Swift.UInt16? 
= nil) -> Swift.UInt16 +} +public enum CipherError : Swift.Error { + case encrypt + case decrypt + public static func == (a: HHSDKVideo.CipherError, b: HHSDKVideo.CipherError) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +public protocol Cipher : AnyObject { + var keySize: Swift.Int { get } + func encrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + func encrypt(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + func decrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + func decrypt(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +extension Cipher { + public func encrypt(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + public func decrypt(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +public protocol CipherModeWorker { + var cipherOperation: HHSDKVideo.CipherOperationOnBlock { get } + var additionalBufferSize: Swift.Int { get } + mutating func encrypt(block plaintext: Swift.ArraySlice<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + mutating func decrypt(block ciphertext: Swift.ArraySlice<Swift.UInt8>) -> Swift.Array<Swift.UInt8> +} +public protocol BlockModeWorker : HHSDKVideo.CipherModeWorker { + var blockSize: Swift.Int { get } +} +public protocol CounterModeWorker : HHSDKVideo.CipherModeWorker { + associatedtype Counter + var counter: Self.Counter { get set } +} +public protocol SeekableModeWorker : HHSDKVideo.CipherModeWorker { + mutating func seek(to position: Swift.Int) throws +} +public protocol StreamModeWorker : HHSDKVideo.CipherModeWorker { +} +public protocol FinalizingEncryptModeWorker : HHSDKVideo.CipherModeWorker { + mutating func finalize(encrypt ciphertext: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.ArraySlice<Swift.UInt8> +} +public protocol FinalizingDecryptModeWorker : HHSDKVideo.CipherModeWorker { + @discardableResult + 
mutating func willDecryptLast(bytes ciphertext: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.ArraySlice<Swift.UInt8> + mutating func didDecryptLast(bytes plaintext: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.ArraySlice<Swift.UInt8> + mutating func finalize(decrypt plaintext: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.ArraySlice<Swift.UInt8> +} +public class CMAC : HHSDKVideo.CryptoAuthenticator { + public enum Error : Swift.Error { + case wrongKeyLength + public static func == (a: HHSDKVideo.CMAC.Error, b: HHSDKVideo.CMAC.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(key: Swift.Array<Swift.UInt8>) throws + public func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + public func authenticate(_ bytes: Swift.Array<Swift.UInt8>, cipher: HHSDKVideo.Cipher) throws -> Swift.Array<Swift.UInt8> + @objc deinit +} +open class CodableTransform<T> : HHSDKVideo.TransformType where T : Swift.Decodable, T : Swift.Encodable { + public typealias Object = T + public typealias JSON = Any + public init() + open func transformFromJSON(_ value: Any?) -> HHSDKVideo.CodableTransform<T>.Object? + open func transformToJSON(_ value: T?) -> HHSDKVideo.CodableTransform<T>.JSON? + @objc deinit +} +public struct CommentApi { +} +@objc @_inheritsConvenienceInitializers public class CommentBaseVC : UIKit.UIViewController { + @objc override dynamic public func viewWillAppear(_ animated: Swift.Bool) + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) 
+ @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class CommentVC : HHSDKVideo.CommentBaseVC { + @objc override dynamic public func viewDidLoad() + public static func show(_ orderId: Swift.String, docId: Swift.String, uuid: Swift.Int?, type: HHSDKVideo.HHCallType?, _ model: HHSDKVideo.HHGetQuesetionModel?) + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +public protocol Cryptor { + mutating func seek(to: Swift.Int) throws +} +public protocol Cryptors : AnyObject { + func makeEncryptor() throws -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable + func makeDecryptor() throws -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable + static func randomIV(_ blockSize: Swift.Int) -> Swift.Array<Swift.UInt8> +} +extension Cryptors { + public static func randomIV(_ count: Swift.Int) -> Swift.Array<Swift.UInt8> +} +public struct CTR { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.CTR.Error, b: HHSDKVideo.CTR.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>, counter: Swift.Int = 0) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +open class CustomDateFormatTransform : HHSDKVideo.DateFormatterTransform { + public init(formatString: Swift.String) + override public init(dateFormatter: Foundation.DateFormatter) + @objc deinit +} +extension Data { + public func checksum() -> Swift.UInt16 + public func md5() -> Foundation.Data + public func sha1() -> Foundation.Data + public func 
sha224() -> Foundation.Data + public func sha256() -> Foundation.Data + public func sha384() -> Foundation.Data + public func sha512() -> Foundation.Data + public func sha3(_ variant: HHSDKVideo.SHA3.Variant) -> Foundation.Data + public func crc32(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Foundation.Data + public func crc32c(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Foundation.Data + public func crc16(seed: Swift.UInt16? = nil) -> Foundation.Data + public func encrypt(cipher: HHSDKVideo.Cipher) throws -> Foundation.Data + public func decrypt(cipher: HHSDKVideo.Cipher) throws -> Foundation.Data + public func authenticate(with authenticator: HHSDKVideo.CryptoAuthenticator) throws -> Foundation.Data +} +extension Data { + public init(hex: Swift.String) + public var bytes: Swift.Array<Swift.UInt8> { + get + } + public func toHexString() -> Swift.String +} +open class DataTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.Data + public typealias JSON = Swift.String + public init() + open func transformFromJSON(_ value: Any?) -> Foundation.Data? + open func transformToJSON(_ value: Foundation.Data?) -> Swift.String? + @objc deinit +} +open class DateFormatterTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.Date + public typealias JSON = Swift.String + final public let dateFormatter: Foundation.DateFormatter + public init(dateFormatter: Foundation.DateFormatter) + open func transformFromJSON(_ value: Any?) -> Foundation.Date? + open func transformToJSON(_ value: Foundation.Date?) -> Swift.String? 
+ @objc deinit +} +open class DateTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.Date + public typealias JSON = Swift.Double + public enum Unit : Foundation.TimeInterval { + case seconds + case milliseconds + public init?(rawValue: Foundation.TimeInterval) + public typealias RawValue = Foundation.TimeInterval + public var rawValue: Foundation.TimeInterval { + get + } + } + public init(unit: HHSDKVideo.DateTransform.Unit = .seconds) + open func transformFromJSON(_ value: Any?) -> Foundation.Date? + open func transformToJSON(_ value: Foundation.Date?) -> Swift.Double? + @objc deinit +} +public struct DGElasticPullToRefreshConstants { + public static var WaveMaxHeight: CoreGraphics.CGFloat + public static var MinOffsetToPull: CoreGraphics.CGFloat + public static var LoadingContentInset: CoreGraphics.CGFloat + public static var LoadingViewSize: CoreGraphics.CGFloat +} +extension NSObject { + public func dg_addObserver(_ observer: ObjectiveC.NSObject, forKeyPath keyPath: Swift.String) + public func dg_removeObserver(_ observer: ObjectiveC.NSObject, forKeyPath keyPath: Swift.String) +} +extension UIScrollView { + public func dg_addPullToRefreshWithActionHandler(_ actionHandler: @escaping () -> Swift.Void, loadingView: HHSDKVideo.DGElasticPullToRefreshLoadingView?) 
+ public func dg_removePullToRefresh() + public func dg_setPullToRefreshBackgroundColor(_ color: UIKit.UIColor) + public func dg_setPullToRefreshFillColor(_ color: UIKit.UIColor) + public func dg_stopLoading() + public func dg_startLoading() +} +extension UIView { + public func dg_center(_ usePresentationLayerIfPossible: Swift.Bool) -> CoreGraphics.CGPoint +} +extension UIPanGestureRecognizer { + public func dg_resign() +} +extension UIGestureRecognizer.State { + public func dg_isAnyOf(_ values: [UIKit.UIGestureRecognizer.State]) -> Swift.Bool +} +@objc @_inheritsConvenienceInitializers open class DGElasticPullToRefreshLoadingView : UIKit.UIView { + @objc dynamic public init() + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + open func setPullProgress(_ progress: CoreGraphics.CGFloat) + open func startAnimating() + open func stopLoading() + @objc deinit +} +extension CGFloat { + public func toRadians() -> CoreGraphics.CGFloat + public func toDegrees() -> CoreGraphics.CGFloat +} +@objc open class DGElasticPullToRefreshLoadingViewCircle : HHSDKVideo.DGElasticPullToRefreshLoadingView { + @objc override dynamic public init() + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + override open func setPullProgress(_ progress: CoreGraphics.CGFloat) + override open func startAnimating() + override open func stopLoading() + @objc override dynamic open func tintColorDidChange() + @objc override dynamic open func layoutSubviews() + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public enum DGElasticPullToRefreshState : Swift.Int { + case stopped + case dragging + case animatingBounce + case loading + case animatingToStopped + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc @_hasMissingDesignatedInitializers open class DGElasticPullToRefreshView : 
UIKit.UIView { + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc deinit + @objc override dynamic open func observeValue(forKeyPath keyPath: Swift.String?, of object: Any?, change: [Foundation.NSKeyValueChangeKey : Any]?, context: Swift.UnsafeMutableRawPointer?) + @objc override dynamic open func layoutSubviews() + @objc override dynamic public init(frame: CoreGraphics.CGRect) +} +public struct DictionaryTransform<Key, Value> : HHSDKVideo.TransformType where Key : Swift.Hashable, Key : Swift.RawRepresentable, Value : HHSDKVideo.Mappable, Key.RawValue == Swift.String { + public init() + public func transformFromJSON(_ value: Any?) -> [Key : Value]? + public func transformToJSON(_ value: [Key : Value]?) -> Any? + public typealias JSON = Any + public typealias Object = Swift.Dictionary<Key, Value> +} +@available(*, renamed: "Digest") +public typealias Hash = HHSDKVideo.Digest +public struct Digest { + public static func md5(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha1(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha224(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha256(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha384(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha512(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha2(_ bytes: Swift.Array<Swift.UInt8>, variant: HHSDKVideo.SHA2.Variant) -> Swift.Array<Swift.UInt8> + public static func sha3(_ bytes: Swift.Array<Swift.UInt8>, variant: HHSDKVideo.SHA3.Variant) -> Swift.Array<Swift.UInt8> +} +public struct ECB : HHSDKVideo.BlockMode { + public let options: HHSDKVideo.BlockModeOption + public init() + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping 
HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +@objc @_inheritsConvenienceInitializers public class EKAccessoryNoteMessageView : UIKit.UIView { + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +public struct EKAlertMessage { + public enum ImagePosition { + case top + case left + public static func == (a: HHSDKVideo.EKAlertMessage.ImagePosition, b: HHSDKVideo.EKAlertMessage.ImagePosition) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let imagePosition: HHSDKVideo.EKAlertMessage.ImagePosition + public let simpleMessage: HHSDKVideo.EKSimpleMessage + public let buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent + public init(simpleMessage: HHSDKVideo.EKSimpleMessage, imagePosition: HHSDKVideo.EKAlertMessage.ImagePosition = .top, buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent) +} +@objc @_hasMissingDesignatedInitializers final public class EKAlertMessageView : HHSDKVideo.EKSimpleMessageView { + public init(with message: HHSDKVideo.EKAlertMessage) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc deinit +} +public struct EKAttributes { + public var name: Swift.String? 
+ public var windowLevel: HHSDKVideo.EKAttributes.WindowLevel + public var position: HHSDKVideo.EKAttributes.Position + public var precedence: HHSDKVideo.EKAttributes.Precedence + public var displayDuration: Swift.Double + public var positionConstraints: HHSDKVideo.EKAttributes.PositionConstraints + public var screenInteraction: HHSDKVideo.EKAttributes.UserInteraction + public var entryInteraction: HHSDKVideo.EKAttributes.UserInteraction + public var scroll: HHSDKVideo.EKAttributes.Scroll + public var hapticFeedbackType: HHSDKVideo.EKAttributes.NotificationHapticFeedback + public var lifecycleEvents: HHSDKVideo.EKAttributes.LifecycleEvents + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public var entryBackground: HHSDKVideo.EKAttributes.BackgroundStyle + public var screenBackground: HHSDKVideo.EKAttributes.BackgroundStyle + public var shadow: HHSDKVideo.EKAttributes.Shadow + public var roundCorners: HHSDKVideo.EKAttributes.RoundCorners + public var border: HHSDKVideo.EKAttributes.Border + public var statusBar: HHSDKVideo.EKAttributes.StatusBar + public var entranceAnimation: HHSDKVideo.EKAttributes.Animation + public var exitAnimation: HHSDKVideo.EKAttributes.Animation + public var popBehavior: HHSDKVideo.EKAttributes.PopBehavior { + get + set + } + public init() +} +extension EKAttributes { + public struct Animation : Swift.Equatable { + public struct Spring : Swift.Equatable { + public var damping: CoreGraphics.CGFloat + public var initialVelocity: CoreGraphics.CGFloat + public init(damping: CoreGraphics.CGFloat, initialVelocity: CoreGraphics.CGFloat) + public static func == (a: HHSDKVideo.EKAttributes.Animation.Spring, b: HHSDKVideo.EKAttributes.Animation.Spring) -> Swift.Bool + } + public struct RangeAnimation : Swift.Equatable { + public var duration: Foundation.TimeInterval + public var delay: Foundation.TimeInterval + public var start: CoreGraphics.CGFloat + public var end: CoreGraphics.CGFloat + public var spring: 
HHSDKVideo.EKAttributes.Animation.Spring? + public init(from start: CoreGraphics.CGFloat, to end: CoreGraphics.CGFloat, duration: Foundation.TimeInterval, delay: Foundation.TimeInterval = 0, spring: HHSDKVideo.EKAttributes.Animation.Spring? = nil) + public static func == (a: HHSDKVideo.EKAttributes.Animation.RangeAnimation, b: HHSDKVideo.EKAttributes.Animation.RangeAnimation) -> Swift.Bool + } + public struct Translate : Swift.Equatable { + public enum AnchorPosition : Swift.Equatable { + case top + case bottom + case automatic + public func hash(into hasher: inout Swift.Hasher) + public static func == (a: HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition, b: HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition) -> Swift.Bool + public var hashValue: Swift.Int { + get + } + } + public var duration: Foundation.TimeInterval + public var delay: Foundation.TimeInterval + public var anchorPosition: HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition + public var spring: HHSDKVideo.EKAttributes.Animation.Spring? + public init(duration: Foundation.TimeInterval, anchorPosition: HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition = .automatic, delay: Foundation.TimeInterval = 0, spring: HHSDKVideo.EKAttributes.Animation.Spring? = nil) + public static func == (a: HHSDKVideo.EKAttributes.Animation.Translate, b: HHSDKVideo.EKAttributes.Animation.Translate) -> Swift.Bool + } + public var translate: HHSDKVideo.EKAttributes.Animation.Translate? + public var scale: HHSDKVideo.EKAttributes.Animation.RangeAnimation? + public var fade: HHSDKVideo.EKAttributes.Animation.RangeAnimation? 
+ public var containsTranslation: Swift.Bool { + get + } + public var containsScale: Swift.Bool { + get + } + public var containsFade: Swift.Bool { + get + } + public var containsAnimation: Swift.Bool { + get + } + public var maxDelay: Foundation.TimeInterval { + get + } + public var maxDuration: Foundation.TimeInterval { + get + } + public var totalDuration: Foundation.TimeInterval { + get + } + public static var translation: HHSDKVideo.EKAttributes.Animation { + get + } + public static var none: HHSDKVideo.EKAttributes.Animation { + get + } + public init(translate: HHSDKVideo.EKAttributes.Animation.Translate? = nil, scale: HHSDKVideo.EKAttributes.Animation.RangeAnimation? = nil, fade: HHSDKVideo.EKAttributes.Animation.RangeAnimation? = nil) + public static func == (a: HHSDKVideo.EKAttributes.Animation, b: HHSDKVideo.EKAttributes.Animation) -> Swift.Bool + } +} +extension EKAttributes { + public enum BackgroundStyle : Swift.Equatable { + public struct BlurStyle : Swift.Equatable { + public static var extra: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle { + get + } + public static var standard: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle { + get + } + @available(iOS 10.0, *) + public static var prominent: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle { + get + } + public static var dark: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle { + get + } + public init(style: UIKit.UIBlurEffect.Style) + public init(light: UIKit.UIBlurEffect.Style, dark: UIKit.UIBlurEffect.Style) + public func blurStyle(for traits: UIKit.UITraitCollection, mode: HHSDKVideo.EKAttributes.DisplayMode) -> UIKit.UIBlurEffect.Style + public func blurEffect(for traits: UIKit.UITraitCollection, mode: HHSDKVideo.EKAttributes.DisplayMode) -> UIKit.UIBlurEffect + public static func == (a: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle, b: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle) -> Swift.Bool + } + public struct Gradient { + public var colors: [HHSDKVideo.EKColor] + public 
var startPoint: CoreGraphics.CGPoint + public var endPoint: CoreGraphics.CGPoint + public init(colors: [HHSDKVideo.EKColor], startPoint: CoreGraphics.CGPoint, endPoint: CoreGraphics.CGPoint) + } + case visualEffect(style: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle) + case color(color: HHSDKVideo.EKColor) + case gradient(gradient: HHSDKVideo.EKAttributes.BackgroundStyle.Gradient) + case image(image: UIKit.UIImage) + case clear + public static func == (lhs: HHSDKVideo.EKAttributes.BackgroundStyle, rhs: HHSDKVideo.EKAttributes.BackgroundStyle) -> Swift.Bool + } +} +extension EKAttributes { + public enum DisplayMode { + case inferred + case light + case dark + public static func == (a: HHSDKVideo.EKAttributes.DisplayMode, b: HHSDKVideo.EKAttributes.DisplayMode) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } +} +extension EKAttributes { + public typealias DisplayDuration = Foundation.TimeInterval +} +extension EKAttributes { + public enum RoundCorners { + case none + case all(radius: CoreGraphics.CGFloat) + case top(radius: CoreGraphics.CGFloat) + case bottom(radius: CoreGraphics.CGFloat) + } + public enum Border { + case none + case value(color: UIKit.UIColor, width: CoreGraphics.CGFloat) + } +} +extension EKAttributes { + public enum NotificationHapticFeedback { + case success + case warning + case error + case none + public static func == (a: HHSDKVideo.EKAttributes.NotificationHapticFeedback, b: HHSDKVideo.EKAttributes.NotificationHapticFeedback) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } +} +extension EKAttributes { + public struct LifecycleEvents { + public typealias Event = () -> Swift.Void + public var willAppear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? + public var didAppear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? + public var willDisappear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? 
+ public var didDisappear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? + public init(willAppear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? = nil, didAppear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? = nil, willDisappear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? = nil, didDisappear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? = nil) + } +} +extension EKAttributes { + public enum PopBehavior { + case overridden + case animated(animation: HHSDKVideo.EKAttributes.Animation) + public var isOverriden: Swift.Bool { + get + } + } +} +extension EKAttributes { + public enum Position { + case top + case bottom + case center + public var isTop: Swift.Bool { + get + } + public var isCenter: Swift.Bool { + get + } + public var isBottom: Swift.Bool { + get + } + public static func == (a: HHSDKVideo.EKAttributes.Position, b: HHSDKVideo.EKAttributes.Position) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } +} +extension EKAttributes { + public struct PositionConstraints { + public enum SafeArea { + case overridden + case empty(fillSafeArea: Swift.Bool) + public var isOverridden: Swift.Bool { + get + } + } + public enum Edge { + case ratio(value: CoreGraphics.CGFloat) + case offset(value: CoreGraphics.CGFloat) + case constant(value: CoreGraphics.CGFloat) + case intrinsic + public static var fill: HHSDKVideo.EKAttributes.PositionConstraints.Edge { + get + } + } + public struct Size { + public var width: HHSDKVideo.EKAttributes.PositionConstraints.Edge + public var height: HHSDKVideo.EKAttributes.PositionConstraints.Edge + public init(width: HHSDKVideo.EKAttributes.PositionConstraints.Edge, height: HHSDKVideo.EKAttributes.PositionConstraints.Edge) + public static var intrinsic: HHSDKVideo.EKAttributes.PositionConstraints.Size { + get + } + public static var sizeToWidth: HHSDKVideo.EKAttributes.PositionConstraints.Size { + get + } + public static var screen: 
HHSDKVideo.EKAttributes.PositionConstraints.Size { + get + } + } + public enum KeyboardRelation { + public struct Offset { + public var bottom: CoreGraphics.CGFloat + public var screenEdgeResistance: CoreGraphics.CGFloat? + public init(bottom: CoreGraphics.CGFloat = 0, screenEdgeResistance: CoreGraphics.CGFloat? = nil) + public static var none: HHSDKVideo.EKAttributes.PositionConstraints.KeyboardRelation.Offset { + get + } + } + case bind(offset: HHSDKVideo.EKAttributes.PositionConstraints.KeyboardRelation.Offset) + case unbind + public var isBound: Swift.Bool { + get + } + } + public struct Rotation { + public enum SupportedInterfaceOrientation { + case standard + case all + public static func == (a: HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation, b: HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public var isEnabled: Swift.Bool + public var supportedInterfaceOrientations: HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation + public init() + } + public var rotation: HHSDKVideo.EKAttributes.PositionConstraints.Rotation + public var keyboardRelation: HHSDKVideo.EKAttributes.PositionConstraints.KeyboardRelation + public var size: HHSDKVideo.EKAttributes.PositionConstraints.Size + public var maxSize: HHSDKVideo.EKAttributes.PositionConstraints.Size + public var verticalOffset: CoreGraphics.CGFloat + public var safeArea: HHSDKVideo.EKAttributes.PositionConstraints.SafeArea + public var hasVerticalOffset: Swift.Bool { + get + } + public static var float: HHSDKVideo.EKAttributes.PositionConstraints { + get + } + public static var fullWidth: HHSDKVideo.EKAttributes.PositionConstraints { + get + } + public static var fullScreen: HHSDKVideo.EKAttributes.PositionConstraints { + get + } + public init(verticalOffset: CoreGraphics.CGFloat = 0, size: 
HHSDKVideo.EKAttributes.PositionConstraints.Size = .sizeToWidth, maxSize: HHSDKVideo.EKAttributes.PositionConstraints.Size = .intrinsic) + } +} +extension EKAttributes { + public enum Precedence { + public struct Priority : Swift.Hashable, Swift.Equatable, Swift.RawRepresentable, Swift.Comparable { + public var rawValue: Swift.Int + public var hashValue: Swift.Int { + get + } + public init(_ rawValue: Swift.Int) + public init(rawValue: Swift.Int) + public static func == (lhs: HHSDKVideo.EKAttributes.Precedence.Priority, rhs: HHSDKVideo.EKAttributes.Precedence.Priority) -> Swift.Bool + public static func < (lhs: HHSDKVideo.EKAttributes.Precedence.Priority, rhs: HHSDKVideo.EKAttributes.Precedence.Priority) -> Swift.Bool + public typealias RawValue = Swift.Int + } + public enum QueueingHeuristic { + public static var value: HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic + case chronological + case priority + public static func == (a: HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic, b: HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + case override(priority: HHSDKVideo.EKAttributes.Precedence.Priority, dropEnqueuedEntries: Swift.Bool) + case enqueue(priority: HHSDKVideo.EKAttributes.Precedence.Priority) + public var priority: HHSDKVideo.EKAttributes.Precedence.Priority { + get + set + } + } +} +extension EKAttributes.Precedence.Priority { + public static let maxRawValue: Swift.Int + public static let highRawValue: Swift.Int + public static let normalRawValue: Swift.Int + public static let lowRawValue: Swift.Int + public static let minRawValue: Swift.Int + public static let max: HHSDKVideo.EKAttributes.Precedence.Priority + public static let high: HHSDKVideo.EKAttributes.Precedence.Priority + public static let normal: HHSDKVideo.EKAttributes.Precedence.Priority + public static let low: HHSDKVideo.EKAttributes.Precedence.Priority + public 
static let min: HHSDKVideo.EKAttributes.Precedence.Priority +} +extension EKAttributes { + public static var `default`: HHSDKVideo.EKAttributes + public static var toast: HHSDKVideo.EKAttributes { + get + } + public static var float: HHSDKVideo.EKAttributes { + get + } + public static var topFloat: HHSDKVideo.EKAttributes { + get + } + public static var bottomFloat: HHSDKVideo.EKAttributes { + get + } + public static var centerFloat: HHSDKVideo.EKAttributes { + get + } + public static var bottomToast: HHSDKVideo.EKAttributes { + get + } + public static var topToast: HHSDKVideo.EKAttributes { + get + } + public static var topNote: HHSDKVideo.EKAttributes { + get + } + public static var bottomNote: HHSDKVideo.EKAttributes { + get + } + public static var statusBar: HHSDKVideo.EKAttributes { + get + } +} +extension EKAttributes { + public enum Scroll { + public struct PullbackAnimation { + public var duration: Foundation.TimeInterval + public var damping: CoreGraphics.CGFloat + public var initialSpringVelocity: CoreGraphics.CGFloat + public init(duration: Foundation.TimeInterval, damping: CoreGraphics.CGFloat, initialSpringVelocity: CoreGraphics.CGFloat) + public static var jolt: HHSDKVideo.EKAttributes.Scroll.PullbackAnimation { + get + } + public static var easeOut: HHSDKVideo.EKAttributes.Scroll.PullbackAnimation { + get + } + } + case disabled + case edgeCrossingDisabled(swipeable: Swift.Bool) + case enabled(swipeable: Swift.Bool, pullbackAnimation: HHSDKVideo.EKAttributes.Scroll.PullbackAnimation) + } +} +extension EKAttributes { + public enum Shadow { + case none + case active(with: HHSDKVideo.EKAttributes.Shadow.Value) + public struct Value { + public let radius: CoreGraphics.CGFloat + public let opacity: Swift.Float + public let color: HHSDKVideo.EKColor + public let offset: CoreGraphics.CGSize + public init(color: HHSDKVideo.EKColor = .black, opacity: Swift.Float, radius: CoreGraphics.CGFloat, offset: CoreGraphics.CGSize = .zero) + } + } +} +extension 
EKAttributes { + public enum StatusBar { + public typealias Appearance = (visible: Swift.Bool, style: UIKit.UIStatusBarStyle) + case ignored + case hidden + case dark + case light + case inferred + public var appearance: HHSDKVideo.EKAttributes.StatusBar.Appearance { + get + } + public static func statusBar(by appearance: HHSDKVideo.EKAttributes.StatusBar.Appearance) -> HHSDKVideo.EKAttributes.StatusBar + public static var currentAppearance: HHSDKVideo.EKAttributes.StatusBar.Appearance { + get + } + public static var currentStatusBar: HHSDKVideo.EKAttributes.StatusBar { + get + } + public static func == (a: HHSDKVideo.EKAttributes.StatusBar, b: HHSDKVideo.EKAttributes.StatusBar) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } +} +extension EKAttributes { + public struct UserInteraction { + public typealias Action = () -> () + public enum Default { + case absorbTouches + case delayExit(by: Foundation.TimeInterval) + case dismissEntry + case forward + } + public var defaultAction: HHSDKVideo.EKAttributes.UserInteraction.Default + public var customTapActions: [HHSDKVideo.EKAttributes.UserInteraction.Action] + public init(defaultAction: HHSDKVideo.EKAttributes.UserInteraction.Default = .absorbTouches, customTapActions: [HHSDKVideo.EKAttributes.UserInteraction.Action] = []) + public static var dismiss: HHSDKVideo.EKAttributes.UserInteraction { + get + } + public static var forward: HHSDKVideo.EKAttributes.UserInteraction { + get + } + public static var absorbTouches: HHSDKVideo.EKAttributes.UserInteraction { + get + } + public static func delayExit(by delay: Foundation.TimeInterval) -> HHSDKVideo.EKAttributes.UserInteraction + } +} +extension EKAttributes { + public enum WindowLevel { + case alerts + case statusBar + case normal + case custom(level: UIKit.UIWindow.Level) + public var value: UIKit.UIWindow.Level { + get + } + } +} +@objc final public class EKButtonBarView : UIKit.UIView { + @objc 
required dynamic public init?(coder aDecoder: Foundation.NSCoder) + public init(with buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent) + @objc override final public func layoutSubviews() + final public func expand() + final public func compress() + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public struct EKColor : Swift.Equatable { + public var dark: UIKit.UIColor { + get + } + public var light: UIKit.UIColor { + get + } + public init(light: UIKit.UIColor, dark: UIKit.UIColor) + public init(_ unified: UIKit.UIColor) + public init(rgb: Swift.Int) + public init(red: Swift.Int, green: Swift.Int, blue: Swift.Int) + public func color(for traits: UIKit.UITraitCollection, mode: HHSDKVideo.EKAttributes.DisplayMode) -> UIKit.UIColor + public static func == (a: HHSDKVideo.EKColor, b: HHSDKVideo.EKColor) -> Swift.Bool +} +extension EKColor { + public var inverted: HHSDKVideo.EKColor { + get + } + public func with(alpha: CoreGraphics.CGFloat) -> HHSDKVideo.EKColor + public static var white: HHSDKVideo.EKColor { + get + } + public static var black: HHSDKVideo.EKColor { + get + } + public static var clear: HHSDKVideo.EKColor { + get + } + public static var standardBackground: HHSDKVideo.EKColor { + get + } + public static var standardContent: HHSDKVideo.EKColor { + get + } +} +@objc final public class EKFormMessageView : UIKit.UIView { + public init(with title: HHSDKVideo.EKProperty.LabelContent, textFieldsContent: [HHSDKVideo.EKProperty.TextFieldContent], buttonContent: HHSDKVideo.EKProperty.ButtonContent) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + final public func becomeFirstResponder(with textFieldIndex: Swift.Int) + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKImageNoteMessageView : HHSDKVideo.EKAccessoryNoteMessageView { + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + public init(with content: HHSDKVideo.EKProperty.LabelContent, imageContent: HHSDKVideo.EKProperty.ImageContent) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKMessageContentView : UIKit.UIView { + public var titleContent: HHSDKVideo.EKProperty.LabelContent! { + get + set + } + public var subtitleContent: HHSDKVideo.EKProperty.LabelContent! { + get + set + } + public var titleAttributes: HHSDKVideo.EKProperty.LabelStyle! { + get + set + } + public var subtitleAttributes: HHSDKVideo.EKProperty.LabelStyle! { + get + set + } + public var title: Swift.String! { + get + set + } + public var subtitle: Swift.String! { + get + set + } + public var verticalMargins: CoreGraphics.CGFloat { + get + set + } + public var horizontalMargins: CoreGraphics.CGFloat { + get + set + } + public var labelsOffset: CoreGraphics.CGFloat { + get + set + } + @objc dynamic public init() + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKNoteMessageView : UIKit.UIView { + public var horizontalOffset: CoreGraphics.CGFloat { + get + set + } + public var verticalOffset: CoreGraphics.CGFloat { + get + set + } + public init(with content: HHSDKVideo.EKProperty.LabelContent) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public struct EKNotificationMessage { + public struct Insets { + public var contentInsets: UIKit.UIEdgeInsets + public var titleToDescription: CoreGraphics.CGFloat + public static var `default`: HHSDKVideo.EKNotificationMessage.Insets + } + public let simpleMessage: HHSDKVideo.EKSimpleMessage + public let auxiliary: HHSDKVideo.EKProperty.LabelContent? + public let insets: HHSDKVideo.EKNotificationMessage.Insets + public init(simpleMessage: HHSDKVideo.EKSimpleMessage, auxiliary: HHSDKVideo.EKProperty.LabelContent? = nil, insets: HHSDKVideo.EKNotificationMessage.Insets = .default) +} +@objc @_hasMissingDesignatedInitializers final public class EKNotificationMessageView : HHSDKVideo.EKSimpleMessageView { + public init(with message: HHSDKVideo.EKNotificationMessage) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc deinit +} +public struct EKPopUpMessage { + public typealias EKPopUpMessageAction = () -> () + public struct ThemeImage { + public enum Position { + case topToTop(offset: CoreGraphics.CGFloat) + case centerToTop(offset: CoreGraphics.CGFloat) + } + public var image: HHSDKVideo.EKProperty.ImageContent + public var position: HHSDKVideo.EKPopUpMessage.ThemeImage.Position + public init(image: HHSDKVideo.EKProperty.ImageContent, position: HHSDKVideo.EKPopUpMessage.ThemeImage.Position = .topToTop(offset: 40)) + } + public var themeImage: HHSDKVideo.EKPopUpMessage.ThemeImage? + public var title: HHSDKVideo.EKProperty.LabelContent + public var description: HHSDKVideo.EKProperty.LabelContent + public var button: HHSDKVideo.EKProperty.ButtonContent + public var action: HHSDKVideo.EKPopUpMessage.EKPopUpMessageAction + public init(themeImage: HHSDKVideo.EKPopUpMessage.ThemeImage? = nil, title: HHSDKVideo.EKProperty.LabelContent, description: HHSDKVideo.EKProperty.LabelContent, button: HHSDKVideo.EKProperty.ButtonContent, action: @escaping HHSDKVideo.EKPopUpMessage.EKPopUpMessageAction) +} +@objc final public class EKPopUpMessageView : UIKit.UIView { + public init(with message: HHSDKVideo.EKPopUpMessage) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKProcessingNoteMessageView : HHSDKVideo.EKAccessoryNoteMessageView { + public var isProcessing: Swift.Bool { + get + set + } + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + public init(with content: HHSDKVideo.EKProperty.LabelContent, activityIndicator: UIKit.UIActivityIndicatorView.Style) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public struct EKProperty { + public struct ButtonContent { + public typealias Action = () -> () + public var label: HHSDKVideo.EKProperty.LabelContent + public var backgroundColor: HHSDKVideo.EKColor + public var highlightedBackgroundColor: HHSDKVideo.EKColor + public var contentEdgeInset: CoreGraphics.CGFloat + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public var accessibilityIdentifier: Swift.String? + public var action: HHSDKVideo.EKProperty.ButtonContent.Action? + public init(label: HHSDKVideo.EKProperty.LabelContent, backgroundColor: HHSDKVideo.EKColor, highlightedBackgroundColor: HHSDKVideo.EKColor, contentEdgeInset: CoreGraphics.CGFloat = 5, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, accessibilityIdentifier: Swift.String? = nil, action: @escaping HHSDKVideo.EKProperty.ButtonContent.Action = {}) + public func backgroundColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + public func highlightedBackgroundColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + public func highlighedLabelColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + } + public struct LabelContent { + public var text: Swift.String + public var style: HHSDKVideo.EKProperty.LabelStyle + public var accessibilityIdentifier: Swift.String? + public init(text: Swift.String, style: HHSDKVideo.EKProperty.LabelStyle, accessibilityIdentifier: Swift.String? 
= nil) + } + public struct LabelStyle { + public var font: UIKit.UIFont + public var color: HHSDKVideo.EKColor + public var alignment: UIKit.NSTextAlignment + public var numberOfLines: Swift.Int + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public init(font: UIKit.UIFont, color: HHSDKVideo.EKColor, alignment: UIKit.NSTextAlignment = .left, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, numberOfLines: Swift.Int = 0) + public func color(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + } + public struct ImageContent { + public enum TransformAnimation { + case animate(duration: Foundation.TimeInterval, options: UIKit.UIView.AnimationOptions, transform: CoreGraphics.CGAffineTransform) + case none + } + public var tint: HHSDKVideo.EKColor? + public var images: [UIKit.UIImage] + public var imageSequenceAnimationDuration: Foundation.TimeInterval + public var size: CoreGraphics.CGSize? + public var contentMode: UIKit.UIView.ContentMode + public var makesRound: Swift.Bool + public var animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public var accessibilityIdentifier: Swift.String? + public init(imageName: Swift.String, animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation = .none, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, size: CoreGraphics.CGSize? = nil, contentMode: UIKit.UIView.ContentMode = .scaleToFill, tint: HHSDKVideo.EKColor? = nil, makesRound: Swift.Bool = false, accessibilityIdentifier: Swift.String? = nil) + public init(image: UIKit.UIImage, animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation = .none, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, size: CoreGraphics.CGSize? = nil, tint: HHSDKVideo.EKColor? = nil, contentMode: UIKit.UIView.ContentMode = .scaleToFill, makesRound: Swift.Bool = false, accessibilityIdentifier: Swift.String? 
= nil) + public init(images: [UIKit.UIImage], imageSequenceAnimationDuration: Foundation.TimeInterval = 1, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation = .none, size: CoreGraphics.CGSize? = nil, tint: HHSDKVideo.EKColor? = nil, contentMode: UIKit.UIView.ContentMode = .scaleToFill, makesRound: Swift.Bool = false, accessibilityIdentifier: Swift.String? = nil) + public init(imagesNames: [Swift.String], imageSequenceAnimationDuration: Foundation.TimeInterval = 1, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation = .none, size: CoreGraphics.CGSize? = nil, tint: HHSDKVideo.EKColor? = nil, contentMode: UIKit.UIView.ContentMode = .scaleToFill, makesRound: Swift.Bool = false, accessibilityIdentifier: Swift.String? = nil) + public static func thumb(with image: UIKit.UIImage, edgeSize: CoreGraphics.CGFloat) -> HHSDKVideo.EKProperty.ImageContent + public static func thumb(with imageName: Swift.String, edgeSize: CoreGraphics.CGFloat) -> HHSDKVideo.EKProperty.ImageContent + public func tintColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor? + } + public struct TextFieldContent { + weak public var delegate: UIKit.UITextFieldDelegate? + public var keyboardType: UIKit.UIKeyboardType + public var isSecure: Swift.Bool + public var leadingImage: UIKit.UIImage! + public var placeholder: HHSDKVideo.EKProperty.LabelContent + public var textStyle: HHSDKVideo.EKProperty.LabelStyle + public var tintColor: HHSDKVideo.EKColor! + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public var bottomBorderColor: HHSDKVideo.EKColor + public var accessibilityIdentifier: Swift.String? + public var textContent: Swift.String { + get + set + } + public init(delegate: UIKit.UITextFieldDelegate? 
= nil, keyboardType: UIKit.UIKeyboardType = .default, placeholder: HHSDKVideo.EKProperty.LabelContent, tintColor: HHSDKVideo.EKColor? = nil, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, textStyle: HHSDKVideo.EKProperty.LabelStyle, isSecure: Swift.Bool = false, leadingImage: UIKit.UIImage? = nil, bottomBorderColor: HHSDKVideo.EKColor = .clear, accessibilityIdentifier: Swift.String? = nil) + public func tintColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor? + public func bottomBorderColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor? + } + public struct ButtonBarContent { + public var content: [HHSDKVideo.EKProperty.ButtonContent] + public var separatorColor: HHSDKVideo.EKColor + public var horizontalDistributionThreshold: Swift.Int + public var expandAnimatedly: Swift.Bool + public var buttonHeight: CoreGraphics.CGFloat + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public init(with buttonContents: HHSDKVideo.EKProperty.ButtonContent..., separatorColor: HHSDKVideo.EKColor, horizontalDistributionThreshold: Swift.Int = 2, buttonHeight: CoreGraphics.CGFloat = 50, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, expandAnimatedly: Swift.Bool) + public init(with buttonContents: [HHSDKVideo.EKProperty.ButtonContent], separatorColor: HHSDKVideo.EKColor, horizontalDistributionThreshold: Swift.Int = 2, buttonHeight: CoreGraphics.CGFloat = 50, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, expandAnimatedly: Swift.Bool) + public func separatorColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + } + public struct EKRatingItemContent { + public var title: HHSDKVideo.EKProperty.LabelContent + public var description: HHSDKVideo.EKProperty.LabelContent + public var unselectedImage: HHSDKVideo.EKProperty.ImageContent + public var selectedImage: HHSDKVideo.EKProperty.ImageContent + public var size: CoreGraphics.CGSize + public init(title: 
HHSDKVideo.EKProperty.LabelContent, description: HHSDKVideo.EKProperty.LabelContent, unselectedImage: HHSDKVideo.EKProperty.ImageContent, selectedImage: HHSDKVideo.EKProperty.ImageContent, size: CoreGraphics.CGSize = CGSize(width: 50, height: 50)) + } +} +public struct EKRatingMessage { + public typealias Selection = (Swift.Int) -> Swift.Void + public var initialTitle: HHSDKVideo.EKProperty.LabelContent + public var initialDescription: HHSDKVideo.EKProperty.LabelContent + public var ratingItems: [HHSDKVideo.EKProperty.EKRatingItemContent] + public var buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent + public var selection: HHSDKVideo.EKRatingMessage.Selection! + public var selectedIndex: Swift.Int? { + get + set + } + public init(initialTitle: HHSDKVideo.EKProperty.LabelContent, initialDescription: HHSDKVideo.EKProperty.LabelContent, ratingItems: [HHSDKVideo.EKProperty.EKRatingItemContent], buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent, selection: HHSDKVideo.EKRatingMessage.Selection? 
= nil) +} +@objc final public class EKRatingMessageView : UIKit.UIView { + public init(with message: HHSDKVideo.EKRatingMessage) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc @_inheritsConvenienceInitializers final public class EKRatingSymbolsContainerView : UIKit.UIView { + final public func setup(with message: HHSDKVideo.EKRatingMessage, externalSelection: @escaping HHSDKVideo.EKRatingMessage.Selection) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +@objc final public class EKRatingSymbolView : UIKit.UIView { + final public var isSelected: Swift.Bool { + get + set + } + public init(unselectedImage: HHSDKVideo.EKProperty.ImageContent, selectedImage: HHSDKVideo.EKProperty.ImageContent, selection: @escaping HHSDKVideo.EKRatingMessage.Selection) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public struct EKSimpleMessage { + public let image: HHSDKVideo.EKProperty.ImageContent? + public let title: HHSDKVideo.EKProperty.LabelContent + public let description: HHSDKVideo.EKProperty.LabelContent + public init(image: HHSDKVideo.EKProperty.ImageContent? = nil, title: HHSDKVideo.EKProperty.LabelContent, description: HHSDKVideo.EKProperty.LabelContent) +} +@objc @_hasMissingDesignatedInitializers public class EKSimpleMessageView : UIKit.UIView { + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc final public class EKTextField : UIKit.UIView { + final public var text: Swift.String { + get + set + } + public init(with content: HHSDKVideo.EKProperty.TextFieldContent) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + final public func makeFirstResponder() + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKXStatusBarMessageView : UIKit.UIView { + public init(leading: HHSDKVideo.EKProperty.LabelContent, trailing: HHSDKVideo.EKProperty.LabelContent) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public func <- <T>(left: inout T, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: T, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: T?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout [T], right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: [T], right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout [T]?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: [T]?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout [Swift.String : T], right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: [Swift.String : T], right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout [Swift.String : T]?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: [Swift.String : T]?, right: 
HHSDKVideo.Map) where T : Swift.RawRepresentable +open class EnumTransform<T> : HHSDKVideo.TransformType where T : Swift.RawRepresentable { + public typealias Object = T + public typealias JSON = T.RawValue + public init() + open func transformFromJSON(_ value: Any?) -> T? + open func transformToJSON(_ value: T?) -> T.RawValue? + @objc deinit +} +final public class GCM : HHSDKVideo.BlockMode { + public enum Mode { + case combined + case detached + public static func == (a: HHSDKVideo.GCM.Mode, b: HHSDKVideo.GCM.Mode) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + final public let options: HHSDKVideo.BlockModeOption + public enum Error : Swift.Error { + case invalidInitializationVector + case fail + public static func == (a: HHSDKVideo.GCM.Error, b: HHSDKVideo.GCM.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + final public var authenticationTag: Swift.Array<Swift.UInt8>? + public init(iv: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? = nil, tagLength: Swift.Int = 16, mode: HHSDKVideo.GCM.Mode = .detached) + convenience public init(iv: Swift.Array<Swift.UInt8>, authenticationTag: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? = nil, mode: HHSDKVideo.GCM.Mode = .detached) + final public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker + @objc deinit +} +open class HexColorTransform : HHSDKVideo.TransformType { + public typealias Object = UIKit.UIColor + public typealias JSON = Swift.String + public init(prefixToJSON: Swift.Bool = false, alphaToJSON: Swift.Bool = false) + open func transformFromJSON(_ value: Any?) -> HHSDKVideo.HexColorTransform.Object? 
+ open func transformToJSON(_ value: HHSDKVideo.HexColorTransform.Object?) -> HHSDKVideo.HexColorTransform.JSON? + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHAppProtocolCheck : ObjectiveC.NSObject { + public static let instance: HHSDKVideo.HHAppProtocolCheck + @objc override dynamic public init() + public func showPrivacyDialog(content: Swift.String, userDoc: Swift.String, privateDoc: Swift.String, _ agreeBlock: ((Swift.Bool) -> Swift.Void)?) + @objc deinit +} +extension HHAppProtocolCheck : UIKit.UITextViewDelegate { + @objc dynamic public func textView(_ textView: UIKit.UITextView, shouldInteractWith URL: Foundation.URL, in characterRange: Foundation.NSRange, interaction: UIKit.UITextItemInteraction) -> Swift.Bool +} +extension Array { + public subscript(safe index: Swift.Int) -> Element? { + get + } +} +public struct HHBaseApi { +} +@propertyWrapper public struct ApiConfig { + public var wrappedValue: HHSDKVideo.HHBaseApi { + get + } + public init(path: Swift.String, method: HHSDKVideo.HHRequestMethod = .post, host: Swift.String = HHUrl.baseUrl(), domain: Swift.String = HHUrl.urlForFamily(), needUserInfo: Swift.Bool = true, needEncrypt: Swift.Bool = true, needDNS: Swift.Bool = true) +} +public typealias HHLoginHandler = ((Swift.String?) -> Swift.Void) +public var HMHudManager: HHSDKVideo.HHHUDable { + get +} +@_inheritsConvenienceInitializers @objc public class HHBaseSDK : ObjectiveC.NSObject { + @objc public static let `default`: HHSDKVideo.HHBaseSDK + public var dnsCallback: HHSDKVideo.HHDNSProtocal? + @objc public func start() + @objc public func login(userToken: Swift.String, completion: @escaping HHSDKVideo.HHLoginHandler) + @objc public func logout(_ callback: ((Swift.String?) -> Swift.Void)? 
= nil) + @objc override dynamic public init() + @objc deinit +} +@objc public enum HHBaseCallingState : Swift.Int { + case onStart = 0 + case waitingDoctor + case callFreeDoctor + case callConnect + case didRing + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc public protocol HHBaseVideoDelegate : ObjectiveC.NSObjectProtocol { + @objc func callStateChange(_ state: HHSDKVideo.HHBaseCallingState) + @objc optional func onStart(orderId: Swift.String?) + @objc func callDidEstablish() + @objc func getChatParentView(_ view: UIKit.UIView) + @objc func callFail(code: Swift.Int, error: Swift.String) + @objc func onFail(_ errorCode: Swift.Int, errrorStr: Swift.String?) + @objc func onCancel() + @objc func receivedOrder(_ orderId: Swift.String) + @objc func callDidFinish() + @objc func onExtensionDoctor() + @objc func onReceive(_ callID: Swift.String) + @objc func onResponse(_ accept: Swift.Bool) + @objc func onLeakPermission(_ type: HHSDKVideo.HHBasePermissionType) + @objc optional func onForceOffline() +} +@objc public protocol HHCallDelegate : ObjectiveC.NSObjectProtocol { + @objc optional func onCallStatus(_ error: Swift.Error?) + @objc optional func onCallSuccess() + @objc optional func callFinished() +} +@_inheritsConvenienceInitializers @objc public class HHCallerInfo : ObjectiveC.NSObject, HHSDKVideo.Mappable { + public var name: Swift.String? + public var photourl: Swift.String? + public var uuid: Swift.Int? + public var userToken: Swift.String? + @objc override dynamic public init() + required public init?(map: HHSDKVideo.Map) + public func mapping(map: HHSDKVideo.Map) + @objc deinit +} +public class HHCameraConfig { + weak public var sender: UIKit.UIViewController! + public var mediaType: HHSDKVideo.HHMediaType + public var isGrayCam: Swift.Bool + public var canReduce: Swift.Bool + public var autoUpload: Swift.Bool + public var maxCount: Swift.Int? 
+ public var crop: HHSDKVideo.onCropFinish? + public var canceled: HHSDKVideo.onCanceled? + public init() + public func build(_ block: (inout HHSDKVideo.HHCameraConfig) -> Swift.Void) -> HHSDKVideo.HHCameraConfig + @objc deinit +} +public let HHSDKScreenWidth: CoreGraphics.CGFloat +public let HHSDKScreenHeight: CoreGraphics.CGFloat +public let China_Flag: Swift.String +public struct HHDimens { + public static func isPad() -> Swift.Bool + public static func isPlus() -> Swift.Bool +} +public func HHColor(_ red: CoreGraphics.CGFloat, green: CoreGraphics.CGFloat, blue: CoreGraphics.CGFloat, alpha: CoreGraphics.CGFloat = 1.0) -> UIKit.UIColor +public func HHUISingleColor(_ value: CoreGraphics.CGFloat, alpha: CoreGraphics.CGFloat = 1.0) -> UIKit.UIColor +public func visibleWindow() -> UIKit.UIWindow? +public func imageWithColor(color: UIKit.UIColor) -> UIKit.UIImage? +public func delayFunc(_ time: Swift.Double, block: @escaping () -> Swift.Void) +public func appLanguage() -> Swift.String +public func isChina() -> Swift.Bool +@_hasMissingDesignatedInitializers public class HHDevice { + public static func isIphoneX() -> Swift.Bool + public static func botOffset() -> CoreGraphics.CGFloat + public static func tOffset() -> CoreGraphics.CGFloat + public class func isSml() -> Swift.Bool + public class func isMid() -> Swift.Bool + public class func isPlus() -> Swift.Bool + public class func isX() -> Swift.Bool + public static func iphoneType() -> Swift.String + @objc deinit +} +public typealias HHFetchBlock = (UIKit.UIImage?, [Swift.AnyHashable : Any]?) -> Swift.Void +public typealias onCanceled = (() -> Swift.Void) +public typealias onCapFinished = (([HHSDKVideo.SDKCameraImageModel]?) -> Swift.Void) +public typealias onCropFinish = (UIKit.UIImage, Swift.String?) 
-> Swift.Void +public enum HHMediaType : Swift.Int { + case cusCamera + case sysCamera + case cusVideo + case sysVideo + case photoImage + case photoVideo + case cusPhoto + case sysCrop + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +open class HHDataController<T> where T : HHSDKVideo.Mappable { + open var mData: T? + public init() + open func request(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) + open func emptyRequest(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) + open func noDataRequest(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) + open func request<E>(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: ((E) -> Swift.Void)?, fail: HHSDKVideo.HHNetError? = nil) where E : HHSDKVideo.Mappable + @objc deinit +} +extension Date { + public static func currentDate() -> Foundation.Date +} +public enum DateFormat : Swift.String { + case Full + case SingleDate + case Single + case WithoutSecond + case WithoutYearAndSecond + case HourMinute + case CN_Month_Day + case CN_Hour_Minute + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +@objc @_inheritsConvenienceInitializers public class HHDateUtils : ObjectiveC.NSObject { + public class func getDateForChinaStr() -> Swift.String + public static func stringWithDurationFromSeconds(_ seconds: Foundation.TimeInterval) -> Swift.String + public static func component(_ date: Foundation.Date) -> Foundation.DateComponents + @objc override dynamic public init() + @objc deinit +} +extension HHDateUtils { + public class func date2String(_ date: Foundation.Date, format: Swift.String) -> Swift.String + public class func date2String(_ date: Foundation.Date, format: HHSDKVideo.DateFormat) -> Swift.String +} 
+extension HHDateUtils { + public class func string2Date(_ str: Swift.String, format: HHSDKVideo.DateFormat) -> Foundation.Date? + public class func string2Date(_ str: Swift.String, format: Swift.String) -> Foundation.Date? +} +extension HHDateUtils { + public static func dateStringFromNow(_ date: Swift.Int) -> Swift.String + public static func dateStringFromInt(_ date: Swift.Int) -> Swift.String + public static func dateYearStringFromInt(_ date: Swift.Int) -> Swift.String +} +@objc @_inheritsConvenienceInitializers open class HHDeviceManager : ObjectiveC.NSObject { + public static func jailBrokend() -> Swift.Bool + @objc override dynamic public init() + @objc deinit +} +public protocol HHDNSProtocal { + func changeHost(_ hostDomain: Swift.String) -> Swift.String + func requestHost(_ host: Swift.String, challenge: Foundation.URLAuthenticationChallenge, completion: @escaping (Foundation.URLSession.AuthChallengeDisposition, Foundation.URLCredential?) -> Swift.Void) +} +public typealias HHPriceInfo = (priceAttri: Foundation.NSMutableAttributedString, disPriceWidth: CoreGraphics.CGFloat?) +public struct HHDoctorModel : HHSDKVideo.Mappable { + public init?(map: HHSDKVideo.Map) + public var agentUuid: Swift.String? + public var createtime: Swift.String? + public var department: Swift.String? + public var deptid: Swift.Int? + public var disease: Swift.String? + public var diseaseList: [Swift.String]? + public var doctorid: Swift.String? + public var expertStatus: Swift.String? + public var expertVideoTime: Swift.String? + public var famExpertVideoPrice: Swift.Float? + public var famServices: Swift.Int? + public var famprovidetypes: Swift.String? + public var hhTitle: Swift.String? + public var hospital: Swift.String? + public var hospitalid: Swift.Int? + public var introduction: Swift.String? + public var isTest: Swift.String? + public var login: HHSDKVideo.LoginModel? + public var workyear: Swift.Int? + public var name: Swift.String? + public var photourl: Swift.String? 
+ public var price: Swift.Float? + public var providetype: Swift.String? + public var province: Swift.String? + public var service: Swift.String? + public var serviceTypeStatus: Swift.String? + public var speciality: Swift.String? + public var standardDeptid: Swift.Int? + public var standardDeptname: Swift.String? + public var standardid: Swift.Int? + public var subdept: Swift.String? + public var subdeptids: Swift.String? + public var title: Swift.String? + public var titleid: Swift.Int? + public var vedioTimeList: Swift.String? + public var videoprice: Swift.Float? + public var license: Swift.String? + public init() + public mutating func mapping(map: HHSDKVideo.Map) + public func isJianzhi() -> Swift.Bool + public func supportType(type: HHSDKVideo.HHConsType) -> Swift.Bool + public func getPrice() -> HHSDKVideo.HHPriceInfo? + public func isZhuanke() -> Swift.Bool +} +public struct LoginModel : HHSDKVideo.Mappable { + public var actionSource: Swift.String? + public var loginname: Swift.String? + public var name: Swift.String? + public var photourl: Swift.String? + public var uuid: Swift.Int? + public var videoToken: Swift.String? + public var phoneno: Swift.String? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +public enum HHConsType : Swift.String { + case normal + case expert_video + case feiDao + case video + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +public struct HHEmptyModel : HHSDKVideo.Mappable { + public init() + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +@_hasMissingDesignatedInitializers public class HHEncryptUtils { + public static func encrypto(key: Swift.String, content: Swift.String) -> Swift.String? + public static func decrypto(key: Swift.String, content: Swift.String) -> Swift.String? 
+ public static func encrypto(key: Swift.String, content: Foundation.Data) -> Foundation.Data? + public static func decrypto(key: Swift.String, content: Foundation.Data) -> Foundation.Data? + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHFileCacheManager : ObjectiveC.NSObject { + public enum HHAssetPathType { + case image + case video + case sound + case dicom + case fb + case other + case dataBase + public static func == (a: HHSDKVideo.HHFileCacheManager.HHAssetPathType, b: HHSDKVideo.HHFileCacheManager.HHAssetPathType) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public enum HHFileFormat : Swift.String { + case Jpg + case Png + case Jpeg + case webp + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } + } + @objc override dynamic public init() + @objc deinit +} +extension HHFileCacheManager { + public class func getFileFormat(_ name: Swift.String) -> HHSDKVideo.HHFileCacheManager.HHAssetPathType + public class func createSoundFilePath(_ aPath: Swift.String) -> Swift.String + public class func createDBPath(_ aPath: Swift.String) -> Swift.String + public class func assetsCachePath(_ pathType: HHSDKVideo.HHFileCacheManager.HHAssetPathType) -> Swift.String + public class func createImageFilePath(_ format: HHSDKVideo.HHFileCacheManager.HHFileFormat = .Jpg) -> Swift.String + public class func createVideoFilePath() -> Swift.String + public class func isWriteCache(_ path: Swift.String?, data: Foundation.Data?) -> Swift.Bool + public class func isWriteCache(_ path: Swift.String?, image: UIKit.UIImage, quality: CoreGraphics.CGFloat = 1.0) -> Swift.Bool + public class func getFilePath(_ name: Swift.String) -> Swift.String? 
+} +extension HHFileCacheManager { + public static func saveString2File(_ string: Swift.String?, fileName: Swift.String) + public static func stringFromFile(_ fileName: Swift.String) -> Swift.String? +} +extension FileManager { + public func addSkipBackupAttributeToItemAtURL(_ url: Foundation.URL) -> Swift.Bool +} +public var uploadManager: HHSDKVideo.UploadQueue { + get +} +@_hasMissingDesignatedInitializers public class UploadQueue { + @discardableResult + public func upload(files: [Swift.String], config: HHSDKVideo.SDKUploadConfig) -> HHSDKVideo.HHFileUploadManager + public func cancelAll(_ finished: (() -> Swift.Void)? = nil) + @objc deinit +} +public class HHFileUploadManager { + public var mFileQueue: [Swift.String] + public var config: HHSDKVideo.SDKUploadConfig! + public var mTransFile: Swift.String? + public var isUploading: Swift.Bool + public init(files: [Swift.String], config: HHSDKVideo.SDKUploadConfig) + public func uploadFile(_ file: [Swift.String]) + public func cancalFiles(_ files: [Swift.String], cancelFinish: ((Swift.String) -> Swift.Void)? = nil) + public func cancelAll(_ finished: (() -> Swift.Void)? = nil) + @objc deinit +} +@objc public protocol HHHUDable { + @objc optional var autoDismissDuration: Foundation.TimeInterval { get } + @objc func showHUD() + @objc func dismissHUD() + @objc func showSuccess(_ message: Swift.String?) + @objc func showError(_ messgae: Swift.String?) + @objc optional func setDismissDuration(_ duraion: Foundation.TimeInterval) +} +extension HHHUDable { + public var autoDismissDuration: Foundation.TimeInterval { + get + } + public func setDismissDuration(_ duraion: Foundation.TimeInterval) +} +@objc public protocol HHIM { + @objc func register(_ cerName: Swift.String?) + @objc func login(_ completion: ((Swift.String?) -> Swift.Void)?) + @objc func autoLogin(_ completion: ((Swift.String?) -> Swift.Void)?) + @objc func logout(_ callback: ((Swift.String?) -> Swift.Void)?) 
+ @objc func canVideo() -> Swift.Bool +} +public struct HHInviteDocModel : HHSDKVideo.Mappable { + public var orderId: Swift.String? + public var channelId: Swift.UInt64? + public var doctorId: Swift.String? + public var imageUrl: Swift.String? + public var signalingType: Swift.String? + public var width: CoreGraphics.CGFloat + public var height: CoreGraphics.CGFloat + public init?(map: HHSDKVideo.Map) + public init(_ info: HHSDKVideo.HHNetCallChatInfo, meetId: Swift.UInt64?) + public func isWhiteBoard() -> Swift.Bool + public func isMultyCall() -> Swift.Bool + public mutating func mapping(map: HHSDKVideo.Map) +} +@objc public enum HHLogMode : Swift.Int { + case error = 0 + case warn = 1 + case info = 2 + case debug = 3 + case verbose = 4 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +public func logging(type: HHSDKVideo.HHLogMode = .info, _ tip: Swift.String) +@objc @_inheritsConvenienceInitializers open class HHMediaStatusCheckUtils : ObjectiveC.NSObject { + open class func checkCameraAccess() -> Swift.Bool + open class func checkCameraVideoPermission() -> Swift.Bool + open class func checkAlbumAccess() -> Swift.Bool + open class func checkAudioAccess() -> Swift.Bool + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers @_hasMissingDesignatedInitializers open class HHMedicNetObserver : ObjectiveC.NSObject { + public static let sharedInstance: HHSDKVideo.HHMedicNetObserver + open func createReachability() + open func currentInWifi() -> Swift.Bool + open func haveNetWork() -> Swift.Bool + @objc deinit +} +@_hasMissingDesignatedInitializers public class HHMedicPhotoPicker { + public static func openCamera(config: HHSDKVideo.HHCameraConfig, capFinished: HHSDKVideo.onCapFinished? 
= nil) + public static func reduceImages(paths: [Swift.String], finished: @escaping (([Swift.String]) -> Swift.Void)) + public class func changeAvatar(vc: UIKit.UIViewController, reference: UIKit.UIView? = nil, uuid: Swift.Int, imgClosure: @escaping (UIKit.UIImage) -> Swift.Void, keyClosure: @escaping (Swift.String) -> Swift.Void) + @objc deinit +} +extension HHMedicPhotoPicker { + public static func checkPermisstion(_ type: HHSDKVideo.HHBasePermissionType, authorized: (() -> Swift.Void)?, others: ((HHSDKVideo.HHBasePermissionType) -> Swift.Void)?) + public static func converSize(_ size: CoreGraphics.CGSize) -> CoreGraphics.CGSize +} +extension HHMedicPhotoPicker : HHSDKVideo.HHPhotoPickerManagerDelegate { + public func selectImage(_ selectedImages: [UIKit.UIImage]) + public func cancelImage() + public func selectImageRequestError(_ errorAssets: [Photos.PHAsset], errorIndexs: [Swift.Int]) +} +@objc @_inheritsConvenienceInitializers @_hasMissingDesignatedInitializers public class HHNeedRealNameView : UIKit.UIView { + public var realNameLinkClourse: (() -> ())? 
+ @objc deinit +} +@_hasMissingDesignatedInitializers public class HHNetCallChatInfo { + public init() + @objc deinit +} +@objc public enum HHCallType : Swift.Int { + case child = 600000 + case adult = 600002 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +public enum HHServerType { + case pay + case pacs + case weixin + public static func == (a: HHSDKVideo.HHServerType, b: HHSDKVideo.HHServerType) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +public enum HHRequestMethod { + case get + case post + public static func == (a: HHSDKVideo.HHRequestMethod, b: HHSDKVideo.HHRequestMethod) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +public let HH_RELOGIN_NOTIFICATION_STR: Swift.String +public struct HHRequestData { + public init(body: [Swift.String : Any] = ["default_sw":"default"], param: [Swift.String : Any] = ["default_sw":"default"]) + public var mHttpBody: [Swift.String : Any] + public var mParameters: [Swift.String : Any] +} +@_hasMissingDesignatedInitializers public class HHNetFetch { + public static func request<E>(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: ((E) -> Swift.Void)?, fail: HHSDKVideo.HHNetError? = nil) where E : HHSDKVideo.Mappable + public static func requestArray<E>(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: (([E]?) -> Swift.Void)?, fail: HHSDKVideo.HHNetError? = nil) where E : HHSDKVideo.Mappable + public static func noDataRequest(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) + public static func emptyRequest(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) 
+ @objc deinit +} +extension UIControl.State : Swift.Hashable { + public var hashValue: Swift.Int { + get + } +} +@objc @_inheritsConvenienceInitializers @_hasMissingDesignatedInitializers @IBDesignable public class HHPagerView : UIKit.UIView, UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegate { + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public func layoutSubviews() + @objc override dynamic public func willMove(toWindow newWindow: UIKit.UIWindow?) + @objc override dynamic public func prepareForInterfaceBuilder() + @objc deinit + @objc public func numberOfSections(in collectionView: UIKit.UICollectionView) -> Swift.Int + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, shouldHighlightItemAt indexPath: Foundation.IndexPath) -> Swift.Bool + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, didHighlightItemAt indexPath: Foundation.IndexPath) + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, shouldSelectItemAt indexPath: Foundation.IndexPath) -> Swift.Bool + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, didSelectItemAt indexPath: Foundation.IndexPath) + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, willDisplay cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, didEndDisplaying cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) + @objc public func scrollViewDidScroll(_ scrollView: UIKit.UIScrollView) + @objc public func scrollViewWillBeginDragging(_ scrollView: 
UIKit.UIScrollView) + @objc public func scrollViewWillEndDragging(_ scrollView: UIKit.UIScrollView, withVelocity velocity: CoreGraphics.CGPoint, targetContentOffset: Swift.UnsafeMutablePointer<CoreGraphics.CGPoint>) + @objc public func scrollViewDidEndDecelerating(_ scrollView: UIKit.UIScrollView) + @objc public func scrollViewDidEndScrollingAnimation(_ scrollView: UIKit.UIScrollView) +} +@objc public enum HHPagerViewTransformerType : Swift.Int { + case crossFading + case zoomOut + case depth + case overlap + case linear + case coverFlow + case ferrisWheel + case invertedFerrisWheel + case cubic + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@_hasMissingDesignatedInitializers public class UrlParams { + public static func addUserParams(_ parameters: [Swift.String : Any]?) -> [Swift.String : Any]? + public static func addCommon(_ param: [Swift.String : Any]?) -> [Swift.String : Any] + public static func param2String(param: [Swift.String : Any]? = nil) -> Swift.String + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHPhotoPickerController : UIKit.UINavigationController { + @objc override dynamic public func viewDidLoad() + convenience public init(localPath: Swift.String? = nil, deleteMode: Swift.Bool = false, finish: (([HHSDKVideo.SDKCameraImageModel]?) -> Swift.Void)? = nil) + @objc deinit + @available(iOS 5.0, *) + @objc override dynamic public init(navigationBarClass: Swift.AnyClass?, toolbarClass: Swift.AnyClass?) + @objc override dynamic public init(rootViewController: UIKit.UIViewController) + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) 
+ @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) +} +public protocol HHPhotoPickerManagerDelegate { + func selectImage(_ selectedImages: [UIKit.UIImage]) + func cancelImage() + func selectImageRequestError(_ errorAssets: [Photos.PHAsset], errorIndexs: [Swift.Int]) +} +@objc public class HHPhotoPickerManager : ObjectiveC.NSObject { + public var viewDelegate: HHSDKVideo.HHPhotoPickerManagerDelegate? + public var photoConfigModel: HHSDKVideo.HHPhotoConfigModel + public var photoUIConfigModel: HHSDKVideo.HHPhotoUIConfigModel + required public init(showVC: UIKit.UIViewController) + public func showImagePicker() + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHPhotoConfigModel : ObjectiveC.NSObject { + public var maxPreviewCount: Swift.Int + public var maxSelectCount: Swift.Int { + get + set + } + public var minVideoSelectCount: Swift.Int { + get + set + } + public var maxVideoSelectCount: Swift.Int { + get + set + } + public var minSelectVideoDuration: Swift.Int + public var maxSelectVideoDuration: Swift.Int + public var cellCornerRadio: CoreGraphics.CGFloat + public var languageType: HHSDKVideo.ZLLanguageType { + get + set + } + public var columnCount: Swift.Int { + get + set + } + public var sortAscending: Swift.Bool + public var allowSelectImage: Swift.Bool + public var allowTakePhotoInLibrary: Swift.Bool + public var allowSelectOriginal: Swift.Bool + public var allowSelectGif: Swift.Bool + public var allowSelectVideo: Swift.Bool + public var allowSelectLivePhoto: Swift.Bool + public var allowEditImage: Swift.Bool + public var allowMixSelect: Swift.Bool + public var allowPreviewPhotos: Swift.Bool + public var editImageWithDraw: Swift.Bool + public var editImageWithClip: Swift.Bool + public var editImageWithImageSticker: Swift.Bool + public var editImageWithTextSticker: Swift.Bool + public var editImageWithMosaic: Swift.Bool + public var editImageWithFilter: Swift.Bool + public 
var editImageWithAdjust: Swift.Bool + public var editImageWitAdjustBrightness: Swift.Bool + public var editImageWitAdjustContrast: Swift.Bool + public var editImageWitAdjustSaturation: Swift.Bool + public var shouldAnialysisAsset: Swift.Bool + public var allowEditVideo: Swift.Bool { + get + set + } + public var saveNewImageAfterEdit: Swift.Bool + public var allowDragSelect: Swift.Bool + public var allowSlideSelect: Swift.Bool + public var autoScrollWhenSlideSelectIsActive: Swift.Bool + public var autoScrollMaxSpeed: CoreGraphics.CGFloat + public var showCaptureImageOnTakePhotoBtn: Swift.Bool + public var showSelectedIndex: Swift.Bool + public var showSelectedMask: Swift.Bool + public var showSelectedBorder: Swift.Bool + public var showInvalidMask: Swift.Bool + public var useCustomCamera: Swift.Bool + public var flashMode: HHSDKVideo.ZLCameraConfiguration.FlashMode + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHPhotoUIConfigModel : ObjectiveC.NSObject { + public var style: HHSDKVideo.ZLPhotoBrowserStyle + public var bottomToolViewBtnNormalBgColor: UIKit.UIColor + public var bottomToolViewBtnNormalBgColorOfPreviewVC: UIKit.UIColor + @objc public var indexLabelBgColor: UIKit.UIColor + @objc override dynamic public init() + @objc deinit +} +public class HHProgressHUD : HHSDKVideo.HHHUDable { + public init() + @objc public func showHUD() + @objc public func dismissHUD() + @objc public func showError(_ messgae: Swift.String?) + @objc public func showSuccess(_ message: Swift.String?) + public func hhMessageTips(message: Swift.String?) + @objc deinit +} +public struct HHGetQuesetionModel : HHSDKVideo.Mappable { + public var question: HHSDKVideo.HHQuesetionModel? + public var rate: [HHSDKVideo.rateModel]? 
+ public init?(map: HHSDKVideo.Map) + public init() + public mutating func mapping(map: HHSDKVideo.Map) + public func isHaveQ() -> Swift.Bool +} +public struct HHQuesetionModel : HHSDKVideo.Mappable { + public var answerOne: Swift.String? + public var answerTwo: Swift.String? + public var content: Swift.String? + public var id: Swift.Int? + public init?(map: HHSDKVideo.Map) + public init() + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct rateModel : HHSDKVideo.Mappable { + public var createTime: Swift.Int? + public var content: Swift.String? + public var state: Swift.Int? + public var id: Swift.Int? + public var answerOne: Swift.String? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +@objc public enum HHRealNameType : Swift.Int { + case normal, buyMedic + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc @_inheritsConvenienceInitializers public class HHRealNameInputNewView : UIKit.UIView { + @objc @IBOutlet weak public var idCardTF: UIKit.UITextField! + public class func createRealNameInputNewView(realNameType: HHSDKVideo.HHRealNameType, hideNickName: Swift.Bool = false) -> HHSDKVideo.HHRealNameInputNewView + public func showErroTip(tip: Swift.String) + public func getInpuValues() -> [Swift.String : Swift.String]? + @objc override dynamic public func awakeFromNib() + public func load(userModel: HHSDKVideo.HHUserModel?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +extension HHRealNameInputNewView : UIKit.UITextFieldDelegate { + @objc dynamic public func textField(_ textField: UIKit.UITextField, shouldChangeCharactersIn range: Foundation.NSRange, replacementString string: Swift.String) -> Swift.Bool + @objc dynamic public func textFieldDidBeginEditing(_ textField: UIKit.UITextField) +} +@_inheritsConvenienceInitializers @objc public class HHRealNameInputView : UIKit.UIView { + public var nickName: Swift.String { + get + set + } + public class func createRealNameInputView(realNameType: HHSDKVideo.HHRealNameType) -> HHSDKVideo.HHRealNameInputView + public var showPassPort: Swift.Bool { + get + set + } + public func showErroTip(tip: Swift.String) + public func getInpuValues() -> [Swift.String : Swift.String]? + @objc override dynamic public func awakeFromNib() + public func load(userModel: HHSDKVideo.HHUserModel?) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +extension HHRealNameInputView : UIKit.UITextFieldDelegate { + @objc dynamic public func textField(_ textField: UIKit.UITextField, shouldChangeCharactersIn range: Foundation.NSRange, replacementString string: Swift.String) -> Swift.Bool +} +public let KeyNetErrorStr: Swift.String +public let KeyNoNetErrorStr: Swift.String +public typealias HHNetError = ((Swift.String) -> (Swift.Void)) +public typealias HHNetSuccessNoData = () -> Swift.Void +public typealias NetResult = (Swift.Bool, Swift.String) -> (Swift.Void) +public class HHRequest<T> where T : HHSDKVideo.Mappable { + public typealias HHNetSuccess = (T) -> Swift.Void + public typealias HHNetSuccessForArray = ([T]?) -> Swift.Void + public var mRequestFail: HHSDKVideo.HHNetError? + public var mRequestSuccess: HHSDKVideo.HHRequest<T>.HHNetSuccess? 
+ public var mRequestSuccessNoData: HHSDKVideo.HHNetSuccessNoData? + public var mRequestSuccessForArray: HHSDKVideo.HHRequest<T>.HHNetSuccessForArray? + public var errorCode: Swift.Int? + public var mApi: HHSDKVideo.HHBaseApi? + required public init(api: HHSDKVideo.HHBaseApi, requestData: HHSDKVideo.HHRequestData? = nil, postData: Foundation.Data? = nil) + public func start() + public func cancel() + @objc deinit +} +extension HHRequest { + public func startForArray(_ successCallBack: @escaping HHSDKVideo.HHRequest<T>.HHNetSuccessForArray, failCallBack: @escaping HHSDKVideo.HHNetError) +} +@objc public protocol HHRTC { + @objc optional func setOrderId(orderId: Swift.String) + @objc optional func startCall(callee: Swift.String, orderId: Swift.String?) + @objc optional func enterRoom(orderId: Swift.String) + @objc optional func switchLocalAudio(_ isOpen: Swift.Bool) + @objc optional func switchLocalVideo(_ isOpen: Swift.Bool, localView: UIKit.UIView) + @objc optional func openDoctorView(userId: Swift.String, view: UIKit.UIView) + @objc optional func closeDoctorView(userId: Swift.String) + @objc optional func switchCamera(_ isFront: Swift.Bool) + @objc optional func switchCameraFlash(_ isOpen: Swift.Bool) + @objc optional func sendMsg(isSignal: Swift.Bool, cmd: Swift.String, to: Swift.String, complete: ((Swift.String?) -> Swift.Void)?) 
+ @objc optional func leaveRoom() + @objc optional func hangUp(callId: Swift.UInt64) + @objc optional func startRing(audioId: Swift.Int) + @objc optional func stopRing() + @objc optional func snapshotVideo(userId: Swift.String?, imageBack: @escaping (UIKit.UIImage) -> ()) +} +public protocol HHRTCDelegate : ObjectiveC.NSObject { + func onEnterRoom() + func checkHasAccept(_ isCmd: Swift.Bool, volumn: Swift.Int) + func switchVideo(_ isToAudio: Swift.Bool) + func onOtherViewAvailable(_ availableUserId: Swift.String, isAvailable: Swift.Bool) + func onRemoteUserEnterRoom(_ userId: Swift.String) + func onRemoteUserLeaveRoom(_ userId: Swift.String) + func sendRTCLog(action: HHSDKVideo.TrtcLog, ex: Swift.String) + func esdablishByRTC(error: HHSDKVideo.TrtcError, reason: Swift.String) + func processMsg(cmd: HHSDKVideo.HHIMCmd, orderId: Swift.String, uuid: Swift.String) + func waitingChanged(_ waitingInfo: HHSDKVideo.HHWaitDoctorModel) + func waitingSuccess(_ doctorInfo: HHSDKVideo.HHDoctorModel, orderId: Swift.String) + func onTransform(_ transInfo: HHSDKVideo.HHWaitDoctorModel) + func onExitRoom() + func hangup() + func getDoctorUserId() -> Swift.String? 
+ func resumeRemote() + func onFirstVideoFrame(_ userId: Swift.String?, width: Swift.Int32, height: Swift.Int32) +} +public enum TrtcLog : Swift.String { + case waitingRecall + case missMessage + case ignoreCall + case enterError + case doctorJoinRoom + case micDidReady + case netQuality + case signalError + case killEror + case netDown + case joinSuccess + case schedule + case noSchedule + case video_busy + case permit_error + case transform + case camera_close + case camera_open + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +public enum TrtcError : Swift.String { + case callTimeOut + case rtcError + case enterRoomFail + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +@_inheritsConvenienceInitializers @objc public class HHSDKBaseOptions : ObjectiveC.NSObject { + @objc public static let `default`: HHSDKVideo.HHSDKBaseOptions + @objc public var isDebug: Swift.Bool + @objc public var isDevelopment: Swift.Bool + @objc public var isSDK: Swift.Bool + @objc public var isTRTC: Swift.Bool + @objc public var sdkProductId: Swift.String + @objc public var appVersion: Swift.String + @objc public var needDNS: Swift.Bool + public var hudManager: HHSDKVideo.HHHUDable + @objc public var sdkVersion: Swift.String + @objc public var hudDisTime: Swift.Double { + @objc get + @objc set + } + @objc public func setConfig(_ sdkProductId: Swift.String, isDebug: Swift.Bool, isDevelopment: Swift.Bool, isTrtc: Swift.Bool, needDNS: Swift.Bool = false) + @objc override dynamic public init() + @objc deinit +} +@objc public protocol OptionProtocal { + @objc var hudDisTime: Foundation.TimeInterval { get set } + @objc var isDebug: Swift.Bool { get set } + @objc var isDevelopment: Swift.Bool { get set } + @objc var hudManager: HHSDKVideo.HHHUDable { get set } + @objc var productId: Swift.String { get set } + @objc var cerName: 
Swift.String? { get set } + @objc var logLevel: HHSDKVideo.HHLogMode { get set } + @objc var mExtension: Swift.String { get set } + @objc var changeDoctorTime: Swift.Int { get set } + @objc var logCallback: ((Swift.String) -> Swift.Void)? { get set } + @objc var mVideoOptions: HHSDKVideo.VideoOptions { get set } + @objc var mMessageOptions: HHSDKVideo.MessageOptions { get set } + @objc var mUserCenterOptions: HHSDKVideo.UsercenterOptions { get set } + @objc var sdkVersion: Swift.String { get set } + @objc var appVersion: Swift.String { get set } + @objc var isTRTC: Swift.Bool { get set } + @objc var needDNS: Swift.Bool { get set } + @objc var shouldWaingCall: Swift.Bool { get set } +} +public var HMDefaultOpt: HHSDKVideo.OptionProtocal { + get +} +@_inheritsConvenienceInitializers @objc public class VideoOptions : ObjectiveC.NSObject { + public var filterCallerInfo: Swift.Bool + @objc public var allowBeauty: Swift.Bool + @objc public var allowEvaluate: Swift.Bool + @objc public var allowAddMember: Swift.Bool + @objc public var allowMulti: Swift.Bool + public var mCallExtension: Swift.String + @objc public var isShowDocInfo: Swift.Bool + @objc public var enableCloseCamera: Swift.Bool + @objc public var isCloseCameraCall: Swift.Bool + @objc override dynamic public init() + @objc deinit +} +@_inheritsConvenienceInitializers @objc public class MessageOptions : ObjectiveC.NSObject { + @objc public var isByPresent: Swift.Bool + @objc public var isFilterSummary: Swift.Bool + @objc public var isFilterMedicinal: Swift.Bool + @objc public var defaultDocHeader: Swift.String + @objc public var defaultDocName: Swift.String + @objc public var messageTitle: Swift.String + @objc override dynamic public init() + @objc deinit +} +@_inheritsConvenienceInitializers @objc public class UsercenterOptions : ObjectiveC.NSObject { + @objc public var enableVipInfo: Swift.Bool + @objc public var hideUserCenter: Swift.Bool + @objc public var enableActivate: Swift.Bool + @objc public var 
enableMedical: Swift.Bool + @objc public var enableAddMemberInDoc: Swift.Bool + @objc public var enableBuyService: Swift.Bool + @objc public var hideNickName: Swift.Bool + @objc public var enablePopRealName: Swift.Bool + @objc public var isCloseMoreFunc: Swift.Bool + @objc override dynamic public init() + @objc deinit +} +@_hasMissingDesignatedInitializers public class HHStatics { + public static let `default`: HHSDKVideo.HHStatics + public func send(params: [Swift.String : Any]) + @objc deinit +} +public struct CommonApi { +} +extension String { + public func subFrom(_ index: Swift.Int) -> Swift.String + public func subTo(_ index: Swift.Int) -> Swift.String +} +extension String { + public func urlEncode() -> Swift.String + public func stringByAppendingPathComponent(_ pathComponent: Swift.String) -> Swift.String + public func hh_sha1() -> Swift.String + public func string2base64String() -> Swift.String + public func base64String2String() -> Swift.String + public var lastPathComponent: Swift.String { + get + } + public var pathExtension: Swift.String { + get + } +} +public enum hhToastPosition { + case top + case center + case bottom + public static func == (a: HHSDKVideo.hhToastPosition, b: HHSDKVideo.hhToastPosition) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +extension UIView { + public func hhmakeToast(_ message: Swift.String) + public func hhmakeToast(_ message: Swift.String, duration: Foundation.TimeInterval, position: HHSDKVideo.hhToastPosition) + public func hhmakeToast(_ message: Swift.String, duration: Foundation.TimeInterval, position: CoreGraphics.CGPoint) + public func hhmakeToast(_ message: Swift.String, duration: Foundation.TimeInterval, position: HHSDKVideo.hhToastPosition, style: HHSDKVideo.hhToastStyle?) + public func hhmakeToast(_ message: Swift.String, duration: Foundation.TimeInterval, position: CoreGraphics.CGPoint, style: HHSDKVideo.hhToastStyle?) 
+ public func hhmakeToast(_ message: Swift.String?, duration: Foundation.TimeInterval, position: HHSDKVideo.hhToastPosition, title: Swift.String?, image: UIKit.UIImage?, style: HHSDKVideo.hhToastStyle?, completion: ((Swift.Bool) -> Swift.Void)?) + public func hhmakeToast(_ message: Swift.String?, duration: Foundation.TimeInterval, position: CoreGraphics.CGPoint, title: Swift.String?, image: UIKit.UIImage?, style: HHSDKVideo.hhToastStyle?, completion: ((Swift.Bool) -> Swift.Void)?) + public func hhshowToast(_ toast: UIKit.UIView) + public func hhshowToast(_ toast: UIKit.UIView, duration: Foundation.TimeInterval, position: HHSDKVideo.hhToastPosition, completion: ((Swift.Bool) -> Swift.Void)?) + public func hhshowToast(_ toast: UIKit.UIView, duration: Foundation.TimeInterval, position: CoreGraphics.CGPoint, completion: ((Swift.Bool) -> Swift.Void)?) + public func hhmakeToastActivity(_ position: HHSDKVideo.hhToastPosition) + public func hhmakeToastActivity(_ position: CoreGraphics.CGPoint) + public func hhhideToastActivity() + @objc dynamic public func hhhandleToastTapped(_ recognizer: UIKit.UITapGestureRecognizer) + @objc dynamic public func hhtoastTimerDidFinish(_ timer: Foundation.Timer) + public func hhtoastViewForMessage(_ message: Swift.String?, title: Swift.String?, image: UIKit.UIImage?, style: HHSDKVideo.hhToastStyle) throws -> UIKit.UIView +} +public struct hhToastStyle { + public init() + public var backgroundColor: UIKit.UIColor + public var titleColor: UIKit.UIColor + public var messageColor: UIKit.UIColor + public var maxWidthPercentage: CoreGraphics.CGFloat { + get + set + } + public var maxHeightPercentage: CoreGraphics.CGFloat { + get + set + } + public var horizontalPadding: CoreGraphics.CGFloat + public var verticalPadding: CoreGraphics.CGFloat + public var cornerRadius: CoreGraphics.CGFloat + public var titleFont: UIKit.UIFont + public var messageFont: UIKit.UIFont + public var titleAlignment: UIKit.NSTextAlignment + public var messageAlignment: 
UIKit.NSTextAlignment + public var titleNumberOfLines: Swift.Int + public var messageNumberOfLines: Swift.Int + public var displayShadow: Swift.Bool + public var shadowColor: UIKit.UIColor + public var shadowOpacity: Swift.Float { + get + set + } + public var shadowRadius: CoreGraphics.CGFloat + public var shadowOffset: CoreGraphics.CGSize + public var imageSize: CoreGraphics.CGSize + public var activitySize: CoreGraphics.CGSize + public var fadeDuration: Swift.Double +} +extension UIAlertController { + public func showAlter() + public func present(animated: Swift.Bool, completion: (() -> Swift.Void)?) + public func addAlterActions(_ actions: [UIKit.UIAlertAction]) + public func alterMessageStyle(_ fonsize: CoreGraphics.CGFloat = (HHDimens.isPad()) ? 18 : 16) + public static func closeAlert(_ title: Swift.String = "", msg: Swift.String = "", keyString: Swift.String = "取消", closeBlock: (() -> Swift.Void)? = nil) -> UIKit.UIAlertController +} +extension UIButton { + public func centerImageTitleVertically(spacing: CoreGraphics.CGFloat = 2) + public func imageTitleHorizonal(spacing: CoreGraphics.CGFloat = 2) +} +extension UIImage { + public func rotatedBy(_ degrees: CoreGraphics.CGFloat) -> UIKit.UIImage +} +extension UIImageView { + public func hh_image(url: Foundation.URL?) + public func hh_image(url: Foundation.URL?, complete: ((UIKit.UIImage?, Foundation.URL?) -> Swift.Void)?) + public func hh_image(url: Foundation.URL?, placeHolder: UIKit.UIImage?) + public func hh_image(url: Foundation.URL?, placeHolder: UIKit.UIImage?, progresses: ((CoreGraphics.CGFloat) -> Swift.Void)?, complete: ((UIKit.UIImage?, Foundation.URL?) -> Swift.Void)?) +} +public func hh_downloadImg(_ url: Foundation.URL?, finish: @escaping ((UIKit.UIImage?, Foundation.Data?, Swift.Error?) -> Swift.Void)) +extension UIViewController { + public func hhAddCloseBtn(_ atLeft: Swift.Bool? 
= nil, isDismiss: Swift.Bool = true, title: Swift.String = "关闭") + @objc dynamic public func hhCloseThisController() + @objc dynamic public func hhPopThisController() + public func setNavigationTheme() + public func setNaviBackImg(navi: UIKit.UINavigationController?, color: UIKit.UIColor) + public func imageFromColor(color: UIKit.UIColor, size: CoreGraphics.CGSize) -> UIKit.UIImage +} +extension UIView { + public var sj_width: CoreGraphics.CGFloat { + get + set + } + public var sj_height: CoreGraphics.CGFloat { + get + set + } + public var sj_size: CoreGraphics.CGSize { + get + set + } + public var sj_origin: CoreGraphics.CGPoint { + get + set + } + public var sj_x: CoreGraphics.CGFloat { + get + set + } + public var sj_y: CoreGraphics.CGFloat { + get + set + } + public var sj_centerX: CoreGraphics.CGFloat { + get + set + } + public var sj_centerY: CoreGraphics.CGFloat { + get + set + } + public var sj_top: CoreGraphics.CGFloat { + get + set + } + public var sj_bottom: CoreGraphics.CGFloat { + get + set + } + public var sj_right: CoreGraphics.CGFloat { + get + set + } + public var sj_left: CoreGraphics.CGFloat { + get + set + } +} +extension UIView { + public class func viewFromNib<T>(_ aClass: T.Type, frameworkPath: Swift.String) -> T +} +public typealias onSDKProgress = ((CoreGraphics.CGFloat, Swift.String) -> Swift.Void) +public typealias onSDKUploadOnce = ((Swift.Bool, HHSDKVideo.SDKUploadModel) -> Swift.Void) +public typealias onSDKFinished = (() -> Swift.Void) +public class SDKUploadConfig { + public var progress: HHSDKVideo.onSDKProgress? + public var uploadOnce: HHSDKVideo.onSDKUploadOnce? + public var finished: HHSDKVideo.onSDKFinished? + public var orderId: Swift.String? + public init() + @objc deinit +} +public class SDKUploadModel { + public var clouldKey: Swift.String? + public var filePath: Swift.String? { + get + set + } + public var smallImage: Swift.String + public var state: HHSDKVideo.SDKUploadState? 
+ public init() + public init(full: Swift.String?, scale: Swift.String) + public init(clouldKey: Swift.String?, filePath: Swift.String?, uploadTime: Foundation.TimeInterval?, name: Swift.String?, smallImage: Swift.String) + @objc deinit +} +@_hasMissingDesignatedInitializers public class SDKUploadState { + public var file: Swift.String? + public var isSelect: Swift.Bool + public var changed: (() -> Swift.Void)? + public var progress: Swift.Float { + get + set + } + public func isSuccess() -> Swift.Bool + public func isFail() -> Swift.Bool + @objc deinit +} +@_hasMissingDesignatedInitializers public class HHUrl { + public static func domains() -> [Swift.String] + public static var timeOffset: Swift.Double + public static func urlForPay() -> Swift.String + public static func urlForFamily() -> Swift.String + public static func urlForWeixin() -> Swift.String + public static func baseUrl() -> Swift.String + public static func basePayUrl() -> Swift.String + public static func baseMedicUrl() -> Swift.String + public static func baseSecUrl() -> Swift.String + public static func testURL() -> Swift.String + public static func fileLogUrl(_ name: Swift.String, orderId: Swift.String) -> Foundation.URL + public static func expertDetailUrl(expertId: Swift.String) -> Swift.String + public static func buyVIPUrl() -> Swift.String + public static func productRightUrl() -> Swift.String + @objc deinit +} +extension HHUrl { + public static func headers(host: Swift.String) -> [Swift.String : Swift.String] +} +public func languagePrefix() -> Swift.String +@_hasMissingDesignatedInitializers public class HHUserDefaults { + public class func setString(_ str: Swift.String, key: Swift.String) + public class func stringValue(_ key: Swift.String) -> Swift.String? + public class func setArray(_ array: [Swift.AnyObject], key: Swift.String) + public class func arrayForKey(_ key: Swift.String) -> [Swift.AnyObject]? 
+ public class func setImage(_ image: UIKit.UIImage, key: Swift.String) + public class func imageForKey(_ key: Swift.String) -> UIKit.UIImage? + @objc deinit +} +extension HHUserDefaults { + public class func setBool(_ flag: Swift.Bool, key: Swift.String) + public class func boolForKey(_ key: Swift.String) -> Swift.Bool + public class func setObject(_ obj: Swift.AnyObject, key: Swift.String) + public class func objectForKey(_ key: Swift.String) -> Swift.AnyObject? + public class func removeObject(_ key: Swift.String) +} +extension HHUserDefaults { + public class func setData(_ data: Foundation.Data?, key: Swift.String) + public class func dataForKey(_ key: Swift.String) -> Foundation.Data? + public class func userDefaults() -> Foundation.UserDefaults + public class func synchronize() + public class func encryptkey(_ key: Swift.String) -> Swift.String +} +public struct HHMemberInfoModel : HHSDKVideo.Mappable { + public var productStatusDescn: Swift.String? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +public class HHUserModel : HHSDKVideo.Mappable { + public var age: Swift.String? + public var companyLogo: Swift.String? + public var birthday: Swift.Int64? + public var loginname: Swift.String? + public var name: Swift.String? + public var photourl: Swift.String? + public var pid: Swift.Int? + public var product: HHSDKVideo.HHMemberInfoModel? + public var relation: Swift.String? + public var sex: Swift.String? + public var uuid: Swift.Int? + public var userToken: Swift.String? + public var videoToken: Swift.String? + public var auth: Swift.Bool? + public var isMember: Swift.Bool? + public var isAccount: Swift.Bool? + public var license: Swift.String? + public var userSig: Swift.String? + public var phoneNum: Swift.String? 
+ required public init?(map: HHSDKVideo.Map) + public init() + public func mapping(map: HHSDKVideo.Map) + @objc deinit +} +public struct HHUserProtocolModel : HHSDKVideo.Mappable { + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +open class HHValueObservable<T> { + public typealias Observer = (T) -> Swift.Void + open var observer: HHSDKVideo.HHValueObservable<T>.Observer? + open func observe(_ observer: HHSDKVideo.HHValueObservable<T>.Observer?) + open var value: T { + get + set + } + public init(_ v: T) + @objc deinit +} +@_inheritsConvenienceInitializers @objc public class HHVideoLocation : ObjectiveC.NSObject { + @objc public static let `default`: HHSDKVideo.HHVideoLocation + @objc public func startLocation(lng: Swift.String, lat: Swift.String) + @objc public func closeLocation() + @objc override dynamic public init() + @objc deinit +} +@_inheritsConvenienceInitializers @objc public class HHVideoSDK : ObjectiveC.NSObject { + @objc public static let `default`: HHSDKVideo.HHVideoSDK + public var mHHRTC: HHSDKVideo.HHRTC? + public var mSDKOption: HHSDKVideo.OptionProtocal? + weak public var mCallDelegate: HHSDKVideo.HHCallDelegate? + weak public var mHHRTCDelegate: HHSDKVideo.HHRTCDelegate? + weak public var videoManager: HHSDKVideo.HHBaseVideoDelegate? + public var expertVideoCallback: (() -> Swift.Void)? + public var autoLoginCheck: (() -> Swift.Void)? + public var onReceiveNewMsg: (([Swift.String : Any]) -> Swift.Void)? + public var userProtocolModel: HHSDKVideo.HHUserProtocolModel? + @objc public var photosPreview: ((Swift.Array<Swift.String>) -> Swift.Void)? + @objc public func start(option: HHSDKVideo.OptionProtocal, im: HHSDKVideo.HHIM, rtc: HHSDKVideo.HHRTC) + @objc public func login(userToken: Swift.String, completion: @escaping HHSDKVideo.HHLoginHandler) + @objc public func autoLogin(uuid: Swift.Int, completion: @escaping HHSDKVideo.HHLoginHandler) + @objc public func logout(_ callback: ((Swift.String?) 
-> Swift.Void)? = nil) + @objc public func terminate() + @objc public func setAlipayHook(alipayCallback: @escaping (Swift.String, Swift.String, @escaping (([Swift.String : Any]) -> Swift.Void)) -> Swift.Bool) + @objc override dynamic public init() + @objc deinit +} +extension HHVideoSDK { + @objc dynamic public func startCall(_ type: HHSDKVideo.HHCallType = .adult, scene: Swift.String? = nil, callDelegate: HHSDKVideo.HHCallDelegate? = nil) + @objc dynamic public func startNewCall(_ uuid: Swift.Int, type: HHSDKVideo.HHCallType = .adult, callDelegate: HHSDKVideo.HHCallDelegate? = nil) + @objc dynamic public func startCall(_ uuid: Swift.Int, scene: Swift.String? = nil, type: HHSDKVideo.HHCallType = .adult, callDelegate: HHSDKVideo.HHCallDelegate? = nil) +} +extension HHVideoSDK { + @objc dynamic public func startTeamCall(_ type: HHSDKVideo.HHCallType, callee: HHSDKVideo.HHCallerInfo, callDelegate: HHSDKVideo.HHCallDelegate? = nil) +} +extension HHVideoSDK { + @objc dynamic public func call(_ memberToken: Swift.String, scene: Swift.String? = nil) +} +extension HHVideoSDK { + public func waitExpert(userToken: Swift.String, callOrderId: Swift.String) +} +extension HHVideoSDK { + @objc dynamic public func startMemberCall(needSelectMember: Swift.Bool = true) +} +extension HHVideoSDK { + @objc dynamic public func skipChatHome(isByPresent: Swift.Bool = false, vc: UIKit.UIViewController? = nil) + @objc dynamic public func skipChatHome(_ nav: UIKit.UINavigationController) + @objc dynamic public func chatHomeVC() -> UIKit.UIViewController? +} +extension HHVideoSDK { + public func sendBaseLog(ex: [Swift.String : Swift.String]? = nil, action: [Swift.String : Swift.String]? = nil) +} +public func topviewController() -> UIKit.UIViewController? 
+extension HHVideoSDK { + @objc dynamic public func loginForThirdId(_ thirdInfo: [Swift.String : Any], completion: @escaping HHSDKVideo.HHLoginHandler) +} +extension HHVideoSDK { + public func checkProtocolUpdate(agreeBlock: ((Swift.Bool) -> Swift.Void)?) +} +extension HHVideoSDK { + @objc dynamic public func getMedicDetail(userToken: Swift.String, medicId: Swift.String) -> Swift.String + @objc dynamic public func getMedicList(userToken: Swift.String) -> Swift.String + @objc dynamic public func getAllMedics(userToken: Swift.String) -> Swift.String +} +extension HHVideoSDK { + @objc dynamic public func onKickedOffline() +} +public struct HHWaitDoctorModel : HHSDKVideo.Mappable { + public var isNormalTrans: Swift.Bool + public var deptId: Swift.String? + public var uuid: Swift.Int? + public var transUuid: Swift.Int? + public init?(map: HHSDKVideo.Map) + public func isWaiting() -> Swift.Bool + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct HHWaitingCallModel : HHSDKVideo.Mappable { + public init?(map: HHSDKVideo.Map) + public func isCall() -> Swift.Bool + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct HHAgentCallModel : HHSDKVideo.Mappable { + public init?(map: HHSDKVideo.Map) + public func isAgent() -> Swift.Bool + public func isTransform() -> Swift.Bool + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct HKDF { + public enum Error : Swift.Error { + case invalidInput + case derivedKeyTooLong + public static func == (a: HHSDKVideo.HKDF.Error, b: HHSDKVideo.HKDF.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(password: Swift.Array<Swift.UInt8>, salt: Swift.Array<Swift.UInt8>? = nil, info: Swift.Array<Swift.UInt8>? = nil, keyLength: Swift.Int? 
= nil, variant: HHSDKVideo.HMAC.Variant = .sha256) throws + public func calculate() throws -> Swift.Array<Swift.UInt8> +} +final public class HMAC : HHSDKVideo.CryptoAuthenticator { + public enum Error : Swift.Error { + case authenticateError + case invalidInput + public static func == (a: HHSDKVideo.HMAC.Error, b: HHSDKVideo.HMAC.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public enum Variant { + case sha1, sha256, sha384, sha512, md5 + public static func == (a: HHSDKVideo.HMAC.Variant, b: HHSDKVideo.HMAC.Variant) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(key: Swift.Array<Swift.UInt8>, variant: HHSDKVideo.HMAC.Variant = .md5) + final public func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension HMAC { + convenience public init(key: Swift.String, variant: HHSDKVideo.HMAC.Variant = .md5) throws +} +public protocol ImmutableMappable : HHSDKVideo.BaseMappable { + init(map: HHSDKVideo.Map) throws +} +extension ImmutableMappable { + public func mapping(map: HHSDKVideo.Map) + public init(JSONString: Swift.String, context: HHSDKVideo.MapContext? = nil) throws + public init(JSON: [Swift.String : Any], context: HHSDKVideo.MapContext? = nil) throws + public init(JSONObject: Any, context: HHSDKVideo.MapContext? = nil) throws +} +extension Map { + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T + final public func value<Transform>(_ key: Swift.String, nested: Swift.Bool? 
= nil, delimiter: Swift.String = ".", using transform: Transform, file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> Transform.Object where Transform : HHSDKVideo.TransformType + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T where T : Swift.RawRepresentable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T? where T : Swift.RawRepresentable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [T] where T : Swift.RawRepresentable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [T]? where T : Swift.RawRepresentable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T? where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? 
= nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [T] where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [T]? where T : HHSDKVideo.BaseMappable + final public func value<Transform>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", using transform: Transform, file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [Transform.Object] where Transform : HHSDKVideo.TransformType + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [Swift.String : T] where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [Swift.String : T]? where T : HHSDKVideo.BaseMappable + final public func value<Transform>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", using transform: Transform, file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [Swift.String : Transform.Object] where Transform : HHSDKVideo.TransformType + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [[T]]? where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? 
= nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [[T]] where T : HHSDKVideo.BaseMappable + final public func value<Transform>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", using transform: Transform, file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [[Transform.Object]] where Transform : HHSDKVideo.TransformType +} +extension Mapper where N : HHSDKVideo.ImmutableMappable { + final public func map(JSON: [Swift.String : Any]) throws -> N + final public func map(JSONString: Swift.String) throws -> N + final public func map(JSONObject: Any) throws -> N + final public func mapArray(JSONArray: [[Swift.String : Any]]) throws -> [N] + final public func mapArray(JSONString: Swift.String) throws -> [N] + final public func mapArray(JSONObject: Any) throws -> [N] + final public func mapDictionary(JSONString: Swift.String) throws -> [Swift.String : N] + final public func mapDictionary(JSONObject: Any?) throws -> [Swift.String : N] + final public func mapDictionary(JSON: [Swift.String : [Swift.String : Any]]) throws -> [Swift.String : N] + final public func mapDictionaryOfArrays(JSONObject: Any?) throws -> [Swift.String : [N]] + final public func mapDictionaryOfArrays(JSON: [Swift.String : [[Swift.String : Any]]]) throws -> [Swift.String : [N]] + final public func mapArrayOfArrays(JSONObject: Any?) 
throws -> [[N]] +} +public func <- <T>(left: inout T, right: HHSDKVideo.Map) where T : Swift.SignedInteger +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) where T : Swift.SignedInteger +public func <- <T>(left: inout T, right: HHSDKVideo.Map) where T : Swift.UnsignedInteger +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) where T : Swift.UnsignedInteger +extension DateFormatter { + convenience public init(withFormat format: Swift.String, locale: Swift.String) +} +open class ISO8601DateTransform : HHSDKVideo.DateFormatterTransform { + public init() + override public init(dateFormatter: Foundation.DateFormatter) + @objc deinit +} +public let KeychainAccessErrorDomain: Swift.String +public enum ItemClass { + case genericPassword + case internetPassword +} +public enum ProtocolType { + case ftp + case ftpAccount + case http + case irc + case nntp + case pop3 + case smtp + case socks + case imap + case ldap + case appleTalk + case afp + case telnet + case ssh + case ftps + case https + case httpProxy + case httpsProxy + case ftpProxy + case smb + case rtsp + case rtspProxy + case daap + case eppc + case ipp + case nntps + case ldaps + case telnetS + case imaps + case ircs + case pop3S +} +public enum AuthenticationType { + case ntlm + case msn + case dpa + case rpa + case httpBasic + case httpDigest + case htmlForm + case `default` +} +public enum Accessibility { + case whenUnlocked + case afterFirstUnlock + case always + @available(iOS 8.0, macOS 10.10, *) + case whenPasscodeSetThisDeviceOnly + case whenUnlockedThisDeviceOnly + case afterFirstUnlockThisDeviceOnly + case alwaysThisDeviceOnly +} +public struct AuthenticationPolicy : Swift.OptionSet { + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + public static let userPresence: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let touchIDAny: HHSDKVideo.AuthenticationPolicy + 
@available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let touchIDCurrentSet: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, macOS 10.11, *) + @available(watchOS, unavailable) + public static let devicePasscode: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let or: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let and: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let privateKeyUsage: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let applicationPassword: HHSDKVideo.AuthenticationPolicy + public let rawValue: Swift.UInt + public init(rawValue: Swift.UInt) + public typealias ArrayLiteralElement = HHSDKVideo.AuthenticationPolicy + public typealias Element = HHSDKVideo.AuthenticationPolicy + public typealias RawValue = Swift.UInt +} +public struct Attributes { + public var `class`: Swift.String? { + get + } + public var data: Foundation.Data? { + get + } + public var ref: Foundation.Data? { + get + } + public var persistentRef: Foundation.Data? { + get + } + public var accessible: Swift.String? { + get + } + public var accessControl: Security.SecAccessControl? { + get + } + public var accessGroup: Swift.String? { + get + } + public var synchronizable: Swift.Bool? { + get + } + public var creationDate: Foundation.Date? { + get + } + public var modificationDate: Foundation.Date? { + get + } + public var attributeDescription: Swift.String? { + get + } + public var comment: Swift.String? { + get + } + public var creator: Swift.String? { + get + } + public var type: Swift.String? { + get + } + public var label: Swift.String? 
{ + get + } + public var isInvisible: Swift.Bool? { + get + } + public var isNegative: Swift.Bool? { + get + } + public var account: Swift.String? { + get + } + public var service: Swift.String? { + get + } + public var generic: Foundation.Data? { + get + } + public var securityDomain: Swift.String? { + get + } + public var server: Swift.String? { + get + } + public var `protocol`: Swift.String? { + get + } + public var authenticationType: Swift.String? { + get + } + public var port: Swift.Int? { + get + } + public var path: Swift.String? { + get + } + public subscript(key: Swift.String) -> Any? { + get + } +} +@_hasMissingDesignatedInitializers final public class Keychain { + final public var itemClass: HHSDKVideo.ItemClass { + get + } + final public var service: Swift.String { + get + } + final public var accessGroup: Swift.String? { + get + } + final public var server: Foundation.URL { + get + } + final public var protocolType: HHSDKVideo.ProtocolType { + get + } + final public var authenticationType: HHSDKVideo.AuthenticationType { + get + } + final public var accessibility: HHSDKVideo.Accessibility { + get + } + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + final public var authenticationPolicy: HHSDKVideo.AuthenticationPolicy? { + get + } + final public var synchronizable: Swift.Bool { + get + } + final public var label: Swift.String? { + get + } + final public var comment: Swift.String? { + get + } + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + final public var authenticationPrompt: Swift.String? { + get + } + @available(iOS 9.0, macOS 10.11, *) + final public var authenticationContext: LocalAuthentication.LAContext? 
{ + get + } + convenience public init() + convenience public init(service: Swift.String) + convenience public init(accessGroup: Swift.String) + convenience public init(service: Swift.String, accessGroup: Swift.String) + convenience public init(server: Swift.String, protocolType: HHSDKVideo.ProtocolType, authenticationType: HHSDKVideo.AuthenticationType = .default) + convenience public init(server: Foundation.URL, protocolType: HHSDKVideo.ProtocolType, authenticationType: HHSDKVideo.AuthenticationType = .default) + final public func accessibility(_ accessibility: HHSDKVideo.Accessibility) -> HHSDKVideo.Keychain + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + final public func accessibility(_ accessibility: HHSDKVideo.Accessibility, authenticationPolicy: HHSDKVideo.AuthenticationPolicy) -> HHSDKVideo.Keychain + final public func synchronizable(_ synchronizable: Swift.Bool) -> HHSDKVideo.Keychain + final public func label(_ label: Swift.String) -> HHSDKVideo.Keychain + final public func comment(_ comment: Swift.String) -> HHSDKVideo.Keychain + final public func attributes(_ attributes: [Swift.String : Any]) -> HHSDKVideo.Keychain + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + final public func authenticationPrompt(_ authenticationPrompt: Swift.String) -> HHSDKVideo.Keychain + @available(iOS 9.0, macOS 10.11, *) + final public func authenticationContext(_ authenticationContext: LocalAuthentication.LAContext) -> HHSDKVideo.Keychain + final public func get(_ key: Swift.String) throws -> Swift.String? + final public func getString(_ key: Swift.String) throws -> Swift.String? + final public func getData(_ key: Swift.String) throws -> Foundation.Data? + final public func get<T>(_ key: Swift.String, handler: (HHSDKVideo.Attributes?) 
-> T) throws -> T + final public func set(_ value: Swift.String, key: Swift.String) throws + final public func set(_ value: Foundation.Data, key: Swift.String) throws + final public subscript(key: Swift.String) -> Swift.String? { + get + set + } + final public subscript(string key: Swift.String) -> Swift.String? { + get + set + } + final public subscript(data key: Swift.String) -> Foundation.Data? { + get + set + } + final public subscript(attributes key: Swift.String) -> HHSDKVideo.Attributes? { + get + } + final public func remove(_ key: Swift.String) throws + final public func removeAll() throws + final public func contains(_ key: Swift.String) throws -> Swift.Bool + final public class func allKeys(_ itemClass: HHSDKVideo.ItemClass) -> [(Swift.String, Swift.String)] + final public func allKeys() -> [Swift.String] + final public class func allItems(_ itemClass: HHSDKVideo.ItemClass) -> [[Swift.String : Any]] + final public func allItems() -> [[Swift.String : Any]] + @available(iOS 8.0, *) + final public func getSharedPassword(_ completion: @escaping (Swift.String?, Swift.String?, Swift.Error?) -> () = { account, password, error -> () in }) + @available(iOS 8.0, *) + final public func getSharedPassword(_ account: Swift.String, completion: @escaping (Swift.String?, Swift.Error?) -> () = { password, error -> () in }) + @available(iOS 8.0, *) + final public func setSharedPassword(_ password: Swift.String, account: Swift.String, completion: @escaping (Swift.Error?) -> () = { e -> () in }) + @available(iOS 8.0, *) + final public func removeSharedPassword(_ account: Swift.String, completion: @escaping (Swift.Error?) -> () = { e -> () in }) + @available(iOS 8.0, *) + final public class func requestSharedWebCredential(_ completion: @escaping ([[Swift.String : Swift.String]], Swift.Error?) 
-> () = { credentials, error -> () in }) + @available(iOS 8.0, *) + final public class func requestSharedWebCredential(domain: Swift.String, completion: @escaping ([[Swift.String : Swift.String]], Swift.Error?) -> () = { credentials, error -> () in }) + @available(iOS 8.0, *) + final public class func requestSharedWebCredential(domain: Swift.String, account: Swift.String, completion: @escaping ([[Swift.String : Swift.String]], Swift.Error?) -> () = { credentials, error -> () in }) + @available(iOS 8.0, *) + final public class func generatePassword() -> Swift.String + @objc deinit +} +extension Keychain : Swift.CustomStringConvertible, Swift.CustomDebugStringConvertible { + final public var description: Swift.String { + get + } + final public var debugDescription: Swift.String { + get + } +} +extension Attributes : Swift.CustomStringConvertible, Swift.CustomDebugStringConvertible { + public var description: Swift.String { + get + } + public var debugDescription: Swift.String { + get + } +} +extension ItemClass : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init?(rawValue: Swift.String) + public var rawValue: Swift.String { + get + } + public var description: Swift.String { + get + } + public typealias RawValue = Swift.String +} +extension ProtocolType : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init?(rawValue: Swift.String) + public var rawValue: Swift.String { + get + } + public var description: Swift.String { + get + } + public typealias RawValue = Swift.String +} +extension AuthenticationType : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init?(rawValue: Swift.String) + public var rawValue: Swift.String { + get + } + public var description: Swift.String { + get + } + public typealias RawValue = Swift.String +} +extension Accessibility : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init?(rawValue: Swift.String) + public var rawValue: Swift.String { + get + } + public var 
description: Swift.String { + get + } + public typealias RawValue = Swift.String +} +public enum Status : Darwin.OSStatus, Swift.Error { + case success + case unimplemented + case diskFull + case io + case opWr + case param + case wrPerm + case allocate + case userCanceled + case badReq + case internalComponent + case notAvailable + case readOnly + case authFailed + case noSuchKeychain + case invalidKeychain + case duplicateKeychain + case duplicateCallback + case invalidCallback + case duplicateItem + case itemNotFound + case bufferTooSmall + case dataTooLarge + case noSuchAttr + case invalidItemRef + case invalidSearchRef + case noSuchClass + case noDefaultKeychain + case interactionNotAllowed + case readOnlyAttr + case wrongSecVersion + case keySizeNotAllowed + case noStorageModule + case noCertificateModule + case noPolicyModule + case interactionRequired + case dataNotAvailable + case dataNotModifiable + case createChainFailed + case invalidPrefsDomain + case inDarkWake + case aclNotSimple + case policyNotFound + case invalidTrustSetting + case noAccessForItem + case invalidOwnerEdit + case trustNotAvailable + case unsupportedFormat + case unknownFormat + case keyIsSensitive + case multiplePrivKeys + case passphraseRequired + case invalidPasswordRef + case invalidTrustSettings + case noTrustSettings + case pkcs12VerifyFailure + case invalidCertificate + case notSigner + case policyDenied + case invalidKey + case decode + case `internal` + case unsupportedAlgorithm + case unsupportedOperation + case unsupportedPadding + case itemInvalidKey + case itemInvalidKeyType + case itemInvalidValue + case itemClassMissing + case itemMatchUnsupported + case useItemListUnsupported + case useKeychainUnsupported + case useKeychainListUnsupported + case returnDataUnsupported + case returnAttributesUnsupported + case returnRefUnsupported + case returnPersitentRefUnsupported + case valueRefUnsupported + case valuePersistentRefUnsupported + case returnMissingPointer + case 
matchLimitUnsupported + case itemIllegalQuery + case waitForCallback + case missingEntitlement + case upgradePending + case mpSignatureInvalid + case otrTooOld + case otrIDTooNew + case serviceNotAvailable + case insufficientClientID + case deviceReset + case deviceFailed + case appleAddAppACLSubject + case applePublicKeyIncomplete + case appleSignatureMismatch + case appleInvalidKeyStartDate + case appleInvalidKeyEndDate + case conversionError + case appleSSLv2Rollback + case quotaExceeded + case fileTooBig + case invalidDatabaseBlob + case invalidKeyBlob + case incompatibleDatabaseBlob + case incompatibleKeyBlob + case hostNameMismatch + case unknownCriticalExtensionFlag + case noBasicConstraints + case noBasicConstraintsCA + case invalidAuthorityKeyID + case invalidSubjectKeyID + case invalidKeyUsageForPolicy + case invalidExtendedKeyUsage + case invalidIDLinkage + case pathLengthConstraintExceeded + case invalidRoot + case crlExpired + case crlNotValidYet + case crlNotFound + case crlServerDown + case crlBadURI + case unknownCertExtension + case unknownCRLExtension + case crlNotTrusted + case crlPolicyFailed + case idpFailure + case smimeEmailAddressesNotFound + case smimeBadExtendedKeyUsage + case smimeBadKeyUsage + case smimeKeyUsageNotCritical + case smimeNoEmailAddress + case smimeSubjAltNameNotCritical + case sslBadExtendedKeyUsage + case ocspBadResponse + case ocspBadRequest + case ocspUnavailable + case ocspStatusUnrecognized + case endOfData + case incompleteCertRevocationCheck + case networkFailure + case ocspNotTrustedToAnchor + case recordModified + case ocspSignatureError + case ocspNoSigner + case ocspResponderMalformedReq + case ocspResponderInternalError + case ocspResponderTryLater + case ocspResponderSignatureRequired + case ocspResponderUnauthorized + case ocspResponseNonceMismatch + case codeSigningBadCertChainLength + case codeSigningNoBasicConstraints + case codeSigningBadPathLengthConstraint + case codeSigningNoExtendedKeyUsage + case 
codeSigningDevelopment + case resourceSignBadCertChainLength + case resourceSignBadExtKeyUsage + case trustSettingDeny + case invalidSubjectName + case unknownQualifiedCertStatement + case mobileMeRequestQueued + case mobileMeRequestRedirected + case mobileMeServerError + case mobileMeServerNotAvailable + case mobileMeServerAlreadyExists + case mobileMeServerServiceErr + case mobileMeRequestAlreadyPending + case mobileMeNoRequestPending + case mobileMeCSRVerifyFailure + case mobileMeFailedConsistencyCheck + case notInitialized + case invalidHandleUsage + case pvcReferentNotFound + case functionIntegrityFail + case internalError + case memoryError + case invalidData + case mdsError + case invalidPointer + case selfCheckFailed + case functionFailed + case moduleManifestVerifyFailed + case invalidGUID + case invalidHandle + case invalidDBList + case invalidPassthroughID + case invalidNetworkAddress + case crlAlreadySigned + case invalidNumberOfFields + case verificationFailure + case unknownTag + case invalidSignature + case invalidName + case invalidCertificateRef + case invalidCertificateGroup + case tagNotFound + case invalidQuery + case invalidValue + case callbackFailed + case aclDeleteFailed + case aclReplaceFailed + case aclAddFailed + case aclChangeFailed + case invalidAccessCredentials + case invalidRecord + case invalidACL + case invalidSampleValue + case incompatibleVersion + case privilegeNotGranted + case invalidScope + case pvcAlreadyConfigured + case invalidPVC + case emmLoadFailed + case emmUnloadFailed + case addinLoadFailed + case invalidKeyRef + case invalidKeyHierarchy + case addinUnloadFailed + case libraryReferenceNotFound + case invalidAddinFunctionTable + case invalidServiceMask + case moduleNotLoaded + case invalidSubServiceID + case attributeNotInContext + case moduleManagerInitializeFailed + case moduleManagerNotFound + case eventNotificationCallbackNotFound + case inputLengthError + case outputLengthError + case privilegeNotSupported + case 
deviceError + case attachHandleBusy + case notLoggedIn + case algorithmMismatch + case keyUsageIncorrect + case keyBlobTypeIncorrect + case keyHeaderInconsistent + case unsupportedKeyFormat + case unsupportedKeySize + case invalidKeyUsageMask + case unsupportedKeyUsageMask + case invalidKeyAttributeMask + case unsupportedKeyAttributeMask + case invalidKeyLabel + case unsupportedKeyLabel + case invalidKeyFormat + case unsupportedVectorOfBuffers + case invalidInputVector + case invalidOutputVector + case invalidContext + case invalidAlgorithm + case invalidAttributeKey + case missingAttributeKey + case invalidAttributeInitVector + case missingAttributeInitVector + case invalidAttributeSalt + case missingAttributeSalt + case invalidAttributePadding + case missingAttributePadding + case invalidAttributeRandom + case missingAttributeRandom + case invalidAttributeSeed + case missingAttributeSeed + case invalidAttributePassphrase + case missingAttributePassphrase + case invalidAttributeKeyLength + case missingAttributeKeyLength + case invalidAttributeBlockSize + case missingAttributeBlockSize + case invalidAttributeOutputSize + case missingAttributeOutputSize + case invalidAttributeRounds + case missingAttributeRounds + case invalidAlgorithmParms + case missingAlgorithmParms + case invalidAttributeLabel + case missingAttributeLabel + case invalidAttributeKeyType + case missingAttributeKeyType + case invalidAttributeMode + case missingAttributeMode + case invalidAttributeEffectiveBits + case missingAttributeEffectiveBits + case invalidAttributeStartDate + case missingAttributeStartDate + case invalidAttributeEndDate + case missingAttributeEndDate + case invalidAttributeVersion + case missingAttributeVersion + case invalidAttributePrime + case missingAttributePrime + case invalidAttributeBase + case missingAttributeBase + case invalidAttributeSubprime + case missingAttributeSubprime + case invalidAttributeIterationCount + case missingAttributeIterationCount + case 
invalidAttributeDLDBHandle + case missingAttributeDLDBHandle + case invalidAttributeAccessCredentials + case missingAttributeAccessCredentials + case invalidAttributePublicKeyFormat + case missingAttributePublicKeyFormat + case invalidAttributePrivateKeyFormat + case missingAttributePrivateKeyFormat + case invalidAttributeSymmetricKeyFormat + case missingAttributeSymmetricKeyFormat + case invalidAttributeWrappedKeyFormat + case missingAttributeWrappedKeyFormat + case stagedOperationInProgress + case stagedOperationNotStarted + case verifyFailed + case querySizeUnknown + case blockSizeMismatch + case publicKeyInconsistent + case deviceVerifyFailed + case invalidLoginName + case alreadyLoggedIn + case invalidDigestAlgorithm + case invalidCRLGroup + case certificateCannotOperate + case certificateExpired + case certificateNotValidYet + case certificateRevoked + case certificateSuspended + case insufficientCredentials + case invalidAction + case invalidAuthority + case verifyActionFailed + case invalidCertAuthority + case invaldCRLAuthority + case invalidCRLEncoding + case invalidCRLType + case invalidCRL + case invalidFormType + case invalidID + case invalidIdentifier + case invalidIndex + case invalidPolicyIdentifiers + case invalidTimeString + case invalidReason + case invalidRequestInputs + case invalidResponseVector + case invalidStopOnPolicy + case invalidTuple + case multipleValuesUnsupported + case notTrusted + case noDefaultAuthority + case rejectedForm + case requestLost + case requestRejected + case unsupportedAddressType + case unsupportedService + case invalidTupleGroup + case invalidBaseACLs + case invalidTupleCredendtials + case invalidEncoding + case invalidValidityPeriod + case invalidRequestor + case requestDescriptor + case invalidBundleInfo + case invalidCRLIndex + case noFieldValues + case unsupportedFieldFormat + case unsupportedIndexInfo + case unsupportedLocality + case unsupportedNumAttributes + case unsupportedNumIndexes + case 
unsupportedNumRecordTypes + case fieldSpecifiedMultiple + case incompatibleFieldFormat + case invalidParsingModule + case databaseLocked + case datastoreIsOpen + case missingValue + case unsupportedQueryLimits + case unsupportedNumSelectionPreds + case unsupportedOperator + case invalidDBLocation + case invalidAccessRequest + case invalidIndexInfo + case invalidNewOwner + case invalidModifyMode + case missingRequiredExtension + case extendedKeyUsageNotCritical + case timestampMissing + case timestampInvalid + case timestampNotTrusted + case timestampServiceNotAvailable + case timestampBadAlg + case timestampBadRequest + case timestampBadDataFormat + case timestampTimeNotAvailable + case timestampUnacceptedPolicy + case timestampUnacceptedExtension + case timestampAddInfoNotAvailable + case timestampSystemFailure + case signingTimeMissing + case timestampRejection + case timestampWaiting + case timestampRevocationWarning + case timestampRevocationNotification + case unexpectedError +} +extension Status : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init(status: Darwin.OSStatus) + public var description: Swift.String { + get + } + public init?(rawValue: Darwin.OSStatus) + public typealias RawValue = Darwin.OSStatus + public var rawValue: Darwin.OSStatus { + get + } +} +extension Status : Foundation.CustomNSError { + public static let errorDomain: Swift.String + public var errorCode: Swift.Int { + get + } + public var errorUserInfo: [Swift.String : Any] { + get + } +} +extension HHPermission : CoreLocation.CLLocationManagerDelegate { + @objc dynamic public func locationManager(_ manager: CoreLocation.CLLocationManager, didChangeAuthorization status: CoreLocation.CLAuthorizationStatus) +} +public let HHUUID: Swift.String +public let HHUserToken: Swift.String +@_hasMissingDesignatedInitializers public class LoginManager { + public static let `default`: HHSDKVideo.LoginManager + public var mUUID: Swift.Int? 
+ public var mUserInfo: HHSDKVideo.HHUserModel? + public func loadCache() + public func removeCache() + public func getUserInfo(token: Swift.String, success: ((Swift.String?) -> Swift.Void)? = nil, fail: ((Swift.String) -> Swift.Void)? = nil) + public func getUserInfoRequest(success: ((Swift.String?) -> Swift.Void)? = nil, fail: ((Swift.String) -> Swift.Void)? = nil) + public func convert2Model() -> Swift.String? + public func getUserInfo() -> HHSDKVideo.HHUserModel? + public func getCacheUserInfo() -> HHSDKVideo.HHUserModel? + public func hasLoginData() -> Swift.Bool + public func getUUID() -> Swift.Int? + public func setUUID(uuid: Swift.Int) + public func getToken() -> Swift.String? + public func uuidStr() -> Swift.String? + public func isMemeber() -> Swift.Bool + public func isVIP() -> Swift.Bool + public func getUpgradeVIPTips() -> Swift.String? + public func isBuyProduct() -> Swift.Bool + public func getMemberDes() -> Swift.String? + public func isPhoneAccount() -> Swift.Bool + @objc deinit +} +public protocol MapContext { +} +final public class Map { + final public let mappingType: HHSDKVideo.MappingType + final public var JSON: [Swift.String : Any] { + get + } + final public var isKeyPresent: Swift.Bool { + get + } + final public var currentValue: Any? { + get + } + final public var currentKey: Swift.String? { + get + } + final public var nestedKeyDelimiter: Swift.String { + get + } + final public var context: HHSDKVideo.MapContext? + final public var shouldIncludeNilValues: Swift.Bool + final public let toObject: Swift.Bool + public init(mappingType: HHSDKVideo.MappingType, JSON: [Swift.String : Any], toObject: Swift.Bool = false, context: HHSDKVideo.MapContext? 
= nil, shouldIncludeNilValues: Swift.Bool = false) + final public subscript(key: Swift.String) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, delimiter delimiter: Swift.String) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, nested nested: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, nested nested: Swift.Bool, delimiter delimiter: Swift.String) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, ignoreNil ignoreNil: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, delimiter delimiter: Swift.String, ignoreNil ignoreNil: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, nested nested: Swift.Bool, ignoreNil ignoreNil: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, nested nested: Swift.Bool?, delimiter delimiter: Swift.String, ignoreNil ignoreNil: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public func value<T>() -> T? + @objc deinit +} +extension Map { + final public func value<T>(_ key: Swift.String, default: T.Object, using transform: T) throws -> T.Object where T : HHSDKVideo.TransformType + final public func value<T>(_ key: Swift.String, default: T) throws -> T + final public func value<T>(_ key: Swift.String, default: [T]) -> [T] where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, default: T) throws -> T where T : HHSDKVideo.BaseMappable +} +public struct MapError : Swift.Error { + public var key: Swift.String? + public var currentValue: Any? + public var reason: Swift.String? + public var file: Swift.StaticString? + public var function: Swift.StaticString? + public var line: Swift.UInt? + public init(key: Swift.String?, currentValue: Any?, reason: Swift.String?, file: Swift.StaticString? = nil, function: Swift.StaticString? = nil, line: Swift.UInt? 
= nil) +} +extension MapError : Swift.CustomStringConvertible { + public var description: Swift.String { + get + } +} +public protocol BaseMappable { + mutating func mapping(map: HHSDKVideo.Map) +} +public protocol Mappable : HHSDKVideo.BaseMappable { + init?(map: HHSDKVideo.Map) +} +public protocol StaticMappable : HHSDKVideo.BaseMappable { + static func objectForMapping(map: HHSDKVideo.Map) -> HHSDKVideo.BaseMappable? +} +extension Mappable { + public init?(JSONString: Swift.String, context: HHSDKVideo.MapContext? = nil) + public init?(JSON: [Swift.String : Any], context: HHSDKVideo.MapContext? = nil) +} +extension BaseMappable { + public func toJSON() -> [Swift.String : Any] + public func toJSONString(prettyPrint: Swift.Bool = false) -> Swift.String? +} +extension Array where Element : HHSDKVideo.BaseMappable { + public init?(JSONString: Swift.String, context: HHSDKVideo.MapContext? = nil) + public init(JSONArray: [[Swift.String : Any]], context: HHSDKVideo.MapContext? = nil) + public func toJSON() -> [[Swift.String : Any]] + public func toJSONString(prettyPrint: Swift.Bool = false) -> Swift.String? +} +extension Set where Element : HHSDKVideo.BaseMappable { + public init?(JSONString: Swift.String, context: HHSDKVideo.MapContext? = nil) + public init?(JSONArray: [[Swift.String : Any]], context: HHSDKVideo.MapContext? = nil) + public func toJSON() -> [[Swift.String : Any]] + public func toJSONString(prettyPrint: Swift.Bool = false) -> Swift.String? +} +public enum MappingType { + case fromJSON + case toJSON + public static func == (a: HHSDKVideo.MappingType, b: HHSDKVideo.MappingType) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +final public class Mapper<N> where N : HHSDKVideo.BaseMappable { + final public var context: HHSDKVideo.MapContext? + final public var shouldIncludeNilValues: Swift.Bool + public init(context: HHSDKVideo.MapContext? 
= nil, shouldIncludeNilValues: Swift.Bool = false) + final public func map(JSONObject: Any?, toObject object: N) -> N + final public func map(JSONString: Swift.String, toObject object: N) -> N + final public func map(JSON: [Swift.String : Any], toObject object: N) -> N + final public func map(JSONString: Swift.String) -> N? + final public func map(JSONObject: Any?) -> N? + final public func map(JSON: [Swift.String : Any]) -> N? + final public func mapArray(JSONString: Swift.String) -> [N]? + final public func mapArray(JSONObject: Any?) -> [N]? + final public func mapArray(JSONArray: [[Swift.String : Any]]) -> [N] + final public func mapDictionary(JSONString: Swift.String) -> [Swift.String : N]? + final public func mapDictionary(JSONObject: Any?) -> [Swift.String : N]? + final public func mapDictionary(JSON: [Swift.String : [Swift.String : Any]]) -> [Swift.String : N]? + final public func mapDictionary(JSONObject: Any?, toDictionary dictionary: [Swift.String : N]) -> [Swift.String : N] + final public func mapDictionary(JSON: [Swift.String : [Swift.String : Any]], toDictionary dictionary: [Swift.String : N]) -> [Swift.String : N] + final public func mapDictionaryOfArrays(JSONObject: Any?) -> [Swift.String : [N]]? + final public func mapDictionaryOfArrays(JSON: [Swift.String : [[Swift.String : Any]]]) -> [Swift.String : [N]]? + final public func mapArrayOfArrays(JSONObject: Any?) -> [[N]]? + public static func parseJSONStringIntoDictionary(JSONString: Swift.String) -> [Swift.String : Any]? + public static func parseJSONString(JSONString: Swift.String) -> Any? + @objc deinit +} +extension Mapper { + final public func map(JSONfile: Swift.String) -> N? + final public func mapArray(JSONfile: Swift.String) -> [N]? 
+} +extension Mapper { + final public func toJSON(_ object: N) -> [Swift.String : Any] + final public func toJSONArray(_ array: [N]) -> [[Swift.String : Any]] + final public func toJSONDictionary(_ dictionary: [Swift.String : N]) -> [Swift.String : [Swift.String : Any]] + final public func toJSONDictionaryOfArrays(_ dictionary: [Swift.String : [N]]) -> [Swift.String : [[Swift.String : Any]]] + final public func toJSONString(_ object: N, prettyPrint: Swift.Bool = false) -> Swift.String? + final public func toJSONString(_ array: [N], prettyPrint: Swift.Bool = false) -> Swift.String? + public static func toJSONString(_ JSONObject: Any, prettyPrint: Swift.Bool) -> Swift.String? + public static func toJSONData(_ JSONObject: Any, options: Foundation.JSONSerialization.WritingOptions) -> Foundation.Data? +} +extension Mapper where N : Swift.Hashable { + final public func mapSet(JSONString: Swift.String) -> Swift.Set<N>? + final public func mapSet(JSONObject: Any?) -> Swift.Set<N>? + final public func mapSet(JSONArray: [[Swift.String : Any]]) -> Swift.Set<N> + final public func toJSONSet(_ set: Swift.Set<N>) -> [[Swift.String : Any]] + final public func toJSONString(_ set: Swift.Set<N>, prettyPrint: Swift.Bool = false) -> Swift.String? +} +final public class MD5 { + public init() + final public func calculate(for bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension MD5 : HHSDKVideo.Updatable { + final public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> +} +public struct NotifyInfo { + public init() + public var fromAccountId: Swift.String? + public var requestId: Swift.String? + public var channelId: Swift.String? + public var customInfo: Swift.String? 
+} +open class NSDecimalNumberTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.NSDecimalNumber + public typealias JSON = Swift.String + public init() + open func transformFromJSON(_ value: Any?) -> Foundation.NSDecimalNumber? + open func transformToJSON(_ value: Foundation.NSDecimalNumber?) -> Swift.String? + @objc deinit +} +final public class OCB : HHSDKVideo.BlockMode { + public enum Mode { + case combined + case detached + public static func == (a: HHSDKVideo.OCB.Mode, b: HHSDKVideo.OCB.Mode) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + final public let options: HHSDKVideo.BlockModeOption + public enum Error : Swift.Error { + case invalidNonce + case fail + public static func == (a: HHSDKVideo.OCB.Error, b: HHSDKVideo.OCB.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + final public var authenticationTag: Swift.Array<Swift.UInt8>? + public init(nonce N: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? = nil, tagLength: Swift.Int = 16, mode: HHSDKVideo.OCB.Mode = .detached) + convenience public init(nonce N: Swift.Array<Swift.UInt8>, authenticationTag: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? 
= nil, mode: HHSDKVideo.OCB.Mode = .detached) + final public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker + @objc deinit +} +public struct OFB : HHSDKVideo.BlockMode { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.OFB.Error, b: HHSDKVideo.OFB.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +infix operator <- : DefaultPrecedence +infix operator >>> : DefaultPrecedence +public func <- <T>(left: inout T, right: HHSDKVideo.Map) +public func >>> <T>(left: T, right: HHSDKVideo.Map) +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) +public func >>> <T>(left: T?, right: HHSDKVideo.Map) +public func <- <T>(left: inout T, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: T, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: T?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Dictionary<Swift.String, T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Dictionary<Swift.String, T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Dictionary<Swift.String, T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Dictionary<Swift.String, T>?, right: 
HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Dictionary<Swift.String, [T]>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Dictionary<Swift.String, [T]>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Dictionary<Swift.String, [T]>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Dictionary<Swift.String, [T]>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Array<T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Array<T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Array<T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Array<T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Array<Swift.Array<T>>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Array<Swift.Array<T>>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Array<Swift.Array<T>>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Array<Swift.Array<T>>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Set<T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable, T : Swift.Hashable +public func >>> <T>(left: Swift.Set<T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable, T : Swift.Hashable +public func <- <T>(left: inout Swift.Set<T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable, T : Swift.Hashable +public func >>> <T>(left: Swift.Set<T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable, T : Swift.Hashable +public struct OrderModel : HHSDKVideo.Mappable { + public var orderid: Swift.String? 
+ public var price: Swift.Float? + public var buyServiceName: Swift.String? + public var expertId: Swift.String? + public var expertName: Swift.String? + public var patientName: Swift.String? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +public protocol PaddingProtocol { + func add(to: Swift.Array<Swift.UInt8>, blockSize: Swift.Int) -> Swift.Array<Swift.UInt8> + func remove(from: Swift.Array<Swift.UInt8>, blockSize: Swift.Int?) -> Swift.Array<Swift.UInt8> +} +public enum Padding : HHSDKVideo.PaddingProtocol { + case noPadding, zeroPadding, pkcs7, pkcs5, iso78164 + public func add(to: Swift.Array<Swift.UInt8>, blockSize: Swift.Int) -> Swift.Array<Swift.UInt8> + public func remove(from: Swift.Array<Swift.UInt8>, blockSize: Swift.Int?) -> Swift.Array<Swift.UInt8> + public static func == (a: HHSDKVideo.Padding, b: HHSDKVideo.Padding) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +extension PKCS5 { + public struct PBKDF1 { + public enum Error : Swift.Error { + case invalidInput + case derivedKeyTooLong + public static func == (a: HHSDKVideo.PKCS5.PBKDF1.Error, b: HHSDKVideo.PKCS5.PBKDF1.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public enum Variant { + case md5, sha1 + public static func == (a: HHSDKVideo.PKCS5.PBKDF1.Variant, b: HHSDKVideo.PKCS5.PBKDF1.Variant) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(password: Swift.Array<Swift.UInt8>, salt: Swift.Array<Swift.UInt8>, variant: HHSDKVideo.PKCS5.PBKDF1.Variant = .sha1, iterations: Swift.Int = 4096, keyLength: Swift.Int? 
= nil) throws + public func calculate() -> Swift.Array<Swift.UInt8> + } +} +extension PKCS5 { + public struct PBKDF2 { + public enum Error : Swift.Error { + case invalidInput + case derivedKeyTooLong + public static func == (a: HHSDKVideo.PKCS5.PBKDF2.Error, b: HHSDKVideo.PKCS5.PBKDF2.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(password: Swift.Array<Swift.UInt8>, salt: Swift.Array<Swift.UInt8>, iterations: Swift.Int = 4096, keyLength: Swift.Int? = nil, variant: HHSDKVideo.HMAC.Variant = .sha256) throws + public func calculate() throws -> Swift.Array<Swift.UInt8> + } +} +public struct PCBC : HHSDKVideo.BlockMode { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.PCBC.Error, b: HHSDKVideo.PCBC.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +@objc @_hasMissingDesignatedInitializers public class HHPermission : ObjectiveC.NSObject { + public static let locationAlways: HHSDKVideo.HHPermission + public static let locationWhenInUse: HHSDKVideo.HHPermission + public static let microphone: HHSDKVideo.HHPermission + public static let camera: HHSDKVideo.HHPermission + public static let photos: HHSDKVideo.HHPermission + final public let type: HHSDKVideo.HHBasePermissionType + public var status: HHSDKVideo.PermissionStatus { + get + } + public var presentPrePermissionAlert: Swift.Bool + public var prePermissionAlert: HHSDKVideo.PermissionAlert { + get + set + } + public var presentDeniedAlert: Swift.Bool + @objc override dynamic public init() + @objc 
deinit +} +extension HHPermission { + @objc override dynamic public var description: Swift.String { + @objc get + } + @objc override dynamic public var debugDescription: Swift.String { + @objc get + } +} +@_hasMissingDesignatedInitializers public class PermissionAlert { + @objc deinit +} +public enum PermissionStatus : Swift.String { + case authorized + case denied + case disabled + case notDetermined + case limited + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +extension PermissionStatus : Swift.CustomStringConvertible { + public var description: Swift.String { + get + } +} +@objc public enum HHBasePermissionType : Swift.Int { + case locationAlways + case locationWhenInUse + case microphone + case camera + case photos + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +extension HHBasePermissionType : Swift.CustomStringConvertible { + public var description: Swift.String { + get + } +} +@_hasMissingDesignatedInitializers public class PhotoPickerConfig { + public static let `default`: HHSDKVideo.PhotoPickerConfig + public var miniPicTip: Swift.Bool + public var mMaxSelectCount: Swift.Int + public var mDetailColumnCount: Swift.Int + @objc deinit +} +public enum PKCS5 { +} +public enum PKCS7 { +} +final public class Poly1305 : HHSDKVideo.CryptoAuthenticator { + public enum Error : Swift.Error { + case authenticateError + public static func == (a: HHSDKVideo.Poly1305.Error, b: HHSDKVideo.Poly1305.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public static let blockSize: Swift.Int + public init(key: Swift.Array<Swift.UInt8>) + final public func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + @objc deinit +} +@_hasMissingDesignatedInitializers public class PostBodyEncoding { + @objc deinit +} +final 
public class Rabbit { + public enum Error : Swift.Error { + case invalidKeyOrInitializationVector + public static func == (a: HHSDKVideo.Rabbit.Error, b: HHSDKVideo.Rabbit.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public static let ivSize: Swift.Int + public static let keySize: Swift.Int + public static let blockSize: Swift.Int + final public var keySize: Swift.Int { + get + } + convenience public init(key: Swift.Array<Swift.UInt8>) throws + public init(key: Swift.Array<Swift.UInt8>, iv: Swift.Array<Swift.UInt8>?) throws + @objc deinit +} +extension Rabbit : HHSDKVideo.Cipher { + final public func encrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + final public func decrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +extension Rabbit { + convenience public init(key: Swift.String) throws + convenience public init(key: Swift.String, iv: Swift.String) throws +} +public enum ReachabilityError : Swift.Error { + case FailedToCreateWithAddress(Darwin.sockaddr_in) + case FailedToCreateWithHostname(Swift.String) + case UnableToSetCallback + case UnableToSetDispatchQueue +} +public let ReachabilityChangedNotification: Foundation.NSNotification.Name +public class Reachability { + public typealias NetworkReachable = (HHSDKVideo.Reachability) -> () + public typealias NetworkUnreachable = (HHSDKVideo.Reachability) -> () + public enum NetworkStatus : Swift.CustomStringConvertible { + case notReachable, reachableViaWiFi, reachableViaWWAN + public var description: Swift.String { + get + } + public static func == (a: HHSDKVideo.Reachability.NetworkStatus, b: HHSDKVideo.Reachability.NetworkStatus) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public var whenReachable: HHSDKVideo.Reachability.NetworkReachable? 
+ public var whenUnreachable: HHSDKVideo.Reachability.NetworkUnreachable? + public var reachableOnWWAN: Swift.Bool + public var currentReachabilityString: Swift.String { + get + } + public var currentReachabilityStatus: HHSDKVideo.Reachability.NetworkStatus { + get + } + required public init(reachabilityRef: SystemConfiguration.SCNetworkReachability) + convenience public init?(hostname: Swift.String) + convenience public init?() + @objc deinit +} +extension Reachability { + public func startNotifier() throws + public func stopNotifier() + public var isReachable: Swift.Bool { + get + } + public var isReachableViaWWAN: Swift.Bool { + get + } + public var isReachableViaWiFi: Swift.Bool { + get + } + public var description: Swift.String { + get + } +} +public enum RecordImgType : Swift.Int { + case medic + case check + case yingXiang + case video + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +public struct RemoteData : HHSDKVideo.Mappable { + public var changeDoctorTime: Swift.Int + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +final public class Scrypt { + public init(password: Swift.Array<Swift.UInt8>, salt: Swift.Array<Swift.UInt8>, dkLen: Swift.Int, N: Swift.Int, r: Swift.Int, p: Swift.Int) throws + final public func calculate() throws -> [Swift.UInt8] + @objc deinit +} +public struct SDKConfigModel : HHSDKVideo.Mappable { + public var cardIdActiveShow: Swift.Int + public var changeDoctorTime: Swift.Int? 
+ public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +final public class SHA1 { + public init() + final public func calculate(for bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension SHA1 : HHSDKVideo.Updatable { + @discardableResult + final public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> +} +final public class SHA2 { + public enum Variant : Swift.RawRepresentable { + case sha224, sha256, sha384, sha512 + public var digestLength: Swift.Int { + get + } + public var blockSize: Swift.Int { + get + } + public typealias RawValue = Swift.Int + public var rawValue: HHSDKVideo.SHA2.Variant.RawValue { + get + } + public init?(rawValue: HHSDKVideo.SHA2.Variant.RawValue) + } + public init(variant: HHSDKVideo.SHA2.Variant) + final public func calculate(for bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension SHA2 : HHSDKVideo.Updatable { + final public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> +} +final public class SHA3 { + final public let blockSize: Swift.Int + final public let digestLength: Swift.Int + final public let markByte: Swift.UInt8 + public enum Variant { + case sha224, sha256, sha384, sha512, keccak224, keccak256, keccak384, keccak512 + public var outputLength: Swift.Int { + get + } + public static func == (a: HHSDKVideo.SHA3.Variant, b: HHSDKVideo.SHA3.Variant) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(variant: HHSDKVideo.SHA3.Variant) + final public func calculate(for bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension SHA3 : HHSDKVideo.Updatable { + final public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> 
Swift.Array<Swift.UInt8> +} +extension String { + public var bytes: Swift.Array<Swift.UInt8> { + get + } + public func md5() -> Swift.String + public func sha1() -> Swift.String + public func sha224() -> Swift.String + public func sha256() -> Swift.String + public func sha384() -> Swift.String + public func sha512() -> Swift.String + public func sha3(_ variant: HHSDKVideo.SHA3.Variant) -> Swift.String + public func crc32(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.String + public func crc32c(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.String + public func crc16(seed: Swift.UInt16? = nil) -> Swift.String + public func encrypt(cipher: HHSDKVideo.Cipher) throws -> Swift.String + public func encryptToBase64(cipher: HHSDKVideo.Cipher) throws -> Swift.String? + public func authenticate<A>(with authenticator: A) throws -> Swift.String where A : HHSDKVideo.CryptoAuthenticator +} +extension String { + public func decryptBase64ToString(cipher: HHSDKVideo.Cipher) throws -> Swift.String + public func decryptBase64(cipher: HHSDKVideo.Cipher) throws -> Swift.Array<Swift.UInt8> +} +@_hasMissingDesignatedInitializers final public class SwiftEntryKit { + public enum EntryDismissalDescriptor { + case specific(entryName: Swift.String) + case prioritizedLowerOrEqualTo(priority: HHSDKVideo.EKAttributes.Precedence.Priority) + case enqueued + case all + case displayed + } + public enum RollbackWindow { + case main + case custom(window: UIKit.UIWindow) + } + public typealias DismissCompletionHandler = () -> Swift.Void + final public class var window: UIKit.UIWindow? { + get + } + final public class var isCurrentlyDisplaying: Swift.Bool { + get + } + final public class func isCurrentlyDisplaying(entryNamed name: Swift.String? = nil) -> Swift.Bool + final public class var isQueueEmpty: Swift.Bool { + get + } + final public class func queueContains(entryNamed name: Swift.String? 
= nil) -> Swift.Bool + final public class func display(entry view: UIKit.UIView, using attributes: HHSDKVideo.EKAttributes, presentInsideKeyWindow: Swift.Bool = false, rollbackWindow: HHSDKVideo.SwiftEntryKit.RollbackWindow = .main) + final public class func display(entry viewController: UIKit.UIViewController, using attributes: HHSDKVideo.EKAttributes, presentInsideKeyWindow: Swift.Bool = false, rollbackWindow: HHSDKVideo.SwiftEntryKit.RollbackWindow = .main) + final public class func transform(to view: UIKit.UIView) + final public class func dismiss(_ descriptor: HHSDKVideo.SwiftEntryKit.EntryDismissalDescriptor = .displayed, with completion: HHSDKVideo.SwiftEntryKit.DismissCompletionHandler? = nil) + final public class func layoutIfNeeded() + @objc deinit +} +open class TransformOf<ObjectType, JSONType> : HHSDKVideo.TransformType { + public typealias Object = ObjectType + public typealias JSON = JSONType + public init(fromJSON: @escaping (JSONType?) -> ObjectType?, toJSON: @escaping (ObjectType?) -> JSONType?) + open func transformFromJSON(_ value: Any?) -> ObjectType? + open func transformToJSON(_ value: ObjectType?) -> JSONType? 
+ @objc deinit +} +public func <- <Transform>(left: inout Transform.Object, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: Transform.Object, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout Transform.Object?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: Transform.Object?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [Transform.Object], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [Transform.Object], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [Transform.Object]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [Transform.Object]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [Swift.String : Transform.Object], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [Swift.String : Transform.Object], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [Swift.String : Transform.Object]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [Swift.String : Transform.Object]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout Transform.Object, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Transform.Object, right: (HHSDKVideo.Map, Transform)) where Transform 
: HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Transform.Object?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Transform.Object?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Dictionary<Swift.String, Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Dictionary<Swift.String, Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Dictionary<Swift.String, Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Dictionary<Swift.String, Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Dictionary<Swift.String, [Transform.Object]>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Dictionary<Swift.String, [Transform.Object]>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Dictionary<Swift.String, [Transform.Object]>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Dictionary<Swift.String, 
[Transform.Object]>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Array<Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Array<Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Array<Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Array<Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout [[Transform.Object]], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [[Transform.Object]], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [[Transform.Object]]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [[Transform.Object]]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout Swift.Set<Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable, Transform.Object : Swift.Hashable +public func >>> <Transform>(left: Swift.Set<Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable, Transform.Object : Swift.Hashable +public func <- <Transform>(left: inout Swift.Set<Transform.Object>?, right: 
(HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable, Transform.Object : Swift.Hashable +public func >>> <Transform>(left: Swift.Set<Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable, Transform.Object : Swift.Hashable +public protocol TransformType { + associatedtype Object + associatedtype JSON + func transformFromJSON(_ value: Any?) -> Self.Object? + func transformToJSON(_ value: Self.Object?) -> Self.JSON? +} +extension UIImage { + public class func gifImageWithData(_ data: Foundation.Data) -> UIKit.UIImage? + public class func gifImageWithURL(_ gifUrl: Swift.String) -> UIKit.UIImage? + public class func gifImageWithName(_ name: Swift.String) -> UIKit.UIImage? +} +public protocol _UInt8Type { +} +extension UInt8 : HHSDKVideo._UInt8Type { +} +extension UInt8 { + public func bits() -> [HHSDKVideo.Bit] + public func bits() -> Swift.String +} +public protocol Updatable { + mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool) throws -> Swift.Array<Swift.UInt8> + mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws +} +extension Updatable { + public mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws + public mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> + public mutating func update(withBytes bytes: Swift.Array<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> + public mutating func update(withBytes bytes: Swift.Array<Swift.UInt8>, isLast: Swift.Bool = false, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws + public mutating func finish(withBytes bytes: 
Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + public mutating func finish(withBytes bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + public mutating func finish() throws -> Swift.Array<Swift.UInt8> + public mutating func finish(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws + public mutating func finish(withBytes bytes: Swift.Array<Swift.UInt8>, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws + public mutating func finish(output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws +} +open class URLTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.URL + public typealias JSON = Swift.String + public init(shouldEncodeURLString: Swift.Bool = false, allowedCharacterSet: Foundation.CharacterSet = .urlQueryAllowed) + open func transformFromJSON(_ value: Any?) -> Foundation.URL? + open func transformToJSON(_ value: Foundation.URL?) -> Swift.String? + @objc deinit +} +public struct UserApi { +} +@objc @_inheritsConvenienceInitializers @_hasMissingDesignatedInitializers public class VCManager : ObjectiveC.NSObject { + public static let `default`: HHSDKVideo.VCManager + public var waitModel: HHSDKVideo.HHWaitDoctorModel? 
+ @objc deinit +} +extension VCManager { + public func onReceiveCall(callee: Swift.String, caller: Swift.String, orderId: Swift.String) + public func isInBusy() -> Swift.Bool +} +extension VCManager { + public func onReceiveInvite(docModel: HHSDKVideo.HHInviteDocModel) + public static func onUserReject(_ fromUuid: Swift.String) + public static func onCancelInvite(_ fromUuid: Swift.String) + public static func changeVideo(_ isVoice: Swift.Bool) +} +extension VCManager { + public func showEduBoard(groupId: Swift.String, orderId: Swift.String) + public func closeEduBoard() +} +public struct VideoApi { +} +public enum HHIMCmd : Swift.String { + case audio + case video + case closeVideo + case openVideo + case transfor + case accept + case call + case reject + case cancelCall + case pcCancel + case phoneCall + case busy + case waiting + case waitingTip + case agentTrans + case web_transform + case callWeb + case SWITCH_TO_CAMERA_wmp + case cancelCallWeb + case call_invite + case reject_invite + case cancel_invite + case exit_camera + case enter_camera + case conference_begin + case conference_end + case user_certification + case cancel_user_certification + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +public class WeakArray<T> { + public func add(_ delegate: T) + public func remove(_ delegate: T) + public func excute(_ block: @escaping ((T?) -> Swift.Void)) + public init() + @objc deinit +} +@objc public class ZLAlbumListModel : ObjectiveC.NSObject { + final public let title: Swift.String + public var count: Swift.Int { + get + } + public var result: Photos.PHFetchResult<Photos.PHAsset> + final public let collection: Photos.PHAssetCollection + final public let option: Photos.PHFetchOptions + final public let isCameraRoll: Swift.Bool + public var headImageAsset: Photos.PHAsset? 
{ + get + } + public var models: [HHSDKVideo.ZLPhotoModel] + public init(title: Swift.String, result: Photos.PHFetchResult<Photos.PHAsset>, collection: Photos.PHAssetCollection, option: Photos.PHFetchOptions, isCameraRoll: Swift.Bool) + public func refetchPhotos() + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class ZLCameraConfiguration : ObjectiveC.NSObject { + @objc public enum CaptureSessionPreset : Swift.Int { + case cif352x288 + case vga640x480 + case hd1280x720 + case hd1920x1080 + case hd4K3840x2160 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public enum FocusMode : Swift.Int { + case autoFocus + case continuousAutoFocus + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public enum ExposureMode : Swift.Int { + case autoExpose + case continuousAutoExposure + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public enum FlashMode : Swift.Int { + case auto + case on + case off + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public enum VideoExportType : Swift.Int { + case mov + case mp4 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public var sessionPreset: HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset + @objc public var focusMode: HHSDKVideo.ZLCameraConfiguration.FocusMode + @objc public var exposureMode: HHSDKVideo.ZLCameraConfiguration.ExposureMode + @objc public var flashMode: HHSDKVideo.ZLCameraConfiguration.FlashMode + @objc public var videoExportType: HHSDKVideo.ZLCameraConfiguration.VideoExportType + @objc override dynamic public init() + @objc deinit +} +extension 
ZLCameraConfiguration { + @discardableResult + public func sessionPreset(_ sessionPreset: HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset) -> HHSDKVideo.ZLCameraConfiguration + @discardableResult + public func focusMode(_ mode: HHSDKVideo.ZLCameraConfiguration.FocusMode) -> HHSDKVideo.ZLCameraConfiguration + @discardableResult + public func exposureMode(_ mode: HHSDKVideo.ZLCameraConfiguration.ExposureMode) -> HHSDKVideo.ZLCameraConfiguration + @discardableResult + public func flashMode(_ mode: HHSDKVideo.ZLCameraConfiguration.FlashMode) -> HHSDKVideo.ZLCameraConfiguration + @discardableResult + public func videoExportType(_ type: HHSDKVideo.ZLCameraConfiguration.VideoExportType) -> HHSDKVideo.ZLCameraConfiguration +} +@objc open class ZLCustomCamera : UIKit.UIViewController, QuartzCore.CAAnimationDelegate { + @objc public var takeDoneBlock: ((UIKit.UIImage?, Foundation.URL?) -> Swift.Void)? + @objc public var cancelBlock: (() -> Swift.Void)? + public var tipsLabel: UIKit.UILabel { + get + set + } + public var bottomView: UIKit.UIView { + get + set + } + public var largeCircleView: UIKit.UIVisualEffectView { + get + set + } + public var smallCircleView: UIKit.UIView { + get + set + } + public var animateLayer: QuartzCore.CAShapeLayer { + get + set + } + public var retakeBtn: HHSDKVideo.ZLEnlargeButton { + get + set + } + public var doneBtn: UIKit.UIButton { + get + set + } + public var dismissBtn: HHSDKVideo.ZLEnlargeButton { + get + set + } + public var switchCameraBtn: HHSDKVideo.ZLEnlargeButton { + get + set + } + public var focusCursorView: UIKit.UIImageView { + get + set + } + public var takedImageView: UIKit.UIImageView { + get + set + } + @objc override dynamic public var supportedInterfaceOrientations: UIKit.UIInterfaceOrientationMask { + @objc get + } + @objc override dynamic public var prefersStatusBarHidden: Swift.Bool { + @objc get + } + @objc deinit + @objc dynamic public init() + @objc required dynamic public init?(coder: Foundation.NSCoder) + 
@objc override dynamic open func viewDidLoad() + @objc override dynamic open func viewWillAppear(_ animated: Swift.Bool) + @objc override dynamic open func viewDidAppear(_ animated: Swift.Bool) + @objc override dynamic open func viewWillDisappear(_ animated: Swift.Bool) + @objc override dynamic open func viewDidDisappear(_ animated: Swift.Bool) + @objc override dynamic open func viewDidLayoutSubviews() + @objc public func animationDidStop(_ anim: QuartzCore.CAAnimation, finished flag: Swift.Bool) + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) +} +extension ZLCustomCamera : AVFoundation.AVCapturePhotoCaptureDelegate { + @objc dynamic public func photoOutput(_ output: AVFoundation.AVCapturePhotoOutput, willCapturePhotoFor resolvedSettings: AVFoundation.AVCaptureResolvedPhotoSettings) + @objc dynamic public func photoOutput(_ output: AVFoundation.AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CoreMedia.CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CoreMedia.CMSampleBuffer?, resolvedSettings: AVFoundation.AVCaptureResolvedPhotoSettings, bracketSettings: AVFoundation.AVCaptureBracketedStillImageSettings?, error: Swift.Error?) +} +extension ZLCustomCamera : AVFoundation.AVCaptureFileOutputRecordingDelegate { + @objc dynamic public func fileOutput(_ output: AVFoundation.AVCaptureFileOutput, didStartRecordingTo fileURL: Foundation.URL, from connections: [AVFoundation.AVCaptureConnection]) + @objc dynamic public func fileOutput(_ output: AVFoundation.AVCaptureFileOutput, didFinishRecordingTo outputFileURL: Foundation.URL, from connections: [AVFoundation.AVCaptureConnection], error: Swift.Error?) 
+} +extension ZLCustomCamera : UIKit.UIGestureRecognizerDelegate { + @objc dynamic public func gestureRecognizer(_ gestureRecognizer: UIKit.UIGestureRecognizer, shouldRecognizeSimultaneouslyWith otherGestureRecognizer: UIKit.UIGestureRecognizer) -> Swift.Bool +} +@objc public protocol ZLImageStickerContainerDelegate { + @objc var selectImageBlock: ((UIKit.UIImage) -> Swift.Void)? { get set } + @objc var hideBlock: (() -> Swift.Void)? { get set } + @objc func show(in view: UIKit.UIView) +} +@objc @_inheritsConvenienceInitializers public class ZLEditImageConfiguration : ObjectiveC.NSObject { + @objc public enum EditTool : Swift.Int, Swift.CaseIterable { + case draw + case clip + case imageSticker + case textSticker + case mosaic + case filter + case adjust + public init?(rawValue: Swift.Int) + public typealias AllCases = [HHSDKVideo.ZLEditImageConfiguration.EditTool] + public typealias RawValue = Swift.Int + public static var allCases: [HHSDKVideo.ZLEditImageConfiguration.EditTool] { + get + } + public var rawValue: Swift.Int { + get + } + } + @objc public enum AdjustTool : Swift.Int, Swift.CaseIterable { + case brightness + case contrast + case saturation + public init?(rawValue: Swift.Int) + public typealias AllCases = [HHSDKVideo.ZLEditImageConfiguration.AdjustTool] + public typealias RawValue = Swift.Int + public static var allCases: [HHSDKVideo.ZLEditImageConfiguration.AdjustTool] { + get + } + public var rawValue: Swift.Int { + get + } + } + public var tools: [HHSDKVideo.ZLEditImageConfiguration.EditTool] { + get + set + } + @objc public var tools_objc: [Swift.Int] { + @objc get + @objc set + } + @objc public var drawColors: [UIKit.UIColor] { + @objc get + @objc set + } + @objc public var defaultDrawColor: UIKit.UIColor + @objc public var clipRatios: [HHSDKVideo.ZLImageClipRatio] { + @objc get + @objc set + } + @objc public var textStickerTextColors: [UIKit.UIColor] { + @objc get + @objc set + } + @objc public var textStickerDefaultTextColor: UIKit.UIColor + 
@objc public var filters: [HHSDKVideo.ZLFilter] { + @objc get + @objc set + } + @objc public var imageStickerContainerView: (UIKit.UIView & HHSDKVideo.ZLImageStickerContainerDelegate)? + public var adjustTools: [HHSDKVideo.ZLEditImageConfiguration.AdjustTool] { + get + set + } + @objc public var adjustTools_objc: [Swift.Int] { + @objc get + @objc set + } + @objc public var impactFeedbackWhenAdjustSliderValueIsZero: Swift.Bool + @objc public var impactFeedbackStyle: UIKit.UIImpactFeedbackGenerator.FeedbackStyle + @objc override dynamic public init() + @objc deinit +} +extension ZLEditImageConfiguration { + @discardableResult + public func tools(_ tools: [HHSDKVideo.ZLEditImageConfiguration.EditTool]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func drawColors(_ colors: [UIKit.UIColor]) -> HHSDKVideo.ZLEditImageConfiguration + public func defaultDrawColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func clipRatios(_ ratios: [HHSDKVideo.ZLImageClipRatio]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func textStickerTextColors(_ colors: [UIKit.UIColor]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func textStickerDefaultTextColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func filters(_ filters: [HHSDKVideo.ZLFilter]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func imageStickerContainerView(_ view: (UIKit.UIView & HHSDKVideo.ZLImageStickerContainerDelegate)?) 
-> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func adjustTools(_ tools: [HHSDKVideo.ZLEditImageConfiguration.AdjustTool]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func impactFeedbackWhenAdjustSliderValueIsZero(_ value: Swift.Bool) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func impactFeedbackStyle(_ style: UIKit.UIImpactFeedbackGenerator.FeedbackStyle) -> HHSDKVideo.ZLEditImageConfiguration +} +@objc public class ZLImageClipRatio : ObjectiveC.NSObject { + public var title: Swift.String + final public let whRatio: CoreGraphics.CGFloat + @objc public init(title: Swift.String, whRatio: CoreGraphics.CGFloat, isCircle: Swift.Bool = false) + @objc override dynamic public init() + @objc deinit +} +extension ZLImageClipRatio { + @objc public static let custom: HHSDKVideo.ZLImageClipRatio + @objc public static let circle: HHSDKVideo.ZLImageClipRatio + @objc public static let wh1x1: HHSDKVideo.ZLImageClipRatio + @objc public static let wh3x4: HHSDKVideo.ZLImageClipRatio + @objc public static let wh4x3: HHSDKVideo.ZLImageClipRatio + @objc public static let wh2x3: HHSDKVideo.ZLImageClipRatio + @objc public static let wh3x2: HHSDKVideo.ZLImageClipRatio + @objc public static let wh9x16: HHSDKVideo.ZLImageClipRatio + @objc public static let wh16x9: HHSDKVideo.ZLImageClipRatio +} +@objc public class ZLEditImageModel : ObjectiveC.NSObject { + final public let drawPaths: [HHSDKVideo.ZLDrawPath] + final public let mosaicPaths: [HHSDKVideo.ZLMosaicPath] + final public let editRect: CoreGraphics.CGRect? + final public let angle: CoreGraphics.CGFloat + final public let brightness: Swift.Float + final public let contrast: Swift.Float + final public let saturation: Swift.Float + final public let selectRatio: HHSDKVideo.ZLImageClipRatio? + final public let selectFilter: HHSDKVideo.ZLFilter? + final public let textStickers: [(state: HHSDKVideo.ZLTextStickerState, index: Swift.Int)]? 
+ final public let imageStickers: [(state: HHSDKVideo.ZLImageStickerState, index: Swift.Int)]? + public init(drawPaths: [HHSDKVideo.ZLDrawPath], mosaicPaths: [HHSDKVideo.ZLMosaicPath], editRect: CoreGraphics.CGRect?, angle: CoreGraphics.CGFloat, brightness: Swift.Float, contrast: Swift.Float, saturation: Swift.Float, selectRatio: HHSDKVideo.ZLImageClipRatio?, selectFilter: HHSDKVideo.ZLFilter, textStickers: [(state: HHSDKVideo.ZLTextStickerState, index: Swift.Int)]?, imageStickers: [(state: HHSDKVideo.ZLImageStickerState, index: Swift.Int)]?) + @objc override dynamic public init() + @objc deinit +} +@objc open class ZLEditImageViewController : UIKit.UIViewController { + @objc public var drawColViewH: CoreGraphics.CGFloat + @objc public var filterColViewH: CoreGraphics.CGFloat + @objc public var adjustColViewH: CoreGraphics.CGFloat + @objc public var ashbinNormalBgColor: UIKit.UIColor + @objc public var cancelBtn: HHSDKVideo.ZLEnlargeButton { + @objc get + @objc set + } + @objc public var mainScrollView: UIKit.UIScrollView { + @objc get + @objc set + } + @objc public var topShadowView: UIKit.UIView { + @objc get + @objc set + } + @objc public var topShadowLayer: QuartzCore.CAGradientLayer { + @objc get + @objc set + } + @objc public var bottomShadowView: UIKit.UIView + @objc public var bottomShadowLayer: QuartzCore.CAGradientLayer + @objc public var doneBtn: UIKit.UIButton + @objc public var revokeBtn: UIKit.UIButton + @objc public var ashbinView: UIKit.UIView { + @objc get + @objc set + } + @objc public var ashbinImgView: UIKit.UIImageView { + @objc get + @objc set + } + @objc public var drawLineWidth: CoreGraphics.CGFloat + @objc public var mosaicLineWidth: CoreGraphics.CGFloat + @objc public var editFinishBlock: ((UIKit.UIImage, HHSDKVideo.ZLEditImageModel?) -> Swift.Void)? + @objc public var cancelEditBlock: (() -> Swift.Void)? 
+ @objc override dynamic public var prefersStatusBarHidden: Swift.Bool { + @objc get + } + @objc override dynamic public var supportedInterfaceOrientations: UIKit.UIInterfaceOrientationMask { + @objc get + } + @objc deinit + @objc public class func showEditImageVC(parentVC: UIKit.UIViewController?, animate: Swift.Bool = false, image: UIKit.UIImage, editModel: HHSDKVideo.ZLEditImageModel? = nil, cancel: (() -> Swift.Void)? = nil, completion: ((UIKit.UIImage, HHSDKVideo.ZLEditImageModel?) -> Swift.Void)?) + @objc public init(image: UIKit.UIImage, editModel: HHSDKVideo.ZLEditImageModel? = nil) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc override dynamic open func viewDidLoad() + @objc override dynamic open func viewDidLayoutSubviews() + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) +} +extension ZLEditImageViewController : UIKit.UIGestureRecognizerDelegate { + @objc dynamic public func gestureRecognizerShouldBegin(_ gestureRecognizer: UIKit.UIGestureRecognizer) -> Swift.Bool +} +extension ZLEditImageViewController : UIKit.UIScrollViewDelegate { + @objc dynamic public func viewForZooming(in scrollView: UIKit.UIScrollView) -> UIKit.UIView? 
+ @objc dynamic public func scrollViewDidZoom(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndZooming(_ scrollView: UIKit.UIScrollView, with view: UIKit.UIView?, atScale scale: CoreGraphics.CGFloat) + @objc dynamic public func scrollViewDidScroll(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndDragging(_ scrollView: UIKit.UIScrollView, willDecelerate decelerate: Swift.Bool) + @objc dynamic public func scrollViewDidEndDecelerating(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndScrollingAnimation(_ scrollView: UIKit.UIScrollView) +} +extension ZLEditImageViewController : UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegate { + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, didSelectItemAt indexPath: Foundation.IndexPath) +} +@objc @_hasMissingDesignatedInitializers public class ZLDrawPath : ObjectiveC.NSObject { + @objc override dynamic public init() + @objc deinit +} +@objc @_hasMissingDesignatedInitializers public class ZLMosaicPath : ObjectiveC.NSObject { + @objc override dynamic public init() + @objc deinit +} +@objc @_hasMissingDesignatedInitializers public class ZLEditVideoViewController : UIKit.UIViewController { + @objc public var editFinishBlock: ((Foundation.URL?) -> Swift.Void)? 
+ @objc override dynamic public var prefersStatusBarHidden: Swift.Bool { + @objc get + } + @objc override dynamic public var supportedInterfaceOrientations: UIKit.UIInterfaceOrientationMask { + @objc get + } + @objc deinit + @objc public init(avAsset: AVFoundation.AVAsset, animateDismiss: Swift.Bool = false) + @objc override dynamic public func viewDidLoad() + @objc override dynamic public func viewDidAppear(_ animated: Swift.Bool) + @objc override dynamic public func viewDidLayoutSubviews() + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) +} +extension ZLEditVideoViewController : UIKit.UIGestureRecognizerDelegate { + @objc dynamic public func gestureRecognizerShouldBegin(_ gestureRecognizer: UIKit.UIGestureRecognizer) -> Swift.Bool +} +extension ZLEditVideoViewController : UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegateFlowLayout { + @objc dynamic public func scrollViewDidScroll(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndDragging(_ scrollView: UIKit.UIScrollView, willDecelerate decelerate: Swift.Bool) + @objc dynamic public func scrollViewDidEndDecelerating(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, insetForSectionAt section: Swift.Int) -> UIKit.UIEdgeInsets + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, willDisplay cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) +} +@objc @_inheritsConvenienceInitializers public class ZLEnlargeButton : UIKit.UIButton 
{ + public var enlargeInsets: UIKit.UIEdgeInsets + public var enlargeInset: CoreGraphics.CGFloat { + get + set + } + @objc override dynamic public func point(inside point: CoreGraphics.CGPoint, with event: UIKit.UIEvent?) -> Swift.Bool + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +public typealias ZLFilterApplierType = ((UIKit.UIImage) -> UIKit.UIImage) +@objc public enum ZLFilterType : Swift.Int { + case normal + case chrome + case fade + case instant + case process + case transfer + case tone + case linear + case sepia + case mono + case noir + case tonal + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc public class ZLFilter : ObjectiveC.NSObject { + public var name: Swift.String + @objc public init(name: Swift.String, filterType: HHSDKVideo.ZLFilterType) + @objc public init(name: Swift.String, applier: HHSDKVideo.ZLFilterApplierType?) 
+ @objc override dynamic public init() + @objc deinit +} +extension ZLFilter { + @objc public static let all: [HHSDKVideo.ZLFilter] + @objc public static let normal: HHSDKVideo.ZLFilter + @objc public static let clarendon: HHSDKVideo.ZLFilter + @objc public static let nashville: HHSDKVideo.ZLFilter + @objc public static let apply1977: HHSDKVideo.ZLFilter + @objc public static let toaster: HHSDKVideo.ZLFilter + @objc public static let chrome: HHSDKVideo.ZLFilter + @objc public static let fade: HHSDKVideo.ZLFilter + @objc public static let instant: HHSDKVideo.ZLFilter + @objc public static let process: HHSDKVideo.ZLFilter + @objc public static let transfer: HHSDKVideo.ZLFilter + @objc public static let tone: HHSDKVideo.ZLFilter + @objc public static let linear: HHSDKVideo.ZLFilter + @objc public static let sepia: HHSDKVideo.ZLFilter + @objc public static let mono: HHSDKVideo.ZLFilter + @objc public static let noir: HHSDKVideo.ZLFilter + @objc public static let tonal: HHSDKVideo.ZLFilter +} +@objc public enum ZLURLType : Swift.Int { + case image + case video + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc @_hasMissingDesignatedInitializers public class ZLImagePreviewController : UIKit.UIViewController { + @objc public var longPressBlock: ((HHSDKVideo.ZLImagePreviewController?, UIKit.UIImage?, Swift.Int) -> Swift.Void)? + @objc public var doneBlock: (([Any]) -> Swift.Void)? + @objc public var videoHttpHeader: [Swift.String : Any]? + @objc override dynamic public var prefersStatusBarHidden: Swift.Bool { + @objc get + } + @objc override dynamic public var preferredStatusBarStyle: UIKit.UIStatusBarStyle { + @objc get + } + @objc public init(datas: [Any], index: Swift.Int = 0, showSelectBtn: Swift.Bool = true, showBottomView: Swift.Bool = true, urlType: ((Foundation.URL) -> HHSDKVideo.ZLURLType)? 
= nil, urlImageLoader: ((Foundation.URL, UIKit.UIImageView, @escaping (CoreGraphics.CGFloat) -> Swift.Void, @escaping () -> Swift.Void) -> Swift.Void)? = nil) + @objc override dynamic public func viewDidLoad() + @objc override dynamic public func viewWillAppear(_ animated: Swift.Bool) + @objc override dynamic public func viewDidAppear(_ animated: Swift.Bool) + @objc override dynamic public func viewDidLayoutSubviews() + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) + @objc deinit +} +extension ZLImagePreviewController { + @objc dynamic public func scrollViewDidScroll(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndDecelerating(_ scrollView: UIKit.UIScrollView) +} +extension ZLImagePreviewController : UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegateFlowLayout { + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, minimumInteritemSpacingForSectionAt section: Swift.Int) -> CoreGraphics.CGFloat + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, minimumLineSpacingForSectionAt section: Swift.Int) -> CoreGraphics.CGFloat + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, insetForSectionAt section: Swift.Int) -> UIKit.UIEdgeInsets + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, sizeForItemAt indexPath: Foundation.IndexPath) -> CoreGraphics.CGSize + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: 
Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, didEndDisplaying cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) +} +@objc @_hasMissingDesignatedInitializers public class ZLImageStickerState : ObjectiveC.NSObject { + @objc override dynamic public init() + @objc deinit +} +@objc public enum ZLLanguageType : Swift.Int { + case system + case chineseSimplified + case chineseTraditional + case english + case japanese + case french + case german + case russian + case vietnamese + case korean + case malay + case italian + case indonesian + case portuguese + case spanish + case turkish + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +public struct ZLLocalLanguageKey : Swift.Hashable { + public let rawValue: Swift.String + public init(rawValue: Swift.String) + public static let previewCamera: HHSDKVideo.ZLLocalLanguageKey + public static let previewCameraRecord: HHSDKVideo.ZLLocalLanguageKey + public static let previewAlbum: HHSDKVideo.ZLLocalLanguageKey + public static let cancel: HHSDKVideo.ZLLocalLanguageKey + public static let noPhotoTips: HHSDKVideo.ZLLocalLanguageKey + public static let loading: HHSDKVideo.ZLLocalLanguageKey + public static let hudLoading: HHSDKVideo.ZLLocalLanguageKey + public static let done: HHSDKVideo.ZLLocalLanguageKey + public static let ok: HHSDKVideo.ZLLocalLanguageKey + public static let timeout: HHSDKVideo.ZLLocalLanguageKey + public static let noPhotoLibratyAuthority: HHSDKVideo.ZLLocalLanguageKey + public static let noCameraAuthority: HHSDKVideo.ZLLocalLanguageKey + public static let noMicrophoneAuthority: HHSDKVideo.ZLLocalLanguageKey + public static let cameraUnavailable: HHSDKVideo.ZLLocalLanguageKey + public static let keepRecording: HHSDKVideo.ZLLocalLanguageKey + public static let gotoSettings: HHSDKVideo.ZLLocalLanguageKey + public static let 
photo: HHSDKVideo.ZLLocalLanguageKey + public static let originalPhoto: HHSDKVideo.ZLLocalLanguageKey + public static let back: HHSDKVideo.ZLLocalLanguageKey + public static let edit: HHSDKVideo.ZLLocalLanguageKey + public static let editFinish: HHSDKVideo.ZLLocalLanguageKey + public static let revert: HHSDKVideo.ZLLocalLanguageKey + public static let brightness: HHSDKVideo.ZLLocalLanguageKey + public static let contrast: HHSDKVideo.ZLLocalLanguageKey + public static let saturation: HHSDKVideo.ZLLocalLanguageKey + public static let preview: HHSDKVideo.ZLLocalLanguageKey + public static let notAllowMixSelect: HHSDKVideo.ZLLocalLanguageKey + public static let save: HHSDKVideo.ZLLocalLanguageKey + public static let saveImageError: HHSDKVideo.ZLLocalLanguageKey + public static let saveVideoError: HHSDKVideo.ZLLocalLanguageKey + public static let exceededMaxSelectCount: HHSDKVideo.ZLLocalLanguageKey + public static let exceededMaxVideoSelectCount: HHSDKVideo.ZLLocalLanguageKey + public static let lessThanMinVideoSelectCount: HHSDKVideo.ZLLocalLanguageKey + public static let longerThanMaxVideoDuration: HHSDKVideo.ZLLocalLanguageKey + public static let shorterThanMaxVideoDuration: HHSDKVideo.ZLLocalLanguageKey + public static let iCloudVideoLoadFaild: HHSDKVideo.ZLLocalLanguageKey + public static let imageLoadFailed: HHSDKVideo.ZLLocalLanguageKey + public static let customCameraTips: HHSDKVideo.ZLLocalLanguageKey + public static let customCameraTakePhotoTips: HHSDKVideo.ZLLocalLanguageKey + public static let customCameraRecordVideoTips: HHSDKVideo.ZLLocalLanguageKey + public static let minRecordTimeTips: HHSDKVideo.ZLLocalLanguageKey + public static let cameraRoll: HHSDKVideo.ZLLocalLanguageKey + public static let panoramas: HHSDKVideo.ZLLocalLanguageKey + public static let videos: HHSDKVideo.ZLLocalLanguageKey + public static let favorites: HHSDKVideo.ZLLocalLanguageKey + public static let timelapses: HHSDKVideo.ZLLocalLanguageKey + public static let recentlyAdded: 
HHSDKVideo.ZLLocalLanguageKey + public static let bursts: HHSDKVideo.ZLLocalLanguageKey + public static let slomoVideos: HHSDKVideo.ZLLocalLanguageKey + public static let selfPortraits: HHSDKVideo.ZLLocalLanguageKey + public static let screenshots: HHSDKVideo.ZLLocalLanguageKey + public static let depthEffect: HHSDKVideo.ZLLocalLanguageKey + public static let livePhotos: HHSDKVideo.ZLLocalLanguageKey + public static let animated: HHSDKVideo.ZLLocalLanguageKey + public static let myPhotoStream: HHSDKVideo.ZLLocalLanguageKey + public static let noTitleAlbumListPlaceholder: HHSDKVideo.ZLLocalLanguageKey + public static let unableToAccessAllPhotos: HHSDKVideo.ZLLocalLanguageKey + public static let textStickerRemoveTips: HHSDKVideo.ZLLocalLanguageKey + public func hash(into hasher: inout Swift.Hasher) + public static func == (a: HHSDKVideo.ZLLocalLanguageKey, b: HHSDKVideo.ZLLocalLanguageKey) -> Swift.Bool + public var hashValue: Swift.Int { + get + } +} +public typealias Second = Swift.Int +@objc @_inheritsConvenienceInitializers public class ZLPhotoConfiguration : ObjectiveC.NSObject { + @objc public class func `default`() -> HHSDKVideo.ZLPhotoConfiguration + @objc public class func resetConfiguration() + @objc public var sortAscending: Swift.Bool + @objc public var maxSelectCount: Swift.Int { + @objc get + @objc set + } + @objc public var maxVideoSelectCount: Swift.Int { + @objc get + @objc set + } + @objc public var minVideoSelectCount: Swift.Int { + @objc get + @objc set + } + @objc public var allowMixSelect: Swift.Bool + @objc public var maxPreviewCount: Swift.Int + @objc public var cellCornerRadio: CoreGraphics.CGFloat + @objc public var allowSelectImage: Swift.Bool + @objc public var allowSelectVideo: Swift.Bool + @objc public var allowSelectGif: Swift.Bool + @objc public var allowSelectLivePhoto: Swift.Bool + @objc public var allowTakePhotoInLibrary: Swift.Bool { + @objc get + @objc set + } + @objc public var allowEditImage: Swift.Bool { + @objc get + @objc set 
+ } + @objc public var allowEditVideo: Swift.Bool { + @objc get + @objc set + } + @objc public var animateSelectBtnWhenSelect: Swift.Bool + @objc public var selectBtnAnimationDuration: Swift.Double + @objc public var editAfterSelectThumbnailImage: Swift.Bool + @objc public var cropVideoAfterSelectThumbnail: Swift.Bool + @objc public var showClipDirectlyIfOnlyHasClipTool: Swift.Bool + @objc public var saveNewImageAfterEdit: Swift.Bool + @objc public var allowSlideSelect: Swift.Bool + @objc public var autoScrollWhenSlideSelectIsActive: Swift.Bool + @objc public var autoScrollMaxSpeed: CoreGraphics.CGFloat + @objc public var allowDragSelect: Swift.Bool + @objc public var allowSelectOriginal: Swift.Bool + @objc public var allowPreviewPhotos: Swift.Bool + @objc public var showPreviewButtonInAlbum: Swift.Bool + @objc public var showSelectCountOnDoneBtn: Swift.Bool + @objc public var columnCount: Swift.Int { + @objc get + @objc set + } + @objc public var maxEditVideoTime: Swift.Int + @objc public var maxSelectVideoDuration: Swift.Int + @objc public var minSelectVideoDuration: Swift.Int + @objc public var editImageConfiguration: HHSDKVideo.ZLEditImageConfiguration + @objc public var showCaptureImageOnTakePhotoBtn: Swift.Bool + @objc public var showSelectBtnWhenSingleSelect: Swift.Bool + @objc public var showSelectedMask: Swift.Bool + @objc public var showSelectedBorder: Swift.Bool + @objc public var showInvalidMask: Swift.Bool + @objc public var showSelectedIndex: Swift.Bool + @objc public var showSelectedPhotoPreview: Swift.Bool + @objc public var shouldAnialysisAsset: Swift.Bool + @objc public var timeout: Swift.Double + @objc public var languageType: HHSDKVideo.ZLLanguageType { + @objc get + @objc set + } + @objc public var useCustomCamera: Swift.Bool + @objc public var allowTakePhoto: Swift.Bool { + @objc get + @objc set + } + @objc public var allowRecordVideo: Swift.Bool { + @objc get + @objc set + } + @objc public var minRecordDuration: HHSDKVideo.Second { + @objc 
get + @objc set + } + @objc public var maxRecordDuration: HHSDKVideo.Second { + @objc get + @objc set + } + @objc public var cameraConfiguration: HHSDKVideo.ZLCameraConfiguration + @objc public var hudStyle: HHSDKVideo.ZLProgressHUD.HUDStyle + @objc public var canSelectAsset: ((Photos.PHAsset) -> Swift.Bool)? + @objc public var showAddPhotoButton: Swift.Bool + @objc public var showEnterSettingTips: Swift.Bool + @objc public var noAuthorityCallback: ((HHSDKVideo.ZLNoAuthorityType) -> Swift.Void)? + @objc public var operateBeforeDoneAction: ((UIKit.UIViewController, @escaping () -> Swift.Void) -> Swift.Void)? + @objc override dynamic public init() + @objc deinit +} +@objc public enum ZLNoAuthorityType : Swift.Int { + case library + case camera + case microphone + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +extension ZLPhotoConfiguration { + @discardableResult + public func sortAscending(_ ascending: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxSelectCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxVideoSelectCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func minVideoSelectCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowMixSelect(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxPreviewCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func cellCornerRadio(_ cornerRadio: CoreGraphics.CGFloat) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSelectImage(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func allowSelectVideo(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSelectGif(_ value: Swift.Bool) 
-> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSelectLivePhoto(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowTakePhotoInLibrary(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowEditImage(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowEditVideo(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func animateSelectBtnWhenSelect(_ animate: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func selectBtnAnimationDuration(_ duration: CoreFoundation.CFTimeInterval) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func editAfterSelectThumbnailImage(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func cropVideoAfterSelectThumbnail(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showClipDirectlyIfOnlyHasClipTool(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func saveNewImageAfterEdit(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSlideSelect(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func autoScrollWhenSlideSelectIsActive(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func autoScrollMaxSpeed(_ speed: CoreGraphics.CGFloat) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowDragSelect(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSelectOriginal(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowPreviewPhotos(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showPreviewButtonInAlbum(_ value: Swift.Bool) -> 
HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectCountOnDoneBtn(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func columnCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxEditVideoTime(_ second: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxSelectVideoDuration(_ duration: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func minSelectVideoDuration(_ duration: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func editImageConfiguration(_ configuration: HHSDKVideo.ZLEditImageConfiguration) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showCaptureImageOnTakePhotoBtn(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectBtnWhenSingleSelect(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectedMask(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectedBorder(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showInvalidMask(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectedIndex(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectedPhotoPreview(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func shouldAnialysisAsset(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func timeout(_ timeout: Foundation.TimeInterval) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func languageType(_ type: HHSDKVideo.ZLLanguageType) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func useCustomCamera(_ value: Swift.Bool) -> 
HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowTakePhoto(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowRecordVideo(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func minRecordDuration(_ duration: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxRecordDuration(_ duration: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func cameraConfiguration(_ configuration: HHSDKVideo.ZLCameraConfiguration) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func hudStyle(_ style: HHSDKVideo.ZLProgressHUD.HUDStyle) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func canSelectAsset(_ block: ((Photos.PHAsset) -> Swift.Bool)?) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func showAddPhotoButton(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func showEnterSettingTips(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func noAuthorityCallback(_ callback: ((HHSDKVideo.ZLNoAuthorityType) -> Swift.Void)?) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func operateBeforeDoneAction(_ block: ((UIKit.UIViewController, @escaping () -> Swift.Void) -> Swift.Void)?) -> HHSDKVideo.ZLPhotoConfiguration +} +@objc @_inheritsConvenienceInitializers public class ZLPhotoManager : ObjectiveC.NSObject { + @objc public class func saveImageToAlbum(image: UIKit.UIImage, completion: ((Swift.Bool, Photos.PHAsset?) -> Swift.Void)?) + @objc public class func saveVideoToAlbum(url: Foundation.URL, completion: ((Swift.Bool, Photos.PHAsset?) -> Swift.Void)?) 
+ @objc public class func fetchPhoto(in result: Photos.PHFetchResult<Photos.PHAsset>, ascending: Swift.Bool, allowSelectImage: Swift.Bool, allowSelectVideo: Swift.Bool, limitCount: Swift.Int = .max) -> [HHSDKVideo.ZLPhotoModel] + @objc public class func getPhotoAlbumList(ascending: Swift.Bool, allowSelectImage: Swift.Bool, allowSelectVideo: Swift.Bool, completion: ([HHSDKVideo.ZLAlbumListModel]) -> Swift.Void) + @objc public class func getCameraRollAlbum(allowSelectImage: Swift.Bool, allowSelectVideo: Swift.Bool, completion: @escaping (HHSDKVideo.ZLAlbumListModel) -> Swift.Void) + @discardableResult + @objc public class func fetchImage(for asset: Photos.PHAsset, size: CoreGraphics.CGSize, progress: ((CoreGraphics.CGFloat, Swift.Error?, Swift.UnsafeMutablePointer<ObjectiveC.ObjCBool>, [Swift.AnyHashable : Any]?) -> Swift.Void)? = nil, completion: @escaping (UIKit.UIImage?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @discardableResult + @objc public class func fetchOriginalImage(for asset: Photos.PHAsset, progress: ((CoreGraphics.CGFloat, Swift.Error?, Swift.UnsafeMutablePointer<ObjectiveC.ObjCBool>, [Swift.AnyHashable : Any]?) -> Swift.Void)? = nil, completion: @escaping (UIKit.UIImage?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @discardableResult + @objc public class func fetchOriginalImageData(for asset: Photos.PHAsset, progress: ((CoreGraphics.CGFloat, Swift.Error?, Swift.UnsafeMutablePointer<ObjectiveC.ObjCBool>, [Swift.AnyHashable : Any]?) -> Swift.Void)? 
= nil, completion: @escaping (Foundation.Data, [Swift.AnyHashable : Any]?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @objc public class func fetchLivePhoto(for asset: Photos.PHAsset, completion: @escaping (Photos.PHLivePhoto?, [Swift.AnyHashable : Any]?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @objc public class func fetchVideo(for asset: Photos.PHAsset, progress: ((CoreGraphics.CGFloat, Swift.Error?, Swift.UnsafeMutablePointer<ObjectiveC.ObjCBool>, [Swift.AnyHashable : Any]?) -> Swift.Void)? = nil, completion: @escaping (AVFoundation.AVPlayerItem?, [Swift.AnyHashable : Any]?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @objc public class func fetchAVAsset(forVideo asset: Photos.PHAsset, completion: @escaping (AVFoundation.AVAsset?, [Swift.AnyHashable : Any]?) -> Swift.Void) -> Photos.PHImageRequestID + @objc public class func fetchAssetFilePath(asset: Photos.PHAsset, completion: @escaping (Swift.String?) -> Swift.Void) + @objc override dynamic public init() + @objc deinit +} +extension ZLPhotoManager { + @objc dynamic public class func hasPhotoLibratyAuthority() -> Swift.Bool + @objc dynamic public class func hasCameraAuthority() -> Swift.Bool + @objc dynamic public class func hasMicrophoneAuthority() -> Swift.Bool +} +extension ZLPhotoModel { + public enum MediaType : Swift.Int { + case unknown + case image + case gif + case livePhoto + case video + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } +} +@objc public class ZLPhotoModel : ObjectiveC.NSObject { + final public let ident: Swift.String + final public let asset: Photos.PHAsset + public var type: HHSDKVideo.ZLPhotoModel.MediaType + public var duration: Swift.String + public var isSelected: Swift.Bool + public var editImage: UIKit.UIImage? 
{ + get + set + } + public var second: HHSDKVideo.Second { + get + } + public var whRatio: CoreGraphics.CGFloat { + get + } + public var previewSize: CoreGraphics.CGSize { + get + } + public var editImageModel: HHSDKVideo.ZLEditImageModel? + public init(asset: Photos.PHAsset) + public func transformAssetType(for asset: Photos.PHAsset) -> HHSDKVideo.ZLPhotoModel.MediaType + public func transformDuration(for asset: Photos.PHAsset) -> Swift.String + @objc override dynamic public init() + @objc deinit +} +extension ZLPhotoModel { + public static func == (lhs: HHSDKVideo.ZLPhotoModel, rhs: HHSDKVideo.ZLPhotoModel) -> Swift.Bool +} +@objc @_inheritsConvenienceInitializers public class ZLPhotoPreviewSheet : UIKit.UIView { + @objc public var selectImageBlock: (([UIKit.UIImage], [Photos.PHAsset], Swift.Bool) -> Swift.Void)? + @objc public var selectImageRequestErrorBlock: (([Photos.PHAsset], [Swift.Int]) -> Swift.Void)? + @objc public var cancelBlock: (() -> Swift.Void)? + @objc deinit + @objc convenience override dynamic public init(frame: CoreGraphics.CGRect) + @objc public init(selectedAssets: [Photos.PHAsset]? 
= nil) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc override dynamic public func layoutSubviews() + @objc public func showPreview(animate: Swift.Bool = true, sender: UIKit.UIViewController) + @objc public func showPhotoLibrary(sender: UIKit.UIViewController) + @objc public func previewAssets(sender: UIKit.UIViewController, assets: [Photos.PHAsset], index: Swift.Int, isOriginal: Swift.Bool, showBottomViewAndSelectBtn: Swift.Bool = true) +} +extension ZLPhotoPreviewSheet : UIKit.UIGestureRecognizerDelegate { + @objc override dynamic public func gestureRecognizerShouldBegin(_ gestureRecognizer: UIKit.UIGestureRecognizer) -> Swift.Bool +} +extension ZLPhotoPreviewSheet : UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegateFlowLayout { + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, sizeForItemAt indexPath: Foundation.IndexPath) -> CoreGraphics.CGSize + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, willDisplay cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, didSelectItemAt indexPath: Foundation.IndexPath) +} +extension ZLPhotoPreviewSheet : UIKit.UIImagePickerControllerDelegate, UIKit.UINavigationControllerDelegate { + @objc dynamic public func imagePickerController(_ picker: UIKit.UIImagePickerController, didFinishPickingMediaWithInfo info: [UIKit.UIImagePickerController.InfoKey : Any]) +} +extension ZLPhotoPreviewSheet : Photos.PHPhotoLibraryChangeObserver { + @objc dynamic public func 
photoLibraryDidChange(_ changeInstance: Photos.PHChange) +} +@objc @_inheritsConvenienceInitializers public class ZLPhotoUIConfiguration : ObjectiveC.NSObject { + @objc public enum CancelButtonStyle : Swift.Int { + case text + case image + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public class func `default`() -> HHSDKVideo.ZLPhotoUIConfiguration + @objc public class func resetConfiguration() + @objc public var style: HHSDKVideo.ZLPhotoBrowserStyle + @objc public var statusBarStyle: UIKit.UIStatusBarStyle + @objc public var navCancelButtonStyle: HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle + @objc public var showStatusBarInPreviewInterface: Swift.Bool + @objc public var navViewBlurEffectOfAlbumList: UIKit.UIBlurEffect? + @objc public var navViewBlurEffectOfPreview: UIKit.UIBlurEffect? + @objc public var bottomViewBlurEffectOfAlbumList: UIKit.UIBlurEffect? + @objc public var bottomViewBlurEffectOfPreview: UIKit.UIBlurEffect? + @objc public var customImageNames: [Swift.String] { + @objc get + @objc set + } + public var customImageForKey: [Swift.String : UIKit.UIImage?] { + get + set + } + @objc public var customImageForKey_objc: [Swift.String : UIKit.UIImage] { + @objc get + @objc set + } + public var customLanguageKeyValue: [HHSDKVideo.ZLLocalLanguageKey : Swift.String] { + get + set + } + @objc public var customLanguageKeyValue_objc: [Swift.String : Swift.String] { + @objc get + @objc set + } + @objc public var themeFontName: Swift.String? 
{ + @objc get + @objc set + } + @objc public var sheetTranslucentColor: UIKit.UIColor + @objc public var sheetBtnBgColor: UIKit.UIColor + @objc public var sheetBtnTitleColor: UIKit.UIColor + @objc public var sheetBtnTitleTintColor: UIKit.UIColor + @objc public var navBarColor: UIKit.UIColor + @objc public var navBarColorOfPreviewVC: UIKit.UIColor + @objc public var navTitleColor: UIKit.UIColor + @objc public var navTitleColorOfPreviewVC: UIKit.UIColor + @objc public var navEmbedTitleViewBgColor: UIKit.UIColor + @objc public var albumListBgColor: UIKit.UIColor + @objc public var embedAlbumListTranslucentColor: UIKit.UIColor + @objc public var albumListTitleColor: UIKit.UIColor + @objc public var albumListCountColor: UIKit.UIColor + @objc public var separatorColor: UIKit.UIColor + @objc public var thumbnailBgColor: UIKit.UIColor + @objc public var previewVCBgColor: UIKit.UIColor + @objc public var bottomToolViewBgColor: UIKit.UIColor + @objc public var bottomToolViewBgColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewBtnNormalTitleColor: UIKit.UIColor + @objc public var bottomToolViewDoneBtnNormalTitleColor: UIKit.UIColor + @objc public var bottomToolViewBtnNormalTitleColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewDoneBtnNormalTitleColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewBtnDisableTitleColor: UIKit.UIColor + @objc public var bottomToolViewDoneBtnDisableTitleColor: UIKit.UIColor + @objc public var bottomToolViewBtnDisableTitleColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewDoneBtnDisableTitleColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewBtnNormalBgColor: UIKit.UIColor + @objc public var bottomToolViewBtnNormalBgColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewBtnDisableBgColor: UIKit.UIColor + @objc public var bottomToolViewBtnDisableBgColorOfPreviewVC: UIKit.UIColor + @objc public var selectMorePhotoWhenAuthIsLismitedTitleColor: UIKit.UIColor + @objc public 
var cameraRecodeProgressColor: UIKit.UIColor + @objc public var selectedMaskColor: UIKit.UIColor + @objc public var selectedBorderColor: UIKit.UIColor + @objc public var invalidMaskColor: UIKit.UIColor + @objc public var indexLabelTextColor: UIKit.UIColor + @objc public var indexLabelBgColor: UIKit.UIColor + @objc public var cameraCellBgColor: UIKit.UIColor + @objc public var adjustSliderNormalColor: UIKit.UIColor + @objc public var adjustSliderTintColor: UIKit.UIColor + @objc override dynamic public init() + @objc deinit +} +@objc public enum ZLPhotoBrowserStyle : Swift.Int { + case embedAlbumList + case externalAlbumList + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +extension ZLPhotoUIConfiguration { + @discardableResult + public func style(_ style: HHSDKVideo.ZLPhotoBrowserStyle) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func statusBarStyle(_ statusBarStyle: UIKit.UIStatusBarStyle) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navCancelButtonStyle(_ style: HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func showStatusBarInPreviewInterface(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navViewBlurEffectOfAlbumList(_ effect: UIKit.UIBlurEffect?) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navViewBlurEffectOfPreview(_ effect: UIKit.UIBlurEffect?) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomViewBlurEffectOfAlbumList(_ effect: UIKit.UIBlurEffect?) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomViewBlurEffectOfPreview(_ effect: UIKit.UIBlurEffect?) 
-> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func customImageNames(_ names: [Swift.String]) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func customImageForKey(_ map: [Swift.String : UIKit.UIImage?]) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func customLanguageKeyValue(_ map: [HHSDKVideo.ZLLocalLanguageKey : Swift.String]) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func themeFontName(_ name: Swift.String) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func sheetTranslucentColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func sheetBtnBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func sheetBtnTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func sheetBtnTitleTintColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navBarColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navBarColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navEmbedTitleViewBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func albumListBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func embedAlbumListTranslucentColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func albumListTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func albumListCountColor(_ 
color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func separatorColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func thumbnailBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func previewVCBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBgColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnNormalTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewDoneBtnNormalTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnNormalTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewDoneBtnNormalTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnDisableTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewDoneBtnDisableTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnDisableTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewDoneBtnDisableTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnNormalBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnNormalBgColorOfPreviewVC(_ color: UIKit.UIColor) -> 
HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnDisableBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnDisableBgColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func selectMorePhotoWhenAuthIsLismitedTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func cameraRecodeProgressColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func selectedMaskColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func selectedBorderColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func invalidMaskColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func indexLabelTextColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func indexLabelBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func cameraCellBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func adjustSliderNormalColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func adjustSliderTintColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration +} +@objc @_hasMissingDesignatedInitializers public class ZLProgressHUD : UIKit.UIView { + @objc public enum HUDStyle : Swift.Int { + case light + case lightBlur + case dark + case darkBlur + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc deinit + @objc public init(style: HHSDKVideo.ZLProgressHUD.HUDStyle) + @objc public func show(timeout: Foundation.TimeInterval = 100) + @objc public func hide() + @objc override dynamic 
public init(frame: CoreGraphics.CGRect) +} +@objc @_hasMissingDesignatedInitializers public class ZLTextStickerState : ObjectiveC.NSObject { + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class ZLVideoManager : ObjectiveC.NSObject { + @objc public class func mergeVideos(fileUrls: [Foundation.URL], completion: @escaping ((Foundation.URL?, Swift.Error?) -> Swift.Void)) + @objc override dynamic public init() + @objc deinit +} +extension ZLVideoManager { + @objc dynamic public class func exportVideo(for asset: Photos.PHAsset, exportType: HHSDKVideo.ZLVideoManager.ExportType = .mov, presetName: Swift.String = AVAssetExportPresetMediumQuality, complete: @escaping ((Foundation.URL?, Swift.Error?) -> Swift.Void)) + @objc dynamic public class func exportVideo(for asset: AVFoundation.AVAsset, range: CoreMedia.CMTimeRange = CMTimeRange(start: .zero, duration: .positiveInfinity), exportType: HHSDKVideo.ZLVideoManager.ExportType = .mov, presetName: Swift.String = AVAssetExportPresetMediumQuality, complete: @escaping ((Foundation.URL?, Swift.Error?) 
-> Swift.Void)) +} +extension ZLVideoManager { + @objc public enum ExportType : Swift.Int { + case mov + case mp4 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } +} +extension HHSDKVideo.AES.Error : Swift.Equatable {} +extension HHSDKVideo.AES.Error : Swift.Hashable {} +extension HHSDKVideo.AES.Variant : Swift.Equatable {} +extension HHSDKVideo.AES.Variant : Swift.Hashable {} +extension HHSDKVideo.AES.Variant : Swift.RawRepresentable {} +extension HHSDKVideo.Bit : Swift.Equatable {} +extension HHSDKVideo.Bit : Swift.Hashable {} +extension HHSDKVideo.Bit : Swift.RawRepresentable {} +extension HHSDKVideo.Blowfish.Error : Swift.Equatable {} +extension HHSDKVideo.Blowfish.Error : Swift.Hashable {} +extension HHSDKVideo.CBC.Error : Swift.Equatable {} +extension HHSDKVideo.CBC.Error : Swift.Hashable {} +extension HHSDKVideo.CCM : HHSDKVideo.BlockMode {} +extension HHSDKVideo.CCM.Error : Swift.Equatable {} +extension HHSDKVideo.CCM.Error : Swift.Hashable {} +extension HHSDKVideo.CFB.Error : Swift.Equatable {} +extension HHSDKVideo.CFB.Error : Swift.Hashable {} +extension HHSDKVideo.ChaCha20.Error : Swift.Equatable {} +extension HHSDKVideo.ChaCha20.Error : Swift.Hashable {} +extension HHSDKVideo.CipherError : Swift.Equatable {} +extension HHSDKVideo.CipherError : Swift.Hashable {} +extension HHSDKVideo.CMAC.Error : Swift.Equatable {} +extension HHSDKVideo.CMAC.Error : Swift.Hashable {} +extension HHSDKVideo.CTR : HHSDKVideo.BlockMode {} +extension HHSDKVideo.CTR.Error : Swift.Equatable {} +extension HHSDKVideo.CTR.Error : Swift.Hashable {} +extension HHSDKVideo.DateTransform.Unit : Swift.Equatable {} +extension HHSDKVideo.DateTransform.Unit : Swift.Hashable {} +extension HHSDKVideo.DateTransform.Unit : Swift.RawRepresentable {} +extension HHSDKVideo.DGElasticPullToRefreshState : Swift.Equatable {} +extension HHSDKVideo.DGElasticPullToRefreshState : Swift.Hashable {} +extension 
HHSDKVideo.DGElasticPullToRefreshState : Swift.RawRepresentable {} +extension HHSDKVideo.EKAlertMessage.ImagePosition : Swift.Equatable {} +extension HHSDKVideo.EKAlertMessage.ImagePosition : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.DisplayMode : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.DisplayMode : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.NotificationHapticFeedback : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.NotificationHapticFeedback : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.Position : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.Position : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.StatusBar : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.StatusBar : Swift.Hashable {} +extension HHSDKVideo.GCM.Mode : Swift.Equatable {} +extension HHSDKVideo.GCM.Mode : Swift.Hashable {} +extension HHSDKVideo.GCM.Error : Swift.Equatable {} +extension HHSDKVideo.GCM.Error : Swift.Hashable {} +extension HHSDKVideo.HHBaseCallingState : Swift.Equatable {} +extension HHSDKVideo.HHBaseCallingState : Swift.Hashable {} +extension HHSDKVideo.HHBaseCallingState : Swift.RawRepresentable {} +extension HHSDKVideo.HHMediaType : Swift.Equatable {} +extension HHSDKVideo.HHMediaType : Swift.Hashable {} +extension HHSDKVideo.HHMediaType : Swift.RawRepresentable {} +extension HHSDKVideo.DateFormat : Swift.Equatable {} +extension HHSDKVideo.DateFormat : Swift.Hashable {} +extension HHSDKVideo.DateFormat : Swift.RawRepresentable {} 
+extension HHSDKVideo.HHConsType : Swift.Equatable {} +extension HHSDKVideo.HHConsType : Swift.Hashable {} +extension HHSDKVideo.HHConsType : Swift.RawRepresentable {} +extension HHSDKVideo.HHFileCacheManager.HHAssetPathType : Swift.Equatable {} +extension HHSDKVideo.HHFileCacheManager.HHAssetPathType : Swift.Hashable {} +extension HHSDKVideo.HHFileCacheManager.HHFileFormat : Swift.Equatable {} +extension HHSDKVideo.HHFileCacheManager.HHFileFormat : Swift.Hashable {} +extension HHSDKVideo.HHFileCacheManager.HHFileFormat : Swift.RawRepresentable {} +extension HHSDKVideo.HHLogMode : Swift.Equatable {} +extension HHSDKVideo.HHLogMode : Swift.Hashable {} +extension HHSDKVideo.HHLogMode : Swift.RawRepresentable {} +extension HHSDKVideo.HHCallType : Swift.Equatable {} +extension HHSDKVideo.HHCallType : Swift.Hashable {} +extension HHSDKVideo.HHCallType : Swift.RawRepresentable {} +extension HHSDKVideo.HHServerType : Swift.Equatable {} +extension HHSDKVideo.HHServerType : Swift.Hashable {} +extension HHSDKVideo.HHRequestMethod : Swift.Equatable {} +extension HHSDKVideo.HHRequestMethod : Swift.Hashable {} +extension HHSDKVideo.HHPagerViewTransformerType : Swift.Equatable {} +extension HHSDKVideo.HHPagerViewTransformerType : Swift.Hashable {} +extension HHSDKVideo.HHPagerViewTransformerType : Swift.RawRepresentable {} +extension HHSDKVideo.HHRealNameType : Swift.Equatable {} +extension HHSDKVideo.HHRealNameType : Swift.Hashable {} +extension HHSDKVideo.HHRealNameType : Swift.RawRepresentable {} +extension HHSDKVideo.TrtcLog : Swift.Equatable {} +extension HHSDKVideo.TrtcLog : Swift.Hashable {} +extension HHSDKVideo.TrtcLog : Swift.RawRepresentable {} +extension HHSDKVideo.TrtcError : Swift.Equatable {} +extension HHSDKVideo.TrtcError : Swift.Hashable {} +extension HHSDKVideo.TrtcError : Swift.RawRepresentable {} +extension HHSDKVideo.hhToastPosition : Swift.Equatable {} +extension HHSDKVideo.hhToastPosition : Swift.Hashable {} +extension HHSDKVideo.HKDF.Error : 
Swift.Equatable {} +extension HHSDKVideo.HKDF.Error : Swift.Hashable {} +extension HHSDKVideo.HMAC.Error : Swift.Equatable {} +extension HHSDKVideo.HMAC.Error : Swift.Hashable {} +extension HHSDKVideo.HMAC.Variant : Swift.Equatable {} +extension HHSDKVideo.HMAC.Variant : Swift.Hashable {} +extension HHSDKVideo.ItemClass : Swift.Equatable {} +extension HHSDKVideo.ItemClass : Swift.Hashable {} +extension HHSDKVideo.ProtocolType : Swift.Equatable {} +extension HHSDKVideo.ProtocolType : Swift.Hashable {} +extension HHSDKVideo.AuthenticationType : Swift.Equatable {} +extension HHSDKVideo.AuthenticationType : Swift.Hashable {} +extension HHSDKVideo.Accessibility : Swift.Equatable {} +extension HHSDKVideo.Accessibility : Swift.Hashable {} +extension HHSDKVideo.Status : Swift.Equatable {} +extension HHSDKVideo.Status : Swift.Hashable {} +extension HHSDKVideo.MappingType : Swift.Equatable {} +extension HHSDKVideo.MappingType : Swift.Hashable {} +extension HHSDKVideo.OCB.Mode : Swift.Equatable {} +extension HHSDKVideo.OCB.Mode : Swift.Hashable {} +extension HHSDKVideo.OCB.Error : Swift.Equatable {} +extension HHSDKVideo.OCB.Error : Swift.Hashable {} +extension HHSDKVideo.OFB.Error : Swift.Equatable {} +extension HHSDKVideo.OFB.Error : Swift.Hashable {} +extension HHSDKVideo.Padding : Swift.Equatable {} +extension HHSDKVideo.Padding : Swift.Hashable {} +extension HHSDKVideo.PKCS5.PBKDF1.Error : Swift.Equatable {} +extension HHSDKVideo.PKCS5.PBKDF1.Error : Swift.Hashable {} +extension HHSDKVideo.PKCS5.PBKDF1.Variant : Swift.Equatable {} +extension HHSDKVideo.PKCS5.PBKDF1.Variant : Swift.Hashable {} +extension HHSDKVideo.PKCS5.PBKDF2.Error : Swift.Equatable {} +extension HHSDKVideo.PKCS5.PBKDF2.Error : Swift.Hashable {} +extension HHSDKVideo.PCBC.Error : Swift.Equatable {} +extension HHSDKVideo.PCBC.Error : Swift.Hashable {} +extension HHSDKVideo.PermissionStatus : Swift.Equatable {} +extension HHSDKVideo.PermissionStatus : Swift.Hashable {} +extension 
HHSDKVideo.PermissionStatus : Swift.RawRepresentable {} +extension HHSDKVideo.HHBasePermissionType : Swift.Equatable {} +extension HHSDKVideo.HHBasePermissionType : Swift.Hashable {} +extension HHSDKVideo.HHBasePermissionType : Swift.RawRepresentable {} +extension HHSDKVideo.Poly1305.Error : Swift.Equatable {} +extension HHSDKVideo.Poly1305.Error : Swift.Hashable {} +extension HHSDKVideo.Rabbit.Error : Swift.Equatable {} +extension HHSDKVideo.Rabbit.Error : Swift.Hashable {} +extension HHSDKVideo.Reachability.NetworkStatus : Swift.Equatable {} +extension HHSDKVideo.Reachability.NetworkStatus : Swift.Hashable {} +extension HHSDKVideo.RecordImgType : Swift.Equatable {} +extension HHSDKVideo.RecordImgType : Swift.Hashable {} +extension HHSDKVideo.RecordImgType : Swift.RawRepresentable {} +extension HHSDKVideo.SHA2.Variant : Swift.Equatable {} +extension HHSDKVideo.SHA2.Variant : Swift.Hashable {} +extension HHSDKVideo.SHA3.Variant : Swift.Equatable {} +extension HHSDKVideo.SHA3.Variant : Swift.Hashable {} +extension HHSDKVideo.HHIMCmd : Swift.Equatable {} +extension HHSDKVideo.HHIMCmd : Swift.Hashable {} +extension HHSDKVideo.HHIMCmd : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset : Swift.Equatable {} +extension HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.FocusMode : Swift.Equatable {} +extension HHSDKVideo.ZLCameraConfiguration.FocusMode : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.FocusMode : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.ExposureMode : Swift.Equatable {} +extension HHSDKVideo.ZLCameraConfiguration.ExposureMode : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.ExposureMode : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.FlashMode : Swift.Equatable {} +extension 
HHSDKVideo.ZLCameraConfiguration.FlashMode : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.FlashMode : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.VideoExportType : Swift.Equatable {} +extension HHSDKVideo.ZLCameraConfiguration.VideoExportType : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.VideoExportType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLEditImageConfiguration.EditTool : Swift.Equatable {} +extension HHSDKVideo.ZLEditImageConfiguration.EditTool : Swift.Hashable {} +extension HHSDKVideo.ZLEditImageConfiguration.EditTool : Swift.RawRepresentable {} +extension HHSDKVideo.ZLEditImageConfiguration.AdjustTool : Swift.Equatable {} +extension HHSDKVideo.ZLEditImageConfiguration.AdjustTool : Swift.Hashable {} +extension HHSDKVideo.ZLEditImageConfiguration.AdjustTool : Swift.RawRepresentable {} +extension HHSDKVideo.ZLFilterType : Swift.Equatable {} +extension HHSDKVideo.ZLFilterType : Swift.Hashable {} +extension HHSDKVideo.ZLFilterType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLURLType : Swift.Equatable {} +extension HHSDKVideo.ZLURLType : Swift.Hashable {} +extension HHSDKVideo.ZLURLType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLLanguageType : Swift.Equatable {} +extension HHSDKVideo.ZLLanguageType : Swift.Hashable {} +extension HHSDKVideo.ZLLanguageType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLNoAuthorityType : Swift.Equatable {} +extension HHSDKVideo.ZLNoAuthorityType : Swift.Hashable {} +extension HHSDKVideo.ZLNoAuthorityType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLPhotoModel.MediaType : Swift.Equatable {} +extension HHSDKVideo.ZLPhotoModel.MediaType : Swift.Hashable {} +extension HHSDKVideo.ZLPhotoModel.MediaType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle : Swift.Equatable {} +extension HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle : Swift.Hashable {} +extension 
HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle : Swift.RawRepresentable {} +extension HHSDKVideo.ZLPhotoBrowserStyle : Swift.Equatable {} +extension HHSDKVideo.ZLPhotoBrowserStyle : Swift.Hashable {} +extension HHSDKVideo.ZLPhotoBrowserStyle : Swift.RawRepresentable {} +extension HHSDKVideo.ZLProgressHUD.HUDStyle : Swift.Equatable {} +extension HHSDKVideo.ZLProgressHUD.HUDStyle : Swift.Hashable {} +extension HHSDKVideo.ZLProgressHUD.HUDStyle : Swift.RawRepresentable {} +extension HHSDKVideo.ZLVideoManager.ExportType : Swift.Equatable {} +extension HHSDKVideo.ZLVideoManager.ExportType : Swift.Hashable {} +extension HHSDKVideo.ZLVideoManager.ExportType : Swift.RawRepresentable {} diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64-apple-ios-simulator.swiftmodule b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64-apple-ios-simulator.swiftmodule new file mode 100644 index 0000000..2ae19d4 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64-apple-ios-simulator.swiftmodule differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64.swiftdoc b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64.swiftdoc new file mode 100644 index 0000000..b8ddb67 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64.swiftdoc differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64.swiftinterface b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64.swiftinterface new file mode 100644 index 0000000..b95ec3b --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64.swiftinterface @@ -0,0 +1,5650 @@ +// swift-interface-format-version: 1.0 +// swift-compiler-version: Apple Swift version 5.4.2 (swiftlang-1205.0.28.2 clang-1205.0.19.57) +// swift-module-flags: -target x86_64-apple-ios10.0-simulator 
-enable-objc-interop -enable-library-evolution -swift-version 5 -enforce-exclusivity=checked -O -module-name HHSDKVideo +import AVFoundation +import AVKit +import Accelerate +import CoreGraphics +import CoreLocation +import CoreMotion +import CoreTelephony +import Darwin +import Dispatch +import Foundation +@_exported import HHSDKVideo +import ImageIO +import LocalAuthentication +import MobileCoreServices +import ObjectiveC +import Photos +import PhotosUI +import Security +import SecurityKit +import Swift +import SystemConfiguration +import UIKit +import UserNotifications +import WebKit +public protocol AEAD { + static var kLen: Swift.Int { get } + static var ivRange: Swift.Range<Swift.Int> { get } +} +@_hasMissingDesignatedInitializers final public class AEADChaCha20Poly1305 : HHSDKVideo.AEAD { + public static let kLen: Swift.Int + public static var ivRange: Swift.Range<Swift.Int> + public static func encrypt(_ plainText: Swift.Array<Swift.UInt8>, key: Swift.Array<Swift.UInt8>, iv: Swift.Array<Swift.UInt8>, authenticationHeader: Swift.Array<Swift.UInt8>) throws -> (cipherText: Swift.Array<Swift.UInt8>, authenticationTag: Swift.Array<Swift.UInt8>) + public static func decrypt(_ cipherText: Swift.Array<Swift.UInt8>, key: Swift.Array<Swift.UInt8>, iv: Swift.Array<Swift.UInt8>, authenticationHeader: Swift.Array<Swift.UInt8>, authenticationTag: Swift.Array<Swift.UInt8>) throws -> (plainText: Swift.Array<Swift.UInt8>, success: Swift.Bool) + @objc deinit +} +final public class AES { + public enum Error : Swift.Error { + case invalidKeySize + case dataPaddingRequired + case invalidData + public static func == (a: HHSDKVideo.AES.Error, b: HHSDKVideo.AES.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public enum Variant : Swift.Int { + case aes128, aes192, aes256 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + public 
static let blockSize: Swift.Int + final public let keySize: Swift.Int + final public let variant: HHSDKVideo.AES.Variant + public init(key: Swift.Array<Swift.UInt8>, blockMode: HHSDKVideo.BlockMode, padding: HHSDKVideo.Padding = .pkcs7) throws + @objc deinit +} +extension AES : HHSDKVideo.Cipher { + final public func encrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + final public func decrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +extension AES { + convenience public init(key: Swift.String, iv: Swift.String, padding: HHSDKVideo.Padding = .pkcs7) throws +} +extension AES : HHSDKVideo.Cryptors { + final public func makeEncryptor() throws -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable + final public func makeDecryptor() throws -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable +} +extension Array where Element == Swift.UInt8 { + public init(hex: Swift.String) + public func toHexString() -> Swift.String +} +extension Array where Element == Swift.UInt8 { + @available(*, deprecated) + public func chunks(size chunksize: Swift.Int) -> Swift.Array<Swift.Array<Element>> + public func md5() -> [Element] + public func sha1() -> [Element] + public func sha224() -> [Element] + public func sha256() -> [Element] + public func sha384() -> [Element] + public func sha512() -> [Element] + public func sha2(_ variant: HHSDKVideo.SHA2.Variant) -> [Element] + public func sha3(_ variant: HHSDKVideo.SHA3.Variant) -> [Element] + public func crc32(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.UInt32 + public func crc32c(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.UInt32 + public func crc16(seed: Swift.UInt16? 
= nil) -> Swift.UInt16 + public func encrypt(cipher: HHSDKVideo.Cipher) throws -> [Element] + public func decrypt(cipher: HHSDKVideo.Cipher) throws -> [Element] + public func authenticate<A>(with authenticator: A) throws -> [Element] where A : HHSDKVideo.CryptoAuthenticator +} +extension Array where Element == Swift.UInt8 { + public func toBase64() -> Swift.String? + public init(base64: Swift.String) +} +public protocol CryptoAuthenticator { + func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +public enum Bit : Swift.Int { + case zero + case one + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@_hasMissingDesignatedInitializers public class BlockDecryptor : HHSDKVideo.Cryptor, HHSDKVideo.Updatable { + public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> + public func seek(to position: Swift.Int) throws + @objc deinit +} +public typealias CipherOperationOnBlock = (Swift.ArraySlice<Swift.UInt8>) -> Swift.Array<Swift.UInt8>? 
+public protocol BlockMode { + var options: HHSDKVideo.BlockModeOption { get } + func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +public struct BlockModeOption : Swift.OptionSet { + public let rawValue: Swift.Int + public init(rawValue: Swift.Int) + public typealias ArrayLiteralElement = HHSDKVideo.BlockModeOption + public typealias Element = HHSDKVideo.BlockModeOption + public typealias RawValue = Swift.Int +} +final public class Blowfish { + public enum Error : Swift.Error { + case dataPaddingRequired + case invalidKeyOrInitializationVector + case invalidInitializationVector + case invalidBlockMode + public static func == (a: HHSDKVideo.Blowfish.Error, b: HHSDKVideo.Blowfish.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public static let blockSize: Swift.Int + final public let keySize: Swift.Int + public init(key: Swift.Array<Swift.UInt8>, blockMode: HHSDKVideo.BlockMode = CBC(iv: Array<UInt8>(repeating: 0, count: Blowfish.blockSize)), padding: HHSDKVideo.Padding) throws + @objc deinit +} +extension Blowfish : HHSDKVideo.Cipher { + final public func encrypt<C>(_ bytes: C) throws -> Swift.Array<Swift.UInt8> where C : Swift.Collection, C.Element == Swift.UInt8, C.Index == Swift.Int + final public func decrypt<C>(_ bytes: C) throws -> Swift.Array<Swift.UInt8> where C : Swift.Collection, C.Element == Swift.UInt8, C.Index == Swift.Int +} +extension Blowfish { + convenience public init(key: Swift.String, iv: Swift.String, padding: HHSDKVideo.Padding = .pkcs7) throws +} +@_hasMissingDesignatedInitializers public class BusyPics { + public static let `default`: HHSDKVideo.BusyPics + public func cacheImgs() + public func getImgs() -> [Foundation.URL?] 
+ @objc deinit +} +public struct CallDoctorModel : HHSDKVideo.Mappable { + public var doctor: HHSDKVideo.HHDoctorModel? + public var order: HHSDKVideo.OrderModel? + public var appoint: Swift.String? + public var pushFlowUrl: Swift.String? + public var realPatientUuid: Swift.Int? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct CBC : HHSDKVideo.BlockMode { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.CBC.Error, b: HHSDKVideo.CBC.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +@_inheritsConvenienceInitializers final public class CBCMAC : HHSDKVideo.CMAC { + override final public func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + override public init(key: Swift.Array<Swift.UInt8>) throws + @objc deinit +} +public struct CCM { + public enum Error : Swift.Error { + case invalidInitializationVector + case invalidParameter + case fail + public static func == (a: HHSDKVideo.CCM.Error, b: HHSDKVideo.CCM.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public var authenticationTag: Swift.Array<Swift.UInt8>? + public init(iv: Swift.Array<Swift.UInt8>, tagLength: Swift.Int, messageLength: Swift.Int, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? 
= nil) + public init(iv: Swift.Array<Swift.UInt8>, tagLength: Swift.Int, messageLength: Swift.Int, authenticationTag: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? = nil) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +public struct CFB : HHSDKVideo.BlockMode { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.CFB.Error, b: HHSDKVideo.CFB.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +final public class ChaCha20 { + public enum Error : Swift.Error { + case invalidKeyOrInitializationVector + case notSupported + public static func == (a: HHSDKVideo.ChaCha20.Error, b: HHSDKVideo.ChaCha20.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public static let blockSize: Swift.Int + final public let keySize: Swift.Int + public init(key: Swift.Array<Swift.UInt8>, iv nonce: Swift.Array<Swift.UInt8>) throws + @objc deinit +} +extension ChaCha20 : HHSDKVideo.Cipher { + final public func encrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + final public func decrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +extension ChaCha20 { + public struct ChaChaEncryptor : HHSDKVideo.Cryptor, HHSDKVideo.Updatable { + public mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) 
throws -> Swift.Array<Swift.UInt8> + public func seek(to: Swift.Int) throws + } +} +extension ChaCha20 { + public struct ChaChaDecryptor : HHSDKVideo.Cryptor, HHSDKVideo.Updatable { + public mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = true) throws -> Swift.Array<Swift.UInt8> + public func seek(to: Swift.Int) throws + } +} +extension ChaCha20 : HHSDKVideo.Cryptors { + final public func makeEncryptor() -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable + final public func makeDecryptor() -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable +} +extension ChaCha20 { + convenience public init(key: Swift.String, iv: Swift.String) throws +} +public struct ChatApi { +} +@_hasMissingDesignatedInitializers final public class Checksum { + @objc deinit +} +extension Checksum { + public static func crc32(_ message: Swift.Array<Swift.UInt8>, seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.UInt32 + public static func crc32c(_ message: Swift.Array<Swift.UInt8>, seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.UInt32 + public static func crc16(_ message: Swift.Array<Swift.UInt8>, seed: Swift.UInt16? 
= nil) -> Swift.UInt16 +} +public enum CipherError : Swift.Error { + case encrypt + case decrypt + public static func == (a: HHSDKVideo.CipherError, b: HHSDKVideo.CipherError) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +public protocol Cipher : AnyObject { + var keySize: Swift.Int { get } + func encrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + func encrypt(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + func decrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + func decrypt(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +extension Cipher { + public func encrypt(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + public func decrypt(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +public protocol CipherModeWorker { + var cipherOperation: HHSDKVideo.CipherOperationOnBlock { get } + var additionalBufferSize: Swift.Int { get } + mutating func encrypt(block plaintext: Swift.ArraySlice<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + mutating func decrypt(block ciphertext: Swift.ArraySlice<Swift.UInt8>) -> Swift.Array<Swift.UInt8> +} +public protocol BlockModeWorker : HHSDKVideo.CipherModeWorker { + var blockSize: Swift.Int { get } +} +public protocol CounterModeWorker : HHSDKVideo.CipherModeWorker { + associatedtype Counter + var counter: Self.Counter { get set } +} +public protocol SeekableModeWorker : HHSDKVideo.CipherModeWorker { + mutating func seek(to position: Swift.Int) throws +} +public protocol StreamModeWorker : HHSDKVideo.CipherModeWorker { +} +public protocol FinalizingEncryptModeWorker : HHSDKVideo.CipherModeWorker { + mutating func finalize(encrypt ciphertext: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.ArraySlice<Swift.UInt8> +} +public protocol FinalizingDecryptModeWorker : HHSDKVideo.CipherModeWorker { + @discardableResult + 
mutating func willDecryptLast(bytes ciphertext: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.ArraySlice<Swift.UInt8> + mutating func didDecryptLast(bytes plaintext: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.ArraySlice<Swift.UInt8> + mutating func finalize(decrypt plaintext: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.ArraySlice<Swift.UInt8> +} +public class CMAC : HHSDKVideo.CryptoAuthenticator { + public enum Error : Swift.Error { + case wrongKeyLength + public static func == (a: HHSDKVideo.CMAC.Error, b: HHSDKVideo.CMAC.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(key: Swift.Array<Swift.UInt8>) throws + public func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + public func authenticate(_ bytes: Swift.Array<Swift.UInt8>, cipher: HHSDKVideo.Cipher) throws -> Swift.Array<Swift.UInt8> + @objc deinit +} +open class CodableTransform<T> : HHSDKVideo.TransformType where T : Swift.Decodable, T : Swift.Encodable { + public typealias Object = T + public typealias JSON = Any + public init() + open func transformFromJSON(_ value: Any?) -> HHSDKVideo.CodableTransform<T>.Object? + open func transformToJSON(_ value: T?) -> HHSDKVideo.CodableTransform<T>.JSON? + @objc deinit +} +public struct CommentApi { +} +@objc @_inheritsConvenienceInitializers public class CommentBaseVC : UIKit.UIViewController { + @objc override dynamic public func viewWillAppear(_ animated: Swift.Bool) + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) 
+ @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class CommentVC : HHSDKVideo.CommentBaseVC { + @objc override dynamic public func viewDidLoad() + public static func show(_ orderId: Swift.String, docId: Swift.String, uuid: Swift.Int?, type: HHSDKVideo.HHCallType?, _ model: HHSDKVideo.HHGetQuesetionModel?) + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +public protocol Cryptor { + mutating func seek(to: Swift.Int) throws +} +public protocol Cryptors : AnyObject { + func makeEncryptor() throws -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable + func makeDecryptor() throws -> HHSDKVideo.Cryptor & HHSDKVideo.Updatable + static func randomIV(_ blockSize: Swift.Int) -> Swift.Array<Swift.UInt8> +} +extension Cryptors { + public static func randomIV(_ count: Swift.Int) -> Swift.Array<Swift.UInt8> +} +public struct CTR { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.CTR.Error, b: HHSDKVideo.CTR.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>, counter: Swift.Int = 0) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +open class CustomDateFormatTransform : HHSDKVideo.DateFormatterTransform { + public init(formatString: Swift.String) + override public init(dateFormatter: Foundation.DateFormatter) + @objc deinit +} +extension Data { + public func checksum() -> Swift.UInt16 + public func md5() -> Foundation.Data + public func sha1() -> Foundation.Data + public func 
sha224() -> Foundation.Data + public func sha256() -> Foundation.Data + public func sha384() -> Foundation.Data + public func sha512() -> Foundation.Data + public func sha3(_ variant: HHSDKVideo.SHA3.Variant) -> Foundation.Data + public func crc32(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Foundation.Data + public func crc32c(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Foundation.Data + public func crc16(seed: Swift.UInt16? = nil) -> Foundation.Data + public func encrypt(cipher: HHSDKVideo.Cipher) throws -> Foundation.Data + public func decrypt(cipher: HHSDKVideo.Cipher) throws -> Foundation.Data + public func authenticate(with authenticator: HHSDKVideo.CryptoAuthenticator) throws -> Foundation.Data +} +extension Data { + public init(hex: Swift.String) + public var bytes: Swift.Array<Swift.UInt8> { + get + } + public func toHexString() -> Swift.String +} +open class DataTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.Data + public typealias JSON = Swift.String + public init() + open func transformFromJSON(_ value: Any?) -> Foundation.Data? + open func transformToJSON(_ value: Foundation.Data?) -> Swift.String? + @objc deinit +} +open class DateFormatterTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.Date + public typealias JSON = Swift.String + final public let dateFormatter: Foundation.DateFormatter + public init(dateFormatter: Foundation.DateFormatter) + open func transformFromJSON(_ value: Any?) -> Foundation.Date? + open func transformToJSON(_ value: Foundation.Date?) -> Swift.String? 
+ @objc deinit +} +open class DateTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.Date + public typealias JSON = Swift.Double + public enum Unit : Foundation.TimeInterval { + case seconds + case milliseconds + public init?(rawValue: Foundation.TimeInterval) + public typealias RawValue = Foundation.TimeInterval + public var rawValue: Foundation.TimeInterval { + get + } + } + public init(unit: HHSDKVideo.DateTransform.Unit = .seconds) + open func transformFromJSON(_ value: Any?) -> Foundation.Date? + open func transformToJSON(_ value: Foundation.Date?) -> Swift.Double? + @objc deinit +} +public struct DGElasticPullToRefreshConstants { + public static var WaveMaxHeight: CoreGraphics.CGFloat + public static var MinOffsetToPull: CoreGraphics.CGFloat + public static var LoadingContentInset: CoreGraphics.CGFloat + public static var LoadingViewSize: CoreGraphics.CGFloat +} +extension NSObject { + public func dg_addObserver(_ observer: ObjectiveC.NSObject, forKeyPath keyPath: Swift.String) + public func dg_removeObserver(_ observer: ObjectiveC.NSObject, forKeyPath keyPath: Swift.String) +} +extension UIScrollView { + public func dg_addPullToRefreshWithActionHandler(_ actionHandler: @escaping () -> Swift.Void, loadingView: HHSDKVideo.DGElasticPullToRefreshLoadingView?) 
+ public func dg_removePullToRefresh() + public func dg_setPullToRefreshBackgroundColor(_ color: UIKit.UIColor) + public func dg_setPullToRefreshFillColor(_ color: UIKit.UIColor) + public func dg_stopLoading() + public func dg_startLoading() +} +extension UIView { + public func dg_center(_ usePresentationLayerIfPossible: Swift.Bool) -> CoreGraphics.CGPoint +} +extension UIPanGestureRecognizer { + public func dg_resign() +} +extension UIGestureRecognizer.State { + public func dg_isAnyOf(_ values: [UIKit.UIGestureRecognizer.State]) -> Swift.Bool +} +@objc @_inheritsConvenienceInitializers open class DGElasticPullToRefreshLoadingView : UIKit.UIView { + @objc dynamic public init() + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + open func setPullProgress(_ progress: CoreGraphics.CGFloat) + open func startAnimating() + open func stopLoading() + @objc deinit +} +extension CGFloat { + public func toRadians() -> CoreGraphics.CGFloat + public func toDegrees() -> CoreGraphics.CGFloat +} +@objc open class DGElasticPullToRefreshLoadingViewCircle : HHSDKVideo.DGElasticPullToRefreshLoadingView { + @objc override dynamic public init() + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + override open func setPullProgress(_ progress: CoreGraphics.CGFloat) + override open func startAnimating() + override open func stopLoading() + @objc override dynamic open func tintColorDidChange() + @objc override dynamic open func layoutSubviews() + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public enum DGElasticPullToRefreshState : Swift.Int { + case stopped + case dragging + case animatingBounce + case loading + case animatingToStopped + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc @_hasMissingDesignatedInitializers open class DGElasticPullToRefreshView : 
UIKit.UIView { + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc deinit + @objc override dynamic open func observeValue(forKeyPath keyPath: Swift.String?, of object: Any?, change: [Foundation.NSKeyValueChangeKey : Any]?, context: Swift.UnsafeMutableRawPointer?) + @objc override dynamic open func layoutSubviews() + @objc override dynamic public init(frame: CoreGraphics.CGRect) +} +public struct DictionaryTransform<Key, Value> : HHSDKVideo.TransformType where Key : Swift.Hashable, Key : Swift.RawRepresentable, Value : HHSDKVideo.Mappable, Key.RawValue == Swift.String { + public init() + public func transformFromJSON(_ value: Any?) -> [Key : Value]? + public func transformToJSON(_ value: [Key : Value]?) -> Any? + public typealias JSON = Any + public typealias Object = Swift.Dictionary<Key, Value> +} +@available(*, renamed: "Digest") +public typealias Hash = HHSDKVideo.Digest +public struct Digest { + public static func md5(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha1(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha224(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha256(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha384(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha512(_ bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + public static func sha2(_ bytes: Swift.Array<Swift.UInt8>, variant: HHSDKVideo.SHA2.Variant) -> Swift.Array<Swift.UInt8> + public static func sha3(_ bytes: Swift.Array<Swift.UInt8>, variant: HHSDKVideo.SHA3.Variant) -> Swift.Array<Swift.UInt8> +} +public struct ECB : HHSDKVideo.BlockMode { + public let options: HHSDKVideo.BlockModeOption + public init() + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping 
HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +@objc @_inheritsConvenienceInitializers public class EKAccessoryNoteMessageView : UIKit.UIView { + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +public struct EKAlertMessage { + public enum ImagePosition { + case top + case left + public static func == (a: HHSDKVideo.EKAlertMessage.ImagePosition, b: HHSDKVideo.EKAlertMessage.ImagePosition) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let imagePosition: HHSDKVideo.EKAlertMessage.ImagePosition + public let simpleMessage: HHSDKVideo.EKSimpleMessage + public let buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent + public init(simpleMessage: HHSDKVideo.EKSimpleMessage, imagePosition: HHSDKVideo.EKAlertMessage.ImagePosition = .top, buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent) +} +@objc @_hasMissingDesignatedInitializers final public class EKAlertMessageView : HHSDKVideo.EKSimpleMessageView { + public init(with message: HHSDKVideo.EKAlertMessage) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc deinit +} +public struct EKAttributes { + public var name: Swift.String? 
+ public var windowLevel: HHSDKVideo.EKAttributes.WindowLevel + public var position: HHSDKVideo.EKAttributes.Position + public var precedence: HHSDKVideo.EKAttributes.Precedence + public var displayDuration: Swift.Double + public var positionConstraints: HHSDKVideo.EKAttributes.PositionConstraints + public var screenInteraction: HHSDKVideo.EKAttributes.UserInteraction + public var entryInteraction: HHSDKVideo.EKAttributes.UserInteraction + public var scroll: HHSDKVideo.EKAttributes.Scroll + public var hapticFeedbackType: HHSDKVideo.EKAttributes.NotificationHapticFeedback + public var lifecycleEvents: HHSDKVideo.EKAttributes.LifecycleEvents + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public var entryBackground: HHSDKVideo.EKAttributes.BackgroundStyle + public var screenBackground: HHSDKVideo.EKAttributes.BackgroundStyle + public var shadow: HHSDKVideo.EKAttributes.Shadow + public var roundCorners: HHSDKVideo.EKAttributes.RoundCorners + public var border: HHSDKVideo.EKAttributes.Border + public var statusBar: HHSDKVideo.EKAttributes.StatusBar + public var entranceAnimation: HHSDKVideo.EKAttributes.Animation + public var exitAnimation: HHSDKVideo.EKAttributes.Animation + public var popBehavior: HHSDKVideo.EKAttributes.PopBehavior { + get + set + } + public init() +} +extension EKAttributes { + public struct Animation : Swift.Equatable { + public struct Spring : Swift.Equatable { + public var damping: CoreGraphics.CGFloat + public var initialVelocity: CoreGraphics.CGFloat + public init(damping: CoreGraphics.CGFloat, initialVelocity: CoreGraphics.CGFloat) + public static func == (a: HHSDKVideo.EKAttributes.Animation.Spring, b: HHSDKVideo.EKAttributes.Animation.Spring) -> Swift.Bool + } + public struct RangeAnimation : Swift.Equatable { + public var duration: Foundation.TimeInterval + public var delay: Foundation.TimeInterval + public var start: CoreGraphics.CGFloat + public var end: CoreGraphics.CGFloat + public var spring: 
HHSDKVideo.EKAttributes.Animation.Spring? + public init(from start: CoreGraphics.CGFloat, to end: CoreGraphics.CGFloat, duration: Foundation.TimeInterval, delay: Foundation.TimeInterval = 0, spring: HHSDKVideo.EKAttributes.Animation.Spring? = nil) + public static func == (a: HHSDKVideo.EKAttributes.Animation.RangeAnimation, b: HHSDKVideo.EKAttributes.Animation.RangeAnimation) -> Swift.Bool + } + public struct Translate : Swift.Equatable { + public enum AnchorPosition : Swift.Equatable { + case top + case bottom + case automatic + public func hash(into hasher: inout Swift.Hasher) + public static func == (a: HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition, b: HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition) -> Swift.Bool + public var hashValue: Swift.Int { + get + } + } + public var duration: Foundation.TimeInterval + public var delay: Foundation.TimeInterval + public var anchorPosition: HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition + public var spring: HHSDKVideo.EKAttributes.Animation.Spring? + public init(duration: Foundation.TimeInterval, anchorPosition: HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition = .automatic, delay: Foundation.TimeInterval = 0, spring: HHSDKVideo.EKAttributes.Animation.Spring? = nil) + public static func == (a: HHSDKVideo.EKAttributes.Animation.Translate, b: HHSDKVideo.EKAttributes.Animation.Translate) -> Swift.Bool + } + public var translate: HHSDKVideo.EKAttributes.Animation.Translate? + public var scale: HHSDKVideo.EKAttributes.Animation.RangeAnimation? + public var fade: HHSDKVideo.EKAttributes.Animation.RangeAnimation? 
+ public var containsTranslation: Swift.Bool { + get + } + public var containsScale: Swift.Bool { + get + } + public var containsFade: Swift.Bool { + get + } + public var containsAnimation: Swift.Bool { + get + } + public var maxDelay: Foundation.TimeInterval { + get + } + public var maxDuration: Foundation.TimeInterval { + get + } + public var totalDuration: Foundation.TimeInterval { + get + } + public static var translation: HHSDKVideo.EKAttributes.Animation { + get + } + public static var none: HHSDKVideo.EKAttributes.Animation { + get + } + public init(translate: HHSDKVideo.EKAttributes.Animation.Translate? = nil, scale: HHSDKVideo.EKAttributes.Animation.RangeAnimation? = nil, fade: HHSDKVideo.EKAttributes.Animation.RangeAnimation? = nil) + public static func == (a: HHSDKVideo.EKAttributes.Animation, b: HHSDKVideo.EKAttributes.Animation) -> Swift.Bool + } +} +extension EKAttributes { + public enum BackgroundStyle : Swift.Equatable { + public struct BlurStyle : Swift.Equatable { + public static var extra: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle { + get + } + public static var standard: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle { + get + } + @available(iOS 10.0, *) + public static var prominent: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle { + get + } + public static var dark: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle { + get + } + public init(style: UIKit.UIBlurEffect.Style) + public init(light: UIKit.UIBlurEffect.Style, dark: UIKit.UIBlurEffect.Style) + public func blurStyle(for traits: UIKit.UITraitCollection, mode: HHSDKVideo.EKAttributes.DisplayMode) -> UIKit.UIBlurEffect.Style + public func blurEffect(for traits: UIKit.UITraitCollection, mode: HHSDKVideo.EKAttributes.DisplayMode) -> UIKit.UIBlurEffect + public static func == (a: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle, b: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle) -> Swift.Bool + } + public struct Gradient { + public var colors: [HHSDKVideo.EKColor] + public 
var startPoint: CoreGraphics.CGPoint + public var endPoint: CoreGraphics.CGPoint + public init(colors: [HHSDKVideo.EKColor], startPoint: CoreGraphics.CGPoint, endPoint: CoreGraphics.CGPoint) + } + case visualEffect(style: HHSDKVideo.EKAttributes.BackgroundStyle.BlurStyle) + case color(color: HHSDKVideo.EKColor) + case gradient(gradient: HHSDKVideo.EKAttributes.BackgroundStyle.Gradient) + case image(image: UIKit.UIImage) + case clear + public static func == (lhs: HHSDKVideo.EKAttributes.BackgroundStyle, rhs: HHSDKVideo.EKAttributes.BackgroundStyle) -> Swift.Bool + } +} +extension EKAttributes { + public enum DisplayMode { + case inferred + case light + case dark + public static func == (a: HHSDKVideo.EKAttributes.DisplayMode, b: HHSDKVideo.EKAttributes.DisplayMode) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } +} +extension EKAttributes { + public typealias DisplayDuration = Foundation.TimeInterval +} +extension EKAttributes { + public enum RoundCorners { + case none + case all(radius: CoreGraphics.CGFloat) + case top(radius: CoreGraphics.CGFloat) + case bottom(radius: CoreGraphics.CGFloat) + } + public enum Border { + case none + case value(color: UIKit.UIColor, width: CoreGraphics.CGFloat) + } +} +extension EKAttributes { + public enum NotificationHapticFeedback { + case success + case warning + case error + case none + public static func == (a: HHSDKVideo.EKAttributes.NotificationHapticFeedback, b: HHSDKVideo.EKAttributes.NotificationHapticFeedback) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } +} +extension EKAttributes { + public struct LifecycleEvents { + public typealias Event = () -> Swift.Void + public var willAppear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? + public var didAppear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? + public var willDisappear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? 
+ public var didDisappear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? + public init(willAppear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? = nil, didAppear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? = nil, willDisappear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? = nil, didDisappear: HHSDKVideo.EKAttributes.LifecycleEvents.Event? = nil) + } +} +extension EKAttributes { + public enum PopBehavior { + case overridden + case animated(animation: HHSDKVideo.EKAttributes.Animation) + public var isOverriden: Swift.Bool { + get + } + } +} +extension EKAttributes { + public enum Position { + case top + case bottom + case center + public var isTop: Swift.Bool { + get + } + public var isCenter: Swift.Bool { + get + } + public var isBottom: Swift.Bool { + get + } + public static func == (a: HHSDKVideo.EKAttributes.Position, b: HHSDKVideo.EKAttributes.Position) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } +} +extension EKAttributes { + public struct PositionConstraints { + public enum SafeArea { + case overridden + case empty(fillSafeArea: Swift.Bool) + public var isOverridden: Swift.Bool { + get + } + } + public enum Edge { + case ratio(value: CoreGraphics.CGFloat) + case offset(value: CoreGraphics.CGFloat) + case constant(value: CoreGraphics.CGFloat) + case intrinsic + public static var fill: HHSDKVideo.EKAttributes.PositionConstraints.Edge { + get + } + } + public struct Size { + public var width: HHSDKVideo.EKAttributes.PositionConstraints.Edge + public var height: HHSDKVideo.EKAttributes.PositionConstraints.Edge + public init(width: HHSDKVideo.EKAttributes.PositionConstraints.Edge, height: HHSDKVideo.EKAttributes.PositionConstraints.Edge) + public static var intrinsic: HHSDKVideo.EKAttributes.PositionConstraints.Size { + get + } + public static var sizeToWidth: HHSDKVideo.EKAttributes.PositionConstraints.Size { + get + } + public static var screen: 
HHSDKVideo.EKAttributes.PositionConstraints.Size { + get + } + } + public enum KeyboardRelation { + public struct Offset { + public var bottom: CoreGraphics.CGFloat + public var screenEdgeResistance: CoreGraphics.CGFloat? + public init(bottom: CoreGraphics.CGFloat = 0, screenEdgeResistance: CoreGraphics.CGFloat? = nil) + public static var none: HHSDKVideo.EKAttributes.PositionConstraints.KeyboardRelation.Offset { + get + } + } + case bind(offset: HHSDKVideo.EKAttributes.PositionConstraints.KeyboardRelation.Offset) + case unbind + public var isBound: Swift.Bool { + get + } + } + public struct Rotation { + public enum SupportedInterfaceOrientation { + case standard + case all + public static func == (a: HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation, b: HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public var isEnabled: Swift.Bool + public var supportedInterfaceOrientations: HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation + public init() + } + public var rotation: HHSDKVideo.EKAttributes.PositionConstraints.Rotation + public var keyboardRelation: HHSDKVideo.EKAttributes.PositionConstraints.KeyboardRelation + public var size: HHSDKVideo.EKAttributes.PositionConstraints.Size + public var maxSize: HHSDKVideo.EKAttributes.PositionConstraints.Size + public var verticalOffset: CoreGraphics.CGFloat + public var safeArea: HHSDKVideo.EKAttributes.PositionConstraints.SafeArea + public var hasVerticalOffset: Swift.Bool { + get + } + public static var float: HHSDKVideo.EKAttributes.PositionConstraints { + get + } + public static var fullWidth: HHSDKVideo.EKAttributes.PositionConstraints { + get + } + public static var fullScreen: HHSDKVideo.EKAttributes.PositionConstraints { + get + } + public init(verticalOffset: CoreGraphics.CGFloat = 0, size: 
HHSDKVideo.EKAttributes.PositionConstraints.Size = .sizeToWidth, maxSize: HHSDKVideo.EKAttributes.PositionConstraints.Size = .intrinsic) + } +} +extension EKAttributes { + public enum Precedence { + public struct Priority : Swift.Hashable, Swift.Equatable, Swift.RawRepresentable, Swift.Comparable { + public var rawValue: Swift.Int + public var hashValue: Swift.Int { + get + } + public init(_ rawValue: Swift.Int) + public init(rawValue: Swift.Int) + public static func == (lhs: HHSDKVideo.EKAttributes.Precedence.Priority, rhs: HHSDKVideo.EKAttributes.Precedence.Priority) -> Swift.Bool + public static func < (lhs: HHSDKVideo.EKAttributes.Precedence.Priority, rhs: HHSDKVideo.EKAttributes.Precedence.Priority) -> Swift.Bool + public typealias RawValue = Swift.Int + } + public enum QueueingHeuristic { + public static var value: HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic + case chronological + case priority + public static func == (a: HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic, b: HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + case override(priority: HHSDKVideo.EKAttributes.Precedence.Priority, dropEnqueuedEntries: Swift.Bool) + case enqueue(priority: HHSDKVideo.EKAttributes.Precedence.Priority) + public var priority: HHSDKVideo.EKAttributes.Precedence.Priority { + get + set + } + } +} +extension EKAttributes.Precedence.Priority { + public static let maxRawValue: Swift.Int + public static let highRawValue: Swift.Int + public static let normalRawValue: Swift.Int + public static let lowRawValue: Swift.Int + public static let minRawValue: Swift.Int + public static let max: HHSDKVideo.EKAttributes.Precedence.Priority + public static let high: HHSDKVideo.EKAttributes.Precedence.Priority + public static let normal: HHSDKVideo.EKAttributes.Precedence.Priority + public static let low: HHSDKVideo.EKAttributes.Precedence.Priority + public 
static let min: HHSDKVideo.EKAttributes.Precedence.Priority +} +extension EKAttributes { + public static var `default`: HHSDKVideo.EKAttributes + public static var toast: HHSDKVideo.EKAttributes { + get + } + public static var float: HHSDKVideo.EKAttributes { + get + } + public static var topFloat: HHSDKVideo.EKAttributes { + get + } + public static var bottomFloat: HHSDKVideo.EKAttributes { + get + } + public static var centerFloat: HHSDKVideo.EKAttributes { + get + } + public static var bottomToast: HHSDKVideo.EKAttributes { + get + } + public static var topToast: HHSDKVideo.EKAttributes { + get + } + public static var topNote: HHSDKVideo.EKAttributes { + get + } + public static var bottomNote: HHSDKVideo.EKAttributes { + get + } + public static var statusBar: HHSDKVideo.EKAttributes { + get + } +} +extension EKAttributes { + public enum Scroll { + public struct PullbackAnimation { + public var duration: Foundation.TimeInterval + public var damping: CoreGraphics.CGFloat + public var initialSpringVelocity: CoreGraphics.CGFloat + public init(duration: Foundation.TimeInterval, damping: CoreGraphics.CGFloat, initialSpringVelocity: CoreGraphics.CGFloat) + public static var jolt: HHSDKVideo.EKAttributes.Scroll.PullbackAnimation { + get + } + public static var easeOut: HHSDKVideo.EKAttributes.Scroll.PullbackAnimation { + get + } + } + case disabled + case edgeCrossingDisabled(swipeable: Swift.Bool) + case enabled(swipeable: Swift.Bool, pullbackAnimation: HHSDKVideo.EKAttributes.Scroll.PullbackAnimation) + } +} +extension EKAttributes { + public enum Shadow { + case none + case active(with: HHSDKVideo.EKAttributes.Shadow.Value) + public struct Value { + public let radius: CoreGraphics.CGFloat + public let opacity: Swift.Float + public let color: HHSDKVideo.EKColor + public let offset: CoreGraphics.CGSize + public init(color: HHSDKVideo.EKColor = .black, opacity: Swift.Float, radius: CoreGraphics.CGFloat, offset: CoreGraphics.CGSize = .zero) + } + } +} +extension 
EKAttributes { + public enum StatusBar { + public typealias Appearance = (visible: Swift.Bool, style: UIKit.UIStatusBarStyle) + case ignored + case hidden + case dark + case light + case inferred + public var appearance: HHSDKVideo.EKAttributes.StatusBar.Appearance { + get + } + public static func statusBar(by appearance: HHSDKVideo.EKAttributes.StatusBar.Appearance) -> HHSDKVideo.EKAttributes.StatusBar + public static var currentAppearance: HHSDKVideo.EKAttributes.StatusBar.Appearance { + get + } + public static var currentStatusBar: HHSDKVideo.EKAttributes.StatusBar { + get + } + public static func == (a: HHSDKVideo.EKAttributes.StatusBar, b: HHSDKVideo.EKAttributes.StatusBar) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } +} +extension EKAttributes { + public struct UserInteraction { + public typealias Action = () -> () + public enum Default { + case absorbTouches + case delayExit(by: Foundation.TimeInterval) + case dismissEntry + case forward + } + public var defaultAction: HHSDKVideo.EKAttributes.UserInteraction.Default + public var customTapActions: [HHSDKVideo.EKAttributes.UserInteraction.Action] + public init(defaultAction: HHSDKVideo.EKAttributes.UserInteraction.Default = .absorbTouches, customTapActions: [HHSDKVideo.EKAttributes.UserInteraction.Action] = []) + public static var dismiss: HHSDKVideo.EKAttributes.UserInteraction { + get + } + public static var forward: HHSDKVideo.EKAttributes.UserInteraction { + get + } + public static var absorbTouches: HHSDKVideo.EKAttributes.UserInteraction { + get + } + public static func delayExit(by delay: Foundation.TimeInterval) -> HHSDKVideo.EKAttributes.UserInteraction + } +} +extension EKAttributes { + public enum WindowLevel { + case alerts + case statusBar + case normal + case custom(level: UIKit.UIWindow.Level) + public var value: UIKit.UIWindow.Level { + get + } + } +} +@objc final public class EKButtonBarView : UIKit.UIView { + @objc 
required dynamic public init?(coder aDecoder: Foundation.NSCoder) + public init(with buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent) + @objc override final public func layoutSubviews() + final public func expand() + final public func compress() + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public struct EKColor : Swift.Equatable { + public var dark: UIKit.UIColor { + get + } + public var light: UIKit.UIColor { + get + } + public init(light: UIKit.UIColor, dark: UIKit.UIColor) + public init(_ unified: UIKit.UIColor) + public init(rgb: Swift.Int) + public init(red: Swift.Int, green: Swift.Int, blue: Swift.Int) + public func color(for traits: UIKit.UITraitCollection, mode: HHSDKVideo.EKAttributes.DisplayMode) -> UIKit.UIColor + public static func == (a: HHSDKVideo.EKColor, b: HHSDKVideo.EKColor) -> Swift.Bool +} +extension EKColor { + public var inverted: HHSDKVideo.EKColor { + get + } + public func with(alpha: CoreGraphics.CGFloat) -> HHSDKVideo.EKColor + public static var white: HHSDKVideo.EKColor { + get + } + public static var black: HHSDKVideo.EKColor { + get + } + public static var clear: HHSDKVideo.EKColor { + get + } + public static var standardBackground: HHSDKVideo.EKColor { + get + } + public static var standardContent: HHSDKVideo.EKColor { + get + } +} +@objc final public class EKFormMessageView : UIKit.UIView { + public init(with title: HHSDKVideo.EKProperty.LabelContent, textFieldsContent: [HHSDKVideo.EKProperty.TextFieldContent], buttonContent: HHSDKVideo.EKProperty.ButtonContent) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + final public func becomeFirstResponder(with textFieldIndex: Swift.Int) + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKImageNoteMessageView : HHSDKVideo.EKAccessoryNoteMessageView { + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + public init(with content: HHSDKVideo.EKProperty.LabelContent, imageContent: HHSDKVideo.EKProperty.ImageContent) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKMessageContentView : UIKit.UIView { + public var titleContent: HHSDKVideo.EKProperty.LabelContent! { + get + set + } + public var subtitleContent: HHSDKVideo.EKProperty.LabelContent! { + get + set + } + public var titleAttributes: HHSDKVideo.EKProperty.LabelStyle! { + get + set + } + public var subtitleAttributes: HHSDKVideo.EKProperty.LabelStyle! { + get + set + } + public var title: Swift.String! { + get + set + } + public var subtitle: Swift.String! { + get + set + } + public var verticalMargins: CoreGraphics.CGFloat { + get + set + } + public var horizontalMargins: CoreGraphics.CGFloat { + get + set + } + public var labelsOffset: CoreGraphics.CGFloat { + get + set + } + @objc dynamic public init() + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKNoteMessageView : UIKit.UIView { + public var horizontalOffset: CoreGraphics.CGFloat { + get + set + } + public var verticalOffset: CoreGraphics.CGFloat { + get + set + } + public init(with content: HHSDKVideo.EKProperty.LabelContent) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public struct EKNotificationMessage { + public struct Insets { + public var contentInsets: UIKit.UIEdgeInsets + public var titleToDescription: CoreGraphics.CGFloat + public static var `default`: HHSDKVideo.EKNotificationMessage.Insets + } + public let simpleMessage: HHSDKVideo.EKSimpleMessage + public let auxiliary: HHSDKVideo.EKProperty.LabelContent? + public let insets: HHSDKVideo.EKNotificationMessage.Insets + public init(simpleMessage: HHSDKVideo.EKSimpleMessage, auxiliary: HHSDKVideo.EKProperty.LabelContent? = nil, insets: HHSDKVideo.EKNotificationMessage.Insets = .default) +} +@objc @_hasMissingDesignatedInitializers final public class EKNotificationMessageView : HHSDKVideo.EKSimpleMessageView { + public init(with message: HHSDKVideo.EKNotificationMessage) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc deinit +} +public struct EKPopUpMessage { + public typealias EKPopUpMessageAction = () -> () + public struct ThemeImage { + public enum Position { + case topToTop(offset: CoreGraphics.CGFloat) + case centerToTop(offset: CoreGraphics.CGFloat) + } + public var image: HHSDKVideo.EKProperty.ImageContent + public var position: HHSDKVideo.EKPopUpMessage.ThemeImage.Position + public init(image: HHSDKVideo.EKProperty.ImageContent, position: HHSDKVideo.EKPopUpMessage.ThemeImage.Position = .topToTop(offset: 40)) + } + public var themeImage: HHSDKVideo.EKPopUpMessage.ThemeImage? + public var title: HHSDKVideo.EKProperty.LabelContent + public var description: HHSDKVideo.EKProperty.LabelContent + public var button: HHSDKVideo.EKProperty.ButtonContent + public var action: HHSDKVideo.EKPopUpMessage.EKPopUpMessageAction + public init(themeImage: HHSDKVideo.EKPopUpMessage.ThemeImage? = nil, title: HHSDKVideo.EKProperty.LabelContent, description: HHSDKVideo.EKProperty.LabelContent, button: HHSDKVideo.EKProperty.ButtonContent, action: @escaping HHSDKVideo.EKPopUpMessage.EKPopUpMessageAction) +} +@objc final public class EKPopUpMessageView : UIKit.UIView { + public init(with message: HHSDKVideo.EKPopUpMessage) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKProcessingNoteMessageView : HHSDKVideo.EKAccessoryNoteMessageView { + public var isProcessing: Swift.Bool { + get + set + } + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + public init(with content: HHSDKVideo.EKProperty.LabelContent, activityIndicator: UIKit.UIActivityIndicatorView.Style) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public struct EKProperty { + public struct ButtonContent { + public typealias Action = () -> () + public var label: HHSDKVideo.EKProperty.LabelContent + public var backgroundColor: HHSDKVideo.EKColor + public var highlightedBackgroundColor: HHSDKVideo.EKColor + public var contentEdgeInset: CoreGraphics.CGFloat + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public var accessibilityIdentifier: Swift.String? + public var action: HHSDKVideo.EKProperty.ButtonContent.Action? + public init(label: HHSDKVideo.EKProperty.LabelContent, backgroundColor: HHSDKVideo.EKColor, highlightedBackgroundColor: HHSDKVideo.EKColor, contentEdgeInset: CoreGraphics.CGFloat = 5, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, accessibilityIdentifier: Swift.String? = nil, action: @escaping HHSDKVideo.EKProperty.ButtonContent.Action = {}) + public func backgroundColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + public func highlightedBackgroundColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + public func highlighedLabelColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + } + public struct LabelContent { + public var text: Swift.String + public var style: HHSDKVideo.EKProperty.LabelStyle + public var accessibilityIdentifier: Swift.String? + public init(text: Swift.String, style: HHSDKVideo.EKProperty.LabelStyle, accessibilityIdentifier: Swift.String? 
= nil) + } + public struct LabelStyle { + public var font: UIKit.UIFont + public var color: HHSDKVideo.EKColor + public var alignment: UIKit.NSTextAlignment + public var numberOfLines: Swift.Int + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public init(font: UIKit.UIFont, color: HHSDKVideo.EKColor, alignment: UIKit.NSTextAlignment = .left, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, numberOfLines: Swift.Int = 0) + public func color(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + } + public struct ImageContent { + public enum TransformAnimation { + case animate(duration: Foundation.TimeInterval, options: UIKit.UIView.AnimationOptions, transform: CoreGraphics.CGAffineTransform) + case none + } + public var tint: HHSDKVideo.EKColor? + public var images: [UIKit.UIImage] + public var imageSequenceAnimationDuration: Foundation.TimeInterval + public var size: CoreGraphics.CGSize? + public var contentMode: UIKit.UIView.ContentMode + public var makesRound: Swift.Bool + public var animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public var accessibilityIdentifier: Swift.String? + public init(imageName: Swift.String, animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation = .none, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, size: CoreGraphics.CGSize? = nil, contentMode: UIKit.UIView.ContentMode = .scaleToFill, tint: HHSDKVideo.EKColor? = nil, makesRound: Swift.Bool = false, accessibilityIdentifier: Swift.String? = nil) + public init(image: UIKit.UIImage, animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation = .none, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, size: CoreGraphics.CGSize? = nil, tint: HHSDKVideo.EKColor? = nil, contentMode: UIKit.UIView.ContentMode = .scaleToFill, makesRound: Swift.Bool = false, accessibilityIdentifier: Swift.String? 
= nil) + public init(images: [UIKit.UIImage], imageSequenceAnimationDuration: Foundation.TimeInterval = 1, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation = .none, size: CoreGraphics.CGSize? = nil, tint: HHSDKVideo.EKColor? = nil, contentMode: UIKit.UIView.ContentMode = .scaleToFill, makesRound: Swift.Bool = false, accessibilityIdentifier: Swift.String? = nil) + public init(imagesNames: [Swift.String], imageSequenceAnimationDuration: Foundation.TimeInterval = 1, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, animation: HHSDKVideo.EKProperty.ImageContent.TransformAnimation = .none, size: CoreGraphics.CGSize? = nil, tint: HHSDKVideo.EKColor? = nil, contentMode: UIKit.UIView.ContentMode = .scaleToFill, makesRound: Swift.Bool = false, accessibilityIdentifier: Swift.String? = nil) + public static func thumb(with image: UIKit.UIImage, edgeSize: CoreGraphics.CGFloat) -> HHSDKVideo.EKProperty.ImageContent + public static func thumb(with imageName: Swift.String, edgeSize: CoreGraphics.CGFloat) -> HHSDKVideo.EKProperty.ImageContent + public func tintColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor? + } + public struct TextFieldContent { + weak public var delegate: UIKit.UITextFieldDelegate? + public var keyboardType: UIKit.UIKeyboardType + public var isSecure: Swift.Bool + public var leadingImage: UIKit.UIImage! + public var placeholder: HHSDKVideo.EKProperty.LabelContent + public var textStyle: HHSDKVideo.EKProperty.LabelStyle + public var tintColor: HHSDKVideo.EKColor! + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public var bottomBorderColor: HHSDKVideo.EKColor + public var accessibilityIdentifier: Swift.String? + public var textContent: Swift.String { + get + set + } + public init(delegate: UIKit.UITextFieldDelegate? 
= nil, keyboardType: UIKit.UIKeyboardType = .default, placeholder: HHSDKVideo.EKProperty.LabelContent, tintColor: HHSDKVideo.EKColor? = nil, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, textStyle: HHSDKVideo.EKProperty.LabelStyle, isSecure: Swift.Bool = false, leadingImage: UIKit.UIImage? = nil, bottomBorderColor: HHSDKVideo.EKColor = .clear, accessibilityIdentifier: Swift.String? = nil) + public func tintColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor? + public func bottomBorderColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor? + } + public struct ButtonBarContent { + public var content: [HHSDKVideo.EKProperty.ButtonContent] + public var separatorColor: HHSDKVideo.EKColor + public var horizontalDistributionThreshold: Swift.Int + public var expandAnimatedly: Swift.Bool + public var buttonHeight: CoreGraphics.CGFloat + public var displayMode: HHSDKVideo.EKAttributes.DisplayMode + public init(with buttonContents: HHSDKVideo.EKProperty.ButtonContent..., separatorColor: HHSDKVideo.EKColor, horizontalDistributionThreshold: Swift.Int = 2, buttonHeight: CoreGraphics.CGFloat = 50, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, expandAnimatedly: Swift.Bool) + public init(with buttonContents: [HHSDKVideo.EKProperty.ButtonContent], separatorColor: HHSDKVideo.EKColor, horizontalDistributionThreshold: Swift.Int = 2, buttonHeight: CoreGraphics.CGFloat = 50, displayMode: HHSDKVideo.EKAttributes.DisplayMode = .inferred, expandAnimatedly: Swift.Bool) + public func separatorColor(for traitCollection: UIKit.UITraitCollection) -> UIKit.UIColor + } + public struct EKRatingItemContent { + public var title: HHSDKVideo.EKProperty.LabelContent + public var description: HHSDKVideo.EKProperty.LabelContent + public var unselectedImage: HHSDKVideo.EKProperty.ImageContent + public var selectedImage: HHSDKVideo.EKProperty.ImageContent + public var size: CoreGraphics.CGSize + public init(title: 
HHSDKVideo.EKProperty.LabelContent, description: HHSDKVideo.EKProperty.LabelContent, unselectedImage: HHSDKVideo.EKProperty.ImageContent, selectedImage: HHSDKVideo.EKProperty.ImageContent, size: CoreGraphics.CGSize = CGSize(width: 50, height: 50)) + } +} +public struct EKRatingMessage { + public typealias Selection = (Swift.Int) -> Swift.Void + public var initialTitle: HHSDKVideo.EKProperty.LabelContent + public var initialDescription: HHSDKVideo.EKProperty.LabelContent + public var ratingItems: [HHSDKVideo.EKProperty.EKRatingItemContent] + public var buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent + public var selection: HHSDKVideo.EKRatingMessage.Selection! + public var selectedIndex: Swift.Int? { + get + set + } + public init(initialTitle: HHSDKVideo.EKProperty.LabelContent, initialDescription: HHSDKVideo.EKProperty.LabelContent, ratingItems: [HHSDKVideo.EKProperty.EKRatingItemContent], buttonBarContent: HHSDKVideo.EKProperty.ButtonBarContent, selection: HHSDKVideo.EKRatingMessage.Selection? 
= nil) +} +@objc final public class EKRatingMessageView : UIKit.UIView { + public init(with message: HHSDKVideo.EKRatingMessage) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc @_inheritsConvenienceInitializers final public class EKRatingSymbolsContainerView : UIKit.UIView { + final public func setup(with message: HHSDKVideo.EKRatingMessage, externalSelection: @escaping HHSDKVideo.EKRatingMessage.Selection) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +@objc final public class EKRatingSymbolView : UIKit.UIView { + final public var isSelected: Swift.Bool { + get + set + } + public init(unselectedImage: HHSDKVideo.EKProperty.ImageContent, selectedImage: HHSDKVideo.EKProperty.ImageContent, selection: @escaping HHSDKVideo.EKRatingMessage.Selection) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public struct EKSimpleMessage { + public let image: HHSDKVideo.EKProperty.ImageContent? + public let title: HHSDKVideo.EKProperty.LabelContent + public let description: HHSDKVideo.EKProperty.LabelContent + public init(image: HHSDKVideo.EKProperty.ImageContent? = nil, title: HHSDKVideo.EKProperty.LabelContent, description: HHSDKVideo.EKProperty.LabelContent) +} +@objc @_hasMissingDesignatedInitializers public class EKSimpleMessageView : UIKit.UIView { + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc final public class EKTextField : UIKit.UIView { + final public var text: Swift.String { + get + set + } + public init(with content: HHSDKVideo.EKProperty.TextFieldContent) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + final public func makeFirstResponder() + @objc override final public func traitCollectionDidChange(_ previousTraitCollection: UIKit.UITraitCollection?) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +@objc public class EKXStatusBarMessageView : UIKit.UIView { + public init(leading: HHSDKVideo.EKProperty.LabelContent, trailing: HHSDKVideo.EKProperty.LabelContent) + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc deinit +} +public func <- <T>(left: inout T, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: T, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: T?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout [T], right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: [T], right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout [T]?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: [T]?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout [Swift.String : T], right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: [Swift.String : T], right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func <- <T>(left: inout [Swift.String : T]?, right: HHSDKVideo.Map) where T : Swift.RawRepresentable +public func >>> <T>(left: [Swift.String : T]?, right: 
HHSDKVideo.Map) where T : Swift.RawRepresentable +open class EnumTransform<T> : HHSDKVideo.TransformType where T : Swift.RawRepresentable { + public typealias Object = T + public typealias JSON = T.RawValue + public init() + open func transformFromJSON(_ value: Any?) -> T? + open func transformToJSON(_ value: T?) -> T.RawValue? + @objc deinit +} +final public class GCM : HHSDKVideo.BlockMode { + public enum Mode { + case combined + case detached + public static func == (a: HHSDKVideo.GCM.Mode, b: HHSDKVideo.GCM.Mode) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + final public let options: HHSDKVideo.BlockModeOption + public enum Error : Swift.Error { + case invalidInitializationVector + case fail + public static func == (a: HHSDKVideo.GCM.Error, b: HHSDKVideo.GCM.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + final public var authenticationTag: Swift.Array<Swift.UInt8>? + public init(iv: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? = nil, tagLength: Swift.Int = 16, mode: HHSDKVideo.GCM.Mode = .detached) + convenience public init(iv: Swift.Array<Swift.UInt8>, authenticationTag: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? = nil, mode: HHSDKVideo.GCM.Mode = .detached) + final public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker + @objc deinit +} +open class HexColorTransform : HHSDKVideo.TransformType { + public typealias Object = UIKit.UIColor + public typealias JSON = Swift.String + public init(prefixToJSON: Swift.Bool = false, alphaToJSON: Swift.Bool = false) + open func transformFromJSON(_ value: Any?) -> HHSDKVideo.HexColorTransform.Object? 
+ open func transformToJSON(_ value: HHSDKVideo.HexColorTransform.Object?) -> HHSDKVideo.HexColorTransform.JSON? + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHAppProtocolCheck : ObjectiveC.NSObject { + public static let instance: HHSDKVideo.HHAppProtocolCheck + @objc override dynamic public init() + public func showPrivacyDialog(content: Swift.String, userDoc: Swift.String, privateDoc: Swift.String, _ agreeBlock: ((Swift.Bool) -> Swift.Void)?) + @objc deinit +} +extension HHAppProtocolCheck : UIKit.UITextViewDelegate { + @objc dynamic public func textView(_ textView: UIKit.UITextView, shouldInteractWith URL: Foundation.URL, in characterRange: Foundation.NSRange, interaction: UIKit.UITextItemInteraction) -> Swift.Bool +} +extension Array { + public subscript(safe index: Swift.Int) -> Element? { + get + } +} +public struct HHBaseApi { +} +@propertyWrapper public struct ApiConfig { + public var wrappedValue: HHSDKVideo.HHBaseApi { + get + } + public init(path: Swift.String, method: HHSDKVideo.HHRequestMethod = .post, host: Swift.String = HHUrl.baseUrl(), domain: Swift.String = HHUrl.urlForFamily(), needUserInfo: Swift.Bool = true, needEncrypt: Swift.Bool = true, needDNS: Swift.Bool = true) +} +public typealias HHLoginHandler = ((Swift.String?) -> Swift.Void) +public var HMHudManager: HHSDKVideo.HHHUDable { + get +} +@_inheritsConvenienceInitializers @objc public class HHBaseSDK : ObjectiveC.NSObject { + @objc public static let `default`: HHSDKVideo.HHBaseSDK + public var dnsCallback: HHSDKVideo.HHDNSProtocal? + @objc public func start() + @objc public func login(userToken: Swift.String, completion: @escaping HHSDKVideo.HHLoginHandler) + @objc public func logout(_ callback: ((Swift.String?) -> Swift.Void)? 
= nil) + @objc override dynamic public init() + @objc deinit +} +@objc public enum HHBaseCallingState : Swift.Int { + case onStart = 0 + case waitingDoctor + case callFreeDoctor + case callConnect + case didRing + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc public protocol HHBaseVideoDelegate : ObjectiveC.NSObjectProtocol { + @objc func callStateChange(_ state: HHSDKVideo.HHBaseCallingState) + @objc optional func onStart(orderId: Swift.String?) + @objc func callDidEstablish() + @objc func getChatParentView(_ view: UIKit.UIView) + @objc func callFail(code: Swift.Int, error: Swift.String) + @objc func onFail(_ errorCode: Swift.Int, errrorStr: Swift.String?) + @objc func onCancel() + @objc func receivedOrder(_ orderId: Swift.String) + @objc func callDidFinish() + @objc func onExtensionDoctor() + @objc func onReceive(_ callID: Swift.String) + @objc func onResponse(_ accept: Swift.Bool) + @objc func onLeakPermission(_ type: HHSDKVideo.HHBasePermissionType) + @objc optional func onForceOffline() +} +@objc public protocol HHCallDelegate : ObjectiveC.NSObjectProtocol { + @objc optional func onCallStatus(_ error: Swift.Error?) + @objc optional func onCallSuccess() + @objc optional func callFinished() +} +@_inheritsConvenienceInitializers @objc public class HHCallerInfo : ObjectiveC.NSObject, HHSDKVideo.Mappable { + public var name: Swift.String? + public var photourl: Swift.String? + public var uuid: Swift.Int? + public var userToken: Swift.String? + @objc override dynamic public init() + required public init?(map: HHSDKVideo.Map) + public func mapping(map: HHSDKVideo.Map) + @objc deinit +} +public class HHCameraConfig { + weak public var sender: UIKit.UIViewController! + public var mediaType: HHSDKVideo.HHMediaType + public var isGrayCam: Swift.Bool + public var canReduce: Swift.Bool + public var autoUpload: Swift.Bool + public var maxCount: Swift.Int? 
+ public var crop: HHSDKVideo.onCropFinish? + public var canceled: HHSDKVideo.onCanceled? + public init() + public func build(_ block: (inout HHSDKVideo.HHCameraConfig) -> Swift.Void) -> HHSDKVideo.HHCameraConfig + @objc deinit +} +public let HHSDKScreenWidth: CoreGraphics.CGFloat +public let HHSDKScreenHeight: CoreGraphics.CGFloat +public let China_Flag: Swift.String +public struct HHDimens { + public static func isPad() -> Swift.Bool + public static func isPlus() -> Swift.Bool +} +public func HHColor(_ red: CoreGraphics.CGFloat, green: CoreGraphics.CGFloat, blue: CoreGraphics.CGFloat, alpha: CoreGraphics.CGFloat = 1.0) -> UIKit.UIColor +public func HHUISingleColor(_ value: CoreGraphics.CGFloat, alpha: CoreGraphics.CGFloat = 1.0) -> UIKit.UIColor +public func visibleWindow() -> UIKit.UIWindow? +public func imageWithColor(color: UIKit.UIColor) -> UIKit.UIImage? +public func delayFunc(_ time: Swift.Double, block: @escaping () -> Swift.Void) +public func appLanguage() -> Swift.String +public func isChina() -> Swift.Bool +@_hasMissingDesignatedInitializers public class HHDevice { + public static func isIphoneX() -> Swift.Bool + public static func botOffset() -> CoreGraphics.CGFloat + public static func tOffset() -> CoreGraphics.CGFloat + public class func isSml() -> Swift.Bool + public class func isMid() -> Swift.Bool + public class func isPlus() -> Swift.Bool + public class func isX() -> Swift.Bool + public static func iphoneType() -> Swift.String + @objc deinit +} +public typealias HHFetchBlock = (UIKit.UIImage?, [Swift.AnyHashable : Any]?) -> Swift.Void +public typealias onCanceled = (() -> Swift.Void) +public typealias onCapFinished = (([HHSDKVideo.SDKCameraImageModel]?) -> Swift.Void) +public typealias onCropFinish = (UIKit.UIImage, Swift.String?) 
-> Swift.Void +public enum HHMediaType : Swift.Int { + case cusCamera + case sysCamera + case cusVideo + case sysVideo + case photoImage + case photoVideo + case cusPhoto + case sysCrop + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +open class HHDataController<T> where T : HHSDKVideo.Mappable { + open var mData: T? + public init() + open func request(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) + open func emptyRequest(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) + open func noDataRequest(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) + open func request<E>(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: ((E) -> Swift.Void)?, fail: HHSDKVideo.HHNetError? = nil) where E : HHSDKVideo.Mappable + @objc deinit +} +extension Date { + public static func currentDate() -> Foundation.Date +} +public enum DateFormat : Swift.String { + case Full + case SingleDate + case Single + case WithoutSecond + case WithoutYearAndSecond + case HourMinute + case CN_Month_Day + case CN_Hour_Minute + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +@objc @_inheritsConvenienceInitializers public class HHDateUtils : ObjectiveC.NSObject { + public class func getDateForChinaStr() -> Swift.String + public static func stringWithDurationFromSeconds(_ seconds: Foundation.TimeInterval) -> Swift.String + public static func component(_ date: Foundation.Date) -> Foundation.DateComponents + @objc override dynamic public init() + @objc deinit +} +extension HHDateUtils { + public class func date2String(_ date: Foundation.Date, format: Swift.String) -> Swift.String + public class func date2String(_ date: Foundation.Date, format: HHSDKVideo.DateFormat) -> Swift.String +} 
+extension HHDateUtils { + public class func string2Date(_ str: Swift.String, format: HHSDKVideo.DateFormat) -> Foundation.Date? + public class func string2Date(_ str: Swift.String, format: Swift.String) -> Foundation.Date? +} +extension HHDateUtils { + public static func dateStringFromNow(_ date: Swift.Int) -> Swift.String + public static func dateStringFromInt(_ date: Swift.Int) -> Swift.String + public static func dateYearStringFromInt(_ date: Swift.Int) -> Swift.String +} +@objc @_inheritsConvenienceInitializers open class HHDeviceManager : ObjectiveC.NSObject { + public static func jailBrokend() -> Swift.Bool + @objc override dynamic public init() + @objc deinit +} +public protocol HHDNSProtocal { + func changeHost(_ hostDomain: Swift.String) -> Swift.String + func requestHost(_ host: Swift.String, challenge: Foundation.URLAuthenticationChallenge, completion: @escaping (Foundation.URLSession.AuthChallengeDisposition, Foundation.URLCredential?) -> Swift.Void) +} +public typealias HHPriceInfo = (priceAttri: Foundation.NSMutableAttributedString, disPriceWidth: CoreGraphics.CGFloat?) +public struct HHDoctorModel : HHSDKVideo.Mappable { + public init?(map: HHSDKVideo.Map) + public var agentUuid: Swift.String? + public var createtime: Swift.String? + public var department: Swift.String? + public var deptid: Swift.Int? + public var disease: Swift.String? + public var diseaseList: [Swift.String]? + public var doctorid: Swift.String? + public var expertStatus: Swift.String? + public var expertVideoTime: Swift.String? + public var famExpertVideoPrice: Swift.Float? + public var famServices: Swift.Int? + public var famprovidetypes: Swift.String? + public var hhTitle: Swift.String? + public var hospital: Swift.String? + public var hospitalid: Swift.Int? + public var introduction: Swift.String? + public var isTest: Swift.String? + public var login: HHSDKVideo.LoginModel? + public var workyear: Swift.Int? + public var name: Swift.String? + public var photourl: Swift.String? 
+ public var price: Swift.Float? + public var providetype: Swift.String? + public var province: Swift.String? + public var service: Swift.String? + public var serviceTypeStatus: Swift.String? + public var speciality: Swift.String? + public var standardDeptid: Swift.Int? + public var standardDeptname: Swift.String? + public var standardid: Swift.Int? + public var subdept: Swift.String? + public var subdeptids: Swift.String? + public var title: Swift.String? + public var titleid: Swift.Int? + public var vedioTimeList: Swift.String? + public var videoprice: Swift.Float? + public var license: Swift.String? + public init() + public mutating func mapping(map: HHSDKVideo.Map) + public func isJianzhi() -> Swift.Bool + public func supportType(type: HHSDKVideo.HHConsType) -> Swift.Bool + public func getPrice() -> HHSDKVideo.HHPriceInfo? + public func isZhuanke() -> Swift.Bool +} +public struct LoginModel : HHSDKVideo.Mappable { + public var actionSource: Swift.String? + public var loginname: Swift.String? + public var name: Swift.String? + public var photourl: Swift.String? + public var uuid: Swift.Int? + public var videoToken: Swift.String? + public var phoneno: Swift.String? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +public enum HHConsType : Swift.String { + case normal + case expert_video + case feiDao + case video + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +public struct HHEmptyModel : HHSDKVideo.Mappable { + public init() + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +@_hasMissingDesignatedInitializers public class HHEncryptUtils { + public static func encrypto(key: Swift.String, content: Swift.String) -> Swift.String? + public static func decrypto(key: Swift.String, content: Swift.String) -> Swift.String? 
+ public static func encrypto(key: Swift.String, content: Foundation.Data) -> Foundation.Data? + public static func decrypto(key: Swift.String, content: Foundation.Data) -> Foundation.Data? + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHFileCacheManager : ObjectiveC.NSObject { + public enum HHAssetPathType { + case image + case video + case sound + case dicom + case fb + case other + case dataBase + public static func == (a: HHSDKVideo.HHFileCacheManager.HHAssetPathType, b: HHSDKVideo.HHFileCacheManager.HHAssetPathType) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public enum HHFileFormat : Swift.String { + case Jpg + case Png + case Jpeg + case webp + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } + } + @objc override dynamic public init() + @objc deinit +} +extension HHFileCacheManager { + public class func getFileFormat(_ name: Swift.String) -> HHSDKVideo.HHFileCacheManager.HHAssetPathType + public class func createSoundFilePath(_ aPath: Swift.String) -> Swift.String + public class func createDBPath(_ aPath: Swift.String) -> Swift.String + public class func assetsCachePath(_ pathType: HHSDKVideo.HHFileCacheManager.HHAssetPathType) -> Swift.String + public class func createImageFilePath(_ format: HHSDKVideo.HHFileCacheManager.HHFileFormat = .Jpg) -> Swift.String + public class func createVideoFilePath() -> Swift.String + public class func isWriteCache(_ path: Swift.String?, data: Foundation.Data?) -> Swift.Bool + public class func isWriteCache(_ path: Swift.String?, image: UIKit.UIImage, quality: CoreGraphics.CGFloat = 1.0) -> Swift.Bool + public class func getFilePath(_ name: Swift.String) -> Swift.String? 
+} +extension HHFileCacheManager { + public static func saveString2File(_ string: Swift.String?, fileName: Swift.String) + public static func stringFromFile(_ fileName: Swift.String) -> Swift.String? +} +extension FileManager { + public func addSkipBackupAttributeToItemAtURL(_ url: Foundation.URL) -> Swift.Bool +} +public var uploadManager: HHSDKVideo.UploadQueue { + get +} +@_hasMissingDesignatedInitializers public class UploadQueue { + @discardableResult + public func upload(files: [Swift.String], config: HHSDKVideo.SDKUploadConfig) -> HHSDKVideo.HHFileUploadManager + public func cancelAll(_ finished: (() -> Swift.Void)? = nil) + @objc deinit +} +public class HHFileUploadManager { + public var mFileQueue: [Swift.String] + public var config: HHSDKVideo.SDKUploadConfig! + public var mTransFile: Swift.String? + public var isUploading: Swift.Bool + public init(files: [Swift.String], config: HHSDKVideo.SDKUploadConfig) + public func uploadFile(_ file: [Swift.String]) + public func cancalFiles(_ files: [Swift.String], cancelFinish: ((Swift.String) -> Swift.Void)? = nil) + public func cancelAll(_ finished: (() -> Swift.Void)? = nil) + @objc deinit +} +@objc public protocol HHHUDable { + @objc optional var autoDismissDuration: Foundation.TimeInterval { get } + @objc func showHUD() + @objc func dismissHUD() + @objc func showSuccess(_ message: Swift.String?) + @objc func showError(_ messgae: Swift.String?) + @objc optional func setDismissDuration(_ duraion: Foundation.TimeInterval) +} +extension HHHUDable { + public var autoDismissDuration: Foundation.TimeInterval { + get + } + public func setDismissDuration(_ duraion: Foundation.TimeInterval) +} +@objc public protocol HHIM { + @objc func register(_ cerName: Swift.String?) + @objc func login(_ completion: ((Swift.String?) -> Swift.Void)?) + @objc func autoLogin(_ completion: ((Swift.String?) -> Swift.Void)?) + @objc func logout(_ callback: ((Swift.String?) -> Swift.Void)?) 
+ @objc func canVideo() -> Swift.Bool +} +public struct HHInviteDocModel : HHSDKVideo.Mappable { + public var orderId: Swift.String? + public var channelId: Swift.UInt64? + public var doctorId: Swift.String? + public var imageUrl: Swift.String? + public var signalingType: Swift.String? + public var width: CoreGraphics.CGFloat + public var height: CoreGraphics.CGFloat + public init?(map: HHSDKVideo.Map) + public init(_ info: HHSDKVideo.HHNetCallChatInfo, meetId: Swift.UInt64?) + public func isWhiteBoard() -> Swift.Bool + public func isMultyCall() -> Swift.Bool + public mutating func mapping(map: HHSDKVideo.Map) +} +@objc public enum HHLogMode : Swift.Int { + case error = 0 + case warn = 1 + case info = 2 + case debug = 3 + case verbose = 4 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +public func logging(type: HHSDKVideo.HHLogMode = .info, _ tip: Swift.String) +@objc @_inheritsConvenienceInitializers open class HHMediaStatusCheckUtils : ObjectiveC.NSObject { + open class func checkCameraAccess() -> Swift.Bool + open class func checkCameraVideoPermission() -> Swift.Bool + open class func checkAlbumAccess() -> Swift.Bool + open class func checkAudioAccess() -> Swift.Bool + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers @_hasMissingDesignatedInitializers open class HHMedicNetObserver : ObjectiveC.NSObject { + public static let sharedInstance: HHSDKVideo.HHMedicNetObserver + open func createReachability() + open func currentInWifi() -> Swift.Bool + open func haveNetWork() -> Swift.Bool + @objc deinit +} +@_hasMissingDesignatedInitializers public class HHMedicPhotoPicker { + public static func openCamera(config: HHSDKVideo.HHCameraConfig, capFinished: HHSDKVideo.onCapFinished? 
= nil) + public static func reduceImages(paths: [Swift.String], finished: @escaping (([Swift.String]) -> Swift.Void)) + public class func changeAvatar(vc: UIKit.UIViewController, reference: UIKit.UIView? = nil, uuid: Swift.Int, imgClosure: @escaping (UIKit.UIImage) -> Swift.Void, keyClosure: @escaping (Swift.String) -> Swift.Void) + @objc deinit +} +extension HHMedicPhotoPicker { + public static func checkPermisstion(_ type: HHSDKVideo.HHBasePermissionType, authorized: (() -> Swift.Void)?, others: ((HHSDKVideo.HHBasePermissionType) -> Swift.Void)?) + public static func converSize(_ size: CoreGraphics.CGSize) -> CoreGraphics.CGSize +} +extension HHMedicPhotoPicker : HHSDKVideo.HHPhotoPickerManagerDelegate { + public func selectImage(_ selectedImages: [UIKit.UIImage]) + public func cancelImage() + public func selectImageRequestError(_ errorAssets: [Photos.PHAsset], errorIndexs: [Swift.Int]) +} +@objc @_inheritsConvenienceInitializers @_hasMissingDesignatedInitializers public class HHNeedRealNameView : UIKit.UIView { + public var realNameLinkClourse: (() -> ())? 
+ @objc deinit +} +@_hasMissingDesignatedInitializers public class HHNetCallChatInfo { + public init() + @objc deinit +} +@objc public enum HHCallType : Swift.Int { + case child = 600000 + case adult = 600002 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +public enum HHServerType { + case pay + case pacs + case weixin + public static func == (a: HHSDKVideo.HHServerType, b: HHSDKVideo.HHServerType) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +public enum HHRequestMethod { + case get + case post + public static func == (a: HHSDKVideo.HHRequestMethod, b: HHSDKVideo.HHRequestMethod) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +public let HH_RELOGIN_NOTIFICATION_STR: Swift.String +public struct HHRequestData { + public init(body: [Swift.String : Any] = ["default_sw":"default"], param: [Swift.String : Any] = ["default_sw":"default"]) + public var mHttpBody: [Swift.String : Any] + public var mParameters: [Swift.String : Any] +} +@_hasMissingDesignatedInitializers public class HHNetFetch { + public static func request<E>(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: ((E) -> Swift.Void)?, fail: HHSDKVideo.HHNetError? = nil) where E : HHSDKVideo.Mappable + public static func requestArray<E>(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: (([E]?) -> Swift.Void)?, fail: HHSDKVideo.HHNetError? = nil) where E : HHSDKVideo.Mappable + public static func noDataRequest(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) + public static func emptyRequest(api: HHSDKVideo.HHBaseApi, data: HHSDKVideo.HHRequestData? = nil, result: HHSDKVideo.NetResult?) 
+ @objc deinit +} +extension UIControl.State : Swift.Hashable { + public var hashValue: Swift.Int { + get + } +} +@objc @_inheritsConvenienceInitializers @_hasMissingDesignatedInitializers @IBDesignable public class HHPagerView : UIKit.UIView, UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegate { + @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) + @objc override dynamic public func layoutSubviews() + @objc override dynamic public func willMove(toWindow newWindow: UIKit.UIWindow?) + @objc override dynamic public func prepareForInterfaceBuilder() + @objc deinit + @objc public func numberOfSections(in collectionView: UIKit.UICollectionView) -> Swift.Int + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, shouldHighlightItemAt indexPath: Foundation.IndexPath) -> Swift.Bool + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, didHighlightItemAt indexPath: Foundation.IndexPath) + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, shouldSelectItemAt indexPath: Foundation.IndexPath) -> Swift.Bool + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, didSelectItemAt indexPath: Foundation.IndexPath) + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, willDisplay cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) + @objc public func collectionView(_ collectionView: UIKit.UICollectionView, didEndDisplaying cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) + @objc public func scrollViewDidScroll(_ scrollView: UIKit.UIScrollView) + @objc public func scrollViewWillBeginDragging(_ scrollView: 
UIKit.UIScrollView) + @objc public func scrollViewWillEndDragging(_ scrollView: UIKit.UIScrollView, withVelocity velocity: CoreGraphics.CGPoint, targetContentOffset: Swift.UnsafeMutablePointer<CoreGraphics.CGPoint>) + @objc public func scrollViewDidEndDecelerating(_ scrollView: UIKit.UIScrollView) + @objc public func scrollViewDidEndScrollingAnimation(_ scrollView: UIKit.UIScrollView) +} +@objc public enum HHPagerViewTransformerType : Swift.Int { + case crossFading + case zoomOut + case depth + case overlap + case linear + case coverFlow + case ferrisWheel + case invertedFerrisWheel + case cubic + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@_hasMissingDesignatedInitializers public class UrlParams { + public static func addUserParams(_ parameters: [Swift.String : Any]?) -> [Swift.String : Any]? + public static func addCommon(_ param: [Swift.String : Any]?) -> [Swift.String : Any] + public static func param2String(param: [Swift.String : Any]? = nil) -> Swift.String + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHPhotoPickerController : UIKit.UINavigationController { + @objc override dynamic public func viewDidLoad() + convenience public init(localPath: Swift.String? = nil, deleteMode: Swift.Bool = false, finish: (([HHSDKVideo.SDKCameraImageModel]?) -> Swift.Void)? = nil) + @objc deinit + @available(iOS 5.0, *) + @objc override dynamic public init(navigationBarClass: Swift.AnyClass?, toolbarClass: Swift.AnyClass?) + @objc override dynamic public init(rootViewController: UIKit.UIViewController) + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) 
+ @objc required dynamic public init?(coder aDecoder: Foundation.NSCoder) +} +public protocol HHPhotoPickerManagerDelegate { + func selectImage(_ selectedImages: [UIKit.UIImage]) + func cancelImage() + func selectImageRequestError(_ errorAssets: [Photos.PHAsset], errorIndexs: [Swift.Int]) +} +@objc public class HHPhotoPickerManager : ObjectiveC.NSObject { + public var viewDelegate: HHSDKVideo.HHPhotoPickerManagerDelegate? + public var photoConfigModel: HHSDKVideo.HHPhotoConfigModel + public var photoUIConfigModel: HHSDKVideo.HHPhotoUIConfigModel + required public init(showVC: UIKit.UIViewController) + public func showImagePicker() + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHPhotoConfigModel : ObjectiveC.NSObject { + public var maxPreviewCount: Swift.Int + public var maxSelectCount: Swift.Int { + get + set + } + public var minVideoSelectCount: Swift.Int { + get + set + } + public var maxVideoSelectCount: Swift.Int { + get + set + } + public var minSelectVideoDuration: Swift.Int + public var maxSelectVideoDuration: Swift.Int + public var cellCornerRadio: CoreGraphics.CGFloat + public var languageType: HHSDKVideo.ZLLanguageType { + get + set + } + public var columnCount: Swift.Int { + get + set + } + public var sortAscending: Swift.Bool + public var allowSelectImage: Swift.Bool + public var allowTakePhotoInLibrary: Swift.Bool + public var allowSelectOriginal: Swift.Bool + public var allowSelectGif: Swift.Bool + public var allowSelectVideo: Swift.Bool + public var allowSelectLivePhoto: Swift.Bool + public var allowEditImage: Swift.Bool + public var allowMixSelect: Swift.Bool + public var allowPreviewPhotos: Swift.Bool + public var editImageWithDraw: Swift.Bool + public var editImageWithClip: Swift.Bool + public var editImageWithImageSticker: Swift.Bool + public var editImageWithTextSticker: Swift.Bool + public var editImageWithMosaic: Swift.Bool + public var editImageWithFilter: Swift.Bool + public 
var editImageWithAdjust: Swift.Bool + public var editImageWitAdjustBrightness: Swift.Bool + public var editImageWitAdjustContrast: Swift.Bool + public var editImageWitAdjustSaturation: Swift.Bool + public var shouldAnialysisAsset: Swift.Bool + public var allowEditVideo: Swift.Bool { + get + set + } + public var saveNewImageAfterEdit: Swift.Bool + public var allowDragSelect: Swift.Bool + public var allowSlideSelect: Swift.Bool + public var autoScrollWhenSlideSelectIsActive: Swift.Bool + public var autoScrollMaxSpeed: CoreGraphics.CGFloat + public var showCaptureImageOnTakePhotoBtn: Swift.Bool + public var showSelectedIndex: Swift.Bool + public var showSelectedMask: Swift.Bool + public var showSelectedBorder: Swift.Bool + public var showInvalidMask: Swift.Bool + public var useCustomCamera: Swift.Bool + public var flashMode: HHSDKVideo.ZLCameraConfiguration.FlashMode + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class HHPhotoUIConfigModel : ObjectiveC.NSObject { + public var style: HHSDKVideo.ZLPhotoBrowserStyle + public var bottomToolViewBtnNormalBgColor: UIKit.UIColor + public var bottomToolViewBtnNormalBgColorOfPreviewVC: UIKit.UIColor + @objc public var indexLabelBgColor: UIKit.UIColor + @objc override dynamic public init() + @objc deinit +} +public class HHProgressHUD : HHSDKVideo.HHHUDable { + public init() + @objc public func showHUD() + @objc public func dismissHUD() + @objc public func showError(_ messgae: Swift.String?) + @objc public func showSuccess(_ message: Swift.String?) + public func hhMessageTips(message: Swift.String?) + @objc deinit +} +public struct HHGetQuesetionModel : HHSDKVideo.Mappable { + public var question: HHSDKVideo.HHQuesetionModel? + public var rate: [HHSDKVideo.rateModel]? 
+ public init?(map: HHSDKVideo.Map) + public init() + public mutating func mapping(map: HHSDKVideo.Map) + public func isHaveQ() -> Swift.Bool +} +public struct HHQuesetionModel : HHSDKVideo.Mappable { + public var answerOne: Swift.String? + public var answerTwo: Swift.String? + public var content: Swift.String? + public var id: Swift.Int? + public init?(map: HHSDKVideo.Map) + public init() + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct rateModel : HHSDKVideo.Mappable { + public var createTime: Swift.Int? + public var content: Swift.String? + public var state: Swift.Int? + public var id: Swift.Int? + public var answerOne: Swift.String? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +@objc public enum HHRealNameType : Swift.Int { + case normal, buyMedic + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc @_inheritsConvenienceInitializers public class HHRealNameInputNewView : UIKit.UIView { + @objc @IBOutlet weak public var idCardTF: UIKit.UITextField! + public class func createRealNameInputNewView(realNameType: HHSDKVideo.HHRealNameType, hideNickName: Swift.Bool = false) -> HHSDKVideo.HHRealNameInputNewView + public func showErroTip(tip: Swift.String) + public func getInpuValues() -> [Swift.String : Swift.String]? + @objc override dynamic public func awakeFromNib() + public func load(userModel: HHSDKVideo.HHUserModel?) 
+ @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +extension HHRealNameInputNewView : UIKit.UITextFieldDelegate { + @objc dynamic public func textField(_ textField: UIKit.UITextField, shouldChangeCharactersIn range: Foundation.NSRange, replacementString string: Swift.String) -> Swift.Bool + @objc dynamic public func textFieldDidBeginEditing(_ textField: UIKit.UITextField) +} +@_inheritsConvenienceInitializers @objc public class HHRealNameInputView : UIKit.UIView { + public var nickName: Swift.String { + get + set + } + public class func createRealNameInputView(realNameType: HHSDKVideo.HHRealNameType) -> HHSDKVideo.HHRealNameInputView + public var showPassPort: Swift.Bool { + get + set + } + public func showErroTip(tip: Swift.String) + public func getInpuValues() -> [Swift.String : Swift.String]? + @objc override dynamic public func awakeFromNib() + public func load(userModel: HHSDKVideo.HHUserModel?) + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +extension HHRealNameInputView : UIKit.UITextFieldDelegate { + @objc dynamic public func textField(_ textField: UIKit.UITextField, shouldChangeCharactersIn range: Foundation.NSRange, replacementString string: Swift.String) -> Swift.Bool +} +public let KeyNetErrorStr: Swift.String +public let KeyNoNetErrorStr: Swift.String +public typealias HHNetError = ((Swift.String) -> (Swift.Void)) +public typealias HHNetSuccessNoData = () -> Swift.Void +public typealias NetResult = (Swift.Bool, Swift.String) -> (Swift.Void) +public class HHRequest<T> where T : HHSDKVideo.Mappable { + public typealias HHNetSuccess = (T) -> Swift.Void + public typealias HHNetSuccessForArray = ([T]?) -> Swift.Void + public var mRequestFail: HHSDKVideo.HHNetError? + public var mRequestSuccess: HHSDKVideo.HHRequest<T>.HHNetSuccess? 
+ public var mRequestSuccessNoData: HHSDKVideo.HHNetSuccessNoData? + public var mRequestSuccessForArray: HHSDKVideo.HHRequest<T>.HHNetSuccessForArray? + public var errorCode: Swift.Int? + public var mApi: HHSDKVideo.HHBaseApi? + required public init(api: HHSDKVideo.HHBaseApi, requestData: HHSDKVideo.HHRequestData? = nil, postData: Foundation.Data? = nil) + public func start() + public func cancel() + @objc deinit +} +extension HHRequest { + public func startForArray(_ successCallBack: @escaping HHSDKVideo.HHRequest<T>.HHNetSuccessForArray, failCallBack: @escaping HHSDKVideo.HHNetError) +} +@objc public protocol HHRTC { + @objc optional func setOrderId(orderId: Swift.String) + @objc optional func startCall(callee: Swift.String, orderId: Swift.String?) + @objc optional func enterRoom(orderId: Swift.String) + @objc optional func switchLocalAudio(_ isOpen: Swift.Bool) + @objc optional func switchLocalVideo(_ isOpen: Swift.Bool, localView: UIKit.UIView) + @objc optional func openDoctorView(userId: Swift.String, view: UIKit.UIView) + @objc optional func closeDoctorView(userId: Swift.String) + @objc optional func switchCamera(_ isFront: Swift.Bool) + @objc optional func switchCameraFlash(_ isOpen: Swift.Bool) + @objc optional func sendMsg(isSignal: Swift.Bool, cmd: Swift.String, to: Swift.String, complete: ((Swift.String?) -> Swift.Void)?) 
+ @objc optional func leaveRoom() + @objc optional func hangUp(callId: Swift.UInt64) + @objc optional func startRing(audioId: Swift.Int) + @objc optional func stopRing() + @objc optional func snapshotVideo(userId: Swift.String?, imageBack: @escaping (UIKit.UIImage) -> ()) +} +public protocol HHRTCDelegate : ObjectiveC.NSObject { + func onEnterRoom() + func checkHasAccept(_ isCmd: Swift.Bool, volumn: Swift.Int) + func switchVideo(_ isToAudio: Swift.Bool) + func onOtherViewAvailable(_ availableUserId: Swift.String, isAvailable: Swift.Bool) + func onRemoteUserEnterRoom(_ userId: Swift.String) + func onRemoteUserLeaveRoom(_ userId: Swift.String) + func sendRTCLog(action: HHSDKVideo.TrtcLog, ex: Swift.String) + func esdablishByRTC(error: HHSDKVideo.TrtcError, reason: Swift.String) + func processMsg(cmd: HHSDKVideo.HHIMCmd, orderId: Swift.String, uuid: Swift.String) + func waitingChanged(_ waitingInfo: HHSDKVideo.HHWaitDoctorModel) + func waitingSuccess(_ doctorInfo: HHSDKVideo.HHDoctorModel, orderId: Swift.String) + func onTransform(_ transInfo: HHSDKVideo.HHWaitDoctorModel) + func onExitRoom() + func hangup() + func getDoctorUserId() -> Swift.String? 
+ func resumeRemote() + func onFirstVideoFrame(_ userId: Swift.String?, width: Swift.Int32, height: Swift.Int32) +} +public enum TrtcLog : Swift.String { + case waitingRecall + case missMessage + case ignoreCall + case enterError + case doctorJoinRoom + case micDidReady + case netQuality + case signalError + case killEror + case netDown + case joinSuccess + case schedule + case noSchedule + case video_busy + case permit_error + case transform + case camera_close + case camera_open + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +public enum TrtcError : Swift.String { + case callTimeOut + case rtcError + case enterRoomFail + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +@_inheritsConvenienceInitializers @objc public class HHSDKBaseOptions : ObjectiveC.NSObject { + @objc public static let `default`: HHSDKVideo.HHSDKBaseOptions + @objc public var isDebug: Swift.Bool + @objc public var isDevelopment: Swift.Bool + @objc public var isSDK: Swift.Bool + @objc public var isTRTC: Swift.Bool + @objc public var sdkProductId: Swift.String + @objc public var appVersion: Swift.String + @objc public var needDNS: Swift.Bool + public var hudManager: HHSDKVideo.HHHUDable + @objc public var sdkVersion: Swift.String + @objc public var hudDisTime: Swift.Double { + @objc get + @objc set + } + @objc public func setConfig(_ sdkProductId: Swift.String, isDebug: Swift.Bool, isDevelopment: Swift.Bool, isTrtc: Swift.Bool, needDNS: Swift.Bool = false) + @objc override dynamic public init() + @objc deinit +} +@objc public protocol OptionProtocal { + @objc var hudDisTime: Foundation.TimeInterval { get set } + @objc var isDebug: Swift.Bool { get set } + @objc var isDevelopment: Swift.Bool { get set } + @objc var hudManager: HHSDKVideo.HHHUDable { get set } + @objc var productId: Swift.String { get set } + @objc var cerName: 
Swift.String? { get set } + @objc var logLevel: HHSDKVideo.HHLogMode { get set } + @objc var mExtension: Swift.String { get set } + @objc var changeDoctorTime: Swift.Int { get set } + @objc var logCallback: ((Swift.String) -> Swift.Void)? { get set } + @objc var mVideoOptions: HHSDKVideo.VideoOptions { get set } + @objc var mMessageOptions: HHSDKVideo.MessageOptions { get set } + @objc var mUserCenterOptions: HHSDKVideo.UsercenterOptions { get set } + @objc var sdkVersion: Swift.String { get set } + @objc var appVersion: Swift.String { get set } + @objc var isTRTC: Swift.Bool { get set } + @objc var needDNS: Swift.Bool { get set } + @objc var shouldWaingCall: Swift.Bool { get set } +} +public var HMDefaultOpt: HHSDKVideo.OptionProtocal { + get +} +@_inheritsConvenienceInitializers @objc public class VideoOptions : ObjectiveC.NSObject { + public var filterCallerInfo: Swift.Bool + @objc public var allowBeauty: Swift.Bool + @objc public var allowEvaluate: Swift.Bool + @objc public var allowAddMember: Swift.Bool + @objc public var allowMulti: Swift.Bool + public var mCallExtension: Swift.String + @objc public var isShowDocInfo: Swift.Bool + @objc public var enableCloseCamera: Swift.Bool + @objc public var isCloseCameraCall: Swift.Bool + @objc override dynamic public init() + @objc deinit +} +@_inheritsConvenienceInitializers @objc public class MessageOptions : ObjectiveC.NSObject { + @objc public var isByPresent: Swift.Bool + @objc public var isFilterSummary: Swift.Bool + @objc public var isFilterMedicinal: Swift.Bool + @objc public var defaultDocHeader: Swift.String + @objc public var defaultDocName: Swift.String + @objc public var messageTitle: Swift.String + @objc override dynamic public init() + @objc deinit +} +@_inheritsConvenienceInitializers @objc public class UsercenterOptions : ObjectiveC.NSObject { + @objc public var enableVipInfo: Swift.Bool + @objc public var hideUserCenter: Swift.Bool + @objc public var enableActivate: Swift.Bool + @objc public var 
enableMedical: Swift.Bool + @objc public var enableAddMemberInDoc: Swift.Bool + @objc public var enableBuyService: Swift.Bool + @objc public var hideNickName: Swift.Bool + @objc public var enablePopRealName: Swift.Bool + @objc public var isCloseMoreFunc: Swift.Bool + @objc override dynamic public init() + @objc deinit +} +@_hasMissingDesignatedInitializers public class HHStatics { + public static let `default`: HHSDKVideo.HHStatics + public func send(params: [Swift.String : Any]) + @objc deinit +} +public struct CommonApi { +} +extension String { + public func subFrom(_ index: Swift.Int) -> Swift.String + public func subTo(_ index: Swift.Int) -> Swift.String +} +extension String { + public func urlEncode() -> Swift.String + public func stringByAppendingPathComponent(_ pathComponent: Swift.String) -> Swift.String + public func hh_sha1() -> Swift.String + public func string2base64String() -> Swift.String + public func base64String2String() -> Swift.String + public var lastPathComponent: Swift.String { + get + } + public var pathExtension: Swift.String { + get + } +} +public enum hhToastPosition { + case top + case center + case bottom + public static func == (a: HHSDKVideo.hhToastPosition, b: HHSDKVideo.hhToastPosition) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +extension UIView { + public func hhmakeToast(_ message: Swift.String) + public func hhmakeToast(_ message: Swift.String, duration: Foundation.TimeInterval, position: HHSDKVideo.hhToastPosition) + public func hhmakeToast(_ message: Swift.String, duration: Foundation.TimeInterval, position: CoreGraphics.CGPoint) + public func hhmakeToast(_ message: Swift.String, duration: Foundation.TimeInterval, position: HHSDKVideo.hhToastPosition, style: HHSDKVideo.hhToastStyle?) + public func hhmakeToast(_ message: Swift.String, duration: Foundation.TimeInterval, position: CoreGraphics.CGPoint, style: HHSDKVideo.hhToastStyle?) 
+ public func hhmakeToast(_ message: Swift.String?, duration: Foundation.TimeInterval, position: HHSDKVideo.hhToastPosition, title: Swift.String?, image: UIKit.UIImage?, style: HHSDKVideo.hhToastStyle?, completion: ((Swift.Bool) -> Swift.Void)?) + public func hhmakeToast(_ message: Swift.String?, duration: Foundation.TimeInterval, position: CoreGraphics.CGPoint, title: Swift.String?, image: UIKit.UIImage?, style: HHSDKVideo.hhToastStyle?, completion: ((Swift.Bool) -> Swift.Void)?) + public func hhshowToast(_ toast: UIKit.UIView) + public func hhshowToast(_ toast: UIKit.UIView, duration: Foundation.TimeInterval, position: HHSDKVideo.hhToastPosition, completion: ((Swift.Bool) -> Swift.Void)?) + public func hhshowToast(_ toast: UIKit.UIView, duration: Foundation.TimeInterval, position: CoreGraphics.CGPoint, completion: ((Swift.Bool) -> Swift.Void)?) + public func hhmakeToastActivity(_ position: HHSDKVideo.hhToastPosition) + public func hhmakeToastActivity(_ position: CoreGraphics.CGPoint) + public func hhhideToastActivity() + @objc dynamic public func hhhandleToastTapped(_ recognizer: UIKit.UITapGestureRecognizer) + @objc dynamic public func hhtoastTimerDidFinish(_ timer: Foundation.Timer) + public func hhtoastViewForMessage(_ message: Swift.String?, title: Swift.String?, image: UIKit.UIImage?, style: HHSDKVideo.hhToastStyle) throws -> UIKit.UIView +} +public struct hhToastStyle { + public init() + public var backgroundColor: UIKit.UIColor + public var titleColor: UIKit.UIColor + public var messageColor: UIKit.UIColor + public var maxWidthPercentage: CoreGraphics.CGFloat { + get + set + } + public var maxHeightPercentage: CoreGraphics.CGFloat { + get + set + } + public var horizontalPadding: CoreGraphics.CGFloat + public var verticalPadding: CoreGraphics.CGFloat + public var cornerRadius: CoreGraphics.CGFloat + public var titleFont: UIKit.UIFont + public var messageFont: UIKit.UIFont + public var titleAlignment: UIKit.NSTextAlignment + public var messageAlignment: 
UIKit.NSTextAlignment + public var titleNumberOfLines: Swift.Int + public var messageNumberOfLines: Swift.Int + public var displayShadow: Swift.Bool + public var shadowColor: UIKit.UIColor + public var shadowOpacity: Swift.Float { + get + set + } + public var shadowRadius: CoreGraphics.CGFloat + public var shadowOffset: CoreGraphics.CGSize + public var imageSize: CoreGraphics.CGSize + public var activitySize: CoreGraphics.CGSize + public var fadeDuration: Swift.Double +} +extension UIAlertController { + public func showAlter() + public func present(animated: Swift.Bool, completion: (() -> Swift.Void)?) + public func addAlterActions(_ actions: [UIKit.UIAlertAction]) + public func alterMessageStyle(_ fonsize: CoreGraphics.CGFloat = (HHDimens.isPad()) ? 18 : 16) + public static func closeAlert(_ title: Swift.String = "", msg: Swift.String = "", keyString: Swift.String = "取消", closeBlock: (() -> Swift.Void)? = nil) -> UIKit.UIAlertController +} +extension UIButton { + public func centerImageTitleVertically(spacing: CoreGraphics.CGFloat = 2) + public func imageTitleHorizonal(spacing: CoreGraphics.CGFloat = 2) +} +extension UIImage { + public func rotatedBy(_ degrees: CoreGraphics.CGFloat) -> UIKit.UIImage +} +extension UIImageView { + public func hh_image(url: Foundation.URL?) + public func hh_image(url: Foundation.URL?, complete: ((UIKit.UIImage?, Foundation.URL?) -> Swift.Void)?) + public func hh_image(url: Foundation.URL?, placeHolder: UIKit.UIImage?) + public func hh_image(url: Foundation.URL?, placeHolder: UIKit.UIImage?, progresses: ((CoreGraphics.CGFloat) -> Swift.Void)?, complete: ((UIKit.UIImage?, Foundation.URL?) -> Swift.Void)?) +} +public func hh_downloadImg(_ url: Foundation.URL?, finish: @escaping ((UIKit.UIImage?, Foundation.Data?, Swift.Error?) -> Swift.Void)) +extension UIViewController { + public func hhAddCloseBtn(_ atLeft: Swift.Bool? 
= nil, isDismiss: Swift.Bool = true, title: Swift.String = "关闭") + @objc dynamic public func hhCloseThisController() + @objc dynamic public func hhPopThisController() + public func setNavigationTheme() + public func setNaviBackImg(navi: UIKit.UINavigationController?, color: UIKit.UIColor) + public func imageFromColor(color: UIKit.UIColor, size: CoreGraphics.CGSize) -> UIKit.UIImage +} +extension UIView { + public var sj_width: CoreGraphics.CGFloat { + get + set + } + public var sj_height: CoreGraphics.CGFloat { + get + set + } + public var sj_size: CoreGraphics.CGSize { + get + set + } + public var sj_origin: CoreGraphics.CGPoint { + get + set + } + public var sj_x: CoreGraphics.CGFloat { + get + set + } + public var sj_y: CoreGraphics.CGFloat { + get + set + } + public var sj_centerX: CoreGraphics.CGFloat { + get + set + } + public var sj_centerY: CoreGraphics.CGFloat { + get + set + } + public var sj_top: CoreGraphics.CGFloat { + get + set + } + public var sj_bottom: CoreGraphics.CGFloat { + get + set + } + public var sj_right: CoreGraphics.CGFloat { + get + set + } + public var sj_left: CoreGraphics.CGFloat { + get + set + } +} +extension UIView { + public class func viewFromNib<T>(_ aClass: T.Type, frameworkPath: Swift.String) -> T +} +public typealias onSDKProgress = ((CoreGraphics.CGFloat, Swift.String) -> Swift.Void) +public typealias onSDKUploadOnce = ((Swift.Bool, HHSDKVideo.SDKUploadModel) -> Swift.Void) +public typealias onSDKFinished = (() -> Swift.Void) +public class SDKUploadConfig { + public var progress: HHSDKVideo.onSDKProgress? + public var uploadOnce: HHSDKVideo.onSDKUploadOnce? + public var finished: HHSDKVideo.onSDKFinished? + public var orderId: Swift.String? + public init() + @objc deinit +} +public class SDKUploadModel { + public var clouldKey: Swift.String? + public var filePath: Swift.String? { + get + set + } + public var smallImage: Swift.String + public var state: HHSDKVideo.SDKUploadState? 
+ public init() + public init(full: Swift.String?, scale: Swift.String) + public init(clouldKey: Swift.String?, filePath: Swift.String?, uploadTime: Foundation.TimeInterval?, name: Swift.String?, smallImage: Swift.String) + @objc deinit +} +@_hasMissingDesignatedInitializers public class SDKUploadState { + public var file: Swift.String? + public var isSelect: Swift.Bool + public var changed: (() -> Swift.Void)? + public var progress: Swift.Float { + get + set + } + public func isSuccess() -> Swift.Bool + public func isFail() -> Swift.Bool + @objc deinit +} +@_hasMissingDesignatedInitializers public class HHUrl { + public static func domains() -> [Swift.String] + public static var timeOffset: Swift.Double + public static func urlForPay() -> Swift.String + public static func urlForFamily() -> Swift.String + public static func urlForWeixin() -> Swift.String + public static func baseUrl() -> Swift.String + public static func basePayUrl() -> Swift.String + public static func baseMedicUrl() -> Swift.String + public static func baseSecUrl() -> Swift.String + public static func testURL() -> Swift.String + public static func fileLogUrl(_ name: Swift.String, orderId: Swift.String) -> Foundation.URL + public static func expertDetailUrl(expertId: Swift.String) -> Swift.String + public static func buyVIPUrl() -> Swift.String + public static func productRightUrl() -> Swift.String + @objc deinit +} +extension HHUrl { + public static func headers(host: Swift.String) -> [Swift.String : Swift.String] +} +public func languagePrefix() -> Swift.String +@_hasMissingDesignatedInitializers public class HHUserDefaults { + public class func setString(_ str: Swift.String, key: Swift.String) + public class func stringValue(_ key: Swift.String) -> Swift.String? + public class func setArray(_ array: [Swift.AnyObject], key: Swift.String) + public class func arrayForKey(_ key: Swift.String) -> [Swift.AnyObject]? 
+ public class func setImage(_ image: UIKit.UIImage, key: Swift.String) + public class func imageForKey(_ key: Swift.String) -> UIKit.UIImage? + @objc deinit +} +extension HHUserDefaults { + public class func setBool(_ flag: Swift.Bool, key: Swift.String) + public class func boolForKey(_ key: Swift.String) -> Swift.Bool + public class func setObject(_ obj: Swift.AnyObject, key: Swift.String) + public class func objectForKey(_ key: Swift.String) -> Swift.AnyObject? + public class func removeObject(_ key: Swift.String) +} +extension HHUserDefaults { + public class func setData(_ data: Foundation.Data?, key: Swift.String) + public class func dataForKey(_ key: Swift.String) -> Foundation.Data? + public class func userDefaults() -> Foundation.UserDefaults + public class func synchronize() + public class func encryptkey(_ key: Swift.String) -> Swift.String +} +public struct HHMemberInfoModel : HHSDKVideo.Mappable { + public var productStatusDescn: Swift.String? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +public class HHUserModel : HHSDKVideo.Mappable { + public var age: Swift.String? + public var companyLogo: Swift.String? + public var birthday: Swift.Int64? + public var loginname: Swift.String? + public var name: Swift.String? + public var photourl: Swift.String? + public var pid: Swift.Int? + public var product: HHSDKVideo.HHMemberInfoModel? + public var relation: Swift.String? + public var sex: Swift.String? + public var uuid: Swift.Int? + public var userToken: Swift.String? + public var videoToken: Swift.String? + public var auth: Swift.Bool? + public var isMember: Swift.Bool? + public var isAccount: Swift.Bool? + public var license: Swift.String? + public var userSig: Swift.String? + public var phoneNum: Swift.String? 
+ required public init?(map: HHSDKVideo.Map) + public init() + public func mapping(map: HHSDKVideo.Map) + @objc deinit +} +public struct HHUserProtocolModel : HHSDKVideo.Mappable { + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +open class HHValueObservable<T> { + public typealias Observer = (T) -> Swift.Void + open var observer: HHSDKVideo.HHValueObservable<T>.Observer? + open func observe(_ observer: HHSDKVideo.HHValueObservable<T>.Observer?) + open var value: T { + get + set + } + public init(_ v: T) + @objc deinit +} +@_inheritsConvenienceInitializers @objc public class HHVideoLocation : ObjectiveC.NSObject { + @objc public static let `default`: HHSDKVideo.HHVideoLocation + @objc public func startLocation(lng: Swift.String, lat: Swift.String) + @objc public func closeLocation() + @objc override dynamic public init() + @objc deinit +} +@_inheritsConvenienceInitializers @objc public class HHVideoSDK : ObjectiveC.NSObject { + @objc public static let `default`: HHSDKVideo.HHVideoSDK + public var mHHRTC: HHSDKVideo.HHRTC? + public var mSDKOption: HHSDKVideo.OptionProtocal? + weak public var mCallDelegate: HHSDKVideo.HHCallDelegate? + weak public var mHHRTCDelegate: HHSDKVideo.HHRTCDelegate? + weak public var videoManager: HHSDKVideo.HHBaseVideoDelegate? + public var expertVideoCallback: (() -> Swift.Void)? + public var autoLoginCheck: (() -> Swift.Void)? + public var onReceiveNewMsg: (([Swift.String : Any]) -> Swift.Void)? + public var userProtocolModel: HHSDKVideo.HHUserProtocolModel? + @objc public var photosPreview: ((Swift.Array<Swift.String>) -> Swift.Void)? + @objc public func start(option: HHSDKVideo.OptionProtocal, im: HHSDKVideo.HHIM, rtc: HHSDKVideo.HHRTC) + @objc public func login(userToken: Swift.String, completion: @escaping HHSDKVideo.HHLoginHandler) + @objc public func autoLogin(uuid: Swift.Int, completion: @escaping HHSDKVideo.HHLoginHandler) + @objc public func logout(_ callback: ((Swift.String?) 
-> Swift.Void)? = nil) + @objc public func terminate() + @objc public func setAlipayHook(alipayCallback: @escaping (Swift.String, Swift.String, @escaping (([Swift.String : Any]) -> Swift.Void)) -> Swift.Bool) + @objc override dynamic public init() + @objc deinit +} +extension HHVideoSDK { + @objc dynamic public func startCall(_ type: HHSDKVideo.HHCallType = .adult, scene: Swift.String? = nil, callDelegate: HHSDKVideo.HHCallDelegate? = nil) + @objc dynamic public func startNewCall(_ uuid: Swift.Int, type: HHSDKVideo.HHCallType = .adult, callDelegate: HHSDKVideo.HHCallDelegate? = nil) + @objc dynamic public func startCall(_ uuid: Swift.Int, scene: Swift.String? = nil, type: HHSDKVideo.HHCallType = .adult, callDelegate: HHSDKVideo.HHCallDelegate? = nil) +} +extension HHVideoSDK { + @objc dynamic public func startTeamCall(_ type: HHSDKVideo.HHCallType, callee: HHSDKVideo.HHCallerInfo, callDelegate: HHSDKVideo.HHCallDelegate? = nil) +} +extension HHVideoSDK { + @objc dynamic public func call(_ memberToken: Swift.String, scene: Swift.String? = nil) +} +extension HHVideoSDK { + public func waitExpert(userToken: Swift.String, callOrderId: Swift.String) +} +extension HHVideoSDK { + @objc dynamic public func startMemberCall(needSelectMember: Swift.Bool = true) +} +extension HHVideoSDK { + @objc dynamic public func skipChatHome(isByPresent: Swift.Bool = false, vc: UIKit.UIViewController? = nil) + @objc dynamic public func skipChatHome(_ nav: UIKit.UINavigationController) + @objc dynamic public func chatHomeVC() -> UIKit.UIViewController? +} +extension HHVideoSDK { + public func sendBaseLog(ex: [Swift.String : Swift.String]? = nil, action: [Swift.String : Swift.String]? = nil) +} +public func topviewController() -> UIKit.UIViewController? 
+extension HHVideoSDK { + @objc dynamic public func loginForThirdId(_ thirdInfo: [Swift.String : Any], completion: @escaping HHSDKVideo.HHLoginHandler) +} +extension HHVideoSDK { + public func checkProtocolUpdate(agreeBlock: ((Swift.Bool) -> Swift.Void)?) +} +extension HHVideoSDK { + @objc dynamic public func getMedicDetail(userToken: Swift.String, medicId: Swift.String) -> Swift.String + @objc dynamic public func getMedicList(userToken: Swift.String) -> Swift.String + @objc dynamic public func getAllMedics(userToken: Swift.String) -> Swift.String +} +extension HHVideoSDK { + @objc dynamic public func onKickedOffline() +} +public struct HHWaitDoctorModel : HHSDKVideo.Mappable { + public var isNormalTrans: Swift.Bool + public var deptId: Swift.String? + public var uuid: Swift.Int? + public var transUuid: Swift.Int? + public init?(map: HHSDKVideo.Map) + public func isWaiting() -> Swift.Bool + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct HHWaitingCallModel : HHSDKVideo.Mappable { + public init?(map: HHSDKVideo.Map) + public func isCall() -> Swift.Bool + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct HHAgentCallModel : HHSDKVideo.Mappable { + public init?(map: HHSDKVideo.Map) + public func isAgent() -> Swift.Bool + public func isTransform() -> Swift.Bool + public mutating func mapping(map: HHSDKVideo.Map) +} +public struct HKDF { + public enum Error : Swift.Error { + case invalidInput + case derivedKeyTooLong + public static func == (a: HHSDKVideo.HKDF.Error, b: HHSDKVideo.HKDF.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(password: Swift.Array<Swift.UInt8>, salt: Swift.Array<Swift.UInt8>? = nil, info: Swift.Array<Swift.UInt8>? = nil, keyLength: Swift.Int? 
= nil, variant: HHSDKVideo.HMAC.Variant = .sha256) throws + public func calculate() throws -> Swift.Array<Swift.UInt8> +} +final public class HMAC : HHSDKVideo.CryptoAuthenticator { + public enum Error : Swift.Error { + case authenticateError + case invalidInput + public static func == (a: HHSDKVideo.HMAC.Error, b: HHSDKVideo.HMAC.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public enum Variant { + case sha1, sha256, sha384, sha512, md5 + public static func == (a: HHSDKVideo.HMAC.Variant, b: HHSDKVideo.HMAC.Variant) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(key: Swift.Array<Swift.UInt8>, variant: HHSDKVideo.HMAC.Variant = .md5) + final public func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension HMAC { + convenience public init(key: Swift.String, variant: HHSDKVideo.HMAC.Variant = .md5) throws +} +public protocol ImmutableMappable : HHSDKVideo.BaseMappable { + init(map: HHSDKVideo.Map) throws +} +extension ImmutableMappable { + public func mapping(map: HHSDKVideo.Map) + public init(JSONString: Swift.String, context: HHSDKVideo.MapContext? = nil) throws + public init(JSON: [Swift.String : Any], context: HHSDKVideo.MapContext? = nil) throws + public init(JSONObject: Any, context: HHSDKVideo.MapContext? = nil) throws +} +extension Map { + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T + final public func value<Transform>(_ key: Swift.String, nested: Swift.Bool? 
= nil, delimiter: Swift.String = ".", using transform: Transform, file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> Transform.Object where Transform : HHSDKVideo.TransformType + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T where T : Swift.RawRepresentable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T? where T : Swift.RawRepresentable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [T] where T : Swift.RawRepresentable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [T]? where T : Swift.RawRepresentable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> T? where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? 
= nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [T] where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [T]? where T : HHSDKVideo.BaseMappable + final public func value<Transform>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", using transform: Transform, file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [Transform.Object] where Transform : HHSDKVideo.TransformType + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [Swift.String : T] where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [Swift.String : T]? where T : HHSDKVideo.BaseMappable + final public func value<Transform>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", using transform: Transform, file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [Swift.String : Transform.Object] where Transform : HHSDKVideo.TransformType + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [[T]]? where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, nested: Swift.Bool? 
= nil, delimiter: Swift.String = ".", file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [[T]] where T : HHSDKVideo.BaseMappable + final public func value<Transform>(_ key: Swift.String, nested: Swift.Bool? = nil, delimiter: Swift.String = ".", using transform: Transform, file: Swift.StaticString = #file, function: Swift.StaticString = #function, line: Swift.UInt = #line) throws -> [[Transform.Object]] where Transform : HHSDKVideo.TransformType +} +extension Mapper where N : HHSDKVideo.ImmutableMappable { + final public func map(JSON: [Swift.String : Any]) throws -> N + final public func map(JSONString: Swift.String) throws -> N + final public func map(JSONObject: Any) throws -> N + final public func mapArray(JSONArray: [[Swift.String : Any]]) throws -> [N] + final public func mapArray(JSONString: Swift.String) throws -> [N] + final public func mapArray(JSONObject: Any) throws -> [N] + final public func mapDictionary(JSONString: Swift.String) throws -> [Swift.String : N] + final public func mapDictionary(JSONObject: Any?) throws -> [Swift.String : N] + final public func mapDictionary(JSON: [Swift.String : [Swift.String : Any]]) throws -> [Swift.String : N] + final public func mapDictionaryOfArrays(JSONObject: Any?) throws -> [Swift.String : [N]] + final public func mapDictionaryOfArrays(JSON: [Swift.String : [[Swift.String : Any]]]) throws -> [Swift.String : [N]] + final public func mapArrayOfArrays(JSONObject: Any?) 
throws -> [[N]] +} +public func <- <T>(left: inout T, right: HHSDKVideo.Map) where T : Swift.SignedInteger +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) where T : Swift.SignedInteger +public func <- <T>(left: inout T, right: HHSDKVideo.Map) where T : Swift.UnsignedInteger +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) where T : Swift.UnsignedInteger +extension DateFormatter { + convenience public init(withFormat format: Swift.String, locale: Swift.String) +} +open class ISO8601DateTransform : HHSDKVideo.DateFormatterTransform { + public init() + override public init(dateFormatter: Foundation.DateFormatter) + @objc deinit +} +public let KeychainAccessErrorDomain: Swift.String +public enum ItemClass { + case genericPassword + case internetPassword +} +public enum ProtocolType { + case ftp + case ftpAccount + case http + case irc + case nntp + case pop3 + case smtp + case socks + case imap + case ldap + case appleTalk + case afp + case telnet + case ssh + case ftps + case https + case httpProxy + case httpsProxy + case ftpProxy + case smb + case rtsp + case rtspProxy + case daap + case eppc + case ipp + case nntps + case ldaps + case telnetS + case imaps + case ircs + case pop3S +} +public enum AuthenticationType { + case ntlm + case msn + case dpa + case rpa + case httpBasic + case httpDigest + case htmlForm + case `default` +} +public enum Accessibility { + case whenUnlocked + case afterFirstUnlock + case always + @available(iOS 8.0, macOS 10.10, *) + case whenPasscodeSetThisDeviceOnly + case whenUnlockedThisDeviceOnly + case afterFirstUnlockThisDeviceOnly + case alwaysThisDeviceOnly +} +public struct AuthenticationPolicy : Swift.OptionSet { + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + public static let userPresence: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let touchIDAny: HHSDKVideo.AuthenticationPolicy + 
@available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let touchIDCurrentSet: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, macOS 10.11, *) + @available(watchOS, unavailable) + public static let devicePasscode: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let or: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let and: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let privateKeyUsage: HHSDKVideo.AuthenticationPolicy + @available(iOS 9.0, *) + @available(macOS, unavailable) + @available(watchOS, unavailable) + public static let applicationPassword: HHSDKVideo.AuthenticationPolicy + public let rawValue: Swift.UInt + public init(rawValue: Swift.UInt) + public typealias ArrayLiteralElement = HHSDKVideo.AuthenticationPolicy + public typealias Element = HHSDKVideo.AuthenticationPolicy + public typealias RawValue = Swift.UInt +} +public struct Attributes { + public var `class`: Swift.String? { + get + } + public var data: Foundation.Data? { + get + } + public var ref: Foundation.Data? { + get + } + public var persistentRef: Foundation.Data? { + get + } + public var accessible: Swift.String? { + get + } + public var accessControl: Security.SecAccessControl? { + get + } + public var accessGroup: Swift.String? { + get + } + public var synchronizable: Swift.Bool? { + get + } + public var creationDate: Foundation.Date? { + get + } + public var modificationDate: Foundation.Date? { + get + } + public var attributeDescription: Swift.String? { + get + } + public var comment: Swift.String? { + get + } + public var creator: Swift.String? { + get + } + public var type: Swift.String? { + get + } + public var label: Swift.String? 
{ + get + } + public var isInvisible: Swift.Bool? { + get + } + public var isNegative: Swift.Bool? { + get + } + public var account: Swift.String? { + get + } + public var service: Swift.String? { + get + } + public var generic: Foundation.Data? { + get + } + public var securityDomain: Swift.String? { + get + } + public var server: Swift.String? { + get + } + public var `protocol`: Swift.String? { + get + } + public var authenticationType: Swift.String? { + get + } + public var port: Swift.Int? { + get + } + public var path: Swift.String? { + get + } + public subscript(key: Swift.String) -> Any? { + get + } +} +@_hasMissingDesignatedInitializers final public class Keychain { + final public var itemClass: HHSDKVideo.ItemClass { + get + } + final public var service: Swift.String { + get + } + final public var accessGroup: Swift.String? { + get + } + final public var server: Foundation.URL { + get + } + final public var protocolType: HHSDKVideo.ProtocolType { + get + } + final public var authenticationType: HHSDKVideo.AuthenticationType { + get + } + final public var accessibility: HHSDKVideo.Accessibility { + get + } + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + final public var authenticationPolicy: HHSDKVideo.AuthenticationPolicy? { + get + } + final public var synchronizable: Swift.Bool { + get + } + final public var label: Swift.String? { + get + } + final public var comment: Swift.String? { + get + } + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + final public var authenticationPrompt: Swift.String? { + get + } + @available(iOS 9.0, macOS 10.11, *) + final public var authenticationContext: LocalAuthentication.LAContext? 
{ + get + } + convenience public init() + convenience public init(service: Swift.String) + convenience public init(accessGroup: Swift.String) + convenience public init(service: Swift.String, accessGroup: Swift.String) + convenience public init(server: Swift.String, protocolType: HHSDKVideo.ProtocolType, authenticationType: HHSDKVideo.AuthenticationType = .default) + convenience public init(server: Foundation.URL, protocolType: HHSDKVideo.ProtocolType, authenticationType: HHSDKVideo.AuthenticationType = .default) + final public func accessibility(_ accessibility: HHSDKVideo.Accessibility) -> HHSDKVideo.Keychain + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + final public func accessibility(_ accessibility: HHSDKVideo.Accessibility, authenticationPolicy: HHSDKVideo.AuthenticationPolicy) -> HHSDKVideo.Keychain + final public func synchronizable(_ synchronizable: Swift.Bool) -> HHSDKVideo.Keychain + final public func label(_ label: Swift.String) -> HHSDKVideo.Keychain + final public func comment(_ comment: Swift.String) -> HHSDKVideo.Keychain + final public func attributes(_ attributes: [Swift.String : Any]) -> HHSDKVideo.Keychain + @available(iOS 8.0, macOS 10.10, *) + @available(watchOS, unavailable) + final public func authenticationPrompt(_ authenticationPrompt: Swift.String) -> HHSDKVideo.Keychain + @available(iOS 9.0, macOS 10.11, *) + final public func authenticationContext(_ authenticationContext: LocalAuthentication.LAContext) -> HHSDKVideo.Keychain + final public func get(_ key: Swift.String) throws -> Swift.String? + final public func getString(_ key: Swift.String) throws -> Swift.String? + final public func getData(_ key: Swift.String) throws -> Foundation.Data? + final public func get<T>(_ key: Swift.String, handler: (HHSDKVideo.Attributes?) 
-> T) throws -> T + final public func set(_ value: Swift.String, key: Swift.String) throws + final public func set(_ value: Foundation.Data, key: Swift.String) throws + final public subscript(key: Swift.String) -> Swift.String? { + get + set + } + final public subscript(string key: Swift.String) -> Swift.String? { + get + set + } + final public subscript(data key: Swift.String) -> Foundation.Data? { + get + set + } + final public subscript(attributes key: Swift.String) -> HHSDKVideo.Attributes? { + get + } + final public func remove(_ key: Swift.String) throws + final public func removeAll() throws + final public func contains(_ key: Swift.String) throws -> Swift.Bool + final public class func allKeys(_ itemClass: HHSDKVideo.ItemClass) -> [(Swift.String, Swift.String)] + final public func allKeys() -> [Swift.String] + final public class func allItems(_ itemClass: HHSDKVideo.ItemClass) -> [[Swift.String : Any]] + final public func allItems() -> [[Swift.String : Any]] + @available(iOS 8.0, *) + final public func getSharedPassword(_ completion: @escaping (Swift.String?, Swift.String?, Swift.Error?) -> () = { account, password, error -> () in }) + @available(iOS 8.0, *) + final public func getSharedPassword(_ account: Swift.String, completion: @escaping (Swift.String?, Swift.Error?) -> () = { password, error -> () in }) + @available(iOS 8.0, *) + final public func setSharedPassword(_ password: Swift.String, account: Swift.String, completion: @escaping (Swift.Error?) -> () = { e -> () in }) + @available(iOS 8.0, *) + final public func removeSharedPassword(_ account: Swift.String, completion: @escaping (Swift.Error?) -> () = { e -> () in }) + @available(iOS 8.0, *) + final public class func requestSharedWebCredential(_ completion: @escaping ([[Swift.String : Swift.String]], Swift.Error?) 
-> () = { credentials, error -> () in }) + @available(iOS 8.0, *) + final public class func requestSharedWebCredential(domain: Swift.String, completion: @escaping ([[Swift.String : Swift.String]], Swift.Error?) -> () = { credentials, error -> () in }) + @available(iOS 8.0, *) + final public class func requestSharedWebCredential(domain: Swift.String, account: Swift.String, completion: @escaping ([[Swift.String : Swift.String]], Swift.Error?) -> () = { credentials, error -> () in }) + @available(iOS 8.0, *) + final public class func generatePassword() -> Swift.String + @objc deinit +} +extension Keychain : Swift.CustomStringConvertible, Swift.CustomDebugStringConvertible { + final public var description: Swift.String { + get + } + final public var debugDescription: Swift.String { + get + } +} +extension Attributes : Swift.CustomStringConvertible, Swift.CustomDebugStringConvertible { + public var description: Swift.String { + get + } + public var debugDescription: Swift.String { + get + } +} +extension ItemClass : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init?(rawValue: Swift.String) + public var rawValue: Swift.String { + get + } + public var description: Swift.String { + get + } + public typealias RawValue = Swift.String +} +extension ProtocolType : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init?(rawValue: Swift.String) + public var rawValue: Swift.String { + get + } + public var description: Swift.String { + get + } + public typealias RawValue = Swift.String +} +extension AuthenticationType : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init?(rawValue: Swift.String) + public var rawValue: Swift.String { + get + } + public var description: Swift.String { + get + } + public typealias RawValue = Swift.String +} +extension Accessibility : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init?(rawValue: Swift.String) + public var rawValue: Swift.String { + get + } + public var 
description: Swift.String { + get + } + public typealias RawValue = Swift.String +} +public enum Status : Darwin.OSStatus, Swift.Error { + case success + case unimplemented + case diskFull + case io + case opWr + case param + case wrPerm + case allocate + case userCanceled + case badReq + case internalComponent + case notAvailable + case readOnly + case authFailed + case noSuchKeychain + case invalidKeychain + case duplicateKeychain + case duplicateCallback + case invalidCallback + case duplicateItem + case itemNotFound + case bufferTooSmall + case dataTooLarge + case noSuchAttr + case invalidItemRef + case invalidSearchRef + case noSuchClass + case noDefaultKeychain + case interactionNotAllowed + case readOnlyAttr + case wrongSecVersion + case keySizeNotAllowed + case noStorageModule + case noCertificateModule + case noPolicyModule + case interactionRequired + case dataNotAvailable + case dataNotModifiable + case createChainFailed + case invalidPrefsDomain + case inDarkWake + case aclNotSimple + case policyNotFound + case invalidTrustSetting + case noAccessForItem + case invalidOwnerEdit + case trustNotAvailable + case unsupportedFormat + case unknownFormat + case keyIsSensitive + case multiplePrivKeys + case passphraseRequired + case invalidPasswordRef + case invalidTrustSettings + case noTrustSettings + case pkcs12VerifyFailure + case invalidCertificate + case notSigner + case policyDenied + case invalidKey + case decode + case `internal` + case unsupportedAlgorithm + case unsupportedOperation + case unsupportedPadding + case itemInvalidKey + case itemInvalidKeyType + case itemInvalidValue + case itemClassMissing + case itemMatchUnsupported + case useItemListUnsupported + case useKeychainUnsupported + case useKeychainListUnsupported + case returnDataUnsupported + case returnAttributesUnsupported + case returnRefUnsupported + case returnPersitentRefUnsupported + case valueRefUnsupported + case valuePersistentRefUnsupported + case returnMissingPointer + case 
matchLimitUnsupported + case itemIllegalQuery + case waitForCallback + case missingEntitlement + case upgradePending + case mpSignatureInvalid + case otrTooOld + case otrIDTooNew + case serviceNotAvailable + case insufficientClientID + case deviceReset + case deviceFailed + case appleAddAppACLSubject + case applePublicKeyIncomplete + case appleSignatureMismatch + case appleInvalidKeyStartDate + case appleInvalidKeyEndDate + case conversionError + case appleSSLv2Rollback + case quotaExceeded + case fileTooBig + case invalidDatabaseBlob + case invalidKeyBlob + case incompatibleDatabaseBlob + case incompatibleKeyBlob + case hostNameMismatch + case unknownCriticalExtensionFlag + case noBasicConstraints + case noBasicConstraintsCA + case invalidAuthorityKeyID + case invalidSubjectKeyID + case invalidKeyUsageForPolicy + case invalidExtendedKeyUsage + case invalidIDLinkage + case pathLengthConstraintExceeded + case invalidRoot + case crlExpired + case crlNotValidYet + case crlNotFound + case crlServerDown + case crlBadURI + case unknownCertExtension + case unknownCRLExtension + case crlNotTrusted + case crlPolicyFailed + case idpFailure + case smimeEmailAddressesNotFound + case smimeBadExtendedKeyUsage + case smimeBadKeyUsage + case smimeKeyUsageNotCritical + case smimeNoEmailAddress + case smimeSubjAltNameNotCritical + case sslBadExtendedKeyUsage + case ocspBadResponse + case ocspBadRequest + case ocspUnavailable + case ocspStatusUnrecognized + case endOfData + case incompleteCertRevocationCheck + case networkFailure + case ocspNotTrustedToAnchor + case recordModified + case ocspSignatureError + case ocspNoSigner + case ocspResponderMalformedReq + case ocspResponderInternalError + case ocspResponderTryLater + case ocspResponderSignatureRequired + case ocspResponderUnauthorized + case ocspResponseNonceMismatch + case codeSigningBadCertChainLength + case codeSigningNoBasicConstraints + case codeSigningBadPathLengthConstraint + case codeSigningNoExtendedKeyUsage + case 
codeSigningDevelopment + case resourceSignBadCertChainLength + case resourceSignBadExtKeyUsage + case trustSettingDeny + case invalidSubjectName + case unknownQualifiedCertStatement + case mobileMeRequestQueued + case mobileMeRequestRedirected + case mobileMeServerError + case mobileMeServerNotAvailable + case mobileMeServerAlreadyExists + case mobileMeServerServiceErr + case mobileMeRequestAlreadyPending + case mobileMeNoRequestPending + case mobileMeCSRVerifyFailure + case mobileMeFailedConsistencyCheck + case notInitialized + case invalidHandleUsage + case pvcReferentNotFound + case functionIntegrityFail + case internalError + case memoryError + case invalidData + case mdsError + case invalidPointer + case selfCheckFailed + case functionFailed + case moduleManifestVerifyFailed + case invalidGUID + case invalidHandle + case invalidDBList + case invalidPassthroughID + case invalidNetworkAddress + case crlAlreadySigned + case invalidNumberOfFields + case verificationFailure + case unknownTag + case invalidSignature + case invalidName + case invalidCertificateRef + case invalidCertificateGroup + case tagNotFound + case invalidQuery + case invalidValue + case callbackFailed + case aclDeleteFailed + case aclReplaceFailed + case aclAddFailed + case aclChangeFailed + case invalidAccessCredentials + case invalidRecord + case invalidACL + case invalidSampleValue + case incompatibleVersion + case privilegeNotGranted + case invalidScope + case pvcAlreadyConfigured + case invalidPVC + case emmLoadFailed + case emmUnloadFailed + case addinLoadFailed + case invalidKeyRef + case invalidKeyHierarchy + case addinUnloadFailed + case libraryReferenceNotFound + case invalidAddinFunctionTable + case invalidServiceMask + case moduleNotLoaded + case invalidSubServiceID + case attributeNotInContext + case moduleManagerInitializeFailed + case moduleManagerNotFound + case eventNotificationCallbackNotFound + case inputLengthError + case outputLengthError + case privilegeNotSupported + case 
deviceError + case attachHandleBusy + case notLoggedIn + case algorithmMismatch + case keyUsageIncorrect + case keyBlobTypeIncorrect + case keyHeaderInconsistent + case unsupportedKeyFormat + case unsupportedKeySize + case invalidKeyUsageMask + case unsupportedKeyUsageMask + case invalidKeyAttributeMask + case unsupportedKeyAttributeMask + case invalidKeyLabel + case unsupportedKeyLabel + case invalidKeyFormat + case unsupportedVectorOfBuffers + case invalidInputVector + case invalidOutputVector + case invalidContext + case invalidAlgorithm + case invalidAttributeKey + case missingAttributeKey + case invalidAttributeInitVector + case missingAttributeInitVector + case invalidAttributeSalt + case missingAttributeSalt + case invalidAttributePadding + case missingAttributePadding + case invalidAttributeRandom + case missingAttributeRandom + case invalidAttributeSeed + case missingAttributeSeed + case invalidAttributePassphrase + case missingAttributePassphrase + case invalidAttributeKeyLength + case missingAttributeKeyLength + case invalidAttributeBlockSize + case missingAttributeBlockSize + case invalidAttributeOutputSize + case missingAttributeOutputSize + case invalidAttributeRounds + case missingAttributeRounds + case invalidAlgorithmParms + case missingAlgorithmParms + case invalidAttributeLabel + case missingAttributeLabel + case invalidAttributeKeyType + case missingAttributeKeyType + case invalidAttributeMode + case missingAttributeMode + case invalidAttributeEffectiveBits + case missingAttributeEffectiveBits + case invalidAttributeStartDate + case missingAttributeStartDate + case invalidAttributeEndDate + case missingAttributeEndDate + case invalidAttributeVersion + case missingAttributeVersion + case invalidAttributePrime + case missingAttributePrime + case invalidAttributeBase + case missingAttributeBase + case invalidAttributeSubprime + case missingAttributeSubprime + case invalidAttributeIterationCount + case missingAttributeIterationCount + case 
invalidAttributeDLDBHandle + case missingAttributeDLDBHandle + case invalidAttributeAccessCredentials + case missingAttributeAccessCredentials + case invalidAttributePublicKeyFormat + case missingAttributePublicKeyFormat + case invalidAttributePrivateKeyFormat + case missingAttributePrivateKeyFormat + case invalidAttributeSymmetricKeyFormat + case missingAttributeSymmetricKeyFormat + case invalidAttributeWrappedKeyFormat + case missingAttributeWrappedKeyFormat + case stagedOperationInProgress + case stagedOperationNotStarted + case verifyFailed + case querySizeUnknown + case blockSizeMismatch + case publicKeyInconsistent + case deviceVerifyFailed + case invalidLoginName + case alreadyLoggedIn + case invalidDigestAlgorithm + case invalidCRLGroup + case certificateCannotOperate + case certificateExpired + case certificateNotValidYet + case certificateRevoked + case certificateSuspended + case insufficientCredentials + case invalidAction + case invalidAuthority + case verifyActionFailed + case invalidCertAuthority + case invaldCRLAuthority + case invalidCRLEncoding + case invalidCRLType + case invalidCRL + case invalidFormType + case invalidID + case invalidIdentifier + case invalidIndex + case invalidPolicyIdentifiers + case invalidTimeString + case invalidReason + case invalidRequestInputs + case invalidResponseVector + case invalidStopOnPolicy + case invalidTuple + case multipleValuesUnsupported + case notTrusted + case noDefaultAuthority + case rejectedForm + case requestLost + case requestRejected + case unsupportedAddressType + case unsupportedService + case invalidTupleGroup + case invalidBaseACLs + case invalidTupleCredendtials + case invalidEncoding + case invalidValidityPeriod + case invalidRequestor + case requestDescriptor + case invalidBundleInfo + case invalidCRLIndex + case noFieldValues + case unsupportedFieldFormat + case unsupportedIndexInfo + case unsupportedLocality + case unsupportedNumAttributes + case unsupportedNumIndexes + case 
unsupportedNumRecordTypes + case fieldSpecifiedMultiple + case incompatibleFieldFormat + case invalidParsingModule + case databaseLocked + case datastoreIsOpen + case missingValue + case unsupportedQueryLimits + case unsupportedNumSelectionPreds + case unsupportedOperator + case invalidDBLocation + case invalidAccessRequest + case invalidIndexInfo + case invalidNewOwner + case invalidModifyMode + case missingRequiredExtension + case extendedKeyUsageNotCritical + case timestampMissing + case timestampInvalid + case timestampNotTrusted + case timestampServiceNotAvailable + case timestampBadAlg + case timestampBadRequest + case timestampBadDataFormat + case timestampTimeNotAvailable + case timestampUnacceptedPolicy + case timestampUnacceptedExtension + case timestampAddInfoNotAvailable + case timestampSystemFailure + case signingTimeMissing + case timestampRejection + case timestampWaiting + case timestampRevocationWarning + case timestampRevocationNotification + case unexpectedError +} +extension Status : Swift.RawRepresentable, Swift.CustomStringConvertible { + public init(status: Darwin.OSStatus) + public var description: Swift.String { + get + } + public init?(rawValue: Darwin.OSStatus) + public typealias RawValue = Darwin.OSStatus + public var rawValue: Darwin.OSStatus { + get + } +} +extension Status : Foundation.CustomNSError { + public static let errorDomain: Swift.String + public var errorCode: Swift.Int { + get + } + public var errorUserInfo: [Swift.String : Any] { + get + } +} +extension HHPermission : CoreLocation.CLLocationManagerDelegate { + @objc dynamic public func locationManager(_ manager: CoreLocation.CLLocationManager, didChangeAuthorization status: CoreLocation.CLAuthorizationStatus) +} +public let HHUUID: Swift.String +public let HHUserToken: Swift.String +@_hasMissingDesignatedInitializers public class LoginManager { + public static let `default`: HHSDKVideo.LoginManager + public var mUUID: Swift.Int? 
+ public var mUserInfo: HHSDKVideo.HHUserModel? + public func loadCache() + public func removeCache() + public func getUserInfo(token: Swift.String, success: ((Swift.String?) -> Swift.Void)? = nil, fail: ((Swift.String) -> Swift.Void)? = nil) + public func getUserInfoRequest(success: ((Swift.String?) -> Swift.Void)? = nil, fail: ((Swift.String) -> Swift.Void)? = nil) + public func convert2Model() -> Swift.String? + public func getUserInfo() -> HHSDKVideo.HHUserModel? + public func getCacheUserInfo() -> HHSDKVideo.HHUserModel? + public func hasLoginData() -> Swift.Bool + public func getUUID() -> Swift.Int? + public func setUUID(uuid: Swift.Int) + public func getToken() -> Swift.String? + public func uuidStr() -> Swift.String? + public func isMemeber() -> Swift.Bool + public func isVIP() -> Swift.Bool + public func getUpgradeVIPTips() -> Swift.String? + public func isBuyProduct() -> Swift.Bool + public func getMemberDes() -> Swift.String? + public func isPhoneAccount() -> Swift.Bool + @objc deinit +} +public protocol MapContext { +} +final public class Map { + final public let mappingType: HHSDKVideo.MappingType + final public var JSON: [Swift.String : Any] { + get + } + final public var isKeyPresent: Swift.Bool { + get + } + final public var currentValue: Any? { + get + } + final public var currentKey: Swift.String? { + get + } + final public var nestedKeyDelimiter: Swift.String { + get + } + final public var context: HHSDKVideo.MapContext? + final public var shouldIncludeNilValues: Swift.Bool + final public let toObject: Swift.Bool + public init(mappingType: HHSDKVideo.MappingType, JSON: [Swift.String : Any], toObject: Swift.Bool = false, context: HHSDKVideo.MapContext? 
= nil, shouldIncludeNilValues: Swift.Bool = false) + final public subscript(key: Swift.String) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, delimiter delimiter: Swift.String) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, nested nested: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, nested nested: Swift.Bool, delimiter delimiter: Swift.String) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, ignoreNil ignoreNil: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, delimiter delimiter: Swift.String, ignoreNil ignoreNil: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, nested nested: Swift.Bool, ignoreNil ignoreNil: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public subscript(key: Swift.String, nested nested: Swift.Bool?, delimiter delimiter: Swift.String, ignoreNil ignoreNil: Swift.Bool) -> HHSDKVideo.Map { + get + } + final public func value<T>() -> T? + @objc deinit +} +extension Map { + final public func value<T>(_ key: Swift.String, default: T.Object, using transform: T) throws -> T.Object where T : HHSDKVideo.TransformType + final public func value<T>(_ key: Swift.String, default: T) throws -> T + final public func value<T>(_ key: Swift.String, default: [T]) -> [T] where T : HHSDKVideo.BaseMappable + final public func value<T>(_ key: Swift.String, default: T) throws -> T where T : HHSDKVideo.BaseMappable +} +public struct MapError : Swift.Error { + public var key: Swift.String? + public var currentValue: Any? + public var reason: Swift.String? + public var file: Swift.StaticString? + public var function: Swift.StaticString? + public var line: Swift.UInt? + public init(key: Swift.String?, currentValue: Any?, reason: Swift.String?, file: Swift.StaticString? = nil, function: Swift.StaticString? = nil, line: Swift.UInt? 
= nil) +} +extension MapError : Swift.CustomStringConvertible { + public var description: Swift.String { + get + } +} +public protocol BaseMappable { + mutating func mapping(map: HHSDKVideo.Map) +} +public protocol Mappable : HHSDKVideo.BaseMappable { + init?(map: HHSDKVideo.Map) +} +public protocol StaticMappable : HHSDKVideo.BaseMappable { + static func objectForMapping(map: HHSDKVideo.Map) -> HHSDKVideo.BaseMappable? +} +extension Mappable { + public init?(JSONString: Swift.String, context: HHSDKVideo.MapContext? = nil) + public init?(JSON: [Swift.String : Any], context: HHSDKVideo.MapContext? = nil) +} +extension BaseMappable { + public func toJSON() -> [Swift.String : Any] + public func toJSONString(prettyPrint: Swift.Bool = false) -> Swift.String? +} +extension Array where Element : HHSDKVideo.BaseMappable { + public init?(JSONString: Swift.String, context: HHSDKVideo.MapContext? = nil) + public init(JSONArray: [[Swift.String : Any]], context: HHSDKVideo.MapContext? = nil) + public func toJSON() -> [[Swift.String : Any]] + public func toJSONString(prettyPrint: Swift.Bool = false) -> Swift.String? +} +extension Set where Element : HHSDKVideo.BaseMappable { + public init?(JSONString: Swift.String, context: HHSDKVideo.MapContext? = nil) + public init?(JSONArray: [[Swift.String : Any]], context: HHSDKVideo.MapContext? = nil) + public func toJSON() -> [[Swift.String : Any]] + public func toJSONString(prettyPrint: Swift.Bool = false) -> Swift.String? +} +public enum MappingType { + case fromJSON + case toJSON + public static func == (a: HHSDKVideo.MappingType, b: HHSDKVideo.MappingType) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +final public class Mapper<N> where N : HHSDKVideo.BaseMappable { + final public var context: HHSDKVideo.MapContext? + final public var shouldIncludeNilValues: Swift.Bool + public init(context: HHSDKVideo.MapContext? 
= nil, shouldIncludeNilValues: Swift.Bool = false) + final public func map(JSONObject: Any?, toObject object: N) -> N + final public func map(JSONString: Swift.String, toObject object: N) -> N + final public func map(JSON: [Swift.String : Any], toObject object: N) -> N + final public func map(JSONString: Swift.String) -> N? + final public func map(JSONObject: Any?) -> N? + final public func map(JSON: [Swift.String : Any]) -> N? + final public func mapArray(JSONString: Swift.String) -> [N]? + final public func mapArray(JSONObject: Any?) -> [N]? + final public func mapArray(JSONArray: [[Swift.String : Any]]) -> [N] + final public func mapDictionary(JSONString: Swift.String) -> [Swift.String : N]? + final public func mapDictionary(JSONObject: Any?) -> [Swift.String : N]? + final public func mapDictionary(JSON: [Swift.String : [Swift.String : Any]]) -> [Swift.String : N]? + final public func mapDictionary(JSONObject: Any?, toDictionary dictionary: [Swift.String : N]) -> [Swift.String : N] + final public func mapDictionary(JSON: [Swift.String : [Swift.String : Any]], toDictionary dictionary: [Swift.String : N]) -> [Swift.String : N] + final public func mapDictionaryOfArrays(JSONObject: Any?) -> [Swift.String : [N]]? + final public func mapDictionaryOfArrays(JSON: [Swift.String : [[Swift.String : Any]]]) -> [Swift.String : [N]]? + final public func mapArrayOfArrays(JSONObject: Any?) -> [[N]]? + public static func parseJSONStringIntoDictionary(JSONString: Swift.String) -> [Swift.String : Any]? + public static func parseJSONString(JSONString: Swift.String) -> Any? + @objc deinit +} +extension Mapper { + final public func map(JSONfile: Swift.String) -> N? + final public func mapArray(JSONfile: Swift.String) -> [N]? 
+} +extension Mapper { + final public func toJSON(_ object: N) -> [Swift.String : Any] + final public func toJSONArray(_ array: [N]) -> [[Swift.String : Any]] + final public func toJSONDictionary(_ dictionary: [Swift.String : N]) -> [Swift.String : [Swift.String : Any]] + final public func toJSONDictionaryOfArrays(_ dictionary: [Swift.String : [N]]) -> [Swift.String : [[Swift.String : Any]]] + final public func toJSONString(_ object: N, prettyPrint: Swift.Bool = false) -> Swift.String? + final public func toJSONString(_ array: [N], prettyPrint: Swift.Bool = false) -> Swift.String? + public static func toJSONString(_ JSONObject: Any, prettyPrint: Swift.Bool) -> Swift.String? + public static func toJSONData(_ JSONObject: Any, options: Foundation.JSONSerialization.WritingOptions) -> Foundation.Data? +} +extension Mapper where N : Swift.Hashable { + final public func mapSet(JSONString: Swift.String) -> Swift.Set<N>? + final public func mapSet(JSONObject: Any?) -> Swift.Set<N>? + final public func mapSet(JSONArray: [[Swift.String : Any]]) -> Swift.Set<N> + final public func toJSONSet(_ set: Swift.Set<N>) -> [[Swift.String : Any]] + final public func toJSONString(_ set: Swift.Set<N>, prettyPrint: Swift.Bool = false) -> Swift.String? +} +final public class MD5 { + public init() + final public func calculate(for bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension MD5 : HHSDKVideo.Updatable { + final public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> +} +public struct NotifyInfo { + public init() + public var fromAccountId: Swift.String? + public var requestId: Swift.String? + public var channelId: Swift.String? + public var customInfo: Swift.String? 
+} +open class NSDecimalNumberTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.NSDecimalNumber + public typealias JSON = Swift.String + public init() + open func transformFromJSON(_ value: Any?) -> Foundation.NSDecimalNumber? + open func transformToJSON(_ value: Foundation.NSDecimalNumber?) -> Swift.String? + @objc deinit +} +final public class OCB : HHSDKVideo.BlockMode { + public enum Mode { + case combined + case detached + public static func == (a: HHSDKVideo.OCB.Mode, b: HHSDKVideo.OCB.Mode) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + final public let options: HHSDKVideo.BlockModeOption + public enum Error : Swift.Error { + case invalidNonce + case fail + public static func == (a: HHSDKVideo.OCB.Error, b: HHSDKVideo.OCB.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + final public var authenticationTag: Swift.Array<Swift.UInt8>? + public init(nonce N: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? = nil, tagLength: Swift.Int = 16, mode: HHSDKVideo.OCB.Mode = .detached) + convenience public init(nonce N: Swift.Array<Swift.UInt8>, authenticationTag: Swift.Array<Swift.UInt8>, additionalAuthenticatedData: Swift.Array<Swift.UInt8>? 
= nil, mode: HHSDKVideo.OCB.Mode = .detached) + final public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker + @objc deinit +} +public struct OFB : HHSDKVideo.BlockMode { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.OFB.Error, b: HHSDKVideo.OFB.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +infix operator <- : DefaultPrecedence +infix operator >>> : DefaultPrecedence +public func <- <T>(left: inout T, right: HHSDKVideo.Map) +public func >>> <T>(left: T, right: HHSDKVideo.Map) +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) +public func >>> <T>(left: T?, right: HHSDKVideo.Map) +public func <- <T>(left: inout T, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: T, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout T?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: T?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Dictionary<Swift.String, T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Dictionary<Swift.String, T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Dictionary<Swift.String, T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Dictionary<Swift.String, T>?, right: 
HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Dictionary<Swift.String, [T]>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Dictionary<Swift.String, [T]>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Dictionary<Swift.String, [T]>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Dictionary<Swift.String, [T]>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Array<T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Array<T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Array<T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Array<T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Array<Swift.Array<T>>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Array<Swift.Array<T>>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Array<Swift.Array<T>>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func >>> <T>(left: Swift.Array<Swift.Array<T>>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable +public func <- <T>(left: inout Swift.Set<T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable, T : Swift.Hashable +public func >>> <T>(left: Swift.Set<T>, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable, T : Swift.Hashable +public func <- <T>(left: inout Swift.Set<T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable, T : Swift.Hashable +public func >>> <T>(left: Swift.Set<T>?, right: HHSDKVideo.Map) where T : HHSDKVideo.BaseMappable, T : Swift.Hashable +public struct OrderModel : HHSDKVideo.Mappable { + public var orderid: Swift.String? 
+ public var price: Swift.Float? + public var buyServiceName: Swift.String? + public var expertId: Swift.String? + public var expertName: Swift.String? + public var patientName: Swift.String? + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +public protocol PaddingProtocol { + func add(to: Swift.Array<Swift.UInt8>, blockSize: Swift.Int) -> Swift.Array<Swift.UInt8> + func remove(from: Swift.Array<Swift.UInt8>, blockSize: Swift.Int?) -> Swift.Array<Swift.UInt8> +} +public enum Padding : HHSDKVideo.PaddingProtocol { + case noPadding, zeroPadding, pkcs7, pkcs5, iso78164 + public func add(to: Swift.Array<Swift.UInt8>, blockSize: Swift.Int) -> Swift.Array<Swift.UInt8> + public func remove(from: Swift.Array<Swift.UInt8>, blockSize: Swift.Int?) -> Swift.Array<Swift.UInt8> + public static func == (a: HHSDKVideo.Padding, b: HHSDKVideo.Padding) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } +} +extension PKCS5 { + public struct PBKDF1 { + public enum Error : Swift.Error { + case invalidInput + case derivedKeyTooLong + public static func == (a: HHSDKVideo.PKCS5.PBKDF1.Error, b: HHSDKVideo.PKCS5.PBKDF1.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public enum Variant { + case md5, sha1 + public static func == (a: HHSDKVideo.PKCS5.PBKDF1.Variant, b: HHSDKVideo.PKCS5.PBKDF1.Variant) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(password: Swift.Array<Swift.UInt8>, salt: Swift.Array<Swift.UInt8>, variant: HHSDKVideo.PKCS5.PBKDF1.Variant = .sha1, iterations: Swift.Int = 4096, keyLength: Swift.Int? 
= nil) throws + public func calculate() -> Swift.Array<Swift.UInt8> + } +} +extension PKCS5 { + public struct PBKDF2 { + public enum Error : Swift.Error { + case invalidInput + case derivedKeyTooLong + public static func == (a: HHSDKVideo.PKCS5.PBKDF2.Error, b: HHSDKVideo.PKCS5.PBKDF2.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(password: Swift.Array<Swift.UInt8>, salt: Swift.Array<Swift.UInt8>, iterations: Swift.Int = 4096, keyLength: Swift.Int? = nil, variant: HHSDKVideo.HMAC.Variant = .sha256) throws + public func calculate() throws -> Swift.Array<Swift.UInt8> + } +} +public struct PCBC : HHSDKVideo.BlockMode { + public enum Error : Swift.Error { + case invalidInitializationVector + public static func == (a: HHSDKVideo.PCBC.Error, b: HHSDKVideo.PCBC.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public let options: HHSDKVideo.BlockModeOption + public init(iv: Swift.Array<Swift.UInt8>) + public func worker(blockSize: Swift.Int, cipherOperation: @escaping HHSDKVideo.CipherOperationOnBlock, encryptionOperation: @escaping HHSDKVideo.CipherOperationOnBlock) throws -> HHSDKVideo.CipherModeWorker +} +@objc @_hasMissingDesignatedInitializers public class HHPermission : ObjectiveC.NSObject { + public static let locationAlways: HHSDKVideo.HHPermission + public static let locationWhenInUse: HHSDKVideo.HHPermission + public static let microphone: HHSDKVideo.HHPermission + public static let camera: HHSDKVideo.HHPermission + public static let photos: HHSDKVideo.HHPermission + final public let type: HHSDKVideo.HHBasePermissionType + public var status: HHSDKVideo.PermissionStatus { + get + } + public var presentPrePermissionAlert: Swift.Bool + public var prePermissionAlert: HHSDKVideo.PermissionAlert { + get + set + } + public var presentDeniedAlert: Swift.Bool + @objc override dynamic public init() + @objc 
deinit +} +extension HHPermission { + @objc override dynamic public var description: Swift.String { + @objc get + } + @objc override dynamic public var debugDescription: Swift.String { + @objc get + } +} +@_hasMissingDesignatedInitializers public class PermissionAlert { + @objc deinit +} +public enum PermissionStatus : Swift.String { + case authorized + case denied + case disabled + case notDetermined + case limited + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +extension PermissionStatus : Swift.CustomStringConvertible { + public var description: Swift.String { + get + } +} +@objc public enum HHBasePermissionType : Swift.Int { + case locationAlways + case locationWhenInUse + case microphone + case camera + case photos + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +extension HHBasePermissionType : Swift.CustomStringConvertible { + public var description: Swift.String { + get + } +} +@_hasMissingDesignatedInitializers public class PhotoPickerConfig { + public static let `default`: HHSDKVideo.PhotoPickerConfig + public var miniPicTip: Swift.Bool + public var mMaxSelectCount: Swift.Int + public var mDetailColumnCount: Swift.Int + @objc deinit +} +public enum PKCS5 { +} +public enum PKCS7 { +} +final public class Poly1305 : HHSDKVideo.CryptoAuthenticator { + public enum Error : Swift.Error { + case authenticateError + public static func == (a: HHSDKVideo.Poly1305.Error, b: HHSDKVideo.Poly1305.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public static let blockSize: Swift.Int + public init(key: Swift.Array<Swift.UInt8>) + final public func authenticate(_ bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + @objc deinit +} +@_hasMissingDesignatedInitializers public class PostBodyEncoding { + @objc deinit +} +final 
public class Rabbit { + public enum Error : Swift.Error { + case invalidKeyOrInitializationVector + public static func == (a: HHSDKVideo.Rabbit.Error, b: HHSDKVideo.Rabbit.Error) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public static let ivSize: Swift.Int + public static let keySize: Swift.Int + public static let blockSize: Swift.Int + final public var keySize: Swift.Int { + get + } + convenience public init(key: Swift.Array<Swift.UInt8>) throws + public init(key: Swift.Array<Swift.UInt8>, iv: Swift.Array<Swift.UInt8>?) throws + @objc deinit +} +extension Rabbit : HHSDKVideo.Cipher { + final public func encrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + final public func decrypt(_ bytes: Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> +} +extension Rabbit { + convenience public init(key: Swift.String) throws + convenience public init(key: Swift.String, iv: Swift.String) throws +} +public enum ReachabilityError : Swift.Error { + case FailedToCreateWithAddress(Darwin.sockaddr_in) + case FailedToCreateWithHostname(Swift.String) + case UnableToSetCallback + case UnableToSetDispatchQueue +} +public let ReachabilityChangedNotification: Foundation.NSNotification.Name +public class Reachability { + public typealias NetworkReachable = (HHSDKVideo.Reachability) -> () + public typealias NetworkUnreachable = (HHSDKVideo.Reachability) -> () + public enum NetworkStatus : Swift.CustomStringConvertible { + case notReachable, reachableViaWiFi, reachableViaWWAN + public var description: Swift.String { + get + } + public static func == (a: HHSDKVideo.Reachability.NetworkStatus, b: HHSDKVideo.Reachability.NetworkStatus) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public var whenReachable: HHSDKVideo.Reachability.NetworkReachable? 
+ public var whenUnreachable: HHSDKVideo.Reachability.NetworkUnreachable? + public var reachableOnWWAN: Swift.Bool + public var currentReachabilityString: Swift.String { + get + } + public var currentReachabilityStatus: HHSDKVideo.Reachability.NetworkStatus { + get + } + required public init(reachabilityRef: SystemConfiguration.SCNetworkReachability) + convenience public init?(hostname: Swift.String) + convenience public init?() + @objc deinit +} +extension Reachability { + public func startNotifier() throws + public func stopNotifier() + public var isReachable: Swift.Bool { + get + } + public var isReachableViaWWAN: Swift.Bool { + get + } + public var isReachableViaWiFi: Swift.Bool { + get + } + public var description: Swift.String { + get + } +} +public enum RecordImgType : Swift.Int { + case medic + case check + case yingXiang + case video + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +public struct RemoteData : HHSDKVideo.Mappable { + public var changeDoctorTime: Swift.Int + public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +final public class Scrypt { + public init(password: Swift.Array<Swift.UInt8>, salt: Swift.Array<Swift.UInt8>, dkLen: Swift.Int, N: Swift.Int, r: Swift.Int, p: Swift.Int) throws + final public func calculate() throws -> [Swift.UInt8] + @objc deinit +} +public struct SDKConfigModel : HHSDKVideo.Mappable { + public var cardIdActiveShow: Swift.Int + public var changeDoctorTime: Swift.Int? 
+ public init?(map: HHSDKVideo.Map) + public mutating func mapping(map: HHSDKVideo.Map) +} +final public class SHA1 { + public init() + final public func calculate(for bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension SHA1 : HHSDKVideo.Updatable { + @discardableResult + final public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> +} +final public class SHA2 { + public enum Variant : Swift.RawRepresentable { + case sha224, sha256, sha384, sha512 + public var digestLength: Swift.Int { + get + } + public var blockSize: Swift.Int { + get + } + public typealias RawValue = Swift.Int + public var rawValue: HHSDKVideo.SHA2.Variant.RawValue { + get + } + public init?(rawValue: HHSDKVideo.SHA2.Variant.RawValue) + } + public init(variant: HHSDKVideo.SHA2.Variant) + final public func calculate(for bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension SHA2 : HHSDKVideo.Updatable { + final public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> +} +final public class SHA3 { + final public let blockSize: Swift.Int + final public let digestLength: Swift.Int + final public let markByte: Swift.UInt8 + public enum Variant { + case sha224, sha256, sha384, sha512, keccak224, keccak256, keccak384, keccak512 + public var outputLength: Swift.Int { + get + } + public static func == (a: HHSDKVideo.SHA3.Variant, b: HHSDKVideo.SHA3.Variant) -> Swift.Bool + public func hash(into hasher: inout Swift.Hasher) + public var hashValue: Swift.Int { + get + } + } + public init(variant: HHSDKVideo.SHA3.Variant) + final public func calculate(for bytes: Swift.Array<Swift.UInt8>) -> Swift.Array<Swift.UInt8> + @objc deinit +} +extension SHA3 : HHSDKVideo.Updatable { + final public func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> 
Swift.Array<Swift.UInt8> +} +extension String { + public var bytes: Swift.Array<Swift.UInt8> { + get + } + public func md5() -> Swift.String + public func sha1() -> Swift.String + public func sha224() -> Swift.String + public func sha256() -> Swift.String + public func sha384() -> Swift.String + public func sha512() -> Swift.String + public func sha3(_ variant: HHSDKVideo.SHA3.Variant) -> Swift.String + public func crc32(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.String + public func crc32c(seed: Swift.UInt32? = nil, reflect: Swift.Bool = true) -> Swift.String + public func crc16(seed: Swift.UInt16? = nil) -> Swift.String + public func encrypt(cipher: HHSDKVideo.Cipher) throws -> Swift.String + public func encryptToBase64(cipher: HHSDKVideo.Cipher) throws -> Swift.String? + public func authenticate<A>(with authenticator: A) throws -> Swift.String where A : HHSDKVideo.CryptoAuthenticator +} +extension String { + public func decryptBase64ToString(cipher: HHSDKVideo.Cipher) throws -> Swift.String + public func decryptBase64(cipher: HHSDKVideo.Cipher) throws -> Swift.Array<Swift.UInt8> +} +@_hasMissingDesignatedInitializers final public class SwiftEntryKit { + public enum EntryDismissalDescriptor { + case specific(entryName: Swift.String) + case prioritizedLowerOrEqualTo(priority: HHSDKVideo.EKAttributes.Precedence.Priority) + case enqueued + case all + case displayed + } + public enum RollbackWindow { + case main + case custom(window: UIKit.UIWindow) + } + public typealias DismissCompletionHandler = () -> Swift.Void + final public class var window: UIKit.UIWindow? { + get + } + final public class var isCurrentlyDisplaying: Swift.Bool { + get + } + final public class func isCurrentlyDisplaying(entryNamed name: Swift.String? = nil) -> Swift.Bool + final public class var isQueueEmpty: Swift.Bool { + get + } + final public class func queueContains(entryNamed name: Swift.String? 
= nil) -> Swift.Bool + final public class func display(entry view: UIKit.UIView, using attributes: HHSDKVideo.EKAttributes, presentInsideKeyWindow: Swift.Bool = false, rollbackWindow: HHSDKVideo.SwiftEntryKit.RollbackWindow = .main) + final public class func display(entry viewController: UIKit.UIViewController, using attributes: HHSDKVideo.EKAttributes, presentInsideKeyWindow: Swift.Bool = false, rollbackWindow: HHSDKVideo.SwiftEntryKit.RollbackWindow = .main) + final public class func transform(to view: UIKit.UIView) + final public class func dismiss(_ descriptor: HHSDKVideo.SwiftEntryKit.EntryDismissalDescriptor = .displayed, with completion: HHSDKVideo.SwiftEntryKit.DismissCompletionHandler? = nil) + final public class func layoutIfNeeded() + @objc deinit +} +open class TransformOf<ObjectType, JSONType> : HHSDKVideo.TransformType { + public typealias Object = ObjectType + public typealias JSON = JSONType + public init(fromJSON: @escaping (JSONType?) -> ObjectType?, toJSON: @escaping (ObjectType?) -> JSONType?) + open func transformFromJSON(_ value: Any?) -> ObjectType? + open func transformToJSON(_ value: ObjectType?) -> JSONType? 
+ @objc deinit +} +public func <- <Transform>(left: inout Transform.Object, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: Transform.Object, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout Transform.Object?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: Transform.Object?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [Transform.Object], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [Transform.Object], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [Transform.Object]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [Transform.Object]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [Swift.String : Transform.Object], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [Swift.String : Transform.Object], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [Swift.String : Transform.Object]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [Swift.String : Transform.Object]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout Transform.Object, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Transform.Object, right: (HHSDKVideo.Map, Transform)) where Transform 
: HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Transform.Object?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Transform.Object?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Dictionary<Swift.String, Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Dictionary<Swift.String, Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Dictionary<Swift.String, Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Dictionary<Swift.String, Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Dictionary<Swift.String, [Transform.Object]>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Dictionary<Swift.String, [Transform.Object]>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Dictionary<Swift.String, [Transform.Object]>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Dictionary<Swift.String, 
[Transform.Object]>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Array<Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Array<Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout Swift.Array<Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func >>> <Transform>(left: Swift.Array<Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable +public func <- <Transform>(left: inout [[Transform.Object]], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [[Transform.Object]], right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout [[Transform.Object]]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func >>> <Transform>(left: [[Transform.Object]]?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType +public func <- <Transform>(left: inout Swift.Set<Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable, Transform.Object : Swift.Hashable +public func >>> <Transform>(left: Swift.Set<Transform.Object>, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable, Transform.Object : Swift.Hashable +public func <- <Transform>(left: inout Swift.Set<Transform.Object>?, right: 
(HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable, Transform.Object : Swift.Hashable +public func >>> <Transform>(left: Swift.Set<Transform.Object>?, right: (HHSDKVideo.Map, Transform)) where Transform : HHSDKVideo.TransformType, Transform.Object : HHSDKVideo.BaseMappable, Transform.Object : Swift.Hashable +public protocol TransformType { + associatedtype Object + associatedtype JSON + func transformFromJSON(_ value: Any?) -> Self.Object? + func transformToJSON(_ value: Self.Object?) -> Self.JSON? +} +extension UIImage { + public class func gifImageWithData(_ data: Foundation.Data) -> UIKit.UIImage? + public class func gifImageWithURL(_ gifUrl: Swift.String) -> UIKit.UIImage? + public class func gifImageWithName(_ name: Swift.String) -> UIKit.UIImage? +} +public protocol _UInt8Type { +} +extension UInt8 : HHSDKVideo._UInt8Type { +} +extension UInt8 { + public func bits() -> [HHSDKVideo.Bit] + public func bits() -> Swift.String +} +public protocol Updatable { + mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool) throws -> Swift.Array<Swift.UInt8> + mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws +} +extension Updatable { + public mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws + public mutating func update(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> + public mutating func update(withBytes bytes: Swift.Array<Swift.UInt8>, isLast: Swift.Bool = false) throws -> Swift.Array<Swift.UInt8> + public mutating func update(withBytes bytes: Swift.Array<Swift.UInt8>, isLast: Swift.Bool = false, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws + public mutating func finish(withBytes bytes: 
Swift.ArraySlice<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + public mutating func finish(withBytes bytes: Swift.Array<Swift.UInt8>) throws -> Swift.Array<Swift.UInt8> + public mutating func finish() throws -> Swift.Array<Swift.UInt8> + public mutating func finish(withBytes bytes: Swift.ArraySlice<Swift.UInt8>, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws + public mutating func finish(withBytes bytes: Swift.Array<Swift.UInt8>, output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws + public mutating func finish(output: (Swift.Array<Swift.UInt8>) -> Swift.Void) throws +} +open class URLTransform : HHSDKVideo.TransformType { + public typealias Object = Foundation.URL + public typealias JSON = Swift.String + public init(shouldEncodeURLString: Swift.Bool = false, allowedCharacterSet: Foundation.CharacterSet = .urlQueryAllowed) + open func transformFromJSON(_ value: Any?) -> Foundation.URL? + open func transformToJSON(_ value: Foundation.URL?) -> Swift.String? + @objc deinit +} +public struct UserApi { +} +@objc @_inheritsConvenienceInitializers @_hasMissingDesignatedInitializers public class VCManager : ObjectiveC.NSObject { + public static let `default`: HHSDKVideo.VCManager + public var waitModel: HHSDKVideo.HHWaitDoctorModel? 
+ @objc deinit +} +extension VCManager { + public func onReceiveCall(callee: Swift.String, caller: Swift.String, orderId: Swift.String) + public func isInBusy() -> Swift.Bool +} +extension VCManager { + public func onReceiveInvite(docModel: HHSDKVideo.HHInviteDocModel) + public static func onUserReject(_ fromUuid: Swift.String) + public static func onCancelInvite(_ fromUuid: Swift.String) + public static func changeVideo(_ isVoice: Swift.Bool) +} +extension VCManager { + public func showEduBoard(groupId: Swift.String, orderId: Swift.String) + public func closeEduBoard() +} +public struct VideoApi { +} +public enum HHIMCmd : Swift.String { + case audio + case video + case closeVideo + case openVideo + case transfor + case accept + case call + case reject + case cancelCall + case pcCancel + case phoneCall + case busy + case waiting + case waitingTip + case agentTrans + case web_transform + case callWeb + case SWITCH_TO_CAMERA_wmp + case cancelCallWeb + case call_invite + case reject_invite + case cancel_invite + case exit_camera + case enter_camera + case conference_begin + case conference_end + case user_certification + case cancel_user_certification + public init?(rawValue: Swift.String) + public typealias RawValue = Swift.String + public var rawValue: Swift.String { + get + } +} +public class WeakArray<T> { + public func add(_ delegate: T) + public func remove(_ delegate: T) + public func excute(_ block: @escaping ((T?) -> Swift.Void)) + public init() + @objc deinit +} +@objc public class ZLAlbumListModel : ObjectiveC.NSObject { + final public let title: Swift.String + public var count: Swift.Int { + get + } + public var result: Photos.PHFetchResult<Photos.PHAsset> + final public let collection: Photos.PHAssetCollection + final public let option: Photos.PHFetchOptions + final public let isCameraRoll: Swift.Bool + public var headImageAsset: Photos.PHAsset? 
{ + get + } + public var models: [HHSDKVideo.ZLPhotoModel] + public init(title: Swift.String, result: Photos.PHFetchResult<Photos.PHAsset>, collection: Photos.PHAssetCollection, option: Photos.PHFetchOptions, isCameraRoll: Swift.Bool) + public func refetchPhotos() + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class ZLCameraConfiguration : ObjectiveC.NSObject { + @objc public enum CaptureSessionPreset : Swift.Int { + case cif352x288 + case vga640x480 + case hd1280x720 + case hd1920x1080 + case hd4K3840x2160 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public enum FocusMode : Swift.Int { + case autoFocus + case continuousAutoFocus + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public enum ExposureMode : Swift.Int { + case autoExpose + case continuousAutoExposure + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public enum FlashMode : Swift.Int { + case auto + case on + case off + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public enum VideoExportType : Swift.Int { + case mov + case mp4 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public var sessionPreset: HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset + @objc public var focusMode: HHSDKVideo.ZLCameraConfiguration.FocusMode + @objc public var exposureMode: HHSDKVideo.ZLCameraConfiguration.ExposureMode + @objc public var flashMode: HHSDKVideo.ZLCameraConfiguration.FlashMode + @objc public var videoExportType: HHSDKVideo.ZLCameraConfiguration.VideoExportType + @objc override dynamic public init() + @objc deinit +} +extension 
ZLCameraConfiguration { + @discardableResult + public func sessionPreset(_ sessionPreset: HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset) -> HHSDKVideo.ZLCameraConfiguration + @discardableResult + public func focusMode(_ mode: HHSDKVideo.ZLCameraConfiguration.FocusMode) -> HHSDKVideo.ZLCameraConfiguration + @discardableResult + public func exposureMode(_ mode: HHSDKVideo.ZLCameraConfiguration.ExposureMode) -> HHSDKVideo.ZLCameraConfiguration + @discardableResult + public func flashMode(_ mode: HHSDKVideo.ZLCameraConfiguration.FlashMode) -> HHSDKVideo.ZLCameraConfiguration + @discardableResult + public func videoExportType(_ type: HHSDKVideo.ZLCameraConfiguration.VideoExportType) -> HHSDKVideo.ZLCameraConfiguration +} +@objc open class ZLCustomCamera : UIKit.UIViewController, QuartzCore.CAAnimationDelegate { + @objc public var takeDoneBlock: ((UIKit.UIImage?, Foundation.URL?) -> Swift.Void)? + @objc public var cancelBlock: (() -> Swift.Void)? + public var tipsLabel: UIKit.UILabel { + get + set + } + public var bottomView: UIKit.UIView { + get + set + } + public var largeCircleView: UIKit.UIVisualEffectView { + get + set + } + public var smallCircleView: UIKit.UIView { + get + set + } + public var animateLayer: QuartzCore.CAShapeLayer { + get + set + } + public var retakeBtn: HHSDKVideo.ZLEnlargeButton { + get + set + } + public var doneBtn: UIKit.UIButton { + get + set + } + public var dismissBtn: HHSDKVideo.ZLEnlargeButton { + get + set + } + public var switchCameraBtn: HHSDKVideo.ZLEnlargeButton { + get + set + } + public var focusCursorView: UIKit.UIImageView { + get + set + } + public var takedImageView: UIKit.UIImageView { + get + set + } + @objc override dynamic public var supportedInterfaceOrientations: UIKit.UIInterfaceOrientationMask { + @objc get + } + @objc override dynamic public var prefersStatusBarHidden: Swift.Bool { + @objc get + } + @objc deinit + @objc dynamic public init() + @objc required dynamic public init?(coder: Foundation.NSCoder) + 
@objc override dynamic open func viewDidLoad() + @objc override dynamic open func viewWillAppear(_ animated: Swift.Bool) + @objc override dynamic open func viewDidAppear(_ animated: Swift.Bool) + @objc override dynamic open func viewWillDisappear(_ animated: Swift.Bool) + @objc override dynamic open func viewDidDisappear(_ animated: Swift.Bool) + @objc override dynamic open func viewDidLayoutSubviews() + @objc public func animationDidStop(_ anim: QuartzCore.CAAnimation, finished flag: Swift.Bool) + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) +} +extension ZLCustomCamera : AVFoundation.AVCapturePhotoCaptureDelegate { + @objc dynamic public func photoOutput(_ output: AVFoundation.AVCapturePhotoOutput, willCapturePhotoFor resolvedSettings: AVFoundation.AVCaptureResolvedPhotoSettings) + @objc dynamic public func photoOutput(_ output: AVFoundation.AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CoreMedia.CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CoreMedia.CMSampleBuffer?, resolvedSettings: AVFoundation.AVCaptureResolvedPhotoSettings, bracketSettings: AVFoundation.AVCaptureBracketedStillImageSettings?, error: Swift.Error?) +} +extension ZLCustomCamera : AVFoundation.AVCaptureFileOutputRecordingDelegate { + @objc dynamic public func fileOutput(_ output: AVFoundation.AVCaptureFileOutput, didStartRecordingTo fileURL: Foundation.URL, from connections: [AVFoundation.AVCaptureConnection]) + @objc dynamic public func fileOutput(_ output: AVFoundation.AVCaptureFileOutput, didFinishRecordingTo outputFileURL: Foundation.URL, from connections: [AVFoundation.AVCaptureConnection], error: Swift.Error?) 
+} +extension ZLCustomCamera : UIKit.UIGestureRecognizerDelegate { + @objc dynamic public func gestureRecognizer(_ gestureRecognizer: UIKit.UIGestureRecognizer, shouldRecognizeSimultaneouslyWith otherGestureRecognizer: UIKit.UIGestureRecognizer) -> Swift.Bool +} +@objc public protocol ZLImageStickerContainerDelegate { + @objc var selectImageBlock: ((UIKit.UIImage) -> Swift.Void)? { get set } + @objc var hideBlock: (() -> Swift.Void)? { get set } + @objc func show(in view: UIKit.UIView) +} +@objc @_inheritsConvenienceInitializers public class ZLEditImageConfiguration : ObjectiveC.NSObject { + @objc public enum EditTool : Swift.Int, Swift.CaseIterable { + case draw + case clip + case imageSticker + case textSticker + case mosaic + case filter + case adjust + public init?(rawValue: Swift.Int) + public typealias AllCases = [HHSDKVideo.ZLEditImageConfiguration.EditTool] + public typealias RawValue = Swift.Int + public static var allCases: [HHSDKVideo.ZLEditImageConfiguration.EditTool] { + get + } + public var rawValue: Swift.Int { + get + } + } + @objc public enum AdjustTool : Swift.Int, Swift.CaseIterable { + case brightness + case contrast + case saturation + public init?(rawValue: Swift.Int) + public typealias AllCases = [HHSDKVideo.ZLEditImageConfiguration.AdjustTool] + public typealias RawValue = Swift.Int + public static var allCases: [HHSDKVideo.ZLEditImageConfiguration.AdjustTool] { + get + } + public var rawValue: Swift.Int { + get + } + } + public var tools: [HHSDKVideo.ZLEditImageConfiguration.EditTool] { + get + set + } + @objc public var tools_objc: [Swift.Int] { + @objc get + @objc set + } + @objc public var drawColors: [UIKit.UIColor] { + @objc get + @objc set + } + @objc public var defaultDrawColor: UIKit.UIColor + @objc public var clipRatios: [HHSDKVideo.ZLImageClipRatio] { + @objc get + @objc set + } + @objc public var textStickerTextColors: [UIKit.UIColor] { + @objc get + @objc set + } + @objc public var textStickerDefaultTextColor: UIKit.UIColor + 
@objc public var filters: [HHSDKVideo.ZLFilter] { + @objc get + @objc set + } + @objc public var imageStickerContainerView: (UIKit.UIView & HHSDKVideo.ZLImageStickerContainerDelegate)? + public var adjustTools: [HHSDKVideo.ZLEditImageConfiguration.AdjustTool] { + get + set + } + @objc public var adjustTools_objc: [Swift.Int] { + @objc get + @objc set + } + @objc public var impactFeedbackWhenAdjustSliderValueIsZero: Swift.Bool + @objc public var impactFeedbackStyle: UIKit.UIImpactFeedbackGenerator.FeedbackStyle + @objc override dynamic public init() + @objc deinit +} +extension ZLEditImageConfiguration { + @discardableResult + public func tools(_ tools: [HHSDKVideo.ZLEditImageConfiguration.EditTool]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func drawColors(_ colors: [UIKit.UIColor]) -> HHSDKVideo.ZLEditImageConfiguration + public func defaultDrawColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func clipRatios(_ ratios: [HHSDKVideo.ZLImageClipRatio]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func textStickerTextColors(_ colors: [UIKit.UIColor]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func textStickerDefaultTextColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func filters(_ filters: [HHSDKVideo.ZLFilter]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func imageStickerContainerView(_ view: (UIKit.UIView & HHSDKVideo.ZLImageStickerContainerDelegate)?) 
-> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func adjustTools(_ tools: [HHSDKVideo.ZLEditImageConfiguration.AdjustTool]) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func impactFeedbackWhenAdjustSliderValueIsZero(_ value: Swift.Bool) -> HHSDKVideo.ZLEditImageConfiguration + @discardableResult + public func impactFeedbackStyle(_ style: UIKit.UIImpactFeedbackGenerator.FeedbackStyle) -> HHSDKVideo.ZLEditImageConfiguration +} +@objc public class ZLImageClipRatio : ObjectiveC.NSObject { + public var title: Swift.String + final public let whRatio: CoreGraphics.CGFloat + @objc public init(title: Swift.String, whRatio: CoreGraphics.CGFloat, isCircle: Swift.Bool = false) + @objc override dynamic public init() + @objc deinit +} +extension ZLImageClipRatio { + @objc public static let custom: HHSDKVideo.ZLImageClipRatio + @objc public static let circle: HHSDKVideo.ZLImageClipRatio + @objc public static let wh1x1: HHSDKVideo.ZLImageClipRatio + @objc public static let wh3x4: HHSDKVideo.ZLImageClipRatio + @objc public static let wh4x3: HHSDKVideo.ZLImageClipRatio + @objc public static let wh2x3: HHSDKVideo.ZLImageClipRatio + @objc public static let wh3x2: HHSDKVideo.ZLImageClipRatio + @objc public static let wh9x16: HHSDKVideo.ZLImageClipRatio + @objc public static let wh16x9: HHSDKVideo.ZLImageClipRatio +} +@objc public class ZLEditImageModel : ObjectiveC.NSObject { + final public let drawPaths: [HHSDKVideo.ZLDrawPath] + final public let mosaicPaths: [HHSDKVideo.ZLMosaicPath] + final public let editRect: CoreGraphics.CGRect? + final public let angle: CoreGraphics.CGFloat + final public let brightness: Swift.Float + final public let contrast: Swift.Float + final public let saturation: Swift.Float + final public let selectRatio: HHSDKVideo.ZLImageClipRatio? + final public let selectFilter: HHSDKVideo.ZLFilter? + final public let textStickers: [(state: HHSDKVideo.ZLTextStickerState, index: Swift.Int)]? 
+ final public let imageStickers: [(state: HHSDKVideo.ZLImageStickerState, index: Swift.Int)]? + public init(drawPaths: [HHSDKVideo.ZLDrawPath], mosaicPaths: [HHSDKVideo.ZLMosaicPath], editRect: CoreGraphics.CGRect?, angle: CoreGraphics.CGFloat, brightness: Swift.Float, contrast: Swift.Float, saturation: Swift.Float, selectRatio: HHSDKVideo.ZLImageClipRatio?, selectFilter: HHSDKVideo.ZLFilter, textStickers: [(state: HHSDKVideo.ZLTextStickerState, index: Swift.Int)]?, imageStickers: [(state: HHSDKVideo.ZLImageStickerState, index: Swift.Int)]?) + @objc override dynamic public init() + @objc deinit +} +@objc open class ZLEditImageViewController : UIKit.UIViewController { + @objc public var drawColViewH: CoreGraphics.CGFloat + @objc public var filterColViewH: CoreGraphics.CGFloat + @objc public var adjustColViewH: CoreGraphics.CGFloat + @objc public var ashbinNormalBgColor: UIKit.UIColor + @objc public var cancelBtn: HHSDKVideo.ZLEnlargeButton { + @objc get + @objc set + } + @objc public var mainScrollView: UIKit.UIScrollView { + @objc get + @objc set + } + @objc public var topShadowView: UIKit.UIView { + @objc get + @objc set + } + @objc public var topShadowLayer: QuartzCore.CAGradientLayer { + @objc get + @objc set + } + @objc public var bottomShadowView: UIKit.UIView + @objc public var bottomShadowLayer: QuartzCore.CAGradientLayer + @objc public var doneBtn: UIKit.UIButton + @objc public var revokeBtn: UIKit.UIButton + @objc public var ashbinView: UIKit.UIView { + @objc get + @objc set + } + @objc public var ashbinImgView: UIKit.UIImageView { + @objc get + @objc set + } + @objc public var drawLineWidth: CoreGraphics.CGFloat + @objc public var mosaicLineWidth: CoreGraphics.CGFloat + @objc public var editFinishBlock: ((UIKit.UIImage, HHSDKVideo.ZLEditImageModel?) -> Swift.Void)? + @objc public var cancelEditBlock: (() -> Swift.Void)? 
+ @objc override dynamic public var prefersStatusBarHidden: Swift.Bool { + @objc get + } + @objc override dynamic public var supportedInterfaceOrientations: UIKit.UIInterfaceOrientationMask { + @objc get + } + @objc deinit + @objc public class func showEditImageVC(parentVC: UIKit.UIViewController?, animate: Swift.Bool = false, image: UIKit.UIImage, editModel: HHSDKVideo.ZLEditImageModel? = nil, cancel: (() -> Swift.Void)? = nil, completion: ((UIKit.UIImage, HHSDKVideo.ZLEditImageModel?) -> Swift.Void)?) + @objc public init(image: UIKit.UIImage, editModel: HHSDKVideo.ZLEditImageModel? = nil) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc override dynamic open func viewDidLoad() + @objc override dynamic open func viewDidLayoutSubviews() + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) +} +extension ZLEditImageViewController : UIKit.UIGestureRecognizerDelegate { + @objc dynamic public func gestureRecognizerShouldBegin(_ gestureRecognizer: UIKit.UIGestureRecognizer) -> Swift.Bool +} +extension ZLEditImageViewController : UIKit.UIScrollViewDelegate { + @objc dynamic public func viewForZooming(in scrollView: UIKit.UIScrollView) -> UIKit.UIView? 
+ @objc dynamic public func scrollViewDidZoom(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndZooming(_ scrollView: UIKit.UIScrollView, with view: UIKit.UIView?, atScale scale: CoreGraphics.CGFloat) + @objc dynamic public func scrollViewDidScroll(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndDragging(_ scrollView: UIKit.UIScrollView, willDecelerate decelerate: Swift.Bool) + @objc dynamic public func scrollViewDidEndDecelerating(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndScrollingAnimation(_ scrollView: UIKit.UIScrollView) +} +extension ZLEditImageViewController : UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegate { + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, didSelectItemAt indexPath: Foundation.IndexPath) +} +@objc @_hasMissingDesignatedInitializers public class ZLDrawPath : ObjectiveC.NSObject { + @objc override dynamic public init() + @objc deinit +} +@objc @_hasMissingDesignatedInitializers public class ZLMosaicPath : ObjectiveC.NSObject { + @objc override dynamic public init() + @objc deinit +} +@objc @_hasMissingDesignatedInitializers public class ZLEditVideoViewController : UIKit.UIViewController { + @objc public var editFinishBlock: ((Foundation.URL?) -> Swift.Void)? 
+ @objc override dynamic public var prefersStatusBarHidden: Swift.Bool { + @objc get + } + @objc override dynamic public var supportedInterfaceOrientations: UIKit.UIInterfaceOrientationMask { + @objc get + } + @objc deinit + @objc public init(avAsset: AVFoundation.AVAsset, animateDismiss: Swift.Bool = false) + @objc override dynamic public func viewDidLoad() + @objc override dynamic public func viewDidAppear(_ animated: Swift.Bool) + @objc override dynamic public func viewDidLayoutSubviews() + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) +} +extension ZLEditVideoViewController : UIKit.UIGestureRecognizerDelegate { + @objc dynamic public func gestureRecognizerShouldBegin(_ gestureRecognizer: UIKit.UIGestureRecognizer) -> Swift.Bool +} +extension ZLEditVideoViewController : UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegateFlowLayout { + @objc dynamic public func scrollViewDidScroll(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndDragging(_ scrollView: UIKit.UIScrollView, willDecelerate decelerate: Swift.Bool) + @objc dynamic public func scrollViewDidEndDecelerating(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, insetForSectionAt section: Swift.Int) -> UIKit.UIEdgeInsets + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, willDisplay cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) +} +@objc @_inheritsConvenienceInitializers public class ZLEnlargeButton : UIKit.UIButton 
{ + public var enlargeInsets: UIKit.UIEdgeInsets + public var enlargeInset: CoreGraphics.CGFloat { + get + set + } + @objc override dynamic public func point(inside point: CoreGraphics.CGPoint, with event: UIKit.UIEvent?) -> Swift.Bool + @objc override dynamic public init(frame: CoreGraphics.CGRect) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc deinit +} +public typealias ZLFilterApplierType = ((UIKit.UIImage) -> UIKit.UIImage) +@objc public enum ZLFilterType : Swift.Int { + case normal + case chrome + case fade + case instant + case process + case transfer + case tone + case linear + case sepia + case mono + case noir + case tonal + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc public class ZLFilter : ObjectiveC.NSObject { + public var name: Swift.String + @objc public init(name: Swift.String, filterType: HHSDKVideo.ZLFilterType) + @objc public init(name: Swift.String, applier: HHSDKVideo.ZLFilterApplierType?) 
+ @objc override dynamic public init() + @objc deinit +} +extension ZLFilter { + @objc public static let all: [HHSDKVideo.ZLFilter] + @objc public static let normal: HHSDKVideo.ZLFilter + @objc public static let clarendon: HHSDKVideo.ZLFilter + @objc public static let nashville: HHSDKVideo.ZLFilter + @objc public static let apply1977: HHSDKVideo.ZLFilter + @objc public static let toaster: HHSDKVideo.ZLFilter + @objc public static let chrome: HHSDKVideo.ZLFilter + @objc public static let fade: HHSDKVideo.ZLFilter + @objc public static let instant: HHSDKVideo.ZLFilter + @objc public static let process: HHSDKVideo.ZLFilter + @objc public static let transfer: HHSDKVideo.ZLFilter + @objc public static let tone: HHSDKVideo.ZLFilter + @objc public static let linear: HHSDKVideo.ZLFilter + @objc public static let sepia: HHSDKVideo.ZLFilter + @objc public static let mono: HHSDKVideo.ZLFilter + @objc public static let noir: HHSDKVideo.ZLFilter + @objc public static let tonal: HHSDKVideo.ZLFilter +} +@objc public enum ZLURLType : Swift.Int { + case image + case video + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +@objc @_hasMissingDesignatedInitializers public class ZLImagePreviewController : UIKit.UIViewController { + @objc public var longPressBlock: ((HHSDKVideo.ZLImagePreviewController?, UIKit.UIImage?, Swift.Int) -> Swift.Void)? + @objc public var doneBlock: (([Any]) -> Swift.Void)? + @objc public var videoHttpHeader: [Swift.String : Any]? + @objc override dynamic public var prefersStatusBarHidden: Swift.Bool { + @objc get + } + @objc override dynamic public var preferredStatusBarStyle: UIKit.UIStatusBarStyle { + @objc get + } + @objc public init(datas: [Any], index: Swift.Int = 0, showSelectBtn: Swift.Bool = true, showBottomView: Swift.Bool = true, urlType: ((Foundation.URL) -> HHSDKVideo.ZLURLType)? 
= nil, urlImageLoader: ((Foundation.URL, UIKit.UIImageView, @escaping (CoreGraphics.CGFloat) -> Swift.Void, @escaping () -> Swift.Void) -> Swift.Void)? = nil) + @objc override dynamic public func viewDidLoad() + @objc override dynamic public func viewWillAppear(_ animated: Swift.Bool) + @objc override dynamic public func viewDidAppear(_ animated: Swift.Bool) + @objc override dynamic public func viewDidLayoutSubviews() + @objc override dynamic public init(nibName nibNameOrNil: Swift.String?, bundle nibBundleOrNil: Foundation.Bundle?) + @objc deinit +} +extension ZLImagePreviewController { + @objc dynamic public func scrollViewDidScroll(_ scrollView: UIKit.UIScrollView) + @objc dynamic public func scrollViewDidEndDecelerating(_ scrollView: UIKit.UIScrollView) +} +extension ZLImagePreviewController : UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegateFlowLayout { + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, minimumInteritemSpacingForSectionAt section: Swift.Int) -> CoreGraphics.CGFloat + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, minimumLineSpacingForSectionAt section: Swift.Int) -> CoreGraphics.CGFloat + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, insetForSectionAt section: Swift.Int) -> UIKit.UIEdgeInsets + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, sizeForItemAt indexPath: Foundation.IndexPath) -> CoreGraphics.CGSize + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: 
Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, didEndDisplaying cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) +} +@objc @_hasMissingDesignatedInitializers public class ZLImageStickerState : ObjectiveC.NSObject { + @objc override dynamic public init() + @objc deinit +} +@objc public enum ZLLanguageType : Swift.Int { + case system + case chineseSimplified + case chineseTraditional + case english + case japanese + case french + case german + case russian + case vietnamese + case korean + case malay + case italian + case indonesian + case portuguese + case spanish + case turkish + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +public struct ZLLocalLanguageKey : Swift.Hashable { + public let rawValue: Swift.String + public init(rawValue: Swift.String) + public static let previewCamera: HHSDKVideo.ZLLocalLanguageKey + public static let previewCameraRecord: HHSDKVideo.ZLLocalLanguageKey + public static let previewAlbum: HHSDKVideo.ZLLocalLanguageKey + public static let cancel: HHSDKVideo.ZLLocalLanguageKey + public static let noPhotoTips: HHSDKVideo.ZLLocalLanguageKey + public static let loading: HHSDKVideo.ZLLocalLanguageKey + public static let hudLoading: HHSDKVideo.ZLLocalLanguageKey + public static let done: HHSDKVideo.ZLLocalLanguageKey + public static let ok: HHSDKVideo.ZLLocalLanguageKey + public static let timeout: HHSDKVideo.ZLLocalLanguageKey + public static let noPhotoLibratyAuthority: HHSDKVideo.ZLLocalLanguageKey + public static let noCameraAuthority: HHSDKVideo.ZLLocalLanguageKey + public static let noMicrophoneAuthority: HHSDKVideo.ZLLocalLanguageKey + public static let cameraUnavailable: HHSDKVideo.ZLLocalLanguageKey + public static let keepRecording: HHSDKVideo.ZLLocalLanguageKey + public static let gotoSettings: HHSDKVideo.ZLLocalLanguageKey + public static let 
photo: HHSDKVideo.ZLLocalLanguageKey + public static let originalPhoto: HHSDKVideo.ZLLocalLanguageKey + public static let back: HHSDKVideo.ZLLocalLanguageKey + public static let edit: HHSDKVideo.ZLLocalLanguageKey + public static let editFinish: HHSDKVideo.ZLLocalLanguageKey + public static let revert: HHSDKVideo.ZLLocalLanguageKey + public static let brightness: HHSDKVideo.ZLLocalLanguageKey + public static let contrast: HHSDKVideo.ZLLocalLanguageKey + public static let saturation: HHSDKVideo.ZLLocalLanguageKey + public static let preview: HHSDKVideo.ZLLocalLanguageKey + public static let notAllowMixSelect: HHSDKVideo.ZLLocalLanguageKey + public static let save: HHSDKVideo.ZLLocalLanguageKey + public static let saveImageError: HHSDKVideo.ZLLocalLanguageKey + public static let saveVideoError: HHSDKVideo.ZLLocalLanguageKey + public static let exceededMaxSelectCount: HHSDKVideo.ZLLocalLanguageKey + public static let exceededMaxVideoSelectCount: HHSDKVideo.ZLLocalLanguageKey + public static let lessThanMinVideoSelectCount: HHSDKVideo.ZLLocalLanguageKey + public static let longerThanMaxVideoDuration: HHSDKVideo.ZLLocalLanguageKey + public static let shorterThanMaxVideoDuration: HHSDKVideo.ZLLocalLanguageKey + public static let iCloudVideoLoadFaild: HHSDKVideo.ZLLocalLanguageKey + public static let imageLoadFailed: HHSDKVideo.ZLLocalLanguageKey + public static let customCameraTips: HHSDKVideo.ZLLocalLanguageKey + public static let customCameraTakePhotoTips: HHSDKVideo.ZLLocalLanguageKey + public static let customCameraRecordVideoTips: HHSDKVideo.ZLLocalLanguageKey + public static let minRecordTimeTips: HHSDKVideo.ZLLocalLanguageKey + public static let cameraRoll: HHSDKVideo.ZLLocalLanguageKey + public static let panoramas: HHSDKVideo.ZLLocalLanguageKey + public static let videos: HHSDKVideo.ZLLocalLanguageKey + public static let favorites: HHSDKVideo.ZLLocalLanguageKey + public static let timelapses: HHSDKVideo.ZLLocalLanguageKey + public static let recentlyAdded: 
HHSDKVideo.ZLLocalLanguageKey + public static let bursts: HHSDKVideo.ZLLocalLanguageKey + public static let slomoVideos: HHSDKVideo.ZLLocalLanguageKey + public static let selfPortraits: HHSDKVideo.ZLLocalLanguageKey + public static let screenshots: HHSDKVideo.ZLLocalLanguageKey + public static let depthEffect: HHSDKVideo.ZLLocalLanguageKey + public static let livePhotos: HHSDKVideo.ZLLocalLanguageKey + public static let animated: HHSDKVideo.ZLLocalLanguageKey + public static let myPhotoStream: HHSDKVideo.ZLLocalLanguageKey + public static let noTitleAlbumListPlaceholder: HHSDKVideo.ZLLocalLanguageKey + public static let unableToAccessAllPhotos: HHSDKVideo.ZLLocalLanguageKey + public static let textStickerRemoveTips: HHSDKVideo.ZLLocalLanguageKey + public func hash(into hasher: inout Swift.Hasher) + public static func == (a: HHSDKVideo.ZLLocalLanguageKey, b: HHSDKVideo.ZLLocalLanguageKey) -> Swift.Bool + public var hashValue: Swift.Int { + get + } +} +public typealias Second = Swift.Int +@objc @_inheritsConvenienceInitializers public class ZLPhotoConfiguration : ObjectiveC.NSObject { + @objc public class func `default`() -> HHSDKVideo.ZLPhotoConfiguration + @objc public class func resetConfiguration() + @objc public var sortAscending: Swift.Bool + @objc public var maxSelectCount: Swift.Int { + @objc get + @objc set + } + @objc public var maxVideoSelectCount: Swift.Int { + @objc get + @objc set + } + @objc public var minVideoSelectCount: Swift.Int { + @objc get + @objc set + } + @objc public var allowMixSelect: Swift.Bool + @objc public var maxPreviewCount: Swift.Int + @objc public var cellCornerRadio: CoreGraphics.CGFloat + @objc public var allowSelectImage: Swift.Bool + @objc public var allowSelectVideo: Swift.Bool + @objc public var allowSelectGif: Swift.Bool + @objc public var allowSelectLivePhoto: Swift.Bool + @objc public var allowTakePhotoInLibrary: Swift.Bool { + @objc get + @objc set + } + @objc public var allowEditImage: Swift.Bool { + @objc get + @objc set 
+ } + @objc public var allowEditVideo: Swift.Bool { + @objc get + @objc set + } + @objc public var animateSelectBtnWhenSelect: Swift.Bool + @objc public var selectBtnAnimationDuration: Swift.Double + @objc public var editAfterSelectThumbnailImage: Swift.Bool + @objc public var cropVideoAfterSelectThumbnail: Swift.Bool + @objc public var showClipDirectlyIfOnlyHasClipTool: Swift.Bool + @objc public var saveNewImageAfterEdit: Swift.Bool + @objc public var allowSlideSelect: Swift.Bool + @objc public var autoScrollWhenSlideSelectIsActive: Swift.Bool + @objc public var autoScrollMaxSpeed: CoreGraphics.CGFloat + @objc public var allowDragSelect: Swift.Bool + @objc public var allowSelectOriginal: Swift.Bool + @objc public var allowPreviewPhotos: Swift.Bool + @objc public var showPreviewButtonInAlbum: Swift.Bool + @objc public var showSelectCountOnDoneBtn: Swift.Bool + @objc public var columnCount: Swift.Int { + @objc get + @objc set + } + @objc public var maxEditVideoTime: Swift.Int + @objc public var maxSelectVideoDuration: Swift.Int + @objc public var minSelectVideoDuration: Swift.Int + @objc public var editImageConfiguration: HHSDKVideo.ZLEditImageConfiguration + @objc public var showCaptureImageOnTakePhotoBtn: Swift.Bool + @objc public var showSelectBtnWhenSingleSelect: Swift.Bool + @objc public var showSelectedMask: Swift.Bool + @objc public var showSelectedBorder: Swift.Bool + @objc public var showInvalidMask: Swift.Bool + @objc public var showSelectedIndex: Swift.Bool + @objc public var showSelectedPhotoPreview: Swift.Bool + @objc public var shouldAnialysisAsset: Swift.Bool + @objc public var timeout: Swift.Double + @objc public var languageType: HHSDKVideo.ZLLanguageType { + @objc get + @objc set + } + @objc public var useCustomCamera: Swift.Bool + @objc public var allowTakePhoto: Swift.Bool { + @objc get + @objc set + } + @objc public var allowRecordVideo: Swift.Bool { + @objc get + @objc set + } + @objc public var minRecordDuration: HHSDKVideo.Second { + @objc 
get + @objc set + } + @objc public var maxRecordDuration: HHSDKVideo.Second { + @objc get + @objc set + } + @objc public var cameraConfiguration: HHSDKVideo.ZLCameraConfiguration + @objc public var hudStyle: HHSDKVideo.ZLProgressHUD.HUDStyle + @objc public var canSelectAsset: ((Photos.PHAsset) -> Swift.Bool)? + @objc public var showAddPhotoButton: Swift.Bool + @objc public var showEnterSettingTips: Swift.Bool + @objc public var noAuthorityCallback: ((HHSDKVideo.ZLNoAuthorityType) -> Swift.Void)? + @objc public var operateBeforeDoneAction: ((UIKit.UIViewController, @escaping () -> Swift.Void) -> Swift.Void)? + @objc override dynamic public init() + @objc deinit +} +@objc public enum ZLNoAuthorityType : Swift.Int { + case library + case camera + case microphone + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +extension ZLPhotoConfiguration { + @discardableResult + public func sortAscending(_ ascending: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxSelectCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxVideoSelectCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func minVideoSelectCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowMixSelect(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxPreviewCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func cellCornerRadio(_ cornerRadio: CoreGraphics.CGFloat) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSelectImage(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func allowSelectVideo(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSelectGif(_ value: Swift.Bool) 
-> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSelectLivePhoto(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowTakePhotoInLibrary(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowEditImage(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowEditVideo(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func animateSelectBtnWhenSelect(_ animate: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func selectBtnAnimationDuration(_ duration: CoreFoundation.CFTimeInterval) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func editAfterSelectThumbnailImage(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func cropVideoAfterSelectThumbnail(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showClipDirectlyIfOnlyHasClipTool(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func saveNewImageAfterEdit(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSlideSelect(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func autoScrollWhenSlideSelectIsActive(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func autoScrollMaxSpeed(_ speed: CoreGraphics.CGFloat) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowDragSelect(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowSelectOriginal(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowPreviewPhotos(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showPreviewButtonInAlbum(_ value: Swift.Bool) -> 
HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectCountOnDoneBtn(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func columnCount(_ count: Swift.Int) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxEditVideoTime(_ second: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxSelectVideoDuration(_ duration: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func minSelectVideoDuration(_ duration: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func editImageConfiguration(_ configuration: HHSDKVideo.ZLEditImageConfiguration) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showCaptureImageOnTakePhotoBtn(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectBtnWhenSingleSelect(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectedMask(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectedBorder(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showInvalidMask(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectedIndex(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func showSelectedPhotoPreview(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func shouldAnialysisAsset(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func timeout(_ timeout: Foundation.TimeInterval) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func languageType(_ type: HHSDKVideo.ZLLanguageType) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func useCustomCamera(_ value: Swift.Bool) -> 
HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowTakePhoto(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func allowRecordVideo(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func minRecordDuration(_ duration: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func maxRecordDuration(_ duration: HHSDKVideo.Second) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func cameraConfiguration(_ configuration: HHSDKVideo.ZLCameraConfiguration) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + public func hudStyle(_ style: HHSDKVideo.ZLProgressHUD.HUDStyle) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func canSelectAsset(_ block: ((Photos.PHAsset) -> Swift.Bool)?) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func showAddPhotoButton(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func showEnterSettingTips(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func noAuthorityCallback(_ callback: ((HHSDKVideo.ZLNoAuthorityType) -> Swift.Void)?) -> HHSDKVideo.ZLPhotoConfiguration + @discardableResult + @objc dynamic public func operateBeforeDoneAction(_ block: ((UIKit.UIViewController, @escaping () -> Swift.Void) -> Swift.Void)?) -> HHSDKVideo.ZLPhotoConfiguration +} +@objc @_inheritsConvenienceInitializers public class ZLPhotoManager : ObjectiveC.NSObject { + @objc public class func saveImageToAlbum(image: UIKit.UIImage, completion: ((Swift.Bool, Photos.PHAsset?) -> Swift.Void)?) + @objc public class func saveVideoToAlbum(url: Foundation.URL, completion: ((Swift.Bool, Photos.PHAsset?) -> Swift.Void)?) 
+ @objc public class func fetchPhoto(in result: Photos.PHFetchResult<Photos.PHAsset>, ascending: Swift.Bool, allowSelectImage: Swift.Bool, allowSelectVideo: Swift.Bool, limitCount: Swift.Int = .max) -> [HHSDKVideo.ZLPhotoModel] + @objc public class func getPhotoAlbumList(ascending: Swift.Bool, allowSelectImage: Swift.Bool, allowSelectVideo: Swift.Bool, completion: ([HHSDKVideo.ZLAlbumListModel]) -> Swift.Void) + @objc public class func getCameraRollAlbum(allowSelectImage: Swift.Bool, allowSelectVideo: Swift.Bool, completion: @escaping (HHSDKVideo.ZLAlbumListModel) -> Swift.Void) + @discardableResult + @objc public class func fetchImage(for asset: Photos.PHAsset, size: CoreGraphics.CGSize, progress: ((CoreGraphics.CGFloat, Swift.Error?, Swift.UnsafeMutablePointer<ObjectiveC.ObjCBool>, [Swift.AnyHashable : Any]?) -> Swift.Void)? = nil, completion: @escaping (UIKit.UIImage?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @discardableResult + @objc public class func fetchOriginalImage(for asset: Photos.PHAsset, progress: ((CoreGraphics.CGFloat, Swift.Error?, Swift.UnsafeMutablePointer<ObjectiveC.ObjCBool>, [Swift.AnyHashable : Any]?) -> Swift.Void)? = nil, completion: @escaping (UIKit.UIImage?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @discardableResult + @objc public class func fetchOriginalImageData(for asset: Photos.PHAsset, progress: ((CoreGraphics.CGFloat, Swift.Error?, Swift.UnsafeMutablePointer<ObjectiveC.ObjCBool>, [Swift.AnyHashable : Any]?) -> Swift.Void)? 
= nil, completion: @escaping (Foundation.Data, [Swift.AnyHashable : Any]?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @objc public class func fetchLivePhoto(for asset: Photos.PHAsset, completion: @escaping (Photos.PHLivePhoto?, [Swift.AnyHashable : Any]?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @objc public class func fetchVideo(for asset: Photos.PHAsset, progress: ((CoreGraphics.CGFloat, Swift.Error?, Swift.UnsafeMutablePointer<ObjectiveC.ObjCBool>, [Swift.AnyHashable : Any]?) -> Swift.Void)? = nil, completion: @escaping (AVFoundation.AVPlayerItem?, [Swift.AnyHashable : Any]?, Swift.Bool) -> Swift.Void) -> Photos.PHImageRequestID + @objc public class func fetchAVAsset(forVideo asset: Photos.PHAsset, completion: @escaping (AVFoundation.AVAsset?, [Swift.AnyHashable : Any]?) -> Swift.Void) -> Photos.PHImageRequestID + @objc public class func fetchAssetFilePath(asset: Photos.PHAsset, completion: @escaping (Swift.String?) -> Swift.Void) + @objc override dynamic public init() + @objc deinit +} +extension ZLPhotoManager { + @objc dynamic public class func hasPhotoLibratyAuthority() -> Swift.Bool + @objc dynamic public class func hasCameraAuthority() -> Swift.Bool + @objc dynamic public class func hasMicrophoneAuthority() -> Swift.Bool +} +extension ZLPhotoModel { + public enum MediaType : Swift.Int { + case unknown + case image + case gif + case livePhoto + case video + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } +} +@objc public class ZLPhotoModel : ObjectiveC.NSObject { + final public let ident: Swift.String + final public let asset: Photos.PHAsset + public var type: HHSDKVideo.ZLPhotoModel.MediaType + public var duration: Swift.String + public var isSelected: Swift.Bool + public var editImage: UIKit.UIImage? 
{ + get + set + } + public var second: HHSDKVideo.Second { + get + } + public var whRatio: CoreGraphics.CGFloat { + get + } + public var previewSize: CoreGraphics.CGSize { + get + } + public var editImageModel: HHSDKVideo.ZLEditImageModel? + public init(asset: Photos.PHAsset) + public func transformAssetType(for asset: Photos.PHAsset) -> HHSDKVideo.ZLPhotoModel.MediaType + public func transformDuration(for asset: Photos.PHAsset) -> Swift.String + @objc override dynamic public init() + @objc deinit +} +extension ZLPhotoModel { + public static func == (lhs: HHSDKVideo.ZLPhotoModel, rhs: HHSDKVideo.ZLPhotoModel) -> Swift.Bool +} +@objc @_inheritsConvenienceInitializers public class ZLPhotoPreviewSheet : UIKit.UIView { + @objc public var selectImageBlock: (([UIKit.UIImage], [Photos.PHAsset], Swift.Bool) -> Swift.Void)? + @objc public var selectImageRequestErrorBlock: (([Photos.PHAsset], [Swift.Int]) -> Swift.Void)? + @objc public var cancelBlock: (() -> Swift.Void)? + @objc deinit + @objc convenience override dynamic public init(frame: CoreGraphics.CGRect) + @objc public init(selectedAssets: [Photos.PHAsset]? 
= nil) + @objc required dynamic public init?(coder: Foundation.NSCoder) + @objc override dynamic public func layoutSubviews() + @objc public func showPreview(animate: Swift.Bool = true, sender: UIKit.UIViewController) + @objc public func showPhotoLibrary(sender: UIKit.UIViewController) + @objc public func previewAssets(sender: UIKit.UIViewController, assets: [Photos.PHAsset], index: Swift.Int, isOriginal: Swift.Bool, showBottomViewAndSelectBtn: Swift.Bool = true) +} +extension ZLPhotoPreviewSheet : UIKit.UIGestureRecognizerDelegate { + @objc override dynamic public func gestureRecognizerShouldBegin(_ gestureRecognizer: UIKit.UIGestureRecognizer) -> Swift.Bool +} +extension ZLPhotoPreviewSheet : UIKit.UICollectionViewDataSource, UIKit.UICollectionViewDelegateFlowLayout { + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, layout collectionViewLayout: UIKit.UICollectionViewLayout, sizeForItemAt indexPath: Foundation.IndexPath) -> CoreGraphics.CGSize + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, numberOfItemsInSection section: Swift.Int) -> Swift.Int + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, cellForItemAt indexPath: Foundation.IndexPath) -> UIKit.UICollectionViewCell + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, willDisplay cell: UIKit.UICollectionViewCell, forItemAt indexPath: Foundation.IndexPath) + @objc dynamic public func collectionView(_ collectionView: UIKit.UICollectionView, didSelectItemAt indexPath: Foundation.IndexPath) +} +extension ZLPhotoPreviewSheet : UIKit.UIImagePickerControllerDelegate, UIKit.UINavigationControllerDelegate { + @objc dynamic public func imagePickerController(_ picker: UIKit.UIImagePickerController, didFinishPickingMediaWithInfo info: [UIKit.UIImagePickerController.InfoKey : Any]) +} +extension ZLPhotoPreviewSheet : Photos.PHPhotoLibraryChangeObserver { + @objc dynamic public func 
photoLibraryDidChange(_ changeInstance: Photos.PHChange) +} +@objc @_inheritsConvenienceInitializers public class ZLPhotoUIConfiguration : ObjectiveC.NSObject { + @objc public enum CancelButtonStyle : Swift.Int { + case text + case image + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc public class func `default`() -> HHSDKVideo.ZLPhotoUIConfiguration + @objc public class func resetConfiguration() + @objc public var style: HHSDKVideo.ZLPhotoBrowserStyle + @objc public var statusBarStyle: UIKit.UIStatusBarStyle + @objc public var navCancelButtonStyle: HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle + @objc public var showStatusBarInPreviewInterface: Swift.Bool + @objc public var navViewBlurEffectOfAlbumList: UIKit.UIBlurEffect? + @objc public var navViewBlurEffectOfPreview: UIKit.UIBlurEffect? + @objc public var bottomViewBlurEffectOfAlbumList: UIKit.UIBlurEffect? + @objc public var bottomViewBlurEffectOfPreview: UIKit.UIBlurEffect? + @objc public var customImageNames: [Swift.String] { + @objc get + @objc set + } + public var customImageForKey: [Swift.String : UIKit.UIImage?] { + get + set + } + @objc public var customImageForKey_objc: [Swift.String : UIKit.UIImage] { + @objc get + @objc set + } + public var customLanguageKeyValue: [HHSDKVideo.ZLLocalLanguageKey : Swift.String] { + get + set + } + @objc public var customLanguageKeyValue_objc: [Swift.String : Swift.String] { + @objc get + @objc set + } + @objc public var themeFontName: Swift.String? 
{ + @objc get + @objc set + } + @objc public var sheetTranslucentColor: UIKit.UIColor + @objc public var sheetBtnBgColor: UIKit.UIColor + @objc public var sheetBtnTitleColor: UIKit.UIColor + @objc public var sheetBtnTitleTintColor: UIKit.UIColor + @objc public var navBarColor: UIKit.UIColor + @objc public var navBarColorOfPreviewVC: UIKit.UIColor + @objc public var navTitleColor: UIKit.UIColor + @objc public var navTitleColorOfPreviewVC: UIKit.UIColor + @objc public var navEmbedTitleViewBgColor: UIKit.UIColor + @objc public var albumListBgColor: UIKit.UIColor + @objc public var embedAlbumListTranslucentColor: UIKit.UIColor + @objc public var albumListTitleColor: UIKit.UIColor + @objc public var albumListCountColor: UIKit.UIColor + @objc public var separatorColor: UIKit.UIColor + @objc public var thumbnailBgColor: UIKit.UIColor + @objc public var previewVCBgColor: UIKit.UIColor + @objc public var bottomToolViewBgColor: UIKit.UIColor + @objc public var bottomToolViewBgColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewBtnNormalTitleColor: UIKit.UIColor + @objc public var bottomToolViewDoneBtnNormalTitleColor: UIKit.UIColor + @objc public var bottomToolViewBtnNormalTitleColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewDoneBtnNormalTitleColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewBtnDisableTitleColor: UIKit.UIColor + @objc public var bottomToolViewDoneBtnDisableTitleColor: UIKit.UIColor + @objc public var bottomToolViewBtnDisableTitleColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewDoneBtnDisableTitleColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewBtnNormalBgColor: UIKit.UIColor + @objc public var bottomToolViewBtnNormalBgColorOfPreviewVC: UIKit.UIColor + @objc public var bottomToolViewBtnDisableBgColor: UIKit.UIColor + @objc public var bottomToolViewBtnDisableBgColorOfPreviewVC: UIKit.UIColor + @objc public var selectMorePhotoWhenAuthIsLismitedTitleColor: UIKit.UIColor + @objc public 
var cameraRecodeProgressColor: UIKit.UIColor + @objc public var selectedMaskColor: UIKit.UIColor + @objc public var selectedBorderColor: UIKit.UIColor + @objc public var invalidMaskColor: UIKit.UIColor + @objc public var indexLabelTextColor: UIKit.UIColor + @objc public var indexLabelBgColor: UIKit.UIColor + @objc public var cameraCellBgColor: UIKit.UIColor + @objc public var adjustSliderNormalColor: UIKit.UIColor + @objc public var adjustSliderTintColor: UIKit.UIColor + @objc override dynamic public init() + @objc deinit +} +@objc public enum ZLPhotoBrowserStyle : Swift.Int { + case embedAlbumList + case externalAlbumList + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } +} +extension ZLPhotoUIConfiguration { + @discardableResult + public func style(_ style: HHSDKVideo.ZLPhotoBrowserStyle) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func statusBarStyle(_ statusBarStyle: UIKit.UIStatusBarStyle) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navCancelButtonStyle(_ style: HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func showStatusBarInPreviewInterface(_ value: Swift.Bool) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navViewBlurEffectOfAlbumList(_ effect: UIKit.UIBlurEffect?) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navViewBlurEffectOfPreview(_ effect: UIKit.UIBlurEffect?) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomViewBlurEffectOfAlbumList(_ effect: UIKit.UIBlurEffect?) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomViewBlurEffectOfPreview(_ effect: UIKit.UIBlurEffect?) 
-> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func customImageNames(_ names: [Swift.String]) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func customImageForKey(_ map: [Swift.String : UIKit.UIImage?]) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func customLanguageKeyValue(_ map: [HHSDKVideo.ZLLocalLanguageKey : Swift.String]) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func themeFontName(_ name: Swift.String) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func sheetTranslucentColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func sheetBtnBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func sheetBtnTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func sheetBtnTitleTintColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navBarColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navBarColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func navEmbedTitleViewBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func albumListBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func embedAlbumListTranslucentColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func albumListTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func albumListCountColor(_ 
color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func separatorColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func thumbnailBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func previewVCBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBgColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnNormalTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewDoneBtnNormalTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnNormalTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewDoneBtnNormalTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnDisableTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewDoneBtnDisableTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnDisableTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewDoneBtnDisableTitleColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnNormalBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnNormalBgColorOfPreviewVC(_ color: UIKit.UIColor) -> 
HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnDisableBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func bottomToolViewBtnDisableBgColorOfPreviewVC(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func selectMorePhotoWhenAuthIsLismitedTitleColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func cameraRecodeProgressColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func selectedMaskColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func selectedBorderColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func invalidMaskColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func indexLabelTextColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func indexLabelBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func cameraCellBgColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func adjustSliderNormalColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration + @discardableResult + public func adjustSliderTintColor(_ color: UIKit.UIColor) -> HHSDKVideo.ZLPhotoUIConfiguration +} +@objc @_hasMissingDesignatedInitializers public class ZLProgressHUD : UIKit.UIView { + @objc public enum HUDStyle : Swift.Int { + case light + case lightBlur + case dark + case darkBlur + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } + @objc deinit + @objc public init(style: HHSDKVideo.ZLProgressHUD.HUDStyle) + @objc public func show(timeout: Foundation.TimeInterval = 100) + @objc public func hide() + @objc override dynamic 
public init(frame: CoreGraphics.CGRect) +} +@objc @_hasMissingDesignatedInitializers public class ZLTextStickerState : ObjectiveC.NSObject { + @objc override dynamic public init() + @objc deinit +} +@objc @_inheritsConvenienceInitializers public class ZLVideoManager : ObjectiveC.NSObject { + @objc public class func mergeVideos(fileUrls: [Foundation.URL], completion: @escaping ((Foundation.URL?, Swift.Error?) -> Swift.Void)) + @objc override dynamic public init() + @objc deinit +} +extension ZLVideoManager { + @objc dynamic public class func exportVideo(for asset: Photos.PHAsset, exportType: HHSDKVideo.ZLVideoManager.ExportType = .mov, presetName: Swift.String = AVAssetExportPresetMediumQuality, complete: @escaping ((Foundation.URL?, Swift.Error?) -> Swift.Void)) + @objc dynamic public class func exportVideo(for asset: AVFoundation.AVAsset, range: CoreMedia.CMTimeRange = CMTimeRange(start: .zero, duration: .positiveInfinity), exportType: HHSDKVideo.ZLVideoManager.ExportType = .mov, presetName: Swift.String = AVAssetExportPresetMediumQuality, complete: @escaping ((Foundation.URL?, Swift.Error?) 
-> Swift.Void)) +} +extension ZLVideoManager { + @objc public enum ExportType : Swift.Int { + case mov + case mp4 + public init?(rawValue: Swift.Int) + public typealias RawValue = Swift.Int + public var rawValue: Swift.Int { + get + } + } +} +extension HHSDKVideo.AES.Error : Swift.Equatable {} +extension HHSDKVideo.AES.Error : Swift.Hashable {} +extension HHSDKVideo.AES.Variant : Swift.Equatable {} +extension HHSDKVideo.AES.Variant : Swift.Hashable {} +extension HHSDKVideo.AES.Variant : Swift.RawRepresentable {} +extension HHSDKVideo.Bit : Swift.Equatable {} +extension HHSDKVideo.Bit : Swift.Hashable {} +extension HHSDKVideo.Bit : Swift.RawRepresentable {} +extension HHSDKVideo.Blowfish.Error : Swift.Equatable {} +extension HHSDKVideo.Blowfish.Error : Swift.Hashable {} +extension HHSDKVideo.CBC.Error : Swift.Equatable {} +extension HHSDKVideo.CBC.Error : Swift.Hashable {} +extension HHSDKVideo.CCM : HHSDKVideo.BlockMode {} +extension HHSDKVideo.CCM.Error : Swift.Equatable {} +extension HHSDKVideo.CCM.Error : Swift.Hashable {} +extension HHSDKVideo.CFB.Error : Swift.Equatable {} +extension HHSDKVideo.CFB.Error : Swift.Hashable {} +extension HHSDKVideo.ChaCha20.Error : Swift.Equatable {} +extension HHSDKVideo.ChaCha20.Error : Swift.Hashable {} +extension HHSDKVideo.CipherError : Swift.Equatable {} +extension HHSDKVideo.CipherError : Swift.Hashable {} +extension HHSDKVideo.CMAC.Error : Swift.Equatable {} +extension HHSDKVideo.CMAC.Error : Swift.Hashable {} +extension HHSDKVideo.CTR : HHSDKVideo.BlockMode {} +extension HHSDKVideo.CTR.Error : Swift.Equatable {} +extension HHSDKVideo.CTR.Error : Swift.Hashable {} +extension HHSDKVideo.DateTransform.Unit : Swift.Equatable {} +extension HHSDKVideo.DateTransform.Unit : Swift.Hashable {} +extension HHSDKVideo.DateTransform.Unit : Swift.RawRepresentable {} +extension HHSDKVideo.DGElasticPullToRefreshState : Swift.Equatable {} +extension HHSDKVideo.DGElasticPullToRefreshState : Swift.Hashable {} +extension 
HHSDKVideo.DGElasticPullToRefreshState : Swift.RawRepresentable {} +extension HHSDKVideo.EKAlertMessage.ImagePosition : Swift.Equatable {} +extension HHSDKVideo.EKAlertMessage.ImagePosition : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.Animation.Translate.AnchorPosition : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.DisplayMode : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.DisplayMode : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.NotificationHapticFeedback : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.NotificationHapticFeedback : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.Position : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.Position : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.PositionConstraints.Rotation.SupportedInterfaceOrientation : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.Precedence.QueueingHeuristic : Swift.Hashable {} +extension HHSDKVideo.EKAttributes.StatusBar : Swift.Equatable {} +extension HHSDKVideo.EKAttributes.StatusBar : Swift.Hashable {} +extension HHSDKVideo.GCM.Mode : Swift.Equatable {} +extension HHSDKVideo.GCM.Mode : Swift.Hashable {} +extension HHSDKVideo.GCM.Error : Swift.Equatable {} +extension HHSDKVideo.GCM.Error : Swift.Hashable {} +extension HHSDKVideo.HHBaseCallingState : Swift.Equatable {} +extension HHSDKVideo.HHBaseCallingState : Swift.Hashable {} +extension HHSDKVideo.HHBaseCallingState : Swift.RawRepresentable {} +extension HHSDKVideo.HHMediaType : Swift.Equatable {} +extension HHSDKVideo.HHMediaType : Swift.Hashable {} +extension HHSDKVideo.HHMediaType : Swift.RawRepresentable {} +extension HHSDKVideo.DateFormat : Swift.Equatable {} +extension HHSDKVideo.DateFormat : Swift.Hashable {} +extension HHSDKVideo.DateFormat : Swift.RawRepresentable {} 
+extension HHSDKVideo.HHConsType : Swift.Equatable {} +extension HHSDKVideo.HHConsType : Swift.Hashable {} +extension HHSDKVideo.HHConsType : Swift.RawRepresentable {} +extension HHSDKVideo.HHFileCacheManager.HHAssetPathType : Swift.Equatable {} +extension HHSDKVideo.HHFileCacheManager.HHAssetPathType : Swift.Hashable {} +extension HHSDKVideo.HHFileCacheManager.HHFileFormat : Swift.Equatable {} +extension HHSDKVideo.HHFileCacheManager.HHFileFormat : Swift.Hashable {} +extension HHSDKVideo.HHFileCacheManager.HHFileFormat : Swift.RawRepresentable {} +extension HHSDKVideo.HHLogMode : Swift.Equatable {} +extension HHSDKVideo.HHLogMode : Swift.Hashable {} +extension HHSDKVideo.HHLogMode : Swift.RawRepresentable {} +extension HHSDKVideo.HHCallType : Swift.Equatable {} +extension HHSDKVideo.HHCallType : Swift.Hashable {} +extension HHSDKVideo.HHCallType : Swift.RawRepresentable {} +extension HHSDKVideo.HHServerType : Swift.Equatable {} +extension HHSDKVideo.HHServerType : Swift.Hashable {} +extension HHSDKVideo.HHRequestMethod : Swift.Equatable {} +extension HHSDKVideo.HHRequestMethod : Swift.Hashable {} +extension HHSDKVideo.HHPagerViewTransformerType : Swift.Equatable {} +extension HHSDKVideo.HHPagerViewTransformerType : Swift.Hashable {} +extension HHSDKVideo.HHPagerViewTransformerType : Swift.RawRepresentable {} +extension HHSDKVideo.HHRealNameType : Swift.Equatable {} +extension HHSDKVideo.HHRealNameType : Swift.Hashable {} +extension HHSDKVideo.HHRealNameType : Swift.RawRepresentable {} +extension HHSDKVideo.TrtcLog : Swift.Equatable {} +extension HHSDKVideo.TrtcLog : Swift.Hashable {} +extension HHSDKVideo.TrtcLog : Swift.RawRepresentable {} +extension HHSDKVideo.TrtcError : Swift.Equatable {} +extension HHSDKVideo.TrtcError : Swift.Hashable {} +extension HHSDKVideo.TrtcError : Swift.RawRepresentable {} +extension HHSDKVideo.hhToastPosition : Swift.Equatable {} +extension HHSDKVideo.hhToastPosition : Swift.Hashable {} +extension HHSDKVideo.HKDF.Error : 
Swift.Equatable {} +extension HHSDKVideo.HKDF.Error : Swift.Hashable {} +extension HHSDKVideo.HMAC.Error : Swift.Equatable {} +extension HHSDKVideo.HMAC.Error : Swift.Hashable {} +extension HHSDKVideo.HMAC.Variant : Swift.Equatable {} +extension HHSDKVideo.HMAC.Variant : Swift.Hashable {} +extension HHSDKVideo.ItemClass : Swift.Equatable {} +extension HHSDKVideo.ItemClass : Swift.Hashable {} +extension HHSDKVideo.ProtocolType : Swift.Equatable {} +extension HHSDKVideo.ProtocolType : Swift.Hashable {} +extension HHSDKVideo.AuthenticationType : Swift.Equatable {} +extension HHSDKVideo.AuthenticationType : Swift.Hashable {} +extension HHSDKVideo.Accessibility : Swift.Equatable {} +extension HHSDKVideo.Accessibility : Swift.Hashable {} +extension HHSDKVideo.Status : Swift.Equatable {} +extension HHSDKVideo.Status : Swift.Hashable {} +extension HHSDKVideo.MappingType : Swift.Equatable {} +extension HHSDKVideo.MappingType : Swift.Hashable {} +extension HHSDKVideo.OCB.Mode : Swift.Equatable {} +extension HHSDKVideo.OCB.Mode : Swift.Hashable {} +extension HHSDKVideo.OCB.Error : Swift.Equatable {} +extension HHSDKVideo.OCB.Error : Swift.Hashable {} +extension HHSDKVideo.OFB.Error : Swift.Equatable {} +extension HHSDKVideo.OFB.Error : Swift.Hashable {} +extension HHSDKVideo.Padding : Swift.Equatable {} +extension HHSDKVideo.Padding : Swift.Hashable {} +extension HHSDKVideo.PKCS5.PBKDF1.Error : Swift.Equatable {} +extension HHSDKVideo.PKCS5.PBKDF1.Error : Swift.Hashable {} +extension HHSDKVideo.PKCS5.PBKDF1.Variant : Swift.Equatable {} +extension HHSDKVideo.PKCS5.PBKDF1.Variant : Swift.Hashable {} +extension HHSDKVideo.PKCS5.PBKDF2.Error : Swift.Equatable {} +extension HHSDKVideo.PKCS5.PBKDF2.Error : Swift.Hashable {} +extension HHSDKVideo.PCBC.Error : Swift.Equatable {} +extension HHSDKVideo.PCBC.Error : Swift.Hashable {} +extension HHSDKVideo.PermissionStatus : Swift.Equatable {} +extension HHSDKVideo.PermissionStatus : Swift.Hashable {} +extension 
HHSDKVideo.PermissionStatus : Swift.RawRepresentable {} +extension HHSDKVideo.HHBasePermissionType : Swift.Equatable {} +extension HHSDKVideo.HHBasePermissionType : Swift.Hashable {} +extension HHSDKVideo.HHBasePermissionType : Swift.RawRepresentable {} +extension HHSDKVideo.Poly1305.Error : Swift.Equatable {} +extension HHSDKVideo.Poly1305.Error : Swift.Hashable {} +extension HHSDKVideo.Rabbit.Error : Swift.Equatable {} +extension HHSDKVideo.Rabbit.Error : Swift.Hashable {} +extension HHSDKVideo.Reachability.NetworkStatus : Swift.Equatable {} +extension HHSDKVideo.Reachability.NetworkStatus : Swift.Hashable {} +extension HHSDKVideo.RecordImgType : Swift.Equatable {} +extension HHSDKVideo.RecordImgType : Swift.Hashable {} +extension HHSDKVideo.RecordImgType : Swift.RawRepresentable {} +extension HHSDKVideo.SHA2.Variant : Swift.Equatable {} +extension HHSDKVideo.SHA2.Variant : Swift.Hashable {} +extension HHSDKVideo.SHA3.Variant : Swift.Equatable {} +extension HHSDKVideo.SHA3.Variant : Swift.Hashable {} +extension HHSDKVideo.HHIMCmd : Swift.Equatable {} +extension HHSDKVideo.HHIMCmd : Swift.Hashable {} +extension HHSDKVideo.HHIMCmd : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset : Swift.Equatable {} +extension HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.CaptureSessionPreset : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.FocusMode : Swift.Equatable {} +extension HHSDKVideo.ZLCameraConfiguration.FocusMode : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.FocusMode : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.ExposureMode : Swift.Equatable {} +extension HHSDKVideo.ZLCameraConfiguration.ExposureMode : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.ExposureMode : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.FlashMode : Swift.Equatable {} +extension 
HHSDKVideo.ZLCameraConfiguration.FlashMode : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.FlashMode : Swift.RawRepresentable {} +extension HHSDKVideo.ZLCameraConfiguration.VideoExportType : Swift.Equatable {} +extension HHSDKVideo.ZLCameraConfiguration.VideoExportType : Swift.Hashable {} +extension HHSDKVideo.ZLCameraConfiguration.VideoExportType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLEditImageConfiguration.EditTool : Swift.Equatable {} +extension HHSDKVideo.ZLEditImageConfiguration.EditTool : Swift.Hashable {} +extension HHSDKVideo.ZLEditImageConfiguration.EditTool : Swift.RawRepresentable {} +extension HHSDKVideo.ZLEditImageConfiguration.AdjustTool : Swift.Equatable {} +extension HHSDKVideo.ZLEditImageConfiguration.AdjustTool : Swift.Hashable {} +extension HHSDKVideo.ZLEditImageConfiguration.AdjustTool : Swift.RawRepresentable {} +extension HHSDKVideo.ZLFilterType : Swift.Equatable {} +extension HHSDKVideo.ZLFilterType : Swift.Hashable {} +extension HHSDKVideo.ZLFilterType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLURLType : Swift.Equatable {} +extension HHSDKVideo.ZLURLType : Swift.Hashable {} +extension HHSDKVideo.ZLURLType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLLanguageType : Swift.Equatable {} +extension HHSDKVideo.ZLLanguageType : Swift.Hashable {} +extension HHSDKVideo.ZLLanguageType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLNoAuthorityType : Swift.Equatable {} +extension HHSDKVideo.ZLNoAuthorityType : Swift.Hashable {} +extension HHSDKVideo.ZLNoAuthorityType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLPhotoModel.MediaType : Swift.Equatable {} +extension HHSDKVideo.ZLPhotoModel.MediaType : Swift.Hashable {} +extension HHSDKVideo.ZLPhotoModel.MediaType : Swift.RawRepresentable {} +extension HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle : Swift.Equatable {} +extension HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle : Swift.Hashable {} +extension 
HHSDKVideo.ZLPhotoUIConfiguration.CancelButtonStyle : Swift.RawRepresentable {} +extension HHSDKVideo.ZLPhotoBrowserStyle : Swift.Equatable {} +extension HHSDKVideo.ZLPhotoBrowserStyle : Swift.Hashable {} +extension HHSDKVideo.ZLPhotoBrowserStyle : Swift.RawRepresentable {} +extension HHSDKVideo.ZLProgressHUD.HUDStyle : Swift.Equatable {} +extension HHSDKVideo.ZLProgressHUD.HUDStyle : Swift.Hashable {} +extension HHSDKVideo.ZLProgressHUD.HUDStyle : Swift.RawRepresentable {} +extension HHSDKVideo.ZLVideoManager.ExportType : Swift.Equatable {} +extension HHSDKVideo.ZLVideoManager.ExportType : Swift.Hashable {} +extension HHSDKVideo.ZLVideoManager.ExportType : Swift.RawRepresentable {} diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64.swiftmodule b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64.swiftmodule new file mode 100644 index 0000000..2ae19d4 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Modules/HHSDKVideo.swiftmodule/x86_64.swiftmodule differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Modules/module.modulemap b/HHVDoctorSDK/HHSDKVideo.framework/Modules/module.modulemap new file mode 100644 index 0000000..013d793 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/Modules/module.modulemap @@ -0,0 +1,11 @@ +framework module HHSDKVideo { + umbrella header "HHSDKVideo-umbrella.h" + + export * + module * { export * } +} + +module HHSDKVideo.Swift { + header "HHSDKVideo-Swift.h" + requires objc +} diff --git a/HHVDoctorSDK/HHSDKVideo.framework/ProductTipView.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/ProductTipView.nib/objects-11.0+.nib new file mode 100644 index 0000000..2549121 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/ProductTipView.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/ProductTipView.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/ProductTipView.nib/runtime.nib new file mode 100644 index 0000000..c5bd8d2 
Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/ProductTipView.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/Info.plist b/HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/Info.plist new file mode 100644 index 0000000..b6a5f72 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/Info.plist differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/inP-8t-qNB-view-NmI-lS-I7Y.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/inP-8t-qNB-view-NmI-lS-I7Y.nib/objects-11.0+.nib new file mode 100644 index 0000000..8a37911 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/inP-8t-qNB-view-NmI-lS-I7Y.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/inP-8t-qNB-view-NmI-lS-I7Y.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/inP-8t-qNB-view-NmI-lS-I7Y.nib/runtime.nib new file mode 100644 index 0000000..8147331 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/inP-8t-qNB-view-NmI-lS-I7Y.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/realName.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/realName.nib/objects-11.0+.nib new file mode 100644 index 0000000..7115da2 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/realName.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/realName.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/realName.nib/runtime.nib new file mode 100644 index 0000000..977ebfb Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/RealName.storyboardc/realName.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/RealNameView.nib b/HHVDoctorSDK/HHSDKVideo.framework/RealNameView.nib new 
file mode 100644 index 0000000..7e70bb5 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/RealNameView.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/1yM-In-WzS-view-hCB-L7-FyM.nib b/HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/1yM-In-WzS-view-hCB-L7-FyM.nib new file mode 100644 index 0000000..ebdcfb9 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/1yM-In-WzS-view-hCB-L7-FyM.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/Info.plist b/HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/Info.plist new file mode 100644 index 0000000..261b4ea Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/Info.plist differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/UINavigationController-nJR-FK-Nsn.nib b/HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/UINavigationController-nJR-FK-Nsn.nib new file mode 100644 index 0000000..6c193cb Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/UINavigationController-nJR-FK-Nsn.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/blocked.nib b/HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/blocked.nib new file mode 100644 index 0000000..15310ed Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/blocked.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/eYM-0S-bXl-view-yXU-C3-IV4.nib b/HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/eYM-0S-bXl-view-yXU-C3-IV4.nib new file mode 100644 index 0000000..1363bce Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/Video.storyboardc/eYM-0S-bXl-view-yXU-C3-IV4.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/WebBrowser.storyboardc/Info.plist b/HHVDoctorSDK/HHSDKVideo.framework/WebBrowser.storyboardc/Info.plist new file mode 100644 index 0000000..f67c49d Binary files /dev/null and 
b/HHVDoctorSDK/HHSDKVideo.framework/WebBrowser.storyboardc/Info.plist differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/WebBrowser.storyboardc/R4r-RW-Ik2-view-kp3-lk-DkN.nib b/HHVDoctorSDK/HHSDKVideo.framework/WebBrowser.storyboardc/R4r-RW-Ik2-view-kp3-lk-DkN.nib new file mode 100644 index 0000000..3f5b490 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/WebBrowser.storyboardc/R4r-RW-Ik2-view-kp3-lk-DkN.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/WebBrowser.storyboardc/webbrowser.nib b/HHVDoctorSDK/HHSDKVideo.framework/WebBrowser.storyboardc/webbrowser.nib new file mode 100644 index 0000000..be6a959 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/WebBrowser.storyboardc/webbrowser.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMBadResView.strings b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMBadResView.strings new file mode 100644 index 0000000..540a881 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMBadResView.strings @@ -0,0 +1,15 @@ + +/* Class = "UIButton"; normalTitle = "换个医生问问"; ObjectID = "Mpm-Cy-8No"; */ +"Mpm-Cy-8No.normalTitle" = "اسأل عن طبيب آخر"; + +/* Class = "UILabel"; text = "匿名评价"; ObjectID = "TEp-ay-Bhn"; */ +"TEp-ay-Bhn.text" = "تقييم مجهول"; + +/* Class = "UILabel"; text = "感谢反馈"; ObjectID = "dfJ-Fs-9dC"; */ +"dfJ-Fs-9dC.text" = ""; + +/* Class = "UILabel"; text = "很抱歉给您不好的体验,建议您换个医生咨询 视频医生将持续优化用户体验"; ObjectID = "gCO-sp-izU"; */ +"gCO-sp-izU.text" = "شكرًا لك على ملاحظاتك n\ سنتعامل مع شكواك في أقرب وقت ممكن وسنقدم لك ملاحظات على النتيجة. 
آسف للتجربة السيئة ، يمكنك أن تسأل طبيب آخر"; + +/* Class = "UIButton"; normalTitle = "我要投诉"; ObjectID = "z3D-0Q-OOx"; */ +"z3D-0Q-OOx.normalTitle" = "أريد تقديم شكوى"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMGoodResView.strings b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMGoodResView.strings new file mode 100644 index 0000000..66183b4 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMGoodResView.strings @@ -0,0 +1,6 @@ + +/* Class = "UILabel"; text = "感谢反馈,我们将努力提升服务体验"; ObjectID = "MT6-nh-pVb"; */ +"MT6-nh-pVb.text" = "شكرًا على التعليقات سنعمل بجد لتحسين تجربة الخدمة"; + +/* Class = "UILabel"; text = "匿名评价"; ObjectID = "bqq-t4-5L5"; */ +"bqq-t4-5L5.text" = "تقييم مجهول"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMStarView.strings b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMStarView.strings new file mode 100644 index 0000000..dbb6658 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMStarView.strings @@ -0,0 +1,12 @@ + +/* Class = "UIButton"; normalTitle = "匿名提交"; ObjectID = "ILd-ex-a5k"; */ +"ILd-ex-a5k.normalTitle" = "إرسال مجهول"; + +/* Class = "UILabel"; text = "匿名评价"; ObjectID = "MaV-co-mC3"; */ +"MaV-co-mC3.text" = "تقييم مجهول"; + +/* Class = "UILabel"; text = "您的评价会让医生做的更好"; ObjectID = "gZR-Ky-CF9"; */ +"gZR-Ky-CF9.text" = "تقييمك سيجعل الأطباء أفضل"; + +/* Class = "UIButton"; normalTitle = "我要投诉"; ObjectID = "tQm-Mq-YYD"; */ +"tQm-Mq-YYD.normalTitle" = "أريد تقديم شكوى"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMTouSuView.strings b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMTouSuView.strings new file mode 100644 index 0000000..9500df1 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMTouSuView.strings @@ -0,0 +1,9 @@ + +/* Class = "UILabel"; text = "投诉"; ObjectID = "Nuz-EA-N37"; */ +"Nuz-EA-N37.text" = "أريد تقديم شكوى"; + +/* Class = "UIButton"; normalTitle = "提交投诉"; ObjectID = "bsC-DO-hz3"; */ +"bsC-DO-hz3.normalTitle" = "إرسال مجهول"; + +/* Class = 
"UITextField"; placeholder = "请填写您的手机号码,来处理投诉事件"; ObjectID = "oap-bd-mdd"; */ +"oap-bd-mdd.placeholder" = "يرجى ملء رقم الاتصال الخاص بك للتعامل مع الشكاوى"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMTousuResView.strings b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMTousuResView.strings new file mode 100644 index 0000000..1c7829d --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/CMTousuResView.strings @@ -0,0 +1,12 @@ + +/* Class = "UILabel"; text = "我们将尽快处理您的投诉,并给您反馈处理结果。抱歉给您带来不好的体验,您可以换个医生问问。"; ObjectID = "7Yc-6C-XSW"; */ +"7Yc-6C-XSW.text" = "سنتعامل مع شكواك في أقرب وقت ممكن وسنقدم لك ملاحظات على النتيجة. آسف للتجربة السيئة ، يمكنك أن تسأل طبيب آخر"; + +/* Class = "UILabel"; text = "感谢反馈"; ObjectID = "QhZ-LC-49k"; */ +"QhZ-LC-49k.text" = "شكرًا لك على ملاحظاتك"; + +/* Class = "UIButton"; normalTitle = "换个医生问问"; ObjectID = "eXb-RC-HJK"; */ +"eXb-RC-HJK.normalTitle" = "تغيير الطبيب"; + +/* Class = "UILabel"; text = "匿名评价"; ObjectID = "lWS-3J-ofl"; */ +"lWS-3J-ofl.text" = "أريد تقديم شكوى"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/ControlView.strings b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/ControlView.strings new file mode 100644 index 0000000..4d628c3 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/ControlView.strings @@ -0,0 +1,30 @@ + +/* Class = "UILabel"; text = "点击重传"; ObjectID = "4X7-jJ-62B"; */ +"4X7-jJ-62B.text" = "انقر لإعادة الإرسال"; + +/* Class = "UILabel"; text = "对服务不满意?"; ObjectID = "9If-1J-NYA"; */ +"9If-1J-NYA.text" = "غير راض عن الخدمة ?"; + +/* Class = "UILabel"; text = "拍照"; ObjectID = "Fia-EQ-k88"; */ +"Fia-EQ-k88.text" = "التصوير"; + +/* Class = "UILabel"; text = "挂断"; ObjectID = "Gje-EQ-Ans"; */ +"Gje-EQ-Ans.text" = "قطع الاتصال"; + +/* Class = "UIButton"; normalTitle = "换个医生"; ObjectID = "KlH-tD-4lo"; */ +"KlH-tD-4lo.normalTitle" = "تغيير طبيب"; + +/* Class = "UILabel"; text = "00:00"; ObjectID = "PdO-Bc-DQu"; */ +"PdO-Bc-DQu.text" = "00:00"; + +/* Class = "UILabel"; text = 
"连接中..."; ObjectID = "dEI-Tw-ffv"; */ +"dEI-Tw-ffv.text" = "...اتصال"; + +/* Class = "UILabel"; text = "切换摄像头"; ObjectID = "e45-pd-LhO"; */ +"e45-pd-LhO.text" = "تبديل الكاميرا"; + +/* Class = "UILabel"; text = "试一试"; ObjectID = "kwn-xe-FD1"; */ +"kwn-xe-FD1.text" = " حاول"; + +/* Class = "UILabel"; text = "更多"; ObjectID = "gZa-5S-uu8"; */ +"gZa-5S-uu8.text" = "More"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/ExpandView.strings b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/ExpandView.strings new file mode 100644 index 0000000..e7d90d2 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/ExpandView.strings @@ -0,0 +1,6 @@ + +/* Class = "UIButton"; normalTitle = "闪光灯"; ObjectID = "FUR-ZX-olE"; */ +"FUR-ZX-olE.normalTitle" = "فلاش"; + +/* Class = "UIButton"; normalTitle = "医师资格证书"; ObjectID = "SBC-mC-L2G"; */ +"SBC-mC-L2G.normalTitle" = "شهادة تأهيل الطبيب"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/HHRealNameInputNewView.strings b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/HHRealNameInputNewView.strings new file mode 100644 index 0000000..953a42b --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/HHRealNameInputNewView.strings @@ -0,0 +1,27 @@ + +/* Class = "UITextField"; placeholder = "请输入身份证号"; ObjectID = "4uF-mc-Xmz"; */ +"4uF-mc-Xmz.placeholder" = "请输入身份证号"; + +/* Class = "UITextField"; placeholder = "请输入身份证号"; ObjectID = "E09-0a-2sA"; */ +"E09-0a-2sA.placeholder" = "请输入身份证号"; + +/* Class = "UILabel"; text = "aaaaaa"; ObjectID = "ESx-4N-xvB"; */ +"ESx-4N-xvB.text" = "aaaaaa"; + +/* Class = "UILabel"; text = "身份证号"; ObjectID = "EUU-Wr-Dgp"; */ +"EUU-Wr-Dgp.text" = "身份证号"; + +/* Class = "UILabel"; text = "手机号码"; ObjectID = "MYm-9o-RIP"; */ +"MYm-9o-RIP.text" = "手机号码"; + +/* Class = "UILabel"; text = "身份证号输入错误,请重新输入"; ObjectID = "Sol-rY-Qkq"; */ +"Sol-rY-Qkq.text" = "身份证号输入错误,请重新输入"; + +/* Class = "UILabel"; text = "需实名成员"; ObjectID = "V7V-P0-NiW"; */ +"V7V-P0-NiW.text" = "需实名成员"; + +/* Class = "UITextField"; 
placeholder = "请输入身份证号"; ObjectID = "bqU-aR-GVR"; */ +"bqU-aR-GVR.placeholder" = "请输入身份证号"; + +/* Class = "UILabel"; text = "真实姓名"; ObjectID = "y53-HA-nfR"; */ +"y53-HA-nfR.text" = "真实姓名"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/Localizable.strings new file mode 100644 index 0000000..6a3c6cd Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/Localizable.strings differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/PhotoGuide.strings b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/PhotoGuide.strings new file mode 100644 index 0000000..e5131bc --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/PhotoGuide.strings @@ -0,0 +1,12 @@ + +/* Class = "UILabel"; text = "请参照上图所示,开启所有照片权限:"; ObjectID = "FSh-K5-qLv"; */ +"FSh-K5-qLv.text" = "يرجى الرجوع إلى الصورة أعلاه لتمكين جميع أذونات الصور:"; + +/* Class = "UIButton"; normalTitle = "取消"; ObjectID = "SDD-43-Kf0"; */ +"SDD-43-Kf0.normalTitle" = "إلغاء"; + +/* Class = "UIButton"; normalTitle = "去设置"; ObjectID = "stP-Pg-03k"; */ +"stP-Pg-03k.normalTitle" = "اذهب إلى المجموعة"; + +/* Class = "UILabel"; text = "选择【照片】-【所有照片】"; ObjectID = "tqk-4N-CCF"; */ +"tqk-4N-CCF.text" = "حدد [صورة] - [كل الصور]"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/PhotoPermissionAlert.strings b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/PhotoPermissionAlert.strings new file mode 100644 index 0000000..cb23902 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/PhotoPermissionAlert.strings @@ -0,0 +1,6 @@ + +/* Class = "UIButton"; normalTitle = "我知道了"; ObjectID = "Kr9-Xd-f61"; */ +"Kr9-Xd-f61.normalTitle" = "أنا أعرف"; + +/* Class = "UILabel"; text = "为保证正常选择照片,请在接下来 的弹窗中点击【允许访问所有照片】 如图所示。"; ObjectID = "nqg-1r-jy3"; */ +"nqg-1r-jy3.text" = "لضمان الاختيار الطبيعي للصور ، يرجى النقر فوق [السماح بالوصول إلى جميع الصور] في النافذة المنبثقة التالية كما هو موضح في الشكل."; diff --git 
a/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/Upload.strings b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/Upload.strings new file mode 100644 index 0000000..c6cc98d --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/Upload.strings @@ -0,0 +1,3 @@ + +/* Class = "UILabel"; text = "点击重传"; ObjectID = "Ncv-BK-oZk"; */ +"Ncv-BK-oZk.text" = "انقر لإعادة الإرسال"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/WaitingView.strings b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/WaitingView.strings new file mode 100644 index 0000000..bef1a0d --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/WaitingView.strings @@ -0,0 +1,15 @@ + +/* Class = "UILabel"; text = "取消"; ObjectID = "9i1-f8-frb"; */ +"9i1-f8-frb.text" = "إلغاء"; + +/* Class = "UILabel"; text = "取消"; ObjectID = "PSL-Bp-3BY"; */ +"PSL-Bp-3BY.text" = "إلغاء"; + +/* Class = "UILabel"; text = "."; ObjectID = "Q5a-vB-g6F"; */ +"Q5a-vB-g6F.text" = "."; + +/* Class = "UILabel"; text = "正在呼叫医生"; ObjectID = "WgS-Vj-Py7"; */ +"WgS-Vj-Py7.text" = "اتصل بالطبيب..."; + +/* Class = "UILabel"; text = "接听"; ObjectID = "cb0-dQ-3sl"; */ +"cb0-dQ-3sl.text" = "إجابة"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/comment.strings b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/comment.strings new file mode 100644 index 0000000..95271df --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/ar.lproj/comment.strings @@ -0,0 +1,9 @@ + +/* Class = "UILabel"; text = "匿名评价"; ObjectID = "Zp4-HG-4r3"; */ +"Zp4-HG-4r3.text" = "تقييم مجهول"; + +/* Class = "UIButton"; normalTitle = "我要投诉"; ObjectID = "lDS-qr-4pL"; */ +"lDS-qr-4pL.normalTitle" = "أريد تقديم شكوى"; + +/* Class = "UILabel"; text = "医生坐姿端正吗?"; ObjectID = "rOF-Er-fyl"; */ +"rOF-Er-fyl.text" = ""; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/HD1-cR-dpx-view-feX-1D-1rH.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/HD1-cR-dpx-view-feX-1D-1rH.nib/objects-11.0+.nib new file mode 100644 index 0000000..b50a0ad 
Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/HD1-cR-dpx-view-feX-1D-1rH.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/HD1-cR-dpx-view-feX-1D-1rH.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/HD1-cR-dpx-view-feX-1D-1rH.nib/runtime.nib new file mode 100644 index 0000000..db5899b Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/HD1-cR-dpx-view-feX-1D-1rH.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/Info.plist b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/Info.plist new file mode 100644 index 0000000..6cab3bb Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/Info.plist differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/Wvh-CQ-zS2-view-abQ-Zf-EZJ.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/Wvh-CQ-zS2-view-abQ-Zf-EZJ.nib/objects-11.0+.nib new file mode 100644 index 0000000..f1bb489 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/Wvh-CQ-zS2-view-abQ-Zf-EZJ.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/Wvh-CQ-zS2-view-abQ-Zf-EZJ.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/Wvh-CQ-zS2-view-abQ-Zf-EZJ.nib/runtime.nib new file mode 100644 index 0000000..c85b886 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/Wvh-CQ-zS2-view-abQ-Zf-EZJ.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatHome.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatHome.nib/objects-11.0+.nib new file mode 100644 index 0000000..4b42d77 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatHome.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatHome.nib/runtime.nib 
b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatHome.nib/runtime.nib new file mode 100644 index 0000000..d0ed6d2 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatHome.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatSetting.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatSetting.nib/objects-11.0+.nib new file mode 100644 index 0000000..78f1edd Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatSetting.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatSetting.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatSetting.nib/runtime.nib new file mode 100644 index 0000000..31c59ad Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/chatSetting.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/inviteCode.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/inviteCode.nib/objects-11.0+.nib new file mode 100644 index 0000000..6add51c Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/inviteCode.nib/objects-11.0+.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/inviteCode.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/inviteCode.nib/runtime.nib new file mode 100644 index 0000000..3ef269d Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/inviteCode.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/owg-fH-2cD-view-Cej-rh-xMJ.nib/objects-11.0+.nib b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/owg-fH-2cD-view-Cej-rh-xMJ.nib/objects-11.0+.nib new file mode 100644 index 0000000..3d7fc20 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/owg-fH-2cD-view-Cej-rh-xMJ.nib/objects-11.0+.nib differ diff --git 
a/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/owg-fH-2cD-view-Cej-rh-xMJ.nib/runtime.nib b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/owg-fH-2cD-view-Cej-rh-xMJ.nib/runtime.nib new file mode 100644 index 0000000..b82ebee Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/chat.storyboardc/owg-fH-2cD-view-Cej-rh-xMJ.nib/runtime.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMBadResView.strings b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMBadResView.strings new file mode 100644 index 0000000..5679d39 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMBadResView.strings @@ -0,0 +1,15 @@ + +/* Class = "UIButton"; normalTitle = "换个医生问问"; ObjectID = "Mpm-Cy-8No"; */ +"Mpm-Cy-8No.normalTitle" = "Changing doctors"; + +/* Class = "UILabel"; text = "匿名评价"; ObjectID = "TEp-ay-Bhn"; */ +"TEp-ay-Bhn.text" = "Anonymous feedback"; + +/* Class = "UILabel"; text = "感谢反馈"; ObjectID = "dfJ-Fs-9dC"; */ +"dfJ-Fs-9dC.text" = "Thank for feedback"; + +/* Class = "UILabel"; text = "很抱歉给您不好的体验,建议您换个医生咨询 视频医生将持续优化用户体验"; ObjectID = "gCO-sp-izU"; */ +"gCO-sp-izU.text" = "Thank you for your feedback. \n We will deal with your complaint as soon as possible and give you feedback on the result. 
Sorry to bring you a bad experience, you can consult another doctor."; + +/* Class = "UIButton"; normalTitle = "我要投诉"; ObjectID = "z3D-0Q-OOx"; */ +"z3D-0Q-OOx.normalTitle" = "Make a complain"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMGoodResView.strings b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMGoodResView.strings new file mode 100644 index 0000000..8f711bb --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMGoodResView.strings @@ -0,0 +1,6 @@ + +/* Class = "UILabel"; text = "感谢反馈,我们将努力提升服务体验"; ObjectID = "MT6-nh-pVb"; */ +"MT6-nh-pVb.text" = "Thank you for the feedback, we will improve our service"; + +/* Class = "UILabel"; text = "匿名评价"; ObjectID = "bqq-t4-5L5"; */ +"bqq-t4-5L5.text" = "Anonymous feedback"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMStarView.strings b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMStarView.strings new file mode 100644 index 0000000..0fd849c --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMStarView.strings @@ -0,0 +1,12 @@ + +/* Class = "UIButton"; normalTitle = "匿名提交"; ObjectID = "ILd-ex-a5k"; */ +"ILd-ex-a5k.normalTitle" = "Complaints"; + +/* Class = "UILabel"; text = "匿名评价"; ObjectID = "MaV-co-mC3"; */ +"MaV-co-mC3.text" = "Anonymous feedback"; + +/* Class = "UILabel"; text = "您的评价会让医生做的更好"; ObjectID = "gZR-Ky-CF9"; */ +"gZR-Ky-CF9.text" = "Your evaluation matters"; + +/* Class = "UIButton"; normalTitle = "我要投诉"; ObjectID = "tQm-Mq-YYD"; */ +"tQm-Mq-YYD.normalTitle" = "Make a complain"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMTouSuView.strings b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMTouSuView.strings new file mode 100644 index 0000000..0e28734 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMTouSuView.strings @@ -0,0 +1,9 @@ + +/* Class = "UILabel"; text = "投诉"; ObjectID = "Nuz-EA-N37"; */ +"Nuz-EA-N37.text" = "Make a complain"; + +/* Class = "UIButton"; normalTitle = "提交投诉"; ObjectID = "bsC-DO-hz3"; */ 
+"bsC-DO-hz3.normalTitle" = "Anonymous submit"; + +/* Class = "UITextField"; placeholder = "请填写您的手机号码,来处理投诉事件"; ObjectID = "oap-bd-mdd"; */ +"oap-bd-mdd.placeholder" = "Please fill in your contact number for handling the complaint"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMTousuResView.strings b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMTousuResView.strings new file mode 100644 index 0000000..c09b75d --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/CMTousuResView.strings @@ -0,0 +1,12 @@ + +/* Class = "UILabel"; text = "我们将尽快处理您的投诉,并给您反馈处理结果。抱歉给您带来不好的体验,您可以换个医生问问。"; ObjectID = "7Yc-6C-XSW"; */ +"7Yc-6C-XSW.text" = "We will deal with your complaint as soon as possible and give you feedback on the result. Sorry to bring you a bad experience, you can consult another doctor."; + +/* Class = "UILabel"; text = "感谢反馈"; ObjectID = "QhZ-LC-49k"; */ +"QhZ-LC-49k.text" = "Thank you for your feedback"; + +/* Class = "UIButton"; normalTitle = "换个医生问问"; ObjectID = "eXb-RC-HJK"; */ +"eXb-RC-HJK.normalTitle" = "Change a doctor"; + +/* Class = "UILabel"; text = "匿名评价"; ObjectID = "lWS-3J-ofl"; */ +"lWS-3J-ofl.text" = "Make a complain"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/ControlView.strings b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/ControlView.strings new file mode 100644 index 0000000..c4db100 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/ControlView.strings @@ -0,0 +1,30 @@ + +/* Class = "UILabel"; text = "点击重传"; ObjectID = "4X7-jJ-62B"; */ +"4X7-jJ-62B.text" = "Click to re-upload"; + +/* Class = "UILabel"; text = "对服务不满意?"; ObjectID = "9If-1J-NYA"; */ +"9If-1J-NYA.text" = "Not satisfied with the service?"; + +/* Class = "UILabel"; text = "拍照"; ObjectID = "Fia-EQ-k88"; */ +"Fia-EQ-k88.text" = "CAMERA"; + +/* Class = "UILabel"; text = "挂断"; ObjectID = "Gje-EQ-Ans"; */ +"Gje-EQ-Ans.text" = "HANG UP"; + +/* Class = "UIButton"; normalTitle = "换个医生"; ObjectID = "KlH-tD-4lo"; */ +"KlH-tD-4lo.normalTitle" = "Try a 
different doctor"; + +/* Class = "UILabel"; text = "00:00"; ObjectID = "PdO-Bc-DQu"; */ +"PdO-Bc-DQu.text" = "00:00"; + +/* Class = "UILabel"; text = "连接中..."; ObjectID = "dEI-Tw-ffv"; */ +"dEI-Tw-ffv.text" = "Connecting..."; + +/* Class = "UILabel"; text = "切换摄像头"; ObjectID = "e45-pd-LhO"; */ +"e45-pd-LhO.text" = "SWITCH CAMERAS"; + +/* Class = "UILabel"; text = "试一试"; ObjectID = "kwn-xe-FD1"; */ +"kwn-xe-FD1.text" = "Have a try."; + +/* Class = "UILabel"; text = "更多"; ObjectID = "gZa-5S-uu8"; */ +"gZa-5S-uu8.text" = "More"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/ExpandView.strings b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/ExpandView.strings new file mode 100644 index 0000000..6587031 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/ExpandView.strings @@ -0,0 +1,6 @@ + +/* Class = "UIButton"; normalTitle = "闪光灯"; ObjectID = "FUR-ZX-olE"; */ +"FUR-ZX-olE.normalTitle" = "Flash lamp"; + +/* Class = "UIButton"; normalTitle = "医师资格证书"; ObjectID = "SBC-mC-L2G"; */ +"SBC-mC-L2G.normalTitle" = "Doctor Qualification Certificate"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/HHRealNameInputNewView.strings b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/HHRealNameInputNewView.strings new file mode 100644 index 0000000..953a42b --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/HHRealNameInputNewView.strings @@ -0,0 +1,27 @@ + +/* Class = "UITextField"; placeholder = "请输入身份证号"; ObjectID = "4uF-mc-Xmz"; */ +"4uF-mc-Xmz.placeholder" = "请输入身份证号"; + +/* Class = "UITextField"; placeholder = "请输入身份证号"; ObjectID = "E09-0a-2sA"; */ +"E09-0a-2sA.placeholder" = "请输入身份证号"; + +/* Class = "UILabel"; text = "aaaaaa"; ObjectID = "ESx-4N-xvB"; */ +"ESx-4N-xvB.text" = "aaaaaa"; + +/* Class = "UILabel"; text = "身份证号"; ObjectID = "EUU-Wr-Dgp"; */ +"EUU-Wr-Dgp.text" = "身份证号"; + +/* Class = "UILabel"; text = "手机号码"; ObjectID = "MYm-9o-RIP"; */ +"MYm-9o-RIP.text" = "手机号码"; + +/* Class = "UILabel"; text = "身份证号输入错误,请重新输入"; ObjectID = 
"Sol-rY-Qkq"; */ +"Sol-rY-Qkq.text" = "身份证号输入错误,请重新输入"; + +/* Class = "UILabel"; text = "需实名成员"; ObjectID = "V7V-P0-NiW"; */ +"V7V-P0-NiW.text" = "需实名成员"; + +/* Class = "UITextField"; placeholder = "请输入身份证号"; ObjectID = "bqU-aR-GVR"; */ +"bqU-aR-GVR.placeholder" = "请输入身份证号"; + +/* Class = "UILabel"; text = "真实姓名"; ObjectID = "y53-HA-nfR"; */ +"y53-HA-nfR.text" = "真实姓名"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/HHRealNameInputView.strings b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/HHRealNameInputView.strings new file mode 100644 index 0000000..703421b --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/HHRealNameInputView.strings @@ -0,0 +1,48 @@ + +/* Class = "UILabel"; text = "证件类型"; ObjectID = "8Gc-Ly-dyt"; */ +"8Gc-Ly-dyt.text" = "ID Type"; + +/* Class = "UILabel"; text = "7周岁以下儿童购买非处方药需填写监护人信息"; ObjectID = "KVK-hN-q28"; */ +"KVK-hN-q28.text" = "Need to input guardian information for children aged under 7."; + +/* Class = "UITextField"; placeholder = "请输入患者真实姓名"; ObjectID = "NKu-JD-W1l"; */ +"NKu-JD-W1l.placeholder" = "Name on your ID"; + +/* Class = "UITextField"; placeholder = "请输入患者真实证件号码"; ObjectID = "PUJ-Uf-7Vj"; */ +"PUJ-Uf-7Vj.placeholder" = "Enter the ID number"; + +/* Class = "UITextField"; placeholder = "请输入监护人真实姓名"; ObjectID = "WKp-a7-SO4"; */ +"WKp-a7-SO4.placeholder" = "Name on your ID"; + +/* Class = "UILabel"; text = "监护人姓名"; ObjectID = "aLs-eO-mDI"; */ +"aLs-eO-mDI.text" = "Name"; + +/* Class = "UILabel"; text = "就诊成员"; ObjectID = "d45-aF-A11"; */ +"d45-aF-A11.text" = "User name"; + +/* Class = "UILabel"; text = "护照"; ObjectID = "fDj-jD-6Ua"; */ +"fDj-jD-6Ua.text" = "Passport"; + +/* Class = "UILabel"; text = "身份证号输入错误,请重新输入"; ObjectID = "gCY-DF-spy"; */ +"gCY-DF-spy.text" = "身份证号输入错误,请重新输入"; + +/* Class = "UITextField"; placeholder = "请输入监护人真实身份证号码"; ObjectID = "gOp-yZ-pM8"; */ +"gOp-yZ-pM8.placeholder" = "Name on your guardian ID"; + +/* Class = "UILabel"; text = "身份证号"; ObjectID = "iQo-gx-qKS"; */ +"iQo-gx-qKS.text" 
= "ID Card"; + +/* Class = "UILabel"; text = "手机号码"; ObjectID = "n3Q-YI-2Bm"; */ +"n3Q-YI-2Bm.text" = "Mobile number +86"; + +/* Class = "UILabel"; text = "身份证号"; ObjectID = "pIf-Vf-5cg"; */ +"pIf-Vf-5cg.text" = "ID Card"; + +/* Class = "UITextField"; placeholder = "请输入手机号"; ObjectID = "rAX-9h-cxA"; */ +"rAX-9h-cxA.placeholder" = "Enter mobile number"; + +/* Class = "UILabel"; text = "真实姓名"; ObjectID = "sdb-9c-QdC"; */ +"sdb-9c-QdC.text" = "Name"; + +/* Class = "UILabel"; text = "真实姓名"; ObjectID = "X3H-am-1eR"; */ +"X3H-am-1eR.text" = "Please upload the image of the page that shows your name and ID number."; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/Localizable.strings new file mode 100644 index 0000000..069cb73 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/Localizable.strings differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/PhotoGuide.strings b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/PhotoGuide.strings new file mode 100644 index 0000000..858397a --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/PhotoGuide.strings @@ -0,0 +1,12 @@ + +/* Class = "UILabel"; text = "请参照上图所示,开启所有照片权限:"; ObjectID = "FSh-K5-qLv"; */ +"FSh-K5-qLv.text" = "Please refer to the image above to enable all photo permissions:"; + +/* Class = "UIButton"; normalTitle = "取消"; ObjectID = "SDD-43-Kf0"; */ +"SDD-43-Kf0.normalTitle" = "Cancel"; + +/* Class = "UIButton"; normalTitle = "去设置"; ObjectID = "stP-Pg-03k"; */ +"stP-Pg-03k.normalTitle" = "Setup Now"; + +/* Class = "UILabel"; text = "选择【照片】-【所有照片】"; ObjectID = "tqk-4N-CCF"; */ +"tqk-4N-CCF.text" = "Press [Photos] - [All Photos]"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/PhotoPermissionAlert.strings b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/PhotoPermissionAlert.strings new file mode 100644 index 0000000..5eacb12 --- /dev/null +++ 
b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/PhotoPermissionAlert.strings @@ -0,0 +1,6 @@ + +/* Class = "UIButton"; normalTitle = "我知道了"; ObjectID = "Kr9-Xd-f61"; */ +"Kr9-Xd-f61.normalTitle" = "I got it"; + +/* Class = "UILabel"; text = "为保证正常选择照片,请在接下来 的弹窗中点击【允许访问所有照片】 如图所示。"; ObjectID = "nqg-1r-jy3"; */ +"nqg-1r-jy3.text" = "为保证正常选择照片,请在接下来 的弹窗中点击【允许访问所有照片】 如图所示。"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/Upload.strings b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/Upload.strings new file mode 100644 index 0000000..33aa739 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/Upload.strings @@ -0,0 +1,3 @@ + +/* Class = "UILabel"; text = "点击重传"; ObjectID = "Ncv-BK-oZk"; */ +"Ncv-BK-oZk.text" = "click to re-upload"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/WaitingView.strings b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/WaitingView.strings new file mode 100644 index 0000000..f81ce70 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/WaitingView.strings @@ -0,0 +1,15 @@ + +/* Class = "UILabel"; text = "取消"; ObjectID = "9i1-f8-frb"; */ +"9i1-f8-frb.text" = "REJECT"; + +/* Class = "UILabel"; text = "取消"; ObjectID = "PSL-Bp-3BY"; */ +"PSL-Bp-3BY.text" = "CANCEL"; + +/* Class = "UILabel"; text = "."; ObjectID = "Q5a-vB-g6F"; */ +"Q5a-vB-g6F.text" = "."; + +/* Class = "UILabel"; text = "正在呼叫医生"; ObjectID = "WgS-Vj-Py7"; */ +"WgS-Vj-Py7.text" = "Calling"; + +/* Class = "UILabel"; text = "接听"; ObjectID = "cb0-dQ-3sl"; */ +"cb0-dQ-3sl.text" = "ANSWER"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/comment.strings b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/comment.strings new file mode 100644 index 0000000..584d1ce --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/en.lproj/comment.strings @@ -0,0 +1,9 @@ + +/* Class = "UILabel"; text = "匿名评价"; ObjectID = "Zp4-HG-4r3"; */ +"Zp4-HG-4r3.text" = "Anonymous feedback"; + +/* Class = "UIButton"; normalTitle = "我要投诉"; ObjectID = "lDS-qr-4pL"; */ 
+"lDS-qr-4pL.normalTitle" = "Make a complain"; + +/* Class = "UILabel"; text = "医生坐姿端正吗?"; ObjectID = "rOF-Er-fyl"; */ +"rOF-Er-fyl.text" = "Is the doctor sitting up straight?"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/Info.plist b/HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/Info.plist new file mode 100644 index 0000000..bcd9a2b Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/Info.plist differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/addMember.nib b/HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/addMember.nib new file mode 100644 index 0000000..c5a5cc9 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/addMember.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/profit.nib b/HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/profit.nib new file mode 100644 index 0000000..c55c208 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/profit.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/xsQ-4A-MAW-view-Qcf-gy-1uQ.nib b/HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/xsQ-4A-MAW-view-Qcf-gy-1uQ.nib new file mode 100644 index 0000000..eacf321 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/member.storyboardc/xsQ-4A-MAW-view-Qcf-gy-1uQ.nib differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMBadResView.strings b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMBadResView.strings new file mode 100644 index 0000000..f76d29a --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMBadResView.strings @@ -0,0 +1,15 @@ + +/* Class = "UIButton"; normalTitle = "换个医生问问"; ObjectID = "Mpm-Cy-8No"; */ +"Mpm-Cy-8No.normalTitle" = "换个医生问问"; + +/* Class = "UILabel"; text = "匿名评价"; ObjectID = "TEp-ay-Bhn"; */ +"TEp-ay-Bhn.text" = "匿名评价"; + +/* Class = "UILabel"; text = "感谢反馈"; ObjectID = "dfJ-Fs-9dC"; */ +"dfJ-Fs-9dC.text" = 
"感谢反馈"; + +/* Class = "UILabel"; text = "很抱歉给您不好的体验,建议您换个医生咨询 视频医生将持续优化用户体验"; ObjectID = "gCO-sp-izU"; */ +"gCO-sp-izU.text" = "很抱歉给您不好的体验,建议您换个医生咨询 视频医生将持续优化用户体验"; + +/* Class = "UIButton"; normalTitle = "我要投诉"; ObjectID = "z3D-0Q-OOx"; */ +"z3D-0Q-OOx.normalTitle" = "我要投诉"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMGoodResView.strings b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMGoodResView.strings new file mode 100644 index 0000000..4a752f3 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMGoodResView.strings @@ -0,0 +1,6 @@ + +/* Class = "UILabel"; text = "感谢反馈,我们将努力提升服务体验"; ObjectID = "MT6-nh-pVb"; */ +"MT6-nh-pVb.text" = "感谢反馈,我们将努力提升服务体验"; + +/* Class = "UILabel"; text = "匿名评价"; ObjectID = "bqq-t4-5L5"; */ +"bqq-t4-5L5.text" = "匿名评价"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMStarView.strings b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMStarView.strings new file mode 100644 index 0000000..645c3e0 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMStarView.strings @@ -0,0 +1,12 @@ + +/* Class = "UIButton"; normalTitle = "匿名提交"; ObjectID = "ILd-ex-a5k"; */ +"ILd-ex-a5k.normalTitle" = "匿名提交"; + +/* Class = "UILabel"; text = "匿名评价"; ObjectID = "MaV-co-mC3"; */ +"MaV-co-mC3.text" = "匿名评价"; + +/* Class = "UILabel"; text = "您的评价会让医生做的更好"; ObjectID = "gZR-Ky-CF9"; */ +"gZR-Ky-CF9.text" = "您的评价会让医生做的更好"; + +/* Class = "UIButton"; normalTitle = "我要投诉"; ObjectID = "tQm-Mq-YYD"; */ +"tQm-Mq-YYD.normalTitle" = "我要投诉"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMTouSuView.strings b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMTouSuView.strings new file mode 100644 index 0000000..73f7818 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMTouSuView.strings @@ -0,0 +1,9 @@ + +/* Class = "UILabel"; text = "投诉"; ObjectID = "Nuz-EA-N37"; */ +"Nuz-EA-N37.text" = "投诉"; + +/* Class = "UIButton"; normalTitle = "提交投诉"; ObjectID = "bsC-DO-hz3"; */ 
+"bsC-DO-hz3.normalTitle" = "提交投诉"; + +/* Class = "UITextField"; placeholder = "请填写您的手机号码,来处理投诉事件"; ObjectID = "oap-bd-mdd"; */ +"oap-bd-mdd.placeholder" = "请填写您的手机号码,来处理投诉事件"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMTousuResView.strings b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMTousuResView.strings new file mode 100644 index 0000000..7d2f6f6 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/CMTousuResView.strings @@ -0,0 +1,12 @@ + +/* Class = "UILabel"; text = "我们将尽快处理您的投诉,并给您反馈处理结果。抱歉给您带来不好的体验,您可以换个医生问问。"; ObjectID = "7Yc-6C-XSW"; */ +"7Yc-6C-XSW.text" = "我们将尽快处理您的投诉,并给您反馈处理结果。抱歉给您带来不好的体验,您可以换个医生问问。"; + +/* Class = "UILabel"; text = "感谢反馈"; ObjectID = "QhZ-LC-49k"; */ +"QhZ-LC-49k.text" = "感谢反馈"; + +/* Class = "UIButton"; normalTitle = "换个医生问问"; ObjectID = "eXb-RC-HJK"; */ +"eXb-RC-HJK.normalTitle" = "换个医生问问"; + +/* Class = "UILabel"; text = "匿名评价"; ObjectID = "lWS-3J-ofl"; */ +"lWS-3J-ofl.text" = "匿名评价"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/ControlView.strings b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/ControlView.strings new file mode 100644 index 0000000..3653357 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/ControlView.strings @@ -0,0 +1,31 @@ + +/* Class = "UILabel"; text = "点击重传"; ObjectID = "4X7-jJ-62B"; */ +"4X7-jJ-62B.text" = "点击重传"; + +/* Class = "UILabel"; text = "对服务不满意?"; ObjectID = "9If-1J-NYA"; */ +"9If-1J-NYA.text" = "对服务不满意?"; + +/* Class = "UILabel"; text = "拍照"; ObjectID = "Fia-EQ-k88"; */ +"Fia-EQ-k88.text" = "拍照"; + +/* Class = "UILabel"; text = "挂断"; ObjectID = "Gje-EQ-Ans"; */ +"Gje-EQ-Ans.text" = "挂断"; + +/* Class = "UIButton"; normalTitle = "换个医生"; ObjectID = "KlH-tD-4lo"; */ +"KlH-tD-4lo.normalTitle" = "换个医生"; + +/* Class = "UILabel"; text = "00:00"; ObjectID = "PdO-Bc-DQu"; */ +"PdO-Bc-DQu.text" = "00:00"; + +/* Class = "UILabel"; text = "连接中..."; ObjectID = "dEI-Tw-ffv"; */ +"dEI-Tw-ffv.text" = "连接中..."; + +/* Class = "UILabel"; 
text = "切换摄像头"; ObjectID = "e45-pd-LhO"; */ +"e45-pd-LhO.text" = "翻转"; + +/* Class = "UILabel"; text = "试一试"; ObjectID = "kwn-xe-FD1"; */ +"kwn-xe-FD1.text" = "试一试"; + +/* Class = "UILabel"; text = "更多"; ObjectID = "gZa-5S-uu8"; */ +"gZa-5S-uu8.text" = "更多"; + diff --git a/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/ExpandView.strings b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/ExpandView.strings new file mode 100644 index 0000000..c5a0c8f --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/ExpandView.strings @@ -0,0 +1,6 @@ + +/* Class = "UIButton"; normalTitle = "闪光灯"; ObjectID = "FUR-ZX-olE"; */ +"FUR-ZX-olE.normalTitle" = "闪光灯"; + +/* Class = "UIButton"; normalTitle = "医师资格证书"; ObjectID = "SBC-mC-L2G"; */ +"SBC-mC-L2G.normalTitle" = "资格证"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/HHRealNameInputNewView.strings b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/HHRealNameInputNewView.strings new file mode 100644 index 0000000..953a42b --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/HHRealNameInputNewView.strings @@ -0,0 +1,27 @@ + +/* Class = "UITextField"; placeholder = "请输入身份证号"; ObjectID = "4uF-mc-Xmz"; */ +"4uF-mc-Xmz.placeholder" = "请输入身份证号"; + +/* Class = "UITextField"; placeholder = "请输入身份证号"; ObjectID = "E09-0a-2sA"; */ +"E09-0a-2sA.placeholder" = "请输入身份证号"; + +/* Class = "UILabel"; text = "aaaaaa"; ObjectID = "ESx-4N-xvB"; */ +"ESx-4N-xvB.text" = "aaaaaa"; + +/* Class = "UILabel"; text = "身份证号"; ObjectID = "EUU-Wr-Dgp"; */ +"EUU-Wr-Dgp.text" = "身份证号"; + +/* Class = "UILabel"; text = "手机号码"; ObjectID = "MYm-9o-RIP"; */ +"MYm-9o-RIP.text" = "手机号码"; + +/* Class = "UILabel"; text = "身份证号输入错误,请重新输入"; ObjectID = "Sol-rY-Qkq"; */ +"Sol-rY-Qkq.text" = "身份证号输入错误,请重新输入"; + +/* Class = "UILabel"; text = "需实名成员"; ObjectID = "V7V-P0-NiW"; */ +"V7V-P0-NiW.text" = "需实名成员"; + +/* Class = "UITextField"; placeholder = "请输入身份证号"; ObjectID = "bqU-aR-GVR"; */ +"bqU-aR-GVR.placeholder" = "请输入身份证号"; + +/* Class 
= "UILabel"; text = "真实姓名"; ObjectID = "y53-HA-nfR"; */ +"y53-HA-nfR.text" = "真实姓名"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/HHRealNameInputView.strings b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/HHRealNameInputView.strings new file mode 100644 index 0000000..43b2fdb --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/HHRealNameInputView.strings @@ -0,0 +1,48 @@ + +/* Class = "UILabel"; text = "证件类型"; ObjectID = "8Gc-Ly-dyt"; */ +"8Gc-Ly-dyt.text" = "证件类型"; + +/* Class = "UILabel"; text = "7周岁以下儿童购买非处方药需填写监护人信息"; ObjectID = "KVK-hN-q28"; */ +"KVK-hN-q28.text" = "7周岁以下儿童需填写监护人信息"; + +/* Class = "UITextField"; placeholder = "请输入患者真实姓名"; ObjectID = "NKu-JD-W1l"; */ +"NKu-JD-W1l.placeholder" = "请输入真实姓名"; + +/* Class = "UITextField"; placeholder = "请输入患者真实身份证号码"; ObjectID = "PUJ-Uf-7Vj"; */ +"PUJ-Uf-7Vj.placeholder" = "请输入真实证件号码"; + +/* Class = "UITextField"; placeholder = "请输入监护人真实姓名"; ObjectID = "WKp-a7-SO4"; */ +"WKp-a7-SO4.placeholder" = "请输入监护人真实姓名"; + +/* Class = "UILabel"; text = "监护人姓名"; ObjectID = "aLs-eO-mDI"; */ +"aLs-eO-mDI.text" = "监护人姓名"; + +/* Class = "UILabel"; text = "就诊成员"; ObjectID = "d45-aF-A11"; */ +"d45-aF-A11.text" = "就诊成员"; + +/* Class = "UILabel"; text = "护照"; ObjectID = "fDj-jD-6Ua"; */ +"fDj-jD-6Ua.text" = "护照"; + +/* Class = "UILabel"; text = "身份证号输入错误,请重新输入"; ObjectID = "gCY-DF-spy"; */ +"gCY-DF-spy.text" = "身份证号输入错误,请重新输入"; + +/* Class = "UITextField"; placeholder = "请输入监护人真实身份证号码"; ObjectID = "gOp-yZ-pM8"; */ +"gOp-yZ-pM8.placeholder" = "请输入监护人真实身份证号码"; + +/* Class = "UILabel"; text = "身份证号"; ObjectID = "iQo-gx-qKS"; */ +"iQo-gx-qKS.text" = "身份证号"; + +/* Class = "UILabel"; text = "手机号码"; ObjectID = "n3Q-YI-2Bm"; */ +"n3Q-YI-2Bm.text" = "手机号码 +86"; + +/* Class = "UILabel"; text = "身份证号"; ObjectID = "pIf-Vf-5cg"; */ +"pIf-Vf-5cg.text" = "身份证号"; + +/* Class = "UITextField"; placeholder = "请输入手机号"; ObjectID = "rAX-9h-cxA"; */ +"rAX-9h-cxA.placeholder" = "请输入联系人电话"; + +/* Class = "UILabel"; text = 
"真实姓名"; ObjectID = "sdb-9c-QdC"; */ +"sdb-9c-QdC.text" = "真实姓名"; + +/* Class = "UILabel"; text = "真实姓名"; ObjectID = "X3H-am-1eR"; */ +"X3H-am-1eR.text" = "请上传证件照片"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/Localizable.strings b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/Localizable.strings new file mode 100644 index 0000000..21094a7 Binary files /dev/null and b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/Localizable.strings differ diff --git a/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/PhotoGuide.strings b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/PhotoGuide.strings new file mode 100644 index 0000000..2d9452c --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/PhotoGuide.strings @@ -0,0 +1,12 @@ + +/* Class = "UILabel"; text = "请参照上图所示,开启所有照片权限:"; ObjectID = "FSh-K5-qLv"; */ +"FSh-K5-qLv.text" = "请参照上图所示,开启所有照片权限:"; + +/* Class = "UIButton"; normalTitle = "取消"; ObjectID = "SDD-43-Kf0"; */ +"SDD-43-Kf0.normalTitle" = "取消"; + +/* Class = "UIButton"; normalTitle = "去设置"; ObjectID = "stP-Pg-03k"; */ +"stP-Pg-03k.normalTitle" = "去设置"; + +/* Class = "UILabel"; text = "选择【照片】-【所有照片】"; ObjectID = "tqk-4N-CCF"; */ +"tqk-4N-CCF.text" = "选择【照片】-【所有照片】"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/PhotoPermissionAlert.strings b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/PhotoPermissionAlert.strings new file mode 100644 index 0000000..d478bd0 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/PhotoPermissionAlert.strings @@ -0,0 +1,6 @@ + +/* Class = "UIButton"; normalTitle = "我知道了"; ObjectID = "Kr9-Xd-f61"; */ +"Kr9-Xd-f61.normalTitle" = "我知道了"; + +/* Class = "UILabel"; text = "为保证正常选择照片,请在接下来 的弹窗中点击【允许访问所有照片】 如图所示。"; ObjectID = "nqg-1r-jy3"; */ +"nqg-1r-jy3.text" = "为保证正常选择照片,请在接下来 的弹窗中点击【允许访问所有照片】 如图所示。"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/Upload.strings b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/Upload.strings new file mode 100644 index 
0000000..2efb1c2 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/Upload.strings @@ -0,0 +1,3 @@ + +/* Class = "UILabel"; text = "点击重传"; ObjectID = "Ncv-BK-oZk"; */ +"Ncv-BK-oZk.text" = "点击重传"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/WaitingView.strings b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/WaitingView.strings new file mode 100644 index 0000000..2aec542 --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/WaitingView.strings @@ -0,0 +1,15 @@ + +/* Class = "UILabel"; text = "取消"; ObjectID = "9i1-f8-frb"; */ +"9i1-f8-frb.text" = "拒绝"; + +/* Class = "UILabel"; text = "取消"; ObjectID = "PSL-Bp-3BY"; */ +"PSL-Bp-3BY.text" = "取消"; + +/* Class = "UILabel"; text = "."; ObjectID = "Q5a-vB-g6F"; */ +"Q5a-vB-g6F.text" = "."; + +/* Class = "UILabel"; text = "正在呼叫医生"; ObjectID = "WgS-Vj-Py7"; */ +"WgS-Vj-Py7.text" = "正在呼叫医生"; + +/* Class = "UILabel"; text = "接听"; ObjectID = "cb0-dQ-3sl"; */ +"cb0-dQ-3sl.text" = "接听"; diff --git a/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/comment.strings b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/comment.strings new file mode 100644 index 0000000..c3a680f --- /dev/null +++ b/HHVDoctorSDK/HHSDKVideo.framework/zh-Hans.lproj/comment.strings @@ -0,0 +1,9 @@ + +/* Class = "UILabel"; text = "匿名评价"; ObjectID = "Zp4-HG-4r3"; */ +"Zp4-HG-4r3.text" = "匿名评价"; + +/* Class = "UIButton"; normalTitle = "我要投诉"; ObjectID = "lDS-qr-4pL"; */ +"lDS-qr-4pL.normalTitle" = "我要投诉"; + +/* Class = "UILabel"; text = "医生坐姿端正吗?"; ObjectID = "rOF-Er-fyl"; */ +"rOF-Er-fyl.text" = "医生坐姿端正吗?"; diff --git a/HHVDoctorSDK/SecurityKit.framework/.DS_Store b/HHVDoctorSDK/SecurityKit.framework/.DS_Store index ba578c9..a685d10 100644 Binary files a/HHVDoctorSDK/SecurityKit.framework/.DS_Store and b/HHVDoctorSDK/SecurityKit.framework/.DS_Store differ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/.DS_Store b/HHVDoctorSDK/TXFFmpeg.xcframework/.DS_Store new file mode 100644 index 0000000..e18b7c4 Binary files 
/dev/null and b/HHVDoctorSDK/TXFFmpeg.xcframework/.DS_Store differ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/Info.plist b/HHVDoctorSDK/TXFFmpeg.xcframework/Info.plist new file mode 100644 index 0000000..d30576a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/Info.plist @@ -0,0 +1,40 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>AvailableLibraries</key> + <array> + <dict> + <key>LibraryIdentifier</key> + <string>ios-arm64_armv7</string> + <key>LibraryPath</key> + <string>TXFFmpeg.framework</string> + <key>SupportedArchitectures</key> + <array> + <string>arm64</string> + <string>armv7</string> + </array> + <key>SupportedPlatform</key> + <string>ios</string> + </dict> + <dict> + <key>LibraryIdentifier</key> + <string>ios-x86_64-simulator</string> + <key>LibraryPath</key> + <string>TXFFmpeg.framework</string> + <key>SupportedArchitectures</key> + <array> + <string>x86_64</string> + </array> + <key>SupportedPlatform</key> + <string>ios</string> + <key>SupportedPlatformVariant</key> + <string>simulator</string> + </dict> + </array> + <key>CFBundlePackageType</key> + <string>XFWK</string> + <key>XCFrameworkFormatVersion</key> + <string>1.0</string> +</dict> +</plist> diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/.DS_Store b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/.DS_Store new file mode 100644 index 0000000..b549297 Binary files /dev/null and b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/.DS_Store differ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/.DS_Store b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/.DS_Store new file mode 100644 index 0000000..e935736 Binary files /dev/null and b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/.DS_Store differ diff --git 
a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/TXFFmpeg.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/TXFFmpeg.h new file mode 100644 index 0000000..9eba835 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/TXFFmpeg.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2022 Tencent. All Rights Reserved. + * + */ + +#import <TXFFmpeg/ffmpeg_rename_defines.h> +#import <TXFFmpeg/libavutil/adler32.h> +#import <TXFFmpeg/libavutil/aes.h> +#import <TXFFmpeg/libavutil/aes_ctr.h> +#import <TXFFmpeg/libavutil/attributes.h> +#import <TXFFmpeg/libavutil/audio_fifo.h> +#import <TXFFmpeg/libavutil/avassert.h> +#import <TXFFmpeg/libavutil/avstring.h> +#import <TXFFmpeg/libavutil/avutil.h> +#import <TXFFmpeg/libavutil/base64.h> +#import <TXFFmpeg/libavutil/blowfish.h> +#import <TXFFmpeg/libavutil/bprint.h> +#import <TXFFmpeg/libavutil/bswap.h> +#import <TXFFmpeg/libavutil/buffer.h> +#import <TXFFmpeg/libavutil/cast5.h> +#import <TXFFmpeg/libavutil/camellia.h> +#import <TXFFmpeg/libavutil/channel_layout.h> +#import <TXFFmpeg/libavutil/common.h> +#import <TXFFmpeg/libavutil/cpu.h> +#import <TXFFmpeg/libavutil/crc.h> +#import <TXFFmpeg/libavutil/des.h> +#import <TXFFmpeg/libavutil/dict.h> +#import <TXFFmpeg/libavutil/display.h> +#import <TXFFmpeg/libavutil/dovi_meta.h> +#import <TXFFmpeg/libavutil/downmix_info.h> +#import <TXFFmpeg/libavutil/encryption_info.h> +#import <TXFFmpeg/libavutil/error.h> +#import <TXFFmpeg/libavutil/eval.h> +#import <TXFFmpeg/libavutil/fifo.h> +#import <TXFFmpeg/libavutil/file.h> +#import <TXFFmpeg/libavutil/frame.h> +#import <TXFFmpeg/libavutil/hash.h> +#import <TXFFmpeg/libavutil/hmac.h> +#import <TXFFmpeg/libavutil/hwcontext.h> +#import <TXFFmpeg/libavutil/hwcontext_cuda.h> +#import <TXFFmpeg/libavutil/hwcontext_d3d11va.h> +#import <TXFFmpeg/libavutil/hwcontext_drm.h> +#import <TXFFmpeg/libavutil/hwcontext_dxva2.h> +#import 
<TXFFmpeg/libavutil/hwcontext_qsv.h> +#import <TXFFmpeg/libavutil/hwcontext_mediacodec.h> +#import <TXFFmpeg/libavutil/hwcontext_vaapi.h> +#import <TXFFmpeg/libavutil/hwcontext_videotoolbox.h> +#import <TXFFmpeg/libavutil/hwcontext_vdpau.h> +#import <TXFFmpeg/libavutil/imgutils.h> +#import <TXFFmpeg/libavutil/intfloat.h> +#import <TXFFmpeg/libavutil/intreadwrite.h> +#import <TXFFmpeg/libavutil/lfg.h> +#import <TXFFmpeg/libavutil/log.h> +#import <TXFFmpeg/libavutil/macros.h> +#import <TXFFmpeg/libavutil/mathematics.h> +#import <TXFFmpeg/libavutil/mastering_display_metadata.h> +#import <TXFFmpeg/libavutil/md5.h> +#import <TXFFmpeg/libavutil/mem.h> +#import <TXFFmpeg/libavutil/motion_vector.h> +#import <TXFFmpeg/libavutil/murmur3.h> +#import <TXFFmpeg/libavutil/opt.h> +#import <TXFFmpeg/libavutil/parseutils.h> +#import <TXFFmpeg/libavutil/pixdesc.h> +#import <TXFFmpeg/libavutil/pixfmt.h> +#import <TXFFmpeg/libavutil/random_seed.h> +#import <TXFFmpeg/libavutil/rc4.h> +#import <TXFFmpeg/libavutil/rational.h> +#import <TXFFmpeg/libavutil/replaygain.h> +#import <TXFFmpeg/libavutil/ripemd.h> +#import <TXFFmpeg/libavutil/samplefmt.h> +#import <TXFFmpeg/libavutil/sha.h> +#import <TXFFmpeg/libavutil/sha512.h> +#import <TXFFmpeg/libavutil/spherical.h> +#import <TXFFmpeg/libavutil/stereo3d.h> +#import <TXFFmpeg/libavutil/threadmessage.h> +#import <TXFFmpeg/libavutil/time.h> +#import <TXFFmpeg/libavutil/timecode.h> +#import <TXFFmpeg/libavutil/timestamp.h> +#import <TXFFmpeg/libavutil/tree.h> +#import <TXFFmpeg/libavutil/twofish.h> +#import <TXFFmpeg/libavutil/version.h> +#import <TXFFmpeg/libavutil/xtea.h> +#import <TXFFmpeg/libavutil/tea.h> +#import <TXFFmpeg/libavutil/pthread_helper.h> +#import <TXFFmpeg/libavutil/tx.h> +#import <TXFFmpeg/libavutil/avconfig.h> +#import <TXFFmpeg/libavutil/ffversion.h> +#import <TXFFmpeg/libavutil/lzo.h> +#import <TXFFmpeg/libavfilter/avfilter.h> +#import <TXFFmpeg/libavfilter/version.h> +#import <TXFFmpeg/libavfilter/buffersink.h> +#import 
<TXFFmpeg/libavfilter/buffersrc.h> +#import <TXFFmpeg/libswresample/swresample.h> +#import <TXFFmpeg/libswresample/version.h> +#import <TXFFmpeg/libswscale/swscale.h> +#import <TXFFmpeg/libswscale/version.h> +#import <TXFFmpeg/libavcodec/ac3_parser.h> +#import <TXFFmpeg/libavcodec/adts_parser.h> +#import <TXFFmpeg/libavcodec/avcodec.h> +#import <TXFFmpeg/libavcodec/avdct.h> +#import <TXFFmpeg/libavcodec/avfft.h> +#import <TXFFmpeg/libavcodec/d3d11va.h> +#import <TXFFmpeg/libavcodec/dirac.h> +#import <TXFFmpeg/libavcodec/dv_profile.h> +#import <TXFFmpeg/libavcodec/dxva2.h> +#import <TXFFmpeg/libavcodec/jni.h> +#import <TXFFmpeg/libavcodec/mediacodec.h> +#import <TXFFmpeg/libavcodec/qsv.h> +#import <TXFFmpeg/libavcodec/vaapi.h> +#import <TXFFmpeg/libavcodec/vdpau.h> +#import <TXFFmpeg/libavcodec/version.h> +#import <TXFFmpeg/libavcodec/videotoolbox.h> +#import <TXFFmpeg/libavcodec/vorbis_parser.h> +#import <TXFFmpeg/libavcodec/xvmc.h> +#import <TXFFmpeg/libavcodec/ass_split.h> +#import <TXFFmpeg/libavcodec/bytestream.h> +#import <TXFFmpeg/libavformat/avformat.h> +#import <TXFFmpeg/libavformat/avio.h> +#import <TXFFmpeg/libavformat/version.h> +#import <TXFFmpeg/libavformat/internal.h> +#import <TXFFmpeg/libavformat/os_support.h> +#import <TXFFmpeg/libavformat/avc.h> +#import <TXFFmpeg/libavformat/url.h> diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/ffmpeg_rename_defines.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/ffmpeg_rename_defines.h new file mode 100644 index 0000000..f90e7b3 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/ffmpeg_rename_defines.h @@ -0,0 +1,3462 @@ +// Copyright (c) 2021 Tencent. All rights reserved. + +// This file generated by rename_symbols_generater.py. +// Do not modify it manually. 
+ +#ifndef THIRD_PARTY_FFMPEG_FFMPEG_RENAME_DEFINES_H +#define THIRD_PARTY_FFMPEG_FFMPEG_RENAME_DEFINES_H + +// clang-format off +#define ff_hevc_merge_flag_decode liteav_ff_hevc_merge_flag_decode +#define ff_deblock_h_chroma_10_avx liteav_ff_deblock_h_chroma_10_avx +#define ff_pred16x16_plane_rv40_8_mmx liteav_ff_pred16x16_plane_rv40_8_mmx +#define ff_avg_h264_qpel8_mc03_neon liteav_ff_avg_h264_qpel8_mc03_neon +#define ff_h264_direct_dist_scale_factor liteav_ff_h264_direct_dist_scale_factor +#define ff_deblock_h_chroma_intra_8_mmxext liteav_ff_deblock_h_chroma_intra_8_mmxext +#define av_buffer_is_writable liteav_av_buffer_is_writable +#define ff_pw_96 liteav_ff_pw_96 +#define webvtt_packet_parse liteav_webvtt_packet_parse +#define ff_dv_frame_profile liteav_ff_dv_frame_profile +#define av_buffer_unref liteav_av_buffer_unref +#define av_opt_query_ranges_default liteav_av_opt_query_ranges_default +#define av_frame_set_color_range liteav_av_frame_set_color_range +#define av_bprint_init liteav_av_bprint_init +#define av_des_mac liteav_av_des_mac +#define ff_init_desc_chscale liteav_ff_init_desc_chscale +#define ff_fdctdsp_init liteav_ff_fdctdsp_init +#define ff_hevcdsp_init_neon_intrinsics liteav_ff_hevcdsp_init_neon_intrinsics +#define ff_pcm_read_seek liteav_ff_pcm_read_seek +#define av_fifo_generic_write liteav_av_fifo_generic_write +#define avio_close_dir liteav_avio_close_dir +#define av_strlcpy liteav_av_strlcpy +#define av_sha_final liteav_av_sha_final +#define avfilter_link liteav_avfilter_link +#define ff_mpeg4_intra_run liteav_ff_mpeg4_intra_run +#define ff_check_interrupt liteav_ff_check_interrupt +#define ff_ps_hybrid_synthesis_deint_neon liteav_ff_ps_hybrid_synthesis_deint_neon +#define av_strdup liteav_av_strdup +#define av_get_channel_layout_nb_channels liteav_av_get_channel_layout_nb_channels +#define ff_sws_init_output_funcs liteav_ff_sws_init_output_funcs +#define ff_pred4x4_horizontal_down_10_ssse3 liteav_ff_pred4x4_horizontal_down_10_ssse3 +#define 
ff_hevc_pred_angular_16x16_v_zero_neon_8 liteav_ff_hevc_pred_angular_16x16_v_zero_neon_8 +#define ff_put_h264_qpel8_mc30_neon liteav_ff_put_h264_qpel8_mc30_neon +#define vlc_css_declaration_New liteav_vlc_css_declaration_New +#define ff_videotoolbox_alloc_frame liteav_ff_videotoolbox_alloc_frame +#define ff_draw_init liteav_ff_draw_init +#define av_find_best_pix_fmt_of_2 liteav_av_find_best_pix_fmt_of_2 +#define ff_avg_pixels16_xy2_neon liteav_ff_avg_pixels16_xy2_neon +#define avpriv_slicethread_free liteav_avpriv_slicethread_free +#define ff_blockdsp_init_x86 liteav_ff_blockdsp_init_x86 +#define av_tree_node_size liteav_av_tree_node_size +#define ff_pred4x4_down_left_10_sse2 liteav_ff_pred4x4_down_left_10_sse2 +#define av_image_fill_max_pixsteps liteav_av_image_fill_max_pixsteps +#define ff_attach_decode_data liteav_ff_attach_decode_data +#define ff_aic_dc_scale_table liteav_ff_aic_dc_scale_table +#define ff_h264_idct_add16_8_mmxext liteav_ff_h264_idct_add16_8_mmxext +#define ff_mp4_read_descr liteav_ff_mp4_read_descr +#define ffurl_closep liteav_ffurl_closep +#define ff_mov_init_hinting liteav_ff_mov_init_hinting +#define ff_hevc_put_pel_uw_pixels_w4_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w4_neon_8_asm +#define av_packet_new_side_data liteav_av_packet_new_side_data +#define ff_hevc_put_qpel_uw_v3_neon_8 liteav_ff_hevc_put_qpel_uw_v3_neon_8 +#define ff_dct32_float_sse2 liteav_ff_dct32_float_sse2 +#define av_append_path_component liteav_av_append_path_component +#define ff_pack_8ch_float_to_float_u_sse2 liteav_ff_pack_8ch_float_to_float_u_sse2 +#define av_log_set_level liteav_av_log_set_level +#define ff_h264_chroma422_dc_scan liteav_ff_h264_chroma422_dc_scan +#define ff_af_aformat liteav_ff_af_aformat +#define ff_pw_4 liteav_ff_pw_4 +#define ff_fmt_is_in liteav_ff_fmt_is_in +#define ff_pw_2 liteav_ff_pw_2 +#define ff_pw_3 liteav_ff_pw_3 +#define ff_hyscale_fast_c liteav_ff_hyscale_fast_c +#define ff_pw_1 liteav_ff_pw_1 +#define ff_pw_8 liteav_ff_pw_8 +#define 
av_opt_is_set_to_default liteav_av_opt_is_set_to_default +#define ff_dither_2x2_4 liteav_ff_dither_2x2_4 +#define ff_flac_parse_picture liteav_ff_flac_parse_picture +#define ff_dct32_fixed liteav_ff_dct32_fixed +#define ff_h264_weight_4_10_sse2 liteav_ff_h264_weight_4_10_sse2 +#define ff_put_pixels8_l2_mmxext liteav_ff_put_pixels8_l2_mmxext +#define ff_h263_static_rl_table_store liteav_ff_h263_static_rl_table_store +#define ff_mpv_common_init liteav_ff_mpv_common_init +#define rgb24to32 liteav_rgb24to32 +#define ff_aac_num_swb_128 liteav_ff_aac_num_swb_128 +#define av_videotoolbox_default_free liteav_av_videotoolbox_default_free +#define ff_amf_match_string liteav_ff_amf_match_string +#define ff_h263_h_loop_filter_mmx liteav_ff_h263_h_loop_filter_mmx +#define av_get_colorspace_name liteav_av_get_colorspace_name +#define ff_h264_execute_ref_pic_marking liteav_ff_h264_execute_ref_pic_marking +#define ff_aac_num_swb_120 liteav_ff_aac_num_swb_120 +#define ff_put_h264_chroma_mc8_10_avx liteav_ff_put_h264_chroma_mc8_10_avx +#define av_rescale liteav_av_rescale +#define ffurl_open_whitelist liteav_ffurl_open_whitelist +#define ff_mdct_end liteav_ff_mdct_end +#define av_register_all liteav_av_register_all +#define ff_h264_idct_add8_8_mmxext liteav_ff_h264_idct_add8_8_mmxext +#define ff_sbr_hf_apply_noise_0_neon liteav_ff_sbr_hf_apply_noise_0_neon +#define av_dv_codec_profile liteav_av_dv_codec_profile +#define ff_rtmpts_protocol liteav_ff_rtmpts_protocol +#define ff_j_rev_dct1 liteav_ff_j_rev_dct1 +#define ff_j_rev_dct4 liteav_ff_j_rev_dct4 +#define ff_h264_chroma_dc_dequant_idct_12_c liteav_ff_h264_chroma_dc_dequant_idct_12_c +#define av_tree_destroy liteav_av_tree_destroy +#define av_bsf_list_append2 liteav_av_bsf_list_append2 +#define ff_avg_h264_qpel16_mc22_10_sse2 liteav_ff_avg_h264_qpel16_mc22_10_sse2 +#define ff_videotoolbox_h264_start_frame liteav_ff_videotoolbox_h264_start_frame +#define ff_frame_thread_encoder_init liteav_ff_frame_thread_encoder_init +#define 
ff_cos_4096 liteav_ff_cos_4096 +#define ff_pred8x8l_dc_8_ssse3 liteav_ff_pred8x8l_dc_8_ssse3 +#define ff_mvtab liteav_ff_mvtab +#define ff_blend_mask liteav_ff_blend_mask +#define ff_hevc_put_qpel_uw_h1v2_neon_8 liteav_ff_hevc_put_qpel_uw_h1v2_neon_8 +#define ff_h263_decode_mb liteav_ff_h263_decode_mb +#define ff_simple_idct_neon liteav_ff_simple_idct_neon +#define av_hwframe_get_buffer liteav_av_hwframe_get_buffer +#define rgb32to16 liteav_rgb32to16 +#define rgb32to15 liteav_rgb32to15 +#define ff_put_pixels8_neon liteav_ff_put_pixels8_neon +#define ff_avg_h264_qpel16_mc10_10_sse2_cache64 liteav_ff_avg_h264_qpel16_mc10_10_sse2_cache64 +#define ff_avg_h264_qpel4_mc20_10_mmxext liteav_ff_avg_h264_qpel4_mc20_10_mmxext +#define ff_ebur128_loudness_momentary liteav_ff_ebur128_loudness_momentary +#define ff_deblock_h_chroma422_intra_8_mmxext liteav_ff_deblock_h_chroma422_intra_8_mmxext +#define av_packet_unpack_dictionary liteav_av_packet_unpack_dictionary +#define ff_sprite_trajectory_tab liteav_ff_sprite_trajectory_tab +#define ff_inlink_peek_frame liteav_ff_inlink_peek_frame +#define avio_wb16 liteav_avio_wb16 +#define ff_int32_to_int16_u_sse2 liteav_ff_int32_to_int16_u_sse2 +#define ff_h263_decode_init liteav_ff_h263_decode_init +#define avcodec_dct_init liteav_avcodec_dct_init +#define ff_hevc_put_qpel_uw_weight_h3v1_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h3v1_neon_8 +#define ff_make_formatu64_list liteav_ff_make_formatu64_list +#define ff_h263_update_motion_val liteav_ff_h263_update_motion_val +#define ff_mpeg4video_parser liteav_ff_mpeg4video_parser +#define ff_raw_video_read_header liteav_ff_raw_video_read_header +#define av_dv_codec_profile2 liteav_av_dv_codec_profile2 +#define ff_inlink_check_available_samples liteav_ff_inlink_check_available_samples +#define av_get_bits_per_pixel liteav_av_get_bits_per_pixel +#define ff_yuv2rgb_c_init_tables liteav_ff_yuv2rgb_c_init_tables +#define avio_get_str16le liteav_avio_get_str16le +#define ff_simple_idct_int16_10bit 
liteav_ff_simple_idct_int16_10bit +#define ff_codec_bmp_tags liteav_ff_codec_bmp_tags +#define ff_h264_idct_dc_add_8_sse2 liteav_ff_h264_idct_dc_add_8_sse2 +#define av_opt_set_defaults2 liteav_av_opt_set_defaults2 +#define ff_avg_h264_qpel4_mc33_10_mmxext liteav_ff_avg_h264_qpel4_mc33_10_mmxext +#define av_audio_fifo_peek liteav_av_audio_fifo_peek +#define ff_mpeg4_default_intra_matrix liteav_ff_mpeg4_default_intra_matrix +#define ff_h264_idct_add16_8_mmx liteav_ff_h264_idct_add16_8_mmx +#define ff_put_h264_qpel8_mc23_10_sse2 liteav_ff_put_h264_qpel8_mc23_10_sse2 +#define av_frame_get_side_data liteav_av_frame_get_side_data +#define avcodec_decode_audio4 liteav_avcodec_decode_audio4 +#define ff_put_pixels8_mmx liteav_ff_put_pixels8_mmx +#define ff_h264_p_mb_type_info liteav_ff_h264_p_mb_type_info +#define ff_mpv_common_end liteav_ff_mpv_common_end +#define ff_cbrt_tab liteav_ff_cbrt_tab +#define swri_rematrix_init_x86 liteav_swri_rematrix_init_x86 +#define ff_avg_h264_qpel4_mc10_10_mmxext liteav_ff_avg_h264_qpel4_mc10_10_mmxext +#define ff_framequeue_take liteav_ff_framequeue_take +#define ff_h263dsp_init_x86 liteav_ff_h263dsp_init_x86 +#define av_packet_move_ref liteav_av_packet_move_ref +#define ff_avg_h264_qpel16_mc02_10_sse2 liteav_ff_avg_h264_qpel16_mc02_10_sse2 +#define av_cpu_max_align liteav_av_cpu_max_align +#define av_buffer_default_free liteav_av_buffer_default_free +#define av_int2i liteav_av_int2i +#define ff_unpack_6ch_float_to_int32_a_avx liteav_ff_unpack_6ch_float_to_int32_a_avx +#define ff_codec_wav_tags liteav_ff_codec_wav_tags +#define ff_pred16x16_dc_8_sse2 liteav_ff_pred16x16_dc_8_sse2 +#define ff_init_ff_sine_windows liteav_ff_init_ff_sine_windows +#define ff_simple_idct10_sse2 liteav_ff_simple_idct10_sse2 +#define av_camellia_size liteav_av_camellia_size +#define ff_put_h264_qpel16_mc10_10_sse2_cache64 liteav_ff_put_h264_qpel16_mc10_10_sse2_cache64 +#define ff_pred8x8_top_dc_8_mmxext liteav_ff_pred8x8_top_dc_8_mmxext +#define 
rgb64tobgr48_nobswap liteav_rgb64tobgr48_nobswap +#define ff_parse_time_base liteav_ff_parse_time_base +#define av_chroma_location_from_name liteav_av_chroma_location_from_name +#define ff_yuv422p_to_argb_neon liteav_ff_yuv422p_to_argb_neon +#define ff_hevc_put_qpel_uw_h2v3_neon_8 liteav_ff_hevc_put_qpel_uw_h2v3_neon_8 +#define av_get_pix_fmt_loss liteav_av_get_pix_fmt_loss +#define ffio_free_dyn_buf liteav_ffio_free_dyn_buf +#define ff_unpack_2ch_int16_to_int16_a_sse2 liteav_ff_unpack_2ch_int16_to_int16_a_sse2 +#define ff_h264_chroma_dc_dequant_idct_10_c liteav_ff_h264_chroma_dc_dequant_idct_10_c +#define ff_cos_tabs_fixed liteav_ff_cos_tabs_fixed +#define av_frame_set_channel_layout liteav_av_frame_set_channel_layout +#define ff_h264_get_profile liteav_ff_h264_get_profile +#define ff_h264_idct8_add4_14_c liteav_ff_h264_idct8_add4_14_c +#define ff_pred4x4_down_right_8_mmxext liteav_ff_pred4x4_down_right_8_mmxext +#define ff_float_to_int32_u_sse2 liteav_ff_float_to_int32_u_sse2 +#define ff_pred16x16_plane_h264_8_mmx liteav_ff_pred16x16_plane_h264_8_mmx +#define ff_hevc_put_qpel_h1v3_neon_8 liteav_ff_hevc_put_qpel_h1v3_neon_8 +#define ff_hevc_luma_mv_merge_mode liteav_ff_hevc_luma_mv_merge_mode +#define ff_bsf_get_packet_ref liteav_ff_bsf_get_packet_ref +#define ff_hevc_put_qpel_uw_pixels_w24_neon_8 liteav_ff_hevc_put_qpel_uw_pixels_w24_neon_8 +#define ff_h264_b_mb_type_info liteav_ff_h264_b_mb_type_info +#define ff_h264_biweight_16_mmxext liteav_ff_h264_biweight_16_mmxext +#define ff_h264qpel_init liteav_ff_h264qpel_init +#define av_opt_get_pixel_fmt liteav_av_opt_get_pixel_fmt +#define ff_int16_to_float_a_sse2 liteav_ff_int16_to_float_a_sse2 +#define ff_mpa_synth_filter_fixed liteav_ff_mpa_synth_filter_fixed +#define ff_qpeldsp_init liteav_ff_qpeldsp_init +#define av_mdct_end liteav_av_mdct_end +#define ff_alloc_packet2 liteav_ff_alloc_packet2 +#define avfilter_config_links liteav_avfilter_config_links +#define ff_aac_scalefactor_bits 
liteav_ff_aac_scalefactor_bits +#define ff_avg_pixels16_xy2_no_rnd_neon liteav_ff_avg_pixels16_xy2_no_rnd_neon +#define ff_hevc_put_pel_uw_pixels_w16_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w16_neon_8_asm +#define avio_get_str16be liteav_avio_get_str16be +#define ff_pack_2ch_int32_to_int16_u_sse2 liteav_ff_pack_2ch_int32_to_int16_u_sse2 +#define av_thread_message_queue_nb_elems liteav_av_thread_message_queue_nb_elems +#define ff_amf_write_string liteav_ff_amf_write_string +#define ff_vf_rotate liteav_ff_vf_rotate +#define ff_codec_wav_guids liteav_ff_codec_wav_guids +#define ff_put_pixels16_sse2 liteav_ff_put_pixels16_sse2 +#define ff_blockdsp_init liteav_ff_blockdsp_init +#define avio_read liteav_avio_read +#define av_frame_get_best_effort_timestamp liteav_av_frame_get_best_effort_timestamp +#define avcodec_decode_video2 liteav_avcodec_decode_video2 +#define ff_avg_h264_qpel8or16_v_lowpass_op_mmxext liteav_ff_avg_h264_qpel8or16_v_lowpass_op_mmxext +#define ff_swb_offset_1024 liteav_ff_swb_offset_1024 +#define ff_mpv_decode_defaults liteav_ff_mpv_decode_defaults +#define ff_h263_chroma_qscale_table liteav_ff_h263_chroma_qscale_table +#define ff_hevc_sao_edge_eo0_w32_neon_8 liteav_ff_hevc_sao_edge_eo0_w32_neon_8 +#define ff_rtmp_calc_digest liteav_ff_rtmp_calc_digest +#define swr_alloc_set_opts liteav_swr_alloc_set_opts +#define av_thread_message_queue_alloc liteav_av_thread_message_queue_alloc +#define av_strnstr liteav_av_strnstr +#define av_write_trailer liteav_av_write_trailer +#define ff_inlink_acknowledge_status liteav_ff_inlink_acknowledge_status +#define ff_id3v2_parse_chapters liteav_ff_id3v2_parse_chapters +#define avfilter_init_dict liteav_avfilter_init_dict +#define ff_init_cabac_encoder liteav_ff_init_cabac_encoder +#define ff_pred8x8l_down_right_8_mmxext liteav_ff_pred8x8l_down_right_8_mmxext +#define ff_mpeg_draw_horiz_band liteav_ff_mpeg_draw_horiz_band +#define ff_hevc_diag_scan8x8_x liteav_ff_hevc_diag_scan8x8_x +#define ff_hevc_diag_scan8x8_y 
liteav_ff_hevc_diag_scan8x8_y +#define ff_amf_write_null liteav_ff_amf_write_null +#define ff_avg_h264_qpel16_mc21_neon liteav_ff_avg_h264_qpel16_mc21_neon +#define rgb32tobgr24 liteav_rgb32tobgr24 +#define ff_amf_read_number liteav_ff_amf_read_number +#define ff_h264_idct_add16intra_8_c liteav_ff_h264_idct_add16intra_8_c +#define avio_skip liteav_avio_skip +#define ff_w4_min_w6_lo liteav_ff_w4_min_w6_lo +#define av_probe_input_buffer liteav_av_probe_input_buffer +#define ff_draw_supported_pixel_formats liteav_ff_draw_supported_pixel_formats +#define ff_ac3_muxer liteav_ff_ac3_muxer +#define ff_hevc_reset_sei liteav_ff_hevc_reset_sei +#define ff_h264_idct_add_12_c liteav_ff_h264_idct_add_12_c +#define ff_mp4_muxer liteav_ff_mp4_muxer +#define ff_pack_8ch_float_to_float_a_sse2 liteav_ff_pack_8ch_float_to_float_a_sse2 +#define ff_videotoolbox_hvcc_extradata_create liteav_ff_videotoolbox_hvcc_extradata_create +#define ff_hevc_end_of_slice_flag_decode liteav_ff_hevc_end_of_slice_flag_decode +#define ff_frame_pool_video_init liteav_ff_frame_pool_video_init +#define ff_h264_idct_add_14_c liteav_ff_h264_idct_add_14_c +#define avcodec_pix_fmt_to_codec_tag liteav_avcodec_pix_fmt_to_codec_tag +#define av_dovi_alloc liteav_av_dovi_alloc +#define av_copy_packet liteav_av_copy_packet +#define ff_h264_v_loop_filter_chroma_neon liteav_ff_h264_v_loop_filter_chroma_neon +#define av_opt_find liteav_av_opt_find +#define av_write_uncoded_frame liteav_av_write_uncoded_frame +#define ff_get_chomp_line liteav_ff_get_chomp_line +#define swr_set_matrix liteav_swr_set_matrix +#define ff_listen_bind liteav_ff_listen_bind +#define av_thread_message_queue_set_free_func liteav_av_thread_message_queue_set_free_func +#define av_opt_query_ranges liteav_av_opt_query_ranges +#define sws_addVec liteav_sws_addVec +#define av_hwdevice_ctx_init liteav_av_hwdevice_ctx_init +#define ff_pack_8ch_int32_to_float_u_avx liteav_ff_pack_8ch_int32_to_float_u_avx +#define av_parse_cpu_caps liteav_av_parse_cpu_caps 
+#define av_mod_i liteav_av_mod_i +#define avfilter_get_matrix liteav_avfilter_get_matrix +#define ff_id3v2_tags liteav_ff_id3v2_tags +#define avpriv_mpa_freq_tab liteav_avpriv_mpa_freq_tab +#define av_frame_get_pkt_duration liteav_av_frame_get_pkt_duration +#define ff_emulated_edge_mc_8 liteav_ff_emulated_edge_mc_8 +#define ff_mpeg4_y_dc_scale_table liteav_ff_mpeg4_y_dc_scale_table +#define avpriv_pix_fmt_bps_mov liteav_avpriv_pix_fmt_bps_mov +#define ff_outlink_get_status liteav_ff_outlink_get_status +#define ff_sws_alphablendaway liteav_ff_sws_alphablendaway +#define ff_avg_pixels16_sse2 liteav_ff_avg_pixels16_sse2 +#define ff_ebur128_loudness_range liteav_ff_ebur128_loudness_range +#define ff_h263_mbtype_b_tab liteav_ff_h263_mbtype_b_tab +#define av_image_get_linesize liteav_av_image_get_linesize +#define ff_cos_16_fixed liteav_ff_cos_16_fixed +#define ff_h264_i_mb_type_info liteav_ff_h264_i_mb_type_info +#define ff_h264_decode_mb_cabac liteav_ff_h264_decode_mb_cabac +#define ff_imdct_half_c liteav_ff_imdct_half_c +#define ff_h264_dequant8_coeff_init liteav_ff_h264_dequant8_coeff_init +#define ff_smil_extract_next_text_chunk liteav_ff_smil_extract_next_text_chunk +#define ff_mpeg4_init_direct_mv liteav_ff_mpeg4_init_direct_mv +#define ff_id3v2_parse_priv_dict liteav_ff_id3v2_parse_priv_dict +#define av_tree_find liteav_av_tree_find +#define av_calloc liteav_av_calloc +#define ff_h264_idct_add8_422_14_c liteav_ff_h264_idct_add8_422_14_c +#define yyset_in liteav_yyset_in +#define av_pix_fmt_get_chroma_sub_sample liteav_av_pix_fmt_get_chroma_sub_sample +#define av_murmur3_final liteav_av_murmur3_final +#define av_frame_get_channel_layout liteav_av_frame_get_channel_layout +#define ff_pack_6ch_float_to_float_a_mmx liteav_ff_pack_6ch_float_to_float_a_mmx +#define av_fft_calc liteav_av_fft_calc +#define ff_init_2d_vlc_rl liteav_ff_init_2d_vlc_rl +#define ff_hevc_put_qpel_uw_h1v1_neon_8 liteav_ff_hevc_put_qpel_uw_h1v1_neon_8 +#define ff_reshuffle_raw_rgb 
liteav_ff_reshuffle_raw_rgb +#define ff_hevc_put_epel_uw_pixels_w12_neon_8 liteav_ff_hevc_put_epel_uw_pixels_w12_neon_8 +#define av_get_token liteav_av_get_token +#define ff_videodsp_init_aarch64 liteav_ff_videodsp_init_aarch64 +#define ff_vf_hflip liteav_ff_vf_hflip +#define ff_hevc_prev_intra_luma_pred_flag_decode liteav_ff_hevc_prev_intra_luma_pred_flag_decode +#define av_frame_get_pkt_pos liteav_av_frame_get_pkt_pos +#define ff_default_query_formats liteav_ff_default_query_formats +#define ff_h264_idct8_dc_add_8_c liteav_ff_h264_idct8_dc_add_8_c +#define av_packet_free_side_data liteav_av_packet_free_side_data +#define ff_avg_h264_qpel16_mc30_10_sse2_cache64 liteav_ff_avg_h264_qpel16_mc30_10_sse2_cache64 +#define ff_interleaved_peek liteav_ff_interleaved_peek +#define ff_hevc_hls_mvd_coding liteav_ff_hevc_hls_mvd_coding +#define ff_avg_h264_qpel8_mc00_neon liteav_ff_avg_h264_qpel8_mc00_neon +#define ff_rtmp_packet_create liteav_ff_rtmp_packet_create +#define av_expr_eval liteav_av_expr_eval +#define ff_pd_65535 liteav_ff_pd_65535 +#define ff_pred16x16_128_dc_neon liteav_ff_pred16x16_128_dc_neon +#define ff_mpeg12_find_best_frame_rate liteav_ff_mpeg12_find_best_frame_rate +#define ff_hevc_put_qpel_uw_weight_v3_neon_8 liteav_ff_hevc_put_qpel_uw_weight_v3_neon_8 +#define av_bsf_receive_packet liteav_av_bsf_receive_packet +#define ff_simple_idct_int16_8bit liteav_ff_simple_idct_int16_8bit +#define ff_rtmp_packet_dump liteav_ff_rtmp_packet_dump +#define ff_pack_8ch_float_to_int32_a_sse2 liteav_ff_pack_8ch_float_to_int32_a_sse2 +#define ff_pack_6ch_float_to_float_u_sse liteav_ff_pack_6ch_float_to_float_u_sse +#define av_frame_side_data_name liteav_av_frame_side_data_name +#define ff_deblock_h_luma_8_avx liteav_ff_deblock_h_luma_8_avx +#define ff_pred8x8_horizontal_8_mmx liteav_ff_pred8x8_horizontal_8_mmx +#define ff_hevc_put_qpel_uw_weight_h2_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h2_neon_8 +#define ff_side_data_set_encoder_stats 
liteav_ff_side_data_set_encoder_stats +#define av_samples_fill_arrays liteav_av_samples_fill_arrays +#define ff_nv12_to_argb_neon liteav_ff_nv12_to_argb_neon +#define ff_put_h264_qpel4_v_lowpass_mmxext liteav_ff_put_h264_qpel4_v_lowpass_mmxext +#define ff_get_line liteav_ff_get_line +#define ff_simple_idct_put_int32_10bit liteav_ff_simple_idct_put_int32_10bit +#define av_audio_fifo_space liteav_av_audio_fifo_space +#define ff_hevc_videotoolbox_hwaccel liteav_ff_hevc_videotoolbox_hwaccel +#define ff_sws_rgb2rgb_init liteav_ff_sws_rgb2rgb_init +#define ff_vsink_buffer liteav_ff_vsink_buffer +#define av_iformat_next liteav_av_iformat_next +#define ff_hevc_pred_mode_decode liteav_ff_hevc_pred_mode_decode +#define av_fast_mallocz liteav_av_fast_mallocz +#define ff_deblock_h_chroma422_10_sse2 liteav_ff_deblock_h_chroma422_10_sse2 +#define avio_flush liteav_avio_flush +#define av_frame_ref liteav_av_frame_ref +#define ff_hwframe_map_replace liteav_ff_hwframe_map_replace +#define ff_deblock_h_chroma422_8_avx liteav_ff_deblock_h_chroma422_8_avx +#define ff_hevc_put_pel_uw_pixels_w32_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w32_neon_8_asm +#define yuv422ptoyuy2 liteav_yuv422ptoyuy2 +#define ff_hevc_idct_32x32_dc_neon_8 liteav_ff_hevc_idct_32x32_dc_neon_8 +#define yy_create_buffer liteav_yy_create_buffer +#define ff_hevc_add_residual_8x8_neon_8 liteav_ff_hevc_add_residual_8x8_neon_8 +#define av_parser_close liteav_av_parser_close +#define av_buffer_create liteav_av_buffer_create +#define ff_pred4x4_vertical_left_10_avx liteav_ff_pred4x4_vertical_left_10_avx +#define swr_get_delay liteav_swr_get_delay +#define ff_jpeg_fdct_islow_10 liteav_ff_jpeg_fdct_islow_10 +#define ff_h264_idct8_add_8_c liteav_ff_h264_idct8_add_8_c +#define av_frame_get_qp_table liteav_av_frame_get_qp_table +#define avpicture_layout liteav_avpicture_layout +#define ff_deblock_h_chroma_8_avx liteav_ff_deblock_h_chroma_8_avx +#define av_packet_merge_side_data liteav_av_packet_merge_side_data +#define 
ff_get_buffer liteav_ff_get_buffer +#define av_fft_permute liteav_av_fft_permute +#define av_realloc_array liteav_av_realloc_array +#define ff_h264_chroma_dc_dequant_idct_9_c liteav_ff_h264_chroma_dc_dequant_idct_9_c +#define ff_fetch_timestamp liteav_ff_fetch_timestamp +#define av_buffer_pool_uninit liteav_av_buffer_pool_uninit +#define ff_set_common_samplerates liteav_ff_set_common_samplerates +#define avio_get_dyn_buf liteav_avio_get_dyn_buf +#define ff_put_bmp_header liteav_ff_put_bmp_header +#define av_fifo_alloc liteav_av_fifo_alloc +#define ff_aac_pow34sf_tab liteav_ff_aac_pow34sf_tab +#define ff_float_to_int32_a_sse2 liteav_ff_float_to_int32_a_sse2 +#define ff_deblock_h_chroma422_10_avx liteav_ff_deblock_h_chroma422_10_avx +#define ff_hevc_put_pixels_w32_w48_w64_neon_8 liteav_ff_hevc_put_pixels_w32_w48_w64_neon_8 +#define ff_hevc_frame_nb_refs liteav_ff_hevc_frame_nb_refs +#define yyset_out liteav_yyset_out +#define ff_put_h264_qpel8_mc30_10_ssse3_cache64 liteav_ff_put_h264_qpel8_mc30_10_ssse3_cache64 +#define av_aes_ctr_set_random_iv liteav_av_aes_ctr_set_random_iv +#define ff_hevc_put_qpel_h3v2_neon_8 liteav_ff_hevc_put_qpel_h3v2_neon_8 +#define av_tree_insert liteav_av_tree_insert +#define ff_avg_pixels4_l2_shift5_mmxext liteav_ff_avg_pixels4_l2_shift5_mmxext +#define ff_put_pixels8_x2_neon liteav_ff_put_pixels8_x2_neon +#define ff_mpegts_muxer liteav_ff_mpegts_muxer +#define ff_put_h264_qpel8_mc02_neon liteav_ff_put_h264_qpel8_mc02_neon +#define shuffle_bytes_3012 liteav_shuffle_bytes_3012 +#define ff_h263_parser liteav_ff_h263_parser +#define av_dynarray_add liteav_av_dynarray_add +#define ff_sine_2048_fixed liteav_ff_sine_2048_fixed +#define av_lfg_init_from_data liteav_av_lfg_init_from_data +#define av_hmac_alloc liteav_av_hmac_alloc +#define avpriv_mpeg4audio_get_config liteav_avpriv_mpeg4audio_get_config +#define av_get_pix_fmt_string liteav_av_get_pix_fmt_string +#define ff_hevc_slice_rpl liteav_ff_hevc_slice_rpl +#define ff_h264_idct_dc_add_9_c 
liteav_ff_h264_idct_dc_add_9_c +#define ff_get_qtpalette liteav_ff_get_qtpalette +#define av_aes_init liteav_av_aes_init +#define ff_avg_h264_qpel16_mc22_neon liteav_ff_avg_h264_qpel16_mc22_neon +#define ff_avg_pixels4_mmxext liteav_ff_avg_pixels4_mmxext +#define ff_put_pixels8_y2_no_rnd_neon liteav_ff_put_pixels8_y2_no_rnd_neon +#define ff_pred4x4_vertical_left_8_mmxext liteav_ff_pred4x4_vertical_left_8_mmxext +#define ff_put_qpel8_mc32_old_c liteav_ff_put_qpel8_mc32_old_c +#define ff_hls_protocol liteav_ff_hls_protocol +#define av_get_pix_fmt liteav_av_get_pix_fmt +#define ff_bsf_get_packet liteav_ff_bsf_get_packet +#define avfilter_get_class liteav_avfilter_get_class +#define ff_h264chroma_init_x86 liteav_ff_h264chroma_init_x86 +#define ff_mpv_report_decode_progress liteav_ff_mpv_report_decode_progress +#define yv12touyvy liteav_yv12touyvy +#define ff_put_h264_qpel8_mc33_10_sse2 liteav_ff_put_h264_qpel8_mc33_10_sse2 +#define ff_frame_pool_get liteav_ff_frame_pool_get +#define ff_h264_direct_ref_list_init liteav_ff_h264_direct_ref_list_init +#define ff_rl_init liteav_ff_rl_init +#define ff_hevc_add_residual_16x16_neon_8 liteav_ff_hevc_add_residual_16x16_neon_8 +#define av_encryption_init_info_alloc liteav_av_encryption_init_info_alloc +#define avfilter_pad_count liteav_avfilter_pad_count +#define ff_idctdsp_init_aarch64 liteav_ff_idctdsp_init_aarch64 +#define ff_imdct36_float_avx liteav_ff_imdct36_float_avx +#define av_get_padded_bits_per_pixel liteav_av_get_padded_bits_per_pixel +#define av_ac3_parse_header liteav_av_ac3_parse_header +#define av_fifo_reset liteav_av_fifo_reset +#define ff_w4_min_w6_hi liteav_ff_w4_min_w6_hi +#define av_bitstream_filter_close liteav_av_bitstream_filter_close +#define avfilter_mul_matrix liteav_avfilter_mul_matrix +#define avcodec_descriptor_get_by_name liteav_avcodec_descriptor_get_by_name +#define ff_put_qpel16_mc13_old_c liteav_ff_put_qpel16_mc13_old_c +#define ff_put_h264_qpel8or16_hv1_lowpass_op_mmxext 
liteav_ff_put_h264_qpel8or16_hv1_lowpass_op_mmxext +#define yv12toyuy2 liteav_yv12toyuy2 +#define ff_inter_vlc liteav_ff_inter_vlc +#define vlc_css_declarations_Delete liteav_vlc_css_declarations_Delete +#define ff_flacdsp_init_x86 liteav_ff_flacdsp_init_x86 +#define ff_mov_get_channel_layout liteav_ff_mov_get_channel_layout +#define ff_pw_5 liteav_ff_pw_5 +#define ff_deblock_h_luma_intra_8_sse2 liteav_ff_deblock_h_luma_intra_8_sse2 +#define ff_hflip_init liteav_ff_hflip_init +#define ff_h264_idct_add8_8_c liteav_ff_h264_idct_add8_8_c +#define ff_pred16x16_horizontal_10_mmxext liteav_ff_pred16x16_horizontal_10_mmxext +#define ff_pd_8192 liteav_ff_pd_8192 +#define ffio_open_whitelist liteav_ffio_open_whitelist +#define avio_feof liteav_avio_feof +#define ff_flv_demuxer liteav_ff_flv_demuxer +#define avio_rb64 liteav_avio_rb64 +#define av_log_default_callback liteav_av_log_default_callback +#define ff_pred16x16_dc_8_ssse3 liteav_ff_pred16x16_dc_8_ssse3 +#define ff_pred8x8l_top_dc_10_sse2 liteav_ff_pred8x8l_top_dc_10_sse2 +#define av_max_alloc liteav_av_max_alloc +#define ff_put_qpel8_mc11_old_c liteav_ff_put_qpel8_mc11_old_c +#define ff_avg_h264_chroma_mc4_mmxext liteav_ff_avg_h264_chroma_mc4_mmxext +#define ff_mpeg4_resync_prefix liteav_ff_mpeg4_resync_prefix +#define ff_pred16x16_top_dc_10_sse2 liteav_ff_pred16x16_top_dc_10_sse2 +#define swri_resample_dsp_init liteav_swri_resample_dsp_init +#define ff_avfilter_graph_update_heap liteav_ff_avfilter_graph_update_heap +#define ff_hevc_sao_offset_abs_decode liteav_ff_hevc_sao_offset_abs_decode +#define av_buffersrc_parameters_alloc liteav_av_buffersrc_parameters_alloc +#define av_gettime_relative_is_monotonic liteav_av_gettime_relative_is_monotonic +#define avpicture_get_size liteav_avpicture_get_size +#define avcodec_register_all liteav_avcodec_register_all +#define swri_audio_convert_alloc liteav_swri_audio_convert_alloc +#define avpriv_request_sample liteav_avpriv_request_sample +#define 
ff_put_h264_qpel8_mc31_10_sse2 liteav_ff_put_h264_qpel8_mc31_10_sse2 +#define ff_hevc_inter_pred_idc_decode liteav_ff_hevc_inter_pred_idc_decode +#define ff_pw_9 liteav_ff_pw_9 +#define ff_er_add_slice liteav_ff_er_add_slice +#define ff_pd_16 liteav_ff_pd_16 +#define ff_unpack_2ch_float_to_int16_a_sse2 liteav_ff_unpack_2ch_float_to_int16_a_sse2 +#define ff_subtitles_read_line liteav_ff_subtitles_read_line +#define av_strerror liteav_av_strerror +#define swr_drop_output liteav_swr_drop_output +#define avio_r8 liteav_avio_r8 +#define sws_getIdentityVec liteav_sws_getIdentityVec +#define ff_put_qpel16_mc31_old_c liteav_ff_put_qpel16_mc31_old_c +#define av_audio_fifo_reset liteav_av_audio_fifo_reset +#define ff_pred16x16_plane_rv40_8_mmxext liteav_ff_pred16x16_plane_rv40_8_mmxext +#define sws_freeFilter liteav_sws_freeFilter +#define ff_startcode_find_candidate_c liteav_ff_startcode_find_candidate_c +#define vu9_to_vu12 liteav_vu9_to_vu12 +#define ff_tls_deinit liteav_ff_tls_deinit +#define av_hash_get_name liteav_av_hash_get_name +#define ff_unpack_2ch_float_to_int32_a_sse2 liteav_ff_unpack_2ch_float_to_int32_a_sse2 +#define avfilter_free liteav_avfilter_free +#define swr_set_compensation liteav_swr_set_compensation +#define planar2x liteav_planar2x +#define ff_aac_spectral_bits liteav_ff_aac_spectral_bits +#define ff_mpv_reconstruct_mb liteav_ff_mpv_reconstruct_mb +#define av_buffersink_get_type liteav_av_buffersink_get_type +#define ff_avg_pixels16_l2_mmxext liteav_ff_avg_pixels16_l2_mmxext +#define av_content_light_metadata_alloc liteav_av_content_light_metadata_alloc +#define av_get_sample_fmt liteav_av_get_sample_fmt +#define ff_hevc_put_qpel_uw_bi_h_neon_8 liteav_ff_hevc_put_qpel_uw_bi_h_neon_8 +#define ff_id3v2_parse_priv liteav_ff_id3v2_parse_priv +#define ff_hevc_put_qpel_uw_bi_hv_neon_8 liteav_ff_hevc_put_qpel_uw_bi_hv_neon_8 +#define ff_dither_8x8_128 liteav_ff_dither_8x8_128 +#define ff_mpeg1_videotoolbox_hwaccel liteav_ff_mpeg1_videotoolbox_hwaccel 
+#define avio_rl64 liteav_avio_rl64 +#define ff_isom_write_av1c liteav_ff_isom_write_av1c +#define sws_scaleVec liteav_sws_scaleVec +#define ff_isom_write_avcc liteav_ff_isom_write_avcc +#define ff_w1_plus_w5 liteav_ff_w1_plus_w5 +#define ff_put_h264_qpel8or16_hv2_lowpass_op_mmxext liteav_ff_put_h264_qpel8or16_hv2_lowpass_op_mmxext +#define yyget_column liteav_yyget_column +#define ff_hevc_put_qpel_uw_pixels_w8_neon_8 liteav_ff_hevc_put_qpel_uw_pixels_w8_neon_8 +#define ff_mpeg2_videotoolbox_hwaccel liteav_ff_mpeg2_videotoolbox_hwaccel +#define ff_ape_parse_tag liteav_ff_ape_parse_tag +#define ff_http_match_no_proxy liteav_ff_http_match_no_proxy +#define ff_h264_idct_dc_add_neon liteav_ff_h264_idct_dc_add_neon +#define ff_h264_idct_add8_422_10_sse2 liteav_ff_h264_idct_add8_422_10_sse2 +#define ff_put_h264_qpel4_mc20_10_mmxext liteav_ff_put_h264_qpel4_mc20_10_mmxext +#define ff_cos_32768 liteav_ff_cos_32768 +#define ff_h264_idct_add16_10_c liteav_ff_h264_idct_add16_10_c +#define av_interleaved_write_uncoded_frame liteav_av_interleaved_write_uncoded_frame +#define av_opt_set_dict2 liteav_av_opt_set_dict2 +#define ff_h264_idct8_add_10_c liteav_ff_h264_idct8_add_10_c +#define ff_avg_vc1_chroma_mc8_nornd_mmxext liteav_ff_avg_vc1_chroma_mc8_nornd_mmxext +#define av_nearer_q liteav_av_nearer_q +#define ff_mpeg2_frame_rate_tab liteav_ff_mpeg2_frame_rate_tab +#define avio_write_marker liteav_avio_write_marker +#define av_spherical_alloc liteav_av_spherical_alloc +#define av_fft_init liteav_av_fft_init +#define ff_put_no_rnd_qpel8_mc33_old_c liteav_ff_put_no_rnd_qpel8_mc33_old_c +#define ff_http_averror liteav_ff_http_averror +#define ff_h264_idct_add8_neon liteav_ff_h264_idct_add8_neon +#define ff_put_h264_qpel8_h_lowpass_l2_ssse3 liteav_ff_put_h264_qpel8_h_lowpass_l2_ssse3 +#define ff_decode_get_packet liteav_ff_decode_get_packet +#define ff_mp3on4float_decoder liteav_ff_mp3on4float_decoder +#define ff_avg_qpel16_mc33_old_c liteav_ff_avg_qpel16_mc33_old_c +#define 
avfilter_graph_parse_ptr liteav_avfilter_graph_parse_ptr +#define ff_interleave_packet_per_dts liteav_ff_interleave_packet_per_dts +#define ff_hevc_sao_band_w64_neon_8 liteav_ff_hevc_sao_band_w64_neon_8 +#define ff_put_qpel16_mc11_old_c liteav_ff_put_qpel16_mc11_old_c +#define ff_frame_thread_init liteav_ff_frame_thread_init +#define ff_webvtt_demuxer liteav_ff_webvtt_demuxer +#define ff_float_to_int16_u_sse2 liteav_ff_float_to_int16_u_sse2 +#define ff_avg_h264_qpel16_mc21_10_sse2 liteav_ff_avg_h264_qpel16_mc21_10_sse2 +#define ff_avg_pixels8_mmxext liteav_ff_avg_pixels8_mmxext +#define ff_avg_h264_qpel4_mc03_10_mmxext liteav_ff_avg_h264_qpel4_mc03_10_mmxext +#define ff_hevc_pred_planar_8x8_neon_8 liteav_ff_hevc_pred_planar_8x8_neon_8 +#define avfilter_sub_matrix liteav_avfilter_sub_matrix +#define rgb15tobgr24 liteav_rgb15tobgr24 +#define ff_init_lls_x86 liteav_ff_init_lls_x86 +#define av_get_packed_sample_fmt liteav_av_get_packed_sample_fmt +#define av_frame_set_pkt_pos liteav_av_frame_set_pkt_pos +#define ff_put_h264_qpel16_mc13_neon liteav_ff_put_h264_qpel16_mc13_neon +#define av_hash_names liteav_av_hash_names +#define ff_h263_v_loop_filter_mmx liteav_ff_h263_v_loop_filter_mmx +#define ff_qdm2_at_decoder liteav_ff_qdm2_at_decoder +#define ff_put_no_rnd_qpel16_mc12_old_c liteav_ff_put_no_rnd_qpel16_mc12_old_c +#define ff_avg_pixels8_neon liteav_ff_avg_pixels8_neon +#define ff_mp4_read_descr_len liteav_ff_mp4_read_descr_len +#define ff_decode_bsfs_uninit liteav_ff_decode_bsfs_uninit +#define ffio_realloc_buf liteav_ffio_realloc_buf +#define av_bmg_get liteav_av_bmg_get +#define av_dump_format liteav_av_dump_format +#define ff_thread_flush liteav_ff_thread_flush +#define ff_hevc_put_qpel_uw_v2_neon_8 liteav_ff_hevc_put_qpel_uw_v2_neon_8 +#define ff_pixblockdsp_init_x86 liteav_ff_pixblockdsp_init_x86 +#define rgb48tobgr64_nobswap liteav_rgb48tobgr64_nobswap +#define ff_mjpegenc_huffman_compute_bits liteav_ff_mjpegenc_huffman_compute_bits +#define 
ff_aac_codebook_vector_idx liteav_ff_aac_codebook_vector_idx +#define text_segment_chain_delete liteav_text_segment_chain_delete +#define yylex_init_extra liteav_yylex_init_extra +#define ff_avg_qpel8_mc11_old_c liteav_ff_avg_qpel8_mc11_old_c +#define ff_volume_init_x86 liteav_ff_volume_init_x86 +#define ff_mpeg12_init_vlcs liteav_ff_mpeg12_init_vlcs +#define ff_w7_plus_w3_lo liteav_ff_w7_plus_w3_lo +#define av_md5_sum liteav_av_md5_sum +#define ff_pred4x4_horizontal_up_8_mmxext liteav_ff_pred4x4_horizontal_up_8_mmxext +#define ff_imdct_half_avx liteav_ff_imdct_half_avx +#define ff_h264_idct_add8_10_avx liteav_ff_h264_idct_add8_10_avx +#define av_aes_ctr_set_iv liteav_av_aes_ctr_set_iv +#define ff_print_debug_info liteav_ff_print_debug_info +#define ff_cos_2048 liteav_ff_cos_2048 +#define ff_put_h264_qpel16_h_lowpass_l2_ssse3 liteav_ff_put_h264_qpel16_h_lowpass_l2_ssse3 +#define ffurl_open liteav_ffurl_open +#define av_grow_packet liteav_av_grow_packet +#define avpriv_mpegts_parse_open liteav_avpriv_mpegts_parse_open +#define ff_list_bsf liteav_ff_list_bsf +#define ff_put_h264_qpel4_mc11_10_mmxext liteav_ff_put_h264_qpel4_mc11_10_mmxext +#define yyrestart liteav_yyrestart +#define ff_pred8x8_dc_neon liteav_ff_pred8x8_dc_neon +#define ff_isom_write_vpcc liteav_ff_isom_write_vpcc +#define ff_hevc_pred_planar_4x4_neon_8 liteav_ff_hevc_pred_planar_4x4_neon_8 +#define ff_add_pixels_clamped_c liteav_ff_add_pixels_clamped_c +#define avio_wb32 liteav_avio_wb32 +#define av_qsv_alloc_context liteav_av_qsv_alloc_context +#define ff_put_pixels_clamped_c liteav_ff_put_pixels_clamped_c +#define ff_mpeg4_studio_intra liteav_ff_mpeg4_studio_intra +#define av_write_image_line2 liteav_av_write_image_line2 +#define av_vorbis_parse_reset liteav_av_vorbis_parse_reset +#define ff_pred4x4_vertical_right_8_mmxext liteav_ff_pred4x4_vertical_right_8_mmxext +#define ff_h264_decode_seq_parameter_set liteav_ff_h264_decode_seq_parameter_set +#define ff_swb_offset_128 liteav_ff_swb_offset_128 
+#define ff_pack_2ch_float_to_int32_a_sse2 liteav_ff_pack_2ch_float_to_int32_a_sse2 +#define ffurl_close liteav_ffurl_close +#define ff_put_v liteav_ff_put_v +#define ff_swb_offset_120 liteav_ff_swb_offset_120 +#define ff_avg_pixels16_neon liteav_ff_avg_pixels16_neon +#define ff_resample_common_apply_filter_x8_float_neon liteav_ff_resample_common_apply_filter_x8_float_neon +#define ff_pred8x8_plane_8_mmx liteav_ff_pred8x8_plane_8_mmx +#define av_new_packet liteav_av_new_packet +#define av_reallocp_array liteav_av_reallocp_array +#define yvu9_to_yuy2 liteav_yvu9_to_yuy2 +#define sws_getConstVec liteav_sws_getConstVec +#define ff_pack_6ch_float_to_float_u_avx liteav_ff_pack_6ch_float_to_float_u_avx +#define ff_mpeg12_vlc_dc_lum_bits liteav_ff_mpeg12_vlc_dc_lum_bits +#define ff_init_mpadsp_tabs_float liteav_ff_init_mpadsp_tabs_float +#define ff_vf_vflip liteav_ff_vf_vflip +#define ff_avg_h264_qpel4_v_lowpass_mmxext liteav_ff_avg_h264_qpel4_v_lowpass_mmxext +#define av_ripemd_init liteav_av_ripemd_init +#define text_style_copy liteav_text_style_copy +#define ff_rtp_get_payload_type liteav_ff_rtp_get_payload_type +#define av_packet_from_data liteav_av_packet_from_data +#define ff_cos_2048_fixed liteav_ff_cos_2048_fixed +#define ff_sine_4096 liteav_ff_sine_4096 +#define ff_aac_num_swb_960 liteav_ff_aac_num_swb_960 +#define swri_resample_dsp_x86_init liteav_swri_resample_dsp_x86_init +#define ff_hevc_put_epel_uw_pixels_w48_neon_8 liteav_ff_hevc_put_epel_uw_pixels_w48_neon_8 +#define ff_sine_windows liteav_ff_sine_windows +#define ff_put_pixels16_xy2_no_rnd_neon liteav_ff_put_pixels16_xy2_no_rnd_neon +#define ff_mov_close_hinting liteav_ff_mov_close_hinting +#define ff_decode_get_hw_frames_ctx liteav_ff_decode_get_hw_frames_ctx +#define ff_put_h264_qpel16_mc10_10_ssse3_cache64 liteav_ff_put_h264_qpel16_mc10_10_ssse3_cache64 +#define av_opt_get_image_size liteav_av_opt_get_image_size +#define av_image_alloc liteav_av_image_alloc +#define ff_parse_close liteav_ff_parse_close 
+#define ff_h264_dequant8_coeff_init_scan liteav_ff_h264_dequant8_coeff_init_scan +#define ff_put_h264_qpel8_mc03_10_sse2 liteav_ff_put_h264_qpel8_mc03_10_sse2 +#define ff_mpeg12_common_init liteav_ff_mpeg12_common_init +#define ff_pred8x8l_horizontal_down_8_ssse3 liteav_ff_pred8x8l_horizontal_down_8_ssse3 +#define av_hwframe_map liteav_av_hwframe_map +#define ff_hevc_pred_planar_8x8_neon_8_1 liteav_ff_hevc_pred_planar_8x8_neon_8_1 +#define ff_er_frame_end liteav_ff_er_frame_end +#define ff_amf_write_object_start liteav_ff_amf_write_object_start +#define ff_pred16x16_vertical_8_mmx liteav_ff_pred16x16_vertical_8_mmx +#define ff_h264_idct_add_8_avx liteav_ff_h264_idct_add_8_avx +#define av_aes_ctr_get_iv liteav_av_aes_ctr_get_iv +#define av_opt_child_class_next liteav_av_opt_child_class_next +#define ff_codec_movsubtitle_tags liteav_ff_codec_movsubtitle_tags +#define ff_mdct_calcw_c liteav_ff_mdct_calcw_c +#define rgb12to15 liteav_rgb12to15 +#define ff_hevc_idct_8x8_dc_neon_8_asm liteav_ff_hevc_idct_8x8_dc_neon_8_asm +#define avcodec_get_hw_frames_parameters liteav_avcodec_get_hw_frames_parameters +#define ff_yuv422p_to_bgra_neon liteav_ff_yuv422p_to_bgra_neon +#define ff_unpack_2ch_int16_to_float_a_ssse3 liteav_ff_unpack_2ch_int16_to_float_a_ssse3 +#define ff_deblock_v_luma_intra_10_sse2 liteav_ff_deblock_v_luma_intra_10_sse2 +#define ff_avg_h264_qpel16_mc03_neon liteav_ff_avg_h264_qpel16_mc03_neon +#define yyset_extra liteav_yyset_extra +#define av_log_set_callback liteav_av_log_set_callback +#define ff_tlog_link liteav_ff_tlog_link +#define ff_h264_luma_dc_dequant_idct_sse2 liteav_ff_h264_luma_dc_dequant_idct_sse2 +#define text_style_delete liteav_text_style_delete +#define ff_pred8x8l_down_left_10_avx liteav_ff_pred8x8l_down_left_10_avx +#define avcodec_dct_alloc liteav_avcodec_dct_alloc +#define ff_ebur128_destroy liteav_ff_ebur128_destroy +#define ff_int16_to_int32_u_mmx liteav_ff_int16_to_int32_u_mmx +#define ff_mpeg_update_thread_context 
liteav_ff_mpeg_update_thread_context +#define ff_id3v1_genre_str liteav_ff_id3v1_genre_str +#define av_adts_header_parse liteav_av_adts_header_parse +#define ff_h263_inter_MCBPC_code liteav_ff_h263_inter_MCBPC_code +#define ff_pack_6ch_float_to_float_a_sse liteav_ff_pack_6ch_float_to_float_a_sse +#define ff_butterflies_float_neon liteav_ff_butterflies_float_neon +#define ff_h264_biweight_16_sse2 liteav_ff_h264_biweight_16_sse2 +#define avcodec_descriptor_get liteav_avcodec_descriptor_get +#define ff_put_h264_qpel8_mc11_neon liteav_ff_put_h264_qpel8_mc11_neon +#define av_cmp_i liteav_av_cmp_i +#define uyvytoyuv420 liteav_uyvytoyuv420 +#define ff_pred4x4_vertical_vp8_8_mmxext liteav_ff_pred4x4_vertical_vp8_8_mmxext +#define ff_avg_qpel8_mc13_old_c liteav_ff_avg_qpel8_mc13_old_c +#define av_div_q liteav_av_div_q +#define ff_h263_pred_acdc liteav_ff_h263_pred_acdc +#define av_color_space_name liteav_av_color_space_name +#define ff_h263_videotoolbox_hwaccel liteav_ff_h263_videotoolbox_hwaccel +#define ff_mpa_decode_header liteav_ff_mpa_decode_header +#define ff_isom_write_hvcc liteav_ff_isom_write_hvcc +#define ff_put_pixels8x8_c liteav_ff_put_pixels8x8_c +#define ff_hevc_add_residual_4x4_neon_8 liteav_ff_hevc_add_residual_4x4_neon_8 +#define ff_avg_pixels16_y2_neon liteav_ff_avg_pixels16_y2_neon +#define av_div_i liteav_av_div_i +#define ff_default_get_video_buffer liteav_ff_default_get_video_buffer +#define swri_oldapi_conv_fltp_to_s16_nch_neon liteav_swri_oldapi_conv_fltp_to_s16_nch_neon +#define ff_put_h264_qpel16_mc23_neon liteav_ff_put_h264_qpel16_mc23_neon +#define ff_eac3_demuxer liteav_ff_eac3_demuxer +#define ff_mpeg4_get_video_packet_prefix_length liteav_ff_mpeg4_get_video_packet_prefix_length +#define yuv422ptouyvy liteav_yuv422ptouyvy +#define ff_simple_idct12_avx liteav_ff_simple_idct12_avx +#define ff_unpack_2ch_int16_to_int32_u_sse2 liteav_ff_unpack_2ch_int16_to_int32_u_sse2 +#define ff_pred8x8_0lt_dc_neon liteav_ff_pred8x8_0lt_dc_neon +#define 
ff_inlink_check_available_frame liteav_ff_inlink_check_available_frame +#define ff_mpa_quant_steps liteav_ff_mpa_quant_steps +#define ff_thread_can_start_frame liteav_ff_thread_can_start_frame +#define ff_h264_filter_mb_fast liteav_ff_h264_filter_mb_fast +#define av_hash_final_hex liteav_av_hash_final_hex +#define ff_put_h264_qpel16_mc13_10_sse2 liteav_ff_put_h264_qpel16_mc13_10_sse2 +#define ffio_set_buf_size liteav_ffio_set_buf_size +#define av_timecode_get_smpte_from_framenum liteav_av_timecode_get_smpte_from_framenum +#define swri_audio_convert_free liteav_swri_audio_convert_free +#define ff_h264_idct_add16intra_12_c liteav_ff_h264_idct_add16intra_12_c +#define ff_metadata_conv liteav_ff_metadata_conv +#define ffurl_get_file_handle liteav_ffurl_get_file_handle +#define ff_put_h264_qpel16_mc11_neon liteav_ff_put_h264_qpel16_mc11_neon +#define ff_h264_golomb_to_inter_cbp liteav_ff_h264_golomb_to_inter_cbp +#define ff_mpeg_unref_picture liteav_ff_mpeg_unref_picture +#define ff_imdct36_blocks_fixed liteav_ff_imdct36_blocks_fixed +#define ff_avg_h264_qpel8_mc11_neon liteav_ff_avg_h264_qpel8_mc11_neon +#define ff_h264_idct_add16intra_14_c liteav_ff_h264_idct_add16intra_14_c +#define ff_cbrt_tableinit liteav_ff_cbrt_tableinit +#define ff_mpeg4_pred_ac liteav_ff_mpeg4_pred_ac +#define ff_h264_weight_16_mmxext liteav_ff_h264_weight_16_mmxext +#define ff_hevc_put_epel_uw_bi_v_neon_8 liteav_ff_hevc_put_epel_uw_bi_v_neon_8 +#define ff_h264_idct8_add4_10_sse2 liteav_ff_h264_idct8_add4_10_sse2 +#define vlc_css_expression_New liteav_vlc_css_expression_New +#define ff_hevc_ps_uninit liteav_ff_hevc_ps_uninit +#define ff_four_imdct36_float_avx liteav_ff_four_imdct36_float_avx +#define ff_hevc_pred_angular_32x32_v_neon_8 liteav_ff_hevc_pred_angular_32x32_v_neon_8 +#define av_mdct_init liteav_av_mdct_init +#define ff_put_h264_qpel8or16_hv2_lowpass_ssse3 liteav_ff_put_h264_qpel8or16_hv2_lowpass_ssse3 +#define ff_pd_32 liteav_ff_pd_32 +#define ff_mpa_l2_select_table 
liteav_ff_mpa_l2_select_table +#define ff_frame_pool_get_video_config liteav_ff_frame_pool_get_video_config +#define ff_hevc_cu_chroma_qp_offset_idx liteav_ff_hevc_cu_chroma_qp_offset_idx +#define ff_http_init_auth_state liteav_ff_http_init_auth_state +#define sws_freeContext liteav_sws_freeContext +#define av_probe_input_format liteav_av_probe_input_format +#define vlc_css_parser_ParseBytes liteav_vlc_css_parser_ParseBytes +#define av_strireplace liteav_av_strireplace +#define ff_h264_luma_dc_dequant_idct_14_c liteav_ff_h264_luma_dc_dequant_idct_14_c +#define ff_put_h264_qpel4_mc03_10_mmxext liteav_ff_put_h264_qpel4_mc03_10_mmxext +#define avcodec_get_chroma_sub_sample liteav_avcodec_get_chroma_sub_sample +#define av_vlog liteav_av_vlog +#define ff_avg_h264_qpel16_mc00_10_sse2 liteav_ff_avg_h264_qpel16_mc00_10_sse2 +#define swr_get_out_samples liteav_swr_get_out_samples +#define ff_choose_timebase liteav_ff_choose_timebase +#define av_match_name liteav_av_match_name +#define ff_rtmp_packet_read_internal liteav_ff_rtmp_packet_read_internal +#define sws_setColorspaceDetails liteav_sws_setColorspaceDetails +#define ff_pred8x8l_horizontal_down_8_mmxext liteav_ff_pred8x8l_horizontal_down_8_mmxext +#define av_opt_eval_int64 liteav_av_opt_eval_int64 +#define ff_w7_min_w5 liteav_ff_w7_min_w5 +#define ff_put_h264_qpel4_mc12_10_mmxext liteav_ff_put_h264_qpel4_mc12_10_mmxext +#define av_buffersink_get_h liteav_av_buffersink_get_h +#define av_abuffersink_params_alloc liteav_av_abuffersink_params_alloc +#define avio_put_str liteav_avio_put_str +#define sws_isSupportedOutput liteav_sws_isSupportedOutput +#define ff_ps_hybrid_analysis_sse liteav_ff_ps_hybrid_analysis_sse +#define ff_h264_field_end liteav_ff_h264_field_end +#define ff_hevc_put_pixels_w16_neon_8_asm liteav_ff_hevc_put_pixels_w16_neon_8_asm +#define yyparse liteav_yyparse +#define av_sha512_update liteav_av_sha512_update +#define av_buffersink_get_w liteav_av_buffersink_get_w +#define av_vbprintf liteav_av_vbprintf 
+#define av_image_fill_linesizes liteav_av_image_fill_linesizes +#define ff_deblock_h_luma_mbaff_8_sse2 liteav_ff_deblock_h_luma_mbaff_8_sse2 +#define avcodec_find_encoder liteav_avcodec_find_encoder +#define av_frame_get_pkt_size liteav_av_frame_get_pkt_size +#define yyfree liteav_yyfree +#define ff_hevc_output_frame liteav_ff_hevc_output_frame +#define ff_avg_h264_qpel8or16_hv1_lowpass_op_mmxext liteav_ff_avg_h264_qpel8or16_hv1_lowpass_op_mmxext +#define ff_avg_qpel8_mc12_old_c liteav_ff_avg_qpel8_mc12_old_c +#define ff_ac3_frame_size_tab liteav_ff_ac3_frame_size_tab +#define ff_init_desc_hscale liteav_ff_init_desc_hscale +#define ff_dct_init liteav_ff_dct_init +#define ff_af_loudnorm liteav_ff_af_loudnorm +#define ff_ps_mul_pair_single_sse liteav_ff_ps_mul_pair_single_sse +#define ff_aac_latm_parser liteav_ff_aac_latm_parser +#define ff_h264_luma_dc_dequant_idct_10_c liteav_ff_h264_luma_dc_dequant_idct_10_c +#define avio_open_dyn_buf liteav_avio_open_dyn_buf +#define avcodec_get_pix_fmt_loss liteav_avcodec_get_pix_fmt_loss +#define sws_getCoefficients liteav_sws_getCoefficients +#define ff_merge_samplerates liteav_ff_merge_samplerates +#define avfilter_graph_parse liteav_avfilter_graph_parse +#define sws_cloneVec liteav_sws_cloneVec +#define ff_sbr_hf_apply_noise_3_neon liteav_ff_sbr_hf_apply_noise_3_neon +#define ff_parse_pixel_format liteav_ff_parse_pixel_format +#define ff_h264_alloc_tables liteav_ff_h264_alloc_tables +#define ff_put_h264_qpel8_mc22_10_sse2 liteav_ff_put_h264_qpel8_mc22_10_sse2 +#define ff_h264_luma_dc_dequant_idct_12_c liteav_ff_h264_luma_dc_dequant_idct_12_c +#define av_rc4_init liteav_av_rc4_init +#define ff_network_wait_fd_timeout liteav_ff_network_wait_fd_timeout +#define ff_nv12_to_abgr_neon liteav_ff_nv12_to_abgr_neon +#define ff_hevc_put_epel_uw_h_neon_8 liteav_ff_hevc_put_epel_uw_h_neon_8 +#define yylex_destroy liteav_yylex_destroy +#define sws_getCachedContext liteav_sws_getCachedContext +#define ff_avg_h264_qpel8_mc22_10_sse2 
liteav_ff_avg_h264_qpel8_mc22_10_sse2 +#define ffurl_size liteav_ffurl_size +#define swr_free liteav_swr_free +#define ff_simple_idct10_avx liteav_ff_simple_idct10_avx +#define ff_fft_calc_neon liteav_ff_fft_calc_neon +#define ff_rtmp_packet_read liteav_ff_rtmp_packet_read +#define ff_vorbiscomment_metadata_conv liteav_ff_vorbiscomment_metadata_conv +#define ff_asrc_abuffer liteav_ff_asrc_abuffer +#define ff_pw_4096 liteav_ff_pw_4096 +#define ff_hevc_put_pel_uw_pixels_w8_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w8_neon_8_asm +#define ff_h264_pred_direct_motion liteav_ff_h264_pred_direct_motion +#define ff_pw_4095 liteav_ff_pw_4095 +#define av_aes_size liteav_av_aes_size +#define ff_get_audio_buffer liteav_ff_get_audio_buffer +#define ff_hevc_put_qpel_h2v2_neon_8 liteav_ff_hevc_put_qpel_h2v2_neon_8 +#define ff_vorbis_stream_comment liteav_ff_vorbis_stream_comment +#define ff_hevc_put_pixels_w12_neon_8 liteav_ff_hevc_put_pixels_w12_neon_8 +#define ff_pack_2ch_int16_to_int16_u_sse2 liteav_ff_pack_2ch_int16_to_int16_u_sse2 +#define ff_put_no_rnd_qpel8_mc11_old_c liteav_ff_put_no_rnd_qpel8_mc11_old_c +#define ff_hevc_cabac_init liteav_ff_hevc_cabac_init +#define ff_h264_chroma422_dc_dequant_idct_9_c liteav_ff_h264_chroma422_dc_dequant_idct_9_c +#define av_frame_copy liteav_av_frame_copy +#define ff_codec_guid_get_id liteav_ff_codec_guid_get_id +#define ff_h263_decoder liteav_ff_h263_decoder +#define ff_h264_decode_extradata liteav_ff_h264_decode_extradata +#define ff_init_filters liteav_ff_init_filters +#define av_opt_get_double liteav_av_opt_get_double +#define ff_parse_sample_rate liteav_ff_parse_sample_rate +#define ff_ass_split_override_codes liteav_ff_ass_split_override_codes +#define ff_deblock_h_chroma422_8_sse2 liteav_ff_deblock_h_chroma422_8_sse2 +#define ff_rtmp_protocol liteav_ff_rtmp_protocol +#define ff_hevc_decode_nal_pps liteav_ff_hevc_decode_nal_pps +#define ffurl_read liteav_ffurl_read +#define av_get_channel_name liteav_av_get_channel_name +#define 
ff_crop_tab liteav_ff_crop_tab +#define ff_mpeg4_decode_video_packet_header liteav_ff_mpeg4_decode_video_packet_header +#define av_hwdevice_get_hwframe_constraints liteav_av_hwdevice_get_hwframe_constraints +#define ff_hevc_put_pixels_w24_neon_8_asm liteav_ff_hevc_put_pixels_w24_neon_8_asm +#define ff_rdft_calc_neon liteav_ff_rdft_calc_neon +#define ff_filter_graph_remove_filter liteav_ff_filter_graph_remove_filter +#define ff_ac3_at_decoder liteav_ff_ac3_at_decoder +#define ff_put_h264_qpel8_mc20_10_sse2 liteav_ff_put_h264_qpel8_mc20_10_sse2 +#define ff_hevc_put_pixels_w12_neon_8_asm liteav_ff_hevc_put_pixels_w12_neon_8_asm +#define ff_init_scantable_permutation liteav_ff_init_scantable_permutation +#define av_get_default_channel_layout liteav_av_get_default_channel_layout +#define ff_put_h264_qpel8_mc10_10_sse2_cache64 liteav_ff_put_h264_qpel8_mc10_10_sse2_cache64 +#define avio_wb24 liteav_avio_wb24 +#define av_display_rotation_get liteav_av_display_rotation_get +#define ff_make_format_list liteav_ff_make_format_list +#define ff_get_v_length liteav_ff_get_v_length +#define ff_filter_init_hw_frames liteav_ff_filter_init_hw_frames +#define ff_h264_muxer liteav_ff_h264_muxer +#define ff_unpack_6ch_float_to_int32_u_avx liteav_ff_unpack_6ch_float_to_int32_u_avx +#define av_color_transfer_name liteav_av_color_transfer_name +#define av_sha_alloc liteav_av_sha_alloc +#define ff_text_eof liteav_ff_text_eof +#define av_rc4_alloc liteav_av_rc4_alloc +#define text_style_duplicate liteav_text_style_duplicate +#define av_map_videotoolbox_format_from_pixfmt liteav_av_map_videotoolbox_format_from_pixfmt +#define ff_hevc_put_pixels_w64_neon_8_asm liteav_ff_hevc_put_pixels_w64_neon_8_asm +#define swri_resampler liteav_swri_resampler +#define ff_avg_h264_qpel4_mc23_10_mmxext liteav_ff_avg_h264_qpel4_mc23_10_mmxext +#define ff_h264_decode_ref_pic_marking liteav_ff_h264_decode_ref_pic_marking +#define ff_sws_init_swscale_aarch64 liteav_ff_sws_init_swscale_aarch64 +#define 
ff_avg_rv40_chroma_mc8_mmxext liteav_ff_avg_rv40_chroma_mc8_mmxext +#define ff_put_h264_qpel8_mc21_neon liteav_ff_put_h264_qpel8_mc21_neon +#define ff_hevc_pred_init liteav_ff_hevc_pred_init +#define ff_sbrdsp_init_aarch64 liteav_ff_sbrdsp_init_aarch64 +#define ff_put_h264_qpel16_mc31_10_sse2 liteav_ff_put_h264_qpel16_mc31_10_sse2 +#define ff_avg_h264_qpel4_mc00_10_mmxext liteav_ff_avg_h264_qpel4_mc00_10_mmxext +#define ff_ps_hybrid_synthesis_deint_sse liteav_ff_ps_hybrid_synthesis_deint_sse +#define av_rdft_end liteav_av_rdft_end +#define ff_avg_pixels16_x2_no_rnd_neon liteav_ff_avg_pixels16_x2_no_rnd_neon +#define ff_sbr_hf_gen_neon liteav_ff_sbr_hf_gen_neon +#define av_packet_shrink_side_data liteav_av_packet_shrink_side_data +#define ff_inlink_evaluate_timeline_at_frame liteav_ff_inlink_evaluate_timeline_at_frame +#define ff_cbpc_b_tab liteav_ff_cbpc_b_tab +#define ff_avg_h264_chroma_mc4_10_mmxext liteav_ff_avg_h264_chroma_mc4_10_mmxext +#define ff_h263_decode_init_vlc liteav_ff_h263_decode_init_vlc +#define ff_pred8x8_vertical_10_sse2 liteav_ff_pred8x8_vertical_10_sse2 +#define av_opt_eval_double liteav_av_opt_eval_double +#define ff_fdct_ifast liteav_ff_fdct_ifast +#define ff_h264_idct_add_10_sse2 liteav_ff_h264_idct_add_10_sse2 +#define ff_vector_fmul_add_neon liteav_ff_vector_fmul_add_neon +#define ff_rtmps_protocol liteav_ff_rtmps_protocol +#define ff_mpeg12_mbPatTable liteav_ff_mpeg12_mbPatTable +#define av_bsf_get_by_name liteav_av_bsf_get_by_name +#define ff_default_get_audio_buffer liteav_ff_default_get_audio_buffer +#define ff_amf_write_field_name liteav_ff_amf_write_field_name +#define ff_yuv422p_to_abgr_neon liteav_ff_yuv422p_to_abgr_neon +#define ff_graph_thread_free liteav_ff_graph_thread_free +#define av_register_codec_parser liteav_av_register_codec_parser +#define ff_avg_h264_qpel8_mc10_10_ssse3_cache64 liteav_ff_avg_h264_qpel8_mc10_10_ssse3_cache64 +#define av_image_fill_arrays liteav_av_image_fill_arrays +#define avfilter_all_channel_layouts 
liteav_avfilter_all_channel_layouts +#define av_log liteav_av_log +#define ff_network_close liteav_ff_network_close +#define av_pix_fmt_desc_next liteav_av_pix_fmt_desc_next +#define ff_vf_pad liteav_ff_vf_pad +#define av_timecode_check_frame_rate liteav_av_timecode_check_frame_rate +#define ff_mpeg4_rl_intra liteav_ff_mpeg4_rl_intra +#define ff_tls_open_underlying liteav_ff_tls_open_underlying +#define av_timecode_init_from_string liteav_av_timecode_init_from_string +#define ffio_read_indirect liteav_ffio_read_indirect +#define ff_h264_mb_sizes liteav_ff_h264_mb_sizes +#define ff_h263_pixel_aspect liteav_ff_h263_pixel_aspect +#define ff_pred8x8_top_dc_neon liteav_ff_pred8x8_top_dc_neon +#define ff_aac_kbd_long_960 liteav_ff_aac_kbd_long_960 +#define ff_hevc_put_pixels_w6_neon_8 liteav_ff_hevc_put_pixels_w6_neon_8 +#define ff_put_h264_qpel16_mc21_10_sse2 liteav_ff_put_h264_qpel16_mc21_10_sse2 +#define ff_deblock_h_luma_8_sse2 liteav_ff_deblock_h_luma_8_sse2 +#define ff_mpv_frame_start liteav_ff_mpv_frame_start +#define ff_avg_qpel16_mc32_old_c liteav_ff_avg_qpel16_mc32_old_c +#define av_opt_serialize liteav_av_opt_serialize +#define swr_convert_frame liteav_swr_convert_frame +#define ff_pack_8ch_int32_to_float_a_sse2 liteav_ff_pack_8ch_int32_to_float_a_sse2 +#define ff_hevc_epel_filters liteav_ff_hevc_epel_filters +#define rgb12tobgr12 liteav_rgb12tobgr12 +#define ff_alternate_horizontal_scan liteav_ff_alternate_horizontal_scan +#define av_image_check_size2 liteav_av_image_check_size2 +#define ff_mpv_idct_init liteav_ff_mpv_idct_init +#define av_memdup liteav_av_memdup +#define ff_ac3_enc_channel_map liteav_ff_ac3_enc_channel_map +#define ff_amf_write_object_end liteav_ff_amf_write_object_end +#define av_opt_get_channel_layout liteav_av_opt_get_channel_layout +#define ff_hevc_luma_mv_mvp_mode liteav_ff_hevc_luma_mv_mvp_mode +#define av_opt_eval_flags liteav_av_opt_eval_flags +#define ff_sine_64_fixed liteav_ff_sine_64_fixed +#define av_opt_find2 liteav_av_opt_find2 
+#define ff_subtitles_queue_seek liteav_ff_subtitles_queue_seek +#define av_tea_crypt liteav_av_tea_crypt +#define ff_simple_idct_add_neon liteav_ff_simple_idct_add_neon +#define ff_put_h264_qpel16_mc22_10_sse2 liteav_ff_put_h264_qpel16_mc22_10_sse2 +#define ff_float_to_int32_u_avx2 liteav_ff_float_to_int32_u_avx2 +#define ff_hevc_put_qpel_h3v1_neon_8 liteav_ff_hevc_put_qpel_h3v1_neon_8 +#define av_frame_set_sample_rate liteav_av_frame_set_sample_rate +#define ffio_open_null_buf liteav_ffio_open_null_buf +#define ff_ac3_bitrate_tab liteav_ff_ac3_bitrate_tab +#define ff_hpeldsp_init_x86 liteav_ff_hpeldsp_init_x86 +#define ff_interleaved_ue_golomb_vlc_code liteav_ff_interleaved_ue_golomb_vlc_code +#define ff_ac3_sample_rate_tab liteav_ff_ac3_sample_rate_tab +#define ff_pred8x8_dc_10_sse2 liteav_ff_pred8x8_dc_10_sse2 +#define sws_getDefaultFilter liteav_sws_getDefaultFilter +#define ff_shuffle_bytes_2103_mmxext liteav_ff_shuffle_bytes_2103_mmxext +#define ff_h264_biweight_16_ssse3 liteav_ff_h264_biweight_16_ssse3 +#define ff_put_pixels16_neon liteav_ff_put_pixels16_neon +#define av_murmur3_update liteav_av_murmur3_update +#define av_get_channel_description liteav_av_get_channel_description +#define av_frame_move_ref liteav_av_frame_move_ref +#define av_opt_set_sample_fmt liteav_av_opt_set_sample_fmt +#define ff_mov_get_channel_layout_tag liteav_ff_mov_get_channel_layout_tag +#define ff_h264_slice_context_init liteav_ff_h264_slice_context_init +#define rgb32tobgr15 liteav_rgb32tobgr15 +#define ff_shuffle_bytes_1230_ssse3 liteav_ff_shuffle_bytes_1230_ssse3 +#define rgb32tobgr16 liteav_rgb32tobgr16 +#define ff_pw_128 liteav_ff_pw_128 +#define ff_mpeg4_static_rl_table_store liteav_ff_mpeg4_static_rl_table_store +#define ff_mpadsp_apply_window_float liteav_ff_mpadsp_apply_window_float +#define ffio_open2_wrapper liteav_ffio_open2_wrapper +#define ff_vector_fmul_vfp liteav_ff_vector_fmul_vfp +#define ff_hevc_put_qpel_uw_h2v1_neon_8 liteav_ff_hevc_put_qpel_uw_h2v1_neon_8 
+#define av_opt_child_next liteav_av_opt_child_next +#define av_encryption_info_add_side_data liteav_av_encryption_info_add_side_data +#define ffurl_handshake liteav_ffurl_handshake +#define av_probe_input_format3 liteav_av_probe_input_format3 +#define av_image_fill_pointers liteav_av_image_fill_pointers +#define ff_pred8x8l_dc_8_mmxext liteav_ff_pred8x8l_dc_8_mmxext +#define ff_pred16x16_plane_h264_8_sse2 liteav_ff_pred16x16_plane_h264_8_sse2 +#define av_buffersink_get_hw_frames_ctx liteav_av_buffersink_get_hw_frames_ctx +#define ff_put_h264_qpel16_mc31_neon liteav_ff_put_h264_qpel16_mc31_neon +#define ff_avg_qpel8_mc31_old_c liteav_ff_avg_qpel8_mc31_old_c +#define yy_scan_buffer liteav_yy_scan_buffer +#define ff_avg_h264_chroma_mc8_rnd_mmxext liteav_ff_avg_h264_chroma_mc8_rnd_mmxext +#define avpriv_tempfile liteav_avpriv_tempfile +#define av_camellia_alloc liteav_av_camellia_alloc +#define ff_avg_h264_qpel8_mc10_10_sse2 liteav_ff_avg_h264_qpel8_mc10_10_sse2 +#define av_file_map liteav_av_file_map +#define av_encryption_info_alloc liteav_av_encryption_info_alloc +#define av_hmac_init liteav_av_hmac_init +#define av_hash_final liteav_av_hash_final +#define av_lfg_init liteav_av_lfg_init +#define avcodec_register liteav_avcodec_register +#define text_segment_delete liteav_text_segment_delete +#define ff_hevc_transform_16x16_neon_8_asm liteav_ff_hevc_transform_16x16_neon_8_asm +#define ff_mpadsp_apply_window_float_neon liteav_ff_mpadsp_apply_window_float_neon +#define ff_interleaved_golomb_vlc_len liteav_ff_interleaved_golomb_vlc_len +#define ff_hevc_decode_extradata liteav_ff_hevc_decode_extradata +#define ff_print_debug_info2 liteav_ff_print_debug_info2 +#define av_opt_get_int liteav_av_opt_get_int +#define ff_hevc_put_qpel_h2v3_neon_8 liteav_ff_hevc_put_qpel_h2v3_neon_8 +#define ff_hevc_put_pixels_w2_neon_8_asm liteav_ff_hevc_put_pixels_w2_neon_8_asm +#define av_opt_freep_ranges liteav_av_opt_freep_ranges +#define ff_avg_h264_qpel8_mc13_neon 
liteav_ff_avg_h264_qpel8_mc13_neon +#define avio_open liteav_avio_open +#define ff_h264_weight_8_10_sse2 liteav_ff_h264_weight_8_10_sse2 +#define ff_h264_weight_8_10_sse4 liteav_ff_h264_weight_8_10_sse4 +#define yyget_debug liteav_yyget_debug +#define av_write_frame liteav_av_write_frame +#define ff_hevc_put_qpel_hv_neon_8_wrapper liteav_ff_hevc_put_qpel_hv_neon_8_wrapper +#define avio_enum_protocols liteav_avio_enum_protocols +#define av_buffer_make_writable liteav_av_buffer_make_writable +#define ff_check_alignment liteav_ff_check_alignment +#define ff_put_pixels16_xy2_neon liteav_ff_put_pixels16_xy2_neon +#define ff_ebur128_loudness_window liteav_ff_ebur128_loudness_window +#define av_fifo_generic_peek_at liteav_av_fifo_generic_peek_at +#define ff_put_rv40_chroma_mc8_mmx liteav_ff_put_rv40_chroma_mc8_mmx +#define ff_h264_idct_add16intra_10_avx liteav_ff_h264_idct_add16intra_10_avx +#define ff_hevc_sao_offset_sign_decode liteav_ff_hevc_sao_offset_sign_decode +#define avio_context_free liteav_avio_context_free +#define ffio_open_dyn_packet_buf liteav_ffio_open_dyn_packet_buf +#define ff_avg_h264_qpel4_mc13_10_mmxext liteav_ff_avg_h264_qpel4_mc13_10_mmxext +#define ff_h264_parse_ref_count liteav_ff_h264_parse_ref_count +#define ff_init_scantable_permutation_x86 liteav_ff_init_scantable_permutation_x86 +#define sws_freeVec liteav_sws_freeVec +#define ff_af_amix liteav_ff_af_amix +#define avpriv_ac3_parse_header liteav_avpriv_ac3_parse_header +#define ff_mp3adu_decoder liteav_ff_mp3adu_decoder +#define ff_deblock_h_chroma422_intra_8_sse2 liteav_ff_deblock_h_chroma422_intra_8_sse2 +#define ff_interleave_add_packet liteav_ff_interleave_add_packet +#define ff_inlink_set_status liteav_ff_inlink_set_status +#define ff_cos_131072_fixed liteav_ff_cos_131072_fixed +#define av_compare_ts liteav_av_compare_ts +#define sws_getGaussianVec liteav_sws_getGaussianVec +#define ff_mov_read_stsd_entries liteav_ff_mov_read_stsd_entries +#define ff_pred8x8l_down_right_10_ssse3 
liteav_ff_pred8x8l_down_right_10_ssse3 +#define ff_psdsp_init_x86 liteav_ff_psdsp_init_x86 +#define ff_hevc_put_qpel_v_neon_8_wrapper liteav_ff_hevc_put_qpel_v_neon_8_wrapper +#define ff_dct_init_x86 liteav_ff_dct_init_x86 +#define ff_hevc_set_new_ref liteav_ff_hevc_set_new_ref +#define ff_fft_lut_init liteav_ff_fft_lut_init +#define av_packet_make_refcounted liteav_av_packet_make_refcounted +#define av_hmac_calc liteav_av_hmac_calc +#define av_dup_packet liteav_av_dup_packet +#define ff_swb_offset_960 liteav_ff_swb_offset_960 +#define ff_id3v2_match liteav_ff_id3v2_match +#define ff_put_h264_qpel16_mc20_10_ssse3_cache64 liteav_ff_put_h264_qpel16_mc20_10_ssse3_cache64 +#define ff_hevc_sao_edge_eo3_w32_neon_8 liteav_ff_hevc_sao_edge_eo3_w32_neon_8 +#define av_dirname liteav_av_dirname +#define ff_cos_16384_fixed liteav_ff_cos_16384_fixed +#define ff_avg_h264_qpel8_mc30_10_ssse3_cache64 liteav_ff_avg_h264_qpel8_mc30_10_ssse3_cache64 +#define avfilter_license liteav_avfilter_license +#define ff_pred4x4_tm_vp8_8_mmxext liteav_ff_pred4x4_tm_vp8_8_mmxext +#define av_bprintf liteav_av_bprintf +#define av_audio_fifo_size liteav_av_audio_fifo_size +#define ff_pred16x16_left_dc_neon liteav_ff_pred16x16_left_dc_neon +#define ff_mpadsp_init liteav_ff_mpadsp_init +#define ff_codec_movvideo_tags liteav_ff_codec_movvideo_tags +#define ff_videotoolbox_h264_decode_slice liteav_ff_videotoolbox_h264_decode_slice +#define ff_h264_sei_decode liteav_ff_h264_sei_decode +#define ff_videodsp_init_x86 liteav_ff_videodsp_init_x86 +#define ff_h264_decoder liteav_ff_h264_decoder +#define ff_unpack_2ch_int16_to_float_a_sse2 liteav_ff_unpack_2ch_int16_to_float_a_sse2 +#define ff_hevc_pred_init_aarch64 liteav_ff_hevc_pred_init_aarch64 +#define yy_delete_buffer liteav_yy_delete_buffer +#define ff_avc_parse_nal_units_buf liteav_ff_avc_parse_nal_units_buf +#define ff_vorbis_channel_layout_offsets liteav_ff_vorbis_channel_layout_offsets +#define ff_avg_qpel16_mc31_old_c 
liteav_ff_avg_qpel16_mc31_old_c +#define avio_find_protocol_name liteav_avio_find_protocol_name +#define ff_mpeg4video_split liteav_ff_mpeg4video_split +#define ff_int32_to_int16_a_sse2 liteav_ff_int32_to_int16_a_sse2 +#define av_opt_set_channel_layout liteav_av_opt_set_channel_layout +#define av_xtea_crypt liteav_av_xtea_crypt +#define ff_thread_decode_frame liteav_ff_thread_decode_frame +#define ff_avg_pixels4_l2_mmxext liteav_ff_avg_pixels4_l2_mmxext +#define av_opt_copy liteav_av_opt_copy +#define av_buffersink_get_frame liteav_av_buffersink_get_frame +#define ff_get_unscaled_swscale_aarch64 liteav_ff_get_unscaled_swscale_aarch64 +#define ff_fft_offsets_lut liteav_ff_fft_offsets_lut +#define yyget_in liteav_yyget_in +#define ff_hevc_res_scale_sign_flag liteav_ff_hevc_res_scale_sign_flag +#define ff_sine_32_fixed liteav_ff_sine_32_fixed +#define avfilter_graph_create_filter liteav_avfilter_graph_create_filter +#define ff_formats_unref liteav_ff_formats_unref +#define ff_ac3_rematrix_band_tab liteav_ff_ac3_rematrix_band_tab +#define ff_mpeg1_dc_scale_table liteav_ff_mpeg1_dc_scale_table +#define ff_yuv420p_to_argb_neon liteav_ff_yuv420p_to_argb_neon +#define ff_ass_get_dialog liteav_ff_ass_get_dialog +#define ff_deblock_v_luma_intra_8_sse2 liteav_ff_deblock_v_luma_intra_8_sse2 +#define ff_pred16x16_horizontal_8_ssse3 liteav_ff_pred16x16_horizontal_8_ssse3 +#define avpriv_io_move liteav_avpriv_io_move +#define ff_videodsp_init liteav_ff_videodsp_init +#define ff_framequeue_peek liteav_ff_framequeue_peek +#define ff_sine_window_init liteav_ff_sine_window_init +#define av_sha_init liteav_av_sha_init +#define ff_mpeg4videodec_static_init liteav_ff_mpeg4videodec_static_init +#define av_camellia_crypt liteav_av_camellia_crypt +#define sws_isSupportedEndiannessConversion liteav_sws_isSupportedEndiannessConversion +#define ff_imdct_half_c_fixed liteav_ff_imdct_half_c_fixed +#define ff_mp3float_decoder liteav_ff_mp3float_decoder +#define ff_int32_to_int16_u_mmx 
liteav_ff_int32_to_int16_u_mmx +#define ff_h264_idct_dc_add_10_mmxext liteav_ff_h264_idct_dc_add_10_mmxext +#define ff_sine_120 liteav_ff_sine_120 +#define av_read_image_line liteav_av_read_image_line +#define ff_faanidct_add liteav_ff_faanidct_add +#define ff_sine_128 liteav_ff_sine_128 +#define sws_init_context liteav_sws_init_context +#define ff_avg_pixels16_mmx liteav_ff_avg_pixels16_mmx +#define ff_pred8x8l_horizontal_up_10_sse2 liteav_ff_pred8x8l_horizontal_up_10_sse2 +#define ff_draw_round_to_sub liteav_ff_draw_round_to_sub +#define ff_intel_h263_decode_picture_header liteav_ff_intel_h263_decode_picture_header +#define avformat_alloc_output_context2 liteav_avformat_alloc_output_context2 +#define ff_h264_draw_horiz_band liteav_ff_h264_draw_horiz_band +#define ffurl_seek liteav_ffurl_seek +#define av_mallocz_array liteav_av_mallocz_array +#define ff_cos_16384 liteav_ff_cos_16384 +#define ff_hevc_put_qpel_v3_neon_8 liteav_ff_hevc_put_qpel_v3_neon_8 +#define ff_avg_h264_qpel8_mc20_10_ssse3_cache64 liteav_ff_avg_h264_qpel8_mc20_10_ssse3_cache64 +#define ff_update_link_current_pts liteav_ff_update_link_current_pts +#define av_frame_copy_props liteav_av_frame_copy_props +#define av_xtea_le_init liteav_av_xtea_le_init +#define ff_simple_idct248_put liteav_ff_simple_idct248_put +#define avpriv_align_put_bits liteav_avpriv_align_put_bits +#define ff_unpack_6ch_int32_to_float_a_sse2 liteav_ff_unpack_6ch_int32_to_float_a_sse2 +#define ff_af_dynaudnorm liteav_ff_af_dynaudnorm +#define av_log_format_line liteav_av_log_format_line +#define vlc_css_unquoted liteav_vlc_css_unquoted +#define ff_put_h264_chroma_mc2_neon liteav_ff_put_h264_chroma_mc2_neon +#define ff_put_h264_qpel8_mc01_neon liteav_ff_put_h264_qpel8_mc01_neon +#define av_murmur3_init_seeded liteav_av_murmur3_init_seeded +#define av_samples_set_silence liteav_av_samples_set_silence +#define ff_inlink_consume_frame liteav_ff_inlink_consume_frame +#define ff_thread_get_buffer liteav_ff_thread_get_buffer +#define 
ff_ebur128_add_frames_int liteav_ff_ebur128_add_frames_int +#define ff_hevc_v_loop_filter_chroma_neon liteav_ff_hevc_v_loop_filter_chroma_neon +#define ff_vorbis_comment liteav_ff_vorbis_comment +#define avfilter_make_format64_list liteav_avfilter_make_format64_list +#define ff_pred8x8l_horizontal_10_avx liteav_ff_pred8x8l_horizontal_10_avx +#define av_parser_parse2 liteav_av_parser_parse2 +#define ff_hevc_put_qpel_uw_pixels_w48_neon_8 liteav_ff_hevc_put_qpel_uw_pixels_w48_neon_8 +#define ff_mp3on4_decoder liteav_ff_mp3on4_decoder +#define ff_hpeldsp_init liteav_ff_hpeldsp_init +#define ff_h264_idct_dc_add_10_c liteav_ff_h264_idct_dc_add_10_c +#define ff_imdct_half_sse liteav_ff_imdct_half_sse +#define ff_vf_scale2ref liteav_ff_vf_scale2ref +#define ff_aac_kbd_long_1024_fixed liteav_ff_aac_kbd_long_1024_fixed +#define ff_h264_idct_add16_9_c liteav_ff_h264_idct_add16_9_c +#define ff_pack_8ch_float_to_int32_u_avx liteav_ff_pack_8ch_float_to_int32_u_avx +#define ff_avg_h264_qpel8_mc03_10_sse2 liteav_ff_avg_h264_qpel8_mc03_10_sse2 +#define ff_h264_idct_dc_add_12_c liteav_ff_h264_idct_dc_add_12_c +#define avio_seek liteav_avio_seek +#define av_rc4_crypt liteav_av_rc4_crypt +#define ff_h263_decode_picture_header liteav_ff_h263_decode_picture_header +#define ff_ps_hybrid_analysis_sse3 liteav_ff_ps_hybrid_analysis_sse3 +#define av_murmur3_init liteav_av_murmur3_init +#define ff_mpadsp_apply_window_fixed liteav_ff_mpadsp_apply_window_fixed +#define ff_h264_idct_dc_add_14_c liteav_ff_h264_idct_dc_add_14_c +#define ff_ac3_bap_tab liteav_ff_ac3_bap_tab +#define ff_avg_h264_qpel8_h_lowpass_mmxext liteav_ff_avg_h264_qpel8_h_lowpass_mmxext +#define ff_mdct15_init_x86 liteav_ff_mdct15_init_x86 +#define ff_mp4_parse_es_descr liteav_ff_mp4_parse_es_descr +#define ff_mp4_read_dec_config_descr liteav_ff_mp4_read_dec_config_descr +#define ff_sbrdsp_init liteav_ff_sbrdsp_init +#define ff_put_h264_chroma_mc8_10_sse2 liteav_ff_put_h264_chroma_mc8_10_sse2 +#define ff_h264_sei_stereo_mode 
liteav_ff_h264_sei_stereo_mode +#define rgb16tobgr32 liteav_rgb16tobgr32 +#define avpriv_pix_fmt_bps_avi liteav_avpriv_pix_fmt_bps_avi +#define av_utf8_decode liteav_av_utf8_decode +#define ff_avio_class liteav_ff_avio_class +#define ff_pack_8ch_float_to_float_u_avx liteav_ff_pack_8ch_float_to_float_u_avx +#define ff_hevc_idct_32x32_dc_neon_8_asm liteav_ff_hevc_idct_32x32_dc_neon_8_asm +#define ff_mpeg2_aspect liteav_ff_mpeg2_aspect +#define ff_avg_h264_qpel16_mc30_neon liteav_ff_avg_h264_qpel16_mc30_neon +#define av_opt_get_dict_val liteav_av_opt_get_dict_val +#define ff_h263_inter_MCBPC_bits liteav_ff_h263_inter_MCBPC_bits +#define ff_subtitles_queue_insert liteav_ff_subtitles_queue_insert +#define avcodec_descriptor_next liteav_avcodec_descriptor_next +#define ff_amr_nb_at_decoder liteav_ff_amr_nb_at_decoder +#define ff_h264_quant_rem6 liteav_ff_h264_quant_rem6 +#define ff_mdct_calc_c_fixed_32 liteav_ff_mdct_calc_c_fixed_32 +#define ff_connect_parallel liteav_ff_connect_parallel +#define ff_libfdk_aac_encoder liteav_ff_libfdk_aac_encoder +#define ff_w4_min_w2_hi liteav_ff_w4_min_w2_hi +#define ff_pw_512 liteav_ff_pw_512 +#define avio_rb16 liteav_avio_rb16 +#define ff_unpack_6ch_float_to_float_u_sse liteav_ff_unpack_6ch_float_to_float_u_sse +#define ff_deblock_v_chroma_10_sse2 liteav_ff_deblock_v_chroma_10_sse2 +#define ff_copy_rectangle2 liteav_ff_copy_rectangle2 +#define ff_mpa_enwindow liteav_ff_mpa_enwindow +#define ff_h264_dequant4_coeff_init liteav_ff_h264_dequant4_coeff_init +#define avio_open_dir liteav_avio_open_dir +#define ff_h264_pred_init liteav_ff_h264_pred_init +#define ff_hevc_mp4toannexb_bsf liteav_ff_hevc_mp4toannexb_bsf +#define ff_blend_rectangle liteav_ff_blend_rectangle +#define ff_avc_find_startcode liteav_ff_avc_find_startcode +#define ff_h264_idct_add16intra_8_sse2 liteav_ff_h264_idct_add16intra_8_sse2 +#define ff_pred8x8l_horizontal_down_8_sse2 liteav_ff_pred8x8l_horizontal_down_8_sse2 +#define ff_pred16x16_tm_vp8_8_sse2 
liteav_ff_pred16x16_tm_vp8_8_sse2 +#define ff_id3v2_4_metadata_conv liteav_ff_id3v2_4_metadata_conv +#define ff_ue_golomb_len liteav_ff_ue_golomb_len +#define ff_h264_b_sub_mb_type_info liteav_ff_h264_b_sub_mb_type_info +#define ff_rgb24toyv12_c liteav_ff_rgb24toyv12_c +#define sws_isSupportedInput liteav_sws_isSupportedInput +#define ff_resample_common_apply_filter_x4_float_neon liteav_ff_resample_common_apply_filter_x4_float_neon +#define avpriv_scalarproduct_float_c liteav_avpriv_scalarproduct_float_c +#define swri_rematrix liteav_swri_rematrix +#define av_packet_free liteav_av_packet_free +#define ff_deblock_h_chroma_intra_8_avx liteav_ff_deblock_h_chroma_intra_8_avx +#define ff_framequeue_free liteav_ff_framequeue_free +#define ff_pack_2ch_int32_to_int16_a_sse2 liteav_ff_pack_2ch_int32_to_int16_a_sse2 +#define ff_aac_eld_window_480_fixed liteav_ff_aac_eld_window_480_fixed +#define av_mediacodec_default_free liteav_av_mediacodec_default_free +#define av_strtok liteav_av_strtok +#define ff_pred8x8l_horizontal_8_mmxext liteav_ff_pred8x8l_horizontal_8_mmxext +#define ff_avs3_profiles liteav_ff_avs3_profiles +#define ff_w5_plus_w7 liteav_ff_w5_plus_w7 +#define ff_nv12_to_bgra_neon liteav_ff_nv12_to_bgra_neon +#define ff_vorbiscomment_write liteav_ff_vorbiscomment_write +#define ff_hevc_put_qpel_uw_h3_neon_8 liteav_ff_hevc_put_qpel_uw_h3_neon_8 +#define ff_avg_h264_qpel16_mc20_neon liteav_ff_avg_h264_qpel16_mc20_neon +#define av_xtea_init liteav_av_xtea_init +#define ff_pred8x8_vertical_8_mmx liteav_ff_pred8x8_vertical_8_mmx +#define ff_deblock_h_luma_intra_10_avx liteav_ff_deblock_h_luma_intra_10_avx +#define ff_hevcdsp_init_neon_asm liteav_ff_hevcdsp_init_neon_asm +#define avfilter_configuration liteav_avfilter_configuration +#define ff_w7_plus_w3_hi liteav_ff_w7_plus_w3_hi +#define ff_hevc_put_epel_uw_pixels_w16_neon_8 liteav_ff_hevc_put_epel_uw_pixels_w16_neon_8 +#define ff_hevc_transform_16x16_neon_8 liteav_ff_hevc_transform_16x16_neon_8 +#define 
av_frame_set_pkt_duration liteav_av_frame_set_pkt_duration +#define ff_hevc_part_mode_decode liteav_ff_hevc_part_mode_decode +#define ff_ps_stereo_interpolate_ipdopd_neon liteav_ff_ps_stereo_interpolate_ipdopd_neon +#define ff_h264_idct_add_8_sse2 liteav_ff_h264_idct_add_8_sse2 +#define av_frame_get_metadata liteav_av_frame_get_metadata +#define ff_hevc_put_qpel_h1v1_neon_8 liteav_ff_hevc_put_qpel_h1v1_neon_8 +#define ff_fft_init_fixed_32 liteav_ff_fft_init_fixed_32 +#define av_bsf_list_parse_str liteav_av_bsf_list_parse_str +#define ff_avg_h264_qpel16_mc30_10_sse2 liteav_ff_avg_h264_qpel16_mc30_10_sse2 +#define ff_int16_to_int32_a_sse2 liteav_ff_int16_to_int32_a_sse2 +#define ff_hevc_flush_dpb liteav_ff_hevc_flush_dpb +#define yyset_column liteav_yyset_column +#define ff_request_frame liteav_ff_request_frame +#define ff_pack_2ch_int16_to_int32_u_sse2 liteav_ff_pack_2ch_int16_to_int32_u_sse2 +#define ff_mpa_synth_filter_float liteav_ff_mpa_synth_filter_float +#define ffio_fill liteav_ffio_fill +#define ff_mov_cenc_write_sinf_tag liteav_ff_mov_cenc_write_sinf_tag +#define av_find_input_format liteav_av_find_input_format +#define ff_mpv_common_init_neon liteav_ff_mpv_common_init_neon +#define ff_dct32_float liteav_ff_dct32_float +#define av_oformat_next liteav_av_oformat_next +#define av_audio_fifo_peek_at liteav_av_audio_fifo_peek_at +#define ff_put_h264_qpel4_hv_lowpass_v_mmxext liteav_ff_put_h264_qpel4_hv_lowpass_v_mmxext +#define av_pix_fmt_swap_endianness liteav_av_pix_fmt_swap_endianness +#define ff_hevc_pred_angular_16x16_v_neon_8 liteav_ff_hevc_pred_angular_16x16_v_neon_8 +#define ff_mpv_common_frame_size_change liteav_ff_mpv_common_frame_size_change +#define ff_h264_idct_add_8_c liteav_ff_h264_idct_add_8_c +#define av_cast5_crypt liteav_av_cast5_crypt +#define ff_h264_weight_4_mmxext liteav_ff_h264_weight_4_mmxext +#define ff_graph_thread_init liteav_ff_graph_thread_init +#define av_filter_iterate liteav_av_filter_iterate +#define 
ff_avg_h264_qpel4_h_lowpass_l2_mmxext liteav_ff_avg_h264_qpel4_h_lowpass_l2_mmxext +#define ff_inlink_process_commands liteav_ff_inlink_process_commands +#define ff_pred8x8_hor_neon liteav_ff_pred8x8_hor_neon +#define ff_aac_codebook_vectors liteav_ff_aac_codebook_vectors +#define avcodec_encode_subtitle liteav_avcodec_encode_subtitle +#define ff_hevc_ref_idx_lx_decode liteav_ff_hevc_ref_idx_lx_decode +#define uyvytoyuv422 liteav_uyvytoyuv422 +#define ff_hevc_sao_band_w32_neon_8 liteav_ff_hevc_sao_band_w32_neon_8 +#define ff_hevc_pred_init_neon_intrinsics liteav_ff_hevc_pred_init_neon_intrinsics +#define ff_read_riff_info liteav_ff_read_riff_info +#define ff_mpeg_ref_picture liteav_ff_mpeg_ref_picture +#define av_d2q liteav_av_d2q +#define av_stristr liteav_av_stristr +#define ff_int32_to_int16_a_mmx liteav_ff_int32_to_int16_a_mmx +#define av_fifo_generic_peek liteav_av_fifo_generic_peek +#define ff_all_samplerates liteav_ff_all_samplerates +#define ff_pack_8ch_int32_to_float_u_sse2 liteav_ff_pack_8ch_int32_to_float_u_sse2 +#define ff_pred8x8_plane_8_mmxext liteav_ff_pred8x8_plane_8_mmxext +#define ff_simple_idct48_add liteav_ff_simple_idct48_add +#define av_image_check_sar liteav_av_image_check_sar +#define av_copy_packet_side_data liteav_av_copy_packet_side_data +#define ff_parse_specific_params liteav_ff_parse_specific_params +#define swri_oldapi_conv_fltp_to_s16_2ch_neon liteav_swri_oldapi_conv_fltp_to_s16_2ch_neon +#define ff_avfilter_link_set_out_status liteav_ff_avfilter_link_set_out_status +#define ff_deblock_v_luma_intra_10_avx liteav_ff_deblock_v_luma_intra_10_avx +#define ff_av1_profiles liteav_ff_av1_profiles +#define avcodec_find_best_pix_fmt_of_2 liteav_avcodec_find_best_pix_fmt_of_2 +#define swri_realloc_audio liteav_swri_realloc_audio +#define ff_add_format liteav_ff_add_format +#define ff_pred8x8l_vertical_10_avx liteav_ff_pred8x8l_vertical_10_avx +#define av_strtod liteav_av_strtod +#define av_encryption_init_info_get_side_data 
liteav_av_encryption_init_info_get_side_data +#define ff_avg_h264_qpel8_mc20_neon liteav_ff_avg_h264_qpel8_mc20_neon +#define ff_pred16x16_tm_vp8_8_mmxext liteav_ff_pred16x16_tm_vp8_8_mmxext +#define avformat_get_riff_audio_tags liteav_avformat_get_riff_audio_tags +#define ff_rotate_slice liteav_ff_rotate_slice +#define ff_hevc_mvp_lx_flag_decode liteav_ff_hevc_mvp_lx_flag_decode +#define ff_imdct_half_c_fixed_32 liteav_ff_imdct_half_c_fixed_32 +#define swr_config_frame liteav_swr_config_frame +#define av_guess_codec liteav_av_guess_codec +#define ff_check_pixfmt_descriptors liteav_ff_check_pixfmt_descriptors +#define ff_http_auth_handle_header liteav_ff_http_auth_handle_header +#define ff_hevc_put_pixels_w32_neon_8 liteav_ff_hevc_put_pixels_w32_neon_8 +#define ff_unpack_2ch_int32_to_int32_a_sse2 liteav_ff_unpack_2ch_int32_to_int32_a_sse2 +#define ff_flac_get_max_frame_size liteav_ff_flac_get_max_frame_size +#define ff_subtitles_read_chunk liteav_ff_subtitles_read_chunk +#define vlc_css_selector_New liteav_vlc_css_selector_New +#define av_buffersrc_add_frame liteav_av_buffersrc_add_frame +#define ff_sine_256 liteav_ff_sine_256 +#define ff_put_h264_qpel16_mc30_10_ssse3_cache64 liteav_ff_put_h264_qpel16_mc30_10_ssse3_cache64 +#define ff_h263_show_pict_info liteav_ff_h263_show_pict_info +#define ff_jref_idct_put liteav_ff_jref_idct_put +#define ff_rtmpte_protocol liteav_ff_rtmpte_protocol +#define ff_formats_changeref liteav_ff_formats_changeref +#define ff_avg_pixels16_mmxext liteav_ff_avg_pixels16_mmxext +#define av_hwdevice_ctx_alloc liteav_av_hwdevice_ctx_alloc +#define ff_zigzag_direct liteav_ff_zigzag_direct +#define ff_get_codec_guid liteav_ff_get_codec_guid +#define ff_h263_loop_filter_strength liteav_ff_h263_loop_filter_strength +#define ff_inlink_queued_frames liteav_ff_inlink_queued_frames +#define ff_network_sleep_interruptible liteav_ff_network_sleep_interruptible +#define ff_hevc_put_qpel_uw_h1v3_neon_8 liteav_ff_hevc_put_qpel_uw_h1v3_neon_8 +#define 
ff_put_guid liteav_ff_put_guid +#define av_bsf_get_class liteav_av_bsf_get_class +#define ff_hwframe_map_create liteav_ff_hwframe_map_create +#define ff_amf_read_null liteav_ff_amf_read_null +#define ff_aac_num_swb_512 liteav_ff_aac_num_swb_512 +#define ff_hevc_put_pel_uw_pixels_w48_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w48_neon_8_asm +#define ff_sine_8192 liteav_ff_sine_8192 +#define vlc_css_selector_AddSpecifier liteav_vlc_css_selector_AddSpecifier +#define av_ripemd_size liteav_av_ripemd_size +#define ff_vf_crop liteav_ff_vf_crop +#define ff_float_to_int16_a_sse2 liteav_ff_float_to_int16_a_sse2 +#define ff_h264_h_loop_filter_chroma_neon liteav_ff_h264_h_loop_filter_chroma_neon +#define ff_hevc_cbf_luma_decode liteav_ff_hevc_cbf_luma_decode +#define av_frame_unref liteav_av_frame_unref +#define ff_rdft_end liteav_ff_rdft_end +#define ff_put_qpel16_mc12_old_c liteav_ff_put_qpel16_mc12_old_c +#define vlc_css_parser_AddRule liteav_vlc_css_parser_AddRule +#define ff_mov_add_hinted_packet liteav_ff_mov_add_hinted_packet +#define ff_socket_nonblock liteav_ff_socket_nonblock +#define ff_put_h264_qpel16_mc23_10_sse2 liteav_ff_put_h264_qpel16_mc23_10_sse2 +#define ff_er_frame_start liteav_ff_er_frame_start +#define avio_free_directory_entry liteav_avio_free_directory_entry +#define ff_fill_line_with_color liteav_ff_fill_line_with_color +#define av_bitstream_filter_filter liteav_av_bitstream_filter_filter +#define av_bprint_channel_layout liteav_av_bprint_channel_layout +#define ff_pcm_read_packet liteav_ff_pcm_read_packet +#define av_bitstream_filter_next liteav_av_bitstream_filter_next +#define ff_avg_h264_qpel8_mc23_10_sse2 liteav_ff_avg_h264_qpel8_mc23_10_sse2 +#define av_opt_eval_float liteav_av_opt_eval_float +#define avformat_get_mov_video_tags liteav_avformat_get_mov_video_tags +#define ff_h264_idct_add8_422_8_c liteav_ff_h264_idct_add8_422_8_c +#define ff_put_h264_qpel8_mc13_neon liteav_ff_put_h264_qpel8_mc13_neon +#define ff_cos_128_fixed 
liteav_ff_cos_128_fixed +#define avcodec_find_decoder_by_name liteav_avcodec_find_decoder_by_name +#define avpriv_slicethread_execute liteav_avpriv_slicethread_execute +#define ff_ssa_decoder liteav_ff_ssa_decoder +#define av_audio_fifo_alloc liteav_av_audio_fifo_alloc +#define ff_unpack_6ch_float_to_float_a_sse liteav_ff_unpack_6ch_float_to_float_a_sse +#define av_dct_calc liteav_av_dct_calc +#define ff_framesync_init liteav_ff_framesync_init +#define ff_hevc_pred_angular_8x8_v_neon_8 liteav_ff_hevc_pred_angular_8x8_v_neon_8 +#define av_find_info_tag liteav_av_find_info_tag +#define av_sha512_final liteav_av_sha512_final +#define swr_is_initialized liteav_swr_is_initialized +#define av_mastering_display_metadata_alloc liteav_av_mastering_display_metadata_alloc +#define av_filter_ffversion liteav_av_filter_ffversion +#define swr_init liteav_swr_init +#define ff_pred4x4_down_right_10_sse2 liteav_ff_pred4x4_down_right_10_sse2 +#define av_dict_free liteav_av_dict_free +#define ff_id3v2_write_apic liteav_ff_id3v2_write_apic +#define ff_codec_movdata_tags liteav_ff_codec_movdata_tags +#define ff_fft_init_x86 liteav_ff_fft_init_x86 +#define ff_hevc_put_epel_uw_bi_hv_neon_8 liteav_ff_hevc_put_epel_uw_bi_hv_neon_8 +#define av_memcpy_backptr liteav_av_memcpy_backptr +#define ff_put_h264_qpel16_mc01_10_sse2 liteav_ff_put_h264_qpel16_mc01_10_sse2 +#define yy_scan_bytes liteav_yy_scan_bytes +#define ff_avg_h264_chroma_mc4_3dnow liteav_ff_avg_h264_chroma_mc4_3dnow +#define ff_https_protocol liteav_ff_https_protocol +#define av_map_videotoolbox_format_to_pixfmt liteav_av_map_videotoolbox_format_to_pixfmt +#define av_opt_set_defaults liteav_av_opt_set_defaults +#define ff_text_init_avio liteav_ff_text_init_avio +#define av_opt_set_dict_val liteav_av_opt_set_dict_val +#define ff_put_no_rnd_qpel16_mc31_old_c liteav_ff_put_no_rnd_qpel16_mc31_old_c +#define av_frame_apply_cropping liteav_av_frame_apply_cropping +#define avfilter_inout_free liteav_avfilter_inout_free +#define 
av_dynarray2_add liteav_av_dynarray2_add +#define av_get_extended_channel_layout liteav_av_get_extended_channel_layout +#define ff_log2_tab liteav_ff_log2_tab +#define ff_init_desc_fmt_convert liteav_ff_init_desc_fmt_convert +#define ff_get_unscaled_swscale liteav_ff_get_unscaled_swscale +#define ff_shuffle_bytes_2103_ssse3 liteav_ff_shuffle_bytes_2103_ssse3 +#define ff_listen liteav_ff_listen +#define ff_hevc_sao_edge_eo1_w32_neon_8 liteav_ff_hevc_sao_edge_eo1_w32_neon_8 +#define ff_hevc_sao_band_filter_8_neon_asm liteav_ff_hevc_sao_band_filter_8_neon_asm +#define av_packet_alloc liteav_av_packet_alloc +#define ff_avg_h264_qpel8_mc20_10_sse2 liteav_ff_avg_h264_qpel8_mc20_10_sse2 +#define ff_h264chroma_init liteav_ff_h264chroma_init +#define ff_put_h264_qpel8_mc21_10_sse2 liteav_ff_put_h264_qpel8_mc21_10_sse2 +#define ff_h263_resync liteav_ff_h263_resync +#define ff_put_h264_qpel8_h_lowpass_mmxext liteav_ff_put_h264_qpel8_h_lowpass_mmxext +#define ff_unpack_2ch_int32_to_float_a_sse2 liteav_ff_unpack_2ch_int32_to_float_a_sse2 +#define av_gcd liteav_av_gcd +#define ff_ps_add_squares_neon liteav_ff_ps_add_squares_neon +#define ff_free_vlc liteav_ff_free_vlc +#define ff_h264_demuxer liteav_ff_h264_demuxer +#define av_usleep liteav_av_usleep +#define ff_deblock_h_luma_mbaff_8_avx liteav_ff_deblock_h_luma_mbaff_8_avx +#define ff_int32_to_float_u_avx liteav_ff_int32_to_float_u_avx +#define ff_pred16x16_dc_10_sse2 liteav_ff_pred16x16_dc_10_sse2 +#define ff_ac3_slow_decay_tab liteav_ff_ac3_slow_decay_tab +#define avfilter_graph_send_command liteav_avfilter_graph_send_command +#define avpriv_mpeg4audio_sample_rates liteav_avpriv_mpeg4audio_sample_rates +#define ff_null_get_video_buffer liteav_ff_null_get_video_buffer +#define ff_swb_offset_480 liteav_ff_swb_offset_480 +#define ff_hevc_put_pel_bi_neon_8_asm liteav_ff_hevc_put_pel_bi_neon_8_asm +#define ff_eac3_custom_channel_map_locations liteav_ff_eac3_custom_channel_map_locations +#define av_log_get_level 
liteav_av_log_get_level +#define av_mastering_display_metadata_create_side_data liteav_av_mastering_display_metadata_create_side_data +#define ff_ebur128_loudness_global_multiple liteav_ff_ebur128_loudness_global_multiple +#define ff_mov_cenc_avc_write_nal_units liteav_ff_mov_cenc_avc_write_nal_units +#define ff_ebur128_add_frames_double liteav_ff_ebur128_add_frames_double +#define ff_simple_idct84_add liteav_ff_simple_idct84_add +#define ff_mpa_synth_init_fixed liteav_ff_mpa_synth_init_fixed +#define ff_avg_h264_qpel8_mc31_neon liteav_ff_avg_h264_qpel8_mc31_neon +#define ff_hevc_pred_planar_16x16_neon_8 liteav_ff_hevc_pred_planar_16x16_neon_8 +#define ff_deblock_h_chroma_8_sse2 liteav_ff_deblock_h_chroma_8_sse2 +#define ff_hevc_put_qpel_v2_neon_8 liteav_ff_hevc_put_qpel_v2_neon_8 +#define ff_all_channel_layouts liteav_ff_all_channel_layouts +#define ff_pred16x16_top_dc_neon liteav_ff_pred16x16_top_dc_neon +#define av_malloc_array liteav_av_malloc_array +#define ff_mp4_obj_type liteav_ff_mp4_obj_type +#define ff_put_vc1_chroma_mc8_nornd_mmx liteav_ff_put_vc1_chroma_mc8_nornd_mmx +#define av_frame_new_side_data_from_buf liteav_av_frame_new_side_data_from_buf +#define ff_mpeg_flush liteav_ff_mpeg_flush +#define av_encryption_info_free liteav_av_encryption_info_free +#define av_parse_cpu_flags liteav_av_parse_cpu_flags +#define ff_avg_h264_qpel16_mc10_neon liteav_ff_avg_h264_qpel16_mc10_neon +#define ff_avg_h264_qpel16_mc02_neon liteav_ff_avg_h264_qpel16_mc02_neon +#define avfilter_graph_queue_command liteav_avfilter_graph_queue_command +#define avpriv_copy_bits liteav_avpriv_copy_bits +#define av_malloc liteav_av_malloc +#define ff_avg_pixels8_mmx liteav_ff_avg_pixels8_mmx +#define ff_sine_256_fixed liteav_ff_sine_256_fixed +#define av_hwframe_transfer_get_formats liteav_av_hwframe_transfer_get_formats +#define av_log_set_flags liteav_av_log_set_flags +#define ff_int16_to_int32_a_mmx liteav_ff_int16_to_int32_a_mmx +#define ff_amf_write_bool liteav_ff_amf_write_bool 
+#define avio_rb24 liteav_avio_rb24 +#define ff_copy_rectangle liteav_ff_copy_rectangle +#define avpriv_split_xiph_headers liteav_avpriv_split_xiph_headers +#define ff_aac_eld_window_512_fixed liteav_ff_aac_eld_window_512_fixed +#define ff_avg_vc1_chroma_mc8_nornd_ssse3 liteav_ff_avg_vc1_chroma_mc8_nornd_ssse3 +#define ff_rl_intra_aic liteav_ff_rl_intra_aic +#define avfilter_link_free liteav_avfilter_link_free +#define ff_weight_h264_pixels_16_neon liteav_ff_weight_h264_pixels_16_neon +#define ff_prefetch_aarch64 liteav_ff_prefetch_aarch64 +#define vlc_css_unescape liteav_vlc_css_unescape +#define av_tea_init liteav_av_tea_init +#define ff_avg_h264_qpel16_mc11_neon liteav_ff_avg_h264_qpel16_mc11_neon +#define av_buffersrc_parameters_set liteav_av_buffersrc_parameters_set +#define av_picture_crop liteav_av_picture_crop +#define ff_h264_decode_mb_cavlc liteav_ff_h264_decode_mb_cavlc +#define ff_simple_idct_add_int16_8bit liteav_ff_simple_idct_add_int16_8bit +#define ff_put_h264_qpel16_mc20_neon liteav_ff_put_h264_qpel16_mc20_neon +#define ff_pred16x16_horizontal_8_mmxext liteav_ff_pred16x16_horizontal_8_mmxext +#define av_fast_malloc liteav_av_fast_malloc +#define ff_put_h264_qpel8_mc02_10_sse2 liteav_ff_put_h264_qpel8_mc02_10_sse2 +#define ff_pack_6ch_int32_to_float_a_avx liteav_ff_pack_6ch_int32_to_float_a_avx +#define ff_thread_video_encode_frame liteav_ff_thread_video_encode_frame +#define ff_avg_h264_qpel4_mc32_10_mmxext liteav_ff_avg_h264_qpel4_mc32_10_mmxext +#define ff_jpeg2000_profiles liteav_ff_jpeg2000_profiles +#define avio_size liteav_avio_size +#define ff_fft_calc_avx liteav_ff_fft_calc_avx +#define av_aes_alloc liteav_av_aes_alloc +#define ff_pw_1019 liteav_ff_pw_1019 +#define ff_sqrt_tab liteav_ff_sqrt_tab +#define ff_unpack_2ch_int32_to_int16_a_sse2 liteav_ff_unpack_2ch_int32_to_int16_a_sse2 +#define ff_mpeg12_vlc_dc_lum_code liteav_ff_mpeg12_vlc_dc_lum_code +#define av_sub_i liteav_av_sub_i +#define ff_nv21_to_argb_neon liteav_ff_nv21_to_argb_neon 
+#define avio_alloc_context liteav_avio_alloc_context +#define avfilter_inout_alloc liteav_avfilter_inout_alloc +#define ff_imdct36_float_ssse3 liteav_ff_imdct36_float_ssse3 +#define ff_sbr_qmf_pre_shuffle_neon liteav_ff_sbr_qmf_pre_shuffle_neon +#define ff_mdct_init_fixed_32 liteav_ff_mdct_init_fixed_32 +#define ff_flac_lpc_16_arm liteav_ff_flac_lpc_16_arm +#define ff_avg_pixels16_x2_neon liteav_ff_avg_pixels16_x2_neon +#define ff_put_qpel8_mc12_old_c liteav_ff_put_qpel8_mc12_old_c +#define ff_mpeg4_frame_end liteav_ff_mpeg4_frame_end +#define av_sub_q liteav_av_sub_q +#define avpriv_register_devices liteav_avpriv_register_devices +#define ff_sine_128_fixed liteav_ff_sine_128_fixed +#define av_opt_set_bin liteav_av_opt_set_bin +#define ff_deblock_v_chroma_intra_8_mmxext liteav_ff_deblock_v_chroma_intra_8_mmxext +#define ff_h264_idct8_dc_add_10_avx liteav_ff_h264_idct8_dc_add_10_avx +#define ff_h264chroma_init_aarch64 liteav_ff_h264chroma_init_aarch64 +#define ff_raw_data_read_header liteav_ff_raw_data_read_header +#define swresample_license liteav_swresample_license +#define ff_put_h264_qpel16_mc32_neon liteav_ff_put_h264_qpel16_mc32_neon +#define ff_cos_65536_fixed liteav_ff_cos_65536_fixed +#define ff_pw_15 liteav_ff_pw_15 +#define ff_pw_16 liteav_ff_pw_16 +#define ff_pw_17 liteav_ff_pw_17 +#define ff_h264_remove_all_refs liteav_ff_h264_remove_all_refs +#define avio_put_str16le liteav_avio_put_str16le +#define webvtt_FillStyleFromCssDeclaration liteav_webvtt_FillStyleFromCssDeclaration +#define avpriv_float_dsp_alloc liteav_avpriv_float_dsp_alloc +#define codec_ism_tags liteav_codec_ism_tags +#define ff_raw_read_partial_packet liteav_ff_raw_read_partial_packet +#define av_pix_fmt_desc_get_id liteav_av_pix_fmt_desc_get_id +#define ff_pred8x8_tm_vp8_8_ssse3 liteav_ff_pred8x8_tm_vp8_8_ssse3 +#define text_segment_new liteav_text_segment_new +#define ff_ebur128_sample_peak liteav_ff_ebur128_sample_peak +#define ff_framesync_dualinput_get_writable 
liteav_ff_framesync_dualinput_get_writable +#define ff_h264_idct8_dc_add_8_mmxext liteav_ff_h264_idct8_dc_add_8_mmxext +#define ffurl_shutdown liteav_ffurl_shutdown +#define ff_h264_idct8_add4_8_mmx liteav_ff_h264_idct8_add4_8_mmx +#define ff_avs3_muxer liteav_ff_avs3_muxer +#define ff_put_h264_qpel16_mc30_10_sse2_cache64 liteav_ff_put_h264_qpel16_mc30_10_sse2_cache64 +#define av_strstart liteav_av_strstart +#define ff_h264_luma_dc_dequant_idct_9_c liteav_ff_h264_luma_dc_dequant_idct_9_c +#define ff_h264_pred_init_aarch64 liteav_ff_h264_pred_init_aarch64 +#define av_free liteav_av_free +#define ff_simple_idct12_put_avx liteav_ff_simple_idct12_put_avx +#define av_aes_crypt liteav_av_aes_crypt +#define ff_hevc_dsp_init_aarch64 liteav_ff_hevc_dsp_init_aarch64 +#define parse_sequence_header_info liteav_parse_sequence_header_info +#define ff_hevc_transform_8x8_neon_8 liteav_ff_hevc_transform_8x8_neon_8 +#define ff_put_qpel8_mc31_old_c liteav_ff_put_qpel8_mc31_old_c +#define avio_wl32 liteav_avio_wl32 +#define ff_mov_cenc_avc_parse_nal_units liteav_ff_mov_cenc_avc_parse_nal_units +#define ff_h264_chroma422_dc_dequant_idct_10_c liteav_ff_h264_chroma422_dc_dequant_idct_10_c +#define av_stereo3d_create_side_data liteav_av_stereo3d_create_side_data +#define av_hash_alloc liteav_av_hash_alloc +#define ff_wav_codec_get_id liteav_ff_wav_codec_get_id +#define ff_pack_6ch_int32_to_float_u_sse2 liteav_ff_pack_6ch_int32_to_float_u_sse2 +#define ff_hevc_put_pixels_w6_neon_8_asm liteav_ff_hevc_put_pixels_w6_neon_8_asm +#define ff_id3v2_4_tags liteav_ff_id3v2_4_tags +#define ff_imdct_calc_c_fixed liteav_ff_imdct_calc_c_fixed +#define ff_hevc_transform_add_16x16_neon_8_asm liteav_ff_hevc_transform_add_16x16_neon_8_asm +#define ff_mdct_end_fixed_32 liteav_ff_mdct_end_fixed_32 +#define ff_avg_h264_qpel4_mc02_10_mmxext liteav_ff_avg_h264_qpel4_mc02_10_mmxext +#define av_bsf_list_alloc liteav_av_bsf_list_alloc +#define ff_hevc_put_qpel_uw_h3v1_neon_8 liteav_ff_hevc_put_qpel_uw_h3v1_neon_8 
+#define avio_handshake liteav_avio_handshake +#define ff_mpeg4_video_profiles liteav_ff_mpeg4_video_profiles +#define ff_h2645_packet_uninit liteav_ff_h2645_packet_uninit +#define ff_h264_chroma422_dc_dequant_idct_14_c liteav_ff_h264_chroma422_dc_dequant_idct_14_c +#define ff_subtitles_queue_finalize liteav_ff_subtitles_queue_finalize +#define ff_hevc_merge_idx_decode liteav_ff_hevc_merge_idx_decode +#define ff_set_common_channel_layouts liteav_ff_set_common_channel_layouts +#define rgb15tobgr32 liteav_rgb15tobgr32 +#define ff_h264_idct8_dc_add_10_c liteav_ff_h264_idct8_dc_add_10_c +#define ff_h264_decode_picture_parameter_set liteav_ff_h264_decode_picture_parameter_set +#define ff_deblock_v_chroma_intra_10_sse2 liteav_ff_deblock_v_chroma_intra_10_sse2 +#define ff_flac_parse_streaminfo liteav_ff_flac_parse_streaminfo +#define ff_hls_demuxer liteav_ff_hls_demuxer +#define ff_h264_idct8_dc_add_12_c liteav_ff_h264_idct8_dc_add_12_c +#define av_bprint_chars liteav_av_bprint_chars +#define ff_pred8x8l_horizontal_up_10_avx liteav_ff_pred8x8l_horizontal_up_10_avx +#define ff_avg_h264_chroma_mc8_neon liteav_ff_avg_h264_chroma_mc8_neon +#define ff_mpa_sblimit_table liteav_ff_mpa_sblimit_table +#define ff_put_h264_chroma_mc4_10_mmxext liteav_ff_put_h264_chroma_mc4_10_mmxext +#define ff_framesync_activate liteav_ff_framesync_activate +#define ff_subtitles_queue_read_packet liteav_ff_subtitles_queue_read_packet +#define ff_h264_idct8_dc_add_14_c liteav_ff_h264_idct8_dc_add_14_c +#define av_sample_fmt_is_planar liteav_av_sample_fmt_is_planar +#define ff_pred8x8l_dc_10_sse2 liteav_ff_pred8x8l_dc_10_sse2 +#define yyget_out liteav_yyget_out +#define sws_convVec liteav_sws_convVec +#define ff_vorbiscomment_length liteav_ff_vorbiscomment_length +#define ff_hevc_put_qpel_uw_pixels_w12_neon_8 liteav_ff_hevc_put_qpel_uw_pixels_w12_neon_8 +#define ff_hevc_put_pixels_w2_neon_8 liteav_ff_hevc_put_pixels_w2_neon_8 +#define ff_h264_biweight_8_sse2 liteav_ff_h264_biweight_8_sse2 +#define 
ff_pack_6ch_float_to_int32_a_avx liteav_ff_pack_6ch_float_to_int32_a_avx +#define ff_thread_finish_setup liteav_ff_thread_finish_setup +#define avfilter_get_by_name liteav_avfilter_get_by_name +#define ff_h264_videotoolbox_hwaccel liteav_ff_h264_videotoolbox_hwaccel +#define ff_aac_profiles liteav_ff_aac_profiles +#define ff_slice_thread_free liteav_ff_slice_thread_free +#define rendition_matched_tags liteav_rendition_matched_tags +#define rgb64to48_bswap liteav_rgb64to48_bswap +#define ff_h264_chroma422_dc_dequant_idct_12_c liteav_ff_h264_chroma422_dc_dequant_idct_12_c +#define ff_ps_hybrid_analysis_neon liteav_ff_ps_hybrid_analysis_neon +#define ff_mpeg2_video_profiles liteav_ff_mpeg2_video_profiles +#define ff_w4_plus_w2_hi liteav_ff_w4_plus_w2_hi +#define ff_h264_idct_add16_10_avx liteav_ff_h264_idct_add16_10_avx +#define yyset_lineno liteav_yyset_lineno +#define av_des_alloc liteav_av_des_alloc +#define ff_pred8x8l_down_left_10_ssse3 liteav_ff_pred8x8l_down_left_10_ssse3 +#define ff_h264_biweight_16_10_sse4 liteav_ff_h264_biweight_16_10_sse4 +#define ff_framesync_dualinput_get liteav_ff_framesync_dualinput_get +#define ff_hevc_dsp_init liteav_ff_hevc_dsp_init +#define ff_h264_biweight_16_10_sse2 liteav_ff_h264_biweight_16_10_sse2 +#define av_parse_video_rate liteav_av_parse_video_rate +#define av_register_bitstream_filter liteav_av_register_bitstream_filter +#define av_packet_rescale_ts liteav_av_packet_rescale_ts +#define ff_aac_scalefactor_code liteav_ff_aac_scalefactor_code +#define ff_rvlc_rl_inter liteav_ff_rvlc_rl_inter +#define ff_pred16x16_vert_neon liteav_ff_pred16x16_vert_neon +#define ff_crc04C11DB7_update liteav_ff_crc04C11DB7_update +#define ff_mov_write_chan liteav_ff_mov_write_chan +#define ff_sbr_apply liteav_ff_sbr_apply +#define ff_query_formats_all_layouts liteav_ff_query_formats_all_layouts +#define ff_h264_idct_add16intra_8_mmxext liteav_ff_h264_idct_add16intra_8_mmxext +#define ff_rtmp_packet_write liteav_ff_rtmp_packet_write +#define 
avfilter_add_matrix liteav_avfilter_add_matrix +#define yyrealloc liteav_yyrealloc +#define ff_hevc_put_qpel_uw_pixels_w32_neon_8 liteav_ff_hevc_put_qpel_uw_pixels_w32_neon_8 +#define ff_yuv422p_to_rgba_neon liteav_ff_yuv422p_to_rgba_neon +#define ff_put_pixels16_mmx liteav_ff_put_pixels16_mmx +#define av_bprint_init_for_buffer liteav_av_bprint_init_for_buffer +#define av_aes_ctr_init liteav_av_aes_ctr_init +#define av_opt_free liteav_av_opt_free +#define ff_avg_h264_qpel16_mc32_10_sse2 liteav_ff_avg_h264_qpel16_mc32_10_sse2 +#define ff_mjpeg_encode_huffman_close liteav_ff_mjpeg_encode_huffman_close +#define ff_clean_intra_table_entries liteav_ff_clean_intra_table_entries +#define ff_pred8x8_0l0_dc_neon liteav_ff_pred8x8_0l0_dc_neon +#define ff_mpeg1_clean_buffers liteav_ff_mpeg1_clean_buffers +#define ff_image_copy_plane_uc_from_x86 liteav_ff_image_copy_plane_uc_from_x86 +#define ff_ebur128_add_frames_planar_double liteav_ff_ebur128_add_frames_planar_double +#define av_audio_fifo_write liteav_av_audio_fifo_write +#define ff_deblock_h_luma_intra_10_sse2 liteav_ff_deblock_h_luma_intra_10_sse2 +#define av_rdft_calc liteav_av_rdft_calc +#define ff_pw_1023 liteav_ff_pw_1023 +#define ff_inlink_consume_samples liteav_ff_inlink_consume_samples +#define av_get_alt_sample_fmt liteav_av_get_alt_sample_fmt +#define ff_hpeldsp_init_aarch64 liteav_ff_hpeldsp_init_aarch64 +#define av_spherical_from_name liteav_av_spherical_from_name +#define ff_openssl_deinit liteav_ff_openssl_deinit +#define ff_hevc_pred_angular_16x16_h_neon_8 liteav_ff_hevc_pred_angular_16x16_h_neon_8 +#define ff_subtitles_queue_clean liteav_ff_subtitles_queue_clean +#define ff_put_h264_qpel16_mc11_10_sse2 liteav_ff_put_h264_qpel16_mc11_10_sse2 +#define ff_amf_read_string liteav_ff_amf_read_string +#define ff_id3v2_read liteav_ff_id3v2_read +#define ff_simple_idct8_sse2 liteav_ff_simple_idct8_sse2 +#define av_base64_encode liteav_av_base64_encode +#define ff_hevc_sao_edge_eo0_w64_neon_8 
liteav_ff_hevc_sao_edge_eo0_w64_neon_8 +#define ff_hevc_transform_luma_4x4_neon_8_asm liteav_ff_hevc_transform_luma_4x4_neon_8_asm +#define av_buffer_ref liteav_av_buffer_ref +#define rgb48to64_nobswap liteav_rgb48to64_nobswap +#define ff_idctdsp_init liteav_ff_idctdsp_init +#define swresample_configuration liteav_swresample_configuration +#define openssl_mutexes liteav_openssl_mutexes +#define ff_alloc_entries liteav_ff_alloc_entries +#define ff_hevc_put_qpel_uw_h_neon_8 liteav_ff_hevc_put_qpel_uw_h_neon_8 +#define av_bprint_append_data liteav_av_bprint_append_data +#define ff_h264_idct_add_neon liteav_ff_h264_idct_add_neon +#define ff_tns_max_bands_128 liteav_ff_tns_max_bands_128 +#define ff_cos_512_fixed liteav_ff_cos_512_fixed +#define ff_sine_64 liteav_ff_sine_64 +#define av_fifo_freep liteav_av_fifo_freep +#define ffurl_get_multi_file_handle liteav_ffurl_get_multi_file_handle +#define ff_prores_idct liteav_ff_prores_idct +#define ff_hevc_put_epel_v_neon_8 liteav_ff_hevc_put_epel_v_neon_8 +#define ff_ac3_db_per_bit_tab liteav_ff_ac3_db_per_bit_tab +#define ff_put_h264_chroma_mc8_rnd_mmx liteav_ff_put_h264_chroma_mc8_rnd_mmx +#define ff_smil_get_attr_ptr liteav_ff_smil_get_attr_ptr +#define ff_pb_3 liteav_ff_pb_3 +#define ff_pb_2 liteav_ff_pb_2 +#define ff_pb_1 liteav_ff_pb_1 +#define ff_pb_0 liteav_ff_pb_0 +#define ff_w3_min_w1_lo liteav_ff_w3_min_w1_lo +#define ff_h264_biweight_8_mmxext liteav_ff_h264_biweight_8_mmxext +#define ff_hevc_put_pixels_w4_neon_8 liteav_ff_hevc_put_pixels_w4_neon_8 +#define av_imdct_half liteav_av_imdct_half +#define av_add_i liteav_av_add_i +#define sws_alloc_context liteav_sws_alloc_context +#define ff_thread_report_progress liteav_ff_thread_report_progress +#define ff_h264_set_erpic liteav_ff_h264_set_erpic +#define ff_pred4x4_down_left_10_avx liteav_ff_pred4x4_down_left_10_avx +#define ff_init_gamma_convert liteav_ff_init_gamma_convert +#define ff_put_no_rnd_qpel8_mc32_old_c liteav_ff_put_no_rnd_qpel8_mc32_old_c +#define 
ff_subtitles_read_text_chunk liteav_ff_subtitles_read_text_chunk +#define swr_ffversion liteav_swr_ffversion +#define av_add_q liteav_av_add_q +#define ff_insert_pad liteav_ff_insert_pad +#define avio_w8 liteav_avio_w8 +#define ff_zigzag_scan liteav_ff_zigzag_scan +#define ff_pred16x16_dc_10_mmxext liteav_ff_pred16x16_dc_10_mmxext +#define ff_choose_chroma_location liteav_ff_choose_chroma_location +#define ff_put_h264_qpel4_mc10_10_mmxext liteav_ff_put_h264_qpel4_mc10_10_mmxext +#define ff_deblock_h_chroma422_8_mmxext liteav_ff_deblock_h_chroma422_8_mmxext +#define ff_mpeg4_c_dc_scale_table liteav_ff_mpeg4_c_dc_scale_table +#define ff_put_h264_qpel4_mc00_10_mmxext liteav_ff_put_h264_qpel4_mc00_10_mmxext +#define ff_frame_pool_uninit liteav_ff_frame_pool_uninit +#define ff_ps_init liteav_ff_ps_init +#define ff_hevc_put_pixels_w48_neon_8 liteav_ff_hevc_put_pixels_w48_neon_8 +#define av_rescale_delta liteav_av_rescale_delta +#define ff_unpack_2ch_int32_to_int32_u_sse2 liteav_ff_unpack_2ch_int32_to_int32_u_sse2 +#define av_hash_update liteav_av_hash_update +#define ff_hevc_put_pixels_w48_neon_8_asm liteav_ff_hevc_put_pixels_w48_neon_8_asm +#define av_opt_set_int liteav_av_opt_set_int +#define av_mediacodec_alloc_context liteav_av_mediacodec_alloc_context +#define ff_avg_h264_chroma_mc8_10_sse2 liteav_ff_avg_h264_chroma_mc8_10_sse2 +#define ff_filter_graph_run_once liteav_ff_filter_graph_run_once +#define ff_alternate_vertical_scan liteav_ff_alternate_vertical_scan +#define ff_avg_h264_qpel4_h_lowpass_mmxext liteav_ff_avg_h264_qpel4_h_lowpass_mmxext +#define av_gettime_relative liteav_av_gettime_relative +#define av_md5_size liteav_av_md5_size +#define ff_dct32_float_avx liteav_ff_dct32_float_avx +#define avio_rb32 liteav_avio_rb32 +#define ff_hevc_no_residual_syntax_flag_decode liteav_ff_hevc_no_residual_syntax_flag_decode +#define rgb16tobgr15 liteav_rgb16tobgr15 +#define ff_put_h264_chroma_mc4_ssse3 liteav_ff_put_h264_chroma_mc4_ssse3 +#define 
ff_avg_h264_qpel8_mc22_neon liteav_ff_avg_h264_qpel8_mc22_neon +#define ff_sbr_autocorrelate_neon liteav_ff_sbr_autocorrelate_neon +#define ff_vc1_profiles liteav_ff_vc1_profiles +#define av_frame_alloc liteav_av_frame_alloc +#define av_hash_final_b64 liteav_av_hash_final_b64 +#define ff_pred8x8l_down_left_8_ssse3 liteav_ff_pred8x8l_down_left_8_ssse3 +#define ff_avg_h264_qpel16_mc33_neon liteav_ff_avg_h264_qpel16_mc33_neon +#define ff_pred8x8_dc_8_mmxext liteav_ff_pred8x8_dc_8_mmxext +#define avfilter_graph_alloc_filter liteav_avfilter_graph_alloc_filter +#define ff_avg_qpel16_mc11_old_c liteav_ff_avg_qpel16_mc11_old_c +#define ff_ebur128_relative_threshold liteav_ff_ebur128_relative_threshold +#define ff_ps_stereo_interpolate_ipdopd_sse3 liteav_ff_ps_stereo_interpolate_ipdopd_sse3 +#define avio_rl32 liteav_avio_rl32 +#define av_write_image_line liteav_av_write_image_line +#define ff_aac_spectral_codes liteav_ff_aac_spectral_codes +#define ff_pb_15 liteav_ff_pb_15 +#define swri_audio_convert_init_x86 liteav_swri_audio_convert_init_x86 +#define ff_dither_2x2_8 liteav_ff_dither_2x2_8 +#define ff_mpeg4_decoder liteav_ff_mpeg4_decoder +#define ff_put_h264_qpel8_mc11_10_sse2 liteav_ff_put_h264_qpel8_mc11_10_sse2 +#define ff_me_cmp_init_x86 liteav_ff_me_cmp_init_x86 +#define ff_simple_idct_int16_12bit liteav_ff_simple_idct_int16_12bit +#define ff_pred16x16_tm_vp8_8_avx2 liteav_ff_pred16x16_tm_vp8_8_avx2 +#define ff_pred4x4_horizontal_down_8_mmxext liteav_ff_pred4x4_horizontal_down_8_mmxext +#define ff_vector_fmul_reverse_vfp liteav_ff_vector_fmul_reverse_vfp +#define ffio_init_context liteav_ffio_init_context +#define ff_riff_info_conv liteav_ff_riff_info_conv +#define ff_hevc_put_pixels_w8_neon_8 liteav_ff_hevc_put_pixels_w8_neon_8 +#define ff_avg_rv40_chroma_mc4_mmxext liteav_ff_avg_rv40_chroma_mc4_mmxext +#define av_frame_get_plane_buffer liteav_av_frame_get_plane_buffer +#define ff_hevc_put_qpel_uw_weight_h3v2_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h3v2_neon_8 
+#define ff_img_tags liteav_ff_img_tags +#define ff_init_ff_cos_tabs_fixed_32 liteav_ff_init_ff_cos_tabs_fixed_32 +#define ff_httpproxy_protocol liteav_ff_httpproxy_protocol +#define ff_h264_chroma_dc_dequant_idct_14_c liteav_ff_h264_chroma_dc_dequant_idct_14_c +#define ff_amf_tag_size liteav_ff_amf_tag_size +#define av_aes_ctr_free liteav_av_aes_ctr_free +#define ff_simple_idct_add_int16_12bit liteav_ff_simple_idct_add_int16_12bit +#define ff_pred4x4_dc_10_mmxext liteav_ff_pred4x4_dc_10_mmxext +#define ff_flac_set_channel_layout liteav_ff_flac_set_channel_layout +#define ff_put_no_rnd_qpel16_mc32_old_c liteav_ff_put_no_rnd_qpel16_mc32_old_c +#define swri_rematrix_free liteav_swri_rematrix_free +#define avpriv_solve_lls liteav_avpriv_solve_lls +#define ff_fft_init_fixed liteav_ff_fft_init_fixed +#define ff_h263_loop_filter liteav_ff_h263_loop_filter +#define ff_init_scantable liteav_ff_init_scantable +#define ff_put_h264_qpel16_mc20_10_sse2_cache64 liteav_ff_put_h264_qpel16_mc20_10_sse2_cache64 +#define av_opt_eval_q liteav_av_opt_eval_q +#define av_downmix_info_update_side_data liteav_av_downmix_info_update_side_data +#define ff_flac_parser liteav_ff_flac_parser +#define ff_mp3_decoder liteav_ff_mp3_decoder +#define av_des_init liteav_av_des_init +#define ff_listen_connect liteav_ff_listen_connect +#define ff_hevc_bump_frame liteav_ff_hevc_bump_frame +#define ff_filter_alloc liteav_ff_filter_alloc +#define ff_parse_channel_layout liteav_ff_parse_channel_layout +#define av_frame_set_qp_table liteav_av_frame_set_qp_table +#define ff_h263_cbpy_vlc liteav_ff_h263_cbpy_vlc +#define ff_put_pixels16_l2_mmxext liteav_ff_put_pixels16_l2_mmxext +#define ff_mdct_win_float liteav_ff_mdct_win_float +#define ff_avg_h264_chroma_mc8_rnd_ssse3 liteav_ff_avg_h264_chroma_mc8_rnd_ssse3 +#define ff_mpv_motion liteav_ff_mpv_motion +#define swri_rematrix_init liteav_swri_rematrix_init +#define ff_put_h264_qpel4_mc32_10_mmxext liteav_ff_put_h264_qpel4_mc32_10_mmxext +#define 
vlc_css_term_Clean liteav_vlc_css_term_Clean +#define ff_avc_mp4_find_startcode liteav_ff_avc_mp4_find_startcode +#define ff_h264_biweight_4_10_sse2 liteav_ff_h264_biweight_4_10_sse2 +#define ff_vf_scale liteav_ff_vf_scale +#define ff_h264_biweight_4_10_sse4 liteav_ff_h264_biweight_4_10_sse4 +#define avio_wl24 liteav_avio_wl24 +#define ff_hevc_put_qpel_uw_h2_neon_8 liteav_ff_hevc_put_qpel_uw_h2_neon_8 +#define ff_put_pixels4_l2_mmxext liteav_ff_put_pixels4_l2_mmxext +#define ff_hevc_put_pixels_w24_neon_8 liteav_ff_hevc_put_pixels_w24_neon_8 +#define ff_pack_8ch_float_to_int32_u_sse2 liteav_ff_pack_8ch_float_to_int32_u_sse2 +#define shuffle_bytes_0321 liteav_shuffle_bytes_0321 +#define ff_pred8x8l_top_dc_8_mmxext liteav_ff_pred8x8l_top_dc_8_mmxext +#define av_image_copy_to_buffer liteav_av_image_copy_to_buffer +#define ff_vector_fmul_scalar_neon liteav_ff_vector_fmul_scalar_neon +#define ff_h264_idct_add16intra_9_c liteav_ff_h264_idct_add16intra_9_c +#define ff_put_pixels4_mmx liteav_ff_put_pixels4_mmx +#define av_color_transfer_from_name liteav_av_color_transfer_from_name +#define av_ripemd_alloc liteav_av_ripemd_alloc +#define ff_getSwsFunc liteav_ff_getSwsFunc +#define av_cast5_size liteav_av_cast5_size +#define ff_pw_8192 liteav_ff_pw_8192 +#define ff_w_tab_sr liteav_ff_w_tab_sr +#define ff_hevc_decode_nal_vps liteav_ff_hevc_decode_nal_vps +#define av_get_channel_layout_channel_index liteav_av_get_channel_layout_channel_index +#define ff_tcp_protocol liteav_ff_tcp_protocol +#define ff_h264_golomb_to_intra4x4_cbp liteav_ff_h264_golomb_to_intra4x4_cbp +#define ff_avc_parse_nal_units liteav_ff_avc_parse_nal_units +#define ff_put_h264_qpel8_mc32_neon liteav_ff_put_h264_qpel8_mc32_neon +#define av_imdct_calc liteav_av_imdct_calc +#define ff_mpeg4_DCtab_chrom liteav_ff_mpeg4_DCtab_chrom +#define ff_unpack_2ch_int16_to_int16_u_sse2 liteav_ff_unpack_2ch_int16_to_int16_u_sse2 +#define shuffle_bytes_3210 liteav_shuffle_bytes_3210 +#define ff_codec_movaudio_tags 
liteav_ff_codec_movaudio_tags +#define ff_ps_apply liteav_ff_ps_apply +#define avpriv_get_trc_function_from_trc liteav_avpriv_get_trc_function_from_trc +#define ff_h264_check_intra4x4_pred_mode liteav_ff_h264_check_intra4x4_pred_mode +#define ff_sbrdsp_init_x86 liteav_ff_sbrdsp_init_x86 +#define av_buffer_allocz liteav_av_buffer_allocz +#define ff_hevc_diag_scan4x4_x liteav_ff_hevc_diag_scan4x4_x +#define ff_hevc_diag_scan4x4_y liteav_ff_hevc_diag_scan4x4_y +#define ff_simple_idct_put_int16_12bit liteav_ff_simple_idct_put_int16_12bit +#define ff_imdct_calc_neon liteav_ff_imdct_calc_neon +#define swri_noise_shaping_float liteav_swri_noise_shaping_float +#define av_audio_fifo_drain liteav_av_audio_fifo_drain +#define ff_h264_idct_add16_8_sse2 liteav_ff_h264_idct_add16_8_sse2 +#define ff_id3v2_3_tags liteav_ff_id3v2_3_tags +#define webvtt_parser_close liteav_webvtt_parser_close +#define avfilter_graph_parse2 liteav_avfilter_graph_parse2 +#define ff_avg_pixels8_l2_mmxext liteav_ff_avg_pixels8_l2_mmxext +#define ff_h264_mp4toannexb_bsf liteav_ff_h264_mp4toannexb_bsf +#define ff_pcm_mulaw_at_decoder liteav_ff_pcm_mulaw_at_decoder +#define ff_avg_h264_qpel16_mc31_10_sse2 liteav_ff_avg_h264_qpel16_mc31_10_sse2 +#define ff_hevc_put_qpel_h1v2_neon_8 liteav_ff_hevc_put_qpel_h1v2_neon_8 +#define avpriv_init_lls liteav_avpriv_init_lls +#define av_pixelutils_get_sad_fn liteav_av_pixelutils_get_sad_fn +#define ff_avg_h264_chroma_mc2_mmxext liteav_ff_avg_h264_chroma_mc2_mmxext +#define av_d3d11va_alloc_context liteav_av_d3d11va_alloc_context +#define av_buffersrc_add_frame_flags liteav_av_buffersrc_add_frame_flags +#define ff_hevc_idct_8x8_dc_neon_8 liteav_ff_hevc_idct_8x8_dc_neon_8 +#define ff_mpa_quant_bits liteav_ff_mpa_quant_bits +#define ff_h263_rl_inter liteav_ff_h263_rl_inter +#define ff_cos_131072 liteav_ff_cos_131072 +#define ff_put_h264_qpel4_mc22_10_mmxext liteav_ff_put_h264_qpel4_mc22_10_mmxext +#define sws_getColorspaceDetails liteav_sws_getColorspaceDetails +#define 
av_stereo3d_alloc liteav_av_stereo3d_alloc +#define ff_mpeg4_DCtab_lum liteav_ff_mpeg4_DCtab_lum +#define av_bprint_get_buffer liteav_av_bprint_get_buffer +#define av_hash_final_bin liteav_av_hash_final_bin +#define ff_h264_idct8_add4_8_mmxext liteav_ff_h264_idct8_add4_8_mmxext +#define ff_hevc_put_qpel_uw_v_neon_8 liteav_ff_hevc_put_qpel_uw_v_neon_8 +#define ff_sine_2048 liteav_ff_sine_2048 +#define ff_unicode_ass_add_rect liteav_ff_unicode_ass_add_rect +#define ff_put_h264_chroma_mc8_neon liteav_ff_put_h264_chroma_mc8_neon +#define avfilter_process_command liteav_avfilter_process_command +#define avfilter_graph_free liteav_avfilter_graph_free +#define ff_subtitles_unicode_external_read_chunk liteav_ff_subtitles_unicode_external_read_chunk +#define ff_deblock_h_luma_intra_8_avx liteav_ff_deblock_h_luma_intra_8_avx +#define rgb64to48_nobswap liteav_rgb64to48_nobswap +#define ff_frame_thread_encoder_free liteav_ff_frame_thread_encoder_free +#define ff_ps_hybrid_synthesis_deint_sse4 liteav_ff_ps_hybrid_synthesis_deint_sse4 +#define ff_frame_thread_free liteav_ff_frame_thread_free +#define av_buffersink_get_channels liteav_av_buffersink_get_channels +#define av_md5_final liteav_av_md5_final +#define ff_put_h264_qpel4_hv_lowpass_h_mmxext liteav_ff_put_h264_qpel4_hv_lowpass_h_mmxext +#define ff_reget_buffer liteav_ff_reget_buffer +#define ff_put_h264_qpel16_mc02_neon liteav_ff_put_h264_qpel16_mc02_neon +#define ff_framesync_uninit liteav_ff_framesync_uninit +#define ff_aac_kbd_long_1024 liteav_ff_aac_kbd_long_1024 +#define av_cast5_crypt2 liteav_av_cast5_crypt2 +#define ff_avg_h264_qpel8or16_hv2_lowpass_op_mmxext liteav_ff_avg_h264_qpel8or16_hv2_lowpass_op_mmxext +#define ff_hevc_put_qpel_uw_weight_h3_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h3_neon_8 +#define ff_pred8x8_plane_10_sse2 liteav_ff_pred8x8_plane_10_sse2 +#define ff_avg_h264_qpel16_mc31_neon liteav_ff_avg_h264_qpel16_mc31_neon +#define av_parse_ratio liteav_av_parse_ratio +#define ff_put_qpel8_mc13_old_c 
liteav_ff_put_qpel8_mc13_old_c +#define rgb48tobgr48_nobswap liteav_rgb48tobgr48_nobswap +#define ff_put_h264_qpel8or16_v_lowpass_sse2 liteav_ff_put_h264_qpel8or16_v_lowpass_sse2 +#define ff_af_queue_add liteav_ff_af_queue_add +#define ff_h263i_decoder liteav_ff_h263i_decoder +#define ff_avg_vc1_chroma_mc8_nornd_3dnow liteav_ff_avg_vc1_chroma_mc8_nornd_3dnow +#define ff_sbc_profiles liteav_ff_sbc_profiles +#define ff_frame_pool_get_audio_config liteav_ff_frame_pool_get_audio_config +#define av_twofish_crypt liteav_av_twofish_crypt +#define av_sha512_alloc liteav_av_sha512_alloc +#define avio_close_dyn_buf liteav_avio_close_dyn_buf +#define ff_ac3_window liteav_ff_ac3_window +#define ff_avg_h264_qpel16_mc10_10_sse2 liteav_ff_avg_h264_qpel16_mc10_10_sse2 +#define av_buffersrc_get_nb_failed_requests liteav_av_buffersrc_get_nb_failed_requests +#define ff_avg_h264_qpel8_mc10_10_sse2_cache64 liteav_ff_avg_h264_qpel8_mc10_10_sse2_cache64 +#define ff_ilbc_at_decoder liteav_ff_ilbc_at_decoder +#define ff_pred8x8l_vertical_right_10_sse2 liteav_ff_pred8x8l_vertical_right_10_sse2 +#define ff_hevc_transform_add_4x4_neon_8_asm liteav_ff_hevc_transform_add_4x4_neon_8_asm +#define av_ripemd_final liteav_av_ripemd_final +#define ff_get_cpu_max_align_aarch64 liteav_ff_get_cpu_max_align_aarch64 +#define ff_ebur128_add_frames_float liteav_ff_ebur128_add_frames_float +#define ff_avg_qpel16_mc12_old_c liteav_ff_avg_qpel16_mc12_old_c +#define ff_sbr_neg_odd_64_neon liteav_ff_sbr_neg_odd_64_neon +#define ff_pred8x8l_vertical_8_mmxext liteav_ff_pred8x8l_vertical_8_mmxext +#define ff_slice_thread_execute_with_mainfunc liteav_ff_slice_thread_execute_with_mainfunc +#define av_hmac_free liteav_av_hmac_free +#define av_thread_message_flush liteav_av_thread_message_flush +#define ff_hevc_hls_residual_coding liteav_ff_hevc_hls_residual_coding +#define av_get_output_timestamp liteav_av_get_output_timestamp +#define ff_tns_max_bands_512 liteav_ff_tns_max_bands_512 +#define ff_h264_idct_add_10_c 
liteav_ff_h264_idct_add_10_c +#define ff_pred8x8_vert_neon liteav_ff_pred8x8_vert_neon +#define ff_av1_filter_obus liteav_ff_av1_filter_obus +#define ff_framesync_init_dualinput liteav_ff_framesync_init_dualinput +#define ff_init_ff_sine_windows_fixed liteav_ff_init_ff_sine_windows_fixed +#define ff_h264_weight_16_sse2 liteav_ff_h264_weight_16_sse2 +#define ff_free_filters liteav_ff_free_filters +#define av_d2str liteav_av_d2str +#define ff_pw_255 liteav_ff_pw_255 +#define av_probe_input_buffer2 liteav_av_probe_input_buffer2 +#define ff_pw_256 liteav_ff_pw_256 +#define avfilter_transform liteav_avfilter_transform +#define ff_cos_8192_fixed liteav_ff_cos_8192_fixed +#define av_parse_time liteav_av_parse_time +#define ff_pack_2ch_int32_to_int32_u_sse2 liteav_ff_pack_2ch_int32_to_int32_u_sse2 +#define ff_simple_idct_put_neon liteav_ff_simple_idct_put_neon +#define av_color_range_name liteav_av_color_range_name +#define rgb15to16 liteav_rgb15to16 +#define ff_fft_permute_neon liteav_ff_fft_permute_neon +#define av_dv_frame_profile liteav_av_dv_frame_profile +#define ff_h264_idct8_add_9_c liteav_ff_h264_idct8_add_9_c +#define ff_avg_h264_qpel8_h_lowpass_l2_ssse3 liteav_ff_avg_h264_qpel8_h_lowpass_l2_ssse3 +#define av_buffersink_get_sample_rate liteav_av_buffersink_get_sample_rate +#define ff_mpeg4_workaround_bugs liteav_ff_mpeg4_workaround_bugs +#define ff_pred16x16_left_dc_10_mmxext liteav_ff_pred16x16_left_dc_10_mmxext +#define ff_id3v2_free_extra_meta liteav_ff_id3v2_free_extra_meta +#define ff_pw_2048 liteav_ff_pw_2048 +#define vlc_css_parser_ParseString liteav_vlc_css_parser_ParseString +#define ff_pred8x8l_down_left_8_sse2 liteav_ff_pred8x8l_down_left_8_sse2 +#define ff_h264_idct_dc_add_8_mmxext liteav_ff_h264_idct_dc_add_8_mmxext +#define ff_unpack_2ch_int16_to_int16_u_ssse3 liteav_ff_unpack_2ch_int16_to_int16_u_ssse3 +#define sws_scale liteav_sws_scale +#define av_parse_video_size liteav_av_parse_video_size +#define ff_hevc_sao_band_w8_neon_8 
liteav_ff_hevc_sao_band_w8_neon_8 +#define ff_nv21_to_bgra_neon liteav_ff_nv21_to_bgra_neon +#define ff_nv21_to_abgr_neon liteav_ff_nv21_to_abgr_neon +#define deinterleaveBytes liteav_deinterleaveBytes +#define ff_put_pixels8_l2_shift5_mmxext liteav_ff_put_pixels8_l2_shift5_mmxext +#define av_opt_is_set_to_default_by_name liteav_av_opt_is_set_to_default_by_name +#define swri_resample_dsp_aarch64_init liteav_swri_resample_dsp_aarch64_init +#define ff_avg_pixels8_xy2_neon liteav_ff_avg_pixels8_xy2_neon +#define ff_hscale_8_to_15_neon liteav_ff_hscale_8_to_15_neon +#define ff_avg_h264_qpel16_mc01_10_sse2 liteav_ff_avg_h264_qpel16_mc01_10_sse2 +#define ff_put_h264_chroma_mc2_mmxext liteav_ff_put_h264_chroma_mc2_mmxext +#define ff_simple_idct_put_int16_10bit liteav_ff_simple_idct_put_int16_10bit +#define ff_put_no_rnd_qpel8_mc31_old_c liteav_ff_put_no_rnd_qpel8_mc31_old_c +#define ff_simple_idct_add_int16_10bit liteav_ff_simple_idct_add_int16_10bit +#define av_timecode_init liteav_av_timecode_init +#define av_frame_get_buffer liteav_av_frame_get_buffer +#define ff_int32_to_float_a_sse2 liteav_ff_int32_to_float_a_sse2 +#define ff_ue_golomb_vlc_code liteav_ff_ue_golomb_vlc_code +#define ff_ac3_hearing_threshold_tab liteav_ff_ac3_hearing_threshold_tab +#define ff_put_h264_qpel4_h_lowpass_mmxext liteav_ff_put_h264_qpel4_h_lowpass_mmxext +#define ff_put_h264_qpel8_mc12_10_sse2 liteav_ff_put_h264_qpel8_mc12_10_sse2 +#define ff_h264_idct_add8_422_12_c liteav_ff_h264_idct_add8_422_12_c +#define av_frame_set_best_effort_timestamp liteav_av_frame_set_best_effort_timestamp +#define ff_h263_inter_MCBPC_vlc liteav_ff_h263_inter_MCBPC_vlc +#define ff_w4_plus_w6_hi liteav_ff_w4_plus_w6_hi +#define ffio_geturlcontext liteav_ffio_geturlcontext +#define av_fifo_space liteav_av_fifo_space +#define ff_h264_idct_add8_422_10_c liteav_ff_h264_idct_add8_422_10_c +#define ff_vector_fmul_window_neon liteav_ff_vector_fmul_window_neon +#define ff_deblock_h_luma_10_sse2 
liteav_ff_deblock_h_luma_10_sse2 +#define av_xtea_le_crypt liteav_av_xtea_le_crypt +#define ff_cos_8192 liteav_ff_cos_8192 +#define rgb24to15 liteav_rgb24to15 +#define ff_mpeg_framesize_alloc liteav_ff_mpeg_framesize_alloc +#define ff_aac_eld_window_480 liteav_ff_aac_eld_window_480 +#define av_frame_remove_side_data liteav_av_frame_remove_side_data +#define ff_hevc_put_qpel_uw_hv_neon_8 liteav_ff_hevc_put_qpel_uw_hv_neon_8 +#define ff_h264_idct8_dc_add_9_c liteav_ff_h264_idct8_dc_add_9_c +#define ff_inlink_request_frame liteav_ff_inlink_request_frame +#define ff_hevc_put_pixels_w32_neon_8_asm liteav_ff_hevc_put_pixels_w32_neon_8_asm +#define ff_mpegts_demuxer liteav_ff_mpegts_demuxer +#define sws_get_class liteav_sws_get_class +#define av_buffersink_get_frame_flags liteav_av_buffersink_get_frame_flags +#define av_frame_get_channels liteav_av_frame_get_channels +#define avcodec_get_type liteav_avcodec_get_type +#define ff_pred8x8l_vertical_right_8_mmxext liteav_ff_pred8x8l_vertical_right_8_mmxext +#define ff_fft16_vfp liteav_ff_fft16_vfp +#define ff_log2_run liteav_ff_log2_run +#define av_chroma_location_name liteav_av_chroma_location_name +#define av_blowfish_init liteav_av_blowfish_init +#define ff_avg_h264_qpel8_mc33_neon liteav_ff_avg_h264_qpel8_mc33_neon +#define ff_deblock_v_chroma_8_avx liteav_ff_deblock_v_chroma_8_avx +#define ff_h264_idct_add8_9_c liteav_ff_h264_idct_add8_9_c +#define av_tea_alloc liteav_av_tea_alloc +#define av_strncasecmp liteav_av_strncasecmp +#define av_bsf_next liteav_av_bsf_next +#define rgb24to16 liteav_rgb24to16 +#define ff_pw_32 liteav_ff_pw_32 +#define ff_put_h264_qpel4_mc33_10_mmxext liteav_ff_put_h264_qpel4_mc33_10_mmxext +#define ff_ac3_slow_gain_tab liteav_ff_ac3_slow_gain_tab +#define ff_h264_filter_mb liteav_ff_h264_filter_mb +#define ff_mdct15_uninit liteav_ff_mdct15_uninit +#define ff_h264_loop_filter_strength_mmxext liteav_ff_h264_loop_filter_strength_mmxext +#define avpriv_set_systematic_pal2 
liteav_avpriv_set_systematic_pal2 +#define ff_avg_h264_qpel8_mc30_10_sse2 liteav_ff_avg_h264_qpel8_mc30_10_sse2 +#define ff_framequeue_add liteav_ff_framequeue_add +#define ff_simple_idct_put_int16_8bit liteav_ff_simple_idct_put_int16_8bit +#define av_hash_get_size liteav_av_hash_get_size +#define ff_hevc_put_qpel_uw_weight_v2_neon_8 liteav_ff_hevc_put_qpel_uw_weight_v2_neon_8 +#define av_twofish_alloc liteav_av_twofish_alloc +#define ff_put_h264_qpel4_mc31_10_mmxext liteav_ff_put_h264_qpel4_mc31_10_mmxext +#define av_buffersink_params_alloc liteav_av_buffersink_params_alloc +#define avformat_write_header liteav_avformat_write_header +#define av_reduce liteav_av_reduce +#define ff_set_qscale liteav_ff_set_qscale +#define ff_hevc_sao_band_filter_8_neon liteav_ff_hevc_sao_band_filter_8_neon +#define ff_mpadsp_apply_window_fixed_neon liteav_ff_mpadsp_apply_window_fixed_neon +#define ff_pred8x8l_vertical_right_8_ssse3 liteav_ff_pred8x8l_vertical_right_8_ssse3 +#define ff_text_r8 liteav_ff_text_r8 +#define avfilter_graph_dump liteav_avfilter_graph_dump +#define ff_put_h264_qpel8_mc33_neon liteav_ff_put_h264_qpel8_mc33_neon +#define ff_avg_h264_qpel4_mc31_10_mmxext liteav_ff_avg_h264_qpel4_mc31_10_mmxext +#define rgb48to64_bswap liteav_rgb48to64_bswap +#define swri_audio_convert_init_aarch64 liteav_swri_audio_convert_init_aarch64 +#define ff_flac_sample_rate_table liteav_ff_flac_sample_rate_table +#define ff_hevc_pred_angular_8x8_neon_8 liteav_ff_hevc_pred_angular_8x8_neon_8 +#define ff_hevc_put_epel_uw_pixels_w64_neon_8 liteav_ff_hevc_put_epel_uw_pixels_w64_neon_8 +#define ff_flacdsp_init liteav_ff_flacdsp_init +#define ff_put_h264_chroma_mc4_neon liteav_ff_put_h264_chroma_mc4_neon +#define av_hex_dump liteav_av_hex_dump +#define avio_wl16 liteav_avio_wl16 +#define ff_hevc_put_qpel_h_neon_8_wrapper liteav_ff_hevc_put_qpel_h_neon_8_wrapper +#define av_bsf_list_finalize liteav_av_bsf_list_finalize +#define av_bprint_escape liteav_av_bprint_escape +#define 
av_hwframe_ctx_create_derived liteav_av_hwframe_ctx_create_derived +#define ff_deblock_v_chroma_intra_10_avx liteav_ff_deblock_v_chroma_intra_10_avx +#define ff_videotoolbox_avcc_extradata_create liteav_ff_videotoolbox_avcc_extradata_create +#define ff_pred16x16_top_dc_10_mmxext liteav_ff_pred16x16_top_dc_10_mmxext +#define av_jni_get_java_vm liteav_av_jni_get_java_vm +#define ff_gif_encoder liteav_ff_gif_encoder +#define ff_riff_write_info_tag liteav_ff_riff_write_info_tag +#define av_interleaved_write_frame liteav_av_interleaved_write_frame +#define ff_h264_biweight_8_10_sse4 liteav_ff_h264_biweight_8_10_sse4 +#define ff_h264_biweight_8_10_sse2 liteav_ff_h264_biweight_8_10_sse2 +#define avcodec_decode_subtitle2 liteav_avcodec_decode_subtitle2 +#define ff_hevc_put_qpel_h2_neon_8 liteav_ff_hevc_put_qpel_h2_neon_8 +#define av_crc_init liteav_av_crc_init +#define ff_hevc_intra_chroma_pred_mode_decode liteav_ff_hevc_intra_chroma_pred_mode_decode +#define ff_put_h264_qpel16_mc10_neon liteav_ff_put_h264_qpel16_mc10_neon +#define ff_mpeg1_default_non_intra_matrix liteav_ff_mpeg1_default_non_intra_matrix +#define rgb15tobgr16 liteav_rgb15tobgr16 +#define ff_mov_read_chan liteav_ff_mov_read_chan +#define rgb15tobgr15 liteav_rgb15tobgr15 +#define ff_amf_get_string liteav_ff_amf_get_string +#define av_parse_color liteav_av_parse_color +#define ff_pixblockdsp_init liteav_ff_pixblockdsp_init +#define ff_pred16x16_tm_vp8_8_mmx liteav_ff_pred16x16_tm_vp8_8_mmx +#define ff_deblock_v_chroma_10_avx liteav_ff_deblock_v_chroma_10_avx +#define vlc_css_unquotedunescaped liteav_vlc_css_unquotedunescaped +#define ff_rdft_init liteav_ff_rdft_init +#define ff_hevc_put_epel_uw_v_neon_8 liteav_ff_hevc_put_epel_uw_v_neon_8 +#define ff_lzw_encode_init liteav_ff_lzw_encode_init +#define avfilter_graph_get_filter liteav_avfilter_graph_get_filter +#define yypush_buffer_state liteav_yypush_buffer_state +#define ff_hevc_put_qpel_h3_neon_8 liteav_ff_hevc_put_qpel_h3_neon_8 +#define av_match_ext 
liteav_av_match_ext +#define ff_int32_to_float_u_sse2 liteav_ff_int32_to_float_u_sse2 +#define avio_check liteav_avio_check +#define ff_openssl_init liteav_ff_openssl_init +#define ff_simple_idct8_put_avx liteav_ff_simple_idct8_put_avx +#define avcodec_receive_frame liteav_avcodec_receive_frame +#define ff_id3v2_write_simple liteav_ff_id3v2_write_simple +#define ff_pred4x4_tm_vp8_8_mmx liteav_ff_pred4x4_tm_vp8_8_mmx +#define av_sha_update liteav_av_sha_update +#define av_demuxer_iterate liteav_av_demuxer_iterate +#define ff_h264_idct_add16_14_c liteav_ff_h264_idct_add16_14_c +#define ff_deblock_h_luma_10_avx liteav_ff_deblock_h_luma_10_avx +#define av_hwdevice_iterate_types liteav_av_hwdevice_iterate_types +#define ff_vector_fmul_neon liteav_ff_vector_fmul_neon +#define ff_avg_h264_qpel8_mc21_10_sse2 liteav_ff_avg_h264_qpel8_mc21_10_sse2 +#define ff_mpeg4_decode_studio_slice_header liteav_ff_mpeg4_decode_studio_slice_header +#define avio_accept liteav_avio_accept +#define ff_put_h264_qpel4_mc23_10_mmxext liteav_ff_put_h264_qpel4_mc23_10_mmxext +#define ff_vsrc_buffer liteav_ff_vsrc_buffer +#define ff_log_net_error liteav_ff_log_net_error +#define ff_set_common_formats liteav_ff_set_common_formats +#define avpriv_ac3_channel_layout_tab liteav_avpriv_ac3_channel_layout_tab +#define av_timecode_make_string liteav_av_timecode_make_string +#define av_tree_node_alloc liteav_av_tree_node_alloc +#define av_frame_free liteav_av_frame_free +#define ff_h264_idct_add8_8_mmx liteav_ff_h264_idct_add8_8_mmx +#define ff_put_pixels16_x2_no_rnd_neon liteav_ff_put_pixels16_x2_no_rnd_neon +#define av_opt_set_q liteav_av_opt_set_q +#define ff_raw_audio_read_header liteav_ff_raw_audio_read_header +#define swri_noise_shaping_double liteav_swri_noise_shaping_double +#define ff_modified_quant_tab liteav_ff_modified_quant_tab +#define ff_pack_8ch_float_to_float_a_avx liteav_ff_pack_8ch_float_to_float_a_avx +#define ff_sws_init_range_convert liteav_ff_sws_init_range_convert +#define 
ff_cos_512 liteav_ff_cos_512 +#define ff_sine_1024 liteav_ff_sine_1024 +#define av_frame_get_sample_rate liteav_av_frame_get_sample_rate +#define ff_hevc_put_qpel_uw_pixels_w16_neon_8 liteav_ff_hevc_put_qpel_uw_pixels_w16_neon_8 +#define ff_pred8x8l_128_dc_10_mmxext liteav_ff_pred8x8l_128_dc_10_mmxext +#define ff_h264_idct8_add4_10_c liteav_ff_h264_idct8_add4_10_c +#define ff_h264_free_tables liteav_ff_h264_free_tables +#define ff_mpeg1_find_frame_end liteav_ff_mpeg1_find_frame_end +#define ff_hevc_put_qpel_uw_weight_h2v3_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h2v3_neon_8 +#define ff_cos_128 liteav_ff_cos_128 +#define av_hmac_update liteav_av_hmac_update +#define ff_se_golomb_vlc_code liteav_ff_se_golomb_vlc_code +#define av_get_channel_layout liteav_av_get_channel_layout +#define ff_hevc_put_qpel_uw_weight_h3v3_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h3v3_neon_8 +#define av_bprint_strftime liteav_av_bprint_strftime +#define avcodec_flush_buffers liteav_avcodec_flush_buffers +#define ff_mpeg4_default_non_intra_matrix liteav_ff_mpeg4_default_non_intra_matrix +#define vlc_css_declarations_Append liteav_vlc_css_declarations_Append +#define ff_sine_8192_fixed liteav_ff_sine_8192_fixed +#define ff_pred4x4_tm_vp8_8_ssse3 liteav_ff_pred4x4_tm_vp8_8_ssse3 +#define ff_rtmp_calc_digest_pos liteav_ff_rtmp_calc_digest_pos +#define ff_mpv_frame_end liteav_ff_mpv_frame_end +#define ff_h264_idct8_add4_12_c liteav_ff_h264_idct8_add4_12_c +#define ff_reset_entries liteav_ff_reset_entries +#define avfilter_graph_request_oldest liteav_avfilter_graph_request_oldest +#define ff_socket liteav_ff_socket +#define ff_fdctdsp_init_x86 liteav_ff_fdctdsp_init_x86 +#define ff_mpeg4_studio_dc_luma liteav_ff_mpeg4_studio_dc_luma +#define av_blowfish_alloc liteav_av_blowfish_alloc +#define ff_put_pixels8_xy2_neon liteav_ff_put_pixels8_xy2_neon +#define ff_pred16x16_plane_rv40_8_sse2 liteav_ff_pred16x16_plane_rv40_8_sse2 +#define ff_hevc_sao_eo_class_decode liteav_ff_hevc_sao_eo_class_decode 
+#define av_bsf_get_null_filter liteav_av_bsf_get_null_filter +#define avio_get_str liteav_avio_get_str +#define av_packet_clone liteav_av_packet_clone +#define ff_hevc_put_pel_uw_pixels_w6_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w6_neon_8_asm +#define yuyvtoyuv422 liteav_yuyvtoyuv422 +#define yuyvtoyuv420 liteav_yuyvtoyuv420 +#define ff_pack_2ch_int32_to_float_u_sse2 liteav_ff_pack_2ch_int32_to_float_u_sse2 +#define yypop_buffer_state liteav_yypop_buffer_state +#define ff_h264_idct_add8_422_8_mmx liteav_ff_h264_idct_add8_422_8_mmx +#define ff_pred8x8l_horizontal_up_10_ssse3 liteav_ff_pred8x8l_horizontal_up_10_ssse3 +#define rgb16to15 liteav_rgb16to15 +#define ff_avg_h264_chroma_mc2_neon liteav_ff_avg_h264_chroma_mc2_neon +#define av_packet_pack_dictionary liteav_av_packet_pack_dictionary +#define av_basename liteav_av_basename +#define ff_sws_context_class liteav_ff_sws_context_class +#define ff_w4_min_w2_lo liteav_ff_w4_min_w2_lo +#define ff_channel_layouts_unref liteav_ff_channel_layouts_unref +#define vlc_css_parser_Init liteav_vlc_css_parser_Init +#define ff_put_no_rnd_qpel8_mc12_old_c liteav_ff_put_no_rnd_qpel8_mc12_old_c +#define ff_deblock_v_luma_8_avx liteav_ff_deblock_v_luma_8_avx +#define av_write_uncoded_frame_query liteav_av_write_uncoded_frame_query +#define ff_hevc_h_loop_filter_chroma_neon liteav_ff_hevc_h_loop_filter_chroma_neon +#define avio_printf liteav_avio_printf +#define av_parser_init liteav_av_parser_init +#define ff_cos_64_fixed liteav_ff_cos_64_fixed +#define avcodec_send_packet liteav_avcodec_send_packet +#define ff_put_no_rnd_qpel16_mc13_old_c liteav_ff_put_no_rnd_qpel16_mc13_old_c +#define ff_h263_format liteav_ff_h263_format +#define ff_cos_tabs liteav_ff_cos_tabs +#define ff_url_join liteav_ff_url_join +#define av_aes_ctr_increment_iv liteav_av_aes_ctr_increment_iv +#define ff_network_init liteav_ff_network_init +#define ff_avg_h264_chroma_mc4_neon liteav_ff_avg_h264_chroma_mc4_neon +#define ff_hevc_profiles 
liteav_ff_hevc_profiles +#define ff_thread_get_format liteav_ff_thread_get_format +#define ff_flac_is_extradata_valid liteav_ff_flac_is_extradata_valid +#define ff_ass_subtitle_header_default liteav_ff_ass_subtitle_header_default +#define ff_hevc_put_epel_uw_pixels_w4_neon_8 liteav_ff_hevc_put_epel_uw_pixels_w4_neon_8 +#define ff_read_line_to_bprint_overwrite liteav_ff_read_line_to_bprint_overwrite +#define rgb15to24 liteav_rgb15to24 +#define ff_h264_idct8_dc_add_10_sse2 liteav_ff_h264_idct8_dc_add_10_sse2 +#define ff_avg_qpel8_mc32_old_c liteav_ff_avg_qpel8_mc32_old_c +#define yyget_text liteav_yyget_text +#define shuffle_bytes_1230 liteav_shuffle_bytes_1230 +#define ff_decode_bsfs_init liteav_ff_decode_bsfs_init +#define ff_fft_end liteav_ff_fft_end +#define ff_start_tag liteav_ff_start_tag +#define ff_crcA001_update liteav_ff_crcA001_update +#define ff_sbr_hf_apply_noise_1_neon liteav_ff_sbr_hf_apply_noise_1_neon +#define av_realloc_f liteav_av_realloc_f +#define ff_pred8x8l_horizontal_10_sse2 liteav_ff_pred8x8l_horizontal_10_sse2 +#define av_image_copy_plane liteav_av_image_copy_plane +#define ff_mp3adufloat_decoder liteav_ff_mp3adufloat_decoder +#define av_buffersink_get_sample_aspect_ratio liteav_av_buffersink_get_sample_aspect_ratio +#define ff_vp9_profiles liteav_ff_vp9_profiles +#define ff_overlay_init_x86 liteav_ff_overlay_init_x86 +#define av_bprint_clear liteav_av_bprint_clear +#define av_get_pix_fmt_name liteav_av_get_pix_fmt_name +#define av_tx_uninit liteav_av_tx_uninit +#define ff_hevc_sao_band_filter_neon_8 liteav_ff_hevc_sao_band_filter_neon_8 +#define av_opt_flag_is_set liteav_av_opt_flag_is_set +#define ff_aac_sbr_init liteav_ff_aac_sbr_init +#define ff_ps_hybrid_analysis_ileave_sse liteav_ff_ps_hybrid_analysis_ileave_sse +#define ff_h264_build_ref_list liteav_ff_h264_build_ref_list +#define ff_h264_idct_dc_add_8_c liteav_ff_h264_idct_dc_add_8_c +#define ff_h263_intra_MCBPC_vlc liteav_ff_h263_intra_MCBPC_vlc +#define av_md5_init 
liteav_av_md5_init +#define av_thread_message_queue_free liteav_av_thread_message_queue_free +#define av_dynarray_add_nofree liteav_av_dynarray_add_nofree +#define ff_psdsp_init liteav_ff_psdsp_init +#define ff_avg_h264_qpel8_mc20_10_sse2_cache64 liteav_ff_avg_h264_qpel8_mc20_10_sse2_cache64 +#define av_match_list liteav_av_match_list +#define ff_mpeg12_frame_rate_tab liteav_ff_mpeg12_frame_rate_tab +#define ff_thread_await_progress liteav_ff_thread_await_progress +#define ff_put_h264_qpel8_mc10_neon liteav_ff_put_h264_qpel8_mc10_neon +#define ff_pred4x4_horizontal_up_10_mmxext liteav_ff_pred4x4_horizontal_up_10_mmxext +#define ff_float_to_int32_a_avx2 liteav_ff_float_to_int32_a_avx2 +#define ff_ps_mul_pair_single_neon liteav_ff_ps_mul_pair_single_neon +#define ff_null_get_audio_buffer liteav_ff_null_get_audio_buffer +#define ff_init_ff_cos_tabs liteav_ff_init_ff_cos_tabs +#define ff_h264_idct8_add4_8_c liteav_ff_h264_idct8_add4_8_c +#define ff_cos_1024_fixed liteav_ff_cos_1024_fixed +#define ff_fdct248_islow_8 liteav_ff_fdct248_islow_8 +#define av_buffersink_set_frame_size liteav_av_buffersink_set_frame_size +#define yyset_lval liteav_yyset_lval +#define ff_aac_kbd_short_128_fixed liteav_ff_aac_kbd_short_128_fixed +#define ff_avg_h264_qpel16_mc00_neon liteav_ff_avg_h264_qpel16_mc00_neon +#define avio_rl16 liteav_avio_rl16 +#define ff_hevc_put_epel_uw_hv_neon_8 liteav_ff_hevc_put_epel_uw_hv_neon_8 +#define ff_hevc_hls_filter liteav_ff_hevc_hls_filter +#define ff_aac_pow2sf_tab liteav_ff_aac_pow2sf_tab +#define av_buffersrc_close liteav_av_buffersrc_close +#define avcodec_receive_packet liteav_avcodec_receive_packet +#define ff_mpegvideo_parser liteav_ff_mpegvideo_parser +#define ff_hevc_put_qpel_uw_weight_h1v2_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h1v2_neon_8 +#define swr_next_pts liteav_swr_next_pts +#define av_get_sample_fmt_string liteav_av_get_sample_fmt_string +#define av_thread_message_queue_send liteav_av_thread_message_queue_send +#define 
ff_h264_idct_add16_12_c liteav_ff_h264_idct_add16_12_c +#define ff_h264_idct8_add_neon liteav_ff_h264_idct8_add_neon +#define ff_yuv2rgb_get_func_ptr liteav_ff_yuv2rgb_get_func_ptr +#define av_packet_ref liteav_av_packet_ref +#define ff_fdct_ifast248 liteav_ff_fdct_ifast248 +#define ff_pw_18 liteav_ff_pw_18 +#define av_opt_set_dict liteav_av_opt_set_dict +#define ff_hevc_put_pixels_w8_neon_8_asm liteav_ff_hevc_put_pixels_w8_neon_8_asm +#define ff_ps_read_data liteav_ff_ps_read_data +#define av_channel_layout_extract_channel liteav_av_channel_layout_extract_channel +#define av_encryption_info_clone liteav_av_encryption_info_clone +#define sws_allocVec liteav_sws_allocVec +#define ff_hevc_set_neighbour_available liteav_ff_hevc_set_neighbour_available +#define ff_yuv2planeX_8_neon liteav_ff_yuv2planeX_8_neon +#define ff_flac_blocksize_table liteav_ff_flac_blocksize_table +#define vlc_css_selector_Append liteav_vlc_css_selector_Append +#define ff_parse_mpeg2_descriptor liteav_ff_parse_mpeg2_descriptor +#define ffio_read_varlen liteav_ffio_read_varlen +#define ffio_read_size liteav_ffio_read_size +#define ff_accept liteav_ff_accept +#define ff_ebur128_add_frames_planar_float liteav_ff_ebur128_add_frames_planar_float +#define ff_draw_color liteav_ff_draw_color +#define ff_isom_get_vpcc_features liteav_ff_isom_get_vpcc_features +#define ff_framesync_preinit liteav_ff_framesync_preinit +#define ff_hevc_parser liteav_ff_hevc_parser +#define ff_pred8x8l_down_right_8_ssse3 liteav_ff_pred8x8l_down_right_8_ssse3 +#define av_buffer_pool_init2 liteav_av_buffer_pool_init2 +#define ff_hevc_put_qpel_uni_neon_wrapper liteav_ff_hevc_put_qpel_uni_neon_wrapper +#define ff_avg_h264_qpel8or16_hv2_lowpass_ssse3 liteav_ff_avg_h264_qpel8or16_hv2_lowpass_ssse3 +#define ffurl_get_short_seek liteav_ffurl_get_short_seek +#define ff_pred16x16_vertical_8_sse liteav_ff_pred16x16_vertical_8_sse +#define ff_mb_type_b_tab liteav_ff_mb_type_b_tab +#define ff_h263_decode_motion 
liteav_ff_h263_decode_motion +#define ff_hevc_put_pixels_w4_neon_8_asm liteav_ff_hevc_put_pixels_w4_neon_8_asm +#define ff_free_picture_tables liteav_ff_free_picture_tables +#define av_timecode_make_smpte_tc_string liteav_av_timecode_make_smpte_tc_string +#define av_murmur3_alloc liteav_av_murmur3_alloc +#define ff_deblock_v_luma_10_avx liteav_ff_deblock_v_luma_10_avx +#define ff_mpeg12_vlc_dc_chroma_bits liteav_ff_mpeg12_vlc_dc_chroma_bits +#define ff_put_pixels8_x2_no_rnd_neon liteav_ff_put_pixels8_x2_no_rnd_neon +#define ff_simple_idct8_avx liteav_ff_simple_idct8_avx +#define ff_nv21_to_rgba_neon liteav_ff_nv21_to_rgba_neon +#define ff_h264_chroma_dc_dequant_idct_8_c liteav_ff_h264_chroma_dc_dequant_idct_8_c +#define ff_hevc_sao_band_w16_neon_8 liteav_ff_hevc_sao_band_w16_neon_8 +#define av_hwdevice_ctx_create liteav_av_hwdevice_ctx_create +#define av_muxer_iterate liteav_av_muxer_iterate +#define ff_faandct248 liteav_ff_faandct248 +#define ff_pack_2ch_int16_to_float_a_sse2 liteav_ff_pack_2ch_int16_to_float_a_sse2 +#define ff_mov_read_esds liteav_ff_mov_read_esds +#define avformat_init_output liteav_avformat_init_output +#define av_strndup liteav_av_strndup +#define ff_simple_idct12_sse2 liteav_ff_simple_idct12_sse2 +#define av_msg liteav_av_msg +#define ff_hevc_compute_poc liteav_ff_hevc_compute_poc +#define text_style_merge liteav_text_style_merge +#define av_strlcat liteav_av_strlcat +#define ff_h2645_packet_split liteav_ff_h2645_packet_split +#define ff_avg_h264_qpel16_mc23_10_sse2 liteav_ff_avg_h264_qpel16_mc23_10_sse2 +#define av_buffer_realloc liteav_av_buffer_realloc +#define ff_ass_split_dialog liteav_ff_ass_split_dialog +#define ff_hevc_deblocking_boundary_strengths liteav_ff_hevc_deblocking_boundary_strengths +#define ff_w1_plus_w3_lo liteav_ff_w1_plus_w3_lo +#define ff_hevc_transform_8x8_neon_8_asm liteav_ff_hevc_transform_8x8_neon_8_asm +#define av_thread_message_queue_set_err_send liteav_av_thread_message_queue_set_err_send +#define 
av_log_get_flags liteav_av_log_get_flags +#define ff_get_format liteav_ff_get_format +#define ff_framesync_get_class liteav_ff_framesync_get_class +#define ff_h264_cabac_tables liteav_ff_h264_cabac_tables +#define ff_hevc_put_qpel_h3v3_neon_8 liteav_ff_hevc_put_qpel_h3v3_neon_8 +#define ff_update_picture_tables liteav_ff_update_picture_tables +#define ff_w3_min_w7_lo liteav_ff_w3_min_w7_lo +#define av_bsf_init liteav_av_bsf_init +#define av_frame_set_colorspace liteav_av_frame_set_colorspace +#define ff_fdct248_islow_10 liteav_ff_fdct248_islow_10 +#define ff_put_h264_qpel16_mc33_neon liteav_ff_put_h264_qpel16_mc33_neon +#define ff_ac3_log_add_tab liteav_ff_ac3_log_add_tab +#define ff_hevc_put_qpel_uw_weight_h2v1_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h2v1_neon_8 +#define ff_tns_max_bands_1024 liteav_ff_tns_max_bands_1024 +#define ff_avg_h264_qpel8_mc31_10_sse2 liteav_ff_avg_h264_qpel8_mc31_10_sse2 +#define ff_pred8x8l_vertical_right_8_sse2 liteav_ff_pred8x8l_vertical_right_8_sse2 +#define av_bsf_alloc liteav_av_bsf_alloc +#define ff_h263_find_frame_end liteav_ff_h263_find_frame_end +#define ff_dither_8x8_32 liteav_ff_dither_8x8_32 +#define ff_h264_weight_4_10_sse4 liteav_ff_h264_weight_4_10_sse4 +#define avcodec_default_get_format liteav_avcodec_default_get_format +#define ff_biweight_h264_pixels_16_neon liteav_ff_biweight_h264_pixels_16_neon +#define ff_pred8x8_dc_rv40_8_mmxext liteav_ff_pred8x8_dc_rv40_8_mmxext +#define av_set_options_string liteav_av_set_options_string +#define ff_srt_demuxer liteav_ff_srt_demuxer +#define swri_oldapi_conv_flt_to_s16_neon liteav_swri_oldapi_conv_flt_to_s16_neon +#define ff_h264_idct8_add_14_c liteav_ff_h264_idct8_add_14_c +#define swri_dither_init liteav_swri_dither_init +#define ff_h264_pred_weight_table liteav_ff_h264_pred_weight_table +#define ff_h264_h_loop_filter_luma_neon liteav_ff_h264_h_loop_filter_luma_neon +#define ff_put_pixels4_l2_shift5_mmxext liteav_ff_put_pixels4_l2_shift5_mmxext +#define yydebug liteav_yydebug 
+#define av_packet_unref liteav_av_packet_unref +#define ff_hevc_put_qpel_uw_h3v3_neon_8 liteav_ff_hevc_put_qpel_uw_h3v3_neon_8 +#define ff_pack_8ch_int32_to_float_a_avx liteav_ff_pack_8ch_int32_to_float_a_avx +#define av_spherical_projection_name liteav_av_spherical_projection_name +#define ff_flac_demuxer liteav_ff_flac_demuxer +#define ff_tls_protocol liteav_ff_tls_protocol +#define avcodec_find_encoder_by_name liteav_avcodec_find_encoder_by_name +#define ff_mpeg4_decode_partitions liteav_ff_mpeg4_decode_partitions +#define ff_put_no_rnd_qpel8_mc13_old_c liteav_ff_put_no_rnd_qpel8_mc13_old_c +#define av_bsf_send_packet liteav_av_bsf_send_packet +#define ff_ass_add_rect liteav_ff_ass_add_rect +#define ff_faandct liteav_ff_faandct +#define ff_put_h264_qpel8or16_hv1_lowpass_op_sse2 liteav_ff_put_h264_qpel8or16_hv1_lowpass_op_sse2 +#define ff_avg_h264_qpel8_mc30_10_sse2_cache64 liteav_ff_avg_h264_qpel8_mc30_10_sse2_cache64 +#define ff_alloc_dir_entry liteav_ff_alloc_dir_entry +#define ff_hevc_qpel_filters liteav_ff_hevc_qpel_filters +#define ff_mdct_win_fixed liteav_ff_mdct_win_fixed +#define ff_mov_write_packet liteav_ff_mov_write_packet +#define ff_sine_512 liteav_ff_sine_512 +#define ff_rtmp_check_alloc_array liteav_ff_rtmp_check_alloc_array +#define av_image_check_size liteav_av_image_check_size +#define ff_pred8x8_plane_neon liteav_ff_pred8x8_plane_neon +#define ff_h264_weight_8_mmxext liteav_ff_h264_weight_8_mmxext +#define ff_aac_codebook_vector_vals liteav_ff_aac_codebook_vector_vals +#define ff_af_queue_init liteav_ff_af_queue_init +#define ff_pred8x8l_top_dc_8_ssse3 liteav_ff_pred8x8l_top_dc_8_ssse3 +#define ff_swb_offset_512 liteav_ff_swb_offset_512 +#define vlc_css_expression_AddTerm liteav_vlc_css_expression_AddTerm +#define sws_getContext liteav_sws_getContext +#define ff_h264_update_thread_context liteav_ff_h264_update_thread_context +#define ff_hevc_put_qpel_uw_weight_h1_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h1_neon_8 +#define avio_read_dir 
liteav_avio_read_dir +#define sws_printVec2 liteav_sws_printVec2 +#define av_get_channel_layout_string liteav_av_get_channel_layout_string +#define av_audio_fifo_read liteav_av_audio_fifo_read +#define ff_put_h264_qpel4_mc02_10_mmxext liteav_ff_put_h264_qpel4_mc02_10_mmxext +#define ff_filter_activate liteav_ff_filter_activate +#define av_cpu_count liteav_av_cpu_count +#define ff_put_pixels8_y2_neon liteav_ff_put_pixels8_y2_neon +#define av_parser_iterate liteav_av_parser_iterate +#define ffio_ensure_seekback liteav_ffio_ensure_seekback +#define ff_lzw_encode_flush liteav_ff_lzw_encode_flush +#define ff_sbr_sum_square_neon liteav_ff_sbr_sum_square_neon +#define ff_pw_m1 liteav_ff_pw_m1 +#define av_get_known_color_name liteav_av_get_known_color_name +#define ffio_get_checksum liteav_ffio_get_checksum +#define ff_put_h264_qpel16_mc01_neon liteav_ff_put_h264_qpel16_mc01_neon +#define ff_live_flv_demuxer liteav_ff_live_flv_demuxer +#define avpriv_get_raw_pix_fmt_tags liteav_avpriv_get_raw_pix_fmt_tags +#define ff_mpa_synth_window_float liteav_ff_mpa_synth_window_float +#define av_display_rotation_set liteav_av_display_rotation_set +#define ff_rgb24toyv12 liteav_ff_rgb24toyv12 +#define av_hex_dump_log liteav_av_hex_dump_log +#define av_encryption_init_info_free liteav_av_encryption_init_info_free +#define ff_pred8x8l_vertical_8_ssse3 liteav_ff_pred8x8l_vertical_8_ssse3 +#define av_find_nearest_q_idx liteav_av_find_nearest_q_idx +#define ff_put_h264_qpel16_mc10_10_sse2 liteav_ff_put_h264_qpel16_mc10_10_sse2 +#define ff_hevc_put_qpel_uw_h3v2_neon_8 liteav_ff_hevc_put_qpel_uw_h3v2_neon_8 +#define ff_avg_h264_qpel16_mc23_neon liteav_ff_avg_h264_qpel16_mc23_neon +#define av_dict_set_int liteav_av_dict_set_int +#define ff_h264_weight_16_10_sse4 liteav_ff_h264_weight_16_10_sse4 +#define av_get_planar_sample_fmt liteav_av_get_planar_sample_fmt +#define ff_w3_min_w7_hi liteav_ff_w3_min_w7_hi +#define ff_sine_windows_fixed liteav_ff_sine_windows_fixed +#define 
ff_deblock_v_luma_intra_8_avx liteav_ff_deblock_v_luma_intra_8_avx +#define ff_yuv420p_to_rgba_neon liteav_ff_yuv420p_to_rgba_neon +#define av_packet_add_side_data liteav_av_packet_add_side_data +#define ff_unpack_6ch_float_to_float_a_avx liteav_ff_unpack_6ch_float_to_float_a_avx +#define avio_read_partial liteav_avio_read_partial +#define avpriv_dict_set_timestamp liteav_avpriv_dict_set_timestamp +#define ff_h263_pred_dc liteav_ff_h263_pred_dc +#define ff_hevc_put_qpel_neon_wrapper liteav_ff_hevc_put_qpel_neon_wrapper +#define ff_sine_4096_fixed liteav_ff_sine_4096_fixed +#define ff_id3v2_write_metadata liteav_ff_id3v2_write_metadata +#define av_pkt_dump_log2 liteav_av_pkt_dump_log2 +#define ff_rtp_codec_id liteav_ff_rtp_codec_id +#define av_get_random_seed liteav_av_get_random_seed +#define av_opt_eval_int liteav_av_opt_eval_int +#define ff_alac_at_decoder liteav_ff_alac_at_decoder +#define ff_ac3_parse_header liteav_ff_ac3_parse_header +#define ff_avg_h264_qpel4_mc12_10_mmxext liteav_ff_avg_h264_qpel4_mc12_10_mmxext +#define ff_ass_decoder_flush liteav_ff_ass_decoder_flush +#define ff_hevc_skip_flag_decode liteav_ff_hevc_skip_flag_decode +#define avpriv_vga16_font liteav_avpriv_vga16_font +#define av_tx_init liteav_av_tx_init +#define ff_af_aresample liteav_ff_af_aresample +#define av_ripemd_update liteav_av_ripemd_update +#define ff_hevc_h_loop_filter_luma_neon liteav_ff_hevc_h_loop_filter_luma_neon +#define ff_raw_write_packet liteav_ff_raw_write_packet +#define ff_null_bsf liteav_ff_null_bsf +#define ff_jpeg_fdct_islow_8 liteav_ff_jpeg_fdct_islow_8 +#define ff_h264_idct_add16intra_10_c liteav_ff_h264_idct_add16intra_10_c +#define ff_write_chained liteav_ff_write_chained +#define ffio_close_null_buf liteav_ffio_close_null_buf +#define ff_pred8x8_plane_8_sse2 liteav_ff_pred8x8_plane_8_sse2 +#define ff_filter_frame liteav_ff_filter_frame +#define ff_filter_get_nb_threads liteav_ff_filter_get_nb_threads +#define ff_h263_decode_end liteav_ff_h263_decode_end 
+#define avpriv_cga_font liteav_avpriv_cga_font +#define ff_hevc_decode_short_term_rps liteav_ff_hevc_decode_short_term_rps +#define ff_pred8x8l_horizontal_8_ssse3 liteav_ff_pred8x8l_horizontal_8_ssse3 +#define ff_aac_adtstoasc_bsf liteav_ff_aac_adtstoasc_bsf +#define ff_hevc_cu_qp_delta_sign_flag liteav_ff_hevc_cu_qp_delta_sign_flag +#define av_bprint_finalize liteav_av_bprint_finalize +#define ff_hevc_unref_frame liteav_ff_hevc_unref_frame +#define ff_mpegaudio_parser liteav_ff_mpegaudio_parser +#define ff_put_h264_qpel8_mc03_neon liteav_ff_put_h264_qpel8_mc03_neon +#define av_packet_make_writable liteav_av_packet_make_writable +#define av_force_cpu_flags liteav_av_force_cpu_flags +#define av_fast_realloc liteav_av_fast_realloc +#define ff_default_chroma_qscale_table liteav_ff_default_chroma_qscale_table +#define av_bsf_list_free liteav_av_bsf_list_free +#define av_frame_set_channels liteav_av_frame_set_channels +#define ff_put_h264_qpel16_mc30_neon liteav_ff_put_h264_qpel16_mc30_neon +#define ff_vorbis_channel_layouts liteav_ff_vorbis_channel_layouts +#define ff_cos_32768_fixed liteav_ff_cos_32768_fixed +#define ff_flv_muxer liteav_ff_flv_muxer +#define ff_hevc_idct_16x16_dc_neon_8 liteav_ff_hevc_idct_16x16_dc_neon_8 +#define ff_h264_execute_decode_slices liteav_ff_h264_execute_decode_slices +#define ff_af_queue_remove liteav_ff_af_queue_remove +#define avpicture_alloc liteav_avpicture_alloc +#define ff_mpeg2_non_linear_qscale liteav_ff_mpeg2_non_linear_qscale +#define ff_mpegvideodsp_init liteav_ff_mpegvideodsp_init +#define ff_hevc_sao_edge_eo1_w64_neon_8 liteav_ff_hevc_sao_edge_eo1_w64_neon_8 +#define interleaveBytes liteav_interleaveBytes +#define ff_avg_h264_qpel16_mc13_10_sse2 liteav_ff_avg_h264_qpel16_mc13_10_sse2 +#define av_parser_next liteav_av_parser_next +#define ff_pred8x8_top_dc_10_sse2 liteav_ff_pred8x8_top_dc_10_sse2 +#define ff_avg_h264_qpel4_mc11_10_mmxext liteav_ff_avg_h264_qpel4_mc11_10_mmxext +#define ff_wavpack_decoder 
liteav_ff_wavpack_decoder +#define avio_seek_time liteav_avio_seek_time +#define ff_hevc_add_residual_32x32_neon_8 liteav_ff_hevc_add_residual_32x32_neon_8 +#define av_small_strptime liteav_av_small_strptime +#define ff_put_pixels16_y2_neon liteav_ff_put_pixels16_y2_neon +#define ff_hevc_put_epel_h_neon_8 liteav_ff_hevc_put_epel_h_neon_8 +#define ff_imdct_calc_sse liteav_ff_imdct_calc_sse +#define av_picture_copy liteav_av_picture_copy +#define av_stereo3d_type_name liteav_av_stereo3d_type_name +#define av_frame_set_metadata liteav_av_frame_set_metadata +#define av_hwdevice_ctx_create_derived liteav_av_hwdevice_ctx_create_derived +#define av_sdp_create liteav_av_sdp_create +#define ff_mpeg4_intra_level liteav_ff_mpeg4_intra_level +#define ff_hevc_transform_32x32_neon_8 liteav_ff_hevc_transform_32x32_neon_8 +#define ff_pred8x8_dc_10_mmxext liteav_ff_pred8x8_dc_10_mmxext +#define rgb15to32 liteav_rgb15to32 +#define av_opt_set liteav_av_opt_set +#define ff_h264_luma_dc_dequant_idct_mmx liteav_ff_h264_luma_dc_dequant_idct_mmx +#define ff_avg_h264_qpel8_h_lowpass_l2_mmxext liteav_ff_avg_h264_qpel8_h_lowpass_l2_mmxext +#define avio_pause liteav_avio_pause +#define ff_fill_rgba_map liteav_ff_fill_rgba_map +#define ff_yuv420p_to_bgra_neon liteav_ff_yuv420p_to_bgra_neon +#define av_dict_get_string liteav_av_dict_get_string +#define ff_hcscale_fast_c liteav_ff_hcscale_fast_c +#define ff_is_multicast_address liteav_ff_is_multicast_address +#define ff_replaygain_export_raw liteav_ff_replaygain_export_raw +#define ff_fft_permute_sse liteav_ff_fft_permute_sse +#define ff_mba_max liteav_ff_mba_max +#define vlc_css_rules_Delete liteav_vlc_css_rules_Delete +#define ff_shuffle_bytes_3210_ssse3 liteav_ff_shuffle_bytes_3210_ssse3 +#define ff_put_h264_qpel16_mc20_10_sse2 liteav_ff_put_h264_qpel16_mc20_10_sse2 +#define ff_htmlmarkup_to_ass liteav_ff_htmlmarkup_to_ass +#define av_frame_get_color_range liteav_av_frame_get_color_range +#define ff_h263_pred_motion liteav_ff_h263_pred_motion 
+#define av_fifo_free liteav_av_fifo_free +#define ff_urlcontext_child_class_next liteav_ff_urlcontext_child_class_next +#define ff_avg_h264_qpel8_mc33_10_sse2 liteav_ff_avg_h264_qpel8_mc33_10_sse2 +#define ff_pw_53 liteav_ff_pw_53 +#define ff_h263_decode_mba liteav_ff_h263_decode_mba +#define ff_avg_h264_qpel4_hv_lowpass_v_mmxext liteav_ff_avg_h264_qpel4_hv_lowpass_v_mmxext +#define ff_sbr_sum64x5_neon liteav_ff_sbr_sum64x5_neon +#define av_samples_alloc_array_and_samples liteav_av_samples_alloc_array_and_samples +#define av_audio_fifo_realloc liteav_av_audio_fifo_realloc +#define ff_thread_release_buffer liteav_ff_thread_release_buffer +#define ff_pack_2ch_int32_to_int32_a_sse2 liteav_ff_pack_2ch_int32_to_int32_a_sse2 +#define ff_hevc_put_qpel_uni_w_neon_8 liteav_ff_hevc_put_qpel_uni_w_neon_8 +#define ff_hevc_pred_angular_8x8_h_neon_8 liteav_ff_hevc_pred_angular_8x8_h_neon_8 +#define ff_mpv_export_qp_table liteav_ff_mpv_export_qp_table +#define vlc_css_unescaped liteav_vlc_css_unescaped +#define ff_avg_h264_qpel8_mc10_neon liteav_ff_avg_h264_qpel8_mc10_neon +#define av_rescale_q liteav_av_rescale_q +#define ff_psdsp_init_aarch64 liteav_ff_psdsp_init_aarch64 +#define av_q2intfloat liteav_av_q2intfloat +#define ff_pred8x8l_down_left_10_sse2 liteav_ff_pred8x8l_down_left_10_sse2 +#define ff_aac_at_decoder liteav_ff_aac_at_decoder +#define ff_filter_process_command liteav_ff_filter_process_command +#define ff_ass_split_free liteav_ff_ass_split_free +#define av_stristart liteav_av_stristart +#define ff_simple_idct8_put_sse2 liteav_ff_simple_idct8_put_sse2 +#define ff_mp3_at_decoder liteav_ff_mp3_at_decoder +#define avcodec_find_best_pix_fmt_of_list liteav_avcodec_find_best_pix_fmt_of_list +#define ff_hevc_put_qpel_h1_neon_8 liteav_ff_hevc_put_qpel_h1_neon_8 +#define ff_avg_h264_qpel16_mc20_10_sse2 liteav_ff_avg_h264_qpel16_mc20_10_sse2 +#define avpriv_io_delete liteav_avpriv_io_delete +#define ff_h264_v_loop_filter_luma_neon liteav_ff_h264_v_loop_filter_luma_neon 
+#define ff_ebur128_add_frames_planar_short liteav_ff_ebur128_add_frames_planar_short +#define ff_mpeg4_decode_picture_header liteav_ff_mpeg4_decode_picture_header +#define av_buffersrc_write_frame liteav_av_buffersrc_write_frame +#define av_crc_get_table liteav_av_crc_get_table +#define ff_ebur128_set_channel liteav_ff_ebur128_set_channel +#define ff_dither_4x4_16 liteav_ff_dither_4x4_16 +#define ff_framesync_get_frame liteav_ff_framesync_get_frame +#define ff_aac_ac3_parse liteav_ff_aac_ac3_parse +#define ff_vector_fmul_reverse_neon liteav_ff_vector_fmul_reverse_neon +#define ff_mpeg2_dc_scale_table liteav_ff_mpeg2_dc_scale_table +#define webvtt_parser_init liteav_webvtt_parser_init +#define sws_alloc_set_opts liteav_sws_alloc_set_opts +#define ff_text_peek_r8 liteav_ff_text_peek_r8 +#define ff_framesync_configure liteav_ff_framesync_configure +#define ff_aac_parser liteav_ff_aac_parser +#define ff_hevc_put_qpel_h2v1_neon_8 liteav_ff_hevc_put_qpel_h2v1_neon_8 +#define ff_put_h264_qpel8_mc20_10_ssse3_cache64 liteav_ff_put_h264_qpel8_mc20_10_ssse3_cache64 +#define ff_hevc_transform_add_8x8_neon_8_asm liteav_ff_hevc_transform_add_8x8_neon_8_asm +#define ff_pred8x8_horizontal_8_ssse3 liteav_ff_pred8x8_horizontal_8_ssse3 +#define ff_w4_plus_w6_lo liteav_ff_w4_plus_w6_lo +#define av_fifo_alloc_array liteav_av_fifo_alloc_array +#define ff_fft_end_fixed_32 liteav_ff_fft_end_fixed_32 +#define ff_avg_pixels8_y2_neon liteav_ff_avg_pixels8_y2_neon +#define ff_init_vlc_sparse liteav_ff_init_vlc_sparse +#define ff_hevc_put_pel_uw_pixels_w24_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w24_neon_8_asm +#define ff_sbr_qmf_deint_bfly_neon liteav_ff_sbr_qmf_deint_bfly_neon +#define sws_subVec liteav_sws_subVec +#define ff_ac3_channels_tab liteav_ff_ac3_channels_tab +#define avformat_get_riff_video_tags liteav_avformat_get_riff_video_tags +#define ff_faanidct_put liteav_ff_faanidct_put +#define ff_pred4x4_down_right_10_avx liteav_ff_pred4x4_down_right_10_avx +#define 
ff_put_h264_qpel4_h_lowpass_l2_mmxext liteav_ff_put_h264_qpel4_h_lowpass_l2_mmxext +#define ff_get_cpu_flags_aarch64 liteav_ff_get_cpu_flags_aarch64 +#define ffurl_alloc liteav_ffurl_alloc +#define av_set_cpu_flags_mask liteav_av_set_cpu_flags_mask +#define ff_avg_h264_chroma_mc8_rnd_3dnow liteav_ff_avg_h264_chroma_mc8_rnd_3dnow +#define ff_merge_formats liteav_ff_merge_formats +#define ff_h264_p_sub_mb_type_info liteav_ff_h264_p_sub_mb_type_info +#define ff_mpeg1_decode_block_intra liteav_ff_mpeg1_decode_block_intra +#define yylex liteav_yylex +#define ff_sdp_write_media liteav_ff_sdp_write_media +#define ff_idctdsp_init_x86 liteav_ff_idctdsp_init_x86 +#define ff_hevc_rem_intra_luma_pred_mode_decode liteav_ff_hevc_rem_intra_luma_pred_mode_decode +#define ff_aac_sbr_ctx_init liteav_ff_aac_sbr_ctx_init +#define ff_put_h264_qpel16_mc02_10_sse2 liteav_ff_put_h264_qpel16_mc02_10_sse2 +#define ff_mpeg12_mbAddrIncrTable liteav_ff_mpeg12_mbAddrIncrTable +#define av_get_cpu_flags liteav_av_get_cpu_flags +#define ff_avg_h264_qpel8_mc11_10_sse2 liteav_ff_avg_h264_qpel8_mc11_10_sse2 +#define ff_eac3_default_cpl_band_struct liteav_ff_eac3_default_cpl_band_struct +#define ff_h264_decode_init_vlc liteav_ff_h264_decode_init_vlc +#define ff_frame_pool_audio_init liteav_ff_frame_pool_audio_init +#define ff_tlog_ref liteav_ff_tlog_ref +#define ff_w4_plus_w2_lo liteav_ff_w4_plus_w2_lo +#define ff_h264_quant_div6 liteav_ff_h264_quant_div6 +#define ff_get_guid liteav_ff_get_guid +#define ff_pack_2ch_float_to_int16_a_sse2 liteav_ff_pack_2ch_float_to_int16_a_sse2 +#define avcodec_send_frame liteav_avcodec_send_frame +#define ff_hevc_pred_angular_32x32_h_zero_neon_8 liteav_ff_hevc_pred_angular_32x32_h_zero_neon_8 +#define ff_put_h264_qpel16_mc30_10_sse2 liteav_ff_put_h264_qpel16_mc30_10_sse2 +#define ff_h264_init_poc liteav_ff_h264_init_poc +#define avfilter_graph_set_auto_convert liteav_avfilter_graph_set_auto_convert +#define ff_adts_header_parse liteav_ff_adts_header_parse +#define 
ff_h264dsp_init liteav_ff_h264dsp_init +#define ff_jref_idct_add liteav_ff_jref_idct_add +#define ff_bswapdsp_init liteav_ff_bswapdsp_init +#define av_des_crypt liteav_av_des_crypt +#define ff_put_h264_qpel16_mc00_10_sse2 liteav_ff_put_h264_qpel16_mc00_10_sse2 +#define ff_deblock_v_chroma_8_mmxext liteav_ff_deblock_v_chroma_8_mmxext +#define rgb48tobgr64_bswap liteav_rgb48tobgr64_bswap +#define ff_yuv2rgb_coeffs liteav_ff_yuv2rgb_coeffs +#define ff_hevc_pred_planar_4x4_neon_8_1 liteav_ff_hevc_pred_planar_4x4_neon_8_1 +#define av_samples_get_buffer_size liteav_av_samples_get_buffer_size +#define ff_pred16x16_plane_svq3_8_ssse3 liteav_ff_pred16x16_plane_svq3_8_ssse3 +#define ff_filter_set_ready liteav_ff_filter_set_ready +#define avcodec_find_best_pix_fmt2 liteav_avcodec_find_best_pix_fmt2 +#define ff_w5_min_w1 liteav_ff_w5_min_w1 +#define ff_mpeg4_intra_vlc liteav_ff_mpeg4_intra_vlc +#define ff_metadata_conv_ctx liteav_ff_metadata_conv_ctx +#define ff_raw_pix_fmt_tags liteav_ff_raw_pix_fmt_tags +#define ff_avg_h264_qpel16_mc33_10_sse2 liteav_ff_avg_h264_qpel16_mc33_10_sse2 +#define ff_framequeue_init liteav_ff_framequeue_init +#define ff_mdct_calc_c liteav_ff_mdct_calc_c +#define ff_h264qpel_init_x86 liteav_ff_h264qpel_init_x86 +#define ff_pred8x8_left_dc_neon liteav_ff_pred8x8_left_dc_neon +#define ff_hevc_pred_angular_4x4_neon_8 liteav_ff_hevc_pred_angular_4x4_neon_8 +#define ff_unpack_6ch_float_to_int32_u_sse2 liteav_ff_unpack_6ch_float_to_int32_u_sse2 +#define ff_mov_cenc_free liteav_ff_mov_cenc_free +#define ff_text_pos liteav_ff_text_pos +#define ff_int16_to_int32_u_sse2 liteav_ff_int16_to_int32_u_sse2 +#define av_opt_get_sample_fmt liteav_av_opt_get_sample_fmt +#define swr_set_channel_mapping liteav_swr_set_channel_mapping +#define av_hwdevice_hwconfig_alloc liteav_av_hwdevice_hwconfig_alloc +#define ff_hevc_sao_band_position_decode liteav_ff_hevc_sao_band_position_decode +#define ff_unpack_2ch_int16_to_float_u_ssse3 
liteav_ff_unpack_2ch_int16_to_float_u_ssse3 +#define av_content_light_metadata_create_side_data liteav_av_content_light_metadata_create_side_data +#define ff_lzw_encode_state_size liteav_ff_lzw_encode_state_size +#define ff_mpv_decode_init liteav_ff_mpv_decode_init +#define ff_mov_demuxer liteav_ff_mov_demuxer +#define ff_cos_32_fixed liteav_ff_cos_32_fixed +#define ffio_init_checksum liteav_ffio_init_checksum +#define ff_h264_idct_add_8_mmx liteav_ff_h264_idct_add_8_mmx +#define ff_h264_profiles liteav_ff_h264_profiles +#define ffurl_context_class liteav_ffurl_context_class +#define ff_ebur128_loudness_shortterm liteav_ff_ebur128_loudness_shortterm +#define ff_framequeue_global_init liteav_ff_framequeue_global_init +#define ff_latm_muxer liteav_ff_latm_muxer +#define av_hwframe_ctx_init liteav_av_hwframe_ctx_init +#define ff_qdmc_at_decoder liteav_ff_qdmc_at_decoder +#define ff_avg_h264_qpel16_mc20_10_sse2_cache64 liteav_ff_avg_h264_qpel16_mc20_10_sse2_cache64 +#define yy_switch_to_buffer liteav_yy_switch_to_buffer +#define ff_thread_await_progress2 liteav_ff_thread_await_progress2 +#define avpriv_mpa_bitrate_tab liteav_avpriv_mpa_bitrate_tab +#define ff_avg_h264_qpel8_mc32_10_sse2 liteav_ff_avg_h264_qpel8_mc32_10_sse2 +#define ff_mpeg4audio_get_config_gb liteav_ff_mpeg4audio_get_config_gb +#define ff_hevc_put_qpel_uw_weight_v1_neon_8 liteav_ff_hevc_put_qpel_uw_weight_v1_neon_8 +#define av_fopen_utf8 liteav_av_fopen_utf8 +#define ff_fft_calc_sse liteav_ff_fft_calc_sse +#define ff_h264_parser liteav_ff_h264_parser +#define ff_aac_sbr_ctx_close liteav_ff_aac_sbr_ctx_close +#define ff_avg_h264_qpel8_mc23_neon liteav_ff_avg_h264_qpel8_mc23_neon +#define rgb16to32 liteav_rgb16to32 +#define ff_shuffle_bytes_3012_ssse3 liteav_ff_shuffle_bytes_3012_ssse3 +#define av_sha512_size liteav_av_sha512_size +#define ff_pred16x16_plane_rv40_8_ssse3 liteav_ff_pred16x16_plane_rv40_8_ssse3 +#define ff_h264_queue_decode_slice liteav_ff_h264_queue_decode_slice +#define 
ff_weight_h264_pixels_8_neon liteav_ff_weight_h264_pixels_8_neon +#define ff_pred8x8l_horizontal_up_8_ssse3 liteav_ff_pred8x8l_horizontal_up_8_ssse3 +#define av_packet_split_side_data liteav_av_packet_split_side_data +#define ff_put_h264_qpel8_mc00_10_sse2 liteav_ff_put_h264_qpel8_mc00_10_sse2 +#define av_color_space_from_name liteav_av_color_space_from_name +#define ff_nv12_to_rgba_neon liteav_ff_nv12_to_rgba_neon +#define ff_put_h264_qpel8_mc12_neon liteav_ff_put_h264_qpel8_mc12_neon +#define ff_mdct_calc_neon liteav_ff_mdct_calc_neon +#define ff_init_desc_no_chr liteav_ff_init_desc_no_chr +#define ff_unpack_2ch_int16_to_int16_a_ssse3 liteav_ff_unpack_2ch_int16_to_int16_a_ssse3 +#define ff_hevc_put_qpel_bi_w_neon_8 liteav_ff_hevc_put_qpel_bi_w_neon_8 +#define ff_init_slice_from_src liteav_ff_init_slice_from_src +#define ff_mpa_synth_window_fixed liteav_ff_mpa_synth_window_fixed +#define ff_all_channel_counts liteav_ff_all_channel_counts +#define ff_me_cmp_init liteav_ff_me_cmp_init +#define ff_pred4x4_dc_8_mmxext liteav_ff_pred4x4_dc_8_mmxext +#define av_opt_set_double liteav_av_opt_set_double +#define av_hash_init liteav_av_hash_init +#define ff_weight_h264_pixels_4_neon liteav_ff_weight_h264_pixels_4_neon +#define av_fifo_realloc2 liteav_av_fifo_realloc2 +#define ff_fft_end_fixed liteav_ff_fft_end_fixed +#define ff_amf_write_number liteav_ff_amf_write_number +#define ff_sbr_qmf_deint_neg_neon liteav_ff_sbr_qmf_deint_neg_neon +#define ff_poll_frame liteav_ff_poll_frame +#define av_codec_iterate liteav_av_codec_iterate +#define ff_unpack_6ch_int32_to_float_u_sse2 liteav_ff_unpack_6ch_int32_to_float_u_sse2 +#define ff_mpeg4audio_channels liteav_ff_mpeg4audio_channels +#define ff_hevc_sao_edge_filter_8_neon liteav_ff_hevc_sao_edge_filter_8_neon +#define ff_h263_hwaccel_pixfmt_list_420 liteav_ff_h263_hwaccel_pixfmt_list_420 +#define av_buffersink_get_channel_layout liteav_av_buffersink_get_channel_layout +#define av_buffer_alloc liteav_av_buffer_alloc +#define 
yyget_leng liteav_yyget_leng +#define av_buffer_pool_get liteav_av_buffer_pool_get +#define ff_pred16x16_plane_svq3_8_sse2 liteav_ff_pred16x16_plane_svq3_8_sse2 +#define ff_hevc_decode_nal_sei liteav_ff_hevc_decode_nal_sei +#define ff_pack_6ch_int32_to_float_a_sse2 liteav_ff_pack_6ch_int32_to_float_a_sse2 +#define av_stereo3d_from_name liteav_av_stereo3d_from_name +#define ff_guess_image2_codec liteav_ff_guess_image2_codec +#define ff_mba_length liteav_ff_mba_length +#define ff_id3v2_picture_types liteav_ff_id3v2_picture_types +#define ff_pred8x8l_128_dc_10_sse2 liteav_ff_pred8x8l_128_dc_10_sse2 +#define avio_read_to_bprint liteav_avio_read_to_bprint +#define ff_decode_frame_props liteav_ff_decode_frame_props +#define av_encryption_info_get_side_data liteav_av_encryption_info_get_side_data +#define ff_pack_6ch_float_to_float_a_avx liteav_ff_pack_6ch_float_to_float_a_avx +#define ff_hevc_pred_angular_16x16_h_zero_neon_8 liteav_ff_hevc_pred_angular_16x16_h_zero_neon_8 +#define avpriv_get_gamma_from_trc liteav_avpriv_get_gamma_from_trc +#define av_fifo_drain liteav_av_fifo_drain +#define ff_inter_level liteav_ff_inter_level +#define ff_xvid_idct_init liteav_ff_xvid_idct_init +#define av_vorbis_parse_frame liteav_av_vorbis_parse_frame +#define ff_h264_flush_change liteav_ff_h264_flush_change +#define ff_h264_idct_dc_add_8_avx liteav_ff_h264_idct_dc_add_8_avx +#define ff_dither_8x8_73 liteav_ff_dither_8x8_73 +#define ff_hevc_put_qpel_uw_h2v2_neon_8 liteav_ff_hevc_put_qpel_uw_h2v2_neon_8 +#define ff_id3v2_finish liteav_ff_id3v2_finish +#define av_buffersink_get_frame_rate liteav_av_buffersink_get_frame_rate +#define ff_ps_add_squares_sse liteav_ff_ps_add_squares_sse +#define ff_aac_eld_window_512 liteav_ff_aac_eld_window_512 +#define ff_mov_cenc_init liteav_ff_mov_cenc_init +#define av_expr_parse_and_eval liteav_av_expr_parse_and_eval +#define ff_af_queue_close liteav_ff_af_queue_close +#define av_bitstream_filter_init liteav_av_bitstream_filter_init +#define 
ff_put_h264_qpel16_mc32_10_sse2 liteav_ff_put_h264_qpel16_mc32_10_sse2 +#define av_color_primaries_from_name liteav_av_color_primaries_from_name +#define ff_pred16x16_left_dc_10_sse2 liteav_ff_pred16x16_left_dc_10_sse2 +#define ff_hevc_sao_merge_flag_decode liteav_ff_hevc_sao_merge_flag_decode +#define ff_avg_h264_qpel4_mc01_10_mmxext liteav_ff_avg_h264_qpel4_mc01_10_mmxext +#define av_init_packet liteav_av_init_packet +#define ff_cos_64 liteav_ff_cos_64 +#define avpicture_fill liteav_avpicture_fill +#define swri_get_dither liteav_swri_get_dither +#define av_frame_is_writable liteav_av_frame_is_writable +#define ff_pred8x8l_horizontal_10_ssse3 liteav_ff_pred8x8l_horizontal_10_ssse3 +#define ff_hevc_put_qpel_uw_weight_h2v2_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h2v2_neon_8 +#define ff_ac3_fast_gain_tab liteav_ff_ac3_fast_gain_tab +#define ffio_rewind_with_probe_data liteav_ffio_rewind_with_probe_data +#define ff_unpack_2ch_float_to_int16_u_sse2 liteav_ff_unpack_2ch_float_to_int16_u_sse2 +#define ff_h264_idct_add16_8_c liteav_ff_h264_idct_add16_8_c +#define ff_sbr_qmf_post_shuffle_neon liteav_ff_sbr_qmf_post_shuffle_neon +#define ff_h264_chroma422_dc_dequant_idct_8_c liteav_ff_h264_chroma422_dc_dequant_idct_8_c +#define ff_ps_ctx_init liteav_ff_ps_ctx_init +#define ff_rtmpt_protocol liteav_ff_rtmpt_protocol +#define ff_h264_biweight_8_ssse3 liteav_ff_h264_biweight_8_ssse3 +#define ff_put_no_rnd_qpel16_mc33_old_c liteav_ff_put_no_rnd_qpel16_mc33_old_c +#define ff_pred8x8_tm_vp8_8_mmxext liteav_ff_pred8x8_tm_vp8_8_mmxext +#define ff_put_h264_chroma_mc8_rnd_ssse3 liteav_ff_put_h264_chroma_mc8_rnd_ssse3 +#define avpriv_h264_has_num_reorder_frames liteav_avpriv_h264_has_num_reorder_frames +#define ff_vorbis_vwin liteav_ff_vorbis_vwin +#define ff_put_h264_qpel8or16_v_lowpass_op_mmxext liteav_ff_put_h264_qpel8or16_v_lowpass_op_mmxext +#define ff_hevc_transform_4x4_neon_8_asm liteav_ff_hevc_transform_4x4_neon_8_asm +#define ff_mpa_alloc_tables liteav_ff_mpa_alloc_tables 
+#define ff_hevc_split_coding_unit_flag_decode liteav_ff_hevc_split_coding_unit_flag_decode +#define ff_channel_layouts_ref liteav_ff_channel_layouts_ref +#define ff_mdct_end_fixed liteav_ff_mdct_end_fixed +#define ff_gmc_c liteav_ff_gmc_c +#define ff_pred8x8_l0t_dc_neon liteav_ff_pred8x8_l0t_dc_neon +#define ff_pw_42 liteav_ff_pw_42 +#define avcodec_dct_get_class liteav_avcodec_dct_get_class +#define ff_h263p_decoder liteav_ff_h263p_decoder +#define ff_sine_window_init_fixed liteav_ff_sine_window_init_fixed +#define ff_w3_min_w1_hi liteav_ff_w3_min_w1_hi +#define ff_hevc_get_ref_list liteav_ff_hevc_get_ref_list +#define av_hmac_final liteav_av_hmac_final +#define av_vorbis_parse_frame_flags liteav_av_vorbis_parse_frame_flags +#define ff_h264_golomb_to_pict_type liteav_ff_h264_golomb_to_pict_type +#define ff_h264_pred_init_x86 liteav_ff_h264_pred_init_x86 +#define av_tea_size liteav_av_tea_size +#define av_display_matrix_flip liteav_av_display_matrix_flip +#define avfilter_init_str liteav_avfilter_init_str +#define ff_ass_style_get liteav_ff_ass_style_get +#define av_md5_alloc liteav_av_md5_alloc +#define rgb48tobgr48_bswap liteav_rgb48tobgr48_bswap +#define ff_avg_h264_qpel16_mc13_neon liteav_ff_avg_h264_qpel16_mc13_neon +#define ff_rtp_enc_name liteav_ff_rtp_enc_name +#define ff_mpadsp_init_aarch64 liteav_ff_mpadsp_init_aarch64 +#define ff_avg_pixels4_mmx liteav_ff_avg_pixels4_mmx +#define av_bsf_list_append liteav_av_bsf_list_append +#define av_vorbis_parse_free liteav_av_vorbis_parse_free +#define swri_noise_shaping_int16 liteav_swri_noise_shaping_int16 +#define av_mallocz liteav_av_mallocz +#define ff_cpu_xgetbv liteav_ff_cpu_xgetbv +#define ff_pred8x8_128_dc_neon liteav_ff_pred8x8_128_dc_neon +#define ff_hevc_v_loop_filter_luma_neon liteav_ff_hevc_v_loop_filter_luma_neon +#define ff_pack_6ch_float_to_float_u_mmx liteav_ff_pack_6ch_float_to_float_u_mmx +#define ff_hevc_idct_16x16_dc_neon_8_asm liteav_ff_hevc_idct_16x16_dc_neon_8_asm +#define avio_wl64 
liteav_avio_wl64 +#define vlc_css_parser_Clean liteav_vlc_css_parser_Clean +#define av_videotoolbox_alloc_context liteav_av_videotoolbox_alloc_context +#define ff_h264_idct_add16intra_neon liteav_ff_h264_idct_add16intra_neon +#define ff_pred8x8l_vertical_10_sse2 liteav_ff_pred8x8l_vertical_10_sse2 +#define av_escape liteav_av_escape +#define ff_draw_horiz_band liteav_ff_draw_horiz_band +#define ff_hevc_put_qpel_bi_neon_wrapper liteav_ff_hevc_put_qpel_bi_neon_wrapper +#define ff_mpeg_er_init liteav_ff_mpeg_er_init +#define ff_hevc_hls_filters liteav_ff_hevc_hls_filters +#define av_freep liteav_av_freep +#define ff_pred4x4_vertical_left_10_sse2 liteav_ff_pred4x4_vertical_left_10_sse2 +#define av_tempfile liteav_av_tempfile +#define ff_ps_add_squares_sse3 liteav_ff_ps_add_squares_sse3 +#define ff_ape_write_tag liteav_ff_ape_write_tag +#define ff_pred8x8_tm_vp8_8_mmx liteav_ff_pred8x8_tm_vp8_8_mmx +#define ff_avg_h264_qpel16_mc12_neon liteav_ff_avg_h264_qpel16_mc12_neon +#define ff_hevc_cbf_cb_cr_decode liteav_ff_hevc_cbf_cb_cr_decode +#define ff_pred4x4_vertical_right_10_avx liteav_ff_pred4x4_vertical_right_10_avx +#define ff_h264_idct_add16_neon liteav_ff_h264_idct_add16_neon +#define ff_mdct_init_fixed liteav_ff_mdct_init_fixed +#define ff_put_pixels16x16_c liteav_ff_put_pixels16x16_c +#define ff_hevc_put_pixels_w64_neon_8 liteav_ff_hevc_put_pixels_w64_neon_8 +#define ff_simple_idct12_put_sse2 liteav_ff_simple_idct12_put_sse2 +#define av_asprintf liteav_av_asprintf +#define ff_dither_8x8_220 liteav_ff_dither_8x8_220 +#define av_dict_get liteav_av_dict_get +#define ff_h264_idct8_add4_neon liteav_ff_h264_idct8_add4_neon +#define ff_hevc_pred_planar_32x32_neon_8 liteav_ff_hevc_pred_planar_32x32_neon_8 +#define avpriv_alloc_fixed_dsp liteav_avpriv_alloc_fixed_dsp +#define ff_hevc_sao_edge_filter_neon_8 liteav_ff_hevc_sao_edge_filter_neon_8 +#define ff_put_h264_qpel8_mc22_neon liteav_ff_put_h264_qpel8_mc22_neon +#define ff_http_do_new_request 
liteav_ff_http_do_new_request +#define ff_pred4x4_down_right_10_ssse3 liteav_ff_pred4x4_down_right_10_ssse3 +#define ff_pcm_bluray_decoder liteav_ff_pcm_bluray_decoder +#define ff_aac_num_swb_480 liteav_ff_aac_num_swb_480 +#define ff_put_qpel16_mc33_old_c liteav_ff_put_qpel16_mc33_old_c +#define ff_put_h264_qpel8_mc20_10_sse2_cache64 liteav_ff_put_h264_qpel8_mc20_10_sse2_cache64 +#define ff_hevc_put_qpel_uw_weight_h1v3_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h1v3_neon_8 +#define ff_sine_32 liteav_ff_sine_32 +#define av_log2_16bit liteav_av_log2_16bit +#define avio_write liteav_avio_write +#define rgb16tobgr16 liteav_rgb16tobgr16 +#define ff_hevc_sao_edge_eo2_w32_neon_8 liteav_ff_hevc_sao_edge_eo2_w32_neon_8 +#define ff_simple_idct10_put_sse2 liteav_ff_simple_idct10_put_sse2 +#define ff_mpeg_er_frame_start liteav_ff_mpeg_er_frame_start +#define yylex_init liteav_yylex_init +#define ff_rtp_chain_mux_open liteav_ff_rtp_chain_mux_open +#define ff_h264_idct_add_9_c liteav_ff_h264_idct_add_9_c +#define ff_deblock_v_chroma_intra_8_sse2 liteav_ff_deblock_v_chroma_intra_8_sse2 +#define ff_pred16x16_plane_svq3_8_mmx liteav_ff_pred16x16_plane_svq3_8_mmx +#define ff_h264_idct8_add_10_sse2 liteav_ff_h264_idct8_add_10_sse2 +#define ffurl_write liteav_ffurl_write +#define ff_avg_h264_qpel8_mc12_10_sse2 liteav_ff_avg_h264_qpel8_mc12_10_sse2 +#define av_opt_get liteav_av_opt_get +#define ff_qpeldsp_init_x86 liteav_ff_qpeldsp_init_x86 +#define ff_hevc_pred_angular_16x16_neon_8 liteav_ff_hevc_pred_angular_16x16_neon_8 +#define ff_avg_h264_qpel16_mc11_10_sse2 liteav_ff_avg_h264_qpel16_mc11_10_sse2 +#define ff_rtp_get_codec_info liteav_ff_rtp_get_codec_info +#define ff_butterflies_float_vfp liteav_ff_butterflies_float_vfp +#define ff_hevc_put_qpel_uw_bi_v_neon_8 liteav_ff_hevc_put_qpel_uw_bi_v_neon_8 +#define ff_resample_common_apply_filter_x4_s16_neon liteav_ff_resample_common_apply_filter_x4_s16_neon +#define ff_j_rev_dct2 liteav_ff_j_rev_dct2 +#define av_videotoolbox_default_init 
liteav_av_videotoolbox_default_init +#define av_fft_end liteav_av_fft_end +#define ff_set_cmp liteav_ff_set_cmp +#define ff_sine_960 liteav_ff_sine_960 +#define ff_hevc_transform_add_32x32_neon_8_asm liteav_ff_hevc_transform_add_32x32_neon_8_asm +#define ff_avg_h264_chroma_mc4_ssse3 liteav_ff_avg_h264_chroma_mc4_ssse3 +#define ff_unpack_2ch_float_to_int32_u_sse2 liteav_ff_unpack_2ch_float_to_int32_u_sse2 +#define ff_avg_qpel16_mc13_old_c liteav_ff_avg_qpel16_mc13_old_c +#define ff_avg_pixels8_l2_shift5_mmxext liteav_ff_avg_pixels8_l2_shift5_mmxext +#define ff_pred8x8l_down_right_8_sse2 liteav_ff_pred8x8l_down_right_8_sse2 +#define avcodec_encode_video2 liteav_avcodec_encode_video2 +#define ff_pred4x4_vertical_right_10_sse2 liteav_ff_pred4x4_vertical_right_10_sse2 +#define rgb24tobgr32 liteav_rgb24tobgr32 +#define ff_sine_1024_fixed liteav_ff_sine_1024_fixed +#define ff_avg_h264_qpel4_mc22_10_mmxext liteav_ff_avg_h264_qpel4_mc22_10_mmxext +#define av_mdct_calc liteav_av_mdct_calc +#define swscale_license liteav_swscale_license +#define ff_ass_split liteav_ff_ass_split +#define ff_pred8x8l_down_right_10_avx liteav_ff_pred8x8l_down_right_10_avx +#define ff_avg_h264_qpel4_hv_lowpass_h_mmxext liteav_ff_avg_h264_qpel4_hv_lowpass_h_mmxext +#define av_i2int liteav_av_i2int +#define ff_pred4x4_horizontal_down_10_sse2 liteav_ff_pred4x4_horizontal_down_10_sse2 +#define ff_avg_h264_qpel8_mc30_neon liteav_ff_avg_h264_qpel8_mc30_neon +#define av_frame_set_pkt_size liteav_av_frame_set_pkt_size +#define ff_cos_4096_fixed liteav_ff_cos_4096_fixed +#define ff_put_h264_chroma_mc2_10_mmxext liteav_ff_put_h264_chroma_mc2_10_mmxext +#define av_strlcatf liteav_av_strlcatf +#define ff_mpeg1_aspect liteav_ff_mpeg1_aspect +#define av_strcasecmp liteav_av_strcasecmp +#define ff_id3v2_34_metadata_conv liteav_ff_id3v2_34_metadata_conv +#define ff_thread_report_progress2 liteav_ff_thread_report_progress2 +#define ff_simple_idct8_add_sse2 liteav_ff_simple_idct8_add_sse2 +#define 
avcodec_default_get_buffer2 liteav_avcodec_default_get_buffer2 +#define ff_mpv_common_defaults liteav_ff_mpv_common_defaults +#define ff_pred8x8l_down_right_10_sse2 liteav_ff_pred8x8l_down_right_10_sse2 +#define ff_ps_neg liteav_ff_ps_neg +#define ff_pack_2ch_int32_to_float_a_sse2 liteav_ff_pack_2ch_int32_to_float_a_sse2 +#define ff_pcm_dvd_decoder liteav_ff_pcm_dvd_decoder +#define ff_unpack_2ch_int16_to_float_u_sse2 liteav_ff_unpack_2ch_int16_to_float_u_sse2 +#define ff_pred8x8l_vertical_right_10_ssse3 liteav_ff_pred8x8l_vertical_right_10_ssse3 +#define av_opt_get_key_value liteav_av_opt_get_key_value +#define rgb16to24 liteav_rgb16to24 +#define ff_aac_kbd_short_128 liteav_ff_aac_kbd_short_128 +#define ff_hevc_split_transform_flag_decode liteav_ff_hevc_split_transform_flag_decode +#define ff_init_vscale_pfn liteav_ff_init_vscale_pfn +#define ff_hevc_pred_angular_32x32_neon_8 liteav_ff_hevc_pred_angular_32x32_neon_8 +#define ff_pack_6ch_int32_to_float_u_avx liteav_ff_pack_6ch_int32_to_float_u_avx +#define ff_pred16x16_hor_neon liteav_ff_pred16x16_hor_neon +#define av_default_item_name liteav_av_default_item_name +#define ff_h263_intra_MCBPC_bits liteav_ff_h263_intra_MCBPC_bits +#define av_timegm liteav_av_timegm +#define ff_pred8x8l_top_dc_10_avx liteav_ff_pred8x8l_top_dc_10_avx +#define ff_h264_idct_add16intra_10_sse2 liteav_ff_h264_idct_add16intra_10_sse2 +#define ff_h264_ref_picture liteav_ff_h264_ref_picture +#define ff_mp1_at_decoder liteav_ff_mp1_at_decoder +#define av_buffer_get_ref_count liteav_av_buffer_get_ref_count +#define ff_rawvideo_options liteav_ff_rawvideo_options +#define ff_parse_sample_format liteav_ff_parse_sample_format +#define ff_ac3_fast_decay_tab liteav_ff_ac3_fast_decay_tab +#define ff_avg_h264_qpel8_mc12_neon liteav_ff_avg_h264_qpel8_mc12_neon +#define av_spherical_tile_bounds liteav_av_spherical_tile_bounds +#define av_fifo_size liteav_av_fifo_size +#define ff_avg_h264_qpel16_mc03_10_sse2 liteav_ff_avg_h264_qpel16_mc03_10_sse2 +#define 
ff_avc_write_annexb_extradata liteav_ff_avc_write_annexb_extradata +#define av_buffer_pool_init liteav_av_buffer_pool_init +#define av_shrink_packet liteav_av_shrink_packet +#define ff_sine_512_fixed liteav_ff_sine_512_fixed +#define swr_inject_silence liteav_swr_inject_silence +#define ff_pred8x8l_vertical_right_10_avx liteav_ff_pred8x8l_vertical_right_10_avx +#define ff_hevc_transform_32x32_neon_8_asm liteav_ff_hevc_transform_32x32_neon_8_asm +#define ff_hevc_cu_chroma_qp_offset_flag liteav_ff_hevc_cu_chroma_qp_offset_flag +#define ff_put_h264_qpel16_mc33_10_sse2 liteav_ff_put_h264_qpel16_mc33_10_sse2 +#define ff_hevc_pred_angular_8x8_h_zero_neon_8 liteav_ff_hevc_pred_angular_8x8_h_zero_neon_8 +#define ff_m4v_demuxer liteav_ff_m4v_demuxer +#define ff_hevc_sao_edge_eo3_w64_neon_8 liteav_ff_hevc_sao_edge_eo3_w64_neon_8 +#define av_opt_set_video_rate liteav_av_opt_set_video_rate +#define ff_vorbis_codec liteav_ff_vorbis_codec +#define ff_h264_idct_add8_8_sse2 liteav_ff_h264_idct_add8_8_sse2 +#define ff_mdct_calc_c_fixed liteav_ff_mdct_calc_c_fixed +#define ff_avg_h264_qpel8_mc02_10_sse2 liteav_ff_avg_h264_qpel8_mc02_10_sse2 +#define avpriv_put_string liteav_avpriv_put_string +#define ff_h264_idct8_add4_8_sse2 liteav_ff_h264_idct8_add4_8_sse2 +#define av_sha_size liteav_av_sha_size +#define ff_id3v2_mime_tags liteav_ff_id3v2_mime_tags +#define ff_init_mpadsp_tabs_fixed liteav_ff_init_mpadsp_tabs_fixed +#define ff_put_h264_qpel8_mc32_10_sse2 liteav_ff_put_h264_qpel8_mc32_10_sse2 +#define av_dict_copy liteav_av_dict_copy +#define ff_pred8x8l_vertical_left_8_sse2 liteav_ff_pred8x8l_vertical_left_8_sse2 +#define ff_kbd_window_init_fixed liteav_ff_kbd_window_init_fixed +#define avfilter_link_get_channels liteav_avfilter_link_get_channels +#define ff_command_queue_pop liteav_ff_command_queue_pop +#define ff_hevc_put_epel_uw_pixels_w32_neon_8 liteav_ff_hevc_put_epel_uw_pixels_w32_neon_8 +#define ff_imdct_half_vfp liteav_ff_imdct_half_vfp +#define 
ff_put_h264_qpel8_h_lowpass_l2_mmxext liteav_ff_put_h264_qpel8_h_lowpass_l2_mmxext +#define ff_rtmp_packet_destroy liteav_ff_rtmp_packet_destroy +#define ff_mpeg4_dc_threshold liteav_ff_mpeg4_dc_threshold +#define ff_hevc_transform_4x4_neon_8 liteav_ff_hevc_transform_4x4_neon_8 +#define av_adler32_update liteav_av_adler32_update +#define swresample_version liteav_swresample_version +#define ff_avg_qpel8_mc33_old_c liteav_ff_avg_qpel8_mc33_old_c +#define ff_update_duplicate_context liteav_ff_update_duplicate_context +#define ff_h264_check_intra_pred_mode liteav_ff_h264_check_intra_pred_mode +#define av_frame_get_decode_error_flags liteav_av_frame_get_decode_error_flags +#define ff_inlink_queued_samples liteav_ff_inlink_queued_samples +#define ff_avg_h264_qpel8_mc13_10_sse2 liteav_ff_avg_h264_qpel8_mc13_10_sse2 +#define ff_init_desc_cfmt_convert liteav_ff_init_desc_cfmt_convert +#define av_rescale_rnd liteav_av_rescale_rnd +#define av_hwframe_ctx_alloc liteav_av_hwframe_ctx_alloc +#define ff_find_unused_picture liteav_ff_find_unused_picture +#define swr_build_matrix liteav_swr_build_matrix +#define ff_simple_idct10_put_avx liteav_ff_simple_idct10_put_avx +#define ff_alloc_picture liteav_ff_alloc_picture +#define ff_cos_16 liteav_ff_cos_16 +#define avpicture_free liteav_avpicture_free +#define ff_put_no_rnd_qpel16_mc11_old_c liteav_ff_put_no_rnd_qpel16_mc11_old_c +#define av_hwdevice_get_type_name liteav_av_hwdevice_get_type_name +#define ff_rvlc_rl_intra liteav_ff_rvlc_rl_intra +#define av_log2 liteav_av_log2 +#define ff_pred16x16_plane_neon liteav_ff_pred16x16_plane_neon +#define ff_avg_pixels16x16_c liteav_ff_avg_pixels16x16_c +#define ff_check_h264_startcode liteav_ff_check_h264_startcode +#define ff_aac_num_swb_1024 liteav_ff_aac_num_swb_1024 +#define ff_mov_iso639_to_lang liteav_ff_mov_iso639_to_lang +#define ff_pred8x8_l00_dc_neon liteav_ff_pred8x8_l00_dc_neon +#define ff_af_volume liteav_ff_af_volume +#define ff_put_pixels16_x2_neon 
liteav_ff_put_pixels16_x2_neon +#define ff_pb_80 liteav_ff_pb_80 +#define ff_mpeg4_studio_dc_chroma liteav_ff_mpeg4_studio_dc_chroma +#define ffurl_accept liteav_ffurl_accept +#define ff_vorbis_encoding_channel_layout_offsets liteav_ff_vorbis_encoding_channel_layout_offsets +#define ff_pred16x16_plane_svq3_8_mmxext liteav_ff_pred16x16_plane_svq3_8_mmxext +#define ff_put_vc1_chroma_mc8_nornd_ssse3 liteav_ff_put_vc1_chroma_mc8_nornd_ssse3 +#define ff_avg_h264_qpel8_mc00_10_sse2 liteav_ff_avg_h264_qpel8_mc00_10_sse2 +#define ff_avg_h264_qpel16_mc10_10_ssse3_cache64 liteav_ff_avg_h264_qpel16_mc10_10_ssse3_cache64 +#define av_samples_copy liteav_av_samples_copy +#define ff_text_read liteav_ff_text_read +#define avio_close liteav_avio_close +#define ff_init_block_index liteav_ff_init_block_index +#define ff_put_h264_qpel16_mc12_10_sse2 liteav_ff_put_h264_qpel16_mc12_10_sse2 +#define ff_mov_lang_to_iso639 liteav_ff_mov_lang_to_iso639 +#define ff_avg_h264_chroma_mc2_10_mmxext liteav_ff_avg_h264_chroma_mc2_10_mmxext +#define ff_put_h264_qpel4_mc13_10_mmxext liteav_ff_put_h264_qpel4_mc13_10_mmxext +#define yy_flush_buffer liteav_yy_flush_buffer +#define av_dict_set liteav_av_dict_set +#define ff_pred4x4_horizontal_down_10_avx liteav_ff_pred4x4_horizontal_down_10_avx +#define vlc_css_expression_Delete liteav_vlc_css_expression_Delete +#define av_twofish_size liteav_av_twofish_size +#define ff_put_pixels8_l2_8 liteav_ff_put_pixels8_l2_8 +#define ff_imdct36_blocks_float liteav_ff_imdct36_blocks_float +#define ff_h263_decode_frame liteav_ff_h263_decode_frame +#define ff_pw_1024 liteav_ff_pw_1024 +#define ff_hevc_cu_transquant_bypass_flag_decode liteav_ff_hevc_cu_transquant_bypass_flag_decode +#define ff_h264_idct8_add4_10_avx liteav_ff_h264_idct8_add4_10_avx +#define av_mediacodec_render_buffer_at_time liteav_av_mediacodec_render_buffer_at_time +#define ff_pack_8ch_float_to_int32_a_avx liteav_ff_pack_8ch_float_to_int32_a_avx +#define ff_mpeg12_vlc_dc_chroma_code 
liteav_ff_mpeg12_vlc_dc_chroma_code +#define ff_flac_decode_frame_header liteav_ff_flac_decode_frame_header +#define ff_id3v2_start liteav_ff_id3v2_start +#define ff_put_h264_qpel16_mc22_neon liteav_ff_put_h264_qpel16_mc22_neon +#define ff_put_h264_qpel8_mc31_neon liteav_ff_put_h264_qpel8_mc31_neon +#define ff_pred16x16_128_dc_10_sse2 liteav_ff_pred16x16_128_dc_10_sse2 +#define ff_avg_h264_qpel8_h_lowpass_ssse3 liteav_ff_avg_h264_qpel8_h_lowpass_ssse3 +#define av_default_get_category liteav_av_default_get_category +#define ff_pack_6ch_float_to_int32_u_sse2 liteav_ff_pack_6ch_float_to_int32_u_sse2 +#define ff_cos_1024 liteav_ff_cos_1024 +#define ff_crcEDB88320_update liteav_ff_crcEDB88320_update +#define yyalloc liteav_yyalloc +#define ff_hevc_parse_sps liteav_ff_hevc_parse_sps +#define ff_avg_h264_qpel8_mc01_10_sse2 liteav_ff_avg_h264_qpel8_mc01_10_sse2 +#define ff_merge_channel_layouts liteav_ff_merge_channel_layouts +#define av_hwframe_transfer_data liteav_av_hwframe_transfer_data +#define ff_all_formats liteav_ff_all_formats +#define ff_h264_weight_16_10_sse2 liteav_ff_h264_weight_16_10_sse2 +#define ff_mdct15_init liteav_ff_mdct15_init +#define av_thread_message_queue_recv liteav_av_thread_message_queue_recv +#define ff_avg_h264_qpel16_mc30_10_ssse3_cache64 liteav_ff_avg_h264_qpel16_mc30_10_ssse3_cache64 +#define av_samples_alloc liteav_av_samples_alloc +#define ff_http_protocol liteav_ff_http_protocol +#define avio_closep liteav_avio_closep +#define ff_add_channel_layout liteav_ff_add_channel_layout +#define ff_h264_hl_decode_mb liteav_ff_h264_hl_decode_mb +#define ff_hevc_put_epel_uw_pixels_w24_neon_8 liteav_ff_hevc_put_epel_uw_pixels_w24_neon_8 +#define ff_dca_profiles liteav_ff_dca_profiles +#define ff_deblock_h_chroma_10_sse2 liteav_ff_deblock_h_chroma_10_sse2 +#define ff_avg_h264_qpel16_mc32_neon liteav_ff_avg_h264_qpel16_mc32_neon +#define avfilter_link_set_closed liteav_avfilter_link_set_closed +#define ff_pred8x8l_down_left_8_mmxext 
liteav_ff_pred8x8l_down_left_8_mmxext +#define avpriv_slicethread_create liteav_avpriv_slicethread_create +#define ff_put_h264_chroma_mc4_mmx liteav_ff_put_h264_chroma_mc4_mmx +#define ff_ac3_parser liteav_ff_ac3_parser +#define ff_uyvytoyuv422_sse2 liteav_ff_uyvytoyuv422_sse2 +#define av_opt_get_video_rate liteav_av_opt_get_video_rate +#define ffio_fdopen liteav_ffio_fdopen +#define avfilter_register liteav_avfilter_register +#define ff_init_ff_cos_tabs_fixed liteav_ff_init_ff_cos_tabs_fixed +#define yyget_lval liteav_yyget_lval +#define av_file_unmap liteav_av_file_unmap +#define ff_hevc_sao_type_idx_decode liteav_ff_hevc_sao_type_idx_decode +#define ff_hevc_idct_4x4_dc_neon_8_asm liteav_ff_hevc_idct_4x4_dc_neon_8_asm +#define av_compare_mod liteav_av_compare_mod +#define av_realloc liteav_av_realloc +#define yyset_debug liteav_yyset_debug +#define av_fifo_generic_read liteav_av_fifo_generic_read +#define avio_put_str16be liteav_avio_put_str16be +#define ff_ebur128_add_frames_planar_int liteav_ff_ebur128_add_frames_planar_int +#define avfilter_graph_config liteav_avfilter_graph_config +#define ff_mpa_synth_init_float liteav_ff_mpa_synth_init_float +#define av_md5_update liteav_av_md5_update +#define ff_h264_idct_add8_10_sse2 liteav_ff_h264_idct_add8_10_sse2 +#define av_cast5_init liteav_av_cast5_init +#define ff_imdct_calc_c liteav_ff_imdct_calc_c +#define ff_resample_common_apply_filter_x8_s16_neon liteav_ff_resample_common_apply_filter_x8_s16_neon +#define ff_unpack_2ch_int16_to_int32_a_ssse3 liteav_ff_unpack_2ch_int16_to_int32_a_ssse3 +#define ff_put_pixels8_xy2_no_rnd_neon liteav_ff_put_pixels8_xy2_no_rnd_neon +#define ff_bsf_child_class_next liteav_ff_bsf_child_class_next +#define av_xtea_alloc liteav_av_xtea_alloc +#define ff_pcm_alaw_at_decoder liteav_ff_pcm_alaw_at_decoder +#define av_fifo_grow liteav_av_fifo_grow +#define ff_biweight_h264_pixels_8_neon liteav_ff_biweight_h264_pixels_8_neon +#define av_image_fill_black liteav_av_image_fill_black +#define 
av_sha512_init liteav_av_sha512_init +#define ff_avg_h264_qpel16_mc20_10_ssse3_cache64 liteav_ff_avg_h264_qpel16_mc20_10_ssse3_cache64 +#define ff_hevc_save_states liteav_ff_hevc_save_states +#define ff_mdct_init liteav_ff_mdct_init +#define ff_put_h264_qpel8_mc30_10_sse2_cache64 liteav_ff_put_h264_qpel8_mc30_10_sse2_cache64 +#define rgb16tobgr24 liteav_rgb16tobgr24 +#define av_tree_enumerate liteav_av_tree_enumerate +#define swscale_version liteav_swscale_version +#define ff_sbr_hf_apply_noise_2_neon liteav_ff_sbr_hf_apply_noise_2_neon +#define ff_slice_thread_init liteav_ff_slice_thread_init +#define av_dict_parse_string liteav_av_dict_parse_string +#define ff_fixed_dsp_init_x86 liteav_ff_fixed_dsp_init_x86 +#define ff_ps_stereo_interpolate_sse3 liteav_ff_ps_stereo_interpolate_sse3 +#define av_buffer_get_opaque liteav_av_buffer_get_opaque +#define ff_pack_2ch_int16_to_int32_a_sse2 liteav_ff_pack_2ch_int16_to_int32_a_sse2 +#define ff_imdct_calc_c_fixed_32 liteav_ff_imdct_calc_c_fixed_32 +#define av_base64_decode liteav_av_base64_decode +#define av_reallocp liteav_av_reallocp +#define av_jni_set_java_vm liteav_av_jni_set_java_vm +#define ff_cos_256 liteav_ff_cos_256 +#define ff_h263dsp_init liteav_ff_h263dsp_init +#define ff_pack_2ch_float_to_int32_u_sse2 liteav_ff_pack_2ch_float_to_int32_u_sse2 +#define ff_rl_free liteav_ff_rl_free +#define ff_h264_chroma_dc_scan liteav_ff_h264_chroma_dc_scan +#define av_packet_copy_props liteav_av_packet_copy_props +#define yyget_lineno liteav_yyget_lineno +#define ff_pred8x8l_horizontal_up_8_mmxext liteav_ff_pred8x8l_horizontal_up_8_mmxext +#define ff_h264_weight_8_sse2 liteav_ff_h264_weight_8_sse2 +#define ff_hevc_idct_4x4_dc_neon_8 liteav_ff_hevc_idct_4x4_dc_neon_8 +#define ff_ebur128_loudness_global liteav_ff_ebur128_loudness_global +#define ff_j_rev_dct liteav_ff_j_rev_dct +#define ff_pred16x16_horizontal_10_sse2 liteav_ff_pred16x16_horizontal_10_sse2 +#define ff_hevc_put_epel_h_neon_8_wrapper 
liteav_ff_hevc_put_epel_h_neon_8_wrapper +#define yy_scan_string liteav_yy_scan_string +#define ff_end_tag liteav_ff_end_tag +#define rgb24tobgr24 liteav_rgb24tobgr24 +#define avcodec_find_decoder liteav_avcodec_find_decoder +#define av_dict_count liteav_av_dict_count +#define sws_convertPalette8ToPacked32 liteav_sws_convertPalette8ToPacked32 +#define ff_hevc_mpm_idx_decode liteav_ff_hevc_mpm_idx_decode +#define ff_put_pixels16_y2_no_rnd_neon liteav_ff_put_pixels16_y2_no_rnd_neon +#define ff_avg_h264_chroma_mc8_10_avx liteav_ff_avg_h264_chroma_mc8_10_avx +#define av_pkt_dump2 liteav_av_pkt_dump2 +#define ff_put_h264_qpel8_mc01_10_sse2 liteav_ff_put_h264_qpel8_mc01_10_sse2 +#define av_buffersink_get_format liteav_av_buffersink_get_format +#define avfilter_next liteav_avfilter_next +#define ff_hwcontext_type_videotoolbox liteav_ff_hwcontext_type_videotoolbox +#define ff_combine_frame liteav_ff_combine_frame +#define ff_dnxhd_profiles liteav_ff_dnxhd_profiles +#define ff_id3v1_read liteav_ff_id3v1_read +#define ff_hevc_clear_refs liteav_ff_hevc_clear_refs +#define ff_pred8x8_plane_8_ssse3 liteav_ff_pred8x8_plane_8_ssse3 +#define av_timecode_make_mpeg_tc_string liteav_av_timecode_make_mpeg_tc_string +#define ff_ass_bprint_text_event liteav_ff_ass_bprint_text_event +#define av_log_format_line2 liteav_av_log_format_line2 +#define ff_h264_idct_add16intra_8_mmx liteav_ff_h264_idct_add16intra_8_mmx +#define ff_uyvytoyuv422_avx liteav_ff_uyvytoyuv422_avx +#define ff_inter_run liteav_ff_inter_run +#define ff_id3v2_parse_apic liteav_ff_id3v2_parse_apic +#define ff_deblock_h_chroma422_intra_8_avx liteav_ff_deblock_h_chroma422_intra_8_avx +#define ff_formats_ref liteav_ff_formats_ref +#define ff_rtmpe_protocol liteav_ff_rtmpe_protocol +#define ff_mov_cenc_write_stbl_atoms liteav_ff_mov_cenc_write_stbl_atoms +#define ff_faanidct liteav_ff_faanidct +#define av_aes_ctr_alloc liteav_av_aes_ctr_alloc +#define ff_put_rv40_chroma_mc4_mmx liteav_ff_put_rv40_chroma_mc4_mmx +#define 
ff_h264_ps_uninit liteav_ff_h264_ps_uninit +#define sws_normalizeVec liteav_sws_normalizeVec +#define ff_h264_chroma_qp liteav_ff_h264_chroma_qp +#define av_vorbis_parse_init liteav_av_vorbis_parse_init +#define ff_h264_sei_uninit liteav_ff_h264_sei_uninit +#define ff_pred8x8_horizontal_8_mmxext liteav_ff_pred8x8_horizontal_8_mmxext +#define ff_hevc_put_qpel_uw_pixels_w64_neon_8 liteav_ff_hevc_put_qpel_uw_pixels_w64_neon_8 +#define ff_avg_rv40_chroma_mc8_3dnow liteav_ff_avg_rv40_chroma_mc8_3dnow +#define ff_videotoolbox_uninit liteav_ff_videotoolbox_uninit +#define ff_vorbis_floor1_inverse_db_table liteav_ff_vorbis_floor1_inverse_db_table +#define ff_cos_256_fixed liteav_ff_cos_256_fixed +#define ff_inverse liteav_ff_inverse +#define avpriv_mpegts_parse_packet liteav_avpriv_mpegts_parse_packet +#define ff_hevc_annexb2mp4_buf liteav_ff_hevc_annexb2mp4_buf +#define ff_amf_get_field_value liteav_ff_amf_get_field_value +#define ff_pred16x16_128_dc_10_mmxext liteav_ff_pred16x16_128_dc_10_mmxext +#define ff_pred8x8_horizontal_10_sse2 liteav_ff_pred8x8_horizontal_10_sse2 +#define ff_hevc_pred_planar_16x16_neon_8_1 liteav_ff_hevc_pred_planar_16x16_neon_8_1 +#define ff_hevc_transform_luma_4x4_neon_8 liteav_ff_hevc_transform_luma_4x4_neon_8 +#define av_expr_parse liteav_av_expr_parse +#define ff_mpegtsraw_demuxer liteav_ff_mpegtsraw_demuxer +#define ff_put_qpel8_mc33_old_c liteav_ff_put_qpel8_mc33_old_c +#define av_crc liteav_av_crc +#define ff_hevc_demuxer liteav_ff_hevc_demuxer +#define ff_hevc_put_qpel_uw_v1_neon_8 liteav_ff_hevc_put_qpel_uw_v1_neon_8 +#define av_opt_set_from_string liteav_av_opt_set_from_string +#define ff_http_auth_create_response liteav_ff_http_auth_create_response +#define vlc_css_rule_New liteav_vlc_css_rule_New +#define avfilter_pad_get_name liteav_avfilter_pad_get_name +#define ff_hevc_set_qPy liteav_ff_hevc_set_qPy +#define av_picture_pad liteav_av_picture_pad +#define ff_yuv420p_to_abgr_neon liteav_ff_yuv420p_to_abgr_neon +#define 
av_probe_input_format2 liteav_av_probe_input_format2 +#define ff_vector_fmac_scalar_neon liteav_ff_vector_fmac_scalar_neon +#define av_frame_clone liteav_av_frame_clone +#define ff_pred16x16_dc_8_mmxext liteav_ff_pred16x16_dc_8_mmxext +#define ff_mov_cenc_write_packet liteav_ff_mov_cenc_write_packet +#define ff_h264_decode_ref_pic_list_reordering liteav_ff_h264_decode_ref_pic_list_reordering +#define ff_aac_pred_sfb_max liteav_ff_aac_pred_sfb_max +#define ff_put_wav_header liteav_ff_put_wav_header +#define ff_put_h264_qpel8_mc30_10_sse2 liteav_ff_put_h264_qpel8_mc30_10_sse2 +#define av_gettime liteav_av_gettime +#define ff_pw_20 liteav_ff_pw_20 +#define ff_framequeue_skip_samples liteav_ff_framequeue_skip_samples +#define ff_h263_cbpy_tab liteav_ff_h263_cbpy_tab +#define ff_avg_pixels16_y2_no_rnd_neon liteav_ff_avg_pixels16_y2_no_rnd_neon +#define ff_read_line_to_bprint liteav_ff_read_line_to_bprint +#define ff_draw_rectangle liteav_ff_draw_rectangle +#define ff_kbd_window_init liteav_ff_kbd_window_init +#define av_dirac_parse_sequence_header liteav_av_dirac_parse_sequence_header +#define ff_put_h264_qpel8_mc10_10_ssse3_cache64 liteav_ff_put_h264_qpel8_mc10_10_ssse3_cache64 +#define av_color_primaries_name liteav_av_color_primaries_name +#define av_log2_i liteav_av_log2_i +#define ff_h264_idct8_add_10_avx liteav_ff_h264_idct8_add_10_avx +#define av_parser_change liteav_av_parser_change +#define ff_hevc_put_epel_uw_bi_h_neon_8 liteav_ff_hevc_put_epel_uw_bi_h_neon_8 +#define ff_aac_demuxer liteav_ff_aac_demuxer +#define ff_hevc_sao_edge_eo2_w64_neon_8 liteav_ff_hevc_sao_edge_eo2_w64_neon_8 +#define ff_h264_idct_add8_12_c liteav_ff_h264_idct_add8_12_c +#define av_aes_ctr_crypt liteav_av_aes_ctr_crypt +#define ff_hevc_log2_res_scale_abs liteav_ff_hevc_log2_res_scale_abs +#define av_timecode_adjust_ntsc_framenum2 liteav_av_timecode_adjust_ntsc_framenum2 +#define ff_interleaved_dirac_golomb_vlc_code liteav_ff_interleaved_dirac_golomb_vlc_code +#define swr_convert 
liteav_swr_convert +#define ff_hevc_put_qpel_uw_pixels_w4_neon_8 liteav_ff_hevc_put_qpel_uw_pixels_w4_neon_8 +#define av_color_range_from_name liteav_av_color_range_from_name +#define ff_hevc_put_qpel_uw_weight_h1v1_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h1v1_neon_8 +#define av_buffersink_get_samples liteav_av_buffersink_get_samples +#define ff_pred8x8l_dc_10_avx liteav_ff_pred8x8l_dc_10_avx +#define ff_fill_rectangle liteav_ff_fill_rectangle +#define ff_ebur128_init liteav_ff_ebur128_init +#define ff_unpack_6ch_int32_to_float_u_avx liteav_ff_unpack_6ch_int32_to_float_u_avx +#define ff_put_h264_qpel8_mc23_neon liteav_ff_put_h264_qpel8_mc23_neon +#define ff_unpack_2ch_int32_to_int16_u_sse2 liteav_ff_unpack_2ch_int32_to_int16_u_sse2 +#define ff_unpack_6ch_float_to_float_u_avx liteav_ff_unpack_6ch_float_to_float_u_avx +#define av_thread_message_queue_set_err_recv liteav_av_thread_message_queue_set_err_recv +#define ff_put_h264_qpel8_mc10_10_sse2 liteav_ff_put_h264_qpel8_mc10_10_sse2 +#define av_dct_end liteav_av_dct_end +#define ff_float_dsp_init_aarch64 liteav_ff_float_dsp_init_aarch64 +#define ff_h264_idct_add8_422_10_avx liteav_ff_h264_idct_add8_422_10_avx +#define ff_ass_subtitle_header liteav_ff_ass_subtitle_header +#define avfilter_pad_get_type liteav_avfilter_pad_get_type +#define ff_hevc_put_epel_hv_neon_8_wrapper liteav_ff_hevc_put_epel_hv_neon_8_wrapper +#define ff_adts_muxer liteav_ff_adts_muxer +#define ff_mpeg1_default_intra_matrix liteav_ff_mpeg1_default_intra_matrix +#define ff_gsm_ms_at_decoder liteav_ff_gsm_ms_at_decoder +#define ff_pack_2ch_int16_to_int16_a_sse2 liteav_ff_pack_2ch_int16_to_int16_a_sse2 +#define ff_cpu_cpuid liteav_ff_cpu_cpuid +#define av_opt_get_q liteav_av_opt_get_q +#define ff_avg_h264_qpel4_mc21_10_mmxext liteav_ff_avg_h264_qpel4_mc21_10_mmxext +#define ff_vector_fmul_window_vfp liteav_ff_vector_fmul_window_vfp +#define av_image_copy_uc_from liteav_av_image_copy_uc_from +#define ffurl_get_protocols liteav_ffurl_get_protocols 
+#define av_frame_get_colorspace liteav_av_frame_get_colorspace +#define avfilter_graph_alloc liteav_avfilter_graph_alloc +#define ff_avg_h264_qpel8_mc32_neon liteav_ff_avg_h264_qpel8_mc32_neon +#define av_mul_q liteav_av_mul_q +#define ff_hevc_cu_qp_delta_abs liteav_ff_hevc_cu_qp_delta_abs +#define ff_unpack_2ch_int32_to_float_u_sse2 liteav_ff_unpack_2ch_int32_to_float_u_sse2 +#define av_mul_i liteav_av_mul_i +#define ff_sws_init_input_funcs liteav_ff_sws_init_input_funcs +#define ff_h264_init_cabac_states liteav_ff_h264_init_cabac_states +#define ff_alloc_packet liteav_ff_alloc_packet +#define ff_ac3_demuxer liteav_ff_ac3_demuxer +#define av_add_stable liteav_av_add_stable +#define ff_pw_64 liteav_ff_pw_64 +#define ff_imdct36_float_sse3 liteav_ff_imdct36_float_sse3 +#define ff_imdct36_float_sse2 liteav_ff_imdct36_float_sse2 +#define ff_deblock_v_chroma_8_sse2 liteav_ff_deblock_v_chroma_8_sse2 +#define ff_mov_muxer liteav_ff_mov_muxer +#define av_rdft_init liteav_av_rdft_init +#define ff_hevc_put_qpel_uw_h1_neon_8 liteav_ff_hevc_put_qpel_uw_h1_neon_8 +#define ff_hevc_pcm_flag_decode liteav_ff_hevc_pcm_flag_decode +#define ff_deblock_v_luma_8_sse2 liteav_ff_deblock_v_luma_8_sse2 +#define ff_ebur128_loudness_range_multiple liteav_ff_ebur128_loudness_range_multiple +#define ff_tls_init liteav_ff_tls_init +#define ff_avg_pixels8x8_c liteav_ff_avg_pixels8x8_c +#define av_blowfish_crypt liteav_av_blowfish_crypt +#define av_image_copy liteav_av_image_copy +#define av_frame_new_side_data liteav_av_frame_new_side_data +#define ff_put_h264_qpel8_h_lowpass_ssse3 liteav_ff_put_h264_qpel8_h_lowpass_ssse3 +#define av_register_input_format liteav_av_register_input_format +#define ff_pred16x16_plane_h264_8_ssse3 liteav_ff_pred16x16_plane_h264_8_ssse3 +#define ff_h264_idct8_add4_9_c liteav_ff_h264_idct8_add4_9_c +#define av_bsf_free liteav_av_bsf_free +#define ff_pred4x4_vertical_right_10_ssse3 liteav_ff_pred4x4_vertical_right_10_ssse3 +#define ff_unpack_2ch_int16_to_int32_u_ssse3 
liteav_ff_unpack_2ch_int16_to_int32_u_ssse3 +#define variant_matched_tags liteav_variant_matched_tags +#define ff_amf_write_string2 liteav_ff_amf_write_string2 +#define av_register_output_format liteav_av_register_output_format +#define ff_pred16x16_vertical_10_mmxext liteav_ff_pred16x16_vertical_10_mmxext +#define ff_mpeg4_set_direct_mv liteav_ff_mpeg4_set_direct_mv +#define av_rescale_q_rnd liteav_av_rescale_q_rnd +#define ff_amf_read_bool liteav_ff_amf_read_bool +#define av_opt_set_image_size liteav_av_opt_set_image_size +#define av_audio_fifo_free liteav_av_audio_fifo_free +#define ff_h264_idct8_dc_add_neon liteav_ff_h264_idct8_dc_add_neon +#define av_packet_get_side_data liteav_av_packet_get_side_data +#define av_blowfish_crypt_ecb liteav_av_blowfish_crypt_ecb +#define ff_deblock_h_chroma_intra_8_sse2 liteav_ff_deblock_h_chroma_intra_8_sse2 +#define ff_eac3_default_chmap liteav_ff_eac3_default_chmap +#define ffurl_read_complete liteav_ffurl_read_complete +#define ff_fft_calc_vfp liteav_ff_fft_calc_vfp +#define avcodec_encode_audio2 liteav_avcodec_encode_audio2 +#define swri_noise_shaping_int32 liteav_swri_noise_shaping_int32 +#define ff_avg_pixels8_x2_neon liteav_ff_avg_pixels8_x2_neon +#define ff_avg_h264_qpel8or16_v_lowpass_sse2 liteav_ff_avg_h264_qpel8or16_v_lowpass_sse2 +#define ff_text_init_buf liteav_ff_text_init_buf +#define ff_int16_to_float_u_sse2 liteav_ff_int16_to_float_u_sse2 +#define avio_rl24 liteav_avio_rl24 +#define ff_network_wait_fd liteav_ff_network_wait_fd +#define ff_avg_rv40_chroma_mc4_3dnow liteav_ff_avg_rv40_chroma_mc4_3dnow +#define ff_hevc_annexb2mp4 liteav_ff_hevc_annexb2mp4 +#define ff_put_qpel16_mc32_old_c liteav_ff_put_qpel16_mc32_old_c +#define ff_h264_get_slice_type liteav_ff_h264_get_slice_type +#define ff_w1_plus_w3_hi liteav_ff_w1_plus_w3_hi +#define ff_h264_idct_add8_422_9_c liteav_ff_h264_idct_add8_422_9_c +#define ff_planar_sample_fmts liteav_ff_planar_sample_fmts +#define ff_simple_idct8_add_avx 
liteav_ff_simple_idct8_add_avx +#define ff_init_vscale liteav_ff_init_vscale +#define ff_deblock_v_chroma_intra_8_avx liteav_ff_deblock_v_chroma_intra_8_avx +#define ff_put_h264_qpel16_mc12_neon liteav_ff_put_h264_qpel16_mc12_neon +#define ff_hevc_pred_angular_32x32_v_zero_neon_8 liteav_ff_hevc_pred_angular_32x32_v_zero_neon_8 +#define av_msg_set_callback liteav_av_msg_set_callback +#define ff_hevc_put_epel_v_neon_8_wrapper liteav_ff_hevc_put_epel_v_neon_8_wrapper +#define yyget_extra liteav_yyget_extra +#define ff_init_cabac_decoder liteav_ff_init_cabac_decoder +#define ff_scale_eval_dimensions liteav_ff_scale_eval_dimensions +#define avfilter_version liteav_avfilter_version +#define ff_fft_init liteav_ff_fft_init +#define av_get_sample_fmt_name liteav_av_get_sample_fmt_name +#define av_hwdevice_find_type_by_name liteav_av_hwdevice_find_type_by_name +#define ff_deblock_h_chroma_8_mmxext liteav_ff_deblock_h_chroma_8_mmxext +#define ff_put_h264_qpel4_mc01_10_mmxext liteav_ff_put_h264_qpel4_mc01_10_mmxext +#define ff_channel_layouts_changeref liteav_ff_channel_layouts_changeref +#define ff_ass_split_dialog2 liteav_ff_ass_split_dialog2 +#define ff_rl_init_vlc liteav_ff_rl_init_vlc +#define ff_wait_thread liteav_ff_wait_thread +#define ff_put_h264_qpel8_mc20_neon liteav_ff_put_h264_qpel8_mc20_neon +#define avfilter_insert_filter liteav_avfilter_insert_filter +#define ff_square_tab liteav_ff_square_tab +#define av_frame_make_writable liteav_av_frame_make_writable +#define ff_pb_FE liteav_ff_pb_FE +#define ff_pb_FC liteav_ff_pb_FC +#define ff_pred16x16_plane_h264_8_mmxext liteav_ff_pred16x16_plane_h264_8_mmxext +#define ff_urldecode liteav_ff_urldecode +#define ff_pack_2ch_int16_to_float_u_sse2 liteav_ff_pack_2ch_int16_to_float_u_sse2 +#define ff_interleaved_se_golomb_vlc_code liteav_ff_interleaved_se_golomb_vlc_code +#define ff_avg_h264_qpel8_mc21_neon liteav_ff_avg_h264_qpel8_mc21_neon +#define ff_pd_1 liteav_ff_pd_1 +#define ff_biweight_h264_pixels_4_neon 
liteav_ff_biweight_h264_pixels_4_neon +#define av_image_get_buffer_size liteav_av_image_get_buffer_size +#define av_get_standard_channel_layout liteav_av_get_standard_channel_layout +#define ff_rl_mpeg2 liteav_ff_rl_mpeg2 +#define av_cast5_alloc liteav_av_cast5_alloc +#define ff_rl_mpeg1 liteav_ff_rl_mpeg1 +#define ff_hevc_pel_bi_pixels_w8_neon_8 liteav_ff_hevc_pel_bi_pixels_w8_neon_8 +#define av_codec_next liteav_av_codec_next +#define ff_lzw_encode liteav_ff_lzw_encode +#define ff_unpack_6ch_float_to_int32_a_sse2 liteav_ff_unpack_6ch_float_to_int32_a_sse2 +#define av_hwframe_constraints_free liteav_av_hwframe_constraints_free +#define ff_avg_h264_qpel8_mc02_neon liteav_ff_avg_h264_qpel8_mc02_neon +#define ff_pack_6ch_float_to_int32_u_avx liteav_ff_pack_6ch_float_to_int32_u_avx +#define av_bsf_flush liteav_av_bsf_flush +#define ff_fft_init_aarch64 liteav_ff_fft_init_aarch64 +#define ff_vf_transpose liteav_ff_vf_transpose +#define ff_get_video_buffer liteav_ff_get_video_buffer +#define ff_avfilter_link_set_in_status liteav_ff_avfilter_link_set_in_status +#define av_encryption_init_info_add_side_data liteav_av_encryption_init_info_add_side_data +#define av_buffersink_get_time_base liteav_av_buffersink_get_time_base +#define av_expr_free liteav_av_expr_free +#define ff_h264_idct8_add_8_sse2 liteav_ff_h264_idct8_add_8_sse2 +#define avio_open2 liteav_avio_open2 +#define ff_simple_idct44_add liteav_ff_simple_idct44_add +#define ff_put_h264_qpel16_mc00_neon liteav_ff_put_h264_qpel16_mc00_neon +#define ff_replaygain_export liteav_ff_replaygain_export +#define ff_eac3_at_decoder liteav_ff_eac3_at_decoder +#define rgb24tobgr16 liteav_rgb24tobgr16 +#define ff_hevc_put_pel_uw_pixels_w12_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w12_neon_8_asm +#define text_style_create liteav_text_style_create +#define ff_h264dsp_init_x86 liteav_ff_h264dsp_init_x86 +#define ff_pred16x16_vertical_10_sse2 liteav_ff_pred16x16_vertical_10_sse2 +#define swri_audio_convert 
liteav_swri_audio_convert +#define av_twofish_init liteav_av_twofish_init +#define av_free_packet liteav_av_free_packet +#define vlc_css_selectors_Delete liteav_vlc_css_selectors_Delete +#define ff_hevc_pred_planar_32x32_neon_8_1 liteav_ff_hevc_pred_planar_32x32_neon_8_1 +#define ff_hevc_put_qpel_v1_neon_8 liteav_ff_hevc_put_qpel_v1_neon_8 +#define av_bsf_iterate liteav_av_bsf_iterate +#define sws_convertPalette8ToPacked24 liteav_sws_convertPalette8ToPacked24 +#define ff_h264dsp_init_aarch64 liteav_ff_h264dsp_init_aarch64 +#define ff_dct_end liteav_ff_dct_end +#define rgb32to24 liteav_rgb32to24 +#define ff_sbr_hf_g_filt_neon liteav_ff_sbr_hf_g_filt_neon +#define ff_av1_filter_obus_buf liteav_ff_av1_filter_obus_buf +#define ff_pack_2ch_float_to_int16_u_sse2 liteav_ff_pack_2ch_float_to_int16_u_sse2 +#define ff_wav_demuxer liteav_ff_wav_demuxer +#define ff_put_h264_qpel8_mc00_neon liteav_ff_put_h264_qpel8_mc00_neon +#define av_mediacodec_release_buffer liteav_av_mediacodec_release_buffer +#define av_get_bytes_per_sample liteav_av_get_bytes_per_sample +#define av_mediacodec_default_init liteav_av_mediacodec_default_init +#define avfilter_register_all liteav_avfilter_register_all +#define avio_wb64 liteav_avio_wb64 +#define av_opt_ptr liteav_av_opt_ptr +#define ff_asink_abuffer liteav_ff_asink_abuffer +#define ff_cos_65536 liteav_ff_cos_65536 +#define swr_close liteav_swr_close +#define av_aes_ctr_set_full_iv liteav_av_aes_ctr_set_full_iv +#define ff_aac_spectral_sizes liteav_ff_aac_spectral_sizes +#define shuffle_bytes_2103 liteav_shuffle_bytes_2103 +#define ff_id3v2_tag_len liteav_ff_id3v2_tag_len +#define ff_hevc_put_epel_hv_neon_8 liteav_ff_hevc_put_epel_hv_neon_8 +#define ff_mjpeg_encode_huffman_init liteav_ff_mjpeg_encode_huffman_init +#define ff_vector_dmul_scalar_neon liteav_ff_vector_dmul_scalar_neon +#define ff_ac3_dec_channel_map liteav_ff_ac3_dec_channel_map +#define ff_get_bmp_header liteav_ff_get_bmp_header +#define rgb64tobgr48_bswap 
liteav_rgb64tobgr48_bswap +#define ff_get_wav_header liteav_ff_get_wav_header +#define av_videotoolbox_default_init2 liteav_av_videotoolbox_default_init2 +#define ff_ac3_floor_tab liteav_ff_ac3_floor_tab +#define ff_h264_idct8_add_8_mmx liteav_ff_h264_idct8_add_8_mmx +#define ff_mp3_demuxer liteav_ff_mp3_demuxer +#define ff_aac_kbd_short_120 liteav_ff_aac_kbd_short_120 +#define ff_hevc_put_pel_uw_pixels_w64_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w64_neon_8_asm +#define av_shr_i liteav_av_shr_i +#define ff_unpack_6ch_int32_to_float_a_avx liteav_ff_unpack_6ch_int32_to_float_a_avx +#define ff_h2645_extract_rbsp liteav_ff_h2645_extract_rbsp +#define ff_h264qpel_init_aarch64 liteav_ff_h264qpel_init_aarch64 +#define ff_avg_h264_qpel16_mc12_10_sse2 liteav_ff_avg_h264_qpel16_mc12_10_sse2 +#define ff_ebur128_add_frames_short liteav_ff_ebur128_add_frames_short +#define av_guess_format liteav_av_guess_format +#define ff_avg_h264_qpel4_mc30_10_mmxext liteav_ff_avg_h264_qpel4_mc30_10_mmxext +#define ff_h264_idct8_add_12_c liteav_ff_h264_idct8_add_12_c +#define ff_hevc_decode_nal_sps liteav_ff_hevc_decode_nal_sps +#define ff_reverse liteav_ff_reverse +#define yuy2toyv12 liteav_yuy2toyv12 +#define av_frame_set_decode_error_flags liteav_av_frame_set_decode_error_flags +#define sws_shiftVec liteav_sws_shiftVec +#define ff_pred4x4_down_left_8_mmxext liteav_ff_pred4x4_down_left_8_mmxext +#define ff_put_h264_qpel16_mc03_10_sse2 liteav_ff_put_h264_qpel16_mc03_10_sse2 +#define av_pix_fmt_desc_get liteav_av_pix_fmt_desc_get +#define ff_pred8x8_tm_vp8_8_sse2 liteav_ff_pred8x8_tm_vp8_8_sse2 +#define ff_hevc_put_pixels_w16_neon_8 liteav_ff_hevc_put_pixels_w16_neon_8 +#define ff_h264_unref_picture liteav_ff_h264_unref_picture +#define ff_adpcm_ima_qt_at_decoder liteav_ff_adpcm_ima_qt_at_decoder +#define av_read_image_line2 liteav_av_read_image_line2 +#define ff_brktimegm liteav_ff_brktimegm +#define ff_hevc_pred_angular_8x8_v_zero_neon_8 liteav_ff_hevc_pred_angular_8x8_v_zero_neon_8 
+#define avpriv_report_missing_feature liteav_avpriv_report_missing_feature +#define ff_mp2_at_decoder liteav_ff_mp2_at_decoder +#define ff_h264_biweight_4_mmxext liteav_ff_h264_biweight_4_mmxext +#define ff_tns_max_bands_480 liteav_ff_tns_max_bands_480 +#define av_hash_freep liteav_av_hash_freep +#define ff_golomb_vlc_len liteav_ff_golomb_vlc_len +#define ff_pred8x8l_vertical_left_8_ssse3 liteav_ff_pred8x8l_vertical_left_8_ssse3 +#define ff_hevc_frame_rps liteav_ff_hevc_frame_rps +#define av_pix_fmt_count_planes liteav_av_pix_fmt_count_planes +#define av_camellia_init liteav_av_camellia_init +#define ff_emulated_edge_mc_16 liteav_ff_emulated_edge_mc_16 +#define ff_put_h264_qpel8_mc13_10_sse2 liteav_ff_put_h264_qpel8_mc13_10_sse2 +#define ff_ps_stereo_interpolate_neon liteav_ff_ps_stereo_interpolate_neon +#define ff_subtitles_utf8_external_read_chunk liteav_ff_subtitles_utf8_external_read_chunk +#define av_opt_next liteav_av_opt_next +#define ff_avg_h264_qpel8_mc01_neon liteav_ff_avg_h264_qpel8_mc01_neon +#define avpriv_mpegts_parse_close liteav_avpriv_mpegts_parse_close +#define ff_four_imdct36_float_sse liteav_ff_four_imdct36_float_sse +#define ff_sbr_noise_table liteav_ff_sbr_noise_table +#define codec_mp4_tags liteav_codec_mp4_tags +#define ff_h264_idct_add8_10_c liteav_ff_h264_idct_add8_10_c +#define av_opt_show2 liteav_av_opt_show2 +#define ff_decode_sbr_extension liteav_ff_decode_sbr_extension +#define ff_avg_h264_qpel16_h_lowpass_l2_ssse3 liteav_ff_avg_h264_qpel16_h_lowpass_l2_ssse3 +#define ff_make_absolute_url liteav_ff_make_absolute_url +#define ff_unpack_2ch_int16_to_int32_a_sse2 liteav_ff_unpack_2ch_int16_to_int32_a_sse2 +#define ff_mpeg4_find_frame_end liteav_ff_mpeg4_find_frame_end +#define ff_float_dsp_init_x86 liteav_ff_float_dsp_init_x86 +#define ff_pack_6ch_float_to_int32_a_sse2 liteav_ff_pack_6ch_float_to_int32_a_sse2 +#define ff_deblock_v_luma_10_sse2 liteav_ff_deblock_v_luma_10_sse2 +#define ff_mpeg4_clean_buffers liteav_ff_mpeg4_clean_buffers 
+#define ffurl_connect liteav_ffurl_connect +#define avpriv_open liteav_avpriv_open +#define ff_h264_idct_add16_10_sse2 liteav_ff_h264_idct_add16_10_sse2 +#define ff_put_h264_qpel4_mc21_10_mmxext liteav_ff_put_h264_qpel4_mc21_10_mmxext +#define ff_pred16x16_dc_neon liteav_ff_pred16x16_dc_neon +#define ff_int32_to_float_a_avx liteav_ff_int32_to_float_a_avx +#define ff_put_h264_qpel4_mc30_10_mmxext liteav_ff_put_h264_qpel4_mc30_10_mmxext +#define ff_h263_intra_MCBPC_code liteav_ff_h263_intra_MCBPC_code +#define ff_h264_idct_add8_14_c liteav_ff_h264_idct_add8_14_c +#define ff_mpeg12_mbMotionVectorTable liteav_ff_mpeg12_mbMotionVectorTable +#define avformat_get_mov_audio_tags liteav_avformat_get_mov_audio_tags +#define ff_inlink_make_frame_writable liteav_ff_inlink_make_frame_writable +#define rgb24tobgr15 liteav_rgb24tobgr15 +#define ff_h264_idct_add_10_avx liteav_ff_h264_idct_add_10_avx +#define ff_shuffle_bytes_0321_ssse3 liteav_ff_shuffle_bytes_0321_ssse3 +#define av_opt_set_pixel_fmt liteav_av_opt_set_pixel_fmt +#define ff_ass_free_dialog liteav_ff_ass_free_dialog +#define ff_xvid_idct_init_x86 liteav_ff_xvid_idct_init_x86 +#define ff_cos_32 liteav_ff_cos_32 +#define avpriv_mpegaudio_decode_header liteav_avpriv_mpegaudio_decode_header +#define ff_vf_overlay liteav_ff_vf_overlay +#define ff_imdct_half_neon liteav_ff_imdct_half_neon +#define ff_hevc_put_epel_uw_pixels_w8_neon_8 liteav_ff_hevc_put_epel_uw_pixels_w8_neon_8 +#define ff_put_h264_qpel16_mc21_neon liteav_ff_put_h264_qpel16_mc21_neon +#define ff_avg_h264_qpel16_mc01_neon liteav_ff_avg_h264_qpel16_mc01_neon +#define ff_id3v2_read_dict liteav_ff_id3v2_read_dict +#define ff_put_h264_qpel16_mc03_neon liteav_ff_put_h264_qpel16_mc03_neon +#define ff_riff_write_info liteav_ff_riff_write_info +#define av_dct_init liteav_av_dct_init +#define ff_mpeg4_videotoolbox_hwaccel liteav_ff_mpeg4_videotoolbox_hwaccel +#define ff_pred16x16_horizontal_8_mmx liteav_ff_pred16x16_horizontal_8_mmx +#define swscale_configuration 
liteav_swscale_configuration +#define av_packet_side_data_name liteav_av_packet_side_data_name +#define ff_h264_luma_dc_dequant_idct_8_c liteav_ff_h264_luma_dc_dequant_idct_8_c +#define ff_xvid_idct liteav_ff_xvid_idct +#define ff_scalarproduct_float_neon liteav_ff_scalarproduct_float_neon +#define vlc_css_parser_Debug liteav_vlc_css_parser_Debug +#define avformat_close liteav_avformat_close +#define avformat_new_stream liteav_avformat_new_stream +#define avformat_close_input liteav_avformat_close_input +#define avformat_free_context liteav_avformat_free_context +#define avformat_alloc_context liteav_avformat_alloc_context +#define avformat_open_input liteav_avformat_open_input +#define avformat_find_stream_info liteav_avformat_find_stream_info +#define av_find_best_stream liteav_av_find_best_stream +#define avcodec_open2 liteav_avcodec_open2 +#define av_read_frame liteav_av_read_frame +#define av_seek_frame liteav_av_seek_frame +#define av_codec_get_tag liteav_av_codec_get_tag +#define avcodec_parameters_from_context liteav_avcodec_parameters_from_context +#define avcodec_parameters_to_context liteav_avcodec_parameters_to_context +#define avcodec_alloc_context3 liteav_avcodec_alloc_context3 +#define avcodec_get_name liteav_avcodec_get_name +#define avcodec_free_context liteav_avcodec_free_context +#define avcodec_close liteav_avcodec_close +//vod +#define avsubtitle_free liteav_avsubtitle_free +#define av_version_info liteav_av_version_info +#define av_find_default_stream_index liteav_av_find_default_stream_index +#define av_stream_get_side_data liteav_av_stream_get_side_data +#define av_get_media_type_string liteav_av_get_media_type_string +#define avcodec_parameters_alloc liteav_avcodec_parameters_alloc +#define avformat_network_init liteav_avformat_network_init +#define av_int_list_length_for_size liteav_av_int_list_length_for_size +#define avcodec_parameters_free liteav_avcodec_parameters_free +// clang-format on + +#endif // 
THIRD_PARTY_FFMPEG_FFMPEG_RENAME_DEFINES_H diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/ac3_parser.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/ac3_parser.h new file mode 100644 index 0000000..7f9780b --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/ac3_parser.h @@ -0,0 +1,37 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * AC-3 parser prototypes + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2003 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AC3_PARSER_H +#define AVCODEC_AC3_PARSER_H + +#include <stddef.h> +#include <stdint.h> + +/** + * Extract the bitstream ID and the frame size from AC-3 data. 
+ */ +int liteav_av_ac3_parse_header(const uint8_t *buf, size_t size, + uint8_t *bitstream_id, uint16_t *frame_size); + + +#endif /* AVCODEC_AC3_PARSER_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/adts_parser.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/adts_parser.h new file mode 100644 index 0000000..8f38526 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/adts_parser.h @@ -0,0 +1,38 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_ADTS_PARSER_H +#define AVCODEC_ADTS_PARSER_H + +#include <stddef.h> +#include <stdint.h> + +#define AV_AAC_ADTS_HEADER_SIZE 7 + +/** + * Extract the number of samples and frames from AAC data. + * @param[in] buf pointer to AAC data buffer + * @param[out] samples Pointer to where number of samples is written + * @param[out] frames Pointer to where number of frames is written + * @return Returns 0 on success, error code on failure. 
+ */ +int liteav_av_adts_header_parse(const uint8_t *buf, uint32_t *samples, + uint8_t *frames); + +#endif /* AVCODEC_ADTS_PARSER_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/ass_split.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/ass_split.h new file mode 100644 index 0000000..42f32f8 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/ass_split.h @@ -0,0 +1,331 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * SSA/ASS spliting functions + * Copyright (c) 2010 Aurelien Jacobs <aurel@gnuage.org> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_ASS_SPLIT_H +#define AVCODEC_ASS_SPLIT_H + +/** + * fields extracted from the [Script Info] section + */ +typedef struct { + char *script_type; /**< SSA script format version (eg. 
v4.00) */ + char *collisions; /**< how subtitles are moved to prevent collisions */ + int play_res_x; /**< video width that ASS coords are referring to */ + int play_res_y; /**< video height that ASS coords are referring to */ + float timer; /**< time multiplier to apply to SSA clock (in %) */ +} ASSScriptInfo; + +/** + * fields extracted from the [V4(+) Styles] section + */ +typedef struct { + char *name; /**< name of the tyle (case sensitive) */ + char *font_name; /**< font face (case sensitive) */ + int font_size; /**< font height */ + int primary_color; /**< color that a subtitle will normally appear in */ + int secondary_color; + int outline_color; /**< color for outline in ASS, called tertiary in SSA */ + int back_color; /**< color of the subtitle outline or shadow */ + int bold; /**< whether text is bold (1) or not (0) */ + int italic; /**< whether text is italic (1) or not (0) */ + int underline; /**< whether text is underlined (1) or not (0) */ + int strikeout; + float scalex; + float scaley; + float spacing; + float angle; + int border_style; + float outline; + float shadow; + int alignment; /**< position of the text (left, center, top...), + defined after the layout of the numpad + (1-3 sub, 4-6 mid, 7-9 top) */ + int margin_l; + int margin_r; + int margin_v; + int alpha_level; + int encoding; +} ASSStyle; + +/** + * fields extracted from the [Events] section + */ +typedef struct { + int readorder; + int layer; /**< higher numbered layers are drawn over lower numbered */ + int start; /**< start time of the dialog in centiseconds */ + int end; /**< end time of the dialog in centiseconds */ + char *style; /**< name of the ASSStyle to use with this dialog */ + char *name; + int margin_l; + int margin_r; + int margin_v; + char *effect; + char *text; /**< actual text which will be displayed as a subtitle, + can include style override control codes (see + liteav_ff_ass_split_override_codes()) */ +} ASSDialog; + +/** + * structure containing the whole split ASS 
data + */ +typedef struct { + ASSScriptInfo script_info; /**< general information about the SSA script*/ + ASSStyle *styles; /**< array of split out styles */ + int styles_count; /**< number of ASSStyle in the styles array */ + ASSDialog *dialogs; /**< array of split out dialogs */ + int dialogs_count; /**< number of ASSDialog in the dialogs array*/ +} ASS; + +typedef enum { + ASS_STR, + ASS_INT, + ASS_FLT, + ASS_COLOR, + ASS_TIMESTAMP, + ASS_ALGN, +} ASSFieldType; + +typedef struct { + const char *name; + int type; + int offset; +} ASSFields; + +typedef struct { + const char *section; + const char *format_header; + const char *fields_header; + int size; + int offset; + int offset_count; + ASSFields fields[24]; +} ASSSection; + +static const ASSSection ass_sections[] = { + { .section = "Script Info", + .offset = offsetof(ASS, script_info), + .fields = {{"ScriptType", ASS_STR, offsetof(ASSScriptInfo, script_type)}, + {"Collisions", ASS_STR, offsetof(ASSScriptInfo, collisions) }, + {"PlayResX", ASS_INT, offsetof(ASSScriptInfo, play_res_x) }, + {"PlayResY", ASS_INT, offsetof(ASSScriptInfo, play_res_y) }, + {"Timer", ASS_FLT, offsetof(ASSScriptInfo, timer) }, + {0}, + } + }, + { .section = "V4+ Styles", + .format_header = "Format", + .fields_header = "Style", + .size = sizeof(ASSStyle), + .offset = offsetof(ASS, styles), + .offset_count = offsetof(ASS, styles_count), + .fields = {{"Name", ASS_STR, offsetof(ASSStyle, name) }, + {"Fontname", ASS_STR, offsetof(ASSStyle, font_name) }, + {"Fontsize", ASS_INT, offsetof(ASSStyle, font_size) }, + {"PrimaryColour", ASS_COLOR, offsetof(ASSStyle, primary_color) }, + {"SecondaryColour", ASS_COLOR, offsetof(ASSStyle, secondary_color)}, + {"OutlineColour", ASS_COLOR, offsetof(ASSStyle, outline_color) }, + {"BackColour", ASS_COLOR, offsetof(ASSStyle, back_color) }, + {"Bold", ASS_INT, offsetof(ASSStyle, bold) }, + {"Italic", ASS_INT, offsetof(ASSStyle, italic) }, + {"Underline", ASS_INT, offsetof(ASSStyle, underline) }, + 
{"StrikeOut", ASS_INT, offsetof(ASSStyle, strikeout) }, + {"ScaleX", ASS_FLT, offsetof(ASSStyle, scalex) }, + {"ScaleY", ASS_FLT, offsetof(ASSStyle, scaley) }, + {"Spacing", ASS_FLT, offsetof(ASSStyle, spacing) }, + {"Angle", ASS_FLT, offsetof(ASSStyle, angle) }, + {"BorderStyle", ASS_INT, offsetof(ASSStyle, border_style) }, + {"Outline", ASS_FLT, offsetof(ASSStyle, outline) }, + {"Shadow", ASS_FLT, offsetof(ASSStyle, shadow) }, + {"Alignment", ASS_INT, offsetof(ASSStyle, alignment) }, + {"MarginL", ASS_INT, offsetof(ASSStyle, margin_l) }, + {"MarginR", ASS_INT, offsetof(ASSStyle, margin_r) }, + {"MarginV", ASS_INT, offsetof(ASSStyle, margin_v) }, + {"Encoding", ASS_INT, offsetof(ASSStyle, encoding) }, + {0}, + } + }, + { .section = "V4 Styles", + .format_header = "Format", + .fields_header = "Style", + .size = sizeof(ASSStyle), + .offset = offsetof(ASS, styles), + .offset_count = offsetof(ASS, styles_count), + .fields = {{"Name", ASS_STR, offsetof(ASSStyle, name) }, + {"Fontname", ASS_STR, offsetof(ASSStyle, font_name) }, + {"Fontsize", ASS_INT, offsetof(ASSStyle, font_size) }, + {"PrimaryColour", ASS_COLOR, offsetof(ASSStyle, primary_color) }, + {"SecondaryColour", ASS_COLOR, offsetof(ASSStyle, secondary_color)}, + {"TertiaryColour", ASS_COLOR, offsetof(ASSStyle, outline_color) }, + {"BackColour", ASS_COLOR, offsetof(ASSStyle, back_color) }, + {"Bold", ASS_INT, offsetof(ASSStyle, bold) }, + {"Italic", ASS_INT, offsetof(ASSStyle, italic) }, + {"BorderStyle", ASS_INT, offsetof(ASSStyle, border_style) }, + {"Outline", ASS_FLT, offsetof(ASSStyle, outline) }, + {"Shadow", ASS_FLT, offsetof(ASSStyle, shadow) }, + {"Alignment", ASS_ALGN, offsetof(ASSStyle, alignment) }, + {"MarginL", ASS_INT, offsetof(ASSStyle, margin_l) }, + {"MarginR", ASS_INT, offsetof(ASSStyle, margin_r) }, + {"MarginV", ASS_INT, offsetof(ASSStyle, margin_v) }, + {"AlphaLevel", ASS_INT, offsetof(ASSStyle, alpha_level) }, + {"Encoding", ASS_INT, offsetof(ASSStyle, encoding) }, + {0}, + } + }, + { 
.section = "Events", + .format_header = "Format", + .fields_header = "Dialogue", + .size = sizeof(ASSDialog), + .offset = offsetof(ASS, dialogs), + .offset_count = offsetof(ASS, dialogs_count), + .fields = {{"Layer", ASS_INT, offsetof(ASSDialog, layer) }, + {"Start", ASS_TIMESTAMP, offsetof(ASSDialog, start) }, + {"End", ASS_TIMESTAMP, offsetof(ASSDialog, end) }, + {"Style", ASS_STR, offsetof(ASSDialog, style) }, + {"Name", ASS_STR, offsetof(ASSDialog, name) }, + {"MarginL", ASS_INT, offsetof(ASSDialog, margin_l)}, + {"MarginR", ASS_INT, offsetof(ASSDialog, margin_r)}, + {"MarginV", ASS_INT, offsetof(ASSDialog, margin_v)}, + {"Effect", ASS_STR, offsetof(ASSDialog, effect) }, + {"Text", ASS_STR, offsetof(ASSDialog, text) }, + {0}, + } + }, +}; + +struct ASSSplitContext { + ASS ass; + int current_section; + int field_number[FF_ARRAY_ELEMS(ass_sections)]; + int *field_order[FF_ARRAY_ELEMS(ass_sections)]; +}; + +/** + * This struct can be casted to ASS to access to the split data. + */ +typedef struct ASSSplitContext ASSSplitContext; + +/** + * Split a full ASS file or a ASS header from a string buffer and store + * the split structure in a newly allocated context. + * + * @param buf String containing the ASS formatted data. + * @return Newly allocated struct containing split data. + */ +ASSSplitContext *liteav_ff_ass_split(const char *buf); + +/** + * Split one or several ASS "Dialogue" lines from a string buffer and store + * them in an already initialized context. + * + * @param ctx Context previously initialized by liteav_ff_ass_split(). + * @param buf String containing the ASS "Dialogue" lines. + * @param cache Set to 1 to keep all the previously split ASSDialog in + * the context, or set to 0 to free all the previously split + * ASSDialog. + * @param number If not NULL, the pointed integer will be set to the number + * of split ASSDialog. + * @return Pointer to the first split ASSDialog. 
+ */ +ASSDialog *liteav_ff_ass_split_dialog(ASSSplitContext *ctx, const char *buf, + int cache, int *number); + +/** + * Free a dialogue obtained from liteav_ff_ass_split_dialog2(). + */ +void liteav_ff_ass_free_dialog(ASSDialog **dialogp); + +/** + * Split one ASS Dialogue line from a string buffer. + * + * @param ctx Context previously initialized by liteav_ff_ass_split(). + * @param buf String containing the ASS "Dialogue" line. + * @return Pointer to the split ASSDialog. Must be freed with liteav_ff_ass_free_dialog() + */ +ASSDialog *liteav_ff_ass_split_dialog2(ASSSplitContext *ctx, const char *buf); + +/** + * Free all the memory allocated for an ASSSplitContext. + * + * @param ctx Context previously initialized by liteav_ff_ass_split(). + */ +void liteav_ff_ass_split_free(ASSSplitContext *ctx); + + +/** + * Set of callback functions corresponding to each override codes that can + * be encountered in a "Dialogue" Text field. + */ +typedef struct { + /** + * @defgroup ass_styles ASS styles + * @{ + */ + void (*text)(void *priv, const char *text, int len); + void (*new_line)(void *priv, int forced); + void (*style)(void *priv, char style, int close); + void (*color)(void *priv, unsigned int /* color */, unsigned int color_id); + void (*alpha)(void *priv, int alpha, int alpha_id); + void (*font_name)(void *priv, const char *name); + void (*font_size)(void *priv, int size); + void (*alignment)(void *priv, int alignment); + void (*cancel_overrides)(void *priv, const char *style); + /** @} */ + + /** + * @defgroup ass_functions ASS functions + * @{ + */ + void (*move)(void *priv, int x1, int y1, int x2, int y2, int t1, int t2); + void (*origin)(void *priv, int x, int y); + /** @} */ + + /** + * @defgroup ass_end end of Dialogue Event + * @{ + */ + void (*end)(void *priv); + /** @} */ +} ASSCodesCallbacks; + +/** + * Split override codes out of a ASS "Dialogue" Text field. 
+ * + * @param callbacks Set of callback functions called for each override code + * encountered. + * @param priv Opaque pointer passed to the callback functions. + * @param buf The ASS "Dialogue" Text field to split. + * @return >= 0 on success otherwise an error code <0 + */ +int liteav_ff_ass_split_override_codes(const ASSCodesCallbacks *callbacks, void *priv, + const char *buf); + +/** + * Find an ASSStyle structure by its name. + * + * @param ctx Context previously initialized by liteav_ff_ass_split(). + * @param style name of the style to search for. + * @return the ASSStyle corresponding to style, or NULL if style can't be found + */ +ASSStyle *liteav_ff_ass_style_get(ASSSplitContext *ctx, const char *style); + +#endif /* AVCODEC_ASS_SPLIT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/avcodec.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/avcodec.h new file mode 100755 index 0000000..b8e0974 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/avcodec.h @@ -0,0 +1,6630 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVCODEC_H +#define AVCODEC_AVCODEC_H + +/** + * @file + * @ingroup libavc + * Libavcodec external API header + */ + +#include <errno.h> +#include "libavutil/samplefmt.h" +#include "libavutil/attributes.h" +#include "libavutil/avutil.h" +#include "libavutil/buffer.h" +#include "libavutil/cpu.h" +#include "libavutil/channel_layout.h" +#include "libavutil/dict.h" +#include "libavutil/frame.h" +#include "libavutil/hwcontext.h" +#include "libavutil/log.h" +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" + +#include "version.h" + +/** + * @defgroup libavc libavcodec + * Encoding/Decoding Library + * + * @{ + * + * @defgroup lavc_decoding Decoding + * @{ + * @} + * + * @defgroup lavc_encoding Encoding + * @{ + * @} + * + * @defgroup lavc_codec Codecs + * @{ + * @defgroup lavc_codec_native Native Codecs + * @{ + * @} + * @defgroup lavc_codec_wrappers External library wrappers + * @{ + * @} + * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge + * @{ + * @} + * @} + * @defgroup lavc_internal Internal + * @{ + * @} + * @} + */ + +/** + * @ingroup libavc + * @defgroup lavc_encdec send/receive encoding and decoding API overview + * @{ + * + * The liteav_avcodec_send_packet()/liteav_avcodec_receive_frame()/liteav_avcodec_send_frame()/ + * liteav_avcodec_receive_packet() functions provide an encode/decode API, which + * decouples input and output. + * + * The API is very similar for encoding/decoding and audio/video, and works as + * follows: + * - Set up and open the AVCodecContext as usual. + * - Send valid input: + * - For decoding, call liteav_avcodec_send_packet() to give the decoder raw + * compressed data in an AVPacket. 
+ * - For encoding, call liteav_avcodec_send_frame() to give the encoder an AVFrame + * containing uncompressed audio or video. + * In both cases, it is recommended that AVPackets and AVFrames are + * refcounted, or libavcodec might have to copy the input data. (libavformat + * always returns refcounted AVPackets, and liteav_av_frame_get_buffer() allocates + * refcounted AVFrames.) + * - Receive output in a loop. Periodically call one of the avcodec_receive_*() + * functions and process their output: + * - For decoding, call liteav_avcodec_receive_frame(). On success, it will return + * an AVFrame containing uncompressed audio or video data. + * - For encoding, call liteav_avcodec_receive_packet(). On success, it will return + * an AVPacket with a compressed frame. + * Repeat this call until it returns AVERROR(EAGAIN) or an error. The + * AVERROR(EAGAIN) return value means that new input data is required to + * return new output. In this case, continue with sending input. For each + * input frame/packet, the codec will typically return 1 output frame/packet, + * but it can also be 0 or more than 1. + * + * At the beginning of decoding or encoding, the codec might accept multiple + * input frames/packets without returning a frame, until its internal buffers + * are filled. This situation is handled transparently if you follow the steps + * outlined above. + * + * In theory, sending input can result in EAGAIN - this should happen only if + * not all output was received. You can use this to structure alternative decode + * or encode loops other than the one suggested above. For example, you could + * try sending new input on each iteration, and try to receive output if that + * returns EAGAIN. + * + * End of stream situations. These require "flushing" (aka draining) the codec, + * as the codec might buffer multiple frames or packets internally for + * performance or out of necessity (consider B-frames). 
+ * This is handled as follows: + * - Instead of valid input, send NULL to the liteav_avcodec_send_packet() (decoding) + * or liteav_avcodec_send_frame() (encoding) functions. This will enter draining + * mode. + * - Call liteav_avcodec_receive_frame() (decoding) or liteav_avcodec_receive_packet() + * (encoding) in a loop until AVERROR_EOF is returned. The functions will + * not return AVERROR(EAGAIN), unless you forgot to enter draining mode. + * - Before decoding can be resumed again, the codec has to be reset with + * liteav_avcodec_flush_buffers(). + * + * Using the API as outlined above is highly recommended. But it is also + * possible to call functions outside of this rigid schema. For example, you can + * call liteav_avcodec_send_packet() repeatedly without calling + * liteav_avcodec_receive_frame(). In this case, liteav_avcodec_send_packet() will succeed + * until the codec's internal buffer has been filled up (which is typically of + * size 1 per output frame, after initial input), and then reject input with + * AVERROR(EAGAIN). Once it starts rejecting input, you have no choice but to + * read at least some output. + * + * Not all codecs will follow a rigid and predictable dataflow; the only + * guarantee is that an AVERROR(EAGAIN) return value on a send/receive call on + * one end implies that a receive/send call on the other end will succeed, or + * at least will not fail with AVERROR(EAGAIN). In general, no codec will + * permit unlimited buffering of input or output. + * + * This API replaces the following legacy functions: + * - liteav_avcodec_decode_video2() and liteav_avcodec_decode_audio4(): + * Use liteav_avcodec_send_packet() to feed input to the decoder, then use + * liteav_avcodec_receive_frame() to receive decoded frames after each packet. + * Unlike with the old video decoding API, multiple frames might result from + * a packet. 
For audio, splitting the input packet into frames by partially + * decoding packets becomes transparent to the API user. You never need to + * feed an AVPacket to the API twice (unless it is rejected with AVERROR(EAGAIN) - then + * no data was read from the packet). + * Additionally, sending a flush/draining packet is required only once. + * - liteav_avcodec_encode_video2()/liteav_avcodec_encode_audio2(): + * Use liteav_avcodec_send_frame() to feed input to the encoder, then use + * liteav_avcodec_receive_packet() to receive encoded packets. + * Providing user-allocated buffers for liteav_avcodec_receive_packet() is not + * possible. + * - The new API does not handle subtitles yet. + * + * Mixing new and old function calls on the same AVCodecContext is not allowed, + * and will result in undefined behavior. + * + * Some codecs might require using the new API; using the old API will return + * an error when calling it. All codecs support the new API. + * + * A codec is not allowed to return AVERROR(EAGAIN) for both sending and receiving. This + * would be an invalid state, which could put the codec user into an endless + * loop. The API has no concept of time either: it cannot happen that trying to + * do liteav_avcodec_send_packet() results in AVERROR(EAGAIN), but a repeated call 1 second + * later accepts the packet (with no other receive/flush API calls involved). + * The API is a strict state machine, and the passage of time is not supposed + * to influence it. Some timing-dependent behavior might still be deemed + * acceptable in certain cases. But it must never result in both send/receive + * returning EAGAIN at the same time at any point. It must also absolutely be + * avoided that the current state is "unstable" and can "flip-flop" between + * the send/receive APIs allowing progress. 
For example, it's not allowed that + * the codec randomly decides that it actually wants to consume a packet now + * instead of returning a frame, after it just returned AVERROR(EAGAIN) on an + * liteav_avcodec_send_packet() call. + * @} + */ + +/** + * @defgroup lavc_core Core functions/structures. + * @ingroup libavc + * + * Basic definitions, functions for querying libavcodec capabilities, + * allocating core structures, etc. + * @{ + */ + + +/** + * Identify the syntax and semantics of the bitstream. + * The principle is roughly: + * Two decoders with the same ID can decode the same streams. + * Two encoders with the same ID can encode compatible streams. + * There may be slight deviations from the principle due to implementation + * details. + * + * If you add a codec ID to this list, add it so that + * 1. no value of an existing codec ID changes (that would break ABI), + * 2. it is as close as possible to similar codecs + * + * After adding new codec IDs, do not forget to add an entry to the codec + * descriptor list and bump libavcodec minor version. 
+ */ +enum AVCodecID { + AV_CODEC_ID_NONE, + + /* video codecs */ + AV_CODEC_ID_MPEG1VIDEO, + AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding + AV_CODEC_ID_H261, + AV_CODEC_ID_H263, + AV_CODEC_ID_RV10, + AV_CODEC_ID_RV20, + AV_CODEC_ID_MJPEG, + AV_CODEC_ID_MJPEGB, + AV_CODEC_ID_LJPEG, + AV_CODEC_ID_SP5X, + AV_CODEC_ID_JPEGLS, + AV_CODEC_ID_MPEG4, + AV_CODEC_ID_RAWVIDEO, + AV_CODEC_ID_MSMPEG4V1, + AV_CODEC_ID_MSMPEG4V2, + AV_CODEC_ID_MSMPEG4V3, + AV_CODEC_ID_WMV1, + AV_CODEC_ID_WMV2, + AV_CODEC_ID_H263P, + AV_CODEC_ID_H263I, + AV_CODEC_ID_FLV1, + AV_CODEC_ID_SVQ1, + AV_CODEC_ID_SVQ3, + AV_CODEC_ID_DVVIDEO, + AV_CODEC_ID_HUFFYUV, + AV_CODEC_ID_CYUV, + AV_CODEC_ID_H264, + AV_CODEC_ID_INDEO3, + AV_CODEC_ID_VP3, + AV_CODEC_ID_THEORA, + AV_CODEC_ID_ASV1, + AV_CODEC_ID_ASV2, + AV_CODEC_ID_FFV1, + AV_CODEC_ID_4XM, + AV_CODEC_ID_VCR1, + AV_CODEC_ID_CLJR, + AV_CODEC_ID_MDEC, + AV_CODEC_ID_ROQ, + AV_CODEC_ID_INTERPLAY_VIDEO, + AV_CODEC_ID_XAN_WC3, + AV_CODEC_ID_XAN_WC4, + AV_CODEC_ID_RPZA, + AV_CODEC_ID_CINEPAK, + AV_CODEC_ID_WS_VQA, + AV_CODEC_ID_MSRLE, + AV_CODEC_ID_MSVIDEO1, + AV_CODEC_ID_IDCIN, + AV_CODEC_ID_8BPS, + AV_CODEC_ID_SMC, + AV_CODEC_ID_FLIC, + AV_CODEC_ID_TRUEMOTION1, + AV_CODEC_ID_VMDVIDEO, + AV_CODEC_ID_MSZH, + AV_CODEC_ID_ZLIB, + AV_CODEC_ID_QTRLE, + AV_CODEC_ID_TSCC, + AV_CODEC_ID_ULTI, + AV_CODEC_ID_QDRAW, + AV_CODEC_ID_VIXL, + AV_CODEC_ID_QPEG, + AV_CODEC_ID_PNG, + AV_CODEC_ID_PPM, + AV_CODEC_ID_PBM, + AV_CODEC_ID_PGM, + AV_CODEC_ID_PGMYUV, + AV_CODEC_ID_PAM, + AV_CODEC_ID_FFVHUFF, + AV_CODEC_ID_RV30, + AV_CODEC_ID_RV40, + AV_CODEC_ID_VC1, + AV_CODEC_ID_WMV3, + AV_CODEC_ID_LOCO, + AV_CODEC_ID_WNV1, + AV_CODEC_ID_AASC, + AV_CODEC_ID_INDEO2, + AV_CODEC_ID_FRAPS, + AV_CODEC_ID_TRUEMOTION2, + AV_CODEC_ID_BMP, + AV_CODEC_ID_CSCD, + AV_CODEC_ID_MMVIDEO, + AV_CODEC_ID_ZMBV, + AV_CODEC_ID_AVS, + AV_CODEC_ID_SMACKVIDEO, + AV_CODEC_ID_NUV, + AV_CODEC_ID_KMVC, + AV_CODEC_ID_FLASHSV, + AV_CODEC_ID_CAVS, + AV_CODEC_ID_JPEG2000, + 
AV_CODEC_ID_VMNC, + AV_CODEC_ID_VP5, + AV_CODEC_ID_VP6, + AV_CODEC_ID_VP6F, + AV_CODEC_ID_TARGA, + AV_CODEC_ID_DSICINVIDEO, + AV_CODEC_ID_TIERTEXSEQVIDEO, + AV_CODEC_ID_TIFF, + AV_CODEC_ID_GIF, + AV_CODEC_ID_DXA, + AV_CODEC_ID_DNXHD, + AV_CODEC_ID_THP, + AV_CODEC_ID_SGI, + AV_CODEC_ID_C93, + AV_CODEC_ID_BETHSOFTVID, + AV_CODEC_ID_PTX, + AV_CODEC_ID_TXD, + AV_CODEC_ID_VP6A, + AV_CODEC_ID_AMV, + AV_CODEC_ID_VB, + AV_CODEC_ID_PCX, + AV_CODEC_ID_SUNRAST, + AV_CODEC_ID_INDEO4, + AV_CODEC_ID_INDEO5, + AV_CODEC_ID_MIMIC, + AV_CODEC_ID_RL2, + AV_CODEC_ID_ESCAPE124, + AV_CODEC_ID_DIRAC, + AV_CODEC_ID_BFI, + AV_CODEC_ID_CMV, + AV_CODEC_ID_MOTIONPIXELS, + AV_CODEC_ID_TGV, + AV_CODEC_ID_TGQ, + AV_CODEC_ID_TQI, + AV_CODEC_ID_AURA, + AV_CODEC_ID_AURA2, + AV_CODEC_ID_V210X, + AV_CODEC_ID_TMV, + AV_CODEC_ID_V210, + AV_CODEC_ID_DPX, + AV_CODEC_ID_MAD, + AV_CODEC_ID_FRWU, + AV_CODEC_ID_FLASHSV2, + AV_CODEC_ID_CDGRAPHICS, + AV_CODEC_ID_R210, + AV_CODEC_ID_ANM, + AV_CODEC_ID_BINKVIDEO, + AV_CODEC_ID_IFF_ILBM, +#define AV_CODEC_ID_IFF_BYTERUN1 AV_CODEC_ID_IFF_ILBM + AV_CODEC_ID_KGV1, + AV_CODEC_ID_YOP, + AV_CODEC_ID_VP8, + AV_CODEC_ID_PICTOR, + AV_CODEC_ID_ANSI, + AV_CODEC_ID_A64_MULTI, + AV_CODEC_ID_A64_MULTI5, + AV_CODEC_ID_R10K, + AV_CODEC_ID_MXPEG, + AV_CODEC_ID_LAGARITH, + AV_CODEC_ID_PRORES, + AV_CODEC_ID_JV, + AV_CODEC_ID_DFA, + AV_CODEC_ID_WMV3IMAGE, + AV_CODEC_ID_VC1IMAGE, + AV_CODEC_ID_UTVIDEO, + AV_CODEC_ID_BMV_VIDEO, + AV_CODEC_ID_VBLE, + AV_CODEC_ID_DXTORY, + AV_CODEC_ID_V410, + AV_CODEC_ID_XWD, + AV_CODEC_ID_CDXL, + AV_CODEC_ID_XBM, + AV_CODEC_ID_ZEROCODEC, + AV_CODEC_ID_MSS1, + AV_CODEC_ID_MSA1, + AV_CODEC_ID_TSCC2, + AV_CODEC_ID_MTS2, + AV_CODEC_ID_CLLC, + AV_CODEC_ID_MSS2, + AV_CODEC_ID_VP9, + AV_CODEC_ID_AIC, + AV_CODEC_ID_ESCAPE130, + AV_CODEC_ID_G2M, + AV_CODEC_ID_WEBP, + AV_CODEC_ID_HNM4_VIDEO, + AV_CODEC_ID_HEVC, +#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC + AV_CODEC_ID_FIC, + AV_CODEC_ID_ALIAS_PIX, + AV_CODEC_ID_BRENDER_PIX, + AV_CODEC_ID_PAF_VIDEO, + 
AV_CODEC_ID_EXR, + AV_CODEC_ID_VP7, + AV_CODEC_ID_SANM, + AV_CODEC_ID_SGIRLE, + AV_CODEC_ID_MVC1, + AV_CODEC_ID_MVC2, + AV_CODEC_ID_HQX, + AV_CODEC_ID_TDSC, + AV_CODEC_ID_HQ_HQA, + AV_CODEC_ID_HAP, + AV_CODEC_ID_DDS, + AV_CODEC_ID_DXV, + AV_CODEC_ID_SCREENPRESSO, + AV_CODEC_ID_RSCC, + AV_CODEC_ID_AVS2, + AV_CODEC_ID_AVS3, + + AV_CODEC_ID_Y41P = 0x8000, + AV_CODEC_ID_AVRP, + AV_CODEC_ID_012V, + AV_CODEC_ID_AVUI, + AV_CODEC_ID_AYUV, + AV_CODEC_ID_TARGA_Y216, + AV_CODEC_ID_V308, + AV_CODEC_ID_V408, + AV_CODEC_ID_YUV4, + AV_CODEC_ID_AVRN, + AV_CODEC_ID_CPIA, + AV_CODEC_ID_XFACE, + AV_CODEC_ID_SNOW, + AV_CODEC_ID_SMVJPEG, + AV_CODEC_ID_APNG, + AV_CODEC_ID_DAALA, + AV_CODEC_ID_CFHD, + AV_CODEC_ID_TRUEMOTION2RT, + AV_CODEC_ID_M101, + AV_CODEC_ID_MAGICYUV, + AV_CODEC_ID_SHEERVIDEO, + AV_CODEC_ID_YLC, + AV_CODEC_ID_PSD, + AV_CODEC_ID_PIXLET, + AV_CODEC_ID_SPEEDHQ, + AV_CODEC_ID_FMVC, + AV_CODEC_ID_SCPR, + AV_CODEC_ID_CLEARVIDEO, + AV_CODEC_ID_XPM, + AV_CODEC_ID_AV1, + AV_CODEC_ID_BITPACKED, + AV_CODEC_ID_MSCC, + AV_CODEC_ID_SRGC, + AV_CODEC_ID_SVG, + AV_CODEC_ID_GDV, + AV_CODEC_ID_FITS, + AV_CODEC_ID_IMM4, + AV_CODEC_ID_PROSUMER, + AV_CODEC_ID_MWSC, + AV_CODEC_ID_WCMV, + AV_CODEC_ID_RASC, + + /* various PCM "codecs" */ + AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs + AV_CODEC_ID_PCM_S16LE = 0x10000, + AV_CODEC_ID_PCM_S16BE, + AV_CODEC_ID_PCM_U16LE, + AV_CODEC_ID_PCM_U16BE, + AV_CODEC_ID_PCM_S8, + AV_CODEC_ID_PCM_U8, + AV_CODEC_ID_PCM_MULAW, + AV_CODEC_ID_PCM_ALAW, + AV_CODEC_ID_PCM_S32LE, + AV_CODEC_ID_PCM_S32BE, + AV_CODEC_ID_PCM_U32LE, + AV_CODEC_ID_PCM_U32BE, + AV_CODEC_ID_PCM_S24LE, + AV_CODEC_ID_PCM_S24BE, + AV_CODEC_ID_PCM_U24LE, + AV_CODEC_ID_PCM_U24BE, + AV_CODEC_ID_PCM_S24DAUD, + AV_CODEC_ID_PCM_ZORK, + AV_CODEC_ID_PCM_S16LE_PLANAR, + AV_CODEC_ID_PCM_DVD, + AV_CODEC_ID_PCM_F32BE, + AV_CODEC_ID_PCM_F32LE, + AV_CODEC_ID_PCM_F64BE, + AV_CODEC_ID_PCM_F64LE, + AV_CODEC_ID_PCM_BLURAY, + AV_CODEC_ID_PCM_LXF, + AV_CODEC_ID_S302M, 
+ AV_CODEC_ID_PCM_S8_PLANAR, + AV_CODEC_ID_PCM_S24LE_PLANAR, + AV_CODEC_ID_PCM_S32LE_PLANAR, + AV_CODEC_ID_PCM_S16BE_PLANAR, + + AV_CODEC_ID_PCM_S64LE = 0x10800, + AV_CODEC_ID_PCM_S64BE, + AV_CODEC_ID_PCM_F16LE, + AV_CODEC_ID_PCM_F24LE, + AV_CODEC_ID_PCM_VIDC, + + /* various ADPCM codecs */ + AV_CODEC_ID_ADPCM_IMA_QT = 0x11000, + AV_CODEC_ID_ADPCM_IMA_WAV, + AV_CODEC_ID_ADPCM_IMA_DK3, + AV_CODEC_ID_ADPCM_IMA_DK4, + AV_CODEC_ID_ADPCM_IMA_WS, + AV_CODEC_ID_ADPCM_IMA_SMJPEG, + AV_CODEC_ID_ADPCM_MS, + AV_CODEC_ID_ADPCM_4XM, + AV_CODEC_ID_ADPCM_XA, + AV_CODEC_ID_ADPCM_ADX, + AV_CODEC_ID_ADPCM_EA, + AV_CODEC_ID_ADPCM_G726, + AV_CODEC_ID_ADPCM_CT, + AV_CODEC_ID_ADPCM_SWF, + AV_CODEC_ID_ADPCM_YAMAHA, + AV_CODEC_ID_ADPCM_SBPRO_4, + AV_CODEC_ID_ADPCM_SBPRO_3, + AV_CODEC_ID_ADPCM_SBPRO_2, + AV_CODEC_ID_ADPCM_THP, + AV_CODEC_ID_ADPCM_IMA_AMV, + AV_CODEC_ID_ADPCM_EA_R1, + AV_CODEC_ID_ADPCM_EA_R3, + AV_CODEC_ID_ADPCM_EA_R2, + AV_CODEC_ID_ADPCM_IMA_EA_SEAD, + AV_CODEC_ID_ADPCM_IMA_EA_EACS, + AV_CODEC_ID_ADPCM_EA_XAS, + AV_CODEC_ID_ADPCM_EA_MAXIS_XA, + AV_CODEC_ID_ADPCM_IMA_ISS, + AV_CODEC_ID_ADPCM_G722, + AV_CODEC_ID_ADPCM_IMA_APC, + AV_CODEC_ID_ADPCM_VIMA, + + AV_CODEC_ID_ADPCM_AFC = 0x11800, + AV_CODEC_ID_ADPCM_IMA_OKI, + AV_CODEC_ID_ADPCM_DTK, + AV_CODEC_ID_ADPCM_IMA_RAD, + AV_CODEC_ID_ADPCM_G726LE, + AV_CODEC_ID_ADPCM_THP_LE, + AV_CODEC_ID_ADPCM_PSX, + AV_CODEC_ID_ADPCM_AICA, + AV_CODEC_ID_ADPCM_IMA_DAT4, + AV_CODEC_ID_ADPCM_MTAF, + + /* AMR */ + AV_CODEC_ID_AMR_NB = 0x12000, + AV_CODEC_ID_AMR_WB, + + /* RealAudio codecs*/ + AV_CODEC_ID_RA_144 = 0x13000, + AV_CODEC_ID_RA_288, + + /* various DPCM codecs */ + AV_CODEC_ID_ROQ_DPCM = 0x14000, + AV_CODEC_ID_INTERPLAY_DPCM, + AV_CODEC_ID_XAN_DPCM, + AV_CODEC_ID_SOL_DPCM, + + AV_CODEC_ID_SDX2_DPCM = 0x14800, + AV_CODEC_ID_GREMLIN_DPCM, + + /* audio codecs */ + AV_CODEC_ID_MP2 = 0x15000, + AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3 + AV_CODEC_ID_AAC, + AV_CODEC_ID_AC3, + AV_CODEC_ID_DTS, + 
AV_CODEC_ID_VORBIS, + AV_CODEC_ID_DVAUDIO, + AV_CODEC_ID_WMAV1, + AV_CODEC_ID_WMAV2, + AV_CODEC_ID_MACE3, + AV_CODEC_ID_MACE6, + AV_CODEC_ID_VMDAUDIO, + AV_CODEC_ID_FLAC, + AV_CODEC_ID_MP3ADU, + AV_CODEC_ID_MP3ON4, + AV_CODEC_ID_SHORTEN, + AV_CODEC_ID_ALAC, + AV_CODEC_ID_WESTWOOD_SND1, + AV_CODEC_ID_GSM, ///< as in Berlin toast format + AV_CODEC_ID_QDM2, + AV_CODEC_ID_COOK, + AV_CODEC_ID_TRUESPEECH, + AV_CODEC_ID_TTA, + AV_CODEC_ID_SMACKAUDIO, + AV_CODEC_ID_QCELP, + AV_CODEC_ID_WAVPACK, + AV_CODEC_ID_DSICINAUDIO, + AV_CODEC_ID_IMC, + AV_CODEC_ID_MUSEPACK7, + AV_CODEC_ID_MLP, + AV_CODEC_ID_GSM_MS, /* as found in WAV */ + AV_CODEC_ID_ATRAC3, + AV_CODEC_ID_APE, + AV_CODEC_ID_NELLYMOSER, + AV_CODEC_ID_MUSEPACK8, + AV_CODEC_ID_SPEEX, + AV_CODEC_ID_WMAVOICE, + AV_CODEC_ID_WMAPRO, + AV_CODEC_ID_WMALOSSLESS, + AV_CODEC_ID_ATRAC3P, + AV_CODEC_ID_EAC3, + AV_CODEC_ID_SIPR, + AV_CODEC_ID_MP1, + AV_CODEC_ID_TWINVQ, + AV_CODEC_ID_TRUEHD, + AV_CODEC_ID_MP4ALS, + AV_CODEC_ID_ATRAC1, + AV_CODEC_ID_BINKAUDIO_RDFT, + AV_CODEC_ID_BINKAUDIO_DCT, + AV_CODEC_ID_AAC_LATM, + AV_CODEC_ID_QDMC, + AV_CODEC_ID_CELT, + AV_CODEC_ID_G723_1, + AV_CODEC_ID_G729, + AV_CODEC_ID_8SVX_EXP, + AV_CODEC_ID_8SVX_FIB, + AV_CODEC_ID_BMV_AUDIO, + AV_CODEC_ID_RALF, + AV_CODEC_ID_IAC, + AV_CODEC_ID_ILBC, + AV_CODEC_ID_OPUS, + AV_CODEC_ID_COMFORT_NOISE, + AV_CODEC_ID_TAK, + AV_CODEC_ID_METASOUND, + AV_CODEC_ID_PAF_AUDIO, + AV_CODEC_ID_ON2AVC, + AV_CODEC_ID_DSS_SP, + AV_CODEC_ID_CODEC2, + + AV_CODEC_ID_FFWAVESYNTH = 0x15800, + AV_CODEC_ID_SONIC, + AV_CODEC_ID_SONIC_LS, + AV_CODEC_ID_EVRC, + AV_CODEC_ID_SMV, + AV_CODEC_ID_DSD_LSBF, + AV_CODEC_ID_DSD_MSBF, + AV_CODEC_ID_DSD_LSBF_PLANAR, + AV_CODEC_ID_DSD_MSBF_PLANAR, + AV_CODEC_ID_4GV, + AV_CODEC_ID_INTERPLAY_ACM, + AV_CODEC_ID_XMA1, + AV_CODEC_ID_XMA2, + AV_CODEC_ID_DST, + AV_CODEC_ID_ATRAC3AL, + AV_CODEC_ID_ATRAC3PAL, + AV_CODEC_ID_DOLBY_E, + AV_CODEC_ID_APTX, + AV_CODEC_ID_APTX_HD, + AV_CODEC_ID_SBC, + AV_CODEC_ID_ATRAC9, + + /* subtitle codecs */ + 
AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. + AV_CODEC_ID_DVD_SUBTITLE = 0x17000, + AV_CODEC_ID_DVB_SUBTITLE, + AV_CODEC_ID_TEXT, ///< raw UTF-8 text + AV_CODEC_ID_XSUB, + AV_CODEC_ID_SSA, + AV_CODEC_ID_MOV_TEXT, + AV_CODEC_ID_HDMV_PGS_SUBTITLE, + AV_CODEC_ID_DVB_TELETEXT, + AV_CODEC_ID_SRT, + + AV_CODEC_ID_MICRODVD = 0x17800, + AV_CODEC_ID_EIA_608, + AV_CODEC_ID_JACOSUB, + AV_CODEC_ID_SAMI, + AV_CODEC_ID_REALTEXT, + AV_CODEC_ID_STL, + AV_CODEC_ID_SUBVIEWER1, + AV_CODEC_ID_SUBVIEWER, + AV_CODEC_ID_SUBRIP, + AV_CODEC_ID_WEBVTT, + AV_CODEC_ID_MPL2, + AV_CODEC_ID_VPLAYER, + AV_CODEC_ID_PJS, + AV_CODEC_ID_ASS, + AV_CODEC_ID_HDMV_TEXT_SUBTITLE, + AV_CODEC_ID_TTML, + + /* other specific kind of codecs (generally used for attachments) */ + AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs. + AV_CODEC_ID_TTF = 0x18000, + + AV_CODEC_ID_SCTE_35, ///< Contain timestamp estimated through PCR of program stream. + AV_CODEC_ID_BINTEXT = 0x18800, + AV_CODEC_ID_XBIN, + AV_CODEC_ID_IDF, + AV_CODEC_ID_OTF, + AV_CODEC_ID_SMPTE_KLV, + AV_CODEC_ID_DVD_NAV, + AV_CODEC_ID_TIMED_ID3, + AV_CODEC_ID_BIN_DATA, + + + AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it + + AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS + * stream (only used by libavformat) */ + AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems + * stream (only used by libavformat) */ + AV_CODEC_ID_MPEG2PS = 0x20002, /**< _FAKE_ codec to indicate a raw MPEG-2 PS + * stream (only used by libavformat) */ + AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information. + AV_CODEC_ID_WRAPPED_AVFRAME = 0x21001, ///< Passthrough codec, AVFrames wrapped in AVPacket +}; + +/** + * This struct describes the properties of a single codec described by an + * AVCodecID. 
+ * @see liteav_avcodec_descriptor_get() + */ +typedef struct AVCodecDescriptor { + enum AVCodecID id; + enum AVMediaType type; + /** + * Name of the codec described by this descriptor. It is non-empty and + * unique for each codec descriptor. It should contain alphanumeric + * characters and '_' only. + */ + const char *name; + /** + * A more descriptive name for this codec. May be NULL. + */ + const char *long_name; + /** + * Codec properties, a combination of AV_CODEC_PROP_* flags. + */ + int props; + /** + * MIME type(s) associated with the codec. + * May be NULL; if not, a NULL-terminated array of MIME types. + * The first item is always non-NULL and is the preferred MIME type. + */ + const char *const *mime_types; + /** + * If non-NULL, an array of profiles recognized for this codec. + * Terminated with FF_PROFILE_UNKNOWN. + */ + const struct AVProfile *profiles; +} AVCodecDescriptor; + +/** + * Codec uses only intra compression. + * Video and audio codecs only. + */ +#define AV_CODEC_PROP_INTRA_ONLY (1 << 0) +/** + * Codec supports lossy compression. Audio and video codecs only. + * @note a codec may support both lossy and lossless + * compression modes + */ +#define AV_CODEC_PROP_LOSSY (1 << 1) +/** + * Codec supports lossless compression. Audio and video codecs only. + */ +#define AV_CODEC_PROP_LOSSLESS (1 << 2) +/** + * Codec supports frame reordering. That is, the coded order (the order in which + * the encoded packets are output by the encoders / stored / input to the + * decoders) may be different from the presentation order of the corresponding + * frames. + * + * For codecs that do not have this property set, PTS and DTS should always be + * equal. + */ +#define AV_CODEC_PROP_REORDER (1 << 3) +/** + * Subtitle codec is bitmap based + * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field. + */ +#define AV_CODEC_PROP_BITMAP_SUB (1 << 16) +/** + * Subtitle codec is text based. 
+ * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field. + */ +#define AV_CODEC_PROP_TEXT_SUB (1 << 17) + +/** + * @ingroup lavc_decoding + * Required number of additionally allocated bytes at the end of the input bitstream for decoding. + * This is mainly needed because some optimized bitstream readers read + * 32 or 64 bit at once and could read over the end.<br> + * Note: If the first 23 bits of the additional bytes are not 0, then damaged + * MPEG bitstreams could cause overread and segfault. + */ +#define AV_INPUT_BUFFER_PADDING_SIZE 64 + +/** + * @ingroup lavc_encoding + * minimum encoding buffer size + * Used to avoid some checks during header writing. + */ +#define AV_INPUT_BUFFER_MIN_SIZE 16384 + +#if FF_API_WITHOUT_PREFIX +/** + * @deprecated use AV_INPUT_BUFFER_PADDING_SIZE instead + */ +#define FF_INPUT_BUFFER_PADDING_SIZE 32 + +/** + * @deprecated use AV_INPUT_BUFFER_MIN_SIZE instead + */ +#define FF_MIN_BUFFER_SIZE 16384 +#endif /* FF_API_WITHOUT_PREFIX */ + +/** + * @ingroup lavc_decoding + */ +enum AVDiscard{ + /* We leave some space between them for extensions (drop some + * keyframes for intra-only or drop just some bidir frames). 
*/ + AVDISCARD_NONE =-16, ///< discard nothing + AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi + AVDISCARD_NONREF = 8, ///< discard all non reference + AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames + AVDISCARD_NONINTRA= 24, ///< discard all non intra frames + AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes + AVDISCARD_ALL = 48, ///< discard all +}; + +enum AVAudioServiceType { + AV_AUDIO_SERVICE_TYPE_MAIN = 0, + AV_AUDIO_SERVICE_TYPE_EFFECTS = 1, + AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2, + AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3, + AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4, + AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5, + AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6, + AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7, + AV_AUDIO_SERVICE_TYPE_KARAOKE = 8, + AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI +}; + +/** + * @ingroup lavc_encoding + */ +typedef struct RcOverride{ + int start_frame; + int end_frame; + int qscale; // If this is 0 then quality_factor will be used instead. + float quality_factor; +} RcOverride; + +/* encoding support + These flags can be passed in AVCodecContext.flags before initialization. + Note: Not everything is supported yet. +*/ + +/** + * Allow decoders to produce frames with data planes that are not aligned + * to CPU requirements (e.g. due to cropping). + */ +#define AV_CODEC_FLAG_UNALIGNED (1 << 0) +/** + * Use fixed qscale. + */ +#define AV_CODEC_FLAG_QSCALE (1 << 1) +/** + * 4 MV per MB allowed / advanced prediction for H.263. + */ +#define AV_CODEC_FLAG_4MV (1 << 2) +/** + * Output even those frames that might be corrupted. + */ +#define AV_CODEC_FLAG_OUTPUT_CORRUPT (1 << 3) +/** + * Use qpel MC. + */ +#define AV_CODEC_FLAG_QPEL (1 << 4) +/** + * Use internal 2pass ratecontrol in first pass mode. + */ +#define AV_CODEC_FLAG_PASS1 (1 << 9) +/** + * Use internal 2pass ratecontrol in second pass mode. + */ +#define AV_CODEC_FLAG_PASS2 (1 << 10) +/** + * loop filter. 
+ */ +#define AV_CODEC_FLAG_LOOP_FILTER (1 << 11) +/** + * Only decode/encode grayscale. + */ +#define AV_CODEC_FLAG_GRAY (1 << 13) +/** + * error[?] variables will be set during encoding. + */ +#define AV_CODEC_FLAG_PSNR (1 << 15) +/** + * Input bitstream might be truncated at a random location + * instead of only at frame boundaries. + */ +#define AV_CODEC_FLAG_TRUNCATED (1 << 16) +/** + * Use interlaced DCT. + */ +#define AV_CODEC_FLAG_INTERLACED_DCT (1 << 18) +/** + * Force low delay. + */ +#define AV_CODEC_FLAG_LOW_DELAY (1 << 19) +/** + * Place global headers in extradata instead of every keyframe. + */ +#define AV_CODEC_FLAG_GLOBAL_HEADER (1 << 22) +/** + * Use only bitexact stuff (except (I)DCT). + */ +#define AV_CODEC_FLAG_BITEXACT (1 << 23) +/* Fx : Flag for H.263+ extra options */ +/** + * H.263 advanced intra coding / MPEG-4 AC prediction + */ +#define AV_CODEC_FLAG_AC_PRED (1 << 24) +/** + * interlaced motion estimation + */ +#define AV_CODEC_FLAG_INTERLACED_ME (1 << 29) +#define AV_CODEC_FLAG_CLOSED_GOP (1U << 31) + +/** + * Allow non spec compliant speedup tricks. + */ +#define AV_CODEC_FLAG2_FAST (1 << 0) +/** + * Skip bitstream encoding. + */ +#define AV_CODEC_FLAG2_NO_OUTPUT (1 << 2) +/** + * Place global headers at every keyframe instead of in extradata. + */ +#define AV_CODEC_FLAG2_LOCAL_HEADER (1 << 3) + +/** + * timecode is in drop frame format. DEPRECATED!!!! + */ +#define AV_CODEC_FLAG2_DROP_FRAME_TIMECODE (1 << 13) + +/** + * Input bitstream might be truncated at a packet boundaries + * instead of only at frame boundaries. + */ +#define AV_CODEC_FLAG2_CHUNKS (1 << 15) +/** + * Discard cropping information from SPS. 
+ */ +#define AV_CODEC_FLAG2_IGNORE_CROP (1 << 16) + +/** + * Show all frames before the first keyframe + */ +#define AV_CODEC_FLAG2_SHOW_ALL (1 << 22) +/** + * Export motion vectors through frame side data + */ +#define AV_CODEC_FLAG2_EXPORT_MVS (1 << 28) +/** + * Do not skip samples and export skip information as frame side data + */ +#define AV_CODEC_FLAG2_SKIP_MANUAL (1 << 29) +/** + * Do not reset ASS ReadOrder field on flush (subtitles decoding) + */ +#define AV_CODEC_FLAG2_RO_FLUSH_NOOP (1 << 30) + +/* Unsupported options : + * Syntax Arithmetic coding (SAC) + * Reference Picture Selection + * Independent Segment Decoding */ +/* /Fx */ +/* codec capabilities */ + +/** + * Decoder can use draw_horiz_band callback. + */ +#define AV_CODEC_CAP_DRAW_HORIZ_BAND (1 << 0) +/** + * Codec uses get_buffer() for allocating buffers and supports custom allocators. + * If not set, it might not use get_buffer() at all or use operations that + * assume the buffer was allocated by avcodec_default_get_buffer. + */ +#define AV_CODEC_CAP_DR1 (1 << 1) +#define AV_CODEC_CAP_TRUNCATED (1 << 3) +/** + * Encoder or decoder requires flushing with NULL input at the end in order to + * give the complete and correct output. + * + * NOTE: If this flag is not set, the codec is guaranteed to never be fed with + * with NULL data. The user can still send NULL data to the public encode + * or decode function, but libavcodec will not pass it along to the codec + * unless this flag is set. + * + * Decoders: + * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to get the delayed data until the decoder no longer + * returns frames. + * + * Encoders: + * The encoder needs to be fed with NULL data at the end of encoding until the + * encoder no longer returns data. + * + * NOTE: For encoders implementing the AVCodec.encode2() function, setting this + * flag also means that the encoder must set the pts and duration for + * each output packet. 
If this flag is not set, the pts and duration will + * be determined by libavcodec from the input frame. + */ +#define AV_CODEC_CAP_DELAY (1 << 5) +/** + * Codec can be fed a final frame with a smaller size. + * This can be used to prevent truncation of the last audio samples. + */ +#define AV_CODEC_CAP_SMALL_LAST_FRAME (1 << 6) + +/** + * Codec can output multiple frames per AVPacket + * Normally demuxers return one frame at a time, demuxers which do not do + * are connected to a parser to split what they return into proper frames. + * This flag is reserved to the very rare category of codecs which have a + * bitstream that cannot be split into frames without timeconsuming + * operations like full decoding. Demuxers carrying such bitstreams thus + * may return multiple frames in a packet. This has many disadvantages like + * prohibiting stream copy in many cases thus it should only be considered + * as a last resort. + */ +#define AV_CODEC_CAP_SUBFRAMES (1 << 8) +/** + * Codec is experimental and is thus avoided in favor of non experimental + * encoders + */ +#define AV_CODEC_CAP_EXPERIMENTAL (1 << 9) +/** + * Codec should fill in channel configuration and samplerate instead of container + */ +#define AV_CODEC_CAP_CHANNEL_CONF (1 << 10) +/** + * Codec supports frame-level multithreading. + */ +#define AV_CODEC_CAP_FRAME_THREADS (1 << 12) +/** + * Codec supports slice-based (or partition-based) multithreading. + */ +#define AV_CODEC_CAP_SLICE_THREADS (1 << 13) +/** + * Codec supports changed parameters at any point. + */ +#define AV_CODEC_CAP_PARAM_CHANGE (1 << 14) +/** + * Codec supports avctx->thread_count == 0 (auto). + */ +#define AV_CODEC_CAP_AUTO_THREADS (1 << 15) +/** + * Audio encoder supports receiving a different number of samples in each call. + */ +#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE (1 << 16) +/** + * Decoder is not a preferred choice for probing. + * This indicates that the decoder is not a good choice for probing. 
+ * It could for example be an expensive to spin up hardware decoder, + * or it could simply not provide a lot of useful information about + * the stream. + * A decoder marked with this flag should only be used as last resort + * choice for probing. + */ +#define AV_CODEC_CAP_AVOID_PROBING (1 << 17) +/** + * Codec is intra only. + */ +#define AV_CODEC_CAP_INTRA_ONLY 0x40000000 +/** + * Codec is lossless. + */ +#define AV_CODEC_CAP_LOSSLESS 0x80000000 + +#if FF_API_WITHOUT_PREFIX +/** + * Allow decoders to produce frames with data planes that are not aligned + * to CPU requirements (e.g. due to cropping). + */ +#define CODEC_FLAG_UNALIGNED AV_CODEC_FLAG_UNALIGNED +#define CODEC_FLAG_QSCALE AV_CODEC_FLAG_QSCALE +#define CODEC_FLAG_4MV AV_CODEC_FLAG_4MV +#define CODEC_FLAG_OUTPUT_CORRUPT AV_CODEC_FLAG_OUTPUT_CORRUPT +#define CODEC_FLAG_QPEL AV_CODEC_FLAG_QPEL +#if FF_API_GMC +/** + * @deprecated use the "gmc" private option of the libxvid encoder + */ +#define CODEC_FLAG_GMC 0x0020 ///< Use GMC. +#endif +#if FF_API_MV0 +/** + * @deprecated use the flag "mv0" in the "mpv_flags" private option of the + * mpegvideo encoders + */ +#define CODEC_FLAG_MV0 0x0040 +#endif +#if FF_API_INPUT_PRESERVED +/** + * @deprecated passing reference-counted frames to the encoders replaces this + * flag + */ +#define CODEC_FLAG_INPUT_PRESERVED 0x0100 +#endif +#define CODEC_FLAG_PASS1 AV_CODEC_FLAG_PASS1 +#define CODEC_FLAG_PASS2 AV_CODEC_FLAG_PASS2 +#define CODEC_FLAG_GRAY AV_CODEC_FLAG_GRAY +#if FF_API_EMU_EDGE +/** + * @deprecated edges are not used/required anymore. I.e. this flag is now always + * set. 
+ */ +#define CODEC_FLAG_EMU_EDGE 0x4000 +#endif +#define CODEC_FLAG_PSNR AV_CODEC_FLAG_PSNR +#define CODEC_FLAG_TRUNCATED AV_CODEC_FLAG_TRUNCATED + +#if FF_API_NORMALIZE_AQP +/** + * @deprecated use the flag "naq" in the "mpv_flags" private option of the + * mpegvideo encoders + */ +#define CODEC_FLAG_NORMALIZE_AQP 0x00020000 +#endif +#define CODEC_FLAG_INTERLACED_DCT AV_CODEC_FLAG_INTERLACED_DCT +#define CODEC_FLAG_LOW_DELAY AV_CODEC_FLAG_LOW_DELAY +#define CODEC_FLAG_GLOBAL_HEADER AV_CODEC_FLAG_GLOBAL_HEADER +#define CODEC_FLAG_BITEXACT AV_CODEC_FLAG_BITEXACT +#define CODEC_FLAG_AC_PRED AV_CODEC_FLAG_AC_PRED +#define CODEC_FLAG_LOOP_FILTER AV_CODEC_FLAG_LOOP_FILTER +#define CODEC_FLAG_INTERLACED_ME AV_CODEC_FLAG_INTERLACED_ME +#define CODEC_FLAG_CLOSED_GOP AV_CODEC_FLAG_CLOSED_GOP +#define CODEC_FLAG2_FAST AV_CODEC_FLAG2_FAST +#define CODEC_FLAG2_NO_OUTPUT AV_CODEC_FLAG2_NO_OUTPUT +#define CODEC_FLAG2_LOCAL_HEADER AV_CODEC_FLAG2_LOCAL_HEADER +#define CODEC_FLAG2_DROP_FRAME_TIMECODE AV_CODEC_FLAG2_DROP_FRAME_TIMECODE +#define CODEC_FLAG2_IGNORE_CROP AV_CODEC_FLAG2_IGNORE_CROP + +#define CODEC_FLAG2_CHUNKS AV_CODEC_FLAG2_CHUNKS +#define CODEC_FLAG2_SHOW_ALL AV_CODEC_FLAG2_SHOW_ALL +#define CODEC_FLAG2_EXPORT_MVS AV_CODEC_FLAG2_EXPORT_MVS +#define CODEC_FLAG2_SKIP_MANUAL AV_CODEC_FLAG2_SKIP_MANUAL + +/* Unsupported options : + * Syntax Arithmetic coding (SAC) + * Reference Picture Selection + * Independent Segment Decoding */ +/* /Fx */ +/* codec capabilities */ +#define CODEC_CAP_DRAW_HORIZ_BAND AV_CODEC_CAP_DRAW_HORIZ_BAND ///< Decoder can use draw_horiz_band callback. +/** + * Codec uses get_buffer() for allocating buffers and supports custom allocators. + * If not set, it might not use get_buffer() at all or use operations that + * assume the buffer was allocated by avcodec_default_get_buffer. + */ +#define CODEC_CAP_DR1 AV_CODEC_CAP_DR1 +#define CODEC_CAP_TRUNCATED AV_CODEC_CAP_TRUNCATED +#if FF_API_XVMC +/* Codec can export data for HW decoding. 
This flag indicates that + * the codec would call get_format() with list that might contain HW accelerated + * pixel formats (XvMC, VDPAU, VAAPI, etc). The application can pick any of them + * including raw image format. + * The application can use the passed context to determine bitstream version, + * chroma format, resolution etc. + */ +#define CODEC_CAP_HWACCEL 0x0010 +#endif /* FF_API_XVMC */ +/** + * Encoder or decoder requires flushing with NULL input at the end in order to + * give the complete and correct output. + * + * NOTE: If this flag is not set, the codec is guaranteed to never be fed with + * with NULL data. The user can still send NULL data to the public encode + * or decode function, but libavcodec will not pass it along to the codec + * unless this flag is set. + * + * Decoders: + * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to get the delayed data until the decoder no longer + * returns frames. + * + * Encoders: + * The encoder needs to be fed with NULL data at the end of encoding until the + * encoder no longer returns data. + * + * NOTE: For encoders implementing the AVCodec.encode2() function, setting this + * flag also means that the encoder must set the pts and duration for + * each output packet. If this flag is not set, the pts and duration will + * be determined by libavcodec from the input frame. + */ +#define CODEC_CAP_DELAY AV_CODEC_CAP_DELAY +/** + * Codec can be fed a final frame with a smaller size. + * This can be used to prevent truncation of the last audio samples. + */ +#define CODEC_CAP_SMALL_LAST_FRAME AV_CODEC_CAP_SMALL_LAST_FRAME +#if FF_API_CAP_VDPAU +/** + * Codec can export data for HW decoding (VDPAU). 
+ */ +#define CODEC_CAP_HWACCEL_VDPAU AV_CODEC_CAP_HWACCEL_VDPAU +#endif +/** + * Codec can output multiple frames per AVPacket + * Normally demuxers return one frame at a time, demuxers which do not do + * are connected to a parser to split what they return into proper frames. + * This flag is reserved to the very rare category of codecs which have a + * bitstream that cannot be split into frames without timeconsuming + * operations like full decoding. Demuxers carrying such bitstreams thus + * may return multiple frames in a packet. This has many disadvantages like + * prohibiting stream copy in many cases thus it should only be considered + * as a last resort. + */ +#define CODEC_CAP_SUBFRAMES AV_CODEC_CAP_SUBFRAMES +/** + * Codec is experimental and is thus avoided in favor of non experimental + * encoders + */ +#define CODEC_CAP_EXPERIMENTAL AV_CODEC_CAP_EXPERIMENTAL +/** + * Codec should fill in channel configuration and samplerate instead of container + */ +#define CODEC_CAP_CHANNEL_CONF AV_CODEC_CAP_CHANNEL_CONF +#if FF_API_NEG_LINESIZES +/** + * @deprecated no codecs use this capability + */ +#define CODEC_CAP_NEG_LINESIZES 0x0800 +#endif +/** + * Codec supports frame-level multithreading. + */ +#define CODEC_CAP_FRAME_THREADS AV_CODEC_CAP_FRAME_THREADS +/** + * Codec supports slice-based (or partition-based) multithreading. + */ +#define CODEC_CAP_SLICE_THREADS AV_CODEC_CAP_SLICE_THREADS +/** + * Codec supports changed parameters at any point. + */ +#define CODEC_CAP_PARAM_CHANGE AV_CODEC_CAP_PARAM_CHANGE +/** + * Codec supports avctx->thread_count == 0 (auto). + */ +#define CODEC_CAP_AUTO_THREADS AV_CODEC_CAP_AUTO_THREADS +/** + * Audio encoder supports receiving a different number of samples in each call. + */ +#define CODEC_CAP_VARIABLE_FRAME_SIZE AV_CODEC_CAP_VARIABLE_FRAME_SIZE +/** + * Codec is intra only. + */ +#define CODEC_CAP_INTRA_ONLY AV_CODEC_CAP_INTRA_ONLY +/** + * Codec is lossless. 
+ */ +#define CODEC_CAP_LOSSLESS AV_CODEC_CAP_LOSSLESS + +/** + * HWAccel is experimental and is thus avoided in favor of non experimental + * codecs + */ +#define HWACCEL_CODEC_CAP_EXPERIMENTAL 0x0200 +#endif /* FF_API_WITHOUT_PREFIX */ + +/** + * Codec is backed by a hardware implementation. Typically used to + * identify a non-hwaccel hardware decoder. For information about hwaccels, use + * avcodec_get_hw_config() instead. + */ +#define AV_CODEC_CAP_HARDWARE (1 << 18) + +/** + * Codec is potentially backed by a hardware implementation, but not + * necessarily. This is used instead of AV_CODEC_CAP_HARDWARE, if the + * implementation provides some sort of internal fallback. + */ +#define AV_CODEC_CAP_HYBRID (1 << 19) + +/** + * Pan Scan area. + * This specifies the area which should be displayed. + * Note there may be multiple such areas for one frame. + */ +typedef struct AVPanScan { + /** + * id + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int id; + + /** + * width and height in 1/16 pel + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int width; + int height; + + /** + * position of the top left corner in 1/16 pel for up to 3 fields/frames + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int16_t position[3][2]; +} AVPanScan; + +/** + * This structure describes the bitrate properties of an encoded bitstream. It + * roughly corresponds to a subset the VBV parameters for MPEG-2 or HRD + * parameters for H.264/HEVC. + */ +typedef struct AVCPBProperties { + /** + * Maximum bitrate of the stream, in bits per second. + * Zero if unknown or unspecified. + */ + int max_bitrate; + /** + * Minimum bitrate of the stream, in bits per second. + * Zero if unknown or unspecified. + */ + int min_bitrate; + /** + * Average bitrate of the stream, in bits per second. + * Zero if unknown or unspecified. + */ + int avg_bitrate; + + /** + * The size of the buffer to which the ratecontrol is applied, in bits. 
+ * Zero if unknown or unspecified. + */ + int buffer_size; + + /** + * The delay between the time the packet this structure is associated with + * is received and the time when it should be decoded, in periods of a 27MHz + * clock. + * + * UINT64_MAX when unknown or unspecified. + */ + uint64_t vbv_delay; +} AVCPBProperties; + +/** + * The decoder will keep a reference to the frame and may reuse it later. + */ +#define AV_GET_BUFFER_FLAG_REF (1 << 0) + +/** + * @defgroup lavc_packet AVPacket + * + * Types and functions for working with AVPacket. + * @{ + */ +enum AVPacketSideDataType { + /** + * An AV_PKT_DATA_PALETTE side data packet contains exactly AVPALETTE_SIZE + * bytes worth of palette. This side data signals that a new palette is + * present. + */ + AV_PKT_DATA_PALETTE, + + /** + * The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format + * that the extradata buffer was changed and the receiving side should + * act upon it appropriately. The new extradata is embedded in the side + * data buffer and should be immediately used for processing the current + * frame or packet. + */ + AV_PKT_DATA_NEW_EXTRADATA, + + /** + * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows: + * @code + * u32le param_flags + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) + * s32le channel_count + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) + * u64le channel_layout + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) + * s32le sample_rate + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) + * s32le width + * s32le height + * @endcode + */ + AV_PKT_DATA_PARAM_CHANGE, + + /** + * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of + * structures with info about macroblocks relevant to splitting the + * packet into smaller packets on macroblock edges (e.g. as for RFC 2190). 
+ * That is, it does not necessarily contain info about all macroblocks, + * as long as the distance between macroblocks in the info is smaller + * than the target payload size. + * Each MB info structure is 12 bytes, and is laid out as follows: + * @code + * u32le bit offset from the start of the packet + * u8 current quantizer at the start of the macroblock + * u8 GOB number + * u16le macroblock address within the GOB + * u8 horizontal MV predictor + * u8 vertical MV predictor + * u8 horizontal MV predictor for block number 3 + * u8 vertical MV predictor for block number 3 + * @endcode + */ + AV_PKT_DATA_H263_MB_INFO, + + /** + * This side data should be associated with an audio stream and contains + * ReplayGain information in form of the AVReplayGain struct. + */ + AV_PKT_DATA_REPLAYGAIN, + + /** + * This side data contains a 3x3 transformation matrix describing an affine + * transformation that needs to be applied to the decoded video frames for + * correct presentation. + * + * See libavutil/display.h for a detailed description of the data. + */ + AV_PKT_DATA_DISPLAYMATRIX, + + /** + * This side data should be associated with a video stream and contains + * Stereoscopic 3D information in form of the AVStereo3D struct. + */ + AV_PKT_DATA_STEREO3D, + + /** + * This side data should be associated with an audio stream and corresponds + * to enum AVAudioServiceType. + */ + AV_PKT_DATA_AUDIO_SERVICE_TYPE, + + /** + * This side data contains quality related information from the encoder. + * @code + * u32le quality factor of the compressed frame. Allowed range is between 1 (good) and FF_LAMBDA_MAX (bad). + * u8 picture type + * u8 error count + * u16 reserved + * u64le[error count] sum of squared differences between encoder in and output + * @endcode + */ + AV_PKT_DATA_QUALITY_STATS, + + /** + * This side data contains an integer value representing the stream index + * of a "fallback" track. 
A fallback track indicates an alternate + * track to use when the current track can not be decoded for some reason. + * e.g. no decoder available for codec. + */ + AV_PKT_DATA_FALLBACK_TRACK, + + /** + * This side data corresponds to the AVCPBProperties struct. + */ + AV_PKT_DATA_CPB_PROPERTIES, + + /** + * Recommmends skipping the specified number of samples + * @code + * u32le number of samples to skip from start of this packet + * u32le number of samples to skip from end of this packet + * u8 reason for start skip + * u8 reason for end skip (0=padding silence, 1=convergence) + * @endcode + */ + AV_PKT_DATA_SKIP_SAMPLES, + + /** + * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that + * the packet may contain "dual mono" audio specific to Japanese DTV + * and if it is true, recommends only the selected channel to be used. + * @code + * u8 selected channels (0=mail/left, 1=sub/right, 2=both) + * @endcode + */ + AV_PKT_DATA_JP_DUALMONO, + + /** + * A list of zero terminated key/value strings. There is no end marker for + * the list, so it is required to rely on the side data size to stop. + */ + AV_PKT_DATA_STRINGS_METADATA, + + /** + * Subtitle event position + * @code + * u32le x1 + * u32le y1 + * u32le x2 + * u32le y2 + * @endcode + */ + AV_PKT_DATA_SUBTITLE_POSITION, + + /** + * Data found in BlockAdditional element of matroska container. There is + * no end marker for the data, so it is required to rely on the side data + * size to recognize the end. 8 byte id (as found in BlockAddId) followed + * by data. + */ + AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, + + /** + * The optional first identifier line of a WebVTT cue. + */ + AV_PKT_DATA_WEBVTT_IDENTIFIER, + + /** + * The optional settings (rendering instructions) that immediately + * follow the timestamp specifier of a WebVTT cue. + */ + AV_PKT_DATA_WEBVTT_SETTINGS, + + /** + * The optional WebVTT NOTE. + */ + AV_PKT_DATA_WEBVTT_NOTE, + + /** + * A list of zero terminated key/value strings. 
There is no end marker for + * the list, so it is required to rely on the side data size to stop. This + * side data includes updated metadata which appeared in the stream. + */ + AV_PKT_DATA_METADATA_UPDATE, + + /** + * MPEGTS stream ID as uint8_t, this is required to pass the stream ID + * information from the demuxer to the corresponding muxer. + */ + AV_PKT_DATA_MPEGTS_STREAM_ID, + + /** + * Mastering display metadata (based on SMPTE-2086:2014). This metadata + * should be associated with a video stream and contains data in the form + * of the AVMasteringDisplayMetadata struct. + */ + AV_PKT_DATA_MASTERING_DISPLAY_METADATA, + + /** + * This side data should be associated with a video stream and corresponds + * to the AVSphericalMapping structure. + */ + AV_PKT_DATA_SPHERICAL, + + /** + * Content light level (based on CTA-861.3). This metadata should be + * associated with a video stream and contains data in the form of the + * AVContentLightMetadata struct. + */ + AV_PKT_DATA_CONTENT_LIGHT_LEVEL, + + /** + * ATSC A53 Part 4 Closed Captions. This metadata should be associated with + * a video stream. A53 CC bitstream is stored as uint8_t in AVPacketSideData.data. + * The number of bytes of CC data is AVPacketSideData.size. + */ + AV_PKT_DATA_A53_CC, + + /** + * This side data is encryption initialization data. + * The format is not part of ABI, use av_encryption_init_info_* methods to + * access. + */ + AV_PKT_DATA_ENCRYPTION_INIT_INFO, + + /** + * This side data contains encryption info for how to decrypt the packet. + * The format is not part of ABI, use av_encryption_info_* methods to access. + */ + AV_PKT_DATA_ENCRYPTION_INFO, + + /** + * Active Format Description data consisting of a single byte as specified + * in ETSI TS 101 154 using AVActiveFormatDescription enum. + */ + AV_PKT_DATA_AFD, + + /** + * Used to record the time offset of each packet from the start of the current + * playlist. using int64_t, in AV_TIME_BASE. 
+ * @code + * i64le duration + * @endcode + */ + AV_PKT_DATA_PASS_DURATION, + + /** + * Used to pass hls media tags to caller. + * Tag strings are stored in struct HLSMediaTags. + * @code + * u32le tag_num, total number of tags + * u32le len_1, lenght of next tag string, includes tail '\0' + * u8 *tag_1, '\0' terminated + * u32le len_2, lenght of next tag string, includes tail '\0' + * u8 *tag_2, '\0' terminated + * ... + * @endcode + */ + AV_PKT_DATA_HLS_MEDIA_TAGS, + + /** + * DOVI configuration + * ref: + * dolby-vision-bitstreams-within-the-iso-base-media-file-format-v2.1.2, section 2.2 + * dolby-vision-bitstreams-in-mpeg-2-transport-stream-multiplex-v1.2, section 3.3 + * Tags are stored in struct AVDOVIDecoderConfigurationRecord. + */ + AV_PKT_DATA_DOVI_CONF, + + /** + * The number of side data types. + * This is not part of the public API/ABI in the sense that it may + * change when new side data types are added. + * This must stay the last enum value. + * If its value becomes huge, some code using it + * needs to be updated as it assumes it to be smaller than other limits. + */ + AV_PKT_DATA_NB +}; + +#define AV_PKT_DATA_QUALITY_FACTOR AV_PKT_DATA_QUALITY_STATS //DEPRECATED + +typedef struct AVPacketSideData { + uint8_t *data; + int size; + enum AVPacketSideDataType type; +} AVPacketSideData; + +#define MAX_PRIVATE_TAG_NUM 5 +#define MAX_PRIVATE_TAG_SIZE 4096 + +/** + * This structure stores compressed data. It is typically exported by demuxers + * and then passed as input to decoders, or received as output from encoders and + * then passed to muxers. + * + * For video, it should typically contain one compressed frame. For audio it may + * contain several compressed frames. Encoders are allowed to output empty + * packets, with no compressed data, containing only side data + * (e.g. to update some stream parameters at the end of encoding). + * + * AVPacket is one of the few structs in FFmpeg, whose size is a part of public + * ABI. 
Thus it may be allocated on stack and no new fields can be added to it + * without libavcodec and libavformat major bump. + * + * The semantics of data ownership depends on the buf field. + * If it is set, the packet data is dynamically allocated and is + * valid indefinitely until a call to liteav_av_packet_unref() reduces the + * reference count to 0. + * + * If the buf field is not set liteav_av_packet_ref() would make a copy instead + * of increasing the reference count. + * + * The side data is always allocated with liteav_av_malloc(), copied by + * liteav_av_packet_ref() and freed by liteav_av_packet_unref(). + * + * @see liteav_av_packet_ref + * @see liteav_av_packet_unref + */ +typedef struct AVPacket { + /** + * A reference to the reference-counted buffer where the packet data is + * stored. + * May be NULL, then the packet data is not reference-counted. + */ + AVBufferRef *buf; + /** + * Presentation timestamp in AVStream->time_base units; the time at which + * the decompressed packet will be presented to the user. + * Can be AV_NOPTS_VALUE if it is not stored in the file. + * pts MUST be larger or equal to dts as presentation cannot happen before + * decompression, unless one wants to view hex dumps. Some formats misuse + * the terms dts and pts/cts to mean something different. Such timestamps + * must be converted to true pts/dts before they are stored in AVPacket. + */ + int64_t pts; + /** + * Decompression timestamp in AVStream->time_base units; the time at which + * the packet is decompressed. + * Can be AV_NOPTS_VALUE if it is not stored in the file. + */ + int64_t dts; + uint8_t *data; + int size; + int stream_index; + /** + * A combination of AV_PKT_FLAG values + */ + int flags; + /** + * Additional packet data that can be provided by the container. + * Packet can contain several types of side information. + */ + AVPacketSideData *side_data; + int side_data_elems; + + /** + * Duration of this packet in AVStream->time_base units, 0 if unknown. 
+ * Equals next_pts - this_pts in presentation order. + */ + int64_t duration; + + int64_t pos; ///< byte position in stream, -1 if unknown + +#if FF_API_CONVERGENCE_DURATION + /** + * @deprecated Same as the duration field, but as int64_t. This was required + * for Matroska subtitles, whose duration values could overflow when the + * duration field was still an int. + */ + attribute_deprecated + int64_t convergence_duration; + + int sequence_num; + + int64_t last_segment_total_dur; + + int private_tag_num; + char *private_tag_container[MAX_PRIVATE_TAG_NUM]; + int seg_no; +#endif +} AVPacket; +#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe +#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted +#define AV_PKT_FLAG_HLS_NEW_SEGMENT 0x1000 ///< The packet is the first one of a HLS segment +#define AV_PKT_FLAG_HLS_DISCONTINUITY 0x2000 ///< The packet is the first one after receiving an HLS DISCONTINUITY tag + +/** + * Flag is used to discard packets which are required to maintain valid + * decoder state but are not required for output and should be dropped + * after decoding. + **/ +#define AV_PKT_FLAG_DISCARD 0x0004 +/** + * The packet comes from a trusted source. + * + * Otherwise-unsafe constructs such as arbitrary pointers to data + * outside the packet may be followed. + */ +#define AV_PKT_FLAG_TRUSTED 0x0008 +/** + * Flag is used to indicate packets that contain frames that can + * be discarded by the decoder. I.e. Non-reference frames. 
+ */ +#define AV_PKT_FLAG_DISPOSABLE 0x0010 + + +enum AVSideDataParamChangeFlags { + AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001, + AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002, + AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004, + AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008, +}; +/** + * @} + */ + +struct AVCodecInternal; + +enum AVFieldOrder { + AV_FIELD_UNKNOWN, + AV_FIELD_PROGRESSIVE, + AV_FIELD_TT, //< Top coded_first, top displayed first + AV_FIELD_BB, //< Bottom coded first, bottom displayed first + AV_FIELD_TB, //< Top coded first, bottom displayed first + AV_FIELD_BT, //< Bottom coded first, top displayed first +}; + +/** + * main external API structure. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * You can use AVOptions (av_opt* / av_set/get*()) to access these fields from user + * applications. + * The name string for AVOptions options matches the associated command line + * parameter name and can be found in libavcodec/options_table.h + * The AVOption/command line parameter names differ in some cases from the C + * structure field names for historic reasons or brevity. + * sizeof(AVCodecContext) must not be used outside libav*. + */ +typedef struct AVCodecContext { + /** + * information on struct for liteav_av_log + * - set by avcodec_alloc_context3 + */ + const AVClass *av_class; + int log_level_offset; + + enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */ + const struct AVCodec *codec; +#if FF_API_CODEC_NAME + /** + * @deprecated this field is not used for anything in libavcodec + */ + attribute_deprecated + char codec_name[32]; +#endif + enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */ + + /** + * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'). + * This is used to work around some encoder bugs. + * A demuxer should set this to what is stored in the field used to identify the codec. 
+ * If there are multiple such fields in a container then the demuxer should choose the one + * which maximizes the information about the used codec. + * If the codec tag field in a container is larger than 32 bits then the demuxer should + * remap the longer ID to 32 bits with a table or other structure. Alternatively a new + * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated + * first. + * - encoding: Set by user, if not then the default based on codec_id will be used. + * - decoding: Set by user, will be converted to uppercase by libavcodec during init. + */ + unsigned int codec_tag; + + void *priv_data; + + /** + * Private context used for internal data. + * + * Unlike priv_data, this is not codec-specific. It is used in general + * libavcodec functions. + */ + struct AVCodecInternal *internal; + + /** + * Private data of the user, can be used to carry app specific stuff. + * - encoding: Set by user. + * - decoding: Set by user. + */ + void *opaque; + + /** + * the average bitrate + * - encoding: Set by user; unused for constant quantizer encoding. + * - decoding: Set by user, may be overwritten by libavcodec + * if this info is available in the stream + */ + int64_t bit_rate; + + /** + * number of bits the bitstream is allowed to diverge from the reference. + * the reference can be CBR (for CBR pass1) or VBR (for pass2) + * - encoding: Set by user; unused for constant quantizer encoding. + * - decoding: unused + */ + int bit_rate_tolerance; + + /** + * Global quality for codecs which cannot change it per frame. + * This should be proportional to MPEG-1/2/4 qscale. + * - encoding: Set by user. + * - decoding: unused + */ + int global_quality; + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int compression_level; +#define FF_COMPRESSION_DEFAULT -1 + + /** + * AV_CODEC_FLAG_*. + * - encoding: Set by user. + * - decoding: Set by user. 
+ */ + int flags; + + /** + * AV_CODEC_FLAG2_* + * - encoding: Set by user. + * - decoding: Set by user. + */ + int flags2; + + /** + * some codecs need / can use extradata like Huffman tables. + * MJPEG: Huffman tables + * rv10: additional flags + * MPEG-4: global headers (they can be in the bitstream or here) + * The allocated memory should be AV_INPUT_BUFFER_PADDING_SIZE bytes larger + * than extradata_size to avoid problems if it is read with the bitstream reader. + * The bytewise contents of extradata must not depend on the architecture or CPU endianness. + * Must be allocated with the liteav_av_malloc() family of functions. + * - encoding: Set/allocated/freed by libavcodec. + * - decoding: Set/allocated/freed by user. + */ + uint8_t *extradata; + int extradata_size; + + /** + * This is the fundamental unit of time (in seconds) in terms + * of which frame timestamps are represented. For fixed-fps content, + * timebase should be 1/framerate and timestamp increments should be + * identically 1. + * This often, but not always is the inverse of the frame rate or field rate + * for video. 1/time_base is not the average frame rate if the frame rate is not + * constant. + * + * Like containers, elementary streams also can store timestamps, 1/time_base + * is the unit in which these timestamps are specified. + * As example of such codec time base see ISO/IEC 14496-2:2001(E) + * vop_time_increment_resolution and fixed_vop_rate + * (fixed_vop_rate == 0 implies that it is different from the framerate) + * + * - encoding: MUST be set by user. + * - decoding: the use of this field for decoding is deprecated. + * Use framerate instead. + */ + AVRational time_base; + + /** + * For some codecs, the time base is closer to the field rate than the frame rate. + * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration + * if no telecine is used ... + * + * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2. 
+ */ + int ticks_per_frame; + + /** + * Codec delay. + * + * Encoding: Number of frames delay there will be from the encoder input to + * the decoder output. (we assume the decoder matches the spec) + * Decoding: Number of frames delay in addition to what a standard decoder + * as specified in the spec would produce. + * + * Video: + * Number of frames the decoded output will be delayed relative to the + * encoded input. + * + * Audio: + * For encoding, this field is unused (see initial_padding). + * + * For decoding, this is the number of samples the decoder needs to + * output before the decoder's output is valid. When seeking, you should + * start decoding this many samples prior to your desired seek point. + * + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int delay; + + + /* video only */ + /** + * picture width / height. + * + * @note Those fields may not match the values of the last + * AVFrame output by liteav_avcodec_decode_video2 due frame + * reordering. + * + * - encoding: MUST be set by user. + * - decoding: May be set by the user before opening the decoder if known e.g. + * from the container. Some decoders will require the dimensions + * to be set by the caller. During decoding, the decoder may + * overwrite those values as required while parsing the data. + */ + int width, height; + + /** + * Bitstream width / height, may be different from width/height e.g. when + * the decoded frame is cropped before being output or lowres is enabled. + * + * @note Those field may not match the value of the last + * AVFrame output by liteav_avcodec_receive_frame() due frame + * reordering. + * + * - encoding: unused + * - decoding: May be set by the user before opening the decoder if known + * e.g. from the container. During decoding, the decoder may + * overwrite those values as required while parsing the data. 
+ */ + int coded_width, coded_height; + + /** + * the number of pictures in a group of pictures, or 0 for intra_only + * - encoding: Set by user. + * - decoding: unused + */ + int gop_size; + + /** + * Pixel format, see AV_PIX_FMT_xxx. + * May be set by the demuxer if known from headers. + * May be overridden by the decoder if it knows better. + * + * @note This field may not match the value of the last + * AVFrame output by liteav_avcodec_receive_frame() due frame + * reordering. + * + * - encoding: Set by user. + * - decoding: Set by user if known, overridden by libavcodec while + * parsing the data. + */ + enum AVPixelFormat pix_fmt; + + /** + * If non NULL, 'draw_horiz_band' is called by the libavcodec + * decoder to draw a horizontal band. It improves cache usage. Not + * all codecs can do that. You must check the codec capabilities + * beforehand. + * When multithreading is used, it may be called from multiple threads + * at the same time; threads might draw different parts of the same AVFrame, + * or multiple AVFrames, and there is no guarantee that slices will be drawn + * in order. + * The function is also used by hardware acceleration APIs. + * It is called at least once during frame decoding to pass + * the data needed for hardware render. + * In that mode instead of pixel data, AVFrame points to + * a structure specific to the acceleration API. The application + * reads the structure and can change some fields to indicate progress + * or mark state. + * - encoding: unused + * - decoding: Set by user. 
+ * @param height the height of the slice + * @param y the y position of the slice + * @param type 1->top field, 2->bottom field, 3->frame + * @param offset offset into the AVFrame.data from which the slice should be read + */ + void (*draw_horiz_band)(struct AVCodecContext *s, + const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], + int y, int type, int height); + + /** + * callback to negotiate the pixelFormat + * @param fmt is the list of formats which are supported by the codec, + * it is terminated by -1 as 0 is a valid format, the formats are ordered by quality. + * The first is always the native one. + * @note The callback may be called again immediately if initialization for + * the selected (hardware-accelerated) pixel format failed. + * @warning Behavior is undefined if the callback returns a value not + * in the fmt list of formats. + * @return the chosen format + * - encoding: unused + * - decoding: Set by user, if not set the native format will be chosen. + */ + enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt); + + /** + * maximum number of B-frames between non-B-frames + * Note: The output will be delayed by max_b_frames+1 relative to the input. + * - encoding: Set by user. + * - decoding: unused + */ + int max_b_frames; + + /** + * qscale factor between IP and B-frames + * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset). + * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). + * - encoding: Set by user. + * - decoding: unused + */ + float b_quant_factor; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int b_frame_strategy; +#endif + + /** + * qscale offset between IP and B-frames + * - encoding: Set by user. + * - decoding: unused + */ + float b_quant_offset; + + /** + * Size of the frame reordering buffer in the decoder. + * For MPEG-2 it is 1 IPB or 0 low delay IP. 
+ * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int has_b_frames; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int mpeg_quant; +#endif + + /** + * qscale factor between P- and I-frames + * If > 0 then the last P-frame quantizer will be used (q = lastp_q * factor + offset). + * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). + * - encoding: Set by user. + * - decoding: unused + */ + float i_quant_factor; + + /** + * qscale offset between P and I-frames + * - encoding: Set by user. + * - decoding: unused + */ + float i_quant_offset; + + /** + * luminance masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float lumi_masking; + + /** + * temporary complexity masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float temporal_cplx_masking; + + /** + * spatial complexity masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float spatial_cplx_masking; + + /** + * p block masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float p_masking; + + /** + * darkness masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float dark_masking; + + /** + * slice count + * - encoding: Set by libavcodec. + * - decoding: Set by user (or 0). + */ + int slice_count; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int prediction_method; +#define FF_PRED_LEFT 0 +#define FF_PRED_PLANE 1 +#define FF_PRED_MEDIAN 2 +#endif + + /** + * slice offsets in the frame in bytes + * - encoding: Set/allocated by libavcodec. + * - decoding: Set/allocated by user (or NULL). + */ + int *slice_offset; + + /** + * sample aspect ratio (0 if unknown) + * That is the width of a pixel divided by the height of the pixel. 
+ * Numerator and denominator must be relatively prime and smaller than 256 for some video standards. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + AVRational sample_aspect_ratio; + + /** + * motion estimation comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_cmp; + /** + * subpixel motion estimation comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_sub_cmp; + /** + * macroblock comparison function (not supported yet) + * - encoding: Set by user. + * - decoding: unused + */ + int mb_cmp; + /** + * interlaced DCT comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int ildct_cmp; +#define FF_CMP_SAD 0 +#define FF_CMP_SSE 1 +#define FF_CMP_SATD 2 +#define FF_CMP_DCT 3 +#define FF_CMP_PSNR 4 +#define FF_CMP_BIT 5 +#define FF_CMP_RD 6 +#define FF_CMP_ZERO 7 +#define FF_CMP_VSAD 8 +#define FF_CMP_VSSE 9 +#define FF_CMP_NSSE 10 +#define FF_CMP_W53 11 +#define FF_CMP_W97 12 +#define FF_CMP_DCTMAX 13 +#define FF_CMP_DCT264 14 +#define FF_CMP_MEDIAN_SAD 15 +#define FF_CMP_CHROMA 256 + + /** + * ME diamond size & shape + * - encoding: Set by user. + * - decoding: unused + */ + int dia_size; + + /** + * amount of previous MV predictors (2a+1 x 2a+1 square) + * - encoding: Set by user. + * - decoding: unused + */ + int last_predictor_count; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int pre_me; +#endif + + /** + * motion estimation prepass comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_pre_cmp; + + /** + * ME prepass diamond size & shape + * - encoding: Set by user. + * - decoding: unused + */ + int pre_dia_size; + + /** + * subpel ME quality + * - encoding: Set by user. + * - decoding: unused + */ + int me_subpel_quality; + + /** + * maximum motion estimation search range in subpel units + * If 0 then no limit. + * + * - encoding: Set by user. 
+ * - decoding: unused + */ + int me_range; + + /** + * slice flags + * - encoding: unused + * - decoding: Set by user. + */ + int slice_flags; +#define SLICE_FLAG_CODED_ORDER 0x0001 ///< draw_horiz_band() is called in coded order instead of display +#define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG-2 field pics) +#define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1) + + /** + * macroblock decision mode + * - encoding: Set by user. + * - decoding: unused + */ + int mb_decision; +#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp +#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits +#define FF_MB_DECISION_RD 2 ///< rate distortion + + /** + * custom intra quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: Set by libavcodec. + */ + uint16_t *intra_matrix; + + /** + * custom inter quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: Set by libavcodec. + */ + uint16_t *inter_matrix; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int scenechange_threshold; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int noise_reduction; +#endif + + /** + * precision of the intra DC coefficient - 8 + * - encoding: Set by user. + * - decoding: Set by libavcodec + */ + int intra_dc_precision; + + /** + * Number of macroblock rows at the top which are skipped. + * - encoding: unused + * - decoding: Set by user. + */ + int skip_top; + + /** + * Number of macroblock rows at the bottom which are skipped. + * - encoding: unused + * - decoding: Set by user. + */ + int skip_bottom; + + /** + * minimum MB Lagrange multiplier + * - encoding: Set by user. + * - decoding: unused + */ + int mb_lmin; + + /** + * maximum MB Lagrange multiplier + * - encoding: Set by user. 
+ * - decoding: unused + */ + int mb_lmax; + +#if FF_API_PRIVATE_OPT + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + int me_penalty_compensation; +#endif + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int bidir_refine; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int brd_scale; +#endif + + /** + * minimum GOP size + * - encoding: Set by user. + * - decoding: unused + */ + int keyint_min; + + /** + * number of reference frames + * - encoding: Set by user. + * - decoding: Set by lavc. + */ + int refs; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int chromaoffset; +#endif + + /** + * Note: Value depends upon the compare function used for fullpel ME. + * - encoding: Set by user. + * - decoding: unused + */ + int mv0_threshold; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int b_sensitivity; +#endif + + /** + * Chromaticity coordinates of the source primaries. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorPrimaries color_primaries; + + /** + * Color Transfer Characteristic. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorTransferCharacteristic color_trc; + + /** + * YUV colorspace type. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorSpace colorspace; + + /** + * MPEG vs JPEG YUV range. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorRange color_range; + + /** + * This defines the location of chroma samples. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVChromaLocation chroma_sample_location; + + /** + * Number of slices. + * Indicates number of picture subdivisions. Used for parallelized + * decoding. 
+ * - encoding: Set by user + * - decoding: unused + */ + int slices; + + /** Field order + * - encoding: set by libavcodec + * - decoding: Set by user. + */ + enum AVFieldOrder field_order; + + /* audio only */ + int sample_rate; ///< samples per second + int channels; ///< number of audio channels + + /** + * audio sample format + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + enum AVSampleFormat sample_fmt; ///< sample format + + /* The following data should not be initialized. */ + /** + * Number of samples per channel in an audio frame. + * + * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame + * except the last must contain exactly frame_size samples per channel. + * May be 0 when the codec has AV_CODEC_CAP_VARIABLE_FRAME_SIZE set, then the + * frame size is not restricted. + * - decoding: may be set by some decoders to indicate constant frame size + */ + int frame_size; + + /** + * Frame counter, set by libavcodec. + * + * - decoding: total number of frames returned from the decoder so far. + * - encoding: total number of frames passed to the encoder so far. + * + * @note the counter is not incremented if encoding/decoding resulted in + * an error. + */ + int frame_number; + + /** + * number of bytes per packet if constant and known or 0 + * Used by some WAV based audio codecs. + */ + int block_align; + + /** + * Audio cutoff bandwidth (0 means "automatic") + * - encoding: Set by user. + * - decoding: unused + */ + int cutoff; + + /** + * Audio channel layout. + * - encoding: set by user. + * - decoding: set by user, may be overwritten by libavcodec. + */ + uint64_t channel_layout; + + /** + * Request decoder to use this channel layout if it can (0 for default) + * - encoding: unused + * - decoding: Set by user. + */ + uint64_t request_channel_layout; + + /** + * Type of service that the audio stream conveys. + * - encoding: Set by user. + * - decoding: Set by libavcodec. 
+ */ + enum AVAudioServiceType audio_service_type; + + /** + * desired sample format + * - encoding: Not used. + * - decoding: Set by user. + * Decoder will decode to this format if it can. + */ + enum AVSampleFormat request_sample_fmt; + + /** + * This callback is called at the beginning of each frame to get data + * buffer(s) for it. There may be one contiguous buffer for all the data or + * there may be a buffer per each data plane or anything in between. What + * this means is, you may set however many entries in buf[] you feel necessary. + * Each buffer must be reference-counted using the AVBuffer API (see description + * of buf[] below). + * + * The following fields will be set in the frame before this callback is + * called: + * - format + * - width, height (video only) + * - sample_rate, channel_layout, nb_samples (audio only) + * Their values may differ from the corresponding values in + * AVCodecContext. This callback must use the frame values, not the codec + * context values, to calculate the required buffer size. + * + * This callback must fill the following fields in the frame: + * - data[] + * - linesize[] + * - extended_data: + * * if the data is planar audio with more than 8 channels, then this + * callback must allocate and fill extended_data to contain all pointers + * to all data planes. data[] must hold as many pointers as it can. + * extended_data must be allocated with liteav_av_malloc() and will be freed in + * liteav_av_frame_unref(). + * * otherwise extended_data must point to data + * - buf[] must contain one or more pointers to AVBufferRef structures. Each of + * the frame's data and extended_data pointers must be contained in these. That + * is, one AVBufferRef for each allocated chunk of memory, not necessarily one + * AVBufferRef per data[] entry. See: liteav_av_buffer_create(), liteav_av_buffer_alloc(), + * and liteav_av_buffer_ref(). 
+ * - extended_buf and nb_extended_buf must be allocated with liteav_av_malloc() by + * this callback and filled with the extra buffers if there are more + * buffers than buf[] can hold. extended_buf will be freed in + * liteav_av_frame_unref(). + * + * If AV_CODEC_CAP_DR1 is not set then get_buffer2() must call + * liteav_avcodec_default_get_buffer2() instead of providing buffers allocated by + * some other means. + * + * Each data plane must be aligned to the maximum required by the target + * CPU. + * + * @see liteav_avcodec_default_get_buffer2() + * + * Video: + * + * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused + * (read and/or written to if it is writable) later by libavcodec. + * + * avcodec_align_dimensions2() should be used to find the required width and + * height, as they normally need to be rounded up to the next multiple of 16. + * + * Some decoders do not support linesizes changing between frames. + * + * If frame multithreading is used and thread_safe_callbacks is set, + * this callback may be called from a different thread, but not from more + * than one at once. Does not need to be reentrant. + * + * @see avcodec_align_dimensions2() + * + * Audio: + * + * Decoders request a buffer of a particular size by setting + * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may, + * however, utilize only part of the buffer by setting AVFrame.nb_samples + * to a smaller value in the output frame. + * + * As a convenience, liteav_av_samples_get_buffer_size() and + * liteav_av_samples_fill_arrays() in libavutil may be used by custom get_buffer2() + * functions to find the required data size and to fill data pointers and + * linesize. In AVFrame.linesize, only linesize[0] may be set for audio + * since all planes must be the same size. + * + * @see liteav_av_samples_get_buffer_size(), liteav_av_samples_fill_arrays() + * + * - encoding: unused + * - decoding: Set by libavcodec, user can override. 
+ */ + int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags); + + /** + * If non-zero, the decoded audio and video frames returned from + * liteav_avcodec_decode_video2() and liteav_avcodec_decode_audio4() are reference-counted + * and are valid indefinitely. The caller must free them with + * liteav_av_frame_unref() when they are not needed anymore. + * Otherwise, the decoded frames must not be freed by the caller and are + * only valid until the next decode call. + * + * This is always automatically enabled if liteav_avcodec_receive_frame() is used. + * + * - encoding: unused + * - decoding: set by the caller before avcodec_open2(). + */ + attribute_deprecated + int refcounted_frames; + + /* - encoding parameters */ + float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0) + float qblur; ///< amount of qscale smoothing over time (0.0-1.0) + + /** + * minimum quantizer + * - encoding: Set by user. + * - decoding: unused + */ + int qmin; + + /** + * maximum quantizer + * - encoding: Set by user. + * - decoding: unused + */ + int qmax; + + /** + * maximum quantizer difference between frames + * - encoding: Set by user. + * - decoding: unused + */ + int max_qdiff; + + /** + * decoder bitstream buffer size + * - encoding: Set by user. + * - decoding: unused + */ + int rc_buffer_size; + + /** + * ratecontrol override, see RcOverride + * - encoding: Allocated/set/freed by user. + * - decoding: unused + */ + int rc_override_count; + RcOverride *rc_override; + + /** + * maximum bitrate + * - encoding: Set by user. + * - decoding: Set by user, may be overwritten by libavcodec. + */ + int64_t rc_max_rate; + + /** + * minimum bitrate + * - encoding: Set by user. + * - decoding: unused + */ + int64_t rc_min_rate; + + /** + * Ratecontrol attempt to use, at maximum, <value> of what can be used without an underflow. + * - encoding: Set by user. + * - decoding: unused. 
+ */ + float rc_max_available_vbv_use; + + /** + * Ratecontrol attempt to use, at least, <value> times the amount needed to prevent a vbv overflow. + * - encoding: Set by user. + * - decoding: unused. + */ + float rc_min_vbv_overflow_use; + + /** + * Number of bits which should be loaded into the rc buffer before decoding starts. + * - encoding: Set by user. + * - decoding: unused + */ + int rc_initial_buffer_occupancy; + +#if FF_API_CODER_TYPE +#define FF_CODER_TYPE_VLC 0 +#define FF_CODER_TYPE_AC 1 +#define FF_CODER_TYPE_RAW 2 +#define FF_CODER_TYPE_RLE 3 + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + int coder_type; +#endif /* FF_API_CODER_TYPE */ + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int context_model; +#endif + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int frame_skip_threshold; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int frame_skip_factor; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int frame_skip_exp; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int frame_skip_cmp; +#endif /* FF_API_PRIVATE_OPT */ + + /** + * trellis RD quantization + * - encoding: Set by user. + * - decoding: unused + */ + int trellis; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int min_prediction_order; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int max_prediction_order; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int64_t timecode_frame_start; +#endif + +#if FF_API_RTP_CALLBACK + /** + * @deprecated unused + */ + /* The RTP callback: This function is called */ + /* every time the encoder has a packet to send. 
*/ + /* It depends on the encoder if the data starts */ + /* with a Start Code (it should). H.263 does. */ + /* mb_nb contains the number of macroblocks */ + /* encoded in the RTP payload. */ + attribute_deprecated + void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb); +#endif + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int rtp_payload_size; /* The size of the RTP payload: the coder will */ + /* do its best to deliver a chunk with size */ + /* below rtp_payload_size, the chunk will start */ + /* with a start code on some codecs like H.263. */ + /* This doesn't take account of any particular */ + /* headers inside the transmitted RTP payload. */ +#endif + +#if FF_API_STAT_BITS + /* statistics, used for 2-pass encoding */ + attribute_deprecated + int mv_bits; + attribute_deprecated + int header_bits; + attribute_deprecated + int i_tex_bits; + attribute_deprecated + int p_tex_bits; + attribute_deprecated + int i_count; + attribute_deprecated + int p_count; + attribute_deprecated + int skip_count; + attribute_deprecated + int misc_bits; + + /** @deprecated this field is unused */ + attribute_deprecated + int frame_bits; +#endif + + /** + * pass1 encoding statistics output buffer + * - encoding: Set by libavcodec. + * - decoding: unused + */ + char *stats_out; + + /** + * pass2 encoding statistics input buffer + * Concatenated stuff from stats_out of pass1 should be placed here. + * - encoding: Allocated/set/freed by user. + * - decoding: unused + */ + char *stats_in; + + /** + * Work around bugs in encoders which sometimes cannot be detected automatically. 
+ * - encoding: Set by user + * - decoding: Set by user + */ + int workaround_bugs; +#define FF_BUG_AUTODETECT 1 ///< autodetection +#define FF_BUG_XVID_ILACE 4 +#define FF_BUG_UMP4 8 +#define FF_BUG_NO_PADDING 16 +#define FF_BUG_AMV 32 +#define FF_BUG_QPEL_CHROMA 64 +#define FF_BUG_STD_QPEL 128 +#define FF_BUG_QPEL_CHROMA2 256 +#define FF_BUG_DIRECT_BLOCKSIZE 512 +#define FF_BUG_EDGE 1024 +#define FF_BUG_HPEL_CHROMA 2048 +#define FF_BUG_DC_CLIP 4096 +#define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders. +#define FF_BUG_TRUNCATED 16384 +#define FF_BUG_IEDGE 32768 + + /** + * strictly follow the standard (MPEG-4, ...). + * - encoding: Set by user. + * - decoding: Set by user. + * Setting this to STRICT or higher means the encoder and decoder will + * generally do stupid things, whereas setting it to unofficial or lower + * will mean the encoder might produce output that is not supported by all + * spec-compliant decoders. Decoders don't differentiate between normal, + * unofficial and experimental (that is, they always try to decode things + * when they can) unless they are explicitly asked to behave stupidly + * (=strictly conform to the specs) + */ + int strict_std_compliance; +#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software. +#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences. +#define FF_COMPLIANCE_NORMAL 0 +#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions +#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things. + + /** + * error concealment flags + * - encoding: unused + * - decoding: Set by user. + */ + int error_concealment; +#define FF_EC_GUESS_MVS 1 +#define FF_EC_DEBLOCK 2 +#define FF_EC_FAVOR_INTER 256 + + /** + * debug + * - encoding: Set by user. + * - decoding: Set by user. 
+ */ + int debug; +#define FF_DEBUG_PICT_INFO 1 +#define FF_DEBUG_RC 2 +#define FF_DEBUG_BITSTREAM 4 +#define FF_DEBUG_MB_TYPE 8 +#define FF_DEBUG_QP 16 +#if FF_API_DEBUG_MV +/** + * @deprecated this option does nothing + */ +#define FF_DEBUG_MV 32 +#endif +#define FF_DEBUG_DCT_COEFF 0x00000040 +#define FF_DEBUG_SKIP 0x00000080 +#define FF_DEBUG_STARTCODE 0x00000100 +#define FF_DEBUG_ER 0x00000400 +#define FF_DEBUG_MMCO 0x00000800 +#define FF_DEBUG_BUGS 0x00001000 +#if FF_API_DEBUG_MV +#define FF_DEBUG_VIS_QP 0x00002000 +#define FF_DEBUG_VIS_MB_TYPE 0x00004000 +#endif +#define FF_DEBUG_BUFFERS 0x00008000 +#define FF_DEBUG_THREADS 0x00010000 +#define FF_DEBUG_GREEN_MD 0x00800000 +#define FF_DEBUG_NOMC 0x01000000 + +#if FF_API_DEBUG_MV + /** + * debug + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug_mv; +#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 // visualize forward predicted MVs of P-frames +#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 // visualize forward predicted MVs of B-frames +#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 // visualize backward predicted MVs of B-frames +#endif + + /** + * Error recognition; may misdetect some more or less valid parts as errors. + * - encoding: unused + * - decoding: Set by user. + */ + int err_recognition; + +/** + * Verify checksums embedded in the bitstream (could be of either encoded or + * decoded data, depending on the codec) and print an error message on mismatch. + * If AV_EF_EXPLODE is also set, a mismatching checksum will result in the + * decoder returning an error. 
+ */ +#define AV_EF_CRCCHECK (1<<0) +#define AV_EF_BITSTREAM (1<<1) ///< detect bitstream specification deviations +#define AV_EF_BUFFER (1<<2) ///< detect improper bitstream length +#define AV_EF_EXPLODE (1<<3) ///< abort decoding on minor error detection + +#define AV_EF_IGNORE_ERR (1<<15) ///< ignore errors and continue +#define AV_EF_CAREFUL (1<<16) ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors +#define AV_EF_COMPLIANT (1<<17) ///< consider all spec non compliances as errors +#define AV_EF_AGGRESSIVE (1<<18) ///< consider things that a sane encoder should not do as an error + + + /** + * opaque 64-bit number (generally a PTS) that will be reordered and + * output in AVFrame.reordered_opaque + * - encoding: unused + * - decoding: Set by user. + */ + int64_t reordered_opaque; + + /** + * Hardware accelerator in use + * - encoding: unused. + * - decoding: Set by libavcodec + */ + const struct AVHWAccel *hwaccel; + + /** + * Hardware accelerator context. + * For some hardware accelerators, a global context needs to be + * provided by the user. In that case, this holds display-dependent + * data FFmpeg cannot instantiate itself. Please refer to the + * FFmpeg HW accelerator documentation to know how to fill this + * is. e.g. for VA API, this is a struct vaapi_context. + * - encoding: unused + * - decoding: Set by user + */ + void *hwaccel_context; + + /** + * error + * - encoding: Set by libavcodec if flags & AV_CODEC_FLAG_PSNR. + * - decoding: unused + */ + uint64_t error[AV_NUM_DATA_POINTERS]; + + /** + * DCT algorithm, see FF_DCT_* below + * - encoding: Set by user. + * - decoding: unused + */ + int dct_algo; +#define FF_DCT_AUTO 0 +#define FF_DCT_FASTINT 1 +#define FF_DCT_INT 2 +#define FF_DCT_MMX 3 +#define FF_DCT_ALTIVEC 5 +#define FF_DCT_FAAN 6 + + /** + * IDCT algorithm, see FF_IDCT_* below. + * - encoding: Set by user. + * - decoding: Set by user. 
+ */ + int idct_algo; +#define FF_IDCT_AUTO 0 +#define FF_IDCT_INT 1 +#define FF_IDCT_SIMPLE 2 +#define FF_IDCT_SIMPLEMMX 3 +#define FF_IDCT_ARM 7 +#define FF_IDCT_ALTIVEC 8 +#define FF_IDCT_SIMPLEARM 10 +#define FF_IDCT_XVID 14 +#define FF_IDCT_SIMPLEARMV5TE 16 +#define FF_IDCT_SIMPLEARMV6 17 +#define FF_IDCT_FAAN 20 +#define FF_IDCT_SIMPLENEON 22 +#define FF_IDCT_NONE 24 /* Used by XvMC to extract IDCT coefficients with FF_IDCT_PERM_NONE */ +#define FF_IDCT_SIMPLEAUTO 128 + + /** + * bits per sample/pixel from the demuxer (needed for huffyuv). + * - encoding: Set by libavcodec. + * - decoding: Set by user. + */ + int bits_per_coded_sample; + + /** + * Bits per sample/pixel of internal libavcodec pixel/sample format. + * - encoding: set by user. + * - decoding: set by libavcodec. + */ + int bits_per_raw_sample; + +#if FF_API_LOWRES + /** + * low resolution decoding, 1-> 1/2 size, 2->1/4 size + * - encoding: unused + * - decoding: Set by user. + */ + int lowres; +#endif + +#if FF_API_CODED_FRAME + /** + * the picture in the bitstream + * - encoding: Set by libavcodec. + * - decoding: unused + * + * @deprecated use the quality factor packet side data instead + */ + attribute_deprecated AVFrame *coded_frame; +#endif + + /** + * thread count + * is used to decide how many independent tasks should be passed to execute() + * - encoding: Set by user. + * - decoding: Set by user. + */ + int thread_count; + + /** + * Which multithreading methods to use. + * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread, + * so clients which cannot provide future frames should not use it. + * + * - encoding: Set by user, otherwise the default is used. + * - decoding: Set by user, otherwise the default is used. + */ + int thread_type; +#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once +#define FF_THREAD_SLICE 2 ///< Decode more than one part of a single frame at once + + /** + * Which multithreading methods are in use by the codec. 
+ * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int active_thread_type; + + /** + * Set by the client if its custom get_buffer() callback can be called + * synchronously from another thread, which allows faster multithreaded decoding. + * draw_horiz_band() will be called from other threads regardless of this setting. + * Ignored if the default get_buffer() is used. + * - encoding: Set by user. + * - decoding: Set by user. + */ + int thread_safe_callbacks; + + /** + * The codec may call this to execute several independent things. + * It will return only after finishing all tasks. + * The user may replace this with some multithreaded implementation, + * the default implementation will execute the parts serially. + * @param count the number of things to execute + * - encoding: Set by libavcodec, user can override. + * - decoding: Set by libavcodec, user can override. + */ + int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size); + + /** + * The codec may call this to execute several independent things. + * It will return only after finishing all tasks. + * The user may replace this with some multithreaded implementation, + * the default implementation will execute the parts serially. + * Also see avcodec_thread_init and e.g. the --enable-pthread configure option. + * @param c context passed also to func + * @param count the number of things to execute + * @param arg2 argument passed unchanged to func + * @param ret return values of executed functions, must have space for "count" values. May be NULL. + * @param func function that will be called count times, with jobnr from 0 to count-1. + * threadnr will be in the range 0 to c->thread_count-1 < MAX_THREADS and so that no + * two instances of func executing at the same time will have the same threadnr. 
+ * @return always 0 currently, but code should handle a future improvement where when any call to func + * returns < 0 no further calls to func may be done and < 0 is returned. + * - encoding: Set by libavcodec, user can override. + * - decoding: Set by libavcodec, user can override. + */ + int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count); + + /** + * noise vs. sse weight for the nsse comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int nsse_weight; + + /** + * profile + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int profile; +#define FF_PROFILE_UNKNOWN -99 +#define FF_PROFILE_RESERVED -100 + +#define FF_PROFILE_AAC_MAIN 0 +#define FF_PROFILE_AAC_LOW 1 +#define FF_PROFILE_AAC_SSR 2 +#define FF_PROFILE_AAC_LTP 3 +#define FF_PROFILE_AAC_HE 4 +#define FF_PROFILE_AAC_HE_V2 28 +#define FF_PROFILE_AAC_LD 22 +#define FF_PROFILE_AAC_ELD 38 +#define FF_PROFILE_MPEG2_AAC_LOW 128 +#define FF_PROFILE_MPEG2_AAC_HE 131 + +#define FF_PROFILE_DNXHD 0 +#define FF_PROFILE_DNXHR_LB 1 +#define FF_PROFILE_DNXHR_SQ 2 +#define FF_PROFILE_DNXHR_HQ 3 +#define FF_PROFILE_DNXHR_HQX 4 +#define FF_PROFILE_DNXHR_444 5 + +#define FF_PROFILE_DTS 20 +#define FF_PROFILE_DTS_ES 30 +#define FF_PROFILE_DTS_96_24 40 +#define FF_PROFILE_DTS_HD_HRA 50 +#define FF_PROFILE_DTS_HD_MA 60 +#define FF_PROFILE_DTS_EXPRESS 70 + +#define FF_PROFILE_MPEG2_422 0 +#define FF_PROFILE_MPEG2_HIGH 1 +#define FF_PROFILE_MPEG2_SS 2 +#define FF_PROFILE_MPEG2_SNR_SCALABLE 3 +#define FF_PROFILE_MPEG2_MAIN 4 +#define FF_PROFILE_MPEG2_SIMPLE 5 + +#define FF_PROFILE_H264_CONSTRAINED (1<<9) // 8+1; constraint_set1_flag +#define FF_PROFILE_H264_INTRA (1<<11) // 8+3; constraint_set3_flag + +#define FF_PROFILE_H264_BASELINE 66 +#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED) +#define FF_PROFILE_H264_MAIN 77 +#define FF_PROFILE_H264_EXTENDED 88 
+#define FF_PROFILE_H264_HIGH 100 +#define FF_PROFILE_H264_HIGH_10 110 +#define FF_PROFILE_H264_HIGH_10_INTRA (110|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_MULTIVIEW_HIGH 118 +#define FF_PROFILE_H264_HIGH_422 122 +#define FF_PROFILE_H264_HIGH_422_INTRA (122|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_STEREO_HIGH 128 +#define FF_PROFILE_H264_HIGH_444 144 +#define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244 +#define FF_PROFILE_H264_HIGH_444_INTRA (244|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_CAVLC_444 44 + +#define FF_PROFILE_VC1_SIMPLE 0 +#define FF_PROFILE_VC1_MAIN 1 +#define FF_PROFILE_VC1_COMPLEX 2 +#define FF_PROFILE_VC1_ADVANCED 3 + +#define FF_PROFILE_MPEG4_SIMPLE 0 +#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1 +#define FF_PROFILE_MPEG4_CORE 2 +#define FF_PROFILE_MPEG4_MAIN 3 +#define FF_PROFILE_MPEG4_N_BIT 4 +#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5 +#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6 +#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7 +#define FF_PROFILE_MPEG4_HYBRID 8 +#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9 +#define FF_PROFILE_MPEG4_CORE_SCALABLE 10 +#define FF_PROFILE_MPEG4_ADVANCED_CODING 11 +#define FF_PROFILE_MPEG4_ADVANCED_CORE 12 +#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13 +#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14 +#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15 + +#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0 1 +#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1 2 +#define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION 32768 +#define FF_PROFILE_JPEG2000_DCINEMA_2K 3 +#define FF_PROFILE_JPEG2000_DCINEMA_4K 4 + +#define FF_PROFILE_VP9_0 0 +#define FF_PROFILE_VP9_1 1 +#define FF_PROFILE_VP9_2 2 +#define FF_PROFILE_VP9_3 3 + +#define FF_PROFILE_HEVC_MAIN 1 +#define FF_PROFILE_HEVC_MAIN_10 2 +#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3 +#define FF_PROFILE_HEVC_REXT 4 + +#define FF_PROFILE_AV1_MAIN 0 +#define FF_PROFILE_AV1_HIGH 1 +#define FF_PROFILE_AV1_PROFESSIONAL 2 + +#define 
FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT 0xc0 +#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT 0xc1 +#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT 0xc2 +#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS 0xc3 +#define FF_PROFILE_MJPEG_JPEG_LS 0xf7 + +#define FF_PROFILE_SBC_MSBC 1 + +#define FF_PROFILE_AVS3_MAIN 1 +#define FF_PROFILE_AVS3_MAIN_10 2 + + /** + * level + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int level; +#define FF_LEVEL_UNKNOWN -99 + + /** + * Skip loop filtering for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_loop_filter; + + /** + * Skip IDCT/dequantization for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_idct; + + /** + * Skip decoding for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_frame; + + /** + * Header containing style information for text subtitles. + * For SUBTITLE_ASS subtitle type, it should contain the whole ASS + * [Script Info] and [V4+ Styles] section, plus the [Events] line and + * the Format line following. It shouldn't include any Dialogue line. + * - encoding: Set/allocated/freed by user (before avcodec_open2()) + * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2()) + */ + uint8_t *subtitle_header; + int subtitle_header_size; + +#if FF_API_VBV_DELAY + /** + * VBV delay coded in the last frame (in periods of a 27 MHz clock). + * Used for compliant TS muxing. + * - encoding: Set by libavcodec. + * - decoding: unused. + * @deprecated this value is now exported as a part of + * AV_PKT_DATA_CPB_PROPERTIES packet side data + */ + attribute_deprecated + uint64_t vbv_delay; +#endif + +#if FF_API_SIDEDATA_ONLY_PKT + /** + * Encoding only and set by default. Allow encoders to output packets + * that do not contain any encoded data, only side data. + * + * Some encoders need to output such packets, e.g. 
to update some stream + * parameters at the end of encoding. + * + * @deprecated this field disables the default behaviour and + * it is kept only for compatibility. + */ + attribute_deprecated + int side_data_only_packets; +#endif + + /** + * Audio only. The number of "priming" samples (padding) inserted by the + * encoder at the beginning of the audio. I.e. this number of leading + * decoded samples must be discarded by the caller to get the original audio + * without leading padding. + * + * - decoding: unused + * - encoding: Set by libavcodec. The timestamps on the output packets are + * adjusted by the encoder so that they always refer to the + * first sample of the data actually contained in the packet, + * including any added padding. E.g. if the timebase is + * 1/samplerate and the timestamp of the first input sample is + * 0, the timestamp of the first output packet will be + * -initial_padding. + */ + int initial_padding; + + /** + * - decoding: For codecs that store a framerate value in the compressed + * bitstream, the decoder may export it here. { 0, 1} when + * unknown. + * - encoding: May be used to signal the framerate of CFR content to an + * encoder. + */ + AVRational framerate; + + /** + * Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx. + * - encoding: unused. + * - decoding: Set by libavcodec before calling get_format() + */ + enum AVPixelFormat sw_pix_fmt; + + /** + * Timebase in which pkt_dts/pts and AVPacket.dts/pts are. + * - encoding unused. + * - decoding set by user. + */ + AVRational pkt_timebase; + + /** + * AVCodecDescriptor + * - encoding: unused. + * - decoding: set by libavcodec. + */ + const AVCodecDescriptor *codec_descriptor; + +#if !FF_API_LOWRES + /** + * low resolution decoding, 1-> 1/2 size, 2->1/4 size + * - encoding: unused + * - decoding: Set by user. + */ + int lowres; +#endif + + /** + * Current statistics for PTS correction. 
+ * - decoding: maintained and used by libavcodec, not intended to be used by user apps + * - encoding: unused + */ + int64_t pts_correction_num_faulty_pts; /// Number of incorrect PTS values so far + int64_t pts_correction_num_faulty_dts; /// Number of incorrect DTS values so far + int64_t pts_correction_last_pts; /// PTS of the last frame + int64_t pts_correction_last_dts; /// DTS of the last frame + + /** + * Character encoding of the input subtitles file. + * - decoding: set by user + * - encoding: unused + */ + char *sub_charenc; + + /** + * Subtitles character encoding mode. Formats or codecs might be adjusting + * this setting (if they are doing the conversion themselves for instance). + * - decoding: set by libavcodec + * - encoding: unused + */ + int sub_charenc_mode; +#define FF_SUB_CHARENC_MODE_DO_NOTHING -1 ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance) +#define FF_SUB_CHARENC_MODE_AUTOMATIC 0 ///< libavcodec will select the mode itself +#define FF_SUB_CHARENC_MODE_PRE_DECODER 1 ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv +#define FF_SUB_CHARENC_MODE_IGNORE 2 ///< neither convert the subtitles, nor check them for valid UTF-8 + + /** + * Skip processing alpha if supported by codec. + * Note that if the format uses pre-multiplied alpha (common with VP6, + * and recommended due to better video quality/compression) + * the image will look as if alpha-blended onto a black background. + * However for formats that do not use pre-multiplied alpha + * there might be serious artefacts (though e.g. libswscale currently + * assumes pre-multiplied alpha anyway). 
+ * + * - decoding: set by user + * - encoding: unused + */ + int skip_alpha; + + /** + * Number of samples to skip after a discontinuity + * - decoding: unused + * - encoding: set by libavcodec + */ + int seek_preroll; + +#if !FF_API_DEBUG_MV + /** + * debug motion vectors + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug_mv; +#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames +#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames +#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames +#endif + + /** + * custom intra quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: unused. + */ + uint16_t *chroma_intra_matrix; + + /** + * dump format separator. + * can be ", " or "\n " or anything else + * - encoding: Set by user. + * - decoding: Set by user. + */ + uint8_t *dump_separator; + + /** + * ',' separated list of allowed decoders. + * If NULL then all are allowed + * - encoding: unused + * - decoding: set by user + */ + char *codec_whitelist; + + /** + * Properties of the stream that gets decoded + * - encoding: unused + * - decoding: set by libavcodec + */ + unsigned properties; +#define FF_CODEC_PROPERTY_LOSSLESS 0x00000001 +#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS 0x00000002 + + /** + * Additional data associated with the entire coded stream. + * + * - decoding: unused + * - encoding: may be set by libavcodec after avcodec_open2(). + */ + AVPacketSideData *coded_side_data; + int nb_coded_side_data; + + /** + * A reference to the AVHWFramesContext describing the input (for encoding) + * or output (decoding) frames. The reference is set by the caller and + * afterwards owned (and freed) by libavcodec - it should never be read by + * the caller after being set. + * + * - decoding: This field should be set by the caller from the get_format() + * callback. 
The previous reference (if any) will always be + * unreffed by libavcodec before the get_format() call. + * + * If the default get_buffer2() is used with a hwaccel pixel + * format, then this AVHWFramesContext will be used for + * allocating the frame buffers. + * + * - encoding: For hardware encoders configured to use a hwaccel pixel + * format, this field should be set by the caller to a reference + * to the AVHWFramesContext describing input frames. + * AVHWFramesContext.format must be equal to + * AVCodecContext.pix_fmt. + * + * This field should be set before avcodec_open2() is called. + */ + AVBufferRef *hw_frames_ctx; + + /** + * Control the form of AVSubtitle.rects[N]->ass + * - decoding: set by user + * - encoding: unused + */ + int sub_text_format; +#define FF_SUB_TEXT_FMT_ASS 0 +#if FF_API_ASS_TIMING +#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS 1 +#endif + + /** + * Audio only. The amount of padding (in samples) appended by the encoder to + * the end of the audio. I.e. this number of decoded samples must be + * discarded by the caller from the end of the stream to get the original + * audio without any trailing padding. + * + * - decoding: unused + * - encoding: unused + */ + int trailing_padding; + + /** + * The number of pixels per image to maximally accept. + * + * - decoding: set by user + * - encoding: set by user + */ + int64_t max_pixels; + + /** + * A reference to the AVHWDeviceContext describing the device which will + * be used by a hardware encoder/decoder. The reference is set by the + * caller and afterwards owned (and freed) by libavcodec. + * + * This should be used if either the codec device does not require + * hardware frames or any that are used are to be allocated internally by + * libavcodec. If the user wishes to supply any of the frames used as + * encoder input or decoder output then hw_frames_ctx should be used + * instead. 
When hw_frames_ctx is set in get_format() for a decoder, this + * field will be ignored while decoding the associated stream segment, but + * may again be used on a following one after another get_format() call. + * + * For both encoders and decoders this field should be set before + * avcodec_open2() is called and must not be written to thereafter. + * + * Note that some decoders may require this field to be set initially in + * order to support hw_frames_ctx at all - in that case, all frames + * contexts used must be created on the same device. + */ + AVBufferRef *hw_device_ctx; + + /** + * Bit set of AV_HWACCEL_FLAG_* flags, which affect hardware accelerated + * decoding (if active). + * - encoding: unused + * - decoding: Set by user (either before avcodec_open2(), or in the + * AVCodecContext.get_format callback) + */ + int hwaccel_flags; + + /** + * Video decoding only. Certain video codecs support cropping, meaning that + * only a sub-rectangle of the decoded frame is intended for display. This + * option controls how cropping is handled by libavcodec. + * + * When set to 1 (the default), libavcodec will apply cropping internally. + * I.e. it will modify the output frame width/height fields and offset the + * data pointers (only by as much as possible while preserving alignment, or + * by the full amount if the AV_CODEC_FLAG_UNALIGNED flag is set) so that + * the frames output by the decoder refer only to the cropped area. The + * crop_* fields of the output frames will be zero. + * + * When set to 0, the width/height fields of the output frames will be set + * to the coded dimensions and the crop_* fields will describe the cropping + * rectangle. Applying the cropping is left to the caller. + * + * @warning When hardware acceleration with opaque output frames is used, + * libavcodec is unable to apply cropping from the top/left border. 
+ * + * @note when this option is set to zero, the width/height fields of the + * AVCodecContext and output AVFrames have different meanings. The codec + * context fields store display dimensions (with the coded dimensions in + * coded_width/height), while the frame fields store the coded dimensions + * (with the display dimensions being determined by the crop_* fields). + */ + int apply_cropping; + + /* + * Video decoding only. Sets the number of extra hardware frames which + * the decoder will allocate for use by the caller. This must be set + * before avcodec_open2() is called. + * + * Some hardware decoders require all frames that they will use for + * output to be defined in advance before decoding starts. For such + * decoders, the hardware frame pool must therefore be of a fixed size. + * The extra frames set here are on top of any number that the decoder + * needs internally in order to operate normally (for example, frames + * used as reference pictures). + */ + int extra_hw_frames; +} AVCodecContext; + +#if FF_API_CODEC_GET_SET +/** + * Accessors for some AVCodecContext fields. These used to be provided for ABI + * compatibility, and do not need to be used anymore. 
+ */ +attribute_deprecated +AVRational av_codec_get_pkt_timebase (const AVCodecContext *avctx); +attribute_deprecated +void av_codec_set_pkt_timebase (AVCodecContext *avctx, AVRational val); + +attribute_deprecated +const AVCodecDescriptor *av_codec_get_codec_descriptor(const AVCodecContext *avctx); +attribute_deprecated +void av_codec_set_codec_descriptor(AVCodecContext *avctx, const AVCodecDescriptor *desc); + +attribute_deprecated +unsigned av_codec_get_codec_properties(const AVCodecContext *avctx); + +#if FF_API_LOWRES +attribute_deprecated +int av_codec_get_lowres(const AVCodecContext *avctx); +attribute_deprecated +void av_codec_set_lowres(AVCodecContext *avctx, int val); +#endif + +attribute_deprecated +int av_codec_get_seek_preroll(const AVCodecContext *avctx); +attribute_deprecated +void av_codec_set_seek_preroll(AVCodecContext *avctx, int val); + +attribute_deprecated +uint16_t *av_codec_get_chroma_intra_matrix(const AVCodecContext *avctx); +attribute_deprecated +void av_codec_set_chroma_intra_matrix(AVCodecContext *avctx, uint16_t *val); +#endif + +/** + * AVProfile. + */ +typedef struct AVProfile { + int profile; + const char *name; ///< short name for the profile +} AVProfile; + +enum { + /** + * The codec supports this format via the hw_device_ctx interface. + * + * When selecting this format, AVCodecContext.hw_device_ctx should + * have been set to a device of the specified type before calling + * avcodec_open2(). + */ + AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX = 0x01, + /** + * The codec supports this format via the hw_frames_ctx interface. + * + * When selecting this format for a decoder, + * AVCodecContext.hw_frames_ctx should be set to a suitable frames + * context inside the get_format() callback. The frames context + * must have been created on a device of the specified type. + */ + AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX = 0x02, + /** + * The codec supports this format by some internal method. 
+ * + * This format can be selected without any additional configuration - + * no device or frames context is required. + */ + AV_CODEC_HW_CONFIG_METHOD_INTERNAL = 0x04, + /** + * The codec supports this format by some ad-hoc method. + * + * Additional settings and/or function calls are required. See the + * codec-specific documentation for details. (Methods requiring + * this sort of configuration are deprecated and others should be + * used in preference.) + */ + AV_CODEC_HW_CONFIG_METHOD_AD_HOC = 0x08, +}; + +typedef struct AVCodecHWConfig { + /** + * A hardware pixel format which the codec can use. + */ + enum AVPixelFormat pix_fmt; + /** + * Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible + * setup methods which can be used with this configuration. + */ + int methods; + /** + * The device type associated with the configuration. + * + * Must be set for AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX and + * AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX, otherwise unused. + */ + enum AVHWDeviceType device_type; +} AVCodecHWConfig; + +typedef struct AVCodecDefault AVCodecDefault; + +struct AVSubtitle; + +/** + * AVCodec. + */ +typedef struct AVCodec { + /** + * Name of the codec implementation. + * The name is globally unique among encoders and among decoders (but an + * encoder and a decoder can share the same name). + * This is the primary way to find a codec from the user perspective. + */ + const char *name; + /** + * Descriptive name for the codec, meant to be more human readable than name. + * You should use the NULL_IF_CONFIG_SMALL() macro to define it. + */ + const char *long_name; + enum AVMediaType type; + enum AVCodecID id; + /** + * Codec capabilities. 
+ * see AV_CODEC_CAP_* + */ + int capabilities; + const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0} + const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1 + const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0 + const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1 + const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0 + uint8_t max_lowres; ///< maximum value for lowres supported by the decoder + const AVClass *priv_class; ///< AVClass for the private context + const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN} + + /** + * Group name of the codec implementation. + * This is a short symbolic name of the wrapper backing this codec. A + * wrapper uses some kind of external implementation for the codec, such + * as an external library, or a codec implementation provided by the OS or + * the hardware. + * If this field is NULL, this is a builtin, libavcodec native codec. + * If non-NULL, this will be the suffix in AVCodec.name in most cases + * (usually AVCodec.name will be of the form "<codec_name>_<wrapper_name>"). + */ + const char *wrapper_name; + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavcodec and can be changed and + * removed at will. + * New public fields should be added right above. 
+ ***************************************************************** + */ + int priv_data_size; + struct AVCodec *next; + /** + * @name Frame-level threading support functions + * @{ + */ + /** + * If defined, called on thread contexts when they are created. + * If the codec allocates writable tables in init(), re-allocate them here. + * priv_data will be set to a copy of the original. + */ + int (*init_thread_copy)(AVCodecContext *); + /** + * Copy necessary context variables from a previous thread context to the current one. + * If not defined, the next thread will start automatically; otherwise, the codec + * must call liteav_ff_thread_finish_setup(). + * + * dst and src will (rarely) point to the same context, in which case memcpy should be skipped. + */ + int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src); + /** @} */ + + /** + * Private codec-specific defaults. + */ + const AVCodecDefault *defaults; + + /** + * Initialize codec static data, called from liteav_avcodec_register(). + * + * This is not intended for time consuming operations as it is + * run for every codec regardless of that codec being used. + */ + void (*init_static_data)(struct AVCodec *codec); + + int (*init)(AVCodecContext *); + int (*encode_sub)(AVCodecContext *, uint8_t *buf, int buf_size, + const struct AVSubtitle *sub); + /** + * Encode data to an AVPacket. + * + * @param avctx codec context + * @param avpkt output AVPacket (may contain a user-provided buffer) + * @param[in] frame AVFrame containing the raw data to be encoded + * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a + * non-empty packet was returned in avpkt. 
+ * @return 0 on success, negative error code on failure + */ + int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, + int *got_packet_ptr); + int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt); + int (*close)(AVCodecContext *); + /** + * Encode API with decoupled packet/frame dataflow. The API is the + * same as the avcodec_ prefixed APIs (liteav_avcodec_send_frame() etc.), except + * that: + * - never called if the codec is closed or the wrong type, + * - if AV_CODEC_CAP_DELAY is not set, drain frames are never sent, + * - only one drain frame is ever passed down, + */ + int (*send_frame)(AVCodecContext *avctx, const AVFrame *frame); + int (*receive_packet)(AVCodecContext *avctx, AVPacket *avpkt); + + /** + * Decode API with decoupled packet/frame dataflow. This function is called + * to get one output frame. It should call liteav_ff_decode_get_packet() to obtain + * input data. + */ + int (*receive_frame)(AVCodecContext *avctx, AVFrame *frame); + /** + * Flush buffers. + * Will be called when seeking + */ + void (*flush)(AVCodecContext *); + /** + * Internal codec capabilities. + * See FF_CODEC_CAP_* in internal.h + */ + int caps_internal; + + /** + * Decoding only, a comma-separated list of bitstream filters to apply to + * packets before decoding. + */ + const char *bsfs; + + /** + * Array of pointers to hardware configurations supported by the codec, + * or NULL if no hardware supported. The array is terminated by a NULL + * pointer. + * + * The user can only access this field via avcodec_get_hw_config(). + */ + const struct AVCodecHWConfigInternal **hw_configs; +} AVCodec; + +#if FF_API_CODEC_GET_SET +attribute_deprecated +int av_codec_get_max_lowres(const AVCodec *codec); +#endif + +struct MpegEncContext; + +/** + * Retrieve supported hardware configurations for a codec. + * + * Values of index from zero to some maximum return the indexed configuration + * descriptor; all other values return NULL. 
If the codec does not support + * any hardware configurations then it will always return NULL. + */ +const AVCodecHWConfig *avcodec_get_hw_config(const AVCodec *codec, int index); + +/** + * @defgroup lavc_hwaccel AVHWAccel + * + * @note Nothing in this structure should be accessed by the user. At some + * point in future it will not be externally visible at all. + * + * @{ + */ +typedef struct AVHWAccel { + /** + * Name of the hardware accelerated codec. + * The name is globally unique among encoders and among decoders (but an + * encoder and a decoder can share the same name). + */ + const char *name; + + /** + * Type of codec implemented by the hardware accelerator. + * + * See AVMEDIA_TYPE_xxx + */ + enum AVMediaType type; + + /** + * Codec implemented by the hardware accelerator. + * + * See AV_CODEC_ID_xxx + */ + enum AVCodecID id; + + /** + * Supported pixel format. + * + * Only hardware accelerated formats are supported here. + */ + enum AVPixelFormat pix_fmt; + + /** + * Hardware accelerated codec capabilities. + * see AV_HWACCEL_CODEC_CAP_* + */ + int capabilities; + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavcodec and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + /** + * Allocate a custom buffer + */ + int (*alloc_frame)(AVCodecContext *avctx, AVFrame *frame); + + /** + * Called at the beginning of each frame or field picture. + * + * Meaningful frame information (codec specific) is guaranteed to + * be parsed at this point. This function is mandatory. + * + * Note that buf can be NULL along with buf_size set to 0. + * Otherwise, this means the whole frame is available at this point. 
+ * + * @param avctx the codec context + * @param buf the frame data buffer base + * @param buf_size the size of the frame in bytes + * @return zero if successful, a negative value otherwise + */ + int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); + + /** + * Callback for parameter data (SPS/PPS/VPS etc). + * + * Useful for hardware decoders which keep persistent state about the + * video parameters, and need to receive any changes to update that state. + * + * @param avctx the codec context + * @param type the nal unit type + * @param buf the nal unit data buffer + * @param buf_size the size of the nal unit in bytes + * @return zero if successful, a negative value otherwise + */ + int (*decode_params)(AVCodecContext *avctx, int type, const uint8_t *buf, uint32_t buf_size); + + /** + * Callback for each slice. + * + * Meaningful slice information (codec specific) is guaranteed to + * be parsed at this point. This function is mandatory. + * The only exception is XvMC, that works on MB level. + * + * @param avctx the codec context + * @param buf the slice data buffer base + * @param buf_size the size of the slice in bytes + * @return zero if successful, a negative value otherwise + */ + int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); + + /** + * Called at the end of each frame or field picture. + * + * The whole picture is parsed at this point and can now be sent + * to the hardware accelerator. This function is mandatory. + * + * @param avctx the codec context + * @return zero if successful, a negative value otherwise + */ + int (*end_frame)(AVCodecContext *avctx); + + /** + * Size of per-frame hardware accelerator private data. + * + * Private data is allocated with liteav_av_mallocz() before + * AVCodecContext.get_buffer() and deallocated after + * AVCodecContext.release_buffer(). + */ + int frame_priv_data_size; + + /** + * Called for every Macroblock in a slice. 
+ * + * XvMC uses it to replace the liteav_ff_mpv_reconstruct_mb(). + * Instead of decoding to raw picture, MB parameters are + * stored in an array provided by the video driver. + * + * @param s the mpeg context + */ + void (*decode_mb)(struct MpegEncContext *s); + + /** + * Initialize the hwaccel private data. + * + * This will be called from liteav_ff_get_format(), after hwaccel and + * hwaccel_context are set and the hwaccel private data in AVCodecInternal + * is allocated. + */ + int (*init)(AVCodecContext *avctx); + + /** + * Uninitialize the hwaccel private data. + * + * This will be called from get_format() or avcodec_close(), after hwaccel + * and hwaccel_context are already uninitialized. + */ + int (*uninit)(AVCodecContext *avctx); + + /** + * Size of the private data to allocate in + * AVCodecInternal.hwaccel_priv_data. + */ + int priv_data_size; + + /** + * Internal hwaccel capabilities. + */ + int caps_internal; + + /** + * Fill the given hw_frames context with current codec parameters. Called + * from get_format. Refer to liteav_avcodec_get_hw_frames_parameters() for + * details. + * + * This CAN be called before AVHWAccel.init is called, and you must assume + * that avctx->hwaccel_priv_data is invalid. + */ + int (*frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx); +} AVHWAccel; + +/** + * HWAccel is experimental and is thus avoided in favor of non experimental + * codecs + */ +#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL 0x0200 + +/** + * Hardware acceleration should be used for decoding even if the codec level + * used is unknown or higher than the maximum supported level reported by the + * hardware driver. + * + * It's generally a good idea to pass this flag unless you have a specific + * reason not to, as hardware tends to under-report supported levels. 
+ */ +#define AV_HWACCEL_FLAG_IGNORE_LEVEL (1 << 0) + +/** + * Hardware acceleration can output YUV pixel formats with a different chroma + * sampling than 4:2:0 and/or other than 8 bits per component. + */ +#define AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH (1 << 1) + +/** + * Hardware acceleration should still be attempted for decoding when the + * codec profile does not match the reported capabilities of the hardware. + * + * For example, this can be used to try to decode baseline profile H.264 + * streams in hardware - it will often succeed, because many streams marked + * as baseline profile actually conform to constrained baseline profile. + * + * @warning If the stream is actually not supported then the behaviour is + * undefined, and may include returning entirely incorrect output + * while indicating success. + */ +#define AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH (1 << 2) + +/** + * @} + */ + +#if FF_API_AVPICTURE +/** + * @defgroup lavc_picture AVPicture + * + * Functions for working with AVPicture + * @{ + */ + +/** + * Picture data structure. + * + * Up to four components can be stored into it, the last component is + * alpha. + * @deprecated use AVFrame or imgutils functions instead + */ +typedef struct AVPicture { + attribute_deprecated + uint8_t *data[AV_NUM_DATA_POINTERS]; ///< pointers to the image data planes + attribute_deprecated + int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line +} AVPicture; + +/** + * @} + */ +#endif + +enum AVSubtitleType { + SUBTITLE_NONE, + + SUBTITLE_BITMAP, ///< A bitmap, pict will be set + + /** + * Plain text, the text field must be set by the decoder and is + * authoritative. ass and pict fields may contain approximations. + */ + SUBTITLE_TEXT, + + /** + * Formatted text, the ass field must be set by the decoder and is + * authoritative. pict and text fields may contain approximations. + */ + SUBTITLE_ASS, + + /** + * WebVTT rich text, the box field must be set by the decoder and is + * authoritative. 
pict and text fields are empty. + */ + SUBTITLE_WEBVTT, +}; + +#define AV_SUBTITLE_FLAG_FORCED 0x00000001 + +/** + * indicating that y of AVSubtitleRect is a line number. + * @see AVSubtitleBox + */ +#define AV_SUBTITLE_FLAG_LINE_NUMBER 0x00000002 + +/** + * indicating that WebVTT setting has "line" property + */ +#define AV_SUBTITLE_FLAG_LINE_SETTING 0x00000004 + +typedef struct AVTextStyle { + /** Family font names */ + char * font_name; /**< The name of the font */ + char * mono_font_name; /**< The name of the mono font */ + + uint16_t features; /**< Feature flags (means non default) */ + uint16_t style_flags; /**< Formatting style flags */ + + /* Font style */ + float font_relsize; /**< The font size in video height % */ + int font_size; /**< The font size in pixels */ + int font_color; /**< The color of the text 0xRRGGBB + (native endianness) */ + uint8_t font_alpha; /**< The transparency of the text.*/ + int spacing; /**< The spaceing between glyphs in pixels */ + + /* Outline */ + int outline_color; /**< The color of the outline 0xRRGGBB */ + uint8_t outline_alpha; /**< The transparency of the outline */ + int outline_width; /**< The width of the outline in pixels */ + + /* Shadow */ + int shadow_color; /**< The color of the shadow 0xRRGGBB */ + uint8_t shadow_alpha; /**< The transparency of the shadow. 
*/ + int shadow_width; /**< The width of the shadow in pixels */ + + /* Background (and karaoke) */ + int background_color;/**< The color of the background 0xRRGGBB */ + uint8_t background_alpha;/**< The transparency of the background */ + int karaoke_background_color;/**< Background color for karaoke 0xRRGGBB */ + uint8_t karaoke_background_alpha;/**< The transparency of the karaoke bg */ + + /* Line breaking */ + enum + { + STYLE_WRAP_DEFAULT = 0, /**< Breaks on whitespace or fallback on char */ + STYLE_WRAP_CHAR, /**< Breaks at character level only */ + STYLE_WRAP_NONE, /**< No line breaks (except explicit ones) */ + } wrapinfo; +} AVTextStyle; + +#define STYLE_ALPHA_OPAQUE 0xFF +#define STYLE_ALPHA_TRANSPARENT 0x00 + +/* Features flags for AVTextStyle features */ +#define STYLE_NO_DEFAULTS 0x0 +#define STYLE_FULLY_SET 0xFFFF +#define STYLE_HAS_FONT_COLOR (1 << 0) +#define STYLE_HAS_FONT_ALPHA (1 << 1) +#define STYLE_HAS_FLAGS (1 << 2) +#define STYLE_HAS_OUTLINE_COLOR (1 << 3) +#define STYLE_HAS_OUTLINE_ALPHA (1 << 4) +#define STYLE_HAS_SHADOW_COLOR (1 << 5) +#define STYLE_HAS_SHADOW_ALPHA (1 << 6) +#define STYLE_HAS_BACKGROUND_COLOR (1 << 7) +#define STYLE_HAS_BACKGROUND_ALPHA (1 << 8) +#define STYLE_HAS_K_BACKGROUND_COLOR (1 << 9) +#define STYLE_HAS_K_BACKGROUND_ALPHA (1 << 10) +#define STYLE_HAS_WRAP_INFO (1 << 11) + +/* Style flags for AVTextStyle style_flags */ +#define STYLE_BOLD (1 << 0) +#define STYLE_ITALIC (1 << 1) +#define STYLE_OUTLINE (1 << 2) +#define STYLE_SHADOW (1 << 3) +#define STYLE_BACKGROUND (1 << 4) +#define STYLE_UNDERLINE (1 << 5) +#define STYLE_STRIKEOUT (1 << 6) +#define STYLE_HALFWIDTH (1 << 7) +#define STYLE_MONOSPACED (1 << 8) +#define STYLE_DOUBLEWIDTH (1 << 9) +#define STYLE_BLINK_FOREGROUND (1 << 10) +#define STYLE_BLINK_BACKGROUND (1 << 11) + +#define STYLE_DEFAULT_FONT_SIZE 20 +#define STYLE_DEFAULT_REL_FONT_SIZE 6.25 + +/** + * subtitle alignment for positioning a piece of subtitle text. 
+ */ +enum AVSubtitleAlign { + AV_SUBTITLE_ALIGN_START, + + AV_SUBTITLE_ALIGN_CENTER, + + AV_SUBTITLE_ALIGN_END, + + AV_SUBTITLE_ALIGN_LEFT, + + AV_SUBTITLE_ALIGN_RIGHT, +}; + +enum AVWritingDirection { + AV_WRITING_DIRECTION_HORIZONTAL, + AV_WRITING_DIRECTION_VERTICAL_RL, // vertical, right to left + AV_WRITING_DIRECTION_VERTICAL_LR, // vertical, left to right +}; + +typedef struct AVTextSegment { + char *text; + AVTextStyle *style; + struct AVTextSegment *next; +} AVTextSegment; + +/** + * AVSubtitleBox is define to extent AVSubtitleRect + */ +typedef struct AVSubtitleBox { + /** + * (x,y,w,h), with y_align determine a box into which a piece of text is rendered. + * the meaning of (x,y,w,h) dependes on the writing direction. + * if writing direction is horizontal, x is offset from the left of the video viewport to the + * left side of the box, and y is the offset from the top of the video viewport(see y_align), + * and w is the box's horizontal size, and h currently unused . + * + * if writing direction is vertical, x is offset from the top of the video viewport to the + * top side of the box, and y is the offset from the right(for AV_WRITING_DIRECTION_VERTICAL_RL), + * or left of the video view port(for AV_WRITING_DIRECTION_VERTICAL_LR), and w is the box's + * vertical size, and h currently unused. + */ + float x; + float y; + float w; + float h; + + /** + * reference width of x, w (for horizontal direction), or y, h (for vertical direction). + * if reference width equals 1, then x, y, w, h is a ratio, except that the writing direction + * is vertical and y is a line number. so, when you calculate y dimension in case of vertical + * writing direction, you should firstly check whether y is line number(see AV_SUBTITLE_FLAG_LINE_NUMBER). 
+ */ + int ref_width; + + /** + * reference height of y, h (for horizontal direction), x, w (for vertical direction) + * if reference width equals 1, then x, y, w, h is a ratio, except that the writing direction + * is vertical and y is a line number. so, when you calculate y dimension in case of vertical + * writing direction, you should firstly check whether y is line number(see AV_SUBTITLE_FLAG_LINE_NUMBER). + */ + int ref_height; + /** + * the alignment of display box of a piece of text, depending on the writing direction. + * AV_SUBTITLE_ALIGN_START: + * the display box's top side(for horizontal writing direction), + * left side(for vertical and left-to-right writing direction), + * or right side(for vertical and right-to-left writing direction) is align at y. + * AV_SUBTITLE_ALIGN_CENTER: + * The display box is centered at y. + * AV_SUBTITLE_ALIGN_END: + * the display box's bottom side(for horizontal writing direction), + * right side(for vertical and left-to-right writing direction), + * or left side(for vertical and right-to-left writing direction) is align at y. 
+ */ + enum AVSubtitleAlign y_align; + + // the alignment of text within diplay box + enum AVSubtitleAlign text_align; + + // text_segments is an array of AVTextSegment, which represent a fragment of text with text style + AVTextSegment *text_segments; + + enum AVWritingDirection writing_direction; + /** + * @see AV_SUBTITLE_FLAG_LINE_NUMBER + */ + int flags; +} AVSubtitleBox; + +typedef struct AVSubtitleRect { + int x; ///< top left corner of pict, undefined when pict is not set + int y; ///< top left corner of pict, undefined when pict is not set + int w; ///< width of pict, undefined when pict is not set + int h; ///< height of pict, undefined when pict is not set + int nb_colors; ///< number of colors in pict, undefined when pict is not set + +#if FF_API_AVPICTURE + /** + * @deprecated unused + */ + attribute_deprecated + AVPicture pict; +#endif + /** + * data+linesize for the bitmap of this subtitle. + * Can be set for text/ass as well once they are rendered. + */ + uint8_t *data[4]; + int linesize[4]; + + enum AVSubtitleType type; + + char *text; ///< 0 terminated plain UTF-8 text + + /** + * 0 terminated ASS/SSA compatible event line. + * The presentation of this is unaffected by the other values in this + * struct. + */ + char *ass; + + int flags; + + //the len of subtitle text + int text_len; + + //the flags of unicode encode type; + int UnicodeFlags; + + // the subtitle box in which a piece of subtitle text is rendered; + AVSubtitleBox box; +} AVSubtitleRect; + +typedef struct AVSubtitle { + uint16_t format; /* 0 = graphics */ + uint32_t start_display_time; /* relative to packet pts, in ms */ + uint32_t end_display_time; /* relative to packet pts, in ms */ + unsigned num_rects; + AVSubtitleRect **rects; + int64_t pts; ///< Same as packet pts, in AV_TIME_BASE +} AVSubtitle; + +/** + * This struct describes the properties of an encoded stream. 
+ * + * sizeof(AVCodecParameters) is not a part of the public ABI, this struct must + * be allocated with avcodec_parameters_alloc() and freed with + * avcodec_parameters_free(). + */ +typedef struct AVCodecParameters { + /** + * General type of the encoded data. + */ + enum AVMediaType codec_type; + /** + * Specific type of the encoded data (the codec used). + */ + enum AVCodecID codec_id; + /** + * Additional information about the codec (corresponds to the AVI FOURCC). + */ + uint32_t codec_tag; + + /** + * Extra binary data needed for initializing the decoder, codec-dependent. + * + * Must be allocated with liteav_av_malloc() and will be freed by + * avcodec_parameters_free(). The allocated size of extradata must be at + * least extradata_size + AV_INPUT_BUFFER_PADDING_SIZE, with the padding + * bytes zeroed. + */ + uint8_t *extradata; + /** + * Size of the extradata content in bytes. + */ + int extradata_size; + + /** + * - video: the pixel format, the value corresponds to enum AVPixelFormat. + * - audio: the sample format, the value corresponds to enum AVSampleFormat. + */ + int format; + + /** + * The average bitrate of the encoded data (in bits per second). + */ + int64_t bit_rate; + + /** + * The number of bits per sample in the codedwords. + * + * This is basically the bitrate per sample. It is mandatory for a bunch of + * formats to actually decode them. It's the number of bits for one sample in + * the actual coded bitstream. + * + * This could be for example 4 for ADPCM + * For PCM formats this matches bits_per_raw_sample + * Can be 0 + */ + int bits_per_coded_sample; + + /** + * This is the number of valid bits in each output sample. If the + * sample format has more bits, the least significant bits are additional + * padding bits, which are always 0. Use right shifts to reduce the sample + * to its actual size. For example, audio formats with 24 bit samples will + * have bits_per_raw_sample set to 24, and format set to AV_SAMPLE_FMT_S32. 
+ * To get the original sample use "(int32_t)sample >> 8"." + * + * For ADPCM this might be 12 or 16 or similar + * Can be 0 + */ + int bits_per_raw_sample; + + /** + * Codec-specific bitstream restrictions that the stream conforms to. + */ + int profile; + int level; + + /** + * Video only. The dimensions of the video frame in pixels. + */ + int width; + int height; + + /** + * Video only. The aspect ratio (width / height) which a single pixel + * should have when displayed. + * + * When the aspect ratio is unknown / undefined, the numerator should be + * set to 0 (the denominator may have any value). + */ + AVRational sample_aspect_ratio; + + /** + * Video only. The order of the fields in interlaced video. + */ + enum AVFieldOrder field_order; + + /** + * Video only. Additional colorspace characteristics. + */ + enum AVColorRange color_range; + enum AVColorPrimaries color_primaries; + enum AVColorTransferCharacteristic color_trc; + enum AVColorSpace color_space; + enum AVChromaLocation chroma_location; + + /** + * Video only. Number of delayed frames. + */ + int video_delay; + + /** + * Audio only. The channel layout bitmask. May be 0 if the channel layout is + * unknown or unspecified, otherwise the number of bits set must be equal to + * the channels field. + */ + uint64_t channel_layout; + /** + * Audio only. The number of audio channels. + */ + int channels; + /** + * Audio only. The number of audio samples per second. + */ + int sample_rate; + /** + * Audio only. The number of bytes per coded audio frame, required by some + * formats. + * + * Corresponds to nBlockAlign in WAVEFORMATEX. + */ + int block_align; + /** + * Audio only. Audio frame size, if known. Required by some formats to be static. + */ + int frame_size; + + /** + * Audio only. The amount of padding (in samples) inserted by the encoder at + * the beginning of the audio. I.e. 
this number of leading decoded samples + * must be discarded by the caller to get the original audio without leading + * padding. + */ + int initial_padding; + /** + * Audio only. The amount of padding (in samples) appended by the encoder to + * the end of the audio. I.e. this number of decoded samples must be + * discarded by the caller from the end of the stream to get the original + * audio without any trailing padding. + */ + int trailing_padding; + /** + * Audio only. Number of samples to skip after a discontinuity. + */ + int seek_preroll; +} AVCodecParameters; + +/** + * Iterate over all registered codecs. + * + * @param opaque a pointer where libavcodec will store the iteration state. Must + * point to NULL to start the iteration. + * + * @return the next registered codec or NULL when the iteration is + * finished + */ +const AVCodec *liteav_av_codec_iterate(void **opaque); + +#if FF_API_NEXT +/** + * If c is NULL, returns the first registered codec, + * if c is non-NULL, returns the next registered codec after c, + * or NULL if c is the last one. + */ +attribute_deprecated +AVCodec *liteav_av_codec_next(const AVCodec *c); +#endif + +/** + * Return the LIBAVCODEC_VERSION_INT constant. + */ +unsigned avcodec_version(void); + +/** + * Return the libavcodec build-time configuration. + */ +const char *avcodec_configuration(void); + +/** + * Return the libavcodec license. + */ +const char *avcodec_license(void); + +#if FF_API_NEXT +/** + * Register the codec codec and initialize libavcodec. + * + * @warning either this function or liteav_avcodec_register_all() must be called + * before any other libavcodec functions. + * + * @see liteav_avcodec_register_all() + */ +attribute_deprecated +void liteav_avcodec_register(AVCodec *codec); + +/** + * Register all the codecs, parsers and bitstream filters which were enabled at + * configuration time. 
If you do not call this function you can select exactly + * which formats you want to support, by using the individual registration + * functions. + * + * @see liteav_avcodec_register + * @see liteav_av_register_codec_parser + * @see liteav_av_register_bitstream_filter + */ +attribute_deprecated +void liteav_avcodec_register_all(void); +#endif + +/** + * Allocate an AVCodecContext and set its fields to default values. The + * resulting struct should be freed with avcodec_free_context(). + * + * @param codec if non-NULL, allocate private data and initialize defaults + * for the given codec. It is illegal to then call avcodec_open2() + * with a different codec. + * If NULL, then the codec-specific defaults won't be initialized, + * which may result in suboptimal default settings (this is + * important mainly for encoders, e.g. libx264). + * + * @return An AVCodecContext filled with default values or NULL on failure. + */ +AVCodecContext *avcodec_alloc_context3(const AVCodec *codec); + +/** + * Free the codec context and everything associated with it and write NULL to + * the provided pointer. + */ +void avcodec_free_context(AVCodecContext **avctx); + +#if FF_API_GET_CONTEXT_DEFAULTS +/** + * @deprecated This function should not be used, as closing and opening a codec + * context multiple time is not supported. A new codec context should be + * allocated for each new use. + */ +int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec); +#endif + +/** + * Get the AVClass for AVCodecContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see liteav_av_opt_find(). + */ +const AVClass *avcodec_get_class(void); + +#if FF_API_COPY_CONTEXT +/** + * Get the AVClass for AVFrame. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see liteav_av_opt_find(). + */ +const AVClass *avcodec_get_frame_class(void); + +/** + * Get the AVClass for AVSubtitleRect. 
It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see liteav_av_opt_find(). + */ +const AVClass *avcodec_get_subtitle_rect_class(void); + +/** + * Copy the settings of the source AVCodecContext into the destination + * AVCodecContext. The resulting destination codec context will be + * unopened, i.e. you are required to call avcodec_open2() before you + * can use this AVCodecContext to decode/encode video/audio data. + * + * @param dest target codec context, should be initialized with + * avcodec_alloc_context3(NULL), but otherwise uninitialized + * @param src source codec context + * @return AVERROR() on error (e.g. memory allocation error), 0 on success + * + * @deprecated The semantics of this function are ill-defined and it should not + * be used. If you need to transfer the stream parameters from one codec context + * to another, use an intermediate AVCodecParameters instance and the + * avcodec_parameters_from_context() / avcodec_parameters_to_context() + * functions. + */ +attribute_deprecated +int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src); +#endif + +/** + * Allocate a new AVCodecParameters and set its fields to default values + * (unknown/invalid/0). The returned struct must be freed with + * avcodec_parameters_free(). + */ +AVCodecParameters *avcodec_parameters_alloc(void); + +/** + * Free an AVCodecParameters instance and everything associated with it and + * write NULL to the supplied pointer. + */ +void avcodec_parameters_free(AVCodecParameters **par); + +/** + * Copy the contents of src to dst. Any allocated fields in dst are freed and + * replaced with newly allocated duplicates of the corresponding fields in src. + * + * @return >= 0 on success, a negative AVERROR code on failure. + */ +int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src); + +/** + * Fill the parameters struct based on the values from the supplied codec + * context. 
Any allocated fields in par are freed and replaced with duplicates + * of the corresponding fields in codec. + * + * @return >= 0 on success, a negative AVERROR code on failure + */ +int avcodec_parameters_from_context(AVCodecParameters *par, + const AVCodecContext *codec); + +/** + * Fill the codec context based on the values from the supplied codec + * parameters. Any allocated fields in codec that have a corresponding field in + * par are freed and replaced with duplicates of the corresponding field in par. + * Fields in codec that do not have a counterpart in par are not touched. + * + * @return >= 0 on success, a negative AVERROR code on failure. + */ +int avcodec_parameters_to_context(AVCodecContext *codec, + const AVCodecParameters *par); + +/** + * Initialize the AVCodecContext to use the given AVCodec. Prior to using this + * function the context has to be allocated with avcodec_alloc_context3(). + * + * The functions liteav_avcodec_find_decoder_by_name(), liteav_avcodec_find_encoder_by_name(), + * liteav_avcodec_find_decoder() and liteav_avcodec_find_encoder() provide an easy way for + * retrieving a codec. + * + * @warning This function is not thread safe! + * + * @note Always call this function before using decoding routines (such as + * @ref liteav_avcodec_receive_frame()). + * + * @code + * liteav_avcodec_register_all(); + * liteav_av_dict_set(&opts, "b", "2.5M", 0); + * codec = liteav_avcodec_find_decoder(AV_CODEC_ID_H264); + * if (!codec) + * exit(1); + * + * context = avcodec_alloc_context3(codec); + * + * if (avcodec_open2(context, codec, opts) < 0) + * exit(1); + * @endcode + * + * @param avctx The context to initialize. + * @param codec The codec to open this context for. If a non-NULL codec has been + * previously passed to avcodec_alloc_context3() or + * for this context, then this parameter MUST be either NULL or + * equal to the previously passed codec. + * @param options A dictionary filled with AVCodecContext and codec-private options. 
+ * On return this object will be filled with options that were not found. + * + * @return zero on success, a negative value on error + * @see avcodec_alloc_context3(), liteav_avcodec_find_decoder(), liteav_avcodec_find_encoder(), + * liteav_av_dict_set(), liteav_av_opt_find(). + */ +int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options); + +/** + * Close a given AVCodecContext and free all the data associated with it + * (but not the AVCodecContext itself). + * + * Calling this function on an AVCodecContext that hasn't been opened will free + * the codec-specific data allocated in avcodec_alloc_context3() with a non-NULL + * codec. Subsequent calls will do nothing. + * + * @note Do not use this function. Use avcodec_free_context() to destroy a + * codec context (either open or closed). Opening and closing a codec context + * multiple times is not supported anymore -- use multiple codec contexts + * instead. + */ +int avcodec_close(AVCodecContext *avctx); + +/** + * Free all allocated data in the given subtitle struct. + * + * @param sub AVSubtitle to free. + */ +void avsubtitle_free(AVSubtitle *sub); + +/** + * @} + */ + +/** + * @addtogroup lavc_packet + * @{ + */ + +/** + * Allocate an AVPacket and set its fields to default values. The resulting + * struct must be freed using liteav_av_packet_free(). + * + * @return An AVPacket filled with default values or NULL on failure. + * + * @note this only allocates the AVPacket itself, not the data buffers. Those + * must be allocated through other means such as liteav_av_new_packet. + * + * @see liteav_av_new_packet + */ +AVPacket *liteav_av_packet_alloc(void); + +/** + * Create a new packet that references the same data as src. + * + * This is a shortcut for liteav_av_packet_alloc()+liteav_av_packet_ref(). + * + * @return newly created AVPacket on success, NULL on error. 
+ * + * @see liteav_av_packet_alloc + * @see liteav_av_packet_ref + */ +AVPacket *liteav_av_packet_clone(const AVPacket *src); + +/** + * Free the packet, if the packet is reference counted, it will be + * unreferenced first. + * + * @param pkt packet to be freed. The pointer will be set to NULL. + * @note passing NULL is a no-op. + */ +void liteav_av_packet_free(AVPacket **pkt); + +/** + * Initialize optional fields of a packet with default values. + * + * Note, this does not touch the data and size members, which have to be + * initialized separately. + * + * @param pkt packet + */ +void liteav_av_init_packet(AVPacket *pkt); + +/** + * Allocate the payload of a packet and initialize its fields with + * default values. + * + * @param pkt packet + * @param size wanted payload size + * @return 0 if OK, AVERROR_xxx otherwise + */ +int liteav_av_new_packet(AVPacket *pkt, int size); + +/** + * Reduce packet size, correctly zeroing padding + * + * @param pkt packet + * @param size new size + */ +void liteav_av_shrink_packet(AVPacket *pkt, int size); + +/** + * Increase packet size, correctly zeroing padding + * + * @param pkt packet + * @param grow_by number of bytes by which to increase the size of the packet + */ +int liteav_av_grow_packet(AVPacket *pkt, int grow_by); + +/** + * Initialize a reference-counted packet from liteav_av_malloc()ed data. + * + * @param pkt packet to be initialized. This function will set the data, size, + * buf and destruct fields, all others are left untouched. + * @param data Data allocated by liteav_av_malloc() to be used as packet data. If this + * function returns successfully, the data is owned by the underlying AVBuffer. + * The caller may not access the data through other means. + * @param size size of data in bytes, without the padding. I.e. the full buffer + * size is assumed to be size + AV_INPUT_BUFFER_PADDING_SIZE. 
+ * + * @return 0 on success, a negative AVERROR on error + */ +int liteav_av_packet_from_data(AVPacket *pkt, uint8_t *data, int size); + +#if FF_API_AVPACKET_OLD_API +/** + * @warning This is a hack - the packet memory allocation stuff is broken. The + * packet is allocated if it was not really allocated. + * + * @deprecated Use liteav_av_packet_ref or liteav_av_packet_make_refcounted + */ +attribute_deprecated +int liteav_av_dup_packet(AVPacket *pkt); +/** + * Copy packet, including contents + * + * @return 0 on success, negative AVERROR on fail + * + * @deprecated Use liteav_av_packet_ref + */ +attribute_deprecated +int liteav_av_copy_packet(AVPacket *dst, const AVPacket *src); + +/** + * Copy packet side data + * + * @return 0 on success, negative AVERROR on fail + * + * @deprecated Use liteav_av_packet_copy_props + */ +attribute_deprecated +int liteav_av_copy_packet_side_data(AVPacket *dst, const AVPacket *src); + +/** + * Free a packet. + * + * @deprecated Use liteav_av_packet_unref + * + * @param pkt packet to free + */ +attribute_deprecated +void liteav_av_free_packet(AVPacket *pkt); +#endif +/** + * Allocate new information of a packet. + * + * @param pkt packet + * @param type side information type + * @param size side information size + * @return pointer to fresh allocated data or NULL otherwise + */ +uint8_t* liteav_av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int size); + +/** + * Wrap an existing array as a packet side data. + * + * @param pkt packet + * @param type side information type + * @param data the side data array. It must be allocated with the liteav_av_malloc() + * family of functions. The ownership of the data is transferred to + * pkt. + * @param size side information size + * @return a non-negative number on success, a negative AVERROR code on + * failure. On failure, the packet is unchanged and the data remains + * owned by the caller. 
+ */ +int liteav_av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + uint8_t *data, size_t size); + +/** + * Shrink the already allocated side data buffer + * + * @param pkt packet + * @param type side information type + * @param size new side information size + * @return 0 on success, < 0 on failure + */ +int liteav_av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int size); + +/** + * Get side information from packet. + * + * @param pkt packet + * @param type desired side information type + * @param size pointer for side information size to store (optional) + * @return pointer to data if present or NULL otherwise + */ +uint8_t* liteav_av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, + int *size); + +#if FF_API_MERGE_SD_API +attribute_deprecated +int liteav_av_packet_merge_side_data(AVPacket *pkt); + +attribute_deprecated +int liteav_av_packet_split_side_data(AVPacket *pkt); +#endif + +const char *liteav_av_packet_side_data_name(enum AVPacketSideDataType type); + +/** + * Pack a dictionary for use in side_data. + * + * @param dict The dictionary to pack. + * @param size pointer to store the size of the returned data + * @return pointer to data if successful, NULL otherwise + */ +uint8_t *liteav_av_packet_pack_dictionary(AVDictionary *dict, int *size); +/** + * Unpack a dictionary from side_data. + * + * @param data data from side_data + * @param size size of the data + * @param dict the metadata storage dictionary + * @return 0 on success, < 0 on failure + */ +int liteav_av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict); + + +/** + * Convenience function to free all the side data stored. + * All the other fields stay untouched. 
+ * + * @param pkt packet + */ +void liteav_av_packet_free_side_data(AVPacket *pkt); + +/** + * Setup a new reference to the data described by a given packet + * + * If src is reference-counted, setup dst as a new reference to the + * buffer in src. Otherwise allocate a new buffer in dst and copy the + * data from src into it. + * + * All the other fields are copied from src. + * + * @see liteav_av_packet_unref + * + * @param dst Destination packet + * @param src Source packet + * + * @return 0 on success, a negative AVERROR on error. + */ +int liteav_av_packet_ref(AVPacket *dst, const AVPacket *src); + +/** + * Wipe the packet. + * + * Unreference the buffer referenced by the packet and reset the + * remaining packet fields to their default values. + * + * @param pkt The packet to be unreferenced. + */ +void liteav_av_packet_unref(AVPacket *pkt); + +/** + * Move every field in src to dst and reset src. + * + * @see liteav_av_packet_unref + * + * @param src Source packet, will be reset + * @param dst Destination packet + */ +void liteav_av_packet_move_ref(AVPacket *dst, AVPacket *src); + +/** + * Copy only "properties" fields from src to dst. + * + * Properties for the purpose of this function are all the fields + * beside those related to the packet data (buf, data, size) + * + * @param dst Destination packet + * @param src Source packet + * + * @return 0 on success AVERROR on failure. + */ +int liteav_av_packet_copy_props(AVPacket *dst, const AVPacket *src); + +/** + * Ensure the data described by a given packet is reference counted. + * + * @note This function does not ensure that the reference will be writable. + * Use liteav_av_packet_make_writable instead for that purpose. + * + * @see liteav_av_packet_ref + * @see liteav_av_packet_make_writable + * + * @param pkt packet whose data should be made reference counted. + * + * @return 0 on success, a negative AVERROR on error. On failure, the + * packet is unchanged. 
+ */ +int liteav_av_packet_make_refcounted(AVPacket *pkt); + +/** + * Create a writable reference for the data described by a given packet, + * avoiding data copy if possible. + * + * @param pkt Packet whose data should be made writable. + * + * @return 0 on success, a negative AVERROR on failure. On failure, the + * packet is unchanged. + */ +int liteav_av_packet_make_writable(AVPacket *pkt); + +/** + * Convert valid timing fields (timestamps / durations) in a packet from one + * timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be + * ignored. + * + * @param pkt packet on which the conversion will be performed + * @param tb_src source timebase, in which the timing fields in pkt are + * expressed + * @param tb_dst destination timebase, to which the timing fields will be + * converted + */ +void liteav_av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst); + +/** + * @} + */ + +/** + * @addtogroup lavc_decoding + * @{ + */ + +/** + * Find a registered decoder with a matching codec ID. + * + * @param id AVCodecID of the requested decoder + * @return A decoder if one was found, NULL otherwise. + */ +AVCodec *liteav_avcodec_find_decoder(enum AVCodecID id); + +/** + * Find a registered decoder with the specified name. + * + * @param name name of the requested decoder + * @return A decoder if one was found, NULL otherwise. + */ +AVCodec *liteav_avcodec_find_decoder_by_name(const char *name); + +/** + * The default callback for AVCodecContext.get_buffer2(). It is made public so + * it can be called by custom get_buffer2() implementations for decoders without + * AV_CODEC_CAP_DR1 set. + */ +int liteav_avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags); + +/** + * Modify width and height values so that they will result in a memory + * buffer that is acceptable for the codec if you do not use any horizontal + * padding. + * + * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened. 
+ */ +void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height); + +/** + * Modify width and height values so that they will result in a memory + * buffer that is acceptable for the codec if you also ensure that all + * line sizes are a multiple of the respective linesize_align[i]. + * + * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened. + */ +void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, + int linesize_align[AV_NUM_DATA_POINTERS]); + +/** + * Converts AVChromaLocation to swscale x/y chroma position. + * + * The positions represent the chroma (0,0) position in a coordinates system + * with luma (0,0) representing the origin and luma(1,1) representing 256,256 + * + * @param xpos horizontal chroma sample position + * @param ypos vertical chroma sample position + */ +int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos); + +/** + * Converts swscale x/y chroma position to AVChromaLocation. + * + * The positions represent the chroma (0,0) position in a coordinates system + * with luma (0,0) representing the origin and luma(1,1) representing 256,256 + * + * @param xpos horizontal chroma sample position + * @param ypos vertical chroma sample position + */ +enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos); + +/** + * Decode the audio frame of size avpkt->size from avpkt->data into frame. + * + * Some decoders may support multiple frames in a single AVPacket. Such + * decoders would then just decode the first frame and the return value would be + * less than the packet size. In this case, liteav_avcodec_decode_audio4 has to be + * called again with an AVPacket containing the remaining data in order to + * decode the second frame, etc... Even if no frames are returned, the packet + * needs to be fed to the decoder with remaining data until it is completely + * consumed or an error occurs. 
+ * + * Some decoders (those marked with AV_CODEC_CAP_DELAY) have a delay between input + * and output. This means that for some packets they will not immediately + * produce decoded output and need to be flushed at the end of decoding to get + * all the decoded data. Flushing is done by calling this function with packets + * with avpkt->data set to NULL and avpkt->size set to 0 until it stops + * returning samples. It is safe to flush even those decoders that are not + * marked with AV_CODEC_CAP_DELAY, then no samples will be returned. + * + * @warning The input buffer, avpkt->data must be AV_INPUT_BUFFER_PADDING_SIZE + * larger than the actual read bytes because some optimized bitstream + * readers read 32 or 64 bits at once and could read over the end. + * + * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() + * before packets may be fed to the decoder. + * + * @param avctx the codec context + * @param[out] frame The AVFrame in which to store decoded audio samples. + * The decoder will allocate a buffer for the decoded frame by + * calling the AVCodecContext.get_buffer2() callback. + * When AVCodecContext.refcounted_frames is set to 1, the frame is + * reference counted and the returned reference belongs to the + * caller. The caller must release the frame using liteav_av_frame_unref() + * when the frame is no longer needed. The caller may safely write + * to the frame if liteav_av_frame_is_writable() returns 1. + * When AVCodecContext.refcounted_frames is set to 0, the returned + * reference belongs to the decoder and is valid only until the + * next call to this function or until closing or flushing the + * decoder. The caller may not write to it. + * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is + * non-zero. Note that this field being set to zero + * does not mean that an error has occurred. For + * decoders with AV_CODEC_CAP_DELAY set, no given decode + * call is guaranteed to produce a frame. 
+ * @param[in] avpkt The input AVPacket containing the input buffer. + * At least avpkt->data and avpkt->size should be set. Some + * decoders might also require additional fields to be set. + * @return A negative error code is returned if an error occurred during + * decoding, otherwise the number of bytes consumed from the input + * AVPacket is returned. + * +* @deprecated Use liteav_avcodec_send_packet() and liteav_avcodec_receive_frame(). + */ +attribute_deprecated +int liteav_avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, + int *got_frame_ptr, const AVPacket *avpkt); + +/** + * Decode the video frame of size avpkt->size from avpkt->data into picture. + * Some decoders may support multiple frames in a single AVPacket, such + * decoders would then just decode the first frame. + * + * @warning The input buffer must be AV_INPUT_BUFFER_PADDING_SIZE larger than + * the actual read bytes because some optimized bitstream readers read 32 or 64 + * bits at once and could read over the end. + * + * @warning The end of the input buffer buf should be set to 0 to ensure that + * no overreading happens for damaged MPEG streams. + * + * @note Codecs which have the AV_CODEC_CAP_DELAY capability set have a delay + * between input and output, these need to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to return the remaining frames. + * + * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() + * before packets may be fed to the decoder. + * + * @param avctx the codec context + * @param[out] picture The AVFrame in which the decoded video frame will be stored. + * Use liteav_av_frame_alloc() to get an AVFrame. The codec will + * allocate memory for the actual bitmap by calling the + * AVCodecContext.get_buffer2() callback. + * When AVCodecContext.refcounted_frames is set to 1, the frame is + * reference counted and the returned reference belongs to the + * caller. 
The caller must release the frame using liteav_av_frame_unref() + * when the frame is no longer needed. The caller may safely write + * to the frame if liteav_av_frame_is_writable() returns 1. + * When AVCodecContext.refcounted_frames is set to 0, the returned + * reference belongs to the decoder and is valid only until the + * next call to this function or until closing or flushing the + * decoder. The caller may not write to it. + * + * @param[in] avpkt The input AVPacket containing the input buffer. + * You can create such packet with liteav_av_init_packet() and by then setting + * data and size, some decoders might in addition need other fields like + * flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least + * fields possible. + * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero. + * @return On error a negative value is returned, otherwise the number of bytes + * used or zero if no frame could be decompressed. + * + * @deprecated Use liteav_avcodec_send_packet() and liteav_avcodec_receive_frame(). + */ +attribute_deprecated +int liteav_avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, + int *got_picture_ptr, + const AVPacket *avpkt); + +/** + * Decode a subtitle message. + * Return a negative value on error, otherwise return the number of bytes used. + * If no subtitle could be decompressed, got_sub_ptr is zero. + * Otherwise, the subtitle is stored in *sub. + * Note that AV_CODEC_CAP_DR1 is not available for subtitle codecs. This is for + * simplicity, because the performance difference is expect to be negligible + * and reusing a get_buffer written for video codecs would probably perform badly + * due to a potentially very different allocation pattern. + * + * Some decoders (those marked with AV_CODEC_CAP_DELAY) have a delay between input + * and output. 
This means that for some packets they will not immediately + * produce decoded output and need to be flushed at the end of decoding to get + * all the decoded data. Flushing is done by calling this function with packets + * with avpkt->data set to NULL and avpkt->size set to 0 until it stops + * returning subtitles. It is safe to flush even those decoders that are not + * marked with AV_CODEC_CAP_DELAY, then no subtitles will be returned. + * + * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() + * before packets may be fed to the decoder. + * + * @param avctx the codec context + * @param[out] sub The Preallocated AVSubtitle in which the decoded subtitle will be stored, + * must be freed with avsubtitle_free if *got_sub_ptr is set. + * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero. + * @param[in] avpkt The input AVPacket containing the input buffer. + */ +int liteav_avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, + int *got_sub_ptr, + AVPacket *avpkt); + +/** + * Supply raw packet data as input to a decoder. + * + * Internally, this call will copy relevant AVCodecContext fields, which can + * influence decoding per-packet, and apply them when the packet is actually + * decoded. (For example AVCodecContext.skip_frame, which might direct the + * decoder to drop the frame contained by the packet sent with this function.) + * + * @warning The input buffer, avpkt->data must be AV_INPUT_BUFFER_PADDING_SIZE + * larger than the actual read bytes because some optimized bitstream + * readers read 32 or 64 bits at once and could read over the end. + * + * @warning Do not mix this API with the legacy API (like liteav_avcodec_decode_video2()) + * on the same AVCodecContext. It will return unexpected results now + * or in future libavcodec versions. + * + * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() + * before packets may be fed to the decoder. 
+ * + * @param avctx codec context + * @param[in] avpkt The input AVPacket. Usually, this will be a single video + * frame, or several complete audio frames. + * Ownership of the packet remains with the caller, and the + * decoder will not write to the packet. The decoder may create + * a reference to the packet data (or copy it if the packet is + * not reference-counted). + * Unlike with older APIs, the packet is always fully consumed, + * and if it contains multiple frames (e.g. some audio codecs), + * will require you to call liteav_avcodec_receive_frame() multiple + * times afterwards before you can send a new packet. + * It can be NULL (or an AVPacket with data set to NULL and + * size set to 0); in this case, it is considered a flush + * packet, which signals the end of the stream. Sending the + * first flush packet will return success. Subsequent ones are + * unnecessary and will return AVERROR_EOF. If the decoder + * still has frames buffered, it will return them after sending + * a flush packet. + * + * @return 0 on success, otherwise negative error code: + * AVERROR(EAGAIN): input is not accepted in the current state - user + * must read output with liteav_avcodec_receive_frame() (once + * all output is read, the packet should be resent, and + * the call will not fail with EAGAIN). + * AVERROR_EOF: the decoder has been flushed, and no new packets can + * be sent to it (also returned if more than 1 flush + * packet is sent) + * AVERROR(EINVAL): codec not opened, it is an encoder, or requires flush + * AVERROR(ENOMEM): failed to add packet to internal queue, or similar + * other errors: legitimate decoding errors + */ +int liteav_avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt); + +/** + * Return decoded output data from a decoder. + * + * @param avctx codec context + * @param frame This will be set to a reference-counted video or audio + * frame (depending on the decoder type) allocated by the + * decoder. 
Note that the function will always call + * liteav_av_frame_unref(frame) before doing anything else. + * + * @return + * 0: success, a frame was returned + * AVERROR(EAGAIN): output is not available in this state - user must try + * to send new input + * AVERROR_EOF: the decoder has been fully flushed, and there will be + * no more output frames + * AVERROR(EINVAL): codec not opened, or it is an encoder + * other negative values: legitimate decoding errors + */ +int liteav_avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame); + +/** + * Supply a raw video or audio frame to the encoder. Use liteav_avcodec_receive_packet() + * to retrieve buffered output packets. + * + * @param avctx codec context + * @param[in] frame AVFrame containing the raw audio or video frame to be encoded. + * Ownership of the frame remains with the caller, and the + * encoder will not write to the frame. The encoder may create + * a reference to the frame data (or copy it if the frame is + * not reference-counted). + * It can be NULL, in which case it is considered a flush + * packet. This signals the end of the stream. If the encoder + * still has packets buffered, it will return them after this + * call. Once flushing mode has been entered, additional flush + * packets are ignored, and sending frames will return + * AVERROR_EOF. + * + * For audio: + * If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame + * can have any number of samples. + * If it is not set, frame->nb_samples must be equal to + * avctx->frame_size for all frames except the last. + * The final frame may be smaller than avctx->frame_size. + * @return 0 on success, otherwise negative error code: + * AVERROR(EAGAIN): input is not accepted in the current state - user + * must read output with liteav_avcodec_receive_packet() (once + * all output is read, the packet should be resent, and + * the call will not fail with EAGAIN). 
+ * AVERROR_EOF: the encoder has been flushed, and no new frames can + * be sent to it + * AVERROR(EINVAL): codec not opened, refcounted_frames not set, it is a + * decoder, or requires flush + * AVERROR(ENOMEM): failed to add packet to internal queue, or similar + * other errors: legitimate decoding errors + */ +int liteav_avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame); + +/** + * Read encoded data from the encoder. + * + * @param avctx codec context + * @param avpkt This will be set to a reference-counted packet allocated by the + * encoder. Note that the function will always call + * liteav_av_frame_unref(frame) before doing anything else. + * @return 0 on success, otherwise negative error code: + * AVERROR(EAGAIN): output is not available in the current state - user + * must try to send input + * AVERROR_EOF: the encoder has been fully flushed, and there will be + * no more output packets + * AVERROR(EINVAL): codec not opened, or it is an encoder + * other errors: legitimate decoding errors + */ +int liteav_avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt); + +/** + * Create and return a AVHWFramesContext with values adequate for hardware + * decoding. This is meant to get called from the get_format callback, and is + * a helper for preparing a AVHWFramesContext for AVCodecContext.hw_frames_ctx. + * This API is for decoding with certain hardware acceleration modes/APIs only. + * + * The returned AVHWFramesContext is not initialized. The caller must do this + * with liteav_av_hwframe_ctx_init(). + * + * Calling this function is not a requirement, but makes it simpler to avoid + * codec or hardware API specific details when manually allocating frames. + * + * Alternatively to this, an API user can set AVCodecContext.hw_device_ctx, + * which sets up AVCodecContext.hw_frames_ctx fully automatically, and makes + * it unnecessary to call this function or having to care about + * AVHWFramesContext initialization at all. 
+ * + * There are a number of requirements for calling this function: + * + * - It must be called from get_format with the same avctx parameter that was + * passed to get_format. Calling it outside of get_format is not allowed, and + * can trigger undefined behavior. + * - The function is not always supported (see description of return values). + * Even if this function returns successfully, hwaccel initialization could + * fail later. (The degree to which implementations check whether the stream + * is actually supported varies. Some do this check only after the user's + * get_format callback returns.) + * - The hw_pix_fmt must be one of the choices suggested by get_format. If the + * user decides to use a AVHWFramesContext prepared with this API function, + * the user must return the same hw_pix_fmt from get_format. + * - The device_ref passed to this function must support the given hw_pix_fmt. + * - After calling this API function, it is the user's responsibility to + * initialize the AVHWFramesContext (returned by the out_frames_ref parameter), + * and to set AVCodecContext.hw_frames_ctx to it. If done, this must be done + * before returning from get_format (this is implied by the normal + * AVCodecContext.hw_frames_ctx API rules). + * - The AVHWFramesContext parameters may change every time time get_format is + * called. Also, AVCodecContext.hw_frames_ctx is reset before get_format. So + * you are inherently required to go through this process again on every + * get_format call. + * - It is perfectly possible to call this function without actually using + * the resulting AVHWFramesContext. One use-case might be trying to reuse a + * previously initialized AVHWFramesContext, and calling this API function + * only to test whether the required frame parameters have changed. + * - Fields that use dynamically allocated values of any kind must not be set + * by the user unless setting them is explicitly allowed by the documentation. 
+ * If the user sets AVHWFramesContext.free and AVHWFramesContext.user_opaque, + * the new free callback must call the potentially set previous free callback. + * This API call may set any dynamically allocated fields, including the free + * callback. + * + * The function will set at least the following fields on AVHWFramesContext + * (potentially more, depending on hwaccel API): + * + * - All fields set by liteav_av_hwframe_ctx_alloc(). + * - Set the format field to hw_pix_fmt. + * - Set the sw_format field to the most suited and most versatile format. (An + * implication is that this will prefer generic formats over opaque formats + * with arbitrary restrictions, if possible.) + * - Set the width/height fields to the coded frame size, rounded up to the + * API-specific minimum alignment. + * - Only _if_ the hwaccel requires a pre-allocated pool: set the initial_pool_size + * field to the number of maximum reference surfaces possible with the codec, + * plus 1 surface for the user to work (meaning the user can safely reference + * at most 1 decoded surface at a time), plus additional buffering introduced + * by frame threading. If the hwaccel does not require pre-allocation, the + * field is left to 0, and the decoder will allocate new surfaces on demand + * during decoding. + * - Possibly AVHWFramesContext.hwctx fields, depending on the underlying + * hardware API. + * + * Essentially, out_frames_ref returns the same as liteav_av_hwframe_ctx_alloc(), but + * with basic frame parameters set. + * + * The function is stateless, and does not change the AVCodecContext or the + * device_ref AVHWDeviceContext. + * + * @param avctx The context which is currently calling get_format, and which + * implicitly contains all state needed for filling the returned + * AVHWFramesContext properly. + * @param device_ref A reference to the AVHWDeviceContext describing the device + * which will be used by the hardware decoder. 
+ * @param hw_pix_fmt The hwaccel format you are going to return from get_format. + * @param out_frames_ref On success, set to a reference to an _uninitialized_ + * AVHWFramesContext, created from the given device_ref. + * Fields will be set to values required for decoding. + * Not changed if an error is returned. + * @return zero on success, a negative value on error. The following error codes + * have special semantics: + * AVERROR(ENOENT): the decoder does not support this functionality. Setup + * is always manual, or it is a decoder which does not + * support setting AVCodecContext.hw_frames_ctx at all, + * or it is a software format. + * AVERROR(EINVAL): it is known that hardware decoding is not supported for + * this configuration, or the device_ref is not supported + * for the hwaccel referenced by hw_pix_fmt. + */ +int liteav_avcodec_get_hw_frames_parameters(AVCodecContext *avctx, + AVBufferRef *device_ref, + enum AVPixelFormat hw_pix_fmt, + AVBufferRef **out_frames_ref); + + + +/** + * @defgroup lavc_parsing Frame parsing + * @{ + */ + +enum AVPictureStructure { + AV_PICTURE_STRUCTURE_UNKNOWN, //< unknown + AV_PICTURE_STRUCTURE_TOP_FIELD, //< coded as top field + AV_PICTURE_STRUCTURE_BOTTOM_FIELD, //< coded as bottom field + AV_PICTURE_STRUCTURE_FRAME, //< coded as frame +}; + +typedef struct AVCodecParserContext { + void *priv_data; + struct AVCodecParser *parser; + int64_t frame_offset; /* offset of the current frame */ + int64_t cur_offset; /* current offset + (incremented by each av_parser_parse()) */ + int64_t next_frame_offset; /* offset of the next frame */ + /* video info */ + int pict_type; /* XXX: Put it back in AVCodecContext. */ + /** + * This field is used for proper frame duration computation in lavf. + * It signals, how much longer the frame duration of the current frame + * is compared to normal frame duration. + * + * frame_duration = (1 + repeat_pict) * time_base + * + * It is used by codecs like H.264 to display telecined material. 
+ */ + int repeat_pict; /* XXX: Put it back in AVCodecContext. */ + int64_t pts; /* pts of the current frame */ + int64_t dts; /* dts of the current frame */ + + /* private data */ + int64_t last_pts; + int64_t last_dts; + int fetch_timestamp; + +#define AV_PARSER_PTS_NB 4 + int cur_frame_start_index; + int64_t cur_frame_offset[AV_PARSER_PTS_NB]; + int64_t cur_frame_pts[AV_PARSER_PTS_NB]; + int64_t cur_frame_dts[AV_PARSER_PTS_NB]; + + int flags; +#define PARSER_FLAG_COMPLETE_FRAMES 0x0001 +#define PARSER_FLAG_ONCE 0x0002 +/// Set if the parser has a valid file offset +#define PARSER_FLAG_FETCHED_OFFSET 0x0004 +#define PARSER_FLAG_USE_CODEC_TS 0x1000 + + int64_t offset; ///< byte offset from starting packet start + int64_t cur_frame_end[AV_PARSER_PTS_NB]; + + /** + * Set by parser to 1 for key frames and 0 for non-key frames. + * It is initialized to -1, so if the parser doesn't set this flag, + * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames + * will be used. + */ + int key_frame; + +#if FF_API_CONVERGENCE_DURATION + /** + * @deprecated unused + */ + attribute_deprecated + int64_t convergence_duration; +#endif + + // Timestamp generation support: + /** + * Synchronization point for start of timestamp generation. + * + * Set to >0 for sync point, 0 for no sync point and <0 for undefined + * (default). + * + * For example, this corresponds to presence of H.264 buffering period + * SEI message. + */ + int dts_sync_point; + + /** + * Offset of the current timestamp against last timestamp sync point in + * units of AVCodecContext.time_base. + * + * Set to INT_MIN when dts_sync_point unused. Otherwise, it must + * contain a valid timestamp offset. + * + * Note that the timestamp of sync point has usually a nonzero + * dts_ref_dts_delta, which refers to the previous sync point. Offset of + * the next frame after timestamp sync point will be usually 1. + * + * For example, this corresponds to H.264 cpb_removal_delay. 
+ */ + int dts_ref_dts_delta; + + /** + * Presentation delay of current frame in units of AVCodecContext.time_base. + * + * Set to INT_MIN when dts_sync_point unused. Otherwise, it must + * contain valid non-negative timestamp delta (presentation time of a frame + * must not lie in the past). + * + * This delay represents the difference between decoding and presentation + * time of the frame. + * + * For example, this corresponds to H.264 dpb_output_delay. + */ + int pts_dts_delta; + + /** + * Position of the packet in file. + * + * Analogous to cur_frame_pts/dts + */ + int64_t cur_frame_pos[AV_PARSER_PTS_NB]; + + /** + * Byte position of currently parsed frame in stream. + */ + int64_t pos; + + /** + * Previous frame byte position. + */ + int64_t last_pos; + + /** + * Duration of the current frame. + * For audio, this is in units of 1 / AVCodecContext.sample_rate. + * For all other types, this is in units of AVCodecContext.time_base. + */ + int duration; + + enum AVFieldOrder field_order; + + /** + * Indicate whether a picture is coded as a frame, top field or bottom field. + * + * For example, H.264 field_pic_flag equal to 0 corresponds to + * AV_PICTURE_STRUCTURE_FRAME. An H.264 picture with field_pic_flag + * equal to 1 and bottom_field_flag equal to 0 corresponds to + * AV_PICTURE_STRUCTURE_TOP_FIELD. + */ + enum AVPictureStructure picture_structure; + + /** + * Picture number incremented in presentation or output order. + * This field may be reinitialized at the first picture of a new sequence. + * + * For example, this corresponds to H.264 PicOrderCnt. + */ + int output_picture_number; + + /** + * Dimensions of the decoded video intended for presentation. + */ + int width; + int height; + + /** + * Dimensions of the coded video. + */ + int coded_width; + int coded_height; + + /** + * The format of the coded data, corresponds to enum AVPixelFormat for video + * and for enum AVSampleFormat for audio. 
+ * + * Note that a decoder can have considerable freedom in how exactly it + * decodes the data, so the format reported here might be different from the + * one returned by a decoder. + */ + int format; +} AVCodecParserContext; + +typedef struct AVCodecParser { + int codec_ids[5]; /* several codec IDs are permitted */ + int priv_data_size; + int (*parser_init)(AVCodecParserContext *s); + /* This callback never returns an error, a negative value means that + * the frame start was in a previous packet. */ + int (*parser_parse)(AVCodecParserContext *s, + AVCodecContext *avctx, + const uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size); + void (*parser_close)(AVCodecParserContext *s); + int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size); + struct AVCodecParser *next; +} AVCodecParser; + +/** + * Iterate over all registered codec parsers. + * + * @param opaque a pointer where libavcodec will store the iteration state. Must + * point to NULL to start the iteration. + * + * @return the next registered codec parser or NULL when the iteration is + * finished + */ +const AVCodecParser *liteav_av_parser_iterate(void **opaque); + +attribute_deprecated +AVCodecParser *liteav_av_parser_next(const AVCodecParser *c); + +attribute_deprecated +void liteav_av_register_codec_parser(AVCodecParser *parser); +AVCodecParserContext *liteav_av_parser_init(int codec_id); + +/** + * Parse a packet. + * + * @param s parser context. + * @param avctx codec context. + * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished. + * @param poutbuf_size set to size of parsed buffer or zero if not yet finished. + * @param buf input buffer. + * @param buf_size buffer size in bytes without the padding. I.e. the full buffer + size is assumed to be buf_size + AV_INPUT_BUFFER_PADDING_SIZE. + To signal EOF, this should be 0 (so that the last frame + can be output). + * @param pts input presentation timestamp. 
+ * @param dts input decoding timestamp. + * @param pos input byte position in stream. + * @return the number of bytes of the input bitstream used. + * + * Example: + * @code + * while(in_len){ + * len = liteav_av_parser_parse2(myparser, AVCodecContext, &data, &size, + * in_data, in_len, + * pts, dts, pos); + * in_data += len; + * in_len -= len; + * + * if(size) + * decode_frame(data, size); + * } + * @endcode + */ +int liteav_av_parser_parse2(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, + int64_t pts, int64_t dts, + int64_t pos); + +/** + * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed + * @deprecated use AVBitStreamFilter + */ +int liteav_av_parser_change(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); +void liteav_av_parser_close(AVCodecParserContext *s); + +/** + * @} + * @} + */ + +/** + * @addtogroup lavc_encoding + * @{ + */ + +/** + * Find a registered encoder with a matching codec ID. + * + * @param id AVCodecID of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *liteav_avcodec_find_encoder(enum AVCodecID id); + +/** + * Find a registered encoder with the specified name. + * + * @param name name of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *liteav_avcodec_find_encoder_by_name(const char *name); + +/** + * Encode a frame of audio. + * + * Takes input samples from frame and writes the next output packet, if + * available, to avpkt. The output packet does not necessarily contain data for + * the most recent frame, as encoders can delay, split, and combine input frames + * internally as needed. + * + * @param avctx codec context + * @param avpkt output AVPacket. 
+ * The user can supply an output buffer by setting + * avpkt->data and avpkt->size prior to calling the + * function, but if the size of the user-provided data is not + * large enough, encoding will fail. If avpkt->data and + * avpkt->size are set, avpkt->destruct must also be set. All + * other AVPacket fields will be reset by the encoder using + * liteav_av_init_packet(). If avpkt->data is NULL, the encoder will + * allocate it. The encoder will set avpkt->size to the size + * of the output packet. + * + * If this function fails or produces no output, avpkt will be + * freed using liteav_av_packet_unref(). + * @param[in] frame AVFrame containing the raw audio data to be encoded. + * May be NULL when flushing an encoder that has the + * AV_CODEC_CAP_DELAY capability set. + * If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame + * can have any number of samples. + * If it is not set, frame->nb_samples must be equal to + * avctx->frame_size for all frames except the last. + * The final frame may be smaller than avctx->frame_size. + * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the + * output packet is non-empty, and to 0 if it is + * empty. If the function returns an error, the + * packet can be assumed to be invalid, and the + * value of got_packet_ptr is undefined and should + * not be used. + * @return 0 on success, negative error code on failure + * + * @deprecated use liteav_avcodec_send_frame()/liteav_avcodec_receive_packet() instead + */ +attribute_deprecated +int liteav_avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr); + +/** + * Encode a frame of video. + * + * Takes input raw video data from frame and writes the next output packet, if + * available, to avpkt. The output packet does not necessarily contain data for + * the most recent frame, as encoders can delay and reorder input frames + * internally as needed. 
+ * + * @param avctx codec context + * @param avpkt output AVPacket. + * The user can supply an output buffer by setting + * avpkt->data and avpkt->size prior to calling the + * function, but if the size of the user-provided data is not + * large enough, encoding will fail. All other AVPacket fields + * will be reset by the encoder using liteav_av_init_packet(). If + * avpkt->data is NULL, the encoder will allocate it. + * The encoder will set avpkt->size to the size of the + * output packet. The returned data (if any) belongs to the + * caller, he is responsible for freeing it. + * + * If this function fails or produces no output, avpkt will be + * freed using liteav_av_packet_unref(). + * @param[in] frame AVFrame containing the raw video data to be encoded. + * May be NULL when flushing an encoder that has the + * AV_CODEC_CAP_DELAY capability set. + * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the + * output packet is non-empty, and to 0 if it is + * empty. If the function returns an error, the + * packet can be assumed to be invalid, and the + * value of got_packet_ptr is undefined and should + * not be used. + * @return 0 on success, negative error code on failure + * + * @deprecated use liteav_avcodec_send_frame()/liteav_avcodec_receive_packet() instead + */ +attribute_deprecated +int liteav_avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr); + +int liteav_avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, + const AVSubtitle *sub); + + +/** + * @} + */ + +#if FF_API_AVPICTURE +/** + * @addtogroup lavc_picture + * @{ + */ + +/** + * @deprecated unused + */ +attribute_deprecated +int liteav_avpicture_alloc(AVPicture *picture, enum AVPixelFormat pix_fmt, int width, int height); + +/** + * @deprecated unused + */ +attribute_deprecated +void liteav_avpicture_free(AVPicture *picture); + +/** + * @deprecated use liteav_av_image_fill_arrays() instead. 
+ */ +attribute_deprecated +int liteav_avpicture_fill(AVPicture *picture, const uint8_t *ptr, + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * @deprecated use liteav_av_image_copy_to_buffer() instead. + */ +attribute_deprecated +int liteav_avpicture_layout(const AVPicture *src, enum AVPixelFormat pix_fmt, + int width, int height, + unsigned char *dest, int dest_size); + +/** + * @deprecated use liteav_av_image_get_buffer_size() instead. + */ +attribute_deprecated +int liteav_avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height); + +/** + * @deprecated use liteav_av_image_copy() instead. + */ +attribute_deprecated +void liteav_av_picture_copy(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * @deprecated unused + */ +attribute_deprecated +int liteav_av_picture_crop(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int top_band, int left_band); + +/** + * @deprecated unused + */ +attribute_deprecated +int liteav_av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum AVPixelFormat pix_fmt, + int padtop, int padbottom, int padleft, int padright, int *color); + +/** + * @} + */ +#endif + +/** + * @defgroup lavc_misc Utility functions + * @ingroup libavc + * + * Miscellaneous utility functions related to both encoding and decoding + * (or neither). + * @{ + */ + +/** + * @defgroup lavc_misc_pixfmt Pixel formats + * + * Functions for working with pixel formats. + * @{ + */ + +#if FF_API_GETCHROMA +/** + * @deprecated Use liteav_av_pix_fmt_get_chroma_sub_sample + */ + +attribute_deprecated +void liteav_avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift); +#endif + +/** + * Return a value representing the fourCC code associated to the + * pixel format pix_fmt, or 0 if no associated fourCC code can be + * found. 
+ */ +unsigned int liteav_avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt); + +/** + * @deprecated see liteav_av_get_pix_fmt_loss() + */ +int liteav_avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt, + int has_alpha); + +/** + * Find the best pixel format to convert to given a certain source pixel + * format. When converting from one pixel format to another, information loss + * may occur. For example, when converting from RGB24 to GRAY, the color + * information will be lost. Similarly, other losses occur when converting from + * some formats to other formats. liteav_avcodec_find_best_pix_fmt_of_2() searches which of + * the given pixel formats should be used to suffer the least amount of loss. + * The pixel formats from which it chooses one, are determined by the + * pix_fmt_list parameter. + * + * + * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to choose from + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur. + * @return The best pixel format to convert to or -1 if none was found. 
+ */ +enum AVPixelFormat liteav_avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list, + enum AVPixelFormat src_pix_fmt, + int has_alpha, int *loss_ptr); + +/** + * @deprecated see liteav_av_find_best_pix_fmt_of_2() + */ +enum AVPixelFormat liteav_avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); + +attribute_deprecated +enum AVPixelFormat liteav_avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); + +enum AVPixelFormat liteav_avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt); + +/** + * @} + */ + +#if FF_API_TAG_STRING +/** + * Put a string representing the codec tag codec_tag in buf. + * + * @param buf buffer to place codec tag in + * @param buf_size size in bytes of buf + * @param codec_tag codec tag to assign + * @return the length of the string that would have been generated if + * enough space had been available, excluding the trailing null + * + * @deprecated see av_fourcc_make_string() and av_fourcc2str(). + */ +attribute_deprecated +size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag); +#endif + +void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); + +/** + * Return a name for the specified profile, if available. + * + * @param codec the codec that is searched for the given profile + * @param profile the profile value for which a name is requested + * @return A name for the profile if found, NULL otherwise. + */ +const char *av_get_profile_name(const AVCodec *codec, int profile); + +/** + * Return a name for the specified profile, if available. 
+ * + * @param codec_id the ID of the codec to which the requested profile belongs + * @param profile the profile value for which a name is requested + * @return A name for the profile if found, NULL otherwise. + * + * @note unlike av_get_profile_name(), which searches a list of profiles + * supported by a specific decoder or encoder implementation, this + * function searches the list of profiles from the AVCodecDescriptor + */ +const char *avcodec_profile_name(enum AVCodecID codec_id, int profile); + +int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size); +int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count); +//FIXME func typedef + +/** + * Fill AVFrame audio data and linesize pointers. + * + * The buffer buf must be a preallocated buffer with a size big enough + * to contain the specified samples amount. The filled AVFrame data + * pointers will point to this buffer. + * + * AVFrame extended_data channel pointers are allocated if necessary for + * planar audio. + * + * @param frame the AVFrame + * frame->nb_samples must be set prior to calling the + * function. This function fills in frame->data, + * frame->extended_data, frame->linesize[0]. + * @param nb_channels channel count + * @param sample_fmt sample format + * @param buf buffer to use for frame data + * @param buf_size size of buffer + * @param align plane size sample alignment (0 = default) + * @return >=0 on success, negative error code on failure + * @todo return the size in bytes required to store the samples in + * case of success, at the next libavutil bump + */ +int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, + enum AVSampleFormat sample_fmt, const uint8_t *buf, + int buf_size, int align); + +/** + * Reset the internal decoder state / flush internal buffers. Should be called + * e.g. 
when seeking or when switching to a different stream. + * + * @note when refcounted frames are not used (i.e. avctx->refcounted_frames is 0), + * this invalidates the frames previously returned from the decoder. When + * refcounted frames are used, the decoder just releases any references it might + * keep internally, but the caller's reference remains valid. + */ +void liteav_avcodec_flush_buffers(AVCodecContext *avctx); + +/** + * Return codec bits per sample. + * + * @param[in] codec_id the codec + * @return Number of bits per sample or zero if unknown for the given codec. + */ +int av_get_bits_per_sample(enum AVCodecID codec_id); + +/** + * Return the PCM codec associated with a sample format. + * @param be endianness, 0 for little, 1 for big, + * -1 (or anything else) for native + * @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE + */ +enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be); + +/** + * Return codec bits per sample. + * Only return non-zero if the bits per sample is exactly correct, not an + * approximation. + * + * @param[in] codec_id the codec + * @return Number of bits per sample or zero if unknown for the given codec. + */ +int av_get_exact_bits_per_sample(enum AVCodecID codec_id); + +/** + * Return audio frame duration. + * + * @param avctx codec context + * @param frame_bytes size of the frame, or 0 if unknown + * @return frame duration, in samples, if known. 0 if not able to + * determine. + */ +int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes); + +/** + * This function is the same as av_get_audio_frame_duration(), except it works + * with AVCodecParameters instead of an AVCodecContext. 
+ */ +int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes); + +#if FF_API_OLD_BSF +typedef struct AVBitStreamFilterContext { + void *priv_data; + const struct AVBitStreamFilter *filter; + AVCodecParserContext *parser; + struct AVBitStreamFilterContext *next; + /** + * Internal default arguments, used if NULL is passed to liteav_av_bitstream_filter_filter(). + * Not for access by library users. + */ + char *args; +} AVBitStreamFilterContext; +#endif + +typedef struct AVBSFInternal AVBSFInternal; + +/** + * The bitstream filter state. + * + * This struct must be allocated with liteav_av_bsf_alloc() and freed with + * liteav_av_bsf_free(). + * + * The fields in the struct will only be changed (by the caller or by the + * filter) as described in their documentation, and are to be considered + * immutable otherwise. + */ +typedef struct AVBSFContext { + /** + * A class for logging and AVOptions + */ + const AVClass *av_class; + + /** + * The bitstream filter this context is an instance of. + */ + const struct AVBitStreamFilter *filter; + + /** + * Opaque libavcodec internal data. Must not be touched by the caller in any + * way. + */ + AVBSFInternal *internal; + + /** + * Opaque filter-specific private data. If filter->priv_class is non-NULL, + * this is an AVOptions-enabled struct. + */ + void *priv_data; + + /** + * Parameters of the input stream. This field is allocated in + * liteav_av_bsf_alloc(), it needs to be filled by the caller before + * liteav_av_bsf_init(). + */ + AVCodecParameters *par_in; + + /** + * Parameters of the output stream. This field is allocated in + * liteav_av_bsf_alloc(), it is set by the filter in liteav_av_bsf_init(). + */ + AVCodecParameters *par_out; + + /** + * The timebase used for the timestamps of the input packets. Set by the + * caller before liteav_av_bsf_init(). + */ + AVRational time_base_in; + + /** + * The timebase used for the timestamps of the output packets. 
Set by the + * filter in liteav_av_bsf_init(). + */ + AVRational time_base_out; +} AVBSFContext; + +typedef struct AVBitStreamFilter { + const char *name; + + /** + * A list of codec ids supported by the filter, terminated by + * AV_CODEC_ID_NONE. + * May be NULL, in that case the bitstream filter works with any codec id. + */ + const enum AVCodecID *codec_ids; + + /** + * A class for the private data, used to declare bitstream filter private + * AVOptions. This field is NULL for bitstream filters that do not declare + * any options. + * + * If this field is non-NULL, the first member of the filter private data + * must be a pointer to AVClass, which will be set by libavcodec generic + * code to this class. + */ + const AVClass *priv_class; + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavcodec and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + int priv_data_size; + int (*init)(AVBSFContext *ctx); + int (*filter)(AVBSFContext *ctx, AVPacket *pkt); + void (*close)(AVBSFContext *ctx); + void (*flush)(AVBSFContext *ctx); +} AVBitStreamFilter; + +#if FF_API_OLD_BSF +/** + * @deprecated the old bitstream filtering API (using AVBitStreamFilterContext) + * is deprecated. Use the new bitstream filtering API (using AVBSFContext). + */ +attribute_deprecated +void liteav_av_register_bitstream_filter(AVBitStreamFilter *bsf); +/** + * @deprecated the old bitstream filtering API (using AVBitStreamFilterContext) + * is deprecated. Use liteav_av_bsf_get_by_name(), liteav_av_bsf_alloc(), and liteav_av_bsf_init() + * from the new bitstream filtering API (using AVBSFContext). 
+ */ +attribute_deprecated +AVBitStreamFilterContext *liteav_av_bitstream_filter_init(const char *name); +/** + * @deprecated the old bitstream filtering API (using AVBitStreamFilterContext) + * is deprecated. Use liteav_av_bsf_send_packet() and liteav_av_bsf_receive_packet() from the + * new bitstream filtering API (using AVBSFContext). + */ +attribute_deprecated +int liteav_av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, + AVCodecContext *avctx, const char *args, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); +/** + * @deprecated the old bitstream filtering API (using AVBitStreamFilterContext) + * is deprecated. Use liteav_av_bsf_free() from the new bitstream filtering API (using + * AVBSFContext). + */ +attribute_deprecated +void liteav_av_bitstream_filter_close(AVBitStreamFilterContext *bsf); +/** + * @deprecated the old bitstream filtering API (using AVBitStreamFilterContext) + * is deprecated. Use liteav_av_bsf_iterate() from the new bitstream filtering API (using + * AVBSFContext). + */ +attribute_deprecated +const AVBitStreamFilter *liteav_av_bitstream_filter_next(const AVBitStreamFilter *f); +#endif + +/** + * @return a bitstream filter with the specified name or NULL if no such + * bitstream filter exists. + */ +const AVBitStreamFilter *liteav_av_bsf_get_by_name(const char *name); + +/** + * Iterate over all registered bitstream filters. + * + * @param opaque a pointer where libavcodec will store the iteration state. Must + * point to NULL to start the iteration. + * + * @return the next registered bitstream filter or NULL when the iteration is + * finished + */ +const AVBitStreamFilter *liteav_av_bsf_iterate(void **opaque); +#if FF_API_NEXT +attribute_deprecated +const AVBitStreamFilter *liteav_av_bsf_next(void **opaque); +#endif + +/** + * Allocate a context for a given bitstream filter. 
The caller must fill in the + * context parameters as described in the documentation and then call + * liteav_av_bsf_init() before sending any data to the filter. + * + * @param filter the filter for which to allocate an instance. + * @param ctx a pointer into which the pointer to the newly-allocated context + * will be written. It must be freed with liteav_av_bsf_free() after the + * filtering is done. + * + * @return 0 on success, a negative AVERROR code on failure + */ +int liteav_av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **ctx); + +/** + * Prepare the filter for use, after all the parameters and options have been + * set. + */ +int liteav_av_bsf_init(AVBSFContext *ctx); + +/** + * Submit a packet for filtering. + * + * After sending each packet, the filter must be completely drained by calling + * liteav_av_bsf_receive_packet() repeatedly until it returns AVERROR(EAGAIN) or + * AVERROR_EOF. + * + * @param pkt the packet to filter. The bitstream filter will take ownership of + * the packet and reset the contents of pkt. pkt is not touched if an error occurs. + * This parameter may be NULL, which signals the end of the stream (i.e. no more + * packets will be sent). That will cause the filter to output any packets it + * may have buffered internally. + * + * @return 0 on success, a negative AVERROR on error. + */ +int liteav_av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt); + +/** + * Retrieve a filtered packet. + * + * @param[out] pkt this struct will be filled with the contents of the filtered + * packet. It is owned by the caller and must be freed using + * liteav_av_packet_unref() when it is no longer needed. + * This parameter should be "clean" (i.e. freshly allocated + * with liteav_av_packet_alloc() or unreffed with liteav_av_packet_unref()) + * when this function is called. If this function returns + * successfully, the contents of pkt will be completely + * overwritten by the returned data. On failure, pkt is not + * touched. 
+ * + * @return 0 on success. AVERROR(EAGAIN) if more packets need to be sent to the + * filter (using liteav_av_bsf_send_packet()) to get more output. AVERROR_EOF if there + * will be no further output from the filter. Another negative AVERROR value if + * an error occurs. + * + * @note one input packet may result in several output packets, so after sending + * a packet with liteav_av_bsf_send_packet(), this function needs to be called + * repeatedly until it stops returning 0. It is also possible for a filter to + * output fewer packets than were sent to it, so this function may return + * AVERROR(EAGAIN) immediately after a successful liteav_av_bsf_send_packet() call. + */ +int liteav_av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt); + +/** + * Reset the internal bitstream filter state / flush internal buffers. + */ +void liteav_av_bsf_flush(AVBSFContext *ctx); + +/** + * Free a bitstream filter context and everything associated with it; write NULL + * into the supplied pointer. + */ +void liteav_av_bsf_free(AVBSFContext **ctx); + +/** + * Get the AVClass for AVBSFContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see liteav_av_opt_find(). + */ +const AVClass *liteav_av_bsf_get_class(void); + +/** + * Structure for chain/list of bitstream filters. + * Empty list can be allocated by liteav_av_bsf_list_alloc(). + */ +typedef struct AVBSFList AVBSFList; + +/** + * Allocate empty list of bitstream filters. + * The list must be later freed by liteav_av_bsf_list_free() + * or finalized by liteav_av_bsf_list_finalize(). + * + * @return Pointer to @ref AVBSFList on success, NULL in case of failure + */ +AVBSFList *liteav_av_bsf_list_alloc(void); + +/** + * Free list of bitstream filters. + * + * @param lst Pointer to pointer returned by liteav_av_bsf_list_alloc() + */ +void liteav_av_bsf_list_free(AVBSFList **lst); + +/** + * Append bitstream filter to the list of bitstream filters. 
+ * + * @param lst List to append to + * @param bsf Filter context to be appended + * + * @return >=0 on success, negative AVERROR in case of failure + */ +int liteav_av_bsf_list_append(AVBSFList *lst, AVBSFContext *bsf); + +/** + * Construct new bitstream filter context given its name and options + * and append it to the list of bitstream filters. + * + * @param lst List to append to + * @param bsf_name Name of the bitstream filter + * @param options Options for the bitstream filter, can be set to NULL + * + * @return >=0 on success, negative AVERROR in case of failure + */ +int liteav_av_bsf_list_append2(AVBSFList *lst, const char * bsf_name, AVDictionary **options); +/** + * Finalize list of bitstream filters. + * + * This function will transform @ref AVBSFList to single @ref AVBSFContext, + * so the whole chain of bitstream filters can be treated as single filter + * freshly allocated by liteav_av_bsf_alloc(). + * If the call is successful, @ref AVBSFList structure is freed and lst + * will be set to NULL. In case of failure, caller is responsible for + * freeing the structure by liteav_av_bsf_list_free() + * + * @param lst Filter list structure to be transformed + * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext structure + * representing the chain of bitstream filters + * + * @return >=0 on success, negative AVERROR in case of failure + */ +int liteav_av_bsf_list_finalize(AVBSFList **lst, AVBSFContext **bsf); + +/** + * Parse string describing list of bitstream filters and create single + * @ref AVBSFContext describing the whole chain of bitstream filters. + * Resulting @ref AVBSFContext can be treated as any other @ref AVBSFContext freshly + * allocated by liteav_av_bsf_alloc(). 
+ * + * @param str String describing chain of bitstream filters in format + * `bsf1[=opt1=val1:opt2=val2][,bsf2]` + * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext structure + * representing the chain of bitstream filters + * + * @return >=0 on success, negative AVERROR in case of failure + */ +int liteav_av_bsf_list_parse_str(const char *str, AVBSFContext **bsf); + +/** + * Get null/pass-through bitstream filter. + * + * @param[out] bsf Pointer to be set to new instance of pass-through bitstream filter + * + * @return 0 on success, a negative AVERROR code on failure + */ +int liteav_av_bsf_get_null_filter(AVBSFContext **bsf); + +/* memory */ + +/** + * Same behaviour as liteav_av_fast_malloc but the buffer has additional + * AV_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0. + * + * In addition the whole buffer will initially and after resizes + * be 0-initialized so that no uninitialized data will ever appear. + */ +void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Same behaviour as av_fast_padded_malloc except that buffer will always + * be 0-initialized after call. + */ +void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size); + +/** + * Encode extradata length to a buffer. Used by xiph codecs. + * + * @param s buffer to write to; must be at least (v/255+1) bytes long + * @param v size of extradata in bytes + * @return number of bytes written to the buffer. + */ +unsigned int av_xiphlacing(unsigned char *s, unsigned int v); + +#if FF_API_USER_VISIBLE_AVHWACCEL +/** + * Register the hardware accelerator hwaccel. + * + * @deprecated This function doesn't do anything. + */ +attribute_deprecated +void av_register_hwaccel(AVHWAccel *hwaccel); + +/** + * If hwaccel is NULL, returns the first registered hardware accelerator, + * if hwaccel is non-NULL, returns the next registered hardware accelerator + * after hwaccel, or NULL if hwaccel is the last one. 
+ * + * @deprecated AVHWaccel structures contain no user-serviceable parts, so + * this function should not be used. + */ +attribute_deprecated +AVHWAccel *av_hwaccel_next(const AVHWAccel *hwaccel); +#endif + +#if FF_API_LOCKMGR +/** + * Lock operation used by lockmgr + * + * @deprecated Deprecated together with av_lockmgr_register(). + */ +enum AVLockOp { + AV_LOCK_CREATE, ///< Create a mutex + AV_LOCK_OBTAIN, ///< Lock the mutex + AV_LOCK_RELEASE, ///< Unlock the mutex + AV_LOCK_DESTROY, ///< Free mutex resources +}; + +/** + * Register a user provided lock manager supporting the operations + * specified by AVLockOp. The "mutex" argument to the function points + * to a (void *) where the lockmgr should store/get a pointer to a user + * allocated mutex. It is NULL upon AV_LOCK_CREATE and equal to the + * value left by the last call for all other ops. If the lock manager is + * unable to perform the op then it should leave the mutex in the same + * state as when it was called and return a non-zero value. However, + * when called with AV_LOCK_DESTROY the mutex will always be assumed to + * have been successfully destroyed. If av_lockmgr_register succeeds + * it will return a non-negative value, if it fails it will return a + * negative value and destroy all mutex and unregister all callbacks. + * av_lockmgr_register is not thread-safe, it must be called from a + * single thread before any calls which make use of locking are used. + * + * @param cb User defined callback. av_lockmgr_register invokes calls + * to this callback and the previously registered callback. + * The callback will be used to create more than one mutex + * each of which must be backed by its own underlying locking + * mechanism (i.e. do not use a single static object to + * implement your lock manager). If cb is set to NULL the + * lockmgr will be unregistered. + * + * @deprecated This function does nothing, and always returns 0. 
Be sure to + * build with thread support to get basic thread safety. + */ +attribute_deprecated +int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)); +#endif + +/** + * Get the type of the given codec. + */ +enum AVMediaType liteav_avcodec_get_type(enum AVCodecID codec_id); + +/** + * Get the name of a codec. + * @return a static string identifying the codec; never NULL + */ +const char *avcodec_get_name(enum AVCodecID id); + +/** + * @return a positive value if s is open (i.e. avcodec_open2() was called on it + * with no corresponding avcodec_close()), 0 otherwise. + */ +int avcodec_is_open(AVCodecContext *s); + +/** + * @return a non-zero number if codec is an encoder, zero otherwise + */ +int av_codec_is_encoder(const AVCodec *codec); + +/** + * @return a non-zero number if codec is a decoder, zero otherwise + */ +int av_codec_is_decoder(const AVCodec *codec); + +/** + * @return descriptor for given codec ID or NULL if no descriptor exists. + */ +const AVCodecDescriptor *liteav_avcodec_descriptor_get(enum AVCodecID id); + +/** + * Iterate over all codec descriptors known to libavcodec. + * + * @param prev previous descriptor. NULL to get the first descriptor. + * + * @return next descriptor or NULL after the last descriptor + */ +const AVCodecDescriptor *liteav_avcodec_descriptor_next(const AVCodecDescriptor *prev); + +/** + * @return codec descriptor with the given name or NULL if no such descriptor + * exists. + */ +const AVCodecDescriptor *liteav_avcodec_descriptor_get_by_name(const char *name); + +/** + * Allocate a CPB properties structure and initialize its fields to default + * values. + * + * @param size if non-NULL, the size of the allocated struct will be written + * here. This is useful for embedding it in side data. 
+ * + * @return the newly allocated struct or NULL on failure + */ +AVCPBProperties *av_cpb_properties_alloc(size_t *size); + +/** + * @} + */ + +#endif /* AVCODEC_AVCODEC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/avdct.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/avdct.h new file mode 100644 index 0000000..90a6b0d --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/avdct.h @@ -0,0 +1,85 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVDCT_H +#define AVCODEC_AVDCT_H + +#include "libavutil/opt.h" + +/** + * AVDCT context. + * @note function pointers can be NULL if the specific features have been + * disabled at build time. + */ +typedef struct AVDCT { + const AVClass *av_class; + + void (*idct)(int16_t *block /* align 16 */); + + /** + * IDCT input permutation. + * Several optimized IDCTs need a permutated input (relative to the + * normal order of the reference IDCT). + * This permutation must be performed before the idct_put/add. 
+ * Note, normally this can be merged with the zigzag/alternate scan<br> + * An example to avoid confusion: + * - (->decode coeffs -> zigzag reorder -> dequant -> reference IDCT -> ...) + * - (x -> reference DCT -> reference IDCT -> x) + * - (x -> reference DCT -> simple_mmx_perm = idct_permutation + * -> simple_idct_mmx -> x) + * - (-> decode coeffs -> zigzag reorder -> simple_mmx_perm -> dequant + * -> simple_idct_mmx -> ...) + */ + uint8_t idct_permutation[64]; + + void (*fdct)(int16_t *block /* align 16 */); + + + /** + * DCT algorithm. + * must use AVOptions to set this field. + */ + int dct_algo; + + /** + * IDCT algorithm. + * must use AVOptions to set this field. + */ + int idct_algo; + + void (*get_pixels)(int16_t *block /* align 16 */, + const uint8_t *pixels /* align 8 */, + ptrdiff_t line_size); + + int bits_per_sample; +} AVDCT; + +/** + * Allocates a AVDCT context. + * This needs to be initialized with liteav_avcodec_dct_init() after optionally + * configuring it with AVOptions. + * + * To free it use liteav_av_free() + */ +AVDCT *liteav_avcodec_dct_alloc(void); +int liteav_avcodec_dct_init(AVDCT *); + +const AVClass *liteav_avcodec_dct_get_class(void); + +#endif /* AVCODEC_AVDCT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/avfft.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/avfft.h new file mode 100644 index 0000000..5aa411b --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/avfft.h @@ -0,0 +1,119 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVFFT_H +#define AVCODEC_AVFFT_H + +/** + * @file + * @ingroup lavc_fft + * FFT functions + */ + +/** + * @defgroup lavc_fft FFT functions + * @ingroup lavc_misc + * + * @{ + */ + +typedef float FFTSample; + +typedef struct FFTComplex { + FFTSample re, im; +} FFTComplex; + +typedef struct FFTContext FFTContext; + +/** + * Set up a complex FFT. + * @param nbits log2 of the length of the input array + * @param inverse if 0 perform the forward transform, if 1 perform the inverse + */ +FFTContext *liteav_av_fft_init(int nbits, int inverse); + +/** + * Do the permutation needed BEFORE calling liteav_ff_fft_calc(). + */ +void liteav_av_fft_permute(FFTContext *s, FFTComplex *z); + +/** + * Do a complex FFT with the parameters defined in liteav_av_fft_init(). The + * input data must be permuted before. No 1.0/sqrt(n) normalization is done. 
+ */ +void liteav_av_fft_calc(FFTContext *s, FFTComplex *z); + +void liteav_av_fft_end(FFTContext *s); + +FFTContext *liteav_av_mdct_init(int nbits, int inverse, double scale); +void liteav_av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); +void liteav_av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input); +void liteav_av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); +void liteav_av_mdct_end(FFTContext *s); + +/* Real Discrete Fourier Transform */ + +enum RDFTransformType { + DFT_R2C, + IDFT_C2R, + IDFT_R2C, + DFT_C2R, +}; + +typedef struct RDFTContext RDFTContext; + +/** + * Set up a real FFT. + * @param nbits log2 of the length of the input array + * @param trans the type of transform + */ +RDFTContext *liteav_av_rdft_init(int nbits, enum RDFTransformType trans); +void liteav_av_rdft_calc(RDFTContext *s, FFTSample *data); +void liteav_av_rdft_end(RDFTContext *s); + +/* Discrete Cosine Transform */ + +typedef struct DCTContext DCTContext; + +enum DCTTransformType { + DCT_II = 0, + DCT_III, + DCT_I, + DST_I, +}; + +/** + * Set up DCT. 
+ * + * @param nbits size of the input array: + * (1 << nbits) for DCT-II, DCT-III and DST-I + * (1 << nbits) + 1 for DCT-I + * @param type the type of transform + * + * @note the first element of the input of DST-I is ignored + */ +DCTContext *liteav_av_dct_init(int nbits, enum DCTTransformType type); +void liteav_av_dct_calc(DCTContext *s, FFTSample *data); +void liteav_av_dct_end (DCTContext *s); + +/** + * @} + */ + +#endif /* AVCODEC_AVFFT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/bytestream.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/bytestream.h new file mode 100644 index 0000000..7be7fc2 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/bytestream.h @@ -0,0 +1,376 @@ +/* + * Bytestream functions + * copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr> + * Copyright (c) 2012 Aneesh Dogra (lionaneesh) <lionaneesh@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_BYTESTREAM_H +#define AVCODEC_BYTESTREAM_H + +#include <stdint.h> +#include <string.h> + +#include "libavutil/avassert.h" +#include "libavutil/common.h" +#include "libavutil/intreadwrite.h" + +typedef struct GetByteContext { + const uint8_t *buffer, *buffer_end, *buffer_start; +} GetByteContext; + +typedef struct PutByteContext { + uint8_t *buffer, *buffer_end, *buffer_start; + int eof; +} PutByteContext; + +#define DEF(type, name, bytes, read, write) \ +static av_always_inline type bytestream_get_ ## name(const uint8_t **b) \ +{ \ + (*b) += bytes; \ + return read(*b - bytes); \ +} \ +static av_always_inline void bytestream_put_ ## name(uint8_t **b, \ + const type value) \ +{ \ + write(*b, value); \ + (*b) += bytes; \ +} \ +static av_always_inline void bytestream2_put_ ## name ## u(PutByteContext *p, \ + const type value) \ +{ \ + bytestream_put_ ## name(&p->buffer, value); \ +} \ +static av_always_inline void bytestream2_put_ ## name(PutByteContext *p, \ + const type value) \ +{ \ + if (!p->eof && (p->buffer_end - p->buffer >= bytes)) { \ + write(p->buffer, value); \ + p->buffer += bytes; \ + } else \ + p->eof = 1; \ +} \ +static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g) \ +{ \ + return bytestream_get_ ## name(&g->buffer); \ +} \ +static av_always_inline type bytestream2_get_ ## name(GetByteContext *g) \ +{ \ + if (g->buffer_end - g->buffer < bytes) { \ + g->buffer = g->buffer_end; \ + return 0; \ + } \ + return bytestream2_get_ ## name ## u(g); \ +} \ +static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g) \ +{ \ + if (g->buffer_end - g->buffer < bytes) \ + return 0; \ + return read(g->buffer); \ +} + +DEF(uint64_t, le64, 8, AV_RL64, AV_WL64) +DEF(unsigned int, le32, 4, 
AV_RL32, AV_WL32) +DEF(unsigned int, le24, 3, AV_RL24, AV_WL24) +DEF(unsigned int, le16, 2, AV_RL16, AV_WL16) +DEF(uint64_t, be64, 8, AV_RB64, AV_WB64) +DEF(unsigned int, be32, 4, AV_RB32, AV_WB32) +DEF(unsigned int, be24, 3, AV_RB24, AV_WB24) +DEF(unsigned int, be16, 2, AV_RB16, AV_WB16) +DEF(unsigned int, byte, 1, AV_RB8 , AV_WB8) + +#if AV_HAVE_BIGENDIAN +# define bytestream2_get_ne16 bytestream2_get_be16 +# define bytestream2_get_ne24 bytestream2_get_be24 +# define bytestream2_get_ne32 bytestream2_get_be32 +# define bytestream2_get_ne64 bytestream2_get_be64 +# define bytestream2_get_ne16u bytestream2_get_be16u +# define bytestream2_get_ne24u bytestream2_get_be24u +# define bytestream2_get_ne32u bytestream2_get_be32u +# define bytestream2_get_ne64u bytestream2_get_be64u +# define bytestream2_put_ne16 bytestream2_put_be16 +# define bytestream2_put_ne24 bytestream2_put_be24 +# define bytestream2_put_ne32 bytestream2_put_be32 +# define bytestream2_put_ne64 bytestream2_put_be64 +# define bytestream2_peek_ne16 bytestream2_peek_be16 +# define bytestream2_peek_ne24 bytestream2_peek_be24 +# define bytestream2_peek_ne32 bytestream2_peek_be32 +# define bytestream2_peek_ne64 bytestream2_peek_be64 +#else +# define bytestream2_get_ne16 bytestream2_get_le16 +# define bytestream2_get_ne24 bytestream2_get_le24 +# define bytestream2_get_ne32 bytestream2_get_le32 +# define bytestream2_get_ne64 bytestream2_get_le64 +# define bytestream2_get_ne16u bytestream2_get_le16u +# define bytestream2_get_ne24u bytestream2_get_le24u +# define bytestream2_get_ne32u bytestream2_get_le32u +# define bytestream2_get_ne64u bytestream2_get_le64u +# define bytestream2_put_ne16 bytestream2_put_le16 +# define bytestream2_put_ne24 bytestream2_put_le24 +# define bytestream2_put_ne32 bytestream2_put_le32 +# define bytestream2_put_ne64 bytestream2_put_le64 +# define bytestream2_peek_ne16 bytestream2_peek_le16 +# define bytestream2_peek_ne24 bytestream2_peek_le24 +# define bytestream2_peek_ne32 
bytestream2_peek_le32 +# define bytestream2_peek_ne64 bytestream2_peek_le64 +#endif + +static av_always_inline void bytestream2_init(GetByteContext *g, + const uint8_t *buf, + int buf_size) +{ + av_assert0(buf_size >= 0); + g->buffer = buf; + g->buffer_start = buf; + g->buffer_end = buf + buf_size; +} + +static av_always_inline void bytestream2_init_writer(PutByteContext *p, + uint8_t *buf, + int buf_size) +{ + av_assert0(buf_size >= 0); + p->buffer = buf; + p->buffer_start = buf; + p->buffer_end = buf + buf_size; + p->eof = 0; +} + +static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g) +{ + return g->buffer_end - g->buffer; +} + +static av_always_inline unsigned int bytestream2_get_bytes_left_p(PutByteContext *p) +{ + return p->buffer_end - p->buffer; +} + +static av_always_inline void bytestream2_skip(GetByteContext *g, + unsigned int size) +{ + g->buffer += FFMIN(g->buffer_end - g->buffer, size); +} + +static av_always_inline void bytestream2_skipu(GetByteContext *g, + unsigned int size) +{ + g->buffer += size; +} + +static av_always_inline void bytestream2_skip_p(PutByteContext *p, + unsigned int size) +{ + int size2; + if (p->eof) + return; + size2 = FFMIN(p->buffer_end - p->buffer, size); + if (size2 != size) + p->eof = 1; + p->buffer += size2; +} + +static av_always_inline int bytestream2_tell(GetByteContext *g) +{ + return (int)(g->buffer - g->buffer_start); +} + +static av_always_inline int bytestream2_tell_p(PutByteContext *p) +{ + return (int)(p->buffer - p->buffer_start); +} + +static av_always_inline int bytestream2_size(GetByteContext *g) +{ + return (int)(g->buffer_end - g->buffer_start); +} + +static av_always_inline int bytestream2_size_p(PutByteContext *p) +{ + return (int)(p->buffer_end - p->buffer_start); +} + +static av_always_inline int bytestream2_seek(GetByteContext *g, + int offset, + int whence) +{ + switch (whence) { + case SEEK_CUR: + offset = av_clip(offset, -(g->buffer - g->buffer_start), + g->buffer_end - 
g->buffer); + g->buffer += offset; + break; + case SEEK_END: + offset = av_clip(offset, -(g->buffer_end - g->buffer_start), 0); + g->buffer = g->buffer_end + offset; + break; + case SEEK_SET: + offset = av_clip(offset, 0, g->buffer_end - g->buffer_start); + g->buffer = g->buffer_start + offset; + break; + default: + return AVERROR(EINVAL); + } + return bytestream2_tell(g); +} + +static av_always_inline int bytestream2_seek_p(PutByteContext *p, + int offset, + int whence) +{ + p->eof = 0; + switch (whence) { + case SEEK_CUR: + if (p->buffer_end - p->buffer < offset) + p->eof = 1; + offset = av_clip(offset, -(p->buffer - p->buffer_start), + p->buffer_end - p->buffer); + p->buffer += offset; + break; + case SEEK_END: + if (offset > 0) + p->eof = 1; + offset = av_clip(offset, -(p->buffer_end - p->buffer_start), 0); + p->buffer = p->buffer_end + offset; + break; + case SEEK_SET: + if (p->buffer_end - p->buffer_start < offset) + p->eof = 1; + offset = av_clip(offset, 0, p->buffer_end - p->buffer_start); + p->buffer = p->buffer_start + offset; + break; + default: + return AVERROR(EINVAL); + } + return bytestream2_tell_p(p); +} + +static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, + uint8_t *dst, + unsigned int size) +{ + int size2 = FFMIN(g->buffer_end - g->buffer, size); + memcpy(dst, g->buffer, size2); + g->buffer += size2; + return size2; +} + +static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, + uint8_t *dst, + unsigned int size) +{ + memcpy(dst, g->buffer, size); + g->buffer += size; + return size; +} + +static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, + const uint8_t *src, + unsigned int size) +{ + int size2; + if (p->eof) + return 0; + size2 = FFMIN(p->buffer_end - p->buffer, size); + if (size2 != size) + p->eof = 1; + memcpy(p->buffer, src, size2); + p->buffer += size2; + return size2; +} + +static av_always_inline unsigned int bytestream2_put_bufferu(PutByteContext *p, + const 
uint8_t *src, + unsigned int size) +{ + memcpy(p->buffer, src, size); + p->buffer += size; + return size; +} + +static av_always_inline void bytestream2_set_buffer(PutByteContext *p, + const uint8_t c, + unsigned int size) +{ + int size2; + if (p->eof) + return; + size2 = FFMIN(p->buffer_end - p->buffer, size); + if (size2 != size) + p->eof = 1; + memset(p->buffer, c, size2); + p->buffer += size2; +} + +static av_always_inline void bytestream2_set_bufferu(PutByteContext *p, + const uint8_t c, + unsigned int size) +{ + memset(p->buffer, c, size); + p->buffer += size; +} + +static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p) +{ + return p->eof; +} + +static av_always_inline unsigned int bytestream2_copy_bufferu(PutByteContext *p, + GetByteContext *g, + unsigned int size) +{ + memcpy(p->buffer, g->buffer, size); + p->buffer += size; + g->buffer += size; + return size; +} + +static av_always_inline unsigned int bytestream2_copy_buffer(PutByteContext *p, + GetByteContext *g, + unsigned int size) +{ + int size2; + + if (p->eof) + return 0; + size = FFMIN(g->buffer_end - g->buffer, size); + size2 = FFMIN(p->buffer_end - p->buffer, size); + if (size2 != size) + p->eof = 1; + + return bytestream2_copy_bufferu(p, g, size2); +} + +static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b, + uint8_t *dst, + unsigned int size) +{ + memcpy(dst, *b, size); + (*b) += size; + return size; +} + +static av_always_inline void bytestream_put_buffer(uint8_t **b, + const uint8_t *src, + unsigned int size) +{ + memcpy(*b, src, size); + (*b) += size; +} + +#endif /* AVCODEC_BYTESTREAM_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/d3d11va.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/d3d11va.h new file mode 100644 index 0000000..89a7170 --- /dev/null +++ 
b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/d3d11va.h @@ -0,0 +1,113 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Direct3D11 HW acceleration + * + * copyright (c) 2009 Laurent Aimar + * copyright (c) 2015 Steve Lhomme + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_D3D11VA_H +#define AVCODEC_D3D11VA_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_d3d11va + * Public libavcodec D3D11VA header. + */ + +#if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602 +#undef _WIN32_WINNT +#define _WIN32_WINNT 0x0602 +#endif + +#include <stdint.h> +#include <d3d11.h> + +/** + * @defgroup lavc_codec_hwaccel_d3d11va Direct3D11 + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for Direct3D11 and old UVD/UVD+ ATI video cards +#define FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO 2 ///< Work around for Direct3D11 and old Intel GPUs with ClearVideo interface + +/** + * This structure is used to provides the necessary configurations and data + * to the Direct3D11 FFmpeg HWAccel implementation. + * + * The application must make it available as AVCodecContext.hwaccel_context. 
+ * + * Use liteav_av_d3d11va_alloc_context() exclusively to allocate an AVD3D11VAContext. + */ +typedef struct AVD3D11VAContext { + /** + * D3D11 decoder object + */ + ID3D11VideoDecoder *decoder; + + /** + * D3D11 VideoContext + */ + ID3D11VideoContext *video_context; + + /** + * D3D11 configuration used to create the decoder + */ + D3D11_VIDEO_DECODER_CONFIG *cfg; + + /** + * The number of surface in the surface array + */ + unsigned surface_count; + + /** + * The array of Direct3D surfaces used to create the decoder + */ + ID3D11VideoDecoderOutputView **surface; + + /** + * A bit field configuring the workarounds needed for using the decoder + */ + uint64_t workaround; + + /** + * Private to the FFmpeg AVHWAccel implementation + */ + unsigned report_id; + + /** + * Mutex to access video_context + */ + HANDLE context_mutex; +} AVD3D11VAContext; + +/** + * Allocate an AVD3D11VAContext. + * + * @return Newly-allocated AVD3D11VAContext or NULL on failure. + */ +AVD3D11VAContext *liteav_av_d3d11va_alloc_context(void); + +/** + * @} + */ + +#endif /* AVCODEC_D3D11VA_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/dirac.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/dirac.h new file mode 100644 index 0000000..549b8b1 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/dirac.h @@ -0,0 +1,132 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2007 Marco Gerards <marco@gnu.org> + * Copyright (C) 2009 David Conrad + * Copyright (C) 2011 Jordi Ortiz + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_DIRAC_H +#define AVCODEC_DIRAC_H + +/** + * @file + * Interface to Dirac Decoder/Encoder + * @author Marco Gerards <marco@gnu.org> + * @author David Conrad + * @author Jordi Ortiz + */ + +#include "avcodec.h" + +/** + * The spec limits the number of wavelet decompositions to 4 for both + * level 1 (VC-2) and 128 (long-gop default). + * 5 decompositions is the maximum before >16-bit buffers are needed. + * Schroedinger allows this for DD 9,7 and 13,7 wavelets only, limiting + * the others to 4 decompositions (or 3 for the fidelity filter). + * + * We use this instead of MAX_DECOMPOSITIONS to save some memory. 
+ */ +#define MAX_DWT_LEVELS 5 + +/** + * Parse code values: + * + * Dirac Specification -> + * 9.6.1 Table 9.1 + * + * VC-2 Specification -> + * 10.4.1 Table 10.1 + */ + +enum DiracParseCodes { + DIRAC_PCODE_SEQ_HEADER = 0x00, + DIRAC_PCODE_END_SEQ = 0x10, + DIRAC_PCODE_AUX = 0x20, + DIRAC_PCODE_PAD = 0x30, + DIRAC_PCODE_PICTURE_CODED = 0x08, + DIRAC_PCODE_PICTURE_RAW = 0x48, + DIRAC_PCODE_PICTURE_LOW_DEL = 0xC8, + DIRAC_PCODE_PICTURE_HQ = 0xE8, + DIRAC_PCODE_INTER_NOREF_CO1 = 0x0A, + DIRAC_PCODE_INTER_NOREF_CO2 = 0x09, + DIRAC_PCODE_INTER_REF_CO1 = 0x0D, + DIRAC_PCODE_INTER_REF_CO2 = 0x0E, + DIRAC_PCODE_INTRA_REF_CO = 0x0C, + DIRAC_PCODE_INTRA_REF_RAW = 0x4C, + DIRAC_PCODE_INTRA_REF_PICT = 0xCC, + DIRAC_PCODE_MAGIC = 0x42424344, +}; + +typedef struct DiracVersionInfo { + int major; + int minor; +} DiracVersionInfo; + +typedef struct AVDiracSeqHeader { + unsigned width; + unsigned height; + uint8_t chroma_format; ///< 0: 444 1: 422 2: 420 + + uint8_t interlaced; + uint8_t top_field_first; + + uint8_t frame_rate_index; ///< index into dirac_frame_rate[] + uint8_t aspect_ratio_index; ///< index into dirac_aspect_ratio[] + + uint16_t clean_width; + uint16_t clean_height; + uint16_t clean_left_offset; + uint16_t clean_right_offset; + + uint8_t pixel_range_index; ///< index into dirac_pixel_range_presets[] + uint8_t color_spec_index; ///< index into dirac_color_spec_presets[] + + int profile; + int level; + + AVRational framerate; + AVRational sample_aspect_ratio; + + enum AVPixelFormat pix_fmt; + enum AVColorRange color_range; + enum AVColorPrimaries color_primaries; + enum AVColorTransferCharacteristic color_trc; + enum AVColorSpace colorspace; + + DiracVersionInfo version; + int bit_depth; +} AVDiracSeqHeader; + +/** + * Parse a Dirac sequence header. + * + * @param dsh this function will allocate and fill an AVDiracSeqHeader struct + * and write it into this pointer. The caller must free it with + * liteav_av_free(). 
+ * @param buf the data buffer + * @param buf_size the size of the data buffer in bytes + * @param log_ctx if non-NULL, this function will log errors here + * @return 0 on success, a negative AVERROR code on failure + */ +int liteav_av_dirac_parse_sequence_header(AVDiracSeqHeader **dsh, + const uint8_t *buf, size_t buf_size, + void *log_ctx); + +#endif /* AVCODEC_DIRAC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/dv_profile.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/dv_profile.h new file mode 100644 index 0000000..74068ec --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/dv_profile.h @@ -0,0 +1,84 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_DV_PROFILE_H +#define AVCODEC_DV_PROFILE_H + +#include <stdint.h> + +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" +#include "avcodec.h" + +/* minimum number of bytes to read from a DV stream in order to + * determine the profile */ +#define DV_PROFILE_BYTES (6 * 80) /* 6 DIF blocks */ + + +/* + * AVDVProfile is used to express the differences between various + * DV flavors. For now it's primarily used for differentiating + * 525/60 and 625/50, but the plans are to use it for various + * DV specs as well (e.g. SMPTE314M vs. IEC 61834). + */ +typedef struct AVDVProfile { + int dsf; /* value of the dsf in the DV header */ + int video_stype; /* stype for VAUX source pack */ + int frame_size; /* total size of one frame in bytes */ + int difseg_size; /* number of DIF segments per DIF channel */ + int n_difchan; /* number of DIF channels per frame */ + AVRational time_base; /* 1/framerate */ + int ltc_divisor; /* FPS from the LTS standpoint */ + int height; /* picture height in pixels */ + int width; /* picture width in pixels */ + AVRational sar[2]; /* sample aspect ratios for 4:3 and 16:9 */ + enum AVPixelFormat pix_fmt; /* picture pixel format */ + int bpm; /* blocks per macroblock */ + const uint8_t *block_sizes; /* AC block sizes, in bits */ + int audio_stride; /* size of audio_shuffle table */ + int audio_min_samples[3]; /* min amount of audio samples */ + /* for 48kHz, 44.1kHz and 32kHz */ + int audio_samples_dist[5]; /* how many samples are supposed to be */ + /* in each frame in a 5 frames window */ + const uint8_t (*audio_shuffle)[9]; /* PCM shuffling table */ +} AVDVProfile; + +/** + * Get a DV profile for the provided compressed frame. 
+ * + * @param sys the profile used for the previous frame, may be NULL + * @param frame the compressed data buffer + * @param buf_size size of the buffer in bytes + * @return the DV profile for the supplied data or NULL on failure + */ +const AVDVProfile *liteav_av_dv_frame_profile(const AVDVProfile *sys, + const uint8_t *frame, unsigned buf_size); + +/** + * Get a DV profile for the provided stream parameters. + */ +const AVDVProfile *liteav_av_dv_codec_profile(int width, int height, enum AVPixelFormat pix_fmt); + +/** + * Get a DV profile for the provided stream parameters. + * The frame rate is used as a best-effort parameter. + */ +const AVDVProfile *liteav_av_dv_codec_profile2(int width, int height, enum AVPixelFormat pix_fmt, AVRational frame_rate); + +#endif /* AVCODEC_DV_PROFILE_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/dxva2.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/dxva2.h new file mode 100644 index 0000000..22c9399 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/dxva2.h @@ -0,0 +1,93 @@ +/* + * DXVA2 HW acceleration + * + * copyright (c) 2009 Laurent Aimar + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_DXVA2_H +#define AVCODEC_DXVA2_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_dxva2 + * Public libavcodec DXVA2 header. + */ + +#if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602 +#undef _WIN32_WINNT +#define _WIN32_WINNT 0x0602 +#endif + +#include <stdint.h> +#include <d3d9.h> +#include <dxva2api.h> + +/** + * @defgroup lavc_codec_hwaccel_dxva2 DXVA2 + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards +#define FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO 2 ///< Work around for DXVA2 and old Intel GPUs with ClearVideo interface + +/** + * This structure is used to provides the necessary configurations and data + * to the DXVA2 FFmpeg HWAccel implementation. + * + * The application must make it available as AVCodecContext.hwaccel_context. 
+ */ +struct dxva_context { + /** + * DXVA2 decoder object + */ + IDirectXVideoDecoder *decoder; + + /** + * DXVA2 configuration used to create the decoder + */ + const DXVA2_ConfigPictureDecode *cfg; + + /** + * The number of surface in the surface array + */ + unsigned surface_count; + + /** + * The array of Direct3D surfaces used to create the decoder + */ + LPDIRECT3DSURFACE9 *surface; + + /** + * A bit field configuring the workarounds needed for using the decoder + */ + uint64_t workaround; + + /** + * Private to the FFmpeg AVHWAccel implementation + */ + unsigned report_id; +}; + +/** + * @} + */ + +#endif /* AVCODEC_DXVA2_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/jni.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/jni.h new file mode 100644 index 0000000..85c9ae5 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/jni.h @@ -0,0 +1,47 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * JNI public API functions + * + * Copyright (c) 2015-2016 Matthieu Bouron <matthieu.bouron stupeflix.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_JNI_H +#define AVCODEC_JNI_H + +/* + * Manually set a Java virtual machine which will be used to retrieve the JNI + * environment. Once a Java VM is set it cannot be changed afterwards, meaning + * you can call multiple times liteav_av_jni_set_java_vm with the same Java VM pointer + * however it will error out if you try to set a different Java VM. + * + * @param vm Java virtual machine + * @param log_ctx context used for logging, can be NULL + * @return 0 on success, < 0 otherwise + */ +int liteav_av_jni_set_java_vm(void *vm, void *log_ctx); + +/* + * Get the Java virtual machine which has been set with liteav_av_jni_set_java_vm. + * + * @param vm Java virtual machine + * @return a pointer to the Java virtual machine + */ +void *liteav_av_jni_get_java_vm(void *log_ctx); + +#endif /* AVCODEC_JNI_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/mediacodec.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/mediacodec.h new file mode 100644 index 0000000..3a716d1 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/mediacodec.h @@ -0,0 +1,102 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Android MediaCodec public API + * + * Copyright (c) 2016 Matthieu Bouron <matthieu.bouron stupeflix.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_MEDIACODEC_H +#define AVCODEC_MEDIACODEC_H + +#include "libavcodec/avcodec.h" + +/** + * This structure holds a reference to a android/view/Surface object that will + * be used as output by the decoder. + * + */ +typedef struct AVMediaCodecContext { + + /** + * android/view/Surface object reference. + */ + void *surface; + +} AVMediaCodecContext; + +/** + * Allocate and initialize a MediaCodec context. + * + * When decoding with MediaCodec is finished, the caller must free the + * MediaCodec context with liteav_av_mediacodec_default_free. + * + * @return a pointer to a newly allocated AVMediaCodecContext on success, NULL otherwise + */ +AVMediaCodecContext *liteav_av_mediacodec_alloc_context(void); + +/** + * Convenience function that sets up the MediaCodec context. + * + * @param avctx codec context + * @param ctx MediaCodec context to initialize + * @param surface reference to an android/view/Surface + * @return 0 on success, < 0 otherwise + */ +int liteav_av_mediacodec_default_init(AVCodecContext *avctx, AVMediaCodecContext *ctx, void *surface); + +/** + * This function must be called to free the MediaCodec context initialized with + * liteav_av_mediacodec_default_init(). + * + * @param avctx codec context + */ +void liteav_av_mediacodec_default_free(AVCodecContext *avctx); + +/** + * Opaque structure representing a MediaCodec buffer to render. 
+ */ +typedef struct MediaCodecBuffer AVMediaCodecBuffer; + +/** + * Release a MediaCodec buffer and render it to the surface that is associated + * with the decoder. This function should only be called once on a given + * buffer, once released the underlying buffer returns to the codec, thus + * subsequent calls to this function will have no effect. + * + * @param buffer the buffer to render + * @param render 1 to release and render the buffer to the surface or 0 to + * discard the buffer + * @return 0 on success, < 0 otherwise + */ +int liteav_av_mediacodec_release_buffer(AVMediaCodecBuffer *buffer, int render); + +/** + * Release a MediaCodec buffer and render it at the given time to the surface + * that is associated with the decoder. The timestamp must be within one second + * of the current java/lang/System#nanoTime() (which is implemented using + * CLOCK_MONOTONIC on Android). See the Android MediaCodec documentation + * of android/media/MediaCodec#releaseOutputBuffer(int,long) for more details. + * + * @param buffer the buffer to render + * @param time timestamp in nanoseconds of when to render the buffer + * @return 0 on success, < 0 otherwise + */ +int liteav_av_mediacodec_render_buffer_at_time(AVMediaCodecBuffer *buffer, int64_t time); + +#endif /* AVCODEC_MEDIACODEC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/qsv.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/qsv.h new file mode 100644 index 0000000..04eaf12 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/qsv.h @@ -0,0 +1,108 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Intel MediaSDK QSV public API + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_QSV_H +#define AVCODEC_QSV_H + +#include <mfx/mfxvideo.h> + +#include "libavutil/buffer.h" + +/** + * This struct is used for communicating QSV parameters between libavcodec and + * the caller. It is managed by the caller and must be assigned to + * AVCodecContext.hwaccel_context. + * - decoding: hwaccel_context must be set on return from the get_format() + * callback + * - encoding: hwaccel_context must be set before avcodec_open2() + */ +typedef struct AVQSVContext { + /** + * If non-NULL, the session to use for encoding or decoding. + * Otherwise, libavcodec will try to create an internal session. + */ + mfxSession session; + + /** + * The IO pattern to use. + */ + int iopattern; + + /** + * Extra buffers to pass to encoder or decoder initialization. + */ + mfxExtBuffer **ext_buffers; + int nb_ext_buffers; + + /** + * Encoding only. If this field is set to non-zero by the caller, libavcodec + * will create an mfxExtOpaqueSurfaceAlloc extended buffer and pass it to + * the encoder initialization. This only makes sense if iopattern is also + * set to MFX_IOPATTERN_IN_OPAQUE_MEMORY. 
+ * + * The number of allocated opaque surfaces will be the sum of the number + * required by the encoder and the user-provided value nb_opaque_surfaces. + * The array of the opaque surfaces will be exported to the caller through + * the opaque_surfaces field. + */ + int opaque_alloc; + + /** + * Encoding only, and only if opaque_alloc is set to non-zero. Before + * calling avcodec_open2(), the caller should set this field to the number + * of extra opaque surfaces to allocate beyond what is required by the + * encoder. + * + * On return from avcodec_open2(), this field will be set by libavcodec to + * the total number of allocated opaque surfaces. + */ + int nb_opaque_surfaces; + + /** + * Encoding only, and only if opaque_alloc is set to non-zero. On return + * from avcodec_open2(), this field will be used by libavcodec to export the + * array of the allocated opaque surfaces to the caller, so they can be + * passed to other parts of the pipeline. + * + * The buffer reference exported here is owned and managed by libavcodec, + * the callers should make their own reference with liteav_av_buffer_ref() and free + * it with liteav_av_buffer_unref() when it is no longer needed. + * + * The buffer data is an nb_opaque_surfaces-sized array of mfxFrameSurface1. + */ + AVBufferRef *opaque_surfaces; + + /** + * Encoding only, and only if opaque_alloc is set to non-zero. On return + * from avcodec_open2(), this field will be set to the surface type used in + * the opaque allocation request. + */ + int opaque_alloc_type; +} AVQSVContext; + +/** + * Allocate a new context. + * + * It must be freed by the caller with liteav_av_free(). 
+ */ +AVQSVContext *liteav_av_qsv_alloc_context(void); + +#endif /* AVCODEC_QSV_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/vaapi.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/vaapi.h new file mode 100644 index 0000000..2cf7da5 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/vaapi.h @@ -0,0 +1,86 @@ +/* + * Video Acceleration API (shared data between FFmpeg and the video player) + * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1 + * + * Copyright (C) 2008-2009 Splitted-Desktop Systems + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VAAPI_H +#define AVCODEC_VAAPI_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vaapi + * Public libavcodec VA API header. + */ + +#include <stdint.h> +#include "libavutil/attributes.h" +#include "version.h" + +#if FF_API_STRUCT_VAAPI_CONTEXT + +/** + * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding + * @ingroup lavc_codec_hwaccel + * @{ + */ + +/** + * This structure is used to share data between the FFmpeg library and + * the client video application. 
+ * This shall be zero-allocated and available as + * AVCodecContext.hwaccel_context. All user members can be set once + * during initialization or through each AVCodecContext.get_buffer() + * function call. In any case, they must be valid prior to calling + * decoding functions. + * + * Deprecated: use AVCodecContext.hw_frames_ctx instead. + */ +struct attribute_deprecated vaapi_context { + /** + * Window system dependent data + * + * - encoding: unused + * - decoding: Set by user + */ + void *display; + + /** + * Configuration ID + * + * - encoding: unused + * - decoding: Set by user + */ + uint32_t config_id; + + /** + * Context ID (video decode pipeline) + * + * - encoding: unused + * - decoding: Set by user + */ + uint32_t context_id; +}; + +/* @} */ + +#endif /* FF_API_STRUCT_VAAPI_CONTEXT */ + +#endif /* AVCODEC_VAAPI_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/vdpau.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/vdpau.h new file mode 100644 index 0000000..461bc59 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/vdpau.h @@ -0,0 +1,177 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * The Video Decode and Presentation API for UNIX (VDPAU) is used for + * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1. + * + * Copyright (C) 2008 NVIDIA + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VDPAU_H +#define AVCODEC_VDPAU_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vdpau + * Public libavcodec VDPAU header. + */ + + +/** + * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer + * @ingroup lavc_codec_hwaccel + * + * VDPAU hardware acceleration has two modules + * - VDPAU decoding + * - VDPAU presentation + * + * The VDPAU decoding module parses all headers using FFmpeg + * parsing mechanisms and uses VDPAU for the actual decoding. + * + * As per the current implementation, the actual decoding + * and rendering (API calls) are done as part of the VDPAU + * presentation (vo_vdpau.c) module. + * + * @{ + */ + +#include <vdpau/vdpau.h> + +#include "libavutil/avconfig.h" +#include "libavutil/attributes.h" + +#include "avcodec.h" +#include "version.h" + +struct AVCodecContext; +struct AVFrame; + +typedef int (*AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *, + const VdpPictureInfo *, uint32_t, + const VdpBitstreamBuffer *); + +/** + * This structure is used to share data between the libavcodec library and + * the client video application. + * The user shall allocate the structure via the av_alloc_vdpau_hwaccel + * function and make it available as + * AVCodecContext.hwaccel_context. Members can be set by the user once + * during initialization or through each AVCodecContext.get_buffer() + * function call. In any case, they must be valid prior to calling + * decoding functions. + * + * The size of this structure is not a part of the public ABI and must not + * be used outside of libavcodec. Use av_vdpau_alloc_context() to allocate an + * AVVDPAUContext. 
+ */ +typedef struct AVVDPAUContext { + /** + * VDPAU decoder handle + * + * Set by user. + */ + VdpDecoder decoder; + + /** + * VDPAU decoder render callback + * + * Set by the user. + */ + VdpDecoderRender *render; + + AVVDPAU_Render2 render2; +} AVVDPAUContext; + +/** + * @brief allocation function for AVVDPAUContext + * + * Allows extending the struct without breaking API/ABI + */ +AVVDPAUContext *av_alloc_vdpaucontext(void); + +AVVDPAU_Render2 av_vdpau_hwaccel_get_render2(const AVVDPAUContext *); +void av_vdpau_hwaccel_set_render2(AVVDPAUContext *, AVVDPAU_Render2); + +/** + * Associate a VDPAU device with a codec context for hardware acceleration. + * This function is meant to be called from the get_format() codec callback, + * or earlier. It can also be called after liteav_avcodec_flush_buffers() to change + * the underlying VDPAU device mid-stream (e.g. to recover from non-transparent + * display preemption). + * + * @note get_format() must return AV_PIX_FMT_VDPAU if this function completes + * successfully. + * + * @param avctx decoding context whose get_format() callback is invoked + * @param device VDPAU device handle to use for hardware acceleration + * @param get_proc_address VDPAU device driver + * @param flags zero of more OR'd AV_HWACCEL_FLAG_* flags + * + * @return 0 on success, an AVERROR code on failure. + */ +int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device, + VdpGetProcAddress *get_proc_address, unsigned flags); + +/** + * Gets the parameters to create an adequate VDPAU video surface for the codec + * context using VDPAU hardware decoding acceleration. + * + * @note Behavior is undefined if the context was not successfully bound to a + * VDPAU device using av_vdpau_bind_context(). 
+ * + * @param avctx the codec context being used for decoding the stream + * @param type storage space for the VDPAU video surface chroma type + * (or NULL to ignore) + * @param width storage space for the VDPAU video surface pixel width + * (or NULL to ignore) + * @param height storage space for the VDPAU video surface pixel height + * (or NULL to ignore) + * + * @return 0 on success, a negative AVERROR code on failure. + */ +int av_vdpau_get_surface_parameters(AVCodecContext *avctx, VdpChromaType *type, + uint32_t *width, uint32_t *height); + +/** + * Allocate an AVVDPAUContext. + * + * @return Newly-allocated AVVDPAUContext or NULL on failure. + */ +AVVDPAUContext *av_vdpau_alloc_context(void); + +#if FF_API_VDPAU_PROFILE +/** + * Get a decoder profile that should be used for initializing a VDPAU decoder. + * Should be called from the AVCodecContext.get_format() callback. + * + * @deprecated Use av_vdpau_bind_context() instead. + * + * @param avctx the codec context being used for decoding the stream + * @param profile a pointer into which the result will be written on success. + * The contents of profile are undefined if this function returns + * an error. + * + * @return 0 on success (non-negative), a negative AVERROR on failure. + */ +attribute_deprecated +int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile); +#endif + +/* @}*/ + +#endif /* AVCODEC_VDPAU_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/version.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/version.h new file mode 100644 index 0000000..c907524 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/version.h @@ -0,0 +1,146 @@ +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VERSION_H +#define AVCODEC_VERSION_H + +/** + * @file + * @ingroup libavc + * Libavcodec version macros. + */ + +#include "libavutil/version.h" + +#define LIBAVCODEC_VERSION_MAJOR 58 +#define LIBAVCODEC_VERSION_MINOR 35 +#define LIBAVCODEC_VERSION_MICRO 100 + +#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ + LIBAVCODEC_VERSION_MINOR, \ + LIBAVCODEC_VERSION_MICRO) +#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \ + LIBAVCODEC_VERSION_MINOR, \ + LIBAVCODEC_VERSION_MICRO) +#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT + +#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + * + * @note, when bumping the major version it is recommended to manually + * disable each FF_API_* in its own commit instead of disabling them all + * at once through the bump. This improves the git bisect-ability of the change. 
+ */ + +#ifndef FF_API_LOWRES +#define FF_API_LOWRES (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_DEBUG_MV +#define FF_API_DEBUG_MV (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_AVCTX_TIMEBASE +#define FF_API_AVCTX_TIMEBASE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_CODED_FRAME +#define FF_API_CODED_FRAME (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_SIDEDATA_ONLY_PKT +#define FF_API_SIDEDATA_ONLY_PKT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_WITHOUT_PREFIX +#define FF_API_WITHOUT_PREFIX (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_VDPAU_PROFILE +#define FF_API_VDPAU_PROFILE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_CONVERGENCE_DURATION +#define FF_API_CONVERGENCE_DURATION (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_AVPICTURE +#define FF_API_AVPICTURE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_AVPACKET_OLD_API +#define FF_API_AVPACKET_OLD_API (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_RTP_CALLBACK +#define FF_API_RTP_CALLBACK (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_VBV_DELAY +#define FF_API_VBV_DELAY (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_CODER_TYPE +#define FF_API_CODER_TYPE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_STAT_BITS +#define FF_API_STAT_BITS (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_PRIVATE_OPT +#define FF_API_PRIVATE_OPT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_ASS_TIMING +#define FF_API_ASS_TIMING (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_OLD_BSF +#define FF_API_OLD_BSF (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_COPY_CONTEXT +#define FF_API_COPY_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_GET_CONTEXT_DEFAULTS +#define FF_API_GET_CONTEXT_DEFAULTS (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_NVENC_OLD_NAME +#define FF_API_NVENC_OLD_NAME (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef 
FF_API_STRUCT_VAAPI_CONTEXT +#define FF_API_STRUCT_VAAPI_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_MERGE_SD_API +#define FF_API_MERGE_SD_API (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_TAG_STRING +#define FF_API_TAG_STRING (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_GETCHROMA +#define FF_API_GETCHROMA (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_CODEC_GET_SET +#define FF_API_CODEC_GET_SET (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_USER_VISIBLE_AVHWACCEL +#define FF_API_USER_VISIBLE_AVHWACCEL (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_LOCKMGR +#define FF_API_LOCKMGR (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_NEXT +#define FF_API_NEXT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_CODEC_NAME +#define FF_API_CODEC_NAME (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_EMU_EDGE +#define FF_API_EMU_EDGE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif + + +#endif /* AVCODEC_VERSION_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/videotoolbox.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/videotoolbox.h new file mode 100644 index 0000000..0289f5b --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/videotoolbox.h @@ -0,0 +1,128 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Videotoolbox hardware acceleration + * + * copyright (c) 2012 Sebastien Zwickert + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VIDEOTOOLBOX_H +#define AVCODEC_VIDEOTOOLBOX_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_videotoolbox + * Public libavcodec Videotoolbox header. + */ + +#include <stdint.h> + +#define Picture QuickdrawPicture +#include <VideoToolbox/VideoToolbox.h> +#undef Picture + +#include "libavcodec/avcodec.h" + +/** + * This struct holds all the information that needs to be passed + * between the caller and libavcodec for initializing Videotoolbox decoding. + * Its size is not a part of the public ABI, it must be allocated with + * liteav_av_videotoolbox_alloc_context() and freed with liteav_av_free(). + */ +typedef struct AVVideotoolboxContext { + /** + * Videotoolbox decompression session object. + * Created and freed the caller. + */ + VTDecompressionSessionRef session; + + /** + * The output callback that must be passed to the session. + * Set by av_videottoolbox_default_init() + */ + VTDecompressionOutputCallback output_callback; + + /** + * CVPixelBuffer Format Type that Videotoolbox will use for decoded frames. + * set by the caller. If this is set to 0, then no specific format is + * requested from the decoder, and its native format is output. + */ + OSType cv_pix_fmt_type; + + /** + * CoreMedia Format Description that Videotoolbox will use to create the decompression session. + * Set by the caller. + */ + CMVideoFormatDescriptionRef cm_fmt_desc; + + /** + * CoreMedia codec type that Videotoolbox will use to create the decompression session. + * Set by the caller. 
+ */ + int cm_codec_type; +} AVVideotoolboxContext; + +/** + * Allocate and initialize a Videotoolbox context. + * + * This function should be called from the get_format() callback when the caller + * selects the AV_PIX_FMT_VIDETOOLBOX format. The caller must then create + * the decoder object (using the output callback provided by libavcodec) that + * will be used for Videotoolbox-accelerated decoding. + * + * When decoding with Videotoolbox is finished, the caller must destroy the decoder + * object and free the Videotoolbox context using liteav_av_free(). + * + * @return the newly allocated context or NULL on failure + */ +AVVideotoolboxContext *liteav_av_videotoolbox_alloc_context(void); + +/** + * This is a convenience function that creates and sets up the Videotoolbox context using + * an internal implementation. + * + * @param avctx the corresponding codec context + * + * @return >= 0 on success, a negative AVERROR code on failure + */ +int liteav_av_videotoolbox_default_init(AVCodecContext *avctx); + +/** + * This is a convenience function that creates and sets up the Videotoolbox context using + * an internal implementation. + * + * @param avctx the corresponding codec context + * @param vtctx the Videotoolbox context to use + * + * @return >= 0 on success, a negative AVERROR code on failure + */ +int liteav_av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx); + +/** + * This function must be called to free the Videotoolbox context initialized with + * liteav_av_videotoolbox_default_init(). 
+ * + * @param avctx the corresponding codec context + */ +void liteav_av_videotoolbox_default_free(AVCodecContext *avctx); + +/** + * @} + */ + +#endif /* AVCODEC_VIDEOTOOLBOX_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/vorbis_parser.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/vorbis_parser.h new file mode 100644 index 0000000..428cde6 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/vorbis_parser.h @@ -0,0 +1,75 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * A public API for Vorbis parsing + * + * Determines the duration for each packet. + */ + +#ifndef AVCODEC_VORBIS_PARSER_H +#define AVCODEC_VORBIS_PARSER_H + +#include <stdint.h> + +typedef struct AVVorbisParseContext AVVorbisParseContext; + +/** + * Allocate and initialize the Vorbis parser using headers in the extradata. + */ +AVVorbisParseContext *liteav_av_vorbis_parse_init(const uint8_t *extradata, + int extradata_size); + +/** + * Free the parser and everything associated with it. 
+ */ +void liteav_av_vorbis_parse_free(AVVorbisParseContext **s); + +#define VORBIS_FLAG_HEADER 0x00000001 +#define VORBIS_FLAG_COMMENT 0x00000002 +#define VORBIS_FLAG_SETUP 0x00000004 + +/** + * Get the duration for a Vorbis packet. + * + * If @p flags is @c NULL, + * special frames are considered invalid. + * + * @param s Vorbis parser context + * @param buf buffer containing a Vorbis frame + * @param buf_size size of the buffer + * @param flags flags for special frames + */ +int liteav_av_vorbis_parse_frame_flags(AVVorbisParseContext *s, const uint8_t *buf, + int buf_size, int *flags); + +/** + * Get the duration for a Vorbis packet. + * + * @param s Vorbis parser context + * @param buf buffer containing a Vorbis frame + * @param buf_size size of the buffer + */ +int liteav_av_vorbis_parse_frame(AVVorbisParseContext *s, const uint8_t *buf, + int buf_size); + +void liteav_av_vorbis_parse_reset(AVVorbisParseContext *s); + +#endif /* AVCODEC_VORBIS_PARSER_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/xvmc.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/xvmc.h new file mode 100644 index 0000000..92a95e1 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavcodec/xvmc.h @@ -0,0 +1,171 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2003 Ivan Kalvachev + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_XVMC_H +#define AVCODEC_XVMC_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_xvmc + * Public libavcodec XvMC header. + */ + +#include <X11/extensions/XvMC.h> + +#include "libavutil/attributes.h" +#include "version.h" +#include "avcodec.h" + +/** + * @defgroup lavc_codec_hwaccel_xvmc XvMC + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct + the number is 1337 speak for the letters IDCT MCo (motion compensation) */ + +struct attribute_deprecated xvmc_pix_fmt { + /** The field contains the special constant value AV_XVMC_ID. + It is used as a test that the application correctly uses the API, + and that there is no corruption caused by pixel routines. + - application - set during initialization + - libavcodec - unchanged + */ + int xvmc_id; + + /** Pointer to the block array allocated by XvMCCreateBlocks(). + The array has to be freed by XvMCDestroyBlocks(). + Each group of 64 values represents one data block of differential + pixel information (in MoCo mode) or coefficients for IDCT. + - application - set the pointer during initialization + - libavcodec - fills coefficients/pixel data into the array + */ + short* data_blocks; + + /** Pointer to the macroblock description array allocated by + XvMCCreateMacroBlocks() and freed by XvMCDestroyMacroBlocks(). + - application - set the pointer during initialization + - libavcodec - fills description data into the array + */ + XvMCMacroBlock* mv_blocks; + + /** Number of macroblock descriptions that can be stored in the mv_blocks + array. 
+ - application - set during initialization + - libavcodec - unchanged + */ + int allocated_mv_blocks; + + /** Number of blocks that can be stored at once in the data_blocks array. + - application - set during initialization + - libavcodec - unchanged + */ + int allocated_data_blocks; + + /** Indicate that the hardware would interpret data_blocks as IDCT + coefficients and perform IDCT on them. + - application - set during initialization + - libavcodec - unchanged + */ + int idct; + + /** In MoCo mode it indicates that intra macroblocks are assumed to be in + unsigned format; same as the XVMC_INTRA_UNSIGNED flag. + - application - set during initialization + - libavcodec - unchanged + */ + int unsigned_intra; + + /** Pointer to the surface allocated by XvMCCreateSurface(). + It has to be freed by XvMCDestroySurface() on application exit. + It identifies the frame and its state on the video hardware. + - application - set during initialization + - libavcodec - unchanged + */ + XvMCSurface* p_surface; + +/** Set by the decoder before calling liteav_ff_draw_horiz_band(), + needed by the XvMCRenderSurface function. */ +//@{ + /** Pointer to the surface used as past reference + - application - unchanged + - libavcodec - set + */ + XvMCSurface* p_past_surface; + + /** Pointer to the surface used as future reference + - application - unchanged + - libavcodec - set + */ + XvMCSurface* p_future_surface; + + /** top/bottom field or frame + - application - unchanged + - libavcodec - set + */ + unsigned int picture_structure; + + /** XVMC_SECOND_FIELD - 1st or 2nd field in the sequence + - application - unchanged + - libavcodec - set + */ + unsigned int flags; +//}@ + + /** Number of macroblock descriptions in the mv_blocks array + that have already been passed to the hardware. + - application - zeroes it on get_buffer(). + A successful liteav_ff_draw_horiz_band() may increment it + with filled_mb_block_num or zero both. 
+ - libavcodec - unchanged + */ + int start_mv_blocks_num; + + /** Number of new macroblock descriptions in the mv_blocks array (after + start_mv_blocks_num) that are filled by libavcodec and have to be + passed to the hardware. + - application - zeroes it on get_buffer() or after successful + liteav_ff_draw_horiz_band(). + - libavcodec - increment with one of each stored MB + */ + int filled_mv_blocks_num; + + /** Number of the next free data block; one data block consists of + 64 short values in the data_blocks array. + All blocks before this one have already been claimed by placing their + position into the corresponding block description structure field, + that are part of the mv_blocks array. + - application - zeroes it on get_buffer(). + A successful liteav_ff_draw_horiz_band() may zero it together + with start_mb_blocks_num. + - libavcodec - each decoded macroblock increases it by the number + of coded blocks it contains. + */ + int next_free_data_block_num; +}; + +/** + * @} + */ + +#endif /* AVCODEC_XVMC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/avfilter.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/avfilter.h new file mode 100644 index 0000000..9eb15d9 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/avfilter.h @@ -0,0 +1,1169 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * filter layer + * Copyright (c) 2007 Bobby Bingham + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AVFILTER_H +#define AVFILTER_AVFILTER_H + +/** + * @file + * @ingroup lavfi + * Main libavfilter public API header + */ + +/** + * @defgroup lavfi libavfilter + * Graph-based frame editing library. + * + * @{ + */ + +#include <stddef.h> + +#include "libavutil/attributes.h" +#include "libavutil/avutil.h" +#include "libavutil/buffer.h" +#include "libavutil/dict.h" +#include "libavutil/frame.h" +#include "libavutil/log.h" +#include "libavutil/samplefmt.h" +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" + +#include "libavfilter/version.h" + +/** + * Return the LIBAVFILTER_VERSION_INT constant. + */ +unsigned liteav_avfilter_version(void); + +/** + * Return the libavfilter build-time configuration. + */ +const char *liteav_avfilter_configuration(void); + +/** + * Return the libavfilter license. + */ +const char *liteav_avfilter_license(void); + +typedef struct AVFilterContext AVFilterContext; +typedef struct AVFilterLink AVFilterLink; +typedef struct AVFilterPad AVFilterPad; +typedef struct AVFilterFormats AVFilterFormats; + +/** + * Get the number of elements in a NULL-terminated array of AVFilterPads (e.g. + * AVFilter.inputs/outputs). + */ +int liteav_avfilter_pad_count(const AVFilterPad *pads); + +/** + * Get the name of an AVFilterPad. 
+ * + * @param pads an array of AVFilterPads + * @param pad_idx index of the pad in the array it; is the caller's + * responsibility to ensure the index is valid + * + * @return name of the pad_idx'th pad in pads + */ +const char *liteav_avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx); + +/** + * Get the type of an AVFilterPad. + * + * @param pads an array of AVFilterPads + * @param pad_idx index of the pad in the array; it is the caller's + * responsibility to ensure the index is valid + * + * @return type of the pad_idx'th pad in pads + */ +enum AVMediaType liteav_avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx); + +/** + * The number of the filter inputs is not determined just by AVFilter.inputs. + * The filter might add additional inputs during initialization depending on the + * options supplied to it. + */ +#define AVFILTER_FLAG_DYNAMIC_INPUTS (1 << 0) +/** + * The number of the filter outputs is not determined just by AVFilter.outputs. + * The filter might add additional outputs during initialization depending on + * the options supplied to it. + */ +#define AVFILTER_FLAG_DYNAMIC_OUTPUTS (1 << 1) +/** + * The filter supports multithreading by splitting frames into multiple parts + * and processing them concurrently. + */ +#define AVFILTER_FLAG_SLICE_THREADS (1 << 2) +/** + * Some filters support a generic "enable" expression option that can be used + * to enable or disable a filter in the timeline. Filters supporting this + * option have this flag set. When the enable expression is false, the default + * no-op filter_frame() function is called in place of the filter_frame() + * callback defined on each input pad, thus the frame is passed unchanged to + * the next filters. + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC (1 << 16) +/** + * Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will + * have its filter_frame() callback(s) called as usual even when the enable + * expression is false. 
The filter will disable filtering within the + * filter_frame() callback(s) itself, for example executing code depending on + * the AVFilterContext->is_disabled value. + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL (1 << 17) +/** + * Handy mask to test whether the filter supports or no the timeline feature + * (internally or generically). + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE (AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL) + +/** + * Filter definition. This defines the pads a filter contains, and all the + * callback functions used to interact with the filter. + */ +typedef struct AVFilter { + /** + * Filter name. Must be non-NULL and unique among filters. + */ + const char *name; + + /** + * A description of the filter. May be NULL. + * + * You should use the NULL_IF_CONFIG_SMALL() macro to define it. + */ + const char *description; + + /** + * List of inputs, terminated by a zeroed element. + * + * NULL if there are no (static) inputs. Instances of filters with + * AVFILTER_FLAG_DYNAMIC_INPUTS set may have more inputs than present in + * this list. + */ + const AVFilterPad *inputs; + /** + * List of outputs, terminated by a zeroed element. + * + * NULL if there are no (static) outputs. Instances of filters with + * AVFILTER_FLAG_DYNAMIC_OUTPUTS set may have more outputs than present in + * this list. + */ + const AVFilterPad *outputs; + + /** + * A class for the private data, used to declare filter private AVOptions. + * This field is NULL for filters that do not declare any options. + * + * If this field is non-NULL, the first member of the filter private data + * must be a pointer to AVClass, which will be set by libavfilter generic + * code to this class. + */ + const AVClass *priv_class; + + /** + * A combination of AVFILTER_FLAG_* + */ + int flags; + + /***************************************************************** + * All fields below this line are not part of the public API. 
They + * may not be used outside of libavfilter and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + /** + * Filter pre-initialization function + * + * This callback will be called immediately after the filter context is + * allocated, to allow allocating and initing sub-objects. + * + * If this callback is not NULL, the uninit callback will be called on + * allocation failure. + * + * @return 0 on success, + * AVERROR code on failure (but the code will be + * dropped and treated as ENOMEM by the calling code) + */ + int (*preinit)(AVFilterContext *ctx); + + /** + * Filter initialization function. + * + * This callback will be called only once during the filter lifetime, after + * all the options have been set, but before links between filters are + * established and format negotiation is done. + * + * Basic filter initialization should be done here. Filters with dynamic + * inputs and/or outputs should create those inputs/outputs here based on + * provided options. No more changes to this filter's inputs/outputs can be + * done after this callback. + * + * This callback must not assume that the filter links exist or frame + * parameters are known. + * + * @ref AVFilter.uninit "uninit" is guaranteed to be called even if + * initialization fails, so this callback does not have to clean up on + * failure. + * + * @return 0 on success, a negative AVERROR on failure + */ + int (*init)(AVFilterContext *ctx); + + /** + * Should be set instead of @ref AVFilter.init "init" by the filters that + * want to pass a dictionary of AVOptions to nested contexts that are + * allocated during init. + * + * On return, the options dict should be freed and replaced with one that + * contains all the options which could not be processed by this filter (or + * with NULL if all the options were processed). 
+ * + * Otherwise the semantics is the same as for @ref AVFilter.init "init". + */ + int (*init_dict)(AVFilterContext *ctx, AVDictionary **options); + + /** + * Filter uninitialization function. + * + * Called only once right before the filter is freed. Should deallocate any + * memory held by the filter, release any buffer references, etc. It does + * not need to deallocate the AVFilterContext.priv memory itself. + * + * This callback may be called even if @ref AVFilter.init "init" was not + * called or failed, so it must be prepared to handle such a situation. + */ + void (*uninit)(AVFilterContext *ctx); + + /** + * Query formats supported by the filter on its inputs and outputs. + * + * This callback is called after the filter is initialized (so the inputs + * and outputs are fixed), shortly before the format negotiation. This + * callback may be called more than once. + * + * This callback must set AVFilterLink.out_formats on every input link and + * AVFilterLink.in_formats on every output link to a list of pixel/sample + * formats that the filter supports on that link. For audio links, this + * filter must also set @ref AVFilterLink.in_samplerates "in_samplerates" / + * @ref AVFilterLink.out_samplerates "out_samplerates" and + * @ref AVFilterLink.in_channel_layouts "in_channel_layouts" / + * @ref AVFilterLink.out_channel_layouts "out_channel_layouts" analogously. + * + * This callback may be NULL for filters with one input, in which case + * libavfilter assumes that it supports all input formats and preserves + * them on output. + * + * @return zero on success, a negative value corresponding to an + * AVERROR code otherwise + */ + int (*query_formats)(AVFilterContext *); + + int priv_size; ///< size of private data to allocate for the filter + + int flags_internal; ///< Additional flags for avfilter internal use only. + + /** + * Used by the filter registration system. Must not be touched by any other + * code. 
+ */ + struct AVFilter *next; + + /** + * Make the filter instance process a command. + * + * @param cmd the command to process, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param res a buffer with size res_size where the filter(s) can return a response. This must not change when the command is not supported. + * @param flags if AVFILTER_CMD_FLAG_FAST is set and the command would be + * time consuming then a filter should treat it like an unsupported command + * + * @returns >=0 on success otherwise an error code. + * AVERROR(ENOSYS) on unsupported commands + */ + int (*process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags); + + /** + * Filter initialization function, alternative to the init() + * callback. Args contains the user-supplied parameters, opaque is + * used for providing binary data. + */ + int (*init_opaque)(AVFilterContext *ctx, void *opaque); + + /** + * Filter activation function. + * + * Called when any processing is needed from the filter, instead of any + * filter_frame and request_frame on pads. + * + * The function must examine inlinks and outlinks and perform a single + * step of processing. If there is nothing to do, the function must do + * nothing and not return an error. If more steps are or may be + * possible, it must use liteav_ff_filter_set_ready() to schedule another + * activation. + */ + int (*activate)(AVFilterContext *ctx); +} AVFilter; + +/** + * Process multiple parts of the frame concurrently. 
+ */ +#define AVFILTER_THREAD_SLICE (1 << 0) + +typedef struct AVFilterInternal AVFilterInternal; + +/** An instance of a filter */ +struct AVFilterContext { + const AVClass *av_class; ///< needed for liteav_av_log() and filters common options + + const AVFilter *filter; ///< the AVFilter of which this is an instance + + char *name; ///< name of this filter instance + + AVFilterPad *input_pads; ///< array of input pads + AVFilterLink **inputs; ///< array of pointers to input links + unsigned nb_inputs; ///< number of input pads + + AVFilterPad *output_pads; ///< array of output pads + AVFilterLink **outputs; ///< array of pointers to output links + unsigned nb_outputs; ///< number of output pads + + void *priv; ///< private data for use by the filter + + struct AVFilterGraph *graph; ///< filtergraph this filter belongs to + + /** + * Type of multithreading being allowed/used. A combination of + * AVFILTER_THREAD_* flags. + * + * May be set by the caller before initializing the filter to forbid some + * or all kinds of multithreading for this filter. The default is allowing + * everything. + * + * When the filter is initialized, this field is combined using bit AND with + * AVFilterGraph.thread_type to get the final mask used for determining + * allowed threading types. I.e. a threading type needs to be set in both + * to be allowed. + * + * After the filter is initialized, libavfilter sets this field to the + * threading type that is actually used (0 for no multithreading). + */ + int thread_type; + + /** + * An opaque struct for libavfilter internal use. 
+ */ + AVFilterInternal *internal; + + struct AVFilterCommand *command_queue; + + char *enable_str; ///< enable expression string + void *enable; ///< parsed expression (AVExpr*) + double *var_values; ///< variable values for the enable expression + int is_disabled; ///< the enabled state from the last expression evaluation + + /** + * For filters which will create hardware frames, sets the device the + * filter should create them in. All other filters will ignore this field: + * in particular, a filter which consumes or processes hardware frames will + * instead use the hw_frames_ctx field in AVFilterLink to carry the + * hardware context information. + */ + AVBufferRef *hw_device_ctx; + + /** + * Max number of threads allowed in this filter instance. + * If <= 0, its value is ignored. + * Overrides global number of threads set per filter graph. + */ + int nb_threads; + + /** + * Ready status of the filter. + * A non-0 value means that the filter needs activating; + * a higher value suggests a more urgent activation. + */ + unsigned ready; + + /** + * Sets the number of extra hardware frames which the filter will + * allocate on its output links for use in following filters or by + * the caller. + * + * Some hardware filters require all frames that they will use for + * output to be defined in advance before filtering starts. For such + * filters, any hardware frame pools used for output must therefore be + * of fixed size. The extra frames set here are on top of any number + * that the filter needs internally in order to operate normally. + * + * This field must be set before the graph containing this filter is + * configured. + */ + int extra_hw_frames; +}; + +/** + * A link between two filters. This contains pointers to the source and + * destination filters between which this link exists, and the indexes of + * the pads involved. 
In addition, this link also contains the parameters + * which have been negotiated and agreed upon between the filter, such as + * image dimensions, format, etc. + * + * Applications must not normally access the link structure directly. + * Use the buffersrc and buffersink API instead. + * In the future, access to the header may be reserved for filters + * implementation. + */ +struct AVFilterLink { + AVFilterContext *src; ///< source filter + AVFilterPad *srcpad; ///< output pad on the source filter + + AVFilterContext *dst; ///< dest filter + AVFilterPad *dstpad; ///< input pad on the dest filter + + enum AVMediaType type; ///< filter media type + + /* These parameters apply only to video */ + int w; ///< agreed upon image width + int h; ///< agreed upon image height + AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio + /* These parameters apply only to audio */ + uint64_t channel_layout; ///< channel layout of current buffer (see libavutil/channel_layout.h) + int sample_rate; ///< samples per second + + int format; ///< agreed upon media format + + /** + * Define the time base used by the PTS of the frames/samples + * which will pass through this link. + * During the configuration stage, each filter is supposed to + * change only the output timebase, while the timebase of the + * input link is assumed to be an unchangeable property. + */ + AVRational time_base; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavfilter and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + /** + * Lists of formats and channel layouts supported by the input and output + * filters respectively. 
These lists are used for negotiating the format + * to actually be used, which will be loaded into the format and + * channel_layout members, above, when chosen. + * + */ + AVFilterFormats *in_formats; + AVFilterFormats *out_formats; + + /** + * Lists of channel layouts and sample rates used for automatic + * negotiation. + */ + AVFilterFormats *in_samplerates; + AVFilterFormats *out_samplerates; + struct AVFilterChannelLayouts *in_channel_layouts; + struct AVFilterChannelLayouts *out_channel_layouts; + + /** + * Audio only, the destination filter sets this to a non-zero value to + * request that buffers with the given number of samples should be sent to + * it. AVFilterPad.needs_fifo must also be set on the corresponding input + * pad. + * Last buffer before EOF will be padded with silence. + */ + int request_samples; + + /** stage of the initialization of the link properties (dimensions, etc) */ + enum { + AVLINK_UNINIT = 0, ///< not started + AVLINK_STARTINIT, ///< started, but incomplete + AVLINK_INIT ///< complete + } init_state; + + /** + * Graph the filter belongs to. + */ + struct AVFilterGraph *graph; + + /** + * Current timestamp of the link, as defined by the most recent + * frame(s), in link time_base units. + */ + int64_t current_pts; + + /** + * Current timestamp of the link, as defined by the most recent + * frame(s), in AV_TIME_BASE units. + */ + int64_t current_pts_us; + + /** + * Index in the age array. + */ + int age_index; + + /** + * Frame rate of the stream on the link, or 1/0 if unknown or variable; + * if left to 0/0, will be automatically copied from the first input + * of the source filter if it exists. + * + * Sources should set it to the best estimation of the real frame rate. + * If the source frame rate is unknown or variable, set this to 1/0. + * Filters should update it if necessary depending on their function. + * Sinks can use it to set a default output frame rate. + * It is similar to the r_frame_rate field in AVStream. 
+ */ + AVRational frame_rate; + + /** + * Buffer partially filled with samples to achieve a fixed/minimum size. + */ + AVFrame *partial_buf; + + /** + * Size of the partial buffer to allocate. + * Must be between min_samples and max_samples. + */ + int partial_buf_size; + + /** + * Minimum number of samples to filter at once. If filter_frame() is + * called with fewer samples, it will accumulate them in partial_buf. + * This field and the related ones must not be changed after filtering + * has started. + * If 0, all related fields are ignored. + */ + int min_samples; + + /** + * Maximum number of samples to filter at once. If filter_frame() is + * called with more samples, it will split them. + */ + int max_samples; + + /** + * Number of channels. + */ + int channels; + + /** + * Link processing flags. + */ + unsigned flags; + + /** + * Number of past frames sent through the link. + */ + int64_t frame_count_in, frame_count_out; + + /** + * A pointer to a FFFramePool struct. + */ + void *frame_pool; + + /** + * True if a frame is currently wanted on the output of this filter. + * Set when liteav_ff_request_frame() is called by the output, + * cleared when a frame is filtered. + */ + int frame_wanted_out; + + /** + * For hwaccel pixel formats, this should be a reference to the + * AVHWFramesContext describing the frames. + */ + AVBufferRef *hw_frames_ctx; + +#ifndef FF_INTERNAL_FIELDS + + /** + * Internal structure members. + * The fields below this limit are internal for libavfilter's use + * and must in no way be accessed by applications. + */ + char reserved[0xF000]; + +#else /* FF_INTERNAL_FIELDS */ + + /** + * Queue of frames waiting to be filtered. + */ + FFFrameQueue fifo; + + /** + * If set, the source filter can not generate a frame as is. + * The goal is to avoid repeatedly calling the request_frame() method on + * the same link. + */ + int frame_blocked_in; + + /** + * Link input status. 
+ * If not zero, all attempts of filter_frame will fail with the + * corresponding code. + */ + int status_in; + + /** + * Timestamp of the input status change. + */ + int64_t status_in_pts; + + /** + * Link output status. + * If not zero, all attempts of request_frame will fail with the + * corresponding code. + */ + int status_out; + +#endif /* FF_INTERNAL_FIELDS */ + +}; + +/** + * Link two filters together. + * + * @param src the source filter + * @param srcpad index of the output pad on the source filter + * @param dst the destination filter + * @param dstpad index of the input pad on the destination filter + * @return zero on success + */ +int liteav_avfilter_link(AVFilterContext *src, unsigned srcpad, + AVFilterContext *dst, unsigned dstpad); + +/** + * Free the link in *link, and set its pointer to NULL. + */ +void liteav_avfilter_link_free(AVFilterLink **link); + +#if FF_API_FILTER_GET_SET +/** + * Get the number of channels of a link. + * @deprecated Use liteav_av_buffersink_get_channels() + */ +attribute_deprecated +int liteav_avfilter_link_get_channels(AVFilterLink *link); +#endif + +/** + * Set the closed field of a link. + * @deprecated applications are not supposed to mess with links, they should + * close the sinks. + */ +attribute_deprecated +void liteav_avfilter_link_set_closed(AVFilterLink *link, int closed); + +/** + * Negotiate the media format, dimensions, etc of all inputs to a filter. + * + * @param filter the filter to negotiate the properties for its inputs + * @return zero on successful negotiation + */ +int liteav_avfilter_config_links(AVFilterContext *filter); + +#define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically +#define AVFILTER_CMD_FLAG_FAST 2 ///< Only execute command when its fast (like a video out that supports contrast adjustment in hw) + +/** + * Make the filter instance process a command. 
+ * It is recommended to use liteav_avfilter_graph_send_command(). + */ +int liteav_avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags); + +/** + * Iterate over all registered filters. + * + * @param opaque a pointer where libavfilter will store the iteration state. Must + * point to NULL to start the iteration. + * + * @return the next registered filter or NULL when the iteration is + * finished + */ +const AVFilter *liteav_av_filter_iterate(void **opaque); + +#if FF_API_NEXT +/** Initialize the filter system. Register all builtin filters. */ +attribute_deprecated +void liteav_avfilter_register_all(void); + +/** + * Register a filter. This is only needed if you plan to use + * liteav_avfilter_get_by_name later to lookup the AVFilter structure by name. A + * filter can still by instantiated with liteav_avfilter_graph_alloc_filter even if it + * is not registered. + * + * @param filter the filter to register + * @return 0 if the registration was successful, a negative value + * otherwise + */ +attribute_deprecated +int liteav_avfilter_register(AVFilter *filter); + +/** + * Iterate over all registered filters. + * @return If prev is non-NULL, next registered filter after prev or NULL if + * prev is the last filter. If prev is NULL, return the first registered filter. + */ +attribute_deprecated +const AVFilter *liteav_avfilter_next(const AVFilter *prev); +#endif + +/** + * Get a filter definition matching the given name. + * + * @param name the filter name to find + * @return the filter definition, if any matching one is registered. + * NULL if none found. + */ +const AVFilter *liteav_avfilter_get_by_name(const char *name); + + +/** + * Initialize a filter with the supplied parameters. + * + * @param ctx uninitialized filter context to initialize + * @param args Options to initialize the filter with. This must be a + * ':'-separated list of options in the 'key=value' form. 
+ * May be NULL if the options have been set directly using the + * AVOptions API or there are no options that need to be set. + * @return 0 on success, a negative AVERROR on failure + */ +int liteav_avfilter_init_str(AVFilterContext *ctx, const char *args); + +/** + * Initialize a filter with the supplied dictionary of options. + * + * @param ctx uninitialized filter context to initialize + * @param options An AVDictionary filled with options for this filter. On + * return this parameter will be destroyed and replaced with + * a dict containing options that were not found. This dictionary + * must be freed by the caller. + * May be NULL, then this function is equivalent to + * liteav_avfilter_init_str() with the second parameter set to NULL. + * @return 0 on success, a negative AVERROR on failure + * + * @note This function and liteav_avfilter_init_str() do essentially the same thing, + * the difference is in manner in which the options are passed. It is up to the + * calling code to choose whichever is more preferable. The two functions also + * behave differently when some of the provided options are not declared as + * supported by the filter. In such a case, liteav_avfilter_init_str() will fail, but + * this function will leave those extra options in the options AVDictionary and + * continue as usual. + */ +int liteav_avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options); + +/** + * Free a filter context. This will also remove the filter from its + * filtergraph's list of filters. + * + * @param filter the filter to free + */ +void liteav_avfilter_free(AVFilterContext *filter); + +/** + * Insert a filter in the middle of an existing link. 
+ * + * @param link the link into which the filter should be inserted + * @param filt the filter to be inserted + * @param filt_srcpad_idx the input pad on the filter to connect + * @param filt_dstpad_idx the output pad on the filter to connect + * @return zero on success + */ +int liteav_avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt, + unsigned filt_srcpad_idx, unsigned filt_dstpad_idx); + +/** + * @return AVClass for AVFilterContext. + * + * @see liteav_av_opt_find(). + */ +const AVClass *liteav_avfilter_get_class(void); + +typedef struct AVFilterGraphInternal AVFilterGraphInternal; + +/** + * A function pointer passed to the @ref AVFilterGraph.execute callback to be + * executed multiple times, possibly in parallel. + * + * @param ctx the filter context the job belongs to + * @param arg an opaque parameter passed through from @ref + * AVFilterGraph.execute + * @param jobnr the index of the job being executed + * @param nb_jobs the total number of jobs + * + * @return 0 on success, a negative AVERROR on error + */ +typedef int (avfilter_action_func)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs); + +/** + * A function executing multiple jobs, possibly in parallel. 
+ * + * @param ctx the filter context to which the jobs belong + * @param func the function to be called multiple times + * @param arg the argument to be passed to func + * @param ret a nb_jobs-sized array to be filled with return values from each + * invocation of func + * @param nb_jobs the number of jobs to execute + * + * @return 0 on success, a negative AVERROR on error + */ +typedef int (avfilter_execute_func)(AVFilterContext *ctx, avfilter_action_func *func, + void *arg, int *ret, int nb_jobs); + +typedef struct AVFilterGraph { + const AVClass *av_class; + AVFilterContext **filters; + unsigned nb_filters; + + char *scale_sws_opts; ///< sws options to use for the auto-inserted scale filters +#if FF_API_LAVR_OPTS + attribute_deprecated char *resample_lavr_opts; ///< libavresample options to use for the auto-inserted resample filters +#endif + + /** + * Type of multithreading allowed for filters in this graph. A combination + * of AVFILTER_THREAD_* flags. + * + * May be set by the caller at any point, the setting will apply to all + * filters initialized after that. The default is allowing everything. + * + * When a filter in this graph is initialized, this field is combined using + * bit AND with AVFilterContext.thread_type to get the final mask used for + * determining allowed threading types. I.e. a threading type needs to be + * set in both to be allowed. + */ + int thread_type; + + /** + * Maximum number of threads used by filters in this graph. May be set by + * the caller before adding any filters to the filtergraph. Zero (the + * default) means that the number of threads is determined automatically. + */ + int nb_threads; + + /** + * Opaque object for libavfilter internal use. + */ + AVFilterGraphInternal *internal; + + /** + * Opaque user data. May be set by the caller to an arbitrary value, e.g. to + * be used from callbacks like @ref AVFilterGraph.execute. + * Libavfilter will not touch this field in any way. 
+ */ + void *opaque; + + /** + * This callback may be set by the caller immediately after allocating the + * graph and before adding any filters to it, to provide a custom + * multithreading implementation. + * + * If set, filters with slice threading capability will call this callback + * to execute multiple jobs in parallel. + * + * If this field is left unset, libavfilter will use its internal + * implementation, which may or may not be multithreaded depending on the + * platform and build options. + */ + avfilter_execute_func *execute; + + char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions + + /** + * Private fields + * + * The following fields are for internal use only. + * Their type, offset, number and semantic can change without notice. + */ + + AVFilterLink **sink_links; + int sink_links_count; + + unsigned disable_auto_convert; +} AVFilterGraph; + +/** + * Allocate a filter graph. + * + * @return the allocated filter graph on success or NULL. + */ +AVFilterGraph *liteav_avfilter_graph_alloc(void); + +/** + * Create a new filter instance in a filter graph. + * + * @param graph graph in which the new filter will be used + * @param filter the filter to create an instance of + * @param name Name to give to the new instance (will be copied to + * AVFilterContext.name). This may be used by the caller to identify + * different filters, libavfilter itself assigns no semantics to + * this parameter. May be NULL. + * + * @return the context of the newly created filter instance (note that it is + * also retrievable directly through AVFilterGraph.filters or with + * liteav_avfilter_graph_get_filter()) on success or NULL on failure. + */ +AVFilterContext *liteav_avfilter_graph_alloc_filter(AVFilterGraph *graph, + const AVFilter *filter, + const char *name); + +/** + * Get a filter instance identified by instance name from graph. + * + * @param graph filter graph to search through. 
+ * @param name filter instance name (should be unique in the graph). + * @return the pointer to the found filter instance or NULL if it + * cannot be found. + */ +AVFilterContext *liteav_avfilter_graph_get_filter(AVFilterGraph *graph, const char *name); + +/** + * Create and add a filter instance into an existing graph. + * The filter instance is created from the filter filt and inited + * with the parameters args and opaque. + * + * In case of success put in *filt_ctx the pointer to the created + * filter instance, otherwise set *filt_ctx to NULL. + * + * @param name the instance name to give to the created filter instance + * @param graph_ctx the filter graph + * @return a negative AVERROR error code in case of failure, a non + * negative value otherwise + */ +int liteav_avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, + const char *name, const char *args, void *opaque, + AVFilterGraph *graph_ctx); + +/** + * Enable or disable automatic format conversion inside the graph. + * + * Note that format conversion can still happen inside explicitly inserted + * scale and aresample filters. + * + * @param flags any of the AVFILTER_AUTO_CONVERT_* constants + */ +void liteav_avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags); + +enum { + AVFILTER_AUTO_CONVERT_ALL = 0, /**< all automatic conversions enabled */ + AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */ +}; + +/** + * Check validity and configure all the links and formats in the graph. + * + * @param graphctx the filter graph + * @param log_ctx context used for logging + * @return >= 0 in case of success, a negative AVERROR code otherwise + */ +int liteav_avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx); + +/** + * Free a graph, destroy its links, and set *graph to NULL. + * If *graph is NULL, do nothing. 
+ */ +void liteav_avfilter_graph_free(AVFilterGraph **graph); + +/** + * A linked-list of the inputs/outputs of the filter chain. + * + * This is mainly useful for liteav_avfilter_graph_parse() / liteav_avfilter_graph_parse2(), + * where it is used to communicate open (unlinked) inputs and outputs from and + * to the caller. + * This struct specifies, per each not connected pad contained in the graph, the + * filter context and the pad index required for establishing a link. + */ +typedef struct AVFilterInOut { + /** unique name for this input/output in the list */ + char *name; + + /** filter context associated to this input/output */ + AVFilterContext *filter_ctx; + + /** index of the filt_ctx pad to use for linking */ + int pad_idx; + + /** next input/input in the list, NULL if this is the last */ + struct AVFilterInOut *next; +} AVFilterInOut; + +/** + * Allocate a single AVFilterInOut entry. + * Must be freed with liteav_avfilter_inout_free(). + * @return allocated AVFilterInOut on success, NULL on failure. + */ +AVFilterInOut *liteav_avfilter_inout_alloc(void); + +/** + * Free the supplied list of AVFilterInOut and set *inout to NULL. + * If *inout is NULL, do nothing. + */ +void liteav_avfilter_inout_free(AVFilterInOut **inout); + +/** + * Add a graph described by a string to a graph. + * + * @note The caller must provide the lists of inputs and outputs, + * which therefore must be known before calling the function. + * + * @note The inputs parameter describes inputs of the already existing + * part of the graph; i.e. from the point of view of the newly created + * part, they are outputs. Similarly the outputs parameter describes + * outputs of the already existing filters, which are provided as + * inputs to the parsed filters. 
+ * + * @param graph the filter graph where to link the parsed graph context + * @param filters string to be parsed + * @param inputs linked list to the inputs of the graph + * @param outputs linked list to the outputs of the graph + * @return zero on success, a negative AVERROR code on error + */ +int liteav_avfilter_graph_parse(AVFilterGraph *graph, const char *filters, + AVFilterInOut *inputs, AVFilterInOut *outputs, + void *log_ctx); + +/** + * Add a graph described by a string to a graph. + * + * In the graph filters description, if the input label of the first + * filter is not specified, "in" is assumed; if the output label of + * the last filter is not specified, "out" is assumed. + * + * @param graph the filter graph where to link the parsed graph context + * @param filters string to be parsed + * @param inputs pointer to a linked list to the inputs of the graph, may be NULL. + * If non-NULL, *inputs is updated to contain the list of open inputs + * after the parsing, should be freed with liteav_avfilter_inout_free(). + * @param outputs pointer to a linked list to the outputs of the graph, may be NULL. + * If non-NULL, *outputs is updated to contain the list of open outputs + * after the parsing, should be freed with liteav_avfilter_inout_free(). + * @return non negative on success, a negative AVERROR code on error + */ +int liteav_avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, + AVFilterInOut **inputs, AVFilterInOut **outputs, + void *log_ctx); + +/** + * Add a graph described by a string to a graph. + * + * @param[in] graph the filter graph where to link the parsed graph context + * @param[in] filters string to be parsed + * @param[out] inputs a linked list of all free (unlinked) inputs of the + * parsed graph will be returned here. It is to be freed + * by the caller using liteav_avfilter_inout_free(). + * @param[out] outputs a linked list of all free (unlinked) outputs of the + * parsed graph will be returned here. 
It is to be freed by the + * caller using liteav_avfilter_inout_free(). + * @return zero on success, a negative AVERROR code on error + * + * @note This function returns the inputs and outputs that are left + * unlinked after parsing the graph and the caller then deals with + * them. + * @note This function makes no reference whatsoever to already + * existing parts of the graph and the inputs parameter will on return + * contain inputs of the newly parsed part of the graph. Analogously + * the outputs parameter will contain outputs of the newly created + * filters. + */ +int liteav_avfilter_graph_parse2(AVFilterGraph *graph, const char *filters, + AVFilterInOut **inputs, + AVFilterInOut **outputs); + +/** + * Send a command to one or more filter instances. + * + * @param graph the filter graph + * @param target the filter(s) to which the command should be sent + * "all" sends to all filters + * otherwise it can be a filter or filter instance name + * which will send the command to all matching filters. + * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param res a buffer with size res_size where the filter(s) can return a response. + * + * @returns >=0 on success otherwise an error code. + * AVERROR(ENOSYS) on unsupported commands + */ +int liteav_avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags); + +/** + * Queue a command for one or more filter instances. + * + * @param graph the filter graph + * @param target the filter(s) to which the command should be sent + * "all" sends to all filters + * otherwise it can be a filter or filter instance name + * which will send the command to all matching filters. 
+ * @param cmd the command to sent, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param ts time at which the command should be sent to the filter + * + * @note As this executes commands after this function returns, no return code + * from the filter is provided, also AVFILTER_CMD_FLAG_ONE is not supported. + */ +int liteav_avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts); + + +/** + * Dump a graph into a human-readable string representation. + * + * @param graph the graph to dump + * @param options formatting options; currently ignored + * @return a string, or NULL in case of memory allocation failure; + * the string must be freed using liteav_av_free + */ +char *liteav_avfilter_graph_dump(AVFilterGraph *graph, const char *options); + +/** + * Request a frame on the oldest sink link. + * + * If the request returns AVERROR_EOF, try the next. + * + * Note that this function is not meant to be the sole scheduling mechanism + * of a filtergraph, only a convenience function to help drain a filtergraph + * in a balanced way under normal circumstances. + * + * Also note that AVERROR_EOF does not mean that frames did not arrive on + * some of the sinks during the process. + * When there are multiple sink links, in case the requested link + * returns an EOF, this may cause a filter to flush pending frames + * which are sent to another sink link, although unrequested. 
+ * + * @return the return value of liteav_ff_request_frame(), + * or AVERROR_EOF if all links returned AVERROR_EOF + */ +int liteav_avfilter_graph_request_oldest(AVFilterGraph *graph); + +/** + * @} + */ + +#endif /* AVFILTER_AVFILTER_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/buffersink.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/buffersink.h new file mode 100644 index 0000000..6ca5aea --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/buffersink.h @@ -0,0 +1,166 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERSINK_H +#define AVFILTER_BUFFERSINK_H + +/** + * @file + * @ingroup lavfi_buffersink + * memory buffer sink API for audio and video + */ + +#include "avfilter.h" + +/** + * @defgroup lavfi_buffersink Buffer sink API + * @ingroup lavfi + * @{ + */ + +/** + * Get a frame with filtered data from sink and put it in frame. + * + * @param ctx pointer to a buffersink or abuffersink filter context. 
+ * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using liteav_av_frame_unref() / liteav_av_frame_free() + * @param flags a combination of AV_BUFFERSINK_FLAG_* flags + * + * @return >= 0 in for success, a negative AVERROR code for failure. + */ +int liteav_av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags); + +/** + * Tell av_buffersink_get_buffer_ref() to read video/samples buffer + * reference, but not remove it from the buffer. This is useful if you + * need only to read a video/samples buffer, without to fetch it. + */ +#define AV_BUFFERSINK_FLAG_PEEK 1 + +/** + * Tell av_buffersink_get_buffer_ref() not to request a frame from its input. + * If a frame is already buffered, it is read (and removed from the buffer), + * but if no frame is present, return AVERROR(EAGAIN). + */ +#define AV_BUFFERSINK_FLAG_NO_REQUEST 2 + +/** + * Struct to use for initializing a buffersink context. + */ +typedef struct AVBufferSinkParams { + const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE +} AVBufferSinkParams; + +/** + * Create an AVBufferSinkParams structure. + * + * Must be freed with liteav_av_free(). + */ +AVBufferSinkParams *liteav_av_buffersink_params_alloc(void); + +/** + * Struct to use for initializing an abuffersink context. + */ +typedef struct AVABufferSinkParams { + const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE + const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1 + const int *channel_counts; ///< list of allowed channel counts, terminated by -1 + int all_channel_counts; ///< if not 0, accept any channel count or layout + int *sample_rates; ///< list of allowed sample rates, terminated by -1 +} AVABufferSinkParams; + +/** + * Create an AVABufferSinkParams structure. + * + * Must be freed with liteav_av_free(). 
+ */ +AVABufferSinkParams *liteav_av_abuffersink_params_alloc(void); + +/** + * Set the frame size for an audio buffer sink. + * + * All calls to av_buffersink_get_buffer_ref will return a buffer with + * exactly the specified number of samples, or AVERROR(EAGAIN) if there is + * not enough. The last buffer at EOF will be padded with 0. + */ +void liteav_av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size); + +/** + * @defgroup lavfi_buffersink_accessors Buffer sink accessors + * Get the properties of the stream + * @{ + */ + +enum AVMediaType liteav_av_buffersink_get_type (const AVFilterContext *ctx); +AVRational liteav_av_buffersink_get_time_base (const AVFilterContext *ctx); +int liteav_av_buffersink_get_format (const AVFilterContext *ctx); + +AVRational liteav_av_buffersink_get_frame_rate (const AVFilterContext *ctx); +int liteav_av_buffersink_get_w (const AVFilterContext *ctx); +int liteav_av_buffersink_get_h (const AVFilterContext *ctx); +AVRational liteav_av_buffersink_get_sample_aspect_ratio (const AVFilterContext *ctx); + +int liteav_av_buffersink_get_channels (const AVFilterContext *ctx); +uint64_t liteav_av_buffersink_get_channel_layout (const AVFilterContext *ctx); +int liteav_av_buffersink_get_sample_rate (const AVFilterContext *ctx); + +AVBufferRef * liteav_av_buffersink_get_hw_frames_ctx (const AVFilterContext *ctx); + +/** @} */ + +/** + * Get a frame with filtered data from sink and put it in frame. + * + * @param ctx pointer to a context of a buffersink or abuffersink AVFilter. + * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using liteav_av_frame_unref() / liteav_av_frame_free() + * + * @return + * - >= 0 if a frame was successfully returned. + * - AVERROR(EAGAIN) if no frames are available at this point; more + * input frames must be added to the filtergraph to get more output. + * - AVERROR_EOF if there will be no more output frames on this sink. 
+ * - A different negative AVERROR code in other failure cases. + */ +int liteav_av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame); + +/** + * Same as liteav_av_buffersink_get_frame(), but with the ability to specify the number + * of samples read. This function is less efficient than + * liteav_av_buffersink_get_frame(), because it copies the data around. + * + * @param ctx pointer to a context of the abuffersink AVFilter. + * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using liteav_av_frame_unref() / liteav_av_frame_free() + * frame will contain exactly nb_samples audio samples, except at + * the end of stream, when it can contain less than nb_samples. + * + * @return The return codes have the same meaning as for + * liteav_av_buffersink_get_frame(). + * + * @warning do not mix this function with liteav_av_buffersink_get_frame(). Use only one or + * the other with a single sink, not both. + */ +int liteav_av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples); + +/** + * @} + */ + +#endif /* AVFILTER_BUFFERSINK_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/buffersrc.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/buffersrc.h new file mode 100644 index 0000000..1e6b84a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/buffersrc.h @@ -0,0 +1,210 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERSRC_H +#define AVFILTER_BUFFERSRC_H + +/** + * @file + * @ingroup lavfi_buffersrc + * Memory buffer source API. + */ + +#include "avfilter.h" + +/** + * @defgroup lavfi_buffersrc Buffer source API + * @ingroup lavfi + * @{ + */ + +enum { + + /** + * Do not check for format changes. + */ + AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1, + + /** + * Immediately push the frame to the output. + */ + AV_BUFFERSRC_FLAG_PUSH = 4, + + /** + * Keep a reference to the frame. + * If the frame if reference-counted, create a new reference; otherwise + * copy the frame data. + */ + AV_BUFFERSRC_FLAG_KEEP_REF = 8, + +}; + +/** + * Get the number of failed requests. + * + * A failed request is when the request_frame method is called while no + * frame is present in the buffer. + * The number is reset when a frame is added. + */ +unsigned liteav_av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src); + +/** + * This structure contains the parameters describing the frames that will be + * passed to this filter. + * + * It should be allocated with liteav_av_buffersrc_parameters_alloc() and freed with + * liteav_av_free(). All the allocated fields in it remain owned by the caller. + */ +typedef struct AVBufferSrcParameters { + /** + * video: the pixel format, value corresponds to enum AVPixelFormat + * audio: the sample format, value corresponds to enum AVSampleFormat + */ + int format; + /** + * The timebase to be used for the timestamps on the input frames. 
+ */ + AVRational time_base; + + /** + * Video only, the display dimensions of the input frames. + */ + int width, height; + + /** + * Video only, the sample (pixel) aspect ratio. + */ + AVRational sample_aspect_ratio; + + /** + * Video only, the frame rate of the input video. This field must only be + * set to a non-zero value if input stream has a known constant framerate + * and should be left at its initial value if the framerate is variable or + * unknown. + */ + AVRational frame_rate; + + /** + * Video with a hwaccel pixel format only. This should be a reference to an + * AVHWFramesContext instance describing the input frames. + */ + AVBufferRef *hw_frames_ctx; + + /** + * Audio only, the audio sampling rate in samples per second. + */ + int sample_rate; + + /** + * Audio only, the audio channel layout + */ + uint64_t channel_layout; +} AVBufferSrcParameters; + +/** + * Allocate a new AVBufferSrcParameters instance. It should be freed by the + * caller with liteav_av_free(). + */ +AVBufferSrcParameters *liteav_av_buffersrc_parameters_alloc(void); + +/** + * Initialize the buffersrc or abuffersrc filter with the provided parameters. + * This function may be called multiple times, the later calls override the + * previous ones. Some of the parameters may also be set through AVOptions, then + * whatever method is used last takes precedence. + * + * @param ctx an instance of the buffersrc or abuffersrc filter + * @param param the stream parameters. The frames later passed to this filter + * must conform to those parameters. All the allocated fields in + * param remain owned by the caller, libavfilter will make internal + * copies or references when necessary. + * @return 0 on success, a negative AVERROR code on failure. + */ +int liteav_av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param); + +/** + * Add a frame to the buffer source. + * + * @param ctx an instance of the buffersrc filter + * @param frame frame to be added. 
If the frame is reference counted, this + * function will make a new reference to it. Otherwise the frame data will be + * copied. + * + * @return 0 on success, a negative AVERROR on error + * + * This function is equivalent to liteav_av_buffersrc_add_frame_flags() with the + * AV_BUFFERSRC_FLAG_KEEP_REF flag. + */ +av_warn_unused_result +int liteav_av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame); + +/** + * Add a frame to the buffer source. + * + * @param ctx an instance of the buffersrc filter + * @param frame frame to be added. If the frame is reference counted, this + * function will take ownership of the reference(s) and reset the frame. + * Otherwise the frame data will be copied. If this function returns an error, + * the input frame is not touched. + * + * @return 0 on success, a negative AVERROR on error. + * + * @note the difference between this function and liteav_av_buffersrc_write_frame() is + * that liteav_av_buffersrc_write_frame() creates a new reference to the input frame, + * while this function takes ownership of the reference passed to it. + * + * This function is equivalent to liteav_av_buffersrc_add_frame_flags() without the + * AV_BUFFERSRC_FLAG_KEEP_REF flag. + */ +av_warn_unused_result +int liteav_av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame); + +/** + * Add a frame to the buffer source. + * + * By default, if the frame is reference-counted, this function will take + * ownership of the reference(s) and reset the frame. This can be controlled + * using the flags. + * + * If this function returns an error, the input frame is not touched. 
+ * + * @param buffer_src pointer to a buffer source context + * @param frame a frame, or NULL to mark EOF + * @param flags a combination of AV_BUFFERSRC_FLAG_* + * @return >= 0 in case of success, a negative AVERROR code + * in case of failure + */ +av_warn_unused_result +int liteav_av_buffersrc_add_frame_flags(AVFilterContext *buffer_src, + AVFrame *frame, int flags); + +/** + * Close the buffer source after EOF. + * + * This is similar to passing NULL to liteav_av_buffersrc_add_frame_flags() + * except it takes the timestamp of the EOF, i.e. the timestamp of the end + * of the last frame. + */ +int liteav_av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags); + +/** + * @} + */ + +#endif /* AVFILTER_BUFFERSRC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/version.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/version.h new file mode 100644 index 0000000..9f0a996 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavfilter/version.h @@ -0,0 +1,65 @@ +/* + * Version macros. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_VERSION_H +#define AVFILTER_VERSION_H + +/** + * @file + * @ingroup lavfi + * Libavfilter version macros + */ + +#include "libavutil/version.h" + +#define LIBAVFILTER_VERSION_MAJOR 7 +#define LIBAVFILTER_VERSION_MINOR 40 +#define LIBAVFILTER_VERSION_MICRO 101 + +#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ + LIBAVFILTER_VERSION_MINOR, \ + LIBAVFILTER_VERSION_MICRO) +#define LIBAVFILTER_VERSION AV_VERSION(LIBAVFILTER_VERSION_MAJOR, \ + LIBAVFILTER_VERSION_MINOR, \ + LIBAVFILTER_VERSION_MICRO) +#define LIBAVFILTER_BUILD LIBAVFILTER_VERSION_INT + +#define LIBAVFILTER_IDENT "Lavfi" AV_STRINGIFY(LIBAVFILTER_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. 
+ */ + +#ifndef FF_API_OLD_FILTER_OPTS_ERROR +#define FF_API_OLD_FILTER_OPTS_ERROR (LIBAVFILTER_VERSION_MAJOR < 8) +#endif +#ifndef FF_API_LAVR_OPTS +#define FF_API_LAVR_OPTS (LIBAVFILTER_VERSION_MAJOR < 8) +#endif +#ifndef FF_API_FILTER_GET_SET +#define FF_API_FILTER_GET_SET (LIBAVFILTER_VERSION_MAJOR < 8) +#endif +#ifndef FF_API_NEXT +#define FF_API_NEXT (LIBAVFILTER_VERSION_MAJOR < 8) +#endif + +#endif /* AVFILTER_VERSION_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/avc.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/avc.h new file mode 100644 index 0000000..d91d636 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/avc.h @@ -0,0 +1,38 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * AVC helper functions for muxers + * Copyright (c) 2008 Aurelien Jacobs <aurel@gnuage.org> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_AVC_H +#define AVFORMAT_AVC_H + +#include <stdint.h> +#include "avio.h" + +int liteav_ff_avc_parse_nal_units(AVIOContext *s, const uint8_t *buf, int size); +int liteav_ff_avc_parse_nal_units_buf(const uint8_t *buf_in, uint8_t **buf, int *size); +int liteav_ff_isom_write_avcc(AVIOContext *pb, const uint8_t *data, int len); +const uint8_t *liteav_ff_avc_find_startcode(const uint8_t *p, const uint8_t *end); +int liteav_ff_avc_write_annexb_extradata(const uint8_t *in, uint8_t **buf, int *size); +const uint8_t *liteav_ff_avc_mp4_find_startcode(const uint8_t *start, + const uint8_t *end, + int nal_length_size); + +#endif /* AVFORMAT_AVC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/avformat.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/avformat.h new file mode 100644 index 0000000..8086860 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/avformat.h @@ -0,0 +1,3108 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_AVFORMAT_H +#define AVFORMAT_AVFORMAT_H + +/** + * @file + * @ingroup libavf + * Main libavformat public API header + */ + +/** + * @defgroup libavf libavformat + * I/O and Muxing/Demuxing Library + * + * Libavformat (lavf) is a library for dealing with various media container + * formats. Its main two purposes are demuxing - i.e. splitting a media file + * into component streams, and the reverse process of muxing - writing supplied + * data in a specified container format. It also has an @ref lavf_io + * "I/O module" which supports a number of protocols for accessing the data (e.g. + * file, tcp, http and others). Before using lavf, you need to call + * liteav_av_register_all() to register all compiled muxers, demuxers and protocols. + * Unless you are absolutely sure you won't use libavformat's network + * capabilities, you should also call avformat_network_init(). + * + * A supported input format is described by an AVInputFormat struct, conversely + * an output format is described by AVOutputFormat. You can iterate over all + * registered input/output formats using the liteav_av_iformat_next() / + * liteav_av_oformat_next() functions. The protocols layer is not part of the public + * API, so you can only get the names of supported protocols with the + * liteav_avio_enum_protocols() function. + * + * Main lavf structure used for both muxing and demuxing is AVFormatContext, + * which exports all information about the file being read or written. As with + * most Libavformat structures, its size is not part of public ABI, so it cannot be + * allocated on stack or directly with liteav_av_malloc(). 
To create an + * AVFormatContext, use avformat_alloc_context() (some functions, like + * avformat_open_input() might do that for you). + * + * Most importantly an AVFormatContext contains: + * @li the @ref AVFormatContext.iformat "input" or @ref AVFormatContext.oformat + * "output" format. It is either autodetected or set by user for input; + * always set by user for output. + * @li an @ref AVFormatContext.streams "array" of AVStreams, which describe all + * elementary streams stored in the file. AVStreams are typically referred to + * using their index in this array. + * @li an @ref AVFormatContext.pb "I/O context". It is either opened by lavf or + * set by user for input, always set by user for output (unless you are dealing + * with an AVFMT_NOFILE format). + * + * @section lavf_options Passing options to (de)muxers + * It is possible to configure lavf muxers and demuxers using the @ref avoptions + * mechanism. Generic (format-independent) libavformat options are provided by + * AVFormatContext, they can be examined from a user program by calling + * liteav_av_opt_next() / liteav_av_opt_find() on an allocated AVFormatContext (or its AVClass + * from avformat_get_class()). Private (format-specific) options are provided by + * AVFormatContext.priv_data if and only if AVInputFormat.priv_class / + * AVOutputFormat.priv_class of the corresponding format struct is non-NULL. + * Further options may be provided by the @ref AVFormatContext.pb "I/O context", + * if its AVClass is non-NULL, and the protocols layer. See the discussion on + * nesting in @ref avoptions documentation to learn how to access those. + * + * @section urls + * URL strings in libavformat are made of a scheme/protocol, a ':', and a + * scheme specific string. URLs without a scheme and ':' used for local files + * are supported but deprecated. "file:" should be used for local files. + * + * It is important that the scheme string is not taken from untrusted + * sources without checks. 
+ * + * Note that some schemes/protocols are quite powerful, allowing access to + * both local and remote files, parts of them, concatenations of them, local + * audio and video devices and so on. + * + * @{ + * + * @defgroup lavf_decoding Demuxing + * @{ + * Demuxers read a media file and split it into chunks of data (@em packets). A + * @ref AVPacket "packet" contains one or more encoded frames which belongs to a + * single elementary stream. In the lavf API this process is represented by the + * avformat_open_input() function for opening a file, av_read_frame() for + * reading a single packet and finally avformat_close_input(), which does the + * cleanup. + * + * @section lavf_decoding_open Opening a media file + * The minimum information required to open a file is its URL, which + * is passed to avformat_open_input(), as in the following code: + * @code + * const char *url = "file:in.mp3"; + * AVFormatContext *s = NULL; + * int ret = avformat_open_input(&s, url, NULL, NULL); + * if (ret < 0) + * abort(); + * @endcode + * The above code attempts to allocate an AVFormatContext, open the + * specified file (autodetecting the format) and read the header, exporting the + * information stored there into s. Some formats do not have a header or do not + * store enough information there, so it is recommended that you call the + * avformat_find_stream_info() function which tries to read and decode a few + * frames to find missing information. + * + * In some cases you might want to preallocate an AVFormatContext yourself with + * avformat_alloc_context() and do some tweaking on it before passing it to + * avformat_open_input(). One such case is when you want to use custom functions + * for reading input data instead of lavf internal I/O layer. + * To do that, create your own AVIOContext with liteav_avio_alloc_context(), passing + * your reading callbacks to it. Then set the @em pb field of your + * AVFormatContext to newly created AVIOContext. 
+ * + * Since the format of the opened file is in general not known until after + * avformat_open_input() has returned, it is not possible to set demuxer private + * options on a preallocated context. Instead, the options should be passed to + * avformat_open_input() wrapped in an AVDictionary: + * @code + * AVDictionary *options = NULL; + * liteav_av_dict_set(&options, "video_size", "640x480", 0); + * liteav_av_dict_set(&options, "pixel_format", "rgb24", 0); + * + * if (avformat_open_input(&s, url, NULL, &options) < 0) + * abort(); + * liteav_av_dict_free(&options); + * @endcode + * This code passes the private options 'video_size' and 'pixel_format' to the + * demuxer. They would be necessary for e.g. the rawvideo demuxer, since it + * cannot know how to interpret raw video data otherwise. If the format turns + * out to be something different than raw video, those options will not be + * recognized by the demuxer and therefore will not be applied. Such unrecognized + * options are then returned in the options dictionary (recognized options are + * consumed). The calling program can handle such unrecognized options as it + * wishes, e.g. + * @code + * AVDictionaryEntry *e; + * if (e = liteav_av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX)) { + * fprintf(stderr, "Option %s not recognized by the demuxer.\n", e->key); + * abort(); + * } + * @endcode + * + * After you have finished reading the file, you must close it with + * avformat_close_input(). It will free everything associated with the file. + * + * @section lavf_decoding_read Reading from an opened file + * Reading data from an opened AVFormatContext is done by repeatedly calling + * av_read_frame() on it. Each call, if successful, will return an AVPacket + * containing encoded data for one AVStream, identified by + * AVPacket.stream_index. 
This packet may be passed straight into the libavcodec + * decoding functions liteav_avcodec_send_packet() or liteav_avcodec_decode_subtitle2() if the + * caller wishes to decode the data. + * + * AVPacket.pts, AVPacket.dts and AVPacket.duration timing information will be + * set if known. They may also be unset (i.e. AV_NOPTS_VALUE for + * pts/dts, 0 for duration) if the stream does not provide them. The timing + * information will be in AVStream.time_base units, i.e. it has to be + * multiplied by the timebase to convert them to seconds. + * + * If AVPacket.buf is set on the returned packet, then the packet is + * allocated dynamically and the user may keep it indefinitely. + * Otherwise, if AVPacket.buf is NULL, the packet data is backed by a + * static storage somewhere inside the demuxer and the packet is only valid + * until the next av_read_frame() call or closing the file. If the caller + * requires a longer lifetime, liteav_av_dup_packet() will make an liteav_av_malloc()ed copy + * of it. + * In both cases, the packet must be freed with liteav_av_packet_unref() when it is no + * longer needed. + * + * @section lavf_decoding_seek Seeking + * @} + * + * @defgroup lavf_encoding Muxing + * @{ + * Muxers take encoded data in the form of @ref AVPacket "AVPackets" and write + * it into files or other output bytestreams in the specified container format. + * + * The main API functions for muxing are liteav_avformat_write_header() for writing the + * file header, liteav_av_write_frame() / liteav_av_interleaved_write_frame() for writing the + * packets and liteav_av_write_trailer() for finalizing the file. + * + * At the beginning of the muxing process, the caller must first call + * avformat_alloc_context() to create a muxing context. The caller then sets up + * the muxer by filling the various fields in this context: + * + * - The @ref AVFormatContext.oformat "oformat" field must be set to select the + * muxer that will be used. 
+ * - Unless the format is of the AVFMT_NOFILE type, the @ref AVFormatContext.pb + * "pb" field must be set to an opened IO context, either returned from + * liteav_avio_open2() or a custom one. + * - Unless the format is of the AVFMT_NOSTREAMS type, at least one stream must + * be created with the avformat_new_stream() function. The caller should fill + * the @ref AVStream.codecpar "stream codec parameters" information, such as the + * codec @ref AVCodecParameters.codec_type "type", @ref AVCodecParameters.codec_id + * "id" and other parameters (e.g. width / height, the pixel or sample format, + * etc.) as known. The @ref AVStream.time_base "stream timebase" should + * be set to the timebase that the caller desires to use for this stream (note + * that the timebase actually used by the muxer can be different, as will be + * described later). + * - It is advised to manually initialize only the relevant fields in + * AVCodecParameters, rather than using @ref avcodec_parameters_copy() during + * remuxing: there is no guarantee that the codec context values remain valid + * for both input and output format contexts. + * - The caller may fill in additional information, such as @ref + * AVFormatContext.metadata "global" or @ref AVStream.metadata "per-stream" + * metadata, @ref AVFormatContext.chapters "chapters", @ref + * AVFormatContext.programs "programs", etc. as described in the + * AVFormatContext documentation. Whether such information will actually be + * stored in the output depends on what the container format and the muxer + * support. + * + * When the muxing context is fully set up, the caller must call + * liteav_avformat_write_header() to initialize the muxer internals and write the file + * header. Whether anything actually is written to the IO context at this step + * depends on the muxer, but this function must always be called. Any muxer + * private options must be passed in the options parameter to this function. 
+ * + * The data is then sent to the muxer by repeatedly calling liteav_av_write_frame() or + * liteav_av_interleaved_write_frame() (consult those functions' documentation for + * discussion on the difference between them; only one of them may be used with + * a single muxing context, they should not be mixed). Do note that the timing + * information on the packets sent to the muxer must be in the corresponding + * AVStream's timebase. That timebase is set by the muxer (in the + * liteav_avformat_write_header() step) and may be different from the timebase + * requested by the caller. + * + * Once all the data has been written, the caller must call liteav_av_write_trailer() + * to flush any buffered packets and finalize the output file, then close the IO + * context (if any) and finally free the muxing context with + * avformat_free_context(). + * @} + * + * @defgroup lavf_io I/O Read/Write + * @{ + * @section lavf_io_dirlist Directory listing + * The directory listing API makes it possible to list files on remote servers. + * + * Some of possible use cases: + * - an "open file" dialog to choose files from a remote location, + * - a recursive media finder providing a player with an ability to play all + * files from a given directory. + * + * @subsection lavf_io_dirlist_open Opening a directory + * At first, a directory needs to be opened by calling liteav_avio_open_dir() + * supplied with a URL and, optionally, ::AVDictionary containing + * protocol-specific parameters. The function returns zero or positive + * integer and allocates AVIODirContext on success. + * + * @code + * AVIODirContext *ctx = NULL; + * if (liteav_avio_open_dir(&ctx, "smb://example.com/some_dir", NULL) < 0) { + * fprintf(stderr, "Cannot open directory.\n"); + * abort(); + * } + * @endcode + * + * This code tries to open a sample directory using smb protocol without + * any additional parameters. + * + * @subsection lavf_io_dirlist_read Reading entries + * Each directory's entry (i.e. 
file, another directory, anything else + * within ::AVIODirEntryType) is represented by AVIODirEntry. + * Reading consecutive entries from an opened AVIODirContext is done by + * repeatedly calling liteav_avio_read_dir() on it. Each call returns zero or + * positive integer if successful. Reading can be stopped right after the + * NULL entry has been read -- it means there are no entries left to be + * read. The following code reads all entries from a directory associated + * with ctx and prints their names to standard output. + * @code + * AVIODirEntry *entry = NULL; + * for (;;) { + * if (liteav_avio_read_dir(ctx, &entry) < 0) { + * fprintf(stderr, "Cannot list directory.\n"); + * abort(); + * } + * if (!entry) + * break; + * printf("%s\n", entry->name); + * liteav_avio_free_directory_entry(&entry); + * } + * @endcode + * @} + * + * @defgroup lavf_codec Demuxers + * @{ + * @defgroup lavf_codec_native Native Demuxers + * @{ + * @} + * @defgroup lavf_codec_wrappers External library wrappers + * @{ + * @} + * @} + * @defgroup lavf_protos I/O Protocols + * @{ + * @} + * @defgroup lavf_internal Internal + * @{ + * @} + * @} + */ + +#include <time.h> +#include <stdio.h> /* FILE */ +#include "libavcodec/avcodec.h" +#include "libavutil/dict.h" +#include "libavutil/log.h" + +#include "avio.h" +#include "libavformat/version.h" + +struct AVFormatContext; + +struct AVDeviceInfoList; +struct AVDeviceCapabilitiesQuery; + +/** + * @defgroup metadata_api Public Metadata API + * @{ + * @ingroup libavf + * The metadata API allows libavformat to export metadata tags to a client + * application when demuxing. Conversely it allows a client application to + * set metadata when muxing. + * + * Metadata is exported or set as pairs of key/value strings in the 'metadata' + * fields of the AVFormatContext, AVStream, AVChapter and AVProgram structs + * using the @ref lavu_dict "AVDictionary" API. Like all strings in FFmpeg, + * metadata is assumed to be UTF-8 encoded Unicode. 
Note that metadata + * exported by demuxers isn't checked to be valid UTF-8 in most cases. + * + * Important concepts to keep in mind: + * - Keys are unique; there can never be 2 tags with the same key. This is + * also meant semantically, i.e., a demuxer should not knowingly produce + * several keys that are literally different but semantically identical. + * E.g., key=Author5, key=Author6. In this example, all authors must be + * placed in the same tag. + * - Metadata is flat, not hierarchical; there are no subtags. If you + * want to store, e.g., the email address of the child of producer Alice + * and actor Bob, that could have key=alice_and_bobs_childs_email_address. + * - Several modifiers can be applied to the tag name. This is done by + * appending a dash character ('-') and the modifier name in the order + * they appear in the list below -- e.g. foo-eng-sort, not foo-sort-eng. + * - language -- a tag whose value is localized for a particular language + * is appended with the ISO 639-2/B 3-letter language code. + * For example: Author-ger=Michael, Author-eng=Mike + * The original/default language is in the unqualified "Author" tag. + * A demuxer should set a default if it sets any translated tag. + * - sorting -- a modified version of a tag that should be used for + * sorting will have '-sort' appended. E.g. artist="The Beatles", + * artist-sort="Beatles, The". + * - Some protocols and demuxers support metadata updates. After a successful + * call to av_read_packet(), AVFormatContext.event_flags or AVStream.event_flags + * will be updated to indicate if metadata changed. In order to detect metadata + * changes on a stream, you need to loop through all streams in the AVFormatContext + * and check their individual event_flags. + * + * - Demuxers attempt to export metadata in a generic format, however tags + * with no generic equivalents are left as they are stored in the container. 
+ * Follows a list of generic tag names: + * + @verbatim + album -- name of the set this work belongs to + album_artist -- main creator of the set/album, if different from artist. + e.g. "Various Artists" for compilation albums. + artist -- main creator of the work + comment -- any additional description of the file. + composer -- who composed the work, if different from artist. + copyright -- name of copyright holder. + creation_time-- date when the file was created, preferably in ISO 8601. + date -- date when the work was created, preferably in ISO 8601. + disc -- number of a subset, e.g. disc in a multi-disc collection. + encoder -- name/settings of the software/hardware that produced the file. + encoded_by -- person/group who created the file. + filename -- original name of the file. + genre -- <self-evident>. + language -- main language in which the work is performed, preferably + in ISO 639-2 format. Multiple languages can be specified by + separating them with commas. + performer -- artist who performed the work, if different from artist. + E.g for "Also sprach Zarathustra", artist would be "Richard + Strauss" and performer "London Philharmonic Orchestra". + publisher -- name of the label/publisher. + service_name -- name of the service in broadcasting (channel name). + service_provider -- name of the service provider in broadcasting. + title -- name of the work. + track -- number of this work in the set, can be in form current/total. + variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of + @endverbatim + * + * Look in the examples section for an application example how to use the Metadata API. + * + * @} + */ + +/* packet functions */ + + +/** + * Allocate and read the payload of a packet and initialize its + * fields with default values. 
+ * + * @param s associated IO context + * @param pkt packet + * @param size desired payload size + * @return >0 (read size) if OK, AVERROR_xxx otherwise + */ +int av_get_packet(AVIOContext *s, AVPacket *pkt, int size); + + +/** + * Read data and append it to the current content of the AVPacket. + * If pkt->size is 0 this is identical to av_get_packet. + * Note that this uses liteav_av_grow_packet and thus involves a realloc + * which is inefficient. Thus this function should only be used + * when there is no reasonable way to know (an upper bound of) + * the final size. + * + * @param s associated IO context + * @param pkt packet + * @param size amount of data to read + * @return >0 (read size) if OK, AVERROR_xxx otherwise, previous data + * will not be lost even if an error occurs. + */ +int av_append_packet(AVIOContext *s, AVPacket *pkt, int size); + +/*************************************************/ +/* input/output formats */ + +struct AVCodecTag; + +/** + * This structure contains the data a format has to probe a file. + */ +typedef struct AVProbeData { + const char *filename; + unsigned char *buf; /**< Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero. */ + int buf_size; /**< Size of buf except extra allocated bytes */ + const char *mime_type; /**< mime_type, when known. */ +} AVProbeData; + +#define AVPROBE_SCORE_RETRY (AVPROBE_SCORE_MAX/4) +#define AVPROBE_SCORE_STREAM_RETRY (AVPROBE_SCORE_MAX/4-1) + +#define AVPROBE_SCORE_EXTENSION 50 ///< score for file extension +#define AVPROBE_SCORE_MIME 75 ///< score for file mime type +#define AVPROBE_SCORE_MAX 100 ///< maximum score + +#define AVPROBE_PADDING_SIZE 32 ///< extra allocated bytes at the end of the probe buffer + +/// Demuxer will use liteav_avio_open, no opened file should be provided by the caller. +#define AVFMT_NOFILE 0x0001 +#define AVFMT_NEEDNUMBER 0x0002 /**< Needs '%d' in filename. */ +#define AVFMT_SHOW_IDS 0x0008 /**< Show format stream IDs numbers. 
*/ +#define AVFMT_GLOBALHEADER 0x0040 /**< Format wants global header. */ +#define AVFMT_NOTIMESTAMPS 0x0080 /**< Format does not need / have any timestamps. */ +#define AVFMT_GENERIC_INDEX 0x0100 /**< Use generic index building code. */ +#define AVFMT_TS_DISCONT 0x0200 /**< Format allows timestamp discontinuities. Note, muxers always require valid (monotone) timestamps */ +#define AVFMT_VARIABLE_FPS 0x0400 /**< Format allows variable fps. */ +#define AVFMT_NODIMENSIONS 0x0800 /**< Format does not need width/height */ +#define AVFMT_NOSTREAMS 0x1000 /**< Format does not require any streams */ +#define AVFMT_NOBINSEARCH 0x2000 /**< Format does not allow to fall back on binary search via read_timestamp */ +#define AVFMT_NOGENSEARCH 0x4000 /**< Format does not allow to fall back on generic search */ +#define AVFMT_NO_BYTE_SEEK 0x8000 /**< Format does not allow seeking by bytes */ +#define AVFMT_ALLOW_FLUSH 0x10000 /**< Format allows flushing. If not set, the muxer will not receive a NULL packet in the write_packet function. */ +#define AVFMT_TS_NONSTRICT 0x20000 /**< Format does not require strictly + increasing timestamps, but they must + still be monotonic */ +#define AVFMT_TS_NEGATIVE 0x40000 /**< Format allows muxing negative + timestamps. If not set the timestamp + will be shifted in liteav_av_write_frame and + liteav_av_interleaved_write_frame so they + start from 0. + The user or muxer can override this through + AVFormatContext.avoid_negative_ts + */ + +#define AVFMT_SEEK_TO_PTS 0x4000000 /**< Seeking is based on PTS */ + +/** + * @addtogroup lavf_encoding + * @{ + */ +typedef struct AVOutputFormat { + const char *name; + /** + * Descriptive name for the format, meant to be more human-readable + * than name. You should use the NULL_IF_CONFIG_SMALL() macro + * to define it. 
+ */ + const char *long_name; + const char *mime_type; + const char *extensions; /**< comma-separated filename extensions */ + /* output support */ + enum AVCodecID audio_codec; /**< default audio codec */ + enum AVCodecID video_codec; /**< default video codec */ + enum AVCodecID subtitle_codec; /**< default subtitle codec */ + /** + * can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, + * AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, + * AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, + * AVFMT_TS_NONSTRICT, AVFMT_TS_NEGATIVE + */ + int flags; + + /** + * List of supported codec_id-codec_tag pairs, ordered by "better + * choice first". The arrays are all terminated by AV_CODEC_ID_NONE. + */ + const struct AVCodecTag * const *codec_tag; + + + const AVClass *priv_class; ///< AVClass for the private context + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + struct AVOutputFormat *next; + /** + * size of private data so that it can be allocated in the wrapper + */ + int priv_data_size; + + int (*write_header)(struct AVFormatContext *); + /** + * Write a packet. If AVFMT_ALLOW_FLUSH is set in flags, + * pkt can be NULL in order to flush data buffered in the muxer. + * When flushing, return 0 if there still is more data to flush, + * or 1 if everything was flushed and there is no more buffered + * data. + */ + int (*write_packet)(struct AVFormatContext *, AVPacket *pkt); + int (*write_trailer)(struct AVFormatContext *); + /** + * Currently only used to set pixel format if not YUV420P. + */ + int (*interleave_packet)(struct AVFormatContext *, AVPacket *out, + AVPacket *in, int flush); + /** + * Test if the given codec can be stored in this container. 
+ * + * @return 1 if the codec is supported, 0 if it is not. + * A negative number if unknown. + * MKTAG('A', 'P', 'I', 'C') if the codec is only supported as AV_DISPOSITION_ATTACHED_PIC + */ + int (*query_codec)(enum AVCodecID id, int std_compliance); + + void (*get_output_timestamp)(struct AVFormatContext *s, int stream, + int64_t *dts, int64_t *wall); + /** + * Allows sending messages from application to device. + */ + int (*control_message)(struct AVFormatContext *s, int type, + void *data, size_t data_size); + + /** + * Write an uncoded AVFrame. + * + * See liteav_av_write_uncoded_frame() for details. + * + * The library will free *frame afterwards, but the muxer can prevent it + * by setting the pointer to NULL. + */ + int (*write_uncoded_frame)(struct AVFormatContext *, int stream_index, + AVFrame **frame, unsigned flags); + /** + * Returns device list with it properties. + * @see avdevice_list_devices() for more details. + */ + int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list); + /** + * Initialize device capabilities submodule. + * @see avdevice_capabilities_create() for more details. + */ + int (*create_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps); + /** + * Free device capabilities submodule. + * @see avdevice_capabilities_free() for more details. + */ + int (*free_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps); + enum AVCodecID data_codec; /**< default data codec */ + /** + * Initialize format. May allocate data here, and set any AVFormatContext or + * AVStream parameters that need to be set before packets are sent. + * This method must not write output. + * + * Return 0 if streams were fully configured, 1 if not, negative AVERROR on failure + * + * Any allocations made here must be freed in deinit(). + */ + int (*init)(struct AVFormatContext *); + /** + * Deinitialize format. 
If present, this is called whenever the muxer is being + * destroyed, regardless of whether or not the header has been written. + * + * If a trailer is being written, this is called after write_trailer(). + * + * This is called if init() fails as well. + */ + void (*deinit)(struct AVFormatContext *); + /** + * Set up any necessary bitstream filtering and extract any extra data needed + * for the global header. + * Return 0 if more packets from this stream must be checked; 1 if not. + */ + int (*check_bitstream)(struct AVFormatContext *, const AVPacket *pkt); +} AVOutputFormat; +/** + * @} + */ + +/** + * @addtogroup lavf_decoding + * @{ + */ +typedef struct AVInputFormat { + /** + * A comma separated list of short names for the format. New names + * may be appended with a minor bump. + */ + const char *name; + + /** + * Descriptive name for the format, meant to be more human-readable + * than name. You should use the NULL_IF_CONFIG_SMALL() macro + * to define it. + */ + const char *long_name; + + /** + * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, + * AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, + * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS. + */ + int flags; + + /** + * If extensions are defined, then no probe is done. You should + * usually not use extension format guessing because it is not + * reliable enough + */ + const char *extensions; + + const struct AVCodecTag * const *codec_tag; + + const AVClass *priv_class; ///< AVClass for the private context + + /** + * Comma-separated list of mime types. + * It is used check for matching mime types while probing. + * @see liteav_av_probe_input_format2 + */ + const char *mime_type; + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. 
+ ***************************************************************** + */ + struct AVInputFormat *next; + + /** + * Raw demuxers store their codec ID here. + */ + int raw_codec_id; + + /** + * Size of private data so that it can be allocated in the wrapper. + */ + int priv_data_size; + + /** + * Tell if a given file has a chance of being parsed as this format. + * The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes + * big so you do not have to check for that unless you need more. + */ + int (*read_probe)(AVProbeData *); + + /** + * Read the format header and initialize the AVFormatContext + * structure. Return 0 if OK. 'avformat_new_stream' should be + * called to create new streams. + */ + int (*read_header)(struct AVFormatContext *); + + /** + * 自定义协议解析函数需要携带AVDictionary + */ + int (*read_header_with_dict)(struct AVFormatContext *, AVDictionary **options); + + /** + * Read one packet and put it in 'pkt'. pts and flags are also + * set. 'avformat_new_stream' can be called only if the flag + * AVFMTCTX_NOHEADER is used and only in the calling thread (not in a + * background thread). + * @return 0 on success, < 0 on error. + * When returning an error, pkt must not have been allocated + * or must be freed before returning + */ + int (*read_packet)(struct AVFormatContext *, AVPacket *pkt); + + /** + * Close the stream. The AVFormatContext and AVStreams are not + * freed by this function + */ + int (*read_close)(struct AVFormatContext *); + + /** + * Seek to a given timestamp relative to the frames in + * stream component stream_index. + * @param stream_index Must not be -1. + * @param flags Selects which direction should be preferred if no exact + * match is available. + * @return >= 0 on success (but not necessarily the new offset) + */ + int (*read_seek)(struct AVFormatContext *, + int stream_index, int64_t timestamp, int flags); + + /** + * Get the next timestamp in stream[stream_index].time_base units. 
+ * @return the timestamp or AV_NOPTS_VALUE if an error occurred + */ + int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index, + int64_t *pos, int64_t pos_limit); + + /** + * Start/resume playing - only meaningful if using a network-based format + * (RTSP). + */ + int (*read_play)(struct AVFormatContext *); + + /** + * Pause playing - only meaningful if using a network-based format + * (RTSP). + */ + int (*read_pause)(struct AVFormatContext *); + + /** + * Seek to timestamp ts. + * Seeking will be done so that the point from which all active streams + * can be presented successfully will be closest to ts and within min/max_ts. + * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. + */ + int (*read_seek2)(struct AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); + + /** + * Returns device list with it properties. + * @see avdevice_list_devices() for more details. + */ + int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list); + + /** + * Initialize device capabilities submodule. + * @see avdevice_capabilities_create() for more details. + */ + int (*create_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps); + + /** + * Free device capabilities submodule. + * @see avdevice_capabilities_free() for more details. + */ + int (*free_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps); +} AVInputFormat; +/** + * @} + */ + +enum AVStreamParseType { + AVSTREAM_PARSE_NONE, + AVSTREAM_PARSE_FULL, /**< full parsing and repack */ + AVSTREAM_PARSE_HEADERS, /**< Only parse headers, do not repack. 
*/ + AVSTREAM_PARSE_TIMESTAMPS, /**< full parsing and interpolation of timestamps for frames not starting on a packet boundary */ + AVSTREAM_PARSE_FULL_ONCE, /**< full parsing and repack of the first frame only, only implemented for H.264 currently */ + AVSTREAM_PARSE_FULL_RAW, /**< full parsing and repack with timestamp and position generation by parser for raw + this assumes that each packet in the file contains no demuxer level headers and + just codec level data, otherwise position generation would fail */ +}; + +typedef struct AVIndexEntry { + int64_t pos; + int64_t timestamp; /**< + * Timestamp in AVStream.time_base units, preferably the time from which on correctly decoded frames are available + * when seeking to this entry. That means preferable PTS on keyframe based formats. + * But demuxers can choose to store a different timestamp, if it is more convenient for the implementation or nothing better + * is known + */ +#define AVINDEX_KEYFRAME 0x0001 +#define AVINDEX_DISCARD_FRAME 0x0002 /** + * Flag is used to indicate which frame should be discarded after decoding. + */ + int flags:2; + int size:30; //Yeah, trying to keep the size of this small to reduce memory requirements (it is 24 vs. 32 bytes due to possible 8-byte alignment). + int min_distance; /**< Minimum distance between this and the previous keyframe, used to avoid unneeded searching. */ +} AVIndexEntry; + +#define AV_DISPOSITION_DEFAULT 0x0001 +#define AV_DISPOSITION_DUB 0x0002 +#define AV_DISPOSITION_ORIGINAL 0x0004 +#define AV_DISPOSITION_COMMENT 0x0008 +#define AV_DISPOSITION_LYRICS 0x0010 +#define AV_DISPOSITION_KARAOKE 0x0020 + +/** + * Track should be used during playback by default. + * Useful for subtitle track that should be displayed + * even when user did not explicitly ask for subtitles. 
+ */ +#define AV_DISPOSITION_FORCED 0x0040 +#define AV_DISPOSITION_HEARING_IMPAIRED 0x0080 /**< stream for hearing impaired audiences */ +#define AV_DISPOSITION_VISUAL_IMPAIRED 0x0100 /**< stream for visual impaired audiences */ +#define AV_DISPOSITION_CLEAN_EFFECTS 0x0200 /**< stream without voice */ +/** + * The stream is stored in the file as an attached picture/"cover art" (e.g. + * APIC frame in ID3v2). The first (usually only) packet associated with it + * will be returned among the first few packets read from the file unless + * seeking takes place. It can also be accessed at any time in + * AVStream.attached_pic. + */ +#define AV_DISPOSITION_ATTACHED_PIC 0x0400 +/** + * The stream is sparse, and contains thumbnail images, often corresponding + * to chapter markers. Only ever used with AV_DISPOSITION_ATTACHED_PIC. + */ +#define AV_DISPOSITION_TIMED_THUMBNAILS 0x0800 + +typedef struct AVStreamInternal AVStreamInternal; + +/** + * To specify text track kind (different from subtitles default). + */ +#define AV_DISPOSITION_CAPTIONS 0x10000 +#define AV_DISPOSITION_DESCRIPTIONS 0x20000 +#define AV_DISPOSITION_METADATA 0x40000 +#define AV_DISPOSITION_DEPENDENT 0x80000 ///< dependent audio stream (mix_type=0 in mpegts) +#define AV_DISPOSITION_STILL_IMAGE 0x100000 ///< still images in video stream (still_picture_flag=1 in mpegts) + +/** + * Options for behavior on timestamp wrap detection. + */ +#define AV_PTS_WRAP_IGNORE 0 ///< ignore the wrap +#define AV_PTS_WRAP_ADD_OFFSET 1 ///< add the format specific offset on wrap detection +#define AV_PTS_WRAP_SUB_OFFSET -1 ///< subtract the format specific offset on wrap detection + +/** + * Stream structure. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVStream) must not be used outside libav*. 
+ */ +typedef struct AVStream { + int index; /**< stream index in AVFormatContext */ + /** + * Format-specific stream ID. + * decoding: set by libavformat + * encoding: set by the user, replaced by libavformat if left unset + */ + int id; +#if FF_API_LAVF_AVCTX + /** + * @deprecated use the codecpar struct instead + */ + attribute_deprecated + AVCodecContext *codec; +#endif + void *priv_data; + + /** + * This is the fundamental unit of time (in seconds) in terms + * of which frame timestamps are represented. + * + * decoding: set by libavformat + * encoding: May be set by the caller before liteav_avformat_write_header() to + * provide a hint to the muxer about the desired timebase. In + * liteav_avformat_write_header(), the muxer will overwrite this field + * with the timebase that will actually be used for the timestamps + * written into the file (which may or may not be related to the + * user-provided one, depending on the format). + */ + AVRational time_base; + + /** + * Decoding: pts of the first frame of the stream in presentation order, in stream time base. + * Only set this if you are absolutely 100% sure that the value you set + * it to really is the pts of the first frame. + * This may be undefined (AV_NOPTS_VALUE). + * @note The ASF header does NOT contain a correct start_time the ASF + * demuxer must NOT set this. + */ + int64_t start_time; + + /** + * Decoding: duration of the stream, in stream time base. + * If a source file does not specify a duration, but does specify + * a bitrate, this value will be estimated from bitrate and file size. + * + * Encoding: May be set by the caller before liteav_avformat_write_header() to + * provide a hint to the muxer about the estimated duration. + */ + int64_t duration; + + int64_t nb_frames; ///< number of frames in this stream if known or 0 + + int disposition; /**< AV_DISPOSITION_* bit field */ + + enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed. 
+ + /** + * sample aspect ratio (0 if unknown) + * - encoding: Set by user. + * - decoding: Set by libavformat. + */ + AVRational sample_aspect_ratio; + + AVDictionary *metadata; + + /** + * Average framerate + * + * - demuxing: May be set by libavformat when creating the stream or in + * avformat_find_stream_info(). + * - muxing: May be set by the caller before liteav_avformat_write_header(). + */ + AVRational avg_frame_rate; + + /** + * For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet + * will contain the attached picture. + * + * decoding: set by libavformat, must not be modified by the caller. + * encoding: unused + */ + AVPacket attached_pic; + + /** + * An array of side data that applies to the whole stream (i.e. the + * container does not allow it to change between packets). + * + * There may be no overlap between the side data in this array and side data + * in the packets. I.e. a given side data is either exported by the muxer + * (demuxing) / set by the caller (muxing) in this array, then it never + * appears in the packets, or the side data is exported / sent through + * the packets (always in the first packet where the value becomes known or + * changes), then it does not appear in this array. + * + * - demuxing: Set by libavformat when the stream is created. + * - muxing: May be set by the caller before liteav_avformat_write_header(). + * + * Freed by libavformat in avformat_free_context(). + * + * @see av_format_inject_global_side_data() + */ + AVPacketSideData *side_data; + /** + * The number of elements in the AVStream.side_data array. + */ + int nb_side_data; + + /** + * Flags for the user to detect events happening on the stream. Flags must + * be cleared by the user once the event has been handled. + * A combination of AVSTREAM_EVENT_FLAG_*. + */ + int event_flags; +#define AVSTREAM_EVENT_FLAG_METADATA_UPDATED 0x0001 ///< The call resulted in updated metadata. + + /** + * Real base framerate of the stream. 
+ * This is the lowest framerate with which all timestamps can be + * represented accurately (it is the least common multiple of all + * framerates in the stream). Note, this value is just a guess! + * For example, if the time base is 1/90000 and all frames have either + * approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1. + */ + AVRational r_frame_rate; + +#if FF_API_LAVF_FFSERVER + /** + * String containing pairs of key and values describing recommended encoder configuration. + * Pairs are separated by ','. + * Keys are separated from values by '='. + * + * @deprecated unused + */ + attribute_deprecated + char *recommended_encoder_configuration; +#endif + + /** + * Codec parameters associated with this stream. Allocated and freed by + * libavformat in avformat_new_stream() and avformat_free_context() + * respectively. + * + * - demuxing: filled by libavformat on stream creation or in + * avformat_find_stream_info() + * - muxing: filled by the caller before liteav_avformat_write_header() + */ + AVCodecParameters *codecpar; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * Internal note: be aware that physically removing these fields + * will break ABI. Replace removed fields with dummy fields, and + * add new fields to AVStreamInternal. + ***************************************************************** + */ + +#define MAX_STD_TIMEBASES (30*12+30+3+6) + /** + * Stream information used internally by avformat_find_stream_info() + */ + struct { + int64_t last_dts; + int64_t duration_gcd; + int duration_count; + int64_t rfps_duration_sum; + double (*duration_error)[2][MAX_STD_TIMEBASES]; + int64_t codec_info_duration; + int64_t codec_info_duration_fields; + int frame_delay_evidence; + + /** + * 0 -> decoder has not been searched for yet. 
+ * >0 -> decoder found + * <0 -> decoder with codec_id == -found_decoder has not been found + */ + int found_decoder; + + int64_t last_duration; + + /** + * Those are used for average framerate estimation. + */ + int64_t fps_first_dts; + int fps_first_dts_idx; + int64_t fps_last_dts; + int fps_last_dts_idx; + + } *info; + + int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */ + + // Timestamp generation support: + /** + * Timestamp corresponding to the last dts sync point. + * + * Initialized when AVCodecParserContext.dts_sync_point >= 0 and + * a DTS is received from the underlying container. Otherwise set to + * AV_NOPTS_VALUE by default. + */ + int64_t first_dts; + int64_t cur_dts; + int64_t last_IP_pts; + int last_IP_duration; + + /** + * Number of packets to buffer for codec probing + */ + int probe_packets; + + /** + * Number of frames that have been demuxed during avformat_find_stream_info() + */ + int codec_info_nb_frames; + + /* av_read_frame() support */ + enum AVStreamParseType need_parsing; + struct AVCodecParserContext *parser; + + /** + * last packet in packet_buffer for this stream when muxing. + */ + struct AVPacketList *last_in_packet_buffer; + AVProbeData probe_data; +#define MAX_REORDER_DELAY 16 + int64_t pts_buffer[MAX_REORDER_DELAY+1]; + + AVIndexEntry *index_entries; /**< Only used if the format does not + support seeking natively. */ + int nb_index_entries; + unsigned int index_entries_allocated_size; + + /** + * Stream Identifier + * This is the MPEG-TS stream identifier +1 + * 0 means unknown + */ + int stream_identifier; + + /** + * Details of the MPEG-TS program which created this stream. + */ + int program_num; + int pmt_version; + int pmt_stream_idx; + + int64_t interleaver_chunk_size; + int64_t interleaver_chunk_duration; + + /** + * stream probing state + * -1 -> probing finished + * 0 -> no probing requested + * rest -> perform probing with request_probe being the minimum score to accept. 
+ * NOT PART OF PUBLIC API + */ + int request_probe; + /** + * Indicates that everything up to the next keyframe + * should be discarded. + */ + int skip_to_keyframe; + + /** + * Number of samples to skip at the start of the frame decoded from the next packet. + */ + int skip_samples; + + /** + * If not 0, the number of samples that should be skipped from the start of + * the stream (the samples are removed from packets with pts==0, which also + * assumes negative timestamps do not happen). + * Intended for use with formats such as mp3 with ad-hoc gapless audio + * support. + */ + int64_t start_skip_samples; + + /** + * If not 0, the first audio sample that should be discarded from the stream. + * This is broken by design (needs global sample count), but can't be + * avoided for broken by design formats such as mp3 with ad-hoc gapless + * audio support. + */ + int64_t first_discard_sample; + + /** + * The sample after last sample that is intended to be discarded after + * first_discard_sample. Works on frame boundaries only. Used to prevent + * early EOF if the gapless info is broken (considered concatenated mp3s). + */ + int64_t last_discard_sample; + + /** + * Number of internally decoded frames, used internally in libavformat, do not access + * its lifetime differs from info which is why it is not in that structure. + */ + int nb_decoded_frames; + + /** + * Timestamp offset added to timestamps before muxing + * NOT PART OF PUBLIC API + */ + int64_t mux_ts_offset; + + /** + * Internal data to check for wrapping of the time stamp + */ + int64_t pts_wrap_reference; + + /** + * Options for behavior, when a wrap is detected. + * + * Defined by AV_PTS_WRAP_ values. + * + * If correction is enabled, there are two possibilities: + * If the first time stamp is near the wrap point, the wrap offset + * will be subtracted, which will create negative time stamps. + * Otherwise the offset will be added. 
+ */ + int pts_wrap_behavior; + + /** + * Internal data to prevent doing update_initial_durations() twice + */ + int update_initial_durations_done; + + /** + * Internal data to generate dts from pts + */ + int64_t pts_reorder_error[MAX_REORDER_DELAY+1]; + uint8_t pts_reorder_error_count[MAX_REORDER_DELAY+1]; + + /** + * Internal data to analyze DTS and detect faulty mpeg streams + */ + int64_t last_dts_for_order_check; + uint8_t dts_ordered; + uint8_t dts_misordered; + + /** + * Internal data to inject global side data + */ + int inject_global_side_data; + + /** + * display aspect ratio (0 if unknown) + * - encoding: unused + * - decoding: Set by libavformat to calculate sample_aspect_ratio internally + */ + AVRational display_aspect_ratio; + + /** + * An opaque field for libavformat internal usage. + * Must not be accessed in any way by callers. + */ + AVStreamInternal *internal; +} AVStream; + +#if FF_API_FORMAT_GET_SET +/** + * Accessors for some AVStream fields. These used to be provided for ABI + * compatibility, and do not need to be used anymore. + */ +attribute_deprecated +AVRational av_stream_get_r_frame_rate(const AVStream *s); +attribute_deprecated +void av_stream_set_r_frame_rate(AVStream *s, AVRational r); +#if FF_API_LAVF_FFSERVER +attribute_deprecated +char* av_stream_get_recommended_encoder_configuration(const AVStream *s); +attribute_deprecated +void av_stream_set_recommended_encoder_configuration(AVStream *s, char *configuration); +#endif +#endif + +struct AVCodecParserContext *av_stream_get_parser(const AVStream *s); + +/** + * Returns the pts of the last muxed packet + its duration + * + * the retuned value is undefined when used with a demuxer. + */ +int64_t av_stream_get_end_pts(const AVStream *st); + +#define AV_PROGRAM_RUNNING 1 + +/** + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVProgram) must not be used outside libav*. 
+ */ +typedef struct AVProgram { + int id; + int flags; + enum AVDiscard discard; ///< selects which program to discard and which to feed to the caller + unsigned int *stream_index; + unsigned int nb_stream_indexes; + AVDictionary *metadata; + + int program_num; + int pmt_pid; + int pcr_pid; + int pmt_version; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + int64_t start_time; + int64_t end_time; + + int64_t pts_wrap_reference; ///< reference dts for wrap detection + int pts_wrap_behavior; ///< behavior on wrap detection +} AVProgram; + +#define AVFMTCTX_NOHEADER 0x0001 /**< signal that no header is present + (streams are added dynamically) */ +#define AVFMTCTX_UNSEEKABLE 0x0002 /**< signal that the stream is definitely + not seekable, and attempts to call the + seek function will fail. For some + network protocols (e.g. HLS), this can + change dynamically at runtime. */ + +typedef struct AVChapter { + int id; ///< unique ID to identify the chapter + AVRational time_base; ///< time base in which the start/end timestamps are specified + int64_t start, end; ///< chapter start/end time in time_base units + AVDictionary *metadata; +} AVChapter; + + +/** + * Callback used by devices to communicate with application. + */ +typedef int (*av_format_control_message)(struct AVFormatContext *s, int type, + void *data, size_t data_size); + +typedef int (*AVOpenCallback)(struct AVFormatContext *s, AVIOContext **pb, const char *url, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * The duration of a video can be estimated through various ways, and this enum can be used + * to know how the duration was estimated. 
+ */ +enum AVDurationEstimationMethod { + AVFMT_DURATION_FROM_PTS, ///< Duration accurately estimated from PTSes + AVFMT_DURATION_FROM_STREAM, ///< Duration estimated from a stream with a known duration + AVFMT_DURATION_FROM_BITRATE ///< Duration estimated from bitrate (less accurate) +}; + +typedef struct AVFormatInternal AVFormatInternal; + +/** + * Format I/O context. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVFormatContext) must not be used outside libav*, use + * avformat_alloc_context() to create an AVFormatContext. + * + * Fields can be accessed through AVOptions (av_opt*), + * the name string used matches the associated command line parameter name and + * can be found in libavformat/options_table.h. + * The AVOption/command line parameter names differ in some cases from the C + * structure field names for historic reasons or brevity. + */ +typedef struct AVFormatContext { + /** + * A class for logging and @ref avoptions. Set by avformat_alloc_context(). + * Exports (de)muxer private options if they exist. + */ + const AVClass *av_class; + + /** + * The input container format. + * + * Demuxing only, set by avformat_open_input(). + */ + struct AVInputFormat *iformat; + + /** + * The output container format. + * + * Muxing only, must be set by the caller before liteav_avformat_write_header(). + */ + struct AVOutputFormat *oformat; + + /** + * Format private data. This is an AVOptions-enabled struct + * if and only if iformat/oformat.priv_class is not NULL. + * + * - muxing: set by liteav_avformat_write_header() + * - demuxing: set by avformat_open_input() + */ + void *priv_data; + + /** + * I/O context. + * + * - demuxing: either set by the user before avformat_open_input() (then + * the user must close it manually) or set by avformat_open_input(). + * - muxing: set by the user before liteav_avformat_write_header(). 
The caller must + * take care of closing / freeing the IO context. + * + * Do NOT set this field if AVFMT_NOFILE flag is set in + * iformat/oformat.flags. In such a case, the (de)muxer will handle + * I/O in some other way and this field will be NULL. + */ + AVIOContext *pb; + + /* stream info */ + /** + * Flags signalling stream properties. A combination of AVFMTCTX_*. + * Set by libavformat. + */ + int ctx_flags; + + /** + * Number of elements in AVFormatContext.streams. + * + * Set by avformat_new_stream(), must not be modified by any other code. + */ + unsigned int nb_streams; + /** + * A list of all streams in the file. New streams are created with + * avformat_new_stream(). + * + * - demuxing: streams are created by libavformat in avformat_open_input(). + * If AVFMTCTX_NOHEADER is set in ctx_flags, then new streams may also + * appear in av_read_frame(). + * - muxing: streams are created by the user before liteav_avformat_write_header(). + * + * Freed by libavformat in avformat_free_context(). + */ + AVStream **streams; + +#if FF_API_FORMAT_FILENAME + /** + * input or output filename + * + * - demuxing: set by avformat_open_input() + * - muxing: may be set by the caller before liteav_avformat_write_header() + * + * @deprecated Use url instead. + */ + attribute_deprecated + char filename[1024]; +#endif + + /** + * input or output URL. Unlike the old filename field, this field has no + * length restriction. + * + * - demuxing: set by avformat_open_input(), initialized to an empty + * string if url parameter was NULL in avformat_open_input(). + * - muxing: may be set by the caller before calling liteav_avformat_write_header() + * (or liteav_avformat_init_output() if that is called first) to a string + * which is freeable by liteav_av_free(). Set to an empty string if it + * was NULL in liteav_avformat_init_output(). + * + * Freed by libavformat in avformat_free_context(). 
+ */ + char *url; + + /** + * Position of the first frame of the component, in + * AV_TIME_BASE fractional seconds. NEVER set this value directly: + * It is deduced from the AVStream values. + * + * Demuxing only, set by libavformat. + */ + int64_t start_time; + + /** + * Duration of the stream, in AV_TIME_BASE fractional + * seconds. Only set this value if you know none of the individual stream + * durations and also do not set any of them. This is deduced from the + * AVStream values if not set. + * + * Demuxing only, set by libavformat. + */ + int64_t duration; + + /** + * Total stream bitrate in bit/s, 0 if not + * available. Never set it directly if the file_size and the + * duration are known as FFmpeg can compute it automatically. + */ + int64_t bit_rate; + + unsigned int packet_size; + int max_delay; + + /** + * Flags modifying the (de)muxer behaviour. A combination of AVFMT_FLAG_*. + * Set by the user before avformat_open_input() / liteav_avformat_write_header(). + */ + int flags; +#define AVFMT_FLAG_GENPTS 0x0001 ///< Generate missing pts even if it requires parsing future frames. +#define AVFMT_FLAG_IGNIDX 0x0002 ///< Ignore index. +#define AVFMT_FLAG_NONBLOCK 0x0004 ///< Do not block when reading packets from input. +#define AVFMT_FLAG_IGNDTS 0x0008 ///< Ignore DTS on frames that contain both DTS & PTS +#define AVFMT_FLAG_NOFILLIN 0x0010 ///< Do not infer any values from other values, just return what is stored in the container +#define AVFMT_FLAG_NOPARSE 0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled +#define AVFMT_FLAG_NOBUFFER 0x0040 ///< Do not buffer frames when possible +#define AVFMT_FLAG_CUSTOM_IO 0x0080 ///< The caller has supplied a custom AVIOContext, don't liteav_avio_close() it. 
+#define AVFMT_FLAG_DISCARD_CORRUPT 0x0100 ///< Discard frames marked corrupted +#define AVFMT_FLAG_FLUSH_PACKETS 0x0200 ///< Flush the AVIOContext every packet. +/** + * When muxing, try to avoid writing any random/volatile data to the output. + * This includes any random IDs, real-time timestamps/dates, muxer version, etc. + * + * This flag is mainly intended for testing. + */ +#define AVFMT_FLAG_BITEXACT 0x0400 +#if FF_API_LAVF_MP4A_LATM +#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Deprecated, does nothing. +#endif +#define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down) +#define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted) +#if FF_API_LAVF_KEEPSIDE_FLAG +#define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Deprecated, does nothing. +#endif +#define AVFMT_FLAG_FAST_SEEK 0x80000 ///< Enable fast, but inaccurate seeks for some formats +#define AVFMT_FLAG_SHORTEST 0x100000 ///< Stop muxing when the shortest stream stops. +#define AVFMT_FLAG_AUTO_BSF 0x200000 ///< Add bitstream filters as requested by the muxer + + /** + * Maximum size of the data read from input for determining + * the input container format. + * Demuxing only, set by the caller before avformat_open_input(). + */ + int64_t probesize; + + /** + * Maximum duration (in AV_TIME_BASE units) of the data read + * from input in avformat_find_stream_info(). + * Demuxing only, set by the caller before avformat_find_stream_info(). + * Can be set to 0 to let avformat choose using a heuristic. + */ + int64_t max_analyze_duration; + + const uint8_t *key; + int keylen; + + unsigned int nb_programs; + AVProgram **programs; + + /** + * Forced video codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID video_codec_id; + + /** + * Forced audio codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID audio_codec_id; + + /** + * Forced subtitle codec_id. 
+ * Demuxing: Set by user. + */ + enum AVCodecID subtitle_codec_id; + + /** + * Maximum amount of memory in bytes to use for the index of each stream. + * If the index exceeds this size, entries will be discarded as + * needed to maintain a smaller size. This can lead to slower or less + * accurate seeking (depends on demuxer). + * Demuxers for which a full in-memory index is mandatory will ignore + * this. + * - muxing: unused + * - demuxing: set by user + */ + unsigned int max_index_size; + + /** + * Maximum amount of memory in bytes to use for buffering frames + * obtained from realtime capture devices. + */ + unsigned int max_picture_buffer; + + /** + * Number of chapters in AVChapter array. + * When muxing, chapters are normally written in the file header, + * so nb_chapters should normally be initialized before write_header + * is called. Some muxers (e.g. mov and mkv) can also write chapters + * in the trailer. To write chapters in the trailer, nb_chapters + * must be zero when write_header is called and non-zero when + * write_trailer is called. + * - muxing: set by user + * - demuxing: set by libavformat + */ + unsigned int nb_chapters; + AVChapter **chapters; + + /** + * Metadata that applies to the whole file. + * + * - demuxing: set by libavformat in avformat_open_input() + * - muxing: may be set by the caller before liteav_avformat_write_header() + * + * Freed by libavformat in avformat_free_context(). + */ + AVDictionary *metadata; + + /** + * Start time of the stream in real world time, in microseconds + * since the Unix epoch (00:00 1st January 1970). That is, pts=0 in the + * stream was captured at this real world time. + * - muxing: Set by the caller before liteav_avformat_write_header(). If set to + * either 0 or AV_NOPTS_VALUE, then the current wall-time will + * be used. + * - demuxing: Set by libavformat. AV_NOPTS_VALUE if unknown. Note that + * the value may become known after some number of frames + * have been received. 
+ */ + int64_t start_time_realtime; + + /** + * The number of frames used for determining the framerate in + * avformat_find_stream_info(). + * Demuxing only, set by the caller before avformat_find_stream_info(). + */ + int fps_probe_size; + + /** + * Error recognition; higher values will detect more errors but may + * misdetect some more or less valid parts as errors. + * Demuxing only, set by the caller before avformat_open_input(). + */ + int error_recognition; + + /** + * Custom interrupt callbacks for the I/O layer. + * + * demuxing: set by the user before avformat_open_input(). + * muxing: set by the user before liteav_avformat_write_header() + * (mainly useful for AVFMT_NOFILE formats). The callback + * should also be passed to liteav_avio_open2() if it's used to + * open the file. + */ + AVIOInterruptCB interrupt_callback; + + /** + * Flags to enable debugging. + */ + int debug; +#define FF_FDEBUG_TS 0x0001 + + /** + * Maximum buffering duration for interleaving. + * + * To ensure all the streams are interleaved correctly, + * liteav_av_interleaved_write_frame() will wait until it has at least one packet + * for each stream before actually writing any packets to the output file. + * When some streams are "sparse" (i.e. there are large gaps between + * successive packets), this can result in excessive buffering. + * + * This field specifies the maximum difference between the timestamps of the + * first and the last packet in the muxing queue, above which libavformat + * will output a packet regardless of whether it has queued a packet for all + * the streams. + * + * Muxing only, set by the caller before liteav_avformat_write_header(). + */ + int64_t max_interleave_delta; + + /** + * Allow non-standard and experimental extension + * @see AVCodecContext.strict_std_compliance + */ + int strict_std_compliance; + + /** + * Flags for the user to detect events happening on the file. Flags must + * be cleared by the user once the event has been handled. 
+ * A combination of AVFMT_EVENT_FLAG_*. + */ + int event_flags; +#define AVFMT_EVENT_FLAG_METADATA_UPDATED 0x0001 ///< The call resulted in updated metadata. +#define AVFMT_EVENT_FLAG_AVFORMAT_UPDATED 0x0002 ///< The call resulted in updated avformat context. +#define AVFMT_EVENT_FLAG_PROGRAM_UPDATED 0x0004 ///< The call resulted in updated program context. +#define AVFMT_EVENT_FLAG_FINDSTREAM_MEET_DISCONTINUITY 0x0008 ///< The call resulted in find stream + ///< info meet discontinuity must be + ///< terminated. + + + /** + * Maximum number of packets to read while waiting for the first timestamp. + * Decoding only. + */ + int max_ts_probe; + + /** + * Avoid negative timestamps during muxing. + * Any value of the AVFMT_AVOID_NEG_TS_* constants. + * Note, this only works when using liteav_av_interleaved_write_frame. (interleave_packet_per_dts is in use) + * - muxing: Set by user + * - demuxing: unused + */ + int avoid_negative_ts; +#define AVFMT_AVOID_NEG_TS_AUTO -1 ///< Enabled when required by target format +#define AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE 1 ///< Shift timestamps so they are non negative +#define AVFMT_AVOID_NEG_TS_MAKE_ZERO 2 ///< Shift timestamps so that they start at 0 + + /** + * Transport stream id. + * This will be moved into demuxer private options. Thus no API/ABI compatibility + */ + int ts_id; + + /** + * Audio preload in microseconds. + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. + * - encoding: Set by user + * - decoding: unused + */ + int audio_preload; + + /** + * Max chunk time in microseconds. + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. + * - encoding: Set by user + * - decoding: unused + */ + int max_chunk_duration; + + /** + * Max chunk size in bytes + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. 
+ * - encoding: Set by user + * - decoding: unused + */ + int max_chunk_size; + + /** + * forces the use of wallclock timestamps as pts/dts of packets + * This has undefined results in the presence of B frames. + * - encoding: unused + * - decoding: Set by user + */ + int use_wallclock_as_timestamps; + + /** + * avio flags, used to force AVIO_FLAG_DIRECT. + * - encoding: unused + * - decoding: Set by user + */ + int avio_flags; + + /** + * The duration field can be estimated through various ways, and this field can be used + * to know how the duration was estimated. + * - encoding: unused + * - decoding: Read by user + */ + enum AVDurationEstimationMethod duration_estimation_method; + + /** + * Skip initial bytes when opening stream + * - encoding: unused + * - decoding: Set by user + */ + int64_t skip_initial_bytes; + + /** + * Correct single timestamp overflows + * - encoding: unused + * - decoding: Set by user + */ + unsigned int correct_ts_overflow; + + /** + * Force seeking to any (also non key) frames. + * - encoding: unused + * - decoding: Set by user + */ + int seek2any; + + /** + * Flush the I/O context after each packet. + * - encoding: Set by user + * - decoding: unused + */ + int flush_packets; + + /** + * format probing score. + * The maximal score is AVPROBE_SCORE_MAX, its set when the demuxer probes + * the format. + * - encoding: unused + * - decoding: set by avformat, read by user + */ + int probe_score; + + /** + * number of bytes to read maximally to identify format. + * - encoding: unused + * - decoding: set by user + */ + int format_probesize; + + /** + * ',' separated list of allowed decoders. + * If NULL then all are allowed + * - encoding: unused + * - decoding: set by user + */ + char *codec_whitelist; + + /** + * ',' separated list of allowed demuxers. + * If NULL then all are allowed + * - encoding: unused + * - decoding: set by user + */ + char *format_whitelist; + + /** + * An opaque field for libavformat internal usage. 
+ * Must not be accessed in any way by callers. + */ + AVFormatInternal *internal; + + /** + * IO repositioned flag. + * This is set by avformat when the underlaying IO context read pointer + * is repositioned, for example when doing byte based seeking. + * Demuxers can use the flag to detect such changes. + */ + int io_repositioned; + + /** + * Forced video codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user + */ + AVCodec *video_codec; + + /** + * Forced audio codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user + */ + AVCodec *audio_codec; + + /** + * Forced subtitle codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user + */ + AVCodec *subtitle_codec; + + /** + * Forced data codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user + */ + AVCodec *data_codec; + + /** + * Number of bytes to be written as padding in a metadata header. + * Demuxing: Unused. + * Muxing: Set by user via av_format_set_metadata_header_padding. + */ + int metadata_header_padding; + + /** + * User data. + * This is a place for some private data of the user. + */ + void *opaque; + + /** + * Callback used by devices to communicate with application. + */ + av_format_control_message control_message_cb; + + /** + * Output timestamp offset, in microseconds. + * Muxing: set by user + */ + int64_t output_ts_offset; + + /** + * dump format separator. + * can be ", " or "\n " or anything else + * - muxing: Set by user. + * - demuxing: Set by user. + */ + uint8_t *dump_separator; + + /** + * Forced Data codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID data_codec_id; + +#if FF_API_OLD_OPEN_CALLBACKS + /** + * Called to open further IO contexts when needed for demuxing. 
+ * + * This can be set by the user application to perform security checks on + * the URLs before opening them. + * The function should behave like liteav_avio_open2(), AVFormatContext is provided + * as contextual information and to reach AVFormatContext.opaque. + * + * If NULL then some simple checks are used together with liteav_avio_open2(). + * + * Must not be accessed directly from outside avformat. + * @See av_format_set_open_cb() + * + * Demuxing: Set by user. + * + * @deprecated Use io_open and io_close. + */ + attribute_deprecated + int (*open_cb)(struct AVFormatContext *s, AVIOContext **p, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options); +#endif + + /** + * ',' separated list of allowed protocols. + * - encoding: unused + * - decoding: set by user + */ + char *protocol_whitelist; + + /** + * A callback for opening new IO streams. + * + * Whenever a muxer or a demuxer needs to open an IO stream (typically from + * avformat_open_input() for demuxers, but for certain formats can happen at + * other times as well), it will call this callback to obtain an IO context. + * + * @param s the format context + * @param pb on success, the newly opened IO context should be returned here + * @param url the url to open + * @param flags a combination of AVIO_FLAG_* + * @param options a dictionary of additional options, with the same + * semantics as in liteav_avio_open2() + * @return 0 on success, a negative AVERROR code on failure + * + * @note Certain muxers and demuxers do nesting, i.e. they open one or more + * additional internal format contexts. Thus the AVFormatContext pointer + * passed to this callback may be different from the one facing the caller. + * It will, however, have the same 'opaque' field. + */ + int (*io_open)(struct AVFormatContext *s, AVIOContext **pb, const char *url, + int flags, AVDictionary **options); + + /** + * A callback for closing the streams opened with AVFormatContext.io_open(). 
+ */ + void (*io_close)(struct AVFormatContext *s, AVIOContext *pb); + + /** + * ',' separated list of disallowed protocols. + * - encoding: unused + * - decoding: set by user + */ + char *protocol_blacklist; + + /** + * The maximum number of streams. + * - encoding: unused + * - decoding: set by user + */ + int max_streams; + + int64_t open_time; + int is_has_open_time; + + /** + * Skip duration calcuation in estimate_timings_from_pts. + * - encoding: unused + * - decoding: set by user + */ + int skip_estimate_duration_from_pts; + + /** + * Means find stream info in progress + */ + int find_stream_info_in_progress; +} AVFormatContext; + +#if FF_API_FORMAT_GET_SET +/** + * Accessors for some AVFormatContext fields. These used to be provided for ABI + * compatibility, and do not need to be used anymore. + */ +attribute_deprecated +int av_format_get_probe_score(const AVFormatContext *s); +attribute_deprecated +AVCodec * av_format_get_video_codec(const AVFormatContext *s); +attribute_deprecated +void av_format_set_video_codec(AVFormatContext *s, AVCodec *c); +attribute_deprecated +AVCodec * av_format_get_audio_codec(const AVFormatContext *s); +attribute_deprecated +void av_format_set_audio_codec(AVFormatContext *s, AVCodec *c); +attribute_deprecated +AVCodec * av_format_get_subtitle_codec(const AVFormatContext *s); +attribute_deprecated +void av_format_set_subtitle_codec(AVFormatContext *s, AVCodec *c); +attribute_deprecated +AVCodec * av_format_get_data_codec(const AVFormatContext *s); +attribute_deprecated +void av_format_set_data_codec(AVFormatContext *s, AVCodec *c); +attribute_deprecated +int av_format_get_metadata_header_padding(const AVFormatContext *s); +attribute_deprecated +void av_format_set_metadata_header_padding(AVFormatContext *s, int c); +attribute_deprecated +void * av_format_get_opaque(const AVFormatContext *s); +attribute_deprecated +void av_format_set_opaque(AVFormatContext *s, void *opaque); +attribute_deprecated +av_format_control_message 
av_format_get_control_message_cb(const AVFormatContext *s); +attribute_deprecated +void av_format_set_control_message_cb(AVFormatContext *s, av_format_control_message callback); +#if FF_API_OLD_OPEN_CALLBACKS +attribute_deprecated AVOpenCallback av_format_get_open_cb(const AVFormatContext *s); +attribute_deprecated void av_format_set_open_cb(AVFormatContext *s, AVOpenCallback callback); +#endif +#endif + +/** + * This function will cause global side data to be injected in the next packet + * of each stream as well as after any subsequent seek. + */ +void av_format_inject_global_side_data(AVFormatContext *s); + +/** + * Returns the method used to set ctx->duration. + * + * @return AVFMT_DURATION_FROM_PTS, AVFMT_DURATION_FROM_STREAM, or AVFMT_DURATION_FROM_BITRATE. + */ +enum AVDurationEstimationMethod av_fmt_ctx_get_duration_estimation_method(const AVFormatContext* ctx); + +typedef struct AVPacketList { + AVPacket pkt; + struct AVPacketList *next; +} AVPacketList; + + +/** + * @defgroup lavf_core Core functions + * @ingroup libavf + * + * Functions for querying libavformat capabilities, allocating core structures, + * etc. + * @{ + */ + +/** + * Return the LIBAVFORMAT_VERSION_INT constant. + */ +unsigned avformat_version(void); + +/** + * Return the libavformat build-time configuration. + */ +const char *avformat_configuration(void); + +/** + * Return the libavformat license. + */ +const char *avformat_license(void); + +#if FF_API_NEXT +/** + * Initialize libavformat and register all the muxers, demuxers and + * protocols. If you do not call this function, then you can select + * exactly which formats you want to support. 
+ * + * @see liteav_av_register_input_format() + * @see liteav_av_register_output_format() + */ +attribute_deprecated +void liteav_av_register_all(void); + +attribute_deprecated +void liteav_av_register_input_format(AVInputFormat *format); +attribute_deprecated +void liteav_av_register_output_format(AVOutputFormat *format); +#endif + +/** + * Do global initialization of network libraries. This is optional, + * and not recommended anymore. + * + * This functions only exists to work around thread-safety issues + * with older GnuTLS or OpenSSL libraries. If libavformat is linked + * to newer versions of those libraries, or if you do not use them, + * calling this function is unnecessary. Otherwise, you need to call + * this function before any other threads using them are started. + * + * This function will be deprecated once support for older GnuTLS and + * OpenSSL libraries is removed, and this function has no purpose + * anymore. + */ +int avformat_network_init(void); + +/** + * Undo the initialization done by avformat_network_init. Call it only + * once for each time you called avformat_network_init. + */ +int avformat_network_deinit(void); + +#if FF_API_NEXT +/** + * If f is NULL, returns the first registered input format, + * if f is non-NULL, returns the next registered input format after f + * or NULL if f is the last one. + */ +attribute_deprecated +AVInputFormat *liteav_av_iformat_next(const AVInputFormat *f); + +/** + * If f is NULL, returns the first registered output format, + * if f is non-NULL, returns the next registered output format after f + * or NULL if f is the last one. + */ +attribute_deprecated +AVOutputFormat *liteav_av_oformat_next(const AVOutputFormat *f); +#endif + +/** + * Iterate over all registered muxers. + * + * @param opaque a pointer where libavformat will store the iteration state. Must + * point to NULL to start the iteration. 
+ * + * @return the next registered muxer or NULL when the iteration is + * finished + */ +const AVOutputFormat *liteav_av_muxer_iterate(void **opaque); + +/** + * Iterate over all registered demuxers. + * + * @param opaque a pointer where libavformat will store the iteration state. Must + * point to NULL to start the iteration. + * + * @return the next registered demuxer or NULL when the iteration is + * finished + */ +const AVInputFormat *liteav_av_demuxer_iterate(void **opaque); + +/** + * Allocate an AVFormatContext. + * avformat_free_context() can be used to free the context and everything + * allocated by the framework within it. + */ +AVFormatContext *avformat_alloc_context(void); + +/** + * Free an AVFormatContext and all its streams. + * @param s context to free + */ +void avformat_free_context(AVFormatContext *s); + +/** + * Get the AVClass for AVFormatContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see liteav_av_opt_find(). + */ +const AVClass *avformat_get_class(void); + +/** + * Add a new stream to a media file. + * + * When demuxing, it is called by the demuxer in read_header(). If the + * flag AVFMTCTX_NOHEADER is set in s.ctx_flags, then it may also + * be called in read_packet(). + * + * When muxing, should be called by the user before liteav_avformat_write_header(). + * + * User is required to call avcodec_close() and avformat_free_context() to + * clean up the allocation by avformat_new_stream(). + * + * @param s media file handle + * @param c If non-NULL, the AVCodecContext corresponding to the new stream + * will be initialized to use this codec. This is needed for e.g. codec-specific + * defaults to be set, so codec should be provided if it is known. + * + * @return newly created stream or NULL on error. + */ +AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c); + +/** + * Wrap an existing array as stream side data. 
+ * + * @param st stream + * @param type side information type + * @param data the side data array. It must be allocated with the liteav_av_malloc() + * family of functions. The ownership of the data is transferred to + * st. + * @param size side information size + * @return zero on success, a negative AVERROR code on failure. On failure, + * the stream is unchanged and the data remains owned by the caller. + */ +int av_stream_add_side_data(AVStream *st, enum AVPacketSideDataType type, + uint8_t *data, size_t size); + +/** + * Allocate new information from stream. + * + * @param stream stream + * @param type desired side information type + * @param size side information size + * @return pointer to fresh allocated data or NULL otherwise + */ +uint8_t *av_stream_new_side_data(AVStream *stream, + enum AVPacketSideDataType type, int size); +/** + * Get side information from stream. + * + * @param stream stream + * @param type desired side information type + * @param size pointer for side information size to store (optional) + * @return pointer to data if present or NULL otherwise + */ +uint8_t *av_stream_get_side_data(const AVStream *stream, + enum AVPacketSideDataType type, int *size); + +AVProgram *av_new_program(AVFormatContext *s, int id); + +/** + * @} + */ + + +/** + * Allocate an AVFormatContext for an output format. + * avformat_free_context() can be used to free the context and + * everything allocated by the framework within it. 
+ *
+ * @param *ctx is set to the created format context, or to NULL in
+ * case of failure
+ * @param oformat format to use for allocating the context, if NULL
+ * format_name and filename are used instead
+ * @param format_name the name of output format to use for allocating the
+ * context, if NULL filename is used instead
+ * @param filename the name of the filename to use for allocating the
+ * context, may be NULL
+ * @return >= 0 in case of success, a negative AVERROR code in case of
+ * failure
+ */
+int liteav_avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat,
+                                   const char *format_name, const char *filename);
+
+/**
+ * @addtogroup lavf_decoding
+ * @{
+ */
+
+/**
+ * Find AVInputFormat based on the short name of the input format.
+ */
+AVInputFormat *liteav_av_find_input_format(const char *short_name);
+
+/**
+ * Guess the file format.
+ *
+ * @param pd data to be probed
+ * @param is_opened Whether the file is already opened; determines whether
+ *                  demuxers with or without AVFMT_NOFILE are probed.
+ */
+AVInputFormat *liteav_av_probe_input_format(AVProbeData *pd, int is_opened);
+
+/**
+ * Guess the file format.
+ *
+ * @param pd data to be probed
+ * @param is_opened Whether the file is already opened; determines whether
+ *                  demuxers with or without AVFMT_NOFILE are probed.
+ * @param score_max A probe score larger than this is required to accept a
+ *                  detection, the variable is set to the actual detection
+ *                  score afterwards.
+ *                  If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended
+ *                  to retry with a larger probe buffer.
+ */
+AVInputFormat *liteav_av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max);
+
+/**
+ * Guess the file format.
+ *
+ * @param is_opened Whether the file is already opened; determines whether
+ *                  demuxers with or without AVFMT_NOFILE are probed.
+ * @param score_ret The score of the best detection.
+ */
+AVInputFormat *liteav_av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret);
+
+/**
+ * Probe a bytestream to determine the input format. Each time a probe returns
+ * with a score that is too low, the probe buffer size is increased and another
+ * attempt is made. When the maximum probe size is reached, the input format
+ * with the highest score is returned.
+ *
+ * @param pb the bytestream to probe
+ * @param fmt the input format is put here
+ * @param url the url of the stream
+ * @param logctx the log context
+ * @param offset the offset within the bytestream to probe from
+ * @param max_probe_size the maximum probe buffer size (zero for default)
+ * @return the score in case of success (the maximal score is
+ *         AVPROBE_SCORE_MAX), or a negative AVERROR code
+ *         otherwise
+ */
+int liteav_av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
+                           const char *url, void *logctx,
+                           unsigned int offset, unsigned int max_probe_size);
+
+/**
+ * Like liteav_av_probe_input_buffer2() but returns 0 on success
+ */
+int liteav_av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
+                          const char *url, void *logctx,
+                          unsigned int offset, unsigned int max_probe_size);
+
+/**
+ * Open an input stream and read the header. The codecs are not opened.
+ * The stream must be closed with avformat_close_input().
+ *
+ * @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context).
+ *           May be a pointer to NULL, in which case an AVFormatContext is allocated by this
+ *           function and written into ps.
+ *           Note that a user-supplied AVFormatContext will be freed on failure.
+ * @param url URL of the stream to open.
+ * @param fmt If non-NULL, this parameter forces a specific input format.
+ *            Otherwise the format is autodetected.
+ * @param options A dictionary filled with AVFormatContext and demuxer-private options.
+ * On return this parameter will be destroyed and replaced with a dict containing + * options that were not found. May be NULL. + * + * @return 0 on success, a negative AVERROR on failure. + * + * @note If you want to use custom IO, preallocate the format context and set its pb field. + */ +int avformat_open_input(AVFormatContext **ps, const char *url, AVInputFormat *fmt, AVDictionary **options); + +attribute_deprecated +int av_demuxer_open(AVFormatContext *ic); + +/** + * Read packets of a media file to get stream information. This + * is useful for file formats with no headers such as MPEG. This + * function also computes the real framerate in case of MPEG-2 repeat + * frame mode. + * The logical file position is not changed by this function; + * examined packets may be buffered for later processing. + * + * @param ic media file handle + * @param options If non-NULL, an ic.nb_streams long array of pointers to + * dictionaries, where i-th member contains options for + * codec corresponding to i-th stream. + * On return each dictionary will be filled with options that were not found. + * @return >=0 if OK, AVERROR_xxx on error + * + * @note this function isn't guaranteed to open all the codecs, so + * options being non-empty at return is a perfectly normal behavior. + * + * @todo Let the user decide somehow what information is needed so that + * we do not waste time getting stuff the user does not need. + */ +int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options); + +/** + * Find the programs which belong to a given stream. + * + * @param ic media file handle + * @param last the last found program, the search will start after this + * program, or from the beginning if it is NULL + * @param s stream index + * @return the next program which belongs to s, NULL if no program is found or + * the last program is not among the programs of ic. 
+ */ +AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s); + +void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx); + +/** + * Find the "best" stream in the file. + * The best stream is determined according to various heuristics as the most + * likely to be what the user expects. + * If the decoder parameter is non-NULL, av_find_best_stream will find the + * default decoder for the stream's codec; streams for which no decoder can + * be found are ignored. + * + * @param ic media file handle + * @param type stream type: video, audio, subtitles, etc. + * @param wanted_stream_nb user-requested stream number, + * or -1 for automatic selection + * @param related_stream try to find a stream related (eg. in the same + * program) to this one, or -1 if none + * @param decoder_ret if non-NULL, returns the decoder for the + * selected stream + * @param flags flags; none are currently defined + * @return the non-negative stream number in case of success, + * AVERROR_STREAM_NOT_FOUND if no stream with the requested type + * could be found, + * AVERROR_DECODER_NOT_FOUND if streams were found but no decoder + * @note If av_find_best_stream returns successfully and decoder_ret is not + * NULL, then *decoder_ret is guaranteed to be set to a valid AVCodec. + */ +int av_find_best_stream(AVFormatContext *ic, + enum AVMediaType type, + int wanted_stream_nb, + int related_stream, + AVCodec **decoder_ret, + int flags); + +/** + * Return the next frame of a stream. + * This function returns what is stored in the file, and does not validate + * that what is there are valid frames for the decoder. It will split what is + * stored in the file into frames and return one for each call. It will not + * omit invalid data between valid frames so as to give the decoder the maximum + * information possible for decoding. 
+ * + * If pkt->buf is NULL, then the packet is valid until the next + * av_read_frame() or until avformat_close_input(). Otherwise the packet + * is valid indefinitely. In both cases the packet must be freed with + * liteav_av_packet_unref when it is no longer needed. For video, the packet contains + * exactly one frame. For audio, it contains an integer number of frames if each + * frame has a known fixed size (e.g. PCM or ADPCM data). If the audio frames + * have a variable size (e.g. MPEG audio), then it contains one frame. + * + * pkt->pts, pkt->dts and pkt->duration are always set to correct + * values in AVStream.time_base units (and guessed if the format cannot + * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format + * has B-frames, so it is better to rely on pkt->dts if you do not + * decompress the payload. + * + * @return 0 if OK, < 0 on error or end of file + */ +int av_read_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Seek to the keyframe at timestamp. + * 'timestamp' in 'stream_index'. + * + * @param s media file handle + * @param stream_index If stream_index is (-1), a default + * stream is selected, and timestamp is automatically converted + * from AV_TIME_BASE units to the stream specific time_base. + * @param timestamp Timestamp in AVStream.time_base units + * or, if no stream is specified, in AV_TIME_BASE units. + * @param flags flags which select direction and seeking mode + * @return >= 0 on success + */ +int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, + int flags); + +/** + * Seek to timestamp ts. + * Seeking will be done so that the point from which all active streams + * can be presented successfully will be closest to ts and within min/max_ts. + * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. + * + * If flags contain AVSEEK_FLAG_BYTE, then all timestamps are in bytes and + * are the file position (this may not be supported by all demuxers). 
+ * If flags contain AVSEEK_FLAG_FRAME, then all timestamps are in frames + * in the stream with stream_index (this may not be supported by all demuxers). + * Otherwise all timestamps are in units of the stream selected by stream_index + * or if stream_index is -1, in AV_TIME_BASE units. + * If flags contain AVSEEK_FLAG_ANY, then non-keyframes are treated as + * keyframes (this may not be supported by all demuxers). + * If flags contain AVSEEK_FLAG_BACKWARD, it is ignored. + * + * @param s media file handle + * @param stream_index index of the stream which is used as time base reference + * @param min_ts smallest acceptable timestamp + * @param ts target timestamp + * @param max_ts largest acceptable timestamp + * @param flags flags + * @return >=0 on success, error code otherwise + * + * @note This is part of the new seek API which is still under construction. + * Thus do not use this yet. It may change at any time, do not expect + * ABI compatibility yet! + */ +int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); + +/** + * Discard all internally buffered data. This can be useful when dealing with + * discontinuities in the byte stream. Generally works only with formats that + * can resync. This includes headerless formats like MPEG-TS/TS but should also + * work with NUT, Ogg and in a limited way AVI for example. + * + * The set of streams, the detected duration, stream parameters and codecs do + * not change when calling this function. If you want a complete reset, it's + * better to open a new AVFormatContext. + * + * This does not flush the AVIOContext (s->pb). If necessary, call + * liteav_avio_flush(s->pb) before calling this function. + * + * @param s media file handle + * @return >=0 on success, error code otherwise + */ +int avformat_flush(AVFormatContext *s); + +/** + * Start playing a network-based stream (e.g. RTSP stream) at the + * current position. 
+ */ +int av_read_play(AVFormatContext *s); + +/** + * Pause a network-based stream (e.g. RTSP stream). + * + * Use av_read_play() to resume it. + */ +int av_read_pause(AVFormatContext *s); + +/** + * Close an opened input AVFormatContext. Free it and all its contents + * and set *s to NULL. + */ +void avformat_close_input(AVFormatContext **s); +/** + * @} + */ + +#define AVSEEK_FLAG_BACKWARD 1 ///< seek backward +#define AVSEEK_FLAG_BYTE 2 ///< seeking based on position in bytes +#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non-keyframes +#define AVSEEK_FLAG_FRAME 8 ///< seeking based on frame number +#define AVSEEK_FLAG_SEQUENCE 16 ///<seeking based on ts sequence num +#define AVSEEK_FLAG_SEG_INTERNAL 32 ///<seeking based on ts sequence internal i frame which more than seek pos. +#define AVSEEK_FLAG_SEG_INTERNAL_BEFORE 64 ///<seeking based on ts sequence internal i frame which less than seek pos. + +/** + * @addtogroup lavf_encoding + * @{ + */ + +#define AVSTREAM_INIT_IN_WRITE_HEADER 0 ///< stream parameters initialized in liteav_avformat_write_header +#define AVSTREAM_INIT_IN_INIT_OUTPUT 1 ///< stream parameters initialized in liteav_avformat_init_output + +/** + * Allocate the stream private data and write the stream header to + * an output media file. + * + * @param s Media file handle, must be allocated with avformat_alloc_context(). + * Its oformat field must be set to the desired output format; + * Its pb field must be set to an already opened AVIOContext. + * @param options An AVDictionary filled with AVFormatContext and muxer-private options. + * On return this parameter will be destroyed and replaced with a dict containing + * options that were not found. May be NULL. + * + * @return AVSTREAM_INIT_IN_WRITE_HEADER on success if the codec had not already been fully initialized in avformat_init, + * AVSTREAM_INIT_IN_INIT_OUTPUT on success if the codec had already been fully initialized in avformat_init, + * negative AVERROR on failure. 
+ * + * @see liteav_av_opt_find, liteav_av_dict_set, liteav_avio_open, liteav_av_oformat_next, liteav_avformat_init_output. + */ +av_warn_unused_result +int liteav_avformat_write_header(AVFormatContext *s, AVDictionary **options); + +/** + * Allocate the stream private data and initialize the codec, but do not write the header. + * May optionally be used before liteav_avformat_write_header to initialize stream parameters + * before actually writing the header. + * If using this function, do not pass the same options to liteav_avformat_write_header. + * + * @param s Media file handle, must be allocated with avformat_alloc_context(). + * Its oformat field must be set to the desired output format; + * Its pb field must be set to an already opened AVIOContext. + * @param options An AVDictionary filled with AVFormatContext and muxer-private options. + * On return this parameter will be destroyed and replaced with a dict containing + * options that were not found. May be NULL. + * + * @return AVSTREAM_INIT_IN_WRITE_HEADER on success if the codec requires liteav_avformat_write_header to fully initialize, + * AVSTREAM_INIT_IN_INIT_OUTPUT on success if the codec has been fully initialized, + * negative AVERROR on failure. + * + * @see liteav_av_opt_find, liteav_av_dict_set, liteav_avio_open, liteav_av_oformat_next, liteav_avformat_write_header. + */ +av_warn_unused_result +int liteav_avformat_init_output(AVFormatContext *s, AVDictionary **options); + +/** + * Write a packet to an output media file. + * + * This function passes the packet directly to the muxer, without any buffering + * or reordering. The caller is responsible for correctly interleaving the + * packets if the format requires it. Callers that want libavformat to handle + * the interleaving should call liteav_av_interleaved_write_frame() instead of this + * function. + * + * @param s media file handle + * @param pkt The packet containing the data to be written. 
Note that unlike + * liteav_av_interleaved_write_frame(), this function does not take + * ownership of the packet passed to it (though some muxers may make + * an internal reference to the input packet). + * <br> + * This parameter can be NULL (at any time, not just at the end), in + * order to immediately flush data buffered within the muxer, for + * muxers that buffer up data internally before writing it to the + * output. + * <br> + * Packet's @ref AVPacket.stream_index "stream_index" field must be + * set to the index of the corresponding stream in @ref + * AVFormatContext.streams "s->streams". + * <br> + * The timestamps (@ref AVPacket.pts "pts", @ref AVPacket.dts "dts") + * must be set to correct values in the stream's timebase (unless the + * output format is flagged with the AVFMT_NOTIMESTAMPS flag, then + * they can be set to AV_NOPTS_VALUE). + * The dts for subsequent packets passed to this function must be strictly + * increasing when compared in their respective timebases (unless the + * output format is flagged with the AVFMT_TS_NONSTRICT, then they + * merely have to be nondecreasing). @ref AVPacket.duration + * "duration") should also be set if known. + * @return < 0 on error, = 0 if OK, 1 if flushed and there is no more data to flush + * + * @see liteav_av_interleaved_write_frame() + */ +int liteav_av_write_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Write a packet to an output media file ensuring correct interleaving. + * + * This function will buffer the packets internally as needed to make sure the + * packets in the output file are properly interleaved in the order of + * increasing dts. Callers doing their own interleaving should call + * liteav_av_write_frame() instead of this function. + * + * Using this function instead of liteav_av_write_frame() can give muxers advance + * knowledge of future packets, improving e.g. the behaviour of the mp4 + * muxer for VFR content in fragmenting mode. 
+ * + * @param s media file handle + * @param pkt The packet containing the data to be written. + * <br> + * If the packet is reference-counted, this function will take + * ownership of this reference and unreference it later when it sees + * fit. + * The caller must not access the data through this reference after + * this function returns. If the packet is not reference-counted, + * libavformat will make a copy. + * <br> + * This parameter can be NULL (at any time, not just at the end), to + * flush the interleaving queues. + * <br> + * Packet's @ref AVPacket.stream_index "stream_index" field must be + * set to the index of the corresponding stream in @ref + * AVFormatContext.streams "s->streams". + * <br> + * The timestamps (@ref AVPacket.pts "pts", @ref AVPacket.dts "dts") + * must be set to correct values in the stream's timebase (unless the + * output format is flagged with the AVFMT_NOTIMESTAMPS flag, then + * they can be set to AV_NOPTS_VALUE). + * The dts for subsequent packets in one stream must be strictly + * increasing (unless the output format is flagged with the + * AVFMT_TS_NONSTRICT, then they merely have to be nondecreasing). + * @ref AVPacket.duration "duration") should also be set if known. + * + * @return 0 on success, a negative AVERROR on error. Libavformat will always + * take care of freeing the packet, even if this function fails. + * + * @see liteav_av_write_frame(), AVFormatContext.max_interleave_delta + */ +int liteav_av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Write an uncoded frame to an output media file. + * + * The frame must be correctly interleaved according to the container + * specification; if not, then liteav_av_interleaved_write_frame() must be used. + * + * See liteav_av_interleaved_write_frame() for details. + */ +int liteav_av_write_uncoded_frame(AVFormatContext *s, int stream_index, + AVFrame *frame); + +/** + * Write an uncoded frame to an output media file. 
+ * + * If the muxer supports it, this function makes it possible to write an AVFrame + * structure directly, without encoding it into a packet. + * It is mostly useful for devices and similar special muxers that use raw + * video or PCM data and will not serialize it into a byte stream. + * + * To test whether it is possible to use it with a given muxer and stream, + * use liteav_av_write_uncoded_frame_query(). + * + * The caller gives up ownership of the frame and must not access it + * afterwards. + * + * @return >=0 for success, a negative code on error + */ +int liteav_av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index, + AVFrame *frame); + +/** + * Test whether a muxer supports uncoded frame. + * + * @return >=0 if an uncoded frame can be written to that muxer and stream, + * <0 if not + */ +int liteav_av_write_uncoded_frame_query(AVFormatContext *s, int stream_index); + +/** + * Write the stream trailer to an output media file and free the + * file private data. + * + * May only be called after a successful call to liteav_avformat_write_header. + * + * @param s media file handle + * @return 0 if OK, AVERROR_xxx on error + */ +int liteav_av_write_trailer(AVFormatContext *s); + +/** + * Return the output format in the list of registered output formats + * which best matches the provided parameters, or return NULL if + * there is no match. + * + * @param short_name if non-NULL checks if short_name matches with the + * names of the registered formats + * @param filename if non-NULL checks if filename terminates with the + * extensions of the registered formats + * @param mime_type if non-NULL checks if mime_type matches with the + * MIME type of the registered formats + */ +AVOutputFormat *liteav_av_guess_format(const char *short_name, + const char *filename, + const char *mime_type); + +/** + * Guess the codec ID based upon muxer and filename. 
+ */
+enum AVCodecID liteav_av_guess_codec(AVOutputFormat *fmt, const char *short_name,
+ const char *filename, const char *mime_type,
+ enum AVMediaType type);
+
+/**
+ * Get timing information for the data currently output.
+ * The exact meaning of "currently output" depends on the format.
+ * It is mostly relevant for devices that have an internal buffer and/or
+ * work in real time.
+ * @param s media file handle
+ * @param stream stream in the media file
+ * @param[out] dts DTS of the last packet output for the stream, in stream
+ * time_base units
+ * @param[out] wall absolute time when that packet was output,
+ * in microseconds
+ * @return 0 if OK, AVERROR(ENOSYS) if the format does not support it
+ * Note: some formats or devices may not allow measuring dts and wall
+ * atomically.
+ */
+int liteav_av_get_output_timestamp(struct AVFormatContext *s, int stream,
+ int64_t *dts, int64_t *wall);
+
+
+/**
+ * @}
+ */
+
+
+/**
+ * @defgroup lavf_misc Utility functions
+ * @ingroup libavf
+ * @{
+ *
+ * Miscellaneous utility functions related to both muxing and demuxing
+ * (or neither).
+ */
+
+/**
+ * Send a nice hexadecimal dump of a buffer to the specified file stream.
+ *
+ * @param f The file stream pointer where the dump should be sent to.
+ * @param buf buffer
+ * @param size buffer size
+ *
+ * @see liteav_av_hex_dump_log, liteav_av_pkt_dump2, liteav_av_pkt_dump_log2
+ */
+void liteav_av_hex_dump(FILE *f, const uint8_t *buf, int size);
+
+/**
+ * Send a nice hexadecimal dump of a buffer to the log.
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message, lower values signifying
+ * higher importance. 
+ * @param buf buffer + * @param size buffer size + * + * @see liteav_av_hex_dump, liteav_av_pkt_dump2, liteav_av_pkt_dump_log2 + */ +void liteav_av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size); + +/** + * Send a nice dump of a packet to the specified file stream. + * + * @param f The file stream pointer where the dump should be sent to. + * @param pkt packet to dump + * @param dump_payload True if the payload must be displayed, too. + * @param st AVStream that the packet belongs to + */ +void liteav_av_pkt_dump2(FILE *f, const AVPacket *pkt, int dump_payload, const AVStream *st); + + +/** + * Send a nice dump of a packet to the log. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message, lower values signifying + * higher importance. + * @param pkt packet to dump + * @param dump_payload True if the payload must be displayed, too. + * @param st AVStream that the packet belongs to + */ +void liteav_av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, + const AVStream *st); + +/** + * Get the AVCodecID for the given codec tag tag. + * If no codec id is found returns AV_CODEC_ID_NONE. + * + * @param tags list of supported codec_id-codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param tag codec tag to match to a codec ID + */ +enum AVCodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int tag); + +/** + * Get the codec tag for the given codec id id. + * If no codec tag is found returns 0. + * + * @param tags list of supported codec_id-codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param id codec ID to match to a codec tag + */ +unsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum AVCodecID id); + +/** + * Get the codec tag for the given codec id. 
+ * + * @param tags list of supported codec_id - codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param id codec id that should be searched for in the list + * @param tag A pointer to the found tag + * @return 0 if id was not found in tags, > 0 if it was found + */ +int av_codec_get_tag2(const struct AVCodecTag * const *tags, enum AVCodecID id, + unsigned int *tag); + +int av_find_default_stream_index(AVFormatContext *s); + +/** + * Get the index for a specific timestamp. + * + * @param st stream that the timestamp belongs to + * @param timestamp timestamp to retrieve the index for + * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond + * to the timestamp which is <= the requested one, if backward + * is 0, then it will be >= + * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise + * @return < 0 if no such timestamp could be found + */ +int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags); + +/** + * Add an index entry into a sorted list. Update the entry if the list + * already contains it. + * + * @param timestamp timestamp in the time base of the given stream + */ +int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp, + int size, int distance, int flags); + + +/** + * Split a URL string into components. + * + * The pointers to buffers for storing individual components may be null, + * in order to ignore that component. Buffers for components not found are + * set to empty strings. If the port is not found, it is set to a negative + * value. 
+ * + * @param proto the buffer for the protocol + * @param proto_size the size of the proto buffer + * @param authorization the buffer for the authorization + * @param authorization_size the size of the authorization buffer + * @param hostname the buffer for the host name + * @param hostname_size the size of the hostname buffer + * @param port_ptr a pointer to store the port number in + * @param path the buffer for the path + * @param path_size the size of the path buffer + * @param url the URL to split + */ +void av_url_split(char *proto, int proto_size, + char *authorization, int authorization_size, + char *hostname, int hostname_size, + int *port_ptr, + char *path, int path_size, + const char *url); + + +/** + * Print detailed information about the input or output format, such as + * duration, bitrate, streams, container, programs, metadata, side data, + * codec and time base. + * + * @param ic the context to analyze + * @param index index of the stream to dump information about + * @param url the URL to print, such as source or destination file + * @param is_output Select whether the specified context is an input(0) or output(1) + */ +void liteav_av_dump_format(AVFormatContext *ic, + int index, + const char *url, + int is_output); + + +#define AV_FRAME_FILENAME_FLAGS_MULTIPLE 1 ///< Allow multiple %d + +/** + * Return in 'buf' the path with '%d' replaced by a number. + * + * Also handles the '%0nd' format where 'n' is the total number + * of digits and '%%'. + * + * @param buf destination buffer + * @param buf_size destination buffer size + * @param path numbered sequence string + * @param number frame number + * @param flags AV_FRAME_FILENAME_FLAGS_* + * @return 0 if OK, -1 on format error + */ +int av_get_frame_filename2(char *buf, int buf_size, + const char *path, int number, int flags); + +int av_get_frame_filename(char *buf, int buf_size, + const char *path, int number); + +/** + * Check whether filename actually is a numbered sequence generator. 
+ * + * @param filename possible numbered sequence string + * @return 1 if a valid numbered sequence string, 0 otherwise + */ +int av_filename_number_test(const char *filename); + +/** + * Generate an SDP for an RTP session. + * + * Note, this overwrites the id values of AVStreams in the muxer contexts + * for getting unique dynamic payload types. + * + * @param ac array of AVFormatContexts describing the RTP streams. If the + * array is composed by only one context, such context can contain + * multiple AVStreams (one AVStream per RTP stream). Otherwise, + * all the contexts in the array (an AVCodecContext per RTP stream) + * must contain only one AVStream. + * @param n_files number of AVCodecContexts contained in ac + * @param buf buffer where the SDP will be stored (must be allocated by + * the caller) + * @param size the size of the buffer + * @return 0 if OK, AVERROR_xxx on error + */ +int liteav_av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size); + +/** + * Return a positive value if the given filename has one of the given + * extensions, 0 otherwise. + * + * @param filename file name to check against the given extensions + * @param extensions a comma-separated list of filename extensions + */ +int liteav_av_match_ext(const char *filename, const char *extensions); + +/** + * Test if the given container can store a codec. + * + * @param ofmt container to check for compatibility + * @param codec_id codec to potentially store in container + * @param std_compliance standards compliance level, one of FF_COMPLIANCE_* + * + * @return 1 if codec with ID codec_id can be stored in ofmt, 0 if it cannot. + * A negative number if this information is not available. + */ +int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id, + int std_compliance); + +/** + * @defgroup riff_fourcc RIFF FourCCs + * @{ + * Get the tables mapping RIFF FourCCs to libavcodec AVCodecIDs. 
The tables are + * meant to be passed to av_codec_get_id()/av_codec_get_tag() as in the + * following code: + * @code + * uint32_t tag = MKTAG('H', '2', '6', '4'); + * const struct AVCodecTag *table[] = { liteav_avformat_get_riff_video_tags(), 0 }; + * enum AVCodecID id = av_codec_get_id(table, tag); + * @endcode + */ +/** + * @return the table mapping RIFF FourCCs for video to libavcodec AVCodecID. + */ +const struct AVCodecTag *liteav_avformat_get_riff_video_tags(void); +/** + * @return the table mapping RIFF FourCCs for audio to AVCodecID. + */ +const struct AVCodecTag *liteav_avformat_get_riff_audio_tags(void); +/** + * @return the table mapping MOV FourCCs for video to libavcodec AVCodecID. + */ +const struct AVCodecTag *liteav_avformat_get_mov_video_tags(void); +/** + * @return the table mapping MOV FourCCs for audio to AVCodecID. + */ +const struct AVCodecTag *liteav_avformat_get_mov_audio_tags(void); + +/** + * @} + */ + +/** + * Guess the sample aspect ratio of a frame, based on both the stream and the + * frame aspect ratio. + * + * Since the frame aspect ratio is set by the codec but the stream aspect ratio + * is set by the demuxer, these two may not be equal. This function tries to + * return the value that you should use if you would like to display the frame. + * + * Basic logic is to use the stream aspect ratio if it is set to something sane + * otherwise use the frame aspect ratio. This way a container setting, which is + * usually easy to modify can override the coded value in the frames. + * + * @param format the format context which the stream is part of + * @param stream the stream which the frame is part of + * @param frame the frame with the aspect ratio to be determined + * @return the guessed (valid) sample_aspect_ratio, 0/1 if no idea + */ +AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame); + +/** + * Guess the frame rate, based on both the container and codec information. 
+ * + * @param ctx the format context which the stream is part of + * @param stream the stream which the frame is part of + * @param frame the frame for which the frame rate should be determined, may be NULL + * @return the guessed (valid) frame rate, 0/1 if no idea + */ +AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame); + +/** + * Check if the stream st contained in s is matched by the stream specifier + * spec. + * + * See the "stream specifiers" chapter in the documentation for the syntax + * of spec. + * + * @return >0 if st is matched by spec; + * 0 if st is not matched by spec; + * AVERROR code if spec is invalid + * + * @note A stream specifier can match several streams in the format. + */ +int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, + const char *spec); + +int avformat_queue_attached_pictures(AVFormatContext *s); + +#if FF_API_OLD_BSF +/** + * Apply a list of bitstream filters to a packet. + * + * @param codec AVCodecContext, usually from an AVStream + * @param pkt the packet to apply filters to. If, on success, the returned + * packet has size == 0 and side_data_elems == 0, it indicates that + * the packet should be dropped + * @param bsfc a NULL-terminated list of filters to apply + * @return >=0 on success; + * AVERROR code on failure + */ +attribute_deprecated +int av_apply_bitstream_filters(AVCodecContext *codec, AVPacket *pkt, + AVBitStreamFilterContext *bsfc); +#endif + +enum AVTimebaseSource { + AVFMT_TBCF_AUTO = -1, + AVFMT_TBCF_DECODER, + AVFMT_TBCF_DEMUXER, +#if FF_API_R_FRAME_RATE + AVFMT_TBCF_R_FRAMERATE, +#endif +}; + +/** + * Transfer internal timing information from one stream to another. + * + * This function is useful when doing stream copy. 
+ * + * @param ofmt target output format for ost + * @param ost output stream which needs timings copy and adjustments + * @param ist reference input stream to copy timings from + * @param copy_tb define from where the stream codec timebase needs to be imported + */ +int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, + AVStream *ost, const AVStream *ist, + enum AVTimebaseSource copy_tb); + +/** + * Get the internal codec timebase from a stream. + * + * @param st input stream to extract the timebase from + */ +AVRational av_stream_get_codec_timebase(const AVStream *st); + +/** + * @} + */ + +#endif /* AVFORMAT_AVFORMAT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/avio.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/avio.h new file mode 100644 index 0000000..37ed31f --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/avio.h @@ -0,0 +1,868 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef AVFORMAT_AVIO_H +#define AVFORMAT_AVIO_H + +/** + * @file + * @ingroup lavf_io + * Buffered I/O operations + */ + +#include <stdint.h> + +#include "libavutil/common.h" +#include "libavutil/dict.h" +#include "libavutil/log.h" + +#include "libavformat/version.h" + +/** + * Seeking works like for a local file. + */ +#define AVIO_SEEKABLE_NORMAL (1 << 0) + +/** + * Seeking by timestamp with liteav_avio_seek_time() is possible. + */ +#define AVIO_SEEKABLE_TIME (1 << 1) + +/** + * Callback for checking whether to abort blocking functions. + * AVERROR_EXIT is returned in this case by the interrupted + * function. During blocking operations, callback is called with + * opaque as parameter. If the callback returns 1, the + * blocking operation will be aborted. + * + * No members can be added to this struct without a major bump, if + * new elements have been added after this struct in AVFormatContext + * or AVIOContext. + */ +typedef struct AVIOInterruptCB { + int (*callback)(void*); + void *opaque; +} AVIOInterruptCB; + +/** + * Directory entry types. + */ +enum AVIODirEntryType { + AVIO_ENTRY_UNKNOWN, + AVIO_ENTRY_BLOCK_DEVICE, + AVIO_ENTRY_CHARACTER_DEVICE, + AVIO_ENTRY_DIRECTORY, + AVIO_ENTRY_NAMED_PIPE, + AVIO_ENTRY_SYMBOLIC_LINK, + AVIO_ENTRY_SOCKET, + AVIO_ENTRY_FILE, + AVIO_ENTRY_SERVER, + AVIO_ENTRY_SHARE, + AVIO_ENTRY_WORKGROUP, +}; + +/** + * Describes single entry of the directory. + * + * Only name and type fields are guaranteed be set. + * Rest of fields are protocol or/and platform dependent and might be unknown. + */ +typedef struct AVIODirEntry { + char *name; /**< Filename */ + int type; /**< Type of the entry */ + int utf8; /**< Set to 1 when name is encoded with UTF-8, 0 otherwise. 
+ Name can be encoded with UTF-8 even though 0 is set. */ + int64_t size; /**< File size in bytes, -1 if unknown. */ + int64_t modification_timestamp; /**< Time of last modification in microseconds since unix + epoch, -1 if unknown. */ + int64_t access_timestamp; /**< Time of last access in microseconds since unix epoch, + -1 if unknown. */ + int64_t status_change_timestamp; /**< Time of last status change in microseconds since unix + epoch, -1 if unknown. */ + int64_t user_id; /**< User ID of owner, -1 if unknown. */ + int64_t group_id; /**< Group ID of owner, -1 if unknown. */ + int64_t filemode; /**< Unix file mode, -1 if unknown. */ +} AVIODirEntry; + +typedef struct AVIODirContext { + struct URLContext *url_context; +} AVIODirContext; + +/** + * Different data types that can be returned via the AVIO + * write_data_type callback. + */ +enum AVIODataMarkerType { + /** + * Header data; this needs to be present for the stream to be decodeable. + */ + AVIO_DATA_MARKER_HEADER, + /** + * A point in the output bytestream where a decoder can start decoding + * (i.e. a keyframe). A demuxer/decoder given the data flagged with + * AVIO_DATA_MARKER_HEADER, followed by any AVIO_DATA_MARKER_SYNC_POINT, + * should give decodeable results. + */ + AVIO_DATA_MARKER_SYNC_POINT, + /** + * A point in the output bytestream where a demuxer can start parsing + * (for non self synchronizing bytestream formats). That is, any + * non-keyframe packet start point. + */ + AVIO_DATA_MARKER_BOUNDARY_POINT, + /** + * This is any, unlabelled data. It can either be a muxer not marking + * any positions at all, it can be an actual boundary/sync point + * that the muxer chooses not to mark, or a later part of a packet/fragment + * that is cut into multiple write callbacks due to limited IO buffer size. + */ + AVIO_DATA_MARKER_UNKNOWN, + /** + * Trailer data, which doesn't contain actual content, but only for + * finalizing the output file. 
+ */ + AVIO_DATA_MARKER_TRAILER, + /** + * A point in the output bytestream where the underlying AVIOContext might + * flush the buffer depending on latency or buffering requirements. Typically + * means the end of a packet. + */ + AVIO_DATA_MARKER_FLUSH_POINT, +}; + +/** + * Bytestream IO Context. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVIOContext) must not be used outside libav*. + * + * @note None of the function pointers in AVIOContext should be called + * directly, they should only be set by the client application + * when implementing custom I/O. Normally these are set to the + * function pointers specified in liteav_avio_alloc_context() + */ +typedef struct AVIOContext { + /** + * A class for private options. + * + * If this AVIOContext is created by liteav_avio_open2(), av_class is set and + * passes the options down to protocols. + * + * If this AVIOContext is manually allocated, then av_class may be set by + * the caller. + * + * warning -- this field can be NULL, be sure to not pass this AVIOContext + * to any av_opt_* functions in that case. 
+ */ + const AVClass *av_class; + + /* + * The following shows the relationship between buffer, buf_ptr, + * buf_ptr_max, buf_end, buf_size, and pos, when reading and when writing + * (since AVIOContext is used for both): + * + ********************************************************************************** + * READING + ********************************************************************************** + * + * | buffer_size | + * |---------------------------------------| + * | | + * + * buffer buf_ptr buf_end + * +---------------+-----------------------+ + * |/ / / / / / / /|/ / / / / / /| | + * read buffer: |/ / consumed / | to be read /| | + * |/ / / / / / / /|/ / / / / / /| | + * +---------------+-----------------------+ + * + * pos + * +-------------------------------------------+-----------------+ + * input file: | | | + * +-------------------------------------------+-----------------+ + * + * + ********************************************************************************** + * WRITING + ********************************************************************************** + * + * | buffer_size | + * |--------------------------------------| + * | | + * + * buf_ptr_max + * buffer (buf_ptr) buf_end + * +-----------------------+--------------+ + * |/ / / / / / / / / / / /| | + * write buffer: | / / to be flushed / / | | + * |/ / / / / / / / / / / /| | + * +-----------------------+--------------+ + * buf_ptr can be in this + * due to a backward seek + * + * pos + * +-------------+----------------------------------------------+ + * output file: | | | + * +-------------+----------------------------------------------+ + * + */ + unsigned char *buffer; /**< Start of the buffer. */ + int buffer_size; /**< Maximum buffer size */ + unsigned char *buf_ptr; /**< Current position in the buffer */ + unsigned char *buf_end; /**< End of the data, may be less than + buffer+buffer_size if the read function returned + less data than requested, e.g. 
for streams where + no more data has been received yet. */ + void *opaque; /**< A private pointer, passed to the read/write/seek/... + functions. */ + int (*read_packet)(void *opaque, uint8_t *buf, int buf_size); + int (*write_packet)(void *opaque, uint8_t *buf, int buf_size); + int64_t (*seek)(void *opaque, int64_t offset, int whence); + int64_t pos; /**< position in the file of the current buffer */ + int eof_reached; /**< true if eof reached */ + int write_flag; /**< true if open for writing */ + int max_packet_size; + unsigned long checksum; + unsigned char *checksum_ptr; + unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size); + int error; /**< contains the error code or 0 if no error happened */ + /** + * Pause or resume playback for network streaming protocols - e.g. MMS. + */ + int (*read_pause)(void *opaque, int pause); + /** + * Seek to a given timestamp in stream with the specified stream_index. + * Needed for some network streaming protocols which don't support seeking + * to byte position. + */ + int64_t (*read_seek)(void *opaque, int stream_index, + int64_t timestamp, int flags); + /** + * A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable. + */ + int seekable; + + /** + * max filesize, used to limit allocations + * This field is internal to libavformat and access from outside is not allowed. + */ + int64_t maxsize; + + /** + * liteav_avio_read and liteav_avio_write should if possible be satisfied directly + * instead of going through a buffer, and liteav_avio_seek will always + * call the underlying seek function directly. + */ + int direct; + + /** + * Bytes read statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int64_t bytes_read; + + /** + * seek statistic + * This field is internal to libavformat and access from outside is not allowed. 
+ */ + int seek_count; + + /** + * writeout statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int writeout_count; + + /** + * Original buffer size + * used internally after probing and ensure seekback to reset the buffer size + * This field is internal to libavformat and access from outside is not allowed. + */ + int orig_buffer_size; + + /** + * Threshold to favor readahead over seek. + * This is current internal only, do not use from outside. + */ + int short_seek_threshold; + + /** + * ',' separated list of allowed protocols. + */ + const char *protocol_whitelist; + + /** + * ',' separated list of disallowed protocols. + */ + const char *protocol_blacklist; + + /** + * A callback that is used instead of write_packet. + */ + int (*write_data_type)(void *opaque, uint8_t *buf, int buf_size, + enum AVIODataMarkerType type, int64_t time); + /** + * If set, don't call write_data_type separately for AVIO_DATA_MARKER_BOUNDARY_POINT, + * but ignore them and treat them as AVIO_DATA_MARKER_UNKNOWN (to avoid needlessly + * small chunks of data returned from the callback). + */ + int ignore_boundary_point; + + /** + * Internal, not meant to be used from outside of AVIOContext. + */ + enum AVIODataMarkerType current_type; + int64_t last_time; + + /** + * A callback that is used instead of short_seek_threshold. + * This is current internal only, do not use from outside. + */ + int (*short_seek_get)(void *opaque); + + int64_t written; + + /** + * Maximum reached position before a backward seek in the write buffer, + * used keeping track of already written data for a later flush. + */ + unsigned char *buf_ptr_max; + + /** + * Try to buffer at least this amount of data before flushing it + */ + int min_packet_size; +} AVIOContext; + +/** + * Return the name of the protocol that will handle the passed URL. + * + * NULL is returned if no protocol could be found for the given URL. + * + * @return Name of the protocol or NULL. 
+ */ +const char *liteav_avio_find_protocol_name(const char *url); + +/** + * Return AVIO_FLAG_* access flags corresponding to the access permissions + * of the resource in url, or a negative value corresponding to an + * AVERROR code in case of failure. The returned access flags are + * masked by the value in flags. + * + * @note This function is intrinsically unsafe, in the sense that the + * checked resource may change its existence or permission status from + * one call to another. Thus you should not trust the returned value, + * unless you are sure that no other processes are accessing the + * checked resource. + */ +int liteav_avio_check(const char *url, int flags); + +/** + * Move or rename a resource. + * + * @note url_src and url_dst should share the same protocol and authority. + * + * @param url_src url to resource to be moved + * @param url_dst new url to resource if the operation succeeded + * @return >=0 on success or negative on error. + */ +int liteav_avpriv_io_move(const char *url_src, const char *url_dst); + +/** + * Delete a resource. + * + * @param url resource to be deleted. + * @return >=0 on success or negative on error. + */ +int liteav_avpriv_io_delete(const char *url); + +/** + * Open directory for reading. + * + * @param s directory read context. Pointer to a NULL pointer must be passed. + * @param url directory to be listed. + * @param options A dictionary filled with protocol-private options. On return + * this parameter will be destroyed and replaced with a dictionary + * containing options that were not found. May be NULL. + * @return >=0 on success or negative on error. + */ +int liteav_avio_open_dir(AVIODirContext **s, const char *url, AVDictionary **options); + +/** + * Get next directory entry. + * + * Returned entry must be freed with liteav_avio_free_directory_entry(). In particular + * it may outlive AVIODirContext. + * + * @param s directory read context. + * @param[out] next next entry or NULL when no more entries. 
+ * @return >=0 on success or negative on error. End of list is not considered an
+ * error.
+ */
+int liteav_avio_read_dir(AVIODirContext *s, AVIODirEntry **next);
+
+/**
+ * Close directory.
+ *
+ * @note Entries created using liteav_avio_read_dir() are not deleted and must be
+ * freed with liteav_avio_free_directory_entry().
+ *
+ * @param s directory read context.
+ * @return >=0 on success or negative on error.
+ */
+int liteav_avio_close_dir(AVIODirContext **s);
+
+/**
+ * Free entry allocated by liteav_avio_read_dir().
+ *
+ * @param entry entry to be freed.
+ */
+void liteav_avio_free_directory_entry(AVIODirEntry **entry);
+
+/**
+ * Allocate and initialize an AVIOContext for buffered I/O. It must be later
+ * freed with liteav_avio_context_free().
+ *
+ * @param buffer Memory block for input/output operations via AVIOContext.
+ * The buffer must be allocated with liteav_av_malloc() and friends.
+ * It may be freed and replaced with a new buffer by libavformat.
+ * AVIOContext.buffer holds the buffer currently in use,
+ * which must be later freed with liteav_av_free().
+ * @param buffer_size The buffer size is very important for performance.
+ * For protocols with fixed blocksize it should be set to this blocksize.
+ * For others a typical size is a cache page, e.g. 4kb.
+ * @param write_flag Set to 1 if the buffer should be writable, 0 otherwise.
+ * @param opaque An opaque pointer to user-specific data.
+ * @param read_packet A function for refilling the buffer, may be NULL.
+ * For stream protocols, must never return 0 but rather
+ * a proper AVERROR code.
+ * @param write_packet A function for writing the buffer contents, may be NULL.
+ * The function may not change the input buffers content.
+ * @param seek A function for seeking to specified byte position, may be NULL.
+ *
+ * @return Allocated AVIOContext or NULL on failure.
+ */ +AVIOContext *liteav_avio_alloc_context( + unsigned char *buffer, + int buffer_size, + int write_flag, + void *opaque, + int (*read_packet)(void *opaque, uint8_t *buf, int buf_size), + int (*write_packet)(void *opaque, uint8_t *buf, int buf_size), + int64_t (*seek)(void *opaque, int64_t offset, int whence)); + +/** + * Free the supplied IO context and everything associated with it. + * + * @param s Double pointer to the IO context. This function will write NULL + * into s. + */ +void liteav_avio_context_free(AVIOContext **s); + +void liteav_avio_w8(AVIOContext *s, int b); +void liteav_avio_write(AVIOContext *s, const unsigned char *buf, int size); +void liteav_avio_wl64(AVIOContext *s, uint64_t val); +void liteav_avio_wb64(AVIOContext *s, uint64_t val); +void liteav_avio_wl32(AVIOContext *s, unsigned int val); +void liteav_avio_wb32(AVIOContext *s, unsigned int val); +void liteav_avio_wl24(AVIOContext *s, unsigned int val); +void liteav_avio_wb24(AVIOContext *s, unsigned int val); +void liteav_avio_wl16(AVIOContext *s, unsigned int val); +void liteav_avio_wb16(AVIOContext *s, unsigned int val); + +/** + * Write a NULL-terminated string. + * @return number of bytes written. + */ +int liteav_avio_put_str(AVIOContext *s, const char *str); + +/** + * Convert an UTF-8 string to UTF-16LE and write it. + * @param s the AVIOContext + * @param str NULL-terminated UTF-8 string + * + * @return number of bytes written. + */ +int liteav_avio_put_str16le(AVIOContext *s, const char *str); + +/** + * Convert an UTF-8 string to UTF-16BE and write it. + * @param s the AVIOContext + * @param str NULL-terminated UTF-8 string + * + * @return number of bytes written. + */ +int liteav_avio_put_str16be(AVIOContext *s, const char *str); + +/** + * Mark the written bytestream as a specific type. + * + * Zero-length ranges are omitted from the output. 
+ * + * @param time the stream time the current bytestream pos corresponds to + * (in AV_TIME_BASE units), or AV_NOPTS_VALUE if unknown or not + * applicable + * @param type the kind of data written starting at the current pos + */ +void liteav_avio_write_marker(AVIOContext *s, int64_t time, enum AVIODataMarkerType type); + +/** + * ORing this as the "whence" parameter to a seek function causes it to + * return the filesize without seeking anywhere. Supporting this is optional. + * If it is not supported then the seek function will return <0. + */ +#define AVSEEK_SIZE 0x10000 + +/** + * Passing this flag as the "whence" parameter to a seek function causes it to + * seek by any means (like reopening and linear reading) or other normally unreasonable + * means that can be extremely slow. + * This may be ignored by the seek code. + */ +#define AVSEEK_FORCE 0x20000 + +/** + * fseek() equivalent for AVIOContext. + * @return new position or AVERROR. + */ +int64_t liteav_avio_seek(AVIOContext *s, int64_t offset, int whence); + +/** + * Skip given number of bytes forward + * @return new position or AVERROR. + */ +int64_t liteav_avio_skip(AVIOContext *s, int64_t offset); + +/** + * ftell() equivalent for AVIOContext. + * @return position or AVERROR. + */ +static av_always_inline int64_t avio_tell(AVIOContext *s) +{ + return liteav_avio_seek(s, 0, SEEK_CUR); +} + +/** + * Get the filesize. + * @return filesize or AVERROR + */ +int64_t liteav_avio_size(AVIOContext *s); + +/** + * feof() equivalent for AVIOContext. + * @return non zero if and only if end of file + */ +int liteav_avio_feof(AVIOContext *s); + +/** + * 增加AVDictionary用于辅助判断自定义协议的结尾判断 + * @return non zero if and only if end of file by hlscache + */ +int liteav_avio_feof_with_dict(AVIOContext *s, AVDictionary **options); + +/** @warning Writes up to 4 KiB per call */ +int liteav_avio_printf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3); + +/** + * Force flushing of buffered data. 
+ *
+ * For write streams, force the buffered data to be immediately written to the output,
+ * without waiting to fill the internal buffer.
+ *
+ * For read streams, discard all currently buffered data, and advance the
+ * reported file position to that of the underlying stream. This does not
+ * read new data, and does not perform any seeks.
+ */
+void liteav_avio_flush(AVIOContext *s);
+
+/**
+ * Read size bytes from AVIOContext into buf.
+ * @return number of bytes read or AVERROR
+ */
+int liteav_avio_read(AVIOContext *s, unsigned char *buf, int size);
+
+/**
+ * Read size bytes from AVIOContext into buf. Unlike liteav_avio_read(), this is allowed
+ * to read fewer bytes than requested. The missing bytes can be read in the next
+ * call. This always tries to read at least 1 byte.
+ * Useful to reduce latency in certain cases.
+ * @return number of bytes read or AVERROR
+ */
+int liteav_avio_read_partial(AVIOContext *s, unsigned char *buf, int size);
+
+/**
+ * @name Functions for reading from AVIOContext
+ * @{
+ *
+ * @note return 0 if EOF, so you cannot use it if EOF handling is
+ * necessary
+ */
+int liteav_avio_r8 (AVIOContext *s);
+unsigned int liteav_avio_rl16(AVIOContext *s);
+unsigned int liteav_avio_rl24(AVIOContext *s);
+unsigned int liteav_avio_rl32(AVIOContext *s);
+uint64_t liteav_avio_rl64(AVIOContext *s);
+unsigned int liteav_avio_rb16(AVIOContext *s);
+unsigned int liteav_avio_rb24(AVIOContext *s);
+unsigned int liteav_avio_rb32(AVIOContext *s);
+uint64_t liteav_avio_rb64(AVIOContext *s);
+/**
+ * @}
+ */
+
+/**
+ * Read a string from pb into buf. The reading will terminate when either
+ * a NULL character was encountered, maxlen bytes have been read, or nothing
+ * more can be read from pb. The result is guaranteed to be NULL-terminated, it
+ * will be truncated if buf is too small.
+ * Note that the string is not interpreted or validated in any way, it
+ * might get truncated in the middle of a sequence for multi-byte encodings.
+ * + * @return number of bytes read (is always <= maxlen). + * If reading ends on EOF or error, the return value will be one more than + * bytes actually read. + */ +int liteav_avio_get_str(AVIOContext *pb, int maxlen, char *buf, int buflen); + +/** + * Read a UTF-16 string from pb and convert it to UTF-8. + * The reading will terminate when either a null or invalid character was + * encountered or maxlen bytes have been read. + * @return number of bytes read (is always <= maxlen) + */ +int liteav_avio_get_str16le(AVIOContext *pb, int maxlen, char *buf, int buflen); +int liteav_avio_get_str16be(AVIOContext *pb, int maxlen, char *buf, int buflen); + + +/** + * @name URL open modes + * The flags argument to liteav_avio_open must be one of the following + * constants, optionally ORed with other flags. + * @{ + */ +#define AVIO_FLAG_READ 1 /**< read-only */ +#define AVIO_FLAG_WRITE 2 /**< write-only */ +#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE) /**< read-write pseudo flag */ +/** + * @} + */ + +/** + * Use non-blocking mode. + * If this flag is set, operations on the context will return + * AVERROR(EAGAIN) if they can not be performed immediately. + * If this flag is not set, operations on the context will never return + * AVERROR(EAGAIN). + * Note that this flag does not affect the opening/connecting of the + * context. Connecting a protocol will always block if necessary (e.g. on + * network protocols) but never hang (e.g. on busy devices). + * Warning: non-blocking protocols is work-in-progress; this flag may be + * silently ignored. + */ +#define AVIO_FLAG_NONBLOCK 8 + +/** + * Use direct mode. + * liteav_avio_read and liteav_avio_write should if possible be satisfied directly + * instead of going through a buffer, and liteav_avio_seek will always + * call the underlying seek function directly. + */ +#define AVIO_FLAG_DIRECT 0x8000 + +/** + * Create and initialize a AVIOContext for accessing the + * resource indicated by url. 
+ * @note When the resource indicated by url has been opened in + * read+write mode, the AVIOContext can be used only for writing. + * + * @param s Used to return the pointer to the created AVIOContext. + * In case of failure the pointed to value is set to NULL. + * @param url resource to access + * @param flags flags which control how the resource indicated by url + * is to be opened + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int liteav_avio_open(AVIOContext **s, const char *url, int flags); + +/** + * Create and initialize a AVIOContext for accessing the + * resource indicated by url. + * @note When the resource indicated by url has been opened in + * read+write mode, the AVIOContext can be used only for writing. + * + * @param s Used to return the pointer to the created AVIOContext. + * In case of failure the pointed to value is set to NULL. + * @param url resource to access + * @param flags flags which control how the resource indicated by url + * is to be opened + * @param int_cb an interrupt callback to be used at the protocols level + * @param options A dictionary filled with protocol-private options. On return + * this parameter will be destroyed and replaced with a dict containing options + * that were not found. May be NULL. + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int liteav_avio_open2(AVIOContext **s, const char *url, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * Close the resource accessed by the AVIOContext s and free it. + * This function can only be used if s was opened by liteav_avio_open(). + * + * The internal buffer is automatically flushed before closing the + * resource. + * + * @return 0 on success, an AVERROR < 0 on error. 
+ * @see liteav_avio_closep + */ +int liteav_avio_close(AVIOContext *s); + +/** + * Close the resource accessed by the AVIOContext *s, free it + * and set the pointer pointing to it to NULL. + * This function can only be used if s was opened by liteav_avio_open(). + * + * The internal buffer is automatically flushed before closing the + * resource. + * + * @return 0 on success, an AVERROR < 0 on error. + * @see liteav_avio_close + */ +int liteav_avio_closep(AVIOContext **s); + + +/** + * Open a write only memory stream. + * + * @param s new IO context + * @return zero if no error. + */ +int liteav_avio_open_dyn_buf(AVIOContext **s); + +/** + * Return the written size and a pointer to the buffer. + * The AVIOContext stream is left intact. + * The buffer must NOT be freed. + * No padding is added to the buffer. + * + * @param s IO context + * @param pbuffer pointer to a byte buffer + * @return the length of the byte buffer + */ +int liteav_avio_get_dyn_buf(AVIOContext *s, uint8_t **pbuffer); + +/** + * Return the written size and a pointer to the buffer. The buffer + * must be freed with liteav_av_free(). + * Padding of AV_INPUT_BUFFER_PADDING_SIZE is added to the buffer. + * + * @param s IO context + * @param pbuffer pointer to a byte buffer + * @return the length of the byte buffer + */ +int liteav_avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer); + +/** + * Iterate through names of available protocols. + * + * @param opaque A private pointer representing current protocol. + * It must be a pointer to NULL on first iteration and will + * be updated by successive calls to liteav_avio_enum_protocols. + * @param output If set to 1, iterate over output protocols, + * otherwise over input protocols. + * + * @return A static string containing the name of current protocol or NULL + */ +const char *liteav_avio_enum_protocols(void **opaque, int output); + +/** + * Pause and resume playing - only meaningful if using a network streaming + * protocol (e.g. MMS). 
+ * + * @param h IO context from which to call the read_pause function pointer + * @param pause 1 for pause, 0 for resume + */ +int liteav_avio_pause(AVIOContext *h, int pause); + +/** + * Seek to a given timestamp relative to some component stream. + * Only meaningful if using a network streaming protocol (e.g. MMS.). + * + * @param h IO context from which to call the seek function pointers + * @param stream_index The stream index that the timestamp is relative to. + * If stream_index is (-1) the timestamp should be in AV_TIME_BASE + * units from the beginning of the presentation. + * If a stream_index >= 0 is used and the protocol does not support + * seeking based on component streams, the call will fail. + * @param timestamp timestamp in AVStream.time_base units + * or if there is no stream specified then in AV_TIME_BASE units. + * @param flags Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE + * and AVSEEK_FLAG_ANY. The protocol may silently ignore + * AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will + * fail if used and not supported. + * @return >= 0 on success + * @see AVInputFormat::read_seek + */ +int64_t liteav_avio_seek_time(AVIOContext *h, int stream_index, + int64_t timestamp, int flags); + +/* Avoid a warning. The header can not be included because it breaks c++. */ +struct AVBPrint; + +/** + * Read contents of h into print buffer, up to max_size bytes, or up to EOF. + * + * @return 0 for success (max_size bytes read or EOF reached), negative error + * code otherwise + */ +int liteav_avio_read_to_bprint(AVIOContext *h, struct AVBPrint *pb, size_t max_size); + +/** + * Accept and allocate a client context on a server context. 
+ * @param s the server context + * @param c the client context, must be unallocated + * @return >= 0 on success or a negative value corresponding + * to an AVERROR on failure + */ +int liteav_avio_accept(AVIOContext *s, AVIOContext **c); + +/** + * Perform one step of the protocol handshake to accept a new client. + * This function must be called on a client returned by liteav_avio_accept() before + * using it as a read/write context. + * It is separate from liteav_avio_accept() because it may block. + * A step of the handshake is defined by places where the application may + * decide to change the proceedings. + * For example, on a protocol with a request header and a reply header, each + * one can constitute a step because the application may use the parameters + * from the request to change parameters in the reply; or each individual + * chunk of the request can constitute a step. + * If the handshake is already finished, liteav_avio_handshake() does nothing and + * returns 0 immediately. + * + * @param c the client context to perform the handshake on + * @return 0 on a complete and successful handshake + * > 0 if the handshake progressed, but is not complete + * < 0 for an AVERROR code + */ +int liteav_avio_handshake(AVIOContext *c); +#endif /* AVFORMAT_AVIO_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/internal.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/internal.h new file mode 100644 index 0000000..a95f3ae --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/internal.h @@ -0,0 +1,807 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_INTERNAL_H +#define AVFORMAT_INTERNAL_H + +#include <stdint.h> + +#include "libavutil/bprint.h" +#include "avformat.h" +#include "os_support.h" + +#define MAX_URL_SIZE 4096 + +/** size of probe buffer, for guessing file type from file contents */ +#define PROBE_BUF_MIN 2048 +#define PROBE_BUF_MAX (1 << 20) + +#define MAX_PROBE_PACKETS 2500 + +#ifdef DEBUG +# define hex_dump_debug(class, buf, size) liteav_av_hex_dump_log(class, AV_LOG_DEBUG, buf, size) +#else +# define hex_dump_debug(class, buf, size) do { if (0) liteav_av_hex_dump_log(class, AV_LOG_DEBUG, buf, size); } while(0) +#endif + +typedef struct AVCodecTag { + enum AVCodecID id; + unsigned int tag; +} AVCodecTag; + +typedef struct CodecMime{ + char str[32]; + enum AVCodecID id; +} CodecMime; + +/*************************************************/ +/* fractional numbers for exact pts handling */ + +/** + * The exact value of the fractional number is: 'val + num / den'. + * num is assumed to be 0 <= num < den. + */ +typedef struct FFFrac { + int64_t val, num, den; +} FFFrac; + + +struct AVFormatInternal { + /** + * Number of streams relevant for interleaving. + * Muxing only. 
+ */ + int nb_interleaved_streams; + + /** + * This buffer is only needed when packets were already buffered but + * not decoded, for example to get the codec parameters in MPEG + * streams. + */ + struct AVPacketList *packet_buffer; + struct AVPacketList *packet_buffer_end; + + /* av_seek_frame() support */ + int64_t data_offset; /**< offset of the first packet */ + + /** + * Raw packets from the demuxer, prior to parsing and decoding. + * This buffer is used for buffering packets until the codec can + * be identified, as parsing cannot be done without knowing the + * codec. + */ + struct AVPacketList *raw_packet_buffer; + struct AVPacketList *raw_packet_buffer_end; + /** + * Packets split by the parser get queued here. + */ + struct AVPacketList *parse_queue; + struct AVPacketList *parse_queue_end; + /** + * Remaining size available for raw_packet_buffer, in bytes. + */ +#define RAW_PACKET_BUFFER_SIZE 2500000 + int raw_packet_buffer_remaining_size; + + /** + * Offset to remap timestamps to be non-negative. + * Expressed in timebase units. + * @see AVStream.mux_ts_offset + */ + int64_t offset; + + /** + * Timebase for the timestamp offset. + */ + AVRational offset_timebase; + +#if FF_API_COMPUTE_PKT_FIELDS2 + int missing_ts_warning; +#endif + + int inject_global_side_data; + + int avoid_negative_ts_use_pts; + + /** + * Timestamp of the end of the shortest stream. + */ + int64_t shortest_end; + + /** + * Whether or not liteav_avformat_init_output has already been called + */ + int initialized; + + /** + * Whether or not liteav_avformat_init_output fully initialized streams + */ + int streams_initialized; + + /** + * ID3v2 tag useful for MP3 demuxing + */ + AVDictionary *id3v2_meta; + + /* + * Prefer the codec framerate for avg_frame_rate computation. + */ + int prefer_codec_framerate; +}; + +struct AVStreamInternal { + /** + * Set to 1 if the codec allows reordering, so pts can be different + * from dts. 
+ */ + int reorder; + + /** + * bitstream filters to run on stream + * - encoding: Set by muxer using liteav_ff_stream_add_bitstream_filter + * - decoding: unused + */ + AVBSFContext **bsfcs; + int nb_bsfcs; + + /** + * Whether or not check_bitstream should still be run on each packet + */ + int bitstream_checked; + + /** + * The codec context used by avformat_find_stream_info, the parser, etc. + */ + AVCodecContext *avctx; + /** + * 1 if avctx has been initialized with the values from the codec parameters + */ + int avctx_inited; + + enum AVCodecID orig_codec_id; + + /* the context for extracting extradata in find_stream_info() + * inited=1/bsf=NULL signals that extracting is not possible (codec not + * supported) */ + struct { + AVBSFContext *bsf; + AVPacket *pkt; + int inited; + } extract_extradata; + + /** + * Whether the internal avctx needs to be updated from codecpar (after a late change to codecpar) + */ + int need_context_update; + + FFFrac *priv_pts; +}; + +#ifdef __GNUC__ +#define dynarray_add(tab, nb_ptr, elem)\ +do {\ + __typeof__(tab) _tab = (tab);\ + __typeof__(elem) _elem = (elem);\ + (void)sizeof(**_tab == _elem); /* check that types are compatible */\ + liteav_av_dynarray_add(_tab, nb_ptr, _elem);\ +} while(0) +#else +#define dynarray_add(tab, nb_ptr, elem)\ +do {\ + liteav_av_dynarray_add((tab), nb_ptr, (elem));\ +} while(0) +#endif + +struct tm *liteav_ff_brktimegm(time_t secs, struct tm *tm); + +/** + * Automatically create sub-directories + * + * @param path will create sub-directories by path + * @return 0, or < 0 on error + */ +int liteav_ff_mkdir_p(const char *path); + +char *liteav_ff_data_to_hex(char *buf, const uint8_t *src, int size, int lowercase); + +/** + * Parse a string of hexadecimal strings. Any space between the hexadecimal + * digits is ignored. 
+ * + * @param data if non-null, the parsed data is written to this pointer + * @param p the string to parse + * @return the number of bytes written (or to be written, if data is null) + */ +int liteav_ff_hex_to_data(uint8_t *data, const char *p); + +/** + * Add packet to AVFormatContext->packet_buffer list, determining its + * interleaved position using compare() function argument. + * @return 0, or < 0 on error + */ +int liteav_ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt, + int (*compare)(AVFormatContext *, AVPacket *, AVPacket *)); + +void liteav_ff_read_frame_flush(AVFormatContext *s); + +#define NTP_OFFSET 2208988800ULL +#define NTP_OFFSET_US (NTP_OFFSET * 1000000ULL) + +/** Get the current time since NTP epoch in microseconds. */ +uint64_t liteav_ff_ntp_time(void); + +/** + * Get the NTP time stamp formatted as per the RFC-5905. + * + * @param ntp_time NTP time in micro seconds (since NTP epoch) + * @return the formatted NTP time stamp + */ +uint64_t liteav_ff_get_formatted_ntp_time(uint64_t ntp_time_us); + +/** + * Append the media-specific SDP fragment for the media stream c + * to the buffer buff. + * + * Note, the buffer needs to be initialized, since it is appended to + * existing content. 
+ * + * @param buff the buffer to append the SDP fragment to + * @param size the size of the buff buffer + * @param st the AVStream of the media to describe + * @param idx the global stream index + * @param dest_addr the destination address of the media stream, may be NULL + * @param dest_type the destination address type, may be NULL + * @param port the destination port of the media stream, 0 if unknown + * @param ttl the time to live of the stream, 0 if not multicast + * @param fmt the AVFormatContext, which might contain options modifying + * the generated SDP + */ +void liteav_ff_sdp_write_media(char *buff, int size, AVStream *st, int idx, + const char *dest_addr, const char *dest_type, + int port, int ttl, AVFormatContext *fmt); + +/** + * Write a packet to another muxer than the one the user originally + * intended. Useful when chaining muxers, where one muxer internally + * writes a received packet to another muxer. + * + * @param dst the muxer to write the packet to + * @param dst_stream the stream index within dst to write the packet to + * @param pkt the packet to be written + * @param src the muxer the packet originally was intended for + * @param interleave 0->use liteav_av_write_frame, 1->liteav_av_interleaved_write_frame + * @return the value liteav_av_write_frame returned + */ +int liteav_ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt, + AVFormatContext *src, int interleave); + +/** + * Get the length in bytes which is needed to store val as v. + */ +int liteav_ff_get_v_length(uint64_t val); + +/** + * Put val using a variable number of bytes. + */ +void liteav_ff_put_v(AVIOContext *bc, uint64_t val); + +/** + * Read a whole line of text from AVIOContext. Stop reading after reaching + * either a \\n, a \\0 or EOF. The returned string is always \\0-terminated, + * and may be truncated if the buffer is too small. 
+ * + * @param s the read-only AVIOContext + * @param buf buffer to store the read line + * @param maxlen size of the buffer + * @return the length of the string written in the buffer, not including the + * final \\0 + */ +int liteav_ff_get_line(AVIOContext *s, char *buf, int maxlen); + +/** + * Same as liteav_ff_get_line but strip the white-space characters in the text tail + * + * @param s the read-only AVIOContext + * @param buf buffer to store the read line + * @param maxlen size of the buffer + * @return the length of the string written in the buffer + */ +int liteav_ff_get_chomp_line(AVIOContext *s, char *buf, int maxlen); + +/** + * Read a whole line of text from AVIOContext to an AVBPrint buffer. Stop + * reading after reaching a \\r, a \\n, a \\r\\n, a \\0 or EOF. The line + * ending characters are NOT included in the buffer, but they are skipped on + * the input. + * + * @param s the read-only AVIOContext + * @param bp the AVBPrint buffer + * @return the length of the read line, not including the line endings, + * negative on error. + */ +int64_t liteav_ff_read_line_to_bprint(AVIOContext *s, AVBPrint *bp); + +/** + * Read a whole line of text from AVIOContext to an AVBPrint buffer overwriting + * its contents. Stop reading after reaching a \\r, a \\n, a \\r\\n, a \\0 or + * EOF. The line ending characters are NOT included in the buffer, but they + * are skipped on the input. + * + * @param s the read-only AVIOContext + * @param bp the AVBPrint buffer + * @return the length of the read line not including the line endings, + * negative on error, or if the buffer becomes truncated. + */ +int64_t liteav_ff_read_line_to_bprint_overwrite(AVIOContext *s, AVBPrint *bp); + +#define SPACE_CHARS " \t\r\n" + +/** + * Callback function type for liteav_ff_parse_key_value. 
+ * + * @param key a pointer to the key + * @param key_len the number of bytes that belong to the key, including the '=' + * char + * @param dest return the destination pointer for the value in *dest, may + * be null to ignore the value + * @param dest_len the length of the *dest buffer + */ +typedef void (*liteav_ff_parse_key_val_cb)(void *context, const char *key, + int key_len, char **dest, int *dest_len); +/** + * Parse a string with comma-separated key=value pairs. The value strings + * may be quoted and may contain escaped characters within quoted strings. + * + * @param str the string to parse + * @param callback_get_buf function that returns where to store the + * unescaped value string. + * @param context the opaque context pointer to pass to callback_get_buf + */ +void liteav_ff_parse_key_value(const char *str, liteav_ff_parse_key_val_cb callback_get_buf, + void *context); + +/** + * Find stream index based on format-specific stream ID + * @return stream index, or < 0 on error + */ +int liteav_ff_find_stream_index(AVFormatContext *s, int id); + +/** + * Internal version of av_index_search_timestamp + */ +int liteav_ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries, + int64_t wanted_timestamp, int flags); + +/** + * Internal version of av_add_index_entry + */ +int liteav_ff_add_index_entry(AVIndexEntry **index_entries, + int *nb_index_entries, + unsigned int *index_entries_allocated_size, + int64_t pos, int64_t timestamp, int size, int distance, int flags); + +void liteav_ff_configure_buffers_for_index(AVFormatContext *s, int64_t time_tolerance); + +/** + * Add a new chapter. 
+ * + * @param s media file handle + * @param id unique ID for this chapter + * @param start chapter start time in time_base units + * @param end chapter end time in time_base units + * @param title chapter title + * + * @return AVChapter or NULL on error + */ +AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, + int64_t start, int64_t end, const char *title); + +/** + * Ensure the index uses less memory than the maximum specified in + * AVFormatContext.max_index_size by discarding entries if it grows + * too large. + */ +void liteav_ff_reduce_index(AVFormatContext *s, int stream_index); + +enum AVCodecID liteav_ff_guess_image2_codec(const char *filename); + +/** + * Perform a binary search using av_index_search_timestamp() and + * AVInputFormat.read_timestamp(). + * + * @param target_ts target timestamp in the time base of the given stream + * @param stream_index stream number + */ +int liteav_ff_seek_frame_binary(AVFormatContext *s, int stream_index, + int64_t target_ts, int flags); + +/** + * Update cur_dts of all streams based on the given timestamp and AVStream. + * + * Stream ref_st unchanged, others set cur_dts in their native time base. + * Only needed for timestamp wrapping or if (dts not set and pts!=dts). + * @param timestamp new dts expressed in time_base of param ref_st + * @param ref_st reference stream giving time_base of param timestamp + */ +void liteav_ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp); + +int liteav_ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos, + int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )); + +/** + * Perform a binary search using read_timestamp(). 
+ * + * @param target_ts target timestamp in the time base of the given stream + * @param stream_index stream number + */ +int64_t liteav_ff_gen_search(AVFormatContext *s, int stream_index, + int64_t target_ts, int64_t pos_min, + int64_t pos_max, int64_t pos_limit, + int64_t ts_min, int64_t ts_max, + int flags, int64_t *ts_ret, + int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )); + +/** + * Set the time base and wrapping info for a given stream. This will be used + * to interpret the stream's timestamps. If the new time base is invalid + * (numerator or denominator are non-positive), it leaves the stream + * unchanged. + * + * @param s stream + * @param pts_wrap_bits number of bits effectively used by the pts + * (used for wrap control) + * @param pts_num time base numerator + * @param pts_den time base denominator + */ +void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits, + unsigned int pts_num, unsigned int pts_den); + +/** + * Add side data to a packet for changing parameters to the given values. + * Parameters set to 0 aren't included in the change. + */ +int liteav_ff_add_param_change(AVPacket *pkt, int32_t channels, + uint64_t channel_layout, int32_t sample_rate, + int32_t width, int32_t height); + +/** + * Set the timebase for each stream from the corresponding codec timebase and + * print it. + */ +int liteav_ff_framehash_write_header(AVFormatContext *s); + +/** + * Read a transport packet from a media file. + * + * @param s media file handle + * @param pkt is filled + * @return 0 if OK, AVERROR_xxx on error + */ +int liteav_ff_read_packet(AVFormatContext *s, AVPacket *pkt); + +/** + * Interleave a packet per dts in an output media file. + * + * Packets with pkt->destruct == av_destruct_packet will be freed inside this + * function, so they cannot be used after it. Note that calling liteav_av_packet_unref() + * on them is still safe. 
+ * + * @param s media file handle + * @param out the interleaved packet will be output here + * @param pkt the input packet + * @param flush 1 if no further packets are available as input and all + * remaining packets should be output + * @return 1 if a packet was output, 0 if no packet could be output, + * < 0 if an error occurred + */ +int liteav_ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, + AVPacket *pkt, int flush); + +/* + * check specified stream st is in s then free it(set to NULL in s->streams) + * do nothing else, like modify nb_streams or reorder streams in s->streams + */ +void liteav_ff_free_stream_only(AVFormatContext *s, AVStream *st); + +void liteav_ff_free_stream(AVFormatContext *s, AVStream *st); + +/** + * Return the frame duration in seconds. Return 0 if not available. + */ +void liteav_ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st, + AVCodecParserContext *pc, AVPacket *pkt); + +unsigned int liteav_ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id); + +enum AVCodecID liteav_ff_codec_get_id(const AVCodecTag *tags, unsigned int tag); + +/** + * Select a PCM codec based on the given parameters. + * + * @param bps bits-per-sample + * @param flt floating-point + * @param be big-endian + * @param sflags signed flags. each bit corresponds to one byte of bit depth. + * e.g. the 1st bit indicates if 8-bit should be signed or + * unsigned, the 2nd bit indicates if 16-bit should be signed or + * unsigned, etc... This is useful for formats such as WAVE where + * only 8-bit is unsigned and all other bit depths are signed. + * @return a PCM codec id or AV_CODEC_ID_NONE + */ +enum AVCodecID liteav_ff_get_pcm_codec_id(int bps, int flt, int be, int sflags); + +/** + * Chooses a timebase for muxing the specified stream. + * + * The chosen timebase allows sample accurate timestamps based + * on the framerate or sample rate for audio streams. 
It also is + * at least as precise as 1/min_precision would be. + */ +AVRational liteav_ff_choose_timebase(AVFormatContext *s, AVStream *st, int min_precision); + +/** + * Chooses a timebase for muxing the specified stream. + */ +enum AVChromaLocation liteav_ff_choose_chroma_location(AVFormatContext *s, AVStream *st); + +/** + * Generate standard extradata for AVC-Intra based on width/height and field + * order. + */ +int liteav_ff_generate_avci_extradata(AVStream *st); + +/** + * Add a bitstream filter to a stream. + * + * @param st output stream to add a filter to + * @param name the name of the filter to add + * @param args filter-specific argument string + * @return >0 on success; + * AVERROR code on failure + */ +int liteav_ff_stream_add_bitstream_filter(AVStream *st, const char *name, const char *args); + +/** + * Copy encoding parameters from source to destination stream + * + * @param dst pointer to destination AVStream + * @param src pointer to source AVStream + * @return >=0 on success, AVERROR code on error + */ +int liteav_ff_stream_encode_params_copy(AVStream *dst, const AVStream *src); + +/** + * Wrap errno on rename() error. + * + * @param oldpath source path + * @param newpath destination path + * @return 0 or AVERROR on failure + */ +static inline int liteav_ff_rename(const char *oldpath, const char *newpath, void *logctx) +{ + int ret = 0; + if (rename(oldpath, newpath) == -1) { + ret = AVERROR(errno); + if (logctx) { + char err[AV_ERROR_MAX_STRING_SIZE] = {0}; + av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, ret); + liteav_av_log(logctx, AV_LOG_ERROR, "failed to rename file %s to %s: %s\n", oldpath, newpath, err); + } + } + return ret; +} + +/** + * Allocate extradata with additional AV_INPUT_BUFFER_PADDING_SIZE at end + * which is always set to 0. + * + * Previously allocated extradata in par will be freed. 
+ * + * @param size size of extradata + * @return 0 if OK, AVERROR_xxx on error + */ +int liteav_ff_alloc_extradata(AVCodecParameters *par, int size); + +/** + * Allocate extradata with additional AV_INPUT_BUFFER_PADDING_SIZE at end + * which is always set to 0 and fill it from pb. + * + * @param size size of extradata + * @return >= 0 if OK, AVERROR_xxx on error + */ +int liteav_ff_get_extradata(AVFormatContext *s, AVCodecParameters *par, AVIOContext *pb, int size); + +/** + * add frame for rfps calculation. + * + * @param dts timestamp of the i-th frame + * @return 0 if OK, AVERROR_xxx on error + */ +int liteav_ff_rfps_add_frame(AVFormatContext *ic, AVStream *st, int64_t dts); + +void liteav_ff_rfps_calculate(AVFormatContext *ic); + +/** + * Flags for AVFormatContext.write_uncoded_frame() + */ +enum AVWriteUncodedFrameFlags { + + /** + * Query whether the feature is possible on this stream. + * The frame argument is ignored. + */ + AV_WRITE_UNCODED_FRAME_QUERY = 0x0001, + +}; + +/** + * Copies the whilelists from one context to the other + */ +int liteav_ff_copy_whiteblacklists(AVFormatContext *dst, const AVFormatContext *src); + +int liteav_ffio_open2_wrapper(struct AVFormatContext *s, AVIOContext **pb, const char *url, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * Returned by demuxers to indicate that data was consumed but discarded + * (ignored streams or junk data). The framework will re-call the demuxer. + */ +#define FFERROR_REDO FFERRTAG('R','E','D','O') + +/** + * Utility function to open IO stream of output format. 
+ * + * @param s AVFormatContext + * @param url URL or file name to open for writing + * @options optional options which will be passed to io_open callback + * @return >=0 on success, negative AVERROR in case of failure + */ +int liteav_ff_format_output_open(AVFormatContext *s, const char *url, AVDictionary **options); + +/* + * A wrapper around AVFormatContext.io_close that should be used + * instead of calling the pointer directly. + */ +void liteav_ff_format_io_close(AVFormatContext *s, AVIOContext **pb); + +/** + * Utility function to check if the file uses http or https protocol + * + * @param s AVFormatContext + * @param filename URL or file name to open for writing + */ +int liteav_ff_is_http_proto(char *filename); + +/** + * Parse creation_time in AVFormatContext metadata if exists and warn if the + * parsing fails. + * + * @param s AVFormatContext + * @param timestamp parsed timestamp in microseconds, only set on successful parsing + * @param return_seconds set this to get the number of seconds in timestamp instead of microseconds + * @return 1 if OK, 0 if the metadata was not present, AVERROR(EINVAL) on parse error + */ +int liteav_ff_parse_creation_time_metadata(AVFormatContext *s, int64_t *timestamp, int return_seconds); + +/** + * Standardize creation_time metadata in AVFormatContext to an ISO-8601 + * timestamp string. + * + * @param s AVFormatContext + * @return <0 on error + */ +int liteav_ff_standardize_creation_time(AVFormatContext *s); + +#define CONTAINS_PAL 2 +/** + * Reshuffles the lines to use the user specified stride. 
+ * + * @param ppkt input and output packet + * @return negative error code or + * 0 if no new packet was allocated + * non-zero if a new packet was allocated and ppkt has to be freed + * CONTAINS_PAL if in addition to a new packet the old contained a palette + */ +int liteav_ff_reshuffle_raw_rgb(AVFormatContext *s, AVPacket **ppkt, AVCodecParameters *par, int expected_stride); + +/** + * Retrieves the palette from a packet, either from side data, or + * appended to the video data in the packet itself (raw video only). + * It is commonly used after a call to liteav_ff_reshuffle_raw_rgb(). + * + * Use 0 for the ret parameter to check for side data only. + * + * @param pkt pointer to packet before calling liteav_ff_reshuffle_raw_rgb() + * @param ret return value from liteav_ff_reshuffle_raw_rgb(), or 0 + * @param palette pointer to palette buffer + * @return negative error code or + * 1 if the packet has a palette, else 0 + */ +int liteav_ff_get_packet_palette(AVFormatContext *s, AVPacket *pkt, int ret, uint32_t *palette); + +/** + * Finalize buf into extradata and set its size appropriately. + */ +int liteav_ff_bprint_to_codecpar_extradata(AVCodecParameters *par, struct AVBPrint *buf); + +/** + * Find the next packet in the interleaving queue for the given stream. + * The pkt parameter is filled in with the queued packet, including + * references to the data (which the caller is not allowed to keep or + * modify). + * + * @return 0 if a packet was found, a negative value if no packet was found + */ +int liteav_ff_interleaved_peek(AVFormatContext *s, int stream, + AVPacket *pkt, int add_offset); + + +int liteav_ff_lock_avformat(void); +int liteav_ff_unlock_avformat(void); + +/** + * Set AVFormatContext url field to the provided pointer. The pointer must + * point to a valid string. The existing url field is freed if necessary. Also + * set the legacy filename field to the same string which was provided in url. 
+ */ +void liteav_ff_format_set_url(AVFormatContext *s, char *url); + +#define FF_PACKETLIST_FLAG_REF_PACKET (1 << 0) /**< Create a new reference for the packet instead of + transferring the ownership of the existing one to the + list. */ + +/** + * Append an AVPacket to the list. + * + * @param head List head element + * @param tail List tail element + * @param pkt The packet being appended + * @param flags Any combination of FF_PACKETLIST_FLAG_* flags + * @return 0 on success, negative AVERROR value on failure. On failure, + the list is unchanged + */ +int liteav_ff_packet_list_put(AVPacketList **head, AVPacketList **tail, + AVPacket *pkt, int flags); + +/** + * Remove the oldest AVPacket in the list and return it. + * + * @note The pkt will be overwritten completely. The caller owns the + * packet and must unref it by itself. + * + * @param head List head element + * @param tail List tail element + * @param pkt Pointer to an initialized AVPacket struct + */ +int liteav_ff_packet_list_get(AVPacketList **head, AVPacketList **tail, + AVPacket *pkt); + +/** + * Wipe the list and unref all the packets in it. 
+ * + * @param head List head element + * @param tail List tail element + */ +void liteav_ff_packet_list_free(AVPacketList **head, AVPacketList **tail); + +void liteav_avpriv_register_devices(const AVOutputFormat * const o[], const AVInputFormat * const i[]); + +#endif /* AVFORMAT_INTERNAL_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/os_support.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/os_support.h new file mode 100644 index 0000000..172b2c2 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/os_support.h @@ -0,0 +1,248 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * various OS-feature replacement utilities + * copyright (c) 2000, 2001, 2002 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_OS_SUPPORT_H +#define AVFORMAT_OS_SUPPORT_H + +/** + * @file + * miscellaneous OS support macros and functions. 
+ */ + +#include "config.h" + +#include <sys/stat.h> + +#ifdef _WIN32 +#if HAVE_DIRECT_H +#include <direct.h> +#endif +#if HAVE_IO_H +#include <io.h> +#endif +#endif + +#ifdef _WIN32 +# include <fcntl.h> +# ifdef lseek +# undef lseek +# endif +# define lseek(f,p,w) _lseeki64((f), (p), (w)) +# ifdef stat +# undef stat +# endif +# define stat _stati64 +# ifdef fstat +# undef fstat +# endif +# define fstat(f,s) _fstati64((f), (s)) +#endif /* defined(_WIN32) */ + + +#ifdef __ANDROID__ +# if HAVE_UNISTD_H +# include <unistd.h> +# endif +# ifdef lseek +# undef lseek +# endif +# define lseek(f,p,w) lseek64((f), (p), (w)) +#endif + +static inline int is_dos_path(const char *path) +{ +#if HAVE_DOS_PATHS + if (path[0] && path[1] == ':') + return 1; +#endif + return 0; +} + +#if defined(__OS2__) +#define SHUT_RD 0 +#define SHUT_WR 1 +#define SHUT_RDWR 2 +#endif + +#if defined(_WIN32) +#define SHUT_RD SD_RECEIVE +#define SHUT_WR SD_SEND +#define SHUT_RDWR SD_BOTH + +#ifndef S_IRUSR +#define S_IRUSR S_IREAD +#endif +#ifndef S_IWUSR +#define S_IWUSR S_IWRITE +#endif +#endif + +#if CONFIG_NETWORK +#if !HAVE_SOCKLEN_T +typedef int socklen_t; +#endif + +/* most of the time closing a socket is just closing an fd */ +#if !HAVE_CLOSESOCKET +#define closesocket close +#endif + +#if !HAVE_POLL_H +typedef unsigned long nfds_t; + +#if HAVE_WINSOCK2_H +#include <winsock2.h> +#endif +#if !HAVE_STRUCT_POLLFD +struct pollfd { + int fd; + short events; /* events to look for */ + short revents; /* events that occurred */ +}; + +/* events & revents */ +#define POLLIN 0x0001 /* any readable data available */ +#define POLLOUT 0x0002 /* file descriptor is writeable */ +#define POLLRDNORM POLLIN +#define POLLWRNORM POLLOUT +#define POLLRDBAND 0x0008 /* priority readable data */ +#define POLLWRBAND 0x0010 /* priority data can be written */ +#define POLLPRI 0x0020 /* high priority readable data */ + +/* revents only */ +#define POLLERR 0x0004 /* errors pending */ +#define POLLHUP 0x0080 /* 
disconnected */ +#define POLLNVAL 0x1000 /* invalid file descriptor */ +#endif + + +int liteav_ff_poll(struct pollfd *fds, nfds_t numfds, int timeout); +#define poll liteav_ff_poll +#endif /* HAVE_POLL_H */ +#endif /* CONFIG_NETWORK */ + +#ifdef _WIN32 +#include <stdio.h> +#include <windows.h> +#include "libavutil/wchar_filename.h" + +#define DEF_FS_FUNCTION(name, wfunc, afunc) \ +static inline int win32_##name(const char *filename_utf8) \ +{ \ + wchar_t *filename_w; \ + int ret; \ + \ + if (utf8towchar(filename_utf8, &filename_w)) \ + return -1; \ + if (!filename_w) \ + goto fallback; \ + \ + ret = wfunc(filename_w); \ + liteav_av_free(filename_w); \ + return ret; \ + \ +fallback: \ + /* filename may be be in CP_ACP */ \ + return afunc(filename_utf8); \ +} + +DEF_FS_FUNCTION(unlink, _wunlink, _unlink) +DEF_FS_FUNCTION(mkdir, _wmkdir, _mkdir) +DEF_FS_FUNCTION(rmdir, _wrmdir , _rmdir) + +#define DEF_FS_FUNCTION2(name, wfunc, afunc, partype) \ +static inline int win32_##name(const char *filename_utf8, partype par) \ +{ \ + wchar_t *filename_w; \ + int ret; \ + \ + if (utf8towchar(filename_utf8, &filename_w)) \ + return -1; \ + if (!filename_w) \ + goto fallback; \ + \ + ret = wfunc(filename_w, par); \ + liteav_av_free(filename_w); \ + return ret; \ + \ +fallback: \ + /* filename may be be in CP_ACP */ \ + return afunc(filename_utf8, par); \ +} + +DEF_FS_FUNCTION2(access, _waccess, _access, int) +DEF_FS_FUNCTION2(stat, _wstati64, _stati64, struct stat*) + +static inline int win32_rename(const char *src_utf8, const char *dest_utf8) +{ + wchar_t *src_w, *dest_w; + int ret; + + if (utf8towchar(src_utf8, &src_w)) + return -1; + if (utf8towchar(dest_utf8, &dest_w)) { + liteav_av_free(src_w); + return -1; + } + if (!src_w || !dest_w) { + liteav_av_free(src_w); + liteav_av_free(dest_w); + goto fallback; + } + + ret = MoveFileExW(src_w, dest_w, MOVEFILE_REPLACE_EXISTING); + liteav_av_free(src_w); + liteav_av_free(dest_w); + // Lacking proper mapping from GetLastError() error 
codes to errno codes + if (ret) + errno = EPERM; + return ret; + +fallback: + /* filename may be be in CP_ACP */ +#if !HAVE_UWP + ret = MoveFileExA(src_utf8, dest_utf8, MOVEFILE_REPLACE_EXISTING); + if (ret) + errno = EPERM; +#else + /* Windows Phone doesn't have MoveFileExA, and for Windows Store apps, + * it is available but not allowed by the app certification kit. However, + * it's unlikely that anybody would input filenames in CP_ACP there, so this + * fallback is kept mostly for completeness. Alternatively we could + * do MultiByteToWideChar(CP_ACP) and use MoveFileExW, but doing + * explicit conversions with CP_ACP is allegedly forbidden in windows + * store apps (or windows phone), and the notion of a native code page + * doesn't make much sense there. */ + ret = rename(src_utf8, dest_utf8); +#endif + return ret; +} + +#define mkdir(a, b) win32_mkdir(a) +#define rename win32_rename +#define rmdir win32_rmdir +#define unlink win32_unlink +#define access win32_access + +#endif + +#endif /* AVFORMAT_OS_SUPPORT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/url.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/url.h new file mode 100644 index 0000000..34cc30e --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/url.h @@ -0,0 +1,345 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * unbuffered private I/O API + */ + +#ifndef AVFORMAT_URL_H +#define AVFORMAT_URL_H + +#include "avio.h" +#include "libavformat/version.h" + +#include "libavutil/dict.h" +#include "libavutil/log.h" + +#define URL_PROTOCOL_FLAG_NESTED_SCHEME 1 /*< The protocol name can be the first part of a nested protocol scheme */ +#define URL_PROTOCOL_FLAG_NETWORK 2 /*< The protocol uses network */ + +extern const AVClass liteav_ffurl_context_class; + +typedef struct URLContext { + const AVClass *av_class; /**< information for liteav_av_log(). Set by url_open(). */ + const struct URLProtocol *prot; + void *priv_data; + char *filename; /**< specified URL */ + int flags; + int max_packet_size; /**< if non zero, the stream is packetized with this max packet size */ + int is_streamed; /**< true if streamed (no seek possible), default = false */ + int is_connected; + AVIOInterruptCB interrupt_callback; + int64_t rw_timeout; /**< maximum time to wait for (network) read/write operation completion, in mcs */ + const char *protocol_whitelist; + const char *protocol_blacklist; + int nReopenTimes; + int min_packet_size; /**< if non zero, the stream is packetized with this min packet size */ +} URLContext; + +typedef struct URLProtocol { + const char *name; + int (*url_open)( URLContext *h, const char *url, int flags); + /** + * This callback is to be used by protocols which open further nested + * protocols. options are then to be passed to liteav_ffurl_open()/liteav_ffurl_connect() + * for those nested protocols. 
+ */ + int (*url_open2)(URLContext *h, const char *url, int flags, AVDictionary **options); + int (*url_accept)(URLContext *s, URLContext **c); + int (*url_handshake)(URLContext *c); + + /** + * Read data from the protocol. + * If data is immediately available (even less than size), EOF is + * reached or an error occurs (including EINTR), return immediately. + * Otherwise: + * In non-blocking mode, return AVERROR(EAGAIN) immediately. + * In blocking mode, wait for data/EOF/error with a short timeout (0.1s), + * and return AVERROR(EAGAIN) on timeout. + * Checking interrupt_callback, looping on EINTR and EAGAIN and until + * enough data has been read is left to the calling function; see + * retry_transfer_wrapper in avio.c. + */ + int (*url_read)( URLContext *h, unsigned char *buf, int size); + int (*url_write)(URLContext *h, const unsigned char *buf, int size); + int64_t (*url_seek)( URLContext *h, int64_t pos, int whence); + int (*url_close)(URLContext *h); + int (*url_read_pause)(URLContext *h, int pause); + int64_t (*url_read_seek)(URLContext *h, int stream_index, + int64_t timestamp, int flags); + int (*url_get_file_handle)(URLContext *h); + int (*url_get_multi_file_handle)(URLContext *h, int **handles, + int *numhandles); + int (*url_get_short_seek)(URLContext *h); + int (*url_shutdown)(URLContext *h, int flags); + int priv_data_size; + const AVClass *priv_data_class; + int flags; + int (*url_check)(URLContext *h, int mask); + int (*url_open_dir)(URLContext *h); + int (*url_read_dir)(URLContext *h, AVIODirEntry **next); + int (*url_close_dir)(URLContext *h); + int (*url_delete)(URLContext *h); + int (*url_move)(URLContext *h_src, URLContext *h_dst); + const char *default_whitelist; +} URLProtocol; + +/** + * Create a URLContext for accessing to the resource indicated by + * url, but do not initiate the connection yet. 
+ * + * @param puc pointer to the location where, in case of success, the + * function puts the pointer to the created URLContext + * @param flags flags which control how the resource indicated by url + * is to be opened + * @param int_cb interrupt callback to use for the URLContext, may be + * NULL + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int liteav_ffurl_alloc(URLContext **puc, const char *filename, int flags, + const AVIOInterruptCB *int_cb); + +/** + * Connect an URLContext that has been allocated by liteav_ffurl_alloc + * + * @param options A dictionary filled with options for nested protocols, + * i.e. it will be passed to url_open2() for protocols implementing it. + * This parameter will be destroyed and replaced with a dict containing options + * that were not found. May be NULL. + */ +int liteav_ffurl_connect(URLContext *uc, AVDictionary **options); + +/** + * Create an URLContext for accessing to the resource indicated by + * url, and open it. + * + * @param puc pointer to the location where, in case of success, the + * function puts the pointer to the created URLContext + * @param flags flags which control how the resource indicated by url + * is to be opened + * @param int_cb interrupt callback to use for the URLContext, may be + * NULL + * @param options A dictionary filled with protocol-private options. On return + * this parameter will be destroyed and replaced with a dict containing options + * that were not found. May be NULL. + * @param parent An enclosing URLContext, whose generic options should + * be applied to this URLContext as well. 
+ * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int liteav_ffurl_open_whitelist(URLContext **puc, const char *filename, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options, + const char *whitelist, const char* blacklist, + URLContext *parent); + +int liteav_ffurl_open(URLContext **puc, const char *filename, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * Accept an URLContext c on an URLContext s + * + * @param s server context + * @param c client context, must be unallocated. + * @return >= 0 on success, liteav_ff_neterrno() on failure. + */ +int liteav_ffurl_accept(URLContext *s, URLContext **c); + +/** + * Perform one step of the protocol handshake to accept a new client. + * See liteav_avio_handshake() for details. + * Implementations should try to return decreasing values. + * If the protocol uses an underlying protocol, the underlying handshake is + * usually the first step, and the return value can be: + * (largest value for this protocol) + (return value from other protocol) + * + * @param c the client context + * @return >= 0 on success or a negative value corresponding + * to an AVERROR code on failure + */ +int liteav_ffurl_handshake(URLContext *c); + +/** + * Read up to size bytes from the resource accessed by h, and store + * the read bytes in buf. + * + * @return The number of bytes actually read, or a negative value + * corresponding to an AVERROR code in case of error. A value of zero + * indicates that it is not possible to read more from the accessed + * resource (except if the value of the size argument is also zero). + */ +int liteav_ffurl_read(URLContext *h, unsigned char *buf, int size); + +/** + * Read as many bytes as possible (up to size), calling the + * read function multiple times if necessary. 
+ * This makes special short-read handling in applications + * unnecessary, if the return value is < size then it is + * certain there was either an error or the end of file was reached. + */ +int liteav_ffurl_read_complete(URLContext *h, unsigned char *buf, int size); + +/** + * Write size bytes from buf to the resource accessed by h. + * + * @return the number of bytes actually written, or a negative value + * corresponding to an AVERROR code in case of failure + */ +int liteav_ffurl_write(URLContext *h, const unsigned char *buf, int size); + +/** + * Change the position that will be used by the next read/write + * operation on the resource accessed by h. + * + * @param pos specifies the new position to set + * @param whence specifies how pos should be interpreted, it must be + * one of SEEK_SET (seek from the beginning), SEEK_CUR (seek from the + * current position), SEEK_END (seek from the end), or AVSEEK_SIZE + * (return the filesize of the requested resource, pos is ignored). + * @return a negative value corresponding to an AVERROR code in case + * of failure, or the resulting file position, measured in bytes from + * the beginning of the file. You can use this feature together with + * SEEK_CUR to read the current file position. + */ +int64_t liteav_ffurl_seek(URLContext *h, int64_t pos, int whence); + +/** + * Close the resource accessed by the URLContext h, and free the + * memory used by it. Also set the URLContext pointer to NULL. + * + * @return a negative value if an error condition occurred, 0 + * otherwise + */ +int liteav_ffurl_closep(URLContext **h); +int liteav_ffurl_close(URLContext *h); + +/** + * Return the filesize of the resource accessed by h, AVERROR(ENOSYS) + * if the operation is not supported by h, or another negative value + * corresponding to an AVERROR error code in case of failure. + */ +int64_t liteav_ffurl_size(URLContext *h); + +/** + * Return the file descriptor associated with this URL. 
For RTP, this + * will return only the RTP file descriptor, not the RTCP file descriptor. + * + * @return the file descriptor associated with this URL, or <0 on error. + */ +int liteav_ffurl_get_file_handle(URLContext *h); + +/** + * Return the file descriptors associated with this URL. + * + * @return 0 on success or <0 on error. + */ +int liteav_ffurl_get_multi_file_handle(URLContext *h, int **handles, int *numhandles); + +/** + * Return the current short seek threshold value for this URL. + * + * @return threshold (>0) on success or <=0 on error. + */ +int liteav_ffurl_get_short_seek(URLContext *h); + +/** + * Signal the URLContext that we are done reading or writing the stream. + * + * @param h pointer to the resource + * @param flags flags which control how the resource indicated by url + * is to be shutdown + * + * @return a negative value if an error condition occurred, 0 + * otherwise + */ +int liteav_ffurl_shutdown(URLContext *h, int flags); + +/** + * Check if the user has requested to interrupt a blocking function + * associated with cb. + */ +int liteav_ff_check_interrupt(AVIOInterruptCB *cb); + +/* udp.c */ +int liteav_ff_udp_set_remote_url(URLContext *h, const char *uri); +int liteav_ff_udp_get_local_port(URLContext *h); + +/** + * Assemble a URL string from components. This is the reverse operation + * of av_url_split. + * + * Note, this requires networking to be initialized, so the caller must + * ensure liteav_ff_network_init has been called. + * + * @see av_url_split + * + * @param str the buffer to fill with the url + * @param size the size of the str buffer + * @param proto the protocol identifier, if null, the separator + * after the identifier is left out, too + * @param authorization an optional authorization string, may be null. + * An empty string is treated the same as a null string. 
+ * @param hostname the host name string + * @param port the port number, left out from the string if negative + * @param fmt a generic format string for everything to add after the + * host/port, may be null + * @return the number of characters written to the destination buffer + */ +int liteav_ff_url_join(char *str, int size, const char *proto, + const char *authorization, const char *hostname, + int port, const char *fmt, ...) av_printf_format(7, 8); + +/** + * Convert a relative url into an absolute url, given a base url. + * + * @param buf the buffer where output absolute url is written + * @param size the size of buf + * @param base the base url, may be equal to buf. + * @param rel the new url, which is interpreted relative to base + */ +void liteav_ff_make_absolute_url(char *buf, int size, const char *base, + const char *rel); + +/** + * Allocate directory entry with default values. + * + * @return entry or NULL on error + */ +AVIODirEntry *liteav_ff_alloc_dir_entry(void); + +const AVClass *liteav_ff_urlcontext_child_class_next(const AVClass *prev); + +/** + * Construct a list of protocols matching a given whitelist and/or blacklist. + * + * @param whitelist a comma-separated list of allowed protocol names or NULL. If + * this is a non-empty string, only protocols in this list will + * be included. + * @param blacklist a comma-separated list of forbidden protocol names or NULL. + * If this is a non-empty string, all protocols in this list + * will be excluded. + * + * @return a NULL-terminated array of matching protocols. The array must be + * freed by the caller. 
+ */ +const URLProtocol **liteav_ffurl_get_protocols(const char *whitelist, + const char *blacklist); + +#endif /* AVFORMAT_URL_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/version.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/version.h new file mode 100644 index 0000000..ac14a55 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavformat/version.h @@ -0,0 +1,111 @@ +/* + * Version macros. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_VERSION_H +#define AVFORMAT_VERSION_H + +/** + * @file + * @ingroup libavf + * Libavformat version macros + */ + +#include "libavutil/version.h" + +// Major bumping may affect Ticket5467, 5421, 5451(compatibility with Chromium) +// Also please add any ticket numbers that you believe might be affected here +#define LIBAVFORMAT_VERSION_MAJOR 58 +#define LIBAVFORMAT_VERSION_MINOR 20 +#define LIBAVFORMAT_VERSION_MICRO 100 + +#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ + LIBAVFORMAT_VERSION_MINOR, \ + LIBAVFORMAT_VERSION_MICRO) +#define LIBAVFORMAT_VERSION AV_VERSION(LIBAVFORMAT_VERSION_MAJOR, \ + LIBAVFORMAT_VERSION_MINOR, \ + LIBAVFORMAT_VERSION_MICRO) +#define LIBAVFORMAT_BUILD LIBAVFORMAT_VERSION_INT + +#define LIBAVFORMAT_IDENT "Lavf" AV_STRINGIFY(LIBAVFORMAT_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + * + * @note, when bumping the major version it is recommended to manually + * disable each FF_API_* in its own commit instead of disabling them all + * at once through the bump. This improves the git bisect-ability of the change. 
+ * + */ +#ifndef FF_API_COMPUTE_PKT_FIELDS2 +#define FF_API_COMPUTE_PKT_FIELDS2 (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_OLD_OPEN_CALLBACKS +#define FF_API_OLD_OPEN_CALLBACKS (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_LAVF_AVCTX +#define FF_API_LAVF_AVCTX (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_HTTP_USER_AGENT +#define FF_API_HTTP_USER_AGENT (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_HLS_WRAP +#define FF_API_HLS_WRAP (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_HLS_USE_LOCALTIME +#define FF_API_HLS_USE_LOCALTIME (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_LAVF_KEEPSIDE_FLAG +#define FF_API_LAVF_KEEPSIDE_FLAG (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_OLD_ROTATE_API +#define FF_API_OLD_ROTATE_API (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_FORMAT_GET_SET +#define FF_API_FORMAT_GET_SET (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_OLD_AVIO_EOF_0 +#define FF_API_OLD_AVIO_EOF_0 (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_LAVF_FFSERVER +#define FF_API_LAVF_FFSERVER (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_FORMAT_FILENAME +#define FF_API_FORMAT_FILENAME (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_OLD_RTSP_OPTIONS +#define FF_API_OLD_RTSP_OPTIONS (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_NEXT +#define FF_API_NEXT (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_DASH_MIN_SEG_DURATION +#define FF_API_DASH_MIN_SEG_DURATION (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_LAVF_MP4A_LATM +#define FF_API_LAVF_MP4A_LATM (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif + + +#ifndef FF_API_R_FRAME_RATE +#define FF_API_R_FRAME_RATE 1 +#endif +#endif /* AVFORMAT_VERSION_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/adler32.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/adler32.h new file mode 
100644 index 0000000..52f4781 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/adler32.h @@ -0,0 +1,61 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2006 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_adler32 + * Public header for Adler-32 hash function implementation. + */ + +#ifndef AVUTIL_ADLER32_H +#define AVUTIL_ADLER32_H + +#include <stdint.h> +#include "attributes.h" + +/** + * @defgroup lavu_adler32 Adler-32 + * @ingroup lavu_hash + * Adler-32 hash function implementation. + * + * @{ + */ + +/** + * Calculate the Adler32 checksum of a buffer. + * + * Passing the return value to a subsequent liteav_av_adler32_update() call + * allows the checksum of multiple buffers to be calculated as though + * they were concatenated. 
+ * + * @param adler initial checksum value + * @param buf pointer to input buffer + * @param len size of input buffer + * @return updated checksum + */ +unsigned long liteav_av_adler32_update(unsigned long adler, const uint8_t *buf, + unsigned int len) av_pure; + +/** + * @} + */ + +#endif /* AVUTIL_ADLER32_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/aes.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/aes.h new file mode 100644 index 0000000..ae41e45 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/aes.h @@ -0,0 +1,66 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AES_H +#define AVUTIL_AES_H + +#include <stdint.h> + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_aes AES + * @ingroup lavu_crypto + * @{ + */ + +extern const int liteav_av_aes_size; + +struct AVAES; + +/** + * Allocate an AVAES context. + */ +struct AVAES *liteav_av_aes_alloc(void); + +/** + * Initialize an AVAES context. 
+ * @param key_bits 128, 192 or 256 + * @param decrypt 0 for encryption, 1 for decryption + */ +int liteav_av_aes_init(struct AVAES *a, const uint8_t *key, int key_bits, int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * @param count number of 16 byte blocks + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_aes_crypt(struct AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_AES_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/aes_ctr.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/aes_ctr.h new file mode 100644 index 0000000..665955d --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/aes_ctr.h @@ -0,0 +1,89 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * AES-CTR cipher + * Copyright (c) 2015 Eran Kornblau <erankor at gmail dot com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AES_CTR_H +#define AVUTIL_AES_CTR_H + +#include <stdint.h> + +#include "attributes.h" +#include "version.h" + +#define AES_CTR_KEY_SIZE (16) +#define AES_CTR_IV_SIZE (8) + +struct AVAESCTR; + +/** + * Allocate an AVAESCTR context. + */ +struct AVAESCTR *liteav_av_aes_ctr_alloc(void); + +/** + * Initialize an AVAESCTR context. + * @param key encryption key, must have a length of AES_CTR_KEY_SIZE + */ +int liteav_av_aes_ctr_init(struct AVAESCTR *a, const uint8_t *key); + +/** + * Release an AVAESCTR context. + */ +void liteav_av_aes_ctr_free(struct AVAESCTR *a); + +/** + * Process a buffer using a previously initialized context. + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param size the size of src and dst + */ +void liteav_av_aes_ctr_crypt(struct AVAESCTR *a, uint8_t *dst, const uint8_t *src, int size); + +/** + * Get the current iv + */ +const uint8_t* liteav_av_aes_ctr_get_iv(struct AVAESCTR *a); + +/** + * Generate a random iv + */ +void liteav_av_aes_ctr_set_random_iv(struct AVAESCTR *a); + +/** + * Forcefully change the 8-byte iv + */ +void liteav_av_aes_ctr_set_iv(struct AVAESCTR *a, const uint8_t* iv); + +/** + * Forcefully change the "full" 16-byte iv, including the counter + */ +void liteav_av_aes_ctr_set_full_iv(struct AVAESCTR *a, const uint8_t* iv); + +/** + * Increment the top 64 bit of the iv (performed after each frame) + */ +void liteav_av_aes_ctr_increment_iv(struct AVAESCTR *a); + +/** + * @} + */ + +#endif /* AVUTIL_AES_CTR_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/attributes.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/attributes.h new 
file mode 100644 index 0000000..ced108a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/attributes.h @@ -0,0 +1,167 @@ +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Macro definitions for various function/variable attributes + */ + +#ifndef AVUTIL_ATTRIBUTES_H +#define AVUTIL_ATTRIBUTES_H + +#ifdef __GNUC__ +# define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y)) +# define AV_GCC_VERSION_AT_MOST(x,y) (__GNUC__ < (x) || __GNUC__ == (x) && __GNUC_MINOR__ <= (y)) +#else +# define AV_GCC_VERSION_AT_LEAST(x,y) 0 +# define AV_GCC_VERSION_AT_MOST(x,y) 0 +#endif + +#ifndef av_always_inline +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_always_inline __attribute__((always_inline)) inline +#elif defined(_MSC_VER) +# define av_always_inline __forceinline +#else +# define av_always_inline inline +#endif +#endif + +#ifndef av_extern_inline +#if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__) +# define av_extern_inline extern inline +#else +# define av_extern_inline inline +#endif +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,4) +# define av_warn_unused_result 
__attribute__((warn_unused_result)) +#else +# define av_warn_unused_result +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_noinline __attribute__((noinline)) +#elif defined(_MSC_VER) +# define av_noinline __declspec(noinline) +#else +# define av_noinline +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) || defined(__clang__) +# define av_pure __attribute__((pure)) +#else +# define av_pure +#endif + +#if AV_GCC_VERSION_AT_LEAST(2,6) || defined(__clang__) +# define av_const __attribute__((const)) +#else +# define av_const +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,3) || defined(__clang__) +# define av_cold __attribute__((cold)) +#else +# define av_cold +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,1) && !defined(__llvm__) +# define av_flatten __attribute__((flatten)) +#else +# define av_flatten +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define attribute_deprecated __attribute__((deprecated)) +#elif defined(_MSC_VER) +# define attribute_deprecated __declspec(deprecated) +#else +# define attribute_deprecated +#endif + +/** + * Disable warnings about deprecated features + * This is useful for sections of code kept for backward compatibility and + * scheduled for removal. + */ +#ifndef AV_NOWARN_DEPRECATED +#if AV_GCC_VERSION_AT_LEAST(4,6) +# define AV_NOWARN_DEPRECATED(code) \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \ + code \ + _Pragma("GCC diagnostic pop") +#elif defined(_MSC_VER) +# define AV_NOWARN_DEPRECATED(code) \ + __pragma(warning(push)) \ + __pragma(warning(disable : 4996)) \ + code; \ + __pragma(warning(pop)) +#else +# define AV_NOWARN_DEPRECATED(code) code +#endif +#endif + +#if defined(__GNUC__) || defined(__clang__) +# define av_unused __attribute__((unused)) +#else +# define av_unused +#endif + +/** + * Mark a variable as used and prevent the compiler from optimizing it + * away. This is useful for variables accessed only from inline + * assembler without the compiler being aware. 
+ */ +#if AV_GCC_VERSION_AT_LEAST(3,1) || defined(__clang__) +# define av_used __attribute__((used)) +#else +# define av_used +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,3) || defined(__clang__) +# define av_alias __attribute__((may_alias)) +#else +# define av_alias +#endif + +#if (defined(__GNUC__) || defined(__clang__)) && !defined(__INTEL_COMPILER) +# define av_uninit(x) x=x +#else +# define av_uninit(x) x +#endif + +#if defined(__GNUC__) || defined(__clang__) +# define av_builtin_constant_p __builtin_constant_p +# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos))) +#else +# define av_builtin_constant_p(x) 0 +# define av_printf_format(fmtpos, attrpos) +#endif + +#if AV_GCC_VERSION_AT_LEAST(2,5) || defined(__clang__) +# define av_noreturn __attribute__((noreturn)) +#else +# define av_noreturn +#endif + +#endif /* AVUTIL_ATTRIBUTES_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/audio_fifo.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/audio_fifo.h new file mode 100644 index 0000000..a31f07e --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/audio_fifo.h @@ -0,0 +1,188 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Audio FIFO + * Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Audio FIFO Buffer + */ + +#ifndef AVUTIL_AUDIO_FIFO_H +#define AVUTIL_AUDIO_FIFO_H + +#include "avutil.h" +#include "fifo.h" +#include "samplefmt.h" + +/** + * @addtogroup lavu_audio + * @{ + * + * @defgroup lavu_audiofifo Audio FIFO Buffer + * @{ + */ + +/** + * Context for an Audio FIFO Buffer. + * + * - Operates at the sample level rather than the byte level. + * - Supports multiple channels with either planar or packed sample format. + * - Automatic reallocation when writing to a full buffer. + */ +typedef struct AVAudioFifo AVAudioFifo; + +/** + * Free an AVAudioFifo. + * + * @param af AVAudioFifo to free + */ +void liteav_av_audio_fifo_free(AVAudioFifo *af); + +/** + * Allocate an AVAudioFifo. + * + * @param sample_fmt sample format + * @param channels number of channels + * @param nb_samples initial allocation size, in samples + * @return newly allocated AVAudioFifo, or NULL on error + */ +AVAudioFifo *liteav_av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels, + int nb_samples); + +/** + * Reallocate an AVAudioFifo. + * + * @param af AVAudioFifo to reallocate + * @param nb_samples new allocation size, in samples + * @return 0 if OK, or negative AVERROR code on failure + */ +av_warn_unused_result +int liteav_av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples); + +/** + * Write data to an AVAudioFifo. + * + * The AVAudioFifo will be reallocated automatically if the available space + * is less than nb_samples. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. 
+ * + * @param af AVAudioFifo to write to + * @param data audio data plane pointers + * @param nb_samples number of samples to write + * @return number of samples actually written, or negative AVERROR + * code on failure. If successful, the number of samples + * actually written will always be nb_samples. + */ +int liteav_av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Peek data from an AVAudioFifo. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param af AVAudioFifo to read from + * @param data audio data plane pointers + * @param nb_samples number of samples to peek + * @return number of samples actually peek, or negative AVERROR code + * on failure. The number of samples actually peek will not + * be greater than nb_samples, and will only be less than + * nb_samples if liteav_av_audio_fifo_size is less than nb_samples. + */ +int liteav_av_audio_fifo_peek(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Peek data from an AVAudioFifo. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param af AVAudioFifo to read from + * @param data audio data plane pointers + * @param nb_samples number of samples to peek + * @param offset offset from current read position + * @return number of samples actually peek, or negative AVERROR code + * on failure. The number of samples actually peek will not + * be greater than nb_samples, and will only be less than + * nb_samples if liteav_av_audio_fifo_size is less than nb_samples. + */ +int liteav_av_audio_fifo_peek_at(AVAudioFifo *af, void **data, int nb_samples, int offset); + +/** + * Read data from an AVAudioFifo. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. 
+ * + * @param af AVAudioFifo to read from + * @param data audio data plane pointers + * @param nb_samples number of samples to read + * @return number of samples actually read, or negative AVERROR code + * on failure. The number of samples actually read will not + * be greater than nb_samples, and will only be less than + * nb_samples if liteav_av_audio_fifo_size is less than nb_samples. + */ +int liteav_av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Drain data from an AVAudioFifo. + * + * Removes the data without reading it. + * + * @param af AVAudioFifo to drain + * @param nb_samples number of samples to drain + * @return 0 if OK, or negative AVERROR code on failure + */ +int liteav_av_audio_fifo_drain(AVAudioFifo *af, int nb_samples); + +/** + * Reset the AVAudioFifo buffer. + * + * This empties all data in the buffer. + * + * @param af AVAudioFifo to reset + */ +void liteav_av_audio_fifo_reset(AVAudioFifo *af); + +/** + * Get the current number of samples in the AVAudioFifo available for reading. + * + * @param af the AVAudioFifo to query + * @return number of samples available for reading + */ +int liteav_av_audio_fifo_size(AVAudioFifo *af); + +/** + * Get the current number of samples in the AVAudioFifo available for writing. 
+ * + * @param af the AVAudioFifo to query + * @return number of samples available for writing + */ +int liteav_av_audio_fifo_space(AVAudioFifo *af); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_AUDIO_FIFO_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avassert.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avassert.h new file mode 100644 index 0000000..b73cd94 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avassert.h @@ -0,0 +1,76 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2010 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simple assert() macros that are a bit more flexible than ISO C assert(). + * @author Michael Niedermayer <michaelni@gmx.at> + */ + +#ifndef AVUTIL_AVASSERT_H +#define AVUTIL_AVASSERT_H + +#include <stdlib.h> +#include "avutil.h" +#include "log.h" + +/** + * assert() equivalent, that is always enabled. 
+ */ +#define av_assert0(cond) do { \ + if (!(cond)) { \ + liteav_av_log(NULL, AV_LOG_PANIC, "Assertion %s failed at %s:%d\n", \ + AV_STRINGIFY(cond), __FILE__, __LINE__); \ + abort(); \ + } \ +} while (0) + + +/** + * assert() equivalent, that does not lie in speed critical code. + * These asserts() thus can be enabled without fearing speed loss. + */ +#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0 +#define av_assert1(cond) av_assert0(cond) +#else +#define av_assert1(cond) ((void)0) +#endif + + +/** + * assert() equivalent, that does lie in speed critical code. + */ +#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1 +#define av_assert2(cond) av_assert0(cond) +#define av_assert2_fpu() av_assert0_fpu() +#else +#define av_assert2(cond) ((void)0) +#define av_assert2_fpu() ((void)0) +#endif + +/** + * Assert that floating point operations can be executed. + * + * This will av_assert0() that the cpu is not in MMX state on X86 + */ +void av_assert0_fpu(void); + +#endif /* AVUTIL_AVASSERT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avconfig.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avconfig.h new file mode 100644 index 0000000..41fbcb6 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avconfig.h @@ -0,0 +1,16 @@ +/* Generated by ffmpeg configure */ +#ifndef AVUTIL_AVCONFIG_H +#define AVUTIL_AVCONFIG_H + +#if defined(FFMPEG_AV_HAVE_BIGENDIAN) +#define AV_HAVE_BIGENDIAN 1 +#else +#define AV_HAVE_BIGENDIAN 0 +#endif + +#if defined(FFMPEG_AV_HAVE_FAST_UNALIGNED) +#define AV_HAVE_FAST_UNALIGNED 1 +#else +#define AV_HAVE_FAST_UNALIGNED 0 +#endif +#endif /* AVUTIL_AVCONFIG_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avstring.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avstring.h new file mode 100644 index 
0000000..9e829af --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avstring.h @@ -0,0 +1,408 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2007 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AVSTRING_H +#define AVUTIL_AVSTRING_H + +#include <stddef.h> +#include <stdint.h> +#include "attributes.h" + +/** + * @addtogroup lavu_string + * @{ + */ + +/** + * Return non-zero if pfx is a prefix of str. If it is, *ptr is set to + * the address of the first character in str after the prefix. + * + * @param str input string + * @param pfx prefix to test + * @param ptr updated if the prefix is matched inside str + * @return non-zero if the prefix matches, zero otherwise + */ +int liteav_av_strstart(const char *str, const char *pfx, const char **ptr); + +/** + * Return non-zero if pfx is a prefix of str independent of case. If + * it is, *ptr is set to the address of the first character in str + * after the prefix. 
+ * + * @param str input string + * @param pfx prefix to test + * @param ptr updated if the prefix is matched inside str + * @return non-zero if the prefix matches, zero otherwise + */ +int liteav_av_stristart(const char *str, const char *pfx, const char **ptr); + +/** + * Locate the first case-independent occurrence in the string haystack + * of the string needle. A zero-length string needle is considered to + * match at the start of haystack. + * + * This function is a case-insensitive version of the standard strstr(). + * + * @param haystack string to search in + * @param needle string to search for + * @return pointer to the located match within haystack + * or a null pointer if no match + */ +char *liteav_av_stristr(const char *haystack, const char *needle); + +/** + * Locate the first occurrence of the string needle in the string haystack + * where not more than hay_length characters are searched. A zero-length + * string needle is considered to match at the start of haystack. + * + * This function is a length-limited version of the standard strstr(). + * + * @param haystack string to search in + * @param needle string to search for + * @param hay_length length of string to search in + * @return pointer to the located match within haystack + * or a null pointer if no match + */ +char *liteav_av_strnstr(const char *haystack, const char *needle, size_t hay_length); + +/** + * Copy the string src to dst, but no more than size - 1 bytes, and + * null-terminate dst. + * + * This function is the same as BSD strlcpy(). + * + * @param dst destination buffer + * @param src source string + * @param size size of destination buffer + * @return the length of src + * + * @warning since the return value is the length of src, src absolutely + * _must_ be a properly 0-terminated string, otherwise this will read beyond + * the end of the buffer and possibly crash. 
+ */ +size_t liteav_av_strlcpy(char *dst, const char *src, size_t size); + +/** + * Append the string src to the string dst, but to a total length of + * no more than size - 1 bytes, and null-terminate dst. + * + * This function is similar to BSD strlcat(), but differs when + * size <= strlen(dst). + * + * @param dst destination buffer + * @param src source string + * @param size size of destination buffer + * @return the total length of src and dst + * + * @warning since the return value use the length of src and dst, these + * absolutely _must_ be a properly 0-terminated strings, otherwise this + * will read beyond the end of the buffer and possibly crash. + */ +size_t liteav_av_strlcat(char *dst, const char *src, size_t size); + +/** + * Append output to a string, according to a format. Never write out of + * the destination buffer, and always put a terminating 0 within + * the buffer. + * @param dst destination buffer (string to which the output is + * appended) + * @param size total size of the destination buffer + * @param fmt printf-compatible format string, specifying how the + * following parameters are used + * @return the length of the string that would have been generated + * if enough space had been available + */ +size_t liteav_av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4); + +/** + * Get the count of continuous non zero chars starting from the beginning. + * + * @param len maximum number of characters to check in the string, that + * is the maximum value which is returned by the function + */ +static inline size_t av_strnlen(const char *s, size_t len) +{ + size_t i; + for (i = 0; i < len && s[i]; i++) + ; + return i; +} + +/** + * Print arguments following specified format into a large enough auto + * allocated buffer. It is similar to GNU asprintf(). + * @param fmt printf-compatible format string, specifying how the + * following parameters are used. 
+ * @return the allocated string + * @note You have to free the string yourself with liteav_av_free(). + */ +char *liteav_av_asprintf(const char *fmt, ...) av_printf_format(1, 2); + +/** + * Convert a number to an av_malloced string. + */ +char *liteav_av_d2str(double d); + +/** + * Unescape the given string until a non escaped terminating char, + * and return the token corresponding to the unescaped string. + * + * The normal \ and ' escaping is supported. Leading and trailing + * whitespaces are removed, unless they are escaped with '\' or are + * enclosed between ''. + * + * @param buf the buffer to parse, buf will be updated to point to the + * terminating char + * @param term a 0-terminated list of terminating chars + * @return the malloced unescaped string, which must be av_freed by + * the user, NULL in case of allocation failure + */ +char *liteav_av_get_token(const char **buf, const char *term); + +/** + * Split the string into several tokens which can be accessed by + * successive calls to liteav_av_strtok(). + * + * A token is defined as a sequence of characters not belonging to the + * set specified in delim. + * + * On the first call to liteav_av_strtok(), s should point to the string to + * parse, and the value of saveptr is ignored. In subsequent calls, s + * should be NULL, and saveptr should be unchanged since the previous + * call. + * + * This function is similar to strtok_r() defined in POSIX.1. + * + * @param s the string to parse, may be NULL + * @param delim 0-terminated list of token delimiters, must be non-NULL + * @param saveptr user-provided pointer which points to stored + * information necessary for liteav_av_strtok() to continue scanning the same + * string. 
saveptr is updated to point to the next character after the + * first delimiter found, or to NULL if the string was terminated + * @return the found token, or NULL when no token is found + */ +char *liteav_av_strtok(char *s, const char *delim, char **saveptr); + +/** + * Locale-independent conversion of ASCII isdigit. + */ +static inline av_const int av_isdigit(int c) +{ + return c >= '0' && c <= '9'; +} + +/** + * Locale-independent conversion of ASCII isgraph. + */ +static inline av_const int av_isgraph(int c) +{ + return c > 32 && c < 127; +} + +/** + * Locale-independent conversion of ASCII isspace. + */ +static inline av_const int av_isspace(int c) +{ + return c == ' ' || c == '\f' || c == '\n' || c == '\r' || c == '\t' || + c == '\v'; +} + +/** + * Locale-independent conversion of ASCII characters to uppercase. + */ +static inline av_const int av_toupper(int c) +{ + if (c >= 'a' && c <= 'z') + c ^= 0x20; + return c; +} + +/** + * Locale-independent conversion of ASCII characters to lowercase. + */ +static inline av_const int av_tolower(int c) +{ + if (c >= 'A' && c <= 'Z') + c ^= 0x20; + return c; +} + +/** + * Locale-independent conversion of ASCII isxdigit. + */ +static inline av_const int av_isxdigit(int c) +{ + c = av_tolower(c); + return av_isdigit(c) || (c >= 'a' && c <= 'f'); +} + +/** + * Locale-independent case-insensitive compare. + * @note This means only ASCII-range characters are case-insensitive + */ +int liteav_av_strcasecmp(const char *a, const char *b); + +/** + * Locale-independent case-insensitive compare. + * @note This means only ASCII-range characters are case-insensitive + */ +int liteav_av_strncasecmp(const char *a, const char *b, size_t n); + +/** + * Locale-independent strings replace. + * @note This means only ASCII-range characters are replace + */ +char *liteav_av_strireplace(const char *str, const char *from, const char *to); + +/** + * Thread safe basename. + * @param path the path, on DOS both \ and / are considered separators. 
+ * @return pointer to the basename substring. + */ +const char *liteav_av_basename(const char *path); + +/** + * Thread safe dirname. + * @param path the path, on DOS both \ and / are considered separators. + * @return the path with the separator replaced by the string terminator or ".". + * @note the function may change the input string. + */ +const char *liteav_av_dirname(char *path); + +/** + * Match instances of a name in a comma-separated list of names. + * List entries are checked from the start to the end of the names list, + * the first match ends further processing. If an entry prefixed with '-' + * matches, then 0 is returned. The "ALL" list entry is considered to + * match all names. + * + * @param name Name to look for. + * @param names List of names. + * @return 1 on match, 0 otherwise. + */ +int liteav_av_match_name(const char *name, const char *names); + +/** + * Append path component to the existing path. + * Path separator '/' is placed between when needed. + * Resulting string have to be freed with liteav_av_free(). + * @param path base path + * @param component component to be appended + * @return new path or NULL on error. + */ +char *liteav_av_append_path_component(const char *path, const char *component); + +enum AVEscapeMode { + AV_ESCAPE_MODE_AUTO, ///< Use auto-selected escaping mode. + AV_ESCAPE_MODE_BACKSLASH, ///< Use backslash escaping. + AV_ESCAPE_MODE_QUOTE, ///< Use single-quote escaping. +}; + +/** + * Consider spaces special and escape them even in the middle of the + * string. + * + * This is equivalent to adding the whitespace characters to the special + * characters lists, except it is guaranteed to use the exact same list + * of whitespace characters as the rest of libavutil. + */ +#define AV_ESCAPE_FLAG_WHITESPACE (1 << 0) + +/** + * Escape only specified special characters. + * Without this flag, escape also any characters that may be considered + * special by liteav_av_get_token(), such as the single quote. 
+ */ +#define AV_ESCAPE_FLAG_STRICT (1 << 1) + +/** + * Escape string in src, and put the escaped string in an allocated + * string in *dst, which must be freed with liteav_av_free(). + * + * @param dst pointer where an allocated string is put + * @param src string to escape, must be non-NULL + * @param special_chars string containing the special characters which + * need to be escaped, can be NULL + * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros. + * Any unknown value for mode will be considered equivalent to + * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without + * notice. + * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_ macros + * @return the length of the allocated string, or a negative error code in case of error + * @see liteav_av_bprint_escape() + */ +av_warn_unused_result +int liteav_av_escape(char **dst, const char *src, const char *special_chars, + enum AVEscapeMode mode, int flags); + +#define AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES 1 ///< accept codepoints over 0x10FFFF +#define AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS 2 ///< accept non-characters - 0xFFFE and 0xFFFF +#define AV_UTF8_FLAG_ACCEPT_SURROGATES 4 ///< accept UTF-16 surrogates codes +#define AV_UTF8_FLAG_EXCLUDE_XML_INVALID_CONTROL_CODES 8 ///< exclude control codes not accepted by XML + +#define AV_UTF8_FLAG_ACCEPT_ALL \ + AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES|AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS|AV_UTF8_FLAG_ACCEPT_SURROGATES + +/** + * Read and decode a single UTF-8 code point (character) from the + * buffer in *buf, and update *buf to point to the next byte to + * decode. + * + * In case of an invalid byte sequence, the pointer will be updated to + * the next byte after the invalid sequence and the function will + * return an error code. + * + * Depending on the specified flags, the function will also fail in + * case the decoded code point does not belong to a valid range. 
+ * + * @note For speed-relevant code a carefully implemented use of + * GET_UTF8() may be preferred. + * + * @param codep pointer used to return the parsed code in case of success. + * The value in *codep is set even in case the range check fails. + * @param bufp pointer to the address the first byte of the sequence + * to decode, updated by the function to point to the + * byte next after the decoded sequence + * @param buf_end pointer to the end of the buffer, points to the next + * byte past the last in the buffer. This is used to + * avoid buffer overreads (in case of an unfinished + * UTF-8 sequence towards the end of the buffer). + * @param flags a collection of AV_UTF8_FLAG_* flags + * @return >= 0 in case a sequence was successfully read, a negative + * value in case of invalid sequence + */ +av_warn_unused_result +int liteav_av_utf8_decode(int32_t *codep, const uint8_t **bufp, const uint8_t *buf_end, + unsigned int flags); + +/** + * Check if a name is in a list. + * @returns 0 if not found, or the 1 based index where it has been found in the + * list. + */ +int liteav_av_match_list(const char *name, const char *list, char separator); + +/** + * @} + */ + +#endif /* AVUTIL_AVSTRING_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avutil.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avutil.h new file mode 100644 index 0000000..1f5d53f --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/avutil.h @@ -0,0 +1,366 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AVUTIL_H +#define AVUTIL_AVUTIL_H + +/** + * @file + * @ingroup lavu + * Convenience header that includes @ref lavu "libavutil"'s core. + */ + +/** + * @mainpage + * + * @section ffmpeg_intro Introduction + * + * This document describes the usage of the different libraries + * provided by FFmpeg. + * + * @li @ref libavc "libavcodec" encoding/decoding library + * @li @ref lavfi "libavfilter" graph-based frame editing library + * @li @ref libavf "libavformat" I/O and muxing/demuxing library + * @li @ref lavd "libavdevice" special devices muxing/demuxing library + * @li @ref lavu "libavutil" common utility library + * @li @ref lswr "libswresample" audio resampling, format conversion and mixing + * @li @ref lpp "libpostproc" post processing library + * @li @ref libsws "libswscale" color conversion and scaling library + * + * @section ffmpeg_versioning Versioning and compatibility + * + * Each of the FFmpeg libraries contains a version.h header, which defines a + * major, minor and micro version number with the + * <em>LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO}</em> macros. The major version + * number is incremented with backward incompatible changes - e.g. 
removing + * parts of the public API, reordering public struct members, etc. The minor + * version number is incremented for backward compatible API changes or major + * new features - e.g. adding a new public function or a new decoder. The micro + * version number is incremented for smaller changes that a calling program + * might still want to check for - e.g. changing behavior in a previously + * unspecified situation. + * + * FFmpeg guarantees backward API and ABI compatibility for each library as long + * as its major version number is unchanged. This means that no public symbols + * will be removed or renamed. Types and names of the public struct members and + * values of public macros and enums will remain the same (unless they were + * explicitly declared as not part of the public API). Documented behavior will + * not change. + * + * In other words, any correct program that works with a given FFmpeg snapshot + * should work just as well without any changes with any later snapshot with the + * same major versions. This applies to both rebuilding the program against new + * FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program + * links against. + * + * However, new public symbols may be added and new members may be appended to + * public structs whose size is not part of public ABI (most public structs in + * FFmpeg). New macros and enum values may be added. Behavior in undocumented + * situations may change slightly (and be documented). All those are accompanied + * by an entry in doc/APIchanges and incrementing either the minor or micro + * version number. + */ + +/** + * @defgroup lavu libavutil + * Common code shared across all FFmpeg libraries. + * + * @note + * libavutil is designed to be modular. In most cases, in order to use the + * functions provided by one component of libavutil you must explicitly include + * the specific header containing that feature. 
If you are only using + * media-related components, you could simply include libavutil/avutil.h, which + * brings in most of the "core" components. + * + * @{ + * + * @defgroup lavu_crypto Crypto and Hashing + * + * @{ + * @} + * + * @defgroup lavu_math Mathematics + * @{ + * + * @} + * + * @defgroup lavu_string String Manipulation + * + * @{ + * + * @} + * + * @defgroup lavu_mem Memory Management + * + * @{ + * + * @} + * + * @defgroup lavu_data Data Structures + * @{ + * + * @} + * + * @defgroup lavu_video Video related + * + * @{ + * + * @} + * + * @defgroup lavu_audio Audio related + * + * @{ + * + * @} + * + * @defgroup lavu_error Error Codes + * + * @{ + * + * @} + * + * @defgroup lavu_log Logging Facility + * + * @{ + * + * @} + * + * @defgroup lavu_misc Other + * + * @{ + * + * @defgroup preproc_misc Preprocessor String Macros + * + * @{ + * + * @} + * + * @defgroup version_utils Library Version Macros + * + * @{ + * + * @} + */ + + +/** + * @addtogroup lavu_ver + * @{ + */ + +/** + * Return the LIBAVUTIL_VERSION_INT constant. + */ +unsigned avutil_version(void); + +/** + * Return an informative version string. This usually is the actual release + * version number or a git commit description. This string has no fixed format + * and can change any time. It should never be parsed by code. + */ +const char *av_version_info(void); + +/** + * Return the libavutil build-time configuration. + */ +const char *avutil_configuration(void); + +/** + * Return the libavutil license. 
+ */ +const char *avutil_license(void); + +/** + * @} + */ + +/** + * @addtogroup lavu_media Media Type + * @brief Media Type + */ + +enum AVMediaType { + AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA + AVMEDIA_TYPE_VIDEO, + AVMEDIA_TYPE_AUDIO, + AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous + AVMEDIA_TYPE_SUBTITLE, + AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse + AVMEDIA_TYPE_NB +}; + +/** + * Return a string describing the media_type enum, NULL if media_type + * is unknown. + */ +const char *av_get_media_type_string(enum AVMediaType media_type); + +/** + * @defgroup lavu_const Constants + * @{ + * + * @defgroup lavu_enc Encoding specific + * + * @note those definition should move to avcodec + * @{ + */ + +#define FF_LAMBDA_SHIFT 7 +#define FF_LAMBDA_SCALE (1<<FF_LAMBDA_SHIFT) +#define FF_QP2LAMBDA 118 ///< factor to convert from H.263 QP to lambda +#define FF_LAMBDA_MAX (256*128-1) + +#define FF_QUALITY_SCALE FF_LAMBDA_SCALE //FIXME maybe remove + +/** + * @} + * @defgroup lavu_time Timestamp specific + * + * FFmpeg internal timebase and timestamp definitions + * + * @{ + */ + +/** + * @brief Undefined timestamp value + * + * Usually reported by demuxer that work on containers that do not provide + * either pts or dts. + */ + +#define AV_NOPTS_VALUE ((int64_t)UINT64_C(0x8000000000000000)) + +/** + * Internal time base represented as integer + */ + +#define AV_TIME_BASE 1000000 + +/** + * Internal time base represented as fractional value + */ + +#define AV_TIME_BASE_Q (AVRational){1, AV_TIME_BASE} + +/** + * @} + * @} + * @defgroup lavu_picture Image related + * + * AVPicture types, pixel formats and basic image planes manipulation. 
+ * + * @{ + */ + +enum AVPictureType { + AV_PICTURE_TYPE_NONE = 0, ///< Undefined + AV_PICTURE_TYPE_I, ///< Intra + AV_PICTURE_TYPE_P, ///< Predicted + AV_PICTURE_TYPE_B, ///< Bi-dir predicted + AV_PICTURE_TYPE_S, ///< S(GMC)-VOP MPEG-4 + AV_PICTURE_TYPE_SI, ///< Switching Intra + AV_PICTURE_TYPE_SP, ///< Switching Predicted + AV_PICTURE_TYPE_BI, ///< BI type +}; + +/** + * Return a single letter to describe the given picture type + * pict_type. + * + * @param[in] pict_type the picture type @return a single character + * representing the picture type, '?' if pict_type is unknown + */ +char av_get_picture_type_char(enum AVPictureType pict_type); + +/** + * @} + */ + +#include "common.h" +#include "error.h" +#include "rational.h" +#include "version.h" +#include "macros.h" +#include "mathematics.h" +#include "log.h" +#include "pixfmt.h" + +/** + * Return x default pointer in case p is NULL. + */ +static inline void *av_x_if_null(const void *p, const void *x) +{ + return (void *)(intptr_t)(p ? p : x); +} + +/** + * Compute the length of an integer list. + * + * @param elsize size in bytes of each list element (only 1, 2, 4 or 8) + * @param term list terminator (usually 0 or -1) + * @param list pointer to the list + * @return length of the list, in elements, not counting the terminator + */ +unsigned av_int_list_length_for_size(unsigned elsize, + const void *list, uint64_t term) av_pure; + +/** + * Compute the length of an integer list. + * + * @param term list terminator (usually 0 or -1) + * @param list pointer to the list + * @return length of the list, in elements, not counting the terminator + */ +#define av_int_list_length(list, term) \ + av_int_list_length_for_size(sizeof(*(list)), list, term) + +/** + * Open a file using a UTF-8 filename. + * The API of this function matches POSIX fopen(), errors are returned through + * errno. 
+ */ +FILE *liteav_av_fopen_utf8(const char *path, const char *mode); + +/** + * Return the fractional representation of the internal time base. + */ +AVRational av_get_time_base_q(void); + +#define AV_FOURCC_MAX_STRING_SIZE 32 + +#define av_fourcc2str(fourcc) av_fourcc_make_string((char[AV_FOURCC_MAX_STRING_SIZE]){0}, fourcc) + +/** + * Fill the provided buffer with a string containing a FourCC (four-character + * code) representation. + * + * @param buf a buffer with size in bytes of at least AV_FOURCC_MAX_STRING_SIZE + * @param fourcc the fourcc to represent + * @return the buffer in input + */ +char *av_fourcc_make_string(char *buf, uint32_t fourcc); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_AVUTIL_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/base64.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/base64.h new file mode 100644 index 0000000..29bf711 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/base64.h @@ -0,0 +1,73 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com) + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_BASE64_H +#define AVUTIL_BASE64_H + +#include <stdint.h> + +/** + * @defgroup lavu_base64 Base64 + * @ingroup lavu_crypto + * @{ + */ + +/** + * Decode a base64-encoded string. + * + * @param out buffer for decoded data + * @param in null-terminated input string + * @param out_size size in bytes of the out buffer, must be at + * least 3/4 of the length of in, that is AV_BASE64_DECODE_SIZE(strlen(in)) + * @return number of bytes written, or a negative value in case of + * invalid input + */ +int liteav_av_base64_decode(uint8_t *out, const char *in, int out_size); + +/** + * Calculate the output size in bytes needed to decode a base64 string + * with length x to a data buffer. + */ +#define AV_BASE64_DECODE_SIZE(x) ((x) * 3LL / 4) + +/** + * Encode data to base64 and null-terminate. + * + * @param out buffer for encoded data + * @param out_size size in bytes of the out buffer (including the + * null terminator), must be at least AV_BASE64_SIZE(in_size) + * @param in input buffer containing the data to encode + * @param in_size size in bytes of the in buffer + * @return out or NULL in case of error + */ +char *liteav_av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size); + +/** + * Calculate the output size needed to base64-encode x bytes to a + * null-terminated string. 
+ */ +#define AV_BASE64_SIZE(x) (((x)+2) / 3 * 4 + 1) + + /** + * @} + */ + +#endif /* AVUTIL_BASE64_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/blowfish.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/blowfish.h new file mode 100644 index 0000000..e4b9f6f --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/blowfish.h @@ -0,0 +1,83 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Blowfish algorithm + * Copyright (c) 2012 Samuel Pitoiset + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_BLOWFISH_H +#define AVUTIL_BLOWFISH_H + +#include <stdint.h> + +/** + * @defgroup lavu_blowfish Blowfish + * @ingroup lavu_crypto + * @{ + */ + +#define AV_BF_ROUNDS 16 + +typedef struct AVBlowfish { + uint32_t p[AV_BF_ROUNDS + 2]; + uint32_t s[4][256]; +} AVBlowfish; + +/** + * Allocate an AVBlowfish context. + */ +AVBlowfish *liteav_av_blowfish_alloc(void); + +/** + * Initialize an AVBlowfish context. 
+ * + * @param ctx an AVBlowfish context + * @param key a key + * @param key_len length of the key + */ +void liteav_av_blowfish_init(struct AVBlowfish *ctx, const uint8_t *key, int key_len); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVBlowfish context + * @param xl left four bytes halves of input to be encrypted + * @param xr right four bytes halves of input to be encrypted + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_blowfish_crypt_ecb(struct AVBlowfish *ctx, uint32_t *xl, uint32_t *xr, + int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVBlowfish context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_blowfish_crypt(struct AVBlowfish *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_BLOWFISH_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/bprint.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/bprint.h new file mode 100644 index 0000000..23b028a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/bprint.h @@ -0,0 +1,220 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2012 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_BPRINT_H +#define AVUTIL_BPRINT_H + +#include <stdarg.h> + +#include "attributes.h" +#include "avstring.h" + +/** + * Define a structure with extra padding to a fixed size + * This helps ensuring binary compatibility with future versions. + */ + +#define FF_PAD_STRUCTURE(name, size, ...) \ +struct liteav_ff_pad_helper_##name { __VA_ARGS__ }; \ +typedef struct name { \ + __VA_ARGS__ \ + char reserved_padding[size - sizeof(struct liteav_ff_pad_helper_##name)]; \ +} name; + +/** + * Buffer to print data progressively + * + * The string buffer grows as necessary and is always 0-terminated. + * The content of the string is never accessed, and thus is + * encoding-agnostic and can even hold binary data. + * + * Small buffers are kept in the structure itself, and thus require no + * memory allocation at all (unless the contents of the buffer is needed + * after the structure goes out of scope). This is almost as lightweight as + * declaring a local "char buf[512]". + * + * The length of the string can go beyond the allocated size: the buffer is + * then truncated, but the functions still keep account of the actual total + * length. + * + * In other words, buf->len can be greater than buf->size and records the + * total length of what would have been to the buffer if there had been + * enough memory. + * + * Append operations do not need to be tested for failure: if a memory + * allocation fails, data stop being appended to the buffer, but the length + * is still updated. 
This situation can be tested with + * av_bprint_is_complete(). + * + * The size_max field determines several possible behaviours: + * + * size_max = -1 (= UINT_MAX) or any large value will let the buffer be + * reallocated as necessary, with an amortized linear cost. + * + * size_max = 0 prevents writing anything to the buffer: only the total + * length is computed. The write operations can then possibly be repeated in + * a buffer with exactly the necessary size + * (using size_init = size_max = len + 1). + * + * size_max = 1 is automatically replaced by the exact size available in the + * structure itself, thus ensuring no dynamic memory allocation. The + * internal buffer is large enough to hold a reasonable paragraph of text, + * such as the current paragraph. + */ + +FF_PAD_STRUCTURE(AVBPrint, 1024, + char *str; /**< string so far */ + unsigned len; /**< length so far */ + unsigned size; /**< allocated memory */ + unsigned size_max; /**< maximum allocated memory */ + char reserved_internal_buffer[1]; +) + +/** + * Convenience macros for special values for liteav_av_bprint_init() size_max + * parameter. + */ +#define AV_BPRINT_SIZE_UNLIMITED ((unsigned)-1) +#define AV_BPRINT_SIZE_AUTOMATIC 1 +#define AV_BPRINT_SIZE_COUNT_ONLY 0 + +/** + * Init a print buffer. + * + * @param buf buffer to init + * @param size_init initial size (including the final 0) + * @param size_max maximum size; + * 0 means do not write anything, just count the length; + * 1 is replaced by the maximum value for automatic storage; + * any large value means that the internal buffer will be + * reallocated as needed up to that limit; -1 is converted to + * UINT_MAX, the largest limit possible. + * Check also AV_BPRINT_SIZE_* macros. + */ +void liteav_av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max); + +/** + * Init a print buffer using a pre-existing buffer. + * + * The buffer will not be reallocated. 
+ * + * @param buf buffer structure to init + * @param buffer byte buffer to use for the string data + * @param size size of buffer + */ +void liteav_av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size); + +/** + * Append a formatted string to a print buffer. + */ +void liteav_av_bprintf(AVBPrint *buf, const char *fmt, ...) av_printf_format(2, 3); + +/** + * Append a formatted string to a print buffer. + */ +void liteav_av_vbprintf(AVBPrint *buf, const char *fmt, va_list vl_arg); + +/** + * Append char c n times to a print buffer. + */ +void liteav_av_bprint_chars(AVBPrint *buf, char c, unsigned n); + +/** + * Append data to a print buffer. + * + * param buf bprint buffer to use + * param data pointer to data + * param size size of data + */ +void liteav_av_bprint_append_data(AVBPrint *buf, const char *data, unsigned size); + +struct tm; +/** + * Append a formatted date and time to a print buffer. + * + * param buf bprint buffer to use + * param fmt date and time format string, see strftime() + * param tm broken-down time structure to translate + * + * @note due to poor design of the standard strftime function, it may + * produce poor results if the format string expands to a very long text and + * the bprint buffer is near the limit stated by the size_max option. + */ +void liteav_av_bprint_strftime(AVBPrint *buf, const char *fmt, const struct tm *tm); + +/** + * Allocate bytes in the buffer for external use. + * + * @param[in] buf buffer structure + * @param[in] size required size + * @param[out] mem pointer to the memory area + * @param[out] actual_size size of the memory area after allocation; + * can be larger or smaller than size + */ +void liteav_av_bprint_get_buffer(AVBPrint *buf, unsigned size, + unsigned char **mem, unsigned *actual_size); + +/** + * Reset the string to "" but keep internal allocated data. + */ +void liteav_av_bprint_clear(AVBPrint *buf); + +/** + * Test if the print buffer is complete (not truncated). 
+ * + * It may have been truncated due to a memory allocation failure + * or the size_max limit (compare size and size_max if necessary). + */ +static inline int av_bprint_is_complete(const AVBPrint *buf) +{ + return buf->len < buf->size; +} + +/** + * Finalize a print buffer. + * + * The print buffer can no longer be used afterwards, + * but the len and size fields are still valid. + * + * @arg[out] ret_str if not NULL, used to return a permanent copy of the + * buffer contents, or NULL if memory allocation fails; + * if NULL, the buffer is discarded and freed + * @return 0 for success or error code (probably AVERROR(ENOMEM)) + */ +int liteav_av_bprint_finalize(AVBPrint *buf, char **ret_str); + +/** + * Escape the content in src and append it to dstbuf. + * + * @param dstbuf already inited destination bprint buffer + * @param src string containing the text to escape + * @param special_chars string containing the special characters which + * need to be escaped, can be NULL + * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros. + * Any unknown value for mode will be considered equivalent to + * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without + * notice. + * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_* macros + */ +void liteav_av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_chars, + enum AVEscapeMode mode, int flags); + +#endif /* AVUTIL_BPRINT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/bswap.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/bswap.h new file mode 100644 index 0000000..6254af7 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/bswap.h @@ -0,0 +1,109 @@ +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * byte swapping routines + */ + +#ifndef AVUTIL_BSWAP_H +#define AVUTIL_BSWAP_H + +#include <stdint.h> +#include "libavutil/avconfig.h" +#include "attributes.h" + +#ifdef HAVE_AV_CONFIG_H + +#include "config.h" + +#if ARCH_AARCH64 +# include "aarch64/bswap.h" +#elif ARCH_ARM +# include "arm/bswap.h" +#elif ARCH_AVR32 +# include "avr32/bswap.h" +#elif ARCH_SH4 +# include "sh4/bswap.h" +#elif ARCH_X86 +# include "x86/bswap.h" +#endif + +#endif /* HAVE_AV_CONFIG_H */ + +#define AV_BSWAP16C(x) (((x) << 8 & 0xff00) | ((x) >> 8 & 0x00ff)) +#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16)) +#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32)) + +#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x) + +#ifndef av_bswap16 +static av_always_inline av_const uint16_t av_bswap16(uint16_t x) +{ + x= (x>>8) | (x<<8); + return x; +} +#endif + +#ifndef av_bswap32 +static av_always_inline av_const uint32_t av_bswap32(uint32_t x) +{ + return AV_BSWAP32C(x); +} +#endif + +#ifndef av_bswap64 +static inline uint64_t av_const av_bswap64(uint64_t x) +{ + return (uint64_t)av_bswap32((uint32_t)x) << 32 | av_bswap32((uint32_t)(x >> 32)); +} +#endif + +// be2ne ... big-endian to native-endian +// le2ne ... 
little-endian to native-endian + +#if AV_HAVE_BIGENDIAN +#define av_be2ne16(x) (x) +#define av_be2ne32(x) (x) +#define av_be2ne64(x) (x) +#define av_le2ne16(x) av_bswap16(x) +#define av_le2ne32(x) av_bswap32(x) +#define av_le2ne64(x) av_bswap64(x) +#define AV_BE2NEC(s, x) (x) +#define AV_LE2NEC(s, x) AV_BSWAPC(s, x) +#else +#define av_be2ne16(x) av_bswap16(x) +#define av_be2ne32(x) av_bswap32(x) +#define av_be2ne64(x) av_bswap64(x) +#define av_le2ne16(x) (x) +#define av_le2ne32(x) (x) +#define av_le2ne64(x) (x) +#define AV_BE2NEC(s, x) AV_BSWAPC(s, x) +#define AV_LE2NEC(s, x) (x) +#endif + +#define AV_BE2NE16C(x) AV_BE2NEC(16, x) +#define AV_BE2NE32C(x) AV_BE2NEC(32, x) +#define AV_BE2NE64C(x) AV_BE2NEC(64, x) +#define AV_LE2NE16C(x) AV_LE2NEC(16, x) +#define AV_LE2NE32C(x) AV_LE2NEC(32, x) +#define AV_LE2NE64C(x) AV_LE2NEC(64, x) + +#endif /* AVUTIL_BSWAP_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/buffer.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/buffer.h new file mode 100644 index 0000000..c26fb4d --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/buffer.h @@ -0,0 +1,292 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_buffer + * refcounted data buffer API + */ + +#ifndef AVUTIL_BUFFER_H +#define AVUTIL_BUFFER_H + +#include <stdint.h> + +/** + * @defgroup lavu_buffer AVBuffer + * @ingroup lavu_data + * + * @{ + * AVBuffer is an API for reference-counted data buffers. + * + * There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer + * represents the data buffer itself; it is opaque and not meant to be accessed + * by the caller directly, but only through AVBufferRef. However, the caller may + * e.g. compare two AVBuffer pointers to check whether two different references + * are describing the same data buffer. AVBufferRef represents a single + * reference to an AVBuffer and it is the object that may be manipulated by the + * caller directly. + * + * There are two functions provided for creating a new AVBuffer with a single + * reference -- liteav_av_buffer_alloc() to just allocate a new buffer, and + * liteav_av_buffer_create() to wrap an existing array in an AVBuffer. From an existing + * reference, additional references may be created with liteav_av_buffer_ref(). + * Use liteav_av_buffer_unref() to free a reference (this will automatically free the + * data once all the references are freed). + * + * The convention throughout this API and the rest of FFmpeg is such that the + * buffer is considered writable if there exists only one reference to it (and + * it has not been marked as read-only). The liteav_av_buffer_is_writable() function is + * provided to check whether this is true and liteav_av_buffer_make_writable() will + * automatically create a new writable buffer when necessary. 
+ * Of course nothing prevents the calling code from violating this convention, + * however that is safe only when all the existing references are under its + * control. + * + * @note Referencing and unreferencing the buffers is thread-safe and thus + * may be done from multiple threads simultaneously without any need for + * additional locking. + * + * @note Two different references to the same buffer can point to different + * parts of the buffer (i.e. their AVBufferRef.data will not be equal). + */ + +/** + * A reference counted buffer type. It is opaque and is meant to be used through + * references (AVBufferRef). + */ +typedef struct AVBuffer AVBuffer; + +/** + * A reference to a data buffer. + * + * The size of this struct is not a part of the public ABI and it is not meant + * to be allocated directly. + */ +typedef struct AVBufferRef { + AVBuffer *buffer; + + /** + * The data buffer. It is considered writable if and only if + * this is the only reference to the buffer, in which case + * liteav_av_buffer_is_writable() returns 1. + */ + uint8_t *data; + /** + * Size of data in bytes. + */ + int size; +} AVBufferRef; + +/** + * Allocate an AVBuffer of the given size using liteav_av_malloc(). + * + * @return an AVBufferRef of given size or NULL when out of memory + */ +AVBufferRef *liteav_av_buffer_alloc(int size); + +/** + * Same as liteav_av_buffer_alloc(), except the returned buffer will be initialized + * to zero. + */ +AVBufferRef *liteav_av_buffer_allocz(int size); + +/** + * Always treat the buffer as read-only, even when it has only one + * reference. + */ +#define AV_BUFFER_FLAG_READONLY (1 << 0) + +/** + * Create an AVBuffer from an existing array. + * + * If this function is successful, data is owned by the AVBuffer. The caller may + * only access data through the returned AVBufferRef and references derived from + * it. + * If this function fails, data is left untouched. 
+ * @param data data array + * @param size size of data in bytes + * @param free a callback for freeing this buffer's data + * @param opaque parameter to be got for processing or passed to free + * @param flags a combination of AV_BUFFER_FLAG_* + * + * @return an AVBufferRef referring to data on success, NULL on failure. + */ +AVBufferRef *liteav_av_buffer_create(uint8_t *data, int size, + void (*free)(void *opaque, uint8_t *data), + void *opaque, int flags); + +/** + * Default free callback, which calls liteav_av_free() on the buffer data. + * This function is meant to be passed to liteav_av_buffer_create(), not called + * directly. + */ +void liteav_av_buffer_default_free(void *opaque, uint8_t *data); + +/** + * Create a new reference to an AVBuffer. + * + * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on + * failure. + */ +AVBufferRef *liteav_av_buffer_ref(AVBufferRef *buf); + +/** + * Free a given reference and automatically free the buffer if there are no more + * references to it. + * + * @param buf the reference to be freed. The pointer is set to NULL on return. + */ +void liteav_av_buffer_unref(AVBufferRef **buf); + +/** + * @return 1 if the caller may write to the data referred to by buf (which is + * true if and only if buf is the only reference to the underlying AVBuffer). + * Return 0 otherwise. + * A positive answer is valid until liteav_av_buffer_ref() is called on buf. + */ +int liteav_av_buffer_is_writable(const AVBufferRef *buf); + +/** + * @return the opaque parameter set by liteav_av_buffer_create. + */ +void *liteav_av_buffer_get_opaque(const AVBufferRef *buf); + +int liteav_av_buffer_get_ref_count(const AVBufferRef *buf); + +/** + * Create a writable reference from a given buffer reference, avoiding data copy + * if possible. + * + * @param buf buffer reference to make writable. On success, buf is either left + * untouched, or it is unreferenced and a new writable AVBufferRef is + * written in its place. 
On failure, buf is left untouched. + * @return 0 on success, a negative AVERROR on failure. + */ +int liteav_av_buffer_make_writable(AVBufferRef **buf); + +/** + * Reallocate a given buffer. + * + * @param buf a buffer reference to reallocate. On success, buf will be + * unreferenced and a new reference with the required size will be + * written in its place. On failure buf will be left untouched. *buf + * may be NULL, then a new buffer is allocated. + * @param size required new buffer size. + * @return 0 on success, a negative AVERROR on failure. + * + * @note the buffer is actually reallocated with liteav_av_realloc() only if it was + * initially allocated through liteav_av_buffer_realloc(NULL) and there is only one + * reference to it (i.e. the one passed to this function). In all other cases + * a new buffer is allocated and the data is copied. + */ +int liteav_av_buffer_realloc(AVBufferRef **buf, int size); + +/** + * @} + */ + +/** + * @defgroup lavu_bufferpool AVBufferPool + * @ingroup lavu_data + * + * @{ + * AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers. + * + * Frequently allocating and freeing large buffers may be slow. AVBufferPool is + * meant to solve this in cases when the caller needs a set of buffers of the + * same size (the most obvious use case being buffers for raw video or audio + * frames). + * + * At the beginning, the user must call liteav_av_buffer_pool_init() to create the + * buffer pool. Then whenever a buffer is needed, call liteav_av_buffer_pool_get() to + * get a reference to a new buffer, similar to liteav_av_buffer_alloc(). This new + * reference works in all aspects the same way as the one created by + * liteav_av_buffer_alloc(). However, when the last reference to this buffer is + * unreferenced, it is returned to the pool instead of being freed and will be + * reused for subsequent liteav_av_buffer_pool_get() calls. 
+ * + * When the caller is done with the pool and no longer needs to allocate any new + * buffers, liteav_av_buffer_pool_uninit() must be called to mark the pool as freeable. + * Once all the buffers are released, it will automatically be freed. + * + * Allocating and releasing buffers with this API is thread-safe as long as + * either the default alloc callback is used, or the user-supplied one is + * thread-safe. + */ + +/** + * The buffer pool. This structure is opaque and not meant to be accessed + * directly. It is allocated with liteav_av_buffer_pool_init() and freed with + * liteav_av_buffer_pool_uninit(). + */ +typedef struct AVBufferPool AVBufferPool; + +/** + * Allocate and initialize a buffer pool. + * + * @param size size of each buffer in this pool + * @param alloc a function that will be used to allocate new buffers when the + * pool is empty. May be NULL, then the default allocator will be used + * (liteav_av_buffer_alloc()). + * @return newly created buffer pool on success, NULL on error. + */ +AVBufferPool *liteav_av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size)); + +/** + * Allocate and initialize a buffer pool with a more complex allocator. + * + * @param size size of each buffer in this pool + * @param opaque arbitrary user data used by the allocator + * @param alloc a function that will be used to allocate new buffers when the + * pool is empty. + * @param pool_free a function that will be called immediately before the pool + * is freed. I.e. after liteav_av_buffer_pool_uninit() is called + * by the caller and all the frames are returned to the pool + * and freed. It is intended to uninitialize the user opaque + * data. + * @return newly created buffer pool on success, NULL on error. + */ +AVBufferPool *liteav_av_buffer_pool_init2(int size, void *opaque, + AVBufferRef* (*alloc)(void *opaque, int size), + void (*pool_free)(void *opaque)); + +/** + * Mark the pool as being available for freeing. 
It will actually be freed only + * once all the allocated buffers associated with the pool are released. Thus it + * is safe to call this function while some of the allocated buffers are still + * in use. + * + * @param pool pointer to the pool to be freed. It will be set to NULL. + */ +void liteav_av_buffer_pool_uninit(AVBufferPool **pool); + +/** + * Allocate a new AVBuffer, reusing an old buffer from the pool when available. + * This function may be called simultaneously from multiple threads. + * + * @return a reference to the new buffer on success, NULL on error. + */ +AVBufferRef *liteav_av_buffer_pool_get(AVBufferPool *pool); + +/** + * @} + */ + +#endif /* AVUTIL_BUFFER_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/camellia.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/camellia.h new file mode 100644 index 0000000..9f2a6e4 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/camellia.h @@ -0,0 +1,71 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * An implementation of the CAMELLIA algorithm as mentioned in RFC3713 + * Copyright (c) 2014 Supraja Meedinti + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CAMELLIA_H +#define AVUTIL_CAMELLIA_H + +#include <stdint.h> + + +/** + * @file + * @brief Public header for libavutil CAMELLIA algorithm + * @defgroup lavu_camellia CAMELLIA + * @ingroup lavu_crypto + * @{ + */ + +extern const int liteav_av_camellia_size; + +struct AVCAMELLIA; + +/** + * Allocate an AVCAMELLIA context + * To free the struct: liteav_av_free(ptr) + */ +struct AVCAMELLIA *liteav_av_camellia_alloc(void); + +/** + * Initialize an AVCAMELLIA context. + * + * @param ctx an AVCAMELLIA context + * @param key a key of 16, 24, 32 bytes used for encryption/decryption + * @param key_bits number of keybits: possible are 128, 192, 256 + */ +int liteav_av_camellia_init(struct AVCAMELLIA *ctx, const uint8_t *key, int key_bits); + +/** + * Encrypt or decrypt a buffer using a previously initialized context + * + * @param ctx an AVCAMELLIA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 16 byte blocks + * @param iv initialization vector for CBC mode, NULL for ECB mode + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_camellia_crypt(struct AVCAMELLIA *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t* iv, int decrypt); + +/** + * @} + */ +#endif /* AVUTIL_CAMELLIA_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/cast5.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/cast5.h new file mode 100644 index 0000000..62378cb --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/cast5.h @@ -0,0 +1,81 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by
source_replacer.py +/* + * An implementation of the CAST128 algorithm as mentioned in RFC2144 + * Copyright (c) 2014 Supraja Meedinti + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CAST5_H +#define AVUTIL_CAST5_H + +#include <stdint.h> + + +/** + * @file + * @brief Public header for libavutil CAST5 algorithm + * @defgroup lavu_cast5 CAST5 + * @ingroup lavu_crypto + * @{ + */ + +extern const int liteav_av_cast5_size; + +struct AVCAST5; + +/** + * Allocate an AVCAST5 context + * To free the struct: liteav_av_free(ptr) + */ +struct AVCAST5 *liteav_av_cast5_alloc(void); +/** + * Initialize an AVCAST5 context. 
+ * + * @param ctx an AVCAST5 context + * @param key a key of 5,6,...16 bytes used for encryption/decryption + * @param key_bits number of keybits: possible are 40,48,...,128 + * @return 0 on success, less than 0 on failure + */ +int liteav_av_cast5_init(struct AVCAST5 *ctx, const uint8_t *key, int key_bits); + +/** + * Encrypt or decrypt a buffer using a previously initialized context, ECB mode only + * + * @param ctx an AVCAST5 context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_cast5_crypt(struct AVCAST5 *ctx, uint8_t *dst, const uint8_t *src, int count, int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context + * + * @param ctx an AVCAST5 context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, NULL for ECB mode + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_cast5_crypt2(struct AVCAST5 *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); +/** + * @} + */ +#endif /* AVUTIL_CAST5_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/channel_layout.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/channel_layout.h new file mode 100644 index 0000000..62a327b --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/channel_layout.h @@ -0,0 +1,233 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * Copyright (c) 2008 Peter Ross + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CHANNEL_LAYOUT_H +#define AVUTIL_CHANNEL_LAYOUT_H + +#include <stdint.h> + +/** + * @file + * audio channel layout utility functions + */ + +/** + * @addtogroup lavu_audio + * @{ + */ + +/** + * @defgroup channel_masks Audio channel masks + * + * A channel layout is a 64-bits integer with a bit set for every channel. + * The number of bits set must be equal to the number of channels. + * The value 0 means that the channel layout is not known. + * @note this data structure is not powerful enough to handle channels + * combinations that have the same channel multiple times, such as + * dual-mono. 
+ * + * @{ + */ +#define AV_CH_FRONT_LEFT 0x00000001 +#define AV_CH_FRONT_RIGHT 0x00000002 +#define AV_CH_FRONT_CENTER 0x00000004 +#define AV_CH_LOW_FREQUENCY 0x00000008 +#define AV_CH_BACK_LEFT 0x00000010 +#define AV_CH_BACK_RIGHT 0x00000020 +#define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040 +#define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080 +#define AV_CH_BACK_CENTER 0x00000100 +#define AV_CH_SIDE_LEFT 0x00000200 +#define AV_CH_SIDE_RIGHT 0x00000400 +#define AV_CH_TOP_CENTER 0x00000800 +#define AV_CH_TOP_FRONT_LEFT 0x00001000 +#define AV_CH_TOP_FRONT_CENTER 0x00002000 +#define AV_CH_TOP_FRONT_RIGHT 0x00004000 +#define AV_CH_TOP_BACK_LEFT 0x00008000 +#define AV_CH_TOP_BACK_CENTER 0x00010000 +#define AV_CH_TOP_BACK_RIGHT 0x00020000 +#define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix. +#define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT. +#define AV_CH_WIDE_LEFT 0x0000000080000000ULL +#define AV_CH_WIDE_RIGHT 0x0000000100000000ULL +#define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL +#define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL +#define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL + +/** Channel mask value used for AVCodecContext.request_channel_layout + to indicate that the user requests the channel order of the decoder output + to be the native codec channel order. 
*/ +#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL + +/** + * @} + * @defgroup channel_mask_c Audio channel layouts + * @{ + * */ +#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER) +#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT) +#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER) +#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_2_2 (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) +#define AV_CH_LAYOUT_QUAD (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_5POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) +#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_5POINT0_BACK (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_5POINT1_BACK (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT0_FRONT (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1_FRONT (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_7POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_7POINT0_FRONT (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_7POINT1_WIDE 
(AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_HEXADECAGONAL (AV_CH_LAYOUT_OCTAGONAL|AV_CH_WIDE_LEFT|AV_CH_WIDE_RIGHT|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT|AV_CH_TOP_BACK_CENTER|AV_CH_TOP_FRONT_CENTER|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT) +#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT) + +enum AVMatrixEncoding { + AV_MATRIX_ENCODING_NONE, + AV_MATRIX_ENCODING_DOLBY, + AV_MATRIX_ENCODING_DPLII, + AV_MATRIX_ENCODING_DPLIIX, + AV_MATRIX_ENCODING_DPLIIZ, + AV_MATRIX_ENCODING_DOLBYEX, + AV_MATRIX_ENCODING_DOLBYHEADPHONE, + AV_MATRIX_ENCODING_NB +}; + +/** + * Return a channel layout id that matches name, or 0 if no match is found. + * + * name can be one or several of the following notations, + * separated by '+' or '|': + * - the name of an usual channel layout (mono, stereo, 4.0, quad, 5.0, + * 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix); + * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC, + * SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR); + * - a number of channels, in decimal, followed by 'c', yielding + * the default channel layout for that number of channels (@see + * liteav_av_get_default_channel_layout); + * - a channel layout mask, in hexadecimal starting with "0x" (see the + * AV_CH_* macros). + * + * Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7" + */ +uint64_t liteav_av_get_channel_layout(const char *name); + +/** + * Return a channel layout and the number of channels based on the specified name. + * + * This function is similar to (@see liteav_av_get_channel_layout), but can also parse + * unknown channel layout specifications. 
+ * + * @param[in] name channel layout specification string + * @param[out] channel_layout parsed channel layout (0 if unknown) + * @param[out] nb_channels number of channels + * + * @return 0 on success, AVERROR(EINVAL) if the parsing fails. + */ +int liteav_av_get_extended_channel_layout(const char *name, uint64_t* channel_layout, int* nb_channels); + +/** + * Return a description of a channel layout. + * If nb_channels is <= 0, it is guessed from the channel_layout. + * + * @param buf put here the string containing the channel layout + * @param buf_size size in bytes of the buffer + */ +void liteav_av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout); + +struct AVBPrint; +/** + * Append a description of a channel layout to a bprint buffer. + */ +void liteav_av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout); + +/** + * Return the number of channels in the channel layout. + */ +int liteav_av_get_channel_layout_nb_channels(uint64_t channel_layout); + +/** + * Return default channel layout for a given number of channels. + */ +int64_t liteav_av_get_default_channel_layout(int nb_channels); + +/** + * Get the index of a channel in channel_layout. + * + * @param channel a channel layout describing exactly one channel which must be + * present in channel_layout. + * + * @return index of channel in channel_layout on success, a negative AVERROR + * on error. + */ +int liteav_av_get_channel_layout_channel_index(uint64_t channel_layout, + uint64_t channel); + +/** + * Get the channel with the given index in channel_layout. + */ +uint64_t liteav_av_channel_layout_extract_channel(uint64_t channel_layout, int index); + +/** + * Get the name of a given channel. + * + * @return channel name on success, NULL on error. + */ +const char *liteav_av_get_channel_name(uint64_t channel); + +/** + * Get the description of a given channel. 
+ * + * @param channel a channel layout with a single channel + * @return channel description on success, NULL on error + */ +const char *liteav_av_get_channel_description(uint64_t channel); + +/** + * Get the value and name of a standard channel layout. + * + * @param[in] index index in an internal list, starting at 0 + * @param[out] layout channel layout mask + * @param[out] name name of the layout + * @return 0 if the layout exists, + * <0 if index is beyond the limits + */ +int liteav_av_get_standard_channel_layout(unsigned index, uint64_t *layout, + const char **name); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_CHANNEL_LAYOUT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/common.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/common.h new file mode 100644 index 0000000..ded9c8c --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/common.h @@ -0,0 +1,561 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * common internal and external API header + */ + +#ifndef AVUTIL_COMMON_H +#define AVUTIL_COMMON_H + +#if defined(__cplusplus) && !defined(__STDC_CONSTANT_MACROS) && !defined(UINT64_C) +#error missing -D__STDC_CONSTANT_MACROS / #define __STDC_CONSTANT_MACROS +#endif + +#include <errno.h> +#include <inttypes.h> +#include <limits.h> +#include <math.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "attributes.h" +#include "macros.h" +#include "version.h" +#include "libavutil/avconfig.h" + +#if AV_HAVE_BIGENDIAN +# define AV_NE(be, le) (be) +#else +# define AV_NE(be, le) (le) +#endif + +//rounded division & shift +#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b)) +/* assume b>0 */ +#define ROUNDED_DIV(a,b) (((a)>=0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b)) +/* Fast a/(1<<b) rounded toward +inf. Assume a>=0 and b>=0 */ +#define AV_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \ + : ((a) + (1<<(b)) - 1) >> (b)) +/* Backwards compat. */ +#define FF_CEIL_RSHIFT AV_CEIL_RSHIFT + +#define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b)) +#define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b)) + +/** + * Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they + * are not representable as absolute values of their type. This is the same + * as with *abs() + * @see FFNABS() + */ +#define FFABS(a) ((a) >= 0 ? (a) : (-(a))) +#define FFSIGN(a) ((a) > 0 ? 1 : -1) + +/** + * Negative Absolute value. + * this works for all integers of all types. + * As with many macros, this evaluates its argument twice, it thus must not have + * a sideeffect, that is FFNABS(x++) has undefined behavior. + */ +#define FFNABS(a) ((a) <= 0 ? 
(a) : (-(a))) + +/** + * Comparator. + * For two numerical expressions x and y, gives 1 if x > y, -1 if x < y, and 0 + * if x == y. This is useful for instance in a qsort comparator callback. + * Furthermore, compilers are able to optimize this to branchless code, and + * there is no risk of overflow with signed types. + * As with many macros, this evaluates its argument multiple times, it thus + * must not have a side-effect. + */ +#define FFDIFFSIGN(x,y) (((x)>(y)) - ((x)<(y))) + +#define FFMAX(a,b) ((a) > (b) ? (a) : (b)) +#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c) +#define FFMIN(a,b) ((a) > (b) ? (b) : (a)) +#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c) + +#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0) +#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0])) + +/* misc math functions */ + +#ifdef HAVE_AV_CONFIG_H +# include "config.h" +# include "intmath.h" +#endif + +/* Pull in unguarded fallback defines at the end of this file. */ +#include "common.h" + +#ifndef liteav_av_log2 +av_const int liteav_av_log2(unsigned v); +#endif + +#ifndef liteav_av_log2_16bit +av_const int liteav_av_log2_16bit(unsigned v); +#endif + +/** + * Clip a signed integer value into the amin-amax range. + * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const int av_clip_c(int a, int amin, int amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a signed 64bit integer value into the amin-amax range. 
+ * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin, int64_t amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a signed integer value into the 0-255 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const uint8_t av_clip_uint8_c(int a) +{ + if (a&(~0xFF)) return (~a)>>31; + else return a; +} + +/** + * Clip a signed integer value into the -128,127 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int8_t av_clip_int8_c(int a) +{ + if ((a+0x80U) & ~0xFF) return (a>>31) ^ 0x7F; + else return a; +} + +/** + * Clip a signed integer value into the 0-65535 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const uint16_t av_clip_uint16_c(int a) +{ + if (a&(~0xFFFF)) return (~a)>>31; + else return a; +} + +/** + * Clip a signed integer value into the -32768,32767 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int16_t av_clip_int16_c(int a) +{ + if ((a+0x8000U) & ~0xFFFF) return (a>>31) ^ 0x7FFF; + else return a; +} + +/** + * Clip a signed 64-bit integer value into the -2147483648,2147483647 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a) +{ + if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (int32_t)((a>>63) ^ 0x7FFFFFFF); + else return (int32_t)a; +} + +/** + * Clip a signed integer into the -(2^p),(2^p-1) range. 
+ * @param a value to clip + * @param p bit position to clip at + * @return clipped value + */ +static av_always_inline av_const int av_clip_intp2_c(int a, int p) +{ + if (((unsigned)a + (1 << p)) & ~((2 << p) - 1)) + return (a >> 31) ^ ((1 << p) - 1); + else + return a; +} + +/** + * Clip a signed integer to an unsigned power of two range. + * @param a value to clip + * @param p bit position to clip at + * @return clipped value + */ +static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p) +{ + if (a & ~((1<<p) - 1)) return (~a) >> 31 & ((1<<p) - 1); + else return a; +} + +/** + * Clear high bits from an unsigned integer starting with specific bit position + * @param a value to clip + * @param p bit position to clip at + * @return clipped value + */ +static av_always_inline av_const unsigned av_mod_uintp2_c(unsigned a, unsigned p) +{ + return a & ((1 << p) - 1); +} + +/** + * Add two signed 32-bit values with saturation. + * + * @param a one value + * @param b another value + * @return sum with signed saturation + */ +static av_always_inline int av_sat_add32_c(int a, int b) +{ + return av_clipl_int32((int64_t)a + b); +} + +/** + * Add a doubled value to another value with saturation at both stages. + * + * @param a first value + * @param b value doubled and added to a + * @return sum sat(a + sat(2*b)) with signed saturation + */ +static av_always_inline int av_sat_dadd32_c(int a, int b) +{ + return av_sat_add32(a, av_sat_add32(b, b)); +} + +/** + * Subtract two signed 32-bit values with saturation. + * + * @param a one value + * @param b another value + * @return difference with signed saturation + */ +static av_always_inline int av_sat_sub32_c(int a, int b) +{ + return av_clipl_int32((int64_t)a - b); +} + +/** + * Subtract a doubled value from another value with saturation at both stages. 
+ * + * @param a first value + * @param b value doubled and subtracted from a + * @return difference sat(a - sat(2*b)) with signed saturation + */ +static av_always_inline int av_sat_dsub32_c(int a, int b) +{ + return av_sat_sub32(a, av_sat_add32(b, b)); +} + +/** + * Clip a float value into the amin-amax range. + * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const float av_clipf_c(float a, float amin, float amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a double value into the amin-amax range. + * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const double av_clipd_c(double a, double amin, double amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** Compute ceil(log2(x)). 
+ * @param x value used to compute ceil(log2(x)) + * @return computed ceiling of log2(x) + */ +static av_always_inline av_const int av_ceil_log2_c(int x) +{ + return liteav_av_log2((x - 1) << 1); +} + +/** + * Count number of bits set to one in x + * @param x value to count bits of + * @return the number of bits set to one in x + */ +static av_always_inline av_const int av_popcount_c(uint32_t x) +{ + x -= (x >> 1) & 0x55555555; + x = (x & 0x33333333) + ((x >> 2) & 0x33333333); + x = (x + (x >> 4)) & 0x0F0F0F0F; + x += x >> 8; + return (x + (x >> 16)) & 0x3F; +} + +/** + * Count number of bits set to one in x + * @param x value to count bits of + * @return the number of bits set to one in x + */ +static av_always_inline av_const int av_popcount64_c(uint64_t x) +{ + return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32)); +} + +static av_always_inline av_const int av_parity_c(uint32_t v) +{ + return av_popcount(v) & 1; +} + +#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24)) +#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24)) + +/** + * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form. + * + * @param val Output value, must be an lvalue of type uint32_t. + * @param GET_BYTE Expression reading one byte from the input. + * Evaluated up to 7 times (4 for the currently + * assigned Unicode range). With a memory buffer + * input, this could be *ptr++. + * @param ERROR Expression to be evaluated on invalid input, + * typically a goto statement. + * + * @warning ERROR should not contain a loop control statement which + * could interact with the internal while loop, and should force an + * exit from the macro code (e.g. through a goto or a return) in order + * to prevent undefined results. 
+ */ +#define GET_UTF8(val, GET_BYTE, ERROR)\ + val= (GET_BYTE);\ + {\ + uint32_t top = (val & 128) >> 1;\ + if ((val & 0xc0) == 0x80 || val >= 0xFE)\ + ERROR\ + while (val & top) {\ + int tmp= (GET_BYTE) - 128;\ + if(tmp>>6)\ + ERROR\ + val= (val<<6) + tmp;\ + top <<= 5;\ + }\ + val &= (top << 1) - 1;\ + } + +/** + * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form. + * + * @param val Output value, must be an lvalue of type uint32_t. + * @param GET_16BIT Expression returning two bytes of UTF-16 data converted + * to native byte order. Evaluated one or two times. + * @param ERROR Expression to be evaluated on invalid input, + * typically a goto statement. + */ +#define GET_UTF16(val, GET_16BIT, ERROR)\ + val = GET_16BIT;\ + {\ + unsigned int hi = val - 0xD800;\ + if (hi < 0x800) {\ + val = GET_16BIT - 0xDC00;\ + if (val > 0x3FFU || hi > 0x3FFU)\ + ERROR\ + val += (hi<<10) + 0x10000;\ + }\ + }\ + +/** + * @def PUT_UTF8(val, tmp, PUT_BYTE) + * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long). + * @param val is an input-only argument and should be of type uint32_t. It holds + * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If + * val is given as a function it is executed only once. + * @param tmp is a temporary variable and should be of type uint8_t. It + * represents an intermediate value during conversion that is to be + * output by PUT_BYTE. + * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination. + * It could be a function or a statement, and uses tmp as the input byte. + * For example, PUT_BYTE could be "*output++ = tmp;" PUT_BYTE will be + * executed up to 4 times for values in the valid UTF-8 range and up to + * 7 times in the general case, depending on the length of the converted + * Unicode character. 
+ */ +#define PUT_UTF8(val, tmp, PUT_BYTE)\ + {\ + int bytes, shift;\ + uint32_t in = val;\ + if (in < 0x80) {\ + tmp = in;\ + PUT_BYTE\ + } else {\ + bytes = (liteav_av_log2(in) + 4) / 5;\ + shift = (bytes - 1) * 6;\ + tmp = (256 - (256 >> bytes)) | (in >> shift);\ + PUT_BYTE\ + while (shift >= 6) {\ + shift -= 6;\ + tmp = 0x80 | ((in >> shift) & 0x3f);\ + PUT_BYTE\ + }\ + }\ + } + +/** + * @def PUT_UTF16(val, tmp, PUT_16BIT) + * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes). + * @param val is an input-only argument and should be of type uint32_t. It holds + * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If + * val is given as a function it is executed only once. + * @param tmp is a temporary variable and should be of type uint16_t. It + * represents an intermediate value during conversion that is to be + * output by PUT_16BIT. + * @param PUT_16BIT writes the converted UTF-16 data to any proper destination + * in desired endianness. It could be a function or a statement, and uses tmp + * as the input byte. For example, PUT_BYTE could be "*output++ = tmp;" + * PUT_BYTE will be executed 1 or 2 times depending on input character. + */ +#define PUT_UTF16(val, tmp, PUT_16BIT)\ + {\ + uint32_t in = val;\ + if (in < 0x10000) {\ + tmp = in;\ + PUT_16BIT\ + } else {\ + tmp = 0xD800 | ((in - 0x10000) >> 10);\ + PUT_16BIT\ + tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\ + PUT_16BIT\ + }\ + }\ + + + +#include "mem.h" + +#ifdef HAVE_AV_CONFIG_H +# include "internal.h" +#endif /* HAVE_AV_CONFIG_H */ + +#endif /* AVUTIL_COMMON_H */ + +/* + * The following definitions are outside the multiple inclusion guard + * to ensure they are immediately available in intmath.h. 
+ */ + +#ifndef av_ceil_log2 +# define av_ceil_log2 av_ceil_log2_c +#endif +#ifndef av_clip +# define av_clip av_clip_c +#endif +#ifndef av_clip64 +# define av_clip64 av_clip64_c +#endif +#ifndef av_clip_uint8 +# define av_clip_uint8 av_clip_uint8_c +#endif +#ifndef av_clip_int8 +# define av_clip_int8 av_clip_int8_c +#endif +#ifndef av_clip_uint16 +# define av_clip_uint16 av_clip_uint16_c +#endif +#ifndef av_clip_int16 +# define av_clip_int16 av_clip_int16_c +#endif +#ifndef av_clipl_int32 +# define av_clipl_int32 av_clipl_int32_c +#endif +#ifndef av_clip_intp2 +# define av_clip_intp2 av_clip_intp2_c +#endif +#ifndef av_clip_uintp2 +# define av_clip_uintp2 av_clip_uintp2_c +#endif +#ifndef av_mod_uintp2 +# define av_mod_uintp2 av_mod_uintp2_c +#endif +#ifndef av_sat_add32 +# define av_sat_add32 av_sat_add32_c +#endif +#ifndef av_sat_dadd32 +# define av_sat_dadd32 av_sat_dadd32_c +#endif +#ifndef av_sat_sub32 +# define av_sat_sub32 av_sat_sub32_c +#endif +#ifndef av_sat_dsub32 +# define av_sat_dsub32 av_sat_dsub32_c +#endif +#ifndef av_clipf +# define av_clipf av_clipf_c +#endif +#ifndef av_clipd +# define av_clipd av_clipd_c +#endif +#ifndef av_popcount +# define av_popcount av_popcount_c +#endif +#ifndef av_popcount64 +# define av_popcount64 av_popcount64_c +#endif +#ifndef av_parity +# define av_parity av_parity_c +#endif diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/cpu.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/cpu.h new file mode 100644 index 0000000..decf084 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/cpu.h @@ -0,0 +1,131 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2000, 2001, 2002 Fabrice Bellard + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CPU_H +#define AVUTIL_CPU_H + +#include <stddef.h> + +#include "attributes.h" + +#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */ + + /* lower 16 bits - CPU features */ +#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX +#define AV_CPU_FLAG_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext +#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext +#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW +#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions +#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions +#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster + ///< than regular MMX/SSE (e.g. Core1) +#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt +#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions +#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster + ///< than regular MMX/SSE (e.g. 
Core1) +#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions +#define AV_CPU_FLAG_SSSE3SLOW 0x4000000 ///< SSSE3 supported, but usually not faster +#define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower +#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions +#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions +#define AV_CPU_FLAG_AESNI 0x80000 ///< Advanced Encryption Standard functions +#define AV_CPU_FLAG_AVX 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used +#define AV_CPU_FLAG_AVXSLOW 0x8000000 ///< AVX supported, but slow when using YMM registers (e.g. Bulldozer) +#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions +#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions +#define AV_CPU_FLAG_CMOV 0x1000 ///< supports cmov instruction +#define AV_CPU_FLAG_AVX2 0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used +#define AV_CPU_FLAG_FMA3 0x10000 ///< Haswell FMA3 functions +#define AV_CPU_FLAG_BMI1 0x20000 ///< Bit Manipulation Instruction Set 1 +#define AV_CPU_FLAG_BMI2 0x40000 ///< Bit Manipulation Instruction Set 2 +#define AV_CPU_FLAG_AVX512 0x100000 ///< AVX-512 functions: requires OS support even if YMM/ZMM registers aren't used + +#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard +#define AV_CPU_FLAG_VSX 0x0002 ///< ISA 2.06 +#define AV_CPU_FLAG_POWER8 0x0004 ///< ISA 2.07 + +#define AV_CPU_FLAG_ARMV5TE (1 << 0) +#define AV_CPU_FLAG_ARMV6 (1 << 1) +#define AV_CPU_FLAG_ARMV6T2 (1 << 2) +#define AV_CPU_FLAG_VFP (1 << 3) +#define AV_CPU_FLAG_VFPV3 (1 << 4) +#define AV_CPU_FLAG_NEON (1 << 5) +#define AV_CPU_FLAG_ARMV8 (1 << 6) +#define AV_CPU_FLAG_VFP_VM (1 << 7) ///< VFPv2 vector mode, deprecated in ARMv7-A and unavailable in various CPUs implementations +#define AV_CPU_FLAG_SETEND (1 <<16) + +/** + * Return the flags which specify extensions supported by the CPU. 
+ * The returned value is affected by liteav_av_force_cpu_flags() if that was used + * before. So liteav_av_get_cpu_flags() can easily be used in an application to + * detect the enabled cpu flags. + */ +int liteav_av_get_cpu_flags(void); + +/** + * Disables cpu detection and forces the specified flags. + * -1 is a special case that disables forcing of specific flags. + */ +void liteav_av_force_cpu_flags(int flags); + +/** + * Set a mask on flags returned by liteav_av_get_cpu_flags(). + * This function is mainly useful for testing. + * Please use liteav_av_force_cpu_flags() and liteav_av_get_cpu_flags() instead which are more flexible + */ +attribute_deprecated void liteav_av_set_cpu_flags_mask(int mask); + +/** + * Parse CPU flags from a string. + * + * The returned flags contain the specified flags as well as related unspecified flags. + * + * This function exists only for compatibility with libav. + * Please use liteav_av_parse_cpu_caps() when possible. + * @return a combination of AV_CPU_* flags, negative on error. + */ +attribute_deprecated +int liteav_av_parse_cpu_flags(const char *s); + +/** + * Parse CPU caps from a string and update the given AV_CPU_* flags based on that. + * + * @return negative on error. + */ +int liteav_av_parse_cpu_caps(unsigned *flags, const char *s); + +/** + * @return the number of logical CPU cores present. + */ +int liteav_av_cpu_count(void); + +/** + * Get the maximum data alignment that may be required by FFmpeg. + * + * Note that this is affected by the build configuration and the CPU flags mask, + * so e.g. if the CPU supports AVX, but libavutil has been built with + * --disable-avx or the AV_CPU_FLAG_AVX flag has been disabled through + * liteav_av_set_cpu_flags_mask(), then this function will behave as if AVX is not + * present. 
+ */ +size_t liteav_av_cpu_max_align(void); + +#endif /* AVUTIL_CPU_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/crc.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/crc.h new file mode 100644 index 0000000..e399b0d --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/crc.h @@ -0,0 +1,101 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_crc32 + * Public header for CRC hash function implementation. + */ + +#ifndef AVUTIL_CRC_H +#define AVUTIL_CRC_H + +#include <stdint.h> +#include <stddef.h> +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_crc32 CRC + * @ingroup lavu_hash + * CRC (Cyclic Redundancy Check) hash function implementation. + * + * This module supports numerous CRC polynomials, in addition to the most + * widely used CRC-32-IEEE. See @ref AVCRCId for a list of available + * polynomials. 
+ * + * @{ + */ + +typedef uint32_t AVCRC; + +typedef enum { + AV_CRC_8_ATM, + AV_CRC_16_ANSI, + AV_CRC_16_CCITT, + AV_CRC_32_IEEE, + AV_CRC_32_IEEE_LE, /*< reversed bitorder version of AV_CRC_32_IEEE */ + AV_CRC_16_ANSI_LE, /*< reversed bitorder version of AV_CRC_16_ANSI */ + AV_CRC_24_IEEE, + AV_CRC_8_EBU, + AV_CRC_MAX, /*< Not part of public API! Do not use outside libavutil. */ +}AVCRCId; + +/** + * Initialize a CRC table. + * @param ctx must be an array of size sizeof(AVCRC)*257 or sizeof(AVCRC)*1024 + * @param le If 1, the lowest bit represents the coefficient for the highest + * exponent of the corresponding polynomial (both for poly and + * actual CRC). + * If 0, you must swap the CRC parameter and the result of liteav_av_crc + * if you need the standard representation (can be simplified in + * most cases to e.g. bswap16): + * av_bswap32(crc << (32-bits)) + * @param bits number of bits for the CRC + * @param poly generator polynomial without the x**bits coefficient, in the + * representation as specified by le + * @param ctx_size size of ctx in bytes + * @return <0 on failure + */ +int liteav_av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size); + +/** + * Get an initialized standard CRC table. + * @param crc_id ID of a standard CRC + * @return a pointer to the CRC table or NULL on failure + */ +const AVCRC *liteav_av_crc_get_table(AVCRCId crc_id); + +/** + * Calculate the CRC of a block. 
+ * @param crc CRC of previous blocks if any or initial value for CRC + * @return CRC updated with the data from the given block + * + * @see liteav_av_crc_init() "le" parameter + */ +uint32_t liteav_av_crc(const AVCRC *ctx, uint32_t crc, + const uint8_t *buffer, size_t length) av_pure; + +/** + * @} + */ + +#endif /* AVUTIL_CRC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/des.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/des.h new file mode 100644 index 0000000..b98dd9a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/des.h @@ -0,0 +1,78 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * DES encryption/decryption + * Copyright (c) 2007 Reimar Doeffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_DES_H +#define AVUTIL_DES_H + +#include <stdint.h> + +/** + * @defgroup lavu_des DES + * @ingroup lavu_crypto + * @{ + */ + +typedef struct AVDES { + uint64_t round_keys[3][16]; + int triple_des; +} AVDES; + +/** + * Allocate an AVDES context. + */ +AVDES *liteav_av_des_alloc(void); + +/** + * @brief Initializes an AVDES context. 
+ * + * @param key_bits must be 64 or 192 + * @param decrypt 0 for encryption/CBC-MAC, 1 for decryption + * @return zero on success, negative value otherwise + */ +int liteav_av_des_init(struct AVDES *d, const uint8_t *key, int key_bits, int decrypt); + +/** + * @brief Encrypts / decrypts using the DES algorithm. + * + * @param count number of 8 byte blocks + * @param dst destination array, can be equal to src, must be 8-byte aligned + * @param src source array, can be equal to dst, must be 8-byte aligned, may be NULL + * @param iv initialization vector for CBC mode, if NULL then ECB will be used, + * must be 8-byte aligned + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_des_crypt(struct AVDES *d, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); + +/** + * @brief Calculates CBC-MAC using the DES algorithm. + * + * @param count number of 8 byte blocks + * @param dst destination array, can be equal to src, must be 8-byte aligned + * @param src source array, can be equal to dst, must be 8-byte aligned, may be NULL + */ +void liteav_av_des_mac(struct AVDES *d, uint8_t *dst, const uint8_t *src, int count); + +/** + * @} + */ + +#endif /* AVUTIL_DES_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/dict.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/dict.h new file mode 100644 index 0000000..7794958 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/dict.h @@ -0,0 +1,201 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Public dictionary API. + * @deprecated + * AVDictionary is provided for compatibility with libav. It is both in + * implementation as well as API inefficient. It does not scale and is + * extremely slow with large dictionaries. + * It is recommended that new code uses our tree container from tree.c/h + * where applicable, which uses AVL trees to achieve O(log n) performance. + */ + +#ifndef AVUTIL_DICT_H +#define AVUTIL_DICT_H + +#include <stdint.h> + +#include "version.h" + +/** + * @addtogroup lavu_dict AVDictionary + * @ingroup lavu_data + * + * @brief Simple key:value store + * + * @{ + * Dictionaries are used for storing key:value pairs. To create + * an AVDictionary, simply pass an address of a NULL pointer to + * liteav_av_dict_set(). NULL can be used as an empty dictionary wherever + * a pointer to an AVDictionary is required. + * Use liteav_av_dict_get() to retrieve an entry or iterate over all + * entries and finally liteav_av_dict_free() to free the dictionary + * and all its contents. 
+ * + @code + AVDictionary *d = NULL; // "create" an empty dictionary + AVDictionaryEntry *t = NULL; + + liteav_av_dict_set(&d, "foo", "bar", 0); // add an entry + + char *k = liteav_av_strdup("key"); // if your strings are already allocated, + char *v = liteav_av_strdup("value"); // you can avoid copying them like this + liteav_av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL); + + while (t = liteav_av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) { + <....> // iterate over all entries in d + } + liteav_av_dict_free(&d); + @endcode + */ + +#define AV_DICT_MATCH_CASE 1 /**< Only get an entry with exact-case key match. Only relevant in liteav_av_dict_get(). */ +#define AV_DICT_IGNORE_SUFFIX 2 /**< Return first entry in a dictionary whose first part corresponds to the search key, + ignoring the suffix of the found key string. Only relevant in liteav_av_dict_get(). */ +#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been + allocated with liteav_av_malloc() or another memory allocation function. */ +#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been + allocated with liteav_av_malloc() or another memory allocation function. */ +#define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries. +#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no + delimiter is added, the strings are simply concatenated. */ +#define AV_DICT_MULTIKEY 64 /**< Allow to store several equal keys in the dictionary */ + +typedef struct AVDictionaryEntry { + char *key; + char *value; +} AVDictionaryEntry; + +typedef struct AVDictionary AVDictionary; + +/** + * Get a dictionary entry with matching key. + * + * The returned entry key or value must not be changed, or it will + * cause undefined behavior. + * + * To iterate through all the dictionary entries, you can set the matching key + * to the null string "" and set the AV_DICT_IGNORE_SUFFIX flag. 
+ * + * @param prev Set to the previous matching element to find the next. + * If set to NULL the first matching element is returned. + * @param key matching key + * @param flags a collection of AV_DICT_* flags controlling how the entry is retrieved + * @return found entry or NULL in case no matching entry was found in the dictionary + */ +AVDictionaryEntry *liteav_av_dict_get(const AVDictionary *m, const char *key, + const AVDictionaryEntry *prev, int flags); + +/** + * Get number of entries in dictionary. + * + * @param m dictionary + * @return number of entries in dictionary + */ +int liteav_av_dict_count(const AVDictionary *m); + +/** + * Set the given entry in *pm, overwriting an existing entry. + * + * Note: If AV_DICT_DONT_STRDUP_KEY or AV_DICT_DONT_STRDUP_VAL is set, + * these arguments will be freed on error. + * + * Warning: Adding a new entry to a dictionary invalidates all existing entries + * previously returned with liteav_av_dict_get. + * + * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL + * a dictionary struct is allocated and put in *pm. + * @param key entry key to add to *pm (will either be av_strduped or added as a new key depending on flags) + * @param value entry value to add to *pm (will be av_strduped or added as a new key depending on flags). + * Passing a NULL value will cause an existing entry to be deleted. + * @return >= 0 on success otherwise an error code <0 + */ +int liteav_av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags); + +/** + * Convenience wrapper for liteav_av_dict_set that converts the value to a string + * and stores it. + * + * Note: If AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error. + */ +int liteav_av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags); + +/** + * Parse the key/value pairs list and add the parsed entries to a dictionary. + * + * In case of failure, all the successfully set entries are stored in + * *pm. 
You may need to manually free the created dictionary. + * + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other + * @param flags flags to use when adding to dictionary. + * AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL + * are ignored since the key/value tokens will always + * be duplicated. + * @return 0 on success, negative AVERROR code on failure + */ +int liteav_av_dict_parse_string(AVDictionary **pm, const char *str, + const char *key_val_sep, const char *pairs_sep, + int flags); + +/** + * Copy entries from one AVDictionary struct into another. + * @param dst pointer to a pointer to a AVDictionary struct. If *dst is NULL, + * this function will allocate a struct for you and put it in *dst + * @param src pointer to source AVDictionary struct + * @param flags flags to use when setting entries in *dst + * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag + * @return 0 on success, negative AVERROR code on failure. If dst was allocated + * by this function, callers should free the associated memory. + */ +int liteav_av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags); + +/** + * Free all the memory allocated for an AVDictionary struct + * and all keys and values. + */ +void liteav_av_dict_free(AVDictionary **m); + +/** + * Get dictionary entries as a string. + * + * Create a string containing dictionary's entries. + * Such string may be passed back to liteav_av_dict_parse_string(). + * @note String is escaped with backslashes ('\'). + * + * @param[in] m dictionary + * @param[out] buffer Pointer to buffer that will be allocated with string containg entries. + * Buffer must be freed by the caller when is no longer needed. 
+ * @param[in] key_val_sep character used to separate key from value + * @param[in] pairs_sep character used to separate two pairs from each other + * @return >= 0 on success, negative on error + * @warning Separators cannot be neither '\\' nor '\0'. They also cannot be the same. + */ +int liteav_av_dict_get_string(const AVDictionary *m, char **buffer, + const char key_val_sep, const char pairs_sep); + +/** + * @} + */ + +#endif /* AVUTIL_DICT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/display.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/display.h new file mode 100644 index 0000000..c492b96 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/display.h @@ -0,0 +1,115 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2014 Vittorio Giovara <vittorio.giovara@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Display matrix + */ + +#ifndef AVUTIL_DISPLAY_H +#define AVUTIL_DISPLAY_H + +#include <stdint.h> +#include "common.h" + +/** + * @addtogroup lavu_video + * @{ + * + * @defgroup lavu_video_display Display transformation matrix functions + * @{ + */ + +/** + * @addtogroup lavu_video_display + * The display transformation matrix specifies an affine transformation that + * should be applied to video frames for correct presentation. It is compatible + * with the matrices stored in the ISO/IEC 14496-12 container format. + * + * The data is a 3x3 matrix represented as a 9-element array: + * + * @code{.unparsed} + * | a b u | + * (a, b, u, c, d, v, x, y, w) -> | c d v | + * | x y w | + * @endcode + * + * All numbers are stored in native endianness, as 16.16 fixed-point values, + * except for u, v and w, which are stored as 2.30 fixed-point values. + * + * The transformation maps a point (p, q) in the source (pre-transformation) + * frame to the point (p', q') in the destination (post-transformation) frame as + * follows: + * + * @code{.unparsed} + * | a b u | + * (p, q, 1) . | c d v | = z * (p', q', 1) + * | x y w | + * @endcode + * + * The transformation can also be more explicitly written in components as + * follows: + * + * @code{.unparsed} + * p' = (a * p + c * q + x) / z; + * q' = (b * p + d * q + y) / z; + * z = u * p + v * q + w + * @endcode + */ + +/** + * Extract the rotation component of the transformation matrix. + * + * @param matrix the transformation matrix + * @return the angle (in degrees) by which the transformation rotates the frame + * counterclockwise. The angle will be in range [-180.0, 180.0], + * or NaN if the matrix is singular. 
+ * + * @note floating point numbers are inherently inexact, so callers are + * recommended to round the return value to nearest integer before use. + */ +double liteav_av_display_rotation_get(const int32_t matrix[9]); + +/** + * Initialize a transformation matrix describing a pure counterclockwise + * rotation by the specified angle (in degrees). + * + * @param matrix an allocated transformation matrix (will be fully overwritten + * by this function) + * @param angle rotation angle in degrees. + */ +void liteav_av_display_rotation_set(int32_t matrix[9], double angle); + +/** + * Flip the input matrix horizontally and/or vertically. + * + * @param matrix an allocated transformation matrix + * @param hflip whether the matrix should be flipped horizontally + * @param vflip whether the matrix should be flipped vertically + */ +void liteav_av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_DISPLAY_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/dovi_meta.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/dovi_meta.h new file mode 100644 index 0000000..51a6533 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/dovi_meta.h @@ -0,0 +1,71 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2020 Vacing Fang <vacingfang@tencent.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * DOVI configuration + */ + + +#ifndef AVUTIL_DOVI_META_H +#define AVUTIL_DOVI_META_H + +#include <stdint.h> +#include <stddef.h> + +/* + * DOVI configuration + * ref: dolby-vision-bitstreams-within-the-iso-base-media-file-format-v2.1.2 + dolby-vision-bitstreams-in-mpeg-2-transport-stream-multiplex-v1.2 + * @code + * uint8_t dv_version_major, the major version number that the stream complies with + * uint8_t dv_version_minor, the minor version number that the stream complies with + * uint8_t dv_profile, the Dolby Vision profile + * uint8_t dv_level, the Dolby Vision level + * uint8_t rpu_present_flag + * uint8_t el_present_flag + * uint8_t bl_present_flag + * uint8_t dv_bl_signal_compatibility_id + * @endcode + * + * @note The struct must be allocated with liteav_av_dovi_alloc() and + * its size is not a part of the public ABI. + */ +typedef struct AVDOVIDecoderConfigurationRecord { + uint8_t dv_version_major; + uint8_t dv_version_minor; + uint8_t dv_profile; + uint8_t dv_level; + uint8_t rpu_present_flag; + uint8_t el_present_flag; + uint8_t bl_present_flag; + uint8_t dv_bl_signal_compatibility_id; +} AVDOVIDecoderConfigurationRecord; + +/** + * Allocate a AVDOVIDecoderConfigurationRecord structure and initialize its + * fields to default values. 
+ * + * @return the newly allocated struct or NULL on failure + */ +AVDOVIDecoderConfigurationRecord *liteav_av_dovi_alloc(size_t *size); + +#endif /* AVUTIL_DOVI_META_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/downmix_info.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/downmix_info.h new file mode 100644 index 0000000..c42bd0a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/downmix_info.h @@ -0,0 +1,116 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2014 Tim Walker <tdskywalker@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_DOWNMIX_INFO_H +#define AVUTIL_DOWNMIX_INFO_H + +#include "frame.h" + +/** + * @file + * audio downmix medatata + */ + +/** + * @addtogroup lavu_audio + * @{ + */ + +/** + * @defgroup downmix_info Audio downmix metadata + * @{ + */ + +/** + * Possible downmix types. + */ +enum AVDownmixType { + AV_DOWNMIX_TYPE_UNKNOWN, /**< Not indicated. */ + AV_DOWNMIX_TYPE_LORO, /**< Lo/Ro 2-channel downmix (Stereo). 
*/ + AV_DOWNMIX_TYPE_LTRT, /**< Lt/Rt 2-channel downmix, Dolby Surround compatible. */ + AV_DOWNMIX_TYPE_DPLII, /**< Lt/Rt 2-channel downmix, Dolby Pro Logic II compatible. */ + AV_DOWNMIX_TYPE_NB /**< Number of downmix types. Not part of ABI. */ +}; + +/** + * This structure describes optional metadata relevant to a downmix procedure. + * + * All fields are set by the decoder to the value indicated in the audio + * bitstream (if present), or to a "sane" default otherwise. + */ +typedef struct AVDownmixInfo { + /** + * Type of downmix preferred by the mastering engineer. + */ + enum AVDownmixType preferred_downmix_type; + + /** + * Absolute scale factor representing the nominal level of the center + * channel during a regular downmix. + */ + double center_mix_level; + + /** + * Absolute scale factor representing the nominal level of the center + * channel during an Lt/Rt compatible downmix. + */ + double center_mix_level_ltrt; + + /** + * Absolute scale factor representing the nominal level of the surround + * channels during a regular downmix. + */ + double surround_mix_level; + + /** + * Absolute scale factor representing the nominal level of the surround + * channels during an Lt/Rt compatible downmix. + */ + double surround_mix_level_ltrt; + + /** + * Absolute scale factor representing the level at which the LFE data is + * mixed into L/R channels during downmixing. + */ + double lfe_mix_level; +} AVDownmixInfo; + +/** + * Get a frame's AV_FRAME_DATA_DOWNMIX_INFO side data for editing. + * + * If the side data is absent, it is created and added to the frame. + * + * @param frame the frame for which the side data is to be obtained or created + * + * @return the AVDownmixInfo structure to be edited by the caller, or NULL if + * the structure cannot be allocated. 
+ */ +AVDownmixInfo *liteav_av_downmix_info_update_side_data(AVFrame *frame); + +/** + * @} + */ + +/** + * @} + */ + +#endif /* AVUTIL_DOWNMIX_INFO_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/encryption_info.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/encryption_info.h new file mode 100644 index 0000000..f7ee2ee --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/encryption_info.h @@ -0,0 +1,206 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/** + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_ENCRYPTION_INFO_H +#define AVUTIL_ENCRYPTION_INFO_H + +#include <stddef.h> +#include <stdint.h> + +typedef struct AVSubsampleEncryptionInfo { + /** The number of bytes that are clear. */ + unsigned int bytes_of_clear_data; + + /** + * The number of bytes that are protected. If using pattern encryption, + * the pattern applies to only the protected bytes; if not using pattern + * encryption, all these bytes are encrypted. 
+ */ + unsigned int bytes_of_protected_data; +} AVSubsampleEncryptionInfo; + +/** + * This describes encryption info for a packet. This contains frame-specific + * info for how to decrypt the packet before passing it to the decoder. + * + * The size of this struct is not part of the public ABI. + */ +typedef struct AVEncryptionInfo { + /** The fourcc encryption scheme, in big-endian byte order. */ + uint32_t scheme; + + /** + * Only used for pattern encryption. This is the number of 16-byte blocks + * that are encrypted. + */ + uint32_t crypt_byte_block; + + /** + * Only used for pattern encryption. This is the number of 16-byte blocks + * that are clear. + */ + uint32_t skip_byte_block; + + /** + * The ID of the key used to encrypt the packet. This should always be + * 16 bytes long, but may be changed in the future. + */ + uint8_t *key_id; + uint32_t key_id_size; + + /** + * The initialization vector. This may have been zero-filled to be the + * correct block size. This should always be 16 bytes long, but may be + * changed in the future. + */ + uint8_t *iv; + uint32_t iv_size; + + /** + * An array of subsample encryption info specifying how parts of the sample + * are encrypted. If there are no subsamples, then the whole sample is + * encrypted. + */ + AVSubsampleEncryptionInfo *subsamples; + uint32_t subsample_count; +} AVEncryptionInfo; + +/** + * This describes info used to initialize an encryption key system. + * + * The size of this struct is not part of the public ABI. + */ +typedef struct AVEncryptionInitInfo { + /** + * A unique identifier for the key system this is for, can be NULL if it + * is not known. This should always be 16 bytes, but may change in the + * future. + */ + uint8_t* system_id; + uint32_t system_id_size; + + /** + * An array of key IDs this initialization data is for. All IDs are the + * same length. Can be NULL if there are no known key IDs. + */ + uint8_t** key_ids; + /** The number of key IDs. 
*/ + uint32_t num_key_ids; + /** + * The number of bytes in each key ID. This should always be 16, but may + * change in the future. + */ + uint32_t key_id_size; + + /** + * Key-system specific initialization data. This data is copied directly + * from the file and the format depends on the specific key system. This + * can be NULL if there is no initialization data; in that case, there + * will be at least one key ID. + */ + uint8_t* data; + uint32_t data_size; + + /** + * An optional pointer to the next initialization info in the list. + */ + struct AVEncryptionInitInfo *next; +} AVEncryptionInitInfo; + +/** + * Allocates an AVEncryptionInfo structure and sub-pointers to hold the given + * number of subsamples. This will allocate pointers for the key ID, IV, + * and subsample entries, set the size members, and zero-initialize the rest. + * + * @param subsample_count The number of subsamples. + * @param key_id_size The number of bytes in the key ID, should be 16. + * @param iv_size The number of bytes in the IV, should be 16. + * + * @return The new AVEncryptionInfo structure, or NULL on error. + */ +AVEncryptionInfo *liteav_av_encryption_info_alloc(uint32_t subsample_count, uint32_t key_id_size, uint32_t iv_size); + +/** + * Allocates an AVEncryptionInfo structure with a copy of the given data. + * @return The new AVEncryptionInfo structure, or NULL on error. + */ +AVEncryptionInfo *liteav_av_encryption_info_clone(const AVEncryptionInfo *info); + +/** + * Frees the given encryption info object. This MUST NOT be used to free the + * side-data data pointer, that should use normal side-data methods. + */ +void liteav_av_encryption_info_free(AVEncryptionInfo *info); + +/** + * Creates a copy of the AVEncryptionInfo that is contained in the given side + * data. The resulting object should be passed to liteav_av_encryption_info_free() + * when done. + * + * @return The new AVEncryptionInfo structure, or NULL on error. 
+ */ +AVEncryptionInfo *liteav_av_encryption_info_get_side_data(const uint8_t *side_data, size_t side_data_size); + +/** + * Allocates and initializes side data that holds a copy of the given encryption + * info. The resulting pointer should be either freed using liteav_av_free or given + * to liteav_av_packet_add_side_data(). + * + * @return The new side-data pointer, or NULL. + */ +uint8_t *liteav_av_encryption_info_add_side_data( + const AVEncryptionInfo *info, size_t *side_data_size); + + +/** + * Allocates an AVEncryptionInitInfo structure and sub-pointers to hold the + * given sizes. This will allocate pointers and set all the fields. + * + * @return The new AVEncryptionInitInfo structure, or NULL on error. + */ +AVEncryptionInitInfo *liteav_av_encryption_init_info_alloc( + uint32_t system_id_size, uint32_t num_key_ids, uint32_t key_id_size, uint32_t data_size); + +/** + * Frees the given encryption init info object. This MUST NOT be used to free + * the side-data data pointer, that should use normal side-data methods. + */ +void liteav_av_encryption_init_info_free(AVEncryptionInitInfo* info); + +/** + * Creates a copy of the AVEncryptionInitInfo that is contained in the given + * side data. The resulting object should be passed to + * liteav_av_encryption_init_info_free() when done. + * + * @return The new AVEncryptionInitInfo structure, or NULL on error. + */ +AVEncryptionInitInfo *liteav_av_encryption_init_info_get_side_data( + const uint8_t* side_data, size_t side_data_size); + +/** + * Allocates and initializes side data that holds a copy of the given encryption + * init info. The resulting pointer should be either freed using liteav_av_free or + * given to liteav_av_packet_add_side_data(). + * + * @return The new side-data pointer, or NULL. 
+ */ +uint8_t *liteav_av_encryption_init_info_add_side_data( + const AVEncryptionInitInfo *info, size_t *side_data_size); + +#endif /* AVUTIL_ENCRYPTION_INFO_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/error.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/error.h new file mode 100644 index 0000000..269fb1c --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/error.h @@ -0,0 +1,134 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * error code definitions + */ + +#ifndef AVUTIL_ERROR_H +#define AVUTIL_ERROR_H + +#include <errno.h> +#include <stddef.h> + +/** + * @addtogroup lavu_error + * + * @{ + */ + + +/* error handling */ +#if EDOM > 0 +#define AVERROR(e) (-(e)) ///< Returns a negative error code from a POSIX error code, to return from library functions. +#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value. +#else +/* Some platforms have E* and errno already negated. 
*/ +#define AVERROR(e) (e) +#define AVUNERROR(e) (e) +#endif + +#define FFERRTAG(a, b, c, d) (-(int)MKTAG(a, b, c, d)) + +#define AVERROR_BSF_NOT_FOUND FFERRTAG(0xF8,'B','S','F') ///< Bitstream filter not found +#define AVERROR_BUG FFERRTAG( 'B','U','G','!') ///< Internal bug, also see AVERROR_BUG2 +#define AVERROR_BUFFER_TOO_SMALL FFERRTAG( 'B','U','F','S') ///< Buffer too small +#define AVERROR_DECODER_NOT_FOUND FFERRTAG(0xF8,'D','E','C') ///< Decoder not found +#define AVERROR_DEMUXER_NOT_FOUND FFERRTAG(0xF8,'D','E','M') ///< Demuxer not found +#define AVERROR_ENCODER_NOT_FOUND FFERRTAG(0xF8,'E','N','C') ///< Encoder not found +#define AVERROR_EOF FFERRTAG( 'E','O','F',' ') ///< End of file +#define AVERROR_EXIT FFERRTAG( 'E','X','I','T') ///< Immediate exit was requested; the called function should not be restarted +#define AVERROR_EXTERNAL FFERRTAG( 'E','X','T',' ') ///< Generic error in an external library +#define AVERROR_FILTER_NOT_FOUND FFERRTAG(0xF8,'F','I','L') ///< Filter not found +#define AVERROR_INVALIDDATA FFERRTAG( 'I','N','D','A') ///< Invalid data found when processing input +#define AVERROR_MUXER_NOT_FOUND FFERRTAG(0xF8,'M','U','X') ///< Muxer not found +#define AVERROR_OPTION_NOT_FOUND FFERRTAG(0xF8,'O','P','T') ///< Option not found +#define AVERROR_PATCHWELCOME FFERRTAG( 'P','A','W','E') ///< Not yet implemented in FFmpeg, patches welcome +#define AVERROR_PROTOCOL_NOT_FOUND FFERRTAG(0xF8,'P','R','O') ///< Protocol not found + +#define AVERROR_STREAM_NOT_FOUND FFERRTAG(0xF8,'S','T','R') ///< Stream not found +/** + * This is semantically identical to AVERROR_BUG + * it has been introduced in Libav after our AVERROR_BUG and with a modified value. + */ +#define AVERROR_BUG2 FFERRTAG( 'B','U','G',' ') +#define AVERROR_UNKNOWN FFERRTAG( 'U','N','K','N') ///< Unknown error, typically from an external library +#define AVERROR_EXPERIMENTAL (-0x2bb2afa8) ///< Requested feature is flagged experimental. 
Set strict_std_compliance if you really want to use it. +#define AVERROR_INPUT_CHANGED (-0x636e6701) ///< Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED) +#define AVERROR_OUTPUT_CHANGED (-0x636e6702) ///< Output changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_INPUT_CHANGED) +/* HTTP & RTSP errors */ +#define AVERROR_HTTP_BAD_REQUEST FFERRTAG(0xF8,'4','0','0') +#define AVERROR_HTTP_UNAUTHORIZED FFERRTAG(0xF8,'4','0','1') +#define AVERROR_HTTP_FORBIDDEN FFERRTAG(0xF8,'4','0','3') +#define AVERROR_HTTP_NOT_FOUND FFERRTAG(0xF8,'4','0','4') +#define AVERROR_HTTP_OTHER_4XX FFERRTAG(0xF8,'4','X','X') +#define AVERROR_HTTP_SERVER_ERROR FFERRTAG(0xF8,'5','X','X') + +#define AV_ERROR_MAX_STRING_SIZE 64 + +#define AVERROR_NETERROR FFERRTAG( 'N','E','T','E') ///< Net Error + +/** + * Define this error, to find the stream error which may be caused by downloading component's giving some wrong data. + */ +#define AVERROR_STRMERROR FFERRTAG( 'S','T','M','E') ///< Stream Error + +/** + * Put a description of the AVERROR code errnum in errbuf. + * In case of failure the global variable errno is set to indicate the + * error. Even in case of failure liteav_av_strerror() will print a generic + * error message indicating the errnum provided to errbuf. + * + * @param errnum error code to describe + * @param errbuf buffer to which description is written + * @param errbuf_size the size in bytes of errbuf + * @return 0 on success, a negative value if a description for errnum + * cannot be found + */ +int liteav_av_strerror(int errnum, char *errbuf, size_t errbuf_size); + +/** + * Fill the provided buffer with a string containing an error string + * corresponding to the AVERROR code errnum. 
+ * + * @param errbuf a buffer + * @param errbuf_size size in bytes of errbuf + * @param errnum error code to describe + * @return the buffer in input, filled with the error description + * @see liteav_av_strerror() + */ +static inline char *av_make_error_string(char *errbuf, size_t errbuf_size, int errnum) +{ + liteav_av_strerror(errnum, errbuf, errbuf_size); + return errbuf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. + */ +#define av_err2str(errnum) \ + av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum) + +/** + * @} + */ + +#endif /* AVUTIL_ERROR_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/eval.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/eval.h new file mode 100644 index 0000000..33b1169 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/eval.h @@ -0,0 +1,114 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simple arithmetic expression evaluator + */ + +#ifndef AVUTIL_EVAL_H +#define AVUTIL_EVAL_H + +#include "avutil.h" + +typedef struct AVExpr AVExpr; + +/** + * Parse and evaluate an expression. + * Note, this is significantly slower than liteav_av_expr_eval(). + * + * @param res a pointer to a double where is put the result value of + * the expression, or NAN in case of error + * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)" + * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0} + * @param const_values a zero terminated array of values for the identifiers from const_names + * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers + * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument + * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers + * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments + * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 + * @param log_ctx parent logging context + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int liteav_av_expr_parse_and_eval(double *res, const char *s, + const char * const *const_names, const double *const_values, + const char * const *func1_names, double (* const *funcs1)(void *, double), + const char * const *func2_names, double (* const *funcs2)(void *, double, double), + void *opaque, int log_offset, void *log_ctx); + +/** + * Parse an expression. 
+ * + * @param expr a pointer where is put an AVExpr containing the parsed + * value in case of successful parsing, or NULL otherwise. + * The pointed to AVExpr must be freed with liteav_av_expr_free() by the user + * when it is not needed anymore. + * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)" + * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0} + * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers + * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument + * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers + * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments + * @param log_ctx parent logging context + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int liteav_av_expr_parse(AVExpr **expr, const char *s, + const char * const *const_names, + const char * const *func1_names, double (* const *funcs1)(void *, double), + const char * const *func2_names, double (* const *funcs2)(void *, double, double), + int log_offset, void *log_ctx); + +/** + * Evaluate a previously parsed expression. + * + * @param const_values a zero terminated array of values for the identifiers from liteav_av_expr_parse() const_names + * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 + * @return the value of the expression + */ +double liteav_av_expr_eval(AVExpr *e, const double *const_values, void *opaque); + +/** + * Free a parsed expression previously created with liteav_av_expr_parse(). + */ +void liteav_av_expr_free(AVExpr *e); + +/** + * Parse the string in numstr and return its value as a double. 
If + * the string is empty, contains only whitespaces, or does not contain + * an initial substring that has the expected syntax for a + * floating-point number, no conversion is performed. In this case, + * returns a value of zero and the value returned in tail is the value + * of numstr. + * + * @param numstr a string representing a number, may contain one of + * the International System number postfixes, for example 'K', 'M', + * 'G'. If 'i' is appended after the postfix, powers of 2 are used + * instead of powers of 10. The 'B' postfix multiplies the value by + * 8, and can be appended after another postfix or used alone. This + * allows using for example 'KB', 'MiB', 'G' and 'B' as postfix. + * @param tail if non-NULL puts here the pointer to the char next + * after the last parsed character + */ +double liteav_av_strtod(const char *numstr, char **tail); + +#endif /* AVUTIL_EVAL_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/ffversion.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/ffversion.h new file mode 100644 index 0000000..57873ec --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/ffversion.h @@ -0,0 +1,5 @@ +/* Automatically generated by version.sh, do not manually edit! */ +#ifndef AVUTIL_FFVERSION_H +#define AVUTIL_FFVERSION_H +#define FFMPEG_VERSION "332c5c1-4.3.1" +#endif /* AVUTIL_FFVERSION_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/fifo.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/fifo.h new file mode 100644 index 0000000..e13ddd1 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/fifo.h @@ -0,0 +1,180 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * a very simple circular buffer FIFO implementation + */ + +#ifndef AVUTIL_FIFO_H +#define AVUTIL_FIFO_H + +#include <stdint.h> +#include "avutil.h" +#include "attributes.h" + +typedef struct AVFifoBuffer { + uint8_t *buffer; + uint8_t *rptr, *wptr, *end; + uint32_t rndx, wndx; +} AVFifoBuffer; + +/** + * Initialize an AVFifoBuffer. + * @param size of FIFO + * @return AVFifoBuffer or NULL in case of memory allocation failure + */ +AVFifoBuffer *liteav_av_fifo_alloc(unsigned int size); + +/** + * Initialize an AVFifoBuffer. + * @param nmemb number of elements + * @param size size of the single element + * @return AVFifoBuffer or NULL in case of memory allocation failure + */ +AVFifoBuffer *liteav_av_fifo_alloc_array(size_t nmemb, size_t size); + +/** + * Free an AVFifoBuffer. + * @param f AVFifoBuffer to free + */ +void liteav_av_fifo_free(AVFifoBuffer *f); + +/** + * Free an AVFifoBuffer and reset pointer to NULL. + * @param f AVFifoBuffer to free + */ +void liteav_av_fifo_freep(AVFifoBuffer **f); + +/** + * Reset the AVFifoBuffer to the state right after liteav_av_fifo_alloc, in particular it is emptied. 
+ * @param f AVFifoBuffer to reset + */ +void liteav_av_fifo_reset(AVFifoBuffer *f); + +/** + * Return the amount of data in bytes in the AVFifoBuffer, that is the + * amount of data you can read from it. + * @param f AVFifoBuffer to read from + * @return size + */ +int liteav_av_fifo_size(const AVFifoBuffer *f); + +/** + * Return the amount of space in bytes in the AVFifoBuffer, that is the + * amount of data you can write into it. + * @param f AVFifoBuffer to write into + * @return size + */ +int liteav_av_fifo_space(const AVFifoBuffer *f); + +/** + * Feed data at specific position from an AVFifoBuffer to a user-supplied callback. + * Similar as av_fifo_gereric_read but without discarding data. + * @param f AVFifoBuffer to read from + * @param offset offset from current read position + * @param buf_size number of bytes to read + * @param func generic read function + * @param dest data destination + */ +int liteav_av_fifo_generic_peek_at(AVFifoBuffer *f, void *dest, int offset, int buf_size, void (*func)(void*, void*, int)); + +/** + * Feed data from an AVFifoBuffer to a user-supplied callback. + * Similar as av_fifo_gereric_read but without discarding data. + * @param f AVFifoBuffer to read from + * @param buf_size number of bytes to read + * @param func generic read function + * @param dest data destination + */ +int liteav_av_fifo_generic_peek(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int)); + +/** + * Feed data from an AVFifoBuffer to a user-supplied callback. + * @param f AVFifoBuffer to read from + * @param buf_size number of bytes to read + * @param func generic read function + * @param dest data destination + */ +int liteav_av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int)); + +/** + * Feed data from a user-supplied callback to an AVFifoBuffer. 
+ * @param f AVFifoBuffer to write to + * @param src data source; non-const since it may be used as a + * modifiable context by the function defined in func + * @param size number of bytes to write + * @param func generic write function; the first parameter is src, + * the second is dest_buf, the third is dest_buf_size. + * func must return the number of bytes written to dest_buf, or <= 0 to + * indicate no more data available to write. + * If func is NULL, src is interpreted as a simple byte array for source data. + * @return the number of bytes written to the FIFO + */ +int liteav_av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int)); + +/** + * Resize an AVFifoBuffer. + * In case of reallocation failure, the old FIFO is kept unchanged. + * + * @param f AVFifoBuffer to resize + * @param size new AVFifoBuffer size in bytes + * @return <0 for failure, >=0 otherwise + */ +int liteav_av_fifo_realloc2(AVFifoBuffer *f, unsigned int size); + +/** + * Enlarge an AVFifoBuffer. + * In case of reallocation failure, the old FIFO is kept unchanged. + * The new fifo size may be larger than the requested size. + * + * @param f AVFifoBuffer to resize + * @param additional_space the amount of space in bytes to allocate in addition to liteav_av_fifo_size() + * @return <0 for failure, >=0 otherwise + */ +int liteav_av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space); + +/** + * Read and discard the specified amount of data from an AVFifoBuffer. + * @param f AVFifoBuffer to read from + * @param size amount of data to read in bytes + */ +void liteav_av_fifo_drain(AVFifoBuffer *f, int size); + +/** + * Return a pointer to the data stored in a FIFO buffer at a certain offset. + * The FIFO buffer is not modified. + * + * @param f AVFifoBuffer to peek at, f must be non-NULL + * @param offs an offset in bytes, its absolute value must be less + * than the used buffer size or the returned pointer will + * point outside to the buffer data. 
+ * The used buffer size can be checked with liteav_av_fifo_size(). + */ +static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs) +{ + uint8_t *ptr = f->rptr + offs; + if (ptr >= f->end) + ptr = f->buffer + (ptr - f->end); + else if (ptr < f->buffer) + ptr = f->end - (f->buffer - ptr); + return ptr; +} + +#endif /* AVUTIL_FIFO_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/file.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/file.h new file mode 100644 index 0000000..a3050de --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/file.h @@ -0,0 +1,72 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_FILE_H +#define AVUTIL_FILE_H + +#include <stdint.h> + +#include "avutil.h" + +/** + * @file + * Misc file utilities. + */ + +/** + * Read the file with name filename, and put its content in a newly + * allocated buffer or map it with mmap() when available. + * In case of success set *bufptr to the read or mmapped buffer, and + * *size to the size in bytes of the buffer in *bufptr. 
+ * Unlike mmap this function succeeds with zero sized files, in this + * case *bufptr will be set to NULL and *size will be set to 0. + * The returned buffer must be released with liteav_av_file_unmap(). + * + * @param log_offset loglevel offset used for logging + * @param log_ctx context used for logging + * @return a non negative number in case of success, a negative value + * corresponding to an AVERROR error code in case of failure + */ +av_warn_unused_result +int liteav_av_file_map(const char *filename, uint8_t **bufptr, size_t *size, + int log_offset, void *log_ctx); + +/** + * Unmap or free the buffer bufptr created by liteav_av_file_map(). + * + * @param size size in bytes of bufptr, must be the same as returned + * by liteav_av_file_map() + */ +void liteav_av_file_unmap(uint8_t *bufptr, size_t size); + +/** + * Wrapper to work around the lack of mkstemp() on mingw. + * Also, tries to create file in /tmp first, if possible. + * *prefix can be a character constant; *filename will be allocated internally. + * @return file descriptor of opened file (or negative value corresponding to an + * AVERROR code on error) + * and opened file name in **filename. + * @note On very old libcs it is necessary to set a secure umask before + * calling this, liteav_av_tempfile() can't call umask itself as it is used in + * libraries and could interfere with the calling application. 
+ * @deprecated as fd numbers cannot be passed saftely between libs on some platforms + */ +int liteav_av_tempfile(const char *prefix, char **filename, int log_offset, void *log_ctx); + +#endif /* AVUTIL_FILE_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/frame.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/frame.h new file mode 100644 index 0000000..2701a70 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/frame.h @@ -0,0 +1,902 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_frame + * reference-counted frame API + */ + +#ifndef AVUTIL_FRAME_H +#define AVUTIL_FRAME_H + +#include <stddef.h> +#include <stdint.h> + +#include "avutil.h" +#include "buffer.h" +#include "dict.h" +#include "rational.h" +#include "samplefmt.h" +#include "pixfmt.h" +#include "version.h" + + +/** + * @defgroup lavu_frame AVFrame + * @ingroup lavu_data + * + * @{ + * AVFrame is an abstraction for reference-counted raw multimedia data. 
+ */ + +enum AVFrameSideDataType { + /** + * The data is the AVPanScan struct defined in libavcodec. + */ + AV_FRAME_DATA_PANSCAN, + /** + * ATSC A53 Part 4 Closed Captions. + * A53 CC bitstream is stored as uint8_t in AVFrameSideData.data. + * The number of bytes of CC data is AVFrameSideData.size. + */ + AV_FRAME_DATA_A53_CC, + /** + * Stereoscopic 3d metadata. + * The data is the AVStereo3D struct defined in libavutil/stereo3d.h. + */ + AV_FRAME_DATA_STEREO3D, + /** + * The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h. + */ + AV_FRAME_DATA_MATRIXENCODING, + /** + * Metadata relevant to a downmix procedure. + * The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h. + */ + AV_FRAME_DATA_DOWNMIX_INFO, + /** + * ReplayGain information in the form of the AVReplayGain struct. + */ + AV_FRAME_DATA_REPLAYGAIN, + /** + * This side data contains a 3x3 transformation matrix describing an affine + * transformation that needs to be applied to the frame for correct + * presentation. + * + * See libavutil/display.h for a detailed description of the data. + */ + AV_FRAME_DATA_DISPLAYMATRIX, + /** + * Active Format Description data consisting of a single byte as specified + * in ETSI TS 101 154 using AVActiveFormatDescription enum. + */ + AV_FRAME_DATA_AFD, + /** + * Motion vectors exported by some codecs (on demand through the export_mvs + * flag set in the libavcodec AVCodecContext flags2 option). + * The data is the AVMotionVector struct defined in + * libavutil/motion_vector.h. + */ + AV_FRAME_DATA_MOTION_VECTORS, + /** + * Recommmends skipping the specified number of samples. This is exported + * only if the "skip_manual" AVOption is set in libavcodec. + * This has the same format as AV_PKT_DATA_SKIP_SAMPLES. 
+ * @code + * u32le number of samples to skip from start of this packet + * u32le number of samples to skip from end of this packet + * u8 reason for start skip + * u8 reason for end skip (0=padding silence, 1=convergence) + * @endcode + */ + AV_FRAME_DATA_SKIP_SAMPLES, + /** + * This side data must be associated with an audio frame and corresponds to + * enum AVAudioServiceType defined in avcodec.h. + */ + AV_FRAME_DATA_AUDIO_SERVICE_TYPE, + /** + * Mastering display metadata associated with a video frame. The payload is + * an AVMasteringDisplayMetadata type and contains information about the + * mastering display color volume. + */ + AV_FRAME_DATA_MASTERING_DISPLAY_METADATA, + /** + * The GOP timecode in 25 bit timecode format. Data format is 64-bit integer. + * This is set on the first frame of a GOP that has a temporal reference of 0. + */ + AV_FRAME_DATA_GOP_TIMECODE, + + /** + * The data represents the AVSphericalMapping structure defined in + * libavutil/spherical.h. + */ + AV_FRAME_DATA_SPHERICAL, + + /** + * Content light level (based on CTA-861.3). This payload contains data in + * the form of the AVContentLightMetadata struct. + */ + AV_FRAME_DATA_CONTENT_LIGHT_LEVEL, + + /** + * The data contains an ICC profile as an opaque octet buffer following the + * format described by ISO 15076-1 with an optional name defined in the + * metadata key entry "name". + */ + AV_FRAME_DATA_ICC_PROFILE, + +#if FF_API_FRAME_QP + /** + * Implementation-specific description of the format of AV_FRAME_QP_TABLE_DATA. + * The contents of this side data are undocumented and internal; use + * liteav_av_frame_set_qp_table() and liteav_av_frame_get_qp_table() to access this in a + * meaningful way instead. + */ + AV_FRAME_DATA_QP_TABLE_PROPERTIES, + + /** + * Raw QP table data. Its format is described by + * AV_FRAME_DATA_QP_TABLE_PROPERTIES. Use liteav_av_frame_set_qp_table() and + * liteav_av_frame_get_qp_table() to access this instead. 
+ */ + AV_FRAME_DATA_QP_TABLE_DATA, +#endif + + /** + * Timecode which conforms to SMPTE ST 12-1. The data is an array of 4 uint32_t + * where the first uint32_t describes how many (1-3) of the other timecodes are used. + * The timecode format is described in the liteav_av_timecode_get_smpte_from_framenum() + * function in libavutil/timecode.c. + */ + AV_FRAME_DATA_S12M_TIMECODE, +}; + +enum AVActiveFormatDescription { + AV_AFD_SAME = 8, + AV_AFD_4_3 = 9, + AV_AFD_16_9 = 10, + AV_AFD_14_9 = 11, + AV_AFD_4_3_SP_14_9 = 13, + AV_AFD_16_9_SP_14_9 = 14, + AV_AFD_SP_4_3 = 15, +}; + + +/** + * Structure to hold side data for an AVFrame. + * + * sizeof(AVFrameSideData) is not a part of the public ABI, so new fields may be added + * to the end with a minor bump. + */ +typedef struct AVFrameSideData { + enum AVFrameSideDataType type; + uint8_t *data; + int size; + AVDictionary *metadata; + AVBufferRef *buf; +} AVFrameSideData; + +/** + * This structure describes decoded (raw) audio or video data. + * + * AVFrame must be allocated using liteav_av_frame_alloc(). Note that this only + * allocates the AVFrame itself, the buffers for the data must be managed + * through other means (see below). + * AVFrame must be freed with liteav_av_frame_free(). + * + * AVFrame is typically allocated once and then reused multiple times to hold + * different data (e.g. a single AVFrame to hold frames received from a + * decoder). In such a case, liteav_av_frame_unref() will free any references held by + * the frame and reset it to its original clean state before it + * is reused again. + * + * The data described by an AVFrame is usually reference counted through the + * AVBuffer API. The underlying buffer references are stored in AVFrame.buf / + * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at + * least one reference is set, i.e. if AVFrame.buf[0] != NULL. 
In such a case, + * every single data plane must be contained in one of the buffers in + * AVFrame.buf or AVFrame.extended_buf. + * There may be a single buffer for all the data, or one separate buffer for + * each plane, or anything in between. + * + * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added + * to the end with a minor bump. + * + * Fields can be accessed through AVOptions, the name string used, matches the + * C structure field name for fields accessible through AVOptions. The AVClass + * for AVFrame can be obtained from avcodec_get_frame_class() + */ +typedef struct AVFrame { +#define AV_NUM_DATA_POINTERS 8 + /** + * pointer to the picture/channel planes. + * This might be different from the first allocated byte + * + * Some decoders access areas outside 0,0 - width,height, please + * see avcodec_align_dimensions2(). Some filters and swscale can read + * up to 16 bytes beyond the planes, if these filters are to be used, + * then 16 extra bytes must be allocated. + * + * NOTE: Except for hwaccel formats, pointers not needed by the format + * MUST be set to NULL. + */ + uint8_t *data[AV_NUM_DATA_POINTERS]; + + /** + * For video, size in bytes of each picture line. + * For audio, size in bytes of each plane. + * + * For audio, only linesize[0] may be set. For planar audio, each channel + * plane must be the same size. + * + * For video the linesizes should be multiples of the CPUs alignment + * preference, this is 16 or 32 for modern desktop CPUs. + * Some code requires such alignment other code can be slower without + * correct alignment, for yet other it makes no difference. + * + * @note The linesize may be larger than the size of usable data -- there + * may be extra padding present for performance reasons. + */ + int linesize[AV_NUM_DATA_POINTERS]; + + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. 
+ * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. + * + * Note: Both data and extended_data should always be set in a valid frame, + * but for planar audio with more channels that can fit in data, + * extended_data must be used in order to access all channels. + */ + uint8_t **extended_data; + + /** + * @name Video dimensions + * Video frames only. The coded dimensions (in pixels) of the video frame, + * i.e. the size of the rectangle that contains some well-defined values. + * + * @note The part of the frame intended for display/presentation is further + * restricted by the @ref cropping "Cropping rectangle". + * @{ + */ + int width, height; + /** + * @} + */ + + /** + * number of audio samples (per channel) described by this frame + */ + int nb_samples; + + /** + * format of the frame, -1 if unknown or unset + * Values correspond to enum AVPixelFormat for video frames, + * enum AVSampleFormat for audio) + */ + int format; + + /** + * 1 -> keyframe, 0-> not + */ + int key_frame; + + /** + * Picture type of the frame. + */ + enum AVPictureType pict_type; + + /** + * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified. + */ + AVRational sample_aspect_ratio; + + /** + * Presentation timestamp in time_base units (time when frame should be shown to user). + */ + int64_t pts; + +#if FF_API_PKT_PTS + /** + * PTS copied from the AVPacket that was decoded to produce this frame. + * @deprecated use the pts field instead + */ + attribute_deprecated + int64_t pkt_pts; +#endif + + /** + * DTS copied from the AVPacket that triggered returning this frame. (if frame threading isn't used) + * This is also the Presentation time of this AVFrame calculated from + * only AVPacket.dts values without pts values. 
+ */ + int64_t pkt_dts; + + /** + * picture number in bitstream order + */ + int coded_picture_number; + /** + * picture number in display order + */ + int display_picture_number; + + /** + * quality (between 1 (good) and FF_LAMBDA_MAX (bad)) + */ + int quality; + + /** + * for some private data of the user + */ + void *opaque; + +#if FF_API_ERROR_FRAME + /** + * @deprecated unused + */ + attribute_deprecated + uint64_t error[AV_NUM_DATA_POINTERS]; +#endif + + /** + * When decoding, this signals how much the picture must be delayed. + * extra_delay = repeat_pict / (2*fps) + */ + int repeat_pict; + + /** + * The content of the picture is interlaced. + */ + int interlaced_frame; + + /** + * If the content is interlaced, is top field displayed first. + */ + int top_field_first; + + /** + * Tell user application that palette has changed from previous frame. + */ + int palette_has_changed; + + /** + * reordered opaque 64 bits (generally an integer or a double precision float + * PTS but can be anything). + * The user sets AVCodecContext.reordered_opaque to represent the input at + * that time, + * the decoder reorders values as needed and sets AVFrame.reordered_opaque + * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque + * @deprecated in favor of pkt_pts + */ + int64_t reordered_opaque; + + /** + * Sample rate of the audio data. + */ + int sample_rate; + + /** + * Channel layout of the audio data. + */ + uint64_t channel_layout; + + /** + * AVBuffer references backing the data for this frame. If all elements of + * this array are NULL, then this frame is not reference counted. This array + * must be filled contiguously -- if buf[i] is non-NULL then buf[j] must + * also be non-NULL for all j < i. + * + * There may be at most one AVBuffer per data plane, so for video this array + * always contains all the references. 
For planar audio with more than + * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in + * this array. Then the extra AVBufferRef pointers are stored in the + * extended_buf array. + */ + AVBufferRef *buf[AV_NUM_DATA_POINTERS]; + + /** + * For planar audio which requires more than AV_NUM_DATA_POINTERS + * AVBufferRef pointers, this array will hold all the references which + * cannot fit into AVFrame.buf. + * + * Note that this is different from AVFrame.extended_data, which always + * contains all the pointers. This array only contains the extra pointers, + * which cannot fit into AVFrame.buf. + * + * This array is always allocated using liteav_av_malloc() by whoever constructs + * the frame. It is freed in liteav_av_frame_unref(). + */ + AVBufferRef **extended_buf; + /** + * Number of elements in extended_buf. + */ + int nb_extended_buf; + + AVFrameSideData **side_data; + int nb_side_data; + +/** + * @defgroup lavu_frame_flags AV_FRAME_FLAGS + * @ingroup lavu_frame + * Flags describing additional frame properties. + * + * @{ + */ + +/** + * The frame data may be corrupted, e.g. due to decoding errors. + */ +#define AV_FRAME_FLAG_CORRUPT (1 << 0) +/** + * A flag to mark the frames which need to be decoded, but shouldn't be output. + */ +#define AV_FRAME_FLAG_DISCARD (1 << 2) +/** + * @} + */ + + /** + * Frame flags, a combination of @ref lavu_frame_flags + */ + int flags; + + /** + * MPEG vs JPEG YUV range. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorRange color_range; + + enum AVColorPrimaries color_primaries; + + enum AVColorTransferCharacteristic color_trc; + + /** + * YUV colorspace type. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorSpace colorspace; + + enum AVChromaLocation chroma_location; + + /** + * frame timestamp estimated using various heuristics, in stream time base + * - encoding: unused + * - decoding: set by libavcodec, read by user. 
+ */ + int64_t best_effort_timestamp; + + /** + * reordered pos from the last AVPacket that has been input into the decoder + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_pos; + + /** + * duration of the corresponding packet, expressed in + * AVStream->time_base units, 0 if unknown. + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_duration; + + /** + * metadata. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + AVDictionary *metadata; + + /** + * decode error flags of the frame, set to a combination of + * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there + * were errors during the decoding. + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int decode_error_flags; +#define FF_DECODE_ERROR_INVALID_BITSTREAM 1 +#define FF_DECODE_ERROR_MISSING_REFERENCE 2 + + /** + * number of audio channels, only used for audio. + * - encoding: unused + * - decoding: Read by user. + */ + int channels; + + /** + * size of the corresponding packet containing the compressed + * frame. + * It is set to a negative value if unknown. + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int pkt_size; + +#if FF_API_FRAME_QP + /** + * QP table + */ + attribute_deprecated + int8_t *qscale_table; + /** + * QP store stride + */ + attribute_deprecated + int qstride; + + attribute_deprecated + int qscale_type; + + attribute_deprecated + AVBufferRef *qp_table_buf; +#endif + /** + * For hwaccel-format frames, this should be a reference to the + * AVHWFramesContext describing the frame. + */ + AVBufferRef *hw_frames_ctx; + + /** + * AVBufferRef for free use by the API user. FFmpeg will never check the + * contents of the buffer ref. FFmpeg calls liteav_av_buffer_unref() on it when + * the frame is unreferenced. liteav_av_frame_copy_props() calls create a new + * reference with liteav_av_buffer_ref() for the target frame's opaque_ref field. 
+ * + * This is unrelated to the opaque field, although it serves a similar + * purpose. + */ + AVBufferRef *opaque_ref; + + /** + * @anchor cropping + * @name Cropping + * Video frames only. The number of pixels to discard from the the + * top/bottom/left/right border of the frame to obtain the sub-rectangle of + * the frame intended for presentation. + * @{ + */ + size_t crop_top; + size_t crop_bottom; + size_t crop_left; + size_t crop_right; + /** + * @} + */ + + /** + * AVBufferRef for internal use by a single libav* library. + * Must not be used to transfer data between libraries. + * Has to be NULL when ownership of the frame leaves the respective library. + * + * Code outside the FFmpeg libs should never check or change the contents of the buffer ref. + * + * FFmpeg calls liteav_av_buffer_unref() on it when the frame is unreferenced. + * liteav_av_frame_copy_props() calls create a new reference with liteav_av_buffer_ref() + * for the target frame's private_ref field. + */ + AVBufferRef *private_ref; +} AVFrame; + +#if FF_API_FRAME_GET_SET +/** + * Accessors for some AVFrame fields. These used to be provided for ABI + * compatibility, and do not need to be used anymore. 
+ */ +attribute_deprecated +int64_t liteav_av_frame_get_best_effort_timestamp(const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val); +attribute_deprecated +int64_t liteav_av_frame_get_pkt_duration (const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_pkt_duration (AVFrame *frame, int64_t val); +attribute_deprecated +int64_t liteav_av_frame_get_pkt_pos (const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_pkt_pos (AVFrame *frame, int64_t val); +attribute_deprecated +int64_t liteav_av_frame_get_channel_layout (const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_channel_layout (AVFrame *frame, int64_t val); +attribute_deprecated +int liteav_av_frame_get_channels (const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_channels (AVFrame *frame, int val); +attribute_deprecated +int liteav_av_frame_get_sample_rate (const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_sample_rate (AVFrame *frame, int val); +attribute_deprecated +AVDictionary *liteav_av_frame_get_metadata (const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_metadata (AVFrame *frame, AVDictionary *val); +attribute_deprecated +int liteav_av_frame_get_decode_error_flags (const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_decode_error_flags (AVFrame *frame, int val); +attribute_deprecated +int liteav_av_frame_get_pkt_size(const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_pkt_size(AVFrame *frame, int val); +#if FF_API_FRAME_QP +attribute_deprecated +int8_t *liteav_av_frame_get_qp_table(AVFrame *f, int *stride, int *type); +attribute_deprecated +int liteav_av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type); +#endif +attribute_deprecated +enum AVColorSpace liteav_av_frame_get_colorspace(const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_colorspace(AVFrame 
*frame, enum AVColorSpace val); +attribute_deprecated +enum AVColorRange liteav_av_frame_get_color_range(const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_color_range(AVFrame *frame, enum AVColorRange val); +#endif + +/** + * Get the name of a colorspace. + * @return a static string identifying the colorspace; can be NULL. + */ +const char *liteav_av_get_colorspace_name(enum AVColorSpace val); + +/** + * Allocate an AVFrame and set its fields to default values. The resulting + * struct must be freed using liteav_av_frame_free(). + * + * @return An AVFrame filled with default values or NULL on failure. + * + * @note this only allocates the AVFrame itself, not the data buffers. Those + * must be allocated through other means, e.g. with liteav_av_frame_get_buffer() or + * manually. + */ +AVFrame *liteav_av_frame_alloc(void); + +/** + * Free the frame and any dynamically allocated objects in it, + * e.g. extended_data. If the frame is reference counted, it will be + * unreferenced first. + * + * @param frame frame to be freed. The pointer will be set to NULL. + */ +void liteav_av_frame_free(AVFrame **frame); + +/** + * Set up a new reference to the data described by the source frame. + * + * Copy frame properties from src to dst and create a new reference for each + * AVBufferRef from src. + * + * If src is not reference counted, new buffers are allocated and the data is + * copied. + * + * @warning: dst MUST have been either unreferenced with liteav_av_frame_unref(dst), + * or newly allocated with liteav_av_frame_alloc() before calling this + * function, or undefined behavior will occur. + * + * @return 0 on success, a negative AVERROR on error + */ +int liteav_av_frame_ref(AVFrame *dst, const AVFrame *src); + +/** + * Create a new frame that references the same data as src. + * + * This is a shortcut for liteav_av_frame_alloc()+liteav_av_frame_ref(). + * + * @return newly created AVFrame on success, NULL on error. 
+ */ +AVFrame *liteav_av_frame_clone(const AVFrame *src); + +/** + * Unreference all the buffers referenced by frame and reset the frame fields. + */ +void liteav_av_frame_unref(AVFrame *frame); + +/** + * Move everything contained in src to dst and reset src. + * + * @warning: dst is not unreferenced, but directly overwritten without reading + * or deallocating its contents. Call liteav_av_frame_unref(dst) manually + * before calling this function to ensure that no memory is leaked. + */ +void liteav_av_frame_move_ref(AVFrame *dst, AVFrame *src); + +/** + * Allocate new buffer(s) for audio or video data. + * + * The following fields must be set on frame before calling this function: + * - format (pixel format for video, sample format for audio) + * - width and height for video + * - nb_samples and channel_layout for audio + * + * This function will fill AVFrame.data and AVFrame.buf arrays and, if + * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf. + * For planar formats, one buffer will be allocated for each plane. + * + * @warning: if frame already has been allocated, calling this function will + * leak memory. In addition, undefined behavior can occur in certain + * cases. + * + * @param frame frame in which to store the new buffers. + * @param align Required buffer size alignment. If equal to 0, alignment will be + * chosen automatically for the current CPU. It is highly + * recommended to pass 0 here unless you know what you are doing. + * + * @return 0 on success, a negative AVERROR on error. + */ +int liteav_av_frame_get_buffer(AVFrame *frame, int align); + +/** + * Check if the frame data is writable. + * + * @return A positive value if the frame data is writable (which is true if and + * only if each of the underlying buffers has only one reference, namely the one + * stored in this frame). Return 0 otherwise. 
+ * + * If 1 is returned the answer is valid until liteav_av_buffer_ref() is called on any + * of the underlying AVBufferRefs (e.g. through liteav_av_frame_ref() or directly). + * + * @see liteav_av_frame_make_writable(), liteav_av_buffer_is_writable() + */ +int liteav_av_frame_is_writable(AVFrame *frame); + +/** + * Ensure that the frame data is writable, avoiding data copy if possible. + * + * Do nothing if the frame is writable, allocate new buffers and copy the data + * if it is not. + * + * @return 0 on success, a negative AVERROR on error. + * + * @see liteav_av_frame_is_writable(), liteav_av_buffer_is_writable(), + * liteav_av_buffer_make_writable() + */ +int liteav_av_frame_make_writable(AVFrame *frame); + +/** + * Copy the frame data from src to dst. + * + * This function does not allocate anything, dst must be already initialized and + * allocated with the same parameters as src. + * + * This function only copies the frame data (i.e. the contents of the data / + * extended data arrays), not any other properties. + * + * @return >= 0 on success, a negative AVERROR on error. + */ +int liteav_av_frame_copy(AVFrame *dst, const AVFrame *src); + +/** + * Copy only "metadata" fields from src to dst. + * + * Metadata for the purpose of this function are those fields that do not affect + * the data layout in the buffers. E.g. pts, sample rate (for audio) or sample + * aspect ratio (for video), but not width/height or channel layout. + * Side data is also copied. + */ +int liteav_av_frame_copy_props(AVFrame *dst, const AVFrame *src); + +/** + * Get the buffer reference a given data plane is stored in. + * + * @param plane index of the data plane of interest in frame->extended_data. + * + * @return the buffer reference that contains the plane or NULL if the input + * frame is not valid. + */ +AVBufferRef *liteav_av_frame_get_plane_buffer(AVFrame *frame, int plane); + +/** + * Add a new side data to a frame. 
+ * + * @param frame a frame to which the side data should be added + * @param type type of the added side data + * @param size size of the side data + * + * @return newly added side data on success, NULL on error + */ +AVFrameSideData *liteav_av_frame_new_side_data(AVFrame *frame, + enum AVFrameSideDataType type, + int size); + +/** + * Add a new side data to a frame from an existing AVBufferRef + * + * @param frame a frame to which the side data should be added + * @param type the type of the added side data + * @param buf an AVBufferRef to add as side data. The ownership of + * the reference is transferred to the frame. + * + * @return newly added side data on success, NULL on error. On failure + * the frame is unchanged and the AVBufferRef remains owned by + * the caller. + */ +AVFrameSideData *liteav_av_frame_new_side_data_from_buf(AVFrame *frame, + enum AVFrameSideDataType type, + AVBufferRef *buf); + +/** + * @return a pointer to the side data of a given type on success, NULL if there + * is no side data with such type in this frame. + */ +AVFrameSideData *liteav_av_frame_get_side_data(const AVFrame *frame, + enum AVFrameSideDataType type); + +/** + * If side data of the supplied type exists in the frame, free it and remove it + * from the frame. + */ +void liteav_av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type); + + +/** + * Flags for frame cropping. + */ +enum { + /** + * Apply the maximum possible cropping, even if it requires setting the + * AVFrame.data[] entries to unaligned pointers. Passing unaligned data + * to FFmpeg API is generally not allowed, and causes undefined behavior + * (such as crashes). You can pass unaligned data only to FFmpeg APIs that + * are explicitly documented to accept it. Use this flag only if you + * absolutely know what you are doing. + */ + AV_FRAME_CROP_UNALIGNED = 1 << 0, +}; + +/** + * Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ + * crop_bottom fields. 
If cropping is successful, the function will adjust the + * data pointers and the width/height fields, and set the crop fields to 0. + * + * In all cases, the cropping boundaries will be rounded to the inherent + * alignment of the pixel format. In some cases, such as for opaque hwaccel + * formats, the left/top cropping is ignored. The crop fields are set to 0 even + * if the cropping was rounded or ignored. + * + * @param frame the frame which should be cropped + * @param flags Some combination of AV_FRAME_CROP_* flags, or 0. + * + * @return >= 0 on success, a negative AVERROR on error. If the cropping fields + * were invalid, AVERROR(ERANGE) is returned, and nothing is changed. + */ +int liteav_av_frame_apply_cropping(AVFrame *frame, int flags); + +/** + * @return a string identifying the side data type + */ +const char *liteav_av_frame_side_data_name(enum AVFrameSideDataType type); + +/** + * @} + */ + +#endif /* AVUTIL_FRAME_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hash.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hash.h new file mode 100644 index 0000000..5e29000 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hash.h @@ -0,0 +1,270 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2013 Reimar Döffinger <Reimar.Doeffinger@gmx.de> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_hash_generic + * Generic hashing API + */ + +#ifndef AVUTIL_HASH_H +#define AVUTIL_HASH_H + +#include <stdint.h> + +#include "version.h" + +/** + * @defgroup lavu_hash Hash Functions + * @ingroup lavu_crypto + * Hash functions useful in multimedia. + * + * Hash functions are widely used in multimedia, from error checking and + * concealment to internal regression testing. libavutil has efficient + * implementations of a variety of hash functions that may be useful for + * FFmpeg and other multimedia applications. + * + * @{ + * + * @defgroup lavu_hash_generic Generic Hashing API + * An abstraction layer for all hash functions supported by libavutil. + * + * If your application needs to support a wide range of different hash + * functions, then the Generic Hashing API is for you. It provides a generic, + * reusable API for @ref lavu_hash "all hash functions" implemented in libavutil. + * If you just need to use one particular hash function, use the @ref lavu_hash + * "individual hash" directly. 
+ * + * @section Sample Code + * + * A basic template for using the Generic Hashing API follows: + * + * @code + * struct AVHashContext *ctx = NULL; + * const char *hash_name = NULL; + * uint8_t *output_buf = NULL; + * + * // Select from a string returned by liteav_av_hash_names() + * hash_name = ...; + * + * // Allocate a hash context + * ret = liteav_av_hash_alloc(&ctx, hash_name); + * if (ret < 0) + * return ret; + * + * // Initialize the hash context + * liteav_av_hash_init(ctx); + * + * // Update the hash context with data + * while (data_left) { + * liteav_av_hash_update(ctx, data, size); + * } + * + * // Now we have no more data, so it is time to finalize the hash and get the + * // output. But we need to first allocate an output buffer. Note that you can + * // use any memory allocation function, including malloc(), not just + * // liteav_av_malloc(). + * output_buf = liteav_av_malloc(liteav_av_hash_get_size(ctx)); + * if (!output_buf) + * return AVERROR(ENOMEM); + * + * // Finalize the hash context. + * // You can use any of the liteav_av_hash_final*() functions provided, for other + * // output formats. If you do so, be sure to adjust the memory allocation + * // above. See the function documentation below for the exact amount of extra + * // memory needed. + * liteav_av_hash_final(ctx, output_buffer); + * + * // Free the context + * liteav_av_hash_freep(&ctx); + * @endcode + * + * @section Hash Function-Specific Information + * If the CRC32 hash is selected, the #AV_CRC_32_IEEE polynomial will be + * used. + * + * If the Murmur3 hash is selected, the default seed will be used. See @ref + * lavu_murmur3_seedinfo "Murmur3" for more information. + * + * @{ + */ + +/** + * @example ffhash.c + * This example is a simple command line application that takes one or more + * arguments. It demonstrates a typical use of the hashing API with allocation, + * initialization, updating, and finalizing. 
+ */ + +struct AVHashContext; + +/** + * Allocate a hash context for the algorithm specified by name. + * + * @return >= 0 for success, a negative error code for failure + * + * @note The context is not initialized after a call to this function; you must + * call liteav_av_hash_init() to do so. + */ +int liteav_av_hash_alloc(struct AVHashContext **ctx, const char *name); + +/** + * Get the names of available hash algorithms. + * + * This function can be used to enumerate the algorithms. + * + * @param[in] i Index of the hash algorithm, starting from 0 + * @return Pointer to a static string or `NULL` if `i` is out of range + */ +const char *liteav_av_hash_names(int i); + +/** + * Get the name of the algorithm corresponding to the given hash context. + */ +const char *liteav_av_hash_get_name(const struct AVHashContext *ctx); + +/** + * Maximum value that liteav_av_hash_get_size() will currently return. + * + * You can use this if you absolutely want or need to use static allocation for + * the output buffer and are fine with not supporting hashes newly added to + * libavutil without recompilation. + * + * @warning + * Adding new hashes with larger sizes, and increasing the macro while doing + * so, will not be considered an ABI change. To prevent your code from + * overflowing a buffer, either dynamically allocate the output buffer with + * liteav_av_hash_get_size(), or limit your use of the Hashing API to hashes that are + * already in FFmpeg during the time of compilation. + */ +#define AV_HASH_MAX_SIZE 64 + +/** + * Get the size of the resulting hash value in bytes. + * + * The maximum value this function will currently return is available as macro + * #AV_HASH_MAX_SIZE. + * + * @param[in] ctx Hash context + * @return Size of the hash value in bytes + */ +int liteav_av_hash_get_size(const struct AVHashContext *ctx); + +/** + * Initialize or reset a hash context. 
+ * + * @param[in,out] ctx Hash context + */ +void liteav_av_hash_init(struct AVHashContext *ctx); + +/** + * Update a hash context with additional data. + * + * @param[in,out] ctx Hash context + * @param[in] src Data to be added to the hash context + * @param[in] len Size of the additional data + */ +#if FF_API_CRYPTO_SIZE_T +void liteav_av_hash_update(struct AVHashContext *ctx, const uint8_t *src, int len); +#else +void liteav_av_hash_update(struct AVHashContext *ctx, const uint8_t *src, size_t len); +#endif + +/** + * Finalize a hash context and compute the actual hash value. + * + * The minimum size of `dst` buffer is given by liteav_av_hash_get_size() or + * #AV_HASH_MAX_SIZE. The use of the latter macro is discouraged. + * + * It is not safe to update or finalize a hash context again, if it has already + * been finalized. + * + * @param[in,out] ctx Hash context + * @param[out] dst Where the final hash value will be stored + * + * @see liteav_av_hash_final_bin() provides an alternative API + */ +void liteav_av_hash_final(struct AVHashContext *ctx, uint8_t *dst); + +/** + * Finalize a hash context and store the actual hash value in a buffer. + * + * It is not safe to update or finalize a hash context again, if it has already + * been finalized. + * + * If `size` is smaller than the hash size (given by liteav_av_hash_get_size()), the + * hash is truncated; if size is larger, the buffer is padded with 0. + * + * @param[in,out] ctx Hash context + * @param[out] dst Where the final hash value will be stored + * @param[in] size Number of bytes to write to `dst` + */ +void liteav_av_hash_final_bin(struct AVHashContext *ctx, uint8_t *dst, int size); + +/** + * Finalize a hash context and store the hexadecimal representation of the + * actual hash value as a string. + * + * It is not safe to update or finalize a hash context again, if it has already + * been finalized. + * + * The string is always 0-terminated. 
+ * + * If `size` is smaller than `2 * hash_size + 1`, where `hash_size` is the + * value returned by liteav_av_hash_get_size(), the string will be truncated. + * + * @param[in,out] ctx Hash context + * @param[out] dst Where the string will be stored + * @param[in] size Maximum number of bytes to write to `dst` + */ +void liteav_av_hash_final_hex(struct AVHashContext *ctx, uint8_t *dst, int size); + +/** + * Finalize a hash context and store the Base64 representation of the + * actual hash value as a string. + * + * It is not safe to update or finalize a hash context again, if it has already + * been finalized. + * + * The string is always 0-terminated. + * + * If `size` is smaller than AV_BASE64_SIZE(hash_size), where `hash_size` is + * the value returned by liteav_av_hash_get_size(), the string will be truncated. + * + * @param[in,out] ctx Hash context + * @param[out] dst Where the final hash value will be stored + * @param[in] size Maximum number of bytes to write to `dst` + */ +void liteav_av_hash_final_b64(struct AVHashContext *ctx, uint8_t *dst, int size); + +/** + * Free hash context and set hash context pointer to `NULL`. + * + * @param[in,out] ctx Pointer to hash context + */ +void liteav_av_hash_freep(struct AVHashContext **ctx); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_HASH_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hmac.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hmac.h new file mode 100644 index 0000000..5007cdf --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hmac.h @@ -0,0 +1,101 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2012 Martin Storsjo + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HMAC_H +#define AVUTIL_HMAC_H + +#include <stdint.h> + +#include "version.h" +/** + * @defgroup lavu_hmac HMAC + * @ingroup lavu_crypto + * @{ + */ + +enum AVHMACType { + AV_HMAC_MD5, + AV_HMAC_SHA1, + AV_HMAC_SHA224, + AV_HMAC_SHA256, + AV_HMAC_SHA384, + AV_HMAC_SHA512, +}; + +typedef struct AVHMAC AVHMAC; + +/** + * Allocate an AVHMAC context. + * @param type The hash function used for the HMAC. + */ +AVHMAC *liteav_av_hmac_alloc(enum AVHMACType type); + +/** + * Free an AVHMAC context. + * @param ctx The context to free, may be NULL + */ +void liteav_av_hmac_free(AVHMAC *ctx); + +/** + * Initialize an AVHMAC context with an authentication key. + * @param ctx The HMAC context + * @param key The authentication key + * @param keylen The length of the key, in bytes + */ +void liteav_av_hmac_init(AVHMAC *ctx, const uint8_t *key, unsigned int keylen); + +/** + * Hash data with the HMAC. + * @param ctx The HMAC context + * @param data The data to hash + * @param len The length of the data, in bytes + */ +void liteav_av_hmac_update(AVHMAC *ctx, const uint8_t *data, unsigned int len); + +/** + * Finish hashing and output the HMAC digest. 
+ * @param ctx The HMAC context + * @param out The output buffer to write the digest into + * @param outlen The length of the out buffer, in bytes + * @return The number of bytes written to out, or a negative error code. + */ +int liteav_av_hmac_final(AVHMAC *ctx, uint8_t *out, unsigned int outlen); + +/** + * Hash an array of data with a key. + * @param ctx The HMAC context + * @param data The data to hash + * @param len The length of the data, in bytes + * @param key The authentication key + * @param keylen The length of the key, in bytes + * @param out The output buffer to write the digest into + * @param outlen The length of the out buffer, in bytes + * @return The number of bytes written to out, or a negative error code. + */ +int liteav_av_hmac_calc(AVHMAC *ctx, const uint8_t *data, unsigned int len, + const uint8_t *key, unsigned int keylen, + uint8_t *out, unsigned int outlen); + +/** + * @} + */ + +#endif /* AVUTIL_HMAC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext.h new file mode 100644 index 0000000..e97a9f4 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext.h @@ -0,0 +1,585 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_H +#define AVUTIL_HWCONTEXT_H + +#include "buffer.h" +#include "frame.h" +#include "log.h" +#include "pixfmt.h" + +enum AVHWDeviceType { + AV_HWDEVICE_TYPE_NONE, + AV_HWDEVICE_TYPE_VDPAU, + AV_HWDEVICE_TYPE_CUDA, + AV_HWDEVICE_TYPE_VAAPI, + AV_HWDEVICE_TYPE_DXVA2, + AV_HWDEVICE_TYPE_QSV, + AV_HWDEVICE_TYPE_VIDEOTOOLBOX, + AV_HWDEVICE_TYPE_D3D11VA, + AV_HWDEVICE_TYPE_DRM, + AV_HWDEVICE_TYPE_OPENCL, + AV_HWDEVICE_TYPE_MEDIACODEC, +}; + +typedef struct AVHWDeviceInternal AVHWDeviceInternal; + +/** + * This struct aggregates all the (hardware/vendor-specific) "high-level" state, + * i.e. state that is not tied to a concrete processing configuration. + * E.g., in an API that supports hardware-accelerated encoding and decoding, + * this struct will (if possible) wrap the state that is common to both encoding + * and decoding and from which specific instances of encoders or decoders can be + * derived. + * + * This struct is reference-counted with the AVBuffer mechanism. The + * liteav_av_hwdevice_ctx_alloc() constructor yields a reference, whose data field + * points to the actual AVHWDeviceContext. Further objects derived from + * AVHWDeviceContext (such as AVHWFramesContext, describing a frame pool with + * specific properties) will hold an internal reference to it. After all the + * references are released, the AVHWDeviceContext itself will be freed, + * optionally invoking a user-specified callback for uninitializing the hardware + * state. + */ +typedef struct AVHWDeviceContext { + /** + * A class for logging. Set by liteav_av_hwdevice_ctx_alloc(). + */ + const AVClass *av_class; + + /** + * Private data used internally by libavutil. Must not be accessed in any + * way by the caller. 
+ */ + AVHWDeviceInternal *internal; + + /** + * This field identifies the underlying API used for hardware access. + * + * This field is set when this struct is allocated and never changed + * afterwards. + */ + enum AVHWDeviceType type; + + /** + * The format-specific data, allocated and freed by libavutil along with + * this context. + * + * Should be cast by the user to the format-specific context defined in the + * corresponding header (hwcontext_*.h) and filled as described in the + * documentation before calling liteav_av_hwdevice_ctx_init(). + * + * After calling liteav_av_hwdevice_ctx_init() this struct should not be modified + * by the caller. + */ + void *hwctx; + + /** + * This field may be set by the caller before calling liteav_av_hwdevice_ctx_init(). + * + * If non-NULL, this callback will be called when the last reference to + * this context is unreferenced, immediately before it is freed. + * + * @note when other objects (e.g an AVHWFramesContext) are derived from this + * struct, this callback will be invoked after all such child objects + * are fully uninitialized and their respective destructors invoked. + */ + void (*free)(struct AVHWDeviceContext *ctx); + + /** + * Arbitrary user data, to be used e.g. by the free() callback. + */ + void *user_opaque; +} AVHWDeviceContext; + +typedef struct AVHWFramesInternal AVHWFramesInternal; + +/** + * This struct describes a set or pool of "hardware" frames (i.e. those with + * data not located in normal system memory). All the frames in the pool are + * assumed to be allocated in the same way and interchangeable. + * + * This struct is reference-counted with the AVBuffer mechanism and tied to a + * given AVHWDeviceContext instance. The liteav_av_hwframe_ctx_alloc() constructor + * yields a reference, whose data field points to the actual AVHWFramesContext + * struct. + */ +typedef struct AVHWFramesContext { + /** + * A class for logging. 
+ */ + const AVClass *av_class; + + /** + * Private data used internally by libavutil. Must not be accessed in any + * way by the caller. + */ + AVHWFramesInternal *internal; + + /** + * A reference to the parent AVHWDeviceContext. This reference is owned and + * managed by the enclosing AVHWFramesContext, but the caller may derive + * additional references from it. + */ + AVBufferRef *device_ref; + + /** + * The parent AVHWDeviceContext. This is simply a pointer to + * device_ref->data provided for convenience. + * + * Set by libavutil in liteav_av_hwframe_ctx_init(). + */ + AVHWDeviceContext *device_ctx; + + /** + * The format-specific data, allocated and freed automatically along with + * this context. + * + * Should be cast by the user to the format-specific context defined in the + * corresponding header (hwframe_*.h) and filled as described in the + * documentation before calling liteav_av_hwframe_ctx_init(). + * + * After any frames using this context are created, the contents of this + * struct should not be modified by the caller. + */ + void *hwctx; + + /** + * This field may be set by the caller before calling liteav_av_hwframe_ctx_init(). + * + * If non-NULL, this callback will be called when the last reference to + * this context is unreferenced, immediately before it is freed. + */ + void (*free)(struct AVHWFramesContext *ctx); + + /** + * Arbitrary user data, to be used e.g. by the free() callback. + */ + void *user_opaque; + + /** + * A pool from which the frames are allocated by liteav_av_hwframe_get_buffer(). + * This field may be set by the caller before calling liteav_av_hwframe_ctx_init(). + * The buffers returned by calling liteav_av_buffer_pool_get() on this pool must + * have the properties described in the documentation in the corresponding hw + * type's header (hwcontext_*.h). The pool will be freed strictly before + * this struct's free() callback is invoked. 
+ * + * This field may be NULL, then libavutil will attempt to allocate a pool + * internally. Note that certain device types enforce pools allocated at + * fixed size (frame count), which cannot be extended dynamically. In such a + * case, initial_pool_size must be set appropriately. + */ + AVBufferPool *pool; + + /** + * Initial size of the frame pool. If a device type does not support + * dynamically resizing the pool, then this is also the maximum pool size. + * + * May be set by the caller before calling liteav_av_hwframe_ctx_init(). Must be + * set if pool is NULL and the device type does not support dynamic pools. + */ + int initial_pool_size; + + /** + * The pixel format identifying the underlying HW surface type. + * + * Must be a hwaccel format, i.e. the corresponding descriptor must have the + * AV_PIX_FMT_FLAG_HWACCEL flag set. + * + * Must be set by the user before calling liteav_av_hwframe_ctx_init(). + */ + enum AVPixelFormat format; + + /** + * The pixel format identifying the actual data layout of the hardware + * frames. + * + * Must be set by the caller before calling liteav_av_hwframe_ctx_init(). + * + * @note when the underlying API does not provide the exact data layout, but + * only the colorspace/bit depth, this field should be set to the fully + * planar version of that format (e.g. for 8-bit 420 YUV it should be + * AV_PIX_FMT_YUV420P, not AV_PIX_FMT_NV12 or anything else). + */ + enum AVPixelFormat sw_format; + + /** + * The allocated dimensions of the frames in this pool. + * + * Must be set by the user before calling liteav_av_hwframe_ctx_init(). + */ + int width, height; +} AVHWFramesContext; + +/** + * Look up an AVHWDeviceType by name. + * + * @param name String name of the device type (case-insensitive). + * @return The type from enum AVHWDeviceType, or AV_HWDEVICE_TYPE_NONE if + * not found. + */ +enum AVHWDeviceType liteav_av_hwdevice_find_type_by_name(const char *name); + +/** Get the string name of an AVHWDeviceType. 
+ * + * @param type Type from enum AVHWDeviceType. + * @return Pointer to a static string containing the name, or NULL if the type + * is not valid. + */ +const char *liteav_av_hwdevice_get_type_name(enum AVHWDeviceType type); + +/** + * Iterate over supported device types. + * + * @param type AV_HWDEVICE_TYPE_NONE initially, then the previous type + * returned by this function in subsequent iterations. + * @return The next usable device type from enum AVHWDeviceType, or + * AV_HWDEVICE_TYPE_NONE if there are no more. + */ +enum AVHWDeviceType liteav_av_hwdevice_iterate_types(enum AVHWDeviceType prev); + +/** + * Allocate an AVHWDeviceContext for a given hardware type. + * + * @param type the type of the hardware device to allocate. + * @return a reference to the newly created AVHWDeviceContext on success or NULL + * on failure. + */ +AVBufferRef *liteav_av_hwdevice_ctx_alloc(enum AVHWDeviceType type); + +/** + * Finalize the device context before use. This function must be called after + * the context is filled with all the required information and before it is + * used in any way. + * + * @param ref a reference to the AVHWDeviceContext + * @return 0 on success, a negative AVERROR code on failure + */ +int liteav_av_hwdevice_ctx_init(AVBufferRef *ref); + +/** + * Open a device of the specified type and create an AVHWDeviceContext for it. + * + * This is a convenience function intended to cover the simple cases. Callers + * who need to fine-tune device creation/management should open the device + * manually and then wrap it in an AVHWDeviceContext using + * liteav_av_hwdevice_ctx_alloc()/liteav_av_hwdevice_ctx_init(). + * + * The returned context is already initialized and ready for use, the caller + * should not call liteav_av_hwdevice_ctx_init() on it. The user_opaque/free fields of + * the created AVHWDeviceContext are set by this function and should not be + * touched by the caller. 
+ * + * @param device_ctx On success, a reference to the newly-created device context + * will be written here. The reference is owned by the caller + * and must be released with liteav_av_buffer_unref() when no longer + * needed. On failure, NULL will be written to this pointer. + * @param type The type of the device to create. + * @param device A type-specific string identifying the device to open. + * @param opts A dictionary of additional (type-specific) options to use in + * opening the device. The dictionary remains owned by the caller. + * @param flags currently unused + * + * @return 0 on success, a negative AVERROR code on failure. + */ +int liteav_av_hwdevice_ctx_create(AVBufferRef **device_ctx, enum AVHWDeviceType type, + const char *device, AVDictionary *opts, int flags); + +/** + * Create a new device of the specified type from an existing device. + * + * If the source device is a device of the target type or was originally + * derived from such a device (possibly through one or more intermediate + * devices of other types), then this will return a reference to the + * existing device of the same type as is requested. + * + * Otherwise, it will attempt to derive a new device from the given source + * device. If direct derivation to the new type is not implemented, it will + * attempt the same derivation from each ancestor of the source device in + * turn looking for an implemented derivation method. + * + * @param dst_ctx On success, a reference to the newly-created + * AVHWDeviceContext. + * @param type The type of the new device to create. + * @param src_ctx A reference to an existing AVHWDeviceContext which will be + * used to create the new device. + * @param flags Currently unused; should be set to zero. + * @return Zero on success, a negative AVERROR code on failure. 
+ */ +int liteav_av_hwdevice_ctx_create_derived(AVBufferRef **dst_ctx, + enum AVHWDeviceType type, + AVBufferRef *src_ctx, int flags); + + +/** + * Allocate an AVHWFramesContext tied to a given device context. + * + * @param device_ctx a reference to a AVHWDeviceContext. This function will make + * a new reference for internal use, the one passed to the + * function remains owned by the caller. + * @return a reference to the newly created AVHWFramesContext on success or NULL + * on failure. + */ +AVBufferRef *liteav_av_hwframe_ctx_alloc(AVBufferRef *device_ctx); + +/** + * Finalize the context before use. This function must be called after the + * context is filled with all the required information and before it is attached + * to any frames. + * + * @param ref a reference to the AVHWFramesContext + * @return 0 on success, a negative AVERROR code on failure + */ +int liteav_av_hwframe_ctx_init(AVBufferRef *ref); + +/** + * Allocate a new frame attached to the given AVHWFramesContext. + * + * @param hwframe_ctx a reference to an AVHWFramesContext + * @param frame an empty (freshly allocated or unreffed) frame to be filled with + * newly allocated buffers. + * @param flags currently unused, should be set to zero + * @return 0 on success, a negative AVERROR code on failure + */ +int liteav_av_hwframe_get_buffer(AVBufferRef *hwframe_ctx, AVFrame *frame, int flags); + +/** + * Copy data to or from a hw surface. At least one of dst/src must have an + * AVHWFramesContext attached. + * + * If src has an AVHWFramesContext attached, then the format of dst (if set) + * must use one of the formats returned by liteav_av_hwframe_transfer_get_formats(src, + * AV_HWFRAME_TRANSFER_DIRECTION_FROM). + * If dst has an AVHWFramesContext attached, then the format of src must use one + * of the formats returned by liteav_av_hwframe_transfer_get_formats(dst, + * AV_HWFRAME_TRANSFER_DIRECTION_TO) + * + * dst may be "clean" (i.e. 
with data/buf pointers unset), in which case the + * data buffers will be allocated by this function using liteav_av_frame_get_buffer(). + * If dst->format is set, then this format will be used, otherwise (when + * dst->format is AV_PIX_FMT_NONE) the first acceptable format will be chosen. + * + * The two frames must have matching allocated dimensions (i.e. equal to + * AVHWFramesContext.width/height), since not all device types support + * transferring a sub-rectangle of the whole surface. The display dimensions + * (i.e. AVFrame.width/height) may be smaller than the allocated dimensions, but + * also have to be equal for both frames. When the display dimensions are + * smaller than the allocated dimensions, the content of the padding in the + * destination frame is unspecified. + * + * @param dst the destination frame. dst is not touched on failure. + * @param src the source frame. + * @param flags currently unused, should be set to zero + * @return 0 on success, a negative AVERROR error code on failure. + */ +int liteav_av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags); + +enum AVHWFrameTransferDirection { + /** + * Transfer the data from the queried hw frame. + */ + AV_HWFRAME_TRANSFER_DIRECTION_FROM, + + /** + * Transfer the data to the queried hw frame. + */ + AV_HWFRAME_TRANSFER_DIRECTION_TO, +}; + +/** + * Get a list of possible source or target formats usable in + * liteav_av_hwframe_transfer_data(). + * + * @param hwframe_ctx the frame context to obtain the information for + * @param dir the direction of the transfer + * @param formats the pointer to the output format list will be written here. + * The list is terminated with AV_PIX_FMT_NONE and must be freed + * by the caller when no longer needed using liteav_av_free(). + * If this function returns successfully, the format list will + * have at least one item (not counting the terminator). + * On failure, the contents of this pointer are unspecified. 
+ * @param flags currently unused, should be set to zero + * @return 0 on success, a negative AVERROR code on failure. + */ +int liteav_av_hwframe_transfer_get_formats(AVBufferRef *hwframe_ctx, + enum AVHWFrameTransferDirection dir, + enum AVPixelFormat **formats, int flags); + + +/** + * This struct describes the constraints on hardware frames attached to + * a given device with a hardware-specific configuration. This is returned + * by liteav_av_hwdevice_get_hwframe_constraints() and must be freed by + * liteav_av_hwframe_constraints_free() after use. + */ +typedef struct AVHWFramesConstraints { + /** + * A list of possible values for format in the hw_frames_ctx, + * terminated by AV_PIX_FMT_NONE. This member will always be filled. + */ + enum AVPixelFormat *valid_hw_formats; + + /** + * A list of possible values for sw_format in the hw_frames_ctx, + * terminated by AV_PIX_FMT_NONE. Can be NULL if this information is + * not known. + */ + enum AVPixelFormat *valid_sw_formats; + + /** + * The minimum size of frames in this hw_frames_ctx. + * (Zero if not known.) + */ + int min_width; + int min_height; + + /** + * The maximum size of frames in this hw_frames_ctx. + * (INT_MAX if not known / no limit.) + */ + int max_width; + int max_height; +} AVHWFramesConstraints; + +/** + * Allocate a HW-specific configuration structure for a given HW device. + * After use, the user must free all members as required by the specific + * hardware structure being used, then free the structure itself with + * liteav_av_free(). + * + * @param device_ctx a reference to the associated AVHWDeviceContext. + * @return The newly created HW-specific configuration structure on + * success or NULL on failure. + */ +void *liteav_av_hwdevice_hwconfig_alloc(AVBufferRef *device_ctx); + +/** + * Get the constraints on HW frames given a device and the HW-specific + * configuration to be used with that device. 
If no HW-specific + * configuration is provided, returns the maximum possible capabilities + * of the device. + * + * @param ref a reference to the associated AVHWDeviceContext. + * @param hwconfig a filled HW-specific configuration structure, or NULL + * to return the maximum possible capabilities of the device. + * @return AVHWFramesConstraints structure describing the constraints + * on the device, or NULL if not available. + */ +AVHWFramesConstraints *liteav_av_hwdevice_get_hwframe_constraints(AVBufferRef *ref, + const void *hwconfig); + +/** + * Free an AVHWFrameConstraints structure. + * + * @param constraints The (filled or unfilled) AVHWFrameConstraints structure. + */ +void liteav_av_hwframe_constraints_free(AVHWFramesConstraints **constraints); + + +/** + * Flags to apply to frame mappings. + */ +enum { + /** + * The mapping must be readable. + */ + AV_HWFRAME_MAP_READ = 1 << 0, + /** + * The mapping must be writeable. + */ + AV_HWFRAME_MAP_WRITE = 1 << 1, + /** + * The mapped frame will be overwritten completely in subsequent + * operations, so the current frame data need not be loaded. Any values + * which are not overwritten are unspecified. + */ + AV_HWFRAME_MAP_OVERWRITE = 1 << 2, + /** + * The mapping must be direct. That is, there must not be any copying in + * the map or unmap steps. Note that performance of direct mappings may + * be much lower than normal memory. + */ + AV_HWFRAME_MAP_DIRECT = 1 << 3, +}; + +/** + * Map a hardware frame. + * + * This has a number of different possible effects, depending on the format + * and origin of the src and dst frames. On input, src should be a usable + * frame with valid buffers and dst should be blank (typically as just created + * by liteav_av_frame_alloc()). src should have an associated hwframe context, and + * dst may optionally have a format and associated hwframe context. 
+ * + * If src was created by mapping a frame from the hwframe context of dst, + * then this function undoes the mapping - dst is replaced by a reference to + * the frame that src was originally mapped from. + * + * If both src and dst have an associated hwframe context, then this function + * attempts to map the src frame from its hardware context to that of dst and + * then fill dst with appropriate data to be usable there. This will only be + * possible if the hwframe contexts and associated devices are compatible - + * given compatible devices, liteav_av_hwframe_ctx_create_derived() can be used to + * create a hwframe context for dst in which mapping should be possible. + * + * If src has a hwframe context but dst does not, then the src frame is + * mapped to normal memory and should thereafter be usable as a normal frame. + * If the format is set on dst, then the mapping will attempt to create dst + * with that format and fail if it is not possible. If format is unset (is + * AV_PIX_FMT_NONE) then dst will be mapped with whatever the most appropriate + * format to use is (probably the sw_format of the src hwframe context). + * + * A return value of AVERROR(ENOSYS) indicates that the mapping is not + * possible with the given arguments and hwframe setup, while other return + * values indicate that it failed somehow. + * + * @param dst Destination frame, to contain the mapping. + * @param src Source frame, to be mapped. + * @param flags Some combination of AV_HWFRAME_MAP_* flags. + * @return Zero on success, negative AVERROR code on failure. + */ +int liteav_av_hwframe_map(AVFrame *dst, const AVFrame *src, int flags); + + +/** + * Create and initialise an AVHWFramesContext as a mapping of another existing + * AVHWFramesContext on a different device. + * + * liteav_av_hwframe_ctx_init() should not be called after this. + * + * @param derived_frame_ctx On success, a reference to the newly created + * AVHWFramesContext. 
+ * @param derived_device_ctx A reference to the device to create the new + * AVHWFramesContext on. + * @param source_frame_ctx A reference to an existing AVHWFramesContext + * which will be mapped to the derived context. + * @param flags Some combination of AV_HWFRAME_MAP_* flags, defining the + * mapping parameters to apply to frames which are allocated + * in the derived device. + * @return Zero on success, negative AVERROR code on failure. + */ +int liteav_av_hwframe_ctx_create_derived(AVBufferRef **derived_frame_ctx, + enum AVPixelFormat format, + AVBufferRef *derived_device_ctx, + AVBufferRef *source_frame_ctx, + int flags); + +#endif /* AVUTIL_HWCONTEXT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_cuda.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_cuda.h new file mode 100644 index 0000000..81a0552 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_cuda.h @@ -0,0 +1,52 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + + +#ifndef AVUTIL_HWCONTEXT_CUDA_H +#define AVUTIL_HWCONTEXT_CUDA_H + +#ifndef CUDA_VERSION +#include <cuda.h> +#endif + +#include "pixfmt.h" + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_CUDA. + * + * This API supports dynamic frame pools. AVHWFramesContext.pool must return + * AVBufferRefs whose data pointer is a CUdeviceptr. + */ + +typedef struct AVCUDADeviceContextInternal AVCUDADeviceContextInternal; + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVCUDADeviceContext { + CUcontext cuda_ctx; + CUstream stream; + AVCUDADeviceContextInternal *internal; +} AVCUDADeviceContext; + +/** + * AVHWFramesContext.hwctx is currently not used + */ + +#endif /* AVUTIL_HWCONTEXT_CUDA_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_d3d11va.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_d3d11va.h new file mode 100644 index 0000000..0eb694a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_d3d11va.h @@ -0,0 +1,170 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_D3D11VA_H +#define AVUTIL_HWCONTEXT_D3D11VA_H + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_D3D11VA. + * + * The default pool implementation will be fixed-size if initial_pool_size is + * set (and allocate elements from an array texture). Otherwise it will allocate + * individual textures. Be aware that decoding requires a single array texture. + * + * Using sw_format==AV_PIX_FMT_YUV420P has special semantics, and maps to + * DXGI_FORMAT_420_OPAQUE. liteav_av_hwframe_transfer_data() is not supported for + * this format. Refer to MSDN for details. + * + * liteav_av_hwdevice_ctx_create() for this device type supports a key named "debug" + * for the AVDictionary entry. If this is set to any value, the device creation + * code will try to load various supported D3D debugging layers. + */ + +#include <d3d11.h> +#include <stdint.h> + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVD3D11VADeviceContext { + /** + * Device used for texture creation and access. This can also be used to + * set the libavcodec decoding device. + * + * Must be set by the user. This is the only mandatory field - the other + * device context fields are set from this and are available for convenience. + * + * Deallocating the AVHWDeviceContext will always release this interface, + * and it does not matter whether it was user-allocated. + */ + ID3D11Device *device; + + /** + * If unset, this will be set from the device field on init. + * + * Deallocating the AVHWDeviceContext will always release this interface, + * and it does not matter whether it was user-allocated. 
+ */ + ID3D11DeviceContext *device_context; + + /** + * If unset, this will be set from the device field on init. + * + * Deallocating the AVHWDeviceContext will always release this interface, + * and it does not matter whether it was user-allocated. + */ + ID3D11VideoDevice *video_device; + + /** + * If unset, this will be set from the device_context field on init. + * + * Deallocating the AVHWDeviceContext will always release this interface, + * and it does not matter whether it was user-allocated. + */ + ID3D11VideoContext *video_context; + + /** + * Callbacks for locking. They protect accesses to device_context and + * video_context calls. They also protect access to the internal staging + * texture (for liteav_av_hwframe_transfer_data() calls). They do NOT protect + * access to hwcontext or decoder state in general. + * + * If unset on init, the hwcontext implementation will set them to use an + * internal mutex. + * + * The underlying lock must be recursive. lock_ctx is for free use by the + * locking implementation. + */ + void (*lock)(void *lock_ctx); + void (*unlock)(void *lock_ctx); + void *lock_ctx; +} AVD3D11VADeviceContext; + +/** + * D3D11 frame descriptor for pool allocation. + * + * In user-allocated pools, AVHWFramesContext.pool must return AVBufferRefs + * with the data pointer pointing at an object of this type describing the + * planes of the frame. + * + * This has no use outside of custom allocation, and AVFrame AVBufferRef do not + * necessarily point to an instance of this struct. + */ +typedef struct AVD3D11FrameDescriptor { + /** + * The texture in which the frame is located. The reference count is + * managed by the AVBufferRef, and destroying the reference will release + * the interface. + * + * Normally stored in AVFrame.data[0]. + */ + ID3D11Texture2D *texture; + + /** + * The index into the array texture element representing the frame, or 0 + * if the texture is not an array texture. 
+ * + * Normally stored in AVFrame.data[1] (cast from intptr_t). + */ + intptr_t index; +} AVD3D11FrameDescriptor; + +/** + * This struct is allocated as AVHWFramesContext.hwctx + */ +typedef struct AVD3D11VAFramesContext { + /** + * The canonical texture used for pool allocation. If this is set to NULL + * on init, the hwframes implementation will allocate and set an array + * texture if initial_pool_size > 0. + * + * The only situation when the API user should set this is: + * - the user wants to do manual pool allocation (setting + * AVHWFramesContext.pool), instead of letting AVHWFramesContext + * allocate the pool + * - of an array texture + * - and wants it to use it for decoding + * - this has to be done before calling liteav_av_hwframe_ctx_init() + * + * Deallocating the AVHWFramesContext will always release this interface, + * and it does not matter whether it was user-allocated. + * + * This is in particular used by the libavcodec D3D11VA hwaccel, which + * requires a single array texture. It will create ID3D11VideoDecoderOutputView + * objects for each array texture element on decoder initialization. + */ + ID3D11Texture2D *texture; + + /** + * D3D11_TEXTURE2D_DESC.BindFlags used for texture creation. The user must + * at least set D3D11_BIND_DECODER if the frames context is to be used for + * video decoding. + * This field is ignored/invalid if a user-allocated texture is provided. + */ + UINT BindFlags; + + /** + * D3D11_TEXTURE2D_DESC.MiscFlags used for texture creation. + * This field is ignored/invalid if a user-allocated texture is provided. 
+ */ + UINT MiscFlags; +} AVD3D11VAFramesContext; + +#endif /* AVUTIL_HWCONTEXT_D3D11VA_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_drm.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_drm.h new file mode 100644 index 0000000..42709f2 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_drm.h @@ -0,0 +1,169 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_DRM_H +#define AVUTIL_HWCONTEXT_DRM_H + +#include <stddef.h> +#include <stdint.h> + +/** + * @file + * API-specific header for AV_HWDEVICE_TYPE_DRM. + * + * Internal frame allocation is not currently supported - all frames + * must be allocated by the user. Thus AVHWFramesContext is always + * NULL, though this may change if support for frame allocation is + * added in future. + */ + +enum { + /** + * The maximum number of layers/planes in a DRM frame. + */ + AV_DRM_MAX_PLANES = 4 +}; + +/** + * DRM object descriptor. + * + * Describes a single DRM object, addressing it as a PRIME file + * descriptor. + */ +typedef struct AVDRMObjectDescriptor { + /** + * DRM PRIME fd for the object. 
+ */ + int fd; + /** + * Total size of the object. + * + * (This includes any parts not which do not contain image data.) + */ + size_t size; + /** + * Format modifier applied to the object (DRM_FORMAT_MOD_*). + * + * If the format modifier is unknown then this should be set to + * DRM_FORMAT_MOD_INVALID. + */ + uint64_t format_modifier; +} AVDRMObjectDescriptor; + +/** + * DRM plane descriptor. + * + * Describes a single plane of a layer, which is contained within + * a single object. + */ +typedef struct AVDRMPlaneDescriptor { + /** + * Index of the object containing this plane in the objects + * array of the enclosing frame descriptor. + */ + int object_index; + /** + * Offset within that object of this plane. + */ + ptrdiff_t offset; + /** + * Pitch (linesize) of this plane. + */ + ptrdiff_t pitch; +} AVDRMPlaneDescriptor; + +/** + * DRM layer descriptor. + * + * Describes a single layer within a frame. This has the structure + * defined by its format, and will contain one or more planes. + */ +typedef struct AVDRMLayerDescriptor { + /** + * Format of the layer (DRM_FORMAT_*). + */ + uint32_t format; + /** + * Number of planes in the layer. + * + * This must match the number of planes required by format. + */ + int nb_planes; + /** + * Array of planes in this layer. + */ + AVDRMPlaneDescriptor planes[AV_DRM_MAX_PLANES]; +} AVDRMLayerDescriptor; + +/** + * DRM frame descriptor. + * + * This is used as the data pointer for AV_PIX_FMT_DRM_PRIME frames. + * It is also used by user-allocated frame pools - allocating in + * AVHWFramesContext.pool must return AVBufferRefs which contain + * an object of this type. + * + * The fields of this structure should be set such it can be + * imported directly by EGL using the EGL_EXT_image_dma_buf_import + * and EGL_EXT_image_dma_buf_import_modifiers extensions. + * (Note that the exact layout of a particular format may vary between + * platforms - we only specify that the same platform should be able + * to import it.) 
+ * + * The total number of planes must not exceed AV_DRM_MAX_PLANES, and + * the order of the planes by increasing layer index followed by + * increasing plane index must be the same as the order which would + * be used for the data pointers in the equivalent software format. + */ +typedef struct AVDRMFrameDescriptor { + /** + * Number of DRM objects making up this frame. + */ + int nb_objects; + /** + * Array of objects making up the frame. + */ + AVDRMObjectDescriptor objects[AV_DRM_MAX_PLANES]; + /** + * Number of layers in the frame. + */ + int nb_layers; + /** + * Array of layers in the frame. + */ + AVDRMLayerDescriptor layers[AV_DRM_MAX_PLANES]; +} AVDRMFrameDescriptor; + +/** + * DRM device. + * + * Allocated as AVHWDeviceContext.hwctx. + */ +typedef struct AVDRMDeviceContext { + /** + * File descriptor of DRM device. + * + * This is used as the device to create frames on, and may also be + * used in some derivation and mapping operations. + * + * If no device is required, set to -1. + */ + int fd; +} AVDRMDeviceContext; + +#endif /* AVUTIL_HWCONTEXT_DRM_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_dxva2.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_dxva2.h new file mode 100644 index 0000000..e1b79bc --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_dxva2.h @@ -0,0 +1,75 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + + +#ifndef AVUTIL_HWCONTEXT_DXVA2_H +#define AVUTIL_HWCONTEXT_DXVA2_H + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_DXVA2. + * + * Only fixed-size pools are supported. + * + * For user-allocated pools, AVHWFramesContext.pool must return AVBufferRefs + * with the data pointer set to a pointer to IDirect3DSurface9. + */ + +#include <d3d9.h> +#include <dxva2api.h> + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVDXVA2DeviceContext { + IDirect3DDeviceManager9 *devmgr; +} AVDXVA2DeviceContext; + +/** + * This struct is allocated as AVHWFramesContext.hwctx + */ +typedef struct AVDXVA2FramesContext { + /** + * The surface type (e.g. DXVA2_VideoProcessorRenderTarget or + * DXVA2_VideoDecoderRenderTarget). Must be set by the caller. + */ + DWORD surface_type; + + /** + * The surface pool. When an external pool is not provided by the caller, + * this will be managed (allocated and filled on init, freed on uninit) by + * libavutil. + */ + IDirect3DSurface9 **surfaces; + int nb_surfaces; + + /** + * Certain drivers require the decoder to be destroyed before the surfaces. + * To allow internally managed pools to work properly in such cases, this + * field is provided. + * + * If it is non-NULL, libavutil will call IDirectXVideoDecoder_Release() on + * it just before the internal surface pool is freed. + * + * This is for convenience only. Some code uses other methods to manage the + * decoder reference. 
+ */ + IDirectXVideoDecoder *decoder_to_release; +} AVDXVA2FramesContext; + +#endif /* AVUTIL_HWCONTEXT_DXVA2_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_mediacodec.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_mediacodec.h new file mode 100644 index 0000000..101a980 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_mediacodec.h @@ -0,0 +1,36 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_MEDIACODEC_H +#define AVUTIL_HWCONTEXT_MEDIACODEC_H + +/** + * MediaCodec details. + * + * Allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVMediaCodecDeviceContext { + /** + * android/view/Surface handle, to be filled by the user. + * + * This is the default surface used by decoders on this device. 
+ */ + void *surface; +} AVMediaCodecDeviceContext; + +#endif /* AVUTIL_HWCONTEXT_MEDIACODEC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_qsv.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_qsv.h new file mode 100644 index 0000000..b98d611 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_qsv.h @@ -0,0 +1,53 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_QSV_H +#define AVUTIL_HWCONTEXT_QSV_H + +#include <mfx/mfxvideo.h> + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_QSV. + * + * This API does not support dynamic frame pools. AVHWFramesContext.pool must + * contain AVBufferRefs whose data pointer points to an mfxFrameSurface1 struct. + */ + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVQSVDeviceContext { + mfxSession session; +} AVQSVDeviceContext; + +/** + * This struct is allocated as AVHWFramesContext.hwctx + */ +typedef struct AVQSVFramesContext { + mfxFrameSurface1 *surfaces; + int nb_surfaces; + + /** + * A combination of MFX_MEMTYPE_* describing the frame pool. 
+ */ + int frame_type; +} AVQSVFramesContext; + +#endif /* AVUTIL_HWCONTEXT_QSV_H */ + diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_vaapi.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_vaapi.h new file mode 100644 index 0000000..46b6be7 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_vaapi.h @@ -0,0 +1,118 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_VAAPI_H +#define AVUTIL_HWCONTEXT_VAAPI_H + +#include <va/va.h> + +/** + * @file + * API-specific header for AV_HWDEVICE_TYPE_VAAPI. + * + * Dynamic frame pools are supported, but note that any pool used as a render + * target is required to be of fixed size in order to be be usable as an + * argument to vaCreateContext(). + * + * For user-allocated pools, AVHWFramesContext.pool must return AVBufferRefs + * with the data pointer set to a VASurfaceID. + */ + +enum { + /** + * The quirks field has been set by the user and should not be detected + * automatically by liteav_av_hwdevice_ctx_init(). 
+ */ + AV_VAAPI_DRIVER_QUIRK_USER_SET = (1 << 0), + /** + * The driver does not destroy parameter buffers when they are used by + * vaRenderPicture(). Additional code will be required to destroy them + * separately afterwards. + */ + AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS = (1 << 1), + + /** + * The driver does not support the VASurfaceAttribMemoryType attribute, + * so the surface allocation code will not try to use it. + */ + AV_VAAPI_DRIVER_QUIRK_ATTRIB_MEMTYPE = (1 << 2), + + /** + * The driver does not support surface attributes at all. + * The surface allocation code will never pass them to surface allocation, + * and the results of the vaQuerySurfaceAttributes() call will be faked. + */ + AV_VAAPI_DRIVER_QUIRK_SURFACE_ATTRIBUTES = (1 << 3), +}; + +/** + * VAAPI connection details. + * + * Allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVVAAPIDeviceContext { + /** + * The VADisplay handle, to be filled by the user. + */ + VADisplay display; + /** + * Driver quirks to apply - this is filled by liteav_av_hwdevice_ctx_init(), + * with reference to a table of known drivers, unless the + * AV_VAAPI_DRIVER_QUIRK_USER_SET bit is already present. The user + * may need to refer to this field when performing any later + * operations using VAAPI with the same VADisplay. + */ + unsigned int driver_quirks; +} AVVAAPIDeviceContext; + +/** + * VAAPI-specific data associated with a frame pool. + * + * Allocated as AVHWFramesContext.hwctx. + */ +typedef struct AVVAAPIFramesContext { + /** + * Set by the user to apply surface attributes to all surfaces in + * the frame pool. If null, default settings are used. + */ + VASurfaceAttrib *attributes; + int nb_attributes; + /** + * The surfaces IDs of all surfaces in the pool after creation. + * Only valid if AVHWFramesContext.initial_pool_size was positive. + * These are intended to be used as the render_targets arguments to + * vaCreateContext(). 
+ */ + VASurfaceID *surface_ids; + int nb_surfaces; +} AVVAAPIFramesContext; + +/** + * VAAPI hardware pipeline configuration details. + * + * Allocated with liteav_av_hwdevice_hwconfig_alloc(). + */ +typedef struct AVVAAPIHWConfig { + /** + * ID of a VAAPI pipeline configuration. + */ + VAConfigID config_id; +} AVVAAPIHWConfig; + +#endif /* AVUTIL_HWCONTEXT_VAAPI_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_vdpau.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_vdpau.h new file mode 100644 index 0000000..1b7ea1e --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_vdpau.h @@ -0,0 +1,44 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_VDPAU_H +#define AVUTIL_HWCONTEXT_VDPAU_H + +#include <vdpau/vdpau.h> + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_VDPAU. + * + * This API supports dynamic frame pools. AVHWFramesContext.pool must return + * AVBufferRefs whose data pointer is a VdpVideoSurface. 
+ */ + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVVDPAUDeviceContext { + VdpDevice device; + VdpGetProcAddress *get_proc_address; +} AVVDPAUDeviceContext; + +/** + * AVHWFramesContext.hwctx is currently not used + */ + +#endif /* AVUTIL_HWCONTEXT_VDPAU_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_videotoolbox.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_videotoolbox.h new file mode 100644 index 0000000..4f77b9d --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/hwcontext_videotoolbox.h @@ -0,0 +1,55 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_VIDEOTOOLBOX_H +#define AVUTIL_HWCONTEXT_VIDEOTOOLBOX_H + +#include <stdint.h> + +#include <VideoToolbox/VideoToolbox.h> + +#include "pixfmt.h" + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_VIDEOTOOLBOX. 
+ * + * This API currently does not support frame allocation, as the raw VideoToolbox + * API does allocation, and FFmpeg itself never has the need to allocate frames. + * + * If the API user sets a custom pool, AVHWFramesContext.pool must return + * AVBufferRefs whose data pointer is a CVImageBufferRef or CVPixelBufferRef. + * + * Currently AVHWDeviceContext.hwctx and AVHWFramesContext.hwctx are always + * NULL. + */ + +/** + * Convert a VideoToolbox (actually CoreVideo) format to AVPixelFormat. + * Returns AV_PIX_FMT_NONE if no known equivalent was found. + */ +enum AVPixelFormat liteav_av_map_videotoolbox_format_to_pixfmt(uint32_t cv_fmt); + +/** + * Convert an AVPixelFormat to a VideoToolbox (actually CoreVideo) format. + * Returns 0 if no known equivalent was found. + */ +uint32_t liteav_av_map_videotoolbox_format_from_pixfmt(enum AVPixelFormat pix_fmt); + +#endif /* AVUTIL_HWCONTEXT_VIDEOTOOLBOX_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/imgutils.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/imgutils.h new file mode 100644 index 0000000..fc0216b --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/imgutils.h @@ -0,0 +1,278 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_IMGUTILS_H +#define AVUTIL_IMGUTILS_H + +/** + * @file + * misc image utilities + * + * @addtogroup lavu_picture + * @{ + */ + +#include "avutil.h" +#include "pixdesc.h" +#include "rational.h" + +/** + * Compute the max pixel step for each plane of an image with a + * format described by pixdesc. + * + * The pixel step is the distance in bytes between the first byte of + * the group of bytes which describe a pixel component and the first + * byte of the successive group in the same plane for the same + * component. + * + * @param max_pixsteps an array which is filled with the max pixel step + * for each plane. Since a plane may contain different pixel + * components, the computed max_pixsteps[plane] is relative to the + * component in the plane with the max pixel step. + * @param max_pixstep_comps an array which is filled with the component + * for each plane which has the max pixel step. May be NULL. + */ +void liteav_av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4], + const AVPixFmtDescriptor *pixdesc); + +/** + * Compute the size of an image line with format pix_fmt and width + * width for the plane plane. + * + * @return the computed size in bytes + */ +int liteav_av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane); + +/** + * Fill plane linesizes for an image with pixel format pix_fmt and + * width width. + * + * @param linesizes array to be filled with the linesize for each plane + * @return >= 0 in case of success, a negative error code otherwise + */ +int liteav_av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width); + +/** + * Fill plane data pointers for an image with pixel format pix_fmt and + * height height. 
+ * + * @param data pointers array to be filled with the pointer for each image plane + * @param ptr the pointer to a buffer which will contain the image + * @param linesizes the array containing the linesize for each + * plane, should be filled by liteav_av_image_fill_linesizes() + * @return the size in bytes required for the image buffer, a negative + * error code in case of failure + */ +int liteav_av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, + uint8_t *ptr, const int linesizes[4]); + +/** + * Allocate an image with size w and h and pixel format pix_fmt, and + * fill pointers and linesizes accordingly. + * The allocated image buffer has to be freed by using + * liteav_av_freep(&pointers[0]). + * + * @param align the value to use for buffer size alignment + * @return the size in bytes required for the image buffer, a negative + * error code in case of failure + */ +int liteav_av_image_alloc(uint8_t *pointers[4], int linesizes[4], + int w, int h, enum AVPixelFormat pix_fmt, int align); + +/** + * Copy image plane from src to dst. + * That is, copy "height" number of lines of "bytewidth" bytes each. + * The first byte of each successive line is separated by *_linesize + * bytes. + * + * bytewidth must be contained by both absolute values of dst_linesize + * and src_linesize, otherwise the function behavior is undefined. + * + * @param dst_linesize linesize for the image plane in dst + * @param src_linesize linesize for the image plane in src + */ +void liteav_av_image_copy_plane(uint8_t *dst, int dst_linesize, + const uint8_t *src, int src_linesize, + int bytewidth, int height); + +/** + * Copy image in src_data to dst_data. 
+ * + * @param dst_linesizes linesizes for the image in dst_data + * @param src_linesizes linesizes for the image in src_data + */ +void liteav_av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], + const uint8_t *src_data[4], const int src_linesizes[4], + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Copy image data located in uncacheable (e.g. GPU mapped) memory. Where + * available, this function will use special functionality for reading from such + * memory, which may result in greatly improved performance compared to plain + * liteav_av_image_copy(). + * + * The data pointers and the linesizes must be aligned to the maximum required + * by the CPU architecture. + * + * @note The linesize parameters have the type ptrdiff_t here, while they are + * int for liteav_av_image_copy(). + * @note On x86, the linesizes currently need to be aligned to the cacheline + * size (i.e. 64) to get improved performance. + */ +void liteav_av_image_copy_uc_from(uint8_t *dst_data[4], const ptrdiff_t dst_linesizes[4], + const uint8_t *src_data[4], const ptrdiff_t src_linesizes[4], + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Setup the data pointers and linesizes based on the specified image + * parameters and the provided array. + * + * The fields of the given image are filled in by using the src + * address which points to the image data buffer. Depending on the + * specified pixel format, one or multiple image data pointers and + * line sizes will be set. If a planar format is specified, several + * pointers will be set pointing to the different picture planes and + * the line sizes of the different planes will be stored in the + * lines_sizes array. Call with src == NULL to get the required + * size for the src buffer. + * + * To allocate the buffer and fill in the dst_data and dst_linesize in + * one call, use liteav_av_image_alloc(). 
+ * + * @param dst_data data pointers to be filled in + * @param dst_linesize linesizes for the image in dst_data to be filled in + * @param src buffer which will contain or contains the actual image data, can be NULL + * @param pix_fmt the pixel format of the image + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @param align the value used in src for linesize alignment + * @return the size in bytes required for src, a negative error code + * in case of failure + */ +int liteav_av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], + const uint8_t *src, + enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Return the size in bytes of the amount of data required to store an + * image with the given parameters. + * + * @param pix_fmt the pixel format of the image + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @param align the assumed linesize alignment + * @return the buffer size in bytes, a negative error code in case of failure + */ +int liteav_av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Copy image data from an image into a buffer. + * + * liteav_av_image_get_buffer_size() can be used to compute the required size + * for the buffer to fill. 
+ * + * @param dst a buffer into which picture data will be copied + * @param dst_size the size in bytes of dst + * @param src_data pointers containing the source image data + * @param src_linesize linesizes for the image in src_data + * @param pix_fmt the pixel format of the source image + * @param width the width of the source image in pixels + * @param height the height of the source image in pixels + * @param align the assumed linesize alignment for dst + * @return the number of bytes written to dst, or a negative value + * (error code) on error + */ +int liteav_av_image_copy_to_buffer(uint8_t *dst, int dst_size, + const uint8_t * const src_data[4], const int src_linesize[4], + enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Check if the given dimension of an image is valid, meaning that all + * bytes of the image can be addressed with a signed int. + * + * @param w the width of the picture + * @param h the height of the picture + * @param log_offset the offset to sum to the log level for logging with log_ctx + * @param log_ctx the parent logging context, it may be NULL + * @return >= 0 if valid, a negative error code otherwise + */ +int liteav_av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx); + +/** + * Check if the given dimension of an image is valid, meaning that all + * bytes of a plane of an image with the specified pix_fmt can be addressed + * with a signed int. + * + * @param w the width of the picture + * @param h the height of the picture + * @param max_pixels the maximum number of pixels the user wants to accept + * @param pix_fmt the pixel format, can be AV_PIX_FMT_NONE if unknown. 
+ * @param log_offset the offset to sum to the log level for logging with log_ctx + * @param log_ctx the parent logging context, it may be NULL + * @return >= 0 if valid, a negative error code otherwise + */ +int liteav_av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx); + +/** + * Check if the given sample aspect ratio of an image is valid. + * + * It is considered invalid if the denominator is 0 or if applying the ratio + * to the image size would make the smaller dimension less than 1. If the + * sar numerator is 0, it is considered unknown and will return as valid. + * + * @param w width of the image + * @param h height of the image + * @param sar sample aspect ratio of the image + * @return 0 if valid, a negative AVERROR code otherwise + */ +int liteav_av_image_check_sar(unsigned int w, unsigned int h, AVRational sar); + +/** + * Overwrite the image data with black. This is suitable for filling a + * sub-rectangle of an image, meaning the padding between the right most pixel + * and the left most pixel on the next line will not be overwritten. For some + * formats, the image size might be rounded up due to inherent alignment. + * + * If the pixel format has alpha, the alpha is cleared to opaque. + * + * This can return an error if the pixel format is not supported. Normally, all + * non-hwaccel pixel formats should be supported. + * + * Passing NULL for dst_data is allowed. Then the function returns whether the + * operation would have succeeded. (It can return an error if the pix_fmt is + * not supported.) 
+ * + * @param dst_data data pointers to destination image + * @param dst_linesize linesizes for the destination image + * @param pix_fmt the pixel format of the image + * @param range the color range of the image (important for colorspaces such as YUV) + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @return 0 if the image data was cleared, a negative AVERROR code otherwise + */ +int liteav_av_image_fill_black(uint8_t *dst_data[4], const ptrdiff_t dst_linesize[4], + enum AVPixelFormat pix_fmt, enum AVColorRange range, + int width, int height); + +/** + * @} + */ + + +#endif /* AVUTIL_IMGUTILS_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/intfloat.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/intfloat.h new file mode 100644 index 0000000..fe3d7ec --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/intfloat.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2011 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTFLOAT_H +#define AVUTIL_INTFLOAT_H + +#include <stdint.h> +#include "attributes.h" + +union av_intfloat32 { + uint32_t i; + float f; +}; + +union av_intfloat64 { + uint64_t i; + double f; +}; + +/** + * Reinterpret a 32-bit integer as a float. + */ +static av_always_inline float av_int2float(uint32_t i) +{ + union av_intfloat32 v; + v.i = i; + return v.f; +} + +/** + * Reinterpret a float as a 32-bit integer. + */ +static av_always_inline uint32_t av_float2int(float f) +{ + union av_intfloat32 v; + v.f = f; + return v.i; +} + +/** + * Reinterpret a 64-bit integer as a double. + */ +static av_always_inline double av_int2double(uint64_t i) +{ + union av_intfloat64 v; + v.i = i; + return v.f; +} + +/** + * Reinterpret a double as a 64-bit integer. + */ +static av_always_inline uint64_t av_double2int(double f) +{ + union av_intfloat64 v; + v.f = f; + return v.i; +} + +#endif /* AVUTIL_INTFLOAT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/intreadwrite.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/intreadwrite.h new file mode 100644 index 0000000..67c763b --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/intreadwrite.h @@ -0,0 +1,629 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTREADWRITE_H +#define AVUTIL_INTREADWRITE_H + +#include <stdint.h> +#include "libavutil/avconfig.h" +#include "attributes.h" +#include "bswap.h" + +typedef union { + uint64_t u64; + uint32_t u32[2]; + uint16_t u16[4]; + uint8_t u8 [8]; + double f64; + float f32[2]; +} av_alias av_alias64; + +typedef union { + uint32_t u32; + uint16_t u16[2]; + uint8_t u8 [4]; + float f32; +} av_alias av_alias32; + +typedef union { + uint16_t u16; + uint8_t u8 [2]; +} av_alias av_alias16; + +/* + * Arch-specific headers can provide any combination of + * AV_[RW][BLN](16|24|32|48|64) and AV_(COPY|SWAP|ZERO)(64|128) macros. + * Preprocessor symbols must be defined, even if these are implemented + * as inline functions. + * + * R/W means read/write, B/L/N means big/little/native endianness. + * The following macros require aligned access, compared to their + * unaligned variants: AV_(COPY|SWAP|ZERO)(64|128), AV_[RW]N[8-64]A. + * Incorrect usage may range from abysmal performance to crash + * depending on the platform. + * + * The unaligned variants are AV_[RW][BLN][8-64] and AV_COPY*U. 
+ */ + +#ifdef HAVE_AV_CONFIG_H + +#include "config.h" + +#if ARCH_ARM +# include "arm/intreadwrite.h" +#elif ARCH_AVR32 +# include "avr32/intreadwrite.h" +#elif ARCH_MIPS +# include "mips/intreadwrite.h" +#elif ARCH_PPC +# include "ppc/intreadwrite.h" +#elif ARCH_TOMI +# include "tomi/intreadwrite.h" +#elif ARCH_X86 +# include "x86/intreadwrite.h" +#endif + +#endif /* HAVE_AV_CONFIG_H */ + +/* + * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers. + */ + +#if AV_HAVE_BIGENDIAN + +# if defined(AV_RN16) && !defined(AV_RB16) +# define AV_RB16(p) AV_RN16(p) +# elif !defined(AV_RN16) && defined(AV_RB16) +# define AV_RN16(p) AV_RB16(p) +# endif + +# if defined(AV_WN16) && !defined(AV_WB16) +# define AV_WB16(p, v) AV_WN16(p, v) +# elif !defined(AV_WN16) && defined(AV_WB16) +# define AV_WN16(p, v) AV_WB16(p, v) +# endif + +# if defined(AV_RN24) && !defined(AV_RB24) +# define AV_RB24(p) AV_RN24(p) +# elif !defined(AV_RN24) && defined(AV_RB24) +# define AV_RN24(p) AV_RB24(p) +# endif + +# if defined(AV_WN24) && !defined(AV_WB24) +# define AV_WB24(p, v) AV_WN24(p, v) +# elif !defined(AV_WN24) && defined(AV_WB24) +# define AV_WN24(p, v) AV_WB24(p, v) +# endif + +# if defined(AV_RN32) && !defined(AV_RB32) +# define AV_RB32(p) AV_RN32(p) +# elif !defined(AV_RN32) && defined(AV_RB32) +# define AV_RN32(p) AV_RB32(p) +# endif + +# if defined(AV_WN32) && !defined(AV_WB32) +# define AV_WB32(p, v) AV_WN32(p, v) +# elif !defined(AV_WN32) && defined(AV_WB32) +# define AV_WN32(p, v) AV_WB32(p, v) +# endif + +# if defined(AV_RN48) && !defined(AV_RB48) +# define AV_RB48(p) AV_RN48(p) +# elif !defined(AV_RN48) && defined(AV_RB48) +# define AV_RN48(p) AV_RB48(p) +# endif + +# if defined(AV_WN48) && !defined(AV_WB48) +# define AV_WB48(p, v) AV_WN48(p, v) +# elif !defined(AV_WN48) && defined(AV_WB48) +# define AV_WN48(p, v) AV_WB48(p, v) +# endif + +# if defined(AV_RN64) && !defined(AV_RB64) +# define AV_RB64(p) AV_RN64(p) +# elif !defined(AV_RN64) && defined(AV_RB64) 
+# define AV_RN64(p) AV_RB64(p) +# endif + +# if defined(AV_WN64) && !defined(AV_WB64) +# define AV_WB64(p, v) AV_WN64(p, v) +# elif !defined(AV_WN64) && defined(AV_WB64) +# define AV_WN64(p, v) AV_WB64(p, v) +# endif + +#else /* AV_HAVE_BIGENDIAN */ + +# if defined(AV_RN16) && !defined(AV_RL16) +# define AV_RL16(p) AV_RN16(p) +# elif !defined(AV_RN16) && defined(AV_RL16) +# define AV_RN16(p) AV_RL16(p) +# endif + +# if defined(AV_WN16) && !defined(AV_WL16) +# define AV_WL16(p, v) AV_WN16(p, v) +# elif !defined(AV_WN16) && defined(AV_WL16) +# define AV_WN16(p, v) AV_WL16(p, v) +# endif + +# if defined(AV_RN24) && !defined(AV_RL24) +# define AV_RL24(p) AV_RN24(p) +# elif !defined(AV_RN24) && defined(AV_RL24) +# define AV_RN24(p) AV_RL24(p) +# endif + +# if defined(AV_WN24) && !defined(AV_WL24) +# define AV_WL24(p, v) AV_WN24(p, v) +# elif !defined(AV_WN24) && defined(AV_WL24) +# define AV_WN24(p, v) AV_WL24(p, v) +# endif + +# if defined(AV_RN32) && !defined(AV_RL32) +# define AV_RL32(p) AV_RN32(p) +# elif !defined(AV_RN32) && defined(AV_RL32) +# define AV_RN32(p) AV_RL32(p) +# endif + +# if defined(AV_WN32) && !defined(AV_WL32) +# define AV_WL32(p, v) AV_WN32(p, v) +# elif !defined(AV_WN32) && defined(AV_WL32) +# define AV_WN32(p, v) AV_WL32(p, v) +# endif + +# if defined(AV_RN48) && !defined(AV_RL48) +# define AV_RL48(p) AV_RN48(p) +# elif !defined(AV_RN48) && defined(AV_RL48) +# define AV_RN48(p) AV_RL48(p) +# endif + +# if defined(AV_WN48) && !defined(AV_WL48) +# define AV_WL48(p, v) AV_WN48(p, v) +# elif !defined(AV_WN48) && defined(AV_WL48) +# define AV_WN48(p, v) AV_WL48(p, v) +# endif + +# if defined(AV_RN64) && !defined(AV_RL64) +# define AV_RL64(p) AV_RN64(p) +# elif !defined(AV_RN64) && defined(AV_RL64) +# define AV_RN64(p) AV_RL64(p) +# endif + +# if defined(AV_WN64) && !defined(AV_WL64) +# define AV_WL64(p, v) AV_WN64(p, v) +# elif !defined(AV_WN64) && defined(AV_WL64) +# define AV_WN64(p, v) AV_WL64(p, v) +# endif + +#endif /* !AV_HAVE_BIGENDIAN */ + 
+/* + * Define AV_[RW]N helper macros to simplify definitions not provided + * by per-arch headers. + */ + +#if defined(__GNUC__) + +union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias; +union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias; +union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias; + +# define AV_RN(s, p) (((const union unaligned_##s *) (p))->l) +# define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v)) + +#elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_X64) || defined(_M_ARM64)) && AV_HAVE_FAST_UNALIGNED + +# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p))) +# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v)) + +#elif AV_HAVE_FAST_UNALIGNED + +# define AV_RN(s, p) (((const av_alias##s*)(p))->u##s) +# define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v)) + +#else + +#ifndef AV_RB16 +# define AV_RB16(x) \ + ((((const uint8_t*)(x))[0] << 8) | \ + ((const uint8_t*)(x))[1]) +#endif +#ifndef AV_WB16 +# define AV_WB16(p, val) do { \ + uint16_t d = (val); \ + ((uint8_t*)(p))[1] = (d); \ + ((uint8_t*)(p))[0] = (d)>>8; \ + } while(0) +#endif + +#ifndef AV_RL16 +# define AV_RL16(x) \ + ((((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL16 +# define AV_WL16(p, val) do { \ + uint16_t d = (val); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + } while(0) +#endif + +#ifndef AV_RB32 +# define AV_RB32(x) \ + (((uint32_t)((const uint8_t*)(x))[0] << 24) | \ + (((const uint8_t*)(x))[1] << 16) | \ + (((const uint8_t*)(x))[2] << 8) | \ + ((const uint8_t*)(x))[3]) +#endif +#ifndef AV_WB32 +# define AV_WB32(p, val) do { \ + uint32_t d = (val); \ + ((uint8_t*)(p))[3] = (d); \ + ((uint8_t*)(p))[2] = (d)>>8; \ + ((uint8_t*)(p))[1] = (d)>>16; \ + ((uint8_t*)(p))[0] = (d)>>24; \ + } while(0) +#endif + +#ifndef AV_RL32 +# define AV_RL32(x) \ + (((uint32_t)((const uint8_t*)(x))[3] << 24) | \ + (((const uint8_t*)(x))[2] << 16) | \ + 
(((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL32 +# define AV_WL32(p, val) do { \ + uint32_t d = (val); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + } while(0) +#endif + +#ifndef AV_RB64 +# define AV_RB64(x) \ + (((uint64_t)((const uint8_t*)(x))[0] << 56) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 48) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[5] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[6] << 8) | \ + (uint64_t)((const uint8_t*)(x))[7]) +#endif +#ifndef AV_WB64 +# define AV_WB64(p, val) do { \ + uint64_t d = (val); \ + ((uint8_t*)(p))[7] = (d); \ + ((uint8_t*)(p))[6] = (d)>>8; \ + ((uint8_t*)(p))[5] = (d)>>16; \ + ((uint8_t*)(p))[4] = (d)>>24; \ + ((uint8_t*)(p))[3] = (d)>>32; \ + ((uint8_t*)(p))[2] = (d)>>40; \ + ((uint8_t*)(p))[1] = (d)>>48; \ + ((uint8_t*)(p))[0] = (d)>>56; \ + } while(0) +#endif + +#ifndef AV_RL64 +# define AV_RL64(x) \ + (((uint64_t)((const uint8_t*)(x))[7] << 56) | \ + ((uint64_t)((const uint8_t*)(x))[6] << 48) | \ + ((uint64_t)((const uint8_t*)(x))[5] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ + (uint64_t)((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL64 +# define AV_WL64(p, val) do { \ + uint64_t d = (val); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + ((uint8_t*)(p))[4] = (d)>>32; \ + ((uint8_t*)(p))[5] = (d)>>40; \ + ((uint8_t*)(p))[6] = (d)>>48; \ + ((uint8_t*)(p))[7] = (d)>>56; \ + } while(0) +#endif + +#if AV_HAVE_BIGENDIAN +# define AV_RN(s, p) AV_RB##s(p) +# define AV_WN(s, p, v) AV_WB##s(p, v) +#else +# define 
AV_RN(s, p) AV_RL##s(p) +# define AV_WN(s, p, v) AV_WL##s(p, v) +#endif + +#endif /* HAVE_FAST_UNALIGNED */ + +#ifndef AV_RN16 +# define AV_RN16(p) AV_RN(16, p) +#endif + +#ifndef AV_RN32 +# define AV_RN32(p) AV_RN(32, p) +#endif + +#ifndef AV_RN64 +# define AV_RN64(p) AV_RN(64, p) +#endif + +#ifndef AV_WN16 +# define AV_WN16(p, v) AV_WN(16, p, v) +#endif + +#ifndef AV_WN32 +# define AV_WN32(p, v) AV_WN(32, p, v) +#endif + +#ifndef AV_WN64 +# define AV_WN64(p, v) AV_WN(64, p, v) +#endif + +#if AV_HAVE_BIGENDIAN +# define AV_RB(s, p) AV_RN##s(p) +# define AV_WB(s, p, v) AV_WN##s(p, v) +# define AV_RL(s, p) av_bswap##s(AV_RN##s(p)) +# define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v)) +#else +# define AV_RB(s, p) av_bswap##s(AV_RN##s(p)) +# define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v)) +# define AV_RL(s, p) AV_RN##s(p) +# define AV_WL(s, p, v) AV_WN##s(p, v) +#endif + +#define AV_RB8(x) (((const uint8_t*)(x))[0]) +#define AV_WB8(p, d) do { ((uint8_t*)(p))[0] = (d); } while(0) + +#define AV_RL8(x) AV_RB8(x) +#define AV_WL8(p, d) AV_WB8(p, d) + +#ifndef AV_RB16 +# define AV_RB16(p) AV_RB(16, p) +#endif +#ifndef AV_WB16 +# define AV_WB16(p, v) AV_WB(16, p, v) +#endif + +#ifndef AV_RL16 +# define AV_RL16(p) AV_RL(16, p) +#endif +#ifndef AV_WL16 +# define AV_WL16(p, v) AV_WL(16, p, v) +#endif + +#ifndef AV_RB32 +# define AV_RB32(p) AV_RB(32, p) +#endif +#ifndef AV_WB32 +# define AV_WB32(p, v) AV_WB(32, p, v) +#endif + +#ifndef AV_RL32 +# define AV_RL32(p) AV_RL(32, p) +#endif +#ifndef AV_WL32 +# define AV_WL32(p, v) AV_WL(32, p, v) +#endif + +#ifndef AV_RB64 +# define AV_RB64(p) AV_RB(64, p) +#endif +#ifndef AV_WB64 +# define AV_WB64(p, v) AV_WB(64, p, v) +#endif + +#ifndef AV_RL64 +# define AV_RL64(p) AV_RL(64, p) +#endif +#ifndef AV_WL64 +# define AV_WL64(p, v) AV_WL(64, p, v) +#endif + +#ifndef AV_RB24 +# define AV_RB24(x) \ + ((((const uint8_t*)(x))[0] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[2]) +#endif +#ifndef AV_WB24 +# define 
AV_WB24(p, d) do { \ + ((uint8_t*)(p))[2] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[0] = (d)>>16; \ + } while(0) +#endif + +#ifndef AV_RL24 +# define AV_RL24(x) \ + ((((const uint8_t*)(x))[2] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL24 +# define AV_WL24(p, d) do { \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + } while(0) +#endif + +#ifndef AV_RB48 +# define AV_RB48(x) \ + (((uint64_t)((const uint8_t*)(x))[0] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 8) | \ + (uint64_t)((const uint8_t*)(x))[5]) +#endif +#ifndef AV_WB48 +# define AV_WB48(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[5] = (d); \ + ((uint8_t*)(p))[4] = (d)>>8; \ + ((uint8_t*)(p))[3] = (d)>>16; \ + ((uint8_t*)(p))[2] = (d)>>24; \ + ((uint8_t*)(p))[1] = (d)>>32; \ + ((uint8_t*)(p))[0] = (d)>>40; \ + } while(0) +#endif + +#ifndef AV_RL48 +# define AV_RL48(x) \ + (((uint64_t)((const uint8_t*)(x))[5] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ + (uint64_t)((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL48 +# define AV_WL48(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + ((uint8_t*)(p))[4] = (d)>>32; \ + ((uint8_t*)(p))[5] = (d)>>40; \ + } while(0) +#endif + +/* + * The AV_[RW]NA macros access naturally aligned data + * in a type-safe way. 
+ */ + +#define AV_RNA(s, p) (((const av_alias##s*)(p))->u##s) +#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v)) + +#ifndef AV_RN16A +# define AV_RN16A(p) AV_RNA(16, p) +#endif + +#ifndef AV_RN32A +# define AV_RN32A(p) AV_RNA(32, p) +#endif + +#ifndef AV_RN64A +# define AV_RN64A(p) AV_RNA(64, p) +#endif + +#ifndef AV_WN16A +# define AV_WN16A(p, v) AV_WNA(16, p, v) +#endif + +#ifndef AV_WN32A +# define AV_WN32A(p, v) AV_WNA(32, p, v) +#endif + +#ifndef AV_WN64A +# define AV_WN64A(p, v) AV_WNA(64, p, v) +#endif + +/* + * The AV_COPYxxU macros are suitable for copying data to/from unaligned + * memory locations. + */ + +#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s)); + +#ifndef AV_COPY16U +# define AV_COPY16U(d, s) AV_COPYU(16, d, s) +#endif + +#ifndef AV_COPY32U +# define AV_COPY32U(d, s) AV_COPYU(32, d, s) +#endif + +#ifndef AV_COPY64U +# define AV_COPY64U(d, s) AV_COPYU(64, d, s) +#endif + +#ifndef AV_COPY128U +# define AV_COPY128U(d, s) \ + do { \ + AV_COPY64U(d, s); \ + AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8); \ + } while(0) +#endif + +/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be + * naturally aligned. They may be implemented using MMX, + * so emms_c() must be called before using any float code + * afterwards. 
+ */ + +#define AV_COPY(n, d, s) \ + (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n) + +#ifndef AV_COPY16 +# define AV_COPY16(d, s) AV_COPY(16, d, s) +#endif + +#ifndef AV_COPY32 +# define AV_COPY32(d, s) AV_COPY(32, d, s) +#endif + +#ifndef AV_COPY64 +# define AV_COPY64(d, s) AV_COPY(64, d, s) +#endif + +#ifndef AV_COPY128 +# define AV_COPY128(d, s) \ + do { \ + AV_COPY64(d, s); \ + AV_COPY64((char*)(d)+8, (char*)(s)+8); \ + } while(0) +#endif + +#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b)) + +#ifndef AV_SWAP64 +# define AV_SWAP64(a, b) AV_SWAP(64, a, b) +#endif + +#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0) + +#ifndef AV_ZERO16 +# define AV_ZERO16(d) AV_ZERO(16, d) +#endif + +#ifndef AV_ZERO32 +# define AV_ZERO32(d) AV_ZERO(32, d) +#endif + +#ifndef AV_ZERO64 +# define AV_ZERO64(d) AV_ZERO(64, d) +#endif + +#ifndef AV_ZERO128 +# define AV_ZERO128(d) \ + do { \ + AV_ZERO64(d); \ + AV_ZERO64((char*)(d)+8); \ + } while(0) +#endif + +#endif /* AVUTIL_INTREADWRITE_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/lfg.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/lfg.h new file mode 100644 index 0000000..2a92441 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/lfg.h @@ -0,0 +1,72 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Lagged Fibonacci PRNG + * Copyright (c) 2008 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LFG_H +#define AVUTIL_LFG_H + +#include <stdint.h> + +typedef struct AVLFG { + unsigned int state[64]; + int index; +} AVLFG; + +void liteav_av_lfg_init(AVLFG *c, unsigned int seed); + +/** + * Seed the state of the ALFG using binary data. + * + * Return value: 0 on success, negative value (AVERROR) on failure. + */ +int liteav_av_lfg_init_from_data(AVLFG *c, const uint8_t *data, unsigned int length); + +/** + * Get the next random unsigned 32-bit number using an ALFG. + * + * Please also consider a simple LCG like state= state*1664525+1013904223, + * it may be good enough and faster for your specific use case. + */ +static inline unsigned int av_lfg_get(AVLFG *c){ + c->state[c->index & 63] = c->state[(c->index-24) & 63] + c->state[(c->index-55) & 63]; + return c->state[c->index++ & 63]; +} + +/** + * Get the next random unsigned 32-bit number using a MLFG. + * + * Please also consider av_lfg_get() above, it is faster. + */ +static inline unsigned int av_mlfg_get(AVLFG *c){ + unsigned int a= c->state[(c->index-55) & 63]; + unsigned int b= c->state[(c->index-24) & 63]; + return c->state[c->index++ & 63] = 2*a*b+a+b; +} + +/** + * Get the next two numbers generated by a Box-Muller Gaussian + * generator using the random numbers issued by lfg. 
+ * + * @param out array where the two generated numbers are placed + */ +void liteav_av_bmg_get(AVLFG *lfg, double out[2]); + +#endif /* AVUTIL_LFG_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/log.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/log.h new file mode 100644 index 0000000..9396fcc --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/log.h @@ -0,0 +1,411 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LOG_H +#define AVUTIL_LOG_H + +#include <stdarg.h> +#include "avutil.h" +#include "attributes.h" +#include "version.h" + +typedef enum { + AV_CLASS_CATEGORY_NA = 0, + AV_CLASS_CATEGORY_INPUT, + AV_CLASS_CATEGORY_OUTPUT, + AV_CLASS_CATEGORY_MUXER, + AV_CLASS_CATEGORY_DEMUXER, + AV_CLASS_CATEGORY_ENCODER, + AV_CLASS_CATEGORY_DECODER, + AV_CLASS_CATEGORY_FILTER, + AV_CLASS_CATEGORY_BITSTREAM_FILTER, + AV_CLASS_CATEGORY_SWSCALER, + AV_CLASS_CATEGORY_SWRESAMPLER, + AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT = 40, + AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT, + AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT, + AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT, + AV_CLASS_CATEGORY_DEVICE_OUTPUT, + AV_CLASS_CATEGORY_DEVICE_INPUT, + AV_CLASS_CATEGORY_NB ///< not part of ABI/API +}AVClassCategory; + +#define AV_IS_INPUT_DEVICE(category) \ + (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT) || \ + ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT) || \ + ((category) == AV_CLASS_CATEGORY_DEVICE_INPUT)) + +#define AV_IS_OUTPUT_DEVICE(category) \ + (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT) || \ + ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT) || \ + ((category) == AV_CLASS_CATEGORY_DEVICE_OUTPUT)) + +struct AVOptionRanges; + +/** + * Describe the class of an AVClass context structure. That is an + * arbitrary struct of which the first field is a pointer to an + * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.). + */ +typedef struct AVClass { + /** + * The name of the class; usually it is the same name as the + * context structure type to which the AVClass is associated. + */ + const char* class_name; + + /** + * A pointer to a function which returns the name of a context + * instance ctx associated with the class. 
+ */ + const char* (*item_name)(void* ctx); + + /** + * a pointer to the first option specified in the class if any or NULL + * + * @see av_set_default_options() + */ + const struct AVOption *option; + + /** + * LIBAVUTIL_VERSION with which this structure was created. + * This is used to allow fields to be added without requiring major + * version bumps everywhere. + */ + + int version; + + /** + * Offset in the structure where log_level_offset is stored. + * 0 means there is no such variable + */ + int log_level_offset_offset; + + /** + * Offset in the structure where a pointer to the parent context for + * logging is stored. For example a decoder could pass its AVCodecContext + * to eval as such a parent context, which an liteav_av_log() implementation + * could then leverage to display the parent context. + * The offset can be NULL. + */ + int parent_log_context_offset; + + /** + * Return next AVOptions-enabled child or NULL + */ + void* (*child_next)(void *obj, void *prev); + + /** + * Return an AVClass corresponding to the next potential + * AVOptions-enabled child. + * + * The difference between child_next and this is that + * child_next iterates over _already existing_ objects, while + * child_class_next iterates over _all possible_ children. + */ + const struct AVClass* (*child_class_next)(const struct AVClass *prev); + + /** + * Category used for visualization (like color) + * This is only set if the category is equal for all objects using this class. + * available since version (51 << 16 | 56 << 8 | 100) + */ + AVClassCategory category; + + /** + * Callback to return the category. + * available since version (51 << 16 | 59 << 8 | 100) + */ + AVClassCategory (*get_category)(void* ctx); + + /** + * Callback to return the supported/allowed ranges. 
+ * available since version (52.12) + */ + int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags); +} AVClass; + +/** + * @addtogroup lavu_log + * + * @{ + * + * @defgroup lavu_log_constants Logging Constants + * + * @{ + */ + +/** + * Print no output. + */ +#define AV_LOG_QUIET -8 + +/** + * Something went really wrong and we will crash now. + */ +#define AV_LOG_PANIC 0 + +/** + * Something went wrong and recovery is not possible. + * For example, no header was found for a format which depends + * on headers or an illegal combination of parameters is used. + */ +#define AV_LOG_FATAL 8 + +/** + * Something went wrong and cannot losslessly be recovered. + * However, not all future data is affected. + */ +#define AV_LOG_ERROR 16 + +/** + * Something somehow does not look correct. This may or may not + * lead to problems. An example would be the use of '-vstrict -2'. + */ +#define AV_LOG_WARNING 24 + +/** + * Standard information. + */ +#define AV_LOG_INFO 32 + +/** + * Detailed information. + */ +#define AV_LOG_VERBOSE 40 + +/** + * Stuff which is only useful for libav* developers. + */ +#define AV_LOG_DEBUG 48 + +/** + * Extremely verbose debugging, useful for libav* development. + */ +#define AV_LOG_TRACE 56 + +#define AV_LOG_MAX_OFFSET (AV_LOG_TRACE - AV_LOG_QUIET) + +/** + * @} + */ + +/** + * Sets additional colors for extended debugging sessions. + * @code + liteav_av_log(ctx, AV_LOG_DEBUG|AV_LOG_C(134), "Message in purple\n"); + @endcode + * Requires 256color terminal support. Uses outside debugging is not + * recommended. + */ +#define AV_LOG_C(x) ((x) << 8) + +/** + * Send the specified message to the log if the level is less than or equal + * to the current av_log_level. By default, all logging messages are sent to + * stderr. This behavior can be altered by setting a different logging callback + * function. 
+ * @see liteav_av_log_set_callback + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct or NULL if general log. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + */ +void liteav_av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4); + + +/** + * Send the specified message to the log if the level is less than or equal + * to the current av_log_level. By default, all logging messages are sent to + * stderr. This behavior can be altered by setting a different logging callback + * function. + * @see liteav_av_log_set_callback + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + * @param vl The arguments referenced by the format string. + */ +void liteav_av_vlog(void *avcl, int level, const char *fmt, va_list vl); + +/** + * Get the current log level + * + * @see lavu_log_constants + * + * @return Current log level + */ +int liteav_av_log_get_level(void); + +/** + * Set the log level + * + * @see lavu_log_constants + * + * @param level Logging level + */ +void liteav_av_log_set_level(int level); + +/** + * Set the logging callback + * + * @note The callback must be thread safe, even if the application does not use + * threads itself as some codecs are multithreaded. + * + * @see liteav_av_log_default_callback + * + * @param callback A logging function with a compatible signature. 
+ */ +void liteav_av_log_set_callback(void (*callback)(void*, int, const char*, va_list)); + +/** + * Default logging callback + * + * It prints the message to stderr, optionally colorizing it. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + * @param vl The arguments referenced by the format string. + */ +void liteav_av_log_default_callback(void *avcl, int level, const char *fmt, + va_list vl); + +/** + * Return the context name + * + * @param ctx The AVClass context + * + * @return The AVClass class_name + */ +const char* liteav_av_default_item_name(void* ctx); +AVClassCategory liteav_av_default_get_category(void *ptr); + +/** + * Format a line of log the same way as the default callback. + * @param line buffer to receive the formatted line + * @param line_size size of the buffer + * @param print_prefix used to store whether the prefix must be printed; + * must point to a persistent integer initially set to 1 + */ +void liteav_av_log_format_line(void *ptr, int level, const char *fmt, va_list vl, + char *line, int line_size, int *print_prefix); + +/** + * Format a line of log the same way as the default callback. 
+ * @param line buffer to receive the formatted line; + * may be NULL if line_size is 0 + * @param line_size size of the buffer; at most line_size-1 characters will + * be written to the buffer, plus one null terminator + * @param print_prefix used to store whether the prefix must be printed; + * must point to a persistent integer initially set to 1 + * @return Returns a negative value if an error occurred, otherwise returns + * the number of characters that would have been written for a + * sufficiently large buffer, not including the terminating null + * character. If the return value is not less than line_size, it means + * that the log message was truncated to fit the buffer. + */ +int liteav_av_log_format_line2(void *ptr, int level, const char *fmt, va_list vl, + char *line, int line_size, int *print_prefix); + +/** + * Skip repeated messages, this requires the user app to use liteav_av_log() instead of + * (f)printf as the 2 would otherwise interfere and lead to + * "Last message repeated x times" messages below (f)printf messages with some + * bad luck. + * Also to receive the last, "last repeated" line if any, the user app must + * call liteav_av_log(NULL, AV_LOG_QUIET, "%s", ""); at the end + */ +#define AV_LOG_SKIP_REPEATED 1 + +/** + * Include the log severity in messages originating from codecs. 
+ * + * Results in messages such as: + * [rawvideo @ 0xDEADBEEF] [error] encode did not produce valid pts + */ +#define AV_LOG_PRINT_LEVEL 2 + +void liteav_av_log_set_flags(int arg); +int liteav_av_log_get_flags(void); + +enum FFmpegMsgType { + FFMPEG_MSG_TYPE_DATAREPORT, +}; + +enum FFmpegDataReportType { + FFMPEG_DATAREPORT_TYPE_NETERROR = 0, //Some Net Error happened, will send last error in tcp.c to app + FFMPEG_DATAREPORT_TYPE_BYTES, //Size that we got from net this time + FFMPEG_DATAREPORT_TYPE_REDIRECTIP, //The redirected ip + FFMPEG_DATAREPORT_TYPE_SVRCONNECTED, //The time when svr is connected + FFMPEG_DATAREPORT_TYPE_DURERROR, //This ts's duration is different from m3u8's defination + FFMPEG_DATAREPORT_TYPE_M3U8ERROR, //This ts's m3u8 is wrong, so we cannot trust its seq_no, and sometimes it may skip 1 Ts_file. + FFMPEG_DATAREPORT_TYPE_TCPCONNECTTIME, //Time(in micro seconds) taken for a TCP connection. It's reported for every successful TCP connection. + FFMPEG_DATAREPORT_TYPE_M3U8DATETIME, //The value of the #EXT-X-PROGRAM-DATE-TIME tag for the current segment + FFMPEG_DATAREPORT_TYPE_M3U8ADTIME, //The value of the #EXT-QQHLS-AD tag for the current segment + + FFMPEG_DATAREPORT_TYPE_GETSTREAMDATATIME, //get stream data at the probe data + FFMPEG_DATAREPORT_TYPE_TCPRECVERROR, //tcp recv error + FFMPEG_DATAREPORT_TYPE_REGISTER_ALL_FAIL // av_reigister_all fail + +}; + +enum FFmpegNetErrorType { + NETERROR_TYPE_GETADDR = 0x00010000, + NETERROR_TYPE_OPENSOCKET = 0x00020000, + NETERROR_TYPE_BINDFAIL = 0x00030000, + NETERROR_TYPE_LISTENFAIL = 0x00040000, + NETERROR_TYPE_POLLFAIL = 0x00050000, + NETERROR_TYPE_ACCEPTFAIL = 0x00060000, + NETERROR_TYPE_RECV = 0x00070000, + NETERROR_TYPE_READTIMEOUT = 0x00080000, + NETERROR_TYPE_SEND = 0x00090000, + NETERROR_TYPE_WRITETIMEOUT = 0x000A0000, + NETERROR_TYPE_OPENTIMEOUT = 0x000B0000, + NETERROR_TYPE_OTHER = 0x40000000, +}; + +typedef void (*av_msg_callback_t)(int, int, const char*, int, void*); + +// This function is 
a little tricky. +// There's no simple way to send a customized message to the caller with a specific context, +// so we take the AVIOInterruptCB->opaque as the context. But AVIOInterruptCB is defined in +// avformat module, to avoid compiling errors, we define a macro here to access the opaque field. +// +#define INTERRUPT_CB_OPAQUE(pCb) (pCb ? pCb->opaque : NULL) +void liteav_av_msg(int nMsgType, int nSubType, const char* pucMsgBuf, int nBufSize, void* pContext); + +void liteav_av_msg_set_callback(av_msg_callback_t cb); + +/** + * @} + */ + +#endif /* AVUTIL_LOG_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/lzo.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/lzo.h new file mode 100644 index 0000000..c034039 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/lzo.h @@ -0,0 +1,66 @@ +/* + * LZO 1x decompression + * copyright (c) 2006 Reimar Doeffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LZO_H +#define AVUTIL_LZO_H + +/** + * @defgroup lavu_lzo LZO + * @ingroup lavu_crypto + * + * @{ + */ + +#include <stdint.h> + +/** @name Error flags returned by av_lzo1x_decode + * @{ */ +/// end of the input buffer reached before decoding finished +#define AV_LZO_INPUT_DEPLETED 1 +/// decoded data did not fit into output buffer +#define AV_LZO_OUTPUT_FULL 2 +/// a reference to previously decoded data was wrong +#define AV_LZO_INVALID_BACKPTR 4 +/// a non-specific error in the compressed bitstream +#define AV_LZO_ERROR 8 +/** @} */ + +#define AV_LZO_INPUT_PADDING 8 +#define AV_LZO_OUTPUT_PADDING 12 + +/** + * @brief Decodes LZO 1x compressed data. + * @param out output buffer + * @param outlen size of output buffer, number of bytes left are returned here + * @param in input buffer + * @param inlen size of input buffer, number of bytes left are returned here + * @return 0 on success, otherwise a combination of the error flags above + * + * Make sure all buffers are appropriately padded, in must provide + * AV_LZO_INPUT_PADDING, out must provide AV_LZO_OUTPUT_PADDING additional bytes. + */ +int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen); + +/** + * @} + */ + +#endif /* AVUTIL_LZO_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/macros.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/macros.h new file mode 100644 index 0000000..2007ee5 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/macros.h @@ -0,0 +1,50 @@ +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu + * Utility Preprocessor macros + */ + +#ifndef AVUTIL_MACROS_H +#define AVUTIL_MACROS_H + +/** + * @addtogroup preproc_misc Preprocessor String Macros + * + * String manipulation macros + * + * @{ + */ + +#define AV_STRINGIFY(s) AV_TOSTRING(s) +#define AV_TOSTRING(s) #s + +#define AV_GLUE(a, b) a ## b +#define AV_JOIN(a, b) AV_GLUE(a, b) + +/** + * @} + */ + +#define AV_PRAGMA(s) _Pragma(#s) + +#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1)) + +#endif /* AVUTIL_MACROS_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/mastering_display_metadata.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/mastering_display_metadata.h new file mode 100644 index 0000000..03670cc --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/mastering_display_metadata.h @@ -0,0 +1,129 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2016 Neil Birkbeck <neil.birkbeck@gmail.com> + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MASTERING_DISPLAY_METADATA_H +#define AVUTIL_MASTERING_DISPLAY_METADATA_H + +#include "frame.h" +#include "rational.h" + + +/** + * Mastering display metadata capable of representing the color volume of + * the display used to master the content (SMPTE 2086:2014). + * + * To be used as payload of a AVFrameSideData or AVPacketSideData with the + * appropriate type. + * + * @note The struct should be allocated with liteav_av_mastering_display_metadata_alloc() + * and its size is not a part of the public ABI. + */ +typedef struct AVMasteringDisplayMetadata { + /** + * CIE 1931 xy chromaticity coords of color primaries (r, g, b order). + */ + AVRational display_primaries[3][2]; + + /** + * CIE 1931 xy chromaticity coords of white point. + */ + AVRational white_point[2]; + + /** + * Min luminance of mastering display (cd/m^2). + */ + AVRational min_luminance; + + /** + * Max luminance of mastering display (cd/m^2). + */ + AVRational max_luminance; + + /** + * Flag indicating whether the display primaries (and white point) are set. + */ + int has_primaries; + + /** + * Flag indicating whether the luminance (min_ and max_) have been set. 
+ */ + int has_luminance; + +} AVMasteringDisplayMetadata; + +/** + * Allocate an AVMasteringDisplayMetadata structure and set its fields to + * default values. The resulting struct can be freed using liteav_av_freep(). + * + * @return An AVMasteringDisplayMetadata filled with default values or NULL + * on failure. + */ +AVMasteringDisplayMetadata *liteav_av_mastering_display_metadata_alloc(void); + +/** + * Allocate a complete AVMasteringDisplayMetadata and add it to the frame. + * + * @param frame The frame which side data is added to. + * + * @return The AVMasteringDisplayMetadata structure to be filled by caller. + */ +AVMasteringDisplayMetadata *liteav_av_mastering_display_metadata_create_side_data(AVFrame *frame); + +/** + * Content light level needed by to transmit HDR over HDMI (CTA-861.3). + * + * To be used as payload of a AVFrameSideData or AVPacketSideData with the + * appropriate type. + * + * @note The struct should be allocated with liteav_av_content_light_metadata_alloc() + * and its size is not a part of the public ABI. + */ +typedef struct AVContentLightMetadata { + /** + * Max content light level (cd/m^2). + */ + unsigned MaxCLL; + + /** + * Max average light level per frame (cd/m^2). + */ + unsigned MaxFALL; +} AVContentLightMetadata; + +/** + * Allocate an AVContentLightMetadata structure and set its fields to + * default values. The resulting struct can be freed using liteav_av_freep(). + * + * @return An AVContentLightMetadata filled with default values or NULL + * on failure. + */ +AVContentLightMetadata *liteav_av_content_light_metadata_alloc(size_t *size); + +/** + * Allocate a complete AVContentLightMetadata and add it to the frame. + * + * @param frame The frame which side data is added to. + * + * @return The AVContentLightMetadata structure to be filled by caller. 
+ */ +AVContentLightMetadata *liteav_av_content_light_metadata_create_side_data(AVFrame *frame); + +#endif /* AVUTIL_MASTERING_DISPLAY_METADATA_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/mathematics.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/mathematics.h new file mode 100644 index 0000000..085fb2d --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/mathematics.h @@ -0,0 +1,243 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2005-2012 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @addtogroup lavu_math + * Mathematical utilities for working with timestamp and time base. 
+ */ + +#ifndef AVUTIL_MATHEMATICS_H +#define AVUTIL_MATHEMATICS_H + +#include <stdint.h> +#include <math.h> +#include "attributes.h" +#include "rational.h" +#include "intfloat.h" + +#ifndef M_E +#define M_E 2.7182818284590452354 /* e */ +#endif +#ifndef M_LN2 +#define M_LN2 0.69314718055994530942 /* log_e 2 */ +#endif +#ifndef M_LN10 +#define M_LN10 2.30258509299404568402 /* log_e 10 */ +#endif +#ifndef M_LOG2_10 +#define M_LOG2_10 3.32192809488736234787 /* log_2 10 */ +#endif +#ifndef M_PHI +#define M_PHI 1.61803398874989484820 /* phi / golden ratio */ +#endif +#ifndef M_PI +#define M_PI 3.14159265358979323846 /* pi */ +#endif +#ifndef M_PI_2 +#define M_PI_2 1.57079632679489661923 /* pi/2 */ +#endif +#ifndef M_SQRT1_2 +#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */ +#endif +#ifndef M_SQRT2 +#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */ +#endif +#ifndef NAN +#define NAN av_int2float(0x7fc00000) +#endif +#ifndef INFINITY +#define INFINITY av_int2float(0x7f800000) +#endif + +/** + * @addtogroup lavu_math + * + * @{ + */ + +/** + * Rounding methods. + */ +enum AVRounding { + AV_ROUND_ZERO = 0, ///< Round toward zero. + AV_ROUND_INF = 1, ///< Round away from zero. + AV_ROUND_DOWN = 2, ///< Round toward -infinity. + AV_ROUND_UP = 3, ///< Round toward +infinity. + AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero. + /** + * Flag telling rescaling functions to pass `INT64_MIN`/`MAX` through + * unchanged, avoiding special cases for #AV_NOPTS_VALUE. + * + * Unlike other values of the enumeration AVRounding, this value is a + * bitmask that must be used in conjunction with another value of the + * enumeration through a bitwise OR, in order to set behavior for normal + * cases. 
+ * + * @code{.c} + * liteav_av_rescale_rnd(3, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX); + * // Rescaling 3: + * // Calculating 3 * 1 / 2 + * // 3 / 2 is rounded up to 2 + * // => 2 + * + * liteav_av_rescale_rnd(AV_NOPTS_VALUE, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX); + * // Rescaling AV_NOPTS_VALUE: + * // AV_NOPTS_VALUE == INT64_MIN + * // AV_NOPTS_VALUE is passed through + * // => AV_NOPTS_VALUE + * @endcode + */ + AV_ROUND_PASS_MINMAX = 8192, +}; + +/** + * Compute the greatest common divisor of two integer operands. + * + * @param a,b Operands + * @return GCD of a and b up to sign; if a >= 0 and b >= 0, return value is >= 0; + * if a == 0 and b == 0, returns 0. + */ +int64_t av_const liteav_av_gcd(int64_t a, int64_t b); + +/** + * Rescale a 64-bit integer with rounding to nearest. + * + * The operation is mathematically equivalent to `a * b / c`, but writing that + * directly can overflow. + * + * This function is equivalent to liteav_av_rescale_rnd() with #AV_ROUND_NEAR_INF. + * + * @see liteav_av_rescale_rnd(), liteav_av_rescale_q(), liteav_av_rescale_q_rnd() + */ +int64_t liteav_av_rescale(int64_t a, int64_t b, int64_t c) av_const; + +/** + * Rescale a 64-bit integer with specified rounding. + * + * The operation is mathematically equivalent to `a * b / c`, but writing that + * directly can overflow, and does not support different rounding methods. + * + * @see liteav_av_rescale(), liteav_av_rescale_q(), liteav_av_rescale_q_rnd() + */ +int64_t liteav_av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd) av_const; + +/** + * Rescale a 64-bit integer by 2 rational numbers. + * + * The operation is mathematically equivalent to `a * bq / cq`. + * + * This function is equivalent to liteav_av_rescale_q_rnd() with #AV_ROUND_NEAR_INF. 
+ * + * @see liteav_av_rescale(), liteav_av_rescale_rnd(), liteav_av_rescale_q_rnd() + */ +int64_t liteav_av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const; + +/** + * Rescale a 64-bit integer by 2 rational numbers with specified rounding. + * + * The operation is mathematically equivalent to `a * bq / cq`. + * + * @see liteav_av_rescale(), liteav_av_rescale_rnd(), liteav_av_rescale_q() + */ +int64_t liteav_av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, + enum AVRounding rnd) av_const; + +/** + * Compare two timestamps each in its own time base. + * + * @return One of the following values: + * - -1 if `ts_a` is before `ts_b` + * - 1 if `ts_a` is after `ts_b` + * - 0 if they represent the same position + * + * @warning + * The result of the function is undefined if one of the timestamps is outside + * the `int64_t` range when represented in the other's timebase. + */ +int liteav_av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b); + +/** + * Compare the remainders of two integer operands divided by a common divisor. + * + * In other words, compare the least significant `log2(mod)` bits of integers + * `a` and `b`. + * + * @code{.c} + * liteav_av_compare_mod(0x11, 0x02, 0x10) < 0 // since 0x11 % 0x10 (0x1) < 0x02 % 0x10 (0x2) + * liteav_av_compare_mod(0x11, 0x02, 0x20) > 0 // since 0x11 % 0x20 (0x11) > 0x02 % 0x20 (0x02) + * @endcode + * + * @param a,b Operands + * @param mod Divisor; must be a power of 2 + * @return + * - a negative value if `a % mod < b % mod` + * - a positive value if `a % mod > b % mod` + * - zero if `a % mod == b % mod` + */ +int64_t liteav_av_compare_mod(uint64_t a, uint64_t b, uint64_t mod); + +/** + * Rescale a timestamp while preserving known durations. + * + * This function is designed to be called per audio packet to scale the input + * timestamp to a different time base. 
Compared to a simple liteav_av_rescale_q() + * call, this function is robust against possible inconsistent frame durations. + * + * The `last` parameter is a state variable that must be preserved for all + * subsequent calls for the same stream. For the first call, `*last` should be + * initialized to #AV_NOPTS_VALUE. + * + * @param[in] in_tb Input time base + * @param[in] in_ts Input timestamp + * @param[in] fs_tb Duration time base; typically this is finer-grained + * (greater) than `in_tb` and `out_tb` + * @param[in] duration Duration till the next call to this function (i.e. + * duration of the current packet/frame) + * @param[in,out] last Pointer to a timestamp expressed in terms of + * `fs_tb`, acting as a state variable + * @param[in] out_tb Output timebase + * @return Timestamp expressed in terms of `out_tb` + * + * @note In the context of this function, "duration" is in term of samples, not + * seconds. + */ +int64_t liteav_av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb); + +/** + * Add a value to a timestamp. + * + * This function guarantees that when the same value is repeatly added that + * no accumulation of rounding errors occurs. 
+ * + * @param[in] ts Input timestamp + * @param[in] ts_tb Input timestamp time base + * @param[in] inc Value to be added + * @param[in] inc_tb Time base of `inc` + */ +int64_t liteav_av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb, int64_t inc); + + +/** + * @} + */ + +#endif /* AVUTIL_MATHEMATICS_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/md5.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/md5.h new file mode 100644 index 0000000..b55c0f3 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/md5.h @@ -0,0 +1,99 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_md5 + * Public header for MD5 hash function implementation. + */ + +#ifndef AVUTIL_MD5_H +#define AVUTIL_MD5_H + +#include <stddef.h> +#include <stdint.h> + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_md5 MD5 + * @ingroup lavu_hash + * MD5 hash function implementation. 
+ * + * @{ + */ + +extern const int liteav_av_md5_size; + +struct AVMD5; + +/** + * Allocate an AVMD5 context. + */ +struct AVMD5 *liteav_av_md5_alloc(void); + +/** + * Initialize MD5 hashing. + * + * @param ctx pointer to the function context (of size liteav_av_md5_size) + */ +void liteav_av_md5_init(struct AVMD5 *ctx); + +/** + * Update hash value. + * + * @param ctx hash function context + * @param src input data to update hash with + * @param len input data length + */ +#if FF_API_CRYPTO_SIZE_T +void liteav_av_md5_update(struct AVMD5 *ctx, const uint8_t *src, int len); +#else +void liteav_av_md5_update(struct AVMD5 *ctx, const uint8_t *src, size_t len); +#endif + +/** + * Finish hashing and output digest value. + * + * @param ctx hash function context + * @param dst buffer where output digest value is stored + */ +void liteav_av_md5_final(struct AVMD5 *ctx, uint8_t *dst); + +/** + * Hash an array of data. + * + * @param dst The output buffer to write the digest into + * @param src The data to hash + * @param len The length of the data, in bytes + */ +#if FF_API_CRYPTO_SIZE_T +void liteav_av_md5_sum(uint8_t *dst, const uint8_t *src, const int len); +#else +void liteav_av_md5_sum(uint8_t *dst, const uint8_t *src, size_t len); +#endif + +/** + * @} + */ + +#endif /* AVUTIL_MD5_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/mem.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/mem.h new file mode 100644 index 0000000..1a1a26a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/mem.h @@ -0,0 +1,701 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_mem + * Memory handling functions + */ + +#ifndef AVUTIL_MEM_H +#define AVUTIL_MEM_H + +#include <limits.h> +#include <stdint.h> + +#include "attributes.h" +#include "error.h" +#include "avutil.h" + +/** + * @addtogroup lavu_mem + * Utilities for manipulating memory. + * + * FFmpeg has several applications of memory that are not required of a typical + * program. For example, the computing-heavy components like video decoding and + * encoding can be sped up significantly through the use of aligned memory. + * + * However, for each of FFmpeg's applications of memory, there might not be a + * recognized or standardized API for that specific use. Memory alignment, for + * instance, varies wildly depending on operating systems, architectures, and + * compilers. Hence, this component of @ref libavutil is created to make + * dealing with memory consistently possible on all platforms. + * + * @{ + * + * @defgroup lavu_mem_macros Alignment Macros + * Helper macros for declaring aligned variables. + * @{ + */ + +/** + * @def DECLARE_ALIGNED(n,t,v) + * Declare a variable that is aligned in memory. 
+ * + * @code{.c} + * DECLARE_ALIGNED(16, uint16_t, aligned_int) = 42; + * DECLARE_ALIGNED(32, uint8_t, aligned_array)[128]; + * + * // The default-alignment equivalent would be + * uint16_t aligned_int = 42; + * uint8_t aligned_array[128]; + * @endcode + * + * @param n Minimum alignment in bytes + * @param t Type of the variable (or array element) + * @param v Name of the variable + */ + +/** + * @def DECLARE_ASM_ALIGNED(n,t,v) + * Declare an aligned variable appropriate for use in inline assembly code. + * + * @code{.c} + * DECLARE_ASM_ALIGNED(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008); + * @endcode + * + * @param n Minimum alignment in bytes + * @param t Type of the variable (or array element) + * @param v Name of the variable + */ + +/** + * @def DECLARE_ASM_CONST(n,t,v) + * Declare a static constant aligned variable appropriate for use in inline + * assembly code. + * + * @code{.c} + * DECLARE_ASM_CONST(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008); + * @endcode + * + * @param n Minimum alignment in bytes + * @param t Type of the variable (or array element) + * @param v Name of the variable + */ + +#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C) + #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v + #define DECLARE_ASM_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v + #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v +#elif defined(__DJGPP__) + #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (FFMIN(n, 16)))) v + #define DECLARE_ASM_ALIGNED(n,t,v) t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v + #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v +#elif defined(__GNUC__) || defined(__clang__) + #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v + #define DECLARE_ASM_ALIGNED(n,t,v) t av_used __attribute__ ((aligned (n))) v + #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned 
(n))) v +#elif defined(_MSC_VER) + #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v + #define DECLARE_ASM_ALIGNED(n,t,v) __declspec(align(n)) t v + #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v +#else + #define DECLARE_ALIGNED(n,t,v) t v + #define DECLARE_ASM_ALIGNED(n,t,v) t v + #define DECLARE_ASM_CONST(n,t,v) static const t v +#endif + +/** + * @} + */ + +/** + * @defgroup lavu_mem_attrs Function Attributes + * Function attributes applicable to memory handling functions. + * + * These function attributes can help compilers emit more useful warnings, or + * generate better code. + * @{ + */ + +/** + * @def av_malloc_attrib + * Function attribute denoting a malloc-like function. + * + * @see <a href="https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-g_t_0040code_007bmalloc_007d-function-attribute-3251">Function attribute `malloc` in GCC's documentation</a> + */ + +#if AV_GCC_VERSION_AT_LEAST(3,1) + #define av_malloc_attrib __attribute__((__malloc__)) +#else + #define av_malloc_attrib +#endif + +/** + * @def av_alloc_size(...) + * Function attribute used on a function that allocates memory, whose size is + * given by the specified parameter(s). + * + * @code{.c} + * void *liteav_av_malloc(size_t size) av_alloc_size(1); + * void *liteav_av_calloc(size_t nmemb, size_t size) av_alloc_size(1, 2); + * @endcode + * + * @param ... One or two parameter indexes, separated by a comma + * + * @see <a href="https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-g_t_0040code_007balloc_005fsize_007d-function-attribute-3220">Function attribute `alloc_size` in GCC's documentation</a> + */ + +#if AV_GCC_VERSION_AT_LEAST(4,3) + #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__))) +#else + #define av_alloc_size(...) +#endif + +/** + * @} + */ + +/** + * @defgroup lavu_mem_funcs Heap Management + * Functions responsible for allocating, freeing, and copying memory. 
+ * + * All memory allocation functions have a built-in upper limit of `INT_MAX` + * bytes. This may be changed with liteav_av_max_alloc(), although exercise extreme + * caution when doing so. + * + * @{ + */ + +/** + * Allocate a memory block with alignment suitable for all memory accesses + * (including vectors if available on the CPU). + * + * @param size Size in bytes for the memory block to be allocated + * @return Pointer to the allocated block, or `NULL` if the block cannot + * be allocated + * @see liteav_av_mallocz() + */ +void *liteav_av_malloc(size_t size) av_malloc_attrib av_alloc_size(1); + +/** + * Allocate a memory block with alignment suitable for all memory accesses + * (including vectors if available on the CPU) and zero all the bytes of the + * block. + * + * @param size Size in bytes for the memory block to be allocated + * @return Pointer to the allocated block, or `NULL` if it cannot be allocated + * @see liteav_av_malloc() + */ +void *liteav_av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1); + +/** + * Allocate a memory block for an array with liteav_av_malloc(). + * + * The allocated memory will have size `size * nmemb` bytes. + * + * @param nmemb Number of element + * @param size Size of a single element + * @return Pointer to the allocated block, or `NULL` if the block cannot + * be allocated + * @see liteav_av_malloc() + */ +av_alloc_size(1, 2) void *liteav_av_malloc_array(size_t nmemb, size_t size); + +/** + * Allocate a memory block for an array with liteav_av_mallocz(). + * + * The allocated memory will have size `size * nmemb` bytes. + * + * @param nmemb Number of elements + * @param size Size of the single element + * @return Pointer to the allocated block, or `NULL` if the block cannot + * be allocated + * + * @see liteav_av_mallocz() + * @see liteav_av_malloc_array() + */ +av_alloc_size(1, 2) void *liteav_av_mallocz_array(size_t nmemb, size_t size); + +/** + * Non-inlined equivalent of liteav_av_mallocz_array(). 
+ * + * Created for symmetry with the calloc() C function. + */ +void *liteav_av_calloc(size_t nmemb, size_t size) av_malloc_attrib; + +/** + * Allocate, reallocate, or free a block of memory. + * + * If `ptr` is `NULL` and `size` > 0, allocate a new block. If `size` is + * zero, free the memory block pointed to by `ptr`. Otherwise, expand or + * shrink that block of memory according to `size`. + * + * @param ptr Pointer to a memory block already allocated with + * liteav_av_realloc() or `NULL` + * @param size Size in bytes of the memory block to be allocated or + * reallocated + * + * @return Pointer to a newly-reallocated block or `NULL` if the block + * cannot be reallocated or the function is used to free the memory block + * + * @warning Unlike liteav_av_malloc(), the returned pointer is not guaranteed to be + * correctly aligned. + * @see liteav_av_fast_realloc() + * @see liteav_av_reallocp() + */ +void *liteav_av_realloc(void *ptr, size_t size) av_alloc_size(2); + +/** + * Allocate, reallocate, or free a block of memory through a pointer to a + * pointer. + * + * If `*ptr` is `NULL` and `size` > 0, allocate a new block. If `size` is + * zero, free the memory block pointed to by `*ptr`. Otherwise, expand or + * shrink that block of memory according to `size`. + * + * @param[in,out] ptr Pointer to a pointer to a memory block already allocated + * with liteav_av_realloc(), or a pointer to `NULL`. The pointer + * is updated on success, or freed on failure. + * @param[in] size Size in bytes for the memory block to be allocated or + * reallocated + * + * @return Zero on success, an AVERROR error code on failure + * + * @warning Unlike liteav_av_malloc(), the allocated memory is not guaranteed to be + * correctly aligned. + */ +av_warn_unused_result +int liteav_av_reallocp(void *ptr, size_t size); + +/** + * Allocate, reallocate, or free a block of memory. 
+ * + * This function does the same thing as liteav_av_realloc(), except: + * - It takes two size arguments and allocates `nelem * elsize` bytes, + * after checking the result of the multiplication for integer overflow. + * - It frees the input block in case of failure, thus avoiding the memory + * leak with the classic + * @code{.c} + * buf = realloc(buf); + * if (!buf) + * return -1; + * @endcode + * pattern. + */ +void *liteav_av_realloc_f(void *ptr, size_t nelem, size_t elsize); + +/** + * Allocate, reallocate, or free an array. + * + * If `ptr` is `NULL` and `nmemb` > 0, allocate a new block. If + * `nmemb` is zero, free the memory block pointed to by `ptr`. + * + * @param ptr Pointer to a memory block already allocated with + * liteav_av_realloc() or `NULL` + * @param nmemb Number of elements in the array + * @param size Size of the single element of the array + * + * @return Pointer to a newly-reallocated block or NULL if the block + * cannot be reallocated or the function is used to free the memory block + * + * @warning Unlike liteav_av_malloc(), the allocated memory is not guaranteed to be + * correctly aligned. + * @see liteav_av_reallocp_array() + */ +av_alloc_size(2, 3) void *liteav_av_realloc_array(void *ptr, size_t nmemb, size_t size); + +/** + * Allocate, reallocate, or free an array through a pointer to a pointer. + * + * If `*ptr` is `NULL` and `nmemb` > 0, allocate a new block. If `nmemb` is + * zero, free the memory block pointed to by `*ptr`. + * + * @param[in,out] ptr Pointer to a pointer to a memory block already + * allocated with liteav_av_realloc(), or a pointer to `NULL`. + * The pointer is updated on success, or freed on failure. + * @param[in] nmemb Number of elements + * @param[in] size Size of the single element + * + * @return Zero on success, an AVERROR error code on failure + * + * @warning Unlike liteav_av_malloc(), the allocated memory is not guaranteed to be + * correctly aligned. 
+ */ +av_alloc_size(2, 3) int liteav_av_reallocp_array(void *ptr, size_t nmemb, size_t size); + +/** + * Reallocate the given buffer if it is not large enough, otherwise do nothing. + * + * If the given buffer is `NULL`, then a new uninitialized buffer is allocated. + * + * If the given buffer is not large enough, and reallocation fails, `NULL` is + * returned and `*size` is set to 0, but the original buffer is not changed or + * freed. + * + * A typical use pattern follows: + * + * @code{.c} + * uint8_t *buf = ...; + * uint8_t *new_buf = liteav_av_fast_realloc(buf, ¤t_size, size_needed); + * if (!new_buf) { + * // Allocation failed; clean up original buffer + * liteav_av_freep(&buf); + * return AVERROR(ENOMEM); + * } + * @endcode + * + * @param[in,out] ptr Already allocated buffer, or `NULL` + * @param[in,out] size Pointer to current size of buffer `ptr`. `*size` is + * changed to `min_size` in case of success or 0 in + * case of failure + * @param[in] min_size New size of buffer `ptr` + * @return `ptr` if the buffer is large enough, a pointer to newly reallocated + * buffer if the buffer was not large enough, or `NULL` in case of + * error + * @see liteav_av_realloc() + * @see liteav_av_fast_malloc() + */ +void *liteav_av_fast_realloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Allocate a buffer, reusing the given one if large enough. + * + * Contrary to liteav_av_fast_realloc(), the current buffer contents might not be + * preserved and on error the old buffer is freed, thus no special handling to + * avoid memleaks is necessary. + * + * `*ptr` is allowed to be `NULL`, in which case allocation always happens if + * `size_needed` is greater than 0. + * + * @code{.c} + * uint8_t *buf = ...; + * liteav_av_fast_malloc(&buf, ¤t_size, size_needed); + * if (!buf) { + * // Allocation failed; buf already freed + * return AVERROR(ENOMEM); + * } + * @endcode + * + * @param[in,out] ptr Pointer to pointer to an already allocated buffer. 
+ * `*ptr` will be overwritten with pointer to new + * buffer on success or `NULL` on failure + * @param[in,out] size Pointer to current size of buffer `*ptr`. `*size` is + * changed to `min_size` in case of success or 0 in + * case of failure + * @param[in] min_size New size of buffer `*ptr` + * @see liteav_av_realloc() + * @see liteav_av_fast_mallocz() + */ +void liteav_av_fast_malloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Allocate and clear a buffer, reusing the given one if large enough. + * + * Like liteav_av_fast_malloc(), but all newly allocated space is initially cleared. + * Reused buffer is not cleared. + * + * `*ptr` is allowed to be `NULL`, in which case allocation always happens if + * `size_needed` is greater than 0. + * + * @param[in,out] ptr Pointer to pointer to an already allocated buffer. + * `*ptr` will be overwritten with pointer to new + * buffer on success or `NULL` on failure + * @param[in,out] size Pointer to current size of buffer `*ptr`. `*size` is + * changed to `min_size` in case of success or 0 in + * case of failure + * @param[in] min_size New size of buffer `*ptr` + * @see liteav_av_fast_malloc() + */ +void liteav_av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size); + +/** + * Free a memory block which has been allocated with a function of liteav_av_malloc() + * or liteav_av_realloc() family. + * + * @param ptr Pointer to the memory block which should be freed. + * + * @note `ptr = NULL` is explicitly allowed. + * @note It is recommended that you use liteav_av_freep() instead, to prevent leaving + * behind dangling pointers. + * @see liteav_av_freep() + */ +void liteav_av_free(void *ptr); + +/** + * Free a memory block which has been allocated with a function of liteav_av_malloc() + * or liteav_av_realloc() family, and set the pointer pointing to it to `NULL`. 
+ * + * @code{.c} + * uint8_t *buf = liteav_av_malloc(16); + * liteav_av_free(buf); + * // buf now contains a dangling pointer to freed memory, and accidental + * // dereference of buf will result in a use-after-free, which may be a + * // security risk. + * + * uint8_t *buf = liteav_av_malloc(16); + * liteav_av_freep(&buf); + * // buf is now NULL, and accidental dereference will only result in a + * // NULL-pointer dereference. + * @endcode + * + * @param ptr Pointer to the pointer to the memory block which should be freed + * @note `*ptr = NULL` is safe and leads to no action. + * @see liteav_av_free() + */ +void liteav_av_freep(void *ptr); + +/** + * Duplicate a string. + * + * @param s String to be duplicated + * @return Pointer to a newly-allocated string containing a + * copy of `s` or `NULL` if the string cannot be allocated + * @see liteav_av_strndup() + */ +char *liteav_av_strdup(const char *s) av_malloc_attrib; + +/** + * Duplicate a substring of a string. + * + * @param s String to be duplicated + * @param len Maximum length of the resulting string (not counting the + * terminating byte) + * @return Pointer to a newly-allocated string containing a + * substring of `s` or `NULL` if the string cannot be allocated + */ +char *liteav_av_strndup(const char *s, size_t len) av_malloc_attrib; + +/** + * Duplicate a buffer with liteav_av_malloc(). + * + * @param p Buffer to be duplicated + * @param size Size in bytes of the buffer copied + * @return Pointer to a newly allocated buffer containing a + * copy of `p` or `NULL` if the buffer cannot be allocated + */ +void *liteav_av_memdup(const void *p, size_t size); + +/** + * Overlapping memcpy() implementation. + * + * @param dst Destination buffer + * @param back Number of bytes back to start copying (i.e. 
the initial size of + * the overlapping window); must be > 0 + * @param cnt Number of bytes to copy; must be >= 0 + * + * @note `cnt > back` is valid, this will copy the bytes we just copied, + * thus creating a repeating pattern with a period length of `back`. + */ +void liteav_av_memcpy_backptr(uint8_t *dst, int back, int cnt); + +/** + * @} + */ + +/** + * @defgroup lavu_mem_dynarray Dynamic Array + * + * Utilities to make an array grow when needed. + * + * Sometimes, the programmer would want to have an array that can grow when + * needed. The libavutil dynamic array utilities fill that need. + * + * libavutil supports two systems of appending elements onto a dynamically + * allocated array, the first one storing the pointer to the value in the + * array, and the second storing the value directly. In both systems, the + * caller is responsible for maintaining a variable containing the length of + * the array, as well as freeing of the array after use. + * + * The first system stores pointers to values in a block of dynamically + * allocated memory. Since only pointers are stored, the function does not need + * to know the size of the type. Both liteav_av_dynarray_add() and + * liteav_av_dynarray_add_nofree() implement this system. + * + * @code + * type **array = NULL; //< an array of pointers to values + * int nb = 0; //< a variable to keep track of the length of the array + * + * type to_be_added = ...; + * type to_be_added2 = ...; + * + * liteav_av_dynarray_add(&array, &nb, &to_be_added); + * if (nb == 0) + * return AVERROR(ENOMEM); + * + * liteav_av_dynarray_add(&array, &nb, &to_be_added2); + * if (nb == 0) + * return AVERROR(ENOMEM); + * + * // Now: + * // nb == 2 + * // &to_be_added == array[0] + * // &to_be_added2 == array[1] + * + * liteav_av_freep(&array); + * @endcode + * + * The second system stores the value directly in a block of memory. As a + * result, the function has to know the size of the type. 
liteav_av_dynarray2_add() + * implements this mechanism. + * + * @code + * type *array = NULL; //< an array of values + * int nb = 0; //< a variable to keep track of the length of the array + * + * type to_be_added = ...; + * type to_be_added2 = ...; + * + * type *addr = liteav_av_dynarray2_add((void **)&array, &nb, sizeof(*array), NULL); + * if (!addr) + * return AVERROR(ENOMEM); + * memcpy(addr, &to_be_added, sizeof(to_be_added)); + * + * // Shortcut of the above. + * type *addr = liteav_av_dynarray2_add((void **)&array, &nb, sizeof(*array), + * (const void *)&to_be_added2); + * if (!addr) + * return AVERROR(ENOMEM); + * + * // Now: + * // nb == 2 + * // to_be_added == array[0] + * // to_be_added2 == array[1] + * + * liteav_av_freep(&array); + * @endcode + * + * @{ + */ + +/** + * Add the pointer to an element to a dynamic array. + * + * The array to grow is supposed to be an array of pointers to + * structures, and the element to add must be a pointer to an already + * allocated structure. + * + * The array is reallocated when its size reaches powers of 2. + * Therefore, the amortized cost of adding an element is constant. + * + * In case of success, the pointer to the array is updated in order to + * point to the new grown array, and the number pointed to by `nb_ptr` + * is incremented. + * In case of failure, the array is freed, `*tab_ptr` is set to `NULL` and + * `*nb_ptr` is set to 0. + * + * @param[in,out] tab_ptr Pointer to the array to grow + * @param[in,out] nb_ptr Pointer to the number of elements in the array + * @param[in] elem Element to add + * @see liteav_av_dynarray_add_nofree(), liteav_av_dynarray2_add() + */ +void liteav_av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem); + +/** + * Add an element to a dynamic array. + * + * Function has the same functionality as liteav_av_dynarray_add(), + * but it doesn't free memory on fails. It returns error code + * instead and leave current buffer untouched. 
+ * + * @return >=0 on success, negative otherwise + * @see liteav_av_dynarray_add(), liteav_av_dynarray2_add() + */ +av_warn_unused_result +int liteav_av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem); + +/** + * Add an element of size `elem_size` to a dynamic array. + * + * The array is reallocated when its number of elements reaches powers of 2. + * Therefore, the amortized cost of adding an element is constant. + * + * In case of success, the pointer to the array is updated in order to + * point to the new grown array, and the number pointed to by `nb_ptr` + * is incremented. + * In case of failure, the array is freed, `*tab_ptr` is set to `NULL` and + * `*nb_ptr` is set to 0. + * + * @param[in,out] tab_ptr Pointer to the array to grow + * @param[in,out] nb_ptr Pointer to the number of elements in the array + * @param[in] elem_size Size in bytes of an element in the array + * @param[in] elem_data Pointer to the data of the element to add. If + * `NULL`, the space of the newly added element is + * allocated but left uninitialized. + * + * @return Pointer to the data of the element to copy in the newly allocated + * space + * @see liteav_av_dynarray_add(), liteav_av_dynarray_add_nofree() + */ +void *liteav_av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size, + const uint8_t *elem_data); + +/** + * @} + */ + +/** + * @defgroup lavu_mem_misc Miscellaneous Functions + * + * Other functions related to memory allocation. + * + * @{ + */ + +/** + * Multiply two `size_t` values checking for overflow. + * + * @param[in] a,b Operands of multiplication + * @param[out] r Pointer to the result of the operation + * @return 0 on success, AVERROR(EINVAL) on overflow + */ +static inline int av_size_mult(size_t a, size_t b, size_t *r) +{ + size_t t = a * b; + /* Hack inspired from glibc: don't try the division if nelem and elsize + * are both less than sqrt(SIZE_MAX). 
*/ + if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b) + return AVERROR(EINVAL); + *r = t; + return 0; +} + +/** + * Set the maximum size that may be allocated in one block. + * + * The value specified with this function is effective for all libavutil's @ref + * lavu_mem_funcs "heap management functions." + * + * By default, the max value is defined as `INT_MAX`. + * + * @param max Value to be set as the new maximum size + * + * @warning Exercise extreme caution when using this function. Don't touch + * this if you do not understand the full consequence of doing so. + */ +void liteav_av_max_alloc(size_t max); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_MEM_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/motion_vector.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/motion_vector.h new file mode 100644 index 0000000..ec29556 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/motion_vector.h @@ -0,0 +1,57 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MOTION_VECTOR_H +#define AVUTIL_MOTION_VECTOR_H + +#include <stdint.h> + +typedef struct AVMotionVector { + /** + * Where the current macroblock comes from; negative value when it comes + * from the past, positive value when it comes from the future. + * XXX: set exact relative ref frame reference instead of a +/- 1 "direction". + */ + int32_t source; + /** + * Width and height of the block. + */ + uint8_t w, h; + /** + * Absolute source position. Can be outside the frame area. + */ + int16_t src_x, src_y; + /** + * Absolute destination position. Can be outside the frame area. + */ + int16_t dst_x, dst_y; + /** + * Extra flag information. + * Currently unused. + */ + uint64_t flags; + /** + * Motion vector + * src_x = dst_x + motion_x / motion_scale + * src_y = dst_y + motion_y / motion_scale + */ + int32_t motion_x, motion_y; + uint16_t motion_scale; +} AVMotionVector; + +#endif /* AVUTIL_MOTION_VECTOR_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/murmur3.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/murmur3.h new file mode 100644 index 0000000..0225105 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/murmur3.h @@ -0,0 +1,121 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2013 Reimar Döffinger <Reimar.Doeffinger@gmx.de> + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_murmur3 + * Public header for MurmurHash3 hash function implementation. + */ + +#ifndef AVUTIL_MURMUR3_H +#define AVUTIL_MURMUR3_H + +#include <stdint.h> + +#include "version.h" + +/** + * @defgroup lavu_murmur3 Murmur3 + * @ingroup lavu_hash + * MurmurHash3 hash function implementation. + * + * MurmurHash3 is a non-cryptographic hash function, of which three + * incompatible versions were created by its inventor Austin Appleby: + * + * - 32-bit output + * - 128-bit output for 32-bit platforms + * - 128-bit output for 64-bit platforms + * + * FFmpeg only implements the last variant: 128-bit output designed for 64-bit + * platforms. Even though the hash function was designed for 64-bit platforms, + * the function in reality works on 32-bit systems too, only with reduced + * performance. + * + * @anchor lavu_murmur3_seedinfo + * By design, MurmurHash3 requires a seed to operate. In response to this, + * libavutil provides two functions for hash initiation, one that requires a + * seed (liteav_av_murmur3_init_seeded()) and one that uses a fixed arbitrary integer + * as the seed, and therefore does not (liteav_av_murmur3_init()). 
+ * + * To make hashes comparable, you should provide the same seed for all calls to + * this hash function -- if you are supplying one yourself, that is. + * + * @{ + */ + +/** + * Allocate an AVMurMur3 hash context. + * + * @return Uninitialized hash context or `NULL` in case of error + */ +struct AVMurMur3 *liteav_av_murmur3_alloc(void); + +/** + * Initialize or reinitialize an AVMurMur3 hash context with a seed. + * + * @param[out] c Hash context + * @param[in] seed Random seed + * + * @see liteav_av_murmur3_init() + * @see @ref lavu_murmur3_seedinfo "Detailed description" on a discussion of + * seeds for MurmurHash3. + */ +void liteav_av_murmur3_init_seeded(struct AVMurMur3 *c, uint64_t seed); + +/** + * Initialize or reinitialize an AVMurMur3 hash context. + * + * Equivalent to liteav_av_murmur3_init_seeded() with a built-in seed. + * + * @param[out] c Hash context + * + * @see liteav_av_murmur3_init_seeded() + * @see @ref lavu_murmur3_seedinfo "Detailed description" on a discussion of + * seeds for MurmurHash3. + */ +void liteav_av_murmur3_init(struct AVMurMur3 *c); + +/** + * Update hash context with new data. + * + * @param[out] c Hash context + * @param[in] src Input data to update hash with + * @param[in] len Number of bytes to read from `src` + */ +#if FF_API_CRYPTO_SIZE_T +void liteav_av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, int len); +#else +void liteav_av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, size_t len); +#endif + +/** + * Finish hashing and output digest value. 
+ * + * @param[in,out] c Hash context + * @param[out] dst Buffer where output digest value is stored + */ +void liteav_av_murmur3_final(struct AVMurMur3 *c, uint8_t dst[16]); + +/** + * @} + */ + +#endif /* AVUTIL_MURMUR3_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/opt.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/opt.h new file mode 100644 index 0000000..9948bae --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/opt.h @@ -0,0 +1,867 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * AVOptions + * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_OPT_H +#define AVUTIL_OPT_H + +/** + * @file + * AVOptions + */ + +#include "rational.h" +#include "avutil.h" +#include "dict.h" +#include "log.h" +#include "pixfmt.h" +#include "samplefmt.h" +#include "version.h" + +/** + * @defgroup avoptions AVOptions + * @ingroup lavu_data + * @{ + * AVOptions provide a generic system to declare options on arbitrary structs + * ("objects"). 
An option can have a help text, a type and a range of possible + * values. Options may then be enumerated, read and written to. + * + * @section avoptions_implement Implementing AVOptions + * This section describes how to add AVOptions capabilities to a struct. + * + * All AVOptions-related information is stored in an AVClass. Therefore + * the first member of the struct should be a pointer to an AVClass describing it. + * The option field of the AVClass must be set to a NULL-terminated static array + * of AVOptions. Each AVOption must have a non-empty name, a type, a default + * value and for number-type AVOptions also a range of allowed values. It must + * also declare an offset in bytes from the start of the struct, where the field + * associated with this AVOption is located. Other fields in the AVOption struct + * should also be set when applicable, but are not required. + * + * The following example illustrates an AVOptions-enabled struct: + * @code + * typedef struct test_struct { + * const AVClass *class; + * int int_opt; + * char *str_opt; + * uint8_t *bin_opt; + * int bin_len; + * } test_struct; + * + * static const AVOption test_options[] = { + * { "test_int", "This is a test option of int type.", offsetof(test_struct, int_opt), + * AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX }, + * { "test_str", "This is a test option of string type.", offsetof(test_struct, str_opt), + * AV_OPT_TYPE_STRING }, + * { "test_bin", "This is a test option of binary type.", offsetof(test_struct, bin_opt), + * AV_OPT_TYPE_BINARY }, + * { NULL }, + * }; + * + * static const AVClass test_class = { + * .class_name = "test class", + * .item_name = liteav_av_default_item_name, + * .option = test_options, + * .version = LIBAVUTIL_VERSION_INT, + * }; + * @endcode + * + * Next, when allocating your struct, you must ensure that the AVClass pointer + * is set to the correct value. Then, liteav_av_opt_set_defaults() can be called to + * initialize defaults. 
After that the struct is ready to be used with the + * AVOptions API. + * + * When cleaning up, you may use the liteav_av_opt_free() function to automatically + * free all the allocated string and binary options. + * + * Continuing with the above example: + * + * @code + * test_struct *alloc_test_struct(void) + * { + * test_struct *ret = liteav_av_mallocz(sizeof(*ret)); + * ret->class = &test_class; + * liteav_av_opt_set_defaults(ret); + * return ret; + * } + * void free_test_struct(test_struct **foo) + * { + * liteav_av_opt_free(*foo); + * liteav_av_freep(foo); + * } + * @endcode + * + * @subsection avoptions_implement_nesting Nesting + * It may happen that an AVOptions-enabled struct contains another + * AVOptions-enabled struct as a member (e.g. AVCodecContext in + * libavcodec exports generic options, while its priv_data field exports + * codec-specific options). In such a case, it is possible to set up the + * parent struct to export a child's options. To do that, simply + * implement AVClass.child_next() and AVClass.child_class_next() in the + * parent struct's AVClass. + * Assuming that the test_struct from above now also contains a + * child_struct field: + * + * @code + * typedef struct child_struct { + * AVClass *class; + * int flags_opt; + * } child_struct; + * static const AVOption child_opts[] = { + * { "test_flags", "This is a test option of flags type.", + * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX }, + * { NULL }, + * }; + * static const AVClass child_class = { + * .class_name = "child class", + * .item_name = liteav_av_default_item_name, + * .option = child_opts, + * .version = LIBAVUTIL_VERSION_INT, + * }; + * + * void *child_next(void *obj, void *prev) + * { + * test_struct *t = obj; + * if (!prev && t->child_struct) + * return t->child_struct; + * return NULL + * } + * const AVClass child_class_next(const AVClass *prev) + * { + * return prev ? 
NULL : &child_class; + * } + * @endcode + * Putting child_next() and child_class_next() as defined above into + * test_class will now make child_struct's options accessible through + * test_struct (again, proper setup as described above needs to be done on + * child_struct right after it is created). + * + * From the above example it might not be clear why both child_next() + * and child_class_next() are needed. The distinction is that child_next() + * iterates over actually existing objects, while child_class_next() + * iterates over all possible child classes. E.g. if an AVCodecContext + * was initialized to use a codec which has private options, then its + * child_next() will return AVCodecContext.priv_data and finish + * iterating. OTOH child_class_next() on AVCodecContext.av_class will + * iterate over all available codecs with private options. + * + * @subsection avoptions_implement_named_constants Named constants + * It is possible to create named constants for options. Simply set the unit + * field of the option the constants should apply to a string and + * create the constants themselves as options of type AV_OPT_TYPE_CONST + * with their unit field set to the same string. + * Their default_val field should contain the value of the named + * constant. + * For example, to add some named constants for the test_flags option + * above, put the following into the child_opts array: + * @code + * { "test_flags", "This is a test option of flags type.", + * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, "test_unit" }, + * { "flag1", "This is a flag with value 16", 0, AV_OPT_TYPE_CONST, { .i64 = 16 }, 0, 0, "test_unit" }, + * @endcode + * + * @section avoptions_use Using AVOptions + * This section deals with accessing options in an AVOptions-enabled struct. + * Such structs in FFmpeg are e.g. AVCodecContext in libavcodec or + * AVFormatContext in libavformat. 
+ * + * @subsection avoptions_use_examine Examining AVOptions + * The basic functions for examining options are liteav_av_opt_next(), which iterates + * over all options defined for one object, and liteav_av_opt_find(), which searches + * for an option with the given name. + * + * The situation is more complicated with nesting. An AVOptions-enabled struct + * may have AVOptions-enabled children. Passing the AV_OPT_SEARCH_CHILDREN flag + * to liteav_av_opt_find() will make the function search children recursively. + * + * For enumerating there are basically two cases. The first is when you want to + * get all options that may potentially exist on the struct and its children + * (e.g. when constructing documentation). In that case you should call + * liteav_av_opt_child_class_next() recursively on the parent struct's AVClass. The + * second case is when you have an already initialized struct with all its + * children and you want to get all options that can be actually written or read + * from it. In that case you should call liteav_av_opt_child_next() recursively (and + * liteav_av_opt_next() on each result). + * + * @subsection avoptions_use_get_set Reading and writing AVOptions + * When setting options, you often have a string read directly from the + * user. In such a case, simply passing it to liteav_av_opt_set() is enough. For + * non-string type options, liteav_av_opt_set() will parse the string according to the + * option type. + * + * Similarly liteav_av_opt_get() will read any option type and convert it to a string + * which will be returned. Do not forget that the string is allocated, so you + * have to free it with liteav_av_free(). + * + * In some cases it may be more convenient to put all options into an + * AVDictionary and call liteav_av_opt_set_dict() on it. A specific case of this + * are the format/codec open functions in lavf/lavc which take a dictionary + * filled with option as a parameter. 
This makes it possible to set some options + * that cannot be set otherwise, since e.g. the input file format is not known + * before the file is actually opened. + */ + +enum AVOptionType{ + AV_OPT_TYPE_FLAGS, + AV_OPT_TYPE_INT, + AV_OPT_TYPE_INT64, + AV_OPT_TYPE_DOUBLE, + AV_OPT_TYPE_FLOAT, + AV_OPT_TYPE_STRING, + AV_OPT_TYPE_RATIONAL, + AV_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length + AV_OPT_TYPE_DICT, + AV_OPT_TYPE_UINT64, + AV_OPT_TYPE_CONST, + AV_OPT_TYPE_IMAGE_SIZE, ///< offset must point to two consecutive integers + AV_OPT_TYPE_PIXEL_FMT, + AV_OPT_TYPE_SAMPLE_FMT, + AV_OPT_TYPE_VIDEO_RATE, ///< offset must point to AVRational + AV_OPT_TYPE_DURATION, + AV_OPT_TYPE_COLOR, + AV_OPT_TYPE_CHANNEL_LAYOUT, + AV_OPT_TYPE_BOOL, +}; + +/** + * AVOption + */ +typedef struct AVOption { + const char *name; + + /** + * short English help text + * @todo What about other languages? + */ + const char *help; + + /** + * The offset relative to the context structure where the option + * value is stored. It should be 0 for named constants. + */ + int offset; + enum AVOptionType type; + + /** + * the default value for scalar options + */ + union { + int64_t i64; + double dbl; + const char *str; + /* TODO those are unused now */ + AVRational q; + } default_val; + double min; ///< minimum valid value for the option + double max; ///< maximum valid value for the option + + int flags; +#define AV_OPT_FLAG_ENCODING_PARAM 1 ///< a generic parameter which can be set by the user for muxing or encoding +#define AV_OPT_FLAG_DECODING_PARAM 2 ///< a generic parameter which can be set by the user for demuxing or decoding +#define AV_OPT_FLAG_AUDIO_PARAM 8 +#define AV_OPT_FLAG_VIDEO_PARAM 16 +#define AV_OPT_FLAG_SUBTITLE_PARAM 32 +/** + * The option is intended for exporting values to the caller. + */ +#define AV_OPT_FLAG_EXPORT 64 +/** + * The option may not be set through the AVOptions API, only read. 
+ * This flag only makes sense when AV_OPT_FLAG_EXPORT is also set. + */ +#define AV_OPT_FLAG_READONLY 128 +#define AV_OPT_FLAG_BSF_PARAM (1<<8) ///< a generic parameter which can be set by the user for bit stream filtering +#define AV_OPT_FLAG_RUNTIME_PARAM (1<<15) ///< a generic parameter which can be set by the user at runtime +#define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering +#define AV_OPT_FLAG_DEPRECATED (1<<17) ///< set if option is deprecated, users should refer to AVOption.help text for more information +//FIXME think about enc-audio, ... style flags + + /** + * The logical unit to which the option belongs. Non-constant + * options and corresponding named constants share the same + * unit. May be NULL. + */ + const char *unit; +} AVOption; + +/** + * A single allowed range of values, or a single allowed value. + */ +typedef struct AVOptionRange { + const char *str; + /** + * Value range. + * For string ranges this represents the min/max length. + * For dimensions this represents the min/max pixel count or width/height in multi-component case. + */ + double value_min, value_max; + /** + * Value's component range. + * For string this represents the unicode range for chars, 0-127 limits to ASCII. + */ + double component_min, component_max; + /** + * Range flag. + * If set to 1 the struct encodes a range, if set to 0 a single value. + */ + int is_range; +} AVOptionRange; + +/** + * List of AVOptionRange structs. + */ +typedef struct AVOptionRanges { + /** + * Array of option ranges. + * + * Most of option types use just one component. + * Following describes multi-component option types: + * + * AV_OPT_TYPE_IMAGE_SIZE: + * component index 0: range of pixel count (width * height). + * component index 1: range of width. + * component index 2: range of height. 
+     *
+     * @note To obtain multi-component version of this structure, user must
+     *       provide AV_OPT_MULTI_COMPONENT_RANGE to liteav_av_opt_query_ranges or
+     *       liteav_av_opt_query_ranges_default function.
+     *
+     * Multi-component range can be read as in following example:
+     *
+     * @code
+     * int range_index, component_index;
+     * AVOptionRanges *ranges;
+     * AVOptionRange *range[3]; //may require more than 3 in the future.
+     * liteav_av_opt_query_ranges(&ranges, obj, key, AV_OPT_MULTI_COMPONENT_RANGE);
+     * for (range_index = 0; range_index < ranges->nb_ranges; range_index++) {
+     *     for (component_index = 0; component_index < ranges->nb_components; component_index++)
+     *         range[component_index] = ranges->range[ranges->nb_ranges * component_index + range_index];
+     *     //do something with range here.
+     * }
+     * liteav_av_opt_freep_ranges(&ranges);
+     * @endcode
+     */
+    AVOptionRange **range;
+    /**
+     * Number of ranges per component.
+     */
+    int nb_ranges;
+    /**
+     * Number of components.
+     */
+    int nb_components;
+} AVOptionRanges;
+
+/**
+ * Show the obj options.
+ *
+ * @param req_flags requested flags for the options to show. Show only the
+ * options for which it is opt->flags & req_flags.
+ * @param rej_flags rejected flags for the options to show. Show only the
+ * options for which it is !(opt->flags & req_flags).
+ * @param av_log_obj log context to use for showing the options
+ */
+int liteav_av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags);
+
+/**
+ * Set the values of all AVOption fields to their default values.
+ *
+ * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass)
+ */
+void liteav_av_opt_set_defaults(void *s);
+
+/**
+ * Set the values of all AVOption fields to their default values. Only these
+ * AVOption fields for which (opt->flags & mask) == flags will have their
+ * default applied to s.
+ * + * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass) + * @param mask combination of AV_OPT_FLAG_* + * @param flags combination of AV_OPT_FLAG_* + */ +void liteav_av_opt_set_defaults2(void *s, int mask, int flags); + +/** + * Parse the key/value pairs list in opts. For each key/value pair + * found, stores the value in the field in ctx that is named like the + * key. ctx must be an AVClass context, storing is done using + * AVOptions. + * + * @param opts options string to parse, may be NULL + * @param key_val_sep a 0-terminated list of characters used to + * separate key from value + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other + * @return the number of successfully set key/value pairs, or a negative + * value corresponding to an AVERROR code in case of error: + * AVERROR(EINVAL) if opts cannot be parsed, + * the error code issued by liteav_av_opt_set() if a key/value pair + * cannot be set + */ +int liteav_av_set_options_string(void *ctx, const char *opts, + const char *key_val_sep, const char *pairs_sep); + +/** + * Parse the key-value pairs list in opts. For each key=value pair found, + * set the value of the corresponding option in ctx. 
+ * + * @param ctx the AVClass object to set options on + * @param opts the options string, key-value pairs separated by a + * delimiter + * @param shorthand a NULL-terminated array of options names for shorthand + * notation: if the first field in opts has no key part, + * the key is taken from the first element of shorthand; + * then again for the second, etc., until either opts is + * finished, shorthand is finished or a named option is + * found; after that, all options must be named + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value, for example '=' + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other, for example ':' or ',' + * @return the number of successfully set key=value pairs, or a negative + * value corresponding to an AVERROR code in case of error: + * AVERROR(EINVAL) if opts cannot be parsed, + * the error code issued by av_set_string3() if a key/value pair + * cannot be set + * + * Options names must use only the following characters: a-z A-Z 0-9 - . / _ + * Separators must use characters distinct from option names and from each + * other. + */ +int liteav_av_opt_set_from_string(void *ctx, const char *opts, + const char *const *shorthand, + const char *key_val_sep, const char *pairs_sep); +/** + * Free all allocated objects in obj. + */ +void liteav_av_opt_free(void *obj); + +/** + * Check whether a particular flag is set in a flags field. + * + * @param field_name the name of the flag field option + * @param flag_name the name of the flag to check + * @return non-zero if the flag is set, zero if the flag isn't set, + * isn't of the right type, or the flags field doesn't exist. + */ +int liteav_av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name); + +/** + * Set all the options from a given dictionary on an object. + * + * @param obj a struct whose first element is a pointer to AVClass + * @param options options to process. 
This dictionary will be freed and replaced + * by a new one containing all options not found in obj. + * Of course this new dictionary needs to be freed by caller + * with liteav_av_dict_free(). + * + * @return 0 on success, a negative AVERROR if some option was found in obj, + * but could not be set. + * + * @see liteav_av_dict_copy() + */ +int liteav_av_opt_set_dict(void *obj, struct AVDictionary **options); + + +/** + * Set all the options from a given dictionary on an object. + * + * @param obj a struct whose first element is a pointer to AVClass + * @param options options to process. This dictionary will be freed and replaced + * by a new one containing all options not found in obj. + * Of course this new dictionary needs to be freed by caller + * with liteav_av_dict_free(). + * @param search_flags A combination of AV_OPT_SEARCH_*. + * + * @return 0 on success, a negative AVERROR if some option was found in obj, + * but could not be set. + * + * @see liteav_av_dict_copy() + */ +int liteav_av_opt_set_dict2(void *obj, struct AVDictionary **options, int search_flags); + +/** + * Extract a key-value pair from the beginning of a string. 
+ * + * @param ropts pointer to the options string, will be updated to + * point to the rest of the string (one of the pairs_sep + * or the final NUL) + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value, for example '=' + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other, for example ':' or ',' + * @param flags flags; see the AV_OPT_FLAG_* values below + * @param rkey parsed key; must be freed using liteav_av_free() + * @param rval parsed value; must be freed using liteav_av_free() + * + * @return >=0 for success, or a negative value corresponding to an + * AVERROR code in case of error; in particular: + * AVERROR(EINVAL) if no key is present + * + */ +int liteav_av_opt_get_key_value(const char **ropts, + const char *key_val_sep, const char *pairs_sep, + unsigned flags, + char **rkey, char **rval); + +enum { + + /** + * Accept to parse a value without a key; the key will then be returned + * as NULL. + */ + AV_OPT_FLAG_IMPLICIT_KEY = 1, +}; + +/** + * @defgroup opt_eval_funcs Evaluating option strings + * @{ + * This group of functions can be used to evaluate option strings + * and get numbers out of them. They do the same thing as liteav_av_opt_set(), + * except the result is written into the caller-supplied pointer. + * + * @param obj a struct whose first element is a pointer to AVClass. + * @param o an option for which the string is to be evaluated. + * @param val string to be evaluated. + * @param *_out value of the string will be written here. + * + * @return 0 on success, a negative number on failure. 
+ */ +int liteav_av_opt_eval_flags (void *obj, const AVOption *o, const char *val, int *flags_out); +int liteav_av_opt_eval_int (void *obj, const AVOption *o, const char *val, int *int_out); +int liteav_av_opt_eval_int64 (void *obj, const AVOption *o, const char *val, int64_t *int64_out); +int liteav_av_opt_eval_float (void *obj, const AVOption *o, const char *val, float *float_out); +int liteav_av_opt_eval_double(void *obj, const AVOption *o, const char *val, double *double_out); +int liteav_av_opt_eval_q (void *obj, const AVOption *o, const char *val, AVRational *q_out); +/** + * @} + */ + +#define AV_OPT_SEARCH_CHILDREN (1 << 0) /**< Search in possible children of the + given object first. */ +/** + * The obj passed to liteav_av_opt_find() is fake -- only a double pointer to AVClass + * instead of a required pointer to a struct containing AVClass. This is + * useful for searching for options without needing to allocate the corresponding + * object. + */ +#define AV_OPT_SEARCH_FAKE_OBJ (1 << 1) + +/** + * In liteav_av_opt_get, return NULL if the option has a pointer type and is set to NULL, + * rather than returning an empty string. + */ +#define AV_OPT_ALLOW_NULL (1 << 2) + +/** + * Allows liteav_av_opt_query_ranges and liteav_av_opt_query_ranges_default to return more than + * one component for certain option types. + * @see AVOptionRanges for details. + */ +#define AV_OPT_MULTI_COMPONENT_RANGE (1 << 12) + +/** + * Look for an option in an object. Consider only options which + * have all the specified flags set. + * + * @param[in] obj A pointer to a struct whose first element is a + * pointer to an AVClass. + * Alternatively a double pointer to an AVClass, if + * AV_OPT_SEARCH_FAKE_OBJ search flag is set. + * @param[in] name The name of the option to look for. + * @param[in] unit When searching for named constants, name of the unit + * it belongs to. + * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG). 
+ * @param search_flags A combination of AV_OPT_SEARCH_*. + * + * @return A pointer to the option found, or NULL if no option + * was found. + * + * @note Options found with AV_OPT_SEARCH_CHILDREN flag may not be settable + * directly with liteav_av_opt_set(). Use special calls which take an options + * AVDictionary (e.g. avformat_open_input()) to set options found with this + * flag. + */ +const AVOption *liteav_av_opt_find(void *obj, const char *name, const char *unit, + int opt_flags, int search_flags); + +/** + * Look for an option in an object. Consider only options which + * have all the specified flags set. + * + * @param[in] obj A pointer to a struct whose first element is a + * pointer to an AVClass. + * Alternatively a double pointer to an AVClass, if + * AV_OPT_SEARCH_FAKE_OBJ search flag is set. + * @param[in] name The name of the option to look for. + * @param[in] unit When searching for named constants, name of the unit + * it belongs to. + * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG). + * @param search_flags A combination of AV_OPT_SEARCH_*. + * @param[out] target_obj if non-NULL, an object to which the option belongs will be + * written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present + * in search_flags. This parameter is ignored if search_flags contain + * AV_OPT_SEARCH_FAKE_OBJ. + * + * @return A pointer to the option found, or NULL if no option + * was found. + */ +const AVOption *liteav_av_opt_find2(void *obj, const char *name, const char *unit, + int opt_flags, int search_flags, void **target_obj); + +/** + * Iterate over all AVOptions belonging to obj. + * + * @param obj an AVOptions-enabled struct or a double pointer to an + * AVClass describing it. 
+ * @param prev result of the previous call to liteav_av_opt_next() on this object + * or NULL + * @return next AVOption or NULL + */ +const AVOption *liteav_av_opt_next(const void *obj, const AVOption *prev); + +/** + * Iterate over AVOptions-enabled children of obj. + * + * @param prev result of a previous call to this function or NULL + * @return next AVOptions-enabled child or NULL + */ +void *liteav_av_opt_child_next(void *obj, void *prev); + +/** + * Iterate over potential AVOptions-enabled children of parent. + * + * @param prev result of a previous call to this function or NULL + * @return AVClass corresponding to next potential child or NULL + */ +const AVClass *liteav_av_opt_child_class_next(const AVClass *parent, const AVClass *prev); + +/** + * @defgroup opt_set_funcs Option setting functions + * @{ + * Those functions set the field of obj with the given name to value. + * + * @param[in] obj A struct whose first element is a pointer to an AVClass. + * @param[in] name the name of the field to set + * @param[in] val The value to set. In case of liteav_av_opt_set() if the field is not + * of a string type, then the given string is parsed. + * SI postfixes and some named scalars are supported. + * If the field is of a numeric type, it has to be a numeric or named + * scalar. Behavior with more than one scalar and +- infix operators + * is undefined. + * If the field is of a flags type, it has to be a sequence of numeric + * scalars or named flags separated by '+' or '-'. Prefixing a flag + * with '+' causes it to be set without affecting the other flags; + * similarly, '-' unsets a flag. + * @param search_flags flags passed to liteav_av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN + * is passed here, then the option may be set on a child of obj. 
+ *
+ * @return 0 if the value has been set, or an AVERROR code in case of
+ * error:
+ * AVERROR_OPTION_NOT_FOUND if no matching option exists
+ * AVERROR(ERANGE) if the value is out of range
+ * AVERROR(EINVAL) if the value is not valid
+ */
+int liteav_av_opt_set         (void *obj, const char *name, const char *val, int search_flags);
+int liteav_av_opt_set_int     (void *obj, const char *name, int64_t     val, int search_flags);
+int liteav_av_opt_set_double  (void *obj, const char *name, double      val, int search_flags);
+int liteav_av_opt_set_q       (void *obj, const char *name, AVRational  val, int search_flags);
+int liteav_av_opt_set_bin     (void *obj, const char *name, const uint8_t *val, int size, int search_flags);
+int liteav_av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_flags);
+int liteav_av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags);
+int liteav_av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags);
+int liteav_av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags);
+int liteav_av_opt_set_channel_layout(void *obj, const char *name, int64_t ch_layout, int search_flags);
+/**
+ * @note Any old dictionary present is discarded and replaced with a copy of the new one. The
+ * caller still owns val and is responsible for freeing it.
+ */
+int liteav_av_opt_set_dict_val(void *obj, const char *name, const AVDictionary *val, int search_flags);
+
+/**
+ * Set a binary option to an integer list.
+ *
+ * @param obj    AVClass object to set options on
+ * @param name   name of the binary option
+ * @param val    pointer to an integer list (must have the correct type with
+ *               regard to the contents of the list)
+ * @param term   list terminator (usually 0 or -1)
+ * @param flags  search flags
+ */
+#define av_opt_set_int_list(obj, name, val, term, flags) \
+    (av_int_list_length(val, term) > INT_MAX / sizeof(*(val)) ? 
\ + AVERROR(EINVAL) : \ + liteav_av_opt_set_bin(obj, name, (const uint8_t *)(val), \ + av_int_list_length(val, term) * sizeof(*(val)), flags)) + +/** + * @} + */ + +/** + * @defgroup opt_get_funcs Option getting functions + * @{ + * Those functions get a value of the option with the given name from an object. + * + * @param[in] obj a struct whose first element is a pointer to an AVClass. + * @param[in] name name of the option to get. + * @param[in] search_flags flags passed to liteav_av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN + * is passed here, then the option may be found in a child of obj. + * @param[out] out_val value of the option will be written here + * @return >=0 on success, a negative error code otherwise + */ +/** + * @note the returned string will be liteav_av_malloc()ed and must be liteav_av_free()ed by the caller + * + * @note if AV_OPT_ALLOW_NULL is set in search_flags in liteav_av_opt_get, and the option has + * AV_OPT_TYPE_STRING or AV_OPT_TYPE_BINARY and is set to NULL, *out_val will be set + * to NULL instead of an allocated empty string. 
+ */ +int liteav_av_opt_get (void *obj, const char *name, int search_flags, uint8_t **out_val); +int liteav_av_opt_get_int (void *obj, const char *name, int search_flags, int64_t *out_val); +int liteav_av_opt_get_double (void *obj, const char *name, int search_flags, double *out_val); +int liteav_av_opt_get_q (void *obj, const char *name, int search_flags, AVRational *out_val); +int liteav_av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_out, int *h_out); +int liteav_av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt); +int liteav_av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt); +int liteav_av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val); +int liteav_av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *ch_layout); +/** + * @param[out] out_val The returned dictionary is a copy of the actual value and must + * be freed with liteav_av_dict_free() by the caller + */ +int liteav_av_opt_get_dict_val(void *obj, const char *name, int search_flags, AVDictionary **out_val); +/** + * @} + */ +/** + * Gets a pointer to the requested field in a struct. + * This function allows accessing a struct even when its fields are moved or + * renamed since the application making the access has been compiled, + * + * @returns a pointer to the field, it can be cast to the correct type and read + * or written to. + */ +void *liteav_av_opt_ptr(const AVClass *avclass, void *obj, const char *name); + +/** + * Free an AVOptionRanges struct and set it to NULL. + */ +void liteav_av_opt_freep_ranges(AVOptionRanges **ranges); + +/** + * Get a list of allowed ranges for the given option. + * + * The returned list may depend on other fields in obj like for example profile. 
+ *
+ * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored
+ *              AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance
+ *              AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, @see AVOptionRanges
+ *
+ * The result must be freed with liteav_av_opt_freep_ranges.
+ *
+ * @return number of components returned on success, a negative error code otherwise
+ */
+int liteav_av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags);
+
+/**
+ * Copy options from src object into dest object.
+ *
+ * Options that require memory allocation (e.g. string or binary) are malloc'ed in dest object.
+ * Original memory allocated for such options is freed unless both src and dest options points to the same memory.
+ *
+ * @param dest Object to copy into
+ * @param src Object to copy from
+ * @return 0 on success, negative on error
+ */
+int liteav_av_opt_copy(void *dest, const void *src);
+
+/**
+ * Get a default list of allowed ranges for the given option.
+ *
+ * This list is constructed without using the AVClass.query_ranges() callback
+ * and can be used as fallback from within the callback.
+ *
+ * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored
+ *              AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance
+ *              AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, @see AVOptionRanges
+ *
+ * The result must be freed with av_opt_free_ranges.
+ *
+ * @return number of components returned on success, a negative error code otherwise
+ */
+int liteav_av_opt_query_ranges_default(AVOptionRanges **, void *obj, const char *key, int flags);
+
+/**
+ * Check if given option is set to its default value.
+ *
+ * Options o must belong to the obj. This function must not be called to check child's options state.
+ * @see liteav_av_opt_is_set_to_default_by_name().
+ *
+ * @param obj  AVClass object to check option on
+ * @param o    option to be checked
+ * @return     >0 when option is set to its default,
+ *              0 when option is not set its default,
+ *             <0 on error
+ */
+int liteav_av_opt_is_set_to_default(void *obj, const AVOption *o);
+
+/**
+ * Check if given option is set to its default value.
+ *
+ * @param obj          AVClass object to check option on
+ * @param name         option name
+ * @param search_flags combination of AV_OPT_SEARCH_*
+ * @return             >0 when option is set to its default,
+ *                      0 when option is not set its default,
+ *                     <0 on error
+ */
+int liteav_av_opt_is_set_to_default_by_name(void *obj, const char *name, int search_flags);
+
+
+#define AV_OPT_SERIALIZE_SKIP_DEFAULTS              0x00000001 ///< Serialize options that are not set to default values only.
+#define AV_OPT_SERIALIZE_OPT_FLAGS_EXACT            0x00000002 ///< Serialize options that exactly match opt_flags only.
+
+/**
+ * Serialize object's options.
+ *
+ * Create a string containing object's serialized options.
+ * Such string may be passed back to liteav_av_opt_set_from_string() in order to restore option values.
+ * A key/value or pairs separator occurring in the serialized value or
+ * name string are escaped through the liteav_av_escape() function.
+ *
+ * @param[in]  obj           AVClass object to serialize
+ * @param[in]  opt_flags     serialize options with all the specified flags set (AV_OPT_FLAG)
+ * @param[in]  flags         combination of AV_OPT_SERIALIZE_* flags
+ * @param[out] buffer        Pointer to buffer that will be allocated with string containing serialized options.
+ *                           Buffer must be freed by the caller when is no longer needed.
+ * @param[in]  key_val_sep   character used to separate key from value
+ * @param[in]  pairs_sep     character used to separate two pairs from each other
+ * @return                   >= 0 on success, negative on error
+ * @warning Separators cannot be neither '\\' nor '\0'. They also cannot be the same.
+ */ +int liteav_av_opt_serialize(void *obj, int opt_flags, int flags, char **buffer, + const char key_val_sep, const char pairs_sep); +/** + * @} + */ + +#endif /* AVUTIL_OPT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/parseutils.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/parseutils.h new file mode 100644 index 0000000..5da32c4 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/parseutils.h @@ -0,0 +1,194 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PARSEUTILS_H +#define AVUTIL_PARSEUTILS_H + +#include <time.h> + +#include "rational.h" + +/** + * @file + * misc parsing utilities + */ + +/** + * Parse str and store the parsed ratio in q. + * + * Note that a ratio with infinite (1/0) or negative value is + * considered valid, so you should check on the returned value if you + * want to exclude those values. + * + * The undefined value can be expressed using the "0:0" string. 
+ * + * @param[in,out] q pointer to the AVRational which will contain the ratio + * @param[in] str the string to parse: it has to be a string in the format + * num:den, a float number or an expression + * @param[in] max the maximum allowed numerator and denominator + * @param[in] log_offset log level offset which is applied to the log + * level of log_ctx + * @param[in] log_ctx parent logging context + * @return >= 0 on success, a negative error code otherwise + */ +int liteav_av_parse_ratio(AVRational *q, const char *str, int max, + int log_offset, void *log_ctx); + +#define av_parse_ratio_quiet(rate, str, max) \ + liteav_av_parse_ratio(rate, str, max, AV_LOG_MAX_OFFSET, NULL) + +/** + * Parse str and put in width_ptr and height_ptr the detected values. + * + * @param[in,out] width_ptr pointer to the variable which will contain the detected + * width value + * @param[in,out] height_ptr pointer to the variable which will contain the detected + * height value + * @param[in] str the string to parse: it has to be a string in the format + * width x height or a valid video size abbreviation. + * @return >= 0 on success, a negative error code otherwise + */ +int liteav_av_parse_video_size(int *width_ptr, int *height_ptr, const char *str); + +/** + * Parse str and store the detected values in *rate. + * + * @param[in,out] rate pointer to the AVRational which will contain the detected + * frame rate + * @param[in] str the string to parse: it has to be a string in the format + * rate_num / rate_den, a float number or a valid video rate abbreviation + * @return >= 0 on success, a negative error code otherwise + */ +int liteav_av_parse_video_rate(AVRational *rate, const char *str); + +/** + * Put the RGBA values that correspond to color_string in rgba_color. + * + * @param color_string a string specifying a color. 
It can be the name of + * a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence, + * possibly followed by "@" and a string representing the alpha + * component. + * The alpha component may be a string composed by "0x" followed by an + * hexadecimal number or a decimal number between 0.0 and 1.0, which + * represents the opacity value (0x00/0.0 means completely transparent, + * 0xff/1.0 completely opaque). + * If the alpha component is not specified then 0xff is assumed. + * The string "random" will result in a random color. + * @param slen length of the initial part of color_string containing the + * color. It can be set to -1 if color_string is a null terminated string + * containing nothing else than the color. + * @return >= 0 in case of success, a negative value in case of + * failure (for example if color_string cannot be parsed). + */ +int liteav_av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, + void *log_ctx); + +/** + * Get the name of a color from the internal table of hard-coded named + * colors. + * + * This function is meant to enumerate the color names recognized by + * liteav_av_parse_color(). + * + * @param color_idx index of the requested color, starting from 0 + * @param rgbp if not NULL, will point to a 3-elements array with the color value in RGB + * @return the color name string or NULL if color_idx is not in the array + */ +const char *liteav_av_get_known_color_name(int color_idx, const uint8_t **rgb); + +/** + * Parse timestr and return in *time a corresponding number of + * microseconds. + * + * @param timeval puts here the number of microseconds corresponding + * to the string in timestr. If the string represents a duration, it + * is the number of microseconds contained in the time interval. If + * the string is a date, is the number of microseconds since 1st of + * January, 1970 up to the time of the parsed date. If timestr cannot + * be successfully parsed, set *time to INT64_MIN. 
+ + * @param timestr a string representing a date or a duration. + * - If a date the syntax is: + * @code + * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH:MM:SS[.m...]]]}|{HHMMSS[.m...]]]}}[Z] + * now + * @endcode + * If the value is "now" it takes the current time. + * Time is local time unless Z is appended, in which case it is + * interpreted as UTC. + * If the year-month-day part is not specified it takes the current + * year-month-day. + * - If a duration the syntax is: + * @code + * [-][HH:]MM:SS[.m...] + * [-]S+[.m...] + * @endcode + * @param duration flag which tells how to interpret timestr, if not + * zero timestr is interpreted as a duration, otherwise as a date + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int liteav_av_parse_time(int64_t *timeval, const char *timestr, int duration); + +/** + * Attempt to find a specific tag in a URL. + * + * syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done. + * Return 1 if found. + */ +int liteav_av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info); + +/** + * Simplified version of strptime + * + * Parse the input string p according to the format string fmt and + * store its results in the structure dt. + * This implementation supports only a subset of the formats supported + * by the standard strptime(). + * + * The supported input field descriptors are listed below. 
+ * - %H: the hour as a decimal number, using a 24-hour clock, in the + * range '00' through '23' + * - %J: hours as a decimal number, in the range '0' through INT_MAX + * - %M: the minute as a decimal number, using a 24-hour clock, in the + * range '00' through '59' + * - %S: the second as a decimal number, using a 24-hour clock, in the + * range '00' through '59' + * - %Y: the year as a decimal number, using the Gregorian calendar + * - %m: the month as a decimal number, in the range '1' through '12' + * - %d: the day of the month as a decimal number, in the range '1' + * through '31' + * - %T: alias for '%H:%M:%S' + * - %%: a literal '%' + * + * @return a pointer to the first character not processed in this function + * call. In case the input string contains more characters than + * required by the format string the return value points right after + * the last consumed input character. In case the whole input string + * is consumed the return value points to the null byte at the end of + * the string. On failure NULL is returned. + */ +char *liteav_av_small_strptime(const char *p, const char *fmt, struct tm *dt); + +/** + * Convert the decomposed UTC time in tm to a time_t value. + */ +time_t liteav_av_timegm(struct tm *tm); + +#endif /* AVUTIL_PARSEUTILS_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/pixdesc.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/pixdesc.h new file mode 100644 index 0000000..07e10c4 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/pixdesc.h @@ -0,0 +1,441 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * pixel format descriptor + * Copyright (c) 2009 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PIXDESC_H +#define AVUTIL_PIXDESC_H + +#include <inttypes.h> + +#include "attributes.h" +#include "pixfmt.h" +#include "version.h" + +typedef struct AVComponentDescriptor { + /** + * Which of the 4 planes contains the component. + */ + int plane; + + /** + * Number of elements between 2 horizontally consecutive pixels. + * Elements are bits for bitstream formats, bytes otherwise. + */ + int step; + + /** + * Number of elements before the component of the first pixel. + * Elements are bits for bitstream formats, bytes otherwise. + */ + int offset; + + /** + * Number of least significant bits that must be shifted away + * to get the value. + */ + int shift; + + /** + * Number of bits in the component. + */ + int depth; + +#if FF_API_PLUS1_MINUS1 + /** deprecated, use step instead */ + attribute_deprecated int step_minus1; + + /** deprecated, use depth instead */ + attribute_deprecated int depth_minus1; + + /** deprecated, use offset instead */ + attribute_deprecated int offset_plus1; +#endif +} AVComponentDescriptor; + +/** + * Descriptor that unambiguously describes how the bits of a pixel are + * stored in the up to 4 data planes of an image. It also stores the + * subsampling factors and number of components. 
+ * + * @note This is separate of the colorspace (RGB, YCbCr, YPbPr, JPEG-style YUV + * and all the YUV variants) AVPixFmtDescriptor just stores how values + * are stored not what these values represent. + */ +typedef struct AVPixFmtDescriptor { + const char *name; + uint8_t nb_components; ///< The number of components each pixel has, (1-4) + + /** + * Amount to shift the luma width right to find the chroma width. + * For YV12 this is 1 for example. + * chroma_width = AV_CEIL_RSHIFT(luma_width, log2_chroma_w) + * The note above is needed to ensure rounding up. + * This value only refers to the chroma components. + */ + uint8_t log2_chroma_w; + + /** + * Amount to shift the luma height right to find the chroma height. + * For YV12 this is 1 for example. + * chroma_height= AV_CEIL_RSHIFT(luma_height, log2_chroma_h) + * The note above is needed to ensure rounding up. + * This value only refers to the chroma components. + */ + uint8_t log2_chroma_h; + + /** + * Combination of AV_PIX_FMT_FLAG_... flags. + */ + uint64_t flags; + + /** + * Parameters that describe how pixels are packed. + * If the format has 1 or 2 components, then luma is 0. + * If the format has 3 or 4 components: + * if the RGB flag is set then 0 is red, 1 is green and 2 is blue; + * otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V. + * + * If present, the Alpha channel is always the last component. + */ + AVComponentDescriptor comp[4]; + + /** + * Alternative comma-separated names. + */ + const char *alias; +} AVPixFmtDescriptor; + +/** + * Pixel format is big-endian. + */ +#define AV_PIX_FMT_FLAG_BE (1 << 0) +/** + * Pixel format has a palette in data[1], values are indexes in this palette. + */ +#define AV_PIX_FMT_FLAG_PAL (1 << 1) +/** + * All values of a component are bit-wise packed end to end. + */ +#define AV_PIX_FMT_FLAG_BITSTREAM (1 << 2) +/** + * Pixel format is an HW accelerated format. 
+ */ +#define AV_PIX_FMT_FLAG_HWACCEL (1 << 3) +/** + * At least one pixel component is not in the first data plane. + */ +#define AV_PIX_FMT_FLAG_PLANAR (1 << 4) +/** + * The pixel format contains RGB-like data (as opposed to YUV/grayscale). + */ +#define AV_PIX_FMT_FLAG_RGB (1 << 5) + +/** + * The pixel format is "pseudo-paletted". This means that it contains a + * fixed palette in the 2nd plane but the palette is fixed/constant for each + * PIX_FMT. This allows interpreting the data as if it was PAL8, which can + * in some cases be simpler. Or the data can be interpreted purely based on + * the pixel format without using the palette. + * An example of a pseudo-paletted format is AV_PIX_FMT_GRAY8 + * + * @deprecated This flag is deprecated, and will be removed. When it is removed, + * the extra palette allocation in AVFrame.data[1] is removed as well. Only + * actual paletted formats (as indicated by AV_PIX_FMT_FLAG_PAL) will have a + * palette. Starting with FFmpeg versions which have this flag deprecated, the + * extra "pseudo" palette is already ignored, and API users are not required to + * allocate a palette for AV_PIX_FMT_FLAG_PSEUDOPAL formats (it was required + * before the deprecation, though). + */ +#define AV_PIX_FMT_FLAG_PSEUDOPAL (1 << 6) + +/** + * The pixel format has an alpha channel. This is set on all formats that + * support alpha in some way, including AV_PIX_FMT_PAL8. The alpha is always + * straight, never pre-multiplied. + * + * If a codec or a filter does not support alpha, it should set all alpha to + * opaque, or use the equivalent pixel formats without alpha component, e.g. + * AV_PIX_FMT_RGB0 (or AV_PIX_FMT_RGB24 etc.) instead of AV_PIX_FMT_RGBA. + */ +#define AV_PIX_FMT_FLAG_ALPHA (1 << 7) + +/** + * The pixel format is following a Bayer pattern + */ +#define AV_PIX_FMT_FLAG_BAYER (1 << 8) + +/** + * The pixel format contains IEEE-754 floating point values. 
Precision (double, + * single, or half) should be determined by the pixel size (64, 32, or 16 bits). + */ +#define AV_PIX_FMT_FLAG_FLOAT (1 << 9) + +/** + * Return the number of bits per pixel used by the pixel format + * described by pixdesc. Note that this is not the same as the number + * of bits per sample. + * + * The returned number of bits refers to the number of bits actually + * used for storing the pixel information, that is padding bits are + * not counted. + */ +int liteav_av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc); + +/** + * Return the number of bits per pixel for the pixel format + * described by pixdesc, including any padding or unused bits. + */ +int liteav_av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc); + +/** + * @return a pixel format descriptor for provided pixel format or NULL if + * this pixel format is unknown. + */ +const AVPixFmtDescriptor *liteav_av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt); + +/** + * Iterate over all pixel format descriptors known to libavutil. + * + * @param prev previous descriptor. NULL to get the first descriptor. + * + * @return next descriptor or NULL after the last descriptor + */ +const AVPixFmtDescriptor *liteav_av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev); + +/** + * @return an AVPixelFormat id described by desc, or AV_PIX_FMT_NONE if desc + * is not a valid pointer to a pixel format descriptor. + */ +enum AVPixelFormat liteav_av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc); + +/** + * Utility function to access log2_chroma_w log2_chroma_h from + * the pixel format AVPixFmtDescriptor. 
+ * + * @param[in] pix_fmt the pixel format + * @param[out] h_shift store log2_chroma_w (horizontal/width shift) + * @param[out] v_shift store log2_chroma_h (vertical/height shift) + * + * @return 0 on success, AVERROR(ENOSYS) on invalid or unknown pixel format + */ +int liteav_av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, + int *h_shift, int *v_shift); + +/** + * @return number of planes in pix_fmt, a negative AVERROR if pix_fmt is not a + * valid pixel format. + */ +int liteav_av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt); + +/** + * @return the name for provided color range or NULL if unknown. + */ +const char *liteav_av_color_range_name(enum AVColorRange range); + +/** + * @return the AVColorRange value for name or an AVError if not found. + */ +int liteav_av_color_range_from_name(const char *name); + +/** + * @return the name for provided color primaries or NULL if unknown. + */ +const char *liteav_av_color_primaries_name(enum AVColorPrimaries primaries); + +/** + * @return the AVColorPrimaries value for name or an AVError if not found. + */ +int liteav_av_color_primaries_from_name(const char *name); + +/** + * @return the name for provided color transfer or NULL if unknown. + */ +const char *liteav_av_color_transfer_name(enum AVColorTransferCharacteristic transfer); + +/** + * @return the AVColorTransferCharacteristic value for name or an AVError if not found. + */ +int liteav_av_color_transfer_from_name(const char *name); + +/** + * @return the name for provided color space or NULL if unknown. + */ +const char *liteav_av_color_space_name(enum AVColorSpace space); + +/** + * @return the AVColorSpace value for name or an AVError if not found. + */ +int liteav_av_color_space_from_name(const char *name); + +/** + * @return the name for provided chroma location or NULL if unknown. + */ +const char *liteav_av_chroma_location_name(enum AVChromaLocation location); + +/** + * @return the AVChromaLocation value for name or an AVError if not found. 
+ */ +int liteav_av_chroma_location_from_name(const char *name); + +/** + * Return the pixel format corresponding to name. + * + * If there is no pixel format with name name, then looks for a + * pixel format with the name corresponding to the native endian + * format of name. + * For example in a little-endian system, first looks for "gray16", + * then for "gray16le". + * + * Finally if no pixel format has been found, returns AV_PIX_FMT_NONE. + */ +enum AVPixelFormat liteav_av_get_pix_fmt(const char *name); + +/** + * Return the short name for a pixel format, NULL in case pix_fmt is + * unknown. + * + * @see liteav_av_get_pix_fmt(), liteav_av_get_pix_fmt_string() + */ +const char *liteav_av_get_pix_fmt_name(enum AVPixelFormat pix_fmt); + +/** + * Print in buf the string corresponding to the pixel format with + * number pix_fmt, or a header if pix_fmt is negative. + * + * @param buf the buffer where to write the string + * @param buf_size the size of buf + * @param pix_fmt the number of the pixel format to print the + * corresponding info string, or a negative value to print the + * corresponding header. + */ +char *liteav_av_get_pix_fmt_string(char *buf, int buf_size, + enum AVPixelFormat pix_fmt); + +/** + * Read a line from an image, and write the values of the + * pixel format component c to dst. + * + * @param data the array containing the pointers to the planes of the image + * @param linesize the array containing the linesizes of the image + * @param desc the pixel format descriptor for the image + * @param x the horizontal coordinate of the first pixel to read + * @param y the vertical coordinate of the first pixel to read + * @param w the width of the line to read, that is the number of + * values to write to dst + * @param read_pal_component if not zero and the format is a paletted + * format writes the values corresponding to the palette + * component c in data[1] to dst, rather than the palette indexes in + * data[0]. 
The behavior is undefined if the format is not paletted. + * @param dst_element_size size of elements in dst array (2 or 4 byte) + */ +void liteav_av_read_image_line2(void *dst, const uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w, int read_pal_component, + int dst_element_size); + +void liteav_av_read_image_line(uint16_t *dst, const uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w, int read_pal_component); + +/** + * Write the values from src to the pixel format component c of an + * image line. + * + * @param src array containing the values to write + * @param data the array containing the pointers to the planes of the + * image to write into. It is supposed to be zeroed. + * @param linesize the array containing the linesizes of the image + * @param desc the pixel format descriptor for the image + * @param x the horizontal coordinate of the first pixel to write + * @param y the vertical coordinate of the first pixel to write + * @param w the width of the line to write, that is the number of + * values to write to the image line + * @param src_element_size size of elements in src array (2 or 4 byte) + */ +void liteav_av_write_image_line2(const void *src, uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w, int src_element_size); + +void liteav_av_write_image_line(const uint16_t *src, uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w); + +/** + * Utility function to swap the endianness of a pixel format. 
+ * + * @param[in] pix_fmt the pixel format + * + * @return pixel format with swapped endianness if it exists, + * otherwise AV_PIX_FMT_NONE + */ +enum AVPixelFormat liteav_av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt); + +#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */ +#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */ +#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */ +#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */ +#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */ +#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */ + +/** + * Compute what kind of losses will occur when converting from one specific + * pixel format to another. + * When converting from one pixel format to another, information loss may occur. + * For example, when converting from RGB24 to GRAY, the color information will + * be lost. Similarly, other losses occur when converting from some formats to + * other formats. These losses can involve loss of chroma, but also loss of + * resolution, loss of color depth, loss due to the color space conversion, loss + * of the alpha bits or loss due to color quantization. + * av_get_fix_fmt_loss() informs you about the various types of losses + * which will occur when converting from one pixel format to another. + * + * @param[in] dst_pix_fmt destination pixel format + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @return Combination of flags informing you what kind of losses will occur + * (maximum loss for an invalid dst_pix_fmt). + */ +int liteav_av_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, + enum AVPixelFormat src_pix_fmt, + int has_alpha); + +/** + * Compute what kind of losses will occur when converting from one specific + * pixel format to another. 
+ * When converting from one pixel format to another, information loss may occur. + * For example, when converting from RGB24 to GRAY, the color information will + * be lost. Similarly, other losses occur when converting from some formats to + * other formats. These losses can involve loss of chroma, but also loss of + * resolution, loss of color depth, loss due to the color space conversion, loss + * of the alpha bits or loss due to color quantization. + * av_get_fix_fmt_loss() informs you about the various types of losses + * which will occur when converting from one pixel format to another. + * + * @param[in] dst_pix_fmt destination pixel format + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @return Combination of flags informing you what kind of losses will occur + * (maximum loss for an invalid dst_pix_fmt). + */ +enum AVPixelFormat liteav_av_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); + +#endif /* AVUTIL_PIXDESC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/pixfmt.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/pixfmt.h new file mode 100644 index 0000000..6815f8d --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/pixfmt.h @@ -0,0 +1,542 @@ +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PIXFMT_H +#define AVUTIL_PIXFMT_H + +/** + * @file + * pixel format definitions + */ + +#include "libavutil/avconfig.h" +#include "version.h" + +#define AVPALETTE_SIZE 1024 +#define AVPALETTE_COUNT 256 + +/** + * Pixel format. + * + * @note + * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA + * color is put together as: + * (A << 24) | (R << 16) | (G << 8) | B + * This is stored as BGRA on little-endian CPU architectures and ARGB on + * big-endian CPUs. + * + * @note + * If the resolution is not a multiple of the chroma subsampling factor + * then the chroma plane resolution must be rounded up. + * + * @par + * When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized + * image data is stored in AVFrame.data[0]. The palette is transported in + * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is + * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is + * also endian-specific). Note also that the individual RGB32 palette + * components stored in AVFrame.data[1] should be in the range 0..255. + * This is important as many custom PAL8 video codecs that were designed + * to run on the IBM VGA graphics adapter use 6-bit palette components. + * + * @par + * For all the 8 bits per pixel formats, an RGB32 palette is in data[1] like + * for pal8. This palette is filled in automatically by the function + * allocating the picture. 
+ */ +enum AVPixelFormat { + AV_PIX_FMT_NONE = -1, + AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) + AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr + AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB... + AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR... + AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) + AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) + AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) + AV_PIX_FMT_GRAY8, ///< Y , 8bpp + AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb + AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb + AV_PIX_FMT_PAL8, ///< 8 bits with AV_PIX_FMT_RGB32 palette + AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range + AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range + AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range + AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 + AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 + AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) + AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb) + AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) + AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, 
(msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) + AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) + AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped + + AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB... + AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA... + AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR... + AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA... + + AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian + AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian + AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) + AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range + AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) + AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian + AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian + + AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian + AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian + AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined + AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined + + AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian + AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian + AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), 
big-endian , X=unused/undefined + AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined + +#if FF_API_VAAPI + /** @name Deprecated pixel formats */ + /**@{*/ + AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers + AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers + AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a VASurfaceID + /**@}*/ + AV_PIX_FMT_VAAPI = AV_PIX_FMT_VAAPI_VLD, +#else + /** + * Hardware acceleration through VA-API, data[3] contains a + * VASurfaceID. + */ + AV_PIX_FMT_VAAPI, +#endif + + AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer + + AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined + AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined + AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined + AV_PIX_FMT_BGR444BE, ///< 
packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined + AV_PIX_FMT_YA8, ///< 8 bits gray, 8 bits alpha + + AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8 + AV_PIX_FMT_GRAY8A= AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8 + + AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian + AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian + + /** + * The following 12 formats have the disadvantage of needing 1 format for each bit depth. + * Notice that each 9/10 bits sample is stored in 16 bits with extra padding. + * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better. + */ + AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), 
big-endian + AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp + AV_PIX_FMT_GBR24P = AV_PIX_FMT_GBRP, // alias for #AV_PIX_FMT_GBRP + AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian + AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian + AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian + AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian + AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian + AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian + AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) + AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) + AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian + AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian + AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian + AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian + AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian + AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian + AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) + AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A 
samples, big-endian) + AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) + AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + + AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface + + AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + + AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as 
big-endian + AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + + AV_PIX_FMT_YVYU422, ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb + + AV_PIX_FMT_YA16BE, ///< 16 bits gray, 16 bits alpha (big-endian) + AV_PIX_FMT_YA16LE, ///< 16 bits gray, 16 bits alpha (little-endian) + + AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp + AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian + AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian + /** + * HW acceleration through QSV, data[3] contains a pointer to the + * mfxFrameSurface1 structure. + */ + AV_PIX_FMT_QSV, + /** + * HW acceleration though MMAL, data[3] contains a pointer to the + * MMAL_BUFFER_HEADER_T structure. + */ + AV_PIX_FMT_MMAL, + + AV_PIX_FMT_D3D11VA_VLD, ///< HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer + + /** + * HW acceleration through CUDA. data[i] contain CUdeviceptr pointers + * exactly as for system memory frames. + */ + AV_PIX_FMT_CUDA, + + AV_PIX_FMT_0RGB, ///< packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined + AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined + AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined + AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGRXBGRX... 
X=unused/undefined + + AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian + AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian + AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian + AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian + AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range + + AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */ + 
AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */ + + AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing + + AV_PIX_FMT_YUV440P10LE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian + AV_PIX_FMT_YUV440P10BE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian + AV_PIX_FMT_YUV440P12LE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian + AV_PIX_FMT_YUV440P12BE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian + AV_PIX_FMT_AYUV64LE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian + AV_PIX_FMT_AYUV64BE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian + + AV_PIX_FMT_VIDEOTOOLBOX, ///< hardware decoding through Videotoolbox + + AV_PIX_FMT_P010LE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, little-endian + AV_PIX_FMT_P010BE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, big-endian 
+ + AV_PIX_FMT_GBRAP12BE, ///< planar GBR 4:4:4:4 48bpp, big-endian + AV_PIX_FMT_GBRAP12LE, ///< planar GBR 4:4:4:4 48bpp, little-endian + + AV_PIX_FMT_GBRAP10BE, ///< planar GBR 4:4:4:4 40bpp, big-endian + AV_PIX_FMT_GBRAP10LE, ///< planar GBR 4:4:4:4 40bpp, little-endian + + AV_PIX_FMT_MEDIACODEC, ///< hardware decoding through MediaCodec + + AV_PIX_FMT_GRAY12BE, ///< Y , 12bpp, big-endian + AV_PIX_FMT_GRAY12LE, ///< Y , 12bpp, little-endian + AV_PIX_FMT_GRAY10BE, ///< Y , 10bpp, big-endian + AV_PIX_FMT_GRAY10LE, ///< Y , 10bpp, little-endian + + AV_PIX_FMT_P016LE, ///< like NV12, with 16bpp per component, little-endian + AV_PIX_FMT_P016BE, ///< like NV12, with 16bpp per component, big-endian + + /** + * Hardware surfaces for Direct3D11. + * + * This is preferred over the legacy AV_PIX_FMT_D3D11VA_VLD. The new D3D11 + * hwaccel API and filtering support AV_PIX_FMT_D3D11 only. + * + * data[0] contains a ID3D11Texture2D pointer, and data[1] contains the + * texture array index of the frame as intptr_t if the ID3D11Texture2D is + * an array texture (or always 0 if it's a normal texture). + */ + AV_PIX_FMT_D3D11, + + AV_PIX_FMT_GRAY9BE, ///< Y , 9bpp, big-endian + AV_PIX_FMT_GRAY9LE, ///< Y , 9bpp, little-endian + + AV_PIX_FMT_GBRPF32BE, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian + AV_PIX_FMT_GBRPF32LE, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian + AV_PIX_FMT_GBRAPF32BE, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian + AV_PIX_FMT_GBRAPF32LE, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian + + /** + * DRM-managed buffers exposed through PRIME buffer sharing. + * + * data[0] points to an AVDRMFrameDescriptor. + */ + AV_PIX_FMT_DRM_PRIME, + /** + * Hardware surfaces for OpenCL. + * + * data[i] contain 2D image objects (typed in C as cl_mem, used + * in OpenCL as image2d_t) for each plane of the surface. 
+ */ + AV_PIX_FMT_OPENCL, + + AV_PIX_FMT_GRAY14BE, ///< Y , 14bpp, big-endian + AV_PIX_FMT_GRAY14LE, ///< Y , 14bpp, little-endian + + AV_PIX_FMT_GRAYF32BE, ///< IEEE-754 single precision Y, 32bpp, big-endian + AV_PIX_FMT_GRAYF32LE, ///< IEEE-754 single precision Y, 32bpp, little-endian + + AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions +}; + +#if AV_HAVE_BIGENDIAN +# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be +#else +# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le +#endif + +#define AV_PIX_FMT_RGB32 AV_PIX_FMT_NE(ARGB, BGRA) +#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR) +#define AV_PIX_FMT_BGR32 AV_PIX_FMT_NE(ABGR, RGBA) +#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB) +#define AV_PIX_FMT_0RGB32 AV_PIX_FMT_NE(0RGB, BGR0) +#define AV_PIX_FMT_0BGR32 AV_PIX_FMT_NE(0BGR, RGB0) + +#define AV_PIX_FMT_GRAY9 AV_PIX_FMT_NE(GRAY9BE, GRAY9LE) +#define AV_PIX_FMT_GRAY10 AV_PIX_FMT_NE(GRAY10BE, GRAY10LE) +#define AV_PIX_FMT_GRAY12 AV_PIX_FMT_NE(GRAY12BE, GRAY12LE) +#define AV_PIX_FMT_GRAY14 AV_PIX_FMT_NE(GRAY14BE, GRAY14LE) +#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE) +#define AV_PIX_FMT_YA16 AV_PIX_FMT_NE(YA16BE, YA16LE) +#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE) +#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE) +#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE) +#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE) +#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE) +#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE) +#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE) +#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE) +#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE) +#define AV_PIX_FMT_BGRA64 AV_PIX_FMT_NE(BGRA64BE, BGRA64LE) + +#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE) +#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , 
YUV422P9LE) +#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE) +#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE) +#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE) +#define AV_PIX_FMT_YUV440P10 AV_PIX_FMT_NE(YUV440P10BE, YUV440P10LE) +#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE) +#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE) +#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE) +#define AV_PIX_FMT_YUV440P12 AV_PIX_FMT_NE(YUV440P12BE, YUV440P12LE) +#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE) +#define AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE) +#define AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE) +#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE) +#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE) +#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE) +#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE) + +#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE) +#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE) +#define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE) +#define AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE) +#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE) +#define AV_PIX_FMT_GBRAP10 AV_PIX_FMT_NE(GBRAP10BE, GBRAP10LE) +#define AV_PIX_FMT_GBRAP12 AV_PIX_FMT_NE(GBRAP12BE, GBRAP12LE) +#define AV_PIX_FMT_GBRAP16 AV_PIX_FMT_NE(GBRAP16BE, GBRAP16LE) + +#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE, BAYER_BGGR16LE) +#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE, BAYER_RGGB16LE) +#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE, BAYER_GBRG16LE) +#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE, BAYER_GRBG16LE) + +#define AV_PIX_FMT_GBRPF32 AV_PIX_FMT_NE(GBRPF32BE, GBRPF32LE) +#define AV_PIX_FMT_GBRAPF32 AV_PIX_FMT_NE(GBRAPF32BE, 
GBRAPF32LE) + +#define AV_PIX_FMT_GRAYF32 AV_PIX_FMT_NE(GRAYF32BE, GRAYF32LE) + +#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE) +#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE) +#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE) +#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE) +#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE) +#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE) +#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE) +#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE) +#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE) + +#define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE) +#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE) +#define AV_PIX_FMT_AYUV64 AV_PIX_FMT_NE(AYUV64BE, AYUV64LE) +#define AV_PIX_FMT_P010 AV_PIX_FMT_NE(P010BE, P010LE) +#define AV_PIX_FMT_P016 AV_PIX_FMT_NE(P016BE, P016LE) + +/** + * Chromaticity coordinates of the source primaries. + * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.1. 
+ */ +enum AVColorPrimaries { + AVCOL_PRI_RESERVED0 = 0, + AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B + AVCOL_PRI_UNSPECIFIED = 2, + AVCOL_PRI_RESERVED = 3, + AVCOL_PRI_BT470M = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20) + + AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM + AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above + AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C + AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020 + AVCOL_PRI_SMPTE428 = 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ) + AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428, + AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3 + AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3 + AVCOL_PRI_JEDEC_P22 = 22, ///< JEDEC P22 phosphors + AVCOL_PRI_NB ///< Not part of ABI +}; + +/** + * Color Transfer Characteristic. + * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.2. 
+ */ +enum AVColorTransferCharacteristic { + AVCOL_TRC_RESERVED0 = 0, + AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361 + AVCOL_TRC_UNSPECIFIED = 2, + AVCOL_TRC_RESERVED = 3, + AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM + AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG + AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC + AVCOL_TRC_SMPTE240M = 7, + AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics" + AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)" + AVCOL_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)" + AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4 + AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut + AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC) + AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10-bit system + AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12-bit system + AVCOL_TRC_SMPTE2084 = 16, ///< SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems + AVCOL_TRC_SMPTEST2084 = AVCOL_TRC_SMPTE2084, + AVCOL_TRC_SMPTE428 = 17, ///< SMPTE ST 428-1 + AVCOL_TRC_SMPTEST428_1 = AVCOL_TRC_SMPTE428, + AVCOL_TRC_ARIB_STD_B67 = 18, ///< ARIB STD-B67, known as "Hybrid log-gamma" + AVCOL_TRC_NB ///< Not part of ABI +}; + +/** + * YUV colorspace type. + * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.3. 
+ */ +enum AVColorSpace { + AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB) + AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B + AVCOL_SPC_UNSPECIFIED = 2, + AVCOL_SPC_RESERVED = 3, + AVCOL_SPC_FCC = 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20) + AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 + AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + AVCOL_SPC_SMPTE240M = 7, ///< functionally identical to above + AVCOL_SPC_YCGCO = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16 + AVCOL_SPC_YCOCG = AVCOL_SPC_YCGCO, + AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system + AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system + AVCOL_SPC_SMPTE2085 = 11, ///< SMPTE 2085, Y'D'zD'x + AVCOL_SPC_CHROMA_DERIVED_NCL = 12, ///< Chromaticity-derived non-constant luminance system + AVCOL_SPC_CHROMA_DERIVED_CL = 13, ///< Chromaticity-derived constant luminance system + AVCOL_SPC_ICTCP = 14, ///< ITU-R BT.2100-0, ICtCp + AVCOL_SPC_NB ///< Not part of ABI +}; + +/** + * MPEG vs JPEG YUV range. + */ +enum AVColorRange { + AVCOL_RANGE_UNSPECIFIED = 0, + AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges + AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges + AVCOL_RANGE_NB ///< Not part of ABI +}; + +/** + * Location of chroma samples. + * + * Illustration showing the location of the first (top left) chroma sample of the + * image, the left shows only luma, the right + * shows the location of the chroma sample, the 2 could be imagined to overlay + * each other but are drawn separately due to limitations of ASCII + * + * 1st 2nd 1st 2nd horizontal luma sample positions + * v v v v + * ______ ______ + *1st luma line > |X X ... |3 4 X ... 
X are luma samples, + * | |1 2 1-6 are possible chroma positions + *2nd luma line > |X X ... |5 6 X ... 0 is undefined/unknown position + */ +enum AVChromaLocation { + AVCHROMA_LOC_UNSPECIFIED = 0, + AVCHROMA_LOC_LEFT = 1, ///< MPEG-2/4 4:2:0, H.264 default for 4:2:0 + AVCHROMA_LOC_CENTER = 2, ///< MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0 + AVCHROMA_LOC_TOPLEFT = 3, ///< ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2 + AVCHROMA_LOC_TOP = 4, + AVCHROMA_LOC_BOTTOMLEFT = 5, + AVCHROMA_LOC_BOTTOM = 6, + AVCHROMA_LOC_NB ///< Not part of ABI +}; + +#endif /* AVUTIL_PIXFMT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/pthread_helper.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/pthread_helper.h new file mode 100644 index 0000000..c31040e --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/pthread_helper.h @@ -0,0 +1,39 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2009 Baptiste Coudurier <qoroliang@tencent.com> + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PTHREAD_HELPER_H +#define AVUTIL_PTHREAD_HELPER_H + +/** + * Wait for a task + * + * @param poll_max_count poll max count + * @param poll_interval_time poll interval time, in microsecond + * + * @return poll count + * + */ + +int liteav_ff_wait_thread(int poll_max_count, int poll_interval_time, int *running); + +#ifdef _WIN32 +unsigned long pthread_self(); +#endif + +#endif diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/random_seed.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/random_seed.h new file mode 100644 index 0000000..7a26962 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/random_seed.h @@ -0,0 +1,44 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2009 Baptiste Coudurier <baptiste.coudurier@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_RANDOM_SEED_H +#define AVUTIL_RANDOM_SEED_H + +#include <stdint.h> +/** + * @addtogroup lavu_crypto + * @{ + */ + +/** + * Get a seed to use in conjunction with random functions. + * This function tries to provide a good seed at a best effort bases. + * Its possible to call this function multiple times if more bits are needed. + * It can be quite slow, which is why it should only be used as seed for a faster + * PRNG. The quality of the seed depends on the platform. + */ +uint32_t liteav_av_get_random_seed(void); + +/** + * @} + */ + +#endif /* AVUTIL_RANDOM_SEED_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/rational.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/rational.h new file mode 100644 index 0000000..d3c038a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/rational.h @@ -0,0 +1,215 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * rational numbers + * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_math_rational + * Utilties for rational number calculation. + * @author Michael Niedermayer <michaelni@gmx.at> + */ + +#ifndef AVUTIL_RATIONAL_H +#define AVUTIL_RATIONAL_H + +#include <stdint.h> +#include <limits.h> +#include "attributes.h" + +/** + * @defgroup lavu_math_rational AVRational + * @ingroup lavu_math + * Rational number calculation. + * + * While rational numbers can be expressed as floating-point numbers, the + * conversion process is a lossy one, so are floating-point operations. On the + * other hand, the nature of FFmpeg demands highly accurate calculation of + * timestamps. This set of rational number utilities serves as a generic + * interface for manipulating rational numbers as pairs of numerators and + * denominators. + * + * Many of the functions that operate on AVRational's have the suffix `_q`, in + * reference to the mathematical symbol "ℚ" (Q) which denotes the set of all + * rational numbers. + * + * @{ + */ + +/** + * Rational number (pair of numerator and denominator). + */ +typedef struct AVRational{ + int num; ///< Numerator + int den; ///< Denominator +} AVRational; + +/** + * Create an AVRational. + * + * Useful for compilers that do not support compound literals. + * + * @note The return value is not reduced. + * @see liteav_av_reduce() + */ +static inline AVRational av_make_q(int num, int den) +{ + AVRational r = { num, den }; + return r; +} + +/** + * Compare two rationals. 
+ * + * @param a First rational + * @param b Second rational + * + * @return One of the following values: + * - 0 if `a == b` + * - 1 if `a > b` + * - -1 if `a < b` + * - `INT_MIN` if one of the values is of the form `0 / 0` + */ +static inline int av_cmp_q(AVRational a, AVRational b){ + const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den; + + if(tmp) return (int)((tmp ^ a.den ^ b.den)>>63)|1; + else if(b.den && a.den) return 0; + else if(a.num && b.num) return (a.num>>31) - (b.num>>31); + else return INT_MIN; +} + +/** + * Convert an AVRational to a `double`. + * @param a AVRational to convert + * @return `a` in floating-point form + * @see liteav_av_d2q() + */ +static inline double av_q2d(AVRational a){ + return a.num / (double) a.den; +} + +/** + * Reduce a fraction. + * + * This is useful for framerate calculations. + * + * @param[out] dst_num Destination numerator + * @param[out] dst_den Destination denominator + * @param[in] num Source numerator + * @param[in] den Source denominator + * @param[in] max Maximum allowed values for `dst_num` & `dst_den` + * @return 1 if the operation is exact, 0 otherwise + */ +int liteav_av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max); + +/** + * Multiply two rationals. + * @param b First rational + * @param c Second rational + * @return b*c + */ +AVRational liteav_av_mul_q(AVRational b, AVRational c) av_const; + +/** + * Divide one rational by another. + * @param b First rational + * @param c Second rational + * @return b/c + */ +AVRational liteav_av_div_q(AVRational b, AVRational c) av_const; + +/** + * Add two rationals. + * @param b First rational + * @param c Second rational + * @return b+c + */ +AVRational liteav_av_add_q(AVRational b, AVRational c) av_const; + +/** + * Subtract one rational from another. + * @param b First rational + * @param c Second rational + * @return b-c + */ +AVRational liteav_av_sub_q(AVRational b, AVRational c) av_const; + +/** + * Invert a rational. 
+ * @param q value + * @return 1 / q + */ +static av_always_inline AVRational av_inv_q(AVRational q) +{ + AVRational r = { q.den, q.num }; + return r; +} + +/** + * Convert a double precision floating point number to a rational. + * + * In case of infinity, the returned value is expressed as `{1, 0}` or + * `{-1, 0}` depending on the sign. + * + * @param d `double` to convert + * @param max Maximum allowed numerator and denominator + * @return `d` in AVRational form + * @see av_q2d() + */ +AVRational liteav_av_d2q(double d, int max) av_const; + +/** + * Find which of the two rationals is closer to another rational. + * + * @param q Rational to be compared against + * @param q1,q2 Rationals to be tested + * @return One of the following values: + * - 1 if `q1` is nearer to `q` than `q2` + * - -1 if `q2` is nearer to `q` than `q1` + * - 0 if they have the same distance + */ +int liteav_av_nearer_q(AVRational q, AVRational q1, AVRational q2); + +/** + * Find the value in a list of rationals nearest a given reference rational. + * + * @param q Reference rational + * @param q_list Array of rationals terminated by `{0, 0}` + * @return Index of the nearest value found in the array + */ +int liteav_av_find_nearest_q_idx(AVRational q, const AVRational* q_list); + +/** + * Convert an AVRational to a IEEE 32-bit `float` expressed in fixed-point + * format. + * + * @param q Rational to be converted + * @return Equivalent floating-point value, expressed as an unsigned 32-bit + * integer. + * @note The returned value is platform-indepedant. 
+ */ +uint32_t liteav_av_q2intfloat(AVRational q); + +/** + * @} + */ + +#endif /* AVUTIL_RATIONAL_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/rc4.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/rc4.h new file mode 100644 index 0000000..7eeb575 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/rc4.h @@ -0,0 +1,67 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * RC4 encryption/decryption/pseudo-random number generator + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_RC4_H +#define AVUTIL_RC4_H + +#include <stdint.h> + +/** + * @defgroup lavu_rc4 RC4 + * @ingroup lavu_crypto + * @{ + */ + +typedef struct AVRC4 { + uint8_t state[256]; + int x, y; +} AVRC4; + +/** + * Allocate an AVRC4 context. + */ +AVRC4 *liteav_av_rc4_alloc(void); + +/** + * @brief Initializes an AVRC4 context. 
+ * + * @param key_bits must be a multiple of 8 + * @param decrypt 0 for encryption, 1 for decryption, currently has no effect + * @return zero on success, negative value otherwise + */ +int liteav_av_rc4_init(struct AVRC4 *d, const uint8_t *key, int key_bits, int decrypt); + +/** + * @brief Encrypts / decrypts using the RC4 algorithm. + * + * @param count number of bytes + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst, may be NULL + * @param iv not (yet) used for RC4, should be NULL + * @param decrypt 0 for encryption, 1 for decryption, not (yet) used + */ +void liteav_av_rc4_crypt(struct AVRC4 *d, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_RC4_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/replaygain.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/replaygain.h new file mode 100644 index 0000000..b49bf1a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/replaygain.h @@ -0,0 +1,50 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_REPLAYGAIN_H +#define AVUTIL_REPLAYGAIN_H + +#include <stdint.h> + +/** + * ReplayGain information (see + * http://wiki.hydrogenaudio.org/index.php?title=ReplayGain_1.0_specification). + * The size of this struct is a part of the public ABI. + */ +typedef struct AVReplayGain { + /** + * Track replay gain in microbels (divide by 100000 to get the value in dB). + * Should be set to INT32_MIN when unknown. + */ + int32_t track_gain; + /** + * Peak track amplitude, with 100000 representing full scale (but values + * may overflow). 0 when unknown. + */ + uint32_t track_peak; + /** + * Same as track_gain, but for the whole album. + */ + int32_t album_gain; + /** + * Same as track_peak, but for the whole album, + */ + uint32_t album_peak; +} AVReplayGain; + +#endif /* AVUTIL_REPLAYGAIN_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/ripemd.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/ripemd.h new file mode 100644 index 0000000..2137d35 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/ripemd.h @@ -0,0 +1,88 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at> + * Copyright (C) 2013 James Almer <jamrial@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_ripemd + * Public header for RIPEMD hash function implementation. + */ + +#ifndef AVUTIL_RIPEMD_H +#define AVUTIL_RIPEMD_H + +#include <stdint.h> + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_ripemd RIPEMD + * @ingroup lavu_hash + * RIPEMD hash function implementation. + * + * @{ + */ + +extern const int liteav_av_ripemd_size; + +struct AVRIPEMD; + +/** + * Allocate an AVRIPEMD context. + */ +struct AVRIPEMD *liteav_av_ripemd_alloc(void); + +/** + * Initialize RIPEMD hashing. + * + * @param context pointer to the function context (of size liteav_av_ripemd_size) + * @param bits number of bits in digest (128, 160, 256 or 320 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int liteav_av_ripemd_init(struct AVRIPEMD* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +#if FF_API_CRYPTO_SIZE_T +void liteav_av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, unsigned int len); +#else +void liteav_av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, size_t len); +#endif + +/** + * Finish hashing and output digest value. 
+ * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void liteav_av_ripemd_final(struct AVRIPEMD* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_RIPEMD_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/samplefmt.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/samplefmt.h new file mode 100644 index 0000000..118f25a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/samplefmt.h @@ -0,0 +1,273 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_SAMPLEFMT_H +#define AVUTIL_SAMPLEFMT_H + +#include <stdint.h> + +#include "avutil.h" +#include "attributes.h" + +/** + * @addtogroup lavu_audio + * @{ + * + * @defgroup lavu_sampfmts Audio sample formats + * + * Audio sample format enumeration and related convenience functions. + * @{ + */ + +/** + * Audio sample formats + * + * - The data described by the sample format is always in native-endian order. 
+ * Sample values can be expressed by native C types, hence the lack of a signed + * 24-bit sample format even though it is a common raw audio data format. + * + * - The floating-point formats are based on full volume being in the range + * [-1.0, 1.0]. Any values outside this range are beyond full volume level. + * + * - The data layout as used in liteav_av_samples_fill_arrays() and elsewhere in FFmpeg + * (such as AVFrame in libavcodec) is as follows: + * + * @par + * For planar sample formats, each audio channel is in a separate data plane, + * and linesize is the buffer size, in bytes, for a single plane. All data + * planes must be the same size. For packed sample formats, only the first data + * plane is used, and samples for each channel are interleaved. In this case, + * linesize is the buffer size, in bytes, for the 1 plane. + * + */ +enum AVSampleFormat { + AV_SAMPLE_FMT_NONE = -1, + AV_SAMPLE_FMT_U8, ///< unsigned 8 bits + AV_SAMPLE_FMT_S16, ///< signed 16 bits + AV_SAMPLE_FMT_S32, ///< signed 32 bits + AV_SAMPLE_FMT_FLT, ///< float + AV_SAMPLE_FMT_DBL, ///< double + + AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar + AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar + AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar + AV_SAMPLE_FMT_FLTP, ///< float, planar + AV_SAMPLE_FMT_DBLP, ///< double, planar + AV_SAMPLE_FMT_S64, ///< signed 64 bits + AV_SAMPLE_FMT_S64P, ///< signed 64 bits, planar + + AV_SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if linking dynamically +}; + +/** + * Return the name of sample_fmt, or NULL if sample_fmt is not + * recognized. + */ +const char *liteav_av_get_sample_fmt_name(enum AVSampleFormat sample_fmt); + +/** + * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE + * on error. + */ +enum AVSampleFormat liteav_av_get_sample_fmt(const char *name); + +/** + * Return the planar<->packed alternative form of the given sample format, or + * AV_SAMPLE_FMT_NONE on error. 
If the passed sample_fmt is already in the + * requested planar/packed format, the format returned is the same as the + * input. + */ +enum AVSampleFormat liteav_av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt, int planar); + +/** + * Get the packed alternative form of the given sample format. + * + * If the passed sample_fmt is already in packed format, the format returned is + * the same as the input. + * + * @return the packed alternative form of the given sample format or + AV_SAMPLE_FMT_NONE on error. + */ +enum AVSampleFormat liteav_av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt); + +/** + * Get the planar alternative form of the given sample format. + * + * If the passed sample_fmt is already in planar format, the format returned is + * the same as the input. + * + * @return the planar alternative form of the given sample format or + AV_SAMPLE_FMT_NONE on error. + */ +enum AVSampleFormat liteav_av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt); + +/** + * Generate a string corresponding to the sample format with + * sample_fmt, or a header if sample_fmt is negative. + * + * @param buf the buffer where to write the string + * @param buf_size the size of buf + * @param sample_fmt the number of the sample format to print the + * corresponding info string, or a negative value to print the + * corresponding header. + * @return the pointer to the filled buffer or NULL if sample_fmt is + * unknown or in case of other errors + */ +char *liteav_av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt); + +/** + * Return number of bytes per sample. + * + * @param sample_fmt the sample format + * @return number of bytes per sample or zero if unknown for the given + * sample format + */ +int liteav_av_get_bytes_per_sample(enum AVSampleFormat sample_fmt); + +/** + * Check if the sample format is planar. 
+ * + * @param sample_fmt the sample format to inspect + * @return 1 if the sample format is planar, 0 if it is interleaved + */ +int liteav_av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt); + +/** + * Get the required buffer size for the given audio parameters. + * + * @param[out] linesize calculated linesize, may be NULL + * @param nb_channels the number of channels + * @param nb_samples the number of samples in a single channel + * @param sample_fmt the sample format + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return required buffer size, or negative error code on failure + */ +int liteav_av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, + enum AVSampleFormat sample_fmt, int align); + +/** + * @} + * + * @defgroup lavu_sampmanip Samples manipulation + * + * Functions that manipulate audio samples + * @{ + */ + +/** + * Fill plane data pointers and linesize for samples with sample + * format sample_fmt. + * + * The audio_data array is filled with the pointers to the samples data planes: + * for planar, set the start point of each channel's data within the buffer, + * for packed, set the start point of the entire buffer only. + * + * The value pointed to by linesize is set to the aligned size of each + * channel's data buffer for planar layout, or to the aligned size of the + * buffer for all channels for packed layout. + * + * The buffer in buf must be big enough to contain all the samples + * (use liteav_av_samples_get_buffer_size() to compute its minimum size), + * otherwise the audio_data pointers will point to invalid data. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. 
+ * + * @param[out] audio_data array to be filled with the pointer for each channel + * @param[out] linesize calculated linesize, may be NULL + * @param buf the pointer to a buffer containing the samples + * @param nb_channels the number of channels + * @param nb_samples the number of samples in a single channel + * @param sample_fmt the sample format + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return >=0 on success or a negative error code on failure + * @todo return minimum size in bytes required for the buffer in case + * of success at the next bump + */ +int liteav_av_samples_fill_arrays(uint8_t **audio_data, int *linesize, + const uint8_t *buf, + int nb_channels, int nb_samples, + enum AVSampleFormat sample_fmt, int align); + +/** + * Allocate a samples buffer for nb_samples samples, and fill data pointers and + * linesize accordingly. + * The allocated samples buffer can be freed by using liteav_av_freep(&audio_data[0]) + * Allocated data will be initialized to silence. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param[out] audio_data array to be filled with the pointer for each channel + * @param[out] linesize aligned size for audio buffer(s), may be NULL + * @param nb_channels number of audio channels + * @param nb_samples number of samples per channel + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return >=0 on success or a negative error code on failure + * @todo return the size of the allocated buffer in case of success at the next bump + * @see liteav_av_samples_fill_arrays() + * @see liteav_av_samples_alloc_array_and_samples() + */ +int liteav_av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels, + int nb_samples, enum AVSampleFormat sample_fmt, int align); + +/** + * Allocate a data pointers array, samples buffer for nb_samples + * samples, and fill data pointers and linesize accordingly. 
+ * + * This is the same as liteav_av_samples_alloc(), but also allocates the data + * pointers array. + * + * @see liteav_av_samples_alloc() + */ +int liteav_av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels, + int nb_samples, enum AVSampleFormat sample_fmt, int align); + +/** + * Copy samples from src to dst. + * + * @param dst destination array of pointers to data planes + * @param src source array of pointers to data planes + * @param dst_offset offset in samples at which the data will be written to dst + * @param src_offset offset in samples at which the data will be read from src + * @param nb_samples number of samples to be copied + * @param nb_channels number of audio channels + * @param sample_fmt audio sample format + */ +int liteav_av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset, + int src_offset, int nb_samples, int nb_channels, + enum AVSampleFormat sample_fmt); + +/** + * Fill an audio buffer with silence. + * + * @param audio_data array of pointers to data planes + * @param offset offset in samples at which to start filling + * @param nb_samples number of samples to fill + * @param nb_channels number of audio channels + * @param sample_fmt audio sample format + */ +int liteav_av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples, + int nb_channels, enum AVSampleFormat sample_fmt); + +/** + * @} + * @} + */ +#endif /* AVUTIL_SAMPLEFMT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/sha.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/sha.h new file mode 100644 index 0000000..3eed047 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/sha.h @@ -0,0 +1,96 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at> + * + * This file is 
part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_sha + * Public header for SHA-1 & SHA-256 hash function implementations. + */ + +#ifndef AVUTIL_SHA_H +#define AVUTIL_SHA_H + +#include <stddef.h> +#include <stdint.h> + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_sha SHA + * @ingroup lavu_hash + * SHA-1 and SHA-256 (Secure Hash Algorithm) hash function implementations. + * + * This module supports the following SHA hash functions: + * + * - SHA-1: 160 bits + * - SHA-224: 224 bits, as a variant of SHA-2 + * - SHA-256: 256 bits, as a variant of SHA-2 + * + * @see For SHA-384, SHA-512, and variants thereof, see @ref lavu_sha512. + * + * @{ + */ + +extern const int liteav_av_sha_size; + +struct AVSHA; + +/** + * Allocate an AVSHA context. + */ +struct AVSHA *liteav_av_sha_alloc(void); + +/** + * Initialize SHA-1 or SHA-2 hashing. + * + * @param context pointer to the function context (of size liteav_av_sha_size) + * @param bits number of bits in digest (SHA-1 - 160 bits, SHA-2 224 or 256 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int liteav_av_sha_init(struct AVSHA* context, int bits); + +/** + * Update hash value. 
+ * + * @param ctx hash function context + * @param data input data to update hash with + * @param len input data length + */ +#if FF_API_CRYPTO_SIZE_T +void liteav_av_sha_update(struct AVSHA *ctx, const uint8_t *data, unsigned int len); +#else +void liteav_av_sha_update(struct AVSHA *ctx, const uint8_t *data, size_t len); +#endif + +/** + * Finish hashing and output digest value. + * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void liteav_av_sha_final(struct AVSHA* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_SHA_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/sha512.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/sha512.h new file mode 100644 index 0000000..0975da1 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/sha512.h @@ -0,0 +1,98 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at> + * Copyright (C) 2013 James Almer <jamrial@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_sha512 + * Public header for SHA-512 implementation. + */ + +#ifndef AVUTIL_SHA512_H +#define AVUTIL_SHA512_H + +#include <stddef.h> +#include <stdint.h> + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_sha512 SHA-512 + * @ingroup lavu_hash + * SHA-512 (Secure Hash Algorithm) hash function implementations. + * + * This module supports the following SHA-2 hash functions: + * + * - SHA-512/224: 224 bits + * - SHA-512/256: 256 bits + * - SHA-384: 384 bits + * - SHA-512: 512 bits + * + * @see For SHA-1, SHA-256, and variants thereof, see @ref lavu_sha. + * + * @{ + */ + +extern const int liteav_av_sha512_size; + +struct AVSHA512; + +/** + * Allocate an AVSHA512 context. + */ +struct AVSHA512 *liteav_av_sha512_alloc(void); + +/** + * Initialize SHA-2 512 hashing. + * + * @param context pointer to the function context (of size liteav_av_sha512_size) + * @param bits number of bits in digest (224, 256, 384 or 512 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int liteav_av_sha512_init(struct AVSHA512* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +#if FF_API_CRYPTO_SIZE_T +void liteav_av_sha512_update(struct AVSHA512* context, const uint8_t* data, unsigned int len); +#else +void liteav_av_sha512_update(struct AVSHA512* context, const uint8_t* data, size_t len); +#endif + +/** + * Finish hashing and output digest value. 
+ * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void liteav_av_sha512_final(struct AVSHA512* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_SHA512_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/spherical.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/spherical.h new file mode 100644 index 0000000..18a4ce0 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/spherical.h @@ -0,0 +1,233 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2016 Vittorio Giovara <vittorio.giovara@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Spherical video + */ + +#ifndef AVUTIL_SPHERICAL_H +#define AVUTIL_SPHERICAL_H + +#include <stddef.h> +#include <stdint.h> + +/** + * @addtogroup lavu_video + * @{ + * + * @defgroup lavu_video_spherical Spherical video mapping + * @{ + */ + +/** + * @addtogroup lavu_video_spherical + * A spherical video file contains surfaces that need to be mapped onto a + * sphere. 
Depending on how the frame was converted, a different distortion + * transformation or surface recomposition function needs to be applied before + * the video should be mapped and displayed. + */ + +/** + * Projection of the video surface(s) on a sphere. + */ +enum AVSphericalProjection { + /** + * Video represents a sphere mapped on a flat surface using + * equirectangular projection. + */ + AV_SPHERICAL_EQUIRECTANGULAR, + + /** + * Video frame is split into 6 faces of a cube, and arranged on a + * 3x2 layout. Faces are oriented upwards for the front, left, right, + * and back faces. The up face is oriented so the top of the face is + * forwards and the down face is oriented so the top of the face is + * to the back. + */ + AV_SPHERICAL_CUBEMAP, + + /** + * Video represents a portion of a sphere mapped on a flat surface + * using equirectangular projection. The @ref bounding fields indicate + * the position of the current video in a larger surface. + */ + AV_SPHERICAL_EQUIRECTANGULAR_TILE, +}; + +/** + * This structure describes how to handle spherical videos, outlining + * information about projection, initial layout, and any other view modifier. + * + * @note The struct must be allocated with liteav_av_spherical_alloc() and + * its size is not a part of the public ABI. + */ +typedef struct AVSphericalMapping { + /** + * Projection type. + */ + enum AVSphericalProjection projection; + + /** + * @name Initial orientation + * @{ + * There fields describe additional rotations applied to the sphere after + * the video frame is mapped onto it. The sphere is rotated around the + * viewer, who remains stationary. The order of transformation is always + * yaw, followed by pitch, and finally by roll. + * + * The coordinate system matches the one defined in OpenGL, where the + * forward vector (z) is coming out of screen, and it is equivalent to + * a rotation matrix of R = r_y(yaw) * r_x(pitch) * r_z(roll). 
+ * + * A positive yaw rotates the portion of the sphere in front of the viewer + * toward their right. A positive pitch rotates the portion of the sphere + * in front of the viewer upwards. A positive roll tilts the portion of + * the sphere in front of the viewer to the viewer's right. + * + * These values are exported as 16.16 fixed point. + * + * See this equirectangular projection as example: + * + * @code{.unparsed} + * Yaw + * -180 0 180 + * 90 +-------------+-------------+ 180 + * | | | up + * P | | | y| forward + * i | ^ | | /z + * t 0 +-------------X-------------+ 0 Roll | / + * c | | | | / + * h | | | 0|/_____right + * | | | x + * -90 +-------------+-------------+ -180 + * + * X - the default camera center + * ^ - the default up vector + * @endcode + */ + int32_t yaw; ///< Rotation around the up vector [-180, 180]. + int32_t pitch; ///< Rotation around the right vector [-90, 90]. + int32_t roll; ///< Rotation around the forward vector [-180, 180]. + /** + * @} + */ + + /** + * @name Bounding rectangle + * @anchor bounding + * @{ + * These fields indicate the location of the current tile, and where + * it should be mapped relative to the original surface. They are + * exported as 0.32 fixed point, and can be converted to classic + * pixel values with av_spherical_bounds(). 
+ * + * @code{.unparsed} + * +----------------+----------+ + * | |bound_top | + * | +--------+ | + * | bound_left |tile | | + * +<---------->| |<--->+bound_right + * | +--------+ | + * | | | + * | bound_bottom| | + * +----------------+----------+ + * @endcode + * + * If needed, the original video surface dimensions can be derived + * by adding the current stream or frame size to the related bounds, + * like in the following example: + * + * @code{c} + * original_width = tile->width + bound_left + bound_right; + * original_height = tile->height + bound_top + bound_bottom; + * @endcode + * + * @note These values are valid only for the tiled equirectangular + * projection type (@ref AV_SPHERICAL_EQUIRECTANGULAR_TILE), + * and should be ignored in all other cases. + */ + uint32_t bound_left; ///< Distance from the left edge + uint32_t bound_top; ///< Distance from the top edge + uint32_t bound_right; ///< Distance from the right edge + uint32_t bound_bottom; ///< Distance from the bottom edge + /** + * @} + */ + + /** + * Number of pixels to pad from the edge of each cube face. + * + * @note This value is valid for only for the cubemap projection type + * (@ref AV_SPHERICAL_CUBEMAP), and should be ignored in all other + * cases. + */ + uint32_t padding; +} AVSphericalMapping; + +/** + * Allocate a AVSphericalVideo structure and initialize its fields to default + * values. + * + * @return the newly allocated struct or NULL on failure + */ +AVSphericalMapping *liteav_av_spherical_alloc(size_t *size); + +/** + * Convert the @ref bounding fields from an AVSphericalVideo + * from 0.32 fixed point to pixels. + * + * @param map The AVSphericalVideo map to read bound values from. + * @param width Width of the current frame or stream. + * @param height Height of the current frame or stream. + * @param left Pixels from the left edge. + * @param top Pixels from the top edge. + * @param right Pixels from the right edge. + * @param bottom Pixels from the bottom edge. 
+ */ +void liteav_av_spherical_tile_bounds(const AVSphericalMapping *map, + size_t width, size_t height, + size_t *left, size_t *top, + size_t *right, size_t *bottom); + +/** + * Provide a human-readable name of a given AVSphericalProjection. + * + * @param projection The input AVSphericalProjection. + * + * @return The name of the AVSphericalProjection, or "unknown". + */ +const char *liteav_av_spherical_projection_name(enum AVSphericalProjection projection); + +/** + * Get the AVSphericalProjection form a human-readable name. + * + * @param name The input string. + * + * @return The AVSphericalProjection value, or -1 if not found. + */ +int liteav_av_spherical_from_name(const char *name); +/** + * @} + * @} + */ + +#endif /* AVUTIL_SPHERICAL_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/stereo3d.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/stereo3d.h new file mode 100644 index 0000000..74c0421 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/stereo3d.h @@ -0,0 +1,234 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2013 Vittorio Giovara <vittorio.giovara@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Stereoscopic video + */ + +#ifndef AVUTIL_STEREO3D_H +#define AVUTIL_STEREO3D_H + +#include <stdint.h> + +#include "frame.h" + +/** + * @addtogroup lavu_video + * @{ + * + * @defgroup lavu_video_stereo3d Stereo3D types and functions + * @{ + */ + +/** + * @addtogroup lavu_video_stereo3d + * A stereoscopic video file consists in multiple views embedded in a single + * frame, usually describing two views of a scene. This file describes all + * possible codec-independent view arrangements. + * */ + +/** + * List of possible 3D Types + */ +enum AVStereo3DType { + /** + * Video is not stereoscopic (and metadata has to be there). + */ + AV_STEREO3D_2D, + + /** + * Views are next to each other. + * + * @code{.unparsed} + * LLLLRRRR + * LLLLRRRR + * LLLLRRRR + * ... + * @endcode + */ + AV_STEREO3D_SIDEBYSIDE, + + /** + * Views are on top of each other. + * + * @code{.unparsed} + * LLLLLLLL + * LLLLLLLL + * RRRRRRRR + * RRRRRRRR + * @endcode + */ + AV_STEREO3D_TOPBOTTOM, + + /** + * Views are alternated temporally. + * + * @code{.unparsed} + * frame0 frame1 frame2 ... + * LLLLLLLL RRRRRRRR LLLLLLLL + * LLLLLLLL RRRRRRRR LLLLLLLL + * LLLLLLLL RRRRRRRR LLLLLLLL + * ... ... ... + * @endcode + */ + AV_STEREO3D_FRAMESEQUENCE, + + /** + * Views are packed in a checkerboard-like structure per pixel. + * + * @code{.unparsed} + * LRLRLRLR + * RLRLRLRL + * LRLRLRLR + * ... + * @endcode + */ + AV_STEREO3D_CHECKERBOARD, + + /** + * Views are next to each other, but when upscaling + * apply a checkerboard pattern. 
+ * + * @code{.unparsed} + * LLLLRRRR L L L L R R R R + * LLLLRRRR => L L L L R R R R + * LLLLRRRR L L L L R R R R + * LLLLRRRR L L L L R R R R + * @endcode + */ + AV_STEREO3D_SIDEBYSIDE_QUINCUNX, + + /** + * Views are packed per line, as if interlaced. + * + * @code{.unparsed} + * LLLLLLLL + * RRRRRRRR + * LLLLLLLL + * ... + * @endcode + */ + AV_STEREO3D_LINES, + + /** + * Views are packed per column. + * + * @code{.unparsed} + * LRLRLRLR + * LRLRLRLR + * LRLRLRLR + * ... + * @endcode + */ + AV_STEREO3D_COLUMNS, +}; + +/** + * List of possible view types. + */ +enum AVStereo3DView { + /** + * Frame contains two packed views. + */ + AV_STEREO3D_VIEW_PACKED, + + /** + * Frame contains only the left view. + */ + AV_STEREO3D_VIEW_LEFT, + + /** + * Frame contains only the right view. + */ + AV_STEREO3D_VIEW_RIGHT, +}; + +/** + * Inverted views, Right/Bottom represents the left view. + */ +#define AV_STEREO3D_FLAG_INVERT (1 << 0) + +/** + * Stereo 3D type: this structure describes how two videos are packed + * within a single video surface, with additional information as needed. + * + * @note The struct must be allocated with liteav_av_stereo3d_alloc() and + * its size is not a part of the public ABI. + */ +typedef struct AVStereo3D { + /** + * How views are packed within the video. + */ + enum AVStereo3DType type; + + /** + * Additional information about the frame packing. + */ + int flags; + + /** + * Determines which views are packed. + */ + enum AVStereo3DView view; +} AVStereo3D; + +/** + * Allocate an AVStereo3D structure and set its fields to default values. + * The resulting struct can be freed using liteav_av_freep(). + * + * @return An AVStereo3D filled with default values or NULL on failure. + */ +AVStereo3D *liteav_av_stereo3d_alloc(void); + +/** + * Allocate a complete AVFrameSideData and add it to the frame. + * + * @param frame The frame which side data is added to. + * + * @return The AVStereo3D structure to be filled by caller. 
+ */ +AVStereo3D *liteav_av_stereo3d_create_side_data(AVFrame *frame); + +/** + * Provide a human-readable name of a given stereo3d type. + * + * @param type The input stereo3d type value. + * + * @return The name of the stereo3d value, or "unknown". + */ +const char *liteav_av_stereo3d_type_name(unsigned int type); + +/** + * Get the AVStereo3DType form a human-readable name. + * + * @param name The input string. + * + * @return The AVStereo3DType value, or -1 if not found. + */ +int liteav_av_stereo3d_from_name(const char *name); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_STEREO3D_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/tea.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/tea.h new file mode 100644 index 0000000..5d618c0 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/tea.h @@ -0,0 +1,72 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * A 32-bit implementation of the TEA algorithm + * Copyright (c) 2015 Vesselin Bontchev + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TEA_H +#define AVUTIL_TEA_H + +#include <stdint.h> + +/** + * @file + * @brief Public header for libavutil TEA algorithm + * @defgroup lavu_tea TEA + * @ingroup lavu_crypto + * @{ + */ + +extern const int liteav_av_tea_size; + +struct AVTEA; + +/** + * Allocate an AVTEA context + * To free the struct: liteav_av_free(ptr) + */ +struct AVTEA *liteav_av_tea_alloc(void); + +/** + * Initialize an AVTEA context. + * + * @param ctx an AVTEA context + * @param key a key of 16 bytes used for encryption/decryption + * @param rounds the number of rounds in TEA (64 is the "standard") + */ +void liteav_av_tea_init(struct AVTEA *ctx, const uint8_t key[16], int rounds); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVTEA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_tea_crypt(struct AVTEA *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_TEA_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/threadmessage.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/threadmessage.h new file mode 100644 index 0000000..f6a9b31 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/threadmessage.h @@ -0,0 +1,116 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_THREADMESSAGE_H +#define AVUTIL_THREADMESSAGE_H + +typedef struct AVThreadMessageQueue AVThreadMessageQueue; + +typedef enum AVThreadMessageFlags { + + /** + * Perform non-blocking operation. + * If this flag is set, send and recv operations are non-blocking and + * return AVERROR(EAGAIN) immediately if they can not proceed. + */ + AV_THREAD_MESSAGE_NONBLOCK = 1, + +} AVThreadMessageFlags; + +/** + * Allocate a new message queue. + * + * @param mq pointer to the message queue + * @param nelem maximum number of elements in the queue + * @param elsize size of each element in the queue + * @return >=0 for success; <0 for error, in particular AVERROR(ENOSYS) if + * lavu was built without thread support + */ +int liteav_av_thread_message_queue_alloc(AVThreadMessageQueue **mq, + unsigned nelem, + unsigned elsize); + +/** + * Free a message queue. + * + * The message queue must no longer be in use by another thread. + */ +void liteav_av_thread_message_queue_free(AVThreadMessageQueue **mq); + +/** + * Send a message on the queue. + */ +int liteav_av_thread_message_queue_send(AVThreadMessageQueue *mq, + void *msg, + unsigned flags); + +/** + * Receive a message from the queue. 
+ */ +int liteav_av_thread_message_queue_recv(AVThreadMessageQueue *mq, + void *msg, + unsigned flags); + +/** + * Set the sending error code. + * + * If the error code is set to non-zero, liteav_av_thread_message_queue_send() will + * return it immediately. Conventional values, such as AVERROR_EOF or + * AVERROR(EAGAIN), can be used to cause the sending thread to stop or + * suspend its operation. + */ +void liteav_av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, + int err); + +/** + * Set the receiving error code. + * + * If the error code is set to non-zero, liteav_av_thread_message_queue_recv() will + * return it immediately when there are no longer available messages. + * Conventional values, such as AVERROR_EOF or AVERROR(EAGAIN), can be used + * to cause the receiving thread to stop or suspend its operation. + */ +void liteav_av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, + int err); + +/** + * Set the optional free message callback function which will be called if an + * operation is removing messages from the queue. + */ +void liteav_av_thread_message_queue_set_free_func(AVThreadMessageQueue *mq, + void (*free_func)(void *msg)); + +/** + * Return the current number of messages in the queue. + * + * @return the current number of messages or AVERROR(ENOSYS) if lavu was built + * without thread support + */ +int liteav_av_thread_message_queue_nb_elems(AVThreadMessageQueue *mq); + +/** + * Flush the message queue + * + * This function is mostly equivalent to reading and free-ing every message + * except that it will be done in a single operation (no lock/unlock between + * reads). 
+ */ +void liteav_av_thread_message_flush(AVThreadMessageQueue *mq); + +#endif /* AVUTIL_THREADMESSAGE_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/time.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/time.h new file mode 100644 index 0000000..37d5bf4 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/time.h @@ -0,0 +1,57 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2000-2003 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TIME_H +#define AVUTIL_TIME_H + +#include <stdint.h> + +/** + * Get the current time in microseconds. + */ +int64_t liteav_av_gettime(void); + +/** + * Get the current time in microseconds since some unspecified starting point. + * On platforms that support it, the time comes from a monotonic clock + * This property makes this time source ideal for measuring relative time. + * The returned values may not be monotonic on platforms where a monotonic + * clock is not available. 
+ */ +int64_t liteav_av_gettime_relative(void); + +/** + * Indicates with a boolean result if the liteav_av_gettime_relative() time source + * is monotonic. + */ +int liteav_av_gettime_relative_is_monotonic(void); + +/** + * Sleep for a period of time. Although the duration is expressed in + * microseconds, the actual delay may be rounded to the precision of the + * system timer. + * + * @param usec Number of microseconds to sleep. + * @return zero on success or (negative) error code. + */ +int liteav_av_usleep(unsigned usec); + +#endif /* AVUTIL_TIME_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/timecode.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/timecode.h new file mode 100644 index 0000000..adc8c46 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/timecode.h @@ -0,0 +1,141 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2006 Smartjog S.A.S, Baptiste Coudurier <baptiste.coudurier@gmail.com> + * Copyright (c) 2011-2012 Smartjog S.A.S, Clément Bœsch <clement.boesch@smartjog.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Timecode helpers header + */ + +#ifndef AVUTIL_TIMECODE_H +#define AVUTIL_TIMECODE_H + +#include <stdint.h> +#include "rational.h" + +#define AV_TIMECODE_STR_SIZE 23 + +enum AVTimecodeFlag { + AV_TIMECODE_FLAG_DROPFRAME = 1<<0, ///< timecode is drop frame + AV_TIMECODE_FLAG_24HOURSMAX = 1<<1, ///< timecode wraps after 24 hours + AV_TIMECODE_FLAG_ALLOWNEGATIVE = 1<<2, ///< negative time values are allowed +}; + +typedef struct { + int start; ///< timecode frame start (first base frame number) + uint32_t flags; ///< flags such as drop frame, +24 hours support, ... + AVRational rate; ///< frame rate in rational form + unsigned fps; ///< frame per second; must be consistent with the rate field +} AVTimecode; + +/** + * Adjust frame number for NTSC drop frame time code. + * + * @param framenum frame number to adjust + * @param fps frame per second, 30 or 60 + * @return adjusted frame number + * @warning adjustment is only valid in NTSC 29.97 and 59.94 + */ +int liteav_av_timecode_adjust_ntsc_framenum2(int framenum, int fps); + +/** + * Convert frame number to SMPTE 12M binary representation. + * + * @param tc timecode data correctly initialized + * @param framenum frame number + * @return the SMPTE binary representation + * + * @note Frame number adjustment is automatically done in case of drop timecode, + * you do NOT have to call liteav_av_timecode_adjust_ntsc_framenum2(). + * @note The frame number is relative to tc->start. + * @note Color frame (CF), binary group flags (BGF) and biphase mark polarity + * correction (PC) bits are set to zero. + */ +uint32_t liteav_av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum); + +/** + * Load timecode string in buf. 
+ * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tc timecode data correctly initialized + * @param framenum frame number + * @return the buf parameter + * + * @note Timecode representation can be a negative timecode and have more than + * 24 hours, but will only be honored if the flags are correctly set. + * @note The frame number is relative to tc->start. + */ +char *liteav_av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum); + +/** + * Get the timecode string from the SMPTE timecode format. + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tcsmpte the 32-bit SMPTE timecode + * @param prevent_df prevent the use of a drop flag when it is known the DF bit + * is arbitrary + * @return the buf parameter + */ +char *liteav_av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df); + +/** + * Get the timecode string from the 25-bit timecode format (MPEG GOP format). + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tc25bit the 25-bits timecode + * @return the buf parameter + */ +char *liteav_av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit); + +/** + * Init a timecode struct with the passed parameters. + * + * @param log_ctx a pointer to an arbitrary struct of which the first field + * is a pointer to an AVClass struct (used for liteav_av_log) + * @param tc pointer to an allocated AVTimecode + * @param rate frame rate in rational form + * @param flags miscellaneous flags such as drop frame, +24 hours, ... + * (see AVTimecodeFlag) + * @param frame_start the first frame number + * @return 0 on success, AVERROR otherwise + */ +int liteav_av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start, void *log_ctx); + +/** + * Parse timecode representation (hh:mm:ss[:;.]ff). 
+ * + * @param log_ctx a pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct (used for liteav_av_log). + * @param tc pointer to an allocated AVTimecode + * @param rate frame rate in rational form + * @param str timecode string which will determine the frame start + * @return 0 on success, AVERROR otherwise + */ +int liteav_av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx); + +/** + * Check if the timecode feature is available for the given frame rate + * + * @return 0 if supported, <0 otherwise + */ +int liteav_av_timecode_check_frame_rate(AVRational rate); + +#endif /* AVUTIL_TIMECODE_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/timestamp.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/timestamp.h new file mode 100644 index 0000000..e082f01 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/timestamp.h @@ -0,0 +1,78 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * timestamp utils, mostly useful for debugging/logging purposes + */ + +#ifndef AVUTIL_TIMESTAMP_H +#define AVUTIL_TIMESTAMP_H + +#include "common.h" + +#if defined(__cplusplus) && !defined(__STDC_FORMAT_MACROS) && !defined(PRId64) +#error missing -D__STDC_FORMAT_MACROS / #define __STDC_FORMAT_MACROS +#endif + +#define AV_TS_MAX_STRING_SIZE 32 + +/** + * Fill the provided buffer with a string containing a timestamp + * representation. + * + * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE + * @param ts the timestamp to represent + * @return the buffer in input + */ +static inline char *av_ts_make_string(char *buf, int64_t ts) +{ + if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); + else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%" PRId64, ts); + return buf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. + */ +#define av_ts2str(ts) av_ts_make_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts) + +/** + * Fill the provided buffer with a string containing a timestamp time + * representation. + * + * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE + * @param ts the timestamp to represent + * @param tb the timebase of the timestamp + * @return the buffer in input + */ +static inline char *av_ts_make_time_string(char *buf, int64_t ts, AVRational *tb) +{ + if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); + else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%.6g", av_q2d(*tb) * ts); + return buf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. 
+ */ +#define av_ts2timestr(ts, tb) av_ts_make_time_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts, tb) + +#endif /* AVUTIL_TIMESTAMP_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/tree.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/tree.h new file mode 100644 index 0000000..de0c881 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/tree.h @@ -0,0 +1,139 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * A tree container. + * @author Michael Niedermayer <michaelni@gmx.at> + */ + +#ifndef AVUTIL_TREE_H +#define AVUTIL_TREE_H + +#include "attributes.h" +#include "version.h" + +/** + * @addtogroup lavu_tree AVTree + * @ingroup lavu_data + * + * Low-complexity tree container + * + * Insertion, removal, finding equal, largest which is smaller than and + * smallest which is larger than, all have O(log n) worst-case complexity. 
+ * @{ + */ + + +struct AVTreeNode; +extern const int liteav_av_tree_node_size; + +/** + * Allocate an AVTreeNode. + */ +struct AVTreeNode *liteav_av_tree_node_alloc(void); + +/** + * Find an element. + * @param root a pointer to the root node of the tree + * @param next If next is not NULL, then next[0] will contain the previous + * element and next[1] the next element. If either does not exist, + * then the corresponding entry in next is unchanged. + * @param cmp compare function used to compare elements in the tree, + * API identical to that of Standard C's qsort + * It is guaranteed that the first and only the first argument to cmp() + * will be the key parameter to liteav_av_tree_find(), thus it could if the + * user wants, be a different type (like an opaque context). + * @return An element with cmp(key, elem) == 0 or NULL if no such element + * exists in the tree. + */ +void *liteav_av_tree_find(const struct AVTreeNode *root, void *key, + int (*cmp)(const void *key, const void *b), void *next[2]); + +/** + * Insert or remove an element. + * + * If *next is NULL, then the supplied element will be removed if it exists. + * If *next is non-NULL, then the supplied element will be inserted, unless + * it already exists in the tree. + * + * @param rootp A pointer to a pointer to the root node of the tree; note that + * the root node can change during insertions, this is required + * to keep the tree balanced. + * @param key pointer to the element key to insert in the tree + * @param next Used to allocate and free AVTreeNodes. For insertion the user + * must set it to an allocated and zeroed object of at least + * liteav_av_tree_node_size bytes size. liteav_av_tree_insert() will set it to + * NULL if it has been consumed. + * For deleting elements *next is set to NULL by the user and + * liteav_av_tree_insert() will set it to the AVTreeNode which was + * used for the removed element. 
+ * This allows the use of flat arrays, which have + * lower overhead compared to many malloced elements. + * You might want to define a function like: + * @code + * void *tree_insert(struct AVTreeNode **rootp, void *key, + * int (*cmp)(void *key, const void *b), + * AVTreeNode **next) + * { + * if (!*next) + * *next = liteav_av_mallocz(liteav_av_tree_node_size); + * return liteav_av_tree_insert(rootp, key, cmp, next); + * } + * void *tree_remove(struct AVTreeNode **rootp, void *key, + * int (*cmp)(void *key, const void *b, AVTreeNode **next)) + * { + * liteav_av_freep(next); + * return liteav_av_tree_insert(rootp, key, cmp, next); + * } + * @endcode + * @param cmp compare function used to compare elements in the tree, API identical + * to that of Standard C's qsort + * @return If no insertion happened, the found element; if an insertion or + * removal happened, then either key or NULL will be returned. + * Which one it is depends on the tree state and the implementation. You + * should make no assumptions that it's one or the other in the code. + */ +void *liteav_av_tree_insert(struct AVTreeNode **rootp, void *key, + int (*cmp)(const void *key, const void *b), + struct AVTreeNode **next); + +void liteav_av_tree_destroy(struct AVTreeNode *t); + +/** + * Apply enu(opaque, &elem) to all the elements in the tree in a given range. + * + * @param cmp a comparison function that returns < 0 for an element below the + * range, > 0 for an element above the range and == 0 for an + * element inside the range + * + * @note The cmp function should use the same ordering used to construct the + * tree. 
+ */ +void liteav_av_tree_enumerate(struct AVTreeNode *t, void *opaque, + int (*cmp)(void *opaque, void *elem), + int (*enu)(void *opaque, void *elem)); + +/** + * @} + */ + +#endif /* AVUTIL_TREE_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/twofish.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/twofish.h new file mode 100644 index 0000000..eeb1057 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/twofish.h @@ -0,0 +1,71 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * An implementation of the TwoFish algorithm + * Copyright (c) 2015 Supraja Meedinti + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TWOFISH_H +#define AVUTIL_TWOFISH_H + +#include <stdint.h> + + +/** + * @file + * @brief Public header for libavutil TWOFISH algorithm + * @defgroup lavu_twofish TWOFISH + * @ingroup lavu_crypto + * @{ + */ + +extern const int liteav_av_twofish_size; + +struct AVTWOFISH; + +/** + * Allocate an AVTWOFISH context + * To free the struct: liteav_av_free(ptr) + */ +struct AVTWOFISH *liteav_av_twofish_alloc(void); + +/** + * Initialize an AVTWOFISH context. + * + * @param ctx an AVTWOFISH context + * @param key a key of size ranging from 1 to 32 bytes used for encryption/decryption + * @param key_bits number of keybits: 128, 192, 256 If less than the required, padded with zeroes to nearest valid value; return value is 0 if key_bits is 128/192/256, -1 if less than 0, 1 otherwise + */ +int liteav_av_twofish_init(struct AVTWOFISH *ctx, const uint8_t *key, int key_bits); + +/** + * Encrypt or decrypt a buffer using a previously initialized context + * + * @param ctx an AVTWOFISH context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 16 byte blocks + * @paran iv initialization vector for CBC mode, NULL for ECB mode + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_twofish_crypt(struct AVTWOFISH *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t* iv, int decrypt); + +/** + * @} + */ +#endif /* AVUTIL_TWOFISH_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/tx.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/tx.h new file mode 100644 index 0000000..8b13dd4 --- /dev/null +++ 
b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/tx.h @@ -0,0 +1,82 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TX_H +#define AVUTIL_TX_H + +#include <stdint.h> +#include <stddef.h> + +typedef struct AVTXContext AVTXContext; + +typedef struct AVComplexFloat { + float re, im; +} AVComplexFloat; + +enum AVTXType { + /** + * Standard complex to complex FFT with sample data type AVComplexFloat. + * Scaling currently unsupported + */ + AV_TX_FLOAT_FFT = 0, + /** + * Standard MDCT with sample data type of float and a scale type of + * float. Length is the frame size, not the window size (which is 2x frame) + */ + AV_TX_FLOAT_MDCT = 1, +}; + +/** + * Function pointer to a function to perform the transform. + * + * @note Using a different context than the one allocated during liteav_av_tx_init() + * is not allowed. 
+ * + * @param s the transform context + * @param out the output array + * @param in the input array + * @param stride the input or output stride (depending on transform direction) + * in bytes, currently implemented for all MDCT transforms + */ +typedef void (*av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride); + +/** + * Initialize a transform context with the given configuration + * Currently power of two lengths from 4 to 131072 are supported, along with + * any length decomposable to a power of two and either 3, 5 or 15. + * + * @param ctx the context to allocate, will be NULL on error + * @param tx pointer to the transform function pointer to set + * @param type type the type of transform + * @param inv whether to do an inverse or a forward transform + * @param len the size of the transform in samples + * @param scale pointer to the value to scale the output if supported by type + * @param flags currently unused + * + * @return 0 on success, negative error code on failure + */ +int liteav_av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, + int inv, int len, const void *scale, uint64_t flags); + +/** + * Frees a context and sets ctx to NULL, does nothing when ctx == NULL + */ +void liteav_av_tx_uninit(AVTXContext **ctx); + +#endif /* AVUTIL_TX_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/version.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/version.h new file mode 100644 index 0000000..8f6da6a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/version.h @@ -0,0 +1,139 @@ +/* + * copyright (c) 2003 Fabrice Bellard + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu + * Libavutil version macros + */ + +#ifndef AVUTIL_VERSION_H +#define AVUTIL_VERSION_H + +#include "macros.h" + +/** + * @addtogroup version_utils + * + * Useful to check and match library version in order to maintain + * backward compatibility. + * + * The FFmpeg libraries follow a versioning sheme very similar to + * Semantic Versioning (http://semver.org/) + * The difference is that the component called PATCH is called MICRO in FFmpeg + * and its value is reset to 100 instead of 0 to keep it above or equal to 100. + * Also we do not increase MICRO for every bugfix or change in git master. + * + * Prior to FFmpeg 3.2 point releases did not change any lib version number to + * avoid aliassing different git master checkouts. + * Starting with FFmpeg 3.2, the released library versions will occupy + * a separate MAJOR.MINOR that is not used on the master development branch. + * That is if we branch a release of master 55.10.123 we will bump to 55.11.100 + * for the release and master will continue at 55.12.100 after it. Each new + * point release will then bump the MICRO improving the usefulness of the lib + * versions. 
+ * + * @{ + */ + +#define AV_VERSION_INT(a, b, c) ((a)<<16 | (b)<<8 | (c)) +#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c +#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c) + +/** + * Extract version components from the full ::AV_VERSION_INT int as returned + * by functions like ::avformat_version() and ::avcodec_version() + */ +#define AV_VERSION_MAJOR(a) ((a) >> 16) +#define AV_VERSION_MINOR(a) (((a) & 0x00FF00) >> 8) +#define AV_VERSION_MICRO(a) ((a) & 0xFF) + +/** + * @} + */ + +/** + * @defgroup lavu_ver Version and Build diagnostics + * + * Macros and function useful to check at compiletime and at runtime + * which version of libavutil is in use. + * + * @{ + */ + +#define LIBAVUTIL_VERSION_MAJOR 56 +#define LIBAVUTIL_VERSION_MINOR 22 +#define LIBAVUTIL_VERSION_MICRO 100 + +#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ + LIBAVUTIL_VERSION_MINOR, \ + LIBAVUTIL_VERSION_MICRO) +#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \ + LIBAVUTIL_VERSION_MINOR, \ + LIBAVUTIL_VERSION_MICRO) +#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT + +#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION) + +/** + * @defgroup lavu_depr_guards Deprecation Guards + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + * + * @note, when bumping the major version it is recommended to manually + * disable each FF_API_* in its own commit instead of disabling them all + * at once through the bump. This improves the git bisect-ability of the change. 
+ * + * @{ + */ + +#ifndef FF_API_VAAPI +#define FF_API_VAAPI (LIBAVUTIL_VERSION_MAJOR < 57) +#endif +#ifndef FF_API_FRAME_QP +#define FF_API_FRAME_QP (LIBAVUTIL_VERSION_MAJOR < 57) +#endif +#ifndef FF_API_PLUS1_MINUS1 +#define FF_API_PLUS1_MINUS1 (LIBAVUTIL_VERSION_MAJOR < 57) +#endif +#ifndef FF_API_ERROR_FRAME +#define FF_API_ERROR_FRAME (LIBAVUTIL_VERSION_MAJOR < 57) +#endif +#ifndef FF_API_PKT_PTS +#define FF_API_PKT_PTS (LIBAVUTIL_VERSION_MAJOR < 57) +#endif +#ifndef FF_API_CRYPTO_SIZE_T +#define FF_API_CRYPTO_SIZE_T (LIBAVUTIL_VERSION_MAJOR < 57) +#endif +#ifndef FF_API_FRAME_GET_SET +#define FF_API_FRAME_GET_SET (LIBAVUTIL_VERSION_MAJOR < 57) +#endif +#ifndef FF_API_PSEUDOPAL +#define FF_API_PSEUDOPAL (LIBAVUTIL_VERSION_MAJOR < 57) +#endif + + +/** + * @} + * @} + */ + +#endif /* AVUTIL_VERSION_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/xtea.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/xtea.h new file mode 100644 index 0000000..66a6842 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libavutil/xtea.h @@ -0,0 +1,95 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * A 32-bit implementation of the XTEA algorithm + * Copyright (c) 2012 Samuel Pitoiset + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_XTEA_H +#define AVUTIL_XTEA_H + +#include <stdint.h> + +/** + * @file + * @brief Public header for libavutil XTEA algorithm + * @defgroup lavu_xtea XTEA + * @ingroup lavu_crypto + * @{ + */ + +typedef struct AVXTEA { + uint32_t key[16]; +} AVXTEA; + +/** + * Allocate an AVXTEA context. + */ +AVXTEA *liteav_av_xtea_alloc(void); + +/** + * Initialize an AVXTEA context. + * + * @param ctx an AVXTEA context + * @param key a key of 16 bytes used for encryption/decryption, + * interpreted as big endian 32 bit numbers + */ +void liteav_av_xtea_init(struct AVXTEA *ctx, const uint8_t key[16]); + +/** + * Initialize an AVXTEA context. + * + * @param ctx an AVXTEA context + * @param key a key of 16 bytes used for encryption/decryption, + * interpreted as little endian 32 bit numbers + */ +void liteav_av_xtea_le_init(struct AVXTEA *ctx, const uint8_t key[16]); + +/** + * Encrypt or decrypt a buffer using a previously initialized context, + * in big endian format. + * + * @param ctx an AVXTEA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_xtea_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context, + * in little endian format. 
+ * + * @param ctx an AVXTEA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_xtea_le_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_XTEA_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswresample/swresample.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswresample/swresample.h new file mode 100644 index 0000000..cb7e7b7 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswresample/swresample.h @@ -0,0 +1,581 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2011-2013 Michael Niedermayer (michaelni@gmx.at) + * + * This file is part of libswresample + * + * libswresample is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * libswresample is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with libswresample; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWRESAMPLE_SWRESAMPLE_H +#define SWRESAMPLE_SWRESAMPLE_H + +/** + * @file + * @ingroup lswr + * libswresample public header + */ + +/** + * @defgroup lswr libswresample + * @{ + * + * Audio resampling, sample format conversion and mixing library. + * + * Interaction with lswr is done through SwrContext, which is + * allocated with swr_alloc() or liteav_swr_alloc_set_opts(). It is opaque, so all parameters + * must be set with the @ref avoptions API. + * + * The first thing you will need to do in order to use lswr is to allocate + * SwrContext. This can be done with swr_alloc() or liteav_swr_alloc_set_opts(). If you + * are using the former, you must set options through the @ref avoptions API. + * The latter function provides the same feature, but it allows you to set some + * common options in the same statement. + * + * For example the following code will setup conversion from planar float sample + * format to interleaved signed 16-bit integer, downsampling from 48kHz to + * 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing + * matrix). This is using the swr_alloc() function. 
+ * @code + * SwrContext *swr = swr_alloc(); + * liteav_av_opt_set_channel_layout(swr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0); + * liteav_av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); + * liteav_av_opt_set_int(swr, "in_sample_rate", 48000, 0); + * liteav_av_opt_set_int(swr, "out_sample_rate", 44100, 0); + * liteav_av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0); + * liteav_av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); + * @endcode + * + * The same job can be done using liteav_swr_alloc_set_opts() as well: + * @code + * SwrContext *swr = liteav_swr_alloc_set_opts(NULL, // we're allocating a new context + * AV_CH_LAYOUT_STEREO, // out_ch_layout + * AV_SAMPLE_FMT_S16, // out_sample_fmt + * 44100, // out_sample_rate + * AV_CH_LAYOUT_5POINT1, // in_ch_layout + * AV_SAMPLE_FMT_FLTP, // in_sample_fmt + * 48000, // in_sample_rate + * 0, // log_offset + * NULL); // log_ctx + * @endcode + * + * Once all values have been set, it must be initialized with liteav_swr_init(). If + * you need to change the conversion parameters, you can change the parameters + * using @ref AVOptions, as described above in the first example; or by using + * liteav_swr_alloc_set_opts(), but with the first argument the allocated context. + * You must then call liteav_swr_init() again. + * + * The conversion itself is done by repeatedly calling liteav_swr_convert(). + * Note that the samples may get buffered in swr if you provide insufficient + * output space or if sample rate conversion is done, which requires "future" + * samples. Samples that do not require future input can be retrieved at any + * time by using liteav_swr_convert() (in_count can be set to 0). + * At the end of conversion the resampling buffer can be flushed by calling + * liteav_swr_convert() with NULL in and 0 in_count. 
+ * + * The samples used in the conversion process can be managed with the libavutil + * @ref lavu_sampmanip "samples manipulation" API, including liteav_av_samples_alloc() + * function used in the following example. + * + * The delay between input and output, can at any time be found by using + * liteav_swr_get_delay(). + * + * The following code demonstrates the conversion loop assuming the parameters + * from above and caller-defined functions get_input() and handle_output(): + * @code + * uint8_t **input; + * int in_samples; + * + * while (get_input(&input, &in_samples)) { + * uint8_t *output; + * int out_samples = liteav_av_rescale_rnd(liteav_swr_get_delay(swr, 48000) + + * in_samples, 44100, 48000, AV_ROUND_UP); + * liteav_av_samples_alloc(&output, NULL, 2, out_samples, + * AV_SAMPLE_FMT_S16, 0); + * out_samples = liteav_swr_convert(swr, &output, out_samples, + * input, in_samples); + * handle_output(output, out_samples); + * liteav_av_freep(&output); + * } + * @endcode + * + * When the conversion is finished, the conversion + * context and everything associated with it must be freed with liteav_swr_free(). + * A liteav_swr_close() function is also available, but it exists mainly for + * compatibility with libavresample, and is not required to be called. + * + * There will be no memory leak if the data is not completely flushed before + * liteav_swr_free(). + */ + +#include <stdint.h> +#include "libavutil/channel_layout.h" +#include "libavutil/frame.h" +#include "libavutil/samplefmt.h" + +#include "libswresample/version.h" + +/** + * @name Option constants + * These constants are used for the @ref avoptions interface for lswr. + * @{ + * + */ + +#define SWR_FLAG_RESAMPLE 1 ///< Force resampling even if equal sample rate +//TODO use int resample ? +//long term TODO can we enable this dynamically? 
+ +/** Dithering algorithms */ +enum SwrDitherType { + SWR_DITHER_NONE = 0, + SWR_DITHER_RECTANGULAR, + SWR_DITHER_TRIANGULAR, + SWR_DITHER_TRIANGULAR_HIGHPASS, + + SWR_DITHER_NS = 64, ///< not part of API/ABI + SWR_DITHER_NS_LIPSHITZ, + SWR_DITHER_NS_F_WEIGHTED, + SWR_DITHER_NS_MODIFIED_E_WEIGHTED, + SWR_DITHER_NS_IMPROVED_E_WEIGHTED, + SWR_DITHER_NS_SHIBATA, + SWR_DITHER_NS_LOW_SHIBATA, + SWR_DITHER_NS_HIGH_SHIBATA, + SWR_DITHER_NB, ///< not part of API/ABI +}; + +/** Resampling Engines */ +enum SwrEngine { + SWR_ENGINE_SWR, /**< SW Resampler */ + SWR_ENGINE_SOXR, /**< SoX Resampler */ + SWR_ENGINE_NB, ///< not part of API/ABI +}; + +/** Resampling Filter Types */ +enum SwrFilterType { + SWR_FILTER_TYPE_CUBIC, /**< Cubic */ + SWR_FILTER_TYPE_BLACKMAN_NUTTALL, /**< Blackman Nuttall windowed sinc */ + SWR_FILTER_TYPE_KAISER, /**< Kaiser windowed sinc */ +}; + +/** + * @} + */ + +/** + * The libswresample context. Unlike libavcodec and libavformat, this structure + * is opaque. This means that if you would like to set options, you must use + * the @ref avoptions API and cannot directly set values to members of the + * structure. + */ +typedef struct SwrContext SwrContext; + +/** + * Get the AVClass for SwrContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see liteav_av_opt_find(). + * @return the AVClass of SwrContext + */ +const AVClass *swr_get_class(void); + +/** + * @name SwrContext constructor functions + * @{ + */ + +/** + * Allocate SwrContext. + * + * If you use this function you will need to set the parameters (manually or + * with liteav_swr_alloc_set_opts()) before calling liteav_swr_init(). + * + * @see liteav_swr_alloc_set_opts(), liteav_swr_init(), liteav_swr_free() + * @return NULL on error, allocated context otherwise + */ +struct SwrContext *swr_alloc(void); + +/** + * Initialize context after user parameters have been set. + * @note The context must be configured using the AVOption API. 
+ * + * @see liteav_av_opt_set_int() + * @see liteav_av_opt_set_dict() + * + * @param[in,out] s Swr context to initialize + * @return AVERROR error code in case of failure. + */ +int liteav_swr_init(struct SwrContext *s); + +/** + * Check whether an swr context has been initialized or not. + * + * @param[in] s Swr context to check + * @see liteav_swr_init() + * @return positive if it has been initialized, 0 if not initialized + */ +int liteav_swr_is_initialized(struct SwrContext *s); + +/** + * Allocate SwrContext if needed and set/reset common parameters. + * + * This function does not require s to be allocated with swr_alloc(). On the + * other hand, swr_alloc() can use liteav_swr_alloc_set_opts() to set the parameters + * on the allocated context. + * + * @param s existing Swr context if available, or NULL if not + * @param out_ch_layout output channel layout (AV_CH_LAYOUT_*) + * @param out_sample_fmt output sample format (AV_SAMPLE_FMT_*). + * @param out_sample_rate output sample rate (frequency in Hz) + * @param in_ch_layout input channel layout (AV_CH_LAYOUT_*) + * @param in_sample_fmt input sample format (AV_SAMPLE_FMT_*). + * @param in_sample_rate input sample rate (frequency in Hz) + * @param log_offset logging level offset + * @param log_ctx parent logging context, can be NULL + * + * @see liteav_swr_init(), liteav_swr_free() + * @return NULL on error, allocated context otherwise + */ +struct SwrContext *liteav_swr_alloc_set_opts(struct SwrContext *s, + int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, + int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, + int resampleUseSimd, + int log_offset, void *log_ctx); + +/** + * @} + * + * @name SwrContext destructor functions + * @{ + */ + +/** + * Free the given SwrContext and set the pointer to NULL. 
+ * + * @param[in] s a pointer to a pointer to Swr context + */ +void liteav_swr_free(struct SwrContext **s); + +/** + * Closes the context so that liteav_swr_is_initialized() returns 0. + * + * The context can be brought back to life by running liteav_swr_init(), + * liteav_swr_init() can also be used without liteav_swr_close(). + * This function is mainly provided for simplifying the usecase + * where one tries to support libavresample and libswresample. + * + * @param[in,out] s Swr context to be closed + */ +void liteav_swr_close(struct SwrContext *s); + +/** + * @} + * + * @name Core conversion functions + * @{ + */ + +/** Convert audio. + * + * in and in_count can be set to 0 to flush the last few samples out at the + * end. + * + * If more input is provided than output space, then the input will be buffered. + * You can avoid this buffering by using liteav_swr_get_out_samples() to retrieve an + * upper bound on the required number of output samples for the given number of + * input samples. Conversion will run directly without copying whenever possible. + * + * @param s allocated Swr context, with parameters set + * @param out output buffers, only the first one need be set in case of packed audio + * @param out_count amount of space available for output in samples per channel + * @param in input buffers, only the first one need to be set in case of packed audio + * @param in_count number of input samples available in one channel + * + * @return number of samples output per channel, negative value on error + */ +int liteav_swr_convert(struct SwrContext *s, uint8_t **out, int out_count, + const uint8_t **in , int in_count); + +/** + * Convert the next timestamp from input to output + * timestamps are in 1/(in_sample_rate * out_sample_rate) units. + * + * @note There are 2 slightly differently behaving modes. 
+ * @li When automatic timestamp compensation is not used, (min_compensation >= FLT_MAX) + * in this case timestamps will be passed through with delays compensated + * @li When automatic timestamp compensation is used, (min_compensation < FLT_MAX) + * in this case the output timestamps will match output sample numbers. + * See ffmpeg-resampler(1) for the two modes of compensation. + * + * @param s[in] initialized Swr context + * @param pts[in] timestamp for the next input sample, INT64_MIN if unknown + * @see liteav_swr_set_compensation(), liteav_swr_drop_output(), and liteav_swr_inject_silence() are + * function used internally for timestamp compensation. + * @return the output timestamp for the next output sample + */ +int64_t liteav_swr_next_pts(struct SwrContext *s, int64_t pts); + +/** + * @} + * + * @name Low-level option setting functions + * These functons provide a means to set low-level options that is not possible + * with the AVOption API. + * @{ + */ + +/** + * Activate resampling compensation ("soft" compensation). This function is + * internally called when needed in liteav_swr_next_pts(). + * + * @param[in,out] s allocated Swr context. If it is not initialized, + * or SWR_FLAG_RESAMPLE is not set, liteav_swr_init() is + * called with the flag set. + * @param[in] sample_delta delta in PTS per sample + * @param[in] compensation_distance number of samples to compensate for + * @return >= 0 on success, AVERROR error codes if: + * @li @c s is NULL, + * @li @c compensation_distance is less than 0, + * @li @c compensation_distance is 0 but sample_delta is not, + * @li compensation unsupported by resampler, or + * @li liteav_swr_init() fails when called. + */ +int liteav_swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance); + +/** + * Set a customized input channel mapping. 
+ * + * @param[in,out] s allocated Swr context, not yet initialized + * @param[in] channel_map customized input channel mapping (array of channel + * indexes, -1 for a muted channel) + * @return >= 0 on success, or AVERROR error code in case of failure. + */ +int liteav_swr_set_channel_mapping(struct SwrContext *s, const int *channel_map); + +/** + * Generate a channel mixing matrix. + * + * This function is the one used internally by libswresample for building the + * default mixing matrix. It is made public just as a utility function for + * building custom matrices. + * + * @param in_layout input channel layout + * @param out_layout output channel layout + * @param center_mix_level mix level for the center channel + * @param surround_mix_level mix level for the surround channel(s) + * @param lfe_mix_level mix level for the low-frequency effects channel + * @param rematrix_maxval if 1.0, coefficients will be normalized to prevent + * overflow. if INT_MAX, coefficients will not be + * normalized. + * @param[out] matrix mixing coefficients; matrix[i + stride * o] is + * the weight of input channel i in output channel o. + * @param stride distance between adjacent input channels in the + * matrix array + * @param matrix_encoding matrixed stereo downmix mode (e.g. dplii) + * @param log_ctx parent logging context, can be NULL + * @return 0 on success, negative AVERROR code on failure + */ +int liteav_swr_build_matrix(uint64_t in_layout, uint64_t out_layout, + double center_mix_level, double surround_mix_level, + double lfe_mix_level, double rematrix_maxval, + double rematrix_volume, double *matrix, + int stride, enum AVMatrixEncoding matrix_encoding, + void *log_ctx); + +/** + * Set a customized remix matrix. 
+ * + * @param s allocated Swr context, not yet initialized + * @param matrix remix coefficients; matrix[i + stride * o] is + * the weight of input channel i in output channel o + * @param stride offset between lines of the matrix + * @return >= 0 on success, or AVERROR error code in case of failure. + */ +int liteav_swr_set_matrix(struct SwrContext *s, const double *matrix, int stride); + +/** + * @} + * + * @name Sample handling functions + * @{ + */ + +/** + * Drops the specified number of output samples. + * + * This function, along with liteav_swr_inject_silence(), is called by liteav_swr_next_pts() + * if needed for "hard" compensation. + * + * @param s allocated Swr context + * @param count number of samples to be dropped + * + * @return >= 0 on success, or a negative AVERROR code on failure + */ +int liteav_swr_drop_output(struct SwrContext *s, int count); + +/** + * Injects the specified number of silence samples. + * + * This function, along with liteav_swr_drop_output(), is called by liteav_swr_next_pts() + * if needed for "hard" compensation. + * + * @param s allocated Swr context + * @param count number of samples to be dropped + * + * @return >= 0 on success, or a negative AVERROR code on failure + */ +int liteav_swr_inject_silence(struct SwrContext *s, int count); + +/** + * Gets the delay the next input sample will experience relative to the next output sample. + * + * Swresample can buffer data if more input has been provided than available + * output space, also converting between sample rates needs a delay. + * This function returns the sum of all such delays. + * The exact delay is not necessarily an integer value in either input or + * output sample rate. Especially when downsampling by a large value, the + * output sample rate may be a poor choice to represent the delay, similarly + * for upsampling and the input sample rate. 
+ * + * @param s swr context + * @param base timebase in which the returned delay will be: + * @li if it's set to 1 the returned delay is in seconds + * @li if it's set to 1000 the returned delay is in milliseconds + * @li if it's set to the input sample rate then the returned + * delay is in input samples + * @li if it's set to the output sample rate then the returned + * delay is in output samples + * @li if it's the least common multiple of in_sample_rate and + * out_sample_rate then an exact rounding-free delay will be + * returned + * @returns the delay in 1 / @c base units. + */ +int64_t liteav_swr_get_delay(struct SwrContext *s, int64_t base); + +/** + * Find an upper bound on the number of samples that the next liteav_swr_convert + * call will output, if called with in_samples of input samples. This + * depends on the internal state, and anything changing the internal state + * (like further liteav_swr_convert() calls) will may change the number of samples + * liteav_swr_get_out_samples() returns for the same number of input samples. + * + * @param in_samples number of input samples. + * @note any call to liteav_swr_inject_silence(), liteav_swr_convert(), liteav_swr_next_pts() + * or liteav_swr_set_compensation() invalidates this limit + * @note it is recommended to pass the correct available buffer size + * to all functions like liteav_swr_convert() even if liteav_swr_get_out_samples() + * indicates that less would be used. + * @returns an upper bound on the number of samples that the next liteav_swr_convert + * will output or a negative value to indicate an error + */ +int liteav_swr_get_out_samples(struct SwrContext *s, int in_samples); + +/** + * @} + * + * @name Configuration accessors + * @{ + */ + +/** + * Return the @ref LIBSWRESAMPLE_VERSION_INT constant. + * + * This is useful to check if the build-time libswresample has the same version + * as the run-time one. 
+ * + * @returns the unsigned int-typed version + */ +unsigned liteav_swresample_version(void); + +/** + * Return the swr build-time configuration. + * + * @returns the build-time @c ./configure flags + */ +const char *liteav_swresample_configuration(void); + +/** + * Return the swr license. + * + * @returns the license of libswresample, determined at build-time + */ +const char *liteav_swresample_license(void); + +/** + * @} + * + * @name AVFrame based API + * @{ + */ + +/** + * Convert the samples in the input AVFrame and write them to the output AVFrame. + * + * Input and output AVFrames must have channel_layout, sample_rate and format set. + * + * If the output AVFrame does not have the data pointers allocated the nb_samples + * field will be set using liteav_av_frame_get_buffer() + * is called to allocate the frame. + * + * The output AVFrame can be NULL or have fewer allocated samples than required. + * In this case, any remaining samples not written to the output will be added + * to an internal FIFO buffer, to be returned at the next call to this function + * or to liteav_swr_convert(). + * + * If converting sample rate, there may be data remaining in the internal + * resampling delay buffer. liteav_swr_get_delay() tells the number of + * remaining samples. To get this data as output, call this function or + * liteav_swr_convert() with NULL input. + * + * If the SwrContext configuration does not match the output and + * input AVFrame settings the conversion does not take place and depending on + * which AVFrame is not matching AVERROR_OUTPUT_CHANGED, AVERROR_INPUT_CHANGED + * or the result of a bitwise-OR of them is returned. + * + * @see swr_delay() + * @see liteav_swr_convert() + * @see liteav_swr_get_delay() + * + * @param swr audio resample context + * @param output output AVFrame + * @param input input AVFrame + * @return 0 on success, AVERROR on failure or nonmatching + * configuration. 
+ */ +int liteav_swr_convert_frame(SwrContext *swr, + AVFrame *output, const AVFrame *input); + +/** + * Configure or reconfigure the SwrContext using the information + * provided by the AVFrames. + * + * The original resampling context is reset even on failure. + * The function calls liteav_swr_close() internally if the context is open. + * + * @see liteav_swr_close(); + * + * @param swr audio resample context + * @param output output AVFrame + * @param input input AVFrame + * @return 0 on success, AVERROR on failure. + */ +int liteav_swr_config_frame(SwrContext *swr, const AVFrame *out, const AVFrame *in); + +/** + * @} + * @} + */ + +#endif /* SWRESAMPLE_SWRESAMPLE_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswresample/version.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswresample/version.h new file mode 100644 index 0000000..8555d55 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswresample/version.h @@ -0,0 +1,45 @@ +/* + * Version macros. + * + * This file is part of libswresample + * + * libswresample is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * libswresample is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with libswresample; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWRESAMPLE_VERSION_H +#define SWRESAMPLE_VERSION_H + +/** + * @file + * Libswresample version macros + */ + +#include "libavutil/avutil.h" + +#define LIBSWRESAMPLE_VERSION_MAJOR 3 +#define LIBSWRESAMPLE_VERSION_MINOR 3 +#define LIBSWRESAMPLE_VERSION_MICRO 100 + +#define LIBSWRESAMPLE_VERSION_INT AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \ + LIBSWRESAMPLE_VERSION_MINOR, \ + LIBSWRESAMPLE_VERSION_MICRO) +#define LIBSWRESAMPLE_VERSION AV_VERSION(LIBSWRESAMPLE_VERSION_MAJOR, \ + LIBSWRESAMPLE_VERSION_MINOR, \ + LIBSWRESAMPLE_VERSION_MICRO) +#define LIBSWRESAMPLE_BUILD LIBSWRESAMPLE_VERSION_INT + +#define LIBSWRESAMPLE_IDENT "SwR" AV_STRINGIFY(LIBSWRESAMPLE_VERSION) + +#endif /* SWRESAMPLE_VERSION_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswscale/swscale.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswscale/swscale.h new file mode 100644 index 0000000..312a4fc --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswscale/swscale.h @@ -0,0 +1,337 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWSCALE_SWSCALE_H +#define SWSCALE_SWSCALE_H + +/** + * @file + * @ingroup libsws + * external API header + */ + +#include <stdint.h> + +#include "libavutil/avutil.h" +#include "libavutil/log.h" +#include "libavutil/pixfmt.h" +#include "version.h" + +/** + * @defgroup libsws libswscale + * Color conversion and scaling library. + * + * @{ + * + * Return the LIBSWSCALE_VERSION_INT constant. + */ +unsigned liteav_swscale_version(void); + +/** + * Return the libswscale build-time configuration. + */ +const char *liteav_swscale_configuration(void); + +/** + * Return the libswscale license. + */ +const char *liteav_swscale_license(void); + +/* values for the flags, the stuff on the command line is different */ +#define SWS_FAST_BILINEAR 1 +#define SWS_BILINEAR 2 +#define SWS_BICUBIC 4 +#define SWS_X 8 +#define SWS_POINT 0x10 +#define SWS_AREA 0x20 +#define SWS_BICUBLIN 0x40 +#define SWS_GAUSS 0x80 +#define SWS_SINC 0x100 +#define SWS_LANCZOS 0x200 +#define SWS_SPLINE 0x400 + +#define SWS_SRC_V_CHR_DROP_MASK 0x30000 +#define SWS_SRC_V_CHR_DROP_SHIFT 16 + +#define SWS_PARAM_DEFAULT 123456 + +#define SWS_PRINT_INFO 0x1000 + +//the following 3 flags are not completely implemented +//internal chrominance subsampling info +#define SWS_FULL_CHR_H_INT 0x2000 +//input subsampling info +#define SWS_FULL_CHR_H_INP 0x4000 +#define SWS_DIRECT_BGR 0x8000 +#define SWS_ACCURATE_RND 0x40000 +#define SWS_BITEXACT 0x80000 +#define SWS_ERROR_DIFFUSION 0x800000 + +#define SWS_MAX_REDUCE_CUTOFF 0.002 + +#define SWS_CS_ITU709 1 +#define SWS_CS_FCC 4 +#define SWS_CS_ITU601 5 +#define SWS_CS_ITU624 5 +#define SWS_CS_SMPTE170M 5 +#define SWS_CS_SMPTE240M 7 +#define SWS_CS_DEFAULT 5 +#define 
SWS_CS_BT2020 9 + +/** + * Return a pointer to yuv<->rgb coefficients for the given colorspace + * suitable for liteav_sws_setColorspaceDetails(). + * + * @param colorspace One of the SWS_CS_* macros. If invalid, + * SWS_CS_DEFAULT is used. + */ +const int *liteav_sws_getCoefficients(int colorspace); + +// when used for filters they must have an odd number of elements +// coeffs cannot be shared between vectors +typedef struct SwsVector { + double *coeff; ///< pointer to the list of coefficients + int length; ///< number of coefficients in the vector +} SwsVector; + +// vectors can be shared +typedef struct SwsFilter { + SwsVector *lumH; + SwsVector *lumV; + SwsVector *chrH; + SwsVector *chrV; +} SwsFilter; + +struct SwsContext; + +/** + * Return a positive value if pix_fmt is a supported input format, 0 + * otherwise. + */ +int liteav_sws_isSupportedInput(enum AVPixelFormat pix_fmt); + +/** + * Return a positive value if pix_fmt is a supported output format, 0 + * otherwise. + */ +int liteav_sws_isSupportedOutput(enum AVPixelFormat pix_fmt); + +/** + * @param[in] pix_fmt the pixel format + * @return a positive value if an endianness conversion for pix_fmt is + * supported, 0 otherwise. + */ +int liteav_sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt); + +/** + * Allocate an empty SwsContext. This must be filled and passed to + * liteav_sws_init_context(). For filling see AVOptions, options.c and + * liteav_sws_setColorspaceDetails(). + */ +struct SwsContext *liteav_sws_alloc_context(void); + +/** + * Initialize the swscaler context sws_context. + * + * @return zero or positive value on success, a negative value on + * error + */ +av_warn_unused_result +int liteav_sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter); + +/** + * Free the swscaler context swsContext. + * If swsContext is NULL, then does nothing. 
+ */ +void liteav_sws_freeContext(struct SwsContext *swsContext); + +/** + * Allocate and return an SwsContext. You need it to perform + * scaling/conversion operations using liteav_sws_scale(). + * + * @param srcW the width of the source image + * @param srcH the height of the source image + * @param srcFormat the source image format + * @param dstW the width of the destination image + * @param dstH the height of the destination image + * @param dstFormat the destination image format + * @param flags specify which algorithm and options to use for rescaling + * @param param extra parameters to tune the used scaler + * For SWS_BICUBIC param[0] and [1] tune the shape of the basis + * function, param[0] tunes f(1) and param[1] f´(1) + * For SWS_GAUSS param[0] tunes the exponent and thus cutoff + * frequency + * For SWS_LANCZOS param[0] tunes the width of the window function + * @return a pointer to an allocated context, or NULL in case of error + * @note this function is to be removed after a saner alternative is + * written + */ +struct SwsContext *liteav_sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat, + int dstW, int dstH, enum AVPixelFormat dstFormat, + int flags, SwsFilter *srcFilter, + SwsFilter *dstFilter, const double *param); + +/** + * Scale the image slice in srcSlice and put the resulting scaled + * slice in the image in dst. A slice is a sequence of consecutive + * rows in an image. + * + * Slices have to be provided in sequential order, either in + * top-bottom or bottom-top order. If slices are provided in + * non-sequential order the behavior of the function is undefined. 
+ * + * @param c the scaling context previously created with + * liteav_sws_getContext() + * @param srcSlice the array containing the pointers to the planes of + * the source slice + * @param srcStride the array containing the strides for each plane of + * the source image + * @param srcSliceY the position in the source image of the slice to + * process, that is the number (counted starting from + * zero) in the image of the first row of the slice + * @param srcSliceH the height of the source slice, that is the number + * of rows in the slice + * @param dst the array containing the pointers to the planes of + * the destination image + * @param dstStride the array containing the strides for each plane of + * the destination image + * @return the height of the output slice + */ +int liteav_sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], + const int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *const dst[], const int dstStride[]); + +/** + * @param dstRange flag indicating the while-black range of the output (1=jpeg / 0=mpeg) + * @param srcRange flag indicating the while-black range of the input (1=jpeg / 0=mpeg) + * @param table the yuv2rgb coefficients describing the output yuv space, normally liteav_ff_yuv2rgb_coeffs[x] + * @param inv_table the yuv2rgb coefficients describing the input yuv space, normally liteav_ff_yuv2rgb_coeffs[x] + * @param brightness 16.16 fixed point brightness correction + * @param contrast 16.16 fixed point contrast correction + * @param saturation 16.16 fixed point saturation correction + * @return -1 if not supported + */ +int liteav_sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4], + int srcRange, const int table[4], int dstRange, + int brightness, int contrast, int saturation); + +/** + * @return -1 if not supported + */ +int liteav_sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, + int *srcRange, int **table, int *dstRange, + int *brightness, int *contrast, int *saturation); + 
+/** + * Allocate and return an uninitialized vector with length coefficients. + */ +SwsVector *liteav_sws_allocVec(int length); + +/** + * Return a normalized Gaussian curve used to filter stuff + * quality = 3 is high quality, lower is lower quality. + */ +SwsVector *liteav_sws_getGaussianVec(double variance, double quality); + +/** + * Scale all the coefficients of a by the scalar value. + */ +void liteav_sws_scaleVec(SwsVector *a, double scalar); + +/** + * Scale all the coefficients of a so that their sum equals height. + */ +void liteav_sws_normalizeVec(SwsVector *a, double height); + +#if FF_API_SWS_VECTOR +attribute_deprecated SwsVector *liteav_sws_getConstVec(double c, int length); +attribute_deprecated SwsVector *liteav_sws_getIdentityVec(void); +attribute_deprecated void liteav_sws_convVec(SwsVector *a, SwsVector *b); +attribute_deprecated void liteav_sws_addVec(SwsVector *a, SwsVector *b); +attribute_deprecated void liteav_sws_subVec(SwsVector *a, SwsVector *b); +attribute_deprecated void liteav_sws_shiftVec(SwsVector *a, int shift); +attribute_deprecated SwsVector *liteav_sws_cloneVec(SwsVector *a); +attribute_deprecated void liteav_sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level); +#endif + +void liteav_sws_freeVec(SwsVector *a); + +SwsFilter *liteav_sws_getDefaultFilter(float lumaGBlur, float chromaGBlur, + float lumaSharpen, float chromaSharpen, + float chromaHShift, float chromaVShift, + int verbose); +void liteav_sws_freeFilter(SwsFilter *filter); + +/** + * Check if context can be reused, otherwise reallocate a new one. + * + * If context is NULL, just calls liteav_sws_getContext() to get a new + * context. Otherwise, checks if the parameters are the ones already + * saved in context. If that is the case, returns the current + * context. Otherwise, frees context and gets a new context with + * the new parameters. + * + * Be warned that srcFilter and dstFilter are not checked, they + * are assumed to remain the same. 
+ */ +struct SwsContext *liteav_sws_getCachedContext(struct SwsContext *context, + int srcW, int srcH, enum AVPixelFormat srcFormat, + int dstW, int dstH, enum AVPixelFormat dstFormat, + int flags, SwsFilter *srcFilter, + SwsFilter *dstFilter, const double *param); + +/** + * Convert an 8-bit paletted frame into a frame with a color depth of 32 bits. + * + * The output frame will have the same packed format as the palette. + * + * @param src source frame buffer + * @param dst destination frame buffer + * @param num_pixels number of pixels to convert + * @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src + */ +void liteav_sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette); + +/** + * Convert an 8-bit paletted frame into a frame with a color depth of 24 bits. + * + * With the palette format "ABCD", the destination frame ends up with the format "ABC". + * + * @param src source frame buffer + * @param dst destination frame buffer + * @param num_pixels number of pixels to convert + * @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src + */ +void liteav_sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette); + +/** + * Get the AVClass for swsContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see liteav_av_opt_find(). 
+ */ +const AVClass *liteav_sws_get_class(void); + +/** + * @} + */ + +#endif /* SWSCALE_SWSCALE_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswscale/version.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswscale/version.h new file mode 100644 index 0000000..f1bed09 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Headers/libswscale/version.h @@ -0,0 +1,53 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWSCALE_VERSION_H +#define SWSCALE_VERSION_H + +/** + * @file + * swscale version macros + */ + +#include "libavutil/version.h" + +#define LIBSWSCALE_VERSION_MAJOR 5 +#define LIBSWSCALE_VERSION_MINOR 3 +#define LIBSWSCALE_VERSION_MICRO 100 + +#define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \ + LIBSWSCALE_VERSION_MINOR, \ + LIBSWSCALE_VERSION_MICRO) +#define LIBSWSCALE_VERSION AV_VERSION(LIBSWSCALE_VERSION_MAJOR, \ + LIBSWSCALE_VERSION_MINOR, \ + LIBSWSCALE_VERSION_MICRO) +#define LIBSWSCALE_BUILD LIBSWSCALE_VERSION_INT + +#define LIBSWSCALE_IDENT "SwS" AV_STRINGIFY(LIBSWSCALE_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. 
+ */ + +#ifndef FF_API_SWS_VECTOR +#define FF_API_SWS_VECTOR (LIBSWSCALE_VERSION_MAJOR < 6) +#endif + +#endif /* SWSCALE_VERSION_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Info.plist b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Info.plist new file mode 100644 index 0000000..9b90e79 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Info.plist @@ -0,0 +1,55 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>BuildMachineOSBuild</key> + <string>20F71</string> + <key>CFBundleDevelopmentRegion</key> + <string>en</string> + <key>CFBundleExecutable</key> + <string>TXFFmpeg</string> + <key>CFBundleIdentifier</key> + <string>com.tencent.liteav.FFMPEG</string> + <key>CFBundleInfoDictionaryVersion</key> + <string>6.0</string> + <key>CFBundleName</key> + <string>TXFFmpeg</string> + <key>CFBundlePackageType</key> + <string>FMWK</string> + <key>CFBundleShortVersionString</key> + <string>1.0</string> + <key>CFBundleSignature</key> + <string>????</string> + <key>CFBundleSupportedPlatforms</key> + <array> + <string>iPhoneOS</string> + </array> + <key>CFBundleVersion</key> + <string>1.0</string> + <key>DTCompiler</key> + <string>com.apple.compilers.llvm.clang.1_0</string> + <key>DTPlatformBuild</key> + <string>18E182</string> + <key>DTPlatformName</key> + <string>iphoneos</string> + <key>DTPlatformVersion</key> + <string>14.5</string> + <key>DTSDKBuild</key> + <string>18E182</string> + <key>DTSDKName</key> + <string>iphoneos14.5</string> + <key>DTXcode</key> + <string>1250</string> + <key>DTXcodeBuild</key> + <string>12E262</string> + <key>MinimumOSVersion</key> + <string>9.0</string> + <key>NSPrincipalClass</key> + <string></string> + <key>UIDeviceFamily</key> + <array> + <integer>1</integer> + <integer>2</integer> + </array> +</dict> +</plist> 
diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Modules/module.modulemap b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Modules/module.modulemap new file mode 100644 index 0000000..b5be796 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/Modules/module.modulemap @@ -0,0 +1,6 @@ +framework module TXFFmpeg { + umbrella header "TXFFmpeg.h" + + export * + module * { export * } +} diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/TXFFmpeg b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/TXFFmpeg new file mode 100755 index 0000000..d64fddc Binary files /dev/null and b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-arm64_armv7/TXFFmpeg.framework/TXFFmpeg differ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/.DS_Store b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/.DS_Store new file mode 100644 index 0000000..7fb7a87 Binary files /dev/null and b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/.DS_Store differ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/.DS_Store b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/.DS_Store new file mode 100644 index 0000000..e935736 Binary files /dev/null and b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/.DS_Store differ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/TXFFmpeg.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/TXFFmpeg.h new file mode 100644 index 0000000..9eba835 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/TXFFmpeg.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2022 Tencent. All Rights Reserved. 
+ * + */ + +#import <TXFFmpeg/ffmpeg_rename_defines.h> +#import <TXFFmpeg/libavutil/adler32.h> +#import <TXFFmpeg/libavutil/aes.h> +#import <TXFFmpeg/libavutil/aes_ctr.h> +#import <TXFFmpeg/libavutil/attributes.h> +#import <TXFFmpeg/libavutil/audio_fifo.h> +#import <TXFFmpeg/libavutil/avassert.h> +#import <TXFFmpeg/libavutil/avstring.h> +#import <TXFFmpeg/libavutil/avutil.h> +#import <TXFFmpeg/libavutil/base64.h> +#import <TXFFmpeg/libavutil/blowfish.h> +#import <TXFFmpeg/libavutil/bprint.h> +#import <TXFFmpeg/libavutil/bswap.h> +#import <TXFFmpeg/libavutil/buffer.h> +#import <TXFFmpeg/libavutil/cast5.h> +#import <TXFFmpeg/libavutil/camellia.h> +#import <TXFFmpeg/libavutil/channel_layout.h> +#import <TXFFmpeg/libavutil/common.h> +#import <TXFFmpeg/libavutil/cpu.h> +#import <TXFFmpeg/libavutil/crc.h> +#import <TXFFmpeg/libavutil/des.h> +#import <TXFFmpeg/libavutil/dict.h> +#import <TXFFmpeg/libavutil/display.h> +#import <TXFFmpeg/libavutil/dovi_meta.h> +#import <TXFFmpeg/libavutil/downmix_info.h> +#import <TXFFmpeg/libavutil/encryption_info.h> +#import <TXFFmpeg/libavutil/error.h> +#import <TXFFmpeg/libavutil/eval.h> +#import <TXFFmpeg/libavutil/fifo.h> +#import <TXFFmpeg/libavutil/file.h> +#import <TXFFmpeg/libavutil/frame.h> +#import <TXFFmpeg/libavutil/hash.h> +#import <TXFFmpeg/libavutil/hmac.h> +#import <TXFFmpeg/libavutil/hwcontext.h> +#import <TXFFmpeg/libavutil/hwcontext_cuda.h> +#import <TXFFmpeg/libavutil/hwcontext_d3d11va.h> +#import <TXFFmpeg/libavutil/hwcontext_drm.h> +#import <TXFFmpeg/libavutil/hwcontext_dxva2.h> +#import <TXFFmpeg/libavutil/hwcontext_qsv.h> +#import <TXFFmpeg/libavutil/hwcontext_mediacodec.h> +#import <TXFFmpeg/libavutil/hwcontext_vaapi.h> +#import <TXFFmpeg/libavutil/hwcontext_videotoolbox.h> +#import <TXFFmpeg/libavutil/hwcontext_vdpau.h> +#import <TXFFmpeg/libavutil/imgutils.h> +#import <TXFFmpeg/libavutil/intfloat.h> +#import <TXFFmpeg/libavutil/intreadwrite.h> +#import <TXFFmpeg/libavutil/lfg.h> +#import 
<TXFFmpeg/libavutil/log.h> +#import <TXFFmpeg/libavutil/macros.h> +#import <TXFFmpeg/libavutil/mathematics.h> +#import <TXFFmpeg/libavutil/mastering_display_metadata.h> +#import <TXFFmpeg/libavutil/md5.h> +#import <TXFFmpeg/libavutil/mem.h> +#import <TXFFmpeg/libavutil/motion_vector.h> +#import <TXFFmpeg/libavutil/murmur3.h> +#import <TXFFmpeg/libavutil/opt.h> +#import <TXFFmpeg/libavutil/parseutils.h> +#import <TXFFmpeg/libavutil/pixdesc.h> +#import <TXFFmpeg/libavutil/pixfmt.h> +#import <TXFFmpeg/libavutil/random_seed.h> +#import <TXFFmpeg/libavutil/rc4.h> +#import <TXFFmpeg/libavutil/rational.h> +#import <TXFFmpeg/libavutil/replaygain.h> +#import <TXFFmpeg/libavutil/ripemd.h> +#import <TXFFmpeg/libavutil/samplefmt.h> +#import <TXFFmpeg/libavutil/sha.h> +#import <TXFFmpeg/libavutil/sha512.h> +#import <TXFFmpeg/libavutil/spherical.h> +#import <TXFFmpeg/libavutil/stereo3d.h> +#import <TXFFmpeg/libavutil/threadmessage.h> +#import <TXFFmpeg/libavutil/time.h> +#import <TXFFmpeg/libavutil/timecode.h> +#import <TXFFmpeg/libavutil/timestamp.h> +#import <TXFFmpeg/libavutil/tree.h> +#import <TXFFmpeg/libavutil/twofish.h> +#import <TXFFmpeg/libavutil/version.h> +#import <TXFFmpeg/libavutil/xtea.h> +#import <TXFFmpeg/libavutil/tea.h> +#import <TXFFmpeg/libavutil/pthread_helper.h> +#import <TXFFmpeg/libavutil/tx.h> +#import <TXFFmpeg/libavutil/avconfig.h> +#import <TXFFmpeg/libavutil/ffversion.h> +#import <TXFFmpeg/libavutil/lzo.h> +#import <TXFFmpeg/libavfilter/avfilter.h> +#import <TXFFmpeg/libavfilter/version.h> +#import <TXFFmpeg/libavfilter/buffersink.h> +#import <TXFFmpeg/libavfilter/buffersrc.h> +#import <TXFFmpeg/libswresample/swresample.h> +#import <TXFFmpeg/libswresample/version.h> +#import <TXFFmpeg/libswscale/swscale.h> +#import <TXFFmpeg/libswscale/version.h> +#import <TXFFmpeg/libavcodec/ac3_parser.h> +#import <TXFFmpeg/libavcodec/adts_parser.h> +#import <TXFFmpeg/libavcodec/avcodec.h> +#import <TXFFmpeg/libavcodec/avdct.h> +#import <TXFFmpeg/libavcodec/avfft.h> 
+#import <TXFFmpeg/libavcodec/d3d11va.h> +#import <TXFFmpeg/libavcodec/dirac.h> +#import <TXFFmpeg/libavcodec/dv_profile.h> +#import <TXFFmpeg/libavcodec/dxva2.h> +#import <TXFFmpeg/libavcodec/jni.h> +#import <TXFFmpeg/libavcodec/mediacodec.h> +#import <TXFFmpeg/libavcodec/qsv.h> +#import <TXFFmpeg/libavcodec/vaapi.h> +#import <TXFFmpeg/libavcodec/vdpau.h> +#import <TXFFmpeg/libavcodec/version.h> +#import <TXFFmpeg/libavcodec/videotoolbox.h> +#import <TXFFmpeg/libavcodec/vorbis_parser.h> +#import <TXFFmpeg/libavcodec/xvmc.h> +#import <TXFFmpeg/libavcodec/ass_split.h> +#import <TXFFmpeg/libavcodec/bytestream.h> +#import <TXFFmpeg/libavformat/avformat.h> +#import <TXFFmpeg/libavformat/avio.h> +#import <TXFFmpeg/libavformat/version.h> +#import <TXFFmpeg/libavformat/internal.h> +#import <TXFFmpeg/libavformat/os_support.h> +#import <TXFFmpeg/libavformat/avc.h> +#import <TXFFmpeg/libavformat/url.h> diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/ffmpeg_rename_defines.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/ffmpeg_rename_defines.h new file mode 100644 index 0000000..f90e7b3 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/ffmpeg_rename_defines.h @@ -0,0 +1,3462 @@ +// Copyright (c) 2021 Tencent. All rights reserved. + +// This file generated by rename_symbols_generater.py. +// Do not modify it manually. 
+ +#ifndef THIRD_PARTY_FFMPEG_FFMPEG_RENAME_DEFINES_H +#define THIRD_PARTY_FFMPEG_FFMPEG_RENAME_DEFINES_H + +// clang-format off +#define ff_hevc_merge_flag_decode liteav_ff_hevc_merge_flag_decode +#define ff_deblock_h_chroma_10_avx liteav_ff_deblock_h_chroma_10_avx +#define ff_pred16x16_plane_rv40_8_mmx liteav_ff_pred16x16_plane_rv40_8_mmx +#define ff_avg_h264_qpel8_mc03_neon liteav_ff_avg_h264_qpel8_mc03_neon +#define ff_h264_direct_dist_scale_factor liteav_ff_h264_direct_dist_scale_factor +#define ff_deblock_h_chroma_intra_8_mmxext liteav_ff_deblock_h_chroma_intra_8_mmxext +#define av_buffer_is_writable liteav_av_buffer_is_writable +#define ff_pw_96 liteav_ff_pw_96 +#define webvtt_packet_parse liteav_webvtt_packet_parse +#define ff_dv_frame_profile liteav_ff_dv_frame_profile +#define av_buffer_unref liteav_av_buffer_unref +#define av_opt_query_ranges_default liteav_av_opt_query_ranges_default +#define av_frame_set_color_range liteav_av_frame_set_color_range +#define av_bprint_init liteav_av_bprint_init +#define av_des_mac liteav_av_des_mac +#define ff_init_desc_chscale liteav_ff_init_desc_chscale +#define ff_fdctdsp_init liteav_ff_fdctdsp_init +#define ff_hevcdsp_init_neon_intrinsics liteav_ff_hevcdsp_init_neon_intrinsics +#define ff_pcm_read_seek liteav_ff_pcm_read_seek +#define av_fifo_generic_write liteav_av_fifo_generic_write +#define avio_close_dir liteav_avio_close_dir +#define av_strlcpy liteav_av_strlcpy +#define av_sha_final liteav_av_sha_final +#define avfilter_link liteav_avfilter_link +#define ff_mpeg4_intra_run liteav_ff_mpeg4_intra_run +#define ff_check_interrupt liteav_ff_check_interrupt +#define ff_ps_hybrid_synthesis_deint_neon liteav_ff_ps_hybrid_synthesis_deint_neon +#define av_strdup liteav_av_strdup +#define av_get_channel_layout_nb_channels liteav_av_get_channel_layout_nb_channels +#define ff_sws_init_output_funcs liteav_ff_sws_init_output_funcs +#define ff_pred4x4_horizontal_down_10_ssse3 liteav_ff_pred4x4_horizontal_down_10_ssse3 +#define 
ff_hevc_pred_angular_16x16_v_zero_neon_8 liteav_ff_hevc_pred_angular_16x16_v_zero_neon_8 +#define ff_put_h264_qpel8_mc30_neon liteav_ff_put_h264_qpel8_mc30_neon +#define vlc_css_declaration_New liteav_vlc_css_declaration_New +#define ff_videotoolbox_alloc_frame liteav_ff_videotoolbox_alloc_frame +#define ff_draw_init liteav_ff_draw_init +#define av_find_best_pix_fmt_of_2 liteav_av_find_best_pix_fmt_of_2 +#define ff_avg_pixels16_xy2_neon liteav_ff_avg_pixels16_xy2_neon +#define avpriv_slicethread_free liteav_avpriv_slicethread_free +#define ff_blockdsp_init_x86 liteav_ff_blockdsp_init_x86 +#define av_tree_node_size liteav_av_tree_node_size +#define ff_pred4x4_down_left_10_sse2 liteav_ff_pred4x4_down_left_10_sse2 +#define av_image_fill_max_pixsteps liteav_av_image_fill_max_pixsteps +#define ff_attach_decode_data liteav_ff_attach_decode_data +#define ff_aic_dc_scale_table liteav_ff_aic_dc_scale_table +#define ff_h264_idct_add16_8_mmxext liteav_ff_h264_idct_add16_8_mmxext +#define ff_mp4_read_descr liteav_ff_mp4_read_descr +#define ffurl_closep liteav_ffurl_closep +#define ff_mov_init_hinting liteav_ff_mov_init_hinting +#define ff_hevc_put_pel_uw_pixels_w4_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w4_neon_8_asm +#define av_packet_new_side_data liteav_av_packet_new_side_data +#define ff_hevc_put_qpel_uw_v3_neon_8 liteav_ff_hevc_put_qpel_uw_v3_neon_8 +#define ff_dct32_float_sse2 liteav_ff_dct32_float_sse2 +#define av_append_path_component liteav_av_append_path_component +#define ff_pack_8ch_float_to_float_u_sse2 liteav_ff_pack_8ch_float_to_float_u_sse2 +#define av_log_set_level liteav_av_log_set_level +#define ff_h264_chroma422_dc_scan liteav_ff_h264_chroma422_dc_scan +#define ff_af_aformat liteav_ff_af_aformat +#define ff_pw_4 liteav_ff_pw_4 +#define ff_fmt_is_in liteav_ff_fmt_is_in +#define ff_pw_2 liteav_ff_pw_2 +#define ff_pw_3 liteav_ff_pw_3 +#define ff_hyscale_fast_c liteav_ff_hyscale_fast_c +#define ff_pw_1 liteav_ff_pw_1 +#define ff_pw_8 liteav_ff_pw_8 +#define 
av_opt_is_set_to_default liteav_av_opt_is_set_to_default +#define ff_dither_2x2_4 liteav_ff_dither_2x2_4 +#define ff_flac_parse_picture liteav_ff_flac_parse_picture +#define ff_dct32_fixed liteav_ff_dct32_fixed +#define ff_h264_weight_4_10_sse2 liteav_ff_h264_weight_4_10_sse2 +#define ff_put_pixels8_l2_mmxext liteav_ff_put_pixels8_l2_mmxext +#define ff_h263_static_rl_table_store liteav_ff_h263_static_rl_table_store +#define ff_mpv_common_init liteav_ff_mpv_common_init +#define rgb24to32 liteav_rgb24to32 +#define ff_aac_num_swb_128 liteav_ff_aac_num_swb_128 +#define av_videotoolbox_default_free liteav_av_videotoolbox_default_free +#define ff_amf_match_string liteav_ff_amf_match_string +#define ff_h263_h_loop_filter_mmx liteav_ff_h263_h_loop_filter_mmx +#define av_get_colorspace_name liteav_av_get_colorspace_name +#define ff_h264_execute_ref_pic_marking liteav_ff_h264_execute_ref_pic_marking +#define ff_aac_num_swb_120 liteav_ff_aac_num_swb_120 +#define ff_put_h264_chroma_mc8_10_avx liteav_ff_put_h264_chroma_mc8_10_avx +#define av_rescale liteav_av_rescale +#define ffurl_open_whitelist liteav_ffurl_open_whitelist +#define ff_mdct_end liteav_ff_mdct_end +#define av_register_all liteav_av_register_all +#define ff_h264_idct_add8_8_mmxext liteav_ff_h264_idct_add8_8_mmxext +#define ff_sbr_hf_apply_noise_0_neon liteav_ff_sbr_hf_apply_noise_0_neon +#define av_dv_codec_profile liteav_av_dv_codec_profile +#define ff_rtmpts_protocol liteav_ff_rtmpts_protocol +#define ff_j_rev_dct1 liteav_ff_j_rev_dct1 +#define ff_j_rev_dct4 liteav_ff_j_rev_dct4 +#define ff_h264_chroma_dc_dequant_idct_12_c liteav_ff_h264_chroma_dc_dequant_idct_12_c +#define av_tree_destroy liteav_av_tree_destroy +#define av_bsf_list_append2 liteav_av_bsf_list_append2 +#define ff_avg_h264_qpel16_mc22_10_sse2 liteav_ff_avg_h264_qpel16_mc22_10_sse2 +#define ff_videotoolbox_h264_start_frame liteav_ff_videotoolbox_h264_start_frame +#define ff_frame_thread_encoder_init liteav_ff_frame_thread_encoder_init +#define 
ff_cos_4096 liteav_ff_cos_4096 +#define ff_pred8x8l_dc_8_ssse3 liteav_ff_pred8x8l_dc_8_ssse3 +#define ff_mvtab liteav_ff_mvtab +#define ff_blend_mask liteav_ff_blend_mask +#define ff_hevc_put_qpel_uw_h1v2_neon_8 liteav_ff_hevc_put_qpel_uw_h1v2_neon_8 +#define ff_h263_decode_mb liteav_ff_h263_decode_mb +#define ff_simple_idct_neon liteav_ff_simple_idct_neon +#define av_hwframe_get_buffer liteav_av_hwframe_get_buffer +#define rgb32to16 liteav_rgb32to16 +#define rgb32to15 liteav_rgb32to15 +#define ff_put_pixels8_neon liteav_ff_put_pixels8_neon +#define ff_avg_h264_qpel16_mc10_10_sse2_cache64 liteav_ff_avg_h264_qpel16_mc10_10_sse2_cache64 +#define ff_avg_h264_qpel4_mc20_10_mmxext liteav_ff_avg_h264_qpel4_mc20_10_mmxext +#define ff_ebur128_loudness_momentary liteav_ff_ebur128_loudness_momentary +#define ff_deblock_h_chroma422_intra_8_mmxext liteav_ff_deblock_h_chroma422_intra_8_mmxext +#define av_packet_unpack_dictionary liteav_av_packet_unpack_dictionary +#define ff_sprite_trajectory_tab liteav_ff_sprite_trajectory_tab +#define ff_inlink_peek_frame liteav_ff_inlink_peek_frame +#define avio_wb16 liteav_avio_wb16 +#define ff_int32_to_int16_u_sse2 liteav_ff_int32_to_int16_u_sse2 +#define ff_h263_decode_init liteav_ff_h263_decode_init +#define avcodec_dct_init liteav_avcodec_dct_init +#define ff_hevc_put_qpel_uw_weight_h3v1_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h3v1_neon_8 +#define ff_make_formatu64_list liteav_ff_make_formatu64_list +#define ff_h263_update_motion_val liteav_ff_h263_update_motion_val +#define ff_mpeg4video_parser liteav_ff_mpeg4video_parser +#define ff_raw_video_read_header liteav_ff_raw_video_read_header +#define av_dv_codec_profile2 liteav_av_dv_codec_profile2 +#define ff_inlink_check_available_samples liteav_ff_inlink_check_available_samples +#define av_get_bits_per_pixel liteav_av_get_bits_per_pixel +#define ff_yuv2rgb_c_init_tables liteav_ff_yuv2rgb_c_init_tables +#define avio_get_str16le liteav_avio_get_str16le +#define ff_simple_idct_int16_10bit 
liteav_ff_simple_idct_int16_10bit +#define ff_codec_bmp_tags liteav_ff_codec_bmp_tags +#define ff_h264_idct_dc_add_8_sse2 liteav_ff_h264_idct_dc_add_8_sse2 +#define av_opt_set_defaults2 liteav_av_opt_set_defaults2 +#define ff_avg_h264_qpel4_mc33_10_mmxext liteav_ff_avg_h264_qpel4_mc33_10_mmxext +#define av_audio_fifo_peek liteav_av_audio_fifo_peek +#define ff_mpeg4_default_intra_matrix liteav_ff_mpeg4_default_intra_matrix +#define ff_h264_idct_add16_8_mmx liteav_ff_h264_idct_add16_8_mmx +#define ff_put_h264_qpel8_mc23_10_sse2 liteav_ff_put_h264_qpel8_mc23_10_sse2 +#define av_frame_get_side_data liteav_av_frame_get_side_data +#define avcodec_decode_audio4 liteav_avcodec_decode_audio4 +#define ff_put_pixels8_mmx liteav_ff_put_pixels8_mmx +#define ff_h264_p_mb_type_info liteav_ff_h264_p_mb_type_info +#define ff_mpv_common_end liteav_ff_mpv_common_end +#define ff_cbrt_tab liteav_ff_cbrt_tab +#define swri_rematrix_init_x86 liteav_swri_rematrix_init_x86 +#define ff_avg_h264_qpel4_mc10_10_mmxext liteav_ff_avg_h264_qpel4_mc10_10_mmxext +#define ff_framequeue_take liteav_ff_framequeue_take +#define ff_h263dsp_init_x86 liteav_ff_h263dsp_init_x86 +#define av_packet_move_ref liteav_av_packet_move_ref +#define ff_avg_h264_qpel16_mc02_10_sse2 liteav_ff_avg_h264_qpel16_mc02_10_sse2 +#define av_cpu_max_align liteav_av_cpu_max_align +#define av_buffer_default_free liteav_av_buffer_default_free +#define av_int2i liteav_av_int2i +#define ff_unpack_6ch_float_to_int32_a_avx liteav_ff_unpack_6ch_float_to_int32_a_avx +#define ff_codec_wav_tags liteav_ff_codec_wav_tags +#define ff_pred16x16_dc_8_sse2 liteav_ff_pred16x16_dc_8_sse2 +#define ff_init_ff_sine_windows liteav_ff_init_ff_sine_windows +#define ff_simple_idct10_sse2 liteav_ff_simple_idct10_sse2 +#define av_camellia_size liteav_av_camellia_size +#define ff_put_h264_qpel16_mc10_10_sse2_cache64 liteav_ff_put_h264_qpel16_mc10_10_sse2_cache64 +#define ff_pred8x8_top_dc_8_mmxext liteav_ff_pred8x8_top_dc_8_mmxext +#define 
rgb64tobgr48_nobswap liteav_rgb64tobgr48_nobswap +#define ff_parse_time_base liteav_ff_parse_time_base +#define av_chroma_location_from_name liteav_av_chroma_location_from_name +#define ff_yuv422p_to_argb_neon liteav_ff_yuv422p_to_argb_neon +#define ff_hevc_put_qpel_uw_h2v3_neon_8 liteav_ff_hevc_put_qpel_uw_h2v3_neon_8 +#define av_get_pix_fmt_loss liteav_av_get_pix_fmt_loss +#define ffio_free_dyn_buf liteav_ffio_free_dyn_buf +#define ff_unpack_2ch_int16_to_int16_a_sse2 liteav_ff_unpack_2ch_int16_to_int16_a_sse2 +#define ff_h264_chroma_dc_dequant_idct_10_c liteav_ff_h264_chroma_dc_dequant_idct_10_c +#define ff_cos_tabs_fixed liteav_ff_cos_tabs_fixed +#define av_frame_set_channel_layout liteav_av_frame_set_channel_layout +#define ff_h264_get_profile liteav_ff_h264_get_profile +#define ff_h264_idct8_add4_14_c liteav_ff_h264_idct8_add4_14_c +#define ff_pred4x4_down_right_8_mmxext liteav_ff_pred4x4_down_right_8_mmxext +#define ff_float_to_int32_u_sse2 liteav_ff_float_to_int32_u_sse2 +#define ff_pred16x16_plane_h264_8_mmx liteav_ff_pred16x16_plane_h264_8_mmx +#define ff_hevc_put_qpel_h1v3_neon_8 liteav_ff_hevc_put_qpel_h1v3_neon_8 +#define ff_hevc_luma_mv_merge_mode liteav_ff_hevc_luma_mv_merge_mode +#define ff_bsf_get_packet_ref liteav_ff_bsf_get_packet_ref +#define ff_hevc_put_qpel_uw_pixels_w24_neon_8 liteav_ff_hevc_put_qpel_uw_pixels_w24_neon_8 +#define ff_h264_b_mb_type_info liteav_ff_h264_b_mb_type_info +#define ff_h264_biweight_16_mmxext liteav_ff_h264_biweight_16_mmxext +#define ff_h264qpel_init liteav_ff_h264qpel_init +#define av_opt_get_pixel_fmt liteav_av_opt_get_pixel_fmt +#define ff_int16_to_float_a_sse2 liteav_ff_int16_to_float_a_sse2 +#define ff_mpa_synth_filter_fixed liteav_ff_mpa_synth_filter_fixed +#define ff_qpeldsp_init liteav_ff_qpeldsp_init +#define av_mdct_end liteav_av_mdct_end +#define ff_alloc_packet2 liteav_ff_alloc_packet2 +#define avfilter_config_links liteav_avfilter_config_links +#define ff_aac_scalefactor_bits 
liteav_ff_aac_scalefactor_bits +#define ff_avg_pixels16_xy2_no_rnd_neon liteav_ff_avg_pixels16_xy2_no_rnd_neon +#define ff_hevc_put_pel_uw_pixels_w16_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w16_neon_8_asm +#define avio_get_str16be liteav_avio_get_str16be +#define ff_pack_2ch_int32_to_int16_u_sse2 liteav_ff_pack_2ch_int32_to_int16_u_sse2 +#define av_thread_message_queue_nb_elems liteav_av_thread_message_queue_nb_elems +#define ff_amf_write_string liteav_ff_amf_write_string +#define ff_vf_rotate liteav_ff_vf_rotate +#define ff_codec_wav_guids liteav_ff_codec_wav_guids +#define ff_put_pixels16_sse2 liteav_ff_put_pixels16_sse2 +#define ff_blockdsp_init liteav_ff_blockdsp_init +#define avio_read liteav_avio_read +#define av_frame_get_best_effort_timestamp liteav_av_frame_get_best_effort_timestamp +#define avcodec_decode_video2 liteav_avcodec_decode_video2 +#define ff_avg_h264_qpel8or16_v_lowpass_op_mmxext liteav_ff_avg_h264_qpel8or16_v_lowpass_op_mmxext +#define ff_swb_offset_1024 liteav_ff_swb_offset_1024 +#define ff_mpv_decode_defaults liteav_ff_mpv_decode_defaults +#define ff_h263_chroma_qscale_table liteav_ff_h263_chroma_qscale_table +#define ff_hevc_sao_edge_eo0_w32_neon_8 liteav_ff_hevc_sao_edge_eo0_w32_neon_8 +#define ff_rtmp_calc_digest liteav_ff_rtmp_calc_digest +#define swr_alloc_set_opts liteav_swr_alloc_set_opts +#define av_thread_message_queue_alloc liteav_av_thread_message_queue_alloc +#define av_strnstr liteav_av_strnstr +#define av_write_trailer liteav_av_write_trailer +#define ff_inlink_acknowledge_status liteav_ff_inlink_acknowledge_status +#define ff_id3v2_parse_chapters liteav_ff_id3v2_parse_chapters +#define avfilter_init_dict liteav_avfilter_init_dict +#define ff_init_cabac_encoder liteav_ff_init_cabac_encoder +#define ff_pred8x8l_down_right_8_mmxext liteav_ff_pred8x8l_down_right_8_mmxext +#define ff_mpeg_draw_horiz_band liteav_ff_mpeg_draw_horiz_band +#define ff_hevc_diag_scan8x8_x liteav_ff_hevc_diag_scan8x8_x +#define ff_hevc_diag_scan8x8_y 
liteav_ff_hevc_diag_scan8x8_y +#define ff_amf_write_null liteav_ff_amf_write_null +#define ff_avg_h264_qpel16_mc21_neon liteav_ff_avg_h264_qpel16_mc21_neon +#define rgb32tobgr24 liteav_rgb32tobgr24 +#define ff_amf_read_number liteav_ff_amf_read_number +#define ff_h264_idct_add16intra_8_c liteav_ff_h264_idct_add16intra_8_c +#define avio_skip liteav_avio_skip +#define ff_w4_min_w6_lo liteav_ff_w4_min_w6_lo +#define av_probe_input_buffer liteav_av_probe_input_buffer +#define ff_draw_supported_pixel_formats liteav_ff_draw_supported_pixel_formats +#define ff_ac3_muxer liteav_ff_ac3_muxer +#define ff_hevc_reset_sei liteav_ff_hevc_reset_sei +#define ff_h264_idct_add_12_c liteav_ff_h264_idct_add_12_c +#define ff_mp4_muxer liteav_ff_mp4_muxer +#define ff_pack_8ch_float_to_float_a_sse2 liteav_ff_pack_8ch_float_to_float_a_sse2 +#define ff_videotoolbox_hvcc_extradata_create liteav_ff_videotoolbox_hvcc_extradata_create +#define ff_hevc_end_of_slice_flag_decode liteav_ff_hevc_end_of_slice_flag_decode +#define ff_frame_pool_video_init liteav_ff_frame_pool_video_init +#define ff_h264_idct_add_14_c liteav_ff_h264_idct_add_14_c +#define avcodec_pix_fmt_to_codec_tag liteav_avcodec_pix_fmt_to_codec_tag +#define av_dovi_alloc liteav_av_dovi_alloc +#define av_copy_packet liteav_av_copy_packet +#define ff_h264_v_loop_filter_chroma_neon liteav_ff_h264_v_loop_filter_chroma_neon +#define av_opt_find liteav_av_opt_find +#define av_write_uncoded_frame liteav_av_write_uncoded_frame +#define ff_get_chomp_line liteav_ff_get_chomp_line +#define swr_set_matrix liteav_swr_set_matrix +#define ff_listen_bind liteav_ff_listen_bind +#define av_thread_message_queue_set_free_func liteav_av_thread_message_queue_set_free_func +#define av_opt_query_ranges liteav_av_opt_query_ranges +#define sws_addVec liteav_sws_addVec +#define av_hwdevice_ctx_init liteav_av_hwdevice_ctx_init +#define ff_pack_8ch_int32_to_float_u_avx liteav_ff_pack_8ch_int32_to_float_u_avx +#define av_parse_cpu_caps liteav_av_parse_cpu_caps 
+#define av_mod_i liteav_av_mod_i +#define avfilter_get_matrix liteav_avfilter_get_matrix +#define ff_id3v2_tags liteav_ff_id3v2_tags +#define avpriv_mpa_freq_tab liteav_avpriv_mpa_freq_tab +#define av_frame_get_pkt_duration liteav_av_frame_get_pkt_duration +#define ff_emulated_edge_mc_8 liteav_ff_emulated_edge_mc_8 +#define ff_mpeg4_y_dc_scale_table liteav_ff_mpeg4_y_dc_scale_table +#define avpriv_pix_fmt_bps_mov liteav_avpriv_pix_fmt_bps_mov +#define ff_outlink_get_status liteav_ff_outlink_get_status +#define ff_sws_alphablendaway liteav_ff_sws_alphablendaway +#define ff_avg_pixels16_sse2 liteav_ff_avg_pixels16_sse2 +#define ff_ebur128_loudness_range liteav_ff_ebur128_loudness_range +#define ff_h263_mbtype_b_tab liteav_ff_h263_mbtype_b_tab +#define av_image_get_linesize liteav_av_image_get_linesize +#define ff_cos_16_fixed liteav_ff_cos_16_fixed +#define ff_h264_i_mb_type_info liteav_ff_h264_i_mb_type_info +#define ff_h264_decode_mb_cabac liteav_ff_h264_decode_mb_cabac +#define ff_imdct_half_c liteav_ff_imdct_half_c +#define ff_h264_dequant8_coeff_init liteav_ff_h264_dequant8_coeff_init +#define ff_smil_extract_next_text_chunk liteav_ff_smil_extract_next_text_chunk +#define ff_mpeg4_init_direct_mv liteav_ff_mpeg4_init_direct_mv +#define ff_id3v2_parse_priv_dict liteav_ff_id3v2_parse_priv_dict +#define av_tree_find liteav_av_tree_find +#define av_calloc liteav_av_calloc +#define ff_h264_idct_add8_422_14_c liteav_ff_h264_idct_add8_422_14_c +#define yyset_in liteav_yyset_in +#define av_pix_fmt_get_chroma_sub_sample liteav_av_pix_fmt_get_chroma_sub_sample +#define av_murmur3_final liteav_av_murmur3_final +#define av_frame_get_channel_layout liteav_av_frame_get_channel_layout +#define ff_pack_6ch_float_to_float_a_mmx liteav_ff_pack_6ch_float_to_float_a_mmx +#define av_fft_calc liteav_av_fft_calc +#define ff_init_2d_vlc_rl liteav_ff_init_2d_vlc_rl +#define ff_hevc_put_qpel_uw_h1v1_neon_8 liteav_ff_hevc_put_qpel_uw_h1v1_neon_8 +#define ff_reshuffle_raw_rgb 
liteav_ff_reshuffle_raw_rgb +#define ff_hevc_put_epel_uw_pixels_w12_neon_8 liteav_ff_hevc_put_epel_uw_pixels_w12_neon_8 +#define av_get_token liteav_av_get_token +#define ff_videodsp_init_aarch64 liteav_ff_videodsp_init_aarch64 +#define ff_vf_hflip liteav_ff_vf_hflip +#define ff_hevc_prev_intra_luma_pred_flag_decode liteav_ff_hevc_prev_intra_luma_pred_flag_decode +#define av_frame_get_pkt_pos liteav_av_frame_get_pkt_pos +#define ff_default_query_formats liteav_ff_default_query_formats +#define ff_h264_idct8_dc_add_8_c liteav_ff_h264_idct8_dc_add_8_c +#define av_packet_free_side_data liteav_av_packet_free_side_data +#define ff_avg_h264_qpel16_mc30_10_sse2_cache64 liteav_ff_avg_h264_qpel16_mc30_10_sse2_cache64 +#define ff_interleaved_peek liteav_ff_interleaved_peek +#define ff_hevc_hls_mvd_coding liteav_ff_hevc_hls_mvd_coding +#define ff_avg_h264_qpel8_mc00_neon liteav_ff_avg_h264_qpel8_mc00_neon +#define ff_rtmp_packet_create liteav_ff_rtmp_packet_create +#define av_expr_eval liteav_av_expr_eval +#define ff_pd_65535 liteav_ff_pd_65535 +#define ff_pred16x16_128_dc_neon liteav_ff_pred16x16_128_dc_neon +#define ff_mpeg12_find_best_frame_rate liteav_ff_mpeg12_find_best_frame_rate +#define ff_hevc_put_qpel_uw_weight_v3_neon_8 liteav_ff_hevc_put_qpel_uw_weight_v3_neon_8 +#define av_bsf_receive_packet liteav_av_bsf_receive_packet +#define ff_simple_idct_int16_8bit liteav_ff_simple_idct_int16_8bit +#define ff_rtmp_packet_dump liteav_ff_rtmp_packet_dump +#define ff_pack_8ch_float_to_int32_a_sse2 liteav_ff_pack_8ch_float_to_int32_a_sse2 +#define ff_pack_6ch_float_to_float_u_sse liteav_ff_pack_6ch_float_to_float_u_sse +#define av_frame_side_data_name liteav_av_frame_side_data_name +#define ff_deblock_h_luma_8_avx liteav_ff_deblock_h_luma_8_avx +#define ff_pred8x8_horizontal_8_mmx liteav_ff_pred8x8_horizontal_8_mmx +#define ff_hevc_put_qpel_uw_weight_h2_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h2_neon_8 +#define ff_side_data_set_encoder_stats 
liteav_ff_side_data_set_encoder_stats +#define av_samples_fill_arrays liteav_av_samples_fill_arrays +#define ff_nv12_to_argb_neon liteav_ff_nv12_to_argb_neon +#define ff_put_h264_qpel4_v_lowpass_mmxext liteav_ff_put_h264_qpel4_v_lowpass_mmxext +#define ff_get_line liteav_ff_get_line +#define ff_simple_idct_put_int32_10bit liteav_ff_simple_idct_put_int32_10bit +#define av_audio_fifo_space liteav_av_audio_fifo_space +#define ff_hevc_videotoolbox_hwaccel liteav_ff_hevc_videotoolbox_hwaccel +#define ff_sws_rgb2rgb_init liteav_ff_sws_rgb2rgb_init +#define ff_vsink_buffer liteav_ff_vsink_buffer +#define av_iformat_next liteav_av_iformat_next +#define ff_hevc_pred_mode_decode liteav_ff_hevc_pred_mode_decode +#define av_fast_mallocz liteav_av_fast_mallocz +#define ff_deblock_h_chroma422_10_sse2 liteav_ff_deblock_h_chroma422_10_sse2 +#define avio_flush liteav_avio_flush +#define av_frame_ref liteav_av_frame_ref +#define ff_hwframe_map_replace liteav_ff_hwframe_map_replace +#define ff_deblock_h_chroma422_8_avx liteav_ff_deblock_h_chroma422_8_avx +#define ff_hevc_put_pel_uw_pixels_w32_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w32_neon_8_asm +#define yuv422ptoyuy2 liteav_yuv422ptoyuy2 +#define ff_hevc_idct_32x32_dc_neon_8 liteav_ff_hevc_idct_32x32_dc_neon_8 +#define yy_create_buffer liteav_yy_create_buffer +#define ff_hevc_add_residual_8x8_neon_8 liteav_ff_hevc_add_residual_8x8_neon_8 +#define av_parser_close liteav_av_parser_close +#define av_buffer_create liteav_av_buffer_create +#define ff_pred4x4_vertical_left_10_avx liteav_ff_pred4x4_vertical_left_10_avx +#define swr_get_delay liteav_swr_get_delay +#define ff_jpeg_fdct_islow_10 liteav_ff_jpeg_fdct_islow_10 +#define ff_h264_idct8_add_8_c liteav_ff_h264_idct8_add_8_c +#define av_frame_get_qp_table liteav_av_frame_get_qp_table +#define avpicture_layout liteav_avpicture_layout +#define ff_deblock_h_chroma_8_avx liteav_ff_deblock_h_chroma_8_avx +#define av_packet_merge_side_data liteav_av_packet_merge_side_data +#define 
ff_get_buffer liteav_ff_get_buffer +#define av_fft_permute liteav_av_fft_permute +#define av_realloc_array liteav_av_realloc_array +#define ff_h264_chroma_dc_dequant_idct_9_c liteav_ff_h264_chroma_dc_dequant_idct_9_c +#define ff_fetch_timestamp liteav_ff_fetch_timestamp +#define av_buffer_pool_uninit liteav_av_buffer_pool_uninit +#define ff_set_common_samplerates liteav_ff_set_common_samplerates +#define avio_get_dyn_buf liteav_avio_get_dyn_buf +#define ff_put_bmp_header liteav_ff_put_bmp_header +#define av_fifo_alloc liteav_av_fifo_alloc +#define ff_aac_pow34sf_tab liteav_ff_aac_pow34sf_tab +#define ff_float_to_int32_a_sse2 liteav_ff_float_to_int32_a_sse2 +#define ff_deblock_h_chroma422_10_avx liteav_ff_deblock_h_chroma422_10_avx +#define ff_hevc_put_pixels_w32_w48_w64_neon_8 liteav_ff_hevc_put_pixels_w32_w48_w64_neon_8 +#define ff_hevc_frame_nb_refs liteav_ff_hevc_frame_nb_refs +#define yyset_out liteav_yyset_out +#define ff_put_h264_qpel8_mc30_10_ssse3_cache64 liteav_ff_put_h264_qpel8_mc30_10_ssse3_cache64 +#define av_aes_ctr_set_random_iv liteav_av_aes_ctr_set_random_iv +#define ff_hevc_put_qpel_h3v2_neon_8 liteav_ff_hevc_put_qpel_h3v2_neon_8 +#define av_tree_insert liteav_av_tree_insert +#define ff_avg_pixels4_l2_shift5_mmxext liteav_ff_avg_pixels4_l2_shift5_mmxext +#define ff_put_pixels8_x2_neon liteav_ff_put_pixels8_x2_neon +#define ff_mpegts_muxer liteav_ff_mpegts_muxer +#define ff_put_h264_qpel8_mc02_neon liteav_ff_put_h264_qpel8_mc02_neon +#define shuffle_bytes_3012 liteav_shuffle_bytes_3012 +#define ff_h263_parser liteav_ff_h263_parser +#define av_dynarray_add liteav_av_dynarray_add +#define ff_sine_2048_fixed liteav_ff_sine_2048_fixed +#define av_lfg_init_from_data liteav_av_lfg_init_from_data +#define av_hmac_alloc liteav_av_hmac_alloc +#define avpriv_mpeg4audio_get_config liteav_avpriv_mpeg4audio_get_config +#define av_get_pix_fmt_string liteav_av_get_pix_fmt_string +#define ff_hevc_slice_rpl liteav_ff_hevc_slice_rpl +#define ff_h264_idct_dc_add_9_c 
liteav_ff_h264_idct_dc_add_9_c +#define ff_get_qtpalette liteav_ff_get_qtpalette +#define av_aes_init liteav_av_aes_init +#define ff_avg_h264_qpel16_mc22_neon liteav_ff_avg_h264_qpel16_mc22_neon +#define ff_avg_pixels4_mmxext liteav_ff_avg_pixels4_mmxext +#define ff_put_pixels8_y2_no_rnd_neon liteav_ff_put_pixels8_y2_no_rnd_neon +#define ff_pred4x4_vertical_left_8_mmxext liteav_ff_pred4x4_vertical_left_8_mmxext +#define ff_put_qpel8_mc32_old_c liteav_ff_put_qpel8_mc32_old_c +#define ff_hls_protocol liteav_ff_hls_protocol +#define av_get_pix_fmt liteav_av_get_pix_fmt +#define ff_bsf_get_packet liteav_ff_bsf_get_packet +#define avfilter_get_class liteav_avfilter_get_class +#define ff_h264chroma_init_x86 liteav_ff_h264chroma_init_x86 +#define ff_mpv_report_decode_progress liteav_ff_mpv_report_decode_progress +#define yv12touyvy liteav_yv12touyvy +#define ff_put_h264_qpel8_mc33_10_sse2 liteav_ff_put_h264_qpel8_mc33_10_sse2 +#define ff_frame_pool_get liteav_ff_frame_pool_get +#define ff_h264_direct_ref_list_init liteav_ff_h264_direct_ref_list_init +#define ff_rl_init liteav_ff_rl_init +#define ff_hevc_add_residual_16x16_neon_8 liteav_ff_hevc_add_residual_16x16_neon_8 +#define av_encryption_init_info_alloc liteav_av_encryption_init_info_alloc +#define avfilter_pad_count liteav_avfilter_pad_count +#define ff_idctdsp_init_aarch64 liteav_ff_idctdsp_init_aarch64 +#define ff_imdct36_float_avx liteav_ff_imdct36_float_avx +#define av_get_padded_bits_per_pixel liteav_av_get_padded_bits_per_pixel +#define av_ac3_parse_header liteav_av_ac3_parse_header +#define av_fifo_reset liteav_av_fifo_reset +#define ff_w4_min_w6_hi liteav_ff_w4_min_w6_hi +#define av_bitstream_filter_close liteav_av_bitstream_filter_close +#define avfilter_mul_matrix liteav_avfilter_mul_matrix +#define avcodec_descriptor_get_by_name liteav_avcodec_descriptor_get_by_name +#define ff_put_qpel16_mc13_old_c liteav_ff_put_qpel16_mc13_old_c +#define ff_put_h264_qpel8or16_hv1_lowpass_op_mmxext 
liteav_ff_put_h264_qpel8or16_hv1_lowpass_op_mmxext +#define yv12toyuy2 liteav_yv12toyuy2 +#define ff_inter_vlc liteav_ff_inter_vlc +#define vlc_css_declarations_Delete liteav_vlc_css_declarations_Delete +#define ff_flacdsp_init_x86 liteav_ff_flacdsp_init_x86 +#define ff_mov_get_channel_layout liteav_ff_mov_get_channel_layout +#define ff_pw_5 liteav_ff_pw_5 +#define ff_deblock_h_luma_intra_8_sse2 liteav_ff_deblock_h_luma_intra_8_sse2 +#define ff_hflip_init liteav_ff_hflip_init +#define ff_h264_idct_add8_8_c liteav_ff_h264_idct_add8_8_c +#define ff_pred16x16_horizontal_10_mmxext liteav_ff_pred16x16_horizontal_10_mmxext +#define ff_pd_8192 liteav_ff_pd_8192 +#define ffio_open_whitelist liteav_ffio_open_whitelist +#define avio_feof liteav_avio_feof +#define ff_flv_demuxer liteav_ff_flv_demuxer +#define avio_rb64 liteav_avio_rb64 +#define av_log_default_callback liteav_av_log_default_callback +#define ff_pred16x16_dc_8_ssse3 liteav_ff_pred16x16_dc_8_ssse3 +#define ff_pred8x8l_top_dc_10_sse2 liteav_ff_pred8x8l_top_dc_10_sse2 +#define av_max_alloc liteav_av_max_alloc +#define ff_put_qpel8_mc11_old_c liteav_ff_put_qpel8_mc11_old_c +#define ff_avg_h264_chroma_mc4_mmxext liteav_ff_avg_h264_chroma_mc4_mmxext +#define ff_mpeg4_resync_prefix liteav_ff_mpeg4_resync_prefix +#define ff_pred16x16_top_dc_10_sse2 liteav_ff_pred16x16_top_dc_10_sse2 +#define swri_resample_dsp_init liteav_swri_resample_dsp_init +#define ff_avfilter_graph_update_heap liteav_ff_avfilter_graph_update_heap +#define ff_hevc_sao_offset_abs_decode liteav_ff_hevc_sao_offset_abs_decode +#define av_buffersrc_parameters_alloc liteav_av_buffersrc_parameters_alloc +#define av_gettime_relative_is_monotonic liteav_av_gettime_relative_is_monotonic +#define avpicture_get_size liteav_avpicture_get_size +#define avcodec_register_all liteav_avcodec_register_all +#define swri_audio_convert_alloc liteav_swri_audio_convert_alloc +#define avpriv_request_sample liteav_avpriv_request_sample +#define 
ff_put_h264_qpel8_mc31_10_sse2 liteav_ff_put_h264_qpel8_mc31_10_sse2 +#define ff_hevc_inter_pred_idc_decode liteav_ff_hevc_inter_pred_idc_decode +#define ff_pw_9 liteav_ff_pw_9 +#define ff_er_add_slice liteav_ff_er_add_slice +#define ff_pd_16 liteav_ff_pd_16 +#define ff_unpack_2ch_float_to_int16_a_sse2 liteav_ff_unpack_2ch_float_to_int16_a_sse2 +#define ff_subtitles_read_line liteav_ff_subtitles_read_line +#define av_strerror liteav_av_strerror +#define swr_drop_output liteav_swr_drop_output +#define avio_r8 liteav_avio_r8 +#define sws_getIdentityVec liteav_sws_getIdentityVec +#define ff_put_qpel16_mc31_old_c liteav_ff_put_qpel16_mc31_old_c +#define av_audio_fifo_reset liteav_av_audio_fifo_reset +#define ff_pred16x16_plane_rv40_8_mmxext liteav_ff_pred16x16_plane_rv40_8_mmxext +#define sws_freeFilter liteav_sws_freeFilter +#define ff_startcode_find_candidate_c liteav_ff_startcode_find_candidate_c +#define vu9_to_vu12 liteav_vu9_to_vu12 +#define ff_tls_deinit liteav_ff_tls_deinit +#define av_hash_get_name liteav_av_hash_get_name +#define ff_unpack_2ch_float_to_int32_a_sse2 liteav_ff_unpack_2ch_float_to_int32_a_sse2 +#define avfilter_free liteav_avfilter_free +#define swr_set_compensation liteav_swr_set_compensation +#define planar2x liteav_planar2x +#define ff_aac_spectral_bits liteav_ff_aac_spectral_bits +#define ff_mpv_reconstruct_mb liteav_ff_mpv_reconstruct_mb +#define av_buffersink_get_type liteav_av_buffersink_get_type +#define ff_avg_pixels16_l2_mmxext liteav_ff_avg_pixels16_l2_mmxext +#define av_content_light_metadata_alloc liteav_av_content_light_metadata_alloc +#define av_get_sample_fmt liteav_av_get_sample_fmt +#define ff_hevc_put_qpel_uw_bi_h_neon_8 liteav_ff_hevc_put_qpel_uw_bi_h_neon_8 +#define ff_id3v2_parse_priv liteav_ff_id3v2_parse_priv +#define ff_hevc_put_qpel_uw_bi_hv_neon_8 liteav_ff_hevc_put_qpel_uw_bi_hv_neon_8 +#define ff_dither_8x8_128 liteav_ff_dither_8x8_128 +#define ff_mpeg1_videotoolbox_hwaccel liteav_ff_mpeg1_videotoolbox_hwaccel 
+#define avio_rl64 liteav_avio_rl64 +#define ff_isom_write_av1c liteav_ff_isom_write_av1c +#define sws_scaleVec liteav_sws_scaleVec +#define ff_isom_write_avcc liteav_ff_isom_write_avcc +#define ff_w1_plus_w5 liteav_ff_w1_plus_w5 +#define ff_put_h264_qpel8or16_hv2_lowpass_op_mmxext liteav_ff_put_h264_qpel8or16_hv2_lowpass_op_mmxext +#define yyget_column liteav_yyget_column +#define ff_hevc_put_qpel_uw_pixels_w8_neon_8 liteav_ff_hevc_put_qpel_uw_pixels_w8_neon_8 +#define ff_mpeg2_videotoolbox_hwaccel liteav_ff_mpeg2_videotoolbox_hwaccel +#define ff_ape_parse_tag liteav_ff_ape_parse_tag +#define ff_http_match_no_proxy liteav_ff_http_match_no_proxy +#define ff_h264_idct_dc_add_neon liteav_ff_h264_idct_dc_add_neon +#define ff_h264_idct_add8_422_10_sse2 liteav_ff_h264_idct_add8_422_10_sse2 +#define ff_put_h264_qpel4_mc20_10_mmxext liteav_ff_put_h264_qpel4_mc20_10_mmxext +#define ff_cos_32768 liteav_ff_cos_32768 +#define ff_h264_idct_add16_10_c liteav_ff_h264_idct_add16_10_c +#define av_interleaved_write_uncoded_frame liteav_av_interleaved_write_uncoded_frame +#define av_opt_set_dict2 liteav_av_opt_set_dict2 +#define ff_h264_idct8_add_10_c liteav_ff_h264_idct8_add_10_c +#define ff_avg_vc1_chroma_mc8_nornd_mmxext liteav_ff_avg_vc1_chroma_mc8_nornd_mmxext +#define av_nearer_q liteav_av_nearer_q +#define ff_mpeg2_frame_rate_tab liteav_ff_mpeg2_frame_rate_tab +#define avio_write_marker liteav_avio_write_marker +#define av_spherical_alloc liteav_av_spherical_alloc +#define av_fft_init liteav_av_fft_init +#define ff_put_no_rnd_qpel8_mc33_old_c liteav_ff_put_no_rnd_qpel8_mc33_old_c +#define ff_http_averror liteav_ff_http_averror +#define ff_h264_idct_add8_neon liteav_ff_h264_idct_add8_neon +#define ff_put_h264_qpel8_h_lowpass_l2_ssse3 liteav_ff_put_h264_qpel8_h_lowpass_l2_ssse3 +#define ff_decode_get_packet liteav_ff_decode_get_packet +#define ff_mp3on4float_decoder liteav_ff_mp3on4float_decoder +#define ff_avg_qpel16_mc33_old_c liteav_ff_avg_qpel16_mc33_old_c +#define 
avfilter_graph_parse_ptr liteav_avfilter_graph_parse_ptr +#define ff_interleave_packet_per_dts liteav_ff_interleave_packet_per_dts +#define ff_hevc_sao_band_w64_neon_8 liteav_ff_hevc_sao_band_w64_neon_8 +#define ff_put_qpel16_mc11_old_c liteav_ff_put_qpel16_mc11_old_c +#define ff_frame_thread_init liteav_ff_frame_thread_init +#define ff_webvtt_demuxer liteav_ff_webvtt_demuxer +#define ff_float_to_int16_u_sse2 liteav_ff_float_to_int16_u_sse2 +#define ff_avg_h264_qpel16_mc21_10_sse2 liteav_ff_avg_h264_qpel16_mc21_10_sse2 +#define ff_avg_pixels8_mmxext liteav_ff_avg_pixels8_mmxext +#define ff_avg_h264_qpel4_mc03_10_mmxext liteav_ff_avg_h264_qpel4_mc03_10_mmxext +#define ff_hevc_pred_planar_8x8_neon_8 liteav_ff_hevc_pred_planar_8x8_neon_8 +#define avfilter_sub_matrix liteav_avfilter_sub_matrix +#define rgb15tobgr24 liteav_rgb15tobgr24 +#define ff_init_lls_x86 liteav_ff_init_lls_x86 +#define av_get_packed_sample_fmt liteav_av_get_packed_sample_fmt +#define av_frame_set_pkt_pos liteav_av_frame_set_pkt_pos +#define ff_put_h264_qpel16_mc13_neon liteav_ff_put_h264_qpel16_mc13_neon +#define av_hash_names liteav_av_hash_names +#define ff_h263_v_loop_filter_mmx liteav_ff_h263_v_loop_filter_mmx +#define ff_qdm2_at_decoder liteav_ff_qdm2_at_decoder +#define ff_put_no_rnd_qpel16_mc12_old_c liteav_ff_put_no_rnd_qpel16_mc12_old_c +#define ff_avg_pixels8_neon liteav_ff_avg_pixels8_neon +#define ff_mp4_read_descr_len liteav_ff_mp4_read_descr_len +#define ff_decode_bsfs_uninit liteav_ff_decode_bsfs_uninit +#define ffio_realloc_buf liteav_ffio_realloc_buf +#define av_bmg_get liteav_av_bmg_get +#define av_dump_format liteav_av_dump_format +#define ff_thread_flush liteav_ff_thread_flush +#define ff_hevc_put_qpel_uw_v2_neon_8 liteav_ff_hevc_put_qpel_uw_v2_neon_8 +#define ff_pixblockdsp_init_x86 liteav_ff_pixblockdsp_init_x86 +#define rgb48tobgr64_nobswap liteav_rgb48tobgr64_nobswap +#define ff_mjpegenc_huffman_compute_bits liteav_ff_mjpegenc_huffman_compute_bits +#define 
ff_aac_codebook_vector_idx liteav_ff_aac_codebook_vector_idx +#define text_segment_chain_delete liteav_text_segment_chain_delete +#define yylex_init_extra liteav_yylex_init_extra +#define ff_avg_qpel8_mc11_old_c liteav_ff_avg_qpel8_mc11_old_c +#define ff_volume_init_x86 liteav_ff_volume_init_x86 +#define ff_mpeg12_init_vlcs liteav_ff_mpeg12_init_vlcs +#define ff_w7_plus_w3_lo liteav_ff_w7_plus_w3_lo +#define av_md5_sum liteav_av_md5_sum +#define ff_pred4x4_horizontal_up_8_mmxext liteav_ff_pred4x4_horizontal_up_8_mmxext +#define ff_imdct_half_avx liteav_ff_imdct_half_avx +#define ff_h264_idct_add8_10_avx liteav_ff_h264_idct_add8_10_avx +#define av_aes_ctr_set_iv liteav_av_aes_ctr_set_iv +#define ff_print_debug_info liteav_ff_print_debug_info +#define ff_cos_2048 liteav_ff_cos_2048 +#define ff_put_h264_qpel16_h_lowpass_l2_ssse3 liteav_ff_put_h264_qpel16_h_lowpass_l2_ssse3 +#define ffurl_open liteav_ffurl_open +#define av_grow_packet liteav_av_grow_packet +#define avpriv_mpegts_parse_open liteav_avpriv_mpegts_parse_open +#define ff_list_bsf liteav_ff_list_bsf +#define ff_put_h264_qpel4_mc11_10_mmxext liteav_ff_put_h264_qpel4_mc11_10_mmxext +#define yyrestart liteav_yyrestart +#define ff_pred8x8_dc_neon liteav_ff_pred8x8_dc_neon +#define ff_isom_write_vpcc liteav_ff_isom_write_vpcc +#define ff_hevc_pred_planar_4x4_neon_8 liteav_ff_hevc_pred_planar_4x4_neon_8 +#define ff_add_pixels_clamped_c liteav_ff_add_pixels_clamped_c +#define avio_wb32 liteav_avio_wb32 +#define av_qsv_alloc_context liteav_av_qsv_alloc_context +#define ff_put_pixels_clamped_c liteav_ff_put_pixels_clamped_c +#define ff_mpeg4_studio_intra liteav_ff_mpeg4_studio_intra +#define av_write_image_line2 liteav_av_write_image_line2 +#define av_vorbis_parse_reset liteav_av_vorbis_parse_reset +#define ff_pred4x4_vertical_right_8_mmxext liteav_ff_pred4x4_vertical_right_8_mmxext +#define ff_h264_decode_seq_parameter_set liteav_ff_h264_decode_seq_parameter_set +#define ff_swb_offset_128 liteav_ff_swb_offset_128 
+#define ff_pack_2ch_float_to_int32_a_sse2 liteav_ff_pack_2ch_float_to_int32_a_sse2 +#define ffurl_close liteav_ffurl_close +#define ff_put_v liteav_ff_put_v +#define ff_swb_offset_120 liteav_ff_swb_offset_120 +#define ff_avg_pixels16_neon liteav_ff_avg_pixels16_neon +#define ff_resample_common_apply_filter_x8_float_neon liteav_ff_resample_common_apply_filter_x8_float_neon +#define ff_pred8x8_plane_8_mmx liteav_ff_pred8x8_plane_8_mmx +#define av_new_packet liteav_av_new_packet +#define av_reallocp_array liteav_av_reallocp_array +#define yvu9_to_yuy2 liteav_yvu9_to_yuy2 +#define sws_getConstVec liteav_sws_getConstVec +#define ff_pack_6ch_float_to_float_u_avx liteav_ff_pack_6ch_float_to_float_u_avx +#define ff_mpeg12_vlc_dc_lum_bits liteav_ff_mpeg12_vlc_dc_lum_bits +#define ff_init_mpadsp_tabs_float liteav_ff_init_mpadsp_tabs_float +#define ff_vf_vflip liteav_ff_vf_vflip +#define ff_avg_h264_qpel4_v_lowpass_mmxext liteav_ff_avg_h264_qpel4_v_lowpass_mmxext +#define av_ripemd_init liteav_av_ripemd_init +#define text_style_copy liteav_text_style_copy +#define ff_rtp_get_payload_type liteav_ff_rtp_get_payload_type +#define av_packet_from_data liteav_av_packet_from_data +#define ff_cos_2048_fixed liteav_ff_cos_2048_fixed +#define ff_sine_4096 liteav_ff_sine_4096 +#define ff_aac_num_swb_960 liteav_ff_aac_num_swb_960 +#define swri_resample_dsp_x86_init liteav_swri_resample_dsp_x86_init +#define ff_hevc_put_epel_uw_pixels_w48_neon_8 liteav_ff_hevc_put_epel_uw_pixels_w48_neon_8 +#define ff_sine_windows liteav_ff_sine_windows +#define ff_put_pixels16_xy2_no_rnd_neon liteav_ff_put_pixels16_xy2_no_rnd_neon +#define ff_mov_close_hinting liteav_ff_mov_close_hinting +#define ff_decode_get_hw_frames_ctx liteav_ff_decode_get_hw_frames_ctx +#define ff_put_h264_qpel16_mc10_10_ssse3_cache64 liteav_ff_put_h264_qpel16_mc10_10_ssse3_cache64 +#define av_opt_get_image_size liteav_av_opt_get_image_size +#define av_image_alloc liteav_av_image_alloc +#define ff_parse_close liteav_ff_parse_close 
+#define ff_h264_dequant8_coeff_init_scan liteav_ff_h264_dequant8_coeff_init_scan +#define ff_put_h264_qpel8_mc03_10_sse2 liteav_ff_put_h264_qpel8_mc03_10_sse2 +#define ff_mpeg12_common_init liteav_ff_mpeg12_common_init +#define ff_pred8x8l_horizontal_down_8_ssse3 liteav_ff_pred8x8l_horizontal_down_8_ssse3 +#define av_hwframe_map liteav_av_hwframe_map +#define ff_hevc_pred_planar_8x8_neon_8_1 liteav_ff_hevc_pred_planar_8x8_neon_8_1 +#define ff_er_frame_end liteav_ff_er_frame_end +#define ff_amf_write_object_start liteav_ff_amf_write_object_start +#define ff_pred16x16_vertical_8_mmx liteav_ff_pred16x16_vertical_8_mmx +#define ff_h264_idct_add_8_avx liteav_ff_h264_idct_add_8_avx +#define av_aes_ctr_get_iv liteav_av_aes_ctr_get_iv +#define av_opt_child_class_next liteav_av_opt_child_class_next +#define ff_codec_movsubtitle_tags liteav_ff_codec_movsubtitle_tags +#define ff_mdct_calcw_c liteav_ff_mdct_calcw_c +#define rgb12to15 liteav_rgb12to15 +#define ff_hevc_idct_8x8_dc_neon_8_asm liteav_ff_hevc_idct_8x8_dc_neon_8_asm +#define avcodec_get_hw_frames_parameters liteav_avcodec_get_hw_frames_parameters +#define ff_yuv422p_to_bgra_neon liteav_ff_yuv422p_to_bgra_neon +#define ff_unpack_2ch_int16_to_float_a_ssse3 liteav_ff_unpack_2ch_int16_to_float_a_ssse3 +#define ff_deblock_v_luma_intra_10_sse2 liteav_ff_deblock_v_luma_intra_10_sse2 +#define ff_avg_h264_qpel16_mc03_neon liteav_ff_avg_h264_qpel16_mc03_neon +#define yyset_extra liteav_yyset_extra +#define av_log_set_callback liteav_av_log_set_callback +#define ff_tlog_link liteav_ff_tlog_link +#define ff_h264_luma_dc_dequant_idct_sse2 liteav_ff_h264_luma_dc_dequant_idct_sse2 +#define text_style_delete liteav_text_style_delete +#define ff_pred8x8l_down_left_10_avx liteav_ff_pred8x8l_down_left_10_avx +#define avcodec_dct_alloc liteav_avcodec_dct_alloc +#define ff_ebur128_destroy liteav_ff_ebur128_destroy +#define ff_int16_to_int32_u_mmx liteav_ff_int16_to_int32_u_mmx +#define ff_mpeg_update_thread_context 
liteav_ff_mpeg_update_thread_context +#define ff_id3v1_genre_str liteav_ff_id3v1_genre_str +#define av_adts_header_parse liteav_av_adts_header_parse +#define ff_h263_inter_MCBPC_code liteav_ff_h263_inter_MCBPC_code +#define ff_pack_6ch_float_to_float_a_sse liteav_ff_pack_6ch_float_to_float_a_sse +#define ff_butterflies_float_neon liteav_ff_butterflies_float_neon +#define ff_h264_biweight_16_sse2 liteav_ff_h264_biweight_16_sse2 +#define avcodec_descriptor_get liteav_avcodec_descriptor_get +#define ff_put_h264_qpel8_mc11_neon liteav_ff_put_h264_qpel8_mc11_neon +#define av_cmp_i liteav_av_cmp_i +#define uyvytoyuv420 liteav_uyvytoyuv420 +#define ff_pred4x4_vertical_vp8_8_mmxext liteav_ff_pred4x4_vertical_vp8_8_mmxext +#define ff_avg_qpel8_mc13_old_c liteav_ff_avg_qpel8_mc13_old_c +#define av_div_q liteav_av_div_q +#define ff_h263_pred_acdc liteav_ff_h263_pred_acdc +#define av_color_space_name liteav_av_color_space_name +#define ff_h263_videotoolbox_hwaccel liteav_ff_h263_videotoolbox_hwaccel +#define ff_mpa_decode_header liteav_ff_mpa_decode_header +#define ff_isom_write_hvcc liteav_ff_isom_write_hvcc +#define ff_put_pixels8x8_c liteav_ff_put_pixels8x8_c +#define ff_hevc_add_residual_4x4_neon_8 liteav_ff_hevc_add_residual_4x4_neon_8 +#define ff_avg_pixels16_y2_neon liteav_ff_avg_pixels16_y2_neon +#define av_div_i liteav_av_div_i +#define ff_default_get_video_buffer liteav_ff_default_get_video_buffer +#define swri_oldapi_conv_fltp_to_s16_nch_neon liteav_swri_oldapi_conv_fltp_to_s16_nch_neon +#define ff_put_h264_qpel16_mc23_neon liteav_ff_put_h264_qpel16_mc23_neon +#define ff_eac3_demuxer liteav_ff_eac3_demuxer +#define ff_mpeg4_get_video_packet_prefix_length liteav_ff_mpeg4_get_video_packet_prefix_length +#define yuv422ptouyvy liteav_yuv422ptouyvy +#define ff_simple_idct12_avx liteav_ff_simple_idct12_avx +#define ff_unpack_2ch_int16_to_int32_u_sse2 liteav_ff_unpack_2ch_int16_to_int32_u_sse2 +#define ff_pred8x8_0lt_dc_neon liteav_ff_pred8x8_0lt_dc_neon +#define 
ff_inlink_check_available_frame liteav_ff_inlink_check_available_frame +#define ff_mpa_quant_steps liteav_ff_mpa_quant_steps +#define ff_thread_can_start_frame liteav_ff_thread_can_start_frame +#define ff_h264_filter_mb_fast liteav_ff_h264_filter_mb_fast +#define av_hash_final_hex liteav_av_hash_final_hex +#define ff_put_h264_qpel16_mc13_10_sse2 liteav_ff_put_h264_qpel16_mc13_10_sse2 +#define ffio_set_buf_size liteav_ffio_set_buf_size +#define av_timecode_get_smpte_from_framenum liteav_av_timecode_get_smpte_from_framenum +#define swri_audio_convert_free liteav_swri_audio_convert_free +#define ff_h264_idct_add16intra_12_c liteav_ff_h264_idct_add16intra_12_c +#define ff_metadata_conv liteav_ff_metadata_conv +#define ffurl_get_file_handle liteav_ffurl_get_file_handle +#define ff_put_h264_qpel16_mc11_neon liteav_ff_put_h264_qpel16_mc11_neon +#define ff_h264_golomb_to_inter_cbp liteav_ff_h264_golomb_to_inter_cbp +#define ff_mpeg_unref_picture liteav_ff_mpeg_unref_picture +#define ff_imdct36_blocks_fixed liteav_ff_imdct36_blocks_fixed +#define ff_avg_h264_qpel8_mc11_neon liteav_ff_avg_h264_qpel8_mc11_neon +#define ff_h264_idct_add16intra_14_c liteav_ff_h264_idct_add16intra_14_c +#define ff_cbrt_tableinit liteav_ff_cbrt_tableinit +#define ff_mpeg4_pred_ac liteav_ff_mpeg4_pred_ac +#define ff_h264_weight_16_mmxext liteav_ff_h264_weight_16_mmxext +#define ff_hevc_put_epel_uw_bi_v_neon_8 liteav_ff_hevc_put_epel_uw_bi_v_neon_8 +#define ff_h264_idct8_add4_10_sse2 liteav_ff_h264_idct8_add4_10_sse2 +#define vlc_css_expression_New liteav_vlc_css_expression_New +#define ff_hevc_ps_uninit liteav_ff_hevc_ps_uninit +#define ff_four_imdct36_float_avx liteav_ff_four_imdct36_float_avx +#define ff_hevc_pred_angular_32x32_v_neon_8 liteav_ff_hevc_pred_angular_32x32_v_neon_8 +#define av_mdct_init liteav_av_mdct_init +#define ff_put_h264_qpel8or16_hv2_lowpass_ssse3 liteav_ff_put_h264_qpel8or16_hv2_lowpass_ssse3 +#define ff_pd_32 liteav_ff_pd_32 +#define ff_mpa_l2_select_table 
liteav_ff_mpa_l2_select_table +#define ff_frame_pool_get_video_config liteav_ff_frame_pool_get_video_config +#define ff_hevc_cu_chroma_qp_offset_idx liteav_ff_hevc_cu_chroma_qp_offset_idx +#define ff_http_init_auth_state liteav_ff_http_init_auth_state +#define sws_freeContext liteav_sws_freeContext +#define av_probe_input_format liteav_av_probe_input_format +#define vlc_css_parser_ParseBytes liteav_vlc_css_parser_ParseBytes +#define av_strireplace liteav_av_strireplace +#define ff_h264_luma_dc_dequant_idct_14_c liteav_ff_h264_luma_dc_dequant_idct_14_c +#define ff_put_h264_qpel4_mc03_10_mmxext liteav_ff_put_h264_qpel4_mc03_10_mmxext +#define avcodec_get_chroma_sub_sample liteav_avcodec_get_chroma_sub_sample +#define av_vlog liteav_av_vlog +#define ff_avg_h264_qpel16_mc00_10_sse2 liteav_ff_avg_h264_qpel16_mc00_10_sse2 +#define swr_get_out_samples liteav_swr_get_out_samples +#define ff_choose_timebase liteav_ff_choose_timebase +#define av_match_name liteav_av_match_name +#define ff_rtmp_packet_read_internal liteav_ff_rtmp_packet_read_internal +#define sws_setColorspaceDetails liteav_sws_setColorspaceDetails +#define ff_pred8x8l_horizontal_down_8_mmxext liteav_ff_pred8x8l_horizontal_down_8_mmxext +#define av_opt_eval_int64 liteav_av_opt_eval_int64 +#define ff_w7_min_w5 liteav_ff_w7_min_w5 +#define ff_put_h264_qpel4_mc12_10_mmxext liteav_ff_put_h264_qpel4_mc12_10_mmxext +#define av_buffersink_get_h liteav_av_buffersink_get_h +#define av_abuffersink_params_alloc liteav_av_abuffersink_params_alloc +#define avio_put_str liteav_avio_put_str +#define sws_isSupportedOutput liteav_sws_isSupportedOutput +#define ff_ps_hybrid_analysis_sse liteav_ff_ps_hybrid_analysis_sse +#define ff_h264_field_end liteav_ff_h264_field_end +#define ff_hevc_put_pixels_w16_neon_8_asm liteav_ff_hevc_put_pixels_w16_neon_8_asm +#define yyparse liteav_yyparse +#define av_sha512_update liteav_av_sha512_update +#define av_buffersink_get_w liteav_av_buffersink_get_w +#define av_vbprintf liteav_av_vbprintf 
+#define av_image_fill_linesizes liteav_av_image_fill_linesizes +#define ff_deblock_h_luma_mbaff_8_sse2 liteav_ff_deblock_h_luma_mbaff_8_sse2 +#define avcodec_find_encoder liteav_avcodec_find_encoder +#define av_frame_get_pkt_size liteav_av_frame_get_pkt_size +#define yyfree liteav_yyfree +#define ff_hevc_output_frame liteav_ff_hevc_output_frame +#define ff_avg_h264_qpel8or16_hv1_lowpass_op_mmxext liteav_ff_avg_h264_qpel8or16_hv1_lowpass_op_mmxext +#define ff_avg_qpel8_mc12_old_c liteav_ff_avg_qpel8_mc12_old_c +#define ff_ac3_frame_size_tab liteav_ff_ac3_frame_size_tab +#define ff_init_desc_hscale liteav_ff_init_desc_hscale +#define ff_dct_init liteav_ff_dct_init +#define ff_af_loudnorm liteav_ff_af_loudnorm +#define ff_ps_mul_pair_single_sse liteav_ff_ps_mul_pair_single_sse +#define ff_aac_latm_parser liteav_ff_aac_latm_parser +#define ff_h264_luma_dc_dequant_idct_10_c liteav_ff_h264_luma_dc_dequant_idct_10_c +#define avio_open_dyn_buf liteav_avio_open_dyn_buf +#define avcodec_get_pix_fmt_loss liteav_avcodec_get_pix_fmt_loss +#define sws_getCoefficients liteav_sws_getCoefficients +#define ff_merge_samplerates liteav_ff_merge_samplerates +#define avfilter_graph_parse liteav_avfilter_graph_parse +#define sws_cloneVec liteav_sws_cloneVec +#define ff_sbr_hf_apply_noise_3_neon liteav_ff_sbr_hf_apply_noise_3_neon +#define ff_parse_pixel_format liteav_ff_parse_pixel_format +#define ff_h264_alloc_tables liteav_ff_h264_alloc_tables +#define ff_put_h264_qpel8_mc22_10_sse2 liteav_ff_put_h264_qpel8_mc22_10_sse2 +#define ff_h264_luma_dc_dequant_idct_12_c liteav_ff_h264_luma_dc_dequant_idct_12_c +#define av_rc4_init liteav_av_rc4_init +#define ff_network_wait_fd_timeout liteav_ff_network_wait_fd_timeout +#define ff_nv12_to_abgr_neon liteav_ff_nv12_to_abgr_neon +#define ff_hevc_put_epel_uw_h_neon_8 liteav_ff_hevc_put_epel_uw_h_neon_8 +#define yylex_destroy liteav_yylex_destroy +#define sws_getCachedContext liteav_sws_getCachedContext +#define ff_avg_h264_qpel8_mc22_10_sse2 
liteav_ff_avg_h264_qpel8_mc22_10_sse2 +#define ffurl_size liteav_ffurl_size +#define swr_free liteav_swr_free +#define ff_simple_idct10_avx liteav_ff_simple_idct10_avx +#define ff_fft_calc_neon liteav_ff_fft_calc_neon +#define ff_rtmp_packet_read liteav_ff_rtmp_packet_read +#define ff_vorbiscomment_metadata_conv liteav_ff_vorbiscomment_metadata_conv +#define ff_asrc_abuffer liteav_ff_asrc_abuffer +#define ff_pw_4096 liteav_ff_pw_4096 +#define ff_hevc_put_pel_uw_pixels_w8_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w8_neon_8_asm +#define ff_h264_pred_direct_motion liteav_ff_h264_pred_direct_motion +#define ff_pw_4095 liteav_ff_pw_4095 +#define av_aes_size liteav_av_aes_size +#define ff_get_audio_buffer liteav_ff_get_audio_buffer +#define ff_hevc_put_qpel_h2v2_neon_8 liteav_ff_hevc_put_qpel_h2v2_neon_8 +#define ff_vorbis_stream_comment liteav_ff_vorbis_stream_comment +#define ff_hevc_put_pixels_w12_neon_8 liteav_ff_hevc_put_pixels_w12_neon_8 +#define ff_pack_2ch_int16_to_int16_u_sse2 liteav_ff_pack_2ch_int16_to_int16_u_sse2 +#define ff_put_no_rnd_qpel8_mc11_old_c liteav_ff_put_no_rnd_qpel8_mc11_old_c +#define ff_hevc_cabac_init liteav_ff_hevc_cabac_init +#define ff_h264_chroma422_dc_dequant_idct_9_c liteav_ff_h264_chroma422_dc_dequant_idct_9_c +#define av_frame_copy liteav_av_frame_copy +#define ff_codec_guid_get_id liteav_ff_codec_guid_get_id +#define ff_h263_decoder liteav_ff_h263_decoder +#define ff_h264_decode_extradata liteav_ff_h264_decode_extradata +#define ff_init_filters liteav_ff_init_filters +#define av_opt_get_double liteav_av_opt_get_double +#define ff_parse_sample_rate liteav_ff_parse_sample_rate +#define ff_ass_split_override_codes liteav_ff_ass_split_override_codes +#define ff_deblock_h_chroma422_8_sse2 liteav_ff_deblock_h_chroma422_8_sse2 +#define ff_rtmp_protocol liteav_ff_rtmp_protocol +#define ff_hevc_decode_nal_pps liteav_ff_hevc_decode_nal_pps +#define ffurl_read liteav_ffurl_read +#define av_get_channel_name liteav_av_get_channel_name +#define 
ff_crop_tab liteav_ff_crop_tab +#define ff_mpeg4_decode_video_packet_header liteav_ff_mpeg4_decode_video_packet_header +#define av_hwdevice_get_hwframe_constraints liteav_av_hwdevice_get_hwframe_constraints +#define ff_hevc_put_pixels_w24_neon_8_asm liteav_ff_hevc_put_pixels_w24_neon_8_asm +#define ff_rdft_calc_neon liteav_ff_rdft_calc_neon +#define ff_filter_graph_remove_filter liteav_ff_filter_graph_remove_filter +#define ff_ac3_at_decoder liteav_ff_ac3_at_decoder +#define ff_put_h264_qpel8_mc20_10_sse2 liteav_ff_put_h264_qpel8_mc20_10_sse2 +#define ff_hevc_put_pixels_w12_neon_8_asm liteav_ff_hevc_put_pixels_w12_neon_8_asm +#define ff_init_scantable_permutation liteav_ff_init_scantable_permutation +#define av_get_default_channel_layout liteav_av_get_default_channel_layout +#define ff_put_h264_qpel8_mc10_10_sse2_cache64 liteav_ff_put_h264_qpel8_mc10_10_sse2_cache64 +#define avio_wb24 liteav_avio_wb24 +#define av_display_rotation_get liteav_av_display_rotation_get +#define ff_make_format_list liteav_ff_make_format_list +#define ff_get_v_length liteav_ff_get_v_length +#define ff_filter_init_hw_frames liteav_ff_filter_init_hw_frames +#define ff_h264_muxer liteav_ff_h264_muxer +#define ff_unpack_6ch_float_to_int32_u_avx liteav_ff_unpack_6ch_float_to_int32_u_avx +#define av_color_transfer_name liteav_av_color_transfer_name +#define av_sha_alloc liteav_av_sha_alloc +#define ff_text_eof liteav_ff_text_eof +#define av_rc4_alloc liteav_av_rc4_alloc +#define text_style_duplicate liteav_text_style_duplicate +#define av_map_videotoolbox_format_from_pixfmt liteav_av_map_videotoolbox_format_from_pixfmt +#define ff_hevc_put_pixels_w64_neon_8_asm liteav_ff_hevc_put_pixels_w64_neon_8_asm +#define swri_resampler liteav_swri_resampler +#define ff_avg_h264_qpel4_mc23_10_mmxext liteav_ff_avg_h264_qpel4_mc23_10_mmxext +#define ff_h264_decode_ref_pic_marking liteav_ff_h264_decode_ref_pic_marking +#define ff_sws_init_swscale_aarch64 liteav_ff_sws_init_swscale_aarch64 +#define 
ff_avg_rv40_chroma_mc8_mmxext liteav_ff_avg_rv40_chroma_mc8_mmxext +#define ff_put_h264_qpel8_mc21_neon liteav_ff_put_h264_qpel8_mc21_neon +#define ff_hevc_pred_init liteav_ff_hevc_pred_init +#define ff_sbrdsp_init_aarch64 liteav_ff_sbrdsp_init_aarch64 +#define ff_put_h264_qpel16_mc31_10_sse2 liteav_ff_put_h264_qpel16_mc31_10_sse2 +#define ff_avg_h264_qpel4_mc00_10_mmxext liteav_ff_avg_h264_qpel4_mc00_10_mmxext +#define ff_ps_hybrid_synthesis_deint_sse liteav_ff_ps_hybrid_synthesis_deint_sse +#define av_rdft_end liteav_av_rdft_end +#define ff_avg_pixels16_x2_no_rnd_neon liteav_ff_avg_pixels16_x2_no_rnd_neon +#define ff_sbr_hf_gen_neon liteav_ff_sbr_hf_gen_neon +#define av_packet_shrink_side_data liteav_av_packet_shrink_side_data +#define ff_inlink_evaluate_timeline_at_frame liteav_ff_inlink_evaluate_timeline_at_frame +#define ff_cbpc_b_tab liteav_ff_cbpc_b_tab +#define ff_avg_h264_chroma_mc4_10_mmxext liteav_ff_avg_h264_chroma_mc4_10_mmxext +#define ff_h263_decode_init_vlc liteav_ff_h263_decode_init_vlc +#define ff_pred8x8_vertical_10_sse2 liteav_ff_pred8x8_vertical_10_sse2 +#define av_opt_eval_double liteav_av_opt_eval_double +#define ff_fdct_ifast liteav_ff_fdct_ifast +#define ff_h264_idct_add_10_sse2 liteav_ff_h264_idct_add_10_sse2 +#define ff_vector_fmul_add_neon liteav_ff_vector_fmul_add_neon +#define ff_rtmps_protocol liteav_ff_rtmps_protocol +#define ff_mpeg12_mbPatTable liteav_ff_mpeg12_mbPatTable +#define av_bsf_get_by_name liteav_av_bsf_get_by_name +#define ff_default_get_audio_buffer liteav_ff_default_get_audio_buffer +#define ff_amf_write_field_name liteav_ff_amf_write_field_name +#define ff_yuv422p_to_abgr_neon liteav_ff_yuv422p_to_abgr_neon +#define ff_graph_thread_free liteav_ff_graph_thread_free +#define av_register_codec_parser liteav_av_register_codec_parser +#define ff_avg_h264_qpel8_mc10_10_ssse3_cache64 liteav_ff_avg_h264_qpel8_mc10_10_ssse3_cache64 +#define av_image_fill_arrays liteav_av_image_fill_arrays +#define avfilter_all_channel_layouts 
liteav_avfilter_all_channel_layouts +#define av_log liteav_av_log +#define ff_network_close liteav_ff_network_close +#define av_pix_fmt_desc_next liteav_av_pix_fmt_desc_next +#define ff_vf_pad liteav_ff_vf_pad +#define av_timecode_check_frame_rate liteav_av_timecode_check_frame_rate +#define ff_mpeg4_rl_intra liteav_ff_mpeg4_rl_intra +#define ff_tls_open_underlying liteav_ff_tls_open_underlying +#define av_timecode_init_from_string liteav_av_timecode_init_from_string +#define ffio_read_indirect liteav_ffio_read_indirect +#define ff_h264_mb_sizes liteav_ff_h264_mb_sizes +#define ff_h263_pixel_aspect liteav_ff_h263_pixel_aspect +#define ff_pred8x8_top_dc_neon liteav_ff_pred8x8_top_dc_neon +#define ff_aac_kbd_long_960 liteav_ff_aac_kbd_long_960 +#define ff_hevc_put_pixels_w6_neon_8 liteav_ff_hevc_put_pixels_w6_neon_8 +#define ff_put_h264_qpel16_mc21_10_sse2 liteav_ff_put_h264_qpel16_mc21_10_sse2 +#define ff_deblock_h_luma_8_sse2 liteav_ff_deblock_h_luma_8_sse2 +#define ff_mpv_frame_start liteav_ff_mpv_frame_start +#define ff_avg_qpel16_mc32_old_c liteav_ff_avg_qpel16_mc32_old_c +#define av_opt_serialize liteav_av_opt_serialize +#define swr_convert_frame liteav_swr_convert_frame +#define ff_pack_8ch_int32_to_float_a_sse2 liteav_ff_pack_8ch_int32_to_float_a_sse2 +#define ff_hevc_epel_filters liteav_ff_hevc_epel_filters +#define rgb12tobgr12 liteav_rgb12tobgr12 +#define ff_alternate_horizontal_scan liteav_ff_alternate_horizontal_scan +#define av_image_check_size2 liteav_av_image_check_size2 +#define ff_mpv_idct_init liteav_ff_mpv_idct_init +#define av_memdup liteav_av_memdup +#define ff_ac3_enc_channel_map liteav_ff_ac3_enc_channel_map +#define ff_amf_write_object_end liteav_ff_amf_write_object_end +#define av_opt_get_channel_layout liteav_av_opt_get_channel_layout +#define ff_hevc_luma_mv_mvp_mode liteav_ff_hevc_luma_mv_mvp_mode +#define av_opt_eval_flags liteav_av_opt_eval_flags +#define ff_sine_64_fixed liteav_ff_sine_64_fixed +#define av_opt_find2 liteav_av_opt_find2 
+#define ff_subtitles_queue_seek liteav_ff_subtitles_queue_seek +#define av_tea_crypt liteav_av_tea_crypt +#define ff_simple_idct_add_neon liteav_ff_simple_idct_add_neon +#define ff_put_h264_qpel16_mc22_10_sse2 liteav_ff_put_h264_qpel16_mc22_10_sse2 +#define ff_float_to_int32_u_avx2 liteav_ff_float_to_int32_u_avx2 +#define ff_hevc_put_qpel_h3v1_neon_8 liteav_ff_hevc_put_qpel_h3v1_neon_8 +#define av_frame_set_sample_rate liteav_av_frame_set_sample_rate +#define ffio_open_null_buf liteav_ffio_open_null_buf +#define ff_ac3_bitrate_tab liteav_ff_ac3_bitrate_tab +#define ff_hpeldsp_init_x86 liteav_ff_hpeldsp_init_x86 +#define ff_interleaved_ue_golomb_vlc_code liteav_ff_interleaved_ue_golomb_vlc_code +#define ff_ac3_sample_rate_tab liteav_ff_ac3_sample_rate_tab +#define ff_pred8x8_dc_10_sse2 liteav_ff_pred8x8_dc_10_sse2 +#define sws_getDefaultFilter liteav_sws_getDefaultFilter +#define ff_shuffle_bytes_2103_mmxext liteav_ff_shuffle_bytes_2103_mmxext +#define ff_h264_biweight_16_ssse3 liteav_ff_h264_biweight_16_ssse3 +#define ff_put_pixels16_neon liteav_ff_put_pixels16_neon +#define av_murmur3_update liteav_av_murmur3_update +#define av_get_channel_description liteav_av_get_channel_description +#define av_frame_move_ref liteav_av_frame_move_ref +#define av_opt_set_sample_fmt liteav_av_opt_set_sample_fmt +#define ff_mov_get_channel_layout_tag liteav_ff_mov_get_channel_layout_tag +#define ff_h264_slice_context_init liteav_ff_h264_slice_context_init +#define rgb32tobgr15 liteav_rgb32tobgr15 +#define ff_shuffle_bytes_1230_ssse3 liteav_ff_shuffle_bytes_1230_ssse3 +#define rgb32tobgr16 liteav_rgb32tobgr16 +#define ff_pw_128 liteav_ff_pw_128 +#define ff_mpeg4_static_rl_table_store liteav_ff_mpeg4_static_rl_table_store +#define ff_mpadsp_apply_window_float liteav_ff_mpadsp_apply_window_float +#define ffio_open2_wrapper liteav_ffio_open2_wrapper +#define ff_vector_fmul_vfp liteav_ff_vector_fmul_vfp +#define ff_hevc_put_qpel_uw_h2v1_neon_8 liteav_ff_hevc_put_qpel_uw_h2v1_neon_8 
+#define av_opt_child_next liteav_av_opt_child_next +#define av_encryption_info_add_side_data liteav_av_encryption_info_add_side_data +#define ffurl_handshake liteav_ffurl_handshake +#define av_probe_input_format3 liteav_av_probe_input_format3 +#define av_image_fill_pointers liteav_av_image_fill_pointers +#define ff_pred8x8l_dc_8_mmxext liteav_ff_pred8x8l_dc_8_mmxext +#define ff_pred16x16_plane_h264_8_sse2 liteav_ff_pred16x16_plane_h264_8_sse2 +#define av_buffersink_get_hw_frames_ctx liteav_av_buffersink_get_hw_frames_ctx +#define ff_put_h264_qpel16_mc31_neon liteav_ff_put_h264_qpel16_mc31_neon +#define ff_avg_qpel8_mc31_old_c liteav_ff_avg_qpel8_mc31_old_c +#define yy_scan_buffer liteav_yy_scan_buffer +#define ff_avg_h264_chroma_mc8_rnd_mmxext liteav_ff_avg_h264_chroma_mc8_rnd_mmxext +#define avpriv_tempfile liteav_avpriv_tempfile +#define av_camellia_alloc liteav_av_camellia_alloc +#define ff_avg_h264_qpel8_mc10_10_sse2 liteav_ff_avg_h264_qpel8_mc10_10_sse2 +#define av_file_map liteav_av_file_map +#define av_encryption_info_alloc liteav_av_encryption_info_alloc +#define av_hmac_init liteav_av_hmac_init +#define av_hash_final liteav_av_hash_final +#define av_lfg_init liteav_av_lfg_init +#define avcodec_register liteav_avcodec_register +#define text_segment_delete liteav_text_segment_delete +#define ff_hevc_transform_16x16_neon_8_asm liteav_ff_hevc_transform_16x16_neon_8_asm +#define ff_mpadsp_apply_window_float_neon liteav_ff_mpadsp_apply_window_float_neon +#define ff_interleaved_golomb_vlc_len liteav_ff_interleaved_golomb_vlc_len +#define ff_hevc_decode_extradata liteav_ff_hevc_decode_extradata +#define ff_print_debug_info2 liteav_ff_print_debug_info2 +#define av_opt_get_int liteav_av_opt_get_int +#define ff_hevc_put_qpel_h2v3_neon_8 liteav_ff_hevc_put_qpel_h2v3_neon_8 +#define ff_hevc_put_pixels_w2_neon_8_asm liteav_ff_hevc_put_pixels_w2_neon_8_asm +#define av_opt_freep_ranges liteav_av_opt_freep_ranges +#define ff_avg_h264_qpel8_mc13_neon 
liteav_ff_avg_h264_qpel8_mc13_neon +#define avio_open liteav_avio_open +#define ff_h264_weight_8_10_sse2 liteav_ff_h264_weight_8_10_sse2 +#define ff_h264_weight_8_10_sse4 liteav_ff_h264_weight_8_10_sse4 +#define yyget_debug liteav_yyget_debug +#define av_write_frame liteav_av_write_frame +#define ff_hevc_put_qpel_hv_neon_8_wrapper liteav_ff_hevc_put_qpel_hv_neon_8_wrapper +#define avio_enum_protocols liteav_avio_enum_protocols +#define av_buffer_make_writable liteav_av_buffer_make_writable +#define ff_check_alignment liteav_ff_check_alignment +#define ff_put_pixels16_xy2_neon liteav_ff_put_pixels16_xy2_neon +#define ff_ebur128_loudness_window liteav_ff_ebur128_loudness_window +#define av_fifo_generic_peek_at liteav_av_fifo_generic_peek_at +#define ff_put_rv40_chroma_mc8_mmx liteav_ff_put_rv40_chroma_mc8_mmx +#define ff_h264_idct_add16intra_10_avx liteav_ff_h264_idct_add16intra_10_avx +#define ff_hevc_sao_offset_sign_decode liteav_ff_hevc_sao_offset_sign_decode +#define avio_context_free liteav_avio_context_free +#define ffio_open_dyn_packet_buf liteav_ffio_open_dyn_packet_buf +#define ff_avg_h264_qpel4_mc13_10_mmxext liteav_ff_avg_h264_qpel4_mc13_10_mmxext +#define ff_h264_parse_ref_count liteav_ff_h264_parse_ref_count +#define ff_init_scantable_permutation_x86 liteav_ff_init_scantable_permutation_x86 +#define sws_freeVec liteav_sws_freeVec +#define ff_af_amix liteav_ff_af_amix +#define avpriv_ac3_parse_header liteav_avpriv_ac3_parse_header +#define ff_mp3adu_decoder liteav_ff_mp3adu_decoder +#define ff_deblock_h_chroma422_intra_8_sse2 liteav_ff_deblock_h_chroma422_intra_8_sse2 +#define ff_interleave_add_packet liteav_ff_interleave_add_packet +#define ff_inlink_set_status liteav_ff_inlink_set_status +#define ff_cos_131072_fixed liteav_ff_cos_131072_fixed +#define av_compare_ts liteav_av_compare_ts +#define sws_getGaussianVec liteav_sws_getGaussianVec +#define ff_mov_read_stsd_entries liteav_ff_mov_read_stsd_entries +#define ff_pred8x8l_down_right_10_ssse3 
liteav_ff_pred8x8l_down_right_10_ssse3 +#define ff_psdsp_init_x86 liteav_ff_psdsp_init_x86 +#define ff_hevc_put_qpel_v_neon_8_wrapper liteav_ff_hevc_put_qpel_v_neon_8_wrapper +#define ff_dct_init_x86 liteav_ff_dct_init_x86 +#define ff_hevc_set_new_ref liteav_ff_hevc_set_new_ref +#define ff_fft_lut_init liteav_ff_fft_lut_init +#define av_packet_make_refcounted liteav_av_packet_make_refcounted +#define av_hmac_calc liteav_av_hmac_calc +#define av_dup_packet liteav_av_dup_packet +#define ff_swb_offset_960 liteav_ff_swb_offset_960 +#define ff_id3v2_match liteav_ff_id3v2_match +#define ff_put_h264_qpel16_mc20_10_ssse3_cache64 liteav_ff_put_h264_qpel16_mc20_10_ssse3_cache64 +#define ff_hevc_sao_edge_eo3_w32_neon_8 liteav_ff_hevc_sao_edge_eo3_w32_neon_8 +#define av_dirname liteav_av_dirname +#define ff_cos_16384_fixed liteav_ff_cos_16384_fixed +#define ff_avg_h264_qpel8_mc30_10_ssse3_cache64 liteav_ff_avg_h264_qpel8_mc30_10_ssse3_cache64 +#define avfilter_license liteav_avfilter_license +#define ff_pred4x4_tm_vp8_8_mmxext liteav_ff_pred4x4_tm_vp8_8_mmxext +#define av_bprintf liteav_av_bprintf +#define av_audio_fifo_size liteav_av_audio_fifo_size +#define ff_pred16x16_left_dc_neon liteav_ff_pred16x16_left_dc_neon +#define ff_mpadsp_init liteav_ff_mpadsp_init +#define ff_codec_movvideo_tags liteav_ff_codec_movvideo_tags +#define ff_videotoolbox_h264_decode_slice liteav_ff_videotoolbox_h264_decode_slice +#define ff_h264_sei_decode liteav_ff_h264_sei_decode +#define ff_videodsp_init_x86 liteav_ff_videodsp_init_x86 +#define ff_h264_decoder liteav_ff_h264_decoder +#define ff_unpack_2ch_int16_to_float_a_sse2 liteav_ff_unpack_2ch_int16_to_float_a_sse2 +#define ff_hevc_pred_init_aarch64 liteav_ff_hevc_pred_init_aarch64 +#define yy_delete_buffer liteav_yy_delete_buffer +#define ff_avc_parse_nal_units_buf liteav_ff_avc_parse_nal_units_buf +#define ff_vorbis_channel_layout_offsets liteav_ff_vorbis_channel_layout_offsets +#define ff_avg_qpel16_mc31_old_c 
liteav_ff_avg_qpel16_mc31_old_c +#define avio_find_protocol_name liteav_avio_find_protocol_name +#define ff_mpeg4video_split liteav_ff_mpeg4video_split +#define ff_int32_to_int16_a_sse2 liteav_ff_int32_to_int16_a_sse2 +#define av_opt_set_channel_layout liteav_av_opt_set_channel_layout +#define av_xtea_crypt liteav_av_xtea_crypt +#define ff_thread_decode_frame liteav_ff_thread_decode_frame +#define ff_avg_pixels4_l2_mmxext liteav_ff_avg_pixels4_l2_mmxext +#define av_opt_copy liteav_av_opt_copy +#define av_buffersink_get_frame liteav_av_buffersink_get_frame +#define ff_get_unscaled_swscale_aarch64 liteav_ff_get_unscaled_swscale_aarch64 +#define ff_fft_offsets_lut liteav_ff_fft_offsets_lut +#define yyget_in liteav_yyget_in +#define ff_hevc_res_scale_sign_flag liteav_ff_hevc_res_scale_sign_flag +#define ff_sine_32_fixed liteav_ff_sine_32_fixed +#define avfilter_graph_create_filter liteav_avfilter_graph_create_filter +#define ff_formats_unref liteav_ff_formats_unref +#define ff_ac3_rematrix_band_tab liteav_ff_ac3_rematrix_band_tab +#define ff_mpeg1_dc_scale_table liteav_ff_mpeg1_dc_scale_table +#define ff_yuv420p_to_argb_neon liteav_ff_yuv420p_to_argb_neon +#define ff_ass_get_dialog liteav_ff_ass_get_dialog +#define ff_deblock_v_luma_intra_8_sse2 liteav_ff_deblock_v_luma_intra_8_sse2 +#define ff_pred16x16_horizontal_8_ssse3 liteav_ff_pred16x16_horizontal_8_ssse3 +#define avpriv_io_move liteav_avpriv_io_move +#define ff_videodsp_init liteav_ff_videodsp_init +#define ff_framequeue_peek liteav_ff_framequeue_peek +#define ff_sine_window_init liteav_ff_sine_window_init +#define av_sha_init liteav_av_sha_init +#define ff_mpeg4videodec_static_init liteav_ff_mpeg4videodec_static_init +#define av_camellia_crypt liteav_av_camellia_crypt +#define sws_isSupportedEndiannessConversion liteav_sws_isSupportedEndiannessConversion +#define ff_imdct_half_c_fixed liteav_ff_imdct_half_c_fixed +#define ff_mp3float_decoder liteav_ff_mp3float_decoder +#define ff_int32_to_int16_u_mmx 
liteav_ff_int32_to_int16_u_mmx +#define ff_h264_idct_dc_add_10_mmxext liteav_ff_h264_idct_dc_add_10_mmxext +#define ff_sine_120 liteav_ff_sine_120 +#define av_read_image_line liteav_av_read_image_line +#define ff_faanidct_add liteav_ff_faanidct_add +#define ff_sine_128 liteav_ff_sine_128 +#define sws_init_context liteav_sws_init_context +#define ff_avg_pixels16_mmx liteav_ff_avg_pixels16_mmx +#define ff_pred8x8l_horizontal_up_10_sse2 liteav_ff_pred8x8l_horizontal_up_10_sse2 +#define ff_draw_round_to_sub liteav_ff_draw_round_to_sub +#define ff_intel_h263_decode_picture_header liteav_ff_intel_h263_decode_picture_header +#define avformat_alloc_output_context2 liteav_avformat_alloc_output_context2 +#define ff_h264_draw_horiz_band liteav_ff_h264_draw_horiz_band +#define ffurl_seek liteav_ffurl_seek +#define av_mallocz_array liteav_av_mallocz_array +#define ff_cos_16384 liteav_ff_cos_16384 +#define ff_hevc_put_qpel_v3_neon_8 liteav_ff_hevc_put_qpel_v3_neon_8 +#define ff_avg_h264_qpel8_mc20_10_ssse3_cache64 liteav_ff_avg_h264_qpel8_mc20_10_ssse3_cache64 +#define ff_update_link_current_pts liteav_ff_update_link_current_pts +#define av_frame_copy_props liteav_av_frame_copy_props +#define av_xtea_le_init liteav_av_xtea_le_init +#define ff_simple_idct248_put liteav_ff_simple_idct248_put +#define avpriv_align_put_bits liteav_avpriv_align_put_bits +#define ff_unpack_6ch_int32_to_float_a_sse2 liteav_ff_unpack_6ch_int32_to_float_a_sse2 +#define ff_af_dynaudnorm liteav_ff_af_dynaudnorm +#define av_log_format_line liteav_av_log_format_line +#define vlc_css_unquoted liteav_vlc_css_unquoted +#define ff_put_h264_chroma_mc2_neon liteav_ff_put_h264_chroma_mc2_neon +#define ff_put_h264_qpel8_mc01_neon liteav_ff_put_h264_qpel8_mc01_neon +#define av_murmur3_init_seeded liteav_av_murmur3_init_seeded +#define av_samples_set_silence liteav_av_samples_set_silence +#define ff_inlink_consume_frame liteav_ff_inlink_consume_frame +#define ff_thread_get_buffer liteav_ff_thread_get_buffer +#define 
ff_ebur128_add_frames_int liteav_ff_ebur128_add_frames_int +#define ff_hevc_v_loop_filter_chroma_neon liteav_ff_hevc_v_loop_filter_chroma_neon +#define ff_vorbis_comment liteav_ff_vorbis_comment +#define avfilter_make_format64_list liteav_avfilter_make_format64_list +#define ff_pred8x8l_horizontal_10_avx liteav_ff_pred8x8l_horizontal_10_avx +#define av_parser_parse2 liteav_av_parser_parse2 +#define ff_hevc_put_qpel_uw_pixels_w48_neon_8 liteav_ff_hevc_put_qpel_uw_pixels_w48_neon_8 +#define ff_mp3on4_decoder liteav_ff_mp3on4_decoder +#define ff_hpeldsp_init liteav_ff_hpeldsp_init +#define ff_h264_idct_dc_add_10_c liteav_ff_h264_idct_dc_add_10_c +#define ff_imdct_half_sse liteav_ff_imdct_half_sse +#define ff_vf_scale2ref liteav_ff_vf_scale2ref +#define ff_aac_kbd_long_1024_fixed liteav_ff_aac_kbd_long_1024_fixed +#define ff_h264_idct_add16_9_c liteav_ff_h264_idct_add16_9_c +#define ff_pack_8ch_float_to_int32_u_avx liteav_ff_pack_8ch_float_to_int32_u_avx +#define ff_avg_h264_qpel8_mc03_10_sse2 liteav_ff_avg_h264_qpel8_mc03_10_sse2 +#define ff_h264_idct_dc_add_12_c liteav_ff_h264_idct_dc_add_12_c +#define avio_seek liteav_avio_seek +#define av_rc4_crypt liteav_av_rc4_crypt +#define ff_h263_decode_picture_header liteav_ff_h263_decode_picture_header +#define ff_ps_hybrid_analysis_sse3 liteav_ff_ps_hybrid_analysis_sse3 +#define av_murmur3_init liteav_av_murmur3_init +#define ff_mpadsp_apply_window_fixed liteav_ff_mpadsp_apply_window_fixed +#define ff_h264_idct_dc_add_14_c liteav_ff_h264_idct_dc_add_14_c +#define ff_ac3_bap_tab liteav_ff_ac3_bap_tab +#define ff_avg_h264_qpel8_h_lowpass_mmxext liteav_ff_avg_h264_qpel8_h_lowpass_mmxext +#define ff_mdct15_init_x86 liteav_ff_mdct15_init_x86 +#define ff_mp4_parse_es_descr liteav_ff_mp4_parse_es_descr +#define ff_mp4_read_dec_config_descr liteav_ff_mp4_read_dec_config_descr +#define ff_sbrdsp_init liteav_ff_sbrdsp_init +#define ff_put_h264_chroma_mc8_10_sse2 liteav_ff_put_h264_chroma_mc8_10_sse2 +#define ff_h264_sei_stereo_mode 
liteav_ff_h264_sei_stereo_mode +#define rgb16tobgr32 liteav_rgb16tobgr32 +#define avpriv_pix_fmt_bps_avi liteav_avpriv_pix_fmt_bps_avi +#define av_utf8_decode liteav_av_utf8_decode +#define ff_avio_class liteav_ff_avio_class +#define ff_pack_8ch_float_to_float_u_avx liteav_ff_pack_8ch_float_to_float_u_avx +#define ff_hevc_idct_32x32_dc_neon_8_asm liteav_ff_hevc_idct_32x32_dc_neon_8_asm +#define ff_mpeg2_aspect liteav_ff_mpeg2_aspect +#define ff_avg_h264_qpel16_mc30_neon liteav_ff_avg_h264_qpel16_mc30_neon +#define av_opt_get_dict_val liteav_av_opt_get_dict_val +#define ff_h263_inter_MCBPC_bits liteav_ff_h263_inter_MCBPC_bits +#define ff_subtitles_queue_insert liteav_ff_subtitles_queue_insert +#define avcodec_descriptor_next liteav_avcodec_descriptor_next +#define ff_amr_nb_at_decoder liteav_ff_amr_nb_at_decoder +#define ff_h264_quant_rem6 liteav_ff_h264_quant_rem6 +#define ff_mdct_calc_c_fixed_32 liteav_ff_mdct_calc_c_fixed_32 +#define ff_connect_parallel liteav_ff_connect_parallel +#define ff_libfdk_aac_encoder liteav_ff_libfdk_aac_encoder +#define ff_w4_min_w2_hi liteav_ff_w4_min_w2_hi +#define ff_pw_512 liteav_ff_pw_512 +#define avio_rb16 liteav_avio_rb16 +#define ff_unpack_6ch_float_to_float_u_sse liteav_ff_unpack_6ch_float_to_float_u_sse +#define ff_deblock_v_chroma_10_sse2 liteav_ff_deblock_v_chroma_10_sse2 +#define ff_copy_rectangle2 liteav_ff_copy_rectangle2 +#define ff_mpa_enwindow liteav_ff_mpa_enwindow +#define ff_h264_dequant4_coeff_init liteav_ff_h264_dequant4_coeff_init +#define avio_open_dir liteav_avio_open_dir +#define ff_h264_pred_init liteav_ff_h264_pred_init +#define ff_hevc_mp4toannexb_bsf liteav_ff_hevc_mp4toannexb_bsf +#define ff_blend_rectangle liteav_ff_blend_rectangle +#define ff_avc_find_startcode liteav_ff_avc_find_startcode +#define ff_h264_idct_add16intra_8_sse2 liteav_ff_h264_idct_add16intra_8_sse2 +#define ff_pred8x8l_horizontal_down_8_sse2 liteav_ff_pred8x8l_horizontal_down_8_sse2 +#define ff_pred16x16_tm_vp8_8_sse2 
liteav_ff_pred16x16_tm_vp8_8_sse2 +#define ff_id3v2_4_metadata_conv liteav_ff_id3v2_4_metadata_conv +#define ff_ue_golomb_len liteav_ff_ue_golomb_len +#define ff_h264_b_sub_mb_type_info liteav_ff_h264_b_sub_mb_type_info +#define ff_rgb24toyv12_c liteav_ff_rgb24toyv12_c +#define sws_isSupportedInput liteav_sws_isSupportedInput +#define ff_resample_common_apply_filter_x4_float_neon liteav_ff_resample_common_apply_filter_x4_float_neon +#define avpriv_scalarproduct_float_c liteav_avpriv_scalarproduct_float_c +#define swri_rematrix liteav_swri_rematrix +#define av_packet_free liteav_av_packet_free +#define ff_deblock_h_chroma_intra_8_avx liteav_ff_deblock_h_chroma_intra_8_avx +#define ff_framequeue_free liteav_ff_framequeue_free +#define ff_pack_2ch_int32_to_int16_a_sse2 liteav_ff_pack_2ch_int32_to_int16_a_sse2 +#define ff_aac_eld_window_480_fixed liteav_ff_aac_eld_window_480_fixed +#define av_mediacodec_default_free liteav_av_mediacodec_default_free +#define av_strtok liteav_av_strtok +#define ff_pred8x8l_horizontal_8_mmxext liteav_ff_pred8x8l_horizontal_8_mmxext +#define ff_avs3_profiles liteav_ff_avs3_profiles +#define ff_w5_plus_w7 liteav_ff_w5_plus_w7 +#define ff_nv12_to_bgra_neon liteav_ff_nv12_to_bgra_neon +#define ff_vorbiscomment_write liteav_ff_vorbiscomment_write +#define ff_hevc_put_qpel_uw_h3_neon_8 liteav_ff_hevc_put_qpel_uw_h3_neon_8 +#define ff_avg_h264_qpel16_mc20_neon liteav_ff_avg_h264_qpel16_mc20_neon +#define av_xtea_init liteav_av_xtea_init +#define ff_pred8x8_vertical_8_mmx liteav_ff_pred8x8_vertical_8_mmx +#define ff_deblock_h_luma_intra_10_avx liteav_ff_deblock_h_luma_intra_10_avx +#define ff_hevcdsp_init_neon_asm liteav_ff_hevcdsp_init_neon_asm +#define avfilter_configuration liteav_avfilter_configuration +#define ff_w7_plus_w3_hi liteav_ff_w7_plus_w3_hi +#define ff_hevc_put_epel_uw_pixels_w16_neon_8 liteav_ff_hevc_put_epel_uw_pixels_w16_neon_8 +#define ff_hevc_transform_16x16_neon_8 liteav_ff_hevc_transform_16x16_neon_8 +#define 
av_frame_set_pkt_duration liteav_av_frame_set_pkt_duration +#define ff_hevc_part_mode_decode liteav_ff_hevc_part_mode_decode +#define ff_ps_stereo_interpolate_ipdopd_neon liteav_ff_ps_stereo_interpolate_ipdopd_neon +#define ff_h264_idct_add_8_sse2 liteav_ff_h264_idct_add_8_sse2 +#define av_frame_get_metadata liteav_av_frame_get_metadata +#define ff_hevc_put_qpel_h1v1_neon_8 liteav_ff_hevc_put_qpel_h1v1_neon_8 +#define ff_fft_init_fixed_32 liteav_ff_fft_init_fixed_32 +#define av_bsf_list_parse_str liteav_av_bsf_list_parse_str +#define ff_avg_h264_qpel16_mc30_10_sse2 liteav_ff_avg_h264_qpel16_mc30_10_sse2 +#define ff_int16_to_int32_a_sse2 liteav_ff_int16_to_int32_a_sse2 +#define ff_hevc_flush_dpb liteav_ff_hevc_flush_dpb +#define yyset_column liteav_yyset_column +#define ff_request_frame liteav_ff_request_frame +#define ff_pack_2ch_int16_to_int32_u_sse2 liteav_ff_pack_2ch_int16_to_int32_u_sse2 +#define ff_mpa_synth_filter_float liteav_ff_mpa_synth_filter_float +#define ffio_fill liteav_ffio_fill +#define ff_mov_cenc_write_sinf_tag liteav_ff_mov_cenc_write_sinf_tag +#define av_find_input_format liteav_av_find_input_format +#define ff_mpv_common_init_neon liteav_ff_mpv_common_init_neon +#define ff_dct32_float liteav_ff_dct32_float +#define av_oformat_next liteav_av_oformat_next +#define av_audio_fifo_peek_at liteav_av_audio_fifo_peek_at +#define ff_put_h264_qpel4_hv_lowpass_v_mmxext liteav_ff_put_h264_qpel4_hv_lowpass_v_mmxext +#define av_pix_fmt_swap_endianness liteav_av_pix_fmt_swap_endianness +#define ff_hevc_pred_angular_16x16_v_neon_8 liteav_ff_hevc_pred_angular_16x16_v_neon_8 +#define ff_mpv_common_frame_size_change liteav_ff_mpv_common_frame_size_change +#define ff_h264_idct_add_8_c liteav_ff_h264_idct_add_8_c +#define av_cast5_crypt liteav_av_cast5_crypt +#define ff_h264_weight_4_mmxext liteav_ff_h264_weight_4_mmxext +#define ff_graph_thread_init liteav_ff_graph_thread_init +#define av_filter_iterate liteav_av_filter_iterate +#define 
ff_avg_h264_qpel4_h_lowpass_l2_mmxext liteav_ff_avg_h264_qpel4_h_lowpass_l2_mmxext +#define ff_inlink_process_commands liteav_ff_inlink_process_commands +#define ff_pred8x8_hor_neon liteav_ff_pred8x8_hor_neon +#define ff_aac_codebook_vectors liteav_ff_aac_codebook_vectors +#define avcodec_encode_subtitle liteav_avcodec_encode_subtitle +#define ff_hevc_ref_idx_lx_decode liteav_ff_hevc_ref_idx_lx_decode +#define uyvytoyuv422 liteav_uyvytoyuv422 +#define ff_hevc_sao_band_w32_neon_8 liteav_ff_hevc_sao_band_w32_neon_8 +#define ff_hevc_pred_init_neon_intrinsics liteav_ff_hevc_pred_init_neon_intrinsics +#define ff_read_riff_info liteav_ff_read_riff_info +#define ff_mpeg_ref_picture liteav_ff_mpeg_ref_picture +#define av_d2q liteav_av_d2q +#define av_stristr liteav_av_stristr +#define ff_int32_to_int16_a_mmx liteav_ff_int32_to_int16_a_mmx +#define av_fifo_generic_peek liteav_av_fifo_generic_peek +#define ff_all_samplerates liteav_ff_all_samplerates +#define ff_pack_8ch_int32_to_float_u_sse2 liteav_ff_pack_8ch_int32_to_float_u_sse2 +#define ff_pred8x8_plane_8_mmxext liteav_ff_pred8x8_plane_8_mmxext +#define ff_simple_idct48_add liteav_ff_simple_idct48_add +#define av_image_check_sar liteav_av_image_check_sar +#define av_copy_packet_side_data liteav_av_copy_packet_side_data +#define ff_parse_specific_params liteav_ff_parse_specific_params +#define swri_oldapi_conv_fltp_to_s16_2ch_neon liteav_swri_oldapi_conv_fltp_to_s16_2ch_neon +#define ff_avfilter_link_set_out_status liteav_ff_avfilter_link_set_out_status +#define ff_deblock_v_luma_intra_10_avx liteav_ff_deblock_v_luma_intra_10_avx +#define ff_av1_profiles liteav_ff_av1_profiles +#define avcodec_find_best_pix_fmt_of_2 liteav_avcodec_find_best_pix_fmt_of_2 +#define swri_realloc_audio liteav_swri_realloc_audio +#define ff_add_format liteav_ff_add_format +#define ff_pred8x8l_vertical_10_avx liteav_ff_pred8x8l_vertical_10_avx +#define av_strtod liteav_av_strtod +#define av_encryption_init_info_get_side_data 
liteav_av_encryption_init_info_get_side_data +#define ff_avg_h264_qpel8_mc20_neon liteav_ff_avg_h264_qpel8_mc20_neon +#define ff_pred16x16_tm_vp8_8_mmxext liteav_ff_pred16x16_tm_vp8_8_mmxext +#define avformat_get_riff_audio_tags liteav_avformat_get_riff_audio_tags +#define ff_rotate_slice liteav_ff_rotate_slice +#define ff_hevc_mvp_lx_flag_decode liteav_ff_hevc_mvp_lx_flag_decode +#define ff_imdct_half_c_fixed_32 liteav_ff_imdct_half_c_fixed_32 +#define swr_config_frame liteav_swr_config_frame +#define av_guess_codec liteav_av_guess_codec +#define ff_check_pixfmt_descriptors liteav_ff_check_pixfmt_descriptors +#define ff_http_auth_handle_header liteav_ff_http_auth_handle_header +#define ff_hevc_put_pixels_w32_neon_8 liteav_ff_hevc_put_pixels_w32_neon_8 +#define ff_unpack_2ch_int32_to_int32_a_sse2 liteav_ff_unpack_2ch_int32_to_int32_a_sse2 +#define ff_flac_get_max_frame_size liteav_ff_flac_get_max_frame_size +#define ff_subtitles_read_chunk liteav_ff_subtitles_read_chunk +#define vlc_css_selector_New liteav_vlc_css_selector_New +#define av_buffersrc_add_frame liteav_av_buffersrc_add_frame +#define ff_sine_256 liteav_ff_sine_256 +#define ff_put_h264_qpel16_mc30_10_ssse3_cache64 liteav_ff_put_h264_qpel16_mc30_10_ssse3_cache64 +#define ff_h263_show_pict_info liteav_ff_h263_show_pict_info +#define ff_jref_idct_put liteav_ff_jref_idct_put +#define ff_rtmpte_protocol liteav_ff_rtmpte_protocol +#define ff_formats_changeref liteav_ff_formats_changeref +#define ff_avg_pixels16_mmxext liteav_ff_avg_pixels16_mmxext +#define av_hwdevice_ctx_alloc liteav_av_hwdevice_ctx_alloc +#define ff_zigzag_direct liteav_ff_zigzag_direct +#define ff_get_codec_guid liteav_ff_get_codec_guid +#define ff_h263_loop_filter_strength liteav_ff_h263_loop_filter_strength +#define ff_inlink_queued_frames liteav_ff_inlink_queued_frames +#define ff_network_sleep_interruptible liteav_ff_network_sleep_interruptible +#define ff_hevc_put_qpel_uw_h1v3_neon_8 liteav_ff_hevc_put_qpel_uw_h1v3_neon_8 +#define 
ff_put_guid liteav_ff_put_guid +#define av_bsf_get_class liteav_av_bsf_get_class +#define ff_hwframe_map_create liteav_ff_hwframe_map_create +#define ff_amf_read_null liteav_ff_amf_read_null +#define ff_aac_num_swb_512 liteav_ff_aac_num_swb_512 +#define ff_hevc_put_pel_uw_pixels_w48_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w48_neon_8_asm +#define ff_sine_8192 liteav_ff_sine_8192 +#define vlc_css_selector_AddSpecifier liteav_vlc_css_selector_AddSpecifier +#define av_ripemd_size liteav_av_ripemd_size +#define ff_vf_crop liteav_ff_vf_crop +#define ff_float_to_int16_a_sse2 liteav_ff_float_to_int16_a_sse2 +#define ff_h264_h_loop_filter_chroma_neon liteav_ff_h264_h_loop_filter_chroma_neon +#define ff_hevc_cbf_luma_decode liteav_ff_hevc_cbf_luma_decode +#define av_frame_unref liteav_av_frame_unref +#define ff_rdft_end liteav_ff_rdft_end +#define ff_put_qpel16_mc12_old_c liteav_ff_put_qpel16_mc12_old_c +#define vlc_css_parser_AddRule liteav_vlc_css_parser_AddRule +#define ff_mov_add_hinted_packet liteav_ff_mov_add_hinted_packet +#define ff_socket_nonblock liteav_ff_socket_nonblock +#define ff_put_h264_qpel16_mc23_10_sse2 liteav_ff_put_h264_qpel16_mc23_10_sse2 +#define ff_er_frame_start liteav_ff_er_frame_start +#define avio_free_directory_entry liteav_avio_free_directory_entry +#define ff_fill_line_with_color liteav_ff_fill_line_with_color +#define av_bitstream_filter_filter liteav_av_bitstream_filter_filter +#define av_bprint_channel_layout liteav_av_bprint_channel_layout +#define ff_pcm_read_packet liteav_ff_pcm_read_packet +#define av_bitstream_filter_next liteav_av_bitstream_filter_next +#define ff_avg_h264_qpel8_mc23_10_sse2 liteav_ff_avg_h264_qpel8_mc23_10_sse2 +#define av_opt_eval_float liteav_av_opt_eval_float +#define avformat_get_mov_video_tags liteav_avformat_get_mov_video_tags +#define ff_h264_idct_add8_422_8_c liteav_ff_h264_idct_add8_422_8_c +#define ff_put_h264_qpel8_mc13_neon liteav_ff_put_h264_qpel8_mc13_neon +#define ff_cos_128_fixed 
liteav_ff_cos_128_fixed +#define avcodec_find_decoder_by_name liteav_avcodec_find_decoder_by_name +#define avpriv_slicethread_execute liteav_avpriv_slicethread_execute +#define ff_ssa_decoder liteav_ff_ssa_decoder +#define av_audio_fifo_alloc liteav_av_audio_fifo_alloc +#define ff_unpack_6ch_float_to_float_a_sse liteav_ff_unpack_6ch_float_to_float_a_sse +#define av_dct_calc liteav_av_dct_calc +#define ff_framesync_init liteav_ff_framesync_init +#define ff_hevc_pred_angular_8x8_v_neon_8 liteav_ff_hevc_pred_angular_8x8_v_neon_8 +#define av_find_info_tag liteav_av_find_info_tag +#define av_sha512_final liteav_av_sha512_final +#define swr_is_initialized liteav_swr_is_initialized +#define av_mastering_display_metadata_alloc liteav_av_mastering_display_metadata_alloc +#define av_filter_ffversion liteav_av_filter_ffversion +#define swr_init liteav_swr_init +#define ff_pred4x4_down_right_10_sse2 liteav_ff_pred4x4_down_right_10_sse2 +#define av_dict_free liteav_av_dict_free +#define ff_id3v2_write_apic liteav_ff_id3v2_write_apic +#define ff_codec_movdata_tags liteav_ff_codec_movdata_tags +#define ff_fft_init_x86 liteav_ff_fft_init_x86 +#define ff_hevc_put_epel_uw_bi_hv_neon_8 liteav_ff_hevc_put_epel_uw_bi_hv_neon_8 +#define av_memcpy_backptr liteav_av_memcpy_backptr +#define ff_put_h264_qpel16_mc01_10_sse2 liteav_ff_put_h264_qpel16_mc01_10_sse2 +#define yy_scan_bytes liteav_yy_scan_bytes +#define ff_avg_h264_chroma_mc4_3dnow liteav_ff_avg_h264_chroma_mc4_3dnow +#define ff_https_protocol liteav_ff_https_protocol +#define av_map_videotoolbox_format_to_pixfmt liteav_av_map_videotoolbox_format_to_pixfmt +#define av_opt_set_defaults liteav_av_opt_set_defaults +#define ff_text_init_avio liteav_ff_text_init_avio +#define av_opt_set_dict_val liteav_av_opt_set_dict_val +#define ff_put_no_rnd_qpel16_mc31_old_c liteav_ff_put_no_rnd_qpel16_mc31_old_c +#define av_frame_apply_cropping liteav_av_frame_apply_cropping +#define avfilter_inout_free liteav_avfilter_inout_free +#define 
av_dynarray2_add liteav_av_dynarray2_add +#define av_get_extended_channel_layout liteav_av_get_extended_channel_layout +#define ff_log2_tab liteav_ff_log2_tab +#define ff_init_desc_fmt_convert liteav_ff_init_desc_fmt_convert +#define ff_get_unscaled_swscale liteav_ff_get_unscaled_swscale +#define ff_shuffle_bytes_2103_ssse3 liteav_ff_shuffle_bytes_2103_ssse3 +#define ff_listen liteav_ff_listen +#define ff_hevc_sao_edge_eo1_w32_neon_8 liteav_ff_hevc_sao_edge_eo1_w32_neon_8 +#define ff_hevc_sao_band_filter_8_neon_asm liteav_ff_hevc_sao_band_filter_8_neon_asm +#define av_packet_alloc liteav_av_packet_alloc +#define ff_avg_h264_qpel8_mc20_10_sse2 liteav_ff_avg_h264_qpel8_mc20_10_sse2 +#define ff_h264chroma_init liteav_ff_h264chroma_init +#define ff_put_h264_qpel8_mc21_10_sse2 liteav_ff_put_h264_qpel8_mc21_10_sse2 +#define ff_h263_resync liteav_ff_h263_resync +#define ff_put_h264_qpel8_h_lowpass_mmxext liteav_ff_put_h264_qpel8_h_lowpass_mmxext +#define ff_unpack_2ch_int32_to_float_a_sse2 liteav_ff_unpack_2ch_int32_to_float_a_sse2 +#define av_gcd liteav_av_gcd +#define ff_ps_add_squares_neon liteav_ff_ps_add_squares_neon +#define ff_free_vlc liteav_ff_free_vlc +#define ff_h264_demuxer liteav_ff_h264_demuxer +#define av_usleep liteav_av_usleep +#define ff_deblock_h_luma_mbaff_8_avx liteav_ff_deblock_h_luma_mbaff_8_avx +#define ff_int32_to_float_u_avx liteav_ff_int32_to_float_u_avx +#define ff_pred16x16_dc_10_sse2 liteav_ff_pred16x16_dc_10_sse2 +#define ff_ac3_slow_decay_tab liteav_ff_ac3_slow_decay_tab +#define avfilter_graph_send_command liteav_avfilter_graph_send_command +#define avpriv_mpeg4audio_sample_rates liteav_avpriv_mpeg4audio_sample_rates +#define ff_null_get_video_buffer liteav_ff_null_get_video_buffer +#define ff_swb_offset_480 liteav_ff_swb_offset_480 +#define ff_hevc_put_pel_bi_neon_8_asm liteav_ff_hevc_put_pel_bi_neon_8_asm +#define ff_eac3_custom_channel_map_locations liteav_ff_eac3_custom_channel_map_locations +#define av_log_get_level 
liteav_av_log_get_level +#define av_mastering_display_metadata_create_side_data liteav_av_mastering_display_metadata_create_side_data +#define ff_ebur128_loudness_global_multiple liteav_ff_ebur128_loudness_global_multiple +#define ff_mov_cenc_avc_write_nal_units liteav_ff_mov_cenc_avc_write_nal_units +#define ff_ebur128_add_frames_double liteav_ff_ebur128_add_frames_double +#define ff_simple_idct84_add liteav_ff_simple_idct84_add +#define ff_mpa_synth_init_fixed liteav_ff_mpa_synth_init_fixed +#define ff_avg_h264_qpel8_mc31_neon liteav_ff_avg_h264_qpel8_mc31_neon +#define ff_hevc_pred_planar_16x16_neon_8 liteav_ff_hevc_pred_planar_16x16_neon_8 +#define ff_deblock_h_chroma_8_sse2 liteav_ff_deblock_h_chroma_8_sse2 +#define ff_hevc_put_qpel_v2_neon_8 liteav_ff_hevc_put_qpel_v2_neon_8 +#define ff_all_channel_layouts liteav_ff_all_channel_layouts +#define ff_pred16x16_top_dc_neon liteav_ff_pred16x16_top_dc_neon +#define av_malloc_array liteav_av_malloc_array +#define ff_mp4_obj_type liteav_ff_mp4_obj_type +#define ff_put_vc1_chroma_mc8_nornd_mmx liteav_ff_put_vc1_chroma_mc8_nornd_mmx +#define av_frame_new_side_data_from_buf liteav_av_frame_new_side_data_from_buf +#define ff_mpeg_flush liteav_ff_mpeg_flush +#define av_encryption_info_free liteav_av_encryption_info_free +#define av_parse_cpu_flags liteav_av_parse_cpu_flags +#define ff_avg_h264_qpel16_mc10_neon liteav_ff_avg_h264_qpel16_mc10_neon +#define ff_avg_h264_qpel16_mc02_neon liteav_ff_avg_h264_qpel16_mc02_neon +#define avfilter_graph_queue_command liteav_avfilter_graph_queue_command +#define avpriv_copy_bits liteav_avpriv_copy_bits +#define av_malloc liteav_av_malloc +#define ff_avg_pixels8_mmx liteav_ff_avg_pixels8_mmx +#define ff_sine_256_fixed liteav_ff_sine_256_fixed +#define av_hwframe_transfer_get_formats liteav_av_hwframe_transfer_get_formats +#define av_log_set_flags liteav_av_log_set_flags +#define ff_int16_to_int32_a_mmx liteav_ff_int16_to_int32_a_mmx +#define ff_amf_write_bool liteav_ff_amf_write_bool 
+#define avio_rb24 liteav_avio_rb24 +#define ff_copy_rectangle liteav_ff_copy_rectangle +#define avpriv_split_xiph_headers liteav_avpriv_split_xiph_headers +#define ff_aac_eld_window_512_fixed liteav_ff_aac_eld_window_512_fixed +#define ff_avg_vc1_chroma_mc8_nornd_ssse3 liteav_ff_avg_vc1_chroma_mc8_nornd_ssse3 +#define ff_rl_intra_aic liteav_ff_rl_intra_aic +#define avfilter_link_free liteav_avfilter_link_free +#define ff_weight_h264_pixels_16_neon liteav_ff_weight_h264_pixels_16_neon +#define ff_prefetch_aarch64 liteav_ff_prefetch_aarch64 +#define vlc_css_unescape liteav_vlc_css_unescape +#define av_tea_init liteav_av_tea_init +#define ff_avg_h264_qpel16_mc11_neon liteav_ff_avg_h264_qpel16_mc11_neon +#define av_buffersrc_parameters_set liteav_av_buffersrc_parameters_set +#define av_picture_crop liteav_av_picture_crop +#define ff_h264_decode_mb_cavlc liteav_ff_h264_decode_mb_cavlc +#define ff_simple_idct_add_int16_8bit liteav_ff_simple_idct_add_int16_8bit +#define ff_put_h264_qpel16_mc20_neon liteav_ff_put_h264_qpel16_mc20_neon +#define ff_pred16x16_horizontal_8_mmxext liteav_ff_pred16x16_horizontal_8_mmxext +#define av_fast_malloc liteav_av_fast_malloc +#define ff_put_h264_qpel8_mc02_10_sse2 liteav_ff_put_h264_qpel8_mc02_10_sse2 +#define ff_pack_6ch_int32_to_float_a_avx liteav_ff_pack_6ch_int32_to_float_a_avx +#define ff_thread_video_encode_frame liteav_ff_thread_video_encode_frame +#define ff_avg_h264_qpel4_mc32_10_mmxext liteav_ff_avg_h264_qpel4_mc32_10_mmxext +#define ff_jpeg2000_profiles liteav_ff_jpeg2000_profiles +#define avio_size liteav_avio_size +#define ff_fft_calc_avx liteav_ff_fft_calc_avx +#define av_aes_alloc liteav_av_aes_alloc +#define ff_pw_1019 liteav_ff_pw_1019 +#define ff_sqrt_tab liteav_ff_sqrt_tab +#define ff_unpack_2ch_int32_to_int16_a_sse2 liteav_ff_unpack_2ch_int32_to_int16_a_sse2 +#define ff_mpeg12_vlc_dc_lum_code liteav_ff_mpeg12_vlc_dc_lum_code +#define av_sub_i liteav_av_sub_i +#define ff_nv21_to_argb_neon liteav_ff_nv21_to_argb_neon 
+#define avio_alloc_context liteav_avio_alloc_context +#define avfilter_inout_alloc liteav_avfilter_inout_alloc +#define ff_imdct36_float_ssse3 liteav_ff_imdct36_float_ssse3 +#define ff_sbr_qmf_pre_shuffle_neon liteav_ff_sbr_qmf_pre_shuffle_neon +#define ff_mdct_init_fixed_32 liteav_ff_mdct_init_fixed_32 +#define ff_flac_lpc_16_arm liteav_ff_flac_lpc_16_arm +#define ff_avg_pixels16_x2_neon liteav_ff_avg_pixels16_x2_neon +#define ff_put_qpel8_mc12_old_c liteav_ff_put_qpel8_mc12_old_c +#define ff_mpeg4_frame_end liteav_ff_mpeg4_frame_end +#define av_sub_q liteav_av_sub_q +#define avpriv_register_devices liteav_avpriv_register_devices +#define ff_sine_128_fixed liteav_ff_sine_128_fixed +#define av_opt_set_bin liteav_av_opt_set_bin +#define ff_deblock_v_chroma_intra_8_mmxext liteav_ff_deblock_v_chroma_intra_8_mmxext +#define ff_h264_idct8_dc_add_10_avx liteav_ff_h264_idct8_dc_add_10_avx +#define ff_h264chroma_init_aarch64 liteav_ff_h264chroma_init_aarch64 +#define ff_raw_data_read_header liteav_ff_raw_data_read_header +#define swresample_license liteav_swresample_license +#define ff_put_h264_qpel16_mc32_neon liteav_ff_put_h264_qpel16_mc32_neon +#define ff_cos_65536_fixed liteav_ff_cos_65536_fixed +#define ff_pw_15 liteav_ff_pw_15 +#define ff_pw_16 liteav_ff_pw_16 +#define ff_pw_17 liteav_ff_pw_17 +#define ff_h264_remove_all_refs liteav_ff_h264_remove_all_refs +#define avio_put_str16le liteav_avio_put_str16le +#define webvtt_FillStyleFromCssDeclaration liteav_webvtt_FillStyleFromCssDeclaration +#define avpriv_float_dsp_alloc liteav_avpriv_float_dsp_alloc +#define codec_ism_tags liteav_codec_ism_tags +#define ff_raw_read_partial_packet liteav_ff_raw_read_partial_packet +#define av_pix_fmt_desc_get_id liteav_av_pix_fmt_desc_get_id +#define ff_pred8x8_tm_vp8_8_ssse3 liteav_ff_pred8x8_tm_vp8_8_ssse3 +#define text_segment_new liteav_text_segment_new +#define ff_ebur128_sample_peak liteav_ff_ebur128_sample_peak +#define ff_framesync_dualinput_get_writable 
liteav_ff_framesync_dualinput_get_writable +#define ff_h264_idct8_dc_add_8_mmxext liteav_ff_h264_idct8_dc_add_8_mmxext +#define ffurl_shutdown liteav_ffurl_shutdown +#define ff_h264_idct8_add4_8_mmx liteav_ff_h264_idct8_add4_8_mmx +#define ff_avs3_muxer liteav_ff_avs3_muxer +#define ff_put_h264_qpel16_mc30_10_sse2_cache64 liteav_ff_put_h264_qpel16_mc30_10_sse2_cache64 +#define av_strstart liteav_av_strstart +#define ff_h264_luma_dc_dequant_idct_9_c liteav_ff_h264_luma_dc_dequant_idct_9_c +#define ff_h264_pred_init_aarch64 liteav_ff_h264_pred_init_aarch64 +#define av_free liteav_av_free +#define ff_simple_idct12_put_avx liteav_ff_simple_idct12_put_avx +#define av_aes_crypt liteav_av_aes_crypt +#define ff_hevc_dsp_init_aarch64 liteav_ff_hevc_dsp_init_aarch64 +#define parse_sequence_header_info liteav_parse_sequence_header_info +#define ff_hevc_transform_8x8_neon_8 liteav_ff_hevc_transform_8x8_neon_8 +#define ff_put_qpel8_mc31_old_c liteav_ff_put_qpel8_mc31_old_c +#define avio_wl32 liteav_avio_wl32 +#define ff_mov_cenc_avc_parse_nal_units liteav_ff_mov_cenc_avc_parse_nal_units +#define ff_h264_chroma422_dc_dequant_idct_10_c liteav_ff_h264_chroma422_dc_dequant_idct_10_c +#define av_stereo3d_create_side_data liteav_av_stereo3d_create_side_data +#define av_hash_alloc liteav_av_hash_alloc +#define ff_wav_codec_get_id liteav_ff_wav_codec_get_id +#define ff_pack_6ch_int32_to_float_u_sse2 liteav_ff_pack_6ch_int32_to_float_u_sse2 +#define ff_hevc_put_pixels_w6_neon_8_asm liteav_ff_hevc_put_pixels_w6_neon_8_asm +#define ff_id3v2_4_tags liteav_ff_id3v2_4_tags +#define ff_imdct_calc_c_fixed liteav_ff_imdct_calc_c_fixed +#define ff_hevc_transform_add_16x16_neon_8_asm liteav_ff_hevc_transform_add_16x16_neon_8_asm +#define ff_mdct_end_fixed_32 liteav_ff_mdct_end_fixed_32 +#define ff_avg_h264_qpel4_mc02_10_mmxext liteav_ff_avg_h264_qpel4_mc02_10_mmxext +#define av_bsf_list_alloc liteav_av_bsf_list_alloc +#define ff_hevc_put_qpel_uw_h3v1_neon_8 liteav_ff_hevc_put_qpel_uw_h3v1_neon_8 
+#define avio_handshake liteav_avio_handshake +#define ff_mpeg4_video_profiles liteav_ff_mpeg4_video_profiles +#define ff_h2645_packet_uninit liteav_ff_h2645_packet_uninit +#define ff_h264_chroma422_dc_dequant_idct_14_c liteav_ff_h264_chroma422_dc_dequant_idct_14_c +#define ff_subtitles_queue_finalize liteav_ff_subtitles_queue_finalize +#define ff_hevc_merge_idx_decode liteav_ff_hevc_merge_idx_decode +#define ff_set_common_channel_layouts liteav_ff_set_common_channel_layouts +#define rgb15tobgr32 liteav_rgb15tobgr32 +#define ff_h264_idct8_dc_add_10_c liteav_ff_h264_idct8_dc_add_10_c +#define ff_h264_decode_picture_parameter_set liteav_ff_h264_decode_picture_parameter_set +#define ff_deblock_v_chroma_intra_10_sse2 liteav_ff_deblock_v_chroma_intra_10_sse2 +#define ff_flac_parse_streaminfo liteav_ff_flac_parse_streaminfo +#define ff_hls_demuxer liteav_ff_hls_demuxer +#define ff_h264_idct8_dc_add_12_c liteav_ff_h264_idct8_dc_add_12_c +#define av_bprint_chars liteav_av_bprint_chars +#define ff_pred8x8l_horizontal_up_10_avx liteav_ff_pred8x8l_horizontal_up_10_avx +#define ff_avg_h264_chroma_mc8_neon liteav_ff_avg_h264_chroma_mc8_neon +#define ff_mpa_sblimit_table liteav_ff_mpa_sblimit_table +#define ff_put_h264_chroma_mc4_10_mmxext liteav_ff_put_h264_chroma_mc4_10_mmxext +#define ff_framesync_activate liteav_ff_framesync_activate +#define ff_subtitles_queue_read_packet liteav_ff_subtitles_queue_read_packet +#define ff_h264_idct8_dc_add_14_c liteav_ff_h264_idct8_dc_add_14_c +#define av_sample_fmt_is_planar liteav_av_sample_fmt_is_planar +#define ff_pred8x8l_dc_10_sse2 liteav_ff_pred8x8l_dc_10_sse2 +#define yyget_out liteav_yyget_out +#define sws_convVec liteav_sws_convVec +#define ff_vorbiscomment_length liteav_ff_vorbiscomment_length +#define ff_hevc_put_qpel_uw_pixels_w12_neon_8 liteav_ff_hevc_put_qpel_uw_pixels_w12_neon_8 +#define ff_hevc_put_pixels_w2_neon_8 liteav_ff_hevc_put_pixels_w2_neon_8 +#define ff_h264_biweight_8_sse2 liteav_ff_h264_biweight_8_sse2 +#define 
ff_pack_6ch_float_to_int32_a_avx liteav_ff_pack_6ch_float_to_int32_a_avx +#define ff_thread_finish_setup liteav_ff_thread_finish_setup +#define avfilter_get_by_name liteav_avfilter_get_by_name +#define ff_h264_videotoolbox_hwaccel liteav_ff_h264_videotoolbox_hwaccel +#define ff_aac_profiles liteav_ff_aac_profiles +#define ff_slice_thread_free liteav_ff_slice_thread_free +#define rendition_matched_tags liteav_rendition_matched_tags +#define rgb64to48_bswap liteav_rgb64to48_bswap +#define ff_h264_chroma422_dc_dequant_idct_12_c liteav_ff_h264_chroma422_dc_dequant_idct_12_c +#define ff_ps_hybrid_analysis_neon liteav_ff_ps_hybrid_analysis_neon +#define ff_mpeg2_video_profiles liteav_ff_mpeg2_video_profiles +#define ff_w4_plus_w2_hi liteav_ff_w4_plus_w2_hi +#define ff_h264_idct_add16_10_avx liteav_ff_h264_idct_add16_10_avx +#define yyset_lineno liteav_yyset_lineno +#define av_des_alloc liteav_av_des_alloc +#define ff_pred8x8l_down_left_10_ssse3 liteav_ff_pred8x8l_down_left_10_ssse3 +#define ff_h264_biweight_16_10_sse4 liteav_ff_h264_biweight_16_10_sse4 +#define ff_framesync_dualinput_get liteav_ff_framesync_dualinput_get +#define ff_hevc_dsp_init liteav_ff_hevc_dsp_init +#define ff_h264_biweight_16_10_sse2 liteav_ff_h264_biweight_16_10_sse2 +#define av_parse_video_rate liteav_av_parse_video_rate +#define av_register_bitstream_filter liteav_av_register_bitstream_filter +#define av_packet_rescale_ts liteav_av_packet_rescale_ts +#define ff_aac_scalefactor_code liteav_ff_aac_scalefactor_code +#define ff_rvlc_rl_inter liteav_ff_rvlc_rl_inter +#define ff_pred16x16_vert_neon liteav_ff_pred16x16_vert_neon +#define ff_crc04C11DB7_update liteav_ff_crc04C11DB7_update +#define ff_mov_write_chan liteav_ff_mov_write_chan +#define ff_sbr_apply liteav_ff_sbr_apply +#define ff_query_formats_all_layouts liteav_ff_query_formats_all_layouts +#define ff_h264_idct_add16intra_8_mmxext liteav_ff_h264_idct_add16intra_8_mmxext +#define ff_rtmp_packet_write liteav_ff_rtmp_packet_write +#define 
avfilter_add_matrix liteav_avfilter_add_matrix +#define yyrealloc liteav_yyrealloc +#define ff_hevc_put_qpel_uw_pixels_w32_neon_8 liteav_ff_hevc_put_qpel_uw_pixels_w32_neon_8 +#define ff_yuv422p_to_rgba_neon liteav_ff_yuv422p_to_rgba_neon +#define ff_put_pixels16_mmx liteav_ff_put_pixels16_mmx +#define av_bprint_init_for_buffer liteav_av_bprint_init_for_buffer +#define av_aes_ctr_init liteav_av_aes_ctr_init +#define av_opt_free liteav_av_opt_free +#define ff_avg_h264_qpel16_mc32_10_sse2 liteav_ff_avg_h264_qpel16_mc32_10_sse2 +#define ff_mjpeg_encode_huffman_close liteav_ff_mjpeg_encode_huffman_close +#define ff_clean_intra_table_entries liteav_ff_clean_intra_table_entries +#define ff_pred8x8_0l0_dc_neon liteav_ff_pred8x8_0l0_dc_neon +#define ff_mpeg1_clean_buffers liteav_ff_mpeg1_clean_buffers +#define ff_image_copy_plane_uc_from_x86 liteav_ff_image_copy_plane_uc_from_x86 +#define ff_ebur128_add_frames_planar_double liteav_ff_ebur128_add_frames_planar_double +#define av_audio_fifo_write liteav_av_audio_fifo_write +#define ff_deblock_h_luma_intra_10_sse2 liteav_ff_deblock_h_luma_intra_10_sse2 +#define av_rdft_calc liteav_av_rdft_calc +#define ff_pw_1023 liteav_ff_pw_1023 +#define ff_inlink_consume_samples liteav_ff_inlink_consume_samples +#define av_get_alt_sample_fmt liteav_av_get_alt_sample_fmt +#define ff_hpeldsp_init_aarch64 liteav_ff_hpeldsp_init_aarch64 +#define av_spherical_from_name liteav_av_spherical_from_name +#define ff_openssl_deinit liteav_ff_openssl_deinit +#define ff_hevc_pred_angular_16x16_h_neon_8 liteav_ff_hevc_pred_angular_16x16_h_neon_8 +#define ff_subtitles_queue_clean liteav_ff_subtitles_queue_clean +#define ff_put_h264_qpel16_mc11_10_sse2 liteav_ff_put_h264_qpel16_mc11_10_sse2 +#define ff_amf_read_string liteav_ff_amf_read_string +#define ff_id3v2_read liteav_ff_id3v2_read +#define ff_simple_idct8_sse2 liteav_ff_simple_idct8_sse2 +#define av_base64_encode liteav_av_base64_encode +#define ff_hevc_sao_edge_eo0_w64_neon_8 
liteav_ff_hevc_sao_edge_eo0_w64_neon_8 +#define ff_hevc_transform_luma_4x4_neon_8_asm liteav_ff_hevc_transform_luma_4x4_neon_8_asm +#define av_buffer_ref liteav_av_buffer_ref +#define rgb48to64_nobswap liteav_rgb48to64_nobswap +#define ff_idctdsp_init liteav_ff_idctdsp_init +#define swresample_configuration liteav_swresample_configuration +#define openssl_mutexes liteav_openssl_mutexes +#define ff_alloc_entries liteav_ff_alloc_entries +#define ff_hevc_put_qpel_uw_h_neon_8 liteav_ff_hevc_put_qpel_uw_h_neon_8 +#define av_bprint_append_data liteav_av_bprint_append_data +#define ff_h264_idct_add_neon liteav_ff_h264_idct_add_neon +#define ff_tns_max_bands_128 liteav_ff_tns_max_bands_128 +#define ff_cos_512_fixed liteav_ff_cos_512_fixed +#define ff_sine_64 liteav_ff_sine_64 +#define av_fifo_freep liteav_av_fifo_freep +#define ffurl_get_multi_file_handle liteav_ffurl_get_multi_file_handle +#define ff_prores_idct liteav_ff_prores_idct +#define ff_hevc_put_epel_v_neon_8 liteav_ff_hevc_put_epel_v_neon_8 +#define ff_ac3_db_per_bit_tab liteav_ff_ac3_db_per_bit_tab +#define ff_put_h264_chroma_mc8_rnd_mmx liteav_ff_put_h264_chroma_mc8_rnd_mmx +#define ff_smil_get_attr_ptr liteav_ff_smil_get_attr_ptr +#define ff_pb_3 liteav_ff_pb_3 +#define ff_pb_2 liteav_ff_pb_2 +#define ff_pb_1 liteav_ff_pb_1 +#define ff_pb_0 liteav_ff_pb_0 +#define ff_w3_min_w1_lo liteav_ff_w3_min_w1_lo +#define ff_h264_biweight_8_mmxext liteav_ff_h264_biweight_8_mmxext +#define ff_hevc_put_pixels_w4_neon_8 liteav_ff_hevc_put_pixels_w4_neon_8 +#define av_imdct_half liteav_av_imdct_half +#define av_add_i liteav_av_add_i +#define sws_alloc_context liteav_sws_alloc_context +#define ff_thread_report_progress liteav_ff_thread_report_progress +#define ff_h264_set_erpic liteav_ff_h264_set_erpic +#define ff_pred4x4_down_left_10_avx liteav_ff_pred4x4_down_left_10_avx +#define ff_init_gamma_convert liteav_ff_init_gamma_convert +#define ff_put_no_rnd_qpel8_mc32_old_c liteav_ff_put_no_rnd_qpel8_mc32_old_c +#define 
ff_subtitles_read_text_chunk liteav_ff_subtitles_read_text_chunk +#define swr_ffversion liteav_swr_ffversion +#define av_add_q liteav_av_add_q +#define ff_insert_pad liteav_ff_insert_pad +#define avio_w8 liteav_avio_w8 +#define ff_zigzag_scan liteav_ff_zigzag_scan +#define ff_pred16x16_dc_10_mmxext liteav_ff_pred16x16_dc_10_mmxext +#define ff_choose_chroma_location liteav_ff_choose_chroma_location +#define ff_put_h264_qpel4_mc10_10_mmxext liteav_ff_put_h264_qpel4_mc10_10_mmxext +#define ff_deblock_h_chroma422_8_mmxext liteav_ff_deblock_h_chroma422_8_mmxext +#define ff_mpeg4_c_dc_scale_table liteav_ff_mpeg4_c_dc_scale_table +#define ff_put_h264_qpel4_mc00_10_mmxext liteav_ff_put_h264_qpel4_mc00_10_mmxext +#define ff_frame_pool_uninit liteav_ff_frame_pool_uninit +#define ff_ps_init liteav_ff_ps_init +#define ff_hevc_put_pixels_w48_neon_8 liteav_ff_hevc_put_pixels_w48_neon_8 +#define av_rescale_delta liteav_av_rescale_delta +#define ff_unpack_2ch_int32_to_int32_u_sse2 liteav_ff_unpack_2ch_int32_to_int32_u_sse2 +#define av_hash_update liteav_av_hash_update +#define ff_hevc_put_pixels_w48_neon_8_asm liteav_ff_hevc_put_pixels_w48_neon_8_asm +#define av_opt_set_int liteav_av_opt_set_int +#define av_mediacodec_alloc_context liteav_av_mediacodec_alloc_context +#define ff_avg_h264_chroma_mc8_10_sse2 liteav_ff_avg_h264_chroma_mc8_10_sse2 +#define ff_filter_graph_run_once liteav_ff_filter_graph_run_once +#define ff_alternate_vertical_scan liteav_ff_alternate_vertical_scan +#define ff_avg_h264_qpel4_h_lowpass_mmxext liteav_ff_avg_h264_qpel4_h_lowpass_mmxext +#define av_gettime_relative liteav_av_gettime_relative +#define av_md5_size liteav_av_md5_size +#define ff_dct32_float_avx liteav_ff_dct32_float_avx +#define avio_rb32 liteav_avio_rb32 +#define ff_hevc_no_residual_syntax_flag_decode liteav_ff_hevc_no_residual_syntax_flag_decode +#define rgb16tobgr15 liteav_rgb16tobgr15 +#define ff_put_h264_chroma_mc4_ssse3 liteav_ff_put_h264_chroma_mc4_ssse3 +#define 
ff_avg_h264_qpel8_mc22_neon liteav_ff_avg_h264_qpel8_mc22_neon +#define ff_sbr_autocorrelate_neon liteav_ff_sbr_autocorrelate_neon +#define ff_vc1_profiles liteav_ff_vc1_profiles +#define av_frame_alloc liteav_av_frame_alloc +#define av_hash_final_b64 liteav_av_hash_final_b64 +#define ff_pred8x8l_down_left_8_ssse3 liteav_ff_pred8x8l_down_left_8_ssse3 +#define ff_avg_h264_qpel16_mc33_neon liteav_ff_avg_h264_qpel16_mc33_neon +#define ff_pred8x8_dc_8_mmxext liteav_ff_pred8x8_dc_8_mmxext +#define avfilter_graph_alloc_filter liteav_avfilter_graph_alloc_filter +#define ff_avg_qpel16_mc11_old_c liteav_ff_avg_qpel16_mc11_old_c +#define ff_ebur128_relative_threshold liteav_ff_ebur128_relative_threshold +#define ff_ps_stereo_interpolate_ipdopd_sse3 liteav_ff_ps_stereo_interpolate_ipdopd_sse3 +#define avio_rl32 liteav_avio_rl32 +#define av_write_image_line liteav_av_write_image_line +#define ff_aac_spectral_codes liteav_ff_aac_spectral_codes +#define ff_pb_15 liteav_ff_pb_15 +#define swri_audio_convert_init_x86 liteav_swri_audio_convert_init_x86 +#define ff_dither_2x2_8 liteav_ff_dither_2x2_8 +#define ff_mpeg4_decoder liteav_ff_mpeg4_decoder +#define ff_put_h264_qpel8_mc11_10_sse2 liteav_ff_put_h264_qpel8_mc11_10_sse2 +#define ff_me_cmp_init_x86 liteav_ff_me_cmp_init_x86 +#define ff_simple_idct_int16_12bit liteav_ff_simple_idct_int16_12bit +#define ff_pred16x16_tm_vp8_8_avx2 liteav_ff_pred16x16_tm_vp8_8_avx2 +#define ff_pred4x4_horizontal_down_8_mmxext liteav_ff_pred4x4_horizontal_down_8_mmxext +#define ff_vector_fmul_reverse_vfp liteav_ff_vector_fmul_reverse_vfp +#define ffio_init_context liteav_ffio_init_context +#define ff_riff_info_conv liteav_ff_riff_info_conv +#define ff_hevc_put_pixels_w8_neon_8 liteav_ff_hevc_put_pixels_w8_neon_8 +#define ff_avg_rv40_chroma_mc4_mmxext liteav_ff_avg_rv40_chroma_mc4_mmxext +#define av_frame_get_plane_buffer liteav_av_frame_get_plane_buffer +#define ff_hevc_put_qpel_uw_weight_h3v2_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h3v2_neon_8 
+#define ff_img_tags liteav_ff_img_tags +#define ff_init_ff_cos_tabs_fixed_32 liteav_ff_init_ff_cos_tabs_fixed_32 +#define ff_httpproxy_protocol liteav_ff_httpproxy_protocol +#define ff_h264_chroma_dc_dequant_idct_14_c liteav_ff_h264_chroma_dc_dequant_idct_14_c +#define ff_amf_tag_size liteav_ff_amf_tag_size +#define av_aes_ctr_free liteav_av_aes_ctr_free +#define ff_simple_idct_add_int16_12bit liteav_ff_simple_idct_add_int16_12bit +#define ff_pred4x4_dc_10_mmxext liteav_ff_pred4x4_dc_10_mmxext +#define ff_flac_set_channel_layout liteav_ff_flac_set_channel_layout +#define ff_put_no_rnd_qpel16_mc32_old_c liteav_ff_put_no_rnd_qpel16_mc32_old_c +#define swri_rematrix_free liteav_swri_rematrix_free +#define avpriv_solve_lls liteav_avpriv_solve_lls +#define ff_fft_init_fixed liteav_ff_fft_init_fixed +#define ff_h263_loop_filter liteav_ff_h263_loop_filter +#define ff_init_scantable liteav_ff_init_scantable +#define ff_put_h264_qpel16_mc20_10_sse2_cache64 liteav_ff_put_h264_qpel16_mc20_10_sse2_cache64 +#define av_opt_eval_q liteav_av_opt_eval_q +#define av_downmix_info_update_side_data liteav_av_downmix_info_update_side_data +#define ff_flac_parser liteav_ff_flac_parser +#define ff_mp3_decoder liteav_ff_mp3_decoder +#define av_des_init liteav_av_des_init +#define ff_listen_connect liteav_ff_listen_connect +#define ff_hevc_bump_frame liteav_ff_hevc_bump_frame +#define ff_filter_alloc liteav_ff_filter_alloc +#define ff_parse_channel_layout liteav_ff_parse_channel_layout +#define av_frame_set_qp_table liteav_av_frame_set_qp_table +#define ff_h263_cbpy_vlc liteav_ff_h263_cbpy_vlc +#define ff_put_pixels16_l2_mmxext liteav_ff_put_pixels16_l2_mmxext +#define ff_mdct_win_float liteav_ff_mdct_win_float +#define ff_avg_h264_chroma_mc8_rnd_ssse3 liteav_ff_avg_h264_chroma_mc8_rnd_ssse3 +#define ff_mpv_motion liteav_ff_mpv_motion +#define swri_rematrix_init liteav_swri_rematrix_init +#define ff_put_h264_qpel4_mc32_10_mmxext liteav_ff_put_h264_qpel4_mc32_10_mmxext +#define 
vlc_css_term_Clean liteav_vlc_css_term_Clean +#define ff_avc_mp4_find_startcode liteav_ff_avc_mp4_find_startcode +#define ff_h264_biweight_4_10_sse2 liteav_ff_h264_biweight_4_10_sse2 +#define ff_vf_scale liteav_ff_vf_scale +#define ff_h264_biweight_4_10_sse4 liteav_ff_h264_biweight_4_10_sse4 +#define avio_wl24 liteav_avio_wl24 +#define ff_hevc_put_qpel_uw_h2_neon_8 liteav_ff_hevc_put_qpel_uw_h2_neon_8 +#define ff_put_pixels4_l2_mmxext liteav_ff_put_pixels4_l2_mmxext +#define ff_hevc_put_pixels_w24_neon_8 liteav_ff_hevc_put_pixels_w24_neon_8 +#define ff_pack_8ch_float_to_int32_u_sse2 liteav_ff_pack_8ch_float_to_int32_u_sse2 +#define shuffle_bytes_0321 liteav_shuffle_bytes_0321 +#define ff_pred8x8l_top_dc_8_mmxext liteav_ff_pred8x8l_top_dc_8_mmxext +#define av_image_copy_to_buffer liteav_av_image_copy_to_buffer +#define ff_vector_fmul_scalar_neon liteav_ff_vector_fmul_scalar_neon +#define ff_h264_idct_add16intra_9_c liteav_ff_h264_idct_add16intra_9_c +#define ff_put_pixels4_mmx liteav_ff_put_pixels4_mmx +#define av_color_transfer_from_name liteav_av_color_transfer_from_name +#define av_ripemd_alloc liteav_av_ripemd_alloc +#define ff_getSwsFunc liteav_ff_getSwsFunc +#define av_cast5_size liteav_av_cast5_size +#define ff_pw_8192 liteav_ff_pw_8192 +#define ff_w_tab_sr liteav_ff_w_tab_sr +#define ff_hevc_decode_nal_vps liteav_ff_hevc_decode_nal_vps +#define av_get_channel_layout_channel_index liteav_av_get_channel_layout_channel_index +#define ff_tcp_protocol liteav_ff_tcp_protocol +#define ff_h264_golomb_to_intra4x4_cbp liteav_ff_h264_golomb_to_intra4x4_cbp +#define ff_avc_parse_nal_units liteav_ff_avc_parse_nal_units +#define ff_put_h264_qpel8_mc32_neon liteav_ff_put_h264_qpel8_mc32_neon +#define av_imdct_calc liteav_av_imdct_calc +#define ff_mpeg4_DCtab_chrom liteav_ff_mpeg4_DCtab_chrom +#define ff_unpack_2ch_int16_to_int16_u_sse2 liteav_ff_unpack_2ch_int16_to_int16_u_sse2 +#define shuffle_bytes_3210 liteav_shuffle_bytes_3210 +#define ff_codec_movaudio_tags 
liteav_ff_codec_movaudio_tags +#define ff_ps_apply liteav_ff_ps_apply +#define avpriv_get_trc_function_from_trc liteav_avpriv_get_trc_function_from_trc +#define ff_h264_check_intra4x4_pred_mode liteav_ff_h264_check_intra4x4_pred_mode +#define ff_sbrdsp_init_x86 liteav_ff_sbrdsp_init_x86 +#define av_buffer_allocz liteav_av_buffer_allocz +#define ff_hevc_diag_scan4x4_x liteav_ff_hevc_diag_scan4x4_x +#define ff_hevc_diag_scan4x4_y liteav_ff_hevc_diag_scan4x4_y +#define ff_simple_idct_put_int16_12bit liteav_ff_simple_idct_put_int16_12bit +#define ff_imdct_calc_neon liteav_ff_imdct_calc_neon +#define swri_noise_shaping_float liteav_swri_noise_shaping_float +#define av_audio_fifo_drain liteav_av_audio_fifo_drain +#define ff_h264_idct_add16_8_sse2 liteav_ff_h264_idct_add16_8_sse2 +#define ff_id3v2_3_tags liteav_ff_id3v2_3_tags +#define webvtt_parser_close liteav_webvtt_parser_close +#define avfilter_graph_parse2 liteav_avfilter_graph_parse2 +#define ff_avg_pixels8_l2_mmxext liteav_ff_avg_pixels8_l2_mmxext +#define ff_h264_mp4toannexb_bsf liteav_ff_h264_mp4toannexb_bsf +#define ff_pcm_mulaw_at_decoder liteav_ff_pcm_mulaw_at_decoder +#define ff_avg_h264_qpel16_mc31_10_sse2 liteav_ff_avg_h264_qpel16_mc31_10_sse2 +#define ff_hevc_put_qpel_h1v2_neon_8 liteav_ff_hevc_put_qpel_h1v2_neon_8 +#define avpriv_init_lls liteav_avpriv_init_lls +#define av_pixelutils_get_sad_fn liteav_av_pixelutils_get_sad_fn +#define ff_avg_h264_chroma_mc2_mmxext liteav_ff_avg_h264_chroma_mc2_mmxext +#define av_d3d11va_alloc_context liteav_av_d3d11va_alloc_context +#define av_buffersrc_add_frame_flags liteav_av_buffersrc_add_frame_flags +#define ff_hevc_idct_8x8_dc_neon_8 liteav_ff_hevc_idct_8x8_dc_neon_8 +#define ff_mpa_quant_bits liteav_ff_mpa_quant_bits +#define ff_h263_rl_inter liteav_ff_h263_rl_inter +#define ff_cos_131072 liteav_ff_cos_131072 +#define ff_put_h264_qpel4_mc22_10_mmxext liteav_ff_put_h264_qpel4_mc22_10_mmxext +#define sws_getColorspaceDetails liteav_sws_getColorspaceDetails +#define 
av_stereo3d_alloc liteav_av_stereo3d_alloc +#define ff_mpeg4_DCtab_lum liteav_ff_mpeg4_DCtab_lum +#define av_bprint_get_buffer liteav_av_bprint_get_buffer +#define av_hash_final_bin liteav_av_hash_final_bin +#define ff_h264_idct8_add4_8_mmxext liteav_ff_h264_idct8_add4_8_mmxext +#define ff_hevc_put_qpel_uw_v_neon_8 liteav_ff_hevc_put_qpel_uw_v_neon_8 +#define ff_sine_2048 liteav_ff_sine_2048 +#define ff_unicode_ass_add_rect liteav_ff_unicode_ass_add_rect +#define ff_put_h264_chroma_mc8_neon liteav_ff_put_h264_chroma_mc8_neon +#define avfilter_process_command liteav_avfilter_process_command +#define avfilter_graph_free liteav_avfilter_graph_free +#define ff_subtitles_unicode_external_read_chunk liteav_ff_subtitles_unicode_external_read_chunk +#define ff_deblock_h_luma_intra_8_avx liteav_ff_deblock_h_luma_intra_8_avx +#define rgb64to48_nobswap liteav_rgb64to48_nobswap +#define ff_frame_thread_encoder_free liteav_ff_frame_thread_encoder_free +#define ff_ps_hybrid_synthesis_deint_sse4 liteav_ff_ps_hybrid_synthesis_deint_sse4 +#define ff_frame_thread_free liteav_ff_frame_thread_free +#define av_buffersink_get_channels liteav_av_buffersink_get_channels +#define av_md5_final liteav_av_md5_final +#define ff_put_h264_qpel4_hv_lowpass_h_mmxext liteav_ff_put_h264_qpel4_hv_lowpass_h_mmxext +#define ff_reget_buffer liteav_ff_reget_buffer +#define ff_put_h264_qpel16_mc02_neon liteav_ff_put_h264_qpel16_mc02_neon +#define ff_framesync_uninit liteav_ff_framesync_uninit +#define ff_aac_kbd_long_1024 liteav_ff_aac_kbd_long_1024 +#define av_cast5_crypt2 liteav_av_cast5_crypt2 +#define ff_avg_h264_qpel8or16_hv2_lowpass_op_mmxext liteav_ff_avg_h264_qpel8or16_hv2_lowpass_op_mmxext +#define ff_hevc_put_qpel_uw_weight_h3_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h3_neon_8 +#define ff_pred8x8_plane_10_sse2 liteav_ff_pred8x8_plane_10_sse2 +#define ff_avg_h264_qpel16_mc31_neon liteav_ff_avg_h264_qpel16_mc31_neon +#define av_parse_ratio liteav_av_parse_ratio +#define ff_put_qpel8_mc13_old_c 
liteav_ff_put_qpel8_mc13_old_c +#define rgb48tobgr48_nobswap liteav_rgb48tobgr48_nobswap +#define ff_put_h264_qpel8or16_v_lowpass_sse2 liteav_ff_put_h264_qpel8or16_v_lowpass_sse2 +#define ff_af_queue_add liteav_ff_af_queue_add +#define ff_h263i_decoder liteav_ff_h263i_decoder +#define ff_avg_vc1_chroma_mc8_nornd_3dnow liteav_ff_avg_vc1_chroma_mc8_nornd_3dnow +#define ff_sbc_profiles liteav_ff_sbc_profiles +#define ff_frame_pool_get_audio_config liteav_ff_frame_pool_get_audio_config +#define av_twofish_crypt liteav_av_twofish_crypt +#define av_sha512_alloc liteav_av_sha512_alloc +#define avio_close_dyn_buf liteav_avio_close_dyn_buf +#define ff_ac3_window liteav_ff_ac3_window +#define ff_avg_h264_qpel16_mc10_10_sse2 liteav_ff_avg_h264_qpel16_mc10_10_sse2 +#define av_buffersrc_get_nb_failed_requests liteav_av_buffersrc_get_nb_failed_requests +#define ff_avg_h264_qpel8_mc10_10_sse2_cache64 liteav_ff_avg_h264_qpel8_mc10_10_sse2_cache64 +#define ff_ilbc_at_decoder liteav_ff_ilbc_at_decoder +#define ff_pred8x8l_vertical_right_10_sse2 liteav_ff_pred8x8l_vertical_right_10_sse2 +#define ff_hevc_transform_add_4x4_neon_8_asm liteav_ff_hevc_transform_add_4x4_neon_8_asm +#define av_ripemd_final liteav_av_ripemd_final +#define ff_get_cpu_max_align_aarch64 liteav_ff_get_cpu_max_align_aarch64 +#define ff_ebur128_add_frames_float liteav_ff_ebur128_add_frames_float +#define ff_avg_qpel16_mc12_old_c liteav_ff_avg_qpel16_mc12_old_c +#define ff_sbr_neg_odd_64_neon liteav_ff_sbr_neg_odd_64_neon +#define ff_pred8x8l_vertical_8_mmxext liteav_ff_pred8x8l_vertical_8_mmxext +#define ff_slice_thread_execute_with_mainfunc liteav_ff_slice_thread_execute_with_mainfunc +#define av_hmac_free liteav_av_hmac_free +#define av_thread_message_flush liteav_av_thread_message_flush +#define ff_hevc_hls_residual_coding liteav_ff_hevc_hls_residual_coding +#define av_get_output_timestamp liteav_av_get_output_timestamp +#define ff_tns_max_bands_512 liteav_ff_tns_max_bands_512 +#define ff_h264_idct_add_10_c 
liteav_ff_h264_idct_add_10_c +#define ff_pred8x8_vert_neon liteav_ff_pred8x8_vert_neon +#define ff_av1_filter_obus liteav_ff_av1_filter_obus +#define ff_framesync_init_dualinput liteav_ff_framesync_init_dualinput +#define ff_init_ff_sine_windows_fixed liteav_ff_init_ff_sine_windows_fixed +#define ff_h264_weight_16_sse2 liteav_ff_h264_weight_16_sse2 +#define ff_free_filters liteav_ff_free_filters +#define av_d2str liteav_av_d2str +#define ff_pw_255 liteav_ff_pw_255 +#define av_probe_input_buffer2 liteav_av_probe_input_buffer2 +#define ff_pw_256 liteav_ff_pw_256 +#define avfilter_transform liteav_avfilter_transform +#define ff_cos_8192_fixed liteav_ff_cos_8192_fixed +#define av_parse_time liteav_av_parse_time +#define ff_pack_2ch_int32_to_int32_u_sse2 liteav_ff_pack_2ch_int32_to_int32_u_sse2 +#define ff_simple_idct_put_neon liteav_ff_simple_idct_put_neon +#define av_color_range_name liteav_av_color_range_name +#define rgb15to16 liteav_rgb15to16 +#define ff_fft_permute_neon liteav_ff_fft_permute_neon +#define av_dv_frame_profile liteav_av_dv_frame_profile +#define ff_h264_idct8_add_9_c liteav_ff_h264_idct8_add_9_c +#define ff_avg_h264_qpel8_h_lowpass_l2_ssse3 liteav_ff_avg_h264_qpel8_h_lowpass_l2_ssse3 +#define av_buffersink_get_sample_rate liteav_av_buffersink_get_sample_rate +#define ff_mpeg4_workaround_bugs liteav_ff_mpeg4_workaround_bugs +#define ff_pred16x16_left_dc_10_mmxext liteav_ff_pred16x16_left_dc_10_mmxext +#define ff_id3v2_free_extra_meta liteav_ff_id3v2_free_extra_meta +#define ff_pw_2048 liteav_ff_pw_2048 +#define vlc_css_parser_ParseString liteav_vlc_css_parser_ParseString +#define ff_pred8x8l_down_left_8_sse2 liteav_ff_pred8x8l_down_left_8_sse2 +#define ff_h264_idct_dc_add_8_mmxext liteav_ff_h264_idct_dc_add_8_mmxext +#define ff_unpack_2ch_int16_to_int16_u_ssse3 liteav_ff_unpack_2ch_int16_to_int16_u_ssse3 +#define sws_scale liteav_sws_scale +#define av_parse_video_size liteav_av_parse_video_size +#define ff_hevc_sao_band_w8_neon_8 
liteav_ff_hevc_sao_band_w8_neon_8 +#define ff_nv21_to_bgra_neon liteav_ff_nv21_to_bgra_neon +#define ff_nv21_to_abgr_neon liteav_ff_nv21_to_abgr_neon +#define deinterleaveBytes liteav_deinterleaveBytes +#define ff_put_pixels8_l2_shift5_mmxext liteav_ff_put_pixels8_l2_shift5_mmxext +#define av_opt_is_set_to_default_by_name liteav_av_opt_is_set_to_default_by_name +#define swri_resample_dsp_aarch64_init liteav_swri_resample_dsp_aarch64_init +#define ff_avg_pixels8_xy2_neon liteav_ff_avg_pixels8_xy2_neon +#define ff_hscale_8_to_15_neon liteav_ff_hscale_8_to_15_neon +#define ff_avg_h264_qpel16_mc01_10_sse2 liteav_ff_avg_h264_qpel16_mc01_10_sse2 +#define ff_put_h264_chroma_mc2_mmxext liteav_ff_put_h264_chroma_mc2_mmxext +#define ff_simple_idct_put_int16_10bit liteav_ff_simple_idct_put_int16_10bit +#define ff_put_no_rnd_qpel8_mc31_old_c liteav_ff_put_no_rnd_qpel8_mc31_old_c +#define ff_simple_idct_add_int16_10bit liteav_ff_simple_idct_add_int16_10bit +#define av_timecode_init liteav_av_timecode_init +#define av_frame_get_buffer liteav_av_frame_get_buffer +#define ff_int32_to_float_a_sse2 liteav_ff_int32_to_float_a_sse2 +#define ff_ue_golomb_vlc_code liteav_ff_ue_golomb_vlc_code +#define ff_ac3_hearing_threshold_tab liteav_ff_ac3_hearing_threshold_tab +#define ff_put_h264_qpel4_h_lowpass_mmxext liteav_ff_put_h264_qpel4_h_lowpass_mmxext +#define ff_put_h264_qpel8_mc12_10_sse2 liteav_ff_put_h264_qpel8_mc12_10_sse2 +#define ff_h264_idct_add8_422_12_c liteav_ff_h264_idct_add8_422_12_c +#define av_frame_set_best_effort_timestamp liteav_av_frame_set_best_effort_timestamp +#define ff_h263_inter_MCBPC_vlc liteav_ff_h263_inter_MCBPC_vlc +#define ff_w4_plus_w6_hi liteav_ff_w4_plus_w6_hi +#define ffio_geturlcontext liteav_ffio_geturlcontext +#define av_fifo_space liteav_av_fifo_space +#define ff_h264_idct_add8_422_10_c liteav_ff_h264_idct_add8_422_10_c +#define ff_vector_fmul_window_neon liteav_ff_vector_fmul_window_neon +#define ff_deblock_h_luma_10_sse2 
liteav_ff_deblock_h_luma_10_sse2 +#define av_xtea_le_crypt liteav_av_xtea_le_crypt +#define ff_cos_8192 liteav_ff_cos_8192 +#define rgb24to15 liteav_rgb24to15 +#define ff_mpeg_framesize_alloc liteav_ff_mpeg_framesize_alloc +#define ff_aac_eld_window_480 liteav_ff_aac_eld_window_480 +#define av_frame_remove_side_data liteav_av_frame_remove_side_data +#define ff_hevc_put_qpel_uw_hv_neon_8 liteav_ff_hevc_put_qpel_uw_hv_neon_8 +#define ff_h264_idct8_dc_add_9_c liteav_ff_h264_idct8_dc_add_9_c +#define ff_inlink_request_frame liteav_ff_inlink_request_frame +#define ff_hevc_put_pixels_w32_neon_8_asm liteav_ff_hevc_put_pixels_w32_neon_8_asm +#define ff_mpegts_demuxer liteav_ff_mpegts_demuxer +#define sws_get_class liteav_sws_get_class +#define av_buffersink_get_frame_flags liteav_av_buffersink_get_frame_flags +#define av_frame_get_channels liteav_av_frame_get_channels +#define avcodec_get_type liteav_avcodec_get_type +#define ff_pred8x8l_vertical_right_8_mmxext liteav_ff_pred8x8l_vertical_right_8_mmxext +#define ff_fft16_vfp liteav_ff_fft16_vfp +#define ff_log2_run liteav_ff_log2_run +#define av_chroma_location_name liteav_av_chroma_location_name +#define av_blowfish_init liteav_av_blowfish_init +#define ff_avg_h264_qpel8_mc33_neon liteav_ff_avg_h264_qpel8_mc33_neon +#define ff_deblock_v_chroma_8_avx liteav_ff_deblock_v_chroma_8_avx +#define ff_h264_idct_add8_9_c liteav_ff_h264_idct_add8_9_c +#define av_tea_alloc liteav_av_tea_alloc +#define av_strncasecmp liteav_av_strncasecmp +#define av_bsf_next liteav_av_bsf_next +#define rgb24to16 liteav_rgb24to16 +#define ff_pw_32 liteav_ff_pw_32 +#define ff_put_h264_qpel4_mc33_10_mmxext liteav_ff_put_h264_qpel4_mc33_10_mmxext +#define ff_ac3_slow_gain_tab liteav_ff_ac3_slow_gain_tab +#define ff_h264_filter_mb liteav_ff_h264_filter_mb +#define ff_mdct15_uninit liteav_ff_mdct15_uninit +#define ff_h264_loop_filter_strength_mmxext liteav_ff_h264_loop_filter_strength_mmxext +#define avpriv_set_systematic_pal2 
liteav_avpriv_set_systematic_pal2 +#define ff_avg_h264_qpel8_mc30_10_sse2 liteav_ff_avg_h264_qpel8_mc30_10_sse2 +#define ff_framequeue_add liteav_ff_framequeue_add +#define ff_simple_idct_put_int16_8bit liteav_ff_simple_idct_put_int16_8bit +#define av_hash_get_size liteav_av_hash_get_size +#define ff_hevc_put_qpel_uw_weight_v2_neon_8 liteav_ff_hevc_put_qpel_uw_weight_v2_neon_8 +#define av_twofish_alloc liteav_av_twofish_alloc +#define ff_put_h264_qpel4_mc31_10_mmxext liteav_ff_put_h264_qpel4_mc31_10_mmxext +#define av_buffersink_params_alloc liteav_av_buffersink_params_alloc +#define avformat_write_header liteav_avformat_write_header +#define av_reduce liteav_av_reduce +#define ff_set_qscale liteav_ff_set_qscale +#define ff_hevc_sao_band_filter_8_neon liteav_ff_hevc_sao_band_filter_8_neon +#define ff_mpadsp_apply_window_fixed_neon liteav_ff_mpadsp_apply_window_fixed_neon +#define ff_pred8x8l_vertical_right_8_ssse3 liteav_ff_pred8x8l_vertical_right_8_ssse3 +#define ff_text_r8 liteav_ff_text_r8 +#define avfilter_graph_dump liteav_avfilter_graph_dump +#define ff_put_h264_qpel8_mc33_neon liteav_ff_put_h264_qpel8_mc33_neon +#define ff_avg_h264_qpel4_mc31_10_mmxext liteav_ff_avg_h264_qpel4_mc31_10_mmxext +#define rgb48to64_bswap liteav_rgb48to64_bswap +#define swri_audio_convert_init_aarch64 liteav_swri_audio_convert_init_aarch64 +#define ff_flac_sample_rate_table liteav_ff_flac_sample_rate_table +#define ff_hevc_pred_angular_8x8_neon_8 liteav_ff_hevc_pred_angular_8x8_neon_8 +#define ff_hevc_put_epel_uw_pixels_w64_neon_8 liteav_ff_hevc_put_epel_uw_pixels_w64_neon_8 +#define ff_flacdsp_init liteav_ff_flacdsp_init +#define ff_put_h264_chroma_mc4_neon liteav_ff_put_h264_chroma_mc4_neon +#define av_hex_dump liteav_av_hex_dump +#define avio_wl16 liteav_avio_wl16 +#define ff_hevc_put_qpel_h_neon_8_wrapper liteav_ff_hevc_put_qpel_h_neon_8_wrapper +#define av_bsf_list_finalize liteav_av_bsf_list_finalize +#define av_bprint_escape liteav_av_bprint_escape +#define 
av_hwframe_ctx_create_derived liteav_av_hwframe_ctx_create_derived +#define ff_deblock_v_chroma_intra_10_avx liteav_ff_deblock_v_chroma_intra_10_avx +#define ff_videotoolbox_avcc_extradata_create liteav_ff_videotoolbox_avcc_extradata_create +#define ff_pred16x16_top_dc_10_mmxext liteav_ff_pred16x16_top_dc_10_mmxext +#define av_jni_get_java_vm liteav_av_jni_get_java_vm +#define ff_gif_encoder liteav_ff_gif_encoder +#define ff_riff_write_info_tag liteav_ff_riff_write_info_tag +#define av_interleaved_write_frame liteav_av_interleaved_write_frame +#define ff_h264_biweight_8_10_sse4 liteav_ff_h264_biweight_8_10_sse4 +#define ff_h264_biweight_8_10_sse2 liteav_ff_h264_biweight_8_10_sse2 +#define avcodec_decode_subtitle2 liteav_avcodec_decode_subtitle2 +#define ff_hevc_put_qpel_h2_neon_8 liteav_ff_hevc_put_qpel_h2_neon_8 +#define av_crc_init liteav_av_crc_init +#define ff_hevc_intra_chroma_pred_mode_decode liteav_ff_hevc_intra_chroma_pred_mode_decode +#define ff_put_h264_qpel16_mc10_neon liteav_ff_put_h264_qpel16_mc10_neon +#define ff_mpeg1_default_non_intra_matrix liteav_ff_mpeg1_default_non_intra_matrix +#define rgb15tobgr16 liteav_rgb15tobgr16 +#define ff_mov_read_chan liteav_ff_mov_read_chan +#define rgb15tobgr15 liteav_rgb15tobgr15 +#define ff_amf_get_string liteav_ff_amf_get_string +#define av_parse_color liteav_av_parse_color +#define ff_pixblockdsp_init liteav_ff_pixblockdsp_init +#define ff_pred16x16_tm_vp8_8_mmx liteav_ff_pred16x16_tm_vp8_8_mmx +#define ff_deblock_v_chroma_10_avx liteav_ff_deblock_v_chroma_10_avx +#define vlc_css_unquotedunescaped liteav_vlc_css_unquotedunescaped +#define ff_rdft_init liteav_ff_rdft_init +#define ff_hevc_put_epel_uw_v_neon_8 liteav_ff_hevc_put_epel_uw_v_neon_8 +#define ff_lzw_encode_init liteav_ff_lzw_encode_init +#define avfilter_graph_get_filter liteav_avfilter_graph_get_filter +#define yypush_buffer_state liteav_yypush_buffer_state +#define ff_hevc_put_qpel_h3_neon_8 liteav_ff_hevc_put_qpel_h3_neon_8 +#define av_match_ext 
liteav_av_match_ext +#define ff_int32_to_float_u_sse2 liteav_ff_int32_to_float_u_sse2 +#define avio_check liteav_avio_check +#define ff_openssl_init liteav_ff_openssl_init +#define ff_simple_idct8_put_avx liteav_ff_simple_idct8_put_avx +#define avcodec_receive_frame liteav_avcodec_receive_frame +#define ff_id3v2_write_simple liteav_ff_id3v2_write_simple +#define ff_pred4x4_tm_vp8_8_mmx liteav_ff_pred4x4_tm_vp8_8_mmx +#define av_sha_update liteav_av_sha_update +#define av_demuxer_iterate liteav_av_demuxer_iterate +#define ff_h264_idct_add16_14_c liteav_ff_h264_idct_add16_14_c +#define ff_deblock_h_luma_10_avx liteav_ff_deblock_h_luma_10_avx +#define av_hwdevice_iterate_types liteav_av_hwdevice_iterate_types +#define ff_vector_fmul_neon liteav_ff_vector_fmul_neon +#define ff_avg_h264_qpel8_mc21_10_sse2 liteav_ff_avg_h264_qpel8_mc21_10_sse2 +#define ff_mpeg4_decode_studio_slice_header liteav_ff_mpeg4_decode_studio_slice_header +#define avio_accept liteav_avio_accept +#define ff_put_h264_qpel4_mc23_10_mmxext liteav_ff_put_h264_qpel4_mc23_10_mmxext +#define ff_vsrc_buffer liteav_ff_vsrc_buffer +#define ff_log_net_error liteav_ff_log_net_error +#define ff_set_common_formats liteav_ff_set_common_formats +#define avpriv_ac3_channel_layout_tab liteav_avpriv_ac3_channel_layout_tab +#define av_timecode_make_string liteav_av_timecode_make_string +#define av_tree_node_alloc liteav_av_tree_node_alloc +#define av_frame_free liteav_av_frame_free +#define ff_h264_idct_add8_8_mmx liteav_ff_h264_idct_add8_8_mmx +#define ff_put_pixels16_x2_no_rnd_neon liteav_ff_put_pixels16_x2_no_rnd_neon +#define av_opt_set_q liteav_av_opt_set_q +#define ff_raw_audio_read_header liteav_ff_raw_audio_read_header +#define swri_noise_shaping_double liteav_swri_noise_shaping_double +#define ff_modified_quant_tab liteav_ff_modified_quant_tab +#define ff_pack_8ch_float_to_float_a_avx liteav_ff_pack_8ch_float_to_float_a_avx +#define ff_sws_init_range_convert liteav_ff_sws_init_range_convert +#define 
ff_cos_512 liteav_ff_cos_512 +#define ff_sine_1024 liteav_ff_sine_1024 +#define av_frame_get_sample_rate liteav_av_frame_get_sample_rate +#define ff_hevc_put_qpel_uw_pixels_w16_neon_8 liteav_ff_hevc_put_qpel_uw_pixels_w16_neon_8 +#define ff_pred8x8l_128_dc_10_mmxext liteav_ff_pred8x8l_128_dc_10_mmxext +#define ff_h264_idct8_add4_10_c liteav_ff_h264_idct8_add4_10_c +#define ff_h264_free_tables liteav_ff_h264_free_tables +#define ff_mpeg1_find_frame_end liteav_ff_mpeg1_find_frame_end +#define ff_hevc_put_qpel_uw_weight_h2v3_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h2v3_neon_8 +#define ff_cos_128 liteav_ff_cos_128 +#define av_hmac_update liteav_av_hmac_update +#define ff_se_golomb_vlc_code liteav_ff_se_golomb_vlc_code +#define av_get_channel_layout liteav_av_get_channel_layout +#define ff_hevc_put_qpel_uw_weight_h3v3_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h3v3_neon_8 +#define av_bprint_strftime liteav_av_bprint_strftime +#define avcodec_flush_buffers liteav_avcodec_flush_buffers +#define ff_mpeg4_default_non_intra_matrix liteav_ff_mpeg4_default_non_intra_matrix +#define vlc_css_declarations_Append liteav_vlc_css_declarations_Append +#define ff_sine_8192_fixed liteav_ff_sine_8192_fixed +#define ff_pred4x4_tm_vp8_8_ssse3 liteav_ff_pred4x4_tm_vp8_8_ssse3 +#define ff_rtmp_calc_digest_pos liteav_ff_rtmp_calc_digest_pos +#define ff_mpv_frame_end liteav_ff_mpv_frame_end +#define ff_h264_idct8_add4_12_c liteav_ff_h264_idct8_add4_12_c +#define ff_reset_entries liteav_ff_reset_entries +#define avfilter_graph_request_oldest liteav_avfilter_graph_request_oldest +#define ff_socket liteav_ff_socket +#define ff_fdctdsp_init_x86 liteav_ff_fdctdsp_init_x86 +#define ff_mpeg4_studio_dc_luma liteav_ff_mpeg4_studio_dc_luma +#define av_blowfish_alloc liteav_av_blowfish_alloc +#define ff_put_pixels8_xy2_neon liteav_ff_put_pixels8_xy2_neon +#define ff_pred16x16_plane_rv40_8_sse2 liteav_ff_pred16x16_plane_rv40_8_sse2 +#define ff_hevc_sao_eo_class_decode liteav_ff_hevc_sao_eo_class_decode 
+#define av_bsf_get_null_filter liteav_av_bsf_get_null_filter +#define avio_get_str liteav_avio_get_str +#define av_packet_clone liteav_av_packet_clone +#define ff_hevc_put_pel_uw_pixels_w6_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w6_neon_8_asm +#define yuyvtoyuv422 liteav_yuyvtoyuv422 +#define yuyvtoyuv420 liteav_yuyvtoyuv420 +#define ff_pack_2ch_int32_to_float_u_sse2 liteav_ff_pack_2ch_int32_to_float_u_sse2 +#define yypop_buffer_state liteav_yypop_buffer_state +#define ff_h264_idct_add8_422_8_mmx liteav_ff_h264_idct_add8_422_8_mmx +#define ff_pred8x8l_horizontal_up_10_ssse3 liteav_ff_pred8x8l_horizontal_up_10_ssse3 +#define rgb16to15 liteav_rgb16to15 +#define ff_avg_h264_chroma_mc2_neon liteav_ff_avg_h264_chroma_mc2_neon +#define av_packet_pack_dictionary liteav_av_packet_pack_dictionary +#define av_basename liteav_av_basename +#define ff_sws_context_class liteav_ff_sws_context_class +#define ff_w4_min_w2_lo liteav_ff_w4_min_w2_lo +#define ff_channel_layouts_unref liteav_ff_channel_layouts_unref +#define vlc_css_parser_Init liteav_vlc_css_parser_Init +#define ff_put_no_rnd_qpel8_mc12_old_c liteav_ff_put_no_rnd_qpel8_mc12_old_c +#define ff_deblock_v_luma_8_avx liteav_ff_deblock_v_luma_8_avx +#define av_write_uncoded_frame_query liteav_av_write_uncoded_frame_query +#define ff_hevc_h_loop_filter_chroma_neon liteav_ff_hevc_h_loop_filter_chroma_neon +#define avio_printf liteav_avio_printf +#define av_parser_init liteav_av_parser_init +#define ff_cos_64_fixed liteav_ff_cos_64_fixed +#define avcodec_send_packet liteav_avcodec_send_packet +#define ff_put_no_rnd_qpel16_mc13_old_c liteav_ff_put_no_rnd_qpel16_mc13_old_c +#define ff_h263_format liteav_ff_h263_format +#define ff_cos_tabs liteav_ff_cos_tabs +#define ff_url_join liteav_ff_url_join +#define av_aes_ctr_increment_iv liteav_av_aes_ctr_increment_iv +#define ff_network_init liteav_ff_network_init +#define ff_avg_h264_chroma_mc4_neon liteav_ff_avg_h264_chroma_mc4_neon +#define ff_hevc_profiles 
liteav_ff_hevc_profiles +#define ff_thread_get_format liteav_ff_thread_get_format +#define ff_flac_is_extradata_valid liteav_ff_flac_is_extradata_valid +#define ff_ass_subtitle_header_default liteav_ff_ass_subtitle_header_default +#define ff_hevc_put_epel_uw_pixels_w4_neon_8 liteav_ff_hevc_put_epel_uw_pixels_w4_neon_8 +#define ff_read_line_to_bprint_overwrite liteav_ff_read_line_to_bprint_overwrite +#define rgb15to24 liteav_rgb15to24 +#define ff_h264_idct8_dc_add_10_sse2 liteav_ff_h264_idct8_dc_add_10_sse2 +#define ff_avg_qpel8_mc32_old_c liteav_ff_avg_qpel8_mc32_old_c +#define yyget_text liteav_yyget_text +#define shuffle_bytes_1230 liteav_shuffle_bytes_1230 +#define ff_decode_bsfs_init liteav_ff_decode_bsfs_init +#define ff_fft_end liteav_ff_fft_end +#define ff_start_tag liteav_ff_start_tag +#define ff_crcA001_update liteav_ff_crcA001_update +#define ff_sbr_hf_apply_noise_1_neon liteav_ff_sbr_hf_apply_noise_1_neon +#define av_realloc_f liteav_av_realloc_f +#define ff_pred8x8l_horizontal_10_sse2 liteav_ff_pred8x8l_horizontal_10_sse2 +#define av_image_copy_plane liteav_av_image_copy_plane +#define ff_mp3adufloat_decoder liteav_ff_mp3adufloat_decoder +#define av_buffersink_get_sample_aspect_ratio liteav_av_buffersink_get_sample_aspect_ratio +#define ff_vp9_profiles liteav_ff_vp9_profiles +#define ff_overlay_init_x86 liteav_ff_overlay_init_x86 +#define av_bprint_clear liteav_av_bprint_clear +#define av_get_pix_fmt_name liteav_av_get_pix_fmt_name +#define av_tx_uninit liteav_av_tx_uninit +#define ff_hevc_sao_band_filter_neon_8 liteav_ff_hevc_sao_band_filter_neon_8 +#define av_opt_flag_is_set liteav_av_opt_flag_is_set +#define ff_aac_sbr_init liteav_ff_aac_sbr_init +#define ff_ps_hybrid_analysis_ileave_sse liteav_ff_ps_hybrid_analysis_ileave_sse +#define ff_h264_build_ref_list liteav_ff_h264_build_ref_list +#define ff_h264_idct_dc_add_8_c liteav_ff_h264_idct_dc_add_8_c +#define ff_h263_intra_MCBPC_vlc liteav_ff_h263_intra_MCBPC_vlc +#define av_md5_init 
liteav_av_md5_init +#define av_thread_message_queue_free liteav_av_thread_message_queue_free +#define av_dynarray_add_nofree liteav_av_dynarray_add_nofree +#define ff_psdsp_init liteav_ff_psdsp_init +#define ff_avg_h264_qpel8_mc20_10_sse2_cache64 liteav_ff_avg_h264_qpel8_mc20_10_sse2_cache64 +#define av_match_list liteav_av_match_list +#define ff_mpeg12_frame_rate_tab liteav_ff_mpeg12_frame_rate_tab +#define ff_thread_await_progress liteav_ff_thread_await_progress +#define ff_put_h264_qpel8_mc10_neon liteav_ff_put_h264_qpel8_mc10_neon +#define ff_pred4x4_horizontal_up_10_mmxext liteav_ff_pred4x4_horizontal_up_10_mmxext +#define ff_float_to_int32_a_avx2 liteav_ff_float_to_int32_a_avx2 +#define ff_ps_mul_pair_single_neon liteav_ff_ps_mul_pair_single_neon +#define ff_null_get_audio_buffer liteav_ff_null_get_audio_buffer +#define ff_init_ff_cos_tabs liteav_ff_init_ff_cos_tabs +#define ff_h264_idct8_add4_8_c liteav_ff_h264_idct8_add4_8_c +#define ff_cos_1024_fixed liteav_ff_cos_1024_fixed +#define ff_fdct248_islow_8 liteav_ff_fdct248_islow_8 +#define av_buffersink_set_frame_size liteav_av_buffersink_set_frame_size +#define yyset_lval liteav_yyset_lval +#define ff_aac_kbd_short_128_fixed liteav_ff_aac_kbd_short_128_fixed +#define ff_avg_h264_qpel16_mc00_neon liteav_ff_avg_h264_qpel16_mc00_neon +#define avio_rl16 liteav_avio_rl16 +#define ff_hevc_put_epel_uw_hv_neon_8 liteav_ff_hevc_put_epel_uw_hv_neon_8 +#define ff_hevc_hls_filter liteav_ff_hevc_hls_filter +#define ff_aac_pow2sf_tab liteav_ff_aac_pow2sf_tab +#define av_buffersrc_close liteav_av_buffersrc_close +#define avcodec_receive_packet liteav_avcodec_receive_packet +#define ff_mpegvideo_parser liteav_ff_mpegvideo_parser +#define ff_hevc_put_qpel_uw_weight_h1v2_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h1v2_neon_8 +#define swr_next_pts liteav_swr_next_pts +#define av_get_sample_fmt_string liteav_av_get_sample_fmt_string +#define av_thread_message_queue_send liteav_av_thread_message_queue_send +#define 
ff_h264_idct_add16_12_c liteav_ff_h264_idct_add16_12_c +#define ff_h264_idct8_add_neon liteav_ff_h264_idct8_add_neon +#define ff_yuv2rgb_get_func_ptr liteav_ff_yuv2rgb_get_func_ptr +#define av_packet_ref liteav_av_packet_ref +#define ff_fdct_ifast248 liteav_ff_fdct_ifast248 +#define ff_pw_18 liteav_ff_pw_18 +#define av_opt_set_dict liteav_av_opt_set_dict +#define ff_hevc_put_pixels_w8_neon_8_asm liteav_ff_hevc_put_pixels_w8_neon_8_asm +#define ff_ps_read_data liteav_ff_ps_read_data +#define av_channel_layout_extract_channel liteav_av_channel_layout_extract_channel +#define av_encryption_info_clone liteav_av_encryption_info_clone +#define sws_allocVec liteav_sws_allocVec +#define ff_hevc_set_neighbour_available liteav_ff_hevc_set_neighbour_available +#define ff_yuv2planeX_8_neon liteav_ff_yuv2planeX_8_neon +#define ff_flac_blocksize_table liteav_ff_flac_blocksize_table +#define vlc_css_selector_Append liteav_vlc_css_selector_Append +#define ff_parse_mpeg2_descriptor liteav_ff_parse_mpeg2_descriptor +#define ffio_read_varlen liteav_ffio_read_varlen +#define ffio_read_size liteav_ffio_read_size +#define ff_accept liteav_ff_accept +#define ff_ebur128_add_frames_planar_float liteav_ff_ebur128_add_frames_planar_float +#define ff_draw_color liteav_ff_draw_color +#define ff_isom_get_vpcc_features liteav_ff_isom_get_vpcc_features +#define ff_framesync_preinit liteav_ff_framesync_preinit +#define ff_hevc_parser liteav_ff_hevc_parser +#define ff_pred8x8l_down_right_8_ssse3 liteav_ff_pred8x8l_down_right_8_ssse3 +#define av_buffer_pool_init2 liteav_av_buffer_pool_init2 +#define ff_hevc_put_qpel_uni_neon_wrapper liteav_ff_hevc_put_qpel_uni_neon_wrapper +#define ff_avg_h264_qpel8or16_hv2_lowpass_ssse3 liteav_ff_avg_h264_qpel8or16_hv2_lowpass_ssse3 +#define ffurl_get_short_seek liteav_ffurl_get_short_seek +#define ff_pred16x16_vertical_8_sse liteav_ff_pred16x16_vertical_8_sse +#define ff_mb_type_b_tab liteav_ff_mb_type_b_tab +#define ff_h263_decode_motion 
liteav_ff_h263_decode_motion +#define ff_hevc_put_pixels_w4_neon_8_asm liteav_ff_hevc_put_pixels_w4_neon_8_asm +#define ff_free_picture_tables liteav_ff_free_picture_tables +#define av_timecode_make_smpte_tc_string liteav_av_timecode_make_smpte_tc_string +#define av_murmur3_alloc liteav_av_murmur3_alloc +#define ff_deblock_v_luma_10_avx liteav_ff_deblock_v_luma_10_avx +#define ff_mpeg12_vlc_dc_chroma_bits liteav_ff_mpeg12_vlc_dc_chroma_bits +#define ff_put_pixels8_x2_no_rnd_neon liteav_ff_put_pixels8_x2_no_rnd_neon +#define ff_simple_idct8_avx liteav_ff_simple_idct8_avx +#define ff_nv21_to_rgba_neon liteav_ff_nv21_to_rgba_neon +#define ff_h264_chroma_dc_dequant_idct_8_c liteav_ff_h264_chroma_dc_dequant_idct_8_c +#define ff_hevc_sao_band_w16_neon_8 liteav_ff_hevc_sao_band_w16_neon_8 +#define av_hwdevice_ctx_create liteav_av_hwdevice_ctx_create +#define av_muxer_iterate liteav_av_muxer_iterate +#define ff_faandct248 liteav_ff_faandct248 +#define ff_pack_2ch_int16_to_float_a_sse2 liteav_ff_pack_2ch_int16_to_float_a_sse2 +#define ff_mov_read_esds liteav_ff_mov_read_esds +#define avformat_init_output liteav_avformat_init_output +#define av_strndup liteav_av_strndup +#define ff_simple_idct12_sse2 liteav_ff_simple_idct12_sse2 +#define av_msg liteav_av_msg +#define ff_hevc_compute_poc liteav_ff_hevc_compute_poc +#define text_style_merge liteav_text_style_merge +#define av_strlcat liteav_av_strlcat +#define ff_h2645_packet_split liteav_ff_h2645_packet_split +#define ff_avg_h264_qpel16_mc23_10_sse2 liteav_ff_avg_h264_qpel16_mc23_10_sse2 +#define av_buffer_realloc liteav_av_buffer_realloc +#define ff_ass_split_dialog liteav_ff_ass_split_dialog +#define ff_hevc_deblocking_boundary_strengths liteav_ff_hevc_deblocking_boundary_strengths +#define ff_w1_plus_w3_lo liteav_ff_w1_plus_w3_lo +#define ff_hevc_transform_8x8_neon_8_asm liteav_ff_hevc_transform_8x8_neon_8_asm +#define av_thread_message_queue_set_err_send liteav_av_thread_message_queue_set_err_send +#define 
av_log_get_flags liteav_av_log_get_flags +#define ff_get_format liteav_ff_get_format +#define ff_framesync_get_class liteav_ff_framesync_get_class +#define ff_h264_cabac_tables liteav_ff_h264_cabac_tables +#define ff_hevc_put_qpel_h3v3_neon_8 liteav_ff_hevc_put_qpel_h3v3_neon_8 +#define ff_update_picture_tables liteav_ff_update_picture_tables +#define ff_w3_min_w7_lo liteav_ff_w3_min_w7_lo +#define av_bsf_init liteav_av_bsf_init +#define av_frame_set_colorspace liteav_av_frame_set_colorspace +#define ff_fdct248_islow_10 liteav_ff_fdct248_islow_10 +#define ff_put_h264_qpel16_mc33_neon liteav_ff_put_h264_qpel16_mc33_neon +#define ff_ac3_log_add_tab liteav_ff_ac3_log_add_tab +#define ff_hevc_put_qpel_uw_weight_h2v1_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h2v1_neon_8 +#define ff_tns_max_bands_1024 liteav_ff_tns_max_bands_1024 +#define ff_avg_h264_qpel8_mc31_10_sse2 liteav_ff_avg_h264_qpel8_mc31_10_sse2 +#define ff_pred8x8l_vertical_right_8_sse2 liteav_ff_pred8x8l_vertical_right_8_sse2 +#define av_bsf_alloc liteav_av_bsf_alloc +#define ff_h263_find_frame_end liteav_ff_h263_find_frame_end +#define ff_dither_8x8_32 liteav_ff_dither_8x8_32 +#define ff_h264_weight_4_10_sse4 liteav_ff_h264_weight_4_10_sse4 +#define avcodec_default_get_format liteav_avcodec_default_get_format +#define ff_biweight_h264_pixels_16_neon liteav_ff_biweight_h264_pixels_16_neon +#define ff_pred8x8_dc_rv40_8_mmxext liteav_ff_pred8x8_dc_rv40_8_mmxext +#define av_set_options_string liteav_av_set_options_string +#define ff_srt_demuxer liteav_ff_srt_demuxer +#define swri_oldapi_conv_flt_to_s16_neon liteav_swri_oldapi_conv_flt_to_s16_neon +#define ff_h264_idct8_add_14_c liteav_ff_h264_idct8_add_14_c +#define swri_dither_init liteav_swri_dither_init +#define ff_h264_pred_weight_table liteav_ff_h264_pred_weight_table +#define ff_h264_h_loop_filter_luma_neon liteav_ff_h264_h_loop_filter_luma_neon +#define ff_put_pixels4_l2_shift5_mmxext liteav_ff_put_pixels4_l2_shift5_mmxext +#define yydebug liteav_yydebug 
+#define av_packet_unref liteav_av_packet_unref +#define ff_hevc_put_qpel_uw_h3v3_neon_8 liteav_ff_hevc_put_qpel_uw_h3v3_neon_8 +#define ff_pack_8ch_int32_to_float_a_avx liteav_ff_pack_8ch_int32_to_float_a_avx +#define av_spherical_projection_name liteav_av_spherical_projection_name +#define ff_flac_demuxer liteav_ff_flac_demuxer +#define ff_tls_protocol liteav_ff_tls_protocol +#define avcodec_find_encoder_by_name liteav_avcodec_find_encoder_by_name +#define ff_mpeg4_decode_partitions liteav_ff_mpeg4_decode_partitions +#define ff_put_no_rnd_qpel8_mc13_old_c liteav_ff_put_no_rnd_qpel8_mc13_old_c +#define av_bsf_send_packet liteav_av_bsf_send_packet +#define ff_ass_add_rect liteav_ff_ass_add_rect +#define ff_faandct liteav_ff_faandct +#define ff_put_h264_qpel8or16_hv1_lowpass_op_sse2 liteav_ff_put_h264_qpel8or16_hv1_lowpass_op_sse2 +#define ff_avg_h264_qpel8_mc30_10_sse2_cache64 liteav_ff_avg_h264_qpel8_mc30_10_sse2_cache64 +#define ff_alloc_dir_entry liteav_ff_alloc_dir_entry +#define ff_hevc_qpel_filters liteav_ff_hevc_qpel_filters +#define ff_mdct_win_fixed liteav_ff_mdct_win_fixed +#define ff_mov_write_packet liteav_ff_mov_write_packet +#define ff_sine_512 liteav_ff_sine_512 +#define ff_rtmp_check_alloc_array liteav_ff_rtmp_check_alloc_array +#define av_image_check_size liteav_av_image_check_size +#define ff_pred8x8_plane_neon liteav_ff_pred8x8_plane_neon +#define ff_h264_weight_8_mmxext liteav_ff_h264_weight_8_mmxext +#define ff_aac_codebook_vector_vals liteav_ff_aac_codebook_vector_vals +#define ff_af_queue_init liteav_ff_af_queue_init +#define ff_pred8x8l_top_dc_8_ssse3 liteav_ff_pred8x8l_top_dc_8_ssse3 +#define ff_swb_offset_512 liteav_ff_swb_offset_512 +#define vlc_css_expression_AddTerm liteav_vlc_css_expression_AddTerm +#define sws_getContext liteav_sws_getContext +#define ff_h264_update_thread_context liteav_ff_h264_update_thread_context +#define ff_hevc_put_qpel_uw_weight_h1_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h1_neon_8 +#define avio_read_dir 
liteav_avio_read_dir +#define sws_printVec2 liteav_sws_printVec2 +#define av_get_channel_layout_string liteav_av_get_channel_layout_string +#define av_audio_fifo_read liteav_av_audio_fifo_read +#define ff_put_h264_qpel4_mc02_10_mmxext liteav_ff_put_h264_qpel4_mc02_10_mmxext +#define ff_filter_activate liteav_ff_filter_activate +#define av_cpu_count liteav_av_cpu_count +#define ff_put_pixels8_y2_neon liteav_ff_put_pixels8_y2_neon +#define av_parser_iterate liteav_av_parser_iterate +#define ffio_ensure_seekback liteav_ffio_ensure_seekback +#define ff_lzw_encode_flush liteav_ff_lzw_encode_flush +#define ff_sbr_sum_square_neon liteav_ff_sbr_sum_square_neon +#define ff_pw_m1 liteav_ff_pw_m1 +#define av_get_known_color_name liteav_av_get_known_color_name +#define ffio_get_checksum liteav_ffio_get_checksum +#define ff_put_h264_qpel16_mc01_neon liteav_ff_put_h264_qpel16_mc01_neon +#define ff_live_flv_demuxer liteav_ff_live_flv_demuxer +#define avpriv_get_raw_pix_fmt_tags liteav_avpriv_get_raw_pix_fmt_tags +#define ff_mpa_synth_window_float liteav_ff_mpa_synth_window_float +#define av_display_rotation_set liteav_av_display_rotation_set +#define ff_rgb24toyv12 liteav_ff_rgb24toyv12 +#define av_hex_dump_log liteav_av_hex_dump_log +#define av_encryption_init_info_free liteav_av_encryption_init_info_free +#define ff_pred8x8l_vertical_8_ssse3 liteav_ff_pred8x8l_vertical_8_ssse3 +#define av_find_nearest_q_idx liteav_av_find_nearest_q_idx +#define ff_put_h264_qpel16_mc10_10_sse2 liteav_ff_put_h264_qpel16_mc10_10_sse2 +#define ff_hevc_put_qpel_uw_h3v2_neon_8 liteav_ff_hevc_put_qpel_uw_h3v2_neon_8 +#define ff_avg_h264_qpel16_mc23_neon liteav_ff_avg_h264_qpel16_mc23_neon +#define av_dict_set_int liteav_av_dict_set_int +#define ff_h264_weight_16_10_sse4 liteav_ff_h264_weight_16_10_sse4 +#define av_get_planar_sample_fmt liteav_av_get_planar_sample_fmt +#define ff_w3_min_w7_hi liteav_ff_w3_min_w7_hi +#define ff_sine_windows_fixed liteav_ff_sine_windows_fixed +#define 
ff_deblock_v_luma_intra_8_avx liteav_ff_deblock_v_luma_intra_8_avx +#define ff_yuv420p_to_rgba_neon liteav_ff_yuv420p_to_rgba_neon +#define av_packet_add_side_data liteav_av_packet_add_side_data +#define ff_unpack_6ch_float_to_float_a_avx liteav_ff_unpack_6ch_float_to_float_a_avx +#define avio_read_partial liteav_avio_read_partial +#define avpriv_dict_set_timestamp liteav_avpriv_dict_set_timestamp +#define ff_h263_pred_dc liteav_ff_h263_pred_dc +#define ff_hevc_put_qpel_neon_wrapper liteav_ff_hevc_put_qpel_neon_wrapper +#define ff_sine_4096_fixed liteav_ff_sine_4096_fixed +#define ff_id3v2_write_metadata liteav_ff_id3v2_write_metadata +#define av_pkt_dump_log2 liteav_av_pkt_dump_log2 +#define ff_rtp_codec_id liteav_ff_rtp_codec_id +#define av_get_random_seed liteav_av_get_random_seed +#define av_opt_eval_int liteav_av_opt_eval_int +#define ff_alac_at_decoder liteav_ff_alac_at_decoder +#define ff_ac3_parse_header liteav_ff_ac3_parse_header +#define ff_avg_h264_qpel4_mc12_10_mmxext liteav_ff_avg_h264_qpel4_mc12_10_mmxext +#define ff_ass_decoder_flush liteav_ff_ass_decoder_flush +#define ff_hevc_skip_flag_decode liteav_ff_hevc_skip_flag_decode +#define avpriv_vga16_font liteav_avpriv_vga16_font +#define av_tx_init liteav_av_tx_init +#define ff_af_aresample liteav_ff_af_aresample +#define av_ripemd_update liteav_av_ripemd_update +#define ff_hevc_h_loop_filter_luma_neon liteav_ff_hevc_h_loop_filter_luma_neon +#define ff_raw_write_packet liteav_ff_raw_write_packet +#define ff_null_bsf liteav_ff_null_bsf +#define ff_jpeg_fdct_islow_8 liteav_ff_jpeg_fdct_islow_8 +#define ff_h264_idct_add16intra_10_c liteav_ff_h264_idct_add16intra_10_c +#define ff_write_chained liteav_ff_write_chained +#define ffio_close_null_buf liteav_ffio_close_null_buf +#define ff_pred8x8_plane_8_sse2 liteav_ff_pred8x8_plane_8_sse2 +#define ff_filter_frame liteav_ff_filter_frame +#define ff_filter_get_nb_threads liteav_ff_filter_get_nb_threads +#define ff_h263_decode_end liteav_ff_h263_decode_end 
+#define avpriv_cga_font liteav_avpriv_cga_font +#define ff_hevc_decode_short_term_rps liteav_ff_hevc_decode_short_term_rps +#define ff_pred8x8l_horizontal_8_ssse3 liteav_ff_pred8x8l_horizontal_8_ssse3 +#define ff_aac_adtstoasc_bsf liteav_ff_aac_adtstoasc_bsf +#define ff_hevc_cu_qp_delta_sign_flag liteav_ff_hevc_cu_qp_delta_sign_flag +#define av_bprint_finalize liteav_av_bprint_finalize +#define ff_hevc_unref_frame liteav_ff_hevc_unref_frame +#define ff_mpegaudio_parser liteav_ff_mpegaudio_parser +#define ff_put_h264_qpel8_mc03_neon liteav_ff_put_h264_qpel8_mc03_neon +#define av_packet_make_writable liteav_av_packet_make_writable +#define av_force_cpu_flags liteav_av_force_cpu_flags +#define av_fast_realloc liteav_av_fast_realloc +#define ff_default_chroma_qscale_table liteav_ff_default_chroma_qscale_table +#define av_bsf_list_free liteav_av_bsf_list_free +#define av_frame_set_channels liteav_av_frame_set_channels +#define ff_put_h264_qpel16_mc30_neon liteav_ff_put_h264_qpel16_mc30_neon +#define ff_vorbis_channel_layouts liteav_ff_vorbis_channel_layouts +#define ff_cos_32768_fixed liteav_ff_cos_32768_fixed +#define ff_flv_muxer liteav_ff_flv_muxer +#define ff_hevc_idct_16x16_dc_neon_8 liteav_ff_hevc_idct_16x16_dc_neon_8 +#define ff_h264_execute_decode_slices liteav_ff_h264_execute_decode_slices +#define ff_af_queue_remove liteav_ff_af_queue_remove +#define avpicture_alloc liteav_avpicture_alloc +#define ff_mpeg2_non_linear_qscale liteav_ff_mpeg2_non_linear_qscale +#define ff_mpegvideodsp_init liteav_ff_mpegvideodsp_init +#define ff_hevc_sao_edge_eo1_w64_neon_8 liteav_ff_hevc_sao_edge_eo1_w64_neon_8 +#define interleaveBytes liteav_interleaveBytes +#define ff_avg_h264_qpel16_mc13_10_sse2 liteav_ff_avg_h264_qpel16_mc13_10_sse2 +#define av_parser_next liteav_av_parser_next +#define ff_pred8x8_top_dc_10_sse2 liteav_ff_pred8x8_top_dc_10_sse2 +#define ff_avg_h264_qpel4_mc11_10_mmxext liteav_ff_avg_h264_qpel4_mc11_10_mmxext +#define ff_wavpack_decoder 
liteav_ff_wavpack_decoder +#define avio_seek_time liteav_avio_seek_time +#define ff_hevc_add_residual_32x32_neon_8 liteav_ff_hevc_add_residual_32x32_neon_8 +#define av_small_strptime liteav_av_small_strptime +#define ff_put_pixels16_y2_neon liteav_ff_put_pixels16_y2_neon +#define ff_hevc_put_epel_h_neon_8 liteav_ff_hevc_put_epel_h_neon_8 +#define ff_imdct_calc_sse liteav_ff_imdct_calc_sse +#define av_picture_copy liteav_av_picture_copy +#define av_stereo3d_type_name liteav_av_stereo3d_type_name +#define av_frame_set_metadata liteav_av_frame_set_metadata +#define av_hwdevice_ctx_create_derived liteav_av_hwdevice_ctx_create_derived +#define av_sdp_create liteav_av_sdp_create +#define ff_mpeg4_intra_level liteav_ff_mpeg4_intra_level +#define ff_hevc_transform_32x32_neon_8 liteav_ff_hevc_transform_32x32_neon_8 +#define ff_pred8x8_dc_10_mmxext liteav_ff_pred8x8_dc_10_mmxext +#define rgb15to32 liteav_rgb15to32 +#define av_opt_set liteav_av_opt_set +#define ff_h264_luma_dc_dequant_idct_mmx liteav_ff_h264_luma_dc_dequant_idct_mmx +#define ff_avg_h264_qpel8_h_lowpass_l2_mmxext liteav_ff_avg_h264_qpel8_h_lowpass_l2_mmxext +#define avio_pause liteav_avio_pause +#define ff_fill_rgba_map liteav_ff_fill_rgba_map +#define ff_yuv420p_to_bgra_neon liteav_ff_yuv420p_to_bgra_neon +#define av_dict_get_string liteav_av_dict_get_string +#define ff_hcscale_fast_c liteav_ff_hcscale_fast_c +#define ff_is_multicast_address liteav_ff_is_multicast_address +#define ff_replaygain_export_raw liteav_ff_replaygain_export_raw +#define ff_fft_permute_sse liteav_ff_fft_permute_sse +#define ff_mba_max liteav_ff_mba_max +#define vlc_css_rules_Delete liteav_vlc_css_rules_Delete +#define ff_shuffle_bytes_3210_ssse3 liteav_ff_shuffle_bytes_3210_ssse3 +#define ff_put_h264_qpel16_mc20_10_sse2 liteav_ff_put_h264_qpel16_mc20_10_sse2 +#define ff_htmlmarkup_to_ass liteav_ff_htmlmarkup_to_ass +#define av_frame_get_color_range liteav_av_frame_get_color_range +#define ff_h263_pred_motion liteav_ff_h263_pred_motion 
+#define av_fifo_free liteav_av_fifo_free +#define ff_urlcontext_child_class_next liteav_ff_urlcontext_child_class_next +#define ff_avg_h264_qpel8_mc33_10_sse2 liteav_ff_avg_h264_qpel8_mc33_10_sse2 +#define ff_pw_53 liteav_ff_pw_53 +#define ff_h263_decode_mba liteav_ff_h263_decode_mba +#define ff_avg_h264_qpel4_hv_lowpass_v_mmxext liteav_ff_avg_h264_qpel4_hv_lowpass_v_mmxext +#define ff_sbr_sum64x5_neon liteav_ff_sbr_sum64x5_neon +#define av_samples_alloc_array_and_samples liteav_av_samples_alloc_array_and_samples +#define av_audio_fifo_realloc liteav_av_audio_fifo_realloc +#define ff_thread_release_buffer liteav_ff_thread_release_buffer +#define ff_pack_2ch_int32_to_int32_a_sse2 liteav_ff_pack_2ch_int32_to_int32_a_sse2 +#define ff_hevc_put_qpel_uni_w_neon_8 liteav_ff_hevc_put_qpel_uni_w_neon_8 +#define ff_hevc_pred_angular_8x8_h_neon_8 liteav_ff_hevc_pred_angular_8x8_h_neon_8 +#define ff_mpv_export_qp_table liteav_ff_mpv_export_qp_table +#define vlc_css_unescaped liteav_vlc_css_unescaped +#define ff_avg_h264_qpel8_mc10_neon liteav_ff_avg_h264_qpel8_mc10_neon +#define av_rescale_q liteav_av_rescale_q +#define ff_psdsp_init_aarch64 liteav_ff_psdsp_init_aarch64 +#define av_q2intfloat liteav_av_q2intfloat +#define ff_pred8x8l_down_left_10_sse2 liteav_ff_pred8x8l_down_left_10_sse2 +#define ff_aac_at_decoder liteav_ff_aac_at_decoder +#define ff_filter_process_command liteav_ff_filter_process_command +#define ff_ass_split_free liteav_ff_ass_split_free +#define av_stristart liteav_av_stristart +#define ff_simple_idct8_put_sse2 liteav_ff_simple_idct8_put_sse2 +#define ff_mp3_at_decoder liteav_ff_mp3_at_decoder +#define avcodec_find_best_pix_fmt_of_list liteav_avcodec_find_best_pix_fmt_of_list +#define ff_hevc_put_qpel_h1_neon_8 liteav_ff_hevc_put_qpel_h1_neon_8 +#define ff_avg_h264_qpel16_mc20_10_sse2 liteav_ff_avg_h264_qpel16_mc20_10_sse2 +#define avpriv_io_delete liteav_avpriv_io_delete +#define ff_h264_v_loop_filter_luma_neon liteav_ff_h264_v_loop_filter_luma_neon 
+#define ff_ebur128_add_frames_planar_short liteav_ff_ebur128_add_frames_planar_short +#define ff_mpeg4_decode_picture_header liteav_ff_mpeg4_decode_picture_header +#define av_buffersrc_write_frame liteav_av_buffersrc_write_frame +#define av_crc_get_table liteav_av_crc_get_table +#define ff_ebur128_set_channel liteav_ff_ebur128_set_channel +#define ff_dither_4x4_16 liteav_ff_dither_4x4_16 +#define ff_framesync_get_frame liteav_ff_framesync_get_frame +#define ff_aac_ac3_parse liteav_ff_aac_ac3_parse +#define ff_vector_fmul_reverse_neon liteav_ff_vector_fmul_reverse_neon +#define ff_mpeg2_dc_scale_table liteav_ff_mpeg2_dc_scale_table +#define webvtt_parser_init liteav_webvtt_parser_init +#define sws_alloc_set_opts liteav_sws_alloc_set_opts +#define ff_text_peek_r8 liteav_ff_text_peek_r8 +#define ff_framesync_configure liteav_ff_framesync_configure +#define ff_aac_parser liteav_ff_aac_parser +#define ff_hevc_put_qpel_h2v1_neon_8 liteav_ff_hevc_put_qpel_h2v1_neon_8 +#define ff_put_h264_qpel8_mc20_10_ssse3_cache64 liteav_ff_put_h264_qpel8_mc20_10_ssse3_cache64 +#define ff_hevc_transform_add_8x8_neon_8_asm liteav_ff_hevc_transform_add_8x8_neon_8_asm +#define ff_pred8x8_horizontal_8_ssse3 liteav_ff_pred8x8_horizontal_8_ssse3 +#define ff_w4_plus_w6_lo liteav_ff_w4_plus_w6_lo +#define av_fifo_alloc_array liteav_av_fifo_alloc_array +#define ff_fft_end_fixed_32 liteav_ff_fft_end_fixed_32 +#define ff_avg_pixels8_y2_neon liteav_ff_avg_pixels8_y2_neon +#define ff_init_vlc_sparse liteav_ff_init_vlc_sparse +#define ff_hevc_put_pel_uw_pixels_w24_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w24_neon_8_asm +#define ff_sbr_qmf_deint_bfly_neon liteav_ff_sbr_qmf_deint_bfly_neon +#define sws_subVec liteav_sws_subVec +#define ff_ac3_channels_tab liteav_ff_ac3_channels_tab +#define avformat_get_riff_video_tags liteav_avformat_get_riff_video_tags +#define ff_faanidct_put liteav_ff_faanidct_put +#define ff_pred4x4_down_right_10_avx liteav_ff_pred4x4_down_right_10_avx +#define 
ff_put_h264_qpel4_h_lowpass_l2_mmxext liteav_ff_put_h264_qpel4_h_lowpass_l2_mmxext +#define ff_get_cpu_flags_aarch64 liteav_ff_get_cpu_flags_aarch64 +#define ffurl_alloc liteav_ffurl_alloc +#define av_set_cpu_flags_mask liteav_av_set_cpu_flags_mask +#define ff_avg_h264_chroma_mc8_rnd_3dnow liteav_ff_avg_h264_chroma_mc8_rnd_3dnow +#define ff_merge_formats liteav_ff_merge_formats +#define ff_h264_p_sub_mb_type_info liteav_ff_h264_p_sub_mb_type_info +#define ff_mpeg1_decode_block_intra liteav_ff_mpeg1_decode_block_intra +#define yylex liteav_yylex +#define ff_sdp_write_media liteav_ff_sdp_write_media +#define ff_idctdsp_init_x86 liteav_ff_idctdsp_init_x86 +#define ff_hevc_rem_intra_luma_pred_mode_decode liteav_ff_hevc_rem_intra_luma_pred_mode_decode +#define ff_aac_sbr_ctx_init liteav_ff_aac_sbr_ctx_init +#define ff_put_h264_qpel16_mc02_10_sse2 liteav_ff_put_h264_qpel16_mc02_10_sse2 +#define ff_mpeg12_mbAddrIncrTable liteav_ff_mpeg12_mbAddrIncrTable +#define av_get_cpu_flags liteav_av_get_cpu_flags +#define ff_avg_h264_qpel8_mc11_10_sse2 liteav_ff_avg_h264_qpel8_mc11_10_sse2 +#define ff_eac3_default_cpl_band_struct liteav_ff_eac3_default_cpl_band_struct +#define ff_h264_decode_init_vlc liteav_ff_h264_decode_init_vlc +#define ff_frame_pool_audio_init liteav_ff_frame_pool_audio_init +#define ff_tlog_ref liteav_ff_tlog_ref +#define ff_w4_plus_w2_lo liteav_ff_w4_plus_w2_lo +#define ff_h264_quant_div6 liteav_ff_h264_quant_div6 +#define ff_get_guid liteav_ff_get_guid +#define ff_pack_2ch_float_to_int16_a_sse2 liteav_ff_pack_2ch_float_to_int16_a_sse2 +#define avcodec_send_frame liteav_avcodec_send_frame +#define ff_hevc_pred_angular_32x32_h_zero_neon_8 liteav_ff_hevc_pred_angular_32x32_h_zero_neon_8 +#define ff_put_h264_qpel16_mc30_10_sse2 liteav_ff_put_h264_qpel16_mc30_10_sse2 +#define ff_h264_init_poc liteav_ff_h264_init_poc +#define avfilter_graph_set_auto_convert liteav_avfilter_graph_set_auto_convert +#define ff_adts_header_parse liteav_ff_adts_header_parse +#define 
ff_h264dsp_init liteav_ff_h264dsp_init +#define ff_jref_idct_add liteav_ff_jref_idct_add +#define ff_bswapdsp_init liteav_ff_bswapdsp_init +#define av_des_crypt liteav_av_des_crypt +#define ff_put_h264_qpel16_mc00_10_sse2 liteav_ff_put_h264_qpel16_mc00_10_sse2 +#define ff_deblock_v_chroma_8_mmxext liteav_ff_deblock_v_chroma_8_mmxext +#define rgb48tobgr64_bswap liteav_rgb48tobgr64_bswap +#define ff_yuv2rgb_coeffs liteav_ff_yuv2rgb_coeffs +#define ff_hevc_pred_planar_4x4_neon_8_1 liteav_ff_hevc_pred_planar_4x4_neon_8_1 +#define av_samples_get_buffer_size liteav_av_samples_get_buffer_size +#define ff_pred16x16_plane_svq3_8_ssse3 liteav_ff_pred16x16_plane_svq3_8_ssse3 +#define ff_filter_set_ready liteav_ff_filter_set_ready +#define avcodec_find_best_pix_fmt2 liteav_avcodec_find_best_pix_fmt2 +#define ff_w5_min_w1 liteav_ff_w5_min_w1 +#define ff_mpeg4_intra_vlc liteav_ff_mpeg4_intra_vlc +#define ff_metadata_conv_ctx liteav_ff_metadata_conv_ctx +#define ff_raw_pix_fmt_tags liteav_ff_raw_pix_fmt_tags +#define ff_avg_h264_qpel16_mc33_10_sse2 liteav_ff_avg_h264_qpel16_mc33_10_sse2 +#define ff_framequeue_init liteav_ff_framequeue_init +#define ff_mdct_calc_c liteav_ff_mdct_calc_c +#define ff_h264qpel_init_x86 liteav_ff_h264qpel_init_x86 +#define ff_pred8x8_left_dc_neon liteav_ff_pred8x8_left_dc_neon +#define ff_hevc_pred_angular_4x4_neon_8 liteav_ff_hevc_pred_angular_4x4_neon_8 +#define ff_unpack_6ch_float_to_int32_u_sse2 liteav_ff_unpack_6ch_float_to_int32_u_sse2 +#define ff_mov_cenc_free liteav_ff_mov_cenc_free +#define ff_text_pos liteav_ff_text_pos +#define ff_int16_to_int32_u_sse2 liteav_ff_int16_to_int32_u_sse2 +#define av_opt_get_sample_fmt liteav_av_opt_get_sample_fmt +#define swr_set_channel_mapping liteav_swr_set_channel_mapping +#define av_hwdevice_hwconfig_alloc liteav_av_hwdevice_hwconfig_alloc +#define ff_hevc_sao_band_position_decode liteav_ff_hevc_sao_band_position_decode +#define ff_unpack_2ch_int16_to_float_u_ssse3 
liteav_ff_unpack_2ch_int16_to_float_u_ssse3 +#define av_content_light_metadata_create_side_data liteav_av_content_light_metadata_create_side_data +#define ff_lzw_encode_state_size liteav_ff_lzw_encode_state_size +#define ff_mpv_decode_init liteav_ff_mpv_decode_init +#define ff_mov_demuxer liteav_ff_mov_demuxer +#define ff_cos_32_fixed liteav_ff_cos_32_fixed +#define ffio_init_checksum liteav_ffio_init_checksum +#define ff_h264_idct_add_8_mmx liteav_ff_h264_idct_add_8_mmx +#define ff_h264_profiles liteav_ff_h264_profiles +#define ffurl_context_class liteav_ffurl_context_class +#define ff_ebur128_loudness_shortterm liteav_ff_ebur128_loudness_shortterm +#define ff_framequeue_global_init liteav_ff_framequeue_global_init +#define ff_latm_muxer liteav_ff_latm_muxer +#define av_hwframe_ctx_init liteav_av_hwframe_ctx_init +#define ff_qdmc_at_decoder liteav_ff_qdmc_at_decoder +#define ff_avg_h264_qpel16_mc20_10_sse2_cache64 liteav_ff_avg_h264_qpel16_mc20_10_sse2_cache64 +#define yy_switch_to_buffer liteav_yy_switch_to_buffer +#define ff_thread_await_progress2 liteav_ff_thread_await_progress2 +#define avpriv_mpa_bitrate_tab liteav_avpriv_mpa_bitrate_tab +#define ff_avg_h264_qpel8_mc32_10_sse2 liteav_ff_avg_h264_qpel8_mc32_10_sse2 +#define ff_mpeg4audio_get_config_gb liteav_ff_mpeg4audio_get_config_gb +#define ff_hevc_put_qpel_uw_weight_v1_neon_8 liteav_ff_hevc_put_qpel_uw_weight_v1_neon_8 +#define av_fopen_utf8 liteav_av_fopen_utf8 +#define ff_fft_calc_sse liteav_ff_fft_calc_sse +#define ff_h264_parser liteav_ff_h264_parser +#define ff_aac_sbr_ctx_close liteav_ff_aac_sbr_ctx_close +#define ff_avg_h264_qpel8_mc23_neon liteav_ff_avg_h264_qpel8_mc23_neon +#define rgb16to32 liteav_rgb16to32 +#define ff_shuffle_bytes_3012_ssse3 liteav_ff_shuffle_bytes_3012_ssse3 +#define av_sha512_size liteav_av_sha512_size +#define ff_pred16x16_plane_rv40_8_ssse3 liteav_ff_pred16x16_plane_rv40_8_ssse3 +#define ff_h264_queue_decode_slice liteav_ff_h264_queue_decode_slice +#define 
ff_weight_h264_pixels_8_neon liteav_ff_weight_h264_pixels_8_neon +#define ff_pred8x8l_horizontal_up_8_ssse3 liteav_ff_pred8x8l_horizontal_up_8_ssse3 +#define av_packet_split_side_data liteav_av_packet_split_side_data +#define ff_put_h264_qpel8_mc00_10_sse2 liteav_ff_put_h264_qpel8_mc00_10_sse2 +#define av_color_space_from_name liteav_av_color_space_from_name +#define ff_nv12_to_rgba_neon liteav_ff_nv12_to_rgba_neon +#define ff_put_h264_qpel8_mc12_neon liteav_ff_put_h264_qpel8_mc12_neon +#define ff_mdct_calc_neon liteav_ff_mdct_calc_neon +#define ff_init_desc_no_chr liteav_ff_init_desc_no_chr +#define ff_unpack_2ch_int16_to_int16_a_ssse3 liteav_ff_unpack_2ch_int16_to_int16_a_ssse3 +#define ff_hevc_put_qpel_bi_w_neon_8 liteav_ff_hevc_put_qpel_bi_w_neon_8 +#define ff_init_slice_from_src liteav_ff_init_slice_from_src +#define ff_mpa_synth_window_fixed liteav_ff_mpa_synth_window_fixed +#define ff_all_channel_counts liteav_ff_all_channel_counts +#define ff_me_cmp_init liteav_ff_me_cmp_init +#define ff_pred4x4_dc_8_mmxext liteav_ff_pred4x4_dc_8_mmxext +#define av_opt_set_double liteav_av_opt_set_double +#define av_hash_init liteav_av_hash_init +#define ff_weight_h264_pixels_4_neon liteav_ff_weight_h264_pixels_4_neon +#define av_fifo_realloc2 liteav_av_fifo_realloc2 +#define ff_fft_end_fixed liteav_ff_fft_end_fixed +#define ff_amf_write_number liteav_ff_amf_write_number +#define ff_sbr_qmf_deint_neg_neon liteav_ff_sbr_qmf_deint_neg_neon +#define ff_poll_frame liteav_ff_poll_frame +#define av_codec_iterate liteav_av_codec_iterate +#define ff_unpack_6ch_int32_to_float_u_sse2 liteav_ff_unpack_6ch_int32_to_float_u_sse2 +#define ff_mpeg4audio_channels liteav_ff_mpeg4audio_channels +#define ff_hevc_sao_edge_filter_8_neon liteav_ff_hevc_sao_edge_filter_8_neon +#define ff_h263_hwaccel_pixfmt_list_420 liteav_ff_h263_hwaccel_pixfmt_list_420 +#define av_buffersink_get_channel_layout liteav_av_buffersink_get_channel_layout +#define av_buffer_alloc liteav_av_buffer_alloc +#define 
yyget_leng liteav_yyget_leng +#define av_buffer_pool_get liteav_av_buffer_pool_get +#define ff_pred16x16_plane_svq3_8_sse2 liteav_ff_pred16x16_plane_svq3_8_sse2 +#define ff_hevc_decode_nal_sei liteav_ff_hevc_decode_nal_sei +#define ff_pack_6ch_int32_to_float_a_sse2 liteav_ff_pack_6ch_int32_to_float_a_sse2 +#define av_stereo3d_from_name liteav_av_stereo3d_from_name +#define ff_guess_image2_codec liteav_ff_guess_image2_codec +#define ff_mba_length liteav_ff_mba_length +#define ff_id3v2_picture_types liteav_ff_id3v2_picture_types +#define ff_pred8x8l_128_dc_10_sse2 liteav_ff_pred8x8l_128_dc_10_sse2 +#define avio_read_to_bprint liteav_avio_read_to_bprint +#define ff_decode_frame_props liteav_ff_decode_frame_props +#define av_encryption_info_get_side_data liteav_av_encryption_info_get_side_data +#define ff_pack_6ch_float_to_float_a_avx liteav_ff_pack_6ch_float_to_float_a_avx +#define ff_hevc_pred_angular_16x16_h_zero_neon_8 liteav_ff_hevc_pred_angular_16x16_h_zero_neon_8 +#define avpriv_get_gamma_from_trc liteav_avpriv_get_gamma_from_trc +#define av_fifo_drain liteav_av_fifo_drain +#define ff_inter_level liteav_ff_inter_level +#define ff_xvid_idct_init liteav_ff_xvid_idct_init +#define av_vorbis_parse_frame liteav_av_vorbis_parse_frame +#define ff_h264_flush_change liteav_ff_h264_flush_change +#define ff_h264_idct_dc_add_8_avx liteav_ff_h264_idct_dc_add_8_avx +#define ff_dither_8x8_73 liteav_ff_dither_8x8_73 +#define ff_hevc_put_qpel_uw_h2v2_neon_8 liteav_ff_hevc_put_qpel_uw_h2v2_neon_8 +#define ff_id3v2_finish liteav_ff_id3v2_finish +#define av_buffersink_get_frame_rate liteav_av_buffersink_get_frame_rate +#define ff_ps_add_squares_sse liteav_ff_ps_add_squares_sse +#define ff_aac_eld_window_512 liteav_ff_aac_eld_window_512 +#define ff_mov_cenc_init liteav_ff_mov_cenc_init +#define av_expr_parse_and_eval liteav_av_expr_parse_and_eval +#define ff_af_queue_close liteav_ff_af_queue_close +#define av_bitstream_filter_init liteav_av_bitstream_filter_init +#define 
ff_put_h264_qpel16_mc32_10_sse2 liteav_ff_put_h264_qpel16_mc32_10_sse2 +#define av_color_primaries_from_name liteav_av_color_primaries_from_name +#define ff_pred16x16_left_dc_10_sse2 liteav_ff_pred16x16_left_dc_10_sse2 +#define ff_hevc_sao_merge_flag_decode liteav_ff_hevc_sao_merge_flag_decode +#define ff_avg_h264_qpel4_mc01_10_mmxext liteav_ff_avg_h264_qpel4_mc01_10_mmxext +#define av_init_packet liteav_av_init_packet +#define ff_cos_64 liteav_ff_cos_64 +#define avpicture_fill liteav_avpicture_fill +#define swri_get_dither liteav_swri_get_dither +#define av_frame_is_writable liteav_av_frame_is_writable +#define ff_pred8x8l_horizontal_10_ssse3 liteav_ff_pred8x8l_horizontal_10_ssse3 +#define ff_hevc_put_qpel_uw_weight_h2v2_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h2v2_neon_8 +#define ff_ac3_fast_gain_tab liteav_ff_ac3_fast_gain_tab +#define ffio_rewind_with_probe_data liteav_ffio_rewind_with_probe_data +#define ff_unpack_2ch_float_to_int16_u_sse2 liteav_ff_unpack_2ch_float_to_int16_u_sse2 +#define ff_h264_idct_add16_8_c liteav_ff_h264_idct_add16_8_c +#define ff_sbr_qmf_post_shuffle_neon liteav_ff_sbr_qmf_post_shuffle_neon +#define ff_h264_chroma422_dc_dequant_idct_8_c liteav_ff_h264_chroma422_dc_dequant_idct_8_c +#define ff_ps_ctx_init liteav_ff_ps_ctx_init +#define ff_rtmpt_protocol liteav_ff_rtmpt_protocol +#define ff_h264_biweight_8_ssse3 liteav_ff_h264_biweight_8_ssse3 +#define ff_put_no_rnd_qpel16_mc33_old_c liteav_ff_put_no_rnd_qpel16_mc33_old_c +#define ff_pred8x8_tm_vp8_8_mmxext liteav_ff_pred8x8_tm_vp8_8_mmxext +#define ff_put_h264_chroma_mc8_rnd_ssse3 liteav_ff_put_h264_chroma_mc8_rnd_ssse3 +#define avpriv_h264_has_num_reorder_frames liteav_avpriv_h264_has_num_reorder_frames +#define ff_vorbis_vwin liteav_ff_vorbis_vwin +#define ff_put_h264_qpel8or16_v_lowpass_op_mmxext liteav_ff_put_h264_qpel8or16_v_lowpass_op_mmxext +#define ff_hevc_transform_4x4_neon_8_asm liteav_ff_hevc_transform_4x4_neon_8_asm +#define ff_mpa_alloc_tables liteav_ff_mpa_alloc_tables 
+#define ff_hevc_split_coding_unit_flag_decode liteav_ff_hevc_split_coding_unit_flag_decode +#define ff_channel_layouts_ref liteav_ff_channel_layouts_ref +#define ff_mdct_end_fixed liteav_ff_mdct_end_fixed +#define ff_gmc_c liteav_ff_gmc_c +#define ff_pred8x8_l0t_dc_neon liteav_ff_pred8x8_l0t_dc_neon +#define ff_pw_42 liteav_ff_pw_42 +#define avcodec_dct_get_class liteav_avcodec_dct_get_class +#define ff_h263p_decoder liteav_ff_h263p_decoder +#define ff_sine_window_init_fixed liteav_ff_sine_window_init_fixed +#define ff_w3_min_w1_hi liteav_ff_w3_min_w1_hi +#define ff_hevc_get_ref_list liteav_ff_hevc_get_ref_list +#define av_hmac_final liteav_av_hmac_final +#define av_vorbis_parse_frame_flags liteav_av_vorbis_parse_frame_flags +#define ff_h264_golomb_to_pict_type liteav_ff_h264_golomb_to_pict_type +#define ff_h264_pred_init_x86 liteav_ff_h264_pred_init_x86 +#define av_tea_size liteav_av_tea_size +#define av_display_matrix_flip liteav_av_display_matrix_flip +#define avfilter_init_str liteav_avfilter_init_str +#define ff_ass_style_get liteav_ff_ass_style_get +#define av_md5_alloc liteav_av_md5_alloc +#define rgb48tobgr48_bswap liteav_rgb48tobgr48_bswap +#define ff_avg_h264_qpel16_mc13_neon liteav_ff_avg_h264_qpel16_mc13_neon +#define ff_rtp_enc_name liteav_ff_rtp_enc_name +#define ff_mpadsp_init_aarch64 liteav_ff_mpadsp_init_aarch64 +#define ff_avg_pixels4_mmx liteav_ff_avg_pixels4_mmx +#define av_bsf_list_append liteav_av_bsf_list_append +#define av_vorbis_parse_free liteav_av_vorbis_parse_free +#define swri_noise_shaping_int16 liteav_swri_noise_shaping_int16 +#define av_mallocz liteav_av_mallocz +#define ff_cpu_xgetbv liteav_ff_cpu_xgetbv +#define ff_pred8x8_128_dc_neon liteav_ff_pred8x8_128_dc_neon +#define ff_hevc_v_loop_filter_luma_neon liteav_ff_hevc_v_loop_filter_luma_neon +#define ff_pack_6ch_float_to_float_u_mmx liteav_ff_pack_6ch_float_to_float_u_mmx +#define ff_hevc_idct_16x16_dc_neon_8_asm liteav_ff_hevc_idct_16x16_dc_neon_8_asm +#define avio_wl64 
liteav_avio_wl64 +#define vlc_css_parser_Clean liteav_vlc_css_parser_Clean +#define av_videotoolbox_alloc_context liteav_av_videotoolbox_alloc_context +#define ff_h264_idct_add16intra_neon liteav_ff_h264_idct_add16intra_neon +#define ff_pred8x8l_vertical_10_sse2 liteav_ff_pred8x8l_vertical_10_sse2 +#define av_escape liteav_av_escape +#define ff_draw_horiz_band liteav_ff_draw_horiz_band +#define ff_hevc_put_qpel_bi_neon_wrapper liteav_ff_hevc_put_qpel_bi_neon_wrapper +#define ff_mpeg_er_init liteav_ff_mpeg_er_init +#define ff_hevc_hls_filters liteav_ff_hevc_hls_filters +#define av_freep liteav_av_freep +#define ff_pred4x4_vertical_left_10_sse2 liteav_ff_pred4x4_vertical_left_10_sse2 +#define av_tempfile liteav_av_tempfile +#define ff_ps_add_squares_sse3 liteav_ff_ps_add_squares_sse3 +#define ff_ape_write_tag liteav_ff_ape_write_tag +#define ff_pred8x8_tm_vp8_8_mmx liteav_ff_pred8x8_tm_vp8_8_mmx +#define ff_avg_h264_qpel16_mc12_neon liteav_ff_avg_h264_qpel16_mc12_neon +#define ff_hevc_cbf_cb_cr_decode liteav_ff_hevc_cbf_cb_cr_decode +#define ff_pred4x4_vertical_right_10_avx liteav_ff_pred4x4_vertical_right_10_avx +#define ff_h264_idct_add16_neon liteav_ff_h264_idct_add16_neon +#define ff_mdct_init_fixed liteav_ff_mdct_init_fixed +#define ff_put_pixels16x16_c liteav_ff_put_pixels16x16_c +#define ff_hevc_put_pixels_w64_neon_8 liteav_ff_hevc_put_pixels_w64_neon_8 +#define ff_simple_idct12_put_sse2 liteav_ff_simple_idct12_put_sse2 +#define av_asprintf liteav_av_asprintf +#define ff_dither_8x8_220 liteav_ff_dither_8x8_220 +#define av_dict_get liteav_av_dict_get +#define ff_h264_idct8_add4_neon liteav_ff_h264_idct8_add4_neon +#define ff_hevc_pred_planar_32x32_neon_8 liteav_ff_hevc_pred_planar_32x32_neon_8 +#define avpriv_alloc_fixed_dsp liteav_avpriv_alloc_fixed_dsp +#define ff_hevc_sao_edge_filter_neon_8 liteav_ff_hevc_sao_edge_filter_neon_8 +#define ff_put_h264_qpel8_mc22_neon liteav_ff_put_h264_qpel8_mc22_neon +#define ff_http_do_new_request 
liteav_ff_http_do_new_request +#define ff_pred4x4_down_right_10_ssse3 liteav_ff_pred4x4_down_right_10_ssse3 +#define ff_pcm_bluray_decoder liteav_ff_pcm_bluray_decoder +#define ff_aac_num_swb_480 liteav_ff_aac_num_swb_480 +#define ff_put_qpel16_mc33_old_c liteav_ff_put_qpel16_mc33_old_c +#define ff_put_h264_qpel8_mc20_10_sse2_cache64 liteav_ff_put_h264_qpel8_mc20_10_sse2_cache64 +#define ff_hevc_put_qpel_uw_weight_h1v3_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h1v3_neon_8 +#define ff_sine_32 liteav_ff_sine_32 +#define av_log2_16bit liteav_av_log2_16bit +#define avio_write liteav_avio_write +#define rgb16tobgr16 liteav_rgb16tobgr16 +#define ff_hevc_sao_edge_eo2_w32_neon_8 liteav_ff_hevc_sao_edge_eo2_w32_neon_8 +#define ff_simple_idct10_put_sse2 liteav_ff_simple_idct10_put_sse2 +#define ff_mpeg_er_frame_start liteav_ff_mpeg_er_frame_start +#define yylex_init liteav_yylex_init +#define ff_rtp_chain_mux_open liteav_ff_rtp_chain_mux_open +#define ff_h264_idct_add_9_c liteav_ff_h264_idct_add_9_c +#define ff_deblock_v_chroma_intra_8_sse2 liteav_ff_deblock_v_chroma_intra_8_sse2 +#define ff_pred16x16_plane_svq3_8_mmx liteav_ff_pred16x16_plane_svq3_8_mmx +#define ff_h264_idct8_add_10_sse2 liteav_ff_h264_idct8_add_10_sse2 +#define ffurl_write liteav_ffurl_write +#define ff_avg_h264_qpel8_mc12_10_sse2 liteav_ff_avg_h264_qpel8_mc12_10_sse2 +#define av_opt_get liteav_av_opt_get +#define ff_qpeldsp_init_x86 liteav_ff_qpeldsp_init_x86 +#define ff_hevc_pred_angular_16x16_neon_8 liteav_ff_hevc_pred_angular_16x16_neon_8 +#define ff_avg_h264_qpel16_mc11_10_sse2 liteav_ff_avg_h264_qpel16_mc11_10_sse2 +#define ff_rtp_get_codec_info liteav_ff_rtp_get_codec_info +#define ff_butterflies_float_vfp liteav_ff_butterflies_float_vfp +#define ff_hevc_put_qpel_uw_bi_v_neon_8 liteav_ff_hevc_put_qpel_uw_bi_v_neon_8 +#define ff_resample_common_apply_filter_x4_s16_neon liteav_ff_resample_common_apply_filter_x4_s16_neon +#define ff_j_rev_dct2 liteav_ff_j_rev_dct2 +#define av_videotoolbox_default_init 
liteav_av_videotoolbox_default_init +#define av_fft_end liteav_av_fft_end +#define ff_set_cmp liteav_ff_set_cmp +#define ff_sine_960 liteav_ff_sine_960 +#define ff_hevc_transform_add_32x32_neon_8_asm liteav_ff_hevc_transform_add_32x32_neon_8_asm +#define ff_avg_h264_chroma_mc4_ssse3 liteav_ff_avg_h264_chroma_mc4_ssse3 +#define ff_unpack_2ch_float_to_int32_u_sse2 liteav_ff_unpack_2ch_float_to_int32_u_sse2 +#define ff_avg_qpel16_mc13_old_c liteav_ff_avg_qpel16_mc13_old_c +#define ff_avg_pixels8_l2_shift5_mmxext liteav_ff_avg_pixels8_l2_shift5_mmxext +#define ff_pred8x8l_down_right_8_sse2 liteav_ff_pred8x8l_down_right_8_sse2 +#define avcodec_encode_video2 liteav_avcodec_encode_video2 +#define ff_pred4x4_vertical_right_10_sse2 liteav_ff_pred4x4_vertical_right_10_sse2 +#define rgb24tobgr32 liteav_rgb24tobgr32 +#define ff_sine_1024_fixed liteav_ff_sine_1024_fixed +#define ff_avg_h264_qpel4_mc22_10_mmxext liteav_ff_avg_h264_qpel4_mc22_10_mmxext +#define av_mdct_calc liteav_av_mdct_calc +#define swscale_license liteav_swscale_license +#define ff_ass_split liteav_ff_ass_split +#define ff_pred8x8l_down_right_10_avx liteav_ff_pred8x8l_down_right_10_avx +#define ff_avg_h264_qpel4_hv_lowpass_h_mmxext liteav_ff_avg_h264_qpel4_hv_lowpass_h_mmxext +#define av_i2int liteav_av_i2int +#define ff_pred4x4_horizontal_down_10_sse2 liteav_ff_pred4x4_horizontal_down_10_sse2 +#define ff_avg_h264_qpel8_mc30_neon liteav_ff_avg_h264_qpel8_mc30_neon +#define av_frame_set_pkt_size liteav_av_frame_set_pkt_size +#define ff_cos_4096_fixed liteav_ff_cos_4096_fixed +#define ff_put_h264_chroma_mc2_10_mmxext liteav_ff_put_h264_chroma_mc2_10_mmxext +#define av_strlcatf liteav_av_strlcatf +#define ff_mpeg1_aspect liteav_ff_mpeg1_aspect +#define av_strcasecmp liteav_av_strcasecmp +#define ff_id3v2_34_metadata_conv liteav_ff_id3v2_34_metadata_conv +#define ff_thread_report_progress2 liteav_ff_thread_report_progress2 +#define ff_simple_idct8_add_sse2 liteav_ff_simple_idct8_add_sse2 +#define 
avcodec_default_get_buffer2 liteav_avcodec_default_get_buffer2 +#define ff_mpv_common_defaults liteav_ff_mpv_common_defaults +#define ff_pred8x8l_down_right_10_sse2 liteav_ff_pred8x8l_down_right_10_sse2 +#define ff_ps_neg liteav_ff_ps_neg +#define ff_pack_2ch_int32_to_float_a_sse2 liteav_ff_pack_2ch_int32_to_float_a_sse2 +#define ff_pcm_dvd_decoder liteav_ff_pcm_dvd_decoder +#define ff_unpack_2ch_int16_to_float_u_sse2 liteav_ff_unpack_2ch_int16_to_float_u_sse2 +#define ff_pred8x8l_vertical_right_10_ssse3 liteav_ff_pred8x8l_vertical_right_10_ssse3 +#define av_opt_get_key_value liteav_av_opt_get_key_value +#define rgb16to24 liteav_rgb16to24 +#define ff_aac_kbd_short_128 liteav_ff_aac_kbd_short_128 +#define ff_hevc_split_transform_flag_decode liteav_ff_hevc_split_transform_flag_decode +#define ff_init_vscale_pfn liteav_ff_init_vscale_pfn +#define ff_hevc_pred_angular_32x32_neon_8 liteav_ff_hevc_pred_angular_32x32_neon_8 +#define ff_pack_6ch_int32_to_float_u_avx liteav_ff_pack_6ch_int32_to_float_u_avx +#define ff_pred16x16_hor_neon liteav_ff_pred16x16_hor_neon +#define av_default_item_name liteav_av_default_item_name +#define ff_h263_intra_MCBPC_bits liteav_ff_h263_intra_MCBPC_bits +#define av_timegm liteav_av_timegm +#define ff_pred8x8l_top_dc_10_avx liteav_ff_pred8x8l_top_dc_10_avx +#define ff_h264_idct_add16intra_10_sse2 liteav_ff_h264_idct_add16intra_10_sse2 +#define ff_h264_ref_picture liteav_ff_h264_ref_picture +#define ff_mp1_at_decoder liteav_ff_mp1_at_decoder +#define av_buffer_get_ref_count liteav_av_buffer_get_ref_count +#define ff_rawvideo_options liteav_ff_rawvideo_options +#define ff_parse_sample_format liteav_ff_parse_sample_format +#define ff_ac3_fast_decay_tab liteav_ff_ac3_fast_decay_tab +#define ff_avg_h264_qpel8_mc12_neon liteav_ff_avg_h264_qpel8_mc12_neon +#define av_spherical_tile_bounds liteav_av_spherical_tile_bounds +#define av_fifo_size liteav_av_fifo_size +#define ff_avg_h264_qpel16_mc03_10_sse2 liteav_ff_avg_h264_qpel16_mc03_10_sse2 +#define 
ff_avc_write_annexb_extradata liteav_ff_avc_write_annexb_extradata +#define av_buffer_pool_init liteav_av_buffer_pool_init +#define av_shrink_packet liteav_av_shrink_packet +#define ff_sine_512_fixed liteav_ff_sine_512_fixed +#define swr_inject_silence liteav_swr_inject_silence +#define ff_pred8x8l_vertical_right_10_avx liteav_ff_pred8x8l_vertical_right_10_avx +#define ff_hevc_transform_32x32_neon_8_asm liteav_ff_hevc_transform_32x32_neon_8_asm +#define ff_hevc_cu_chroma_qp_offset_flag liteav_ff_hevc_cu_chroma_qp_offset_flag +#define ff_put_h264_qpel16_mc33_10_sse2 liteav_ff_put_h264_qpel16_mc33_10_sse2 +#define ff_hevc_pred_angular_8x8_h_zero_neon_8 liteav_ff_hevc_pred_angular_8x8_h_zero_neon_8 +#define ff_m4v_demuxer liteav_ff_m4v_demuxer +#define ff_hevc_sao_edge_eo3_w64_neon_8 liteav_ff_hevc_sao_edge_eo3_w64_neon_8 +#define av_opt_set_video_rate liteav_av_opt_set_video_rate +#define ff_vorbis_codec liteav_ff_vorbis_codec +#define ff_h264_idct_add8_8_sse2 liteav_ff_h264_idct_add8_8_sse2 +#define ff_mdct_calc_c_fixed liteav_ff_mdct_calc_c_fixed +#define ff_avg_h264_qpel8_mc02_10_sse2 liteav_ff_avg_h264_qpel8_mc02_10_sse2 +#define avpriv_put_string liteav_avpriv_put_string +#define ff_h264_idct8_add4_8_sse2 liteav_ff_h264_idct8_add4_8_sse2 +#define av_sha_size liteav_av_sha_size +#define ff_id3v2_mime_tags liteav_ff_id3v2_mime_tags +#define ff_init_mpadsp_tabs_fixed liteav_ff_init_mpadsp_tabs_fixed +#define ff_put_h264_qpel8_mc32_10_sse2 liteav_ff_put_h264_qpel8_mc32_10_sse2 +#define av_dict_copy liteav_av_dict_copy +#define ff_pred8x8l_vertical_left_8_sse2 liteav_ff_pred8x8l_vertical_left_8_sse2 +#define ff_kbd_window_init_fixed liteav_ff_kbd_window_init_fixed +#define avfilter_link_get_channels liteav_avfilter_link_get_channels +#define ff_command_queue_pop liteav_ff_command_queue_pop +#define ff_hevc_put_epel_uw_pixels_w32_neon_8 liteav_ff_hevc_put_epel_uw_pixels_w32_neon_8 +#define ff_imdct_half_vfp liteav_ff_imdct_half_vfp +#define 
ff_put_h264_qpel8_h_lowpass_l2_mmxext liteav_ff_put_h264_qpel8_h_lowpass_l2_mmxext +#define ff_rtmp_packet_destroy liteav_ff_rtmp_packet_destroy +#define ff_mpeg4_dc_threshold liteav_ff_mpeg4_dc_threshold +#define ff_hevc_transform_4x4_neon_8 liteav_ff_hevc_transform_4x4_neon_8 +#define av_adler32_update liteav_av_adler32_update +#define swresample_version liteav_swresample_version +#define ff_avg_qpel8_mc33_old_c liteav_ff_avg_qpel8_mc33_old_c +#define ff_update_duplicate_context liteav_ff_update_duplicate_context +#define ff_h264_check_intra_pred_mode liteav_ff_h264_check_intra_pred_mode +#define av_frame_get_decode_error_flags liteav_av_frame_get_decode_error_flags +#define ff_inlink_queued_samples liteav_ff_inlink_queued_samples +#define ff_avg_h264_qpel8_mc13_10_sse2 liteav_ff_avg_h264_qpel8_mc13_10_sse2 +#define ff_init_desc_cfmt_convert liteav_ff_init_desc_cfmt_convert +#define av_rescale_rnd liteav_av_rescale_rnd +#define av_hwframe_ctx_alloc liteav_av_hwframe_ctx_alloc +#define ff_find_unused_picture liteav_ff_find_unused_picture +#define swr_build_matrix liteav_swr_build_matrix +#define ff_simple_idct10_put_avx liteav_ff_simple_idct10_put_avx +#define ff_alloc_picture liteav_ff_alloc_picture +#define ff_cos_16 liteav_ff_cos_16 +#define avpicture_free liteav_avpicture_free +#define ff_put_no_rnd_qpel16_mc11_old_c liteav_ff_put_no_rnd_qpel16_mc11_old_c +#define av_hwdevice_get_type_name liteav_av_hwdevice_get_type_name +#define ff_rvlc_rl_intra liteav_ff_rvlc_rl_intra +#define av_log2 liteav_av_log2 +#define ff_pred16x16_plane_neon liteav_ff_pred16x16_plane_neon +#define ff_avg_pixels16x16_c liteav_ff_avg_pixels16x16_c +#define ff_check_h264_startcode liteav_ff_check_h264_startcode +#define ff_aac_num_swb_1024 liteav_ff_aac_num_swb_1024 +#define ff_mov_iso639_to_lang liteav_ff_mov_iso639_to_lang +#define ff_pred8x8_l00_dc_neon liteav_ff_pred8x8_l00_dc_neon +#define ff_af_volume liteav_ff_af_volume +#define ff_put_pixels16_x2_neon 
liteav_ff_put_pixels16_x2_neon +#define ff_pb_80 liteav_ff_pb_80 +#define ff_mpeg4_studio_dc_chroma liteav_ff_mpeg4_studio_dc_chroma +#define ffurl_accept liteav_ffurl_accept +#define ff_vorbis_encoding_channel_layout_offsets liteav_ff_vorbis_encoding_channel_layout_offsets +#define ff_pred16x16_plane_svq3_8_mmxext liteav_ff_pred16x16_plane_svq3_8_mmxext +#define ff_put_vc1_chroma_mc8_nornd_ssse3 liteav_ff_put_vc1_chroma_mc8_nornd_ssse3 +#define ff_avg_h264_qpel8_mc00_10_sse2 liteav_ff_avg_h264_qpel8_mc00_10_sse2 +#define ff_avg_h264_qpel16_mc10_10_ssse3_cache64 liteav_ff_avg_h264_qpel16_mc10_10_ssse3_cache64 +#define av_samples_copy liteav_av_samples_copy +#define ff_text_read liteav_ff_text_read +#define avio_close liteav_avio_close +#define ff_init_block_index liteav_ff_init_block_index +#define ff_put_h264_qpel16_mc12_10_sse2 liteav_ff_put_h264_qpel16_mc12_10_sse2 +#define ff_mov_lang_to_iso639 liteav_ff_mov_lang_to_iso639 +#define ff_avg_h264_chroma_mc2_10_mmxext liteav_ff_avg_h264_chroma_mc2_10_mmxext +#define ff_put_h264_qpel4_mc13_10_mmxext liteav_ff_put_h264_qpel4_mc13_10_mmxext +#define yy_flush_buffer liteav_yy_flush_buffer +#define av_dict_set liteav_av_dict_set +#define ff_pred4x4_horizontal_down_10_avx liteav_ff_pred4x4_horizontal_down_10_avx +#define vlc_css_expression_Delete liteav_vlc_css_expression_Delete +#define av_twofish_size liteav_av_twofish_size +#define ff_put_pixels8_l2_8 liteav_ff_put_pixels8_l2_8 +#define ff_imdct36_blocks_float liteav_ff_imdct36_blocks_float +#define ff_h263_decode_frame liteav_ff_h263_decode_frame +#define ff_pw_1024 liteav_ff_pw_1024 +#define ff_hevc_cu_transquant_bypass_flag_decode liteav_ff_hevc_cu_transquant_bypass_flag_decode +#define ff_h264_idct8_add4_10_avx liteav_ff_h264_idct8_add4_10_avx +#define av_mediacodec_render_buffer_at_time liteav_av_mediacodec_render_buffer_at_time +#define ff_pack_8ch_float_to_int32_a_avx liteav_ff_pack_8ch_float_to_int32_a_avx +#define ff_mpeg12_vlc_dc_chroma_code 
liteav_ff_mpeg12_vlc_dc_chroma_code +#define ff_flac_decode_frame_header liteav_ff_flac_decode_frame_header +#define ff_id3v2_start liteav_ff_id3v2_start +#define ff_put_h264_qpel16_mc22_neon liteav_ff_put_h264_qpel16_mc22_neon +#define ff_put_h264_qpel8_mc31_neon liteav_ff_put_h264_qpel8_mc31_neon +#define ff_pred16x16_128_dc_10_sse2 liteav_ff_pred16x16_128_dc_10_sse2 +#define ff_avg_h264_qpel8_h_lowpass_ssse3 liteav_ff_avg_h264_qpel8_h_lowpass_ssse3 +#define av_default_get_category liteav_av_default_get_category +#define ff_pack_6ch_float_to_int32_u_sse2 liteav_ff_pack_6ch_float_to_int32_u_sse2 +#define ff_cos_1024 liteav_ff_cos_1024 +#define ff_crcEDB88320_update liteav_ff_crcEDB88320_update +#define yyalloc liteav_yyalloc +#define ff_hevc_parse_sps liteav_ff_hevc_parse_sps +#define ff_avg_h264_qpel8_mc01_10_sse2 liteav_ff_avg_h264_qpel8_mc01_10_sse2 +#define ff_merge_channel_layouts liteav_ff_merge_channel_layouts +#define av_hwframe_transfer_data liteav_av_hwframe_transfer_data +#define ff_all_formats liteav_ff_all_formats +#define ff_h264_weight_16_10_sse2 liteav_ff_h264_weight_16_10_sse2 +#define ff_mdct15_init liteav_ff_mdct15_init +#define av_thread_message_queue_recv liteav_av_thread_message_queue_recv +#define ff_avg_h264_qpel16_mc30_10_ssse3_cache64 liteav_ff_avg_h264_qpel16_mc30_10_ssse3_cache64 +#define av_samples_alloc liteav_av_samples_alloc +#define ff_http_protocol liteav_ff_http_protocol +#define avio_closep liteav_avio_closep +#define ff_add_channel_layout liteav_ff_add_channel_layout +#define ff_h264_hl_decode_mb liteav_ff_h264_hl_decode_mb +#define ff_hevc_put_epel_uw_pixels_w24_neon_8 liteav_ff_hevc_put_epel_uw_pixels_w24_neon_8 +#define ff_dca_profiles liteav_ff_dca_profiles +#define ff_deblock_h_chroma_10_sse2 liteav_ff_deblock_h_chroma_10_sse2 +#define ff_avg_h264_qpel16_mc32_neon liteav_ff_avg_h264_qpel16_mc32_neon +#define avfilter_link_set_closed liteav_avfilter_link_set_closed +#define ff_pred8x8l_down_left_8_mmxext 
liteav_ff_pred8x8l_down_left_8_mmxext +#define avpriv_slicethread_create liteav_avpriv_slicethread_create +#define ff_put_h264_chroma_mc4_mmx liteav_ff_put_h264_chroma_mc4_mmx +#define ff_ac3_parser liteav_ff_ac3_parser +#define ff_uyvytoyuv422_sse2 liteav_ff_uyvytoyuv422_sse2 +#define av_opt_get_video_rate liteav_av_opt_get_video_rate +#define ffio_fdopen liteav_ffio_fdopen +#define avfilter_register liteav_avfilter_register +#define ff_init_ff_cos_tabs_fixed liteav_ff_init_ff_cos_tabs_fixed +#define yyget_lval liteav_yyget_lval +#define av_file_unmap liteav_av_file_unmap +#define ff_hevc_sao_type_idx_decode liteav_ff_hevc_sao_type_idx_decode +#define ff_hevc_idct_4x4_dc_neon_8_asm liteav_ff_hevc_idct_4x4_dc_neon_8_asm +#define av_compare_mod liteav_av_compare_mod +#define av_realloc liteav_av_realloc +#define yyset_debug liteav_yyset_debug +#define av_fifo_generic_read liteav_av_fifo_generic_read +#define avio_put_str16be liteav_avio_put_str16be +#define ff_ebur128_add_frames_planar_int liteav_ff_ebur128_add_frames_planar_int +#define avfilter_graph_config liteav_avfilter_graph_config +#define ff_mpa_synth_init_float liteav_ff_mpa_synth_init_float +#define av_md5_update liteav_av_md5_update +#define ff_h264_idct_add8_10_sse2 liteav_ff_h264_idct_add8_10_sse2 +#define av_cast5_init liteav_av_cast5_init +#define ff_imdct_calc_c liteav_ff_imdct_calc_c +#define ff_resample_common_apply_filter_x8_s16_neon liteav_ff_resample_common_apply_filter_x8_s16_neon +#define ff_unpack_2ch_int16_to_int32_a_ssse3 liteav_ff_unpack_2ch_int16_to_int32_a_ssse3 +#define ff_put_pixels8_xy2_no_rnd_neon liteav_ff_put_pixels8_xy2_no_rnd_neon +#define ff_bsf_child_class_next liteav_ff_bsf_child_class_next +#define av_xtea_alloc liteav_av_xtea_alloc +#define ff_pcm_alaw_at_decoder liteav_ff_pcm_alaw_at_decoder +#define av_fifo_grow liteav_av_fifo_grow +#define ff_biweight_h264_pixels_8_neon liteav_ff_biweight_h264_pixels_8_neon +#define av_image_fill_black liteav_av_image_fill_black +#define 
av_sha512_init liteav_av_sha512_init +#define ff_avg_h264_qpel16_mc20_10_ssse3_cache64 liteav_ff_avg_h264_qpel16_mc20_10_ssse3_cache64 +#define ff_hevc_save_states liteav_ff_hevc_save_states +#define ff_mdct_init liteav_ff_mdct_init +#define ff_put_h264_qpel8_mc30_10_sse2_cache64 liteav_ff_put_h264_qpel8_mc30_10_sse2_cache64 +#define rgb16tobgr24 liteav_rgb16tobgr24 +#define av_tree_enumerate liteav_av_tree_enumerate +#define swscale_version liteav_swscale_version +#define ff_sbr_hf_apply_noise_2_neon liteav_ff_sbr_hf_apply_noise_2_neon +#define ff_slice_thread_init liteav_ff_slice_thread_init +#define av_dict_parse_string liteav_av_dict_parse_string +#define ff_fixed_dsp_init_x86 liteav_ff_fixed_dsp_init_x86 +#define ff_ps_stereo_interpolate_sse3 liteav_ff_ps_stereo_interpolate_sse3 +#define av_buffer_get_opaque liteav_av_buffer_get_opaque +#define ff_pack_2ch_int16_to_int32_a_sse2 liteav_ff_pack_2ch_int16_to_int32_a_sse2 +#define ff_imdct_calc_c_fixed_32 liteav_ff_imdct_calc_c_fixed_32 +#define av_base64_decode liteav_av_base64_decode +#define av_reallocp liteav_av_reallocp +#define av_jni_set_java_vm liteav_av_jni_set_java_vm +#define ff_cos_256 liteav_ff_cos_256 +#define ff_h263dsp_init liteav_ff_h263dsp_init +#define ff_pack_2ch_float_to_int32_u_sse2 liteav_ff_pack_2ch_float_to_int32_u_sse2 +#define ff_rl_free liteav_ff_rl_free +#define ff_h264_chroma_dc_scan liteav_ff_h264_chroma_dc_scan +#define av_packet_copy_props liteav_av_packet_copy_props +#define yyget_lineno liteav_yyget_lineno +#define ff_pred8x8l_horizontal_up_8_mmxext liteav_ff_pred8x8l_horizontal_up_8_mmxext +#define ff_h264_weight_8_sse2 liteav_ff_h264_weight_8_sse2 +#define ff_hevc_idct_4x4_dc_neon_8 liteav_ff_hevc_idct_4x4_dc_neon_8 +#define ff_ebur128_loudness_global liteav_ff_ebur128_loudness_global +#define ff_j_rev_dct liteav_ff_j_rev_dct +#define ff_pred16x16_horizontal_10_sse2 liteav_ff_pred16x16_horizontal_10_sse2 +#define ff_hevc_put_epel_h_neon_8_wrapper 
liteav_ff_hevc_put_epel_h_neon_8_wrapper +#define yy_scan_string liteav_yy_scan_string +#define ff_end_tag liteav_ff_end_tag +#define rgb24tobgr24 liteav_rgb24tobgr24 +#define avcodec_find_decoder liteav_avcodec_find_decoder +#define av_dict_count liteav_av_dict_count +#define sws_convertPalette8ToPacked32 liteav_sws_convertPalette8ToPacked32 +#define ff_hevc_mpm_idx_decode liteav_ff_hevc_mpm_idx_decode +#define ff_put_pixels16_y2_no_rnd_neon liteav_ff_put_pixels16_y2_no_rnd_neon +#define ff_avg_h264_chroma_mc8_10_avx liteav_ff_avg_h264_chroma_mc8_10_avx +#define av_pkt_dump2 liteav_av_pkt_dump2 +#define ff_put_h264_qpel8_mc01_10_sse2 liteav_ff_put_h264_qpel8_mc01_10_sse2 +#define av_buffersink_get_format liteav_av_buffersink_get_format +#define avfilter_next liteav_avfilter_next +#define ff_hwcontext_type_videotoolbox liteav_ff_hwcontext_type_videotoolbox +#define ff_combine_frame liteav_ff_combine_frame +#define ff_dnxhd_profiles liteav_ff_dnxhd_profiles +#define ff_id3v1_read liteav_ff_id3v1_read +#define ff_hevc_clear_refs liteav_ff_hevc_clear_refs +#define ff_pred8x8_plane_8_ssse3 liteav_ff_pred8x8_plane_8_ssse3 +#define av_timecode_make_mpeg_tc_string liteav_av_timecode_make_mpeg_tc_string +#define ff_ass_bprint_text_event liteav_ff_ass_bprint_text_event +#define av_log_format_line2 liteav_av_log_format_line2 +#define ff_h264_idct_add16intra_8_mmx liteav_ff_h264_idct_add16intra_8_mmx +#define ff_uyvytoyuv422_avx liteav_ff_uyvytoyuv422_avx +#define ff_inter_run liteav_ff_inter_run +#define ff_id3v2_parse_apic liteav_ff_id3v2_parse_apic +#define ff_deblock_h_chroma422_intra_8_avx liteav_ff_deblock_h_chroma422_intra_8_avx +#define ff_formats_ref liteav_ff_formats_ref +#define ff_rtmpe_protocol liteav_ff_rtmpe_protocol +#define ff_mov_cenc_write_stbl_atoms liteav_ff_mov_cenc_write_stbl_atoms +#define ff_faanidct liteav_ff_faanidct +#define av_aes_ctr_alloc liteav_av_aes_ctr_alloc +#define ff_put_rv40_chroma_mc4_mmx liteav_ff_put_rv40_chroma_mc4_mmx +#define 
ff_h264_ps_uninit liteav_ff_h264_ps_uninit +#define sws_normalizeVec liteav_sws_normalizeVec +#define ff_h264_chroma_qp liteav_ff_h264_chroma_qp +#define av_vorbis_parse_init liteav_av_vorbis_parse_init +#define ff_h264_sei_uninit liteav_ff_h264_sei_uninit +#define ff_pred8x8_horizontal_8_mmxext liteav_ff_pred8x8_horizontal_8_mmxext +#define ff_hevc_put_qpel_uw_pixels_w64_neon_8 liteav_ff_hevc_put_qpel_uw_pixels_w64_neon_8 +#define ff_avg_rv40_chroma_mc8_3dnow liteav_ff_avg_rv40_chroma_mc8_3dnow +#define ff_videotoolbox_uninit liteav_ff_videotoolbox_uninit +#define ff_vorbis_floor1_inverse_db_table liteav_ff_vorbis_floor1_inverse_db_table +#define ff_cos_256_fixed liteav_ff_cos_256_fixed +#define ff_inverse liteav_ff_inverse +#define avpriv_mpegts_parse_packet liteav_avpriv_mpegts_parse_packet +#define ff_hevc_annexb2mp4_buf liteav_ff_hevc_annexb2mp4_buf +#define ff_amf_get_field_value liteav_ff_amf_get_field_value +#define ff_pred16x16_128_dc_10_mmxext liteav_ff_pred16x16_128_dc_10_mmxext +#define ff_pred8x8_horizontal_10_sse2 liteav_ff_pred8x8_horizontal_10_sse2 +#define ff_hevc_pred_planar_16x16_neon_8_1 liteav_ff_hevc_pred_planar_16x16_neon_8_1 +#define ff_hevc_transform_luma_4x4_neon_8 liteav_ff_hevc_transform_luma_4x4_neon_8 +#define av_expr_parse liteav_av_expr_parse +#define ff_mpegtsraw_demuxer liteav_ff_mpegtsraw_demuxer +#define ff_put_qpel8_mc33_old_c liteav_ff_put_qpel8_mc33_old_c +#define av_crc liteav_av_crc +#define ff_hevc_demuxer liteav_ff_hevc_demuxer +#define ff_hevc_put_qpel_uw_v1_neon_8 liteav_ff_hevc_put_qpel_uw_v1_neon_8 +#define av_opt_set_from_string liteav_av_opt_set_from_string +#define ff_http_auth_create_response liteav_ff_http_auth_create_response +#define vlc_css_rule_New liteav_vlc_css_rule_New +#define avfilter_pad_get_name liteav_avfilter_pad_get_name +#define ff_hevc_set_qPy liteav_ff_hevc_set_qPy +#define av_picture_pad liteav_av_picture_pad +#define ff_yuv420p_to_abgr_neon liteav_ff_yuv420p_to_abgr_neon +#define 
av_probe_input_format2 liteav_av_probe_input_format2 +#define ff_vector_fmac_scalar_neon liteav_ff_vector_fmac_scalar_neon +#define av_frame_clone liteav_av_frame_clone +#define ff_pred16x16_dc_8_mmxext liteav_ff_pred16x16_dc_8_mmxext +#define ff_mov_cenc_write_packet liteav_ff_mov_cenc_write_packet +#define ff_h264_decode_ref_pic_list_reordering liteav_ff_h264_decode_ref_pic_list_reordering +#define ff_aac_pred_sfb_max liteav_ff_aac_pred_sfb_max +#define ff_put_wav_header liteav_ff_put_wav_header +#define ff_put_h264_qpel8_mc30_10_sse2 liteav_ff_put_h264_qpel8_mc30_10_sse2 +#define av_gettime liteav_av_gettime +#define ff_pw_20 liteav_ff_pw_20 +#define ff_framequeue_skip_samples liteav_ff_framequeue_skip_samples +#define ff_h263_cbpy_tab liteav_ff_h263_cbpy_tab +#define ff_avg_pixels16_y2_no_rnd_neon liteav_ff_avg_pixels16_y2_no_rnd_neon +#define ff_read_line_to_bprint liteav_ff_read_line_to_bprint +#define ff_draw_rectangle liteav_ff_draw_rectangle +#define ff_kbd_window_init liteav_ff_kbd_window_init +#define av_dirac_parse_sequence_header liteav_av_dirac_parse_sequence_header +#define ff_put_h264_qpel8_mc10_10_ssse3_cache64 liteav_ff_put_h264_qpel8_mc10_10_ssse3_cache64 +#define av_color_primaries_name liteav_av_color_primaries_name +#define av_log2_i liteav_av_log2_i +#define ff_h264_idct8_add_10_avx liteav_ff_h264_idct8_add_10_avx +#define av_parser_change liteav_av_parser_change +#define ff_hevc_put_epel_uw_bi_h_neon_8 liteav_ff_hevc_put_epel_uw_bi_h_neon_8 +#define ff_aac_demuxer liteav_ff_aac_demuxer +#define ff_hevc_sao_edge_eo2_w64_neon_8 liteav_ff_hevc_sao_edge_eo2_w64_neon_8 +#define ff_h264_idct_add8_12_c liteav_ff_h264_idct_add8_12_c +#define av_aes_ctr_crypt liteav_av_aes_ctr_crypt +#define ff_hevc_log2_res_scale_abs liteav_ff_hevc_log2_res_scale_abs +#define av_timecode_adjust_ntsc_framenum2 liteav_av_timecode_adjust_ntsc_framenum2 +#define ff_interleaved_dirac_golomb_vlc_code liteav_ff_interleaved_dirac_golomb_vlc_code +#define swr_convert 
liteav_swr_convert +#define ff_hevc_put_qpel_uw_pixels_w4_neon_8 liteav_ff_hevc_put_qpel_uw_pixels_w4_neon_8 +#define av_color_range_from_name liteav_av_color_range_from_name +#define ff_hevc_put_qpel_uw_weight_h1v1_neon_8 liteav_ff_hevc_put_qpel_uw_weight_h1v1_neon_8 +#define av_buffersink_get_samples liteav_av_buffersink_get_samples +#define ff_pred8x8l_dc_10_avx liteav_ff_pred8x8l_dc_10_avx +#define ff_fill_rectangle liteav_ff_fill_rectangle +#define ff_ebur128_init liteav_ff_ebur128_init +#define ff_unpack_6ch_int32_to_float_u_avx liteav_ff_unpack_6ch_int32_to_float_u_avx +#define ff_put_h264_qpel8_mc23_neon liteav_ff_put_h264_qpel8_mc23_neon +#define ff_unpack_2ch_int32_to_int16_u_sse2 liteav_ff_unpack_2ch_int32_to_int16_u_sse2 +#define ff_unpack_6ch_float_to_float_u_avx liteav_ff_unpack_6ch_float_to_float_u_avx +#define av_thread_message_queue_set_err_recv liteav_av_thread_message_queue_set_err_recv +#define ff_put_h264_qpel8_mc10_10_sse2 liteav_ff_put_h264_qpel8_mc10_10_sse2 +#define av_dct_end liteav_av_dct_end +#define ff_float_dsp_init_aarch64 liteav_ff_float_dsp_init_aarch64 +#define ff_h264_idct_add8_422_10_avx liteav_ff_h264_idct_add8_422_10_avx +#define ff_ass_subtitle_header liteav_ff_ass_subtitle_header +#define avfilter_pad_get_type liteav_avfilter_pad_get_type +#define ff_hevc_put_epel_hv_neon_8_wrapper liteav_ff_hevc_put_epel_hv_neon_8_wrapper +#define ff_adts_muxer liteav_ff_adts_muxer +#define ff_mpeg1_default_intra_matrix liteav_ff_mpeg1_default_intra_matrix +#define ff_gsm_ms_at_decoder liteav_ff_gsm_ms_at_decoder +#define ff_pack_2ch_int16_to_int16_a_sse2 liteav_ff_pack_2ch_int16_to_int16_a_sse2 +#define ff_cpu_cpuid liteav_ff_cpu_cpuid +#define av_opt_get_q liteav_av_opt_get_q +#define ff_avg_h264_qpel4_mc21_10_mmxext liteav_ff_avg_h264_qpel4_mc21_10_mmxext +#define ff_vector_fmul_window_vfp liteav_ff_vector_fmul_window_vfp +#define av_image_copy_uc_from liteav_av_image_copy_uc_from +#define ffurl_get_protocols liteav_ffurl_get_protocols 
+#define av_frame_get_colorspace liteav_av_frame_get_colorspace +#define avfilter_graph_alloc liteav_avfilter_graph_alloc +#define ff_avg_h264_qpel8_mc32_neon liteav_ff_avg_h264_qpel8_mc32_neon +#define av_mul_q liteav_av_mul_q +#define ff_hevc_cu_qp_delta_abs liteav_ff_hevc_cu_qp_delta_abs +#define ff_unpack_2ch_int32_to_float_u_sse2 liteav_ff_unpack_2ch_int32_to_float_u_sse2 +#define av_mul_i liteav_av_mul_i +#define ff_sws_init_input_funcs liteav_ff_sws_init_input_funcs +#define ff_h264_init_cabac_states liteav_ff_h264_init_cabac_states +#define ff_alloc_packet liteav_ff_alloc_packet +#define ff_ac3_demuxer liteav_ff_ac3_demuxer +#define av_add_stable liteav_av_add_stable +#define ff_pw_64 liteav_ff_pw_64 +#define ff_imdct36_float_sse3 liteav_ff_imdct36_float_sse3 +#define ff_imdct36_float_sse2 liteav_ff_imdct36_float_sse2 +#define ff_deblock_v_chroma_8_sse2 liteav_ff_deblock_v_chroma_8_sse2 +#define ff_mov_muxer liteav_ff_mov_muxer +#define av_rdft_init liteav_av_rdft_init +#define ff_hevc_put_qpel_uw_h1_neon_8 liteav_ff_hevc_put_qpel_uw_h1_neon_8 +#define ff_hevc_pcm_flag_decode liteav_ff_hevc_pcm_flag_decode +#define ff_deblock_v_luma_8_sse2 liteav_ff_deblock_v_luma_8_sse2 +#define ff_ebur128_loudness_range_multiple liteav_ff_ebur128_loudness_range_multiple +#define ff_tls_init liteav_ff_tls_init +#define ff_avg_pixels8x8_c liteav_ff_avg_pixels8x8_c +#define av_blowfish_crypt liteav_av_blowfish_crypt +#define av_image_copy liteav_av_image_copy +#define av_frame_new_side_data liteav_av_frame_new_side_data +#define ff_put_h264_qpel8_h_lowpass_ssse3 liteav_ff_put_h264_qpel8_h_lowpass_ssse3 +#define av_register_input_format liteav_av_register_input_format +#define ff_pred16x16_plane_h264_8_ssse3 liteav_ff_pred16x16_plane_h264_8_ssse3 +#define ff_h264_idct8_add4_9_c liteav_ff_h264_idct8_add4_9_c +#define av_bsf_free liteav_av_bsf_free +#define ff_pred4x4_vertical_right_10_ssse3 liteav_ff_pred4x4_vertical_right_10_ssse3 +#define ff_unpack_2ch_int16_to_int32_u_ssse3 
liteav_ff_unpack_2ch_int16_to_int32_u_ssse3 +#define variant_matched_tags liteav_variant_matched_tags +#define ff_amf_write_string2 liteav_ff_amf_write_string2 +#define av_register_output_format liteav_av_register_output_format +#define ff_pred16x16_vertical_10_mmxext liteav_ff_pred16x16_vertical_10_mmxext +#define ff_mpeg4_set_direct_mv liteav_ff_mpeg4_set_direct_mv +#define av_rescale_q_rnd liteav_av_rescale_q_rnd +#define ff_amf_read_bool liteav_ff_amf_read_bool +#define av_opt_set_image_size liteav_av_opt_set_image_size +#define av_audio_fifo_free liteav_av_audio_fifo_free +#define ff_h264_idct8_dc_add_neon liteav_ff_h264_idct8_dc_add_neon +#define av_packet_get_side_data liteav_av_packet_get_side_data +#define av_blowfish_crypt_ecb liteav_av_blowfish_crypt_ecb +#define ff_deblock_h_chroma_intra_8_sse2 liteav_ff_deblock_h_chroma_intra_8_sse2 +#define ff_eac3_default_chmap liteav_ff_eac3_default_chmap +#define ffurl_read_complete liteav_ffurl_read_complete +#define ff_fft_calc_vfp liteav_ff_fft_calc_vfp +#define avcodec_encode_audio2 liteav_avcodec_encode_audio2 +#define swri_noise_shaping_int32 liteav_swri_noise_shaping_int32 +#define ff_avg_pixels8_x2_neon liteav_ff_avg_pixels8_x2_neon +#define ff_avg_h264_qpel8or16_v_lowpass_sse2 liteav_ff_avg_h264_qpel8or16_v_lowpass_sse2 +#define ff_text_init_buf liteav_ff_text_init_buf +#define ff_int16_to_float_u_sse2 liteav_ff_int16_to_float_u_sse2 +#define avio_rl24 liteav_avio_rl24 +#define ff_network_wait_fd liteav_ff_network_wait_fd +#define ff_avg_rv40_chroma_mc4_3dnow liteav_ff_avg_rv40_chroma_mc4_3dnow +#define ff_hevc_annexb2mp4 liteav_ff_hevc_annexb2mp4 +#define ff_put_qpel16_mc32_old_c liteav_ff_put_qpel16_mc32_old_c +#define ff_h264_get_slice_type liteav_ff_h264_get_slice_type +#define ff_w1_plus_w3_hi liteav_ff_w1_plus_w3_hi +#define ff_h264_idct_add8_422_9_c liteav_ff_h264_idct_add8_422_9_c +#define ff_planar_sample_fmts liteav_ff_planar_sample_fmts +#define ff_simple_idct8_add_avx 
liteav_ff_simple_idct8_add_avx +#define ff_init_vscale liteav_ff_init_vscale +#define ff_deblock_v_chroma_intra_8_avx liteav_ff_deblock_v_chroma_intra_8_avx +#define ff_put_h264_qpel16_mc12_neon liteav_ff_put_h264_qpel16_mc12_neon +#define ff_hevc_pred_angular_32x32_v_zero_neon_8 liteav_ff_hevc_pred_angular_32x32_v_zero_neon_8 +#define av_msg_set_callback liteav_av_msg_set_callback +#define ff_hevc_put_epel_v_neon_8_wrapper liteav_ff_hevc_put_epel_v_neon_8_wrapper +#define yyget_extra liteav_yyget_extra +#define ff_init_cabac_decoder liteav_ff_init_cabac_decoder +#define ff_scale_eval_dimensions liteav_ff_scale_eval_dimensions +#define avfilter_version liteav_avfilter_version +#define ff_fft_init liteav_ff_fft_init +#define av_get_sample_fmt_name liteav_av_get_sample_fmt_name +#define av_hwdevice_find_type_by_name liteav_av_hwdevice_find_type_by_name +#define ff_deblock_h_chroma_8_mmxext liteav_ff_deblock_h_chroma_8_mmxext +#define ff_put_h264_qpel4_mc01_10_mmxext liteav_ff_put_h264_qpel4_mc01_10_mmxext +#define ff_channel_layouts_changeref liteav_ff_channel_layouts_changeref +#define ff_ass_split_dialog2 liteav_ff_ass_split_dialog2 +#define ff_rl_init_vlc liteav_ff_rl_init_vlc +#define ff_wait_thread liteav_ff_wait_thread +#define ff_put_h264_qpel8_mc20_neon liteav_ff_put_h264_qpel8_mc20_neon +#define avfilter_insert_filter liteav_avfilter_insert_filter +#define ff_square_tab liteav_ff_square_tab +#define av_frame_make_writable liteav_av_frame_make_writable +#define ff_pb_FE liteav_ff_pb_FE +#define ff_pb_FC liteav_ff_pb_FC +#define ff_pred16x16_plane_h264_8_mmxext liteav_ff_pred16x16_plane_h264_8_mmxext +#define ff_urldecode liteav_ff_urldecode +#define ff_pack_2ch_int16_to_float_u_sse2 liteav_ff_pack_2ch_int16_to_float_u_sse2 +#define ff_interleaved_se_golomb_vlc_code liteav_ff_interleaved_se_golomb_vlc_code +#define ff_avg_h264_qpel8_mc21_neon liteav_ff_avg_h264_qpel8_mc21_neon +#define ff_pd_1 liteav_ff_pd_1 +#define ff_biweight_h264_pixels_4_neon 
liteav_ff_biweight_h264_pixels_4_neon +#define av_image_get_buffer_size liteav_av_image_get_buffer_size +#define av_get_standard_channel_layout liteav_av_get_standard_channel_layout +#define ff_rl_mpeg2 liteav_ff_rl_mpeg2 +#define av_cast5_alloc liteav_av_cast5_alloc +#define ff_rl_mpeg1 liteav_ff_rl_mpeg1 +#define ff_hevc_pel_bi_pixels_w8_neon_8 liteav_ff_hevc_pel_bi_pixels_w8_neon_8 +#define av_codec_next liteav_av_codec_next +#define ff_lzw_encode liteav_ff_lzw_encode +#define ff_unpack_6ch_float_to_int32_a_sse2 liteav_ff_unpack_6ch_float_to_int32_a_sse2 +#define av_hwframe_constraints_free liteav_av_hwframe_constraints_free +#define ff_avg_h264_qpel8_mc02_neon liteav_ff_avg_h264_qpel8_mc02_neon +#define ff_pack_6ch_float_to_int32_u_avx liteav_ff_pack_6ch_float_to_int32_u_avx +#define av_bsf_flush liteav_av_bsf_flush +#define ff_fft_init_aarch64 liteav_ff_fft_init_aarch64 +#define ff_vf_transpose liteav_ff_vf_transpose +#define ff_get_video_buffer liteav_ff_get_video_buffer +#define ff_avfilter_link_set_in_status liteav_ff_avfilter_link_set_in_status +#define av_encryption_init_info_add_side_data liteav_av_encryption_init_info_add_side_data +#define av_buffersink_get_time_base liteav_av_buffersink_get_time_base +#define av_expr_free liteav_av_expr_free +#define ff_h264_idct8_add_8_sse2 liteav_ff_h264_idct8_add_8_sse2 +#define avio_open2 liteav_avio_open2 +#define ff_simple_idct44_add liteav_ff_simple_idct44_add +#define ff_put_h264_qpel16_mc00_neon liteav_ff_put_h264_qpel16_mc00_neon +#define ff_replaygain_export liteav_ff_replaygain_export +#define ff_eac3_at_decoder liteav_ff_eac3_at_decoder +#define rgb24tobgr16 liteav_rgb24tobgr16 +#define ff_hevc_put_pel_uw_pixels_w12_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w12_neon_8_asm +#define text_style_create liteav_text_style_create +#define ff_h264dsp_init_x86 liteav_ff_h264dsp_init_x86 +#define ff_pred16x16_vertical_10_sse2 liteav_ff_pred16x16_vertical_10_sse2 +#define swri_audio_convert 
liteav_swri_audio_convert +#define av_twofish_init liteav_av_twofish_init +#define av_free_packet liteav_av_free_packet +#define vlc_css_selectors_Delete liteav_vlc_css_selectors_Delete +#define ff_hevc_pred_planar_32x32_neon_8_1 liteav_ff_hevc_pred_planar_32x32_neon_8_1 +#define ff_hevc_put_qpel_v1_neon_8 liteav_ff_hevc_put_qpel_v1_neon_8 +#define av_bsf_iterate liteav_av_bsf_iterate +#define sws_convertPalette8ToPacked24 liteav_sws_convertPalette8ToPacked24 +#define ff_h264dsp_init_aarch64 liteav_ff_h264dsp_init_aarch64 +#define ff_dct_end liteav_ff_dct_end +#define rgb32to24 liteav_rgb32to24 +#define ff_sbr_hf_g_filt_neon liteav_ff_sbr_hf_g_filt_neon +#define ff_av1_filter_obus_buf liteav_ff_av1_filter_obus_buf +#define ff_pack_2ch_float_to_int16_u_sse2 liteav_ff_pack_2ch_float_to_int16_u_sse2 +#define ff_wav_demuxer liteav_ff_wav_demuxer +#define ff_put_h264_qpel8_mc00_neon liteav_ff_put_h264_qpel8_mc00_neon +#define av_mediacodec_release_buffer liteav_av_mediacodec_release_buffer +#define av_get_bytes_per_sample liteav_av_get_bytes_per_sample +#define av_mediacodec_default_init liteav_av_mediacodec_default_init +#define avfilter_register_all liteav_avfilter_register_all +#define avio_wb64 liteav_avio_wb64 +#define av_opt_ptr liteav_av_opt_ptr +#define ff_asink_abuffer liteav_ff_asink_abuffer +#define ff_cos_65536 liteav_ff_cos_65536 +#define swr_close liteav_swr_close +#define av_aes_ctr_set_full_iv liteav_av_aes_ctr_set_full_iv +#define ff_aac_spectral_sizes liteav_ff_aac_spectral_sizes +#define shuffle_bytes_2103 liteav_shuffle_bytes_2103 +#define ff_id3v2_tag_len liteav_ff_id3v2_tag_len +#define ff_hevc_put_epel_hv_neon_8 liteav_ff_hevc_put_epel_hv_neon_8 +#define ff_mjpeg_encode_huffman_init liteav_ff_mjpeg_encode_huffman_init +#define ff_vector_dmul_scalar_neon liteav_ff_vector_dmul_scalar_neon +#define ff_ac3_dec_channel_map liteav_ff_ac3_dec_channel_map +#define ff_get_bmp_header liteav_ff_get_bmp_header +#define rgb64tobgr48_bswap 
liteav_rgb64tobgr48_bswap +#define ff_get_wav_header liteav_ff_get_wav_header +#define av_videotoolbox_default_init2 liteav_av_videotoolbox_default_init2 +#define ff_ac3_floor_tab liteav_ff_ac3_floor_tab +#define ff_h264_idct8_add_8_mmx liteav_ff_h264_idct8_add_8_mmx +#define ff_mp3_demuxer liteav_ff_mp3_demuxer +#define ff_aac_kbd_short_120 liteav_ff_aac_kbd_short_120 +#define ff_hevc_put_pel_uw_pixels_w64_neon_8_asm liteav_ff_hevc_put_pel_uw_pixels_w64_neon_8_asm +#define av_shr_i liteav_av_shr_i +#define ff_unpack_6ch_int32_to_float_a_avx liteav_ff_unpack_6ch_int32_to_float_a_avx +#define ff_h2645_extract_rbsp liteav_ff_h2645_extract_rbsp +#define ff_h264qpel_init_aarch64 liteav_ff_h264qpel_init_aarch64 +#define ff_avg_h264_qpel16_mc12_10_sse2 liteav_ff_avg_h264_qpel16_mc12_10_sse2 +#define ff_ebur128_add_frames_short liteav_ff_ebur128_add_frames_short +#define av_guess_format liteav_av_guess_format +#define ff_avg_h264_qpel4_mc30_10_mmxext liteav_ff_avg_h264_qpel4_mc30_10_mmxext +#define ff_h264_idct8_add_12_c liteav_ff_h264_idct8_add_12_c +#define ff_hevc_decode_nal_sps liteav_ff_hevc_decode_nal_sps +#define ff_reverse liteav_ff_reverse +#define yuy2toyv12 liteav_yuy2toyv12 +#define av_frame_set_decode_error_flags liteav_av_frame_set_decode_error_flags +#define sws_shiftVec liteav_sws_shiftVec +#define ff_pred4x4_down_left_8_mmxext liteav_ff_pred4x4_down_left_8_mmxext +#define ff_put_h264_qpel16_mc03_10_sse2 liteav_ff_put_h264_qpel16_mc03_10_sse2 +#define av_pix_fmt_desc_get liteav_av_pix_fmt_desc_get +#define ff_pred8x8_tm_vp8_8_sse2 liteav_ff_pred8x8_tm_vp8_8_sse2 +#define ff_hevc_put_pixels_w16_neon_8 liteav_ff_hevc_put_pixels_w16_neon_8 +#define ff_h264_unref_picture liteav_ff_h264_unref_picture +#define ff_adpcm_ima_qt_at_decoder liteav_ff_adpcm_ima_qt_at_decoder +#define av_read_image_line2 liteav_av_read_image_line2 +#define ff_brktimegm liteav_ff_brktimegm +#define ff_hevc_pred_angular_8x8_v_zero_neon_8 liteav_ff_hevc_pred_angular_8x8_v_zero_neon_8 
+#define avpriv_report_missing_feature liteav_avpriv_report_missing_feature +#define ff_mp2_at_decoder liteav_ff_mp2_at_decoder +#define ff_h264_biweight_4_mmxext liteav_ff_h264_biweight_4_mmxext +#define ff_tns_max_bands_480 liteav_ff_tns_max_bands_480 +#define av_hash_freep liteav_av_hash_freep +#define ff_golomb_vlc_len liteav_ff_golomb_vlc_len +#define ff_pred8x8l_vertical_left_8_ssse3 liteav_ff_pred8x8l_vertical_left_8_ssse3 +#define ff_hevc_frame_rps liteav_ff_hevc_frame_rps +#define av_pix_fmt_count_planes liteav_av_pix_fmt_count_planes +#define av_camellia_init liteav_av_camellia_init +#define ff_emulated_edge_mc_16 liteav_ff_emulated_edge_mc_16 +#define ff_put_h264_qpel8_mc13_10_sse2 liteav_ff_put_h264_qpel8_mc13_10_sse2 +#define ff_ps_stereo_interpolate_neon liteav_ff_ps_stereo_interpolate_neon +#define ff_subtitles_utf8_external_read_chunk liteav_ff_subtitles_utf8_external_read_chunk +#define av_opt_next liteav_av_opt_next +#define ff_avg_h264_qpel8_mc01_neon liteav_ff_avg_h264_qpel8_mc01_neon +#define avpriv_mpegts_parse_close liteav_avpriv_mpegts_parse_close +#define ff_four_imdct36_float_sse liteav_ff_four_imdct36_float_sse +#define ff_sbr_noise_table liteav_ff_sbr_noise_table +#define codec_mp4_tags liteav_codec_mp4_tags +#define ff_h264_idct_add8_10_c liteav_ff_h264_idct_add8_10_c +#define av_opt_show2 liteav_av_opt_show2 +#define ff_decode_sbr_extension liteav_ff_decode_sbr_extension +#define ff_avg_h264_qpel16_h_lowpass_l2_ssse3 liteav_ff_avg_h264_qpel16_h_lowpass_l2_ssse3 +#define ff_make_absolute_url liteav_ff_make_absolute_url +#define ff_unpack_2ch_int16_to_int32_a_sse2 liteav_ff_unpack_2ch_int16_to_int32_a_sse2 +#define ff_mpeg4_find_frame_end liteav_ff_mpeg4_find_frame_end +#define ff_float_dsp_init_x86 liteav_ff_float_dsp_init_x86 +#define ff_pack_6ch_float_to_int32_a_sse2 liteav_ff_pack_6ch_float_to_int32_a_sse2 +#define ff_deblock_v_luma_10_sse2 liteav_ff_deblock_v_luma_10_sse2 +#define ff_mpeg4_clean_buffers liteav_ff_mpeg4_clean_buffers 
+#define ffurl_connect liteav_ffurl_connect +#define avpriv_open liteav_avpriv_open +#define ff_h264_idct_add16_10_sse2 liteav_ff_h264_idct_add16_10_sse2 +#define ff_put_h264_qpel4_mc21_10_mmxext liteav_ff_put_h264_qpel4_mc21_10_mmxext +#define ff_pred16x16_dc_neon liteav_ff_pred16x16_dc_neon +#define ff_int32_to_float_a_avx liteav_ff_int32_to_float_a_avx +#define ff_put_h264_qpel4_mc30_10_mmxext liteav_ff_put_h264_qpel4_mc30_10_mmxext +#define ff_h263_intra_MCBPC_code liteav_ff_h263_intra_MCBPC_code +#define ff_h264_idct_add8_14_c liteav_ff_h264_idct_add8_14_c +#define ff_mpeg12_mbMotionVectorTable liteav_ff_mpeg12_mbMotionVectorTable +#define avformat_get_mov_audio_tags liteav_avformat_get_mov_audio_tags +#define ff_inlink_make_frame_writable liteav_ff_inlink_make_frame_writable +#define rgb24tobgr15 liteav_rgb24tobgr15 +#define ff_h264_idct_add_10_avx liteav_ff_h264_idct_add_10_avx +#define ff_shuffle_bytes_0321_ssse3 liteav_ff_shuffle_bytes_0321_ssse3 +#define av_opt_set_pixel_fmt liteav_av_opt_set_pixel_fmt +#define ff_ass_free_dialog liteav_ff_ass_free_dialog +#define ff_xvid_idct_init_x86 liteav_ff_xvid_idct_init_x86 +#define ff_cos_32 liteav_ff_cos_32 +#define avpriv_mpegaudio_decode_header liteav_avpriv_mpegaudio_decode_header +#define ff_vf_overlay liteav_ff_vf_overlay +#define ff_imdct_half_neon liteav_ff_imdct_half_neon +#define ff_hevc_put_epel_uw_pixels_w8_neon_8 liteav_ff_hevc_put_epel_uw_pixels_w8_neon_8 +#define ff_put_h264_qpel16_mc21_neon liteav_ff_put_h264_qpel16_mc21_neon +#define ff_avg_h264_qpel16_mc01_neon liteav_ff_avg_h264_qpel16_mc01_neon +#define ff_id3v2_read_dict liteav_ff_id3v2_read_dict +#define ff_put_h264_qpel16_mc03_neon liteav_ff_put_h264_qpel16_mc03_neon +#define ff_riff_write_info liteav_ff_riff_write_info +#define av_dct_init liteav_av_dct_init +#define ff_mpeg4_videotoolbox_hwaccel liteav_ff_mpeg4_videotoolbox_hwaccel +#define ff_pred16x16_horizontal_8_mmx liteav_ff_pred16x16_horizontal_8_mmx +#define swscale_configuration 
liteav_swscale_configuration +#define av_packet_side_data_name liteav_av_packet_side_data_name +#define ff_h264_luma_dc_dequant_idct_8_c liteav_ff_h264_luma_dc_dequant_idct_8_c +#define ff_xvid_idct liteav_ff_xvid_idct +#define ff_scalarproduct_float_neon liteav_ff_scalarproduct_float_neon +#define vlc_css_parser_Debug liteav_vlc_css_parser_Debug +#define avformat_close liteav_avformat_close +#define avformat_new_stream liteav_avformat_new_stream +#define avformat_close_input liteav_avformat_close_input +#define avformat_free_context liteav_avformat_free_context +#define avformat_alloc_context liteav_avformat_alloc_context +#define avformat_open_input liteav_avformat_open_input +#define avformat_find_stream_info liteav_avformat_find_stream_info +#define av_find_best_stream liteav_av_find_best_stream +#define avcodec_open2 liteav_avcodec_open2 +#define av_read_frame liteav_av_read_frame +#define av_seek_frame liteav_av_seek_frame +#define av_codec_get_tag liteav_av_codec_get_tag +#define avcodec_parameters_from_context liteav_avcodec_parameters_from_context +#define avcodec_parameters_to_context liteav_avcodec_parameters_to_context +#define avcodec_alloc_context3 liteav_avcodec_alloc_context3 +#define avcodec_get_name liteav_avcodec_get_name +#define avcodec_free_context liteav_avcodec_free_context +#define avcodec_close liteav_avcodec_close +//vod +#define avsubtitle_free liteav_avsubtitle_free +#define av_version_info liteav_av_version_info +#define av_find_default_stream_index liteav_av_find_default_stream_index +#define av_stream_get_side_data liteav_av_stream_get_side_data +#define av_get_media_type_string liteav_av_get_media_type_string +#define avcodec_parameters_alloc liteav_avcodec_parameters_alloc +#define avformat_network_init liteav_avformat_network_init +#define av_int_list_length_for_size liteav_av_int_list_length_for_size +#define avcodec_parameters_free liteav_avcodec_parameters_free +// clang-format on + +#endif // 
THIRD_PARTY_FFMPEG_FFMPEG_RENAME_DEFINES_H diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/ac3_parser.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/ac3_parser.h new file mode 100644 index 0000000..7f9780b --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/ac3_parser.h @@ -0,0 +1,37 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * AC-3 parser prototypes + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2003 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AC3_PARSER_H +#define AVCODEC_AC3_PARSER_H + +#include <stddef.h> +#include <stdint.h> + +/** + * Extract the bitstream ID and the frame size from AC-3 data. 
+ */ +int liteav_av_ac3_parse_header(const uint8_t *buf, size_t size, + uint8_t *bitstream_id, uint16_t *frame_size); + + +#endif /* AVCODEC_AC3_PARSER_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/adts_parser.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/adts_parser.h new file mode 100644 index 0000000..8f38526 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/adts_parser.h @@ -0,0 +1,38 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_ADTS_PARSER_H +#define AVCODEC_ADTS_PARSER_H + +#include <stddef.h> +#include <stdint.h> + +#define AV_AAC_ADTS_HEADER_SIZE 7 + +/** + * Extract the number of samples and frames from AAC data. + * @param[in] buf pointer to AAC data buffer + * @param[out] samples Pointer to where number of samples is written + * @param[out] frames Pointer to where number of frames is written + * @return Returns 0 on success, error code on failure. 
+ */ +int liteav_av_adts_header_parse(const uint8_t *buf, uint32_t *samples, + uint8_t *frames); + +#endif /* AVCODEC_ADTS_PARSER_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/ass_split.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/ass_split.h new file mode 100644 index 0000000..42f32f8 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/ass_split.h @@ -0,0 +1,331 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * SSA/ASS spliting functions + * Copyright (c) 2010 Aurelien Jacobs <aurel@gnuage.org> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_ASS_SPLIT_H +#define AVCODEC_ASS_SPLIT_H + +/** + * fields extracted from the [Script Info] section + */ +typedef struct { + char *script_type; /**< SSA script format version (eg. 
v4.00) */ + char *collisions; /**< how subtitles are moved to prevent collisions */ + int play_res_x; /**< video width that ASS coords are referring to */ + int play_res_y; /**< video height that ASS coords are referring to */ + float timer; /**< time multiplier to apply to SSA clock (in %) */ +} ASSScriptInfo; + +/** + * fields extracted from the [V4(+) Styles] section + */ +typedef struct { + char *name; /**< name of the tyle (case sensitive) */ + char *font_name; /**< font face (case sensitive) */ + int font_size; /**< font height */ + int primary_color; /**< color that a subtitle will normally appear in */ + int secondary_color; + int outline_color; /**< color for outline in ASS, called tertiary in SSA */ + int back_color; /**< color of the subtitle outline or shadow */ + int bold; /**< whether text is bold (1) or not (0) */ + int italic; /**< whether text is italic (1) or not (0) */ + int underline; /**< whether text is underlined (1) or not (0) */ + int strikeout; + float scalex; + float scaley; + float spacing; + float angle; + int border_style; + float outline; + float shadow; + int alignment; /**< position of the text (left, center, top...), + defined after the layout of the numpad + (1-3 sub, 4-6 mid, 7-9 top) */ + int margin_l; + int margin_r; + int margin_v; + int alpha_level; + int encoding; +} ASSStyle; + +/** + * fields extracted from the [Events] section + */ +typedef struct { + int readorder; + int layer; /**< higher numbered layers are drawn over lower numbered */ + int start; /**< start time of the dialog in centiseconds */ + int end; /**< end time of the dialog in centiseconds */ + char *style; /**< name of the ASSStyle to use with this dialog */ + char *name; + int margin_l; + int margin_r; + int margin_v; + char *effect; + char *text; /**< actual text which will be displayed as a subtitle, + can include style override control codes (see + liteav_ff_ass_split_override_codes()) */ +} ASSDialog; + +/** + * structure containing the whole split ASS 
data + */ +typedef struct { + ASSScriptInfo script_info; /**< general information about the SSA script*/ + ASSStyle *styles; /**< array of split out styles */ + int styles_count; /**< number of ASSStyle in the styles array */ + ASSDialog *dialogs; /**< array of split out dialogs */ + int dialogs_count; /**< number of ASSDialog in the dialogs array*/ +} ASS; + +typedef enum { + ASS_STR, + ASS_INT, + ASS_FLT, + ASS_COLOR, + ASS_TIMESTAMP, + ASS_ALGN, +} ASSFieldType; + +typedef struct { + const char *name; + int type; + int offset; +} ASSFields; + +typedef struct { + const char *section; + const char *format_header; + const char *fields_header; + int size; + int offset; + int offset_count; + ASSFields fields[24]; +} ASSSection; + +static const ASSSection ass_sections[] = { + { .section = "Script Info", + .offset = offsetof(ASS, script_info), + .fields = {{"ScriptType", ASS_STR, offsetof(ASSScriptInfo, script_type)}, + {"Collisions", ASS_STR, offsetof(ASSScriptInfo, collisions) }, + {"PlayResX", ASS_INT, offsetof(ASSScriptInfo, play_res_x) }, + {"PlayResY", ASS_INT, offsetof(ASSScriptInfo, play_res_y) }, + {"Timer", ASS_FLT, offsetof(ASSScriptInfo, timer) }, + {0}, + } + }, + { .section = "V4+ Styles", + .format_header = "Format", + .fields_header = "Style", + .size = sizeof(ASSStyle), + .offset = offsetof(ASS, styles), + .offset_count = offsetof(ASS, styles_count), + .fields = {{"Name", ASS_STR, offsetof(ASSStyle, name) }, + {"Fontname", ASS_STR, offsetof(ASSStyle, font_name) }, + {"Fontsize", ASS_INT, offsetof(ASSStyle, font_size) }, + {"PrimaryColour", ASS_COLOR, offsetof(ASSStyle, primary_color) }, + {"SecondaryColour", ASS_COLOR, offsetof(ASSStyle, secondary_color)}, + {"OutlineColour", ASS_COLOR, offsetof(ASSStyle, outline_color) }, + {"BackColour", ASS_COLOR, offsetof(ASSStyle, back_color) }, + {"Bold", ASS_INT, offsetof(ASSStyle, bold) }, + {"Italic", ASS_INT, offsetof(ASSStyle, italic) }, + {"Underline", ASS_INT, offsetof(ASSStyle, underline) }, + 
{"StrikeOut", ASS_INT, offsetof(ASSStyle, strikeout) }, + {"ScaleX", ASS_FLT, offsetof(ASSStyle, scalex) }, + {"ScaleY", ASS_FLT, offsetof(ASSStyle, scaley) }, + {"Spacing", ASS_FLT, offsetof(ASSStyle, spacing) }, + {"Angle", ASS_FLT, offsetof(ASSStyle, angle) }, + {"BorderStyle", ASS_INT, offsetof(ASSStyle, border_style) }, + {"Outline", ASS_FLT, offsetof(ASSStyle, outline) }, + {"Shadow", ASS_FLT, offsetof(ASSStyle, shadow) }, + {"Alignment", ASS_INT, offsetof(ASSStyle, alignment) }, + {"MarginL", ASS_INT, offsetof(ASSStyle, margin_l) }, + {"MarginR", ASS_INT, offsetof(ASSStyle, margin_r) }, + {"MarginV", ASS_INT, offsetof(ASSStyle, margin_v) }, + {"Encoding", ASS_INT, offsetof(ASSStyle, encoding) }, + {0}, + } + }, + { .section = "V4 Styles", + .format_header = "Format", + .fields_header = "Style", + .size = sizeof(ASSStyle), + .offset = offsetof(ASS, styles), + .offset_count = offsetof(ASS, styles_count), + .fields = {{"Name", ASS_STR, offsetof(ASSStyle, name) }, + {"Fontname", ASS_STR, offsetof(ASSStyle, font_name) }, + {"Fontsize", ASS_INT, offsetof(ASSStyle, font_size) }, + {"PrimaryColour", ASS_COLOR, offsetof(ASSStyle, primary_color) }, + {"SecondaryColour", ASS_COLOR, offsetof(ASSStyle, secondary_color)}, + {"TertiaryColour", ASS_COLOR, offsetof(ASSStyle, outline_color) }, + {"BackColour", ASS_COLOR, offsetof(ASSStyle, back_color) }, + {"Bold", ASS_INT, offsetof(ASSStyle, bold) }, + {"Italic", ASS_INT, offsetof(ASSStyle, italic) }, + {"BorderStyle", ASS_INT, offsetof(ASSStyle, border_style) }, + {"Outline", ASS_FLT, offsetof(ASSStyle, outline) }, + {"Shadow", ASS_FLT, offsetof(ASSStyle, shadow) }, + {"Alignment", ASS_ALGN, offsetof(ASSStyle, alignment) }, + {"MarginL", ASS_INT, offsetof(ASSStyle, margin_l) }, + {"MarginR", ASS_INT, offsetof(ASSStyle, margin_r) }, + {"MarginV", ASS_INT, offsetof(ASSStyle, margin_v) }, + {"AlphaLevel", ASS_INT, offsetof(ASSStyle, alpha_level) }, + {"Encoding", ASS_INT, offsetof(ASSStyle, encoding) }, + {0}, + } + }, + { 
.section = "Events", + .format_header = "Format", + .fields_header = "Dialogue", + .size = sizeof(ASSDialog), + .offset = offsetof(ASS, dialogs), + .offset_count = offsetof(ASS, dialogs_count), + .fields = {{"Layer", ASS_INT, offsetof(ASSDialog, layer) }, + {"Start", ASS_TIMESTAMP, offsetof(ASSDialog, start) }, + {"End", ASS_TIMESTAMP, offsetof(ASSDialog, end) }, + {"Style", ASS_STR, offsetof(ASSDialog, style) }, + {"Name", ASS_STR, offsetof(ASSDialog, name) }, + {"MarginL", ASS_INT, offsetof(ASSDialog, margin_l)}, + {"MarginR", ASS_INT, offsetof(ASSDialog, margin_r)}, + {"MarginV", ASS_INT, offsetof(ASSDialog, margin_v)}, + {"Effect", ASS_STR, offsetof(ASSDialog, effect) }, + {"Text", ASS_STR, offsetof(ASSDialog, text) }, + {0}, + } + }, +}; + +struct ASSSplitContext { + ASS ass; + int current_section; + int field_number[FF_ARRAY_ELEMS(ass_sections)]; + int *field_order[FF_ARRAY_ELEMS(ass_sections)]; +}; + +/** + * This struct can be casted to ASS to access to the split data. + */ +typedef struct ASSSplitContext ASSSplitContext; + +/** + * Split a full ASS file or a ASS header from a string buffer and store + * the split structure in a newly allocated context. + * + * @param buf String containing the ASS formatted data. + * @return Newly allocated struct containing split data. + */ +ASSSplitContext *liteav_ff_ass_split(const char *buf); + +/** + * Split one or several ASS "Dialogue" lines from a string buffer and store + * them in an already initialized context. + * + * @param ctx Context previously initialized by liteav_ff_ass_split(). + * @param buf String containing the ASS "Dialogue" lines. + * @param cache Set to 1 to keep all the previously split ASSDialog in + * the context, or set to 0 to free all the previously split + * ASSDialog. + * @param number If not NULL, the pointed integer will be set to the number + * of split ASSDialog. + * @return Pointer to the first split ASSDialog. 
+ */ +ASSDialog *liteav_ff_ass_split_dialog(ASSSplitContext *ctx, const char *buf, + int cache, int *number); + +/** + * Free a dialogue obtained from liteav_ff_ass_split_dialog2(). + */ +void liteav_ff_ass_free_dialog(ASSDialog **dialogp); + +/** + * Split one ASS Dialogue line from a string buffer. + * + * @param ctx Context previously initialized by liteav_ff_ass_split(). + * @param buf String containing the ASS "Dialogue" line. + * @return Pointer to the split ASSDialog. Must be freed with liteav_ff_ass_free_dialog() + */ +ASSDialog *liteav_ff_ass_split_dialog2(ASSSplitContext *ctx, const char *buf); + +/** + * Free all the memory allocated for an ASSSplitContext. + * + * @param ctx Context previously initialized by liteav_ff_ass_split(). + */ +void liteav_ff_ass_split_free(ASSSplitContext *ctx); + + +/** + * Set of callback functions corresponding to each override codes that can + * be encountered in a "Dialogue" Text field. + */ +typedef struct { + /** + * @defgroup ass_styles ASS styles + * @{ + */ + void (*text)(void *priv, const char *text, int len); + void (*new_line)(void *priv, int forced); + void (*style)(void *priv, char style, int close); + void (*color)(void *priv, unsigned int /* color */, unsigned int color_id); + void (*alpha)(void *priv, int alpha, int alpha_id); + void (*font_name)(void *priv, const char *name); + void (*font_size)(void *priv, int size); + void (*alignment)(void *priv, int alignment); + void (*cancel_overrides)(void *priv, const char *style); + /** @} */ + + /** + * @defgroup ass_functions ASS functions + * @{ + */ + void (*move)(void *priv, int x1, int y1, int x2, int y2, int t1, int t2); + void (*origin)(void *priv, int x, int y); + /** @} */ + + /** + * @defgroup ass_end end of Dialogue Event + * @{ + */ + void (*end)(void *priv); + /** @} */ +} ASSCodesCallbacks; + +/** + * Split override codes out of a ASS "Dialogue" Text field. 
+ * + * @param callbacks Set of callback functions called for each override code + * encountered. + * @param priv Opaque pointer passed to the callback functions. + * @param buf The ASS "Dialogue" Text field to split. + * @return >= 0 on success otherwise an error code <0 + */ +int liteav_ff_ass_split_override_codes(const ASSCodesCallbacks *callbacks, void *priv, + const char *buf); + +/** + * Find an ASSStyle structure by its name. + * + * @param ctx Context previously initialized by liteav_ff_ass_split(). + * @param style name of the style to search for. + * @return the ASSStyle corresponding to style, or NULL if style can't be found + */ +ASSStyle *liteav_ff_ass_style_get(ASSSplitContext *ctx, const char *style); + +#endif /* AVCODEC_ASS_SPLIT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/avcodec.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/avcodec.h new file mode 100755 index 0000000..b8e0974 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/avcodec.h @@ -0,0 +1,6630 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVCODEC_H +#define AVCODEC_AVCODEC_H + +/** + * @file + * @ingroup libavc + * Libavcodec external API header + */ + +#include <errno.h> +#include "libavutil/samplefmt.h" +#include "libavutil/attributes.h" +#include "libavutil/avutil.h" +#include "libavutil/buffer.h" +#include "libavutil/cpu.h" +#include "libavutil/channel_layout.h" +#include "libavutil/dict.h" +#include "libavutil/frame.h" +#include "libavutil/hwcontext.h" +#include "libavutil/log.h" +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" + +#include "version.h" + +/** + * @defgroup libavc libavcodec + * Encoding/Decoding Library + * + * @{ + * + * @defgroup lavc_decoding Decoding + * @{ + * @} + * + * @defgroup lavc_encoding Encoding + * @{ + * @} + * + * @defgroup lavc_codec Codecs + * @{ + * @defgroup lavc_codec_native Native Codecs + * @{ + * @} + * @defgroup lavc_codec_wrappers External library wrappers + * @{ + * @} + * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge + * @{ + * @} + * @} + * @defgroup lavc_internal Internal + * @{ + * @} + * @} + */ + +/** + * @ingroup libavc + * @defgroup lavc_encdec send/receive encoding and decoding API overview + * @{ + * + * The liteav_avcodec_send_packet()/liteav_avcodec_receive_frame()/liteav_avcodec_send_frame()/ + * liteav_avcodec_receive_packet() functions provide an encode/decode API, which + * decouples input and output. + * + * The API is very similar for encoding/decoding and audio/video, and works as + * follows: + * - Set up and open the AVCodecContext as usual. + * - Send valid input: + * - For decoding, call liteav_avcodec_send_packet() to give the decoder raw + * compressed data in an AVPacket. 
+ * - For encoding, call liteav_avcodec_send_frame() to give the encoder an AVFrame + * containing uncompressed audio or video. + * In both cases, it is recommended that AVPackets and AVFrames are + * refcounted, or libavcodec might have to copy the input data. (libavformat + * always returns refcounted AVPackets, and liteav_av_frame_get_buffer() allocates + * refcounted AVFrames.) + * - Receive output in a loop. Periodically call one of the avcodec_receive_*() + * functions and process their output: + * - For decoding, call liteav_avcodec_receive_frame(). On success, it will return + * an AVFrame containing uncompressed audio or video data. + * - For encoding, call liteav_avcodec_receive_packet(). On success, it will return + * an AVPacket with a compressed frame. + * Repeat this call until it returns AVERROR(EAGAIN) or an error. The + * AVERROR(EAGAIN) return value means that new input data is required to + * return new output. In this case, continue with sending input. For each + * input frame/packet, the codec will typically return 1 output frame/packet, + * but it can also be 0 or more than 1. + * + * At the beginning of decoding or encoding, the codec might accept multiple + * input frames/packets without returning a frame, until its internal buffers + * are filled. This situation is handled transparently if you follow the steps + * outlined above. + * + * In theory, sending input can result in EAGAIN - this should happen only if + * not all output was received. You can use this to structure alternative decode + * or encode loops other than the one suggested above. For example, you could + * try sending new input on each iteration, and try to receive output if that + * returns EAGAIN. + * + * End of stream situations. These require "flushing" (aka draining) the codec, + * as the codec might buffer multiple frames or packets internally for + * performance or out of necessity (consider B-frames). 
+ * This is handled as follows: + * - Instead of valid input, send NULL to the liteav_avcodec_send_packet() (decoding) + * or liteav_avcodec_send_frame() (encoding) functions. This will enter draining + * mode. + * - Call liteav_avcodec_receive_frame() (decoding) or liteav_avcodec_receive_packet() + * (encoding) in a loop until AVERROR_EOF is returned. The functions will + * not return AVERROR(EAGAIN), unless you forgot to enter draining mode. + * - Before decoding can be resumed again, the codec has to be reset with + * liteav_avcodec_flush_buffers(). + * + * Using the API as outlined above is highly recommended. But it is also + * possible to call functions outside of this rigid schema. For example, you can + * call liteav_avcodec_send_packet() repeatedly without calling + * liteav_avcodec_receive_frame(). In this case, liteav_avcodec_send_packet() will succeed + * until the codec's internal buffer has been filled up (which is typically of + * size 1 per output frame, after initial input), and then reject input with + * AVERROR(EAGAIN). Once it starts rejecting input, you have no choice but to + * read at least some output. + * + * Not all codecs will follow a rigid and predictable dataflow; the only + * guarantee is that an AVERROR(EAGAIN) return value on a send/receive call on + * one end implies that a receive/send call on the other end will succeed, or + * at least will not fail with AVERROR(EAGAIN). In general, no codec will + * permit unlimited buffering of input or output. + * + * This API replaces the following legacy functions: + * - liteav_avcodec_decode_video2() and liteav_avcodec_decode_audio4(): + * Use liteav_avcodec_send_packet() to feed input to the decoder, then use + * liteav_avcodec_receive_frame() to receive decoded frames after each packet. + * Unlike with the old video decoding API, multiple frames might result from + * a packet. 
For audio, splitting the input packet into frames by partially + * decoding packets becomes transparent to the API user. You never need to + * feed an AVPacket to the API twice (unless it is rejected with AVERROR(EAGAIN) - then + * no data was read from the packet). + * Additionally, sending a flush/draining packet is required only once. + * - liteav_avcodec_encode_video2()/liteav_avcodec_encode_audio2(): + * Use liteav_avcodec_send_frame() to feed input to the encoder, then use + * liteav_avcodec_receive_packet() to receive encoded packets. + * Providing user-allocated buffers for liteav_avcodec_receive_packet() is not + * possible. + * - The new API does not handle subtitles yet. + * + * Mixing new and old function calls on the same AVCodecContext is not allowed, + * and will result in undefined behavior. + * + * Some codecs might require using the new API; using the old API will return + * an error when calling it. All codecs support the new API. + * + * A codec is not allowed to return AVERROR(EAGAIN) for both sending and receiving. This + * would be an invalid state, which could put the codec user into an endless + * loop. The API has no concept of time either: it cannot happen that trying to + * do liteav_avcodec_send_packet() results in AVERROR(EAGAIN), but a repeated call 1 second + * later accepts the packet (with no other receive/flush API calls involved). + * The API is a strict state machine, and the passage of time is not supposed + * to influence it. Some timing-dependent behavior might still be deemed + * acceptable in certain cases. But it must never result in both send/receive + * returning EAGAIN at the same time at any point. It must also absolutely be + * avoided that the current state is "unstable" and can "flip-flop" between + * the send/receive APIs allowing progress. 
For example, it's not allowed that + * the codec randomly decides that it actually wants to consume a packet now + * instead of returning a frame, after it just returned AVERROR(EAGAIN) on an + * liteav_avcodec_send_packet() call. + * @} + */ + +/** + * @defgroup lavc_core Core functions/structures. + * @ingroup libavc + * + * Basic definitions, functions for querying libavcodec capabilities, + * allocating core structures, etc. + * @{ + */ + + +/** + * Identify the syntax and semantics of the bitstream. + * The principle is roughly: + * Two decoders with the same ID can decode the same streams. + * Two encoders with the same ID can encode compatible streams. + * There may be slight deviations from the principle due to implementation + * details. + * + * If you add a codec ID to this list, add it so that + * 1. no value of an existing codec ID changes (that would break ABI), + * 2. it is as close as possible to similar codecs + * + * After adding new codec IDs, do not forget to add an entry to the codec + * descriptor list and bump libavcodec minor version. 
+ */ +enum AVCodecID { + AV_CODEC_ID_NONE, + + /* video codecs */ + AV_CODEC_ID_MPEG1VIDEO, + AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding + AV_CODEC_ID_H261, + AV_CODEC_ID_H263, + AV_CODEC_ID_RV10, + AV_CODEC_ID_RV20, + AV_CODEC_ID_MJPEG, + AV_CODEC_ID_MJPEGB, + AV_CODEC_ID_LJPEG, + AV_CODEC_ID_SP5X, + AV_CODEC_ID_JPEGLS, + AV_CODEC_ID_MPEG4, + AV_CODEC_ID_RAWVIDEO, + AV_CODEC_ID_MSMPEG4V1, + AV_CODEC_ID_MSMPEG4V2, + AV_CODEC_ID_MSMPEG4V3, + AV_CODEC_ID_WMV1, + AV_CODEC_ID_WMV2, + AV_CODEC_ID_H263P, + AV_CODEC_ID_H263I, + AV_CODEC_ID_FLV1, + AV_CODEC_ID_SVQ1, + AV_CODEC_ID_SVQ3, + AV_CODEC_ID_DVVIDEO, + AV_CODEC_ID_HUFFYUV, + AV_CODEC_ID_CYUV, + AV_CODEC_ID_H264, + AV_CODEC_ID_INDEO3, + AV_CODEC_ID_VP3, + AV_CODEC_ID_THEORA, + AV_CODEC_ID_ASV1, + AV_CODEC_ID_ASV2, + AV_CODEC_ID_FFV1, + AV_CODEC_ID_4XM, + AV_CODEC_ID_VCR1, + AV_CODEC_ID_CLJR, + AV_CODEC_ID_MDEC, + AV_CODEC_ID_ROQ, + AV_CODEC_ID_INTERPLAY_VIDEO, + AV_CODEC_ID_XAN_WC3, + AV_CODEC_ID_XAN_WC4, + AV_CODEC_ID_RPZA, + AV_CODEC_ID_CINEPAK, + AV_CODEC_ID_WS_VQA, + AV_CODEC_ID_MSRLE, + AV_CODEC_ID_MSVIDEO1, + AV_CODEC_ID_IDCIN, + AV_CODEC_ID_8BPS, + AV_CODEC_ID_SMC, + AV_CODEC_ID_FLIC, + AV_CODEC_ID_TRUEMOTION1, + AV_CODEC_ID_VMDVIDEO, + AV_CODEC_ID_MSZH, + AV_CODEC_ID_ZLIB, + AV_CODEC_ID_QTRLE, + AV_CODEC_ID_TSCC, + AV_CODEC_ID_ULTI, + AV_CODEC_ID_QDRAW, + AV_CODEC_ID_VIXL, + AV_CODEC_ID_QPEG, + AV_CODEC_ID_PNG, + AV_CODEC_ID_PPM, + AV_CODEC_ID_PBM, + AV_CODEC_ID_PGM, + AV_CODEC_ID_PGMYUV, + AV_CODEC_ID_PAM, + AV_CODEC_ID_FFVHUFF, + AV_CODEC_ID_RV30, + AV_CODEC_ID_RV40, + AV_CODEC_ID_VC1, + AV_CODEC_ID_WMV3, + AV_CODEC_ID_LOCO, + AV_CODEC_ID_WNV1, + AV_CODEC_ID_AASC, + AV_CODEC_ID_INDEO2, + AV_CODEC_ID_FRAPS, + AV_CODEC_ID_TRUEMOTION2, + AV_CODEC_ID_BMP, + AV_CODEC_ID_CSCD, + AV_CODEC_ID_MMVIDEO, + AV_CODEC_ID_ZMBV, + AV_CODEC_ID_AVS, + AV_CODEC_ID_SMACKVIDEO, + AV_CODEC_ID_NUV, + AV_CODEC_ID_KMVC, + AV_CODEC_ID_FLASHSV, + AV_CODEC_ID_CAVS, + AV_CODEC_ID_JPEG2000, + 
AV_CODEC_ID_VMNC, + AV_CODEC_ID_VP5, + AV_CODEC_ID_VP6, + AV_CODEC_ID_VP6F, + AV_CODEC_ID_TARGA, + AV_CODEC_ID_DSICINVIDEO, + AV_CODEC_ID_TIERTEXSEQVIDEO, + AV_CODEC_ID_TIFF, + AV_CODEC_ID_GIF, + AV_CODEC_ID_DXA, + AV_CODEC_ID_DNXHD, + AV_CODEC_ID_THP, + AV_CODEC_ID_SGI, + AV_CODEC_ID_C93, + AV_CODEC_ID_BETHSOFTVID, + AV_CODEC_ID_PTX, + AV_CODEC_ID_TXD, + AV_CODEC_ID_VP6A, + AV_CODEC_ID_AMV, + AV_CODEC_ID_VB, + AV_CODEC_ID_PCX, + AV_CODEC_ID_SUNRAST, + AV_CODEC_ID_INDEO4, + AV_CODEC_ID_INDEO5, + AV_CODEC_ID_MIMIC, + AV_CODEC_ID_RL2, + AV_CODEC_ID_ESCAPE124, + AV_CODEC_ID_DIRAC, + AV_CODEC_ID_BFI, + AV_CODEC_ID_CMV, + AV_CODEC_ID_MOTIONPIXELS, + AV_CODEC_ID_TGV, + AV_CODEC_ID_TGQ, + AV_CODEC_ID_TQI, + AV_CODEC_ID_AURA, + AV_CODEC_ID_AURA2, + AV_CODEC_ID_V210X, + AV_CODEC_ID_TMV, + AV_CODEC_ID_V210, + AV_CODEC_ID_DPX, + AV_CODEC_ID_MAD, + AV_CODEC_ID_FRWU, + AV_CODEC_ID_FLASHSV2, + AV_CODEC_ID_CDGRAPHICS, + AV_CODEC_ID_R210, + AV_CODEC_ID_ANM, + AV_CODEC_ID_BINKVIDEO, + AV_CODEC_ID_IFF_ILBM, +#define AV_CODEC_ID_IFF_BYTERUN1 AV_CODEC_ID_IFF_ILBM + AV_CODEC_ID_KGV1, + AV_CODEC_ID_YOP, + AV_CODEC_ID_VP8, + AV_CODEC_ID_PICTOR, + AV_CODEC_ID_ANSI, + AV_CODEC_ID_A64_MULTI, + AV_CODEC_ID_A64_MULTI5, + AV_CODEC_ID_R10K, + AV_CODEC_ID_MXPEG, + AV_CODEC_ID_LAGARITH, + AV_CODEC_ID_PRORES, + AV_CODEC_ID_JV, + AV_CODEC_ID_DFA, + AV_CODEC_ID_WMV3IMAGE, + AV_CODEC_ID_VC1IMAGE, + AV_CODEC_ID_UTVIDEO, + AV_CODEC_ID_BMV_VIDEO, + AV_CODEC_ID_VBLE, + AV_CODEC_ID_DXTORY, + AV_CODEC_ID_V410, + AV_CODEC_ID_XWD, + AV_CODEC_ID_CDXL, + AV_CODEC_ID_XBM, + AV_CODEC_ID_ZEROCODEC, + AV_CODEC_ID_MSS1, + AV_CODEC_ID_MSA1, + AV_CODEC_ID_TSCC2, + AV_CODEC_ID_MTS2, + AV_CODEC_ID_CLLC, + AV_CODEC_ID_MSS2, + AV_CODEC_ID_VP9, + AV_CODEC_ID_AIC, + AV_CODEC_ID_ESCAPE130, + AV_CODEC_ID_G2M, + AV_CODEC_ID_WEBP, + AV_CODEC_ID_HNM4_VIDEO, + AV_CODEC_ID_HEVC, +#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC + AV_CODEC_ID_FIC, + AV_CODEC_ID_ALIAS_PIX, + AV_CODEC_ID_BRENDER_PIX, + AV_CODEC_ID_PAF_VIDEO, + 
AV_CODEC_ID_EXR, + AV_CODEC_ID_VP7, + AV_CODEC_ID_SANM, + AV_CODEC_ID_SGIRLE, + AV_CODEC_ID_MVC1, + AV_CODEC_ID_MVC2, + AV_CODEC_ID_HQX, + AV_CODEC_ID_TDSC, + AV_CODEC_ID_HQ_HQA, + AV_CODEC_ID_HAP, + AV_CODEC_ID_DDS, + AV_CODEC_ID_DXV, + AV_CODEC_ID_SCREENPRESSO, + AV_CODEC_ID_RSCC, + AV_CODEC_ID_AVS2, + AV_CODEC_ID_AVS3, + + AV_CODEC_ID_Y41P = 0x8000, + AV_CODEC_ID_AVRP, + AV_CODEC_ID_012V, + AV_CODEC_ID_AVUI, + AV_CODEC_ID_AYUV, + AV_CODEC_ID_TARGA_Y216, + AV_CODEC_ID_V308, + AV_CODEC_ID_V408, + AV_CODEC_ID_YUV4, + AV_CODEC_ID_AVRN, + AV_CODEC_ID_CPIA, + AV_CODEC_ID_XFACE, + AV_CODEC_ID_SNOW, + AV_CODEC_ID_SMVJPEG, + AV_CODEC_ID_APNG, + AV_CODEC_ID_DAALA, + AV_CODEC_ID_CFHD, + AV_CODEC_ID_TRUEMOTION2RT, + AV_CODEC_ID_M101, + AV_CODEC_ID_MAGICYUV, + AV_CODEC_ID_SHEERVIDEO, + AV_CODEC_ID_YLC, + AV_CODEC_ID_PSD, + AV_CODEC_ID_PIXLET, + AV_CODEC_ID_SPEEDHQ, + AV_CODEC_ID_FMVC, + AV_CODEC_ID_SCPR, + AV_CODEC_ID_CLEARVIDEO, + AV_CODEC_ID_XPM, + AV_CODEC_ID_AV1, + AV_CODEC_ID_BITPACKED, + AV_CODEC_ID_MSCC, + AV_CODEC_ID_SRGC, + AV_CODEC_ID_SVG, + AV_CODEC_ID_GDV, + AV_CODEC_ID_FITS, + AV_CODEC_ID_IMM4, + AV_CODEC_ID_PROSUMER, + AV_CODEC_ID_MWSC, + AV_CODEC_ID_WCMV, + AV_CODEC_ID_RASC, + + /* various PCM "codecs" */ + AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs + AV_CODEC_ID_PCM_S16LE = 0x10000, + AV_CODEC_ID_PCM_S16BE, + AV_CODEC_ID_PCM_U16LE, + AV_CODEC_ID_PCM_U16BE, + AV_CODEC_ID_PCM_S8, + AV_CODEC_ID_PCM_U8, + AV_CODEC_ID_PCM_MULAW, + AV_CODEC_ID_PCM_ALAW, + AV_CODEC_ID_PCM_S32LE, + AV_CODEC_ID_PCM_S32BE, + AV_CODEC_ID_PCM_U32LE, + AV_CODEC_ID_PCM_U32BE, + AV_CODEC_ID_PCM_S24LE, + AV_CODEC_ID_PCM_S24BE, + AV_CODEC_ID_PCM_U24LE, + AV_CODEC_ID_PCM_U24BE, + AV_CODEC_ID_PCM_S24DAUD, + AV_CODEC_ID_PCM_ZORK, + AV_CODEC_ID_PCM_S16LE_PLANAR, + AV_CODEC_ID_PCM_DVD, + AV_CODEC_ID_PCM_F32BE, + AV_CODEC_ID_PCM_F32LE, + AV_CODEC_ID_PCM_F64BE, + AV_CODEC_ID_PCM_F64LE, + AV_CODEC_ID_PCM_BLURAY, + AV_CODEC_ID_PCM_LXF, + AV_CODEC_ID_S302M, 
+ AV_CODEC_ID_PCM_S8_PLANAR, + AV_CODEC_ID_PCM_S24LE_PLANAR, + AV_CODEC_ID_PCM_S32LE_PLANAR, + AV_CODEC_ID_PCM_S16BE_PLANAR, + + AV_CODEC_ID_PCM_S64LE = 0x10800, + AV_CODEC_ID_PCM_S64BE, + AV_CODEC_ID_PCM_F16LE, + AV_CODEC_ID_PCM_F24LE, + AV_CODEC_ID_PCM_VIDC, + + /* various ADPCM codecs */ + AV_CODEC_ID_ADPCM_IMA_QT = 0x11000, + AV_CODEC_ID_ADPCM_IMA_WAV, + AV_CODEC_ID_ADPCM_IMA_DK3, + AV_CODEC_ID_ADPCM_IMA_DK4, + AV_CODEC_ID_ADPCM_IMA_WS, + AV_CODEC_ID_ADPCM_IMA_SMJPEG, + AV_CODEC_ID_ADPCM_MS, + AV_CODEC_ID_ADPCM_4XM, + AV_CODEC_ID_ADPCM_XA, + AV_CODEC_ID_ADPCM_ADX, + AV_CODEC_ID_ADPCM_EA, + AV_CODEC_ID_ADPCM_G726, + AV_CODEC_ID_ADPCM_CT, + AV_CODEC_ID_ADPCM_SWF, + AV_CODEC_ID_ADPCM_YAMAHA, + AV_CODEC_ID_ADPCM_SBPRO_4, + AV_CODEC_ID_ADPCM_SBPRO_3, + AV_CODEC_ID_ADPCM_SBPRO_2, + AV_CODEC_ID_ADPCM_THP, + AV_CODEC_ID_ADPCM_IMA_AMV, + AV_CODEC_ID_ADPCM_EA_R1, + AV_CODEC_ID_ADPCM_EA_R3, + AV_CODEC_ID_ADPCM_EA_R2, + AV_CODEC_ID_ADPCM_IMA_EA_SEAD, + AV_CODEC_ID_ADPCM_IMA_EA_EACS, + AV_CODEC_ID_ADPCM_EA_XAS, + AV_CODEC_ID_ADPCM_EA_MAXIS_XA, + AV_CODEC_ID_ADPCM_IMA_ISS, + AV_CODEC_ID_ADPCM_G722, + AV_CODEC_ID_ADPCM_IMA_APC, + AV_CODEC_ID_ADPCM_VIMA, + + AV_CODEC_ID_ADPCM_AFC = 0x11800, + AV_CODEC_ID_ADPCM_IMA_OKI, + AV_CODEC_ID_ADPCM_DTK, + AV_CODEC_ID_ADPCM_IMA_RAD, + AV_CODEC_ID_ADPCM_G726LE, + AV_CODEC_ID_ADPCM_THP_LE, + AV_CODEC_ID_ADPCM_PSX, + AV_CODEC_ID_ADPCM_AICA, + AV_CODEC_ID_ADPCM_IMA_DAT4, + AV_CODEC_ID_ADPCM_MTAF, + + /* AMR */ + AV_CODEC_ID_AMR_NB = 0x12000, + AV_CODEC_ID_AMR_WB, + + /* RealAudio codecs*/ + AV_CODEC_ID_RA_144 = 0x13000, + AV_CODEC_ID_RA_288, + + /* various DPCM codecs */ + AV_CODEC_ID_ROQ_DPCM = 0x14000, + AV_CODEC_ID_INTERPLAY_DPCM, + AV_CODEC_ID_XAN_DPCM, + AV_CODEC_ID_SOL_DPCM, + + AV_CODEC_ID_SDX2_DPCM = 0x14800, + AV_CODEC_ID_GREMLIN_DPCM, + + /* audio codecs */ + AV_CODEC_ID_MP2 = 0x15000, + AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3 + AV_CODEC_ID_AAC, + AV_CODEC_ID_AC3, + AV_CODEC_ID_DTS, + 
AV_CODEC_ID_VORBIS, + AV_CODEC_ID_DVAUDIO, + AV_CODEC_ID_WMAV1, + AV_CODEC_ID_WMAV2, + AV_CODEC_ID_MACE3, + AV_CODEC_ID_MACE6, + AV_CODEC_ID_VMDAUDIO, + AV_CODEC_ID_FLAC, + AV_CODEC_ID_MP3ADU, + AV_CODEC_ID_MP3ON4, + AV_CODEC_ID_SHORTEN, + AV_CODEC_ID_ALAC, + AV_CODEC_ID_WESTWOOD_SND1, + AV_CODEC_ID_GSM, ///< as in Berlin toast format + AV_CODEC_ID_QDM2, + AV_CODEC_ID_COOK, + AV_CODEC_ID_TRUESPEECH, + AV_CODEC_ID_TTA, + AV_CODEC_ID_SMACKAUDIO, + AV_CODEC_ID_QCELP, + AV_CODEC_ID_WAVPACK, + AV_CODEC_ID_DSICINAUDIO, + AV_CODEC_ID_IMC, + AV_CODEC_ID_MUSEPACK7, + AV_CODEC_ID_MLP, + AV_CODEC_ID_GSM_MS, /* as found in WAV */ + AV_CODEC_ID_ATRAC3, + AV_CODEC_ID_APE, + AV_CODEC_ID_NELLYMOSER, + AV_CODEC_ID_MUSEPACK8, + AV_CODEC_ID_SPEEX, + AV_CODEC_ID_WMAVOICE, + AV_CODEC_ID_WMAPRO, + AV_CODEC_ID_WMALOSSLESS, + AV_CODEC_ID_ATRAC3P, + AV_CODEC_ID_EAC3, + AV_CODEC_ID_SIPR, + AV_CODEC_ID_MP1, + AV_CODEC_ID_TWINVQ, + AV_CODEC_ID_TRUEHD, + AV_CODEC_ID_MP4ALS, + AV_CODEC_ID_ATRAC1, + AV_CODEC_ID_BINKAUDIO_RDFT, + AV_CODEC_ID_BINKAUDIO_DCT, + AV_CODEC_ID_AAC_LATM, + AV_CODEC_ID_QDMC, + AV_CODEC_ID_CELT, + AV_CODEC_ID_G723_1, + AV_CODEC_ID_G729, + AV_CODEC_ID_8SVX_EXP, + AV_CODEC_ID_8SVX_FIB, + AV_CODEC_ID_BMV_AUDIO, + AV_CODEC_ID_RALF, + AV_CODEC_ID_IAC, + AV_CODEC_ID_ILBC, + AV_CODEC_ID_OPUS, + AV_CODEC_ID_COMFORT_NOISE, + AV_CODEC_ID_TAK, + AV_CODEC_ID_METASOUND, + AV_CODEC_ID_PAF_AUDIO, + AV_CODEC_ID_ON2AVC, + AV_CODEC_ID_DSS_SP, + AV_CODEC_ID_CODEC2, + + AV_CODEC_ID_FFWAVESYNTH = 0x15800, + AV_CODEC_ID_SONIC, + AV_CODEC_ID_SONIC_LS, + AV_CODEC_ID_EVRC, + AV_CODEC_ID_SMV, + AV_CODEC_ID_DSD_LSBF, + AV_CODEC_ID_DSD_MSBF, + AV_CODEC_ID_DSD_LSBF_PLANAR, + AV_CODEC_ID_DSD_MSBF_PLANAR, + AV_CODEC_ID_4GV, + AV_CODEC_ID_INTERPLAY_ACM, + AV_CODEC_ID_XMA1, + AV_CODEC_ID_XMA2, + AV_CODEC_ID_DST, + AV_CODEC_ID_ATRAC3AL, + AV_CODEC_ID_ATRAC3PAL, + AV_CODEC_ID_DOLBY_E, + AV_CODEC_ID_APTX, + AV_CODEC_ID_APTX_HD, + AV_CODEC_ID_SBC, + AV_CODEC_ID_ATRAC9, + + /* subtitle codecs */ + 
AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. + AV_CODEC_ID_DVD_SUBTITLE = 0x17000, + AV_CODEC_ID_DVB_SUBTITLE, + AV_CODEC_ID_TEXT, ///< raw UTF-8 text + AV_CODEC_ID_XSUB, + AV_CODEC_ID_SSA, + AV_CODEC_ID_MOV_TEXT, + AV_CODEC_ID_HDMV_PGS_SUBTITLE, + AV_CODEC_ID_DVB_TELETEXT, + AV_CODEC_ID_SRT, + + AV_CODEC_ID_MICRODVD = 0x17800, + AV_CODEC_ID_EIA_608, + AV_CODEC_ID_JACOSUB, + AV_CODEC_ID_SAMI, + AV_CODEC_ID_REALTEXT, + AV_CODEC_ID_STL, + AV_CODEC_ID_SUBVIEWER1, + AV_CODEC_ID_SUBVIEWER, + AV_CODEC_ID_SUBRIP, + AV_CODEC_ID_WEBVTT, + AV_CODEC_ID_MPL2, + AV_CODEC_ID_VPLAYER, + AV_CODEC_ID_PJS, + AV_CODEC_ID_ASS, + AV_CODEC_ID_HDMV_TEXT_SUBTITLE, + AV_CODEC_ID_TTML, + + /* other specific kind of codecs (generally used for attachments) */ + AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs. + AV_CODEC_ID_TTF = 0x18000, + + AV_CODEC_ID_SCTE_35, ///< Contain timestamp estimated through PCR of program stream. + AV_CODEC_ID_BINTEXT = 0x18800, + AV_CODEC_ID_XBIN, + AV_CODEC_ID_IDF, + AV_CODEC_ID_OTF, + AV_CODEC_ID_SMPTE_KLV, + AV_CODEC_ID_DVD_NAV, + AV_CODEC_ID_TIMED_ID3, + AV_CODEC_ID_BIN_DATA, + + + AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it + + AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS + * stream (only used by libavformat) */ + AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems + * stream (only used by libavformat) */ + AV_CODEC_ID_MPEG2PS = 0x20002, /**< _FAKE_ codec to indicate a raw MPEG-2 PS + * stream (only used by libavformat) */ + AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information. + AV_CODEC_ID_WRAPPED_AVFRAME = 0x21001, ///< Passthrough codec, AVFrames wrapped in AVPacket +}; + +/** + * This struct describes the properties of a single codec described by an + * AVCodecID. 
+ * @see liteav_avcodec_descriptor_get() + */ +typedef struct AVCodecDescriptor { + enum AVCodecID id; + enum AVMediaType type; + /** + * Name of the codec described by this descriptor. It is non-empty and + * unique for each codec descriptor. It should contain alphanumeric + * characters and '_' only. + */ + const char *name; + /** + * A more descriptive name for this codec. May be NULL. + */ + const char *long_name; + /** + * Codec properties, a combination of AV_CODEC_PROP_* flags. + */ + int props; + /** + * MIME type(s) associated with the codec. + * May be NULL; if not, a NULL-terminated array of MIME types. + * The first item is always non-NULL and is the preferred MIME type. + */ + const char *const *mime_types; + /** + * If non-NULL, an array of profiles recognized for this codec. + * Terminated with FF_PROFILE_UNKNOWN. + */ + const struct AVProfile *profiles; +} AVCodecDescriptor; + +/** + * Codec uses only intra compression. + * Video and audio codecs only. + */ +#define AV_CODEC_PROP_INTRA_ONLY (1 << 0) +/** + * Codec supports lossy compression. Audio and video codecs only. + * @note a codec may support both lossy and lossless + * compression modes + */ +#define AV_CODEC_PROP_LOSSY (1 << 1) +/** + * Codec supports lossless compression. Audio and video codecs only. + */ +#define AV_CODEC_PROP_LOSSLESS (1 << 2) +/** + * Codec supports frame reordering. That is, the coded order (the order in which + * the encoded packets are output by the encoders / stored / input to the + * decoders) may be different from the presentation order of the corresponding + * frames. + * + * For codecs that do not have this property set, PTS and DTS should always be + * equal. + */ +#define AV_CODEC_PROP_REORDER (1 << 3) +/** + * Subtitle codec is bitmap based + * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field. + */ +#define AV_CODEC_PROP_BITMAP_SUB (1 << 16) +/** + * Subtitle codec is text based. 
+ * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field. + */ +#define AV_CODEC_PROP_TEXT_SUB (1 << 17) + +/** + * @ingroup lavc_decoding + * Required number of additionally allocated bytes at the end of the input bitstream for decoding. + * This is mainly needed because some optimized bitstream readers read + * 32 or 64 bit at once and could read over the end.<br> + * Note: If the first 23 bits of the additional bytes are not 0, then damaged + * MPEG bitstreams could cause overread and segfault. + */ +#define AV_INPUT_BUFFER_PADDING_SIZE 64 + +/** + * @ingroup lavc_encoding + * minimum encoding buffer size + * Used to avoid some checks during header writing. + */ +#define AV_INPUT_BUFFER_MIN_SIZE 16384 + +#if FF_API_WITHOUT_PREFIX +/** + * @deprecated use AV_INPUT_BUFFER_PADDING_SIZE instead + */ +#define FF_INPUT_BUFFER_PADDING_SIZE 32 + +/** + * @deprecated use AV_INPUT_BUFFER_MIN_SIZE instead + */ +#define FF_MIN_BUFFER_SIZE 16384 +#endif /* FF_API_WITHOUT_PREFIX */ + +/** + * @ingroup lavc_decoding + */ +enum AVDiscard{ + /* We leave some space between them for extensions (drop some + * keyframes for intra-only or drop just some bidir frames). 
*/ + AVDISCARD_NONE =-16, ///< discard nothing + AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi + AVDISCARD_NONREF = 8, ///< discard all non reference + AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames + AVDISCARD_NONINTRA= 24, ///< discard all non intra frames + AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes + AVDISCARD_ALL = 48, ///< discard all +}; + +enum AVAudioServiceType { + AV_AUDIO_SERVICE_TYPE_MAIN = 0, + AV_AUDIO_SERVICE_TYPE_EFFECTS = 1, + AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2, + AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3, + AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4, + AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5, + AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6, + AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7, + AV_AUDIO_SERVICE_TYPE_KARAOKE = 8, + AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI +}; + +/** + * @ingroup lavc_encoding + */ +typedef struct RcOverride{ + int start_frame; + int end_frame; + int qscale; // If this is 0 then quality_factor will be used instead. + float quality_factor; +} RcOverride; + +/* encoding support + These flags can be passed in AVCodecContext.flags before initialization. + Note: Not everything is supported yet. +*/ + +/** + * Allow decoders to produce frames with data planes that are not aligned + * to CPU requirements (e.g. due to cropping). + */ +#define AV_CODEC_FLAG_UNALIGNED (1 << 0) +/** + * Use fixed qscale. + */ +#define AV_CODEC_FLAG_QSCALE (1 << 1) +/** + * 4 MV per MB allowed / advanced prediction for H.263. + */ +#define AV_CODEC_FLAG_4MV (1 << 2) +/** + * Output even those frames that might be corrupted. + */ +#define AV_CODEC_FLAG_OUTPUT_CORRUPT (1 << 3) +/** + * Use qpel MC. + */ +#define AV_CODEC_FLAG_QPEL (1 << 4) +/** + * Use internal 2pass ratecontrol in first pass mode. + */ +#define AV_CODEC_FLAG_PASS1 (1 << 9) +/** + * Use internal 2pass ratecontrol in second pass mode. + */ +#define AV_CODEC_FLAG_PASS2 (1 << 10) +/** + * loop filter. 
+ */ +#define AV_CODEC_FLAG_LOOP_FILTER (1 << 11) +/** + * Only decode/encode grayscale. + */ +#define AV_CODEC_FLAG_GRAY (1 << 13) +/** + * error[?] variables will be set during encoding. + */ +#define AV_CODEC_FLAG_PSNR (1 << 15) +/** + * Input bitstream might be truncated at a random location + * instead of only at frame boundaries. + */ +#define AV_CODEC_FLAG_TRUNCATED (1 << 16) +/** + * Use interlaced DCT. + */ +#define AV_CODEC_FLAG_INTERLACED_DCT (1 << 18) +/** + * Force low delay. + */ +#define AV_CODEC_FLAG_LOW_DELAY (1 << 19) +/** + * Place global headers in extradata instead of every keyframe. + */ +#define AV_CODEC_FLAG_GLOBAL_HEADER (1 << 22) +/** + * Use only bitexact stuff (except (I)DCT). + */ +#define AV_CODEC_FLAG_BITEXACT (1 << 23) +/* Fx : Flag for H.263+ extra options */ +/** + * H.263 advanced intra coding / MPEG-4 AC prediction + */ +#define AV_CODEC_FLAG_AC_PRED (1 << 24) +/** + * interlaced motion estimation + */ +#define AV_CODEC_FLAG_INTERLACED_ME (1 << 29) +#define AV_CODEC_FLAG_CLOSED_GOP (1U << 31) + +/** + * Allow non spec compliant speedup tricks. + */ +#define AV_CODEC_FLAG2_FAST (1 << 0) +/** + * Skip bitstream encoding. + */ +#define AV_CODEC_FLAG2_NO_OUTPUT (1 << 2) +/** + * Place global headers at every keyframe instead of in extradata. + */ +#define AV_CODEC_FLAG2_LOCAL_HEADER (1 << 3) + +/** + * timecode is in drop frame format. DEPRECATED!!!! + */ +#define AV_CODEC_FLAG2_DROP_FRAME_TIMECODE (1 << 13) + +/** + * Input bitstream might be truncated at a packet boundaries + * instead of only at frame boundaries. + */ +#define AV_CODEC_FLAG2_CHUNKS (1 << 15) +/** + * Discard cropping information from SPS. 
+ */ +#define AV_CODEC_FLAG2_IGNORE_CROP (1 << 16) + +/** + * Show all frames before the first keyframe + */ +#define AV_CODEC_FLAG2_SHOW_ALL (1 << 22) +/** + * Export motion vectors through frame side data + */ +#define AV_CODEC_FLAG2_EXPORT_MVS (1 << 28) +/** + * Do not skip samples and export skip information as frame side data + */ +#define AV_CODEC_FLAG2_SKIP_MANUAL (1 << 29) +/** + * Do not reset ASS ReadOrder field on flush (subtitles decoding) + */ +#define AV_CODEC_FLAG2_RO_FLUSH_NOOP (1 << 30) + +/* Unsupported options : + * Syntax Arithmetic coding (SAC) + * Reference Picture Selection + * Independent Segment Decoding */ +/* /Fx */ +/* codec capabilities */ + +/** + * Decoder can use draw_horiz_band callback. + */ +#define AV_CODEC_CAP_DRAW_HORIZ_BAND (1 << 0) +/** + * Codec uses get_buffer() for allocating buffers and supports custom allocators. + * If not set, it might not use get_buffer() at all or use operations that + * assume the buffer was allocated by avcodec_default_get_buffer. + */ +#define AV_CODEC_CAP_DR1 (1 << 1) +#define AV_CODEC_CAP_TRUNCATED (1 << 3) +/** + * Encoder or decoder requires flushing with NULL input at the end in order to + * give the complete and correct output. + * + * NOTE: If this flag is not set, the codec is guaranteed to never be fed with + * with NULL data. The user can still send NULL data to the public encode + * or decode function, but libavcodec will not pass it along to the codec + * unless this flag is set. + * + * Decoders: + * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to get the delayed data until the decoder no longer + * returns frames. + * + * Encoders: + * The encoder needs to be fed with NULL data at the end of encoding until the + * encoder no longer returns data. + * + * NOTE: For encoders implementing the AVCodec.encode2() function, setting this + * flag also means that the encoder must set the pts and duration for + * each output packet. 
If this flag is not set, the pts and duration will + * be determined by libavcodec from the input frame. + */ +#define AV_CODEC_CAP_DELAY (1 << 5) +/** + * Codec can be fed a final frame with a smaller size. + * This can be used to prevent truncation of the last audio samples. + */ +#define AV_CODEC_CAP_SMALL_LAST_FRAME (1 << 6) + +/** + * Codec can output multiple frames per AVPacket + * Normally demuxers return one frame at a time, demuxers which do not do + * are connected to a parser to split what they return into proper frames. + * This flag is reserved to the very rare category of codecs which have a + * bitstream that cannot be split into frames without timeconsuming + * operations like full decoding. Demuxers carrying such bitstreams thus + * may return multiple frames in a packet. This has many disadvantages like + * prohibiting stream copy in many cases thus it should only be considered + * as a last resort. + */ +#define AV_CODEC_CAP_SUBFRAMES (1 << 8) +/** + * Codec is experimental and is thus avoided in favor of non experimental + * encoders + */ +#define AV_CODEC_CAP_EXPERIMENTAL (1 << 9) +/** + * Codec should fill in channel configuration and samplerate instead of container + */ +#define AV_CODEC_CAP_CHANNEL_CONF (1 << 10) +/** + * Codec supports frame-level multithreading. + */ +#define AV_CODEC_CAP_FRAME_THREADS (1 << 12) +/** + * Codec supports slice-based (or partition-based) multithreading. + */ +#define AV_CODEC_CAP_SLICE_THREADS (1 << 13) +/** + * Codec supports changed parameters at any point. + */ +#define AV_CODEC_CAP_PARAM_CHANGE (1 << 14) +/** + * Codec supports avctx->thread_count == 0 (auto). + */ +#define AV_CODEC_CAP_AUTO_THREADS (1 << 15) +/** + * Audio encoder supports receiving a different number of samples in each call. + */ +#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE (1 << 16) +/** + * Decoder is not a preferred choice for probing. + * This indicates that the decoder is not a good choice for probing. 
+ * It could for example be an expensive to spin up hardware decoder, + * or it could simply not provide a lot of useful information about + * the stream. + * A decoder marked with this flag should only be used as last resort + * choice for probing. + */ +#define AV_CODEC_CAP_AVOID_PROBING (1 << 17) +/** + * Codec is intra only. + */ +#define AV_CODEC_CAP_INTRA_ONLY 0x40000000 +/** + * Codec is lossless. + */ +#define AV_CODEC_CAP_LOSSLESS 0x80000000 + +#if FF_API_WITHOUT_PREFIX +/** + * Allow decoders to produce frames with data planes that are not aligned + * to CPU requirements (e.g. due to cropping). + */ +#define CODEC_FLAG_UNALIGNED AV_CODEC_FLAG_UNALIGNED +#define CODEC_FLAG_QSCALE AV_CODEC_FLAG_QSCALE +#define CODEC_FLAG_4MV AV_CODEC_FLAG_4MV +#define CODEC_FLAG_OUTPUT_CORRUPT AV_CODEC_FLAG_OUTPUT_CORRUPT +#define CODEC_FLAG_QPEL AV_CODEC_FLAG_QPEL +#if FF_API_GMC +/** + * @deprecated use the "gmc" private option of the libxvid encoder + */ +#define CODEC_FLAG_GMC 0x0020 ///< Use GMC. +#endif +#if FF_API_MV0 +/** + * @deprecated use the flag "mv0" in the "mpv_flags" private option of the + * mpegvideo encoders + */ +#define CODEC_FLAG_MV0 0x0040 +#endif +#if FF_API_INPUT_PRESERVED +/** + * @deprecated passing reference-counted frames to the encoders replaces this + * flag + */ +#define CODEC_FLAG_INPUT_PRESERVED 0x0100 +#endif +#define CODEC_FLAG_PASS1 AV_CODEC_FLAG_PASS1 +#define CODEC_FLAG_PASS2 AV_CODEC_FLAG_PASS2 +#define CODEC_FLAG_GRAY AV_CODEC_FLAG_GRAY +#if FF_API_EMU_EDGE +/** + * @deprecated edges are not used/required anymore. I.e. this flag is now always + * set. 
+ */ +#define CODEC_FLAG_EMU_EDGE 0x4000 +#endif +#define CODEC_FLAG_PSNR AV_CODEC_FLAG_PSNR +#define CODEC_FLAG_TRUNCATED AV_CODEC_FLAG_TRUNCATED + +#if FF_API_NORMALIZE_AQP +/** + * @deprecated use the flag "naq" in the "mpv_flags" private option of the + * mpegvideo encoders + */ +#define CODEC_FLAG_NORMALIZE_AQP 0x00020000 +#endif +#define CODEC_FLAG_INTERLACED_DCT AV_CODEC_FLAG_INTERLACED_DCT +#define CODEC_FLAG_LOW_DELAY AV_CODEC_FLAG_LOW_DELAY +#define CODEC_FLAG_GLOBAL_HEADER AV_CODEC_FLAG_GLOBAL_HEADER +#define CODEC_FLAG_BITEXACT AV_CODEC_FLAG_BITEXACT +#define CODEC_FLAG_AC_PRED AV_CODEC_FLAG_AC_PRED +#define CODEC_FLAG_LOOP_FILTER AV_CODEC_FLAG_LOOP_FILTER +#define CODEC_FLAG_INTERLACED_ME AV_CODEC_FLAG_INTERLACED_ME +#define CODEC_FLAG_CLOSED_GOP AV_CODEC_FLAG_CLOSED_GOP +#define CODEC_FLAG2_FAST AV_CODEC_FLAG2_FAST +#define CODEC_FLAG2_NO_OUTPUT AV_CODEC_FLAG2_NO_OUTPUT +#define CODEC_FLAG2_LOCAL_HEADER AV_CODEC_FLAG2_LOCAL_HEADER +#define CODEC_FLAG2_DROP_FRAME_TIMECODE AV_CODEC_FLAG2_DROP_FRAME_TIMECODE +#define CODEC_FLAG2_IGNORE_CROP AV_CODEC_FLAG2_IGNORE_CROP + +#define CODEC_FLAG2_CHUNKS AV_CODEC_FLAG2_CHUNKS +#define CODEC_FLAG2_SHOW_ALL AV_CODEC_FLAG2_SHOW_ALL +#define CODEC_FLAG2_EXPORT_MVS AV_CODEC_FLAG2_EXPORT_MVS +#define CODEC_FLAG2_SKIP_MANUAL AV_CODEC_FLAG2_SKIP_MANUAL + +/* Unsupported options : + * Syntax Arithmetic coding (SAC) + * Reference Picture Selection + * Independent Segment Decoding */ +/* /Fx */ +/* codec capabilities */ +#define CODEC_CAP_DRAW_HORIZ_BAND AV_CODEC_CAP_DRAW_HORIZ_BAND ///< Decoder can use draw_horiz_band callback. +/** + * Codec uses get_buffer() for allocating buffers and supports custom allocators. + * If not set, it might not use get_buffer() at all or use operations that + * assume the buffer was allocated by avcodec_default_get_buffer. + */ +#define CODEC_CAP_DR1 AV_CODEC_CAP_DR1 +#define CODEC_CAP_TRUNCATED AV_CODEC_CAP_TRUNCATED +#if FF_API_XVMC +/* Codec can export data for HW decoding. 
This flag indicates that + * the codec would call get_format() with list that might contain HW accelerated + * pixel formats (XvMC, VDPAU, VAAPI, etc). The application can pick any of them + * including raw image format. + * The application can use the passed context to determine bitstream version, + * chroma format, resolution etc. + */ +#define CODEC_CAP_HWACCEL 0x0010 +#endif /* FF_API_XVMC */ +/** + * Encoder or decoder requires flushing with NULL input at the end in order to + * give the complete and correct output. + * + * NOTE: If this flag is not set, the codec is guaranteed to never be fed with + * with NULL data. The user can still send NULL data to the public encode + * or decode function, but libavcodec will not pass it along to the codec + * unless this flag is set. + * + * Decoders: + * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to get the delayed data until the decoder no longer + * returns frames. + * + * Encoders: + * The encoder needs to be fed with NULL data at the end of encoding until the + * encoder no longer returns data. + * + * NOTE: For encoders implementing the AVCodec.encode2() function, setting this + * flag also means that the encoder must set the pts and duration for + * each output packet. If this flag is not set, the pts and duration will + * be determined by libavcodec from the input frame. + */ +#define CODEC_CAP_DELAY AV_CODEC_CAP_DELAY +/** + * Codec can be fed a final frame with a smaller size. + * This can be used to prevent truncation of the last audio samples. + */ +#define CODEC_CAP_SMALL_LAST_FRAME AV_CODEC_CAP_SMALL_LAST_FRAME +#if FF_API_CAP_VDPAU +/** + * Codec can export data for HW decoding (VDPAU). 
+ */ +#define CODEC_CAP_HWACCEL_VDPAU AV_CODEC_CAP_HWACCEL_VDPAU +#endif +/** + * Codec can output multiple frames per AVPacket + * Normally demuxers return one frame at a time, demuxers which do not do + * are connected to a parser to split what they return into proper frames. + * This flag is reserved to the very rare category of codecs which have a + * bitstream that cannot be split into frames without timeconsuming + * operations like full decoding. Demuxers carrying such bitstreams thus + * may return multiple frames in a packet. This has many disadvantages like + * prohibiting stream copy in many cases thus it should only be considered + * as a last resort. + */ +#define CODEC_CAP_SUBFRAMES AV_CODEC_CAP_SUBFRAMES +/** + * Codec is experimental and is thus avoided in favor of non experimental + * encoders + */ +#define CODEC_CAP_EXPERIMENTAL AV_CODEC_CAP_EXPERIMENTAL +/** + * Codec should fill in channel configuration and samplerate instead of container + */ +#define CODEC_CAP_CHANNEL_CONF AV_CODEC_CAP_CHANNEL_CONF +#if FF_API_NEG_LINESIZES +/** + * @deprecated no codecs use this capability + */ +#define CODEC_CAP_NEG_LINESIZES 0x0800 +#endif +/** + * Codec supports frame-level multithreading. + */ +#define CODEC_CAP_FRAME_THREADS AV_CODEC_CAP_FRAME_THREADS +/** + * Codec supports slice-based (or partition-based) multithreading. + */ +#define CODEC_CAP_SLICE_THREADS AV_CODEC_CAP_SLICE_THREADS +/** + * Codec supports changed parameters at any point. + */ +#define CODEC_CAP_PARAM_CHANGE AV_CODEC_CAP_PARAM_CHANGE +/** + * Codec supports avctx->thread_count == 0 (auto). + */ +#define CODEC_CAP_AUTO_THREADS AV_CODEC_CAP_AUTO_THREADS +/** + * Audio encoder supports receiving a different number of samples in each call. + */ +#define CODEC_CAP_VARIABLE_FRAME_SIZE AV_CODEC_CAP_VARIABLE_FRAME_SIZE +/** + * Codec is intra only. + */ +#define CODEC_CAP_INTRA_ONLY AV_CODEC_CAP_INTRA_ONLY +/** + * Codec is lossless. 
+ */ +#define CODEC_CAP_LOSSLESS AV_CODEC_CAP_LOSSLESS + +/** + * HWAccel is experimental and is thus avoided in favor of non experimental + * codecs + */ +#define HWACCEL_CODEC_CAP_EXPERIMENTAL 0x0200 +#endif /* FF_API_WITHOUT_PREFIX */ + +/** + * Codec is backed by a hardware implementation. Typically used to + * identify a non-hwaccel hardware decoder. For information about hwaccels, use + * avcodec_get_hw_config() instead. + */ +#define AV_CODEC_CAP_HARDWARE (1 << 18) + +/** + * Codec is potentially backed by a hardware implementation, but not + * necessarily. This is used instead of AV_CODEC_CAP_HARDWARE, if the + * implementation provides some sort of internal fallback. + */ +#define AV_CODEC_CAP_HYBRID (1 << 19) + +/** + * Pan Scan area. + * This specifies the area which should be displayed. + * Note there may be multiple such areas for one frame. + */ +typedef struct AVPanScan { + /** + * id + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int id; + + /** + * width and height in 1/16 pel + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int width; + int height; + + /** + * position of the top left corner in 1/16 pel for up to 3 fields/frames + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int16_t position[3][2]; +} AVPanScan; + +/** + * This structure describes the bitrate properties of an encoded bitstream. It + * roughly corresponds to a subset the VBV parameters for MPEG-2 or HRD + * parameters for H.264/HEVC. + */ +typedef struct AVCPBProperties { + /** + * Maximum bitrate of the stream, in bits per second. + * Zero if unknown or unspecified. + */ + int max_bitrate; + /** + * Minimum bitrate of the stream, in bits per second. + * Zero if unknown or unspecified. + */ + int min_bitrate; + /** + * Average bitrate of the stream, in bits per second. + * Zero if unknown or unspecified. + */ + int avg_bitrate; + + /** + * The size of the buffer to which the ratecontrol is applied, in bits. 
+ * Zero if unknown or unspecified. + */ + int buffer_size; + + /** + * The delay between the time the packet this structure is associated with + * is received and the time when it should be decoded, in periods of a 27MHz + * clock. + * + * UINT64_MAX when unknown or unspecified. + */ + uint64_t vbv_delay; +} AVCPBProperties; + +/** + * The decoder will keep a reference to the frame and may reuse it later. + */ +#define AV_GET_BUFFER_FLAG_REF (1 << 0) + +/** + * @defgroup lavc_packet AVPacket + * + * Types and functions for working with AVPacket. + * @{ + */ +enum AVPacketSideDataType { + /** + * An AV_PKT_DATA_PALETTE side data packet contains exactly AVPALETTE_SIZE + * bytes worth of palette. This side data signals that a new palette is + * present. + */ + AV_PKT_DATA_PALETTE, + + /** + * The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format + * that the extradata buffer was changed and the receiving side should + * act upon it appropriately. The new extradata is embedded in the side + * data buffer and should be immediately used for processing the current + * frame or packet. + */ + AV_PKT_DATA_NEW_EXTRADATA, + + /** + * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows: + * @code + * u32le param_flags + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) + * s32le channel_count + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) + * u64le channel_layout + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) + * s32le sample_rate + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) + * s32le width + * s32le height + * @endcode + */ + AV_PKT_DATA_PARAM_CHANGE, + + /** + * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of + * structures with info about macroblocks relevant to splitting the + * packet into smaller packets on macroblock edges (e.g. as for RFC 2190). 
+ * That is, it does not necessarily contain info about all macroblocks, + * as long as the distance between macroblocks in the info is smaller + * than the target payload size. + * Each MB info structure is 12 bytes, and is laid out as follows: + * @code + * u32le bit offset from the start of the packet + * u8 current quantizer at the start of the macroblock + * u8 GOB number + * u16le macroblock address within the GOB + * u8 horizontal MV predictor + * u8 vertical MV predictor + * u8 horizontal MV predictor for block number 3 + * u8 vertical MV predictor for block number 3 + * @endcode + */ + AV_PKT_DATA_H263_MB_INFO, + + /** + * This side data should be associated with an audio stream and contains + * ReplayGain information in form of the AVReplayGain struct. + */ + AV_PKT_DATA_REPLAYGAIN, + + /** + * This side data contains a 3x3 transformation matrix describing an affine + * transformation that needs to be applied to the decoded video frames for + * correct presentation. + * + * See libavutil/display.h for a detailed description of the data. + */ + AV_PKT_DATA_DISPLAYMATRIX, + + /** + * This side data should be associated with a video stream and contains + * Stereoscopic 3D information in form of the AVStereo3D struct. + */ + AV_PKT_DATA_STEREO3D, + + /** + * This side data should be associated with an audio stream and corresponds + * to enum AVAudioServiceType. + */ + AV_PKT_DATA_AUDIO_SERVICE_TYPE, + + /** + * This side data contains quality related information from the encoder. + * @code + * u32le quality factor of the compressed frame. Allowed range is between 1 (good) and FF_LAMBDA_MAX (bad). + * u8 picture type + * u8 error count + * u16 reserved + * u64le[error count] sum of squared differences between encoder in and output + * @endcode + */ + AV_PKT_DATA_QUALITY_STATS, + + /** + * This side data contains an integer value representing the stream index + * of a "fallback" track. 
A fallback track indicates an alternate + * track to use when the current track can not be decoded for some reason. + * e.g. no decoder available for codec. + */ + AV_PKT_DATA_FALLBACK_TRACK, + + /** + * This side data corresponds to the AVCPBProperties struct. + */ + AV_PKT_DATA_CPB_PROPERTIES, + + /** + * Recommmends skipping the specified number of samples + * @code + * u32le number of samples to skip from start of this packet + * u32le number of samples to skip from end of this packet + * u8 reason for start skip + * u8 reason for end skip (0=padding silence, 1=convergence) + * @endcode + */ + AV_PKT_DATA_SKIP_SAMPLES, + + /** + * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that + * the packet may contain "dual mono" audio specific to Japanese DTV + * and if it is true, recommends only the selected channel to be used. + * @code + * u8 selected channels (0=mail/left, 1=sub/right, 2=both) + * @endcode + */ + AV_PKT_DATA_JP_DUALMONO, + + /** + * A list of zero terminated key/value strings. There is no end marker for + * the list, so it is required to rely on the side data size to stop. + */ + AV_PKT_DATA_STRINGS_METADATA, + + /** + * Subtitle event position + * @code + * u32le x1 + * u32le y1 + * u32le x2 + * u32le y2 + * @endcode + */ + AV_PKT_DATA_SUBTITLE_POSITION, + + /** + * Data found in BlockAdditional element of matroska container. There is + * no end marker for the data, so it is required to rely on the side data + * size to recognize the end. 8 byte id (as found in BlockAddId) followed + * by data. + */ + AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, + + /** + * The optional first identifier line of a WebVTT cue. + */ + AV_PKT_DATA_WEBVTT_IDENTIFIER, + + /** + * The optional settings (rendering instructions) that immediately + * follow the timestamp specifier of a WebVTT cue. + */ + AV_PKT_DATA_WEBVTT_SETTINGS, + + /** + * The optional WebVTT NOTE. + */ + AV_PKT_DATA_WEBVTT_NOTE, + + /** + * A list of zero terminated key/value strings. 
There is no end marker for + * the list, so it is required to rely on the side data size to stop. This + * side data includes updated metadata which appeared in the stream. + */ + AV_PKT_DATA_METADATA_UPDATE, + + /** + * MPEGTS stream ID as uint8_t, this is required to pass the stream ID + * information from the demuxer to the corresponding muxer. + */ + AV_PKT_DATA_MPEGTS_STREAM_ID, + + /** + * Mastering display metadata (based on SMPTE-2086:2014). This metadata + * should be associated with a video stream and contains data in the form + * of the AVMasteringDisplayMetadata struct. + */ + AV_PKT_DATA_MASTERING_DISPLAY_METADATA, + + /** + * This side data should be associated with a video stream and corresponds + * to the AVSphericalMapping structure. + */ + AV_PKT_DATA_SPHERICAL, + + /** + * Content light level (based on CTA-861.3). This metadata should be + * associated with a video stream and contains data in the form of the + * AVContentLightMetadata struct. + */ + AV_PKT_DATA_CONTENT_LIGHT_LEVEL, + + /** + * ATSC A53 Part 4 Closed Captions. This metadata should be associated with + * a video stream. A53 CC bitstream is stored as uint8_t in AVPacketSideData.data. + * The number of bytes of CC data is AVPacketSideData.size. + */ + AV_PKT_DATA_A53_CC, + + /** + * This side data is encryption initialization data. + * The format is not part of ABI, use av_encryption_init_info_* methods to + * access. + */ + AV_PKT_DATA_ENCRYPTION_INIT_INFO, + + /** + * This side data contains encryption info for how to decrypt the packet. + * The format is not part of ABI, use av_encryption_info_* methods to access. + */ + AV_PKT_DATA_ENCRYPTION_INFO, + + /** + * Active Format Description data consisting of a single byte as specified + * in ETSI TS 101 154 using AVActiveFormatDescription enum. + */ + AV_PKT_DATA_AFD, + + /** + * Used to record the time offset of each packet from the start of the current + * playlist. using int64_t, in AV_TIME_BASE. 
+ * @code + * i64le duration + * @endcode + */ + AV_PKT_DATA_PASS_DURATION, + + /** + * Used to pass hls media tags to caller. + * Tag strings are stored in struct HLSMediaTags. + * @code + * u32le tag_num, total number of tags + * u32le len_1, lenght of next tag string, includes tail '\0' + * u8 *tag_1, '\0' terminated + * u32le len_2, lenght of next tag string, includes tail '\0' + * u8 *tag_2, '\0' terminated + * ... + * @endcode + */ + AV_PKT_DATA_HLS_MEDIA_TAGS, + + /** + * DOVI configuration + * ref: + * dolby-vision-bitstreams-within-the-iso-base-media-file-format-v2.1.2, section 2.2 + * dolby-vision-bitstreams-in-mpeg-2-transport-stream-multiplex-v1.2, section 3.3 + * Tags are stored in struct AVDOVIDecoderConfigurationRecord. + */ + AV_PKT_DATA_DOVI_CONF, + + /** + * The number of side data types. + * This is not part of the public API/ABI in the sense that it may + * change when new side data types are added. + * This must stay the last enum value. + * If its value becomes huge, some code using it + * needs to be updated as it assumes it to be smaller than other limits. + */ + AV_PKT_DATA_NB +}; + +#define AV_PKT_DATA_QUALITY_FACTOR AV_PKT_DATA_QUALITY_STATS //DEPRECATED + +typedef struct AVPacketSideData { + uint8_t *data; + int size; + enum AVPacketSideDataType type; +} AVPacketSideData; + +#define MAX_PRIVATE_TAG_NUM 5 +#define MAX_PRIVATE_TAG_SIZE 4096 + +/** + * This structure stores compressed data. It is typically exported by demuxers + * and then passed as input to decoders, or received as output from encoders and + * then passed to muxers. + * + * For video, it should typically contain one compressed frame. For audio it may + * contain several compressed frames. Encoders are allowed to output empty + * packets, with no compressed data, containing only side data + * (e.g. to update some stream parameters at the end of encoding). + * + * AVPacket is one of the few structs in FFmpeg, whose size is a part of public + * ABI. 
Thus it may be allocated on stack and no new fields can be added to it + * without libavcodec and libavformat major bump. + * + * The semantics of data ownership depends on the buf field. + * If it is set, the packet data is dynamically allocated and is + * valid indefinitely until a call to liteav_av_packet_unref() reduces the + * reference count to 0. + * + * If the buf field is not set liteav_av_packet_ref() would make a copy instead + * of increasing the reference count. + * + * The side data is always allocated with liteav_av_malloc(), copied by + * liteav_av_packet_ref() and freed by liteav_av_packet_unref(). + * + * @see liteav_av_packet_ref + * @see liteav_av_packet_unref + */ +typedef struct AVPacket { + /** + * A reference to the reference-counted buffer where the packet data is + * stored. + * May be NULL, then the packet data is not reference-counted. + */ + AVBufferRef *buf; + /** + * Presentation timestamp in AVStream->time_base units; the time at which + * the decompressed packet will be presented to the user. + * Can be AV_NOPTS_VALUE if it is not stored in the file. + * pts MUST be larger or equal to dts as presentation cannot happen before + * decompression, unless one wants to view hex dumps. Some formats misuse + * the terms dts and pts/cts to mean something different. Such timestamps + * must be converted to true pts/dts before they are stored in AVPacket. + */ + int64_t pts; + /** + * Decompression timestamp in AVStream->time_base units; the time at which + * the packet is decompressed. + * Can be AV_NOPTS_VALUE if it is not stored in the file. + */ + int64_t dts; + uint8_t *data; + int size; + int stream_index; + /** + * A combination of AV_PKT_FLAG values + */ + int flags; + /** + * Additional packet data that can be provided by the container. + * Packet can contain several types of side information. + */ + AVPacketSideData *side_data; + int side_data_elems; + + /** + * Duration of this packet in AVStream->time_base units, 0 if unknown. 
+ * Equals next_pts - this_pts in presentation order. + */ + int64_t duration; + + int64_t pos; ///< byte position in stream, -1 if unknown + +#if FF_API_CONVERGENCE_DURATION + /** + * @deprecated Same as the duration field, but as int64_t. This was required + * for Matroska subtitles, whose duration values could overflow when the + * duration field was still an int. + */ + attribute_deprecated + int64_t convergence_duration; + + int sequence_num; + + int64_t last_segment_total_dur; + + int private_tag_num; + char *private_tag_container[MAX_PRIVATE_TAG_NUM]; + int seg_no; +#endif +} AVPacket; +#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe +#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted +#define AV_PKT_FLAG_HLS_NEW_SEGMENT 0x1000 ///< The packet is the first one of a HLS segment +#define AV_PKT_FLAG_HLS_DISCONTINUITY 0x2000 ///< The packet is the first one after receiving an HLS DISCONTINUITY tag + +/** + * Flag is used to discard packets which are required to maintain valid + * decoder state but are not required for output and should be dropped + * after decoding. + **/ +#define AV_PKT_FLAG_DISCARD 0x0004 +/** + * The packet comes from a trusted source. + * + * Otherwise-unsafe constructs such as arbitrary pointers to data + * outside the packet may be followed. + */ +#define AV_PKT_FLAG_TRUSTED 0x0008 +/** + * Flag is used to indicate packets that contain frames that can + * be discarded by the decoder. I.e. Non-reference frames. 
+ */ +#define AV_PKT_FLAG_DISPOSABLE 0x0010 + + +enum AVSideDataParamChangeFlags { + AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001, + AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002, + AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004, + AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008, +}; +/** + * @} + */ + +struct AVCodecInternal; + +enum AVFieldOrder { + AV_FIELD_UNKNOWN, + AV_FIELD_PROGRESSIVE, + AV_FIELD_TT, //< Top coded_first, top displayed first + AV_FIELD_BB, //< Bottom coded first, bottom displayed first + AV_FIELD_TB, //< Top coded first, bottom displayed first + AV_FIELD_BT, //< Bottom coded first, top displayed first +}; + +/** + * main external API structure. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * You can use AVOptions (av_opt* / av_set/get*()) to access these fields from user + * applications. + * The name string for AVOptions options matches the associated command line + * parameter name and can be found in libavcodec/options_table.h + * The AVOption/command line parameter names differ in some cases from the C + * structure field names for historic reasons or brevity. + * sizeof(AVCodecContext) must not be used outside libav*. + */ +typedef struct AVCodecContext { + /** + * information on struct for liteav_av_log + * - set by avcodec_alloc_context3 + */ + const AVClass *av_class; + int log_level_offset; + + enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */ + const struct AVCodec *codec; +#if FF_API_CODEC_NAME + /** + * @deprecated this field is not used for anything in libavcodec + */ + attribute_deprecated + char codec_name[32]; +#endif + enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */ + + /** + * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'). + * This is used to work around some encoder bugs. + * A demuxer should set this to what is stored in the field used to identify the codec. 
+ * If there are multiple such fields in a container then the demuxer should choose the one + * which maximizes the information about the used codec. + * If the codec tag field in a container is larger than 32 bits then the demuxer should + * remap the longer ID to 32 bits with a table or other structure. Alternatively a new + * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated + * first. + * - encoding: Set by user, if not then the default based on codec_id will be used. + * - decoding: Set by user, will be converted to uppercase by libavcodec during init. + */ + unsigned int codec_tag; + + void *priv_data; + + /** + * Private context used for internal data. + * + * Unlike priv_data, this is not codec-specific. It is used in general + * libavcodec functions. + */ + struct AVCodecInternal *internal; + + /** + * Private data of the user, can be used to carry app specific stuff. + * - encoding: Set by user. + * - decoding: Set by user. + */ + void *opaque; + + /** + * the average bitrate + * - encoding: Set by user; unused for constant quantizer encoding. + * - decoding: Set by user, may be overwritten by libavcodec + * if this info is available in the stream + */ + int64_t bit_rate; + + /** + * number of bits the bitstream is allowed to diverge from the reference. + * the reference can be CBR (for CBR pass1) or VBR (for pass2) + * - encoding: Set by user; unused for constant quantizer encoding. + * - decoding: unused + */ + int bit_rate_tolerance; + + /** + * Global quality for codecs which cannot change it per frame. + * This should be proportional to MPEG-1/2/4 qscale. + * - encoding: Set by user. + * - decoding: unused + */ + int global_quality; + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int compression_level; +#define FF_COMPRESSION_DEFAULT -1 + + /** + * AV_CODEC_FLAG_*. + * - encoding: Set by user. + * - decoding: Set by user. 
+ */ + int flags; + + /** + * AV_CODEC_FLAG2_* + * - encoding: Set by user. + * - decoding: Set by user. + */ + int flags2; + + /** + * some codecs need / can use extradata like Huffman tables. + * MJPEG: Huffman tables + * rv10: additional flags + * MPEG-4: global headers (they can be in the bitstream or here) + * The allocated memory should be AV_INPUT_BUFFER_PADDING_SIZE bytes larger + * than extradata_size to avoid problems if it is read with the bitstream reader. + * The bytewise contents of extradata must not depend on the architecture or CPU endianness. + * Must be allocated with the liteav_av_malloc() family of functions. + * - encoding: Set/allocated/freed by libavcodec. + * - decoding: Set/allocated/freed by user. + */ + uint8_t *extradata; + int extradata_size; + + /** + * This is the fundamental unit of time (in seconds) in terms + * of which frame timestamps are represented. For fixed-fps content, + * timebase should be 1/framerate and timestamp increments should be + * identically 1. + * This often, but not always is the inverse of the frame rate or field rate + * for video. 1/time_base is not the average frame rate if the frame rate is not + * constant. + * + * Like containers, elementary streams also can store timestamps, 1/time_base + * is the unit in which these timestamps are specified. + * As example of such codec time base see ISO/IEC 14496-2:2001(E) + * vop_time_increment_resolution and fixed_vop_rate + * (fixed_vop_rate == 0 implies that it is different from the framerate) + * + * - encoding: MUST be set by user. + * - decoding: the use of this field for decoding is deprecated. + * Use framerate instead. + */ + AVRational time_base; + + /** + * For some codecs, the time base is closer to the field rate than the frame rate. + * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration + * if no telecine is used ... + * + * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2. 
+ */ + int ticks_per_frame; + + /** + * Codec delay. + * + * Encoding: Number of frames delay there will be from the encoder input to + * the decoder output. (we assume the decoder matches the spec) + * Decoding: Number of frames delay in addition to what a standard decoder + * as specified in the spec would produce. + * + * Video: + * Number of frames the decoded output will be delayed relative to the + * encoded input. + * + * Audio: + * For encoding, this field is unused (see initial_padding). + * + * For decoding, this is the number of samples the decoder needs to + * output before the decoder's output is valid. When seeking, you should + * start decoding this many samples prior to your desired seek point. + * + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int delay; + + + /* video only */ + /** + * picture width / height. + * + * @note Those fields may not match the values of the last + * AVFrame output by liteav_avcodec_decode_video2 due frame + * reordering. + * + * - encoding: MUST be set by user. + * - decoding: May be set by the user before opening the decoder if known e.g. + * from the container. Some decoders will require the dimensions + * to be set by the caller. During decoding, the decoder may + * overwrite those values as required while parsing the data. + */ + int width, height; + + /** + * Bitstream width / height, may be different from width/height e.g. when + * the decoded frame is cropped before being output or lowres is enabled. + * + * @note Those field may not match the value of the last + * AVFrame output by liteav_avcodec_receive_frame() due frame + * reordering. + * + * - encoding: unused + * - decoding: May be set by the user before opening the decoder if known + * e.g. from the container. During decoding, the decoder may + * overwrite those values as required while parsing the data. 
+ */ + int coded_width, coded_height; + + /** + * the number of pictures in a group of pictures, or 0 for intra_only + * - encoding: Set by user. + * - decoding: unused + */ + int gop_size; + + /** + * Pixel format, see AV_PIX_FMT_xxx. + * May be set by the demuxer if known from headers. + * May be overridden by the decoder if it knows better. + * + * @note This field may not match the value of the last + * AVFrame output by liteav_avcodec_receive_frame() due frame + * reordering. + * + * - encoding: Set by user. + * - decoding: Set by user if known, overridden by libavcodec while + * parsing the data. + */ + enum AVPixelFormat pix_fmt; + + /** + * If non NULL, 'draw_horiz_band' is called by the libavcodec + * decoder to draw a horizontal band. It improves cache usage. Not + * all codecs can do that. You must check the codec capabilities + * beforehand. + * When multithreading is used, it may be called from multiple threads + * at the same time; threads might draw different parts of the same AVFrame, + * or multiple AVFrames, and there is no guarantee that slices will be drawn + * in order. + * The function is also used by hardware acceleration APIs. + * It is called at least once during frame decoding to pass + * the data needed for hardware render. + * In that mode instead of pixel data, AVFrame points to + * a structure specific to the acceleration API. The application + * reads the structure and can change some fields to indicate progress + * or mark state. + * - encoding: unused + * - decoding: Set by user. 
+ * @param height the height of the slice + * @param y the y position of the slice + * @param type 1->top field, 2->bottom field, 3->frame + * @param offset offset into the AVFrame.data from which the slice should be read + */ + void (*draw_horiz_band)(struct AVCodecContext *s, + const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], + int y, int type, int height); + + /** + * callback to negotiate the pixelFormat + * @param fmt is the list of formats which are supported by the codec, + * it is terminated by -1 as 0 is a valid format, the formats are ordered by quality. + * The first is always the native one. + * @note The callback may be called again immediately if initialization for + * the selected (hardware-accelerated) pixel format failed. + * @warning Behavior is undefined if the callback returns a value not + * in the fmt list of formats. + * @return the chosen format + * - encoding: unused + * - decoding: Set by user, if not set the native format will be chosen. + */ + enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt); + + /** + * maximum number of B-frames between non-B-frames + * Note: The output will be delayed by max_b_frames+1 relative to the input. + * - encoding: Set by user. + * - decoding: unused + */ + int max_b_frames; + + /** + * qscale factor between IP and B-frames + * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset). + * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). + * - encoding: Set by user. + * - decoding: unused + */ + float b_quant_factor; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int b_frame_strategy; +#endif + + /** + * qscale offset between IP and B-frames + * - encoding: Set by user. + * - decoding: unused + */ + float b_quant_offset; + + /** + * Size of the frame reordering buffer in the decoder. + * For MPEG-2 it is 1 IPB or 0 low delay IP. 
+ * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int has_b_frames; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int mpeg_quant; +#endif + + /** + * qscale factor between P- and I-frames + * If > 0 then the last P-frame quantizer will be used (q = lastp_q * factor + offset). + * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). + * - encoding: Set by user. + * - decoding: unused + */ + float i_quant_factor; + + /** + * qscale offset between P and I-frames + * - encoding: Set by user. + * - decoding: unused + */ + float i_quant_offset; + + /** + * luminance masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float lumi_masking; + + /** + * temporary complexity masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float temporal_cplx_masking; + + /** + * spatial complexity masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float spatial_cplx_masking; + + /** + * p block masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float p_masking; + + /** + * darkness masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float dark_masking; + + /** + * slice count + * - encoding: Set by libavcodec. + * - decoding: Set by user (or 0). + */ + int slice_count; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int prediction_method; +#define FF_PRED_LEFT 0 +#define FF_PRED_PLANE 1 +#define FF_PRED_MEDIAN 2 +#endif + + /** + * slice offsets in the frame in bytes + * - encoding: Set/allocated by libavcodec. + * - decoding: Set/allocated by user (or NULL). + */ + int *slice_offset; + + /** + * sample aspect ratio (0 if unknown) + * That is the width of a pixel divided by the height of the pixel. 
+ * Numerator and denominator must be relatively prime and smaller than 256 for some video standards. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + AVRational sample_aspect_ratio; + + /** + * motion estimation comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_cmp; + /** + * subpixel motion estimation comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_sub_cmp; + /** + * macroblock comparison function (not supported yet) + * - encoding: Set by user. + * - decoding: unused + */ + int mb_cmp; + /** + * interlaced DCT comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int ildct_cmp; +#define FF_CMP_SAD 0 +#define FF_CMP_SSE 1 +#define FF_CMP_SATD 2 +#define FF_CMP_DCT 3 +#define FF_CMP_PSNR 4 +#define FF_CMP_BIT 5 +#define FF_CMP_RD 6 +#define FF_CMP_ZERO 7 +#define FF_CMP_VSAD 8 +#define FF_CMP_VSSE 9 +#define FF_CMP_NSSE 10 +#define FF_CMP_W53 11 +#define FF_CMP_W97 12 +#define FF_CMP_DCTMAX 13 +#define FF_CMP_DCT264 14 +#define FF_CMP_MEDIAN_SAD 15 +#define FF_CMP_CHROMA 256 + + /** + * ME diamond size & shape + * - encoding: Set by user. + * - decoding: unused + */ + int dia_size; + + /** + * amount of previous MV predictors (2a+1 x 2a+1 square) + * - encoding: Set by user. + * - decoding: unused + */ + int last_predictor_count; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int pre_me; +#endif + + /** + * motion estimation prepass comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_pre_cmp; + + /** + * ME prepass diamond size & shape + * - encoding: Set by user. + * - decoding: unused + */ + int pre_dia_size; + + /** + * subpel ME quality + * - encoding: Set by user. + * - decoding: unused + */ + int me_subpel_quality; + + /** + * maximum motion estimation search range in subpel units + * If 0 then no limit. + * + * - encoding: Set by user. 
+ * - decoding: unused + */ + int me_range; + + /** + * slice flags + * - encoding: unused + * - decoding: Set by user. + */ + int slice_flags; +#define SLICE_FLAG_CODED_ORDER 0x0001 ///< draw_horiz_band() is called in coded order instead of display +#define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG-2 field pics) +#define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1) + + /** + * macroblock decision mode + * - encoding: Set by user. + * - decoding: unused + */ + int mb_decision; +#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp +#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits +#define FF_MB_DECISION_RD 2 ///< rate distortion + + /** + * custom intra quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: Set by libavcodec. + */ + uint16_t *intra_matrix; + + /** + * custom inter quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: Set by libavcodec. + */ + uint16_t *inter_matrix; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int scenechange_threshold; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int noise_reduction; +#endif + + /** + * precision of the intra DC coefficient - 8 + * - encoding: Set by user. + * - decoding: Set by libavcodec + */ + int intra_dc_precision; + + /** + * Number of macroblock rows at the top which are skipped. + * - encoding: unused + * - decoding: Set by user. + */ + int skip_top; + + /** + * Number of macroblock rows at the bottom which are skipped. + * - encoding: unused + * - decoding: Set by user. + */ + int skip_bottom; + + /** + * minimum MB Lagrange multiplier + * - encoding: Set by user. + * - decoding: unused + */ + int mb_lmin; + + /** + * maximum MB Lagrange multiplier + * - encoding: Set by user. 
+ * - decoding: unused + */ + int mb_lmax; + +#if FF_API_PRIVATE_OPT + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + int me_penalty_compensation; +#endif + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int bidir_refine; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int brd_scale; +#endif + + /** + * minimum GOP size + * - encoding: Set by user. + * - decoding: unused + */ + int keyint_min; + + /** + * number of reference frames + * - encoding: Set by user. + * - decoding: Set by lavc. + */ + int refs; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int chromaoffset; +#endif + + /** + * Note: Value depends upon the compare function used for fullpel ME. + * - encoding: Set by user. + * - decoding: unused + */ + int mv0_threshold; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int b_sensitivity; +#endif + + /** + * Chromaticity coordinates of the source primaries. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorPrimaries color_primaries; + + /** + * Color Transfer Characteristic. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorTransferCharacteristic color_trc; + + /** + * YUV colorspace type. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorSpace colorspace; + + /** + * MPEG vs JPEG YUV range. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorRange color_range; + + /** + * This defines the location of chroma samples. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVChromaLocation chroma_sample_location; + + /** + * Number of slices. + * Indicates number of picture subdivisions. Used for parallelized + * decoding. 
+ * - encoding: Set by user + * - decoding: unused + */ + int slices; + + /** Field order + * - encoding: set by libavcodec + * - decoding: Set by user. + */ + enum AVFieldOrder field_order; + + /* audio only */ + int sample_rate; ///< samples per second + int channels; ///< number of audio channels + + /** + * audio sample format + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + enum AVSampleFormat sample_fmt; ///< sample format + + /* The following data should not be initialized. */ + /** + * Number of samples per channel in an audio frame. + * + * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame + * except the last must contain exactly frame_size samples per channel. + * May be 0 when the codec has AV_CODEC_CAP_VARIABLE_FRAME_SIZE set, then the + * frame size is not restricted. + * - decoding: may be set by some decoders to indicate constant frame size + */ + int frame_size; + + /** + * Frame counter, set by libavcodec. + * + * - decoding: total number of frames returned from the decoder so far. + * - encoding: total number of frames passed to the encoder so far. + * + * @note the counter is not incremented if encoding/decoding resulted in + * an error. + */ + int frame_number; + + /** + * number of bytes per packet if constant and known or 0 + * Used by some WAV based audio codecs. + */ + int block_align; + + /** + * Audio cutoff bandwidth (0 means "automatic") + * - encoding: Set by user. + * - decoding: unused + */ + int cutoff; + + /** + * Audio channel layout. + * - encoding: set by user. + * - decoding: set by user, may be overwritten by libavcodec. + */ + uint64_t channel_layout; + + /** + * Request decoder to use this channel layout if it can (0 for default) + * - encoding: unused + * - decoding: Set by user. + */ + uint64_t request_channel_layout; + + /** + * Type of service that the audio stream conveys. + * - encoding: Set by user. + * - decoding: Set by libavcodec. 
+ */ + enum AVAudioServiceType audio_service_type; + + /** + * desired sample format + * - encoding: Not used. + * - decoding: Set by user. + * Decoder will decode to this format if it can. + */ + enum AVSampleFormat request_sample_fmt; + + /** + * This callback is called at the beginning of each frame to get data + * buffer(s) for it. There may be one contiguous buffer for all the data or + * there may be a buffer per each data plane or anything in between. What + * this means is, you may set however many entries in buf[] you feel necessary. + * Each buffer must be reference-counted using the AVBuffer API (see description + * of buf[] below). + * + * The following fields will be set in the frame before this callback is + * called: + * - format + * - width, height (video only) + * - sample_rate, channel_layout, nb_samples (audio only) + * Their values may differ from the corresponding values in + * AVCodecContext. This callback must use the frame values, not the codec + * context values, to calculate the required buffer size. + * + * This callback must fill the following fields in the frame: + * - data[] + * - linesize[] + * - extended_data: + * * if the data is planar audio with more than 8 channels, then this + * callback must allocate and fill extended_data to contain all pointers + * to all data planes. data[] must hold as many pointers as it can. + * extended_data must be allocated with liteav_av_malloc() and will be freed in + * liteav_av_frame_unref(). + * * otherwise extended_data must point to data + * - buf[] must contain one or more pointers to AVBufferRef structures. Each of + * the frame's data and extended_data pointers must be contained in these. That + * is, one AVBufferRef for each allocated chunk of memory, not necessarily one + * AVBufferRef per data[] entry. See: liteav_av_buffer_create(), liteav_av_buffer_alloc(), + * and liteav_av_buffer_ref(). 
+ * - extended_buf and nb_extended_buf must be allocated with liteav_av_malloc() by + * this callback and filled with the extra buffers if there are more + * buffers than buf[] can hold. extended_buf will be freed in + * liteav_av_frame_unref(). + * + * If AV_CODEC_CAP_DR1 is not set then get_buffer2() must call + * liteav_avcodec_default_get_buffer2() instead of providing buffers allocated by + * some other means. + * + * Each data plane must be aligned to the maximum required by the target + * CPU. + * + * @see liteav_avcodec_default_get_buffer2() + * + * Video: + * + * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused + * (read and/or written to if it is writable) later by libavcodec. + * + * avcodec_align_dimensions2() should be used to find the required width and + * height, as they normally need to be rounded up to the next multiple of 16. + * + * Some decoders do not support linesizes changing between frames. + * + * If frame multithreading is used and thread_safe_callbacks is set, + * this callback may be called from a different thread, but not from more + * than one at once. Does not need to be reentrant. + * + * @see avcodec_align_dimensions2() + * + * Audio: + * + * Decoders request a buffer of a particular size by setting + * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may, + * however, utilize only part of the buffer by setting AVFrame.nb_samples + * to a smaller value in the output frame. + * + * As a convenience, liteav_av_samples_get_buffer_size() and + * liteav_av_samples_fill_arrays() in libavutil may be used by custom get_buffer2() + * functions to find the required data size and to fill data pointers and + * linesize. In AVFrame.linesize, only linesize[0] may be set for audio + * since all planes must be the same size. + * + * @see liteav_av_samples_get_buffer_size(), liteav_av_samples_fill_arrays() + * + * - encoding: unused + * - decoding: Set by libavcodec, user can override. 
+ */ + int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags); + + /** + * If non-zero, the decoded audio and video frames returned from + * liteav_avcodec_decode_video2() and liteav_avcodec_decode_audio4() are reference-counted + * and are valid indefinitely. The caller must free them with + * liteav_av_frame_unref() when they are not needed anymore. + * Otherwise, the decoded frames must not be freed by the caller and are + * only valid until the next decode call. + * + * This is always automatically enabled if liteav_avcodec_receive_frame() is used. + * + * - encoding: unused + * - decoding: set by the caller before avcodec_open2(). + */ + attribute_deprecated + int refcounted_frames; + + /* - encoding parameters */ + float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0) + float qblur; ///< amount of qscale smoothing over time (0.0-1.0) + + /** + * minimum quantizer + * - encoding: Set by user. + * - decoding: unused + */ + int qmin; + + /** + * maximum quantizer + * - encoding: Set by user. + * - decoding: unused + */ + int qmax; + + /** + * maximum quantizer difference between frames + * - encoding: Set by user. + * - decoding: unused + */ + int max_qdiff; + + /** + * decoder bitstream buffer size + * - encoding: Set by user. + * - decoding: unused + */ + int rc_buffer_size; + + /** + * ratecontrol override, see RcOverride + * - encoding: Allocated/set/freed by user. + * - decoding: unused + */ + int rc_override_count; + RcOverride *rc_override; + + /** + * maximum bitrate + * - encoding: Set by user. + * - decoding: Set by user, may be overwritten by libavcodec. + */ + int64_t rc_max_rate; + + /** + * minimum bitrate + * - encoding: Set by user. + * - decoding: unused + */ + int64_t rc_min_rate; + + /** + * Ratecontrol attempt to use, at maximum, <value> of what can be used without an underflow. + * - encoding: Set by user. + * - decoding: unused. 
+ */ + float rc_max_available_vbv_use; + + /** + * Ratecontrol attempt to use, at least, <value> times the amount needed to prevent a vbv overflow. + * - encoding: Set by user. + * - decoding: unused. + */ + float rc_min_vbv_overflow_use; + + /** + * Number of bits which should be loaded into the rc buffer before decoding starts. + * - encoding: Set by user. + * - decoding: unused + */ + int rc_initial_buffer_occupancy; + +#if FF_API_CODER_TYPE +#define FF_CODER_TYPE_VLC 0 +#define FF_CODER_TYPE_AC 1 +#define FF_CODER_TYPE_RAW 2 +#define FF_CODER_TYPE_RLE 3 + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + int coder_type; +#endif /* FF_API_CODER_TYPE */ + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int context_model; +#endif + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int frame_skip_threshold; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int frame_skip_factor; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int frame_skip_exp; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int frame_skip_cmp; +#endif /* FF_API_PRIVATE_OPT */ + + /** + * trellis RD quantization + * - encoding: Set by user. + * - decoding: unused + */ + int trellis; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int min_prediction_order; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int max_prediction_order; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int64_t timecode_frame_start; +#endif + +#if FF_API_RTP_CALLBACK + /** + * @deprecated unused + */ + /* The RTP callback: This function is called */ + /* every time the encoder has a packet to send. 
*/ + /* It depends on the encoder if the data starts */ + /* with a Start Code (it should). H.263 does. */ + /* mb_nb contains the number of macroblocks */ + /* encoded in the RTP payload. */ + attribute_deprecated + void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb); +#endif + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int rtp_payload_size; /* The size of the RTP payload: the coder will */ + /* do its best to deliver a chunk with size */ + /* below rtp_payload_size, the chunk will start */ + /* with a start code on some codecs like H.263. */ + /* This doesn't take account of any particular */ + /* headers inside the transmitted RTP payload. */ +#endif + +#if FF_API_STAT_BITS + /* statistics, used for 2-pass encoding */ + attribute_deprecated + int mv_bits; + attribute_deprecated + int header_bits; + attribute_deprecated + int i_tex_bits; + attribute_deprecated + int p_tex_bits; + attribute_deprecated + int i_count; + attribute_deprecated + int p_count; + attribute_deprecated + int skip_count; + attribute_deprecated + int misc_bits; + + /** @deprecated this field is unused */ + attribute_deprecated + int frame_bits; +#endif + + /** + * pass1 encoding statistics output buffer + * - encoding: Set by libavcodec. + * - decoding: unused + */ + char *stats_out; + + /** + * pass2 encoding statistics input buffer + * Concatenated stuff from stats_out of pass1 should be placed here. + * - encoding: Allocated/set/freed by user. + * - decoding: unused + */ + char *stats_in; + + /** + * Work around bugs in encoders which sometimes cannot be detected automatically. 
+ * - encoding: Set by user + * - decoding: Set by user + */ + int workaround_bugs; +#define FF_BUG_AUTODETECT 1 ///< autodetection +#define FF_BUG_XVID_ILACE 4 +#define FF_BUG_UMP4 8 +#define FF_BUG_NO_PADDING 16 +#define FF_BUG_AMV 32 +#define FF_BUG_QPEL_CHROMA 64 +#define FF_BUG_STD_QPEL 128 +#define FF_BUG_QPEL_CHROMA2 256 +#define FF_BUG_DIRECT_BLOCKSIZE 512 +#define FF_BUG_EDGE 1024 +#define FF_BUG_HPEL_CHROMA 2048 +#define FF_BUG_DC_CLIP 4096 +#define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders. +#define FF_BUG_TRUNCATED 16384 +#define FF_BUG_IEDGE 32768 + + /** + * strictly follow the standard (MPEG-4, ...). + * - encoding: Set by user. + * - decoding: Set by user. + * Setting this to STRICT or higher means the encoder and decoder will + * generally do stupid things, whereas setting it to unofficial or lower + * will mean the encoder might produce output that is not supported by all + * spec-compliant decoders. Decoders don't differentiate between normal, + * unofficial and experimental (that is, they always try to decode things + * when they can) unless they are explicitly asked to behave stupidly + * (=strictly conform to the specs) + */ + int strict_std_compliance; +#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software. +#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences. +#define FF_COMPLIANCE_NORMAL 0 +#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions +#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things. + + /** + * error concealment flags + * - encoding: unused + * - decoding: Set by user. + */ + int error_concealment; +#define FF_EC_GUESS_MVS 1 +#define FF_EC_DEBLOCK 2 +#define FF_EC_FAVOR_INTER 256 + + /** + * debug + * - encoding: Set by user. + * - decoding: Set by user. 
+ */ + int debug; +#define FF_DEBUG_PICT_INFO 1 +#define FF_DEBUG_RC 2 +#define FF_DEBUG_BITSTREAM 4 +#define FF_DEBUG_MB_TYPE 8 +#define FF_DEBUG_QP 16 +#if FF_API_DEBUG_MV +/** + * @deprecated this option does nothing + */ +#define FF_DEBUG_MV 32 +#endif +#define FF_DEBUG_DCT_COEFF 0x00000040 +#define FF_DEBUG_SKIP 0x00000080 +#define FF_DEBUG_STARTCODE 0x00000100 +#define FF_DEBUG_ER 0x00000400 +#define FF_DEBUG_MMCO 0x00000800 +#define FF_DEBUG_BUGS 0x00001000 +#if FF_API_DEBUG_MV +#define FF_DEBUG_VIS_QP 0x00002000 +#define FF_DEBUG_VIS_MB_TYPE 0x00004000 +#endif +#define FF_DEBUG_BUFFERS 0x00008000 +#define FF_DEBUG_THREADS 0x00010000 +#define FF_DEBUG_GREEN_MD 0x00800000 +#define FF_DEBUG_NOMC 0x01000000 + +#if FF_API_DEBUG_MV + /** + * debug + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug_mv; +#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 // visualize forward predicted MVs of P-frames +#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 // visualize forward predicted MVs of B-frames +#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 // visualize backward predicted MVs of B-frames +#endif + + /** + * Error recognition; may misdetect some more or less valid parts as errors. + * - encoding: unused + * - decoding: Set by user. + */ + int err_recognition; + +/** + * Verify checksums embedded in the bitstream (could be of either encoded or + * decoded data, depending on the codec) and print an error message on mismatch. + * If AV_EF_EXPLODE is also set, a mismatching checksum will result in the + * decoder returning an error. 
+ */ +#define AV_EF_CRCCHECK (1<<0) +#define AV_EF_BITSTREAM (1<<1) ///< detect bitstream specification deviations +#define AV_EF_BUFFER (1<<2) ///< detect improper bitstream length +#define AV_EF_EXPLODE (1<<3) ///< abort decoding on minor error detection + +#define AV_EF_IGNORE_ERR (1<<15) ///< ignore errors and continue +#define AV_EF_CAREFUL (1<<16) ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors +#define AV_EF_COMPLIANT (1<<17) ///< consider all spec non compliances as errors +#define AV_EF_AGGRESSIVE (1<<18) ///< consider things that a sane encoder should not do as an error + + + /** + * opaque 64-bit number (generally a PTS) that will be reordered and + * output in AVFrame.reordered_opaque + * - encoding: unused + * - decoding: Set by user. + */ + int64_t reordered_opaque; + + /** + * Hardware accelerator in use + * - encoding: unused. + * - decoding: Set by libavcodec + */ + const struct AVHWAccel *hwaccel; + + /** + * Hardware accelerator context. + * For some hardware accelerators, a global context needs to be + * provided by the user. In that case, this holds display-dependent + * data FFmpeg cannot instantiate itself. Please refer to the + * FFmpeg HW accelerator documentation to know how to fill this + * is. e.g. for VA API, this is a struct vaapi_context. + * - encoding: unused + * - decoding: Set by user + */ + void *hwaccel_context; + + /** + * error + * - encoding: Set by libavcodec if flags & AV_CODEC_FLAG_PSNR. + * - decoding: unused + */ + uint64_t error[AV_NUM_DATA_POINTERS]; + + /** + * DCT algorithm, see FF_DCT_* below + * - encoding: Set by user. + * - decoding: unused + */ + int dct_algo; +#define FF_DCT_AUTO 0 +#define FF_DCT_FASTINT 1 +#define FF_DCT_INT 2 +#define FF_DCT_MMX 3 +#define FF_DCT_ALTIVEC 5 +#define FF_DCT_FAAN 6 + + /** + * IDCT algorithm, see FF_IDCT_* below. + * - encoding: Set by user. + * - decoding: Set by user. 
+ */ + int idct_algo; +#define FF_IDCT_AUTO 0 +#define FF_IDCT_INT 1 +#define FF_IDCT_SIMPLE 2 +#define FF_IDCT_SIMPLEMMX 3 +#define FF_IDCT_ARM 7 +#define FF_IDCT_ALTIVEC 8 +#define FF_IDCT_SIMPLEARM 10 +#define FF_IDCT_XVID 14 +#define FF_IDCT_SIMPLEARMV5TE 16 +#define FF_IDCT_SIMPLEARMV6 17 +#define FF_IDCT_FAAN 20 +#define FF_IDCT_SIMPLENEON 22 +#define FF_IDCT_NONE 24 /* Used by XvMC to extract IDCT coefficients with FF_IDCT_PERM_NONE */ +#define FF_IDCT_SIMPLEAUTO 128 + + /** + * bits per sample/pixel from the demuxer (needed for huffyuv). + * - encoding: Set by libavcodec. + * - decoding: Set by user. + */ + int bits_per_coded_sample; + + /** + * Bits per sample/pixel of internal libavcodec pixel/sample format. + * - encoding: set by user. + * - decoding: set by libavcodec. + */ + int bits_per_raw_sample; + +#if FF_API_LOWRES + /** + * low resolution decoding, 1-> 1/2 size, 2->1/4 size + * - encoding: unused + * - decoding: Set by user. + */ + int lowres; +#endif + +#if FF_API_CODED_FRAME + /** + * the picture in the bitstream + * - encoding: Set by libavcodec. + * - decoding: unused + * + * @deprecated use the quality factor packet side data instead + */ + attribute_deprecated AVFrame *coded_frame; +#endif + + /** + * thread count + * is used to decide how many independent tasks should be passed to execute() + * - encoding: Set by user. + * - decoding: Set by user. + */ + int thread_count; + + /** + * Which multithreading methods to use. + * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread, + * so clients which cannot provide future frames should not use it. + * + * - encoding: Set by user, otherwise the default is used. + * - decoding: Set by user, otherwise the default is used. + */ + int thread_type; +#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once +#define FF_THREAD_SLICE 2 ///< Decode more than one part of a single frame at once + + /** + * Which multithreading methods are in use by the codec. 
+ * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int active_thread_type; + + /** + * Set by the client if its custom get_buffer() callback can be called + * synchronously from another thread, which allows faster multithreaded decoding. + * draw_horiz_band() will be called from other threads regardless of this setting. + * Ignored if the default get_buffer() is used. + * - encoding: Set by user. + * - decoding: Set by user. + */ + int thread_safe_callbacks; + + /** + * The codec may call this to execute several independent things. + * It will return only after finishing all tasks. + * The user may replace this with some multithreaded implementation, + * the default implementation will execute the parts serially. + * @param count the number of things to execute + * - encoding: Set by libavcodec, user can override. + * - decoding: Set by libavcodec, user can override. + */ + int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size); + + /** + * The codec may call this to execute several independent things. + * It will return only after finishing all tasks. + * The user may replace this with some multithreaded implementation, + * the default implementation will execute the parts serially. + * Also see avcodec_thread_init and e.g. the --enable-pthread configure option. + * @param c context passed also to func + * @param count the number of things to execute + * @param arg2 argument passed unchanged to func + * @param ret return values of executed functions, must have space for "count" values. May be NULL. + * @param func function that will be called count times, with jobnr from 0 to count-1. + * threadnr will be in the range 0 to c->thread_count-1 < MAX_THREADS and so that no + * two instances of func executing at the same time will have the same threadnr. 
+ * @return always 0 currently, but code should handle a future improvement where when any call to func + * returns < 0 no further calls to func may be done and < 0 is returned. + * - encoding: Set by libavcodec, user can override. + * - decoding: Set by libavcodec, user can override. + */ + int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count); + + /** + * noise vs. sse weight for the nsse comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int nsse_weight; + + /** + * profile + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int profile; +#define FF_PROFILE_UNKNOWN -99 +#define FF_PROFILE_RESERVED -100 + +#define FF_PROFILE_AAC_MAIN 0 +#define FF_PROFILE_AAC_LOW 1 +#define FF_PROFILE_AAC_SSR 2 +#define FF_PROFILE_AAC_LTP 3 +#define FF_PROFILE_AAC_HE 4 +#define FF_PROFILE_AAC_HE_V2 28 +#define FF_PROFILE_AAC_LD 22 +#define FF_PROFILE_AAC_ELD 38 +#define FF_PROFILE_MPEG2_AAC_LOW 128 +#define FF_PROFILE_MPEG2_AAC_HE 131 + +#define FF_PROFILE_DNXHD 0 +#define FF_PROFILE_DNXHR_LB 1 +#define FF_PROFILE_DNXHR_SQ 2 +#define FF_PROFILE_DNXHR_HQ 3 +#define FF_PROFILE_DNXHR_HQX 4 +#define FF_PROFILE_DNXHR_444 5 + +#define FF_PROFILE_DTS 20 +#define FF_PROFILE_DTS_ES 30 +#define FF_PROFILE_DTS_96_24 40 +#define FF_PROFILE_DTS_HD_HRA 50 +#define FF_PROFILE_DTS_HD_MA 60 +#define FF_PROFILE_DTS_EXPRESS 70 + +#define FF_PROFILE_MPEG2_422 0 +#define FF_PROFILE_MPEG2_HIGH 1 +#define FF_PROFILE_MPEG2_SS 2 +#define FF_PROFILE_MPEG2_SNR_SCALABLE 3 +#define FF_PROFILE_MPEG2_MAIN 4 +#define FF_PROFILE_MPEG2_SIMPLE 5 + +#define FF_PROFILE_H264_CONSTRAINED (1<<9) // 8+1; constraint_set1_flag +#define FF_PROFILE_H264_INTRA (1<<11) // 8+3; constraint_set3_flag + +#define FF_PROFILE_H264_BASELINE 66 +#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED) +#define FF_PROFILE_H264_MAIN 77 +#define FF_PROFILE_H264_EXTENDED 88 
+#define FF_PROFILE_H264_HIGH 100 +#define FF_PROFILE_H264_HIGH_10 110 +#define FF_PROFILE_H264_HIGH_10_INTRA (110|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_MULTIVIEW_HIGH 118 +#define FF_PROFILE_H264_HIGH_422 122 +#define FF_PROFILE_H264_HIGH_422_INTRA (122|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_STEREO_HIGH 128 +#define FF_PROFILE_H264_HIGH_444 144 +#define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244 +#define FF_PROFILE_H264_HIGH_444_INTRA (244|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_CAVLC_444 44 + +#define FF_PROFILE_VC1_SIMPLE 0 +#define FF_PROFILE_VC1_MAIN 1 +#define FF_PROFILE_VC1_COMPLEX 2 +#define FF_PROFILE_VC1_ADVANCED 3 + +#define FF_PROFILE_MPEG4_SIMPLE 0 +#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1 +#define FF_PROFILE_MPEG4_CORE 2 +#define FF_PROFILE_MPEG4_MAIN 3 +#define FF_PROFILE_MPEG4_N_BIT 4 +#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5 +#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6 +#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7 +#define FF_PROFILE_MPEG4_HYBRID 8 +#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9 +#define FF_PROFILE_MPEG4_CORE_SCALABLE 10 +#define FF_PROFILE_MPEG4_ADVANCED_CODING 11 +#define FF_PROFILE_MPEG4_ADVANCED_CORE 12 +#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13 +#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14 +#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15 + +#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0 1 +#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1 2 +#define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION 32768 +#define FF_PROFILE_JPEG2000_DCINEMA_2K 3 +#define FF_PROFILE_JPEG2000_DCINEMA_4K 4 + +#define FF_PROFILE_VP9_0 0 +#define FF_PROFILE_VP9_1 1 +#define FF_PROFILE_VP9_2 2 +#define FF_PROFILE_VP9_3 3 + +#define FF_PROFILE_HEVC_MAIN 1 +#define FF_PROFILE_HEVC_MAIN_10 2 +#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3 +#define FF_PROFILE_HEVC_REXT 4 + +#define FF_PROFILE_AV1_MAIN 0 +#define FF_PROFILE_AV1_HIGH 1 +#define FF_PROFILE_AV1_PROFESSIONAL 2 + +#define 
FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT 0xc0 +#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT 0xc1 +#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT 0xc2 +#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS 0xc3 +#define FF_PROFILE_MJPEG_JPEG_LS 0xf7 + +#define FF_PROFILE_SBC_MSBC 1 + +#define FF_PROFILE_AVS3_MAIN 1 +#define FF_PROFILE_AVS3_MAIN_10 2 + + /** + * level + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int level; +#define FF_LEVEL_UNKNOWN -99 + + /** + * Skip loop filtering for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_loop_filter; + + /** + * Skip IDCT/dequantization for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_idct; + + /** + * Skip decoding for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_frame; + + /** + * Header containing style information for text subtitles. + * For SUBTITLE_ASS subtitle type, it should contain the whole ASS + * [Script Info] and [V4+ Styles] section, plus the [Events] line and + * the Format line following. It shouldn't include any Dialogue line. + * - encoding: Set/allocated/freed by user (before avcodec_open2()) + * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2()) + */ + uint8_t *subtitle_header; + int subtitle_header_size; + +#if FF_API_VBV_DELAY + /** + * VBV delay coded in the last frame (in periods of a 27 MHz clock). + * Used for compliant TS muxing. + * - encoding: Set by libavcodec. + * - decoding: unused. + * @deprecated this value is now exported as a part of + * AV_PKT_DATA_CPB_PROPERTIES packet side data + */ + attribute_deprecated + uint64_t vbv_delay; +#endif + +#if FF_API_SIDEDATA_ONLY_PKT + /** + * Encoding only and set by default. Allow encoders to output packets + * that do not contain any encoded data, only side data. + * + * Some encoders need to output such packets, e.g. 
to update some stream + * parameters at the end of encoding. + * + * @deprecated this field disables the default behaviour and + * it is kept only for compatibility. + */ + attribute_deprecated + int side_data_only_packets; +#endif + + /** + * Audio only. The number of "priming" samples (padding) inserted by the + * encoder at the beginning of the audio. I.e. this number of leading + * decoded samples must be discarded by the caller to get the original audio + * without leading padding. + * + * - decoding: unused + * - encoding: Set by libavcodec. The timestamps on the output packets are + * adjusted by the encoder so that they always refer to the + * first sample of the data actually contained in the packet, + * including any added padding. E.g. if the timebase is + * 1/samplerate and the timestamp of the first input sample is + * 0, the timestamp of the first output packet will be + * -initial_padding. + */ + int initial_padding; + + /** + * - decoding: For codecs that store a framerate value in the compressed + * bitstream, the decoder may export it here. { 0, 1} when + * unknown. + * - encoding: May be used to signal the framerate of CFR content to an + * encoder. + */ + AVRational framerate; + + /** + * Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx. + * - encoding: unused. + * - decoding: Set by libavcodec before calling get_format() + */ + enum AVPixelFormat sw_pix_fmt; + + /** + * Timebase in which pkt_dts/pts and AVPacket.dts/pts are. + * - encoding unused. + * - decoding set by user. + */ + AVRational pkt_timebase; + + /** + * AVCodecDescriptor + * - encoding: unused. + * - decoding: set by libavcodec. + */ + const AVCodecDescriptor *codec_descriptor; + +#if !FF_API_LOWRES + /** + * low resolution decoding, 1-> 1/2 size, 2->1/4 size + * - encoding: unused + * - decoding: Set by user. + */ + int lowres; +#endif + + /** + * Current statistics for PTS correction. 
+ * - decoding: maintained and used by libavcodec, not intended to be used by user apps + * - encoding: unused + */ + int64_t pts_correction_num_faulty_pts; /// Number of incorrect PTS values so far + int64_t pts_correction_num_faulty_dts; /// Number of incorrect DTS values so far + int64_t pts_correction_last_pts; /// PTS of the last frame + int64_t pts_correction_last_dts; /// DTS of the last frame + + /** + * Character encoding of the input subtitles file. + * - decoding: set by user + * - encoding: unused + */ + char *sub_charenc; + + /** + * Subtitles character encoding mode. Formats or codecs might be adjusting + * this setting (if they are doing the conversion themselves for instance). + * - decoding: set by libavcodec + * - encoding: unused + */ + int sub_charenc_mode; +#define FF_SUB_CHARENC_MODE_DO_NOTHING -1 ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance) +#define FF_SUB_CHARENC_MODE_AUTOMATIC 0 ///< libavcodec will select the mode itself +#define FF_SUB_CHARENC_MODE_PRE_DECODER 1 ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv +#define FF_SUB_CHARENC_MODE_IGNORE 2 ///< neither convert the subtitles, nor check them for valid UTF-8 + + /** + * Skip processing alpha if supported by codec. + * Note that if the format uses pre-multiplied alpha (common with VP6, + * and recommended due to better video quality/compression) + * the image will look as if alpha-blended onto a black background. + * However for formats that do not use pre-multiplied alpha + * there might be serious artefacts (though e.g. libswscale currently + * assumes pre-multiplied alpha anyway). 
+ * + * - decoding: set by user + * - encoding: unused + */ + int skip_alpha; + + /** + * Number of samples to skip after a discontinuity + * - decoding: unused + * - encoding: set by libavcodec + */ + int seek_preroll; + +#if !FF_API_DEBUG_MV + /** + * debug motion vectors + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug_mv; +#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames +#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames +#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames +#endif + + /** + * custom intra quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: unused. + */ + uint16_t *chroma_intra_matrix; + + /** + * dump format separator. + * can be ", " or "\n " or anything else + * - encoding: Set by user. + * - decoding: Set by user. + */ + uint8_t *dump_separator; + + /** + * ',' separated list of allowed decoders. + * If NULL then all are allowed + * - encoding: unused + * - decoding: set by user + */ + char *codec_whitelist; + + /** + * Properties of the stream that gets decoded + * - encoding: unused + * - decoding: set by libavcodec + */ + unsigned properties; +#define FF_CODEC_PROPERTY_LOSSLESS 0x00000001 +#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS 0x00000002 + + /** + * Additional data associated with the entire coded stream. + * + * - decoding: unused + * - encoding: may be set by libavcodec after avcodec_open2(). + */ + AVPacketSideData *coded_side_data; + int nb_coded_side_data; + + /** + * A reference to the AVHWFramesContext describing the input (for encoding) + * or output (decoding) frames. The reference is set by the caller and + * afterwards owned (and freed) by libavcodec - it should never be read by + * the caller after being set. + * + * - decoding: This field should be set by the caller from the get_format() + * callback. 
The previous reference (if any) will always be + * unreffed by libavcodec before the get_format() call. + * + * If the default get_buffer2() is used with a hwaccel pixel + * format, then this AVHWFramesContext will be used for + * allocating the frame buffers. + * + * - encoding: For hardware encoders configured to use a hwaccel pixel + * format, this field should be set by the caller to a reference + * to the AVHWFramesContext describing input frames. + * AVHWFramesContext.format must be equal to + * AVCodecContext.pix_fmt. + * + * This field should be set before avcodec_open2() is called. + */ + AVBufferRef *hw_frames_ctx; + + /** + * Control the form of AVSubtitle.rects[N]->ass + * - decoding: set by user + * - encoding: unused + */ + int sub_text_format; +#define FF_SUB_TEXT_FMT_ASS 0 +#if FF_API_ASS_TIMING +#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS 1 +#endif + + /** + * Audio only. The amount of padding (in samples) appended by the encoder to + * the end of the audio. I.e. this number of decoded samples must be + * discarded by the caller from the end of the stream to get the original + * audio without any trailing padding. + * + * - decoding: unused + * - encoding: unused + */ + int trailing_padding; + + /** + * The number of pixels per image to maximally accept. + * + * - decoding: set by user + * - encoding: set by user + */ + int64_t max_pixels; + + /** + * A reference to the AVHWDeviceContext describing the device which will + * be used by a hardware encoder/decoder. The reference is set by the + * caller and afterwards owned (and freed) by libavcodec. + * + * This should be used if either the codec device does not require + * hardware frames or any that are used are to be allocated internally by + * libavcodec. If the user wishes to supply any of the frames used as + * encoder input or decoder output then hw_frames_ctx should be used + * instead. 
When hw_frames_ctx is set in get_format() for a decoder, this + * field will be ignored while decoding the associated stream segment, but + * may again be used on a following one after another get_format() call. + * + * For both encoders and decoders this field should be set before + * avcodec_open2() is called and must not be written to thereafter. + * + * Note that some decoders may require this field to be set initially in + * order to support hw_frames_ctx at all - in that case, all frames + * contexts used must be created on the same device. + */ + AVBufferRef *hw_device_ctx; + + /** + * Bit set of AV_HWACCEL_FLAG_* flags, which affect hardware accelerated + * decoding (if active). + * - encoding: unused + * - decoding: Set by user (either before avcodec_open2(), or in the + * AVCodecContext.get_format callback) + */ + int hwaccel_flags; + + /** + * Video decoding only. Certain video codecs support cropping, meaning that + * only a sub-rectangle of the decoded frame is intended for display. This + * option controls how cropping is handled by libavcodec. + * + * When set to 1 (the default), libavcodec will apply cropping internally. + * I.e. it will modify the output frame width/height fields and offset the + * data pointers (only by as much as possible while preserving alignment, or + * by the full amount if the AV_CODEC_FLAG_UNALIGNED flag is set) so that + * the frames output by the decoder refer only to the cropped area. The + * crop_* fields of the output frames will be zero. + * + * When set to 0, the width/height fields of the output frames will be set + * to the coded dimensions and the crop_* fields will describe the cropping + * rectangle. Applying the cropping is left to the caller. + * + * @warning When hardware acceleration with opaque output frames is used, + * libavcodec is unable to apply cropping from the top/left border. 
+ * + * @note when this option is set to zero, the width/height fields of the + * AVCodecContext and output AVFrames have different meanings. The codec + * context fields store display dimensions (with the coded dimensions in + * coded_width/height), while the frame fields store the coded dimensions + * (with the display dimensions being determined by the crop_* fields). + */ + int apply_cropping; + + /* + * Video decoding only. Sets the number of extra hardware frames which + * the decoder will allocate for use by the caller. This must be set + * before avcodec_open2() is called. + * + * Some hardware decoders require all frames that they will use for + * output to be defined in advance before decoding starts. For such + * decoders, the hardware frame pool must therefore be of a fixed size. + * The extra frames set here are on top of any number that the decoder + * needs internally in order to operate normally (for example, frames + * used as reference pictures). + */ + int extra_hw_frames; +} AVCodecContext; + +#if FF_API_CODEC_GET_SET +/** + * Accessors for some AVCodecContext fields. These used to be provided for ABI + * compatibility, and do not need to be used anymore. 
+ */ +attribute_deprecated +AVRational av_codec_get_pkt_timebase (const AVCodecContext *avctx); +attribute_deprecated +void av_codec_set_pkt_timebase (AVCodecContext *avctx, AVRational val); + +attribute_deprecated +const AVCodecDescriptor *av_codec_get_codec_descriptor(const AVCodecContext *avctx); +attribute_deprecated +void av_codec_set_codec_descriptor(AVCodecContext *avctx, const AVCodecDescriptor *desc); + +attribute_deprecated +unsigned av_codec_get_codec_properties(const AVCodecContext *avctx); + +#if FF_API_LOWRES +attribute_deprecated +int av_codec_get_lowres(const AVCodecContext *avctx); +attribute_deprecated +void av_codec_set_lowres(AVCodecContext *avctx, int val); +#endif + +attribute_deprecated +int av_codec_get_seek_preroll(const AVCodecContext *avctx); +attribute_deprecated +void av_codec_set_seek_preroll(AVCodecContext *avctx, int val); + +attribute_deprecated +uint16_t *av_codec_get_chroma_intra_matrix(const AVCodecContext *avctx); +attribute_deprecated +void av_codec_set_chroma_intra_matrix(AVCodecContext *avctx, uint16_t *val); +#endif + +/** + * AVProfile. + */ +typedef struct AVProfile { + int profile; + const char *name; ///< short name for the profile +} AVProfile; + +enum { + /** + * The codec supports this format via the hw_device_ctx interface. + * + * When selecting this format, AVCodecContext.hw_device_ctx should + * have been set to a device of the specified type before calling + * avcodec_open2(). + */ + AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX = 0x01, + /** + * The codec supports this format via the hw_frames_ctx interface. + * + * When selecting this format for a decoder, + * AVCodecContext.hw_frames_ctx should be set to a suitable frames + * context inside the get_format() callback. The frames context + * must have been created on a device of the specified type. + */ + AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX = 0x02, + /** + * The codec supports this format by some internal method. 
+ * + * This format can be selected without any additional configuration - + * no device or frames context is required. + */ + AV_CODEC_HW_CONFIG_METHOD_INTERNAL = 0x04, + /** + * The codec supports this format by some ad-hoc method. + * + * Additional settings and/or function calls are required. See the + * codec-specific documentation for details. (Methods requiring + * this sort of configuration are deprecated and others should be + * used in preference.) + */ + AV_CODEC_HW_CONFIG_METHOD_AD_HOC = 0x08, +}; + +typedef struct AVCodecHWConfig { + /** + * A hardware pixel format which the codec can use. + */ + enum AVPixelFormat pix_fmt; + /** + * Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible + * setup methods which can be used with this configuration. + */ + int methods; + /** + * The device type associated with the configuration. + * + * Must be set for AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX and + * AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX, otherwise unused. + */ + enum AVHWDeviceType device_type; +} AVCodecHWConfig; + +typedef struct AVCodecDefault AVCodecDefault; + +struct AVSubtitle; + +/** + * AVCodec. + */ +typedef struct AVCodec { + /** + * Name of the codec implementation. + * The name is globally unique among encoders and among decoders (but an + * encoder and a decoder can share the same name). + * This is the primary way to find a codec from the user perspective. + */ + const char *name; + /** + * Descriptive name for the codec, meant to be more human readable than name. + * You should use the NULL_IF_CONFIG_SMALL() macro to define it. + */ + const char *long_name; + enum AVMediaType type; + enum AVCodecID id; + /** + * Codec capabilities. 
+ * see AV_CODEC_CAP_* + */ + int capabilities; + const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0} + const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1 + const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0 + const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1 + const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0 + uint8_t max_lowres; ///< maximum value for lowres supported by the decoder + const AVClass *priv_class; ///< AVClass for the private context + const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN} + + /** + * Group name of the codec implementation. + * This is a short symbolic name of the wrapper backing this codec. A + * wrapper uses some kind of external implementation for the codec, such + * as an external library, or a codec implementation provided by the OS or + * the hardware. + * If this field is NULL, this is a builtin, libavcodec native codec. + * If non-NULL, this will be the suffix in AVCodec.name in most cases + * (usually AVCodec.name will be of the form "<codec_name>_<wrapper_name>"). + */ + const char *wrapper_name; + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavcodec and can be changed and + * removed at will. + * New public fields should be added right above. 
+ ***************************************************************** + */ + int priv_data_size; + struct AVCodec *next; + /** + * @name Frame-level threading support functions + * @{ + */ + /** + * If defined, called on thread contexts when they are created. + * If the codec allocates writable tables in init(), re-allocate them here. + * priv_data will be set to a copy of the original. + */ + int (*init_thread_copy)(AVCodecContext *); + /** + * Copy necessary context variables from a previous thread context to the current one. + * If not defined, the next thread will start automatically; otherwise, the codec + * must call liteav_ff_thread_finish_setup(). + * + * dst and src will (rarely) point to the same context, in which case memcpy should be skipped. + */ + int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src); + /** @} */ + + /** + * Private codec-specific defaults. + */ + const AVCodecDefault *defaults; + + /** + * Initialize codec static data, called from liteav_avcodec_register(). + * + * This is not intended for time consuming operations as it is + * run for every codec regardless of that codec being used. + */ + void (*init_static_data)(struct AVCodec *codec); + + int (*init)(AVCodecContext *); + int (*encode_sub)(AVCodecContext *, uint8_t *buf, int buf_size, + const struct AVSubtitle *sub); + /** + * Encode data to an AVPacket. + * + * @param avctx codec context + * @param avpkt output AVPacket (may contain a user-provided buffer) + * @param[in] frame AVFrame containing the raw data to be encoded + * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a + * non-empty packet was returned in avpkt. 
+ * @return 0 on success, negative error code on failure + */ + int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, + int *got_packet_ptr); + int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt); + int (*close)(AVCodecContext *); + /** + * Encode API with decoupled packet/frame dataflow. The API is the + * same as the avcodec_ prefixed APIs (liteav_avcodec_send_frame() etc.), except + * that: + * - never called if the codec is closed or the wrong type, + * - if AV_CODEC_CAP_DELAY is not set, drain frames are never sent, + * - only one drain frame is ever passed down, + */ + int (*send_frame)(AVCodecContext *avctx, const AVFrame *frame); + int (*receive_packet)(AVCodecContext *avctx, AVPacket *avpkt); + + /** + * Decode API with decoupled packet/frame dataflow. This function is called + * to get one output frame. It should call liteav_ff_decode_get_packet() to obtain + * input data. + */ + int (*receive_frame)(AVCodecContext *avctx, AVFrame *frame); + /** + * Flush buffers. + * Will be called when seeking + */ + void (*flush)(AVCodecContext *); + /** + * Internal codec capabilities. + * See FF_CODEC_CAP_* in internal.h + */ + int caps_internal; + + /** + * Decoding only, a comma-separated list of bitstream filters to apply to + * packets before decoding. + */ + const char *bsfs; + + /** + * Array of pointers to hardware configurations supported by the codec, + * or NULL if no hardware supported. The array is terminated by a NULL + * pointer. + * + * The user can only access this field via avcodec_get_hw_config(). + */ + const struct AVCodecHWConfigInternal **hw_configs; +} AVCodec; + +#if FF_API_CODEC_GET_SET +attribute_deprecated +int av_codec_get_max_lowres(const AVCodec *codec); +#endif + +struct MpegEncContext; + +/** + * Retrieve supported hardware configurations for a codec. + * + * Values of index from zero to some maximum return the indexed configuration + * descriptor; all other values return NULL. 
If the codec does not support + * any hardware configurations then it will always return NULL. + */ +const AVCodecHWConfig *avcodec_get_hw_config(const AVCodec *codec, int index); + +/** + * @defgroup lavc_hwaccel AVHWAccel + * + * @note Nothing in this structure should be accessed by the user. At some + * point in future it will not be externally visible at all. + * + * @{ + */ +typedef struct AVHWAccel { + /** + * Name of the hardware accelerated codec. + * The name is globally unique among encoders and among decoders (but an + * encoder and a decoder can share the same name). + */ + const char *name; + + /** + * Type of codec implemented by the hardware accelerator. + * + * See AVMEDIA_TYPE_xxx + */ + enum AVMediaType type; + + /** + * Codec implemented by the hardware accelerator. + * + * See AV_CODEC_ID_xxx + */ + enum AVCodecID id; + + /** + * Supported pixel format. + * + * Only hardware accelerated formats are supported here. + */ + enum AVPixelFormat pix_fmt; + + /** + * Hardware accelerated codec capabilities. + * see AV_HWACCEL_CODEC_CAP_* + */ + int capabilities; + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavcodec and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + /** + * Allocate a custom buffer + */ + int (*alloc_frame)(AVCodecContext *avctx, AVFrame *frame); + + /** + * Called at the beginning of each frame or field picture. + * + * Meaningful frame information (codec specific) is guaranteed to + * be parsed at this point. This function is mandatory. + * + * Note that buf can be NULL along with buf_size set to 0. + * Otherwise, this means the whole frame is available at this point. 
+ * + * @param avctx the codec context + * @param buf the frame data buffer base + * @param buf_size the size of the frame in bytes + * @return zero if successful, a negative value otherwise + */ + int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); + + /** + * Callback for parameter data (SPS/PPS/VPS etc). + * + * Useful for hardware decoders which keep persistent state about the + * video parameters, and need to receive any changes to update that state. + * + * @param avctx the codec context + * @param type the nal unit type + * @param buf the nal unit data buffer + * @param buf_size the size of the nal unit in bytes + * @return zero if successful, a negative value otherwise + */ + int (*decode_params)(AVCodecContext *avctx, int type, const uint8_t *buf, uint32_t buf_size); + + /** + * Callback for each slice. + * + * Meaningful slice information (codec specific) is guaranteed to + * be parsed at this point. This function is mandatory. + * The only exception is XvMC, that works on MB level. + * + * @param avctx the codec context + * @param buf the slice data buffer base + * @param buf_size the size of the slice in bytes + * @return zero if successful, a negative value otherwise + */ + int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); + + /** + * Called at the end of each frame or field picture. + * + * The whole picture is parsed at this point and can now be sent + * to the hardware accelerator. This function is mandatory. + * + * @param avctx the codec context + * @return zero if successful, a negative value otherwise + */ + int (*end_frame)(AVCodecContext *avctx); + + /** + * Size of per-frame hardware accelerator private data. + * + * Private data is allocated with liteav_av_mallocz() before + * AVCodecContext.get_buffer() and deallocated after + * AVCodecContext.release_buffer(). + */ + int frame_priv_data_size; + + /** + * Called for every Macroblock in a slice. 
+ * + * XvMC uses it to replace the liteav_ff_mpv_reconstruct_mb(). + * Instead of decoding to raw picture, MB parameters are + * stored in an array provided by the video driver. + * + * @param s the mpeg context + */ + void (*decode_mb)(struct MpegEncContext *s); + + /** + * Initialize the hwaccel private data. + * + * This will be called from liteav_ff_get_format(), after hwaccel and + * hwaccel_context are set and the hwaccel private data in AVCodecInternal + * is allocated. + */ + int (*init)(AVCodecContext *avctx); + + /** + * Uninitialize the hwaccel private data. + * + * This will be called from get_format() or avcodec_close(), after hwaccel + * and hwaccel_context are already uninitialized. + */ + int (*uninit)(AVCodecContext *avctx); + + /** + * Size of the private data to allocate in + * AVCodecInternal.hwaccel_priv_data. + */ + int priv_data_size; + + /** + * Internal hwaccel capabilities. + */ + int caps_internal; + + /** + * Fill the given hw_frames context with current codec parameters. Called + * from get_format. Refer to liteav_avcodec_get_hw_frames_parameters() for + * details. + * + * This CAN be called before AVHWAccel.init is called, and you must assume + * that avctx->hwaccel_priv_data is invalid. + */ + int (*frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx); +} AVHWAccel; + +/** + * HWAccel is experimental and is thus avoided in favor of non experimental + * codecs + */ +#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL 0x0200 + +/** + * Hardware acceleration should be used for decoding even if the codec level + * used is unknown or higher than the maximum supported level reported by the + * hardware driver. + * + * It's generally a good idea to pass this flag unless you have a specific + * reason not to, as hardware tends to under-report supported levels. 
+ */ +#define AV_HWACCEL_FLAG_IGNORE_LEVEL (1 << 0) + +/** + * Hardware acceleration can output YUV pixel formats with a different chroma + * sampling than 4:2:0 and/or other than 8 bits per component. + */ +#define AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH (1 << 1) + +/** + * Hardware acceleration should still be attempted for decoding when the + * codec profile does not match the reported capabilities of the hardware. + * + * For example, this can be used to try to decode baseline profile H.264 + * streams in hardware - it will often succeed, because many streams marked + * as baseline profile actually conform to constrained baseline profile. + * + * @warning If the stream is actually not supported then the behaviour is + * undefined, and may include returning entirely incorrect output + * while indicating success. + */ +#define AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH (1 << 2) + +/** + * @} + */ + +#if FF_API_AVPICTURE +/** + * @defgroup lavc_picture AVPicture + * + * Functions for working with AVPicture + * @{ + */ + +/** + * Picture data structure. + * + * Up to four components can be stored into it, the last component is + * alpha. + * @deprecated use AVFrame or imgutils functions instead + */ +typedef struct AVPicture { + attribute_deprecated + uint8_t *data[AV_NUM_DATA_POINTERS]; ///< pointers to the image data planes + attribute_deprecated + int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line +} AVPicture; + +/** + * @} + */ +#endif + +enum AVSubtitleType { + SUBTITLE_NONE, + + SUBTITLE_BITMAP, ///< A bitmap, pict will be set + + /** + * Plain text, the text field must be set by the decoder and is + * authoritative. ass and pict fields may contain approximations. + */ + SUBTITLE_TEXT, + + /** + * Formatted text, the ass field must be set by the decoder and is + * authoritative. pict and text fields may contain approximations. + */ + SUBTITLE_ASS, + + /** + * WebVTT rich text, the box field must be set by the decoder and is + * authoritative. 
pict and text fields are empty. + */ + SUBTITLE_WEBVTT, +}; + +#define AV_SUBTITLE_FLAG_FORCED 0x00000001 + +/** + * indicating that y of AVSubtitleRect is a line number. + * @see AVSubtitleBox + */ +#define AV_SUBTITLE_FLAG_LINE_NUMBER 0x00000002 + +/** + * indicating that WebVTT setting has "line" property + */ +#define AV_SUBTITLE_FLAG_LINE_SETTING 0x00000004 + +typedef struct AVTextStyle { + /** Family font names */ + char * font_name; /**< The name of the font */ + char * mono_font_name; /**< The name of the mono font */ + + uint16_t features; /**< Feature flags (means non default) */ + uint16_t style_flags; /**< Formatting style flags */ + + /* Font style */ + float font_relsize; /**< The font size in video height % */ + int font_size; /**< The font size in pixels */ + int font_color; /**< The color of the text 0xRRGGBB + (native endianness) */ + uint8_t font_alpha; /**< The transparency of the text.*/ + int spacing; /**< The spaceing between glyphs in pixels */ + + /* Outline */ + int outline_color; /**< The color of the outline 0xRRGGBB */ + uint8_t outline_alpha; /**< The transparency of the outline */ + int outline_width; /**< The width of the outline in pixels */ + + /* Shadow */ + int shadow_color; /**< The color of the shadow 0xRRGGBB */ + uint8_t shadow_alpha; /**< The transparency of the shadow. 
*/ + int shadow_width; /**< The width of the shadow in pixels */ + + /* Background (and karaoke) */ + int background_color;/**< The color of the background 0xRRGGBB */ + uint8_t background_alpha;/**< The transparency of the background */ + int karaoke_background_color;/**< Background color for karaoke 0xRRGGBB */ + uint8_t karaoke_background_alpha;/**< The transparency of the karaoke bg */ + + /* Line breaking */ + enum + { + STYLE_WRAP_DEFAULT = 0, /**< Breaks on whitespace or fallback on char */ + STYLE_WRAP_CHAR, /**< Breaks at character level only */ + STYLE_WRAP_NONE, /**< No line breaks (except explicit ones) */ + } wrapinfo; +} AVTextStyle; + +#define STYLE_ALPHA_OPAQUE 0xFF +#define STYLE_ALPHA_TRANSPARENT 0x00 + +/* Features flags for AVTextStyle features */ +#define STYLE_NO_DEFAULTS 0x0 +#define STYLE_FULLY_SET 0xFFFF +#define STYLE_HAS_FONT_COLOR (1 << 0) +#define STYLE_HAS_FONT_ALPHA (1 << 1) +#define STYLE_HAS_FLAGS (1 << 2) +#define STYLE_HAS_OUTLINE_COLOR (1 << 3) +#define STYLE_HAS_OUTLINE_ALPHA (1 << 4) +#define STYLE_HAS_SHADOW_COLOR (1 << 5) +#define STYLE_HAS_SHADOW_ALPHA (1 << 6) +#define STYLE_HAS_BACKGROUND_COLOR (1 << 7) +#define STYLE_HAS_BACKGROUND_ALPHA (1 << 8) +#define STYLE_HAS_K_BACKGROUND_COLOR (1 << 9) +#define STYLE_HAS_K_BACKGROUND_ALPHA (1 << 10) +#define STYLE_HAS_WRAP_INFO (1 << 11) + +/* Style flags for AVTextStyle style_flags */ +#define STYLE_BOLD (1 << 0) +#define STYLE_ITALIC (1 << 1) +#define STYLE_OUTLINE (1 << 2) +#define STYLE_SHADOW (1 << 3) +#define STYLE_BACKGROUND (1 << 4) +#define STYLE_UNDERLINE (1 << 5) +#define STYLE_STRIKEOUT (1 << 6) +#define STYLE_HALFWIDTH (1 << 7) +#define STYLE_MONOSPACED (1 << 8) +#define STYLE_DOUBLEWIDTH (1 << 9) +#define STYLE_BLINK_FOREGROUND (1 << 10) +#define STYLE_BLINK_BACKGROUND (1 << 11) + +#define STYLE_DEFAULT_FONT_SIZE 20 +#define STYLE_DEFAULT_REL_FONT_SIZE 6.25 + +/** + * subtitle alignment for positioning a piece of subtitle text. 
+ */ +enum AVSubtitleAlign { + AV_SUBTITLE_ALIGN_START, + + AV_SUBTITLE_ALIGN_CENTER, + + AV_SUBTITLE_ALIGN_END, + + AV_SUBTITLE_ALIGN_LEFT, + + AV_SUBTITLE_ALIGN_RIGHT, +}; + +enum AVWritingDirection { + AV_WRITING_DIRECTION_HORIZONTAL, + AV_WRITING_DIRECTION_VERTICAL_RL, // vertical, right to left + AV_WRITING_DIRECTION_VERTICAL_LR, // vertical, left to right +}; + +typedef struct AVTextSegment { + char *text; + AVTextStyle *style; + struct AVTextSegment *next; +} AVTextSegment; + +/** + * AVSubtitleBox is define to extent AVSubtitleRect + */ +typedef struct AVSubtitleBox { + /** + * (x,y,w,h), with y_align determine a box into which a piece of text is rendered. + * the meaning of (x,y,w,h) dependes on the writing direction. + * if writing direction is horizontal, x is offset from the left of the video viewport to the + * left side of the box, and y is the offset from the top of the video viewport(see y_align), + * and w is the box's horizontal size, and h currently unused . + * + * if writing direction is vertical, x is offset from the top of the video viewport to the + * top side of the box, and y is the offset from the right(for AV_WRITING_DIRECTION_VERTICAL_RL), + * or left of the video view port(for AV_WRITING_DIRECTION_VERTICAL_LR), and w is the box's + * vertical size, and h currently unused. + */ + float x; + float y; + float w; + float h; + + /** + * reference width of x, w (for horizontal direction), or y, h (for vertical direction). + * if reference width equals 1, then x, y, w, h is a ratio, except that the writing direction + * is vertical and y is a line number. so, when you calculate y dimension in case of vertical + * writing direction, you should firstly check whether y is line number(see AV_SUBTITLE_FLAG_LINE_NUMBER). 
+ */ + int ref_width; + + /** + * reference height of y, h (for horizontal direction), x, w (for vertical direction) + * if reference width equals 1, then x, y, w, h is a ratio, except that the writing direction + * is vertical and y is a line number. so, when you calculate y dimension in case of vertical + * writing direction, you should firstly check whether y is line number(see AV_SUBTITLE_FLAG_LINE_NUMBER). + */ + int ref_height; + /** + * the alignment of display box of a piece of text, depending on the writing direction. + * AV_SUBTITLE_ALIGN_START: + * the display box's top side(for horizontal writing direction), + * left side(for vertical and left-to-right writing direction), + * or right side(for vertical and right-to-left writing direction) is align at y. + * AV_SUBTITLE_ALIGN_CENTER: + * The display box is centered at y. + * AV_SUBTITLE_ALIGN_END: + * the display box's bottom side(for horizontal writing direction), + * right side(for vertical and left-to-right writing direction), + * or left side(for vertical and right-to-left writing direction) is align at y. 
+ */ + enum AVSubtitleAlign y_align; + + // the alignment of text within diplay box + enum AVSubtitleAlign text_align; + + // text_segments is an array of AVTextSegment, which represent a fragment of text with text style + AVTextSegment *text_segments; + + enum AVWritingDirection writing_direction; + /** + * @see AV_SUBTITLE_FLAG_LINE_NUMBER + */ + int flags; +} AVSubtitleBox; + +typedef struct AVSubtitleRect { + int x; ///< top left corner of pict, undefined when pict is not set + int y; ///< top left corner of pict, undefined when pict is not set + int w; ///< width of pict, undefined when pict is not set + int h; ///< height of pict, undefined when pict is not set + int nb_colors; ///< number of colors in pict, undefined when pict is not set + +#if FF_API_AVPICTURE + /** + * @deprecated unused + */ + attribute_deprecated + AVPicture pict; +#endif + /** + * data+linesize for the bitmap of this subtitle. + * Can be set for text/ass as well once they are rendered. + */ + uint8_t *data[4]; + int linesize[4]; + + enum AVSubtitleType type; + + char *text; ///< 0 terminated plain UTF-8 text + + /** + * 0 terminated ASS/SSA compatible event line. + * The presentation of this is unaffected by the other values in this + * struct. + */ + char *ass; + + int flags; + + //the len of subtitle text + int text_len; + + //the flags of unicode encode type; + int UnicodeFlags; + + // the subtitle box in which a piece of subtitle text is rendered; + AVSubtitleBox box; +} AVSubtitleRect; + +typedef struct AVSubtitle { + uint16_t format; /* 0 = graphics */ + uint32_t start_display_time; /* relative to packet pts, in ms */ + uint32_t end_display_time; /* relative to packet pts, in ms */ + unsigned num_rects; + AVSubtitleRect **rects; + int64_t pts; ///< Same as packet pts, in AV_TIME_BASE +} AVSubtitle; + +/** + * This struct describes the properties of an encoded stream. 
+ * + * sizeof(AVCodecParameters) is not a part of the public ABI, this struct must + * be allocated with avcodec_parameters_alloc() and freed with + * avcodec_parameters_free(). + */ +typedef struct AVCodecParameters { + /** + * General type of the encoded data. + */ + enum AVMediaType codec_type; + /** + * Specific type of the encoded data (the codec used). + */ + enum AVCodecID codec_id; + /** + * Additional information about the codec (corresponds to the AVI FOURCC). + */ + uint32_t codec_tag; + + /** + * Extra binary data needed for initializing the decoder, codec-dependent. + * + * Must be allocated with liteav_av_malloc() and will be freed by + * avcodec_parameters_free(). The allocated size of extradata must be at + * least extradata_size + AV_INPUT_BUFFER_PADDING_SIZE, with the padding + * bytes zeroed. + */ + uint8_t *extradata; + /** + * Size of the extradata content in bytes. + */ + int extradata_size; + + /** + * - video: the pixel format, the value corresponds to enum AVPixelFormat. + * - audio: the sample format, the value corresponds to enum AVSampleFormat. + */ + int format; + + /** + * The average bitrate of the encoded data (in bits per second). + */ + int64_t bit_rate; + + /** + * The number of bits per sample in the codedwords. + * + * This is basically the bitrate per sample. It is mandatory for a bunch of + * formats to actually decode them. It's the number of bits for one sample in + * the actual coded bitstream. + * + * This could be for example 4 for ADPCM + * For PCM formats this matches bits_per_raw_sample + * Can be 0 + */ + int bits_per_coded_sample; + + /** + * This is the number of valid bits in each output sample. If the + * sample format has more bits, the least significant bits are additional + * padding bits, which are always 0. Use right shifts to reduce the sample + * to its actual size. For example, audio formats with 24 bit samples will + * have bits_per_raw_sample set to 24, and format set to AV_SAMPLE_FMT_S32. 
+ * To get the original sample use "(int32_t)sample >> 8"." + * + * For ADPCM this might be 12 or 16 or similar + * Can be 0 + */ + int bits_per_raw_sample; + + /** + * Codec-specific bitstream restrictions that the stream conforms to. + */ + int profile; + int level; + + /** + * Video only. The dimensions of the video frame in pixels. + */ + int width; + int height; + + /** + * Video only. The aspect ratio (width / height) which a single pixel + * should have when displayed. + * + * When the aspect ratio is unknown / undefined, the numerator should be + * set to 0 (the denominator may have any value). + */ + AVRational sample_aspect_ratio; + + /** + * Video only. The order of the fields in interlaced video. + */ + enum AVFieldOrder field_order; + + /** + * Video only. Additional colorspace characteristics. + */ + enum AVColorRange color_range; + enum AVColorPrimaries color_primaries; + enum AVColorTransferCharacteristic color_trc; + enum AVColorSpace color_space; + enum AVChromaLocation chroma_location; + + /** + * Video only. Number of delayed frames. + */ + int video_delay; + + /** + * Audio only. The channel layout bitmask. May be 0 if the channel layout is + * unknown or unspecified, otherwise the number of bits set must be equal to + * the channels field. + */ + uint64_t channel_layout; + /** + * Audio only. The number of audio channels. + */ + int channels; + /** + * Audio only. The number of audio samples per second. + */ + int sample_rate; + /** + * Audio only. The number of bytes per coded audio frame, required by some + * formats. + * + * Corresponds to nBlockAlign in WAVEFORMATEX. + */ + int block_align; + /** + * Audio only. Audio frame size, if known. Required by some formats to be static. + */ + int frame_size; + + /** + * Audio only. The amount of padding (in samples) inserted by the encoder at + * the beginning of the audio. I.e. 
this number of leading decoded samples + * must be discarded by the caller to get the original audio without leading + * padding. + */ + int initial_padding; + /** + * Audio only. The amount of padding (in samples) appended by the encoder to + * the end of the audio. I.e. this number of decoded samples must be + * discarded by the caller from the end of the stream to get the original + * audio without any trailing padding. + */ + int trailing_padding; + /** + * Audio only. Number of samples to skip after a discontinuity. + */ + int seek_preroll; +} AVCodecParameters; + +/** + * Iterate over all registered codecs. + * + * @param opaque a pointer where libavcodec will store the iteration state. Must + * point to NULL to start the iteration. + * + * @return the next registered codec or NULL when the iteration is + * finished + */ +const AVCodec *liteav_av_codec_iterate(void **opaque); + +#if FF_API_NEXT +/** + * If c is NULL, returns the first registered codec, + * if c is non-NULL, returns the next registered codec after c, + * or NULL if c is the last one. + */ +attribute_deprecated +AVCodec *liteav_av_codec_next(const AVCodec *c); +#endif + +/** + * Return the LIBAVCODEC_VERSION_INT constant. + */ +unsigned avcodec_version(void); + +/** + * Return the libavcodec build-time configuration. + */ +const char *avcodec_configuration(void); + +/** + * Return the libavcodec license. + */ +const char *avcodec_license(void); + +#if FF_API_NEXT +/** + * Register the codec codec and initialize libavcodec. + * + * @warning either this function or liteav_avcodec_register_all() must be called + * before any other libavcodec functions. + * + * @see liteav_avcodec_register_all() + */ +attribute_deprecated +void liteav_avcodec_register(AVCodec *codec); + +/** + * Register all the codecs, parsers and bitstream filters which were enabled at + * configuration time. 
If you do not call this function you can select exactly + * which formats you want to support, by using the individual registration + * functions. + * + * @see liteav_avcodec_register + * @see liteav_av_register_codec_parser + * @see liteav_av_register_bitstream_filter + */ +attribute_deprecated +void liteav_avcodec_register_all(void); +#endif + +/** + * Allocate an AVCodecContext and set its fields to default values. The + * resulting struct should be freed with avcodec_free_context(). + * + * @param codec if non-NULL, allocate private data and initialize defaults + * for the given codec. It is illegal to then call avcodec_open2() + * with a different codec. + * If NULL, then the codec-specific defaults won't be initialized, + * which may result in suboptimal default settings (this is + * important mainly for encoders, e.g. libx264). + * + * @return An AVCodecContext filled with default values or NULL on failure. + */ +AVCodecContext *avcodec_alloc_context3(const AVCodec *codec); + +/** + * Free the codec context and everything associated with it and write NULL to + * the provided pointer. + */ +void avcodec_free_context(AVCodecContext **avctx); + +#if FF_API_GET_CONTEXT_DEFAULTS +/** + * @deprecated This function should not be used, as closing and opening a codec + * context multiple time is not supported. A new codec context should be + * allocated for each new use. + */ +int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec); +#endif + +/** + * Get the AVClass for AVCodecContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see liteav_av_opt_find(). + */ +const AVClass *avcodec_get_class(void); + +#if FF_API_COPY_CONTEXT +/** + * Get the AVClass for AVFrame. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see liteav_av_opt_find(). + */ +const AVClass *avcodec_get_frame_class(void); + +/** + * Get the AVClass for AVSubtitleRect. 
It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see liteav_av_opt_find(). + */ +const AVClass *avcodec_get_subtitle_rect_class(void); + +/** + * Copy the settings of the source AVCodecContext into the destination + * AVCodecContext. The resulting destination codec context will be + * unopened, i.e. you are required to call avcodec_open2() before you + * can use this AVCodecContext to decode/encode video/audio data. + * + * @param dest target codec context, should be initialized with + * avcodec_alloc_context3(NULL), but otherwise uninitialized + * @param src source codec context + * @return AVERROR() on error (e.g. memory allocation error), 0 on success + * + * @deprecated The semantics of this function are ill-defined and it should not + * be used. If you need to transfer the stream parameters from one codec context + * to another, use an intermediate AVCodecParameters instance and the + * avcodec_parameters_from_context() / avcodec_parameters_to_context() + * functions. + */ +attribute_deprecated +int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src); +#endif + +/** + * Allocate a new AVCodecParameters and set its fields to default values + * (unknown/invalid/0). The returned struct must be freed with + * avcodec_parameters_free(). + */ +AVCodecParameters *avcodec_parameters_alloc(void); + +/** + * Free an AVCodecParameters instance and everything associated with it and + * write NULL to the supplied pointer. + */ +void avcodec_parameters_free(AVCodecParameters **par); + +/** + * Copy the contents of src to dst. Any allocated fields in dst are freed and + * replaced with newly allocated duplicates of the corresponding fields in src. + * + * @return >= 0 on success, a negative AVERROR code on failure. + */ +int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src); + +/** + * Fill the parameters struct based on the values from the supplied codec + * context. 
Any allocated fields in par are freed and replaced with duplicates + * of the corresponding fields in codec. + * + * @return >= 0 on success, a negative AVERROR code on failure + */ +int avcodec_parameters_from_context(AVCodecParameters *par, + const AVCodecContext *codec); + +/** + * Fill the codec context based on the values from the supplied codec + * parameters. Any allocated fields in codec that have a corresponding field in + * par are freed and replaced with duplicates of the corresponding field in par. + * Fields in codec that do not have a counterpart in par are not touched. + * + * @return >= 0 on success, a negative AVERROR code on failure. + */ +int avcodec_parameters_to_context(AVCodecContext *codec, + const AVCodecParameters *par); + +/** + * Initialize the AVCodecContext to use the given AVCodec. Prior to using this + * function the context has to be allocated with avcodec_alloc_context3(). + * + * The functions liteav_avcodec_find_decoder_by_name(), liteav_avcodec_find_encoder_by_name(), + * liteav_avcodec_find_decoder() and liteav_avcodec_find_encoder() provide an easy way for + * retrieving a codec. + * + * @warning This function is not thread safe! + * + * @note Always call this function before using decoding routines (such as + * @ref liteav_avcodec_receive_frame()). + * + * @code + * liteav_avcodec_register_all(); + * liteav_av_dict_set(&opts, "b", "2.5M", 0); + * codec = liteav_avcodec_find_decoder(AV_CODEC_ID_H264); + * if (!codec) + * exit(1); + * + * context = avcodec_alloc_context3(codec); + * + * if (avcodec_open2(context, codec, opts) < 0) + * exit(1); + * @endcode + * + * @param avctx The context to initialize. + * @param codec The codec to open this context for. If a non-NULL codec has been + * previously passed to avcodec_alloc_context3() or + * avcodec_get_context_defaults3() for this context, then this parameter MUST be either NULL or + * equal to the previously passed codec. + * + * @param options A dictionary filled with AVCodecContext and codec-private options. 
+ * On return this object will be filled with options that were not found. + * + * @return zero on success, a negative value on error + * @see avcodec_alloc_context3(), liteav_avcodec_find_decoder(), liteav_avcodec_find_encoder(), + * liteav_av_dict_set(), liteav_av_opt_find(). + */ +int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options); + +/** + * Close a given AVCodecContext and free all the data associated with it + * (but not the AVCodecContext itself). + * + * Calling this function on an AVCodecContext that hasn't been opened will free + * the codec-specific data allocated in avcodec_alloc_context3() with a non-NULL + * codec. Subsequent calls will do nothing. + * + * @note Do not use this function. Use avcodec_free_context() to destroy a + * codec context (either open or closed). Opening and closing a codec context + * multiple times is not supported anymore -- use multiple codec contexts + * instead. + */ +int avcodec_close(AVCodecContext *avctx); + +/** + * Free all allocated data in the given subtitle struct. + * + * @param sub AVSubtitle to free. + */ +void avsubtitle_free(AVSubtitle *sub); + +/** + * @} + */ + +/** + * @addtogroup lavc_packet + * @{ + */ + +/** + * Allocate an AVPacket and set its fields to default values. The resulting + * struct must be freed using liteav_av_packet_free(). + * + * @return An AVPacket filled with default values or NULL on failure. + * + * @note this only allocates the AVPacket itself, not the data buffers. Those + * must be allocated through other means such as liteav_av_new_packet. + * + * @see liteav_av_new_packet + */ +AVPacket *liteav_av_packet_alloc(void); + +/** + * Create a new packet that references the same data as src. + * + * This is a shortcut for liteav_av_packet_alloc()+liteav_av_packet_ref(). + * + * @return newly created AVPacket on success, NULL on error. 
+ * + * @see liteav_av_packet_alloc + * @see liteav_av_packet_ref + */ +AVPacket *liteav_av_packet_clone(const AVPacket *src); + +/** + * Free the packet, if the packet is reference counted, it will be + * unreferenced first. + * + * @param pkt packet to be freed. The pointer will be set to NULL. + * @note passing NULL is a no-op. + */ +void liteav_av_packet_free(AVPacket **pkt); + +/** + * Initialize optional fields of a packet with default values. + * + * Note, this does not touch the data and size members, which have to be + * initialized separately. + * + * @param pkt packet + */ +void liteav_av_init_packet(AVPacket *pkt); + +/** + * Allocate the payload of a packet and initialize its fields with + * default values. + * + * @param pkt packet + * @param size wanted payload size + * @return 0 if OK, AVERROR_xxx otherwise + */ +int liteav_av_new_packet(AVPacket *pkt, int size); + +/** + * Reduce packet size, correctly zeroing padding + * + * @param pkt packet + * @param size new size + */ +void liteav_av_shrink_packet(AVPacket *pkt, int size); + +/** + * Increase packet size, correctly zeroing padding + * + * @param pkt packet + * @param grow_by number of bytes by which to increase the size of the packet + */ +int liteav_av_grow_packet(AVPacket *pkt, int grow_by); + +/** + * Initialize a reference-counted packet from liteav_av_malloc()ed data. + * + * @param pkt packet to be initialized. This function will set the data, size, + * buf and destruct fields, all others are left untouched. + * @param data Data allocated by liteav_av_malloc() to be used as packet data. If this + * function returns successfully, the data is owned by the underlying AVBuffer. + * The caller may not access the data through other means. + * @param size size of data in bytes, without the padding. I.e. the full buffer + * size is assumed to be size + AV_INPUT_BUFFER_PADDING_SIZE. 
+ * + * @return 0 on success, a negative AVERROR on error + */ +int liteav_av_packet_from_data(AVPacket *pkt, uint8_t *data, int size); + +#if FF_API_AVPACKET_OLD_API +/** + * @warning This is a hack - the packet memory allocation stuff is broken. The + * packet is allocated if it was not really allocated. + * + * @deprecated Use liteav_av_packet_ref or liteav_av_packet_make_refcounted + */ +attribute_deprecated +int liteav_av_dup_packet(AVPacket *pkt); +/** + * Copy packet, including contents + * + * @return 0 on success, negative AVERROR on fail + * + * @deprecated Use liteav_av_packet_ref + */ +attribute_deprecated +int liteav_av_copy_packet(AVPacket *dst, const AVPacket *src); + +/** + * Copy packet side data + * + * @return 0 on success, negative AVERROR on fail + * + * @deprecated Use liteav_av_packet_copy_props + */ +attribute_deprecated +int liteav_av_copy_packet_side_data(AVPacket *dst, const AVPacket *src); + +/** + * Free a packet. + * + * @deprecated Use liteav_av_packet_unref + * + * @param pkt packet to free + */ +attribute_deprecated +void liteav_av_free_packet(AVPacket *pkt); +#endif +/** + * Allocate new information of a packet. + * + * @param pkt packet + * @param type side information type + * @param size side information size + * @return pointer to fresh allocated data or NULL otherwise + */ +uint8_t* liteav_av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int size); + +/** + * Wrap an existing array as a packet side data. + * + * @param pkt packet + * @param type side information type + * @param data the side data array. It must be allocated with the liteav_av_malloc() + * family of functions. The ownership of the data is transferred to + * pkt. + * @param size side information size + * @return a non-negative number on success, a negative AVERROR code on + * failure. On failure, the packet is unchanged and the data remains + * owned by the caller. 
+ */ +int liteav_av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + uint8_t *data, size_t size); + +/** + * Shrink the already allocated side data buffer + * + * @param pkt packet + * @param type side information type + * @param size new side information size + * @return 0 on success, < 0 on failure + */ +int liteav_av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int size); + +/** + * Get side information from packet. + * + * @param pkt packet + * @param type desired side information type + * @param size pointer for side information size to store (optional) + * @return pointer to data if present or NULL otherwise + */ +uint8_t* liteav_av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, + int *size); + +#if FF_API_MERGE_SD_API +attribute_deprecated +int liteav_av_packet_merge_side_data(AVPacket *pkt); + +attribute_deprecated +int liteav_av_packet_split_side_data(AVPacket *pkt); +#endif + +const char *liteav_av_packet_side_data_name(enum AVPacketSideDataType type); + +/** + * Pack a dictionary for use in side_data. + * + * @param dict The dictionary to pack. + * @param size pointer to store the size of the returned data + * @return pointer to data if successful, NULL otherwise + */ +uint8_t *liteav_av_packet_pack_dictionary(AVDictionary *dict, int *size); +/** + * Unpack a dictionary from side_data. + * + * @param data data from side_data + * @param size size of the data + * @param dict the metadata storage dictionary + * @return 0 on success, < 0 on failure + */ +int liteav_av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict); + + +/** + * Convenience function to free all the side data stored. + * All the other fields stay untouched. 
+ * + * @param pkt packet + */ +void liteav_av_packet_free_side_data(AVPacket *pkt); + +/** + * Setup a new reference to the data described by a given packet + * + * If src is reference-counted, setup dst as a new reference to the + * buffer in src. Otherwise allocate a new buffer in dst and copy the + * data from src into it. + * + * All the other fields are copied from src. + * + * @see liteav_av_packet_unref + * + * @param dst Destination packet + * @param src Source packet + * + * @return 0 on success, a negative AVERROR on error. + */ +int liteav_av_packet_ref(AVPacket *dst, const AVPacket *src); + +/** + * Wipe the packet. + * + * Unreference the buffer referenced by the packet and reset the + * remaining packet fields to their default values. + * + * @param pkt The packet to be unreferenced. + */ +void liteav_av_packet_unref(AVPacket *pkt); + +/** + * Move every field in src to dst and reset src. + * + * @see liteav_av_packet_unref + * + * @param src Source packet, will be reset + * @param dst Destination packet + */ +void liteav_av_packet_move_ref(AVPacket *dst, AVPacket *src); + +/** + * Copy only "properties" fields from src to dst. + * + * Properties for the purpose of this function are all the fields + * beside those related to the packet data (buf, data, size) + * + * @param dst Destination packet + * @param src Source packet + * + * @return 0 on success AVERROR on failure. + */ +int liteav_av_packet_copy_props(AVPacket *dst, const AVPacket *src); + +/** + * Ensure the data described by a given packet is reference counted. + * + * @note This function does not ensure that the reference will be writable. + * Use liteav_av_packet_make_writable instead for that purpose. + * + * @see liteav_av_packet_ref + * @see liteav_av_packet_make_writable + * + * @param pkt packet whose data should be made reference counted. + * + * @return 0 on success, a negative AVERROR on error. On failure, the + * packet is unchanged. 
+ */ +int liteav_av_packet_make_refcounted(AVPacket *pkt); + +/** + * Create a writable reference for the data described by a given packet, + * avoiding data copy if possible. + * + * @param pkt Packet whose data should be made writable. + * + * @return 0 on success, a negative AVERROR on failure. On failure, the + * packet is unchanged. + */ +int liteav_av_packet_make_writable(AVPacket *pkt); + +/** + * Convert valid timing fields (timestamps / durations) in a packet from one + * timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be + * ignored. + * + * @param pkt packet on which the conversion will be performed + * @param tb_src source timebase, in which the timing fields in pkt are + * expressed + * @param tb_dst destination timebase, to which the timing fields will be + * converted + */ +void liteav_av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst); + +/** + * @} + */ + +/** + * @addtogroup lavc_decoding + * @{ + */ + +/** + * Find a registered decoder with a matching codec ID. + * + * @param id AVCodecID of the requested decoder + * @return A decoder if one was found, NULL otherwise. + */ +AVCodec *liteav_avcodec_find_decoder(enum AVCodecID id); + +/** + * Find a registered decoder with the specified name. + * + * @param name name of the requested decoder + * @return A decoder if one was found, NULL otherwise. + */ +AVCodec *liteav_avcodec_find_decoder_by_name(const char *name); + +/** + * The default callback for AVCodecContext.get_buffer2(). It is made public so + * it can be called by custom get_buffer2() implementations for decoders without + * AV_CODEC_CAP_DR1 set. + */ +int liteav_avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags); + +/** + * Modify width and height values so that they will result in a memory + * buffer that is acceptable for the codec if you do not use any horizontal + * padding. + * + * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened. 
+ */ +void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height); + +/** + * Modify width and height values so that they will result in a memory + * buffer that is acceptable for the codec if you also ensure that all + * line sizes are a multiple of the respective linesize_align[i]. + * + * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened. + */ +void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, + int linesize_align[AV_NUM_DATA_POINTERS]); + +/** + * Converts AVChromaLocation to swscale x/y chroma position. + * + * The positions represent the chroma (0,0) position in a coordinates system + * with luma (0,0) representing the origin and luma(1,1) representing 256,256 + * + * @param xpos horizontal chroma sample position + * @param ypos vertical chroma sample position + */ +int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos); + +/** + * Converts swscale x/y chroma position to AVChromaLocation. + * + * The positions represent the chroma (0,0) position in a coordinates system + * with luma (0,0) representing the origin and luma(1,1) representing 256,256 + * + * @param xpos horizontal chroma sample position + * @param ypos vertical chroma sample position + */ +enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos); + +/** + * Decode the audio frame of size avpkt->size from avpkt->data into frame. + * + * Some decoders may support multiple frames in a single AVPacket. Such + * decoders would then just decode the first frame and the return value would be + * less than the packet size. In this case, liteav_avcodec_decode_audio4 has to be + * called again with an AVPacket containing the remaining data in order to + * decode the second frame, etc... Even if no frames are returned, the packet + * needs to be fed to the decoder with remaining data until it is completely + * consumed or an error occurs. 
+ * + * Some decoders (those marked with AV_CODEC_CAP_DELAY) have a delay between input + * and output. This means that for some packets they will not immediately + * produce decoded output and need to be flushed at the end of decoding to get + * all the decoded data. Flushing is done by calling this function with packets + * with avpkt->data set to NULL and avpkt->size set to 0 until it stops + * returning samples. It is safe to flush even those decoders that are not + * marked with AV_CODEC_CAP_DELAY, then no samples will be returned. + * + * @warning The input buffer, avpkt->data must be AV_INPUT_BUFFER_PADDING_SIZE + * larger than the actual read bytes because some optimized bitstream + * readers read 32 or 64 bits at once and could read over the end. + * + * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() + * before packets may be fed to the decoder. + * + * @param avctx the codec context + * @param[out] frame The AVFrame in which to store decoded audio samples. + * The decoder will allocate a buffer for the decoded frame by + * calling the AVCodecContext.get_buffer2() callback. + * When AVCodecContext.refcounted_frames is set to 1, the frame is + * reference counted and the returned reference belongs to the + * caller. The caller must release the frame using liteav_av_frame_unref() + * when the frame is no longer needed. The caller may safely write + * to the frame if liteav_av_frame_is_writable() returns 1. + * When AVCodecContext.refcounted_frames is set to 0, the returned + * reference belongs to the decoder and is valid only until the + * next call to this function or until closing or flushing the + * decoder. The caller may not write to it. + * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is + * non-zero. Note that this field being set to zero + * does not mean that an error has occurred. For + * decoders with AV_CODEC_CAP_DELAY set, no given decode + * call is guaranteed to produce a frame. 
+ * @param[in] avpkt The input AVPacket containing the input buffer. + * At least avpkt->data and avpkt->size should be set. Some + * decoders might also require additional fields to be set. + * @return A negative error code is returned if an error occurred during + * decoding, otherwise the number of bytes consumed from the input + * AVPacket is returned. + * +* @deprecated Use liteav_avcodec_send_packet() and liteav_avcodec_receive_frame(). + */ +attribute_deprecated +int liteav_avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, + int *got_frame_ptr, const AVPacket *avpkt); + +/** + * Decode the video frame of size avpkt->size from avpkt->data into picture. + * Some decoders may support multiple frames in a single AVPacket, such + * decoders would then just decode the first frame. + * + * @warning The input buffer must be AV_INPUT_BUFFER_PADDING_SIZE larger than + * the actual read bytes because some optimized bitstream readers read 32 or 64 + * bits at once and could read over the end. + * + * @warning The end of the input buffer buf should be set to 0 to ensure that + * no overreading happens for damaged MPEG streams. + * + * @note Codecs which have the AV_CODEC_CAP_DELAY capability set have a delay + * between input and output, these need to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to return the remaining frames. + * + * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() + * before packets may be fed to the decoder. + * + * @param avctx the codec context + * @param[out] picture The AVFrame in which the decoded video frame will be stored. + * Use liteav_av_frame_alloc() to get an AVFrame. The codec will + * allocate memory for the actual bitmap by calling the + * AVCodecContext.get_buffer2() callback. + * When AVCodecContext.refcounted_frames is set to 1, the frame is + * reference counted and the returned reference belongs to the + * caller. 
The caller must release the frame using liteav_av_frame_unref() + * when the frame is no longer needed. The caller may safely write + * to the frame if liteav_av_frame_is_writable() returns 1. + * When AVCodecContext.refcounted_frames is set to 0, the returned + * reference belongs to the decoder and is valid only until the + * next call to this function or until closing or flushing the + * decoder. The caller may not write to it. + * + * @param[in] avpkt The input AVPacket containing the input buffer. + * You can create such packet with liteav_av_init_packet() and by then setting + * data and size, some decoders might in addition need other fields like + * flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least + * fields possible. + * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero. + * @return On error a negative value is returned, otherwise the number of bytes + * used or zero if no frame could be decompressed. + * + * @deprecated Use liteav_avcodec_send_packet() and liteav_avcodec_receive_frame(). + */ +attribute_deprecated +int liteav_avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, + int *got_picture_ptr, + const AVPacket *avpkt); + +/** + * Decode a subtitle message. + * Return a negative value on error, otherwise return the number of bytes used. + * If no subtitle could be decompressed, got_sub_ptr is zero. + * Otherwise, the subtitle is stored in *sub. + * Note that AV_CODEC_CAP_DR1 is not available for subtitle codecs. This is for + * simplicity, because the performance difference is expected to be negligible + * and reusing a get_buffer written for video codecs would probably perform badly + * due to a potentially very different allocation pattern. + * + * Some decoders (those marked with AV_CODEC_CAP_DELAY) have a delay between input + * and output. 
This means that for some packets they will not immediately + * produce decoded output and need to be flushed at the end of decoding to get + * all the decoded data. Flushing is done by calling this function with packets + * with avpkt->data set to NULL and avpkt->size set to 0 until it stops + * returning subtitles. It is safe to flush even those decoders that are not + * marked with AV_CODEC_CAP_DELAY, then no subtitles will be returned. + * + * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() + * before packets may be fed to the decoder. + * + * @param avctx the codec context + * @param[out] sub The Preallocated AVSubtitle in which the decoded subtitle will be stored, + * must be freed with avsubtitle_free if *got_sub_ptr is set. + * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero. + * @param[in] avpkt The input AVPacket containing the input buffer. + */ +int liteav_avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, + int *got_sub_ptr, + AVPacket *avpkt); + +/** + * Supply raw packet data as input to a decoder. + * + * Internally, this call will copy relevant AVCodecContext fields, which can + * influence decoding per-packet, and apply them when the packet is actually + * decoded. (For example AVCodecContext.skip_frame, which might direct the + * decoder to drop the frame contained by the packet sent with this function.) + * + * @warning The input buffer, avpkt->data must be AV_INPUT_BUFFER_PADDING_SIZE + * larger than the actual read bytes because some optimized bitstream + * readers read 32 or 64 bits at once and could read over the end. + * + * @warning Do not mix this API with the legacy API (like liteav_avcodec_decode_video2()) + * on the same AVCodecContext. It will return unexpected results now + * or in future libavcodec versions. + * + * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() + * before packets may be fed to the decoder. 
+ * + * @param avctx codec context + * @param[in] avpkt The input AVPacket. Usually, this will be a single video + * frame, or several complete audio frames. + * Ownership of the packet remains with the caller, and the + * decoder will not write to the packet. The decoder may create + * a reference to the packet data (or copy it if the packet is + * not reference-counted). + * Unlike with older APIs, the packet is always fully consumed, + * and if it contains multiple frames (e.g. some audio codecs), + * will require you to call liteav_avcodec_receive_frame() multiple + * times afterwards before you can send a new packet. + * It can be NULL (or an AVPacket with data set to NULL and + * size set to 0); in this case, it is considered a flush + * packet, which signals the end of the stream. Sending the + * first flush packet will return success. Subsequent ones are + * unnecessary and will return AVERROR_EOF. If the decoder + * still has frames buffered, it will return them after sending + * a flush packet. + * + * @return 0 on success, otherwise negative error code: + * AVERROR(EAGAIN): input is not accepted in the current state - user + * must read output with liteav_avcodec_receive_frame() (once + * all output is read, the packet should be resent, and + * the call will not fail with EAGAIN). + * AVERROR_EOF: the decoder has been flushed, and no new packets can + * be sent to it (also returned if more than 1 flush + * packet is sent) + * AVERROR(EINVAL): codec not opened, it is an encoder, or requires flush + * AVERROR(ENOMEM): failed to add packet to internal queue, or similar + * other errors: legitimate decoding errors + */ +int liteav_avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt); + +/** + * Return decoded output data from a decoder. + * + * @param avctx codec context + * @param frame This will be set to a reference-counted video or audio + * frame (depending on the decoder type) allocated by the + * decoder. 
Note that the function will always call + * liteav_av_frame_unref(frame) before doing anything else. + * + * @return + * 0: success, a frame was returned + * AVERROR(EAGAIN): output is not available in this state - user must try + * to send new input + * AVERROR_EOF: the decoder has been fully flushed, and there will be + * no more output frames + * AVERROR(EINVAL): codec not opened, or it is an encoder + * other negative values: legitimate decoding errors + */ +int liteav_avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame); + +/** + * Supply a raw video or audio frame to the encoder. Use liteav_avcodec_receive_packet() + * to retrieve buffered output packets. + * + * @param avctx codec context + * @param[in] frame AVFrame containing the raw audio or video frame to be encoded. + * Ownership of the frame remains with the caller, and the + * encoder will not write to the frame. The encoder may create + * a reference to the frame data (or copy it if the frame is + * not reference-counted). + * It can be NULL, in which case it is considered a flush + * packet. This signals the end of the stream. If the encoder + * still has packets buffered, it will return them after this + * call. Once flushing mode has been entered, additional flush + * packets are ignored, and sending frames will return + * AVERROR_EOF. + * + * For audio: + * If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame + * can have any number of samples. + * If it is not set, frame->nb_samples must be equal to + * avctx->frame_size for all frames except the last. + * The final frame may be smaller than avctx->frame_size. + * @return 0 on success, otherwise negative error code: + * AVERROR(EAGAIN): input is not accepted in the current state - user + * must read output with liteav_avcodec_receive_packet() (once + * all output is read, the packet should be resent, and + * the call will not fail with EAGAIN). 
+ * AVERROR_EOF: the encoder has been flushed, and no new frames can + * be sent to it + * AVERROR(EINVAL): codec not opened, refcounted_frames not set, it is a + * decoder, or requires flush + * AVERROR(ENOMEM): failed to add packet to internal queue, or similar + * other errors: legitimate encoding errors + */ +int liteav_avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame); + +/** + * Read encoded data from the encoder. + * + * @param avctx codec context + * @param avpkt This will be set to a reference-counted packet allocated by the + * encoder. Note that the function will always call + * liteav_av_packet_unref(avpkt) before doing anything else. + * @return 0 on success, otherwise negative error code: + * AVERROR(EAGAIN): output is not available in the current state - user + * must try to send input + * AVERROR_EOF: the encoder has been fully flushed, and there will be + * no more output packets + * AVERROR(EINVAL): codec not opened, or it is a decoder + * other errors: legitimate encoding errors + */ +int liteav_avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt); + +/** + * Create and return a AVHWFramesContext with values adequate for hardware + * decoding. This is meant to get called from the get_format callback, and is + * a helper for preparing a AVHWFramesContext for AVCodecContext.hw_frames_ctx. + * This API is for decoding with certain hardware acceleration modes/APIs only. + * + * The returned AVHWFramesContext is not initialized. The caller must do this + * with liteav_av_hwframe_ctx_init(). + * + * Calling this function is not a requirement, but makes it simpler to avoid + * codec or hardware API specific details when manually allocating frames. + * + * Alternatively to this, an API user can set AVCodecContext.hw_device_ctx, + * which sets up AVCodecContext.hw_frames_ctx fully automatically, and makes + * it unnecessary to call this function or having to care about + * AVHWFramesContext initialization at all. 
+ * + * There are a number of requirements for calling this function: + * + * - It must be called from get_format with the same avctx parameter that was + * passed to get_format. Calling it outside of get_format is not allowed, and + * can trigger undefined behavior. + * - The function is not always supported (see description of return values). + * Even if this function returns successfully, hwaccel initialization could + * fail later. (The degree to which implementations check whether the stream + * is actually supported varies. Some do this check only after the user's + * get_format callback returns.) + * - The hw_pix_fmt must be one of the choices suggested by get_format. If the + * user decides to use a AVHWFramesContext prepared with this API function, + * the user must return the same hw_pix_fmt from get_format. + * - The device_ref passed to this function must support the given hw_pix_fmt. + * - After calling this API function, it is the user's responsibility to + * initialize the AVHWFramesContext (returned by the out_frames_ref parameter), + * and to set AVCodecContext.hw_frames_ctx to it. If done, this must be done + * before returning from get_format (this is implied by the normal + * AVCodecContext.hw_frames_ctx API rules). + * - The AVHWFramesContext parameters may change every time get_format is + * called. Also, AVCodecContext.hw_frames_ctx is reset before get_format. So + * you are inherently required to go through this process again on every + * get_format call. + * - It is perfectly possible to call this function without actually using + * the resulting AVHWFramesContext. One use-case might be trying to reuse a + * previously initialized AVHWFramesContext, and calling this API function + * only to test whether the required frame parameters have changed. + * - Fields that use dynamically allocated values of any kind must not be set + * by the user unless setting them is explicitly allowed by the documentation. 
+ * If the user sets AVHWFramesContext.free and AVHWFramesContext.user_opaque, + * the new free callback must call the potentially set previous free callback. + * This API call may set any dynamically allocated fields, including the free + * callback. + * + * The function will set at least the following fields on AVHWFramesContext + * (potentially more, depending on hwaccel API): + * + * - All fields set by liteav_av_hwframe_ctx_alloc(). + * - Set the format field to hw_pix_fmt. + * - Set the sw_format field to the most suited and most versatile format. (An + * implication is that this will prefer generic formats over opaque formats + * with arbitrary restrictions, if possible.) + * - Set the width/height fields to the coded frame size, rounded up to the + * API-specific minimum alignment. + * - Only _if_ the hwaccel requires a pre-allocated pool: set the initial_pool_size + * field to the number of maximum reference surfaces possible with the codec, + * plus 1 surface for the user to work (meaning the user can safely reference + * at most 1 decoded surface at a time), plus additional buffering introduced + * by frame threading. If the hwaccel does not require pre-allocation, the + * field is left to 0, and the decoder will allocate new surfaces on demand + * during decoding. + * - Possibly AVHWFramesContext.hwctx fields, depending on the underlying + * hardware API. + * + * Essentially, out_frames_ref returns the same as liteav_av_hwframe_ctx_alloc(), but + * with basic frame parameters set. + * + * The function is stateless, and does not change the AVCodecContext or the + * device_ref AVHWDeviceContext. + * + * @param avctx The context which is currently calling get_format, and which + * implicitly contains all state needed for filling the returned + * AVHWFramesContext properly. + * @param device_ref A reference to the AVHWDeviceContext describing the device + * which will be used by the hardware decoder. 
+ * @param hw_pix_fmt The hwaccel format you are going to return from get_format. + * @param out_frames_ref On success, set to a reference to an _uninitialized_ + * AVHWFramesContext, created from the given device_ref. + * Fields will be set to values required for decoding. + * Not changed if an error is returned. + * @return zero on success, a negative value on error. The following error codes + * have special semantics: + * AVERROR(ENOENT): the decoder does not support this functionality. Setup + * is always manual, or it is a decoder which does not + * support setting AVCodecContext.hw_frames_ctx at all, + * or it is a software format. + * AVERROR(EINVAL): it is known that hardware decoding is not supported for + * this configuration, or the device_ref is not supported + * for the hwaccel referenced by hw_pix_fmt. + */ +int liteav_avcodec_get_hw_frames_parameters(AVCodecContext *avctx, + AVBufferRef *device_ref, + enum AVPixelFormat hw_pix_fmt, + AVBufferRef **out_frames_ref); + + + +/** + * @defgroup lavc_parsing Frame parsing + * @{ + */ + +enum AVPictureStructure { + AV_PICTURE_STRUCTURE_UNKNOWN, //< unknown + AV_PICTURE_STRUCTURE_TOP_FIELD, //< coded as top field + AV_PICTURE_STRUCTURE_BOTTOM_FIELD, //< coded as bottom field + AV_PICTURE_STRUCTURE_FRAME, //< coded as frame +}; + +typedef struct AVCodecParserContext { + void *priv_data; + struct AVCodecParser *parser; + int64_t frame_offset; /* offset of the current frame */ + int64_t cur_offset; /* current offset + (incremented by each av_parser_parse()) */ + int64_t next_frame_offset; /* offset of the next frame */ + /* video info */ + int pict_type; /* XXX: Put it back in AVCodecContext. */ + /** + * This field is used for proper frame duration computation in lavf. + * It signals, how much longer the frame duration of the current frame + * is compared to normal frame duration. + * + * frame_duration = (1 + repeat_pict) * time_base + * + * It is used by codecs like H.264 to display telecined material. 
+ */ + int repeat_pict; /* XXX: Put it back in AVCodecContext. */ + int64_t pts; /* pts of the current frame */ + int64_t dts; /* dts of the current frame */ + + /* private data */ + int64_t last_pts; + int64_t last_dts; + int fetch_timestamp; + +#define AV_PARSER_PTS_NB 4 + int cur_frame_start_index; + int64_t cur_frame_offset[AV_PARSER_PTS_NB]; + int64_t cur_frame_pts[AV_PARSER_PTS_NB]; + int64_t cur_frame_dts[AV_PARSER_PTS_NB]; + + int flags; +#define PARSER_FLAG_COMPLETE_FRAMES 0x0001 +#define PARSER_FLAG_ONCE 0x0002 +/// Set if the parser has a valid file offset +#define PARSER_FLAG_FETCHED_OFFSET 0x0004 +#define PARSER_FLAG_USE_CODEC_TS 0x1000 + + int64_t offset; ///< byte offset from starting packet start + int64_t cur_frame_end[AV_PARSER_PTS_NB]; + + /** + * Set by parser to 1 for key frames and 0 for non-key frames. + * It is initialized to -1, so if the parser doesn't set this flag, + * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames + * will be used. + */ + int key_frame; + +#if FF_API_CONVERGENCE_DURATION + /** + * @deprecated unused + */ + attribute_deprecated + int64_t convergence_duration; +#endif + + // Timestamp generation support: + /** + * Synchronization point for start of timestamp generation. + * + * Set to >0 for sync point, 0 for no sync point and <0 for undefined + * (default). + * + * For example, this corresponds to presence of H.264 buffering period + * SEI message. + */ + int dts_sync_point; + + /** + * Offset of the current timestamp against last timestamp sync point in + * units of AVCodecContext.time_base. + * + * Set to INT_MIN when dts_sync_point unused. Otherwise, it must + * contain a valid timestamp offset. + * + * Note that the timestamp of sync point has usually a nonzero + * dts_ref_dts_delta, which refers to the previous sync point. Offset of + * the next frame after timestamp sync point will be usually 1. + * + * For example, this corresponds to H.264 cpb_removal_delay. 
+ */ + int dts_ref_dts_delta; + + /** + * Presentation delay of current frame in units of AVCodecContext.time_base. + * + * Set to INT_MIN when dts_sync_point unused. Otherwise, it must + * contain valid non-negative timestamp delta (presentation time of a frame + * must not lie in the past). + * + * This delay represents the difference between decoding and presentation + * time of the frame. + * + * For example, this corresponds to H.264 dpb_output_delay. + */ + int pts_dts_delta; + + /** + * Position of the packet in file. + * + * Analogous to cur_frame_pts/dts + */ + int64_t cur_frame_pos[AV_PARSER_PTS_NB]; + + /** + * Byte position of currently parsed frame in stream. + */ + int64_t pos; + + /** + * Previous frame byte position. + */ + int64_t last_pos; + + /** + * Duration of the current frame. + * For audio, this is in units of 1 / AVCodecContext.sample_rate. + * For all other types, this is in units of AVCodecContext.time_base. + */ + int duration; + + enum AVFieldOrder field_order; + + /** + * Indicate whether a picture is coded as a frame, top field or bottom field. + * + * For example, H.264 field_pic_flag equal to 0 corresponds to + * AV_PICTURE_STRUCTURE_FRAME. An H.264 picture with field_pic_flag + * equal to 1 and bottom_field_flag equal to 0 corresponds to + * AV_PICTURE_STRUCTURE_TOP_FIELD. + */ + enum AVPictureStructure picture_structure; + + /** + * Picture number incremented in presentation or output order. + * This field may be reinitialized at the first picture of a new sequence. + * + * For example, this corresponds to H.264 PicOrderCnt. + */ + int output_picture_number; + + /** + * Dimensions of the decoded video intended for presentation. + */ + int width; + int height; + + /** + * Dimensions of the coded video. + */ + int coded_width; + int coded_height; + + /** + * The format of the coded data, corresponds to enum AVPixelFormat for video + * and for enum AVSampleFormat for audio. 
+ * + * Note that a decoder can have considerable freedom in how exactly it + * decodes the data, so the format reported here might be different from the + * one returned by a decoder. + */ + int format; +} AVCodecParserContext; + +typedef struct AVCodecParser { + int codec_ids[5]; /* several codec IDs are permitted */ + int priv_data_size; + int (*parser_init)(AVCodecParserContext *s); + /* This callback never returns an error, a negative value means that + * the frame start was in a previous packet. */ + int (*parser_parse)(AVCodecParserContext *s, + AVCodecContext *avctx, + const uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size); + void (*parser_close)(AVCodecParserContext *s); + int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size); + struct AVCodecParser *next; +} AVCodecParser; + +/** + * Iterate over all registered codec parsers. + * + * @param opaque a pointer where libavcodec will store the iteration state. Must + * point to NULL to start the iteration. + * + * @return the next registered codec parser or NULL when the iteration is + * finished + */ +const AVCodecParser *liteav_av_parser_iterate(void **opaque); + +attribute_deprecated +AVCodecParser *liteav_av_parser_next(const AVCodecParser *c); + +attribute_deprecated +void liteav_av_register_codec_parser(AVCodecParser *parser); +AVCodecParserContext *liteav_av_parser_init(int codec_id); + +/** + * Parse a packet. + * + * @param s parser context. + * @param avctx codec context. + * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished. + * @param poutbuf_size set to size of parsed buffer or zero if not yet finished. + * @param buf input buffer. + * @param buf_size buffer size in bytes without the padding. I.e. the full buffer + size is assumed to be buf_size + AV_INPUT_BUFFER_PADDING_SIZE. + To signal EOF, this should be 0 (so that the last frame + can be output). + * @param pts input presentation timestamp. 
+ * @param dts input decoding timestamp. + * @param pos input byte position in stream. + * @return the number of bytes of the input bitstream used. + * + * Example: + * @code + * while(in_len){ + * len = liteav_av_parser_parse2(myparser, AVCodecContext, &data, &size, + * in_data, in_len, + * pts, dts, pos); + * in_data += len; + * in_len -= len; + * + * if(size) + * decode_frame(data, size); + * } + * @endcode + */ +int liteav_av_parser_parse2(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, + int64_t pts, int64_t dts, + int64_t pos); + +/** + * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed + * @deprecated use AVBitStreamFilter + */ +int liteav_av_parser_change(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); +void liteav_av_parser_close(AVCodecParserContext *s); + +/** + * @} + * @} + */ + +/** + * @addtogroup lavc_encoding + * @{ + */ + +/** + * Find a registered encoder with a matching codec ID. + * + * @param id AVCodecID of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *liteav_avcodec_find_encoder(enum AVCodecID id); + +/** + * Find a registered encoder with the specified name. + * + * @param name name of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *liteav_avcodec_find_encoder_by_name(const char *name); + +/** + * Encode a frame of audio. + * + * Takes input samples from frame and writes the next output packet, if + * available, to avpkt. The output packet does not necessarily contain data for + * the most recent frame, as encoders can delay, split, and combine input frames + * internally as needed. + * + * @param avctx codec context + * @param avpkt output AVPacket. 
+ * The user can supply an output buffer by setting + * avpkt->data and avpkt->size prior to calling the + * function, but if the size of the user-provided data is not + * large enough, encoding will fail. If avpkt->data and + * avpkt->size are set, avpkt->destruct must also be set. All + * other AVPacket fields will be reset by the encoder using + * liteav_av_init_packet(). If avpkt->data is NULL, the encoder will + * allocate it. The encoder will set avpkt->size to the size + * of the output packet. + * + * If this function fails or produces no output, avpkt will be + * freed using liteav_av_packet_unref(). + * @param[in] frame AVFrame containing the raw audio data to be encoded. + * May be NULL when flushing an encoder that has the + * AV_CODEC_CAP_DELAY capability set. + * If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame + * can have any number of samples. + * If it is not set, frame->nb_samples must be equal to + * avctx->frame_size for all frames except the last. + * The final frame may be smaller than avctx->frame_size. + * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the + * output packet is non-empty, and to 0 if it is + * empty. If the function returns an error, the + * packet can be assumed to be invalid, and the + * value of got_packet_ptr is undefined and should + * not be used. + * @return 0 on success, negative error code on failure + * + * @deprecated use liteav_avcodec_send_frame()/liteav_avcodec_receive_packet() instead + */ +attribute_deprecated +int liteav_avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr); + +/** + * Encode a frame of video. + * + * Takes input raw video data from frame and writes the next output packet, if + * available, to avpkt. The output packet does not necessarily contain data for + * the most recent frame, as encoders can delay and reorder input frames + * internally as needed. 
+ * + * @param avctx codec context + * @param avpkt output AVPacket. + * The user can supply an output buffer by setting + * avpkt->data and avpkt->size prior to calling the + * function, but if the size of the user-provided data is not + * large enough, encoding will fail. All other AVPacket fields + * will be reset by the encoder using liteav_av_init_packet(). If + * avpkt->data is NULL, the encoder will allocate it. + * The encoder will set avpkt->size to the size of the + * output packet. The returned data (if any) belongs to the + * caller, he is responsible for freeing it. + * + * If this function fails or produces no output, avpkt will be + * freed using liteav_av_packet_unref(). + * @param[in] frame AVFrame containing the raw video data to be encoded. + * May be NULL when flushing an encoder that has the + * AV_CODEC_CAP_DELAY capability set. + * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the + * output packet is non-empty, and to 0 if it is + * empty. If the function returns an error, the + * packet can be assumed to be invalid, and the + * value of got_packet_ptr is undefined and should + * not be used. + * @return 0 on success, negative error code on failure + * + * @deprecated use liteav_avcodec_send_frame()/liteav_avcodec_receive_packet() instead + */ +attribute_deprecated +int liteav_avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr); + +int liteav_avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, + const AVSubtitle *sub); + + +/** + * @} + */ + +#if FF_API_AVPICTURE +/** + * @addtogroup lavc_picture + * @{ + */ + +/** + * @deprecated unused + */ +attribute_deprecated +int liteav_avpicture_alloc(AVPicture *picture, enum AVPixelFormat pix_fmt, int width, int height); + +/** + * @deprecated unused + */ +attribute_deprecated +void liteav_avpicture_free(AVPicture *picture); + +/** + * @deprecated use liteav_av_image_fill_arrays() instead. 
+ */ +attribute_deprecated +int liteav_avpicture_fill(AVPicture *picture, const uint8_t *ptr, + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * @deprecated use liteav_av_image_copy_to_buffer() instead. + */ +attribute_deprecated +int liteav_avpicture_layout(const AVPicture *src, enum AVPixelFormat pix_fmt, + int width, int height, + unsigned char *dest, int dest_size); + +/** + * @deprecated use liteav_av_image_get_buffer_size() instead. + */ +attribute_deprecated +int liteav_avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height); + +/** + * @deprecated liteav_av_image_copy() instead. + */ +attribute_deprecated +void liteav_av_picture_copy(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * @deprecated unused + */ +attribute_deprecated +int liteav_av_picture_crop(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int top_band, int left_band); + +/** + * @deprecated unused + */ +attribute_deprecated +int liteav_av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum AVPixelFormat pix_fmt, + int padtop, int padbottom, int padleft, int padright, int *color); + +/** + * @} + */ +#endif + +/** + * @defgroup lavc_misc Utility functions + * @ingroup libavc + * + * Miscellaneous utility functions related to both encoding and decoding + * (or neither). + * @{ + */ + +/** + * @defgroup lavc_misc_pixfmt Pixel formats + * + * Functions for working with pixel formats. + * @{ + */ + +#if FF_API_GETCHROMA +/** + * @deprecated Use liteav_av_pix_fmt_get_chroma_sub_sample + */ + +attribute_deprecated +void liteav_avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift); +#endif + +/** + * Return a value representing the fourCC code associated to the + * pixel format pix_fmt, or 0 if no associated fourCC code can be + * found. 
+ */ +unsigned int liteav_avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt); + +/** + * @deprecated see liteav_av_get_pix_fmt_loss() + */ +int liteav_avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt, + int has_alpha); + +/** + * Find the best pixel format to convert to given a certain source pixel + * format. When converting from one pixel format to another, information loss + * may occur. For example, when converting from RGB24 to GRAY, the color + * information will be lost. Similarly, other losses occur when converting from + * some formats to other formats. liteav_avcodec_find_best_pix_fmt_of_2() searches which of + * the given pixel formats should be used to suffer the least amount of loss. + * The pixel formats from which it chooses one, are determined by the + * pix_fmt_list parameter. + * + * + * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to choose from + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur. + * @return The best pixel format to convert to or -1 if none was found. 
+ */ +enum AVPixelFormat liteav_avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list, + enum AVPixelFormat src_pix_fmt, + int has_alpha, int *loss_ptr); + +/** + * @deprecated see liteav_av_find_best_pix_fmt_of_2() + */ +enum AVPixelFormat liteav_avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); + +attribute_deprecated +enum AVPixelFormat liteav_avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); + +enum AVPixelFormat liteav_avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt); + +/** + * @} + */ + +#if FF_API_TAG_STRING +/** + * Put a string representing the codec tag codec_tag in buf. + * + * @param buf buffer to place codec tag in + * @param buf_size size in bytes of buf + * @param codec_tag codec tag to assign + * @return the length of the string that would have been generated if + * enough space had been available, excluding the trailing null + * + * @deprecated see av_fourcc_make_string() and av_fourcc2str(). + */ +attribute_deprecated +size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag); +#endif + +void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); + +/** + * Return a name for the specified profile, if available. + * + * @param codec the codec that is searched for the given profile + * @param profile the profile value for which a name is requested + * @return A name for the profile if found, NULL otherwise. + */ +const char *av_get_profile_name(const AVCodec *codec, int profile); + +/** + * Return a name for the specified profile, if available. 
+ * + * @param codec_id the ID of the codec to which the requested profile belongs + * @param profile the profile value for which a name is requested + * @return A name for the profile if found, NULL otherwise. + * + * @note unlike av_get_profile_name(), which searches a list of profiles + * supported by a specific decoder or encoder implementation, this + * function searches the list of profiles from the AVCodecDescriptor + */ +const char *avcodec_profile_name(enum AVCodecID codec_id, int profile); + +int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size); +int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count); +//FIXME func typedef + +/** + * Fill AVFrame audio data and linesize pointers. + * + * The buffer buf must be a preallocated buffer with a size big enough + * to contain the specified samples amount. The filled AVFrame data + * pointers will point to this buffer. + * + * AVFrame extended_data channel pointers are allocated if necessary for + * planar audio. + * + * @param frame the AVFrame + * frame->nb_samples must be set prior to calling the + * function. This function fills in frame->data, + * frame->extended_data, frame->linesize[0]. + * @param nb_channels channel count + * @param sample_fmt sample format + * @param buf buffer to use for frame data + * @param buf_size size of buffer + * @param align plane size sample alignment (0 = default) + * @return >=0 on success, negative error code on failure + * @todo return the size in bytes required to store the samples in + * case of success, at the next libavutil bump + */ +int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, + enum AVSampleFormat sample_fmt, const uint8_t *buf, + int buf_size, int align); + +/** + * Reset the internal decoder state / flush internal buffers. Should be called + * e.g. 
when seeking or when switching to a different stream. + * + * @note when refcounted frames are not used (i.e. avctx->refcounted_frames is 0), + * this invalidates the frames previously returned from the decoder. When + * refcounted frames are used, the decoder just releases any references it might + * keep internally, but the caller's reference remains valid. + */ +void liteav_avcodec_flush_buffers(AVCodecContext *avctx); + +/** + * Return codec bits per sample. + * + * @param[in] codec_id the codec + * @return Number of bits per sample or zero if unknown for the given codec. + */ +int av_get_bits_per_sample(enum AVCodecID codec_id); + +/** + * Return the PCM codec associated with a sample format. + * @param be endianness, 0 for little, 1 for big, + * -1 (or anything else) for native + * @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE + */ +enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be); + +/** + * Return codec bits per sample. + * Only return non-zero if the bits per sample is exactly correct, not an + * approximation. + * + * @param[in] codec_id the codec + * @return Number of bits per sample or zero if unknown for the given codec. + */ +int av_get_exact_bits_per_sample(enum AVCodecID codec_id); + +/** + * Return audio frame duration. + * + * @param avctx codec context + * @param frame_bytes size of the frame, or 0 if unknown + * @return frame duration, in samples, if known. 0 if not able to + * determine. + */ +int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes); + +/** + * This function is the same as av_get_audio_frame_duration(), except it works + * with AVCodecParameters instead of an AVCodecContext. 
+ */ +int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes); + +#if FF_API_OLD_BSF +typedef struct AVBitStreamFilterContext { + void *priv_data; + const struct AVBitStreamFilter *filter; + AVCodecParserContext *parser; + struct AVBitStreamFilterContext *next; + /** + * Internal default arguments, used if NULL is passed to liteav_av_bitstream_filter_filter(). + * Not for access by library users. + */ + char *args; +} AVBitStreamFilterContext; +#endif + +typedef struct AVBSFInternal AVBSFInternal; + +/** + * The bitstream filter state. + * + * This struct must be allocated with liteav_av_bsf_alloc() and freed with + * liteav_av_bsf_free(). + * + * The fields in the struct will only be changed (by the caller or by the + * filter) as described in their documentation, and are to be considered + * immutable otherwise. + */ +typedef struct AVBSFContext { + /** + * A class for logging and AVOptions + */ + const AVClass *av_class; + + /** + * The bitstream filter this context is an instance of. + */ + const struct AVBitStreamFilter *filter; + + /** + * Opaque libavcodec internal data. Must not be touched by the caller in any + * way. + */ + AVBSFInternal *internal; + + /** + * Opaque filter-specific private data. If filter->priv_class is non-NULL, + * this is an AVOptions-enabled struct. + */ + void *priv_data; + + /** + * Parameters of the input stream. This field is allocated in + * liteav_av_bsf_alloc(), it needs to be filled by the caller before + * liteav_av_bsf_init(). + */ + AVCodecParameters *par_in; + + /** + * Parameters of the output stream. This field is allocated in + * liteav_av_bsf_alloc(), it is set by the filter in liteav_av_bsf_init(). + */ + AVCodecParameters *par_out; + + /** + * The timebase used for the timestamps of the input packets. Set by the + * caller before liteav_av_bsf_init(). + */ + AVRational time_base_in; + + /** + * The timebase used for the timestamps of the output packets. 
Set by the + * filter in liteav_av_bsf_init(). + */ + AVRational time_base_out; +} AVBSFContext; + +typedef struct AVBitStreamFilter { + const char *name; + + /** + * A list of codec ids supported by the filter, terminated by + * AV_CODEC_ID_NONE. + * May be NULL, in that case the bitstream filter works with any codec id. + */ + const enum AVCodecID *codec_ids; + + /** + * A class for the private data, used to declare bitstream filter private + * AVOptions. This field is NULL for bitstream filters that do not declare + * any options. + * + * If this field is non-NULL, the first member of the filter private data + * must be a pointer to AVClass, which will be set by libavcodec generic + * code to this class. + */ + const AVClass *priv_class; + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavcodec and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + int priv_data_size; + int (*init)(AVBSFContext *ctx); + int (*filter)(AVBSFContext *ctx, AVPacket *pkt); + void (*close)(AVBSFContext *ctx); + void (*flush)(AVBSFContext *ctx); +} AVBitStreamFilter; + +#if FF_API_OLD_BSF +/** + * @deprecated the old bitstream filtering API (using AVBitStreamFilterContext) + * is deprecated. Use the new bitstream filtering API (using AVBSFContext). + */ +attribute_deprecated +void liteav_av_register_bitstream_filter(AVBitStreamFilter *bsf); +/** + * @deprecated the old bitstream filtering API (using AVBitStreamFilterContext) + * is deprecated. Use liteav_av_bsf_get_by_name(), liteav_av_bsf_alloc(), and liteav_av_bsf_init() + * from the new bitstream filtering API (using AVBSFContext). 
+ */ +attribute_deprecated +AVBitStreamFilterContext *liteav_av_bitstream_filter_init(const char *name); +/** + * @deprecated the old bitstream filtering API (using AVBitStreamFilterContext) + * is deprecated. Use liteav_av_bsf_send_packet() and liteav_av_bsf_receive_packet() from the + * new bitstream filtering API (using AVBSFContext). + */ +attribute_deprecated +int liteav_av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, + AVCodecContext *avctx, const char *args, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); +/** + * @deprecated the old bitstream filtering API (using AVBitStreamFilterContext) + * is deprecated. Use liteav_av_bsf_free() from the new bitstream filtering API (using + * AVBSFContext). + */ +attribute_deprecated +void liteav_av_bitstream_filter_close(AVBitStreamFilterContext *bsf); +/** + * @deprecated the old bitstream filtering API (using AVBitStreamFilterContext) + * is deprecated. Use liteav_av_bsf_iterate() from the new bitstream filtering API (using + * AVBSFContext). + */ +attribute_deprecated +const AVBitStreamFilter *liteav_av_bitstream_filter_next(const AVBitStreamFilter *f); +#endif + +/** + * @return a bitstream filter with the specified name or NULL if no such + * bitstream filter exists. + */ +const AVBitStreamFilter *liteav_av_bsf_get_by_name(const char *name); + +/** + * Iterate over all registered bitstream filters. + * + * @param opaque a pointer where libavcodec will store the iteration state. Must + * point to NULL to start the iteration. + * + * @return the next registered bitstream filter or NULL when the iteration is + * finished + */ +const AVBitStreamFilter *liteav_av_bsf_iterate(void **opaque); +#if FF_API_NEXT +attribute_deprecated +const AVBitStreamFilter *liteav_av_bsf_next(void **opaque); +#endif + +/** + * Allocate a context for a given bitstream filter. 
The caller must fill in the + * context parameters as described in the documentation and then call + * liteav_av_bsf_init() before sending any data to the filter. + * + * @param filter the filter for which to allocate an instance. + * @param ctx a pointer into which the pointer to the newly-allocated context + * will be written. It must be freed with liteav_av_bsf_free() after the + * filtering is done. + * + * @return 0 on success, a negative AVERROR code on failure + */ +int liteav_av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **ctx); + +/** + * Prepare the filter for use, after all the parameters and options have been + * set. + */ +int liteav_av_bsf_init(AVBSFContext *ctx); + +/** + * Submit a packet for filtering. + * + * After sending each packet, the filter must be completely drained by calling + * liteav_av_bsf_receive_packet() repeatedly until it returns AVERROR(EAGAIN) or + * AVERROR_EOF. + * + * @param pkt the packet to filter. The bitstream filter will take ownership of + * the packet and reset the contents of pkt. pkt is not touched if an error occurs. + * This parameter may be NULL, which signals the end of the stream (i.e. no more + * packets will be sent). That will cause the filter to output any packets it + * may have buffered internally. + * + * @return 0 on success, a negative AVERROR on error. + */ +int liteav_av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt); + +/** + * Retrieve a filtered packet. + * + * @param[out] pkt this struct will be filled with the contents of the filtered + * packet. It is owned by the caller and must be freed using + * liteav_av_packet_unref() when it is no longer needed. + * This parameter should be "clean" (i.e. freshly allocated + * with liteav_av_packet_alloc() or unreffed with liteav_av_packet_unref()) + * when this function is called. If this function returns + * successfully, the contents of pkt will be completely + * overwritten by the returned data. On failure, pkt is not + * touched. 
+ * + * @return 0 on success. AVERROR(EAGAIN) if more packets need to be sent to the + * filter (using liteav_av_bsf_send_packet()) to get more output. AVERROR_EOF if there + * will be no further output from the filter. Another negative AVERROR value if + * an error occurs. + * + * @note one input packet may result in several output packets, so after sending + * a packet with liteav_av_bsf_send_packet(), this function needs to be called + * repeatedly until it stops returning 0. It is also possible for a filter to + * output fewer packets than were sent to it, so this function may return + * AVERROR(EAGAIN) immediately after a successful liteav_av_bsf_send_packet() call. + */ +int liteav_av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt); + +/** + * Reset the internal bitstream filter state / flush internal buffers. + */ +void liteav_av_bsf_flush(AVBSFContext *ctx); + +/** + * Free a bitstream filter context and everything associated with it; write NULL + * into the supplied pointer. + */ +void liteav_av_bsf_free(AVBSFContext **ctx); + +/** + * Get the AVClass for AVBSFContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see liteav_av_opt_find(). + */ +const AVClass *liteav_av_bsf_get_class(void); + +/** + * Structure for chain/list of bitstream filters. + * Empty list can be allocated by liteav_av_bsf_list_alloc(). + */ +typedef struct AVBSFList AVBSFList; + +/** + * Allocate empty list of bitstream filters. + * The list must be later freed by liteav_av_bsf_list_free() + * or finalized by liteav_av_bsf_list_finalize(). + * + * @return Pointer to @ref AVBSFList on success, NULL in case of failure + */ +AVBSFList *liteav_av_bsf_list_alloc(void); + +/** + * Free list of bitstream filters. + * + * @param lst Pointer to pointer returned by liteav_av_bsf_list_alloc() + */ +void liteav_av_bsf_list_free(AVBSFList **lst); + +/** + * Append bitstream filter to the list of bitstream filters. 
+ * + * @param lst List to append to + * @param bsf Filter context to be appended + * + * @return >=0 on success, negative AVERROR in case of failure + */ +int liteav_av_bsf_list_append(AVBSFList *lst, AVBSFContext *bsf); + +/** + * Construct new bitstream filter context given it's name and options + * and append it to the list of bitstream filters. + * + * @param lst List to append to + * @param bsf_name Name of the bitstream filter + * @param options Options for the bitstream filter, can be set to NULL + * + * @return >=0 on success, negative AVERROR in case of failure + */ +int liteav_av_bsf_list_append2(AVBSFList *lst, const char * bsf_name, AVDictionary **options); +/** + * Finalize list of bitstream filters. + * + * This function will transform @ref AVBSFList to single @ref AVBSFContext, + * so the whole chain of bitstream filters can be treated as single filter + * freshly allocated by liteav_av_bsf_alloc(). + * If the call is successful, @ref AVBSFList structure is freed and lst + * will be set to NULL. In case of failure, caller is responsible for + * freeing the structure by liteav_av_bsf_list_free() + * + * @param lst Filter list structure to be transformed + * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext structure + * representing the chain of bitstream filters + * + * @return >=0 on success, negative AVERROR in case of failure + */ +int liteav_av_bsf_list_finalize(AVBSFList **lst, AVBSFContext **bsf); + +/** + * Parse string describing list of bitstream filters and create single + * @ref AVBSFContext describing the whole chain of bitstream filters. + * Resulting @ref AVBSFContext can be treated as any other @ref AVBSFContext freshly + * allocated by liteav_av_bsf_alloc(). 
+ * + * @param str String describing chain of bitstream filters in format + * `bsf1[=opt1=val1:opt2=val2][,bsf2]` + * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext structure + * representing the chain of bitstream filters + * + * @return >=0 on success, negative AVERROR in case of failure + */ +int liteav_av_bsf_list_parse_str(const char *str, AVBSFContext **bsf); + +/** + * Get null/pass-through bitstream filter. + * + * @param[out] bsf Pointer to be set to new instance of pass-through bitstream filter + * + * @return + */ +int liteav_av_bsf_get_null_filter(AVBSFContext **bsf); + +/* memory */ + +/** + * Same behaviour liteav_av_fast_malloc but the buffer has additional + * AV_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0. + * + * In addition the whole buffer will initially and after resizes + * be 0-initialized so that no uninitialized data will ever appear. + */ +void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Same behaviour av_fast_padded_malloc except that buffer will always + * be 0-initialized after call. + */ +void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size); + +/** + * Encode extradata length to a buffer. Used by xiph codecs. + * + * @param s buffer to write to; must be at least (v/255+1) bytes long + * @param v size of extradata in bytes + * @return number of bytes written to the buffer. + */ +unsigned int av_xiphlacing(unsigned char *s, unsigned int v); + +#if FF_API_USER_VISIBLE_AVHWACCEL +/** + * Register the hardware accelerator hwaccel. + * + * @deprecated This function doesn't do anything. + */ +attribute_deprecated +void av_register_hwaccel(AVHWAccel *hwaccel); + +/** + * If hwaccel is NULL, returns the first registered hardware accelerator, + * if hwaccel is non-NULL, returns the next registered hardware accelerator + * after hwaccel, or NULL if hwaccel is the last one. 
+ * + * @deprecated AVHWaccel structures contain no user-serviceable parts, so + * this function should not be used. + */ +attribute_deprecated +AVHWAccel *av_hwaccel_next(const AVHWAccel *hwaccel); +#endif + +#if FF_API_LOCKMGR +/** + * Lock operation used by lockmgr + * + * @deprecated Deprecated together with av_lockmgr_register(). + */ +enum AVLockOp { + AV_LOCK_CREATE, ///< Create a mutex + AV_LOCK_OBTAIN, ///< Lock the mutex + AV_LOCK_RELEASE, ///< Unlock the mutex + AV_LOCK_DESTROY, ///< Free mutex resources +}; + +/** + * Register a user provided lock manager supporting the operations + * specified by AVLockOp. The "mutex" argument to the function points + * to a (void *) where the lockmgr should store/get a pointer to a user + * allocated mutex. It is NULL upon AV_LOCK_CREATE and equal to the + * value left by the last call for all other ops. If the lock manager is + * unable to perform the op then it should leave the mutex in the same + * state as when it was called and return a non-zero value. However, + * when called with AV_LOCK_DESTROY the mutex will always be assumed to + * have been successfully destroyed. If av_lockmgr_register succeeds + * it will return a non-negative value, if it fails it will return a + * negative value and destroy all mutex and unregister all callbacks. + * av_lockmgr_register is not thread-safe, it must be called from a + * single thread before any calls which make use of locking are used. + * + * @param cb User defined callback. av_lockmgr_register invokes calls + * to this callback and the previously registered callback. + * The callback will be used to create more than one mutex + * each of which must be backed by its own underlying locking + * mechanism (i.e. do not use a single static object to + * implement your lock manager). If cb is set to NULL the + * lockmgr will be unregistered. + * + * @deprecated This function does nothing, and always returns 0. 
Be sure to + * build with thread support to get basic thread safety. + */ +attribute_deprecated +int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)); +#endif + +/** + * Get the type of the given codec. + */ +enum AVMediaType liteav_avcodec_get_type(enum AVCodecID codec_id); + +/** + * Get the name of a codec. + * @return a static string identifying the codec; never NULL + */ +const char *avcodec_get_name(enum AVCodecID id); + +/** + * @return a positive value if s is open (i.e. avcodec_open2() was called on it + * with no corresponding avcodec_close()), 0 otherwise. + */ +int avcodec_is_open(AVCodecContext *s); + +/** + * @return a non-zero number if codec is an encoder, zero otherwise + */ +int av_codec_is_encoder(const AVCodec *codec); + +/** + * @return a non-zero number if codec is a decoder, zero otherwise + */ +int av_codec_is_decoder(const AVCodec *codec); + +/** + * @return descriptor for given codec ID or NULL if no descriptor exists. + */ +const AVCodecDescriptor *liteav_avcodec_descriptor_get(enum AVCodecID id); + +/** + * Iterate over all codec descriptors known to libavcodec. + * + * @param prev previous descriptor. NULL to get the first descriptor. + * + * @return next descriptor or NULL after the last descriptor + */ +const AVCodecDescriptor *liteav_avcodec_descriptor_next(const AVCodecDescriptor *prev); + +/** + * @return codec descriptor with the given name or NULL if no such descriptor + * exists. + */ +const AVCodecDescriptor *liteav_avcodec_descriptor_get_by_name(const char *name); + +/** + * Allocate a CPB properties structure and initialize its fields to default + * values. + * + * @param size if non-NULL, the size of the allocated struct will be written + * here. This is useful for embedding it in side data. 
+ * + * @return the newly allocated struct or NULL on failure + */ +AVCPBProperties *av_cpb_properties_alloc(size_t *size); + +/** + * @} + */ + +#endif /* AVCODEC_AVCODEC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/avdct.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/avdct.h new file mode 100644 index 0000000..90a6b0d --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/avdct.h @@ -0,0 +1,85 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVDCT_H +#define AVCODEC_AVDCT_H + +#include "libavutil/opt.h" + +/** + * AVDCT context. + * @note function pointers can be NULL if the specific features have been + * disabled at build time. + */ +typedef struct AVDCT { + const AVClass *av_class; + + void (*idct)(int16_t *block /* align 16 */); + + /** + * IDCT input permutation. + * Several optimized IDCTs need a permutated input (relative to the + * normal order of the reference IDCT). + * This permutation must be performed before the idct_put/add. 
+ * Note, normally this can be merged with the zigzag/alternate scan<br> + * An example to avoid confusion: + * - (->decode coeffs -> zigzag reorder -> dequant -> reference IDCT -> ...) + * - (x -> reference DCT -> reference IDCT -> x) + * - (x -> reference DCT -> simple_mmx_perm = idct_permutation + * -> simple_idct_mmx -> x) + * - (-> decode coeffs -> zigzag reorder -> simple_mmx_perm -> dequant + * -> simple_idct_mmx -> ...) + */ + uint8_t idct_permutation[64]; + + void (*fdct)(int16_t *block /* align 16 */); + + + /** + * DCT algorithm. + * must use AVOptions to set this field. + */ + int dct_algo; + + /** + * IDCT algorithm. + * must use AVOptions to set this field. + */ + int idct_algo; + + void (*get_pixels)(int16_t *block /* align 16 */, + const uint8_t *pixels /* align 8 */, + ptrdiff_t line_size); + + int bits_per_sample; +} AVDCT; + +/** + * Allocates a AVDCT context. + * This needs to be initialized with liteav_avcodec_dct_init() after optionally + * configuring it with AVOptions. + * + * To free it use liteav_av_free() + */ +AVDCT *liteav_avcodec_dct_alloc(void); +int liteav_avcodec_dct_init(AVDCT *); + +const AVClass *liteav_avcodec_dct_get_class(void); + +#endif /* AVCODEC_AVDCT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/avfft.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/avfft.h new file mode 100644 index 0000000..5aa411b --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/avfft.h @@ -0,0 +1,119 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVFFT_H +#define AVCODEC_AVFFT_H + +/** + * @file + * @ingroup lavc_fft + * FFT functions + */ + +/** + * @defgroup lavc_fft FFT functions + * @ingroup lavc_misc + * + * @{ + */ + +typedef float FFTSample; + +typedef struct FFTComplex { + FFTSample re, im; +} FFTComplex; + +typedef struct FFTContext FFTContext; + +/** + * Set up a complex FFT. + * @param nbits log2 of the length of the input array + * @param inverse if 0 perform the forward transform, if 1 perform the inverse + */ +FFTContext *liteav_av_fft_init(int nbits, int inverse); + +/** + * Do the permutation needed BEFORE calling liteav_ff_fft_calc(). + */ +void liteav_av_fft_permute(FFTContext *s, FFTComplex *z); + +/** + * Do a complex FFT with the parameters defined in liteav_av_fft_init(). The + * input data must be permuted before. No 1.0/sqrt(n) normalization is done. 
+ */ +void liteav_av_fft_calc(FFTContext *s, FFTComplex *z); + +void liteav_av_fft_end(FFTContext *s); + +FFTContext *liteav_av_mdct_init(int nbits, int inverse, double scale); +void liteav_av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); +void liteav_av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input); +void liteav_av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); +void liteav_av_mdct_end(FFTContext *s); + +/* Real Discrete Fourier Transform */ + +enum RDFTransformType { + DFT_R2C, + IDFT_C2R, + IDFT_R2C, + DFT_C2R, +}; + +typedef struct RDFTContext RDFTContext; + +/** + * Set up a real FFT. + * @param nbits log2 of the length of the input array + * @param trans the type of transform + */ +RDFTContext *liteav_av_rdft_init(int nbits, enum RDFTransformType trans); +void liteav_av_rdft_calc(RDFTContext *s, FFTSample *data); +void liteav_av_rdft_end(RDFTContext *s); + +/* Discrete Cosine Transform */ + +typedef struct DCTContext DCTContext; + +enum DCTTransformType { + DCT_II = 0, + DCT_III, + DCT_I, + DST_I, +}; + +/** + * Set up DCT. 
+ * + * @param nbits size of the input array: + * (1 << nbits) for DCT-II, DCT-III and DST-I + * (1 << nbits) + 1 for DCT-I + * @param type the type of transform + * + * @note the first element of the input of DST-I is ignored + */ +DCTContext *liteav_av_dct_init(int nbits, enum DCTTransformType type); +void liteav_av_dct_calc(DCTContext *s, FFTSample *data); +void liteav_av_dct_end (DCTContext *s); + +/** + * @} + */ + +#endif /* AVCODEC_AVFFT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/bytestream.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/bytestream.h new file mode 100644 index 0000000..7be7fc2 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/bytestream.h @@ -0,0 +1,376 @@ +/* + * Bytestream functions + * copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr> + * Copyright (c) 2012 Aneesh Dogra (lionaneesh) <lionaneesh@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_BYTESTREAM_H +#define AVCODEC_BYTESTREAM_H + +#include <stdint.h> +#include <string.h> + +#include "libavutil/avassert.h" +#include "libavutil/common.h" +#include "libavutil/intreadwrite.h" + +typedef struct GetByteContext { + const uint8_t *buffer, *buffer_end, *buffer_start; +} GetByteContext; + +typedef struct PutByteContext { + uint8_t *buffer, *buffer_end, *buffer_start; + int eof; +} PutByteContext; + +#define DEF(type, name, bytes, read, write) \ +static av_always_inline type bytestream_get_ ## name(const uint8_t **b) \ +{ \ + (*b) += bytes; \ + return read(*b - bytes); \ +} \ +static av_always_inline void bytestream_put_ ## name(uint8_t **b, \ + const type value) \ +{ \ + write(*b, value); \ + (*b) += bytes; \ +} \ +static av_always_inline void bytestream2_put_ ## name ## u(PutByteContext *p, \ + const type value) \ +{ \ + bytestream_put_ ## name(&p->buffer, value); \ +} \ +static av_always_inline void bytestream2_put_ ## name(PutByteContext *p, \ + const type value) \ +{ \ + if (!p->eof && (p->buffer_end - p->buffer >= bytes)) { \ + write(p->buffer, value); \ + p->buffer += bytes; \ + } else \ + p->eof = 1; \ +} \ +static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g) \ +{ \ + return bytestream_get_ ## name(&g->buffer); \ +} \ +static av_always_inline type bytestream2_get_ ## name(GetByteContext *g) \ +{ \ + if (g->buffer_end - g->buffer < bytes) { \ + g->buffer = g->buffer_end; \ + return 0; \ + } \ + return bytestream2_get_ ## name ## u(g); \ +} \ +static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g) \ +{ \ + if (g->buffer_end - g->buffer < bytes) \ + return 0; \ + return read(g->buffer); \ +} + +DEF(uint64_t, le64, 8, AV_RL64, AV_WL64) +DEF(unsigned int, le32, 4, 
AV_RL32, AV_WL32) +DEF(unsigned int, le24, 3, AV_RL24, AV_WL24) +DEF(unsigned int, le16, 2, AV_RL16, AV_WL16) +DEF(uint64_t, be64, 8, AV_RB64, AV_WB64) +DEF(unsigned int, be32, 4, AV_RB32, AV_WB32) +DEF(unsigned int, be24, 3, AV_RB24, AV_WB24) +DEF(unsigned int, be16, 2, AV_RB16, AV_WB16) +DEF(unsigned int, byte, 1, AV_RB8 , AV_WB8) + +#if AV_HAVE_BIGENDIAN +# define bytestream2_get_ne16 bytestream2_get_be16 +# define bytestream2_get_ne24 bytestream2_get_be24 +# define bytestream2_get_ne32 bytestream2_get_be32 +# define bytestream2_get_ne64 bytestream2_get_be64 +# define bytestream2_get_ne16u bytestream2_get_be16u +# define bytestream2_get_ne24u bytestream2_get_be24u +# define bytestream2_get_ne32u bytestream2_get_be32u +# define bytestream2_get_ne64u bytestream2_get_be64u +# define bytestream2_put_ne16 bytestream2_put_be16 +# define bytestream2_put_ne24 bytestream2_put_be24 +# define bytestream2_put_ne32 bytestream2_put_be32 +# define bytestream2_put_ne64 bytestream2_put_be64 +# define bytestream2_peek_ne16 bytestream2_peek_be16 +# define bytestream2_peek_ne24 bytestream2_peek_be24 +# define bytestream2_peek_ne32 bytestream2_peek_be32 +# define bytestream2_peek_ne64 bytestream2_peek_be64 +#else +# define bytestream2_get_ne16 bytestream2_get_le16 +# define bytestream2_get_ne24 bytestream2_get_le24 +# define bytestream2_get_ne32 bytestream2_get_le32 +# define bytestream2_get_ne64 bytestream2_get_le64 +# define bytestream2_get_ne16u bytestream2_get_le16u +# define bytestream2_get_ne24u bytestream2_get_le24u +# define bytestream2_get_ne32u bytestream2_get_le32u +# define bytestream2_get_ne64u bytestream2_get_le64u +# define bytestream2_put_ne16 bytestream2_put_le16 +# define bytestream2_put_ne24 bytestream2_put_le24 +# define bytestream2_put_ne32 bytestream2_put_le32 +# define bytestream2_put_ne64 bytestream2_put_le64 +# define bytestream2_peek_ne16 bytestream2_peek_le16 +# define bytestream2_peek_ne24 bytestream2_peek_le24 +# define bytestream2_peek_ne32 
bytestream2_peek_le32 +# define bytestream2_peek_ne64 bytestream2_peek_le64 +#endif + +static av_always_inline void bytestream2_init(GetByteContext *g, + const uint8_t *buf, + int buf_size) +{ + av_assert0(buf_size >= 0); + g->buffer = buf; + g->buffer_start = buf; + g->buffer_end = buf + buf_size; +} + +static av_always_inline void bytestream2_init_writer(PutByteContext *p, + uint8_t *buf, + int buf_size) +{ + av_assert0(buf_size >= 0); + p->buffer = buf; + p->buffer_start = buf; + p->buffer_end = buf + buf_size; + p->eof = 0; +} + +static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g) +{ + return g->buffer_end - g->buffer; +} + +static av_always_inline unsigned int bytestream2_get_bytes_left_p(PutByteContext *p) +{ + return p->buffer_end - p->buffer; +} + +static av_always_inline void bytestream2_skip(GetByteContext *g, + unsigned int size) +{ + g->buffer += FFMIN(g->buffer_end - g->buffer, size); +} + +static av_always_inline void bytestream2_skipu(GetByteContext *g, + unsigned int size) +{ + g->buffer += size; +} + +static av_always_inline void bytestream2_skip_p(PutByteContext *p, + unsigned int size) +{ + int size2; + if (p->eof) + return; + size2 = FFMIN(p->buffer_end - p->buffer, size); + if (size2 != size) + p->eof = 1; + p->buffer += size2; +} + +static av_always_inline int bytestream2_tell(GetByteContext *g) +{ + return (int)(g->buffer - g->buffer_start); +} + +static av_always_inline int bytestream2_tell_p(PutByteContext *p) +{ + return (int)(p->buffer - p->buffer_start); +} + +static av_always_inline int bytestream2_size(GetByteContext *g) +{ + return (int)(g->buffer_end - g->buffer_start); +} + +static av_always_inline int bytestream2_size_p(PutByteContext *p) +{ + return (int)(p->buffer_end - p->buffer_start); +} + +static av_always_inline int bytestream2_seek(GetByteContext *g, + int offset, + int whence) +{ + switch (whence) { + case SEEK_CUR: + offset = av_clip(offset, -(g->buffer - g->buffer_start), + g->buffer_end - 
g->buffer); + g->buffer += offset; + break; + case SEEK_END: + offset = av_clip(offset, -(g->buffer_end - g->buffer_start), 0); + g->buffer = g->buffer_end + offset; + break; + case SEEK_SET: + offset = av_clip(offset, 0, g->buffer_end - g->buffer_start); + g->buffer = g->buffer_start + offset; + break; + default: + return AVERROR(EINVAL); + } + return bytestream2_tell(g); +} + +static av_always_inline int bytestream2_seek_p(PutByteContext *p, + int offset, + int whence) +{ + p->eof = 0; + switch (whence) { + case SEEK_CUR: + if (p->buffer_end - p->buffer < offset) + p->eof = 1; + offset = av_clip(offset, -(p->buffer - p->buffer_start), + p->buffer_end - p->buffer); + p->buffer += offset; + break; + case SEEK_END: + if (offset > 0) + p->eof = 1; + offset = av_clip(offset, -(p->buffer_end - p->buffer_start), 0); + p->buffer = p->buffer_end + offset; + break; + case SEEK_SET: + if (p->buffer_end - p->buffer_start < offset) + p->eof = 1; + offset = av_clip(offset, 0, p->buffer_end - p->buffer_start); + p->buffer = p->buffer_start + offset; + break; + default: + return AVERROR(EINVAL); + } + return bytestream2_tell_p(p); +} + +static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, + uint8_t *dst, + unsigned int size) +{ + int size2 = FFMIN(g->buffer_end - g->buffer, size); + memcpy(dst, g->buffer, size2); + g->buffer += size2; + return size2; +} + +static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, + uint8_t *dst, + unsigned int size) +{ + memcpy(dst, g->buffer, size); + g->buffer += size; + return size; +} + +static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, + const uint8_t *src, + unsigned int size) +{ + int size2; + if (p->eof) + return 0; + size2 = FFMIN(p->buffer_end - p->buffer, size); + if (size2 != size) + p->eof = 1; + memcpy(p->buffer, src, size2); + p->buffer += size2; + return size2; +} + +static av_always_inline unsigned int bytestream2_put_bufferu(PutByteContext *p, + const 
uint8_t *src, + unsigned int size) +{ + memcpy(p->buffer, src, size); + p->buffer += size; + return size; +} + +static av_always_inline void bytestream2_set_buffer(PutByteContext *p, + const uint8_t c, + unsigned int size) +{ + int size2; + if (p->eof) + return; + size2 = FFMIN(p->buffer_end - p->buffer, size); + if (size2 != size) + p->eof = 1; + memset(p->buffer, c, size2); + p->buffer += size2; +} + +static av_always_inline void bytestream2_set_bufferu(PutByteContext *p, + const uint8_t c, + unsigned int size) +{ + memset(p->buffer, c, size); + p->buffer += size; +} + +static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p) +{ + return p->eof; +} + +static av_always_inline unsigned int bytestream2_copy_bufferu(PutByteContext *p, + GetByteContext *g, + unsigned int size) +{ + memcpy(p->buffer, g->buffer, size); + p->buffer += size; + g->buffer += size; + return size; +} + +static av_always_inline unsigned int bytestream2_copy_buffer(PutByteContext *p, + GetByteContext *g, + unsigned int size) +{ + int size2; + + if (p->eof) + return 0; + size = FFMIN(g->buffer_end - g->buffer, size); + size2 = FFMIN(p->buffer_end - p->buffer, size); + if (size2 != size) + p->eof = 1; + + return bytestream2_copy_bufferu(p, g, size2); +} + +static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b, + uint8_t *dst, + unsigned int size) +{ + memcpy(dst, *b, size); + (*b) += size; + return size; +} + +static av_always_inline void bytestream_put_buffer(uint8_t **b, + const uint8_t *src, + unsigned int size) +{ + memcpy(*b, src, size); + (*b) += size; +} + +#endif /* AVCODEC_BYTESTREAM_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/d3d11va.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/d3d11va.h new file mode 100644 index 0000000..89a7170 --- /dev/null +++ 
b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/d3d11va.h @@ -0,0 +1,113 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Direct3D11 HW acceleration + * + * copyright (c) 2009 Laurent Aimar + * copyright (c) 2015 Steve Lhomme + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_D3D11VA_H +#define AVCODEC_D3D11VA_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_d3d11va + * Public libavcodec D3D11VA header. + */ + +#if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602 +#undef _WIN32_WINNT +#define _WIN32_WINNT 0x0602 +#endif + +#include <stdint.h> +#include <d3d11.h> + +/** + * @defgroup lavc_codec_hwaccel_d3d11va Direct3D11 + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for Direct3D11 and old UVD/UVD+ ATI video cards +#define FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO 2 ///< Work around for Direct3D11 and old Intel GPUs with ClearVideo interface + +/** + * This structure is used to provides the necessary configurations and data + * to the Direct3D11 FFmpeg HWAccel implementation. + * + * The application must make it available as AVCodecContext.hwaccel_context. 
+ * + * Use liteav_av_d3d11va_alloc_context() exclusively to allocate an AVD3D11VAContext. + */ +typedef struct AVD3D11VAContext { + /** + * D3D11 decoder object + */ + ID3D11VideoDecoder *decoder; + + /** + * D3D11 VideoContext + */ + ID3D11VideoContext *video_context; + + /** + * D3D11 configuration used to create the decoder + */ + D3D11_VIDEO_DECODER_CONFIG *cfg; + + /** + * The number of surface in the surface array + */ + unsigned surface_count; + + /** + * The array of Direct3D surfaces used to create the decoder + */ + ID3D11VideoDecoderOutputView **surface; + + /** + * A bit field configuring the workarounds needed for using the decoder + */ + uint64_t workaround; + + /** + * Private to the FFmpeg AVHWAccel implementation + */ + unsigned report_id; + + /** + * Mutex to access video_context + */ + HANDLE context_mutex; +} AVD3D11VAContext; + +/** + * Allocate an AVD3D11VAContext. + * + * @return Newly-allocated AVD3D11VAContext or NULL on failure. + */ +AVD3D11VAContext *liteav_av_d3d11va_alloc_context(void); + +/** + * @} + */ + +#endif /* AVCODEC_D3D11VA_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/dirac.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/dirac.h new file mode 100644 index 0000000..549b8b1 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/dirac.h @@ -0,0 +1,132 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2007 Marco Gerards <marco@gnu.org> + * Copyright (C) 2009 David Conrad + * Copyright (C) 2011 Jordi Ortiz + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_DIRAC_H +#define AVCODEC_DIRAC_H + +/** + * @file + * Interface to Dirac Decoder/Encoder + * @author Marco Gerards <marco@gnu.org> + * @author David Conrad + * @author Jordi Ortiz + */ + +#include "avcodec.h" + +/** + * The spec limits the number of wavelet decompositions to 4 for both + * level 1 (VC-2) and 128 (long-gop default). + * 5 decompositions is the maximum before >16-bit buffers are needed. + * Schroedinger allows this for DD 9,7 and 13,7 wavelets only, limiting + * the others to 4 decompositions (or 3 for the fidelity filter). + * + * We use this instead of MAX_DECOMPOSITIONS to save some memory. 
+ */ +#define MAX_DWT_LEVELS 5 + +/** + * Parse code values: + * + * Dirac Specification -> + * 9.6.1 Table 9.1 + * + * VC-2 Specification -> + * 10.4.1 Table 10.1 + */ + +enum DiracParseCodes { + DIRAC_PCODE_SEQ_HEADER = 0x00, + DIRAC_PCODE_END_SEQ = 0x10, + DIRAC_PCODE_AUX = 0x20, + DIRAC_PCODE_PAD = 0x30, + DIRAC_PCODE_PICTURE_CODED = 0x08, + DIRAC_PCODE_PICTURE_RAW = 0x48, + DIRAC_PCODE_PICTURE_LOW_DEL = 0xC8, + DIRAC_PCODE_PICTURE_HQ = 0xE8, + DIRAC_PCODE_INTER_NOREF_CO1 = 0x0A, + DIRAC_PCODE_INTER_NOREF_CO2 = 0x09, + DIRAC_PCODE_INTER_REF_CO1 = 0x0D, + DIRAC_PCODE_INTER_REF_CO2 = 0x0E, + DIRAC_PCODE_INTRA_REF_CO = 0x0C, + DIRAC_PCODE_INTRA_REF_RAW = 0x4C, + DIRAC_PCODE_INTRA_REF_PICT = 0xCC, + DIRAC_PCODE_MAGIC = 0x42424344, +}; + +typedef struct DiracVersionInfo { + int major; + int minor; +} DiracVersionInfo; + +typedef struct AVDiracSeqHeader { + unsigned width; + unsigned height; + uint8_t chroma_format; ///< 0: 444 1: 422 2: 420 + + uint8_t interlaced; + uint8_t top_field_first; + + uint8_t frame_rate_index; ///< index into dirac_frame_rate[] + uint8_t aspect_ratio_index; ///< index into dirac_aspect_ratio[] + + uint16_t clean_width; + uint16_t clean_height; + uint16_t clean_left_offset; + uint16_t clean_right_offset; + + uint8_t pixel_range_index; ///< index into dirac_pixel_range_presets[] + uint8_t color_spec_index; ///< index into dirac_color_spec_presets[] + + int profile; + int level; + + AVRational framerate; + AVRational sample_aspect_ratio; + + enum AVPixelFormat pix_fmt; + enum AVColorRange color_range; + enum AVColorPrimaries color_primaries; + enum AVColorTransferCharacteristic color_trc; + enum AVColorSpace colorspace; + + DiracVersionInfo version; + int bit_depth; +} AVDiracSeqHeader; + +/** + * Parse a Dirac sequence header. + * + * @param dsh this function will allocate and fill an AVDiracSeqHeader struct + * and write it into this pointer. The caller must free it with + * liteav_av_free(). 
+ * @param buf the data buffer + * @param buf_size the size of the data buffer in bytes + * @param log_ctx if non-NULL, this function will log errors here + * @return 0 on success, a negative AVERROR code on failure + */ +int liteav_av_dirac_parse_sequence_header(AVDiracSeqHeader **dsh, + const uint8_t *buf, size_t buf_size, + void *log_ctx); + +#endif /* AVCODEC_DIRAC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/dv_profile.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/dv_profile.h new file mode 100644 index 0000000..74068ec --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/dv_profile.h @@ -0,0 +1,84 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_DV_PROFILE_H +#define AVCODEC_DV_PROFILE_H + +#include <stdint.h> + +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" +#include "avcodec.h" + +/* minimum number of bytes to read from a DV stream in order to + * determine the profile */ +#define DV_PROFILE_BYTES (6 * 80) /* 6 DIF blocks */ + + +/* + * AVDVProfile is used to express the differences between various + * DV flavors. For now it's primarily used for differentiating + * 525/60 and 625/50, but the plans are to use it for various + * DV specs as well (e.g. SMPTE314M vs. IEC 61834). + */ +typedef struct AVDVProfile { + int dsf; /* value of the dsf in the DV header */ + int video_stype; /* stype for VAUX source pack */ + int frame_size; /* total size of one frame in bytes */ + int difseg_size; /* number of DIF segments per DIF channel */ + int n_difchan; /* number of DIF channels per frame */ + AVRational time_base; /* 1/framerate */ + int ltc_divisor; /* FPS from the LTS standpoint */ + int height; /* picture height in pixels */ + int width; /* picture width in pixels */ + AVRational sar[2]; /* sample aspect ratios for 4:3 and 16:9 */ + enum AVPixelFormat pix_fmt; /* picture pixel format */ + int bpm; /* blocks per macroblock */ + const uint8_t *block_sizes; /* AC block sizes, in bits */ + int audio_stride; /* size of audio_shuffle table */ + int audio_min_samples[3]; /* min amount of audio samples */ + /* for 48kHz, 44.1kHz and 32kHz */ + int audio_samples_dist[5]; /* how many samples are supposed to be */ + /* in each frame in a 5 frames window */ + const uint8_t (*audio_shuffle)[9]; /* PCM shuffling table */ +} AVDVProfile; + +/** + * Get a DV profile for the provided compressed frame. 
+ * + * @param sys the profile used for the previous frame, may be NULL + * @param frame the compressed data buffer + * @param buf_size size of the buffer in bytes + * @return the DV profile for the supplied data or NULL on failure + */ +const AVDVProfile *liteav_av_dv_frame_profile(const AVDVProfile *sys, + const uint8_t *frame, unsigned buf_size); + +/** + * Get a DV profile for the provided stream parameters. + */ +const AVDVProfile *liteav_av_dv_codec_profile(int width, int height, enum AVPixelFormat pix_fmt); + +/** + * Get a DV profile for the provided stream parameters. + * The frame rate is used as a best-effort parameter. + */ +const AVDVProfile *liteav_av_dv_codec_profile2(int width, int height, enum AVPixelFormat pix_fmt, AVRational frame_rate); + +#endif /* AVCODEC_DV_PROFILE_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/dxva2.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/dxva2.h new file mode 100644 index 0000000..22c9399 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/dxva2.h @@ -0,0 +1,93 @@ +/* + * DXVA2 HW acceleration + * + * copyright (c) 2009 Laurent Aimar + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_DXVA2_H +#define AVCODEC_DXVA2_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_dxva2 + * Public libavcodec DXVA2 header. + */ + +#if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602 +#undef _WIN32_WINNT +#define _WIN32_WINNT 0x0602 +#endif + +#include <stdint.h> +#include <d3d9.h> +#include <dxva2api.h> + +/** + * @defgroup lavc_codec_hwaccel_dxva2 DXVA2 + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards +#define FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO 2 ///< Work around for DXVA2 and old Intel GPUs with ClearVideo interface + +/** + * This structure is used to provides the necessary configurations and data + * to the DXVA2 FFmpeg HWAccel implementation. + * + * The application must make it available as AVCodecContext.hwaccel_context. 
+ */ +struct dxva_context { + /** + * DXVA2 decoder object + */ + IDirectXVideoDecoder *decoder; + + /** + * DXVA2 configuration used to create the decoder + */ + const DXVA2_ConfigPictureDecode *cfg; + + /** + * The number of surface in the surface array + */ + unsigned surface_count; + + /** + * The array of Direct3D surfaces used to create the decoder + */ + LPDIRECT3DSURFACE9 *surface; + + /** + * A bit field configuring the workarounds needed for using the decoder + */ + uint64_t workaround; + + /** + * Private to the FFmpeg AVHWAccel implementation + */ + unsigned report_id; +}; + +/** + * @} + */ + +#endif /* AVCODEC_DXVA2_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/jni.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/jni.h new file mode 100644 index 0000000..85c9ae5 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/jni.h @@ -0,0 +1,47 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * JNI public API functions + * + * Copyright (c) 2015-2016 Matthieu Bouron <matthieu.bouron stupeflix.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_JNI_H +#define AVCODEC_JNI_H + +/* + * Manually set a Java virtual machine which will be used to retrieve the JNI + * environment. Once a Java VM is set it cannot be changed afterwards, meaning + * you can call multiple times liteav_av_jni_set_java_vm with the same Java VM pointer + * however it will error out if you try to set a different Java VM. + * + * @param vm Java virtual machine + * @param log_ctx context used for logging, can be NULL + * @return 0 on success, < 0 otherwise + */ +int liteav_av_jni_set_java_vm(void *vm, void *log_ctx); + +/* + * Get the Java virtual machine which has been set with liteav_av_jni_set_java_vm. + * + * @param vm Java virtual machine + * @return a pointer to the Java virtual machine + */ +void *liteav_av_jni_get_java_vm(void *log_ctx); + +#endif /* AVCODEC_JNI_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/mediacodec.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/mediacodec.h new file mode 100644 index 0000000..3a716d1 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/mediacodec.h @@ -0,0 +1,102 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Android MediaCodec public API + * + * Copyright (c) 2016 Matthieu Bouron <matthieu.bouron stupeflix.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_MEDIACODEC_H +#define AVCODEC_MEDIACODEC_H + +#include "libavcodec/avcodec.h" + +/** + * This structure holds a reference to a android/view/Surface object that will + * be used as output by the decoder. + * + */ +typedef struct AVMediaCodecContext { + + /** + * android/view/Surface object reference. + */ + void *surface; + +} AVMediaCodecContext; + +/** + * Allocate and initialize a MediaCodec context. + * + * When decoding with MediaCodec is finished, the caller must free the + * MediaCodec context with liteav_av_mediacodec_default_free. + * + * @return a pointer to a newly allocated AVMediaCodecContext on success, NULL otherwise + */ +AVMediaCodecContext *liteav_av_mediacodec_alloc_context(void); + +/** + * Convenience function that sets up the MediaCodec context. + * + * @param avctx codec context + * @param ctx MediaCodec context to initialize + * @param surface reference to an android/view/Surface + * @return 0 on success, < 0 otherwise + */ +int liteav_av_mediacodec_default_init(AVCodecContext *avctx, AVMediaCodecContext *ctx, void *surface); + +/** + * This function must be called to free the MediaCodec context initialized with + * liteav_av_mediacodec_default_init(). + * + * @param avctx codec context + */ +void liteav_av_mediacodec_default_free(AVCodecContext *avctx); + +/** + * Opaque structure representing a MediaCodec buffer to render. 
+ */ +typedef struct MediaCodecBuffer AVMediaCodecBuffer; + +/** + * Release a MediaCodec buffer and render it to the surface that is associated + * with the decoder. This function should only be called once on a given + * buffer, once released the underlying buffer returns to the codec, thus + * subsequent calls to this function will have no effect. + * + * @param buffer the buffer to render + * @param render 1 to release and render the buffer to the surface or 0 to + * discard the buffer + * @return 0 on success, < 0 otherwise + */ +int liteav_av_mediacodec_release_buffer(AVMediaCodecBuffer *buffer, int render); + +/** + * Release a MediaCodec buffer and render it at the given time to the surface + * that is associated with the decoder. The timestamp must be within one second + * of the current java/lang/System#nanoTime() (which is implemented using + * CLOCK_MONOTONIC on Android). See the Android MediaCodec documentation + * of android/media/MediaCodec#releaseOutputBuffer(int,long) for more details. + * + * @param buffer the buffer to render + * @param time timestamp in nanoseconds of when to render the buffer + * @return 0 on success, < 0 otherwise + */ +int liteav_av_mediacodec_render_buffer_at_time(AVMediaCodecBuffer *buffer, int64_t time); + +#endif /* AVCODEC_MEDIACODEC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/qsv.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/qsv.h new file mode 100644 index 0000000..04eaf12 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/qsv.h @@ -0,0 +1,108 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Intel MediaSDK QSV public API + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_QSV_H +#define AVCODEC_QSV_H + +#include <mfx/mfxvideo.h> + +#include "libavutil/buffer.h" + +/** + * This struct is used for communicating QSV parameters between libavcodec and + * the caller. It is managed by the caller and must be assigned to + * AVCodecContext.hwaccel_context. + * - decoding: hwaccel_context must be set on return from the get_format() + * callback + * - encoding: hwaccel_context must be set before avcodec_open2() + */ +typedef struct AVQSVContext { + /** + * If non-NULL, the session to use for encoding or decoding. + * Otherwise, libavcodec will try to create an internal session. + */ + mfxSession session; + + /** + * The IO pattern to use. + */ + int iopattern; + + /** + * Extra buffers to pass to encoder or decoder initialization. + */ + mfxExtBuffer **ext_buffers; + int nb_ext_buffers; + + /** + * Encoding only. If this field is set to non-zero by the caller, libavcodec + * will create an mfxExtOpaqueSurfaceAlloc extended buffer and pass it to + * the encoder initialization. This only makes sense if iopattern is also + * set to MFX_IOPATTERN_IN_OPAQUE_MEMORY. 
+ * + * The number of allocated opaque surfaces will be the sum of the number + * required by the encoder and the user-provided value nb_opaque_surfaces. + * The array of the opaque surfaces will be exported to the caller through + * the opaque_surfaces field. + */ + int opaque_alloc; + + /** + * Encoding only, and only if opaque_alloc is set to non-zero. Before + * calling avcodec_open2(), the caller should set this field to the number + * of extra opaque surfaces to allocate beyond what is required by the + * encoder. + * + * On return from avcodec_open2(), this field will be set by libavcodec to + * the total number of allocated opaque surfaces. + */ + int nb_opaque_surfaces; + + /** + * Encoding only, and only if opaque_alloc is set to non-zero. On return + * from avcodec_open2(), this field will be used by libavcodec to export the + * array of the allocated opaque surfaces to the caller, so they can be + * passed to other parts of the pipeline. + * + * The buffer reference exported here is owned and managed by libavcodec, + * the callers should make their own reference with liteav_av_buffer_ref() and free + * it with liteav_av_buffer_unref() when it is no longer needed. + * + * The buffer data is an nb_opaque_surfaces-sized array of mfxFrameSurface1. + */ + AVBufferRef *opaque_surfaces; + + /** + * Encoding only, and only if opaque_alloc is set to non-zero. On return + * from avcodec_open2(), this field will be set to the surface type used in + * the opaque allocation request. + */ + int opaque_alloc_type; +} AVQSVContext; + +/** + * Allocate a new context. + * + * It must be freed by the caller with liteav_av_free(). 
+ */ +AVQSVContext *liteav_av_qsv_alloc_context(void); + +#endif /* AVCODEC_QSV_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/vaapi.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/vaapi.h new file mode 100644 index 0000000..2cf7da5 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/vaapi.h @@ -0,0 +1,86 @@ +/* + * Video Acceleration API (shared data between FFmpeg and the video player) + * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1 + * + * Copyright (C) 2008-2009 Splitted-Desktop Systems + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VAAPI_H +#define AVCODEC_VAAPI_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vaapi + * Public libavcodec VA API header. + */ + +#include <stdint.h> +#include "libavutil/attributes.h" +#include "version.h" + +#if FF_API_STRUCT_VAAPI_CONTEXT + +/** + * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding + * @ingroup lavc_codec_hwaccel + * @{ + */ + +/** + * This structure is used to share data between the FFmpeg library and + * the client video application. 
+ * This shall be zero-allocated and available as + * AVCodecContext.hwaccel_context. All user members can be set once + * during initialization or through each AVCodecContext.get_buffer() + * function call. In any case, they must be valid prior to calling + * decoding functions. + * + * Deprecated: use AVCodecContext.hw_frames_ctx instead. + */ +struct attribute_deprecated vaapi_context { + /** + * Window system dependent data + * + * - encoding: unused + * - decoding: Set by user + */ + void *display; + + /** + * Configuration ID + * + * - encoding: unused + * - decoding: Set by user + */ + uint32_t config_id; + + /** + * Context ID (video decode pipeline) + * + * - encoding: unused + * - decoding: Set by user + */ + uint32_t context_id; +}; + +/* @} */ + +#endif /* FF_API_STRUCT_VAAPI_CONTEXT */ + +#endif /* AVCODEC_VAAPI_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/vdpau.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/vdpau.h new file mode 100644 index 0000000..461bc59 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/vdpau.h @@ -0,0 +1,177 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * The Video Decode and Presentation API for UNIX (VDPAU) is used for + * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1. + * + * Copyright (C) 2008 NVIDIA + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VDPAU_H +#define AVCODEC_VDPAU_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vdpau + * Public libavcodec VDPAU header. + */ + + +/** + * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer + * @ingroup lavc_codec_hwaccel + * + * VDPAU hardware acceleration has two modules + * - VDPAU decoding + * - VDPAU presentation + * + * The VDPAU decoding module parses all headers using FFmpeg + * parsing mechanisms and uses VDPAU for the actual decoding. + * + * As per the current implementation, the actual decoding + * and rendering (API calls) are done as part of the VDPAU + * presentation (vo_vdpau.c) module. + * + * @{ + */ + +#include <vdpau/vdpau.h> + +#include "libavutil/avconfig.h" +#include "libavutil/attributes.h" + +#include "avcodec.h" +#include "version.h" + +struct AVCodecContext; +struct AVFrame; + +typedef int (*AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *, + const VdpPictureInfo *, uint32_t, + const VdpBitstreamBuffer *); + +/** + * This structure is used to share data between the libavcodec library and + * the client video application. + * The user shall allocate the structure via the av_alloc_vdpau_hwaccel + * function and make it available as + * AVCodecContext.hwaccel_context. Members can be set by the user once + * during initialization or through each AVCodecContext.get_buffer() + * function call. In any case, they must be valid prior to calling + * decoding functions. + * + * The size of this structure is not a part of the public ABI and must not + * be used outside of libavcodec. Use av_vdpau_alloc_context() to allocate an + * AVVDPAUContext. 
+ */ +typedef struct AVVDPAUContext { + /** + * VDPAU decoder handle + * + * Set by user. + */ + VdpDecoder decoder; + + /** + * VDPAU decoder render callback + * + * Set by the user. + */ + VdpDecoderRender *render; + + AVVDPAU_Render2 render2; +} AVVDPAUContext; + +/** + * @brief allocation function for AVVDPAUContext + * + * Allows extending the struct without breaking API/ABI + */ +AVVDPAUContext *av_alloc_vdpaucontext(void); + +AVVDPAU_Render2 av_vdpau_hwaccel_get_render2(const AVVDPAUContext *); +void av_vdpau_hwaccel_set_render2(AVVDPAUContext *, AVVDPAU_Render2); + +/** + * Associate a VDPAU device with a codec context for hardware acceleration. + * This function is meant to be called from the get_format() codec callback, + * or earlier. It can also be called after liteav_avcodec_flush_buffers() to change + * the underlying VDPAU device mid-stream (e.g. to recover from non-transparent + * display preemption). + * + * @note get_format() must return AV_PIX_FMT_VDPAU if this function completes + * successfully. + * + * @param avctx decoding context whose get_format() callback is invoked + * @param device VDPAU device handle to use for hardware acceleration + * @param get_proc_address VDPAU device driver + * @param flags zero of more OR'd AV_HWACCEL_FLAG_* flags + * + * @return 0 on success, an AVERROR code on failure. + */ +int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device, + VdpGetProcAddress *get_proc_address, unsigned flags); + +/** + * Gets the parameters to create an adequate VDPAU video surface for the codec + * context using VDPAU hardware decoding acceleration. + * + * @note Behavior is undefined if the context was not successfully bound to a + * VDPAU device using av_vdpau_bind_context(). 
+ * + * @param avctx the codec context being used for decoding the stream + * @param type storage space for the VDPAU video surface chroma type + * (or NULL to ignore) + * @param width storage space for the VDPAU video surface pixel width + * (or NULL to ignore) + * @param height storage space for the VDPAU video surface pixel height + * (or NULL to ignore) + * + * @return 0 on success, a negative AVERROR code on failure. + */ +int av_vdpau_get_surface_parameters(AVCodecContext *avctx, VdpChromaType *type, + uint32_t *width, uint32_t *height); + +/** + * Allocate an AVVDPAUContext. + * + * @return Newly-allocated AVVDPAUContext or NULL on failure. + */ +AVVDPAUContext *av_vdpau_alloc_context(void); + +#if FF_API_VDPAU_PROFILE +/** + * Get a decoder profile that should be used for initializing a VDPAU decoder. + * Should be called from the AVCodecContext.get_format() callback. + * + * @deprecated Use av_vdpau_bind_context() instead. + * + * @param avctx the codec context being used for decoding the stream + * @param profile a pointer into which the result will be written on success. + * The contents of profile are undefined if this function returns + * an error. + * + * @return 0 on success (non-negative), a negative AVERROR on failure. + */ +attribute_deprecated +int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile); +#endif + +/* @}*/ + +#endif /* AVCODEC_VDPAU_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/version.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/version.h new file mode 100644 index 0000000..c907524 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/version.h @@ -0,0 +1,146 @@ +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VERSION_H +#define AVCODEC_VERSION_H + +/** + * @file + * @ingroup libavc + * Libavcodec version macros. + */ + +#include "libavutil/version.h" + +#define LIBAVCODEC_VERSION_MAJOR 58 +#define LIBAVCODEC_VERSION_MINOR 35 +#define LIBAVCODEC_VERSION_MICRO 100 + +#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ + LIBAVCODEC_VERSION_MINOR, \ + LIBAVCODEC_VERSION_MICRO) +#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \ + LIBAVCODEC_VERSION_MINOR, \ + LIBAVCODEC_VERSION_MICRO) +#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT + +#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + * + * @note, when bumping the major version it is recommended to manually + * disable each FF_API_* in its own commit instead of disabling them all + * at once through the bump. This improves the git bisect-ability of the change. 
+ */ + +#ifndef FF_API_LOWRES +#define FF_API_LOWRES (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_DEBUG_MV +#define FF_API_DEBUG_MV (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_AVCTX_TIMEBASE +#define FF_API_AVCTX_TIMEBASE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_CODED_FRAME +#define FF_API_CODED_FRAME (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_SIDEDATA_ONLY_PKT +#define FF_API_SIDEDATA_ONLY_PKT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_WITHOUT_PREFIX +#define FF_API_WITHOUT_PREFIX (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_VDPAU_PROFILE +#define FF_API_VDPAU_PROFILE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_CONVERGENCE_DURATION +#define FF_API_CONVERGENCE_DURATION (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_AVPICTURE +#define FF_API_AVPICTURE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_AVPACKET_OLD_API +#define FF_API_AVPACKET_OLD_API (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_RTP_CALLBACK +#define FF_API_RTP_CALLBACK (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_VBV_DELAY +#define FF_API_VBV_DELAY (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_CODER_TYPE +#define FF_API_CODER_TYPE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_STAT_BITS +#define FF_API_STAT_BITS (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_PRIVATE_OPT +#define FF_API_PRIVATE_OPT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_ASS_TIMING +#define FF_API_ASS_TIMING (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_OLD_BSF +#define FF_API_OLD_BSF (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_COPY_CONTEXT +#define FF_API_COPY_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_GET_CONTEXT_DEFAULTS +#define FF_API_GET_CONTEXT_DEFAULTS (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_NVENC_OLD_NAME +#define FF_API_NVENC_OLD_NAME (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef 
FF_API_STRUCT_VAAPI_CONTEXT +#define FF_API_STRUCT_VAAPI_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_MERGE_SD_API +#define FF_API_MERGE_SD_API (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_TAG_STRING +#define FF_API_TAG_STRING (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_GETCHROMA +#define FF_API_GETCHROMA (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_CODEC_GET_SET +#define FF_API_CODEC_GET_SET (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_USER_VISIBLE_AVHWACCEL +#define FF_API_USER_VISIBLE_AVHWACCEL (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_LOCKMGR +#define FF_API_LOCKMGR (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_NEXT +#define FF_API_NEXT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_CODEC_NAME +#define FF_API_CODEC_NAME (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_EMU_EDGE +#define FF_API_EMU_EDGE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif + + +#endif /* AVCODEC_VERSION_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/videotoolbox.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/videotoolbox.h new file mode 100644 index 0000000..0289f5b --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/videotoolbox.h @@ -0,0 +1,128 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Videotoolbox hardware acceleration + * + * copyright (c) 2012 Sebastien Zwickert + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VIDEOTOOLBOX_H +#define AVCODEC_VIDEOTOOLBOX_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_videotoolbox + * Public libavcodec Videotoolbox header. + */ + +#include <stdint.h> + +#define Picture QuickdrawPicture +#include <VideoToolbox/VideoToolbox.h> +#undef Picture + +#include "libavcodec/avcodec.h" + +/** + * This struct holds all the information that needs to be passed + * between the caller and libavcodec for initializing Videotoolbox decoding. + * Its size is not a part of the public ABI, it must be allocated with + * liteav_av_videotoolbox_alloc_context() and freed with liteav_av_free(). + */ +typedef struct AVVideotoolboxContext { + /** + * Videotoolbox decompression session object. + * Created and freed the caller. + */ + VTDecompressionSessionRef session; + + /** + * The output callback that must be passed to the session. + * Set by av_videottoolbox_default_init() + */ + VTDecompressionOutputCallback output_callback; + + /** + * CVPixelBuffer Format Type that Videotoolbox will use for decoded frames. + * set by the caller. If this is set to 0, then no specific format is + * requested from the decoder, and its native format is output. + */ + OSType cv_pix_fmt_type; + + /** + * CoreMedia Format Description that Videotoolbox will use to create the decompression session. + * Set by the caller. + */ + CMVideoFormatDescriptionRef cm_fmt_desc; + + /** + * CoreMedia codec type that Videotoolbox will use to create the decompression session. + * Set by the caller. 
+ */ + int cm_codec_type; +} AVVideotoolboxContext; + +/** + * Allocate and initialize a Videotoolbox context. + * + * This function should be called from the get_format() callback when the caller + * selects the AV_PIX_FMT_VIDETOOLBOX format. The caller must then create + * the decoder object (using the output callback provided by libavcodec) that + * will be used for Videotoolbox-accelerated decoding. + * + * When decoding with Videotoolbox is finished, the caller must destroy the decoder + * object and free the Videotoolbox context using liteav_av_free(). + * + * @return the newly allocated context or NULL on failure + */ +AVVideotoolboxContext *liteav_av_videotoolbox_alloc_context(void); + +/** + * This is a convenience function that creates and sets up the Videotoolbox context using + * an internal implementation. + * + * @param avctx the corresponding codec context + * + * @return >= 0 on success, a negative AVERROR code on failure + */ +int liteav_av_videotoolbox_default_init(AVCodecContext *avctx); + +/** + * This is a convenience function that creates and sets up the Videotoolbox context using + * an internal implementation. + * + * @param avctx the corresponding codec context + * @param vtctx the Videotoolbox context to use + * + * @return >= 0 on success, a negative AVERROR code on failure + */ +int liteav_av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx); + +/** + * This function must be called to free the Videotoolbox context initialized with + * liteav_av_videotoolbox_default_init(). 
+ * + * @param avctx the corresponding codec context + */ +void liteav_av_videotoolbox_default_free(AVCodecContext *avctx); + +/** + * @} + */ + +#endif /* AVCODEC_VIDEOTOOLBOX_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/vorbis_parser.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/vorbis_parser.h new file mode 100644 index 0000000..428cde6 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/vorbis_parser.h @@ -0,0 +1,75 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * A public API for Vorbis parsing + * + * Determines the duration for each packet. + */ + +#ifndef AVCODEC_VORBIS_PARSER_H +#define AVCODEC_VORBIS_PARSER_H + +#include <stdint.h> + +typedef struct AVVorbisParseContext AVVorbisParseContext; + +/** + * Allocate and initialize the Vorbis parser using headers in the extradata. + */ +AVVorbisParseContext *liteav_av_vorbis_parse_init(const uint8_t *extradata, + int extradata_size); + +/** + * Free the parser and everything associated with it. 
+ */ +void liteav_av_vorbis_parse_free(AVVorbisParseContext **s); + +#define VORBIS_FLAG_HEADER 0x00000001 +#define VORBIS_FLAG_COMMENT 0x00000002 +#define VORBIS_FLAG_SETUP 0x00000004 + +/** + * Get the duration for a Vorbis packet. + * + * If @p flags is @c NULL, + * special frames are considered invalid. + * + * @param s Vorbis parser context + * @param buf buffer containing a Vorbis frame + * @param buf_size size of the buffer + * @param flags flags for special frames + */ +int liteav_av_vorbis_parse_frame_flags(AVVorbisParseContext *s, const uint8_t *buf, + int buf_size, int *flags); + +/** + * Get the duration for a Vorbis packet. + * + * @param s Vorbis parser context + * @param buf buffer containing a Vorbis frame + * @param buf_size size of the buffer + */ +int liteav_av_vorbis_parse_frame(AVVorbisParseContext *s, const uint8_t *buf, + int buf_size); + +void liteav_av_vorbis_parse_reset(AVVorbisParseContext *s); + +#endif /* AVCODEC_VORBIS_PARSER_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/xvmc.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/xvmc.h new file mode 100644 index 0000000..92a95e1 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavcodec/xvmc.h @@ -0,0 +1,171 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2003 Ivan Kalvachev + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_XVMC_H +#define AVCODEC_XVMC_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_xvmc + * Public libavcodec XvMC header. + */ + +#include <X11/extensions/XvMC.h> + +#include "libavutil/attributes.h" +#include "version.h" +#include "avcodec.h" + +/** + * @defgroup lavc_codec_hwaccel_xvmc XvMC + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct + the number is 1337 speak for the letters IDCT MCo (motion compensation) */ + +struct attribute_deprecated xvmc_pix_fmt { + /** The field contains the special constant value AV_XVMC_ID. + It is used as a test that the application correctly uses the API, + and that there is no corruption caused by pixel routines. + - application - set during initialization + - libavcodec - unchanged + */ + int xvmc_id; + + /** Pointer to the block array allocated by XvMCCreateBlocks(). + The array has to be freed by XvMCDestroyBlocks(). + Each group of 64 values represents one data block of differential + pixel information (in MoCo mode) or coefficients for IDCT. + - application - set the pointer during initialization + - libavcodec - fills coefficients/pixel data into the array + */ + short* data_blocks; + + /** Pointer to the macroblock description array allocated by + XvMCCreateMacroBlocks() and freed by XvMCDestroyMacroBlocks(). + - application - set the pointer during initialization + - libavcodec - fills description data into the array + */ + XvMCMacroBlock* mv_blocks; + + /** Number of macroblock descriptions that can be stored in the mv_blocks + array. 
+ - application - set during initialization + - libavcodec - unchanged + */ + int allocated_mv_blocks; + + /** Number of blocks that can be stored at once in the data_blocks array. + - application - set during initialization + - libavcodec - unchanged + */ + int allocated_data_blocks; + + /** Indicate that the hardware would interpret data_blocks as IDCT + coefficients and perform IDCT on them. + - application - set during initialization + - libavcodec - unchanged + */ + int idct; + + /** In MoCo mode it indicates that intra macroblocks are assumed to be in + unsigned format; same as the XVMC_INTRA_UNSIGNED flag. + - application - set during initialization + - libavcodec - unchanged + */ + int unsigned_intra; + + /** Pointer to the surface allocated by XvMCCreateSurface(). + It has to be freed by XvMCDestroySurface() on application exit. + It identifies the frame and its state on the video hardware. + - application - set during initialization + - libavcodec - unchanged + */ + XvMCSurface* p_surface; + +/** Set by the decoder before calling liteav_ff_draw_horiz_band(), + needed by the XvMCRenderSurface function. */ +//@{ + /** Pointer to the surface used as past reference + - application - unchanged + - libavcodec - set + */ + XvMCSurface* p_past_surface; + + /** Pointer to the surface used as future reference + - application - unchanged + - libavcodec - set + */ + XvMCSurface* p_future_surface; + + /** top/bottom field or frame + - application - unchanged + - libavcodec - set + */ + unsigned int picture_structure; + + /** XVMC_SECOND_FIELD - 1st or 2nd field in the sequence + - application - unchanged + - libavcodec - set + */ + unsigned int flags; +//}@ + + /** Number of macroblock descriptions in the mv_blocks array + that have already been passed to the hardware. + - application - zeroes it on get_buffer(). + A successful liteav_ff_draw_horiz_band() may increment it + with filled_mb_block_num or zero both. 
+ - libavcodec - unchanged + */ + int start_mv_blocks_num; + + /** Number of new macroblock descriptions in the mv_blocks array (after + start_mv_blocks_num) that are filled by libavcodec and have to be + passed to the hardware. + - application - zeroes it on get_buffer() or after successful + liteav_ff_draw_horiz_band(). + - libavcodec - increment with one of each stored MB + */ + int filled_mv_blocks_num; + + /** Number of the next free data block; one data block consists of + 64 short values in the data_blocks array. + All blocks before this one have already been claimed by placing their + position into the corresponding block description structure field, + that are part of the mv_blocks array. + - application - zeroes it on get_buffer(). + A successful liteav_ff_draw_horiz_band() may zero it together + with start_mb_blocks_num. + - libavcodec - each decoded macroblock increases it by the number + of coded blocks it contains. + */ + int next_free_data_block_num; +}; + +/** + * @} + */ + +#endif /* AVCODEC_XVMC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/avfilter.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/avfilter.h new file mode 100644 index 0000000..9eb15d9 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/avfilter.h @@ -0,0 +1,1169 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * filter layer + * Copyright (c) 2007 Bobby Bingham + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AVFILTER_H +#define AVFILTER_AVFILTER_H + +/** + * @file + * @ingroup lavfi + * Main libavfilter public API header + */ + +/** + * @defgroup lavfi libavfilter + * Graph-based frame editing library. + * + * @{ + */ + +#include <stddef.h> + +#include "libavutil/attributes.h" +#include "libavutil/avutil.h" +#include "libavutil/buffer.h" +#include "libavutil/dict.h" +#include "libavutil/frame.h" +#include "libavutil/log.h" +#include "libavutil/samplefmt.h" +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" + +#include "libavfilter/version.h" + +/** + * Return the LIBAVFILTER_VERSION_INT constant. + */ +unsigned liteav_avfilter_version(void); + +/** + * Return the libavfilter build-time configuration. + */ +const char *liteav_avfilter_configuration(void); + +/** + * Return the libavfilter license. + */ +const char *liteav_avfilter_license(void); + +typedef struct AVFilterContext AVFilterContext; +typedef struct AVFilterLink AVFilterLink; +typedef struct AVFilterPad AVFilterPad; +typedef struct AVFilterFormats AVFilterFormats; + +/** + * Get the number of elements in a NULL-terminated array of AVFilterPads (e.g. + * AVFilter.inputs/outputs). + */ +int liteav_avfilter_pad_count(const AVFilterPad *pads); + +/** + * Get the name of an AVFilterPad. 
+ * + * @param pads an array of AVFilterPads + * @param pad_idx index of the pad in the array it; is the caller's + * responsibility to ensure the index is valid + * + * @return name of the pad_idx'th pad in pads + */ +const char *liteav_avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx); + +/** + * Get the type of an AVFilterPad. + * + * @param pads an array of AVFilterPads + * @param pad_idx index of the pad in the array; it is the caller's + * responsibility to ensure the index is valid + * + * @return type of the pad_idx'th pad in pads + */ +enum AVMediaType liteav_avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx); + +/** + * The number of the filter inputs is not determined just by AVFilter.inputs. + * The filter might add additional inputs during initialization depending on the + * options supplied to it. + */ +#define AVFILTER_FLAG_DYNAMIC_INPUTS (1 << 0) +/** + * The number of the filter outputs is not determined just by AVFilter.outputs. + * The filter might add additional outputs during initialization depending on + * the options supplied to it. + */ +#define AVFILTER_FLAG_DYNAMIC_OUTPUTS (1 << 1) +/** + * The filter supports multithreading by splitting frames into multiple parts + * and processing them concurrently. + */ +#define AVFILTER_FLAG_SLICE_THREADS (1 << 2) +/** + * Some filters support a generic "enable" expression option that can be used + * to enable or disable a filter in the timeline. Filters supporting this + * option have this flag set. When the enable expression is false, the default + * no-op filter_frame() function is called in place of the filter_frame() + * callback defined on each input pad, thus the frame is passed unchanged to + * the next filters. + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC (1 << 16) +/** + * Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will + * have its filter_frame() callback(s) called as usual even when the enable + * expression is false. 
The filter will disable filtering within the + * filter_frame() callback(s) itself, for example executing code depending on + * the AVFilterContext->is_disabled value. + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL (1 << 17) +/** + * Handy mask to test whether the filter supports or no the timeline feature + * (internally or generically). + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE (AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL) + +/** + * Filter definition. This defines the pads a filter contains, and all the + * callback functions used to interact with the filter. + */ +typedef struct AVFilter { + /** + * Filter name. Must be non-NULL and unique among filters. + */ + const char *name; + + /** + * A description of the filter. May be NULL. + * + * You should use the NULL_IF_CONFIG_SMALL() macro to define it. + */ + const char *description; + + /** + * List of inputs, terminated by a zeroed element. + * + * NULL if there are no (static) inputs. Instances of filters with + * AVFILTER_FLAG_DYNAMIC_INPUTS set may have more inputs than present in + * this list. + */ + const AVFilterPad *inputs; + /** + * List of outputs, terminated by a zeroed element. + * + * NULL if there are no (static) outputs. Instances of filters with + * AVFILTER_FLAG_DYNAMIC_OUTPUTS set may have more outputs than present in + * this list. + */ + const AVFilterPad *outputs; + + /** + * A class for the private data, used to declare filter private AVOptions. + * This field is NULL for filters that do not declare any options. + * + * If this field is non-NULL, the first member of the filter private data + * must be a pointer to AVClass, which will be set by libavfilter generic + * code to this class. + */ + const AVClass *priv_class; + + /** + * A combination of AVFILTER_FLAG_* + */ + int flags; + + /***************************************************************** + * All fields below this line are not part of the public API. 
They + * may not be used outside of libavfilter and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + /** + * Filter pre-initialization function + * + * This callback will be called immediately after the filter context is + * allocated, to allow allocating and initing sub-objects. + * + * If this callback is not NULL, the uninit callback will be called on + * allocation failure. + * + * @return 0 on success, + * AVERROR code on failure (but the code will be + * dropped and treated as ENOMEM by the calling code) + */ + int (*preinit)(AVFilterContext *ctx); + + /** + * Filter initialization function. + * + * This callback will be called only once during the filter lifetime, after + * all the options have been set, but before links between filters are + * established and format negotiation is done. + * + * Basic filter initialization should be done here. Filters with dynamic + * inputs and/or outputs should create those inputs/outputs here based on + * provided options. No more changes to this filter's inputs/outputs can be + * done after this callback. + * + * This callback must not assume that the filter links exist or frame + * parameters are known. + * + * @ref AVFilter.uninit "uninit" is guaranteed to be called even if + * initialization fails, so this callback does not have to clean up on + * failure. + * + * @return 0 on success, a negative AVERROR on failure + */ + int (*init)(AVFilterContext *ctx); + + /** + * Should be set instead of @ref AVFilter.init "init" by the filters that + * want to pass a dictionary of AVOptions to nested contexts that are + * allocated during init. + * + * On return, the options dict should be freed and replaced with one that + * contains all the options which could not be processed by this filter (or + * with NULL if all the options were processed). 
+ * + * Otherwise the semantics is the same as for @ref AVFilter.init "init". + */ + int (*init_dict)(AVFilterContext *ctx, AVDictionary **options); + + /** + * Filter uninitialization function. + * + * Called only once right before the filter is freed. Should deallocate any + * memory held by the filter, release any buffer references, etc. It does + * not need to deallocate the AVFilterContext.priv memory itself. + * + * This callback may be called even if @ref AVFilter.init "init" was not + * called or failed, so it must be prepared to handle such a situation. + */ + void (*uninit)(AVFilterContext *ctx); + + /** + * Query formats supported by the filter on its inputs and outputs. + * + * This callback is called after the filter is initialized (so the inputs + * and outputs are fixed), shortly before the format negotiation. This + * callback may be called more than once. + * + * This callback must set AVFilterLink.out_formats on every input link and + * AVFilterLink.in_formats on every output link to a list of pixel/sample + * formats that the filter supports on that link. For audio links, this + * filter must also set @ref AVFilterLink.in_samplerates "in_samplerates" / + * @ref AVFilterLink.out_samplerates "out_samplerates" and + * @ref AVFilterLink.in_channel_layouts "in_channel_layouts" / + * @ref AVFilterLink.out_channel_layouts "out_channel_layouts" analogously. + * + * This callback may be NULL for filters with one input, in which case + * libavfilter assumes that it supports all input formats and preserves + * them on output. + * + * @return zero on success, a negative value corresponding to an + * AVERROR code otherwise + */ + int (*query_formats)(AVFilterContext *); + + int priv_size; ///< size of private data to allocate for the filter + + int flags_internal; ///< Additional flags for avfilter internal use only. + + /** + * Used by the filter registration system. Must not be touched by any other + * code. 
+ */ + struct AVFilter *next; + + /** + * Make the filter instance process a command. + * + * @param cmd the command to process, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param res a buffer with size res_size where the filter(s) can return a response. This must not change when the command is not supported. + * @param flags if AVFILTER_CMD_FLAG_FAST is set and the command would be + * time consuming then a filter should treat it like an unsupported command + * + * @returns >=0 on success otherwise an error code. + * AVERROR(ENOSYS) on unsupported commands + */ + int (*process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags); + + /** + * Filter initialization function, alternative to the init() + * callback. Args contains the user-supplied parameters, opaque is + * used for providing binary data. + */ + int (*init_opaque)(AVFilterContext *ctx, void *opaque); + + /** + * Filter activation function. + * + * Called when any processing is needed from the filter, instead of any + * filter_frame and request_frame on pads. + * + * The function must examine inlinks and outlinks and perform a single + * step of processing. If there is nothing to do, the function must do + * nothing and not return an error. If more steps are or may be + * possible, it must use liteav_ff_filter_set_ready() to schedule another + * activation. + */ + int (*activate)(AVFilterContext *ctx); +} AVFilter; + +/** + * Process multiple parts of the frame concurrently. 
+ */ +#define AVFILTER_THREAD_SLICE (1 << 0) + +typedef struct AVFilterInternal AVFilterInternal; + +/** An instance of a filter */ +struct AVFilterContext { + const AVClass *av_class; ///< needed for liteav_av_log() and filters common options + + const AVFilter *filter; ///< the AVFilter of which this is an instance + + char *name; ///< name of this filter instance + + AVFilterPad *input_pads; ///< array of input pads + AVFilterLink **inputs; ///< array of pointers to input links + unsigned nb_inputs; ///< number of input pads + + AVFilterPad *output_pads; ///< array of output pads + AVFilterLink **outputs; ///< array of pointers to output links + unsigned nb_outputs; ///< number of output pads + + void *priv; ///< private data for use by the filter + + struct AVFilterGraph *graph; ///< filtergraph this filter belongs to + + /** + * Type of multithreading being allowed/used. A combination of + * AVFILTER_THREAD_* flags. + * + * May be set by the caller before initializing the filter to forbid some + * or all kinds of multithreading for this filter. The default is allowing + * everything. + * + * When the filter is initialized, this field is combined using bit AND with + * AVFilterGraph.thread_type to get the final mask used for determining + * allowed threading types. I.e. a threading type needs to be set in both + * to be allowed. + * + * After the filter is initialized, libavfilter sets this field to the + * threading type that is actually used (0 for no multithreading). + */ + int thread_type; + + /** + * An opaque struct for libavfilter internal use. 
+ */ + AVFilterInternal *internal; + + struct AVFilterCommand *command_queue; + + char *enable_str; ///< enable expression string + void *enable; ///< parsed expression (AVExpr*) + double *var_values; ///< variable values for the enable expression + int is_disabled; ///< the enabled state from the last expression evaluation + + /** + * For filters which will create hardware frames, sets the device the + * filter should create them in. All other filters will ignore this field: + * in particular, a filter which consumes or processes hardware frames will + * instead use the hw_frames_ctx field in AVFilterLink to carry the + * hardware context information. + */ + AVBufferRef *hw_device_ctx; + + /** + * Max number of threads allowed in this filter instance. + * If <= 0, its value is ignored. + * Overrides global number of threads set per filter graph. + */ + int nb_threads; + + /** + * Ready status of the filter. + * A non-0 value means that the filter needs activating; + * a higher value suggests a more urgent activation. + */ + unsigned ready; + + /** + * Sets the number of extra hardware frames which the filter will + * allocate on its output links for use in following filters or by + * the caller. + * + * Some hardware filters require all frames that they will use for + * output to be defined in advance before filtering starts. For such + * filters, any hardware frame pools used for output must therefore be + * of fixed size. The extra frames set here are on top of any number + * that the filter needs internally in order to operate normally. + * + * This field must be set before the graph containing this filter is + * configured. + */ + int extra_hw_frames; +}; + +/** + * A link between two filters. This contains pointers to the source and + * destination filters between which this link exists, and the indexes of + * the pads involved. 
In addition, this link also contains the parameters + * which have been negotiated and agreed upon between the filter, such as + * image dimensions, format, etc. + * + * Applications must not normally access the link structure directly. + * Use the buffersrc and buffersink API instead. + * In the future, access to the header may be reserved for filters + * implementation. + */ +struct AVFilterLink { + AVFilterContext *src; ///< source filter + AVFilterPad *srcpad; ///< output pad on the source filter + + AVFilterContext *dst; ///< dest filter + AVFilterPad *dstpad; ///< input pad on the dest filter + + enum AVMediaType type; ///< filter media type + + /* These parameters apply only to video */ + int w; ///< agreed upon image width + int h; ///< agreed upon image height + AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio + /* These parameters apply only to audio */ + uint64_t channel_layout; ///< channel layout of current buffer (see libavutil/channel_layout.h) + int sample_rate; ///< samples per second + + int format; ///< agreed upon media format + + /** + * Define the time base used by the PTS of the frames/samples + * which will pass through this link. + * During the configuration stage, each filter is supposed to + * change only the output timebase, while the timebase of the + * input link is assumed to be an unchangeable property. + */ + AVRational time_base; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavfilter and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + /** + * Lists of formats and channel layouts supported by the input and output + * filters respectively. 
These lists are used for negotiating the format + * to actually be used, which will be loaded into the format and + * channel_layout members, above, when chosen. + * + */ + AVFilterFormats *in_formats; + AVFilterFormats *out_formats; + + /** + * Lists of channel layouts and sample rates used for automatic + * negotiation. + */ + AVFilterFormats *in_samplerates; + AVFilterFormats *out_samplerates; + struct AVFilterChannelLayouts *in_channel_layouts; + struct AVFilterChannelLayouts *out_channel_layouts; + + /** + * Audio only, the destination filter sets this to a non-zero value to + * request that buffers with the given number of samples should be sent to + * it. AVFilterPad.needs_fifo must also be set on the corresponding input + * pad. + * Last buffer before EOF will be padded with silence. + */ + int request_samples; + + /** stage of the initialization of the link properties (dimensions, etc) */ + enum { + AVLINK_UNINIT = 0, ///< not started + AVLINK_STARTINIT, ///< started, but incomplete + AVLINK_INIT ///< complete + } init_state; + + /** + * Graph the filter belongs to. + */ + struct AVFilterGraph *graph; + + /** + * Current timestamp of the link, as defined by the most recent + * frame(s), in link time_base units. + */ + int64_t current_pts; + + /** + * Current timestamp of the link, as defined by the most recent + * frame(s), in AV_TIME_BASE units. + */ + int64_t current_pts_us; + + /** + * Index in the age array. + */ + int age_index; + + /** + * Frame rate of the stream on the link, or 1/0 if unknown or variable; + * if left to 0/0, will be automatically copied from the first input + * of the source filter if it exists. + * + * Sources should set it to the best estimation of the real frame rate. + * If the source frame rate is unknown or variable, set this to 1/0. + * Filters should update it if necessary depending on their function. + * Sinks can use it to set a default output frame rate. + * It is similar to the r_frame_rate field in AVStream. 
+ */ + AVRational frame_rate; + + /** + * Buffer partially filled with samples to achieve a fixed/minimum size. + */ + AVFrame *partial_buf; + + /** + * Size of the partial buffer to allocate. + * Must be between min_samples and max_samples. + */ + int partial_buf_size; + + /** + * Minimum number of samples to filter at once. If filter_frame() is + * called with fewer samples, it will accumulate them in partial_buf. + * This field and the related ones must not be changed after filtering + * has started. + * If 0, all related fields are ignored. + */ + int min_samples; + + /** + * Maximum number of samples to filter at once. If filter_frame() is + * called with more samples, it will split them. + */ + int max_samples; + + /** + * Number of channels. + */ + int channels; + + /** + * Link processing flags. + */ + unsigned flags; + + /** + * Number of past frames sent through the link. + */ + int64_t frame_count_in, frame_count_out; + + /** + * A pointer to a FFFramePool struct. + */ + void *frame_pool; + + /** + * True if a frame is currently wanted on the output of this filter. + * Set when liteav_ff_request_frame() is called by the output, + * cleared when a frame is filtered. + */ + int frame_wanted_out; + + /** + * For hwaccel pixel formats, this should be a reference to the + * AVHWFramesContext describing the frames. + */ + AVBufferRef *hw_frames_ctx; + +#ifndef FF_INTERNAL_FIELDS + + /** + * Internal structure members. + * The fields below this limit are internal for libavfilter's use + * and must in no way be accessed by applications. + */ + char reserved[0xF000]; + +#else /* FF_INTERNAL_FIELDS */ + + /** + * Queue of frames waiting to be filtered. + */ + FFFrameQueue fifo; + + /** + * If set, the source filter can not generate a frame as is. + * The goal is to avoid repeatedly calling the request_frame() method on + * the same link. + */ + int frame_blocked_in; + + /** + * Link input status. 
+ * If not zero, all attempts of filter_frame will fail with the + * corresponding code. + */ + int status_in; + + /** + * Timestamp of the input status change. + */ + int64_t status_in_pts; + + /** + * Link output status. + * If not zero, all attempts of request_frame will fail with the + * corresponding code. + */ + int status_out; + +#endif /* FF_INTERNAL_FIELDS */ + +}; + +/** + * Link two filters together. + * + * @param src the source filter + * @param srcpad index of the output pad on the source filter + * @param dst the destination filter + * @param dstpad index of the input pad on the destination filter + * @return zero on success + */ +int liteav_avfilter_link(AVFilterContext *src, unsigned srcpad, + AVFilterContext *dst, unsigned dstpad); + +/** + * Free the link in *link, and set its pointer to NULL. + */ +void liteav_avfilter_link_free(AVFilterLink **link); + +#if FF_API_FILTER_GET_SET +/** + * Get the number of channels of a link. + * @deprecated Use liteav_av_buffersink_get_channels() + */ +attribute_deprecated +int liteav_avfilter_link_get_channels(AVFilterLink *link); +#endif + +/** + * Set the closed field of a link. + * @deprecated applications are not supposed to mess with links, they should + * close the sinks. + */ +attribute_deprecated +void liteav_avfilter_link_set_closed(AVFilterLink *link, int closed); + +/** + * Negotiate the media format, dimensions, etc of all inputs to a filter. + * + * @param filter the filter to negotiate the properties for its inputs + * @return zero on successful negotiation + */ +int liteav_avfilter_config_links(AVFilterContext *filter); + +#define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically +#define AVFILTER_CMD_FLAG_FAST 2 ///< Only execute command when its fast (like a video out that supports contrast adjustment in hw) + +/** + * Make the filter instance process a command. 
+ * It is recommended to use liteav_avfilter_graph_send_command(). + */ +int liteav_avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags); + +/** + * Iterate over all registered filters. + * + * @param opaque a pointer where libavfilter will store the iteration state. Must + * point to NULL to start the iteration. + * + * @return the next registered filter or NULL when the iteration is + * finished + */ +const AVFilter *liteav_av_filter_iterate(void **opaque); + +#if FF_API_NEXT +/** Initialize the filter system. Register all builtin filters. */ +attribute_deprecated +void liteav_avfilter_register_all(void); + +/** + * Register a filter. This is only needed if you plan to use + * liteav_avfilter_get_by_name later to lookup the AVFilter structure by name. A + * filter can still by instantiated with liteav_avfilter_graph_alloc_filter even if it + * is not registered. + * + * @param filter the filter to register + * @return 0 if the registration was successful, a negative value + * otherwise + */ +attribute_deprecated +int liteav_avfilter_register(AVFilter *filter); + +/** + * Iterate over all registered filters. + * @return If prev is non-NULL, next registered filter after prev or NULL if + * prev is the last filter. If prev is NULL, return the first registered filter. + */ +attribute_deprecated +const AVFilter *liteav_avfilter_next(const AVFilter *prev); +#endif + +/** + * Get a filter definition matching the given name. + * + * @param name the filter name to find + * @return the filter definition, if any matching one is registered. + * NULL if none found. + */ +const AVFilter *liteav_avfilter_get_by_name(const char *name); + + +/** + * Initialize a filter with the supplied parameters. + * + * @param ctx uninitialized filter context to initialize + * @param args Options to initialize the filter with. This must be a + * ':'-separated list of options in the 'key=value' form. 
+ * May be NULL if the options have been set directly using the + * AVOptions API or there are no options that need to be set. + * @return 0 on success, a negative AVERROR on failure + */ +int liteav_avfilter_init_str(AVFilterContext *ctx, const char *args); + +/** + * Initialize a filter with the supplied dictionary of options. + * + * @param ctx uninitialized filter context to initialize + * @param options An AVDictionary filled with options for this filter. On + * return this parameter will be destroyed and replaced with + * a dict containing options that were not found. This dictionary + * must be freed by the caller. + * May be NULL, then this function is equivalent to + * liteav_avfilter_init_str() with the second parameter set to NULL. + * @return 0 on success, a negative AVERROR on failure + * + * @note This function and liteav_avfilter_init_str() do essentially the same thing, + * the difference is in manner in which the options are passed. It is up to the + * calling code to choose whichever is more preferable. The two functions also + * behave differently when some of the provided options are not declared as + * supported by the filter. In such a case, liteav_avfilter_init_str() will fail, but + * this function will leave those extra options in the options AVDictionary and + * continue as usual. + */ +int liteav_avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options); + +/** + * Free a filter context. This will also remove the filter from its + * filtergraph's list of filters. + * + * @param filter the filter to free + */ +void liteav_avfilter_free(AVFilterContext *filter); + +/** + * Insert a filter in the middle of an existing link. 
+ * + * @param link the link into which the filter should be inserted + * @param filt the filter to be inserted + * @param filt_srcpad_idx the input pad on the filter to connect + * @param filt_dstpad_idx the output pad on the filter to connect + * @return zero on success + */ +int liteav_avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt, + unsigned filt_srcpad_idx, unsigned filt_dstpad_idx); + +/** + * @return AVClass for AVFilterContext. + * + * @see liteav_av_opt_find(). + */ +const AVClass *liteav_avfilter_get_class(void); + +typedef struct AVFilterGraphInternal AVFilterGraphInternal; + +/** + * A function pointer passed to the @ref AVFilterGraph.execute callback to be + * executed multiple times, possibly in parallel. + * + * @param ctx the filter context the job belongs to + * @param arg an opaque parameter passed through from @ref + * AVFilterGraph.execute + * @param jobnr the index of the job being executed + * @param nb_jobs the total number of jobs + * + * @return 0 on success, a negative AVERROR on error + */ +typedef int (avfilter_action_func)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs); + +/** + * A function executing multiple jobs, possibly in parallel. 
+ * + * @param ctx the filter context to which the jobs belong + * @param func the function to be called multiple times + * @param arg the argument to be passed to func + * @param ret a nb_jobs-sized array to be filled with return values from each + * invocation of func + * @param nb_jobs the number of jobs to execute + * + * @return 0 on success, a negative AVERROR on error + */ +typedef int (avfilter_execute_func)(AVFilterContext *ctx, avfilter_action_func *func, + void *arg, int *ret, int nb_jobs); + +typedef struct AVFilterGraph { + const AVClass *av_class; + AVFilterContext **filters; + unsigned nb_filters; + + char *scale_sws_opts; ///< sws options to use for the auto-inserted scale filters +#if FF_API_LAVR_OPTS + attribute_deprecated char *resample_lavr_opts; ///< libavresample options to use for the auto-inserted resample filters +#endif + + /** + * Type of multithreading allowed for filters in this graph. A combination + * of AVFILTER_THREAD_* flags. + * + * May be set by the caller at any point, the setting will apply to all + * filters initialized after that. The default is allowing everything. + * + * When a filter in this graph is initialized, this field is combined using + * bit AND with AVFilterContext.thread_type to get the final mask used for + * determining allowed threading types. I.e. a threading type needs to be + * set in both to be allowed. + */ + int thread_type; + + /** + * Maximum number of threads used by filters in this graph. May be set by + * the caller before adding any filters to the filtergraph. Zero (the + * default) means that the number of threads is determined automatically. + */ + int nb_threads; + + /** + * Opaque object for libavfilter internal use. + */ + AVFilterGraphInternal *internal; + + /** + * Opaque user data. May be set by the caller to an arbitrary value, e.g. to + * be used from callbacks like @ref AVFilterGraph.execute. + * Libavfilter will not touch this field in any way. 
+ */ + void *opaque; + + /** + * This callback may be set by the caller immediately after allocating the + * graph and before adding any filters to it, to provide a custom + * multithreading implementation. + * + * If set, filters with slice threading capability will call this callback + * to execute multiple jobs in parallel. + * + * If this field is left unset, libavfilter will use its internal + * implementation, which may or may not be multithreaded depending on the + * platform and build options. + */ + avfilter_execute_func *execute; + + char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions + + /** + * Private fields + * + * The following fields are for internal use only. + * Their type, offset, number and semantic can change without notice. + */ + + AVFilterLink **sink_links; + int sink_links_count; + + unsigned disable_auto_convert; +} AVFilterGraph; + +/** + * Allocate a filter graph. + * + * @return the allocated filter graph on success or NULL. + */ +AVFilterGraph *liteav_avfilter_graph_alloc(void); + +/** + * Create a new filter instance in a filter graph. + * + * @param graph graph in which the new filter will be used + * @param filter the filter to create an instance of + * @param name Name to give to the new instance (will be copied to + * AVFilterContext.name). This may be used by the caller to identify + * different filters, libavfilter itself assigns no semantics to + * this parameter. May be NULL. + * + * @return the context of the newly created filter instance (note that it is + * also retrievable directly through AVFilterGraph.filters or with + * liteav_avfilter_graph_get_filter()) on success or NULL on failure. + */ +AVFilterContext *liteav_avfilter_graph_alloc_filter(AVFilterGraph *graph, + const AVFilter *filter, + const char *name); + +/** + * Get a filter instance identified by instance name from graph. + * + * @param graph filter graph to search through. 
+ * @param name filter instance name (should be unique in the graph). + * @return the pointer to the found filter instance or NULL if it + * cannot be found. + */ +AVFilterContext *liteav_avfilter_graph_get_filter(AVFilterGraph *graph, const char *name); + +/** + * Create and add a filter instance into an existing graph. + * The filter instance is created from the filter filt and inited + * with the parameters args and opaque. + * + * In case of success put in *filt_ctx the pointer to the created + * filter instance, otherwise set *filt_ctx to NULL. + * + * @param name the instance name to give to the created filter instance + * @param graph_ctx the filter graph + * @return a negative AVERROR error code in case of failure, a non + * negative value otherwise + */ +int liteav_avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, + const char *name, const char *args, void *opaque, + AVFilterGraph *graph_ctx); + +/** + * Enable or disable automatic format conversion inside the graph. + * + * Note that format conversion can still happen inside explicitly inserted + * scale and aresample filters. + * + * @param flags any of the AVFILTER_AUTO_CONVERT_* constants + */ +void liteav_avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags); + +enum { + AVFILTER_AUTO_CONVERT_ALL = 0, /**< all automatic conversions enabled */ + AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */ +}; + +/** + * Check validity and configure all the links and formats in the graph. + * + * @param graphctx the filter graph + * @param log_ctx context used for logging + * @return >= 0 in case of success, a negative AVERROR code otherwise + */ +int liteav_avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx); + +/** + * Free a graph, destroy its links, and set *graph to NULL. + * If *graph is NULL, do nothing. 
+ */ +void liteav_avfilter_graph_free(AVFilterGraph **graph); + +/** + * A linked-list of the inputs/outputs of the filter chain. + * + * This is mainly useful for liteav_avfilter_graph_parse() / liteav_avfilter_graph_parse2(), + * where it is used to communicate open (unlinked) inputs and outputs from and + * to the caller. + * This struct specifies, per each not connected pad contained in the graph, the + * filter context and the pad index required for establishing a link. + */ +typedef struct AVFilterInOut { + /** unique name for this input/output in the list */ + char *name; + + /** filter context associated to this input/output */ + AVFilterContext *filter_ctx; + + /** index of the filt_ctx pad to use for linking */ + int pad_idx; + + /** next input/input in the list, NULL if this is the last */ + struct AVFilterInOut *next; +} AVFilterInOut; + +/** + * Allocate a single AVFilterInOut entry. + * Must be freed with liteav_avfilter_inout_free(). + * @return allocated AVFilterInOut on success, NULL on failure. + */ +AVFilterInOut *liteav_avfilter_inout_alloc(void); + +/** + * Free the supplied list of AVFilterInOut and set *inout to NULL. + * If *inout is NULL, do nothing. + */ +void liteav_avfilter_inout_free(AVFilterInOut **inout); + +/** + * Add a graph described by a string to a graph. + * + * @note The caller must provide the lists of inputs and outputs, + * which therefore must be known before calling the function. + * + * @note The inputs parameter describes inputs of the already existing + * part of the graph; i.e. from the point of view of the newly created + * part, they are outputs. Similarly the outputs parameter describes + * outputs of the already existing filters, which are provided as + * inputs to the parsed filters. 
+ * + * @param graph the filter graph where to link the parsed graph context + * @param filters string to be parsed + * @param inputs linked list to the inputs of the graph + * @param outputs linked list to the outputs of the graph + * @return zero on success, a negative AVERROR code on error + */ +int liteav_avfilter_graph_parse(AVFilterGraph *graph, const char *filters, + AVFilterInOut *inputs, AVFilterInOut *outputs, + void *log_ctx); + +/** + * Add a graph described by a string to a graph. + * + * In the graph filters description, if the input label of the first + * filter is not specified, "in" is assumed; if the output label of + * the last filter is not specified, "out" is assumed. + * + * @param graph the filter graph where to link the parsed graph context + * @param filters string to be parsed + * @param inputs pointer to a linked list to the inputs of the graph, may be NULL. + * If non-NULL, *inputs is updated to contain the list of open inputs + * after the parsing, should be freed with liteav_avfilter_inout_free(). + * @param outputs pointer to a linked list to the outputs of the graph, may be NULL. + * If non-NULL, *outputs is updated to contain the list of open outputs + * after the parsing, should be freed with liteav_avfilter_inout_free(). + * @return non negative on success, a negative AVERROR code on error + */ +int liteav_avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, + AVFilterInOut **inputs, AVFilterInOut **outputs, + void *log_ctx); + +/** + * Add a graph described by a string to a graph. + * + * @param[in] graph the filter graph where to link the parsed graph context + * @param[in] filters string to be parsed + * @param[out] inputs a linked list of all free (unlinked) inputs of the + * parsed graph will be returned here. It is to be freed + * by the caller using liteav_avfilter_inout_free(). + * @param[out] outputs a linked list of all free (unlinked) outputs of the + * parsed graph will be returned here. 
It is to be freed by the + * caller using liteav_avfilter_inout_free(). + * @return zero on success, a negative AVERROR code on error + * + * @note This function returns the inputs and outputs that are left + * unlinked after parsing the graph and the caller then deals with + * them. + * @note This function makes no reference whatsoever to already + * existing parts of the graph and the inputs parameter will on return + * contain inputs of the newly parsed part of the graph. Analogously + * the outputs parameter will contain outputs of the newly created + * filters. + */ +int liteav_avfilter_graph_parse2(AVFilterGraph *graph, const char *filters, + AVFilterInOut **inputs, + AVFilterInOut **outputs); + +/** + * Send a command to one or more filter instances. + * + * @param graph the filter graph + * @param target the filter(s) to which the command should be sent + * "all" sends to all filters + * otherwise it can be a filter or filter instance name + * which will send the command to all matching filters. + * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param res a buffer with size res_size where the filter(s) can return a response. + * + * @returns >=0 on success otherwise an error code. + * AVERROR(ENOSYS) on unsupported commands + */ +int liteav_avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags); + +/** + * Queue a command for one or more filter instances. + * + * @param graph the filter graph + * @param target the filter(s) to which the command should be sent + * "all" sends to all filters + * otherwise it can be a filter or filter instance name + * which will send the command to all matching filters. 
+ * @param cmd the command to sent, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param ts time at which the command should be sent to the filter + * + * @note As this executes commands after this function returns, no return code + * from the filter is provided, also AVFILTER_CMD_FLAG_ONE is not supported. + */ +int liteav_avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts); + + +/** + * Dump a graph into a human-readable string representation. + * + * @param graph the graph to dump + * @param options formatting options; currently ignored + * @return a string, or NULL in case of memory allocation failure; + * the string must be freed using liteav_av_free + */ +char *liteav_avfilter_graph_dump(AVFilterGraph *graph, const char *options); + +/** + * Request a frame on the oldest sink link. + * + * If the request returns AVERROR_EOF, try the next. + * + * Note that this function is not meant to be the sole scheduling mechanism + * of a filtergraph, only a convenience function to help drain a filtergraph + * in a balanced way under normal circumstances. + * + * Also note that AVERROR_EOF does not mean that frames did not arrive on + * some of the sinks during the process. + * When there are multiple sink links, in case the requested link + * returns an EOF, this may cause a filter to flush pending frames + * which are sent to another sink link, although unrequested. 
+ * + * @return the return value of liteav_ff_request_frame(), + * or AVERROR_EOF if all links returned AVERROR_EOF + */ +int liteav_avfilter_graph_request_oldest(AVFilterGraph *graph); + +/** + * @} + */ + +#endif /* AVFILTER_AVFILTER_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/buffersink.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/buffersink.h new file mode 100644 index 0000000..6ca5aea --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/buffersink.h @@ -0,0 +1,166 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERSINK_H +#define AVFILTER_BUFFERSINK_H + +/** + * @file + * @ingroup lavfi_buffersink + * memory buffer sink API for audio and video + */ + +#include "avfilter.h" + +/** + * @defgroup lavfi_buffersink Buffer sink API + * @ingroup lavfi + * @{ + */ + +/** + * Get a frame with filtered data from sink and put it in frame. + * + * @param ctx pointer to a buffersink or abuffersink filter context. 
+ * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using liteav_av_frame_unref() / liteav_av_frame_free() + * @param flags a combination of AV_BUFFERSINK_FLAG_* flags + * + * @return >= 0 in for success, a negative AVERROR code for failure. + */ +int liteav_av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags); + +/** + * Tell av_buffersink_get_buffer_ref() to read video/samples buffer + * reference, but not remove it from the buffer. This is useful if you + * need only to read a video/samples buffer, without to fetch it. + */ +#define AV_BUFFERSINK_FLAG_PEEK 1 + +/** + * Tell av_buffersink_get_buffer_ref() not to request a frame from its input. + * If a frame is already buffered, it is read (and removed from the buffer), + * but if no frame is present, return AVERROR(EAGAIN). + */ +#define AV_BUFFERSINK_FLAG_NO_REQUEST 2 + +/** + * Struct to use for initializing a buffersink context. + */ +typedef struct AVBufferSinkParams { + const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE +} AVBufferSinkParams; + +/** + * Create an AVBufferSinkParams structure. + * + * Must be freed with liteav_av_free(). + */ +AVBufferSinkParams *liteav_av_buffersink_params_alloc(void); + +/** + * Struct to use for initializing an abuffersink context. + */ +typedef struct AVABufferSinkParams { + const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE + const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1 + const int *channel_counts; ///< list of allowed channel counts, terminated by -1 + int all_channel_counts; ///< if not 0, accept any channel count or layout + int *sample_rates; ///< list of allowed sample rates, terminated by -1 +} AVABufferSinkParams; + +/** + * Create an AVABufferSinkParams structure. + * + * Must be freed with liteav_av_free(). 
+ */ +AVABufferSinkParams *liteav_av_abuffersink_params_alloc(void); + +/** + * Set the frame size for an audio buffer sink. + * + * All calls to av_buffersink_get_buffer_ref will return a buffer with + * exactly the specified number of samples, or AVERROR(EAGAIN) if there is + * not enough. The last buffer at EOF will be padded with 0. + */ +void liteav_av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size); + +/** + * @defgroup lavfi_buffersink_accessors Buffer sink accessors + * Get the properties of the stream + * @{ + */ + +enum AVMediaType liteav_av_buffersink_get_type (const AVFilterContext *ctx); +AVRational liteav_av_buffersink_get_time_base (const AVFilterContext *ctx); +int liteav_av_buffersink_get_format (const AVFilterContext *ctx); + +AVRational liteav_av_buffersink_get_frame_rate (const AVFilterContext *ctx); +int liteav_av_buffersink_get_w (const AVFilterContext *ctx); +int liteav_av_buffersink_get_h (const AVFilterContext *ctx); +AVRational liteav_av_buffersink_get_sample_aspect_ratio (const AVFilterContext *ctx); + +int liteav_av_buffersink_get_channels (const AVFilterContext *ctx); +uint64_t liteav_av_buffersink_get_channel_layout (const AVFilterContext *ctx); +int liteav_av_buffersink_get_sample_rate (const AVFilterContext *ctx); + +AVBufferRef * liteav_av_buffersink_get_hw_frames_ctx (const AVFilterContext *ctx); + +/** @} */ + +/** + * Get a frame with filtered data from sink and put it in frame. + * + * @param ctx pointer to a context of a buffersink or abuffersink AVFilter. + * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using liteav_av_frame_unref() / liteav_av_frame_free() + * + * @return + * - >= 0 if a frame was successfully returned. + * - AVERROR(EAGAIN) if no frames are available at this point; more + * input frames must be added to the filtergraph to get more output. + * - AVERROR_EOF if there will be no more output frames on this sink. 
+ * - A different negative AVERROR code in other failure cases. + */ +int liteav_av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame); + +/** + * Same as liteav_av_buffersink_get_frame(), but with the ability to specify the number + * of samples read. This function is less efficient than + * liteav_av_buffersink_get_frame(), because it copies the data around. + * + * @param ctx pointer to a context of the abuffersink AVFilter. + * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using liteav_av_frame_unref() / liteav_av_frame_free() + * frame will contain exactly nb_samples audio samples, except at + * the end of stream, when it can contain less than nb_samples. + * + * @return The return codes have the same meaning as for + * liteav_av_buffersink_get_frame(). + * + * @warning do not mix this function with liteav_av_buffersink_get_frame(). Use only one or + * the other with a single sink, not both. + */ +int liteav_av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples); + +/** + * @} + */ + +#endif /* AVFILTER_BUFFERSINK_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/buffersrc.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/buffersrc.h new file mode 100644 index 0000000..1e6b84a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/buffersrc.h @@ -0,0 +1,210 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERSRC_H +#define AVFILTER_BUFFERSRC_H + +/** + * @file + * @ingroup lavfi_buffersrc + * Memory buffer source API. + */ + +#include "avfilter.h" + +/** + * @defgroup lavfi_buffersrc Buffer source API + * @ingroup lavfi + * @{ + */ + +enum { + + /** + * Do not check for format changes. + */ + AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1, + + /** + * Immediately push the frame to the output. + */ + AV_BUFFERSRC_FLAG_PUSH = 4, + + /** + * Keep a reference to the frame. + * If the frame if reference-counted, create a new reference; otherwise + * copy the frame data. + */ + AV_BUFFERSRC_FLAG_KEEP_REF = 8, + +}; + +/** + * Get the number of failed requests. + * + * A failed request is when the request_frame method is called while no + * frame is present in the buffer. + * The number is reset when a frame is added. + */ +unsigned liteav_av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src); + +/** + * This structure contains the parameters describing the frames that will be + * passed to this filter. + * + * It should be allocated with liteav_av_buffersrc_parameters_alloc() and freed with + * liteav_av_free(). All the allocated fields in it remain owned by the caller. + */ +typedef struct AVBufferSrcParameters { + /** + * video: the pixel format, value corresponds to enum AVPixelFormat + * audio: the sample format, value corresponds to enum AVSampleFormat + */ + int format; + /** + * The timebase to be used for the timestamps on the input frames. 
+ */ + AVRational time_base; + + /** + * Video only, the display dimensions of the input frames. + */ + int width, height; + + /** + * Video only, the sample (pixel) aspect ratio. + */ + AVRational sample_aspect_ratio; + + /** + * Video only, the frame rate of the input video. This field must only be + * set to a non-zero value if input stream has a known constant framerate + * and should be left at its initial value if the framerate is variable or + * unknown. + */ + AVRational frame_rate; + + /** + * Video with a hwaccel pixel format only. This should be a reference to an + * AVHWFramesContext instance describing the input frames. + */ + AVBufferRef *hw_frames_ctx; + + /** + * Audio only, the audio sampling rate in samples per second. + */ + int sample_rate; + + /** + * Audio only, the audio channel layout + */ + uint64_t channel_layout; +} AVBufferSrcParameters; + +/** + * Allocate a new AVBufferSrcParameters instance. It should be freed by the + * caller with liteav_av_free(). + */ +AVBufferSrcParameters *liteav_av_buffersrc_parameters_alloc(void); + +/** + * Initialize the buffersrc or abuffersrc filter with the provided parameters. + * This function may be called multiple times, the later calls override the + * previous ones. Some of the parameters may also be set through AVOptions, then + * whatever method is used last takes precedence. + * + * @param ctx an instance of the buffersrc or abuffersrc filter + * @param param the stream parameters. The frames later passed to this filter + * must conform to those parameters. All the allocated fields in + * param remain owned by the caller, libavfilter will make internal + * copies or references when necessary. + * @return 0 on success, a negative AVERROR code on failure. + */ +int liteav_av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param); + +/** + * Add a frame to the buffer source. + * + * @param ctx an instance of the buffersrc filter + * @param frame frame to be added. 
If the frame is reference counted, this + * function will make a new reference to it. Otherwise the frame data will be + * copied. + * + * @return 0 on success, a negative AVERROR on error + * + * This function is equivalent to liteav_av_buffersrc_add_frame_flags() with the + * AV_BUFFERSRC_FLAG_KEEP_REF flag. + */ +av_warn_unused_result +int liteav_av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame); + +/** + * Add a frame to the buffer source. + * + * @param ctx an instance of the buffersrc filter + * @param frame frame to be added. If the frame is reference counted, this + * function will take ownership of the reference(s) and reset the frame. + * Otherwise the frame data will be copied. If this function returns an error, + * the input frame is not touched. + * + * @return 0 on success, a negative AVERROR on error. + * + * @note the difference between this function and liteav_av_buffersrc_write_frame() is + * that liteav_av_buffersrc_write_frame() creates a new reference to the input frame, + * while this function takes ownership of the reference passed to it. + * + * This function is equivalent to liteav_av_buffersrc_add_frame_flags() without the + * AV_BUFFERSRC_FLAG_KEEP_REF flag. + */ +av_warn_unused_result +int liteav_av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame); + +/** + * Add a frame to the buffer source. + * + * By default, if the frame is reference-counted, this function will take + * ownership of the reference(s) and reset the frame. This can be controlled + * using the flags. + * + * If this function returns an error, the input frame is not touched. 
+ * + * @param buffer_src pointer to a buffer source context + * @param frame a frame, or NULL to mark EOF + * @param flags a combination of AV_BUFFERSRC_FLAG_* + * @return >= 0 in case of success, a negative AVERROR code + * in case of failure + */ +av_warn_unused_result +int liteav_av_buffersrc_add_frame_flags(AVFilterContext *buffer_src, + AVFrame *frame, int flags); + +/** + * Close the buffer source after EOF. + * + * This is similar to passing NULL to liteav_av_buffersrc_add_frame_flags() + * except it takes the timestamp of the EOF, i.e. the timestamp of the end + * of the last frame. + */ +int liteav_av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags); + +/** + * @} + */ + +#endif /* AVFILTER_BUFFERSRC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/version.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/version.h new file mode 100644 index 0000000..9f0a996 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavfilter/version.h @@ -0,0 +1,65 @@ +/* + * Version macros. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_VERSION_H +#define AVFILTER_VERSION_H + +/** + * @file + * @ingroup lavfi + * Libavfilter version macros + */ + +#include "libavutil/version.h" + +#define LIBAVFILTER_VERSION_MAJOR 7 +#define LIBAVFILTER_VERSION_MINOR 40 +#define LIBAVFILTER_VERSION_MICRO 101 + +#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ + LIBAVFILTER_VERSION_MINOR, \ + LIBAVFILTER_VERSION_MICRO) +#define LIBAVFILTER_VERSION AV_VERSION(LIBAVFILTER_VERSION_MAJOR, \ + LIBAVFILTER_VERSION_MINOR, \ + LIBAVFILTER_VERSION_MICRO) +#define LIBAVFILTER_BUILD LIBAVFILTER_VERSION_INT + +#define LIBAVFILTER_IDENT "Lavfi" AV_STRINGIFY(LIBAVFILTER_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. 
+ */ + +#ifndef FF_API_OLD_FILTER_OPTS_ERROR +#define FF_API_OLD_FILTER_OPTS_ERROR (LIBAVFILTER_VERSION_MAJOR < 8) +#endif +#ifndef FF_API_LAVR_OPTS +#define FF_API_LAVR_OPTS (LIBAVFILTER_VERSION_MAJOR < 8) +#endif +#ifndef FF_API_FILTER_GET_SET +#define FF_API_FILTER_GET_SET (LIBAVFILTER_VERSION_MAJOR < 8) +#endif +#ifndef FF_API_NEXT +#define FF_API_NEXT (LIBAVFILTER_VERSION_MAJOR < 8) +#endif + +#endif /* AVFILTER_VERSION_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/avc.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/avc.h new file mode 100644 index 0000000..d91d636 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/avc.h @@ -0,0 +1,38 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * AVC helper functions for muxers + * Copyright (c) 2008 Aurelien Jacobs <aurel@gnuage.org> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_AVC_H +#define AVFORMAT_AVC_H + +#include <stdint.h> +#include "avio.h" + +int liteav_ff_avc_parse_nal_units(AVIOContext *s, const uint8_t *buf, int size); +int liteav_ff_avc_parse_nal_units_buf(const uint8_t *buf_in, uint8_t **buf, int *size); +int liteav_ff_isom_write_avcc(AVIOContext *pb, const uint8_t *data, int len); +const uint8_t *liteav_ff_avc_find_startcode(const uint8_t *p, const uint8_t *end); +int liteav_ff_avc_write_annexb_extradata(const uint8_t *in, uint8_t **buf, int *size); +const uint8_t *liteav_ff_avc_mp4_find_startcode(const uint8_t *start, + const uint8_t *end, + int nal_length_size); + +#endif /* AVFORMAT_AVC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/avformat.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/avformat.h new file mode 100644 index 0000000..8086860 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/avformat.h @@ -0,0 +1,3108 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_AVFORMAT_H +#define AVFORMAT_AVFORMAT_H + +/** + * @file + * @ingroup libavf + * Main libavformat public API header + */ + +/** + * @defgroup libavf libavformat + * I/O and Muxing/Demuxing Library + * + * Libavformat (lavf) is a library for dealing with various media container + * formats. Its main two purposes are demuxing - i.e. splitting a media file + * into component streams, and the reverse process of muxing - writing supplied + * data in a specified container format. It also has an @ref lavf_io + * "I/O module" which supports a number of protocols for accessing the data (e.g. + * file, tcp, http and others). Before using lavf, you need to call + * liteav_av_register_all() to register all compiled muxers, demuxers and protocols. + * Unless you are absolutely sure you won't use libavformat's network + * capabilities, you should also call avformat_network_init(). + * + * A supported input format is described by an AVInputFormat struct, conversely + * an output format is described by AVOutputFormat. You can iterate over all + * registered input/output formats using the liteav_av_iformat_next() / + * liteav_av_oformat_next() functions. The protocols layer is not part of the public + * API, so you can only get the names of supported protocols with the + * liteav_avio_enum_protocols() function. + * + * Main lavf structure used for both muxing and demuxing is AVFormatContext, + * which exports all information about the file being read or written. As with + * most Libavformat structures, its size is not part of public ABI, so it cannot be + * allocated on stack or directly with liteav_av_malloc(). 
To create an + * AVFormatContext, use avformat_alloc_context() (some functions, like + * avformat_open_input() might do that for you). + * + * Most importantly an AVFormatContext contains: + * @li the @ref AVFormatContext.iformat "input" or @ref AVFormatContext.oformat + * "output" format. It is either autodetected or set by user for input; + * always set by user for output. + * @li an @ref AVFormatContext.streams "array" of AVStreams, which describe all + * elementary streams stored in the file. AVStreams are typically referred to + * using their index in this array. + * @li an @ref AVFormatContext.pb "I/O context". It is either opened by lavf or + * set by user for input, always set by user for output (unless you are dealing + * with an AVFMT_NOFILE format). + * + * @section lavf_options Passing options to (de)muxers + * It is possible to configure lavf muxers and demuxers using the @ref avoptions + * mechanism. Generic (format-independent) libavformat options are provided by + * AVFormatContext, they can be examined from a user program by calling + * liteav_av_opt_next() / liteav_av_opt_find() on an allocated AVFormatContext (or its AVClass + * from avformat_get_class()). Private (format-specific) options are provided by + * AVFormatContext.priv_data if and only if AVInputFormat.priv_class / + * AVOutputFormat.priv_class of the corresponding format struct is non-NULL. + * Further options may be provided by the @ref AVFormatContext.pb "I/O context", + * if its AVClass is non-NULL, and the protocols layer. See the discussion on + * nesting in @ref avoptions documentation to learn how to access those. + * + * @section urls + * URL strings in libavformat are made of a scheme/protocol, a ':', and a + * scheme specific string. URLs without a scheme and ':' used for local files + * are supported but deprecated. "file:" should be used for local files. + * + * It is important that the scheme string is not taken from untrusted + * sources without checks. 
+ * + * Note that some schemes/protocols are quite powerful, allowing access to + * both local and remote files, parts of them, concatenations of them, local + * audio and video devices and so on. + * + * @{ + * + * @defgroup lavf_decoding Demuxing + * @{ + * Demuxers read a media file and split it into chunks of data (@em packets). A + * @ref AVPacket "packet" contains one or more encoded frames which belongs to a + * single elementary stream. In the lavf API this process is represented by the + * avformat_open_input() function for opening a file, av_read_frame() for + * reading a single packet and finally avformat_close_input(), which does the + * cleanup. + * + * @section lavf_decoding_open Opening a media file + * The minimum information required to open a file is its URL, which + * is passed to avformat_open_input(), as in the following code: + * @code + * const char *url = "file:in.mp3"; + * AVFormatContext *s = NULL; + * int ret = avformat_open_input(&s, url, NULL, NULL); + * if (ret < 0) + * abort(); + * @endcode + * The above code attempts to allocate an AVFormatContext, open the + * specified file (autodetecting the format) and read the header, exporting the + * information stored there into s. Some formats do not have a header or do not + * store enough information there, so it is recommended that you call the + * avformat_find_stream_info() function which tries to read and decode a few + * frames to find missing information. + * + * In some cases you might want to preallocate an AVFormatContext yourself with + * avformat_alloc_context() and do some tweaking on it before passing it to + * avformat_open_input(). One such case is when you want to use custom functions + * for reading input data instead of lavf internal I/O layer. + * To do that, create your own AVIOContext with liteav_avio_alloc_context(), passing + * your reading callbacks to it. Then set the @em pb field of your + * AVFormatContext to newly created AVIOContext. 
+ * + * Since the format of the opened file is in general not known until after + * avformat_open_input() has returned, it is not possible to set demuxer private + * options on a preallocated context. Instead, the options should be passed to + * avformat_open_input() wrapped in an AVDictionary: + * @code + * AVDictionary *options = NULL; + * liteav_av_dict_set(&options, "video_size", "640x480", 0); + * liteav_av_dict_set(&options, "pixel_format", "rgb24", 0); + * + * if (avformat_open_input(&s, url, NULL, &options) < 0) + * abort(); + * liteav_av_dict_free(&options); + * @endcode + * This code passes the private options 'video_size' and 'pixel_format' to the + * demuxer. They would be necessary for e.g. the rawvideo demuxer, since it + * cannot know how to interpret raw video data otherwise. If the format turns + * out to be something different than raw video, those options will not be + * recognized by the demuxer and therefore will not be applied. Such unrecognized + * options are then returned in the options dictionary (recognized options are + * consumed). The calling program can handle such unrecognized options as it + * wishes, e.g. + * @code + * AVDictionaryEntry *e; + * if (e = liteav_av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX)) { + * fprintf(stderr, "Option %s not recognized by the demuxer.\n", e->key); + * abort(); + * } + * @endcode + * + * After you have finished reading the file, you must close it with + * avformat_close_input(). It will free everything associated with the file. + * + * @section lavf_decoding_read Reading from an opened file + * Reading data from an opened AVFormatContext is done by repeatedly calling + * av_read_frame() on it. Each call, if successful, will return an AVPacket + * containing encoded data for one AVStream, identified by + * AVPacket.stream_index. 
This packet may be passed straight into the libavcodec + * decoding functions liteav_avcodec_send_packet() or liteav_avcodec_decode_subtitle2() if the + * caller wishes to decode the data. + * + * AVPacket.pts, AVPacket.dts and AVPacket.duration timing information will be + * set if known. They may also be unset (i.e. AV_NOPTS_VALUE for + * pts/dts, 0 for duration) if the stream does not provide them. The timing + * information will be in AVStream.time_base units, i.e. it has to be + * multiplied by the timebase to convert them to seconds. + * + * If AVPacket.buf is set on the returned packet, then the packet is + * allocated dynamically and the user may keep it indefinitely. + * Otherwise, if AVPacket.buf is NULL, the packet data is backed by a + * static storage somewhere inside the demuxer and the packet is only valid + * until the next av_read_frame() call or closing the file. If the caller + * requires a longer lifetime, liteav_av_dup_packet() will make an liteav_av_malloc()ed copy + * of it. + * In both cases, the packet must be freed with liteav_av_packet_unref() when it is no + * longer needed. + * + * @section lavf_decoding_seek Seeking + * @} + * + * @defgroup lavf_encoding Muxing + * @{ + * Muxers take encoded data in the form of @ref AVPacket "AVPackets" and write + * it into files or other output bytestreams in the specified container format. + * + * The main API functions for muxing are liteav_avformat_write_header() for writing the + * file header, liteav_av_write_frame() / liteav_av_interleaved_write_frame() for writing the + * packets and liteav_av_write_trailer() for finalizing the file. + * + * At the beginning of the muxing process, the caller must first call + * avformat_alloc_context() to create a muxing context. The caller then sets up + * the muxer by filling the various fields in this context: + * + * - The @ref AVFormatContext.oformat "oformat" field must be set to select the + * muxer that will be used. 
+ * - Unless the format is of the AVFMT_NOFILE type, the @ref AVFormatContext.pb + * "pb" field must be set to an opened IO context, either returned from + * liteav_avio_open2() or a custom one. + * - Unless the format is of the AVFMT_NOSTREAMS type, at least one stream must + * be created with the avformat_new_stream() function. The caller should fill + * the @ref AVStream.codecpar "stream codec parameters" information, such as the + * codec @ref AVCodecParameters.codec_type "type", @ref AVCodecParameters.codec_id + * "id" and other parameters (e.g. width / height, the pixel or sample format, + * etc.) as known. The @ref AVStream.time_base "stream timebase" should + * be set to the timebase that the caller desires to use for this stream (note + * that the timebase actually used by the muxer can be different, as will be + * described later). + * - It is advised to manually initialize only the relevant fields in + * AVCodecParameters, rather than using @ref avcodec_parameters_copy() during + * remuxing: there is no guarantee that the codec context values remain valid + * for both input and output format contexts. + * - The caller may fill in additional information, such as @ref + * AVFormatContext.metadata "global" or @ref AVStream.metadata "per-stream" + * metadata, @ref AVFormatContext.chapters "chapters", @ref + * AVFormatContext.programs "programs", etc. as described in the + * AVFormatContext documentation. Whether such information will actually be + * stored in the output depends on what the container format and the muxer + * support. + * + * When the muxing context is fully set up, the caller must call + * liteav_avformat_write_header() to initialize the muxer internals and write the file + * header. Whether anything actually is written to the IO context at this step + * depends on the muxer, but this function must always be called. Any muxer + * private options must be passed in the options parameter to this function. 
+ * + * The data is then sent to the muxer by repeatedly calling liteav_av_write_frame() or + * liteav_av_interleaved_write_frame() (consult those functions' documentation for + * discussion on the difference between them; only one of them may be used with + * a single muxing context, they should not be mixed). Do note that the timing + * information on the packets sent to the muxer must be in the corresponding + * AVStream's timebase. That timebase is set by the muxer (in the + * liteav_avformat_write_header() step) and may be different from the timebase + * requested by the caller. + * + * Once all the data has been written, the caller must call liteav_av_write_trailer() + * to flush any buffered packets and finalize the output file, then close the IO + * context (if any) and finally free the muxing context with + * avformat_free_context(). + * @} + * + * @defgroup lavf_io I/O Read/Write + * @{ + * @section lavf_io_dirlist Directory listing + * The directory listing API makes it possible to list files on remote servers. + * + * Some of possible use cases: + * - an "open file" dialog to choose files from a remote location, + * - a recursive media finder providing a player with an ability to play all + * files from a given directory. + * + * @subsection lavf_io_dirlist_open Opening a directory + * At first, a directory needs to be opened by calling liteav_avio_open_dir() + * supplied with a URL and, optionally, ::AVDictionary containing + * protocol-specific parameters. The function returns zero or positive + * integer and allocates AVIODirContext on success. + * + * @code + * AVIODirContext *ctx = NULL; + * if (liteav_avio_open_dir(&ctx, "smb://example.com/some_dir", NULL) < 0) { + * fprintf(stderr, "Cannot open directory.\n"); + * abort(); + * } + * @endcode + * + * This code tries to open a sample directory using smb protocol without + * any additional parameters. + * + * @subsection lavf_io_dirlist_read Reading entries + * Each directory's entry (i.e. 
file, another directory, anything else + * within ::AVIODirEntryType) is represented by AVIODirEntry. + * Reading consecutive entries from an opened AVIODirContext is done by + * repeatedly calling liteav_avio_read_dir() on it. Each call returns zero or + * positive integer if successful. Reading can be stopped right after the + * NULL entry has been read -- it means there are no entries left to be + * read. The following code reads all entries from a directory associated + * with ctx and prints their names to standard output. + * @code + * AVIODirEntry *entry = NULL; + * for (;;) { + * if (liteav_avio_read_dir(ctx, &entry) < 0) { + * fprintf(stderr, "Cannot list directory.\n"); + * abort(); + * } + * if (!entry) + * break; + * printf("%s\n", entry->name); + * liteav_avio_free_directory_entry(&entry); + * } + * @endcode + * @} + * + * @defgroup lavf_codec Demuxers + * @{ + * @defgroup lavf_codec_native Native Demuxers + * @{ + * @} + * @defgroup lavf_codec_wrappers External library wrappers + * @{ + * @} + * @} + * @defgroup lavf_protos I/O Protocols + * @{ + * @} + * @defgroup lavf_internal Internal + * @{ + * @} + * @} + */ + +#include <time.h> +#include <stdio.h> /* FILE */ +#include "libavcodec/avcodec.h" +#include "libavutil/dict.h" +#include "libavutil/log.h" + +#include "avio.h" +#include "libavformat/version.h" + +struct AVFormatContext; + +struct AVDeviceInfoList; +struct AVDeviceCapabilitiesQuery; + +/** + * @defgroup metadata_api Public Metadata API + * @{ + * @ingroup libavf + * The metadata API allows libavformat to export metadata tags to a client + * application when demuxing. Conversely it allows a client application to + * set metadata when muxing. + * + * Metadata is exported or set as pairs of key/value strings in the 'metadata' + * fields of the AVFormatContext, AVStream, AVChapter and AVProgram structs + * using the @ref lavu_dict "AVDictionary" API. Like all strings in FFmpeg, + * metadata is assumed to be UTF-8 encoded Unicode. 
Note that metadata + * exported by demuxers isn't checked to be valid UTF-8 in most cases. + * + * Important concepts to keep in mind: + * - Keys are unique; there can never be 2 tags with the same key. This is + * also meant semantically, i.e., a demuxer should not knowingly produce + * several keys that are literally different but semantically identical. + * E.g., key=Author5, key=Author6. In this example, all authors must be + * placed in the same tag. + * - Metadata is flat, not hierarchical; there are no subtags. If you + * want to store, e.g., the email address of the child of producer Alice + * and actor Bob, that could have key=alice_and_bobs_childs_email_address. + * - Several modifiers can be applied to the tag name. This is done by + * appending a dash character ('-') and the modifier name in the order + * they appear in the list below -- e.g. foo-eng-sort, not foo-sort-eng. + * - language -- a tag whose value is localized for a particular language + * is appended with the ISO 639-2/B 3-letter language code. + * For example: Author-ger=Michael, Author-eng=Mike + * The original/default language is in the unqualified "Author" tag. + * A demuxer should set a default if it sets any translated tag. + * - sorting -- a modified version of a tag that should be used for + * sorting will have '-sort' appended. E.g. artist="The Beatles", + * artist-sort="Beatles, The". + * - Some protocols and demuxers support metadata updates. After a successful + * call to av_read_packet(), AVFormatContext.event_flags or AVStream.event_flags + * will be updated to indicate if metadata changed. In order to detect metadata + * changes on a stream, you need to loop through all streams in the AVFormatContext + * and check their individual event_flags. + * + * - Demuxers attempt to export metadata in a generic format, however tags + * with no generic equivalents are left as they are stored in the container. 
+ * Follows a list of generic tag names: + * + @verbatim + album -- name of the set this work belongs to + album_artist -- main creator of the set/album, if different from artist. + e.g. "Various Artists" for compilation albums. + artist -- main creator of the work + comment -- any additional description of the file. + composer -- who composed the work, if different from artist. + copyright -- name of copyright holder. + creation_time-- date when the file was created, preferably in ISO 8601. + date -- date when the work was created, preferably in ISO 8601. + disc -- number of a subset, e.g. disc in a multi-disc collection. + encoder -- name/settings of the software/hardware that produced the file. + encoded_by -- person/group who created the file. + filename -- original name of the file. + genre -- <self-evident>. + language -- main language in which the work is performed, preferably + in ISO 639-2 format. Multiple languages can be specified by + separating them with commas. + performer -- artist who performed the work, if different from artist. + E.g for "Also sprach Zarathustra", artist would be "Richard + Strauss" and performer "London Philharmonic Orchestra". + publisher -- name of the label/publisher. + service_name -- name of the service in broadcasting (channel name). + service_provider -- name of the service provider in broadcasting. + title -- name of the work. + track -- number of this work in the set, can be in form current/total. + variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of + @endverbatim + * + * Look in the examples section for an application example how to use the Metadata API. + * + * @} + */ + +/* packet functions */ + + +/** + * Allocate and read the payload of a packet and initialize its + * fields with default values. 
+ * + * @param s associated IO context + * @param pkt packet + * @param size desired payload size + * @return >0 (read size) if OK, AVERROR_xxx otherwise + */ +int av_get_packet(AVIOContext *s, AVPacket *pkt, int size); + + +/** + * Read data and append it to the current content of the AVPacket. + * If pkt->size is 0 this is identical to av_get_packet. + * Note that this uses liteav_av_grow_packet and thus involves a realloc + * which is inefficient. Thus this function should only be used + * when there is no reasonable way to know (an upper bound of) + * the final size. + * + * @param s associated IO context + * @param pkt packet + * @param size amount of data to read + * @return >0 (read size) if OK, AVERROR_xxx otherwise, previous data + * will not be lost even if an error occurs. + */ +int av_append_packet(AVIOContext *s, AVPacket *pkt, int size); + +/*************************************************/ +/* input/output formats */ + +struct AVCodecTag; + +/** + * This structure contains the data a format has to probe a file. + */ +typedef struct AVProbeData { + const char *filename; + unsigned char *buf; /**< Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero. */ + int buf_size; /**< Size of buf except extra allocated bytes */ + const char *mime_type; /**< mime_type, when known. */ +} AVProbeData; + +#define AVPROBE_SCORE_RETRY (AVPROBE_SCORE_MAX/4) +#define AVPROBE_SCORE_STREAM_RETRY (AVPROBE_SCORE_MAX/4-1) + +#define AVPROBE_SCORE_EXTENSION 50 ///< score for file extension +#define AVPROBE_SCORE_MIME 75 ///< score for file mime type +#define AVPROBE_SCORE_MAX 100 ///< maximum score + +#define AVPROBE_PADDING_SIZE 32 ///< extra allocated bytes at the end of the probe buffer + +/// Demuxer will use liteav_avio_open, no opened file should be provided by the caller. +#define AVFMT_NOFILE 0x0001 +#define AVFMT_NEEDNUMBER 0x0002 /**< Needs '%d' in filename. */ +#define AVFMT_SHOW_IDS 0x0008 /**< Show format stream IDs numbers. 
*/ +#define AVFMT_GLOBALHEADER 0x0040 /**< Format wants global header. */ +#define AVFMT_NOTIMESTAMPS 0x0080 /**< Format does not need / have any timestamps. */ +#define AVFMT_GENERIC_INDEX 0x0100 /**< Use generic index building code. */ +#define AVFMT_TS_DISCONT 0x0200 /**< Format allows timestamp discontinuities. Note, muxers always require valid (monotone) timestamps */ +#define AVFMT_VARIABLE_FPS 0x0400 /**< Format allows variable fps. */ +#define AVFMT_NODIMENSIONS 0x0800 /**< Format does not need width/height */ +#define AVFMT_NOSTREAMS 0x1000 /**< Format does not require any streams */ +#define AVFMT_NOBINSEARCH 0x2000 /**< Format does not allow to fall back on binary search via read_timestamp */ +#define AVFMT_NOGENSEARCH 0x4000 /**< Format does not allow to fall back on generic search */ +#define AVFMT_NO_BYTE_SEEK 0x8000 /**< Format does not allow seeking by bytes */ +#define AVFMT_ALLOW_FLUSH 0x10000 /**< Format allows flushing. If not set, the muxer will not receive a NULL packet in the write_packet function. */ +#define AVFMT_TS_NONSTRICT 0x20000 /**< Format does not require strictly + increasing timestamps, but they must + still be monotonic */ +#define AVFMT_TS_NEGATIVE 0x40000 /**< Format allows muxing negative + timestamps. If not set the timestamp + will be shifted in liteav_av_write_frame and + liteav_av_interleaved_write_frame so they + start from 0. + The user or muxer can override this through + AVFormatContext.avoid_negative_ts + */ + +#define AVFMT_SEEK_TO_PTS 0x4000000 /**< Seeking is based on PTS */ + +/** + * @addtogroup lavf_encoding + * @{ + */ +typedef struct AVOutputFormat { + const char *name; + /** + * Descriptive name for the format, meant to be more human-readable + * than name. You should use the NULL_IF_CONFIG_SMALL() macro + * to define it. 
+ */ + const char *long_name; + const char *mime_type; + const char *extensions; /**< comma-separated filename extensions */ + /* output support */ + enum AVCodecID audio_codec; /**< default audio codec */ + enum AVCodecID video_codec; /**< default video codec */ + enum AVCodecID subtitle_codec; /**< default subtitle codec */ + /** + * can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, + * AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, + * AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, + * AVFMT_TS_NONSTRICT, AVFMT_TS_NEGATIVE + */ + int flags; + + /** + * List of supported codec_id-codec_tag pairs, ordered by "better + * choice first". The arrays are all terminated by AV_CODEC_ID_NONE. + */ + const struct AVCodecTag * const *codec_tag; + + + const AVClass *priv_class; ///< AVClass for the private context + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + struct AVOutputFormat *next; + /** + * size of private data so that it can be allocated in the wrapper + */ + int priv_data_size; + + int (*write_header)(struct AVFormatContext *); + /** + * Write a packet. If AVFMT_ALLOW_FLUSH is set in flags, + * pkt can be NULL in order to flush data buffered in the muxer. + * When flushing, return 0 if there still is more data to flush, + * or 1 if everything was flushed and there is no more buffered + * data. + */ + int (*write_packet)(struct AVFormatContext *, AVPacket *pkt); + int (*write_trailer)(struct AVFormatContext *); + /** + * Currently only used to set pixel format if not YUV420P. + */ + int (*interleave_packet)(struct AVFormatContext *, AVPacket *out, + AVPacket *in, int flush); + /** + * Test if the given codec can be stored in this container. 
+ * + * @return 1 if the codec is supported, 0 if it is not. + * A negative number if unknown. + * MKTAG('A', 'P', 'I', 'C') if the codec is only supported as AV_DISPOSITION_ATTACHED_PIC + */ + int (*query_codec)(enum AVCodecID id, int std_compliance); + + void (*get_output_timestamp)(struct AVFormatContext *s, int stream, + int64_t *dts, int64_t *wall); + /** + * Allows sending messages from application to device. + */ + int (*control_message)(struct AVFormatContext *s, int type, + void *data, size_t data_size); + + /** + * Write an uncoded AVFrame. + * + * See liteav_av_write_uncoded_frame() for details. + * + * The library will free *frame afterwards, but the muxer can prevent it + * by setting the pointer to NULL. + */ + int (*write_uncoded_frame)(struct AVFormatContext *, int stream_index, + AVFrame **frame, unsigned flags); + /** + * Returns device list with it properties. + * @see avdevice_list_devices() for more details. + */ + int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list); + /** + * Initialize device capabilities submodule. + * @see avdevice_capabilities_create() for more details. + */ + int (*create_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps); + /** + * Free device capabilities submodule. + * @see avdevice_capabilities_free() for more details. + */ + int (*free_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps); + enum AVCodecID data_codec; /**< default data codec */ + /** + * Initialize format. May allocate data here, and set any AVFormatContext or + * AVStream parameters that need to be set before packets are sent. + * This method must not write output. + * + * Return 0 if streams were fully configured, 1 if not, negative AVERROR on failure + * + * Any allocations made here must be freed in deinit(). + */ + int (*init)(struct AVFormatContext *); + /** + * Deinitialize format. 
If present, this is called whenever the muxer is being + * destroyed, regardless of whether or not the header has been written. + * + * If a trailer is being written, this is called after write_trailer(). + * + * This is called if init() fails as well. + */ + void (*deinit)(struct AVFormatContext *); + /** + * Set up any necessary bitstream filtering and extract any extra data needed + * for the global header. + * Return 0 if more packets from this stream must be checked; 1 if not. + */ + int (*check_bitstream)(struct AVFormatContext *, const AVPacket *pkt); +} AVOutputFormat; +/** + * @} + */ + +/** + * @addtogroup lavf_decoding + * @{ + */ +typedef struct AVInputFormat { + /** + * A comma separated list of short names for the format. New names + * may be appended with a minor bump. + */ + const char *name; + + /** + * Descriptive name for the format, meant to be more human-readable + * than name. You should use the NULL_IF_CONFIG_SMALL() macro + * to define it. + */ + const char *long_name; + + /** + * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, + * AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, + * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS. + */ + int flags; + + /** + * If extensions are defined, then no probe is done. You should + * usually not use extension format guessing because it is not + * reliable enough + */ + const char *extensions; + + const struct AVCodecTag * const *codec_tag; + + const AVClass *priv_class; ///< AVClass for the private context + + /** + * Comma-separated list of mime types. + * It is used check for matching mime types while probing. + * @see liteav_av_probe_input_format2 + */ + const char *mime_type; + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. 
+     *****************************************************************
+     */
+    struct AVInputFormat *next;
+
+    /**
+     * Raw demuxers store their codec ID here.
+     */
+    int raw_codec_id;
+
+    /**
+     * Size of private data so that it can be allocated in the wrapper.
+     */
+    int priv_data_size;
+
+    /**
+     * Tell if a given file has a chance of being parsed as this format.
+     * The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes
+     * big so you do not have to check for that unless you need more.
+     */
+    int (*read_probe)(AVProbeData *);
+
+    /**
+     * Read the format header and initialize the AVFormatContext
+     * structure. Return 0 if OK. 'avformat_new_stream' should be
+     * called to create new streams.
+     */
+    int (*read_header)(struct AVFormatContext *);
+
+    /**
+     * Variant of read_header() for custom protocol demuxers that need to
+     * receive an AVDictionary of options.
+     */
+    int (*read_header_with_dict)(struct AVFormatContext *, AVDictionary **options);
+
+    /**
+     * Read one packet and put it in 'pkt'. pts and flags are also
+     * set. 'avformat_new_stream' can be called only if the flag
+     * AVFMTCTX_NOHEADER is used and only in the calling thread (not in a
+     * background thread).
+     * @return 0 on success, < 0 on error.
+     *         When returning an error, pkt must not have been allocated
+     *         or must be freed before returning
+     */
+    int (*read_packet)(struct AVFormatContext *, AVPacket *pkt);
+
+    /**
+     * Close the stream. The AVFormatContext and AVStreams are not
+     * freed by this function
+     */
+    int (*read_close)(struct AVFormatContext *);
+
+    /**
+     * Seek to a given timestamp relative to the frames in
+     * stream component stream_index.
+     * @param stream_index Must not be -1.
+     * @param flags Selects which direction should be preferred if no exact
+     *              match is available.
+     * @return >= 0 on success (but not necessarily the new offset)
+     */
+    int (*read_seek)(struct AVFormatContext *,
+                     int stream_index, int64_t timestamp, int flags);
+
+    /**
+     * Get the next timestamp in stream[stream_index].time_base units.
+     * @return the timestamp or AV_NOPTS_VALUE if an error occurred
+     */
+    int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index,
+                              int64_t *pos, int64_t pos_limit);
+
+    /**
+     * Start/resume playing - only meaningful if using a network-based format
+     * (RTSP).
+     */
+    int (*read_play)(struct AVFormatContext *);
+
+    /**
+     * Pause playing - only meaningful if using a network-based format
+     * (RTSP).
+     */
+    int (*read_pause)(struct AVFormatContext *);
+
+    /**
+     * Seek to timestamp ts.
+     * Seeking will be done so that the point from which all active streams
+     * can be presented successfully will be closest to ts and within min/max_ts.
+     * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL.
+     */
+    int (*read_seek2)(struct AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags);
+
+    /**
+     * Returns device list with its properties.
+     * @see avdevice_list_devices() for more details.
+     */
+    int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list);
+
+    /**
+     * Initialize device capabilities submodule.
+     * @see avdevice_capabilities_create() for more details.
+     */
+    int (*create_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps);
+
+    /**
+     * Free device capabilities submodule.
+     * @see avdevice_capabilities_free() for more details.
+     */
+    int (*free_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps);
+} AVInputFormat;
+/**
+ * @}
+ */
+
+enum AVStreamParseType {
+    AVSTREAM_PARSE_NONE,
+    AVSTREAM_PARSE_FULL,       /**< full parsing and repack */
+    AVSTREAM_PARSE_HEADERS,    /**< Only parse headers, do not repack.
*/ + AVSTREAM_PARSE_TIMESTAMPS, /**< full parsing and interpolation of timestamps for frames not starting on a packet boundary */ + AVSTREAM_PARSE_FULL_ONCE, /**< full parsing and repack of the first frame only, only implemented for H.264 currently */ + AVSTREAM_PARSE_FULL_RAW, /**< full parsing and repack with timestamp and position generation by parser for raw + this assumes that each packet in the file contains no demuxer level headers and + just codec level data, otherwise position generation would fail */ +}; + +typedef struct AVIndexEntry { + int64_t pos; + int64_t timestamp; /**< + * Timestamp in AVStream.time_base units, preferably the time from which on correctly decoded frames are available + * when seeking to this entry. That means preferable PTS on keyframe based formats. + * But demuxers can choose to store a different timestamp, if it is more convenient for the implementation or nothing better + * is known + */ +#define AVINDEX_KEYFRAME 0x0001 +#define AVINDEX_DISCARD_FRAME 0x0002 /** + * Flag is used to indicate which frame should be discarded after decoding. + */ + int flags:2; + int size:30; //Yeah, trying to keep the size of this small to reduce memory requirements (it is 24 vs. 32 bytes due to possible 8-byte alignment). + int min_distance; /**< Minimum distance between this and the previous keyframe, used to avoid unneeded searching. */ +} AVIndexEntry; + +#define AV_DISPOSITION_DEFAULT 0x0001 +#define AV_DISPOSITION_DUB 0x0002 +#define AV_DISPOSITION_ORIGINAL 0x0004 +#define AV_DISPOSITION_COMMENT 0x0008 +#define AV_DISPOSITION_LYRICS 0x0010 +#define AV_DISPOSITION_KARAOKE 0x0020 + +/** + * Track should be used during playback by default. + * Useful for subtitle track that should be displayed + * even when user did not explicitly ask for subtitles. 
+ */ +#define AV_DISPOSITION_FORCED 0x0040 +#define AV_DISPOSITION_HEARING_IMPAIRED 0x0080 /**< stream for hearing impaired audiences */ +#define AV_DISPOSITION_VISUAL_IMPAIRED 0x0100 /**< stream for visual impaired audiences */ +#define AV_DISPOSITION_CLEAN_EFFECTS 0x0200 /**< stream without voice */ +/** + * The stream is stored in the file as an attached picture/"cover art" (e.g. + * APIC frame in ID3v2). The first (usually only) packet associated with it + * will be returned among the first few packets read from the file unless + * seeking takes place. It can also be accessed at any time in + * AVStream.attached_pic. + */ +#define AV_DISPOSITION_ATTACHED_PIC 0x0400 +/** + * The stream is sparse, and contains thumbnail images, often corresponding + * to chapter markers. Only ever used with AV_DISPOSITION_ATTACHED_PIC. + */ +#define AV_DISPOSITION_TIMED_THUMBNAILS 0x0800 + +typedef struct AVStreamInternal AVStreamInternal; + +/** + * To specify text track kind (different from subtitles default). + */ +#define AV_DISPOSITION_CAPTIONS 0x10000 +#define AV_DISPOSITION_DESCRIPTIONS 0x20000 +#define AV_DISPOSITION_METADATA 0x40000 +#define AV_DISPOSITION_DEPENDENT 0x80000 ///< dependent audio stream (mix_type=0 in mpegts) +#define AV_DISPOSITION_STILL_IMAGE 0x100000 ///< still images in video stream (still_picture_flag=1 in mpegts) + +/** + * Options for behavior on timestamp wrap detection. + */ +#define AV_PTS_WRAP_IGNORE 0 ///< ignore the wrap +#define AV_PTS_WRAP_ADD_OFFSET 1 ///< add the format specific offset on wrap detection +#define AV_PTS_WRAP_SUB_OFFSET -1 ///< subtract the format specific offset on wrap detection + +/** + * Stream structure. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVStream) must not be used outside libav*. 
+ */ +typedef struct AVStream { + int index; /**< stream index in AVFormatContext */ + /** + * Format-specific stream ID. + * decoding: set by libavformat + * encoding: set by the user, replaced by libavformat if left unset + */ + int id; +#if FF_API_LAVF_AVCTX + /** + * @deprecated use the codecpar struct instead + */ + attribute_deprecated + AVCodecContext *codec; +#endif + void *priv_data; + + /** + * This is the fundamental unit of time (in seconds) in terms + * of which frame timestamps are represented. + * + * decoding: set by libavformat + * encoding: May be set by the caller before liteav_avformat_write_header() to + * provide a hint to the muxer about the desired timebase. In + * liteav_avformat_write_header(), the muxer will overwrite this field + * with the timebase that will actually be used for the timestamps + * written into the file (which may or may not be related to the + * user-provided one, depending on the format). + */ + AVRational time_base; + + /** + * Decoding: pts of the first frame of the stream in presentation order, in stream time base. + * Only set this if you are absolutely 100% sure that the value you set + * it to really is the pts of the first frame. + * This may be undefined (AV_NOPTS_VALUE). + * @note The ASF header does NOT contain a correct start_time the ASF + * demuxer must NOT set this. + */ + int64_t start_time; + + /** + * Decoding: duration of the stream, in stream time base. + * If a source file does not specify a duration, but does specify + * a bitrate, this value will be estimated from bitrate and file size. + * + * Encoding: May be set by the caller before liteav_avformat_write_header() to + * provide a hint to the muxer about the estimated duration. + */ + int64_t duration; + + int64_t nb_frames; ///< number of frames in this stream if known or 0 + + int disposition; /**< AV_DISPOSITION_* bit field */ + + enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed. 
+ + /** + * sample aspect ratio (0 if unknown) + * - encoding: Set by user. + * - decoding: Set by libavformat. + */ + AVRational sample_aspect_ratio; + + AVDictionary *metadata; + + /** + * Average framerate + * + * - demuxing: May be set by libavformat when creating the stream or in + * avformat_find_stream_info(). + * - muxing: May be set by the caller before liteav_avformat_write_header(). + */ + AVRational avg_frame_rate; + + /** + * For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet + * will contain the attached picture. + * + * decoding: set by libavformat, must not be modified by the caller. + * encoding: unused + */ + AVPacket attached_pic; + + /** + * An array of side data that applies to the whole stream (i.e. the + * container does not allow it to change between packets). + * + * There may be no overlap between the side data in this array and side data + * in the packets. I.e. a given side data is either exported by the muxer + * (demuxing) / set by the caller (muxing) in this array, then it never + * appears in the packets, or the side data is exported / sent through + * the packets (always in the first packet where the value becomes known or + * changes), then it does not appear in this array. + * + * - demuxing: Set by libavformat when the stream is created. + * - muxing: May be set by the caller before liteav_avformat_write_header(). + * + * Freed by libavformat in avformat_free_context(). + * + * @see av_format_inject_global_side_data() + */ + AVPacketSideData *side_data; + /** + * The number of elements in the AVStream.side_data array. + */ + int nb_side_data; + + /** + * Flags for the user to detect events happening on the stream. Flags must + * be cleared by the user once the event has been handled. + * A combination of AVSTREAM_EVENT_FLAG_*. + */ + int event_flags; +#define AVSTREAM_EVENT_FLAG_METADATA_UPDATED 0x0001 ///< The call resulted in updated metadata. + + /** + * Real base framerate of the stream. 
+ * This is the lowest framerate with which all timestamps can be + * represented accurately (it is the least common multiple of all + * framerates in the stream). Note, this value is just a guess! + * For example, if the time base is 1/90000 and all frames have either + * approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1. + */ + AVRational r_frame_rate; + +#if FF_API_LAVF_FFSERVER + /** + * String containing pairs of key and values describing recommended encoder configuration. + * Pairs are separated by ','. + * Keys are separated from values by '='. + * + * @deprecated unused + */ + attribute_deprecated + char *recommended_encoder_configuration; +#endif + + /** + * Codec parameters associated with this stream. Allocated and freed by + * libavformat in avformat_new_stream() and avformat_free_context() + * respectively. + * + * - demuxing: filled by libavformat on stream creation or in + * avformat_find_stream_info() + * - muxing: filled by the caller before liteav_avformat_write_header() + */ + AVCodecParameters *codecpar; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * Internal note: be aware that physically removing these fields + * will break ABI. Replace removed fields with dummy fields, and + * add new fields to AVStreamInternal. + ***************************************************************** + */ + +#define MAX_STD_TIMEBASES (30*12+30+3+6) + /** + * Stream information used internally by avformat_find_stream_info() + */ + struct { + int64_t last_dts; + int64_t duration_gcd; + int duration_count; + int64_t rfps_duration_sum; + double (*duration_error)[2][MAX_STD_TIMEBASES]; + int64_t codec_info_duration; + int64_t codec_info_duration_fields; + int frame_delay_evidence; + + /** + * 0 -> decoder has not been searched for yet. 
+ * >0 -> decoder found + * <0 -> decoder with codec_id == -found_decoder has not been found + */ + int found_decoder; + + int64_t last_duration; + + /** + * Those are used for average framerate estimation. + */ + int64_t fps_first_dts; + int fps_first_dts_idx; + int64_t fps_last_dts; + int fps_last_dts_idx; + + } *info; + + int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */ + + // Timestamp generation support: + /** + * Timestamp corresponding to the last dts sync point. + * + * Initialized when AVCodecParserContext.dts_sync_point >= 0 and + * a DTS is received from the underlying container. Otherwise set to + * AV_NOPTS_VALUE by default. + */ + int64_t first_dts; + int64_t cur_dts; + int64_t last_IP_pts; + int last_IP_duration; + + /** + * Number of packets to buffer for codec probing + */ + int probe_packets; + + /** + * Number of frames that have been demuxed during avformat_find_stream_info() + */ + int codec_info_nb_frames; + + /* av_read_frame() support */ + enum AVStreamParseType need_parsing; + struct AVCodecParserContext *parser; + + /** + * last packet in packet_buffer for this stream when muxing. + */ + struct AVPacketList *last_in_packet_buffer; + AVProbeData probe_data; +#define MAX_REORDER_DELAY 16 + int64_t pts_buffer[MAX_REORDER_DELAY+1]; + + AVIndexEntry *index_entries; /**< Only used if the format does not + support seeking natively. */ + int nb_index_entries; + unsigned int index_entries_allocated_size; + + /** + * Stream Identifier + * This is the MPEG-TS stream identifier +1 + * 0 means unknown + */ + int stream_identifier; + + /** + * Details of the MPEG-TS program which created this stream. + */ + int program_num; + int pmt_version; + int pmt_stream_idx; + + int64_t interleaver_chunk_size; + int64_t interleaver_chunk_duration; + + /** + * stream probing state + * -1 -> probing finished + * 0 -> no probing requested + * rest -> perform probing with request_probe being the minimum score to accept. 
+ * NOT PART OF PUBLIC API + */ + int request_probe; + /** + * Indicates that everything up to the next keyframe + * should be discarded. + */ + int skip_to_keyframe; + + /** + * Number of samples to skip at the start of the frame decoded from the next packet. + */ + int skip_samples; + + /** + * If not 0, the number of samples that should be skipped from the start of + * the stream (the samples are removed from packets with pts==0, which also + * assumes negative timestamps do not happen). + * Intended for use with formats such as mp3 with ad-hoc gapless audio + * support. + */ + int64_t start_skip_samples; + + /** + * If not 0, the first audio sample that should be discarded from the stream. + * This is broken by design (needs global sample count), but can't be + * avoided for broken by design formats such as mp3 with ad-hoc gapless + * audio support. + */ + int64_t first_discard_sample; + + /** + * The sample after last sample that is intended to be discarded after + * first_discard_sample. Works on frame boundaries only. Used to prevent + * early EOF if the gapless info is broken (considered concatenated mp3s). + */ + int64_t last_discard_sample; + + /** + * Number of internally decoded frames, used internally in libavformat, do not access + * its lifetime differs from info which is why it is not in that structure. + */ + int nb_decoded_frames; + + /** + * Timestamp offset added to timestamps before muxing + * NOT PART OF PUBLIC API + */ + int64_t mux_ts_offset; + + /** + * Internal data to check for wrapping of the time stamp + */ + int64_t pts_wrap_reference; + + /** + * Options for behavior, when a wrap is detected. + * + * Defined by AV_PTS_WRAP_ values. + * + * If correction is enabled, there are two possibilities: + * If the first time stamp is near the wrap point, the wrap offset + * will be subtracted, which will create negative time stamps. + * Otherwise the offset will be added. 
+     */
+    int pts_wrap_behavior;
+
+    /**
+     * Internal data to prevent doing update_initial_durations() twice
+     */
+    int update_initial_durations_done;
+
+    /**
+     * Internal data to generate dts from pts
+     */
+    int64_t pts_reorder_error[MAX_REORDER_DELAY+1];
+    uint8_t pts_reorder_error_count[MAX_REORDER_DELAY+1];
+
+    /**
+     * Internal data to analyze DTS and detect faulty mpeg streams
+     */
+    int64_t last_dts_for_order_check;
+    uint8_t dts_ordered;
+    uint8_t dts_misordered;
+
+    /**
+     * Internal data to inject global side data
+     */
+    int inject_global_side_data;
+
+    /**
+     * display aspect ratio (0 if unknown)
+     * - encoding: unused
+     * - decoding: Set by libavformat to calculate sample_aspect_ratio internally
+     */
+    AVRational display_aspect_ratio;
+
+    /**
+     * An opaque field for libavformat internal usage.
+     * Must not be accessed in any way by callers.
+     */
+    AVStreamInternal *internal;
+} AVStream;
+
+#if FF_API_FORMAT_GET_SET
+/**
+ * Accessors for some AVStream fields. These used to be provided for ABI
+ * compatibility, and do not need to be used anymore.
+ */
+attribute_deprecated
+AVRational av_stream_get_r_frame_rate(const AVStream *s);
+attribute_deprecated
+void av_stream_set_r_frame_rate(AVStream *s, AVRational r);
+#if FF_API_LAVF_FFSERVER
+attribute_deprecated
+char* av_stream_get_recommended_encoder_configuration(const AVStream *s);
+attribute_deprecated
+void av_stream_set_recommended_encoder_configuration(AVStream *s, char *configuration);
+#endif
+#endif
+
+struct AVCodecParserContext *av_stream_get_parser(const AVStream *s);
+
+/**
+ * Returns the pts of the last muxed packet + its duration
+ *
+ * the returned value is undefined when used with a demuxer.
+ */
+int64_t av_stream_get_end_pts(const AVStream *st);
+
+#define AV_PROGRAM_RUNNING 1
+
+/**
+ * New fields can be added to the end with minor version bumps.
+ * Removal, reordering and changes to existing fields require a major
+ * version bump.
+ * sizeof(AVProgram) must not be used outside libav*.
+ */ +typedef struct AVProgram { + int id; + int flags; + enum AVDiscard discard; ///< selects which program to discard and which to feed to the caller + unsigned int *stream_index; + unsigned int nb_stream_indexes; + AVDictionary *metadata; + + int program_num; + int pmt_pid; + int pcr_pid; + int pmt_version; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + int64_t start_time; + int64_t end_time; + + int64_t pts_wrap_reference; ///< reference dts for wrap detection + int pts_wrap_behavior; ///< behavior on wrap detection +} AVProgram; + +#define AVFMTCTX_NOHEADER 0x0001 /**< signal that no header is present + (streams are added dynamically) */ +#define AVFMTCTX_UNSEEKABLE 0x0002 /**< signal that the stream is definitely + not seekable, and attempts to call the + seek function will fail. For some + network protocols (e.g. HLS), this can + change dynamically at runtime. */ + +typedef struct AVChapter { + int id; ///< unique ID to identify the chapter + AVRational time_base; ///< time base in which the start/end timestamps are specified + int64_t start, end; ///< chapter start/end time in time_base units + AVDictionary *metadata; +} AVChapter; + + +/** + * Callback used by devices to communicate with application. + */ +typedef int (*av_format_control_message)(struct AVFormatContext *s, int type, + void *data, size_t data_size); + +typedef int (*AVOpenCallback)(struct AVFormatContext *s, AVIOContext **pb, const char *url, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * The duration of a video can be estimated through various ways, and this enum can be used + * to know how the duration was estimated. 
+ */ +enum AVDurationEstimationMethod { + AVFMT_DURATION_FROM_PTS, ///< Duration accurately estimated from PTSes + AVFMT_DURATION_FROM_STREAM, ///< Duration estimated from a stream with a known duration + AVFMT_DURATION_FROM_BITRATE ///< Duration estimated from bitrate (less accurate) +}; + +typedef struct AVFormatInternal AVFormatInternal; + +/** + * Format I/O context. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVFormatContext) must not be used outside libav*, use + * avformat_alloc_context() to create an AVFormatContext. + * + * Fields can be accessed through AVOptions (av_opt*), + * the name string used matches the associated command line parameter name and + * can be found in libavformat/options_table.h. + * The AVOption/command line parameter names differ in some cases from the C + * structure field names for historic reasons or brevity. + */ +typedef struct AVFormatContext { + /** + * A class for logging and @ref avoptions. Set by avformat_alloc_context(). + * Exports (de)muxer private options if they exist. + */ + const AVClass *av_class; + + /** + * The input container format. + * + * Demuxing only, set by avformat_open_input(). + */ + struct AVInputFormat *iformat; + + /** + * The output container format. + * + * Muxing only, must be set by the caller before liteav_avformat_write_header(). + */ + struct AVOutputFormat *oformat; + + /** + * Format private data. This is an AVOptions-enabled struct + * if and only if iformat/oformat.priv_class is not NULL. + * + * - muxing: set by liteav_avformat_write_header() + * - demuxing: set by avformat_open_input() + */ + void *priv_data; + + /** + * I/O context. + * + * - demuxing: either set by the user before avformat_open_input() (then + * the user must close it manually) or set by avformat_open_input(). + * - muxing: set by the user before liteav_avformat_write_header(). 
The caller must + * take care of closing / freeing the IO context. + * + * Do NOT set this field if AVFMT_NOFILE flag is set in + * iformat/oformat.flags. In such a case, the (de)muxer will handle + * I/O in some other way and this field will be NULL. + */ + AVIOContext *pb; + + /* stream info */ + /** + * Flags signalling stream properties. A combination of AVFMTCTX_*. + * Set by libavformat. + */ + int ctx_flags; + + /** + * Number of elements in AVFormatContext.streams. + * + * Set by avformat_new_stream(), must not be modified by any other code. + */ + unsigned int nb_streams; + /** + * A list of all streams in the file. New streams are created with + * avformat_new_stream(). + * + * - demuxing: streams are created by libavformat in avformat_open_input(). + * If AVFMTCTX_NOHEADER is set in ctx_flags, then new streams may also + * appear in av_read_frame(). + * - muxing: streams are created by the user before liteav_avformat_write_header(). + * + * Freed by libavformat in avformat_free_context(). + */ + AVStream **streams; + +#if FF_API_FORMAT_FILENAME + /** + * input or output filename + * + * - demuxing: set by avformat_open_input() + * - muxing: may be set by the caller before liteav_avformat_write_header() + * + * @deprecated Use url instead. + */ + attribute_deprecated + char filename[1024]; +#endif + + /** + * input or output URL. Unlike the old filename field, this field has no + * length restriction. + * + * - demuxing: set by avformat_open_input(), initialized to an empty + * string if url parameter was NULL in avformat_open_input(). + * - muxing: may be set by the caller before calling liteav_avformat_write_header() + * (or liteav_avformat_init_output() if that is called first) to a string + * which is freeable by liteav_av_free(). Set to an empty string if it + * was NULL in liteav_avformat_init_output(). + * + * Freed by libavformat in avformat_free_context(). 
+ */ + char *url; + + /** + * Position of the first frame of the component, in + * AV_TIME_BASE fractional seconds. NEVER set this value directly: + * It is deduced from the AVStream values. + * + * Demuxing only, set by libavformat. + */ + int64_t start_time; + + /** + * Duration of the stream, in AV_TIME_BASE fractional + * seconds. Only set this value if you know none of the individual stream + * durations and also do not set any of them. This is deduced from the + * AVStream values if not set. + * + * Demuxing only, set by libavformat. + */ + int64_t duration; + + /** + * Total stream bitrate in bit/s, 0 if not + * available. Never set it directly if the file_size and the + * duration are known as FFmpeg can compute it automatically. + */ + int64_t bit_rate; + + unsigned int packet_size; + int max_delay; + + /** + * Flags modifying the (de)muxer behaviour. A combination of AVFMT_FLAG_*. + * Set by the user before avformat_open_input() / liteav_avformat_write_header(). + */ + int flags; +#define AVFMT_FLAG_GENPTS 0x0001 ///< Generate missing pts even if it requires parsing future frames. +#define AVFMT_FLAG_IGNIDX 0x0002 ///< Ignore index. +#define AVFMT_FLAG_NONBLOCK 0x0004 ///< Do not block when reading packets from input. +#define AVFMT_FLAG_IGNDTS 0x0008 ///< Ignore DTS on frames that contain both DTS & PTS +#define AVFMT_FLAG_NOFILLIN 0x0010 ///< Do not infer any values from other values, just return what is stored in the container +#define AVFMT_FLAG_NOPARSE 0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled +#define AVFMT_FLAG_NOBUFFER 0x0040 ///< Do not buffer frames when possible +#define AVFMT_FLAG_CUSTOM_IO 0x0080 ///< The caller has supplied a custom AVIOContext, don't liteav_avio_close() it. 
+#define AVFMT_FLAG_DISCARD_CORRUPT 0x0100 ///< Discard frames marked corrupted +#define AVFMT_FLAG_FLUSH_PACKETS 0x0200 ///< Flush the AVIOContext every packet. +/** + * When muxing, try to avoid writing any random/volatile data to the output. + * This includes any random IDs, real-time timestamps/dates, muxer version, etc. + * + * This flag is mainly intended for testing. + */ +#define AVFMT_FLAG_BITEXACT 0x0400 +#if FF_API_LAVF_MP4A_LATM +#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Deprecated, does nothing. +#endif +#define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down) +#define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted) +#if FF_API_LAVF_KEEPSIDE_FLAG +#define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Deprecated, does nothing. +#endif +#define AVFMT_FLAG_FAST_SEEK 0x80000 ///< Enable fast, but inaccurate seeks for some formats +#define AVFMT_FLAG_SHORTEST 0x100000 ///< Stop muxing when the shortest stream stops. +#define AVFMT_FLAG_AUTO_BSF 0x200000 ///< Add bitstream filters as requested by the muxer + + /** + * Maximum size of the data read from input for determining + * the input container format. + * Demuxing only, set by the caller before avformat_open_input(). + */ + int64_t probesize; + + /** + * Maximum duration (in AV_TIME_BASE units) of the data read + * from input in avformat_find_stream_info(). + * Demuxing only, set by the caller before avformat_find_stream_info(). + * Can be set to 0 to let avformat choose using a heuristic. + */ + int64_t max_analyze_duration; + + const uint8_t *key; + int keylen; + + unsigned int nb_programs; + AVProgram **programs; + + /** + * Forced video codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID video_codec_id; + + /** + * Forced audio codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID audio_codec_id; + + /** + * Forced subtitle codec_id. 
+ * Demuxing: Set by user. + */ + enum AVCodecID subtitle_codec_id; + + /** + * Maximum amount of memory in bytes to use for the index of each stream. + * If the index exceeds this size, entries will be discarded as + * needed to maintain a smaller size. This can lead to slower or less + * accurate seeking (depends on demuxer). + * Demuxers for which a full in-memory index is mandatory will ignore + * this. + * - muxing: unused + * - demuxing: set by user + */ + unsigned int max_index_size; + + /** + * Maximum amount of memory in bytes to use for buffering frames + * obtained from realtime capture devices. + */ + unsigned int max_picture_buffer; + + /** + * Number of chapters in AVChapter array. + * When muxing, chapters are normally written in the file header, + * so nb_chapters should normally be initialized before write_header + * is called. Some muxers (e.g. mov and mkv) can also write chapters + * in the trailer. To write chapters in the trailer, nb_chapters + * must be zero when write_header is called and non-zero when + * write_trailer is called. + * - muxing: set by user + * - demuxing: set by libavformat + */ + unsigned int nb_chapters; + AVChapter **chapters; + + /** + * Metadata that applies to the whole file. + * + * - demuxing: set by libavformat in avformat_open_input() + * - muxing: may be set by the caller before liteav_avformat_write_header() + * + * Freed by libavformat in avformat_free_context(). + */ + AVDictionary *metadata; + + /** + * Start time of the stream in real world time, in microseconds + * since the Unix epoch (00:00 1st January 1970). That is, pts=0 in the + * stream was captured at this real world time. + * - muxing: Set by the caller before liteav_avformat_write_header(). If set to + * either 0 or AV_NOPTS_VALUE, then the current wall-time will + * be used. + * - demuxing: Set by libavformat. AV_NOPTS_VALUE if unknown. Note that + * the value may become known after some number of frames + * have been received. 
+ */ + int64_t start_time_realtime; + + /** + * The number of frames used for determining the framerate in + * avformat_find_stream_info(). + * Demuxing only, set by the caller before avformat_find_stream_info(). + */ + int fps_probe_size; + + /** + * Error recognition; higher values will detect more errors but may + * misdetect some more or less valid parts as errors. + * Demuxing only, set by the caller before avformat_open_input(). + */ + int error_recognition; + + /** + * Custom interrupt callbacks for the I/O layer. + * + * demuxing: set by the user before avformat_open_input(). + * muxing: set by the user before liteav_avformat_write_header() + * (mainly useful for AVFMT_NOFILE formats). The callback + * should also be passed to liteav_avio_open2() if it's used to + * open the file. + */ + AVIOInterruptCB interrupt_callback; + + /** + * Flags to enable debugging. + */ + int debug; +#define FF_FDEBUG_TS 0x0001 + + /** + * Maximum buffering duration for interleaving. + * + * To ensure all the streams are interleaved correctly, + * liteav_av_interleaved_write_frame() will wait until it has at least one packet + * for each stream before actually writing any packets to the output file. + * When some streams are "sparse" (i.e. there are large gaps between + * successive packets), this can result in excessive buffering. + * + * This field specifies the maximum difference between the timestamps of the + * first and the last packet in the muxing queue, above which libavformat + * will output a packet regardless of whether it has queued a packet for all + * the streams. + * + * Muxing only, set by the caller before liteav_avformat_write_header(). + */ + int64_t max_interleave_delta; + + /** + * Allow non-standard and experimental extension + * @see AVCodecContext.strict_std_compliance + */ + int strict_std_compliance; + + /** + * Flags for the user to detect events happening on the file. Flags must + * be cleared by the user once the event has been handled. 
+ * A combination of AVFMT_EVENT_FLAG_*. + */ + int event_flags; +#define AVFMT_EVENT_FLAG_METADATA_UPDATED 0x0001 ///< The call resulted in updated metadata. +#define AVFMT_EVENT_FLAG_AVFORMAT_UPDATED 0x0002 ///< The call resulted in updated avformat context. +#define AVFMT_EVENT_FLAG_PROGRAM_UPDATED 0x0004 ///< The call resulted in updated program context. +#define AVFMT_EVENT_FLAG_FINDSTREAM_MEET_DISCONTINUITY 0x0008 ///< The call resulted in find stream + ///< info meet discontinuity must be + ///< terminated. + + + /** + * Maximum number of packets to read while waiting for the first timestamp. + * Decoding only. + */ + int max_ts_probe; + + /** + * Avoid negative timestamps during muxing. + * Any value of the AVFMT_AVOID_NEG_TS_* constants. + * Note, this only works when using liteav_av_interleaved_write_frame. (interleave_packet_per_dts is in use) + * - muxing: Set by user + * - demuxing: unused + */ + int avoid_negative_ts; +#define AVFMT_AVOID_NEG_TS_AUTO -1 ///< Enabled when required by target format +#define AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE 1 ///< Shift timestamps so they are non negative +#define AVFMT_AVOID_NEG_TS_MAKE_ZERO 2 ///< Shift timestamps so that they start at 0 + + /** + * Transport stream id. + * This will be moved into demuxer private options. Thus no API/ABI compatibility + */ + int ts_id; + + /** + * Audio preload in microseconds. + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. + * - encoding: Set by user + * - decoding: unused + */ + int audio_preload; + + /** + * Max chunk time in microseconds. + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. + * - encoding: Set by user + * - decoding: unused + */ + int max_chunk_duration; + + /** + * Max chunk size in bytes + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. 
+ * - encoding: Set by user + * - decoding: unused + */ + int max_chunk_size; + + /** + * forces the use of wallclock timestamps as pts/dts of packets + * This has undefined results in the presence of B frames. + * - encoding: unused + * - decoding: Set by user + */ + int use_wallclock_as_timestamps; + + /** + * avio flags, used to force AVIO_FLAG_DIRECT. + * - encoding: unused + * - decoding: Set by user + */ + int avio_flags; + + /** + * The duration field can be estimated through various ways, and this field can be used + * to know how the duration was estimated. + * - encoding: unused + * - decoding: Read by user + */ + enum AVDurationEstimationMethod duration_estimation_method; + + /** + * Skip initial bytes when opening stream + * - encoding: unused + * - decoding: Set by user + */ + int64_t skip_initial_bytes; + + /** + * Correct single timestamp overflows + * - encoding: unused + * - decoding: Set by user + */ + unsigned int correct_ts_overflow; + + /** + * Force seeking to any (also non key) frames. + * - encoding: unused + * - decoding: Set by user + */ + int seek2any; + + /** + * Flush the I/O context after each packet. + * - encoding: Set by user + * - decoding: unused + */ + int flush_packets; + + /** + * format probing score. + * The maximal score is AVPROBE_SCORE_MAX, its set when the demuxer probes + * the format. + * - encoding: unused + * - decoding: set by avformat, read by user + */ + int probe_score; + + /** + * number of bytes to read maximally to identify format. + * - encoding: unused + * - decoding: set by user + */ + int format_probesize; + + /** + * ',' separated list of allowed decoders. + * If NULL then all are allowed + * - encoding: unused + * - decoding: set by user + */ + char *codec_whitelist; + + /** + * ',' separated list of allowed demuxers. + * If NULL then all are allowed + * - encoding: unused + * - decoding: set by user + */ + char *format_whitelist; + + /** + * An opaque field for libavformat internal usage. 
+ * Must not be accessed in any way by callers. + */ + AVFormatInternal *internal; + + /** + * IO repositioned flag. + * This is set by avformat when the underlaying IO context read pointer + * is repositioned, for example when doing byte based seeking. + * Demuxers can use the flag to detect such changes. + */ + int io_repositioned; + + /** + * Forced video codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user + */ + AVCodec *video_codec; + + /** + * Forced audio codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user + */ + AVCodec *audio_codec; + + /** + * Forced subtitle codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user + */ + AVCodec *subtitle_codec; + + /** + * Forced data codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user + */ + AVCodec *data_codec; + + /** + * Number of bytes to be written as padding in a metadata header. + * Demuxing: Unused. + * Muxing: Set by user via av_format_set_metadata_header_padding. + */ + int metadata_header_padding; + + /** + * User data. + * This is a place for some private data of the user. + */ + void *opaque; + + /** + * Callback used by devices to communicate with application. + */ + av_format_control_message control_message_cb; + + /** + * Output timestamp offset, in microseconds. + * Muxing: set by user + */ + int64_t output_ts_offset; + + /** + * dump format separator. + * can be ", " or "\n " or anything else + * - muxing: Set by user. + * - demuxing: Set by user. + */ + uint8_t *dump_separator; + + /** + * Forced Data codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID data_codec_id; + +#if FF_API_OLD_OPEN_CALLBACKS + /** + * Called to open further IO contexts when needed for demuxing. 
+ * + * This can be set by the user application to perform security checks on + * the URLs before opening them. + * The function should behave like liteav_avio_open2(), AVFormatContext is provided + * as contextual information and to reach AVFormatContext.opaque. + * + * If NULL then some simple checks are used together with liteav_avio_open2(). + * + * Must not be accessed directly from outside avformat. + * @See av_format_set_open_cb() + * + * Demuxing: Set by user. + * + * @deprecated Use io_open and io_close. + */ + attribute_deprecated + int (*open_cb)(struct AVFormatContext *s, AVIOContext **p, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options); +#endif + + /** + * ',' separated list of allowed protocols. + * - encoding: unused + * - decoding: set by user + */ + char *protocol_whitelist; + + /** + * A callback for opening new IO streams. + * + * Whenever a muxer or a demuxer needs to open an IO stream (typically from + * avformat_open_input() for demuxers, but for certain formats can happen at + * other times as well), it will call this callback to obtain an IO context. + * + * @param s the format context + * @param pb on success, the newly opened IO context should be returned here + * @param url the url to open + * @param flags a combination of AVIO_FLAG_* + * @param options a dictionary of additional options, with the same + * semantics as in liteav_avio_open2() + * @return 0 on success, a negative AVERROR code on failure + * + * @note Certain muxers and demuxers do nesting, i.e. they open one or more + * additional internal format contexts. Thus the AVFormatContext pointer + * passed to this callback may be different from the one facing the caller. + * It will, however, have the same 'opaque' field. + */ + int (*io_open)(struct AVFormatContext *s, AVIOContext **pb, const char *url, + int flags, AVDictionary **options); + + /** + * A callback for closing the streams opened with AVFormatContext.io_open(). 
+ */
+void (*io_close)(struct AVFormatContext *s, AVIOContext *pb);
+
+ /**
+ * ',' separated list of disallowed protocols.
+ * - encoding: unused
+ * - decoding: set by user
+ */
+ char *protocol_blacklist;
+
+ /**
+ * The maximum number of streams.
+ * - encoding: unused
+ * - decoding: set by user
+ */
+ int max_streams;
+
+ int64_t open_time;
+ int is_has_open_time;
+
+ /**
+ * Skip duration calculation in estimate_timings_from_pts.
+ * - encoding: unused
+ * - decoding: set by user
+ */
+ int skip_estimate_duration_from_pts;
+
+ /**
+ * Means find stream info in progress
+ */
+ int find_stream_info_in_progress;
+} AVFormatContext;
+
+#if FF_API_FORMAT_GET_SET
+/**
+ * Accessors for some AVFormatContext fields. These used to be provided for ABI
+ * compatibility, and do not need to be used anymore.
+ */
+attribute_deprecated
+int av_format_get_probe_score(const AVFormatContext *s);
+attribute_deprecated
+AVCodec * av_format_get_video_codec(const AVFormatContext *s);
+attribute_deprecated
+void av_format_set_video_codec(AVFormatContext *s, AVCodec *c);
+attribute_deprecated
+AVCodec * av_format_get_audio_codec(const AVFormatContext *s);
+attribute_deprecated
+void av_format_set_audio_codec(AVFormatContext *s, AVCodec *c);
+attribute_deprecated
+AVCodec * av_format_get_subtitle_codec(const AVFormatContext *s);
+attribute_deprecated
+void av_format_set_subtitle_codec(AVFormatContext *s, AVCodec *c);
+attribute_deprecated
+AVCodec * av_format_get_data_codec(const AVFormatContext *s);
+attribute_deprecated
+void av_format_set_data_codec(AVFormatContext *s, AVCodec *c);
+attribute_deprecated
+int av_format_get_metadata_header_padding(const AVFormatContext *s);
+attribute_deprecated
+void av_format_set_metadata_header_padding(AVFormatContext *s, int c);
+attribute_deprecated
+void * av_format_get_opaque(const AVFormatContext *s);
+attribute_deprecated
+void av_format_set_opaque(AVFormatContext *s, void *opaque);
+attribute_deprecated
+av_format_control_message 
av_format_get_control_message_cb(const AVFormatContext *s); +attribute_deprecated +void av_format_set_control_message_cb(AVFormatContext *s, av_format_control_message callback); +#if FF_API_OLD_OPEN_CALLBACKS +attribute_deprecated AVOpenCallback av_format_get_open_cb(const AVFormatContext *s); +attribute_deprecated void av_format_set_open_cb(AVFormatContext *s, AVOpenCallback callback); +#endif +#endif + +/** + * This function will cause global side data to be injected in the next packet + * of each stream as well as after any subsequent seek. + */ +void av_format_inject_global_side_data(AVFormatContext *s); + +/** + * Returns the method used to set ctx->duration. + * + * @return AVFMT_DURATION_FROM_PTS, AVFMT_DURATION_FROM_STREAM, or AVFMT_DURATION_FROM_BITRATE. + */ +enum AVDurationEstimationMethod av_fmt_ctx_get_duration_estimation_method(const AVFormatContext* ctx); + +typedef struct AVPacketList { + AVPacket pkt; + struct AVPacketList *next; +} AVPacketList; + + +/** + * @defgroup lavf_core Core functions + * @ingroup libavf + * + * Functions for querying libavformat capabilities, allocating core structures, + * etc. + * @{ + */ + +/** + * Return the LIBAVFORMAT_VERSION_INT constant. + */ +unsigned avformat_version(void); + +/** + * Return the libavformat build-time configuration. + */ +const char *avformat_configuration(void); + +/** + * Return the libavformat license. + */ +const char *avformat_license(void); + +#if FF_API_NEXT +/** + * Initialize libavformat and register all the muxers, demuxers and + * protocols. If you do not call this function, then you can select + * exactly which formats you want to support. 
+ * + * @see liteav_av_register_input_format() + * @see liteav_av_register_output_format() + */ +attribute_deprecated +void liteav_av_register_all(void); + +attribute_deprecated +void liteav_av_register_input_format(AVInputFormat *format); +attribute_deprecated +void liteav_av_register_output_format(AVOutputFormat *format); +#endif + +/** + * Do global initialization of network libraries. This is optional, + * and not recommended anymore. + * + * This functions only exists to work around thread-safety issues + * with older GnuTLS or OpenSSL libraries. If libavformat is linked + * to newer versions of those libraries, or if you do not use them, + * calling this function is unnecessary. Otherwise, you need to call + * this function before any other threads using them are started. + * + * This function will be deprecated once support for older GnuTLS and + * OpenSSL libraries is removed, and this function has no purpose + * anymore. + */ +int avformat_network_init(void); + +/** + * Undo the initialization done by avformat_network_init. Call it only + * once for each time you called avformat_network_init. + */ +int avformat_network_deinit(void); + +#if FF_API_NEXT +/** + * If f is NULL, returns the first registered input format, + * if f is non-NULL, returns the next registered input format after f + * or NULL if f is the last one. + */ +attribute_deprecated +AVInputFormat *liteav_av_iformat_next(const AVInputFormat *f); + +/** + * If f is NULL, returns the first registered output format, + * if f is non-NULL, returns the next registered output format after f + * or NULL if f is the last one. + */ +attribute_deprecated +AVOutputFormat *liteav_av_oformat_next(const AVOutputFormat *f); +#endif + +/** + * Iterate over all registered muxers. + * + * @param opaque a pointer where libavformat will store the iteration state. Must + * point to NULL to start the iteration. 
+ * + * @return the next registered muxer or NULL when the iteration is + * finished + */ +const AVOutputFormat *liteav_av_muxer_iterate(void **opaque); + +/** + * Iterate over all registered demuxers. + * + * @param opaque a pointer where libavformat will store the iteration state. Must + * point to NULL to start the iteration. + * + * @return the next registered demuxer or NULL when the iteration is + * finished + */ +const AVInputFormat *liteav_av_demuxer_iterate(void **opaque); + +/** + * Allocate an AVFormatContext. + * avformat_free_context() can be used to free the context and everything + * allocated by the framework within it. + */ +AVFormatContext *avformat_alloc_context(void); + +/** + * Free an AVFormatContext and all its streams. + * @param s context to free + */ +void avformat_free_context(AVFormatContext *s); + +/** + * Get the AVClass for AVFormatContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see liteav_av_opt_find(). + */ +const AVClass *avformat_get_class(void); + +/** + * Add a new stream to a media file. + * + * When demuxing, it is called by the demuxer in read_header(). If the + * flag AVFMTCTX_NOHEADER is set in s.ctx_flags, then it may also + * be called in read_packet(). + * + * When muxing, should be called by the user before liteav_avformat_write_header(). + * + * User is required to call avcodec_close() and avformat_free_context() to + * clean up the allocation by avformat_new_stream(). + * + * @param s media file handle + * @param c If non-NULL, the AVCodecContext corresponding to the new stream + * will be initialized to use this codec. This is needed for e.g. codec-specific + * defaults to be set, so codec should be provided if it is known. + * + * @return newly created stream or NULL on error. + */ +AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c); + +/** + * Wrap an existing array as stream side data. 
+ * + * @param st stream + * @param type side information type + * @param data the side data array. It must be allocated with the liteav_av_malloc() + * family of functions. The ownership of the data is transferred to + * st. + * @param size side information size + * @return zero on success, a negative AVERROR code on failure. On failure, + * the stream is unchanged and the data remains owned by the caller. + */ +int av_stream_add_side_data(AVStream *st, enum AVPacketSideDataType type, + uint8_t *data, size_t size); + +/** + * Allocate new information from stream. + * + * @param stream stream + * @param type desired side information type + * @param size side information size + * @return pointer to fresh allocated data or NULL otherwise + */ +uint8_t *av_stream_new_side_data(AVStream *stream, + enum AVPacketSideDataType type, int size); +/** + * Get side information from stream. + * + * @param stream stream + * @param type desired side information type + * @param size pointer for side information size to store (optional) + * @return pointer to data if present or NULL otherwise + */ +uint8_t *av_stream_get_side_data(const AVStream *stream, + enum AVPacketSideDataType type, int *size); + +AVProgram *av_new_program(AVFormatContext *s, int id); + +/** + * @} + */ + + +/** + * Allocate an AVFormatContext for an output format. + * avformat_free_context() can be used to free the context and + * everything allocated by the framework within it. 
+ * + * @param *ctx is set to the created format context, or to NULL in + * case of failure + * @param oformat format to use for allocating the context, if NULL + * format_name and filename are used instead + * @param format_name the name of output format to use for allocating the + * context, if NULL filename is used instead + * @param filename the name of the filename to use for allocating the + * context, may be NULL + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + */ +int liteav_avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat, + const char *format_name, const char *filename); + +/** + * @addtogroup lavf_decoding + * @{ + */ + +/** + * Find AVInputFormat based on the short name of the input format. + */ +AVInputFormat *liteav_av_find_input_format(const char *short_name); + +/** + * Guess the file format. + * + * @param pd data to be probed + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. + */ +AVInputFormat *liteav_av_probe_input_format(AVProbeData *pd, int is_opened); + +/** + * Guess the file format. + * + * @param pd data to be probed + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. + * @param score_max A probe score larger that this is required to accept a + * detection, the variable is set to the actual detection + * score afterwards. + * If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended + * to retry with a larger probe buffer. + */ +AVInputFormat *liteav_av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max); + +/** + * Guess the file format. + * + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. + * @param score_ret The score of the best detection. 
+ */
+AVInputFormat *liteav_av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret);
+
+/**
+ * Probe a bytestream to determine the input format. Each time a probe returns
+ * with a score that is too low, the probe buffer size is increased and another
+ * attempt is made. When the maximum probe size is reached, the input format
+ * with the highest score is returned.
+ *
+ * @param pb the bytestream to probe
+ * @param fmt the input format is put here
+ * @param url the url of the stream
+ * @param logctx the log context
+ * @param offset the offset within the bytestream to probe from
+ * @param max_probe_size the maximum probe buffer size (zero for default)
+ * @return the score in case of success (the maximal score is
+ * AVPROBE_SCORE_MAX), a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int liteav_av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
+ const char *url, void *logctx,
+ unsigned int offset, unsigned int max_probe_size);
+
+/**
+ * Like liteav_av_probe_input_buffer2() but returns 0 on success
+ */
+int liteav_av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
+ const char *url, void *logctx,
+ unsigned int offset, unsigned int max_probe_size);
+
+/**
+ * Open an input stream and read the header. The codecs are not opened.
+ * The stream must be closed with avformat_close_input().
+ *
+ * @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context).
+ * May be a pointer to NULL, in which case an AVFormatContext is allocated by this
+ * function and written into ps.
+ * Note that a user-supplied AVFormatContext will be freed on failure.
+ * @param url URL of the stream to open.
+ * @param fmt If non-NULL, this parameter forces a specific input format.
+ * Otherwise the format is autodetected.
+ * @param options A dictionary filled with AVFormatContext and demuxer-private options.
+ * On return this parameter will be destroyed and replaced with a dict containing + * options that were not found. May be NULL. + * + * @return 0 on success, a negative AVERROR on failure. + * + * @note If you want to use custom IO, preallocate the format context and set its pb field. + */ +int avformat_open_input(AVFormatContext **ps, const char *url, AVInputFormat *fmt, AVDictionary **options); + +attribute_deprecated +int av_demuxer_open(AVFormatContext *ic); + +/** + * Read packets of a media file to get stream information. This + * is useful for file formats with no headers such as MPEG. This + * function also computes the real framerate in case of MPEG-2 repeat + * frame mode. + * The logical file position is not changed by this function; + * examined packets may be buffered for later processing. + * + * @param ic media file handle + * @param options If non-NULL, an ic.nb_streams long array of pointers to + * dictionaries, where i-th member contains options for + * codec corresponding to i-th stream. + * On return each dictionary will be filled with options that were not found. + * @return >=0 if OK, AVERROR_xxx on error + * + * @note this function isn't guaranteed to open all the codecs, so + * options being non-empty at return is a perfectly normal behavior. + * + * @todo Let the user decide somehow what information is needed so that + * we do not waste time getting stuff the user does not need. + */ +int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options); + +/** + * Find the programs which belong to a given stream. + * + * @param ic media file handle + * @param last the last found program, the search will start after this + * program, or from the beginning if it is NULL + * @param s stream index + * @return the next program which belongs to s, NULL if no program is found or + * the last program is not among the programs of ic. 
+ */ +AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s); + +void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx); + +/** + * Find the "best" stream in the file. + * The best stream is determined according to various heuristics as the most + * likely to be what the user expects. + * If the decoder parameter is non-NULL, av_find_best_stream will find the + * default decoder for the stream's codec; streams for which no decoder can + * be found are ignored. + * + * @param ic media file handle + * @param type stream type: video, audio, subtitles, etc. + * @param wanted_stream_nb user-requested stream number, + * or -1 for automatic selection + * @param related_stream try to find a stream related (eg. in the same + * program) to this one, or -1 if none + * @param decoder_ret if non-NULL, returns the decoder for the + * selected stream + * @param flags flags; none are currently defined + * @return the non-negative stream number in case of success, + * AVERROR_STREAM_NOT_FOUND if no stream with the requested type + * could be found, + * AVERROR_DECODER_NOT_FOUND if streams were found but no decoder + * @note If av_find_best_stream returns successfully and decoder_ret is not + * NULL, then *decoder_ret is guaranteed to be set to a valid AVCodec. + */ +int av_find_best_stream(AVFormatContext *ic, + enum AVMediaType type, + int wanted_stream_nb, + int related_stream, + AVCodec **decoder_ret, + int flags); + +/** + * Return the next frame of a stream. + * This function returns what is stored in the file, and does not validate + * that what is there are valid frames for the decoder. It will split what is + * stored in the file into frames and return one for each call. It will not + * omit invalid data between valid frames so as to give the decoder the maximum + * information possible for decoding. 
+ * + * If pkt->buf is NULL, then the packet is valid until the next + * av_read_frame() or until avformat_close_input(). Otherwise the packet + * is valid indefinitely. In both cases the packet must be freed with + * liteav_av_packet_unref when it is no longer needed. For video, the packet contains + * exactly one frame. For audio, it contains an integer number of frames if each + * frame has a known fixed size (e.g. PCM or ADPCM data). If the audio frames + * have a variable size (e.g. MPEG audio), then it contains one frame. + * + * pkt->pts, pkt->dts and pkt->duration are always set to correct + * values in AVStream.time_base units (and guessed if the format cannot + * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format + * has B-frames, so it is better to rely on pkt->dts if you do not + * decompress the payload. + * + * @return 0 if OK, < 0 on error or end of file + */ +int av_read_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Seek to the keyframe at timestamp. + * 'timestamp' in 'stream_index'. + * + * @param s media file handle + * @param stream_index If stream_index is (-1), a default + * stream is selected, and timestamp is automatically converted + * from AV_TIME_BASE units to the stream specific time_base. + * @param timestamp Timestamp in AVStream.time_base units + * or, if no stream is specified, in AV_TIME_BASE units. + * @param flags flags which select direction and seeking mode + * @return >= 0 on success + */ +int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, + int flags); + +/** + * Seek to timestamp ts. + * Seeking will be done so that the point from which all active streams + * can be presented successfully will be closest to ts and within min/max_ts. + * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. + * + * If flags contain AVSEEK_FLAG_BYTE, then all timestamps are in bytes and + * are the file position (this may not be supported by all demuxers). 
+ * If flags contain AVSEEK_FLAG_FRAME, then all timestamps are in frames + * in the stream with stream_index (this may not be supported by all demuxers). + * Otherwise all timestamps are in units of the stream selected by stream_index + * or if stream_index is -1, in AV_TIME_BASE units. + * If flags contain AVSEEK_FLAG_ANY, then non-keyframes are treated as + * keyframes (this may not be supported by all demuxers). + * If flags contain AVSEEK_FLAG_BACKWARD, it is ignored. + * + * @param s media file handle + * @param stream_index index of the stream which is used as time base reference + * @param min_ts smallest acceptable timestamp + * @param ts target timestamp + * @param max_ts largest acceptable timestamp + * @param flags flags + * @return >=0 on success, error code otherwise + * + * @note This is part of the new seek API which is still under construction. + * Thus do not use this yet. It may change at any time, do not expect + * ABI compatibility yet! + */ +int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); + +/** + * Discard all internally buffered data. This can be useful when dealing with + * discontinuities in the byte stream. Generally works only with formats that + * can resync. This includes headerless formats like MPEG-TS/TS but should also + * work with NUT, Ogg and in a limited way AVI for example. + * + * The set of streams, the detected duration, stream parameters and codecs do + * not change when calling this function. If you want a complete reset, it's + * better to open a new AVFormatContext. + * + * This does not flush the AVIOContext (s->pb). If necessary, call + * liteav_avio_flush(s->pb) before calling this function. + * + * @param s media file handle + * @return >=0 on success, error code otherwise + */ +int avformat_flush(AVFormatContext *s); + +/** + * Start playing a network-based stream (e.g. RTSP stream) at the + * current position. 
+ */ +int av_read_play(AVFormatContext *s); + +/** + * Pause a network-based stream (e.g. RTSP stream). + * + * Use av_read_play() to resume it. + */ +int av_read_pause(AVFormatContext *s); + +/** + * Close an opened input AVFormatContext. Free it and all its contents + * and set *s to NULL. + */ +void avformat_close_input(AVFormatContext **s); +/** + * @} + */ + +#define AVSEEK_FLAG_BACKWARD 1 ///< seek backward +#define AVSEEK_FLAG_BYTE 2 ///< seeking based on position in bytes +#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non-keyframes +#define AVSEEK_FLAG_FRAME 8 ///< seeking based on frame number +#define AVSEEK_FLAG_SEQUENCE 16 ///<seeking based on ts sequence num +#define AVSEEK_FLAG_SEG_INTERNAL 32 ///<seeking based on ts sequence internal i frame which more than seek pos. +#define AVSEEK_FLAG_SEG_INTERNAL_BEFORE 64 ///<seeking based on ts sequence internal i frame which less than seek pos. + +/** + * @addtogroup lavf_encoding + * @{ + */ + +#define AVSTREAM_INIT_IN_WRITE_HEADER 0 ///< stream parameters initialized in liteav_avformat_write_header +#define AVSTREAM_INIT_IN_INIT_OUTPUT 1 ///< stream parameters initialized in liteav_avformat_init_output + +/** + * Allocate the stream private data and write the stream header to + * an output media file. + * + * @param s Media file handle, must be allocated with avformat_alloc_context(). + * Its oformat field must be set to the desired output format; + * Its pb field must be set to an already opened AVIOContext. + * @param options An AVDictionary filled with AVFormatContext and muxer-private options. + * On return this parameter will be destroyed and replaced with a dict containing + * options that were not found. May be NULL. + * + * @return AVSTREAM_INIT_IN_WRITE_HEADER on success if the codec had not already been fully initialized in avformat_init, + * AVSTREAM_INIT_IN_INIT_OUTPUT on success if the codec had already been fully initialized in avformat_init, + * negative AVERROR on failure. 
+ * + * @see liteav_av_opt_find, liteav_av_dict_set, liteav_avio_open, liteav_av_oformat_next, liteav_avformat_init_output. + */ +av_warn_unused_result +int liteav_avformat_write_header(AVFormatContext *s, AVDictionary **options); + +/** + * Allocate the stream private data and initialize the codec, but do not write the header. + * May optionally be used before liteav_avformat_write_header to initialize stream parameters + * before actually writing the header. + * If using this function, do not pass the same options to liteav_avformat_write_header. + * + * @param s Media file handle, must be allocated with avformat_alloc_context(). + * Its oformat field must be set to the desired output format; + * Its pb field must be set to an already opened AVIOContext. + * @param options An AVDictionary filled with AVFormatContext and muxer-private options. + * On return this parameter will be destroyed and replaced with a dict containing + * options that were not found. May be NULL. + * + * @return AVSTREAM_INIT_IN_WRITE_HEADER on success if the codec requires liteav_avformat_write_header to fully initialize, + * AVSTREAM_INIT_IN_INIT_OUTPUT on success if the codec has been fully initialized, + * negative AVERROR on failure. + * + * @see liteav_av_opt_find, liteav_av_dict_set, liteav_avio_open, liteav_av_oformat_next, liteav_avformat_write_header. + */ +av_warn_unused_result +int liteav_avformat_init_output(AVFormatContext *s, AVDictionary **options); + +/** + * Write a packet to an output media file. + * + * This function passes the packet directly to the muxer, without any buffering + * or reordering. The caller is responsible for correctly interleaving the + * packets if the format requires it. Callers that want libavformat to handle + * the interleaving should call liteav_av_interleaved_write_frame() instead of this + * function. + * + * @param s media file handle + * @param pkt The packet containing the data to be written. 
Note that unlike + * liteav_av_interleaved_write_frame(), this function does not take + * ownership of the packet passed to it (though some muxers may make + * an internal reference to the input packet). + * <br> + * This parameter can be NULL (at any time, not just at the end), in + * order to immediately flush data buffered within the muxer, for + * muxers that buffer up data internally before writing it to the + * output. + * <br> + * Packet's @ref AVPacket.stream_index "stream_index" field must be + * set to the index of the corresponding stream in @ref + * AVFormatContext.streams "s->streams". + * <br> + * The timestamps (@ref AVPacket.pts "pts", @ref AVPacket.dts "dts") + * must be set to correct values in the stream's timebase (unless the + * output format is flagged with the AVFMT_NOTIMESTAMPS flag, then + * they can be set to AV_NOPTS_VALUE). + * The dts for subsequent packets passed to this function must be strictly + * increasing when compared in their respective timebases (unless the + * output format is flagged with the AVFMT_TS_NONSTRICT, then they + * merely have to be nondecreasing). @ref AVPacket.duration + * "duration") should also be set if known. + * @return < 0 on error, = 0 if OK, 1 if flushed and there is no more data to flush + * + * @see liteav_av_interleaved_write_frame() + */ +int liteav_av_write_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Write a packet to an output media file ensuring correct interleaving. + * + * This function will buffer the packets internally as needed to make sure the + * packets in the output file are properly interleaved in the order of + * increasing dts. Callers doing their own interleaving should call + * liteav_av_write_frame() instead of this function. + * + * Using this function instead of liteav_av_write_frame() can give muxers advance + * knowledge of future packets, improving e.g. the behaviour of the mp4 + * muxer for VFR content in fragmenting mode. 
+ * + * @param s media file handle + * @param pkt The packet containing the data to be written. + * <br> + * If the packet is reference-counted, this function will take + * ownership of this reference and unreference it later when it sees + * fit. + * The caller must not access the data through this reference after + * this function returns. If the packet is not reference-counted, + * libavformat will make a copy. + * <br> + * This parameter can be NULL (at any time, not just at the end), to + * flush the interleaving queues. + * <br> + * Packet's @ref AVPacket.stream_index "stream_index" field must be + * set to the index of the corresponding stream in @ref + * AVFormatContext.streams "s->streams". + * <br> + * The timestamps (@ref AVPacket.pts "pts", @ref AVPacket.dts "dts") + * must be set to correct values in the stream's timebase (unless the + * output format is flagged with the AVFMT_NOTIMESTAMPS flag, then + * they can be set to AV_NOPTS_VALUE). + * The dts for subsequent packets in one stream must be strictly + * increasing (unless the output format is flagged with the + * AVFMT_TS_NONSTRICT, then they merely have to be nondecreasing). + * @ref AVPacket.duration "duration") should also be set if known. + * + * @return 0 on success, a negative AVERROR on error. Libavformat will always + * take care of freeing the packet, even if this function fails. + * + * @see liteav_av_write_frame(), AVFormatContext.max_interleave_delta + */ +int liteav_av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Write an uncoded frame to an output media file. + * + * The frame must be correctly interleaved according to the container + * specification; if not, then liteav_av_interleaved_write_frame() must be used. + * + * See liteav_av_interleaved_write_frame() for details. + */ +int liteav_av_write_uncoded_frame(AVFormatContext *s, int stream_index, + AVFrame *frame); + +/** + * Write an uncoded frame to an output media file. 
+ * + * If the muxer supports it, this function makes it possible to write an AVFrame + * structure directly, without encoding it into a packet. + * It is mostly useful for devices and similar special muxers that use raw + * video or PCM data and will not serialize it into a byte stream. + * + * To test whether it is possible to use it with a given muxer and stream, + * use liteav_av_write_uncoded_frame_query(). + * + * The caller gives up ownership of the frame and must not access it + * afterwards. + * + * @return >=0 for success, a negative code on error + */ +int liteav_av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index, + AVFrame *frame); + +/** + * Test whether a muxer supports uncoded frame. + * + * @return >=0 if an uncoded frame can be written to that muxer and stream, + * <0 if not + */ +int liteav_av_write_uncoded_frame_query(AVFormatContext *s, int stream_index); + +/** + * Write the stream trailer to an output media file and free the + * file private data. + * + * May only be called after a successful call to liteav_avformat_write_header. + * + * @param s media file handle + * @return 0 if OK, AVERROR_xxx on error + */ +int liteav_av_write_trailer(AVFormatContext *s); + +/** + * Return the output format in the list of registered output formats + * which best matches the provided parameters, or return NULL if + * there is no match. + * + * @param short_name if non-NULL checks if short_name matches with the + * names of the registered formats + * @param filename if non-NULL checks if filename terminates with the + * extensions of the registered formats + * @param mime_type if non-NULL checks if mime_type matches with the + * MIME type of the registered formats + */ +AVOutputFormat *liteav_av_guess_format(const char *short_name, + const char *filename, + const char *mime_type); + +/** + * Guess the codec ID based upon muxer and filename. 
+ */ +enum AVCodecID liteav_av_guess_codec(AVOutputFormat *fmt, const char *short_name, + const char *filename, const char *mime_type, + enum AVMediaType type); + +/** + * Get timing information for the data currently output. + * The exact meaning of "currently output" depends on the format. + * It is mostly relevant for devices that have an internal buffer and/or + * work in real time. + * @param s media file handle + * @param stream stream in the media file + * @param[out] dts DTS of the last packet output for the stream, in stream + * time_base units + * @param[out] wall absolute time when that packet whas output, + * in microsecond + * @return 0 if OK, AVERROR(ENOSYS) if the format does not support it + * Note: some formats or devices may not allow to measure dts and wall + * atomically. + */ +int liteav_av_get_output_timestamp(struct AVFormatContext *s, int stream, + int64_t *dts, int64_t *wall); + + +/** + * @} + */ + + +/** + * @defgroup lavf_misc Utility functions + * @ingroup libavf + * @{ + * + * Miscellaneous utility functions related to both muxing and demuxing + * (or neither). + */ + +/** + * Send a nice hexadecimal dump of a buffer to the specified file stream. + * + * @param f The file stream pointer where the dump should be sent to. + * @param buf buffer + * @param size buffer size + * + * @see liteav_av_hex_dump_log, liteav_av_pkt_dump2, liteav_av_pkt_dump_log2 + */ +void liteav_av_hex_dump(FILE *f, const uint8_t *buf, int size); + +/** + * Send a nice hexadecimal dump of a buffer to the log. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message, lower values signifying + * higher importance. 
+ * @param buf buffer + * @param size buffer size + * + * @see liteav_av_hex_dump, liteav_av_pkt_dump2, liteav_av_pkt_dump_log2 + */ +void liteav_av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size); + +/** + * Send a nice dump of a packet to the specified file stream. + * + * @param f The file stream pointer where the dump should be sent to. + * @param pkt packet to dump + * @param dump_payload True if the payload must be displayed, too. + * @param st AVStream that the packet belongs to + */ +void liteav_av_pkt_dump2(FILE *f, const AVPacket *pkt, int dump_payload, const AVStream *st); + + +/** + * Send a nice dump of a packet to the log. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message, lower values signifying + * higher importance. + * @param pkt packet to dump + * @param dump_payload True if the payload must be displayed, too. + * @param st AVStream that the packet belongs to + */ +void liteav_av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, + const AVStream *st); + +/** + * Get the AVCodecID for the given codec tag tag. + * If no codec id is found returns AV_CODEC_ID_NONE. + * + * @param tags list of supported codec_id-codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param tag codec tag to match to a codec ID + */ +enum AVCodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int tag); + +/** + * Get the codec tag for the given codec id id. + * If no codec tag is found returns 0. + * + * @param tags list of supported codec_id-codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param id codec ID to match to a codec tag + */ +unsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum AVCodecID id); + +/** + * Get the codec tag for the given codec id. 
+ * + * @param tags list of supported codec_id - codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param id codec id that should be searched for in the list + * @param tag A pointer to the found tag + * @return 0 if id was not found in tags, > 0 if it was found + */ +int av_codec_get_tag2(const struct AVCodecTag * const *tags, enum AVCodecID id, + unsigned int *tag); + +int av_find_default_stream_index(AVFormatContext *s); + +/** + * Get the index for a specific timestamp. + * + * @param st stream that the timestamp belongs to + * @param timestamp timestamp to retrieve the index for + * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond + * to the timestamp which is <= the requested one, if backward + * is 0, then it will be >= + * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise + * @return < 0 if no such timestamp could be found + */ +int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags); + +/** + * Add an index entry into a sorted list. Update the entry if the list + * already contains it. + * + * @param timestamp timestamp in the time base of the given stream + */ +int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp, + int size, int distance, int flags); + + +/** + * Split a URL string into components. + * + * The pointers to buffers for storing individual components may be null, + * in order to ignore that component. Buffers for components not found are + * set to empty strings. If the port is not found, it is set to a negative + * value. 
+ * + * @param proto the buffer for the protocol + * @param proto_size the size of the proto buffer + * @param authorization the buffer for the authorization + * @param authorization_size the size of the authorization buffer + * @param hostname the buffer for the host name + * @param hostname_size the size of the hostname buffer + * @param port_ptr a pointer to store the port number in + * @param path the buffer for the path + * @param path_size the size of the path buffer + * @param url the URL to split + */ +void av_url_split(char *proto, int proto_size, + char *authorization, int authorization_size, + char *hostname, int hostname_size, + int *port_ptr, + char *path, int path_size, + const char *url); + + +/** + * Print detailed information about the input or output format, such as + * duration, bitrate, streams, container, programs, metadata, side data, + * codec and time base. + * + * @param ic the context to analyze + * @param index index of the stream to dump information about + * @param url the URL to print, such as source or destination file + * @param is_output Select whether the specified context is an input(0) or output(1) + */ +void liteav_av_dump_format(AVFormatContext *ic, + int index, + const char *url, + int is_output); + + +#define AV_FRAME_FILENAME_FLAGS_MULTIPLE 1 ///< Allow multiple %d + +/** + * Return in 'buf' the path with '%d' replaced by a number. + * + * Also handles the '%0nd' format where 'n' is the total number + * of digits and '%%'. + * + * @param buf destination buffer + * @param buf_size destination buffer size + * @param path numbered sequence string + * @param number frame number + * @param flags AV_FRAME_FILENAME_FLAGS_* + * @return 0 if OK, -1 on format error + */ +int av_get_frame_filename2(char *buf, int buf_size, + const char *path, int number, int flags); + +int av_get_frame_filename(char *buf, int buf_size, + const char *path, int number); + +/** + * Check whether filename actually is a numbered sequence generator. 
+ * + * @param filename possible numbered sequence string + * @return 1 if a valid numbered sequence string, 0 otherwise + */ +int av_filename_number_test(const char *filename); + +/** + * Generate an SDP for an RTP session. + * + * Note, this overwrites the id values of AVStreams in the muxer contexts + * for getting unique dynamic payload types. + * + * @param ac array of AVFormatContexts describing the RTP streams. If the + * array is composed by only one context, such context can contain + * multiple AVStreams (one AVStream per RTP stream). Otherwise, + * all the contexts in the array (an AVCodecContext per RTP stream) + * must contain only one AVStream. + * @param n_files number of AVCodecContexts contained in ac + * @param buf buffer where the SDP will be stored (must be allocated by + * the caller) + * @param size the size of the buffer + * @return 0 if OK, AVERROR_xxx on error + */ +int liteav_av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size); + +/** + * Return a positive value if the given filename has one of the given + * extensions, 0 otherwise. + * + * @param filename file name to check against the given extensions + * @param extensions a comma-separated list of filename extensions + */ +int liteav_av_match_ext(const char *filename, const char *extensions); + +/** + * Test if the given container can store a codec. + * + * @param ofmt container to check for compatibility + * @param codec_id codec to potentially store in container + * @param std_compliance standards compliance level, one of FF_COMPLIANCE_* + * + * @return 1 if codec with ID codec_id can be stored in ofmt, 0 if it cannot. + * A negative number if this information is not available. + */ +int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id, + int std_compliance); + +/** + * @defgroup riff_fourcc RIFF FourCCs + * @{ + * Get the tables mapping RIFF FourCCs to libavcodec AVCodecIDs. 
The tables are + * meant to be passed to av_codec_get_id()/av_codec_get_tag() as in the + * following code: + * @code + * uint32_t tag = MKTAG('H', '2', '6', '4'); + * const struct AVCodecTag *table[] = { liteav_avformat_get_riff_video_tags(), 0 }; + * enum AVCodecID id = av_codec_get_id(table, tag); + * @endcode + */ +/** + * @return the table mapping RIFF FourCCs for video to libavcodec AVCodecID. + */ +const struct AVCodecTag *liteav_avformat_get_riff_video_tags(void); +/** + * @return the table mapping RIFF FourCCs for audio to AVCodecID. + */ +const struct AVCodecTag *liteav_avformat_get_riff_audio_tags(void); +/** + * @return the table mapping MOV FourCCs for video to libavcodec AVCodecID. + */ +const struct AVCodecTag *liteav_avformat_get_mov_video_tags(void); +/** + * @return the table mapping MOV FourCCs for audio to AVCodecID. + */ +const struct AVCodecTag *liteav_avformat_get_mov_audio_tags(void); + +/** + * @} + */ + +/** + * Guess the sample aspect ratio of a frame, based on both the stream and the + * frame aspect ratio. + * + * Since the frame aspect ratio is set by the codec but the stream aspect ratio + * is set by the demuxer, these two may not be equal. This function tries to + * return the value that you should use if you would like to display the frame. + * + * Basic logic is to use the stream aspect ratio if it is set to something sane + * otherwise use the frame aspect ratio. This way a container setting, which is + * usually easy to modify can override the coded value in the frames. + * + * @param format the format context which the stream is part of + * @param stream the stream which the frame is part of + * @param frame the frame with the aspect ratio to be determined + * @return the guessed (valid) sample_aspect_ratio, 0/1 if no idea + */ +AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame); + +/** + * Guess the frame rate, based on both the container and codec information. 
+ * + * @param ctx the format context which the stream is part of + * @param stream the stream which the frame is part of + * @param frame the frame for which the frame rate should be determined, may be NULL + * @return the guessed (valid) frame rate, 0/1 if no idea + */ +AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame); + +/** + * Check if the stream st contained in s is matched by the stream specifier + * spec. + * + * See the "stream specifiers" chapter in the documentation for the syntax + * of spec. + * + * @return >0 if st is matched by spec; + * 0 if st is not matched by spec; + * AVERROR code if spec is invalid + * + * @note A stream specifier can match several streams in the format. + */ +int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, + const char *spec); + +int avformat_queue_attached_pictures(AVFormatContext *s); + +#if FF_API_OLD_BSF +/** + * Apply a list of bitstream filters to a packet. + * + * @param codec AVCodecContext, usually from an AVStream + * @param pkt the packet to apply filters to. If, on success, the returned + * packet has size == 0 and side_data_elems == 0, it indicates that + * the packet should be dropped + * @param bsfc a NULL-terminated list of filters to apply + * @return >=0 on success; + * AVERROR code on failure + */ +attribute_deprecated +int av_apply_bitstream_filters(AVCodecContext *codec, AVPacket *pkt, + AVBitStreamFilterContext *bsfc); +#endif + +enum AVTimebaseSource { + AVFMT_TBCF_AUTO = -1, + AVFMT_TBCF_DECODER, + AVFMT_TBCF_DEMUXER, +#if FF_API_R_FRAME_RATE + AVFMT_TBCF_R_FRAMERATE, +#endif +}; + +/** + * Transfer internal timing information from one stream to another. + * + * This function is useful when doing stream copy. 
+ * + * @param ofmt target output format for ost + * @param ost output stream which needs timings copy and adjustments + * @param ist reference input stream to copy timings from + * @param copy_tb define from where the stream codec timebase needs to be imported + */ +int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, + AVStream *ost, const AVStream *ist, + enum AVTimebaseSource copy_tb); + +/** + * Get the internal codec timebase from a stream. + * + * @param st input stream to extract the timebase from + */ +AVRational av_stream_get_codec_timebase(const AVStream *st); + +/** + * @} + */ + +#endif /* AVFORMAT_AVFORMAT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/avio.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/avio.h new file mode 100644 index 0000000..37ed31f --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/avio.h @@ -0,0 +1,868 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef AVFORMAT_AVIO_H +#define AVFORMAT_AVIO_H + +/** + * @file + * @ingroup lavf_io + * Buffered I/O operations + */ + +#include <stdint.h> + +#include "libavutil/common.h" +#include "libavutil/dict.h" +#include "libavutil/log.h" + +#include "libavformat/version.h" + +/** + * Seeking works like for a local file. + */ +#define AVIO_SEEKABLE_NORMAL (1 << 0) + +/** + * Seeking by timestamp with liteav_avio_seek_time() is possible. + */ +#define AVIO_SEEKABLE_TIME (1 << 1) + +/** + * Callback for checking whether to abort blocking functions. + * AVERROR_EXIT is returned in this case by the interrupted + * function. During blocking operations, callback is called with + * opaque as parameter. If the callback returns 1, the + * blocking operation will be aborted. + * + * No members can be added to this struct without a major bump, if + * new elements have been added after this struct in AVFormatContext + * or AVIOContext. + */ +typedef struct AVIOInterruptCB { + int (*callback)(void*); + void *opaque; +} AVIOInterruptCB; + +/** + * Directory entry types. + */ +enum AVIODirEntryType { + AVIO_ENTRY_UNKNOWN, + AVIO_ENTRY_BLOCK_DEVICE, + AVIO_ENTRY_CHARACTER_DEVICE, + AVIO_ENTRY_DIRECTORY, + AVIO_ENTRY_NAMED_PIPE, + AVIO_ENTRY_SYMBOLIC_LINK, + AVIO_ENTRY_SOCKET, + AVIO_ENTRY_FILE, + AVIO_ENTRY_SERVER, + AVIO_ENTRY_SHARE, + AVIO_ENTRY_WORKGROUP, +}; + +/** + * Describes single entry of the directory. + * + * Only name and type fields are guaranteed be set. + * Rest of fields are protocol or/and platform dependent and might be unknown. + */ +typedef struct AVIODirEntry { + char *name; /**< Filename */ + int type; /**< Type of the entry */ + int utf8; /**< Set to 1 when name is encoded with UTF-8, 0 otherwise. 
+ Name can be encoded with UTF-8 even though 0 is set. */ + int64_t size; /**< File size in bytes, -1 if unknown. */ + int64_t modification_timestamp; /**< Time of last modification in microseconds since unix + epoch, -1 if unknown. */ + int64_t access_timestamp; /**< Time of last access in microseconds since unix epoch, + -1 if unknown. */ + int64_t status_change_timestamp; /**< Time of last status change in microseconds since unix + epoch, -1 if unknown. */ + int64_t user_id; /**< User ID of owner, -1 if unknown. */ + int64_t group_id; /**< Group ID of owner, -1 if unknown. */ + int64_t filemode; /**< Unix file mode, -1 if unknown. */ +} AVIODirEntry; + +typedef struct AVIODirContext { + struct URLContext *url_context; +} AVIODirContext; + +/** + * Different data types that can be returned via the AVIO + * write_data_type callback. + */ +enum AVIODataMarkerType { + /** + * Header data; this needs to be present for the stream to be decodeable. + */ + AVIO_DATA_MARKER_HEADER, + /** + * A point in the output bytestream where a decoder can start decoding + * (i.e. a keyframe). A demuxer/decoder given the data flagged with + * AVIO_DATA_MARKER_HEADER, followed by any AVIO_DATA_MARKER_SYNC_POINT, + * should give decodeable results. + */ + AVIO_DATA_MARKER_SYNC_POINT, + /** + * A point in the output bytestream where a demuxer can start parsing + * (for non self synchronizing bytestream formats). That is, any + * non-keyframe packet start point. + */ + AVIO_DATA_MARKER_BOUNDARY_POINT, + /** + * This is any, unlabelled data. It can either be a muxer not marking + * any positions at all, it can be an actual boundary/sync point + * that the muxer chooses not to mark, or a later part of a packet/fragment + * that is cut into multiple write callbacks due to limited IO buffer size. + */ + AVIO_DATA_MARKER_UNKNOWN, + /** + * Trailer data, which doesn't contain actual content, but only for + * finalizing the output file. 
+ */ + AVIO_DATA_MARKER_TRAILER, + /** + * A point in the output bytestream where the underlying AVIOContext might + * flush the buffer depending on latency or buffering requirements. Typically + * means the end of a packet. + */ + AVIO_DATA_MARKER_FLUSH_POINT, +}; + +/** + * Bytestream IO Context. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVIOContext) must not be used outside libav*. + * + * @note None of the function pointers in AVIOContext should be called + * directly, they should only be set by the client application + * when implementing custom I/O. Normally these are set to the + * function pointers specified in liteav_avio_alloc_context() + */ +typedef struct AVIOContext { + /** + * A class for private options. + * + * If this AVIOContext is created by liteav_avio_open2(), av_class is set and + * passes the options down to protocols. + * + * If this AVIOContext is manually allocated, then av_class may be set by + * the caller. + * + * warning -- this field can be NULL, be sure to not pass this AVIOContext + * to any av_opt_* functions in that case. 
+ */ + const AVClass *av_class; + + /* + * The following shows the relationship between buffer, buf_ptr, + * buf_ptr_max, buf_end, buf_size, and pos, when reading and when writing + * (since AVIOContext is used for both): + * + ********************************************************************************** + * READING + ********************************************************************************** + * + * | buffer_size | + * |---------------------------------------| + * | | + * + * buffer buf_ptr buf_end + * +---------------+-----------------------+ + * |/ / / / / / / /|/ / / / / / /| | + * read buffer: |/ / consumed / | to be read /| | + * |/ / / / / / / /|/ / / / / / /| | + * +---------------+-----------------------+ + * + * pos + * +-------------------------------------------+-----------------+ + * input file: | | | + * +-------------------------------------------+-----------------+ + * + * + ********************************************************************************** + * WRITING + ********************************************************************************** + * + * | buffer_size | + * |--------------------------------------| + * | | + * + * buf_ptr_max + * buffer (buf_ptr) buf_end + * +-----------------------+--------------+ + * |/ / / / / / / / / / / /| | + * write buffer: | / / to be flushed / / | | + * |/ / / / / / / / / / / /| | + * +-----------------------+--------------+ + * buf_ptr can be in this + * due to a backward seek + * + * pos + * +-------------+----------------------------------------------+ + * output file: | | | + * +-------------+----------------------------------------------+ + * + */ + unsigned char *buffer; /**< Start of the buffer. */ + int buffer_size; /**< Maximum buffer size */ + unsigned char *buf_ptr; /**< Current position in the buffer */ + unsigned char *buf_end; /**< End of the data, may be less than + buffer+buffer_size if the read function returned + less data than requested, e.g. 
for streams where + no more data has been received yet. */ + void *opaque; /**< A private pointer, passed to the read/write/seek/... + functions. */ + int (*read_packet)(void *opaque, uint8_t *buf, int buf_size); + int (*write_packet)(void *opaque, uint8_t *buf, int buf_size); + int64_t (*seek)(void *opaque, int64_t offset, int whence); + int64_t pos; /**< position in the file of the current buffer */ + int eof_reached; /**< true if eof reached */ + int write_flag; /**< true if open for writing */ + int max_packet_size; + unsigned long checksum; + unsigned char *checksum_ptr; + unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size); + int error; /**< contains the error code or 0 if no error happened */ + /** + * Pause or resume playback for network streaming protocols - e.g. MMS. + */ + int (*read_pause)(void *opaque, int pause); + /** + * Seek to a given timestamp in stream with the specified stream_index. + * Needed for some network streaming protocols which don't support seeking + * to byte position. + */ + int64_t (*read_seek)(void *opaque, int stream_index, + int64_t timestamp, int flags); + /** + * A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable. + */ + int seekable; + + /** + * max filesize, used to limit allocations + * This field is internal to libavformat and access from outside is not allowed. + */ + int64_t maxsize; + + /** + * liteav_avio_read and liteav_avio_write should if possible be satisfied directly + * instead of going through a buffer, and liteav_avio_seek will always + * call the underlying seek function directly. + */ + int direct; + + /** + * Bytes read statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int64_t bytes_read; + + /** + * seek statistic + * This field is internal to libavformat and access from outside is not allowed. 
+ */ + int seek_count; + + /** + * writeout statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int writeout_count; + + /** + * Original buffer size + * used internally after probing and ensure seekback to reset the buffer size + * This field is internal to libavformat and access from outside is not allowed. + */ + int orig_buffer_size; + + /** + * Threshold to favor readahead over seek. + * This is current internal only, do not use from outside. + */ + int short_seek_threshold; + + /** + * ',' separated list of allowed protocols. + */ + const char *protocol_whitelist; + + /** + * ',' separated list of disallowed protocols. + */ + const char *protocol_blacklist; + + /** + * A callback that is used instead of write_packet. + */ + int (*write_data_type)(void *opaque, uint8_t *buf, int buf_size, + enum AVIODataMarkerType type, int64_t time); + /** + * If set, don't call write_data_type separately for AVIO_DATA_MARKER_BOUNDARY_POINT, + * but ignore them and treat them as AVIO_DATA_MARKER_UNKNOWN (to avoid needlessly + * small chunks of data returned from the callback). + */ + int ignore_boundary_point; + + /** + * Internal, not meant to be used from outside of AVIOContext. + */ + enum AVIODataMarkerType current_type; + int64_t last_time; + + /** + * A callback that is used instead of short_seek_threshold. + * This is current internal only, do not use from outside. + */ + int (*short_seek_get)(void *opaque); + + int64_t written; + + /** + * Maximum reached position before a backward seek in the write buffer, + * used keeping track of already written data for a later flush. + */ + unsigned char *buf_ptr_max; + + /** + * Try to buffer at least this amount of data before flushing it + */ + int min_packet_size; +} AVIOContext; + +/** + * Return the name of the protocol that will handle the passed URL. + * + * NULL is returned if no protocol could be found for the given URL. + * + * @return Name of the protocol or NULL. 
+ */ +const char *liteav_avio_find_protocol_name(const char *url); + +/** + * Return AVIO_FLAG_* access flags corresponding to the access permissions + * of the resource in url, or a negative value corresponding to an + * AVERROR code in case of failure. The returned access flags are + * masked by the value in flags. + * + * @note This function is intrinsically unsafe, in the sense that the + * checked resource may change its existence or permission status from + * one call to another. Thus you should not trust the returned value, + * unless you are sure that no other processes are accessing the + * checked resource. + */ +int liteav_avio_check(const char *url, int flags); + +/** + * Move or rename a resource. + * + * @note url_src and url_dst should share the same protocol and authority. + * + * @param url_src url to resource to be moved + * @param url_dst new url to resource if the operation succeeded + * @return >=0 on success or negative on error. + */ +int liteav_avpriv_io_move(const char *url_src, const char *url_dst); + +/** + * Delete a resource. + * + * @param url resource to be deleted. + * @return >=0 on success or negative on error. + */ +int liteav_avpriv_io_delete(const char *url); + +/** + * Open directory for reading. + * + * @param s directory read context. Pointer to a NULL pointer must be passed. + * @param url directory to be listed. + * @param options A dictionary filled with protocol-private options. On return + * this parameter will be destroyed and replaced with a dictionary + * containing options that were not found. May be NULL. + * @return >=0 on success or negative on error. + */ +int liteav_avio_open_dir(AVIODirContext **s, const char *url, AVDictionary **options); + +/** + * Get next directory entry. + * + * Returned entry must be freed with liteav_avio_free_directory_entry(). In particular + * it may outlive AVIODirContext. + * + * @param s directory read context. + * @param[out] next next entry or NULL when no more entries. 
+ * @return >=0 on success or negative on error. End of list is not considered an + * error. + */ +int liteav_avio_read_dir(AVIODirContext *s, AVIODirEntry **next); + +/** + * Close directory. + * + * @note Entries created using liteav_avio_read_dir() are not deleted and must be + * freeded with liteav_avio_free_directory_entry(). + * + * @param s directory read context. + * @return >=0 on success or negative on error. + */ +int liteav_avio_close_dir(AVIODirContext **s); + +/** + * Free entry allocated by liteav_avio_read_dir(). + * + * @param entry entry to be freed. + */ +void liteav_avio_free_directory_entry(AVIODirEntry **entry); + +/** + * Allocate and initialize an AVIOContext for buffered I/O. It must be later + * freed with liteav_avio_context_free(). + * + * @param buffer Memory block for input/output operations via AVIOContext. + * The buffer must be allocated with liteav_av_malloc() and friends. + * It may be freed and replaced with a new buffer by libavformat. + * AVIOContext.buffer holds the buffer currently in use, + * which must be later freed with liteav_av_free(). + * @param buffer_size The buffer size is very important for performance. + * For protocols with fixed blocksize it should be set to this blocksize. + * For others a typical size is a cache page, e.g. 4kb. + * @param write_flag Set to 1 if the buffer should be writable, 0 otherwise. + * @param opaque An opaque pointer to user-specific data. + * @param read_packet A function for refilling the buffer, may be NULL. + * For stream protocols, must never return 0 but rather + * a proper AVERROR code. + * @param write_packet A function for writing the buffer contents, may be NULL. + * The function may not change the input buffers content. + * @param seek A function for seeking to specified byte position, may be NULL. + * + * @return Allocated AVIOContext or NULL on failure. 
+ */ +AVIOContext *liteav_avio_alloc_context( + unsigned char *buffer, + int buffer_size, + int write_flag, + void *opaque, + int (*read_packet)(void *opaque, uint8_t *buf, int buf_size), + int (*write_packet)(void *opaque, uint8_t *buf, int buf_size), + int64_t (*seek)(void *opaque, int64_t offset, int whence)); + +/** + * Free the supplied IO context and everything associated with it. + * + * @param s Double pointer to the IO context. This function will write NULL + * into s. + */ +void liteav_avio_context_free(AVIOContext **s); + +void liteav_avio_w8(AVIOContext *s, int b); +void liteav_avio_write(AVIOContext *s, const unsigned char *buf, int size); +void liteav_avio_wl64(AVIOContext *s, uint64_t val); +void liteav_avio_wb64(AVIOContext *s, uint64_t val); +void liteav_avio_wl32(AVIOContext *s, unsigned int val); +void liteav_avio_wb32(AVIOContext *s, unsigned int val); +void liteav_avio_wl24(AVIOContext *s, unsigned int val); +void liteav_avio_wb24(AVIOContext *s, unsigned int val); +void liteav_avio_wl16(AVIOContext *s, unsigned int val); +void liteav_avio_wb16(AVIOContext *s, unsigned int val); + +/** + * Write a NULL-terminated string. + * @return number of bytes written. + */ +int liteav_avio_put_str(AVIOContext *s, const char *str); + +/** + * Convert an UTF-8 string to UTF-16LE and write it. + * @param s the AVIOContext + * @param str NULL-terminated UTF-8 string + * + * @return number of bytes written. + */ +int liteav_avio_put_str16le(AVIOContext *s, const char *str); + +/** + * Convert an UTF-8 string to UTF-16BE and write it. + * @param s the AVIOContext + * @param str NULL-terminated UTF-8 string + * + * @return number of bytes written. + */ +int liteav_avio_put_str16be(AVIOContext *s, const char *str); + +/** + * Mark the written bytestream as a specific type. + * + * Zero-length ranges are omitted from the output. 
+ * + * @param time the stream time the current bytestream pos corresponds to + * (in AV_TIME_BASE units), or AV_NOPTS_VALUE if unknown or not + * applicable + * @param type the kind of data written starting at the current pos + */ +void liteav_avio_write_marker(AVIOContext *s, int64_t time, enum AVIODataMarkerType type); + +/** + * ORing this as the "whence" parameter to a seek function causes it to + * return the filesize without seeking anywhere. Supporting this is optional. + * If it is not supported then the seek function will return <0. + */ +#define AVSEEK_SIZE 0x10000 + +/** + * Passing this flag as the "whence" parameter to a seek function causes it to + * seek by any means (like reopening and linear reading) or other normally unreasonable + * means that can be extremely slow. + * This may be ignored by the seek code. + */ +#define AVSEEK_FORCE 0x20000 + +/** + * fseek() equivalent for AVIOContext. + * @return new position or AVERROR. + */ +int64_t liteav_avio_seek(AVIOContext *s, int64_t offset, int whence); + +/** + * Skip given number of bytes forward + * @return new position or AVERROR. + */ +int64_t liteav_avio_skip(AVIOContext *s, int64_t offset); + +/** + * ftell() equivalent for AVIOContext. + * @return position or AVERROR. + */ +static av_always_inline int64_t avio_tell(AVIOContext *s) +{ + return liteav_avio_seek(s, 0, SEEK_CUR); +} + +/** + * Get the filesize. + * @return filesize or AVERROR + */ +int64_t liteav_avio_size(AVIOContext *s); + +/** + * feof() equivalent for AVIOContext. + * @return non zero if and only if end of file + */ +int liteav_avio_feof(AVIOContext *s); + +/** + * 增加AVDictionary用于辅助判断自定义协议的结尾判断 + * @return non zero if and only if end of file by hlscache + */ +int liteav_avio_feof_with_dict(AVIOContext *s, AVDictionary **options); + +/** @warning Writes up to 4 KiB per call */ +int liteav_avio_printf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3); + +/** + * Force flushing of buffered data. 
+ * + * For write streams, force the buffered data to be immediately written to the output, + * without to wait to fill the internal buffer. + * + * For read streams, discard all currently buffered data, and advance the + * reported file position to that of the underlying stream. This does not + * read new data, and does not perform any seeks. + */ +void liteav_avio_flush(AVIOContext *s); + +/** + * Read size bytes from AVIOContext into buf. + * @return number of bytes read or AVERROR + */ +int liteav_avio_read(AVIOContext *s, unsigned char *buf, int size); + +/** + * Read size bytes from AVIOContext into buf. Unlike liteav_avio_read(), this is allowed + * to read fewer bytes than requested. The missing bytes can be read in the next + * call. This always tries to read at least 1 byte. + * Useful to reduce latency in certain cases. + * @return number of bytes read or AVERROR + */ +int liteav_avio_read_partial(AVIOContext *s, unsigned char *buf, int size); + +/** + * @name Functions for reading from AVIOContext + * @{ + * + * @note return 0 if EOF, so you cannot use it if EOF handling is + * necessary + */ +int liteav_avio_r8 (AVIOContext *s); +unsigned int liteav_avio_rl16(AVIOContext *s); +unsigned int liteav_avio_rl24(AVIOContext *s); +unsigned int liteav_avio_rl32(AVIOContext *s); +uint64_t liteav_avio_rl64(AVIOContext *s); +unsigned int liteav_avio_rb16(AVIOContext *s); +unsigned int liteav_avio_rb24(AVIOContext *s); +unsigned int liteav_avio_rb32(AVIOContext *s); +uint64_t liteav_avio_rb64(AVIOContext *s); +/** + * @} + */ + +/** + * Read a string from pb into buf. The reading will terminate when either + * a NULL character was encountered, maxlen bytes have been read, or nothing + * more can be read from pb. The result is guaranteed to be NULL-terminated, it + * will be truncated if buf is too small. + * Note that the string is not interpreted or validated in any way, it + * might get truncated in the middle of a sequence for multi-byte encodings. 
+ * + * @return number of bytes read (is always <= maxlen). + * If reading ends on EOF or error, the return value will be one more than + * bytes actually read. + */ +int liteav_avio_get_str(AVIOContext *pb, int maxlen, char *buf, int buflen); + +/** + * Read a UTF-16 string from pb and convert it to UTF-8. + * The reading will terminate when either a null or invalid character was + * encountered or maxlen bytes have been read. + * @return number of bytes read (is always <= maxlen) + */ +int liteav_avio_get_str16le(AVIOContext *pb, int maxlen, char *buf, int buflen); +int liteav_avio_get_str16be(AVIOContext *pb, int maxlen, char *buf, int buflen); + + +/** + * @name URL open modes + * The flags argument to liteav_avio_open must be one of the following + * constants, optionally ORed with other flags. + * @{ + */ +#define AVIO_FLAG_READ 1 /**< read-only */ +#define AVIO_FLAG_WRITE 2 /**< write-only */ +#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE) /**< read-write pseudo flag */ +/** + * @} + */ + +/** + * Use non-blocking mode. + * If this flag is set, operations on the context will return + * AVERROR(EAGAIN) if they can not be performed immediately. + * If this flag is not set, operations on the context will never return + * AVERROR(EAGAIN). + * Note that this flag does not affect the opening/connecting of the + * context. Connecting a protocol will always block if necessary (e.g. on + * network protocols) but never hang (e.g. on busy devices). + * Warning: non-blocking protocols is work-in-progress; this flag may be + * silently ignored. + */ +#define AVIO_FLAG_NONBLOCK 8 + +/** + * Use direct mode. + * liteav_avio_read and liteav_avio_write should if possible be satisfied directly + * instead of going through a buffer, and liteav_avio_seek will always + * call the underlying seek function directly. + */ +#define AVIO_FLAG_DIRECT 0x8000 + +/** + * Create and initialize a AVIOContext for accessing the + * resource indicated by url. 
+ * @note When the resource indicated by url has been opened in + * read+write mode, the AVIOContext can be used only for writing. + * + * @param s Used to return the pointer to the created AVIOContext. + * In case of failure the pointed to value is set to NULL. + * @param url resource to access + * @param flags flags which control how the resource indicated by url + * is to be opened + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int liteav_avio_open(AVIOContext **s, const char *url, int flags); + +/** + * Create and initialize a AVIOContext for accessing the + * resource indicated by url. + * @note When the resource indicated by url has been opened in + * read+write mode, the AVIOContext can be used only for writing. + * + * @param s Used to return the pointer to the created AVIOContext. + * In case of failure the pointed to value is set to NULL. + * @param url resource to access + * @param flags flags which control how the resource indicated by url + * is to be opened + * @param int_cb an interrupt callback to be used at the protocols level + * @param options A dictionary filled with protocol-private options. On return + * this parameter will be destroyed and replaced with a dict containing options + * that were not found. May be NULL. + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int liteav_avio_open2(AVIOContext **s, const char *url, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * Close the resource accessed by the AVIOContext s and free it. + * This function can only be used if s was opened by liteav_avio_open(). + * + * The internal buffer is automatically flushed before closing the + * resource. + * + * @return 0 on success, an AVERROR < 0 on error. 
+ * @see liteav_avio_closep + */ +int liteav_avio_close(AVIOContext *s); + +/** + * Close the resource accessed by the AVIOContext *s, free it + * and set the pointer pointing to it to NULL. + * This function can only be used if s was opened by liteav_avio_open(). + * + * The internal buffer is automatically flushed before closing the + * resource. + * + * @return 0 on success, an AVERROR < 0 on error. + * @see liteav_avio_close + */ +int liteav_avio_closep(AVIOContext **s); + + +/** + * Open a write only memory stream. + * + * @param s new IO context + * @return zero if no error. + */ +int liteav_avio_open_dyn_buf(AVIOContext **s); + +/** + * Return the written size and a pointer to the buffer. + * The AVIOContext stream is left intact. + * The buffer must NOT be freed. + * No padding is added to the buffer. + * + * @param s IO context + * @param pbuffer pointer to a byte buffer + * @return the length of the byte buffer + */ +int liteav_avio_get_dyn_buf(AVIOContext *s, uint8_t **pbuffer); + +/** + * Return the written size and a pointer to the buffer. The buffer + * must be freed with liteav_av_free(). + * Padding of AV_INPUT_BUFFER_PADDING_SIZE is added to the buffer. + * + * @param s IO context + * @param pbuffer pointer to a byte buffer + * @return the length of the byte buffer + */ +int liteav_avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer); + +/** + * Iterate through names of available protocols. + * + * @param opaque A private pointer representing current protocol. + * It must be a pointer to NULL on first iteration and will + * be updated by successive calls to liteav_avio_enum_protocols. + * @param output If set to 1, iterate over output protocols, + * otherwise over input protocols. + * + * @return A static string containing the name of current protocol or NULL + */ +const char *liteav_avio_enum_protocols(void **opaque, int output); + +/** + * Pause and resume playing - only meaningful if using a network streaming + * protocol (e.g. MMS). 
+ * + * @param h IO context from which to call the read_pause function pointer + * @param pause 1 for pause, 0 for resume + */ +int liteav_avio_pause(AVIOContext *h, int pause); + +/** + * Seek to a given timestamp relative to some component stream. + * Only meaningful if using a network streaming protocol (e.g. MMS.). + * + * @param h IO context from which to call the seek function pointers + * @param stream_index The stream index that the timestamp is relative to. + * If stream_index is (-1) the timestamp should be in AV_TIME_BASE + * units from the beginning of the presentation. + * If a stream_index >= 0 is used and the protocol does not support + * seeking based on component streams, the call will fail. + * @param timestamp timestamp in AVStream.time_base units + * or if there is no stream specified then in AV_TIME_BASE units. + * @param flags Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE + * and AVSEEK_FLAG_ANY. The protocol may silently ignore + * AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will + * fail if used and not supported. + * @return >= 0 on success + * @see AVInputFormat::read_seek + */ +int64_t liteav_avio_seek_time(AVIOContext *h, int stream_index, + int64_t timestamp, int flags); + +/* Avoid a warning. The header can not be included because it breaks c++. */ +struct AVBPrint; + +/** + * Read contents of h into print buffer, up to max_size bytes, or up to EOF. + * + * @return 0 for success (max_size bytes read or EOF reached), negative error + * code otherwise + */ +int liteav_avio_read_to_bprint(AVIOContext *h, struct AVBPrint *pb, size_t max_size); + +/** + * Accept and allocate a client context on a server context. 
+ * @param s the server context + * @param c the client context, must be unallocated + * @return >= 0 on success or a negative value corresponding + * to an AVERROR on failure + */ +int liteav_avio_accept(AVIOContext *s, AVIOContext **c); + +/** + * Perform one step of the protocol handshake to accept a new client. + * This function must be called on a client returned by liteav_avio_accept() before + * using it as a read/write context. + * It is separate from liteav_avio_accept() because it may block. + * A step of the handshake is defined by places where the application may + * decide to change the proceedings. + * For example, on a protocol with a request header and a reply header, each + * one can constitute a step because the application may use the parameters + * from the request to change parameters in the reply; or each individual + * chunk of the request can constitute a step. + * If the handshake is already finished, liteav_avio_handshake() does nothing and + * returns 0 immediately. + * + * @param c the client context to perform the handshake on + * @return 0 on a complete and successful handshake + * > 0 if the handshake progressed, but is not complete + * < 0 for an AVERROR code + */ +int liteav_avio_handshake(AVIOContext *c); +#endif /* AVFORMAT_AVIO_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/internal.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/internal.h new file mode 100644 index 0000000..a95f3ae --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/internal.h @@ -0,0 +1,807 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_INTERNAL_H +#define AVFORMAT_INTERNAL_H + +#include <stdint.h> + +#include "libavutil/bprint.h" +#include "avformat.h" +#include "os_support.h" + +#define MAX_URL_SIZE 4096 + +/** size of probe buffer, for guessing file type from file contents */ +#define PROBE_BUF_MIN 2048 +#define PROBE_BUF_MAX (1 << 20) + +#define MAX_PROBE_PACKETS 2500 + +#ifdef DEBUG +# define hex_dump_debug(class, buf, size) liteav_av_hex_dump_log(class, AV_LOG_DEBUG, buf, size) +#else +# define hex_dump_debug(class, buf, size) do { if (0) liteav_av_hex_dump_log(class, AV_LOG_DEBUG, buf, size); } while(0) +#endif + +typedef struct AVCodecTag { + enum AVCodecID id; + unsigned int tag; +} AVCodecTag; + +typedef struct CodecMime{ + char str[32]; + enum AVCodecID id; +} CodecMime; + +/*************************************************/ +/* fractional numbers for exact pts handling */ + +/** + * The exact value of the fractional number is: 'val + num / den'. + * num is assumed to be 0 <= num < den. + */ +typedef struct FFFrac { + int64_t val, num, den; +} FFFrac; + + +struct AVFormatInternal { + /** + * Number of streams relevant for interleaving. + * Muxing only. 
+ */ + int nb_interleaved_streams; + + /** + * This buffer is only needed when packets were already buffered but + * not decoded, for example to get the codec parameters in MPEG + * streams. + */ + struct AVPacketList *packet_buffer; + struct AVPacketList *packet_buffer_end; + + /* av_seek_frame() support */ + int64_t data_offset; /**< offset of the first packet */ + + /** + * Raw packets from the demuxer, prior to parsing and decoding. + * This buffer is used for buffering packets until the codec can + * be identified, as parsing cannot be done without knowing the + * codec. + */ + struct AVPacketList *raw_packet_buffer; + struct AVPacketList *raw_packet_buffer_end; + /** + * Packets split by the parser get queued here. + */ + struct AVPacketList *parse_queue; + struct AVPacketList *parse_queue_end; + /** + * Remaining size available for raw_packet_buffer, in bytes. + */ +#define RAW_PACKET_BUFFER_SIZE 2500000 + int raw_packet_buffer_remaining_size; + + /** + * Offset to remap timestamps to be non-negative. + * Expressed in timebase units. + * @see AVStream.mux_ts_offset + */ + int64_t offset; + + /** + * Timebase for the timestamp offset. + */ + AVRational offset_timebase; + +#if FF_API_COMPUTE_PKT_FIELDS2 + int missing_ts_warning; +#endif + + int inject_global_side_data; + + int avoid_negative_ts_use_pts; + + /** + * Timestamp of the end of the shortest stream. + */ + int64_t shortest_end; + + /** + * Whether or not liteav_avformat_init_output has already been called + */ + int initialized; + + /** + * Whether or not liteav_avformat_init_output fully initialized streams + */ + int streams_initialized; + + /** + * ID3v2 tag useful for MP3 demuxing + */ + AVDictionary *id3v2_meta; + + /* + * Prefer the codec framerate for avg_frame_rate computation. + */ + int prefer_codec_framerate; +}; + +struct AVStreamInternal { + /** + * Set to 1 if the codec allows reordering, so pts can be different + * from dts. 
+ */ + int reorder; + + /** + * bitstream filters to run on stream + * - encoding: Set by muxer using liteav_ff_stream_add_bitstream_filter + * - decoding: unused + */ + AVBSFContext **bsfcs; + int nb_bsfcs; + + /** + * Whether or not check_bitstream should still be run on each packet + */ + int bitstream_checked; + + /** + * The codec context used by avformat_find_stream_info, the parser, etc. + */ + AVCodecContext *avctx; + /** + * 1 if avctx has been initialized with the values from the codec parameters + */ + int avctx_inited; + + enum AVCodecID orig_codec_id; + + /* the context for extracting extradata in find_stream_info() + * inited=1/bsf=NULL signals that extracting is not possible (codec not + * supported) */ + struct { + AVBSFContext *bsf; + AVPacket *pkt; + int inited; + } extract_extradata; + + /** + * Whether the internal avctx needs to be updated from codecpar (after a late change to codecpar) + */ + int need_context_update; + + FFFrac *priv_pts; +}; + +#ifdef __GNUC__ +#define dynarray_add(tab, nb_ptr, elem)\ +do {\ + __typeof__(tab) _tab = (tab);\ + __typeof__(elem) _elem = (elem);\ + (void)sizeof(**_tab == _elem); /* check that types are compatible */\ + liteav_av_dynarray_add(_tab, nb_ptr, _elem);\ +} while(0) +#else +#define dynarray_add(tab, nb_ptr, elem)\ +do {\ + liteav_av_dynarray_add((tab), nb_ptr, (elem));\ +} while(0) +#endif + +struct tm *liteav_ff_brktimegm(time_t secs, struct tm *tm); + +/** + * Automatically create sub-directories + * + * @param path will create sub-directories by path + * @return 0, or < 0 on error + */ +int liteav_ff_mkdir_p(const char *path); + +char *liteav_ff_data_to_hex(char *buf, const uint8_t *src, int size, int lowercase); + +/** + * Parse a string of hexadecimal strings. Any space between the hexadecimal + * digits is ignored. 
+ * + * @param data if non-null, the parsed data is written to this pointer + * @param p the string to parse + * @return the number of bytes written (or to be written, if data is null) + */ +int liteav_ff_hex_to_data(uint8_t *data, const char *p); + +/** + * Add packet to AVFormatContext->packet_buffer list, determining its + * interleaved position using compare() function argument. + * @return 0, or < 0 on error + */ +int liteav_ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt, + int (*compare)(AVFormatContext *, AVPacket *, AVPacket *)); + +void liteav_ff_read_frame_flush(AVFormatContext *s); + +#define NTP_OFFSET 2208988800ULL +#define NTP_OFFSET_US (NTP_OFFSET * 1000000ULL) + +/** Get the current time since NTP epoch in microseconds. */ +uint64_t liteav_ff_ntp_time(void); + +/** + * Get the NTP time stamp formatted as per the RFC-5905. + * + * @param ntp_time NTP time in micro seconds (since NTP epoch) + * @return the formatted NTP time stamp + */ +uint64_t liteav_ff_get_formatted_ntp_time(uint64_t ntp_time_us); + +/** + * Append the media-specific SDP fragment for the media stream c + * to the buffer buff. + * + * Note, the buffer needs to be initialized, since it is appended to + * existing content. 
+ * + * @param buff the buffer to append the SDP fragment to + * @param size the size of the buff buffer + * @param st the AVStream of the media to describe + * @param idx the global stream index + * @param dest_addr the destination address of the media stream, may be NULL + * @param dest_type the destination address type, may be NULL + * @param port the destination port of the media stream, 0 if unknown + * @param ttl the time to live of the stream, 0 if not multicast + * @param fmt the AVFormatContext, which might contain options modifying + * the generated SDP + */ +void liteav_ff_sdp_write_media(char *buff, int size, AVStream *st, int idx, + const char *dest_addr, const char *dest_type, + int port, int ttl, AVFormatContext *fmt); + +/** + * Write a packet to another muxer than the one the user originally + * intended. Useful when chaining muxers, where one muxer internally + * writes a received packet to another muxer. + * + * @param dst the muxer to write the packet to + * @param dst_stream the stream index within dst to write the packet to + * @param pkt the packet to be written + * @param src the muxer the packet originally was intended for + * @param interleave 0->use liteav_av_write_frame, 1->liteav_av_interleaved_write_frame + * @return the value liteav_av_write_frame returned + */ +int liteav_ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt, + AVFormatContext *src, int interleave); + +/** + * Get the length in bytes which is needed to store val as v. + */ +int liteav_ff_get_v_length(uint64_t val); + +/** + * Put val using a variable number of bytes. + */ +void liteav_ff_put_v(AVIOContext *bc, uint64_t val); + +/** + * Read a whole line of text from AVIOContext. Stop reading after reaching + * either a \\n, a \\0 or EOF. The returned string is always \\0-terminated, + * and may be truncated if the buffer is too small. 
+ * + * @param s the read-only AVIOContext + * @param buf buffer to store the read line + * @param maxlen size of the buffer + * @return the length of the string written in the buffer, not including the + * final \\0 + */ +int liteav_ff_get_line(AVIOContext *s, char *buf, int maxlen); + +/** + * Same as liteav_ff_get_line but strip the white-space characters in the text tail + * + * @param s the read-only AVIOContext + * @param buf buffer to store the read line + * @param maxlen size of the buffer + * @return the length of the string written in the buffer + */ +int liteav_ff_get_chomp_line(AVIOContext *s, char *buf, int maxlen); + +/** + * Read a whole line of text from AVIOContext to an AVBPrint buffer. Stop + * reading after reaching a \\r, a \\n, a \\r\\n, a \\0 or EOF. The line + * ending characters are NOT included in the buffer, but they are skipped on + * the input. + * + * @param s the read-only AVIOContext + * @param bp the AVBPrint buffer + * @return the length of the read line, not including the line endings, + * negative on error. + */ +int64_t liteav_ff_read_line_to_bprint(AVIOContext *s, AVBPrint *bp); + +/** + * Read a whole line of text from AVIOContext to an AVBPrint buffer overwriting + * its contents. Stop reading after reaching a \\r, a \\n, a \\r\\n, a \\0 or + * EOF. The line ending characters are NOT included in the buffer, but they + * are skipped on the input. + * + * @param s the read-only AVIOContext + * @param bp the AVBPrint buffer + * @return the length of the read line not including the line endings, + * negative on error, or if the buffer becomes truncated. + */ +int64_t liteav_ff_read_line_to_bprint_overwrite(AVIOContext *s, AVBPrint *bp); + +#define SPACE_CHARS " \t\r\n" + +/** + * Callback function type for liteav_ff_parse_key_value. 
+ * + * @param key a pointer to the key + * @param key_len the number of bytes that belong to the key, including the '=' + * char + * @param dest return the destination pointer for the value in *dest, may + * be null to ignore the value + * @param dest_len the length of the *dest buffer + */ +typedef void (*liteav_ff_parse_key_val_cb)(void *context, const char *key, + int key_len, char **dest, int *dest_len); +/** + * Parse a string with comma-separated key=value pairs. The value strings + * may be quoted and may contain escaped characters within quoted strings. + * + * @param str the string to parse + * @param callback_get_buf function that returns where to store the + * unescaped value string. + * @param context the opaque context pointer to pass to callback_get_buf + */ +void liteav_ff_parse_key_value(const char *str, liteav_ff_parse_key_val_cb callback_get_buf, + void *context); + +/** + * Find stream index based on format-specific stream ID + * @return stream index, or < 0 on error + */ +int liteav_ff_find_stream_index(AVFormatContext *s, int id); + +/** + * Internal version of av_index_search_timestamp + */ +int liteav_ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries, + int64_t wanted_timestamp, int flags); + +/** + * Internal version of av_add_index_entry + */ +int liteav_ff_add_index_entry(AVIndexEntry **index_entries, + int *nb_index_entries, + unsigned int *index_entries_allocated_size, + int64_t pos, int64_t timestamp, int size, int distance, int flags); + +void liteav_ff_configure_buffers_for_index(AVFormatContext *s, int64_t time_tolerance); + +/** + * Add a new chapter. 
+ * + * @param s media file handle + * @param id unique ID for this chapter + * @param start chapter start time in time_base units + * @param end chapter end time in time_base units + * @param title chapter title + * + * @return AVChapter or NULL on error + */ +AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, + int64_t start, int64_t end, const char *title); + +/** + * Ensure the index uses less memory than the maximum specified in + * AVFormatContext.max_index_size by discarding entries if it grows + * too large. + */ +void liteav_ff_reduce_index(AVFormatContext *s, int stream_index); + +enum AVCodecID liteav_ff_guess_image2_codec(const char *filename); + +/** + * Perform a binary search using av_index_search_timestamp() and + * AVInputFormat.read_timestamp(). + * + * @param target_ts target timestamp in the time base of the given stream + * @param stream_index stream number + */ +int liteav_ff_seek_frame_binary(AVFormatContext *s, int stream_index, + int64_t target_ts, int flags); + +/** + * Update cur_dts of all streams based on the given timestamp and AVStream. + * + * Stream ref_st unchanged, others set cur_dts in their native time base. + * Only needed for timestamp wrapping or if (dts not set and pts!=dts). + * @param timestamp new dts expressed in time_base of param ref_st + * @param ref_st reference stream giving time_base of param timestamp + */ +void liteav_ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp); + +int liteav_ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos, + int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )); + +/** + * Perform a binary search using read_timestamp(). 
+ * + * @param target_ts target timestamp in the time base of the given stream + * @param stream_index stream number + */ +int64_t liteav_ff_gen_search(AVFormatContext *s, int stream_index, + int64_t target_ts, int64_t pos_min, + int64_t pos_max, int64_t pos_limit, + int64_t ts_min, int64_t ts_max, + int flags, int64_t *ts_ret, + int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )); + +/** + * Set the time base and wrapping info for a given stream. This will be used + * to interpret the stream's timestamps. If the new time base is invalid + * (numerator or denominator are non-positive), it leaves the stream + * unchanged. + * + * @param s stream + * @param pts_wrap_bits number of bits effectively used by the pts + * (used for wrap control) + * @param pts_num time base numerator + * @param pts_den time base denominator + */ +void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits, + unsigned int pts_num, unsigned int pts_den); + +/** + * Add side data to a packet for changing parameters to the given values. + * Parameters set to 0 aren't included in the change. + */ +int liteav_ff_add_param_change(AVPacket *pkt, int32_t channels, + uint64_t channel_layout, int32_t sample_rate, + int32_t width, int32_t height); + +/** + * Set the timebase for each stream from the corresponding codec timebase and + * print it. + */ +int liteav_ff_framehash_write_header(AVFormatContext *s); + +/** + * Read a transport packet from a media file. + * + * @param s media file handle + * @param pkt is filled + * @return 0 if OK, AVERROR_xxx on error + */ +int liteav_ff_read_packet(AVFormatContext *s, AVPacket *pkt); + +/** + * Interleave a packet per dts in an output media file. + * + * Packets with pkt->destruct == av_destruct_packet will be freed inside this + * function, so they cannot be used after it. Note that calling liteav_av_packet_unref() + * on them is still safe. 
+ * + * @param s media file handle + * @param out the interleaved packet will be output here + * @param pkt the input packet + * @param flush 1 if no further packets are available as input and all + * remaining packets should be output + * @return 1 if a packet was output, 0 if no packet could be output, + * < 0 if an error occurred + */ +int liteav_ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, + AVPacket *pkt, int flush); + +/* + * check specified stream st is in s then free it(set to NULL in s->streams) + * do nothing else, like modify nb_streams or reorder streams in s->streams + */ +void liteav_ff_free_stream_only(AVFormatContext *s, AVStream *st); + +void liteav_ff_free_stream(AVFormatContext *s, AVStream *st); + +/** + * Return the frame duration in seconds. Return 0 if not available. + */ +void liteav_ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st, + AVCodecParserContext *pc, AVPacket *pkt); + +unsigned int liteav_ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id); + +enum AVCodecID liteav_ff_codec_get_id(const AVCodecTag *tags, unsigned int tag); + +/** + * Select a PCM codec based on the given parameters. + * + * @param bps bits-per-sample + * @param flt floating-point + * @param be big-endian + * @param sflags signed flags. each bit corresponds to one byte of bit depth. + * e.g. the 1st bit indicates if 8-bit should be signed or + * unsigned, the 2nd bit indicates if 16-bit should be signed or + * unsigned, etc... This is useful for formats such as WAVE where + * only 8-bit is unsigned and all other bit depths are signed. + * @return a PCM codec id or AV_CODEC_ID_NONE + */ +enum AVCodecID liteav_ff_get_pcm_codec_id(int bps, int flt, int be, int sflags); + +/** + * Chooses a timebase for muxing the specified stream. + * + * The chosen timebase allows sample accurate timestamps based + * on the framerate or sample rate for audio streams. 
It also is + * at least as precise as 1/min_precision would be. + */ +AVRational liteav_ff_choose_timebase(AVFormatContext *s, AVStream *st, int min_precision); + +/** + * Chooses a timebase for muxing the specified stream. + */ +enum AVChromaLocation liteav_ff_choose_chroma_location(AVFormatContext *s, AVStream *st); + +/** + * Generate standard extradata for AVC-Intra based on width/height and field + * order. + */ +int liteav_ff_generate_avci_extradata(AVStream *st); + +/** + * Add a bitstream filter to a stream. + * + * @param st output stream to add a filter to + * @param name the name of the filter to add + * @param args filter-specific argument string + * @return >0 on success; + * AVERROR code on failure + */ +int liteav_ff_stream_add_bitstream_filter(AVStream *st, const char *name, const char *args); + +/** + * Copy encoding parameters from source to destination stream + * + * @param dst pointer to destination AVStream + * @param src pointer to source AVStream + * @return >=0 on success, AVERROR code on error + */ +int liteav_ff_stream_encode_params_copy(AVStream *dst, const AVStream *src); + +/** + * Wrap errno on rename() error. + * + * @param oldpath source path + * @param newpath destination path + * @return 0 or AVERROR on failure + */ +static inline int liteav_ff_rename(const char *oldpath, const char *newpath, void *logctx) +{ + int ret = 0; + if (rename(oldpath, newpath) == -1) { + ret = AVERROR(errno); + if (logctx) { + char err[AV_ERROR_MAX_STRING_SIZE] = {0}; + av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, ret); + liteav_av_log(logctx, AV_LOG_ERROR, "failed to rename file %s to %s: %s\n", oldpath, newpath, err); + } + } + return ret; +} + +/** + * Allocate extradata with additional AV_INPUT_BUFFER_PADDING_SIZE at end + * which is always set to 0. + * + * Previously allocated extradata in par will be freed. 
+ * + * @param size size of extradata + * @return 0 if OK, AVERROR_xxx on error + */ +int liteav_ff_alloc_extradata(AVCodecParameters *par, int size); + +/** + * Allocate extradata with additional AV_INPUT_BUFFER_PADDING_SIZE at end + * which is always set to 0 and fill it from pb. + * + * @param size size of extradata + * @return >= 0 if OK, AVERROR_xxx on error + */ +int liteav_ff_get_extradata(AVFormatContext *s, AVCodecParameters *par, AVIOContext *pb, int size); + +/** + * add frame for rfps calculation. + * + * @param dts timestamp of the i-th frame + * @return 0 if OK, AVERROR_xxx on error + */ +int liteav_ff_rfps_add_frame(AVFormatContext *ic, AVStream *st, int64_t dts); + +void liteav_ff_rfps_calculate(AVFormatContext *ic); + +/** + * Flags for AVFormatContext.write_uncoded_frame() + */ +enum AVWriteUncodedFrameFlags { + + /** + * Query whether the feature is possible on this stream. + * The frame argument is ignored. + */ + AV_WRITE_UNCODED_FRAME_QUERY = 0x0001, + +}; + +/** + * Copies the whilelists from one context to the other + */ +int liteav_ff_copy_whiteblacklists(AVFormatContext *dst, const AVFormatContext *src); + +int liteav_ffio_open2_wrapper(struct AVFormatContext *s, AVIOContext **pb, const char *url, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * Returned by demuxers to indicate that data was consumed but discarded + * (ignored streams or junk data). The framework will re-call the demuxer. + */ +#define FFERROR_REDO FFERRTAG('R','E','D','O') + +/** + * Utility function to open IO stream of output format. 
+ * + * @param s AVFormatContext + * @param url URL or file name to open for writing + * @options optional options which will be passed to io_open callback + * @return >=0 on success, negative AVERROR in case of failure + */ +int liteav_ff_format_output_open(AVFormatContext *s, const char *url, AVDictionary **options); + +/* + * A wrapper around AVFormatContext.io_close that should be used + * instead of calling the pointer directly. + */ +void liteav_ff_format_io_close(AVFormatContext *s, AVIOContext **pb); + +/** + * Utility function to check if the file uses http or https protocol + * + * @param s AVFormatContext + * @param filename URL or file name to open for writing + */ +int liteav_ff_is_http_proto(char *filename); + +/** + * Parse creation_time in AVFormatContext metadata if exists and warn if the + * parsing fails. + * + * @param s AVFormatContext + * @param timestamp parsed timestamp in microseconds, only set on successful parsing + * @param return_seconds set this to get the number of seconds in timestamp instead of microseconds + * @return 1 if OK, 0 if the metadata was not present, AVERROR(EINVAL) on parse error + */ +int liteav_ff_parse_creation_time_metadata(AVFormatContext *s, int64_t *timestamp, int return_seconds); + +/** + * Standardize creation_time metadata in AVFormatContext to an ISO-8601 + * timestamp string. + * + * @param s AVFormatContext + * @return <0 on error + */ +int liteav_ff_standardize_creation_time(AVFormatContext *s); + +#define CONTAINS_PAL 2 +/** + * Reshuffles the lines to use the user specified stride. 
+ * + * @param ppkt input and output packet + * @return negative error code or + * 0 if no new packet was allocated + * non-zero if a new packet was allocated and ppkt has to be freed + * CONTAINS_PAL if in addition to a new packet the old contained a palette + */ +int liteav_ff_reshuffle_raw_rgb(AVFormatContext *s, AVPacket **ppkt, AVCodecParameters *par, int expected_stride); + +/** + * Retrieves the palette from a packet, either from side data, or + * appended to the video data in the packet itself (raw video only). + * It is commonly used after a call to liteav_ff_reshuffle_raw_rgb(). + * + * Use 0 for the ret parameter to check for side data only. + * + * @param pkt pointer to packet before calling liteav_ff_reshuffle_raw_rgb() + * @param ret return value from liteav_ff_reshuffle_raw_rgb(), or 0 + * @param palette pointer to palette buffer + * @return negative error code or + * 1 if the packet has a palette, else 0 + */ +int liteav_ff_get_packet_palette(AVFormatContext *s, AVPacket *pkt, int ret, uint32_t *palette); + +/** + * Finalize buf into extradata and set its size appropriately. + */ +int liteav_ff_bprint_to_codecpar_extradata(AVCodecParameters *par, struct AVBPrint *buf); + +/** + * Find the next packet in the interleaving queue for the given stream. + * The pkt parameter is filled in with the queued packet, including + * references to the data (which the caller is not allowed to keep or + * modify). + * + * @return 0 if a packet was found, a negative value if no packet was found + */ +int liteav_ff_interleaved_peek(AVFormatContext *s, int stream, + AVPacket *pkt, int add_offset); + + +int liteav_ff_lock_avformat(void); +int liteav_ff_unlock_avformat(void); + +/** + * Set AVFormatContext url field to the provided pointer. The pointer must + * point to a valid string. The existing url field is freed if necessary. Also + * set the legacy filename field to the same string which was provided in url. 
+ */ +void liteav_ff_format_set_url(AVFormatContext *s, char *url); + +#define FF_PACKETLIST_FLAG_REF_PACKET (1 << 0) /**< Create a new reference for the packet instead of + transferring the ownership of the existing one to the + list. */ + +/** + * Append an AVPacket to the list. + * + * @param head List head element + * @param tail List tail element + * @param pkt The packet being appended + * @param flags Any combination of FF_PACKETLIST_FLAG_* flags + * @return 0 on success, negative AVERROR value on failure. On failure, + the list is unchanged + */ +int liteav_ff_packet_list_put(AVPacketList **head, AVPacketList **tail, + AVPacket *pkt, int flags); + +/** + * Remove the oldest AVPacket in the list and return it. + * + * @note The pkt will be overwritten completely. The caller owns the + * packet and must unref it by itself. + * + * @param head List head element + * @param tail List tail element + * @param pkt Pointer to an initialized AVPacket struct + */ +int liteav_ff_packet_list_get(AVPacketList **head, AVPacketList **tail, + AVPacket *pkt); + +/** + * Wipe the list and unref all the packets in it. 
+ * + * @param head List head element + * @param tail List tail element + */ +void liteav_ff_packet_list_free(AVPacketList **head, AVPacketList **tail); + +void liteav_avpriv_register_devices(const AVOutputFormat * const o[], const AVInputFormat * const i[]); + +#endif /* AVFORMAT_INTERNAL_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/os_support.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/os_support.h new file mode 100644 index 0000000..172b2c2 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/os_support.h @@ -0,0 +1,248 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * various OS-feature replacement utilities + * copyright (c) 2000, 2001, 2002 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_OS_SUPPORT_H +#define AVFORMAT_OS_SUPPORT_H + +/** + * @file + * miscellaneous OS support macros and functions. 
+ */ + +#include "config.h" + +#include <sys/stat.h> + +#ifdef _WIN32 +#if HAVE_DIRECT_H +#include <direct.h> +#endif +#if HAVE_IO_H +#include <io.h> +#endif +#endif + +#ifdef _WIN32 +# include <fcntl.h> +# ifdef lseek +# undef lseek +# endif +# define lseek(f,p,w) _lseeki64((f), (p), (w)) +# ifdef stat +# undef stat +# endif +# define stat _stati64 +# ifdef fstat +# undef fstat +# endif +# define fstat(f,s) _fstati64((f), (s)) +#endif /* defined(_WIN32) */ + + +#ifdef __ANDROID__ +# if HAVE_UNISTD_H +# include <unistd.h> +# endif +# ifdef lseek +# undef lseek +# endif +# define lseek(f,p,w) lseek64((f), (p), (w)) +#endif + +static inline int is_dos_path(const char *path) +{ +#if HAVE_DOS_PATHS + if (path[0] && path[1] == ':') + return 1; +#endif + return 0; +} + +#if defined(__OS2__) +#define SHUT_RD 0 +#define SHUT_WR 1 +#define SHUT_RDWR 2 +#endif + +#if defined(_WIN32) +#define SHUT_RD SD_RECEIVE +#define SHUT_WR SD_SEND +#define SHUT_RDWR SD_BOTH + +#ifndef S_IRUSR +#define S_IRUSR S_IREAD +#endif +#ifndef S_IWUSR +#define S_IWUSR S_IWRITE +#endif +#endif + +#if CONFIG_NETWORK +#if !HAVE_SOCKLEN_T +typedef int socklen_t; +#endif + +/* most of the time closing a socket is just closing an fd */ +#if !HAVE_CLOSESOCKET +#define closesocket close +#endif + +#if !HAVE_POLL_H +typedef unsigned long nfds_t; + +#if HAVE_WINSOCK2_H +#include <winsock2.h> +#endif +#if !HAVE_STRUCT_POLLFD +struct pollfd { + int fd; + short events; /* events to look for */ + short revents; /* events that occurred */ +}; + +/* events & revents */ +#define POLLIN 0x0001 /* any readable data available */ +#define POLLOUT 0x0002 /* file descriptor is writeable */ +#define POLLRDNORM POLLIN +#define POLLWRNORM POLLOUT +#define POLLRDBAND 0x0008 /* priority readable data */ +#define POLLWRBAND 0x0010 /* priority data can be written */ +#define POLLPRI 0x0020 /* high priority readable data */ + +/* revents only */ +#define POLLERR 0x0004 /* errors pending */ +#define POLLHUP 0x0080 /* 
disconnected */ +#define POLLNVAL 0x1000 /* invalid file descriptor */ +#endif + + +int liteav_ff_poll(struct pollfd *fds, nfds_t numfds, int timeout); +#define poll liteav_ff_poll +#endif /* HAVE_POLL_H */ +#endif /* CONFIG_NETWORK */ + +#ifdef _WIN32 +#include <stdio.h> +#include <windows.h> +#include "libavutil/wchar_filename.h" + +#define DEF_FS_FUNCTION(name, wfunc, afunc) \ +static inline int win32_##name(const char *filename_utf8) \ +{ \ + wchar_t *filename_w; \ + int ret; \ + \ + if (utf8towchar(filename_utf8, &filename_w)) \ + return -1; \ + if (!filename_w) \ + goto fallback; \ + \ + ret = wfunc(filename_w); \ + liteav_av_free(filename_w); \ + return ret; \ + \ +fallback: \ + /* filename may be be in CP_ACP */ \ + return afunc(filename_utf8); \ +} + +DEF_FS_FUNCTION(unlink, _wunlink, _unlink) +DEF_FS_FUNCTION(mkdir, _wmkdir, _mkdir) +DEF_FS_FUNCTION(rmdir, _wrmdir , _rmdir) + +#define DEF_FS_FUNCTION2(name, wfunc, afunc, partype) \ +static inline int win32_##name(const char *filename_utf8, partype par) \ +{ \ + wchar_t *filename_w; \ + int ret; \ + \ + if (utf8towchar(filename_utf8, &filename_w)) \ + return -1; \ + if (!filename_w) \ + goto fallback; \ + \ + ret = wfunc(filename_w, par); \ + liteav_av_free(filename_w); \ + return ret; \ + \ +fallback: \ + /* filename may be be in CP_ACP */ \ + return afunc(filename_utf8, par); \ +} + +DEF_FS_FUNCTION2(access, _waccess, _access, int) +DEF_FS_FUNCTION2(stat, _wstati64, _stati64, struct stat*) + +static inline int win32_rename(const char *src_utf8, const char *dest_utf8) +{ + wchar_t *src_w, *dest_w; + int ret; + + if (utf8towchar(src_utf8, &src_w)) + return -1; + if (utf8towchar(dest_utf8, &dest_w)) { + liteav_av_free(src_w); + return -1; + } + if (!src_w || !dest_w) { + liteav_av_free(src_w); + liteav_av_free(dest_w); + goto fallback; + } + + ret = MoveFileExW(src_w, dest_w, MOVEFILE_REPLACE_EXISTING); + liteav_av_free(src_w); + liteav_av_free(dest_w); + // Lacking proper mapping from GetLastError() error 
codes to errno codes + if (ret) + errno = EPERM; + return ret; + +fallback: + /* filename may be be in CP_ACP */ +#if !HAVE_UWP + ret = MoveFileExA(src_utf8, dest_utf8, MOVEFILE_REPLACE_EXISTING); + if (ret) + errno = EPERM; +#else + /* Windows Phone doesn't have MoveFileExA, and for Windows Store apps, + * it is available but not allowed by the app certification kit. However, + * it's unlikely that anybody would input filenames in CP_ACP there, so this + * fallback is kept mostly for completeness. Alternatively we could + * do MultiByteToWideChar(CP_ACP) and use MoveFileExW, but doing + * explicit conversions with CP_ACP is allegedly forbidden in windows + * store apps (or windows phone), and the notion of a native code page + * doesn't make much sense there. */ + ret = rename(src_utf8, dest_utf8); +#endif + return ret; +} + +#define mkdir(a, b) win32_mkdir(a) +#define rename win32_rename +#define rmdir win32_rmdir +#define unlink win32_unlink +#define access win32_access + +#endif + +#endif /* AVFORMAT_OS_SUPPORT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/url.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/url.h new file mode 100644 index 0000000..34cc30e --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/url.h @@ -0,0 +1,345 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * unbuffered private I/O API + */ + +#ifndef AVFORMAT_URL_H +#define AVFORMAT_URL_H + +#include "avio.h" +#include "libavformat/version.h" + +#include "libavutil/dict.h" +#include "libavutil/log.h" + +#define URL_PROTOCOL_FLAG_NESTED_SCHEME 1 /*< The protocol name can be the first part of a nested protocol scheme */ +#define URL_PROTOCOL_FLAG_NETWORK 2 /*< The protocol uses network */ + +extern const AVClass liteav_ffurl_context_class; + +typedef struct URLContext { + const AVClass *av_class; /**< information for liteav_av_log(). Set by url_open(). */ + const struct URLProtocol *prot; + void *priv_data; + char *filename; /**< specified URL */ + int flags; + int max_packet_size; /**< if non zero, the stream is packetized with this max packet size */ + int is_streamed; /**< true if streamed (no seek possible), default = false */ + int is_connected; + AVIOInterruptCB interrupt_callback; + int64_t rw_timeout; /**< maximum time to wait for (network) read/write operation completion, in mcs */ + const char *protocol_whitelist; + const char *protocol_blacklist; + int nReopenTimes; + int min_packet_size; /**< if non zero, the stream is packetized with this min packet size */ +} URLContext; + +typedef struct URLProtocol { + const char *name; + int (*url_open)( URLContext *h, const char *url, int flags); + /** + * This callback is to be used by protocols which open further nested + * protocols. 
options are then to be passed to liteav_ffurl_open()/liteav_ffurl_connect() + * for those nested protocols. + */ + int (*url_open2)(URLContext *h, const char *url, int flags, AVDictionary **options); + int (*url_accept)(URLContext *s, URLContext **c); + int (*url_handshake)(URLContext *c); + + /** + * Read data from the protocol. + * If data is immediately available (even less than size), EOF is + * reached or an error occurs (including EINTR), return immediately. + * Otherwise: + * In non-blocking mode, return AVERROR(EAGAIN) immediately. + * In blocking mode, wait for data/EOF/error with a short timeout (0.1s), + * and return AVERROR(EAGAIN) on timeout. + * Checking interrupt_callback, looping on EINTR and EAGAIN and until + * enough data has been read is left to the calling function; see + * retry_transfer_wrapper in avio.c. + */ + int (*url_read)( URLContext *h, unsigned char *buf, int size); + int (*url_write)(URLContext *h, const unsigned char *buf, int size); + int64_t (*url_seek)( URLContext *h, int64_t pos, int whence); + int (*url_close)(URLContext *h); + int (*url_read_pause)(URLContext *h, int pause); + int64_t (*url_read_seek)(URLContext *h, int stream_index, + int64_t timestamp, int flags); + int (*url_get_file_handle)(URLContext *h); + int (*url_get_multi_file_handle)(URLContext *h, int **handles, + int *numhandles); + int (*url_get_short_seek)(URLContext *h); + int (*url_shutdown)(URLContext *h, int flags); + int priv_data_size; + const AVClass *priv_data_class; + int flags; + int (*url_check)(URLContext *h, int mask); + int (*url_open_dir)(URLContext *h); + int (*url_read_dir)(URLContext *h, AVIODirEntry **next); + int (*url_close_dir)(URLContext *h); + int (*url_delete)(URLContext *h); + int (*url_move)(URLContext *h_src, URLContext *h_dst); + const char *default_whitelist; +} URLProtocol; + +/** + * Create a URLContext for accessing to the resource indicated by + * url, but do not initiate the connection yet. 
+ * + * @param puc pointer to the location where, in case of success, the + * function puts the pointer to the created URLContext + * @param flags flags which control how the resource indicated by url + * is to be opened + * @param int_cb interrupt callback to use for the URLContext, may be + * NULL + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int liteav_ffurl_alloc(URLContext **puc, const char *filename, int flags, + const AVIOInterruptCB *int_cb); + +/** + * Connect an URLContext that has been allocated by liteav_ffurl_alloc + * + * @param options A dictionary filled with options for nested protocols, + * i.e. it will be passed to url_open2() for protocols implementing it. + * This parameter will be destroyed and replaced with a dict containing options + * that were not found. May be NULL. + */ +int liteav_ffurl_connect(URLContext *uc, AVDictionary **options); + +/** + * Create an URLContext for accessing to the resource indicated by + * url, and open it. + * + * @param puc pointer to the location where, in case of success, the + * function puts the pointer to the created URLContext + * @param flags flags which control how the resource indicated by url + * is to be opened + * @param int_cb interrupt callback to use for the URLContext, may be + * NULL + * @param options A dictionary filled with protocol-private options. On return + * this parameter will be destroyed and replaced with a dict containing options + * that were not found. May be NULL. + * @param parent An enclosing URLContext, whose generic options should + * be applied to this URLContext as well. 
+ * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int liteav_ffurl_open_whitelist(URLContext **puc, const char *filename, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options, + const char *whitelist, const char* blacklist, + URLContext *parent); + +int liteav_ffurl_open(URLContext **puc, const char *filename, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * Accept an URLContext c on an URLContext s + * + * @param s server context + * @param c client context, must be unallocated. + * @return >= 0 on success, liteav_ff_neterrno() on failure. + */ +int liteav_ffurl_accept(URLContext *s, URLContext **c); + +/** + * Perform one step of the protocol handshake to accept a new client. + * See liteav_avio_handshake() for details. + * Implementations should try to return decreasing values. + * If the protocol uses an underlying protocol, the underlying handshake is + * usually the first step, and the return value can be: + * (largest value for this protocol) + (return value from other protocol) + * + * @param c the client context + * @return >= 0 on success or a negative value corresponding + * to an AVERROR code on failure + */ +int liteav_ffurl_handshake(URLContext *c); + +/** + * Read up to size bytes from the resource accessed by h, and store + * the read bytes in buf. + * + * @return The number of bytes actually read, or a negative value + * corresponding to an AVERROR code in case of error. A value of zero + * indicates that it is not possible to read more from the accessed + * resource (except if the value of the size argument is also zero). + */ +int liteav_ffurl_read(URLContext *h, unsigned char *buf, int size); + +/** + * Read as many bytes as possible (up to size), calling the + * read function multiple times if necessary. 
+ * This makes special short-read handling in applications + * unnecessary, if the return value is < size then it is + * certain there was either an error or the end of file was reached. + */ +int liteav_ffurl_read_complete(URLContext *h, unsigned char *buf, int size); + +/** + * Write size bytes from buf to the resource accessed by h. + * + * @return the number of bytes actually written, or a negative value + * corresponding to an AVERROR code in case of failure + */ +int liteav_ffurl_write(URLContext *h, const unsigned char *buf, int size); + +/** + * Change the position that will be used by the next read/write + * operation on the resource accessed by h. + * + * @param pos specifies the new position to set + * @param whence specifies how pos should be interpreted, it must be + * one of SEEK_SET (seek from the beginning), SEEK_CUR (seek from the + * current position), SEEK_END (seek from the end), or AVSEEK_SIZE + * (return the filesize of the requested resource, pos is ignored). + * @return a negative value corresponding to an AVERROR code in case + * of failure, or the resulting file position, measured in bytes from + * the beginning of the file. You can use this feature together with + * SEEK_CUR to read the current file position. + */ +int64_t liteav_ffurl_seek(URLContext *h, int64_t pos, int whence); + +/** + * Close the resource accessed by the URLContext h, and free the + * memory used by it. Also set the URLContext pointer to NULL. + * + * @return a negative value if an error condition occurred, 0 + * otherwise + */ +int liteav_ffurl_closep(URLContext **h); +int liteav_ffurl_close(URLContext *h); + +/** + * Return the filesize of the resource accessed by h, AVERROR(ENOSYS) + * if the operation is not supported by h, or another negative value + * corresponding to an AVERROR error code in case of failure. + */ +int64_t liteav_ffurl_size(URLContext *h); + +/** + * Return the file descriptor associated with this URL. 
For RTP, this + * will return only the RTP file descriptor, not the RTCP file descriptor. + * + * @return the file descriptor associated with this URL, or <0 on error. + */ +int liteav_ffurl_get_file_handle(URLContext *h); + +/** + * Return the file descriptors associated with this URL. + * + * @return 0 on success or <0 on error. + */ +int liteav_ffurl_get_multi_file_handle(URLContext *h, int **handles, int *numhandles); + +/** + * Return the current short seek threshold value for this URL. + * + * @return threshold (>0) on success or <=0 on error. + */ +int liteav_ffurl_get_short_seek(URLContext *h); + +/** + * Signal the URLContext that we are done reading or writing the stream. + * + * @param h pointer to the resource + * @param flags flags which control how the resource indicated by url + * is to be shutdown + * + * @return a negative value if an error condition occurred, 0 + * otherwise + */ +int liteav_ffurl_shutdown(URLContext *h, int flags); + +/** + * Check if the user has requested to interrupt a blocking function + * associated with cb. + */ +int liteav_ff_check_interrupt(AVIOInterruptCB *cb); + +/* udp.c */ +int liteav_ff_udp_set_remote_url(URLContext *h, const char *uri); +int liteav_ff_udp_get_local_port(URLContext *h); + +/** + * Assemble a URL string from components. This is the reverse operation + * of av_url_split. + * + * Note, this requires networking to be initialized, so the caller must + * ensure liteav_ff_network_init has been called. + * + * @see av_url_split + * + * @param str the buffer to fill with the url + * @param size the size of the str buffer + * @param proto the protocol identifier, if null, the separator + * after the identifier is left out, too + * @param authorization an optional authorization string, may be null. + * An empty string is treated the same as a null string. 
+ * @param hostname the host name string + * @param port the port number, left out from the string if negative + * @param fmt a generic format string for everything to add after the + * host/port, may be null + * @return the number of characters written to the destination buffer + */ +int liteav_ff_url_join(char *str, int size, const char *proto, + const char *authorization, const char *hostname, + int port, const char *fmt, ...) av_printf_format(7, 8); + +/** + * Convert a relative url into an absolute url, given a base url. + * + * @param buf the buffer where output absolute url is written + * @param size the size of buf + * @param base the base url, may be equal to buf. + * @param rel the new url, which is interpreted relative to base + */ +void liteav_ff_make_absolute_url(char *buf, int size, const char *base, + const char *rel); + +/** + * Allocate directory entry with default values. + * + * @return entry or NULL on error + */ +AVIODirEntry *liteav_ff_alloc_dir_entry(void); + +const AVClass *liteav_ff_urlcontext_child_class_next(const AVClass *prev); + +/** + * Construct a list of protocols matching a given whitelist and/or blacklist. + * + * @param whitelist a comma-separated list of allowed protocol names or NULL. If + * this is a non-empty string, only protocols in this list will + * be included. + * @param blacklist a comma-separated list of forbidden protocol names or NULL. + * If this is a non-empty string, all protocols in this list + * will be excluded. + * + * @return a NULL-terminated array of matching protocols. The array must be + * freed by the caller. 
+ */ +const URLProtocol **liteav_ffurl_get_protocols(const char *whitelist, + const char *blacklist); + +#endif /* AVFORMAT_URL_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/version.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/version.h new file mode 100644 index 0000000..ac14a55 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavformat/version.h @@ -0,0 +1,111 @@ +/* + * Version macros. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_VERSION_H +#define AVFORMAT_VERSION_H + +/** + * @file + * @ingroup libavf + * Libavformat version macros + */ + +#include "libavutil/version.h" + +// Major bumping may affect Ticket5467, 5421, 5451(compatibility with Chromium) +// Also please add any ticket numbers that you believe might be affected here +#define LIBAVFORMAT_VERSION_MAJOR 58 +#define LIBAVFORMAT_VERSION_MINOR 20 +#define LIBAVFORMAT_VERSION_MICRO 100 + +#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ + LIBAVFORMAT_VERSION_MINOR, \ + LIBAVFORMAT_VERSION_MICRO) +#define LIBAVFORMAT_VERSION AV_VERSION(LIBAVFORMAT_VERSION_MAJOR, \ + LIBAVFORMAT_VERSION_MINOR, \ + LIBAVFORMAT_VERSION_MICRO) +#define LIBAVFORMAT_BUILD LIBAVFORMAT_VERSION_INT + +#define LIBAVFORMAT_IDENT "Lavf" AV_STRINGIFY(LIBAVFORMAT_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + * + * @note, when bumping the major version it is recommended to manually + * disable each FF_API_* in its own commit instead of disabling them all + * at once through the bump. This improves the git bisect-ability of the change. 
+ * + */ +#ifndef FF_API_COMPUTE_PKT_FIELDS2 +#define FF_API_COMPUTE_PKT_FIELDS2 (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_OLD_OPEN_CALLBACKS +#define FF_API_OLD_OPEN_CALLBACKS (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_LAVF_AVCTX +#define FF_API_LAVF_AVCTX (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_HTTP_USER_AGENT +#define FF_API_HTTP_USER_AGENT (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_HLS_WRAP +#define FF_API_HLS_WRAP (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_HLS_USE_LOCALTIME +#define FF_API_HLS_USE_LOCALTIME (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_LAVF_KEEPSIDE_FLAG +#define FF_API_LAVF_KEEPSIDE_FLAG (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_OLD_ROTATE_API +#define FF_API_OLD_ROTATE_API (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_FORMAT_GET_SET +#define FF_API_FORMAT_GET_SET (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_OLD_AVIO_EOF_0 +#define FF_API_OLD_AVIO_EOF_0 (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_LAVF_FFSERVER +#define FF_API_LAVF_FFSERVER (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_FORMAT_FILENAME +#define FF_API_FORMAT_FILENAME (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_OLD_RTSP_OPTIONS +#define FF_API_OLD_RTSP_OPTIONS (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_NEXT +#define FF_API_NEXT (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_DASH_MIN_SEG_DURATION +#define FF_API_DASH_MIN_SEG_DURATION (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_LAVF_MP4A_LATM +#define FF_API_LAVF_MP4A_LATM (LIBAVFORMAT_VERSION_MAJOR < 59) +#endif + + +#ifndef FF_API_R_FRAME_RATE +#define FF_API_R_FRAME_RATE 1 +#endif +#endif /* AVFORMAT_VERSION_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/adler32.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/adler32.h new 
file mode 100644 index 0000000..52f4781 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/adler32.h @@ -0,0 +1,61 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2006 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_adler32 + * Public header for Adler-32 hash function implementation. + */ + +#ifndef AVUTIL_ADLER32_H +#define AVUTIL_ADLER32_H + +#include <stdint.h> +#include "attributes.h" + +/** + * @defgroup lavu_adler32 Adler-32 + * @ingroup lavu_hash + * Adler-32 hash function implementation. + * + * @{ + */ + +/** + * Calculate the Adler32 checksum of a buffer. + * + * Passing the return value to a subsequent liteav_av_adler32_update() call + * allows the checksum of multiple buffers to be calculated as though + * they were concatenated. 
+ * + * @param adler initial checksum value + * @param buf pointer to input buffer + * @param len size of input buffer + * @return updated checksum + */ +unsigned long liteav_av_adler32_update(unsigned long adler, const uint8_t *buf, + unsigned int len) av_pure; + +/** + * @} + */ + +#endif /* AVUTIL_ADLER32_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/aes.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/aes.h new file mode 100644 index 0000000..ae41e45 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/aes.h @@ -0,0 +1,66 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AES_H +#define AVUTIL_AES_H + +#include <stdint.h> + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_aes AES + * @ingroup lavu_crypto + * @{ + */ + +extern const int liteav_av_aes_size; + +struct AVAES; + +/** + * Allocate an AVAES context. 
+ */ +struct AVAES *liteav_av_aes_alloc(void); + +/** + * Initialize an AVAES context. + * @param key_bits 128, 192 or 256 + * @param decrypt 0 for encryption, 1 for decryption + */ +int liteav_av_aes_init(struct AVAES *a, const uint8_t *key, int key_bits, int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * @param count number of 16 byte blocks + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_aes_crypt(struct AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_AES_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/aes_ctr.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/aes_ctr.h new file mode 100644 index 0000000..665955d --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/aes_ctr.h @@ -0,0 +1,89 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * AES-CTR cipher + * Copyright (c) 2015 Eran Kornblau <erankor at gmail dot com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AES_CTR_H +#define AVUTIL_AES_CTR_H + +#include <stdint.h> + +#include "attributes.h" +#include "version.h" + +#define AES_CTR_KEY_SIZE (16) +#define AES_CTR_IV_SIZE (8) + +struct AVAESCTR; + +/** + * Allocate an AVAESCTR context. + */ +struct AVAESCTR *liteav_av_aes_ctr_alloc(void); + +/** + * Initialize an AVAESCTR context. + * @param key encryption key, must have a length of AES_CTR_KEY_SIZE + */ +int liteav_av_aes_ctr_init(struct AVAESCTR *a, const uint8_t *key); + +/** + * Release an AVAESCTR context. + */ +void liteav_av_aes_ctr_free(struct AVAESCTR *a); + +/** + * Process a buffer using a previously initialized context. + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param size the size of src and dst + */ +void liteav_av_aes_ctr_crypt(struct AVAESCTR *a, uint8_t *dst, const uint8_t *src, int size); + +/** + * Get the current iv + */ +const uint8_t* liteav_av_aes_ctr_get_iv(struct AVAESCTR *a); + +/** + * Generate a random iv + */ +void liteav_av_aes_ctr_set_random_iv(struct AVAESCTR *a); + +/** + * Forcefully change the 8-byte iv + */ +void liteav_av_aes_ctr_set_iv(struct AVAESCTR *a, const uint8_t* iv); + +/** + * Forcefully change the "full" 16-byte iv, including the counter + */ +void liteav_av_aes_ctr_set_full_iv(struct AVAESCTR *a, const uint8_t* iv); + +/** + * Increment the top 64 bit of the iv (performed after each frame) + */ +void liteav_av_aes_ctr_increment_iv(struct AVAESCTR *a); + +/** + * @} + */ + +#endif /* AVUTIL_AES_CTR_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/attributes.h 
b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/attributes.h new file mode 100644 index 0000000..ced108a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/attributes.h @@ -0,0 +1,167 @@ +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Macro definitions for various function/variable attributes + */ + +#ifndef AVUTIL_ATTRIBUTES_H +#define AVUTIL_ATTRIBUTES_H + +#ifdef __GNUC__ +# define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y)) +# define AV_GCC_VERSION_AT_MOST(x,y) (__GNUC__ < (x) || __GNUC__ == (x) && __GNUC_MINOR__ <= (y)) +#else +# define AV_GCC_VERSION_AT_LEAST(x,y) 0 +# define AV_GCC_VERSION_AT_MOST(x,y) 0 +#endif + +#ifndef av_always_inline +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_always_inline __attribute__((always_inline)) inline +#elif defined(_MSC_VER) +# define av_always_inline __forceinline +#else +# define av_always_inline inline +#endif +#endif + +#ifndef av_extern_inline +#if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__) +# define av_extern_inline extern inline +#else +# 
define av_extern_inline inline +#endif +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,4) +# define av_warn_unused_result __attribute__((warn_unused_result)) +#else +# define av_warn_unused_result +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_noinline __attribute__((noinline)) +#elif defined(_MSC_VER) +# define av_noinline __declspec(noinline) +#else +# define av_noinline +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) || defined(__clang__) +# define av_pure __attribute__((pure)) +#else +# define av_pure +#endif + +#if AV_GCC_VERSION_AT_LEAST(2,6) || defined(__clang__) +# define av_const __attribute__((const)) +#else +# define av_const +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,3) || defined(__clang__) +# define av_cold __attribute__((cold)) +#else +# define av_cold +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,1) && !defined(__llvm__) +# define av_flatten __attribute__((flatten)) +#else +# define av_flatten +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define attribute_deprecated __attribute__((deprecated)) +#elif defined(_MSC_VER) +# define attribute_deprecated __declspec(deprecated) +#else +# define attribute_deprecated +#endif + +/** + * Disable warnings about deprecated features + * This is useful for sections of code kept for backward compatibility and + * scheduled for removal. + */ +#ifndef AV_NOWARN_DEPRECATED +#if AV_GCC_VERSION_AT_LEAST(4,6) +# define AV_NOWARN_DEPRECATED(code) \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \ + code \ + _Pragma("GCC diagnostic pop") +#elif defined(_MSC_VER) +# define AV_NOWARN_DEPRECATED(code) \ + __pragma(warning(push)) \ + __pragma(warning(disable : 4996)) \ + code; \ + __pragma(warning(pop)) +#else +# define AV_NOWARN_DEPRECATED(code) code +#endif +#endif + +#if defined(__GNUC__) || defined(__clang__) +# define av_unused __attribute__((unused)) +#else +# define av_unused +#endif + +/** + * Mark a variable as used and prevent the compiler from optimizing it + * away. 
This is useful for variables accessed only from inline + * assembler without the compiler being aware. + */ +#if AV_GCC_VERSION_AT_LEAST(3,1) || defined(__clang__) +# define av_used __attribute__((used)) +#else +# define av_used +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,3) || defined(__clang__) +# define av_alias __attribute__((may_alias)) +#else +# define av_alias +#endif + +#if (defined(__GNUC__) || defined(__clang__)) && !defined(__INTEL_COMPILER) +# define av_uninit(x) x=x +#else +# define av_uninit(x) x +#endif + +#if defined(__GNUC__) || defined(__clang__) +# define av_builtin_constant_p __builtin_constant_p +# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos))) +#else +# define av_builtin_constant_p(x) 0 +# define av_printf_format(fmtpos, attrpos) +#endif + +#if AV_GCC_VERSION_AT_LEAST(2,5) || defined(__clang__) +# define av_noreturn __attribute__((noreturn)) +#else +# define av_noreturn +#endif + +#endif /* AVUTIL_ATTRIBUTES_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/audio_fifo.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/audio_fifo.h new file mode 100644 index 0000000..a31f07e --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/audio_fifo.h @@ -0,0 +1,188 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Audio FIFO + * Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Audio FIFO Buffer + */ + +#ifndef AVUTIL_AUDIO_FIFO_H +#define AVUTIL_AUDIO_FIFO_H + +#include "avutil.h" +#include "fifo.h" +#include "samplefmt.h" + +/** + * @addtogroup lavu_audio + * @{ + * + * @defgroup lavu_audiofifo Audio FIFO Buffer + * @{ + */ + +/** + * Context for an Audio FIFO Buffer. + * + * - Operates at the sample level rather than the byte level. + * - Supports multiple channels with either planar or packed sample format. + * - Automatic reallocation when writing to a full buffer. + */ +typedef struct AVAudioFifo AVAudioFifo; + +/** + * Free an AVAudioFifo. + * + * @param af AVAudioFifo to free + */ +void liteav_av_audio_fifo_free(AVAudioFifo *af); + +/** + * Allocate an AVAudioFifo. + * + * @param sample_fmt sample format + * @param channels number of channels + * @param nb_samples initial allocation size, in samples + * @return newly allocated AVAudioFifo, or NULL on error + */ +AVAudioFifo *liteav_av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels, + int nb_samples); + +/** + * Reallocate an AVAudioFifo. + * + * @param af AVAudioFifo to reallocate + * @param nb_samples new allocation size, in samples + * @return 0 if OK, or negative AVERROR code on failure + */ +av_warn_unused_result +int liteav_av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples); + +/** + * Write data to an AVAudioFifo. + * + * The AVAudioFifo will be reallocated automatically if the available space + * is less than nb_samples. 
+ * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param af AVAudioFifo to write to + * @param data audio data plane pointers + * @param nb_samples number of samples to write + * @return number of samples actually written, or negative AVERROR + * code on failure. If successful, the number of samples + * actually written will always be nb_samples. + */ +int liteav_av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Peek data from an AVAudioFifo. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param af AVAudioFifo to read from + * @param data audio data plane pointers + * @param nb_samples number of samples to peek + * @return number of samples actually peek, or negative AVERROR code + * on failure. The number of samples actually peek will not + * be greater than nb_samples, and will only be less than + * nb_samples if liteav_av_audio_fifo_size is less than nb_samples. + */ +int liteav_av_audio_fifo_peek(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Peek data from an AVAudioFifo. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param af AVAudioFifo to read from + * @param data audio data plane pointers + * @param nb_samples number of samples to peek + * @param offset offset from current read position + * @return number of samples actually peek, or negative AVERROR code + * on failure. The number of samples actually peek will not + * be greater than nb_samples, and will only be less than + * nb_samples if liteav_av_audio_fifo_size is less than nb_samples. + */ +int liteav_av_audio_fifo_peek_at(AVAudioFifo *af, void **data, int nb_samples, int offset); + +/** + * Read data from an AVAudioFifo. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. 
+ * + * @param af AVAudioFifo to read from + * @param data audio data plane pointers + * @param nb_samples number of samples to read + * @return number of samples actually read, or negative AVERROR code + * on failure. The number of samples actually read will not + * be greater than nb_samples, and will only be less than + * nb_samples if liteav_av_audio_fifo_size is less than nb_samples. + */ +int liteav_av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Drain data from an AVAudioFifo. + * + * Removes the data without reading it. + * + * @param af AVAudioFifo to drain + * @param nb_samples number of samples to drain + * @return 0 if OK, or negative AVERROR code on failure + */ +int liteav_av_audio_fifo_drain(AVAudioFifo *af, int nb_samples); + +/** + * Reset the AVAudioFifo buffer. + * + * This empties all data in the buffer. + * + * @param af AVAudioFifo to reset + */ +void liteav_av_audio_fifo_reset(AVAudioFifo *af); + +/** + * Get the current number of samples in the AVAudioFifo available for reading. + * + * @param af the AVAudioFifo to query + * @return number of samples available for reading + */ +int liteav_av_audio_fifo_size(AVAudioFifo *af); + +/** + * Get the current number of samples in the AVAudioFifo available for writing. 
+ * + * @param af the AVAudioFifo to query + * @return number of samples available for writing + */ +int liteav_av_audio_fifo_space(AVAudioFifo *af); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_AUDIO_FIFO_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avassert.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avassert.h new file mode 100644 index 0000000..b73cd94 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avassert.h @@ -0,0 +1,76 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2010 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simple assert() macros that are a bit more flexible than ISO C assert(). + * @author Michael Niedermayer <michaelni@gmx.at> + */ + +#ifndef AVUTIL_AVASSERT_H +#define AVUTIL_AVASSERT_H + +#include <stdlib.h> +#include "avutil.h" +#include "log.h" + +/** + * assert() equivalent, that is always enabled. 
+ */ +#define av_assert0(cond) do { \ + if (!(cond)) { \ + liteav_av_log(NULL, AV_LOG_PANIC, "Assertion %s failed at %s:%d\n", \ + AV_STRINGIFY(cond), __FILE__, __LINE__); \ + abort(); \ + } \ +} while (0) + + +/** + * assert() equivalent, that does not lie in speed critical code. + * These asserts() thus can be enabled without fearing speed loss. + */ +#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0 +#define av_assert1(cond) av_assert0(cond) +#else +#define av_assert1(cond) ((void)0) +#endif + + +/** + * assert() equivalent, that does lie in speed critical code. + */ +#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1 +#define av_assert2(cond) av_assert0(cond) +#define av_assert2_fpu() av_assert0_fpu() +#else +#define av_assert2(cond) ((void)0) +#define av_assert2_fpu() ((void)0) +#endif + +/** + * Assert that floating point operations can be executed. + * + * This will av_assert0() that the cpu is not in MMX state on X86 + */ +void av_assert0_fpu(void); + +#endif /* AVUTIL_AVASSERT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avconfig.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avconfig.h new file mode 100644 index 0000000..41fbcb6 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avconfig.h @@ -0,0 +1,16 @@ +/* Generated by ffmpeg configure */ +#ifndef AVUTIL_AVCONFIG_H +#define AVUTIL_AVCONFIG_H + +#if defined(FFMPEG_AV_HAVE_BIGENDIAN) +#define AV_HAVE_BIGENDIAN 1 +#else +#define AV_HAVE_BIGENDIAN 0 +#endif + +#if defined(FFMPEG_AV_HAVE_FAST_UNALIGNED) +#define AV_HAVE_FAST_UNALIGNED 1 +#else +#define AV_HAVE_FAST_UNALIGNED 0 +#endif +#endif /* AVUTIL_AVCONFIG_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avstring.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avstring.h new file 
mode 100644 index 0000000..9e829af --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avstring.h @@ -0,0 +1,408 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2007 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AVSTRING_H +#define AVUTIL_AVSTRING_H + +#include <stddef.h> +#include <stdint.h> +#include "attributes.h" + +/** + * @addtogroup lavu_string + * @{ + */ + +/** + * Return non-zero if pfx is a prefix of str. If it is, *ptr is set to + * the address of the first character in str after the prefix. + * + * @param str input string + * @param pfx prefix to test + * @param ptr updated if the prefix is matched inside str + * @return non-zero if the prefix matches, zero otherwise + */ +int liteav_av_strstart(const char *str, const char *pfx, const char **ptr); + +/** + * Return non-zero if pfx is a prefix of str independent of case. If + * it is, *ptr is set to the address of the first character in str + * after the prefix. 
+ * + * @param str input string + * @param pfx prefix to test + * @param ptr updated if the prefix is matched inside str + * @return non-zero if the prefix matches, zero otherwise + */ +int liteav_av_stristart(const char *str, const char *pfx, const char **ptr); + +/** + * Locate the first case-independent occurrence in the string haystack + * of the string needle. A zero-length string needle is considered to + * match at the start of haystack. + * + * This function is a case-insensitive version of the standard strstr(). + * + * @param haystack string to search in + * @param needle string to search for + * @return pointer to the located match within haystack + * or a null pointer if no match + */ +char *liteav_av_stristr(const char *haystack, const char *needle); + +/** + * Locate the first occurrence of the string needle in the string haystack + * where not more than hay_length characters are searched. A zero-length + * string needle is considered to match at the start of haystack. + * + * This function is a length-limited version of the standard strstr(). + * + * @param haystack string to search in + * @param needle string to search for + * @param hay_length length of string to search in + * @return pointer to the located match within haystack + * or a null pointer if no match + */ +char *liteav_av_strnstr(const char *haystack, const char *needle, size_t hay_length); + +/** + * Copy the string src to dst, but no more than size - 1 bytes, and + * null-terminate dst. + * + * This function is the same as BSD strlcpy(). + * + * @param dst destination buffer + * @param src source string + * @param size size of destination buffer + * @return the length of src + * + * @warning since the return value is the length of src, src absolutely + * _must_ be a properly 0-terminated string, otherwise this will read beyond + * the end of the buffer and possibly crash. 
+ */ +size_t liteav_av_strlcpy(char *dst, const char *src, size_t size); + +/** + * Append the string src to the string dst, but to a total length of + * no more than size - 1 bytes, and null-terminate dst. + * + * This function is similar to BSD strlcat(), but differs when + * size <= strlen(dst). + * + * @param dst destination buffer + * @param src source string + * @param size size of destination buffer + * @return the total length of src and dst + * + * @warning since the return value use the length of src and dst, these + * absolutely _must_ be a properly 0-terminated strings, otherwise this + * will read beyond the end of the buffer and possibly crash. + */ +size_t liteav_av_strlcat(char *dst, const char *src, size_t size); + +/** + * Append output to a string, according to a format. Never write out of + * the destination buffer, and always put a terminating 0 within + * the buffer. + * @param dst destination buffer (string to which the output is + * appended) + * @param size total size of the destination buffer + * @param fmt printf-compatible format string, specifying how the + * following parameters are used + * @return the length of the string that would have been generated + * if enough space had been available + */ +size_t liteav_av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4); + +/** + * Get the count of continuous non zero chars starting from the beginning. + * + * @param len maximum number of characters to check in the string, that + * is the maximum value which is returned by the function + */ +static inline size_t av_strnlen(const char *s, size_t len) +{ + size_t i; + for (i = 0; i < len && s[i]; i++) + ; + return i; +} + +/** + * Print arguments following specified format into a large enough auto + * allocated buffer. It is similar to GNU asprintf(). + * @param fmt printf-compatible format string, specifying how the + * following parameters are used. 
+ * @return the allocated string + * @note You have to free the string yourself with liteav_av_free(). + */ +char *liteav_av_asprintf(const char *fmt, ...) av_printf_format(1, 2); + +/** + * Convert a number to an av_malloced string. + */ +char *liteav_av_d2str(double d); + +/** + * Unescape the given string until a non escaped terminating char, + * and return the token corresponding to the unescaped string. + * + * The normal \ and ' escaping is supported. Leading and trailing + * whitespaces are removed, unless they are escaped with '\' or are + * enclosed between ''. + * + * @param buf the buffer to parse, buf will be updated to point to the + * terminating char + * @param term a 0-terminated list of terminating chars + * @return the malloced unescaped string, which must be av_freed by + * the user, NULL in case of allocation failure + */ +char *liteav_av_get_token(const char **buf, const char *term); + +/** + * Split the string into several tokens which can be accessed by + * successive calls to liteav_av_strtok(). + * + * A token is defined as a sequence of characters not belonging to the + * set specified in delim. + * + * On the first call to liteav_av_strtok(), s should point to the string to + * parse, and the value of saveptr is ignored. In subsequent calls, s + * should be NULL, and saveptr should be unchanged since the previous + * call. + * + * This function is similar to strtok_r() defined in POSIX.1. + * + * @param s the string to parse, may be NULL + * @param delim 0-terminated list of token delimiters, must be non-NULL + * @param saveptr user-provided pointer which points to stored + * information necessary for liteav_av_strtok() to continue scanning the same + * string. 
saveptr is updated to point to the next character after the + * first delimiter found, or to NULL if the string was terminated + * @return the found token, or NULL when no token is found + */ +char *liteav_av_strtok(char *s, const char *delim, char **saveptr); + +/** + * Locale-independent conversion of ASCII isdigit. + */ +static inline av_const int av_isdigit(int c) +{ + return c >= '0' && c <= '9'; +} + +/** + * Locale-independent conversion of ASCII isgraph. + */ +static inline av_const int av_isgraph(int c) +{ + return c > 32 && c < 127; +} + +/** + * Locale-independent conversion of ASCII isspace. + */ +static inline av_const int av_isspace(int c) +{ + return c == ' ' || c == '\f' || c == '\n' || c == '\r' || c == '\t' || + c == '\v'; +} + +/** + * Locale-independent conversion of ASCII characters to uppercase. + */ +static inline av_const int av_toupper(int c) +{ + if (c >= 'a' && c <= 'z') + c ^= 0x20; + return c; +} + +/** + * Locale-independent conversion of ASCII characters to lowercase. + */ +static inline av_const int av_tolower(int c) +{ + if (c >= 'A' && c <= 'Z') + c ^= 0x20; + return c; +} + +/** + * Locale-independent conversion of ASCII isxdigit. + */ +static inline av_const int av_isxdigit(int c) +{ + c = av_tolower(c); + return av_isdigit(c) || (c >= 'a' && c <= 'f'); +} + +/** + * Locale-independent case-insensitive compare. + * @note This means only ASCII-range characters are case-insensitive + */ +int liteav_av_strcasecmp(const char *a, const char *b); + +/** + * Locale-independent case-insensitive compare. + * @note This means only ASCII-range characters are case-insensitive + */ +int liteav_av_strncasecmp(const char *a, const char *b, size_t n); + +/** + * Locale-independent strings replace. + * @note This means only ASCII-range characters are replace + */ +char *liteav_av_strireplace(const char *str, const char *from, const char *to); + +/** + * Thread safe basename. + * @param path the path, on DOS both \ and / are considered separators. 
+ * @return pointer to the basename substring. + */ +const char *liteav_av_basename(const char *path); + +/** + * Thread safe dirname. + * @param path the path, on DOS both \ and / are considered separators. + * @return the path with the separator replaced by the string terminator or ".". + * @note the function may change the input string. + */ +const char *liteav_av_dirname(char *path); + +/** + * Match instances of a name in a comma-separated list of names. + * List entries are checked from the start to the end of the names list, + * the first match ends further processing. If an entry prefixed with '-' + * matches, then 0 is returned. The "ALL" list entry is considered to + * match all names. + * + * @param name Name to look for. + * @param names List of names. + * @return 1 on match, 0 otherwise. + */ +int liteav_av_match_name(const char *name, const char *names); + +/** + * Append path component to the existing path. + * Path separator '/' is placed between when needed. + * Resulting string have to be freed with liteav_av_free(). + * @param path base path + * @param component component to be appended + * @return new path or NULL on error. + */ +char *liteav_av_append_path_component(const char *path, const char *component); + +enum AVEscapeMode { + AV_ESCAPE_MODE_AUTO, ///< Use auto-selected escaping mode. + AV_ESCAPE_MODE_BACKSLASH, ///< Use backslash escaping. + AV_ESCAPE_MODE_QUOTE, ///< Use single-quote escaping. +}; + +/** + * Consider spaces special and escape them even in the middle of the + * string. + * + * This is equivalent to adding the whitespace characters to the special + * characters lists, except it is guaranteed to use the exact same list + * of whitespace characters as the rest of libavutil. + */ +#define AV_ESCAPE_FLAG_WHITESPACE (1 << 0) + +/** + * Escape only specified special characters. + * Without this flag, escape also any characters that may be considered + * special by liteav_av_get_token(), such as the single quote. 
+ */ +#define AV_ESCAPE_FLAG_STRICT (1 << 1) + +/** + * Escape string in src, and put the escaped string in an allocated + * string in *dst, which must be freed with liteav_av_free(). + * + * @param dst pointer where an allocated string is put + * @param src string to escape, must be non-NULL + * @param special_chars string containing the special characters which + * need to be escaped, can be NULL + * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros. + * Any unknown value for mode will be considered equivalent to + * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without + * notice. + * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_ macros + * @return the length of the allocated string, or a negative error code in case of error + * @see liteav_av_bprint_escape() + */ +av_warn_unused_result +int liteav_av_escape(char **dst, const char *src, const char *special_chars, + enum AVEscapeMode mode, int flags); + +#define AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES 1 ///< accept codepoints over 0x10FFFF +#define AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS 2 ///< accept non-characters - 0xFFFE and 0xFFFF +#define AV_UTF8_FLAG_ACCEPT_SURROGATES 4 ///< accept UTF-16 surrogates codes +#define AV_UTF8_FLAG_EXCLUDE_XML_INVALID_CONTROL_CODES 8 ///< exclude control codes not accepted by XML + +#define AV_UTF8_FLAG_ACCEPT_ALL \ + AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES|AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS|AV_UTF8_FLAG_ACCEPT_SURROGATES + +/** + * Read and decode a single UTF-8 code point (character) from the + * buffer in *buf, and update *buf to point to the next byte to + * decode. + * + * In case of an invalid byte sequence, the pointer will be updated to + * the next byte after the invalid sequence and the function will + * return an error code. + * + * Depending on the specified flags, the function will also fail in + * case the decoded code point does not belong to a valid range. 
+ * + * @note For speed-relevant code a carefully implemented use of + * GET_UTF8() may be preferred. + * + * @param codep pointer used to return the parsed code in case of success. + * The value in *codep is set even in case the range check fails. + * @param bufp pointer to the address the first byte of the sequence + * to decode, updated by the function to point to the + * byte next after the decoded sequence + * @param buf_end pointer to the end of the buffer, points to the next + * byte past the last in the buffer. This is used to + * avoid buffer overreads (in case of an unfinished + * UTF-8 sequence towards the end of the buffer). + * @param flags a collection of AV_UTF8_FLAG_* flags + * @return >= 0 in case a sequence was successfully read, a negative + * value in case of invalid sequence + */ +av_warn_unused_result +int liteav_av_utf8_decode(int32_t *codep, const uint8_t **bufp, const uint8_t *buf_end, + unsigned int flags); + +/** + * Check if a name is in a list. + * @returns 0 if not found, or the 1 based index where it has been found in the + * list. + */ +int liteav_av_match_list(const char *name, const char *list, char separator); + +/** + * @} + */ + +#endif /* AVUTIL_AVSTRING_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avutil.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avutil.h new file mode 100644 index 0000000..1f5d53f --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/avutil.h @@ -0,0 +1,366 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AVUTIL_H +#define AVUTIL_AVUTIL_H + +/** + * @file + * @ingroup lavu + * Convenience header that includes @ref lavu "libavutil"'s core. + */ + +/** + * @mainpage + * + * @section ffmpeg_intro Introduction + * + * This document describes the usage of the different libraries + * provided by FFmpeg. + * + * @li @ref libavc "libavcodec" encoding/decoding library + * @li @ref lavfi "libavfilter" graph-based frame editing library + * @li @ref libavf "libavformat" I/O and muxing/demuxing library + * @li @ref lavd "libavdevice" special devices muxing/demuxing library + * @li @ref lavu "libavutil" common utility library + * @li @ref lswr "libswresample" audio resampling, format conversion and mixing + * @li @ref lpp "libpostproc" post processing library + * @li @ref libsws "libswscale" color conversion and scaling library + * + * @section ffmpeg_versioning Versioning and compatibility + * + * Each of the FFmpeg libraries contains a version.h header, which defines a + * major, minor and micro version number with the + * <em>LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO}</em> macros. The major version + * number is incremented with backward incompatible changes - e.g. 
removing + * parts of the public API, reordering public struct members, etc. The minor + * version number is incremented for backward compatible API changes or major + * new features - e.g. adding a new public function or a new decoder. The micro + * version number is incremented for smaller changes that a calling program + * might still want to check for - e.g. changing behavior in a previously + * unspecified situation. + * + * FFmpeg guarantees backward API and ABI compatibility for each library as long + * as its major version number is unchanged. This means that no public symbols + * will be removed or renamed. Types and names of the public struct members and + * values of public macros and enums will remain the same (unless they were + * explicitly declared as not part of the public API). Documented behavior will + * not change. + * + * In other words, any correct program that works with a given FFmpeg snapshot + * should work just as well without any changes with any later snapshot with the + * same major versions. This applies to both rebuilding the program against new + * FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program + * links against. + * + * However, new public symbols may be added and new members may be appended to + * public structs whose size is not part of public ABI (most public structs in + * FFmpeg). New macros and enum values may be added. Behavior in undocumented + * situations may change slightly (and be documented). All those are accompanied + * by an entry in doc/APIchanges and incrementing either the minor or micro + * version number. + */ + +/** + * @defgroup lavu libavutil + * Common code shared across all FFmpeg libraries. + * + * @note + * libavutil is designed to be modular. In most cases, in order to use the + * functions provided by one component of libavutil you must explicitly include + * the specific header containing that feature. 
If you are only using + * media-related components, you could simply include libavutil/avutil.h, which + * brings in most of the "core" components. + * + * @{ + * + * @defgroup lavu_crypto Crypto and Hashing + * + * @{ + * @} + * + * @defgroup lavu_math Mathematics + * @{ + * + * @} + * + * @defgroup lavu_string String Manipulation + * + * @{ + * + * @} + * + * @defgroup lavu_mem Memory Management + * + * @{ + * + * @} + * + * @defgroup lavu_data Data Structures + * @{ + * + * @} + * + * @defgroup lavu_video Video related + * + * @{ + * + * @} + * + * @defgroup lavu_audio Audio related + * + * @{ + * + * @} + * + * @defgroup lavu_error Error Codes + * + * @{ + * + * @} + * + * @defgroup lavu_log Logging Facility + * + * @{ + * + * @} + * + * @defgroup lavu_misc Other + * + * @{ + * + * @defgroup preproc_misc Preprocessor String Macros + * + * @{ + * + * @} + * + * @defgroup version_utils Library Version Macros + * + * @{ + * + * @} + */ + + +/** + * @addtogroup lavu_ver + * @{ + */ + +/** + * Return the LIBAVUTIL_VERSION_INT constant. + */ +unsigned avutil_version(void); + +/** + * Return an informative version string. This usually is the actual release + * version number or a git commit description. This string has no fixed format + * and can change any time. It should never be parsed by code. + */ +const char *av_version_info(void); + +/** + * Return the libavutil build-time configuration. + */ +const char *avutil_configuration(void); + +/** + * Return the libavutil license. 
+ */ +const char *avutil_license(void); + +/** + * @} + */ + +/** + * @addtogroup lavu_media Media Type + * @brief Media Type + */ + +enum AVMediaType { + AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA + AVMEDIA_TYPE_VIDEO, + AVMEDIA_TYPE_AUDIO, + AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous + AVMEDIA_TYPE_SUBTITLE, + AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse + AVMEDIA_TYPE_NB +}; + +/** + * Return a string describing the media_type enum, NULL if media_type + * is unknown. + */ +const char *av_get_media_type_string(enum AVMediaType media_type); + +/** + * @defgroup lavu_const Constants + * @{ + * + * @defgroup lavu_enc Encoding specific + * + * @note those definition should move to avcodec + * @{ + */ + +#define FF_LAMBDA_SHIFT 7 +#define FF_LAMBDA_SCALE (1<<FF_LAMBDA_SHIFT) +#define FF_QP2LAMBDA 118 ///< factor to convert from H.263 QP to lambda +#define FF_LAMBDA_MAX (256*128-1) + +#define FF_QUALITY_SCALE FF_LAMBDA_SCALE //FIXME maybe remove + +/** + * @} + * @defgroup lavu_time Timestamp specific + * + * FFmpeg internal timebase and timestamp definitions + * + * @{ + */ + +/** + * @brief Undefined timestamp value + * + * Usually reported by demuxer that work on containers that do not provide + * either pts or dts. + */ + +#define AV_NOPTS_VALUE ((int64_t)UINT64_C(0x8000000000000000)) + +/** + * Internal time base represented as integer + */ + +#define AV_TIME_BASE 1000000 + +/** + * Internal time base represented as fractional value + */ + +#define AV_TIME_BASE_Q (AVRational){1, AV_TIME_BASE} + +/** + * @} + * @} + * @defgroup lavu_picture Image related + * + * AVPicture types, pixel formats and basic image planes manipulation. 
+ * + * @{ + */ + +enum AVPictureType { + AV_PICTURE_TYPE_NONE = 0, ///< Undefined + AV_PICTURE_TYPE_I, ///< Intra + AV_PICTURE_TYPE_P, ///< Predicted + AV_PICTURE_TYPE_B, ///< Bi-dir predicted + AV_PICTURE_TYPE_S, ///< S(GMC)-VOP MPEG-4 + AV_PICTURE_TYPE_SI, ///< Switching Intra + AV_PICTURE_TYPE_SP, ///< Switching Predicted + AV_PICTURE_TYPE_BI, ///< BI type +}; + +/** + * Return a single letter to describe the given picture type + * pict_type. + * + * @param[in] pict_type the picture type @return a single character + * representing the picture type, '?' if pict_type is unknown + */ +char av_get_picture_type_char(enum AVPictureType pict_type); + +/** + * @} + */ + +#include "common.h" +#include "error.h" +#include "rational.h" +#include "version.h" +#include "macros.h" +#include "mathematics.h" +#include "log.h" +#include "pixfmt.h" + +/** + * Return x default pointer in case p is NULL. + */ +static inline void *av_x_if_null(const void *p, const void *x) +{ + return (void *)(intptr_t)(p ? p : x); +} + +/** + * Compute the length of an integer list. + * + * @param elsize size in bytes of each list element (only 1, 2, 4 or 8) + * @param term list terminator (usually 0 or -1) + * @param list pointer to the list + * @return length of the list, in elements, not counting the terminator + */ +unsigned av_int_list_length_for_size(unsigned elsize, + const void *list, uint64_t term) av_pure; + +/** + * Compute the length of an integer list. + * + * @param term list terminator (usually 0 or -1) + * @param list pointer to the list + * @return length of the list, in elements, not counting the terminator + */ +#define av_int_list_length(list, term) \ + av_int_list_length_for_size(sizeof(*(list)), list, term) + +/** + * Open a file using a UTF-8 filename. + * The API of this function matches POSIX fopen(), errors are returned through + * errno. 
+ */ +FILE *liteav_av_fopen_utf8(const char *path, const char *mode); + +/** + * Return the fractional representation of the internal time base. + */ +AVRational av_get_time_base_q(void); + +#define AV_FOURCC_MAX_STRING_SIZE 32 + +#define av_fourcc2str(fourcc) av_fourcc_make_string((char[AV_FOURCC_MAX_STRING_SIZE]){0}, fourcc) + +/** + * Fill the provided buffer with a string containing a FourCC (four-character + * code) representation. + * + * @param buf a buffer with size in bytes of at least AV_FOURCC_MAX_STRING_SIZE + * @param fourcc the fourcc to represent + * @return the buffer in input + */ +char *av_fourcc_make_string(char *buf, uint32_t fourcc); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_AVUTIL_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/base64.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/base64.h new file mode 100644 index 0000000..29bf711 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/base64.h @@ -0,0 +1,73 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com) + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_BASE64_H +#define AVUTIL_BASE64_H + +#include <stdint.h> + +/** + * @defgroup lavu_base64 Base64 + * @ingroup lavu_crypto + * @{ + */ + +/** + * Decode a base64-encoded string. + * + * @param out buffer for decoded data + * @param in null-terminated input string + * @param out_size size in bytes of the out buffer, must be at + * least 3/4 of the length of in, that is AV_BASE64_DECODE_SIZE(strlen(in)) + * @return number of bytes written, or a negative value in case of + * invalid input + */ +int liteav_av_base64_decode(uint8_t *out, const char *in, int out_size); + +/** + * Calculate the output size in bytes needed to decode a base64 string + * with length x to a data buffer. + */ +#define AV_BASE64_DECODE_SIZE(x) ((x) * 3LL / 4) + +/** + * Encode data to base64 and null-terminate. + * + * @param out buffer for encoded data + * @param out_size size in bytes of the out buffer (including the + * null terminator), must be at least AV_BASE64_SIZE(in_size) + * @param in input buffer containing the data to encode + * @param in_size size in bytes of the in buffer + * @return out or NULL in case of error + */ +char *liteav_av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size); + +/** + * Calculate the output size needed to base64-encode x bytes to a + * null-terminated string. 
+ */ +#define AV_BASE64_SIZE(x) (((x)+2) / 3 * 4 + 1) + + /** + * @} + */ + +#endif /* AVUTIL_BASE64_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/blowfish.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/blowfish.h new file mode 100644 index 0000000..e4b9f6f --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/blowfish.h @@ -0,0 +1,83 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Blowfish algorithm + * Copyright (c) 2012 Samuel Pitoiset + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_BLOWFISH_H +#define AVUTIL_BLOWFISH_H + +#include <stdint.h> + +/** + * @defgroup lavu_blowfish Blowfish + * @ingroup lavu_crypto + * @{ + */ + +#define AV_BF_ROUNDS 16 + +typedef struct AVBlowfish { + uint32_t p[AV_BF_ROUNDS + 2]; + uint32_t s[4][256]; +} AVBlowfish; + +/** + * Allocate an AVBlowfish context. + */ +AVBlowfish *liteav_av_blowfish_alloc(void); + +/** + * Initialize an AVBlowfish context. 
+ * + * @param ctx an AVBlowfish context + * @param key a key + * @param key_len length of the key + */ +void liteav_av_blowfish_init(struct AVBlowfish *ctx, const uint8_t *key, int key_len); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVBlowfish context + * @param xl left four bytes halves of input to be encrypted + * @param xr right four bytes halves of input to be encrypted + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_blowfish_crypt_ecb(struct AVBlowfish *ctx, uint32_t *xl, uint32_t *xr, + int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVBlowfish context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_blowfish_crypt(struct AVBlowfish *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_BLOWFISH_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/bprint.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/bprint.h new file mode 100644 index 0000000..23b028a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/bprint.h @@ -0,0 +1,220 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2012 Nicolas George + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_BPRINT_H +#define AVUTIL_BPRINT_H + +#include <stdarg.h> + +#include "attributes.h" +#include "avstring.h" + +/** + * Define a structure with extra padding to a fixed size + * This helps ensuring binary compatibility with future versions. + */ + +#define FF_PAD_STRUCTURE(name, size, ...) \ +struct liteav_ff_pad_helper_##name { __VA_ARGS__ }; \ +typedef struct name { \ + __VA_ARGS__ \ + char reserved_padding[size - sizeof(struct liteav_ff_pad_helper_##name)]; \ +} name; + +/** + * Buffer to print data progressively + * + * The string buffer grows as necessary and is always 0-terminated. + * The content of the string is never accessed, and thus is + * encoding-agnostic and can even hold binary data. + * + * Small buffers are kept in the structure itself, and thus require no + * memory allocation at all (unless the contents of the buffer is needed + * after the structure goes out of scope). This is almost as lightweight as + * declaring a local "char buf[512]". + * + * The length of the string can go beyond the allocated size: the buffer is + * then truncated, but the functions still keep account of the actual total + * length. 
+ * + * In other words, buf->len can be greater than buf->size and records the + * total length of what would have been to the buffer if there had been + * enough memory. + * + * Append operations do not need to be tested for failure: if a memory + * allocation fails, data stop being appended to the buffer, but the length + * is still updated. This situation can be tested with + * av_bprint_is_complete(). + * + * The size_max field determines several possible behaviours: + * + * size_max = -1 (= UINT_MAX) or any large value will let the buffer be + * reallocated as necessary, with an amortized linear cost. + * + * size_max = 0 prevents writing anything to the buffer: only the total + * length is computed. The write operations can then possibly be repeated in + * a buffer with exactly the necessary size + * (using size_init = size_max = len + 1). + * + * size_max = 1 is automatically replaced by the exact size available in the + * structure itself, thus ensuring no dynamic memory allocation. The + * internal buffer is large enough to hold a reasonable paragraph of text, + * such as the current paragraph. + */ + +FF_PAD_STRUCTURE(AVBPrint, 1024, + char *str; /**< string so far */ + unsigned len; /**< length so far */ + unsigned size; /**< allocated memory */ + unsigned size_max; /**< maximum allocated memory */ + char reserved_internal_buffer[1]; +) + +/** + * Convenience macros for special values for liteav_av_bprint_init() size_max + * parameter. + */ +#define AV_BPRINT_SIZE_UNLIMITED ((unsigned)-1) +#define AV_BPRINT_SIZE_AUTOMATIC 1 +#define AV_BPRINT_SIZE_COUNT_ONLY 0 + +/** + * Init a print buffer. 
+ * + * @param buf buffer to init + * @param size_init initial size (including the final 0) + * @param size_max maximum size; + * 0 means do not write anything, just count the length; + * 1 is replaced by the maximum value for automatic storage; + * any large value means that the internal buffer will be + * reallocated as needed up to that limit; -1 is converted to + * UINT_MAX, the largest limit possible. + * Check also AV_BPRINT_SIZE_* macros. + */ +void liteav_av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max); + +/** + * Init a print buffer using a pre-existing buffer. + * + * The buffer will not be reallocated. + * + * @param buf buffer structure to init + * @param buffer byte buffer to use for the string data + * @param size size of buffer + */ +void liteav_av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size); + +/** + * Append a formatted string to a print buffer. + */ +void liteav_av_bprintf(AVBPrint *buf, const char *fmt, ...) av_printf_format(2, 3); + +/** + * Append a formatted string to a print buffer. + */ +void liteav_av_vbprintf(AVBPrint *buf, const char *fmt, va_list vl_arg); + +/** + * Append char c n times to a print buffer. + */ +void liteav_av_bprint_chars(AVBPrint *buf, char c, unsigned n); + +/** + * Append data to a print buffer. + * + * param buf bprint buffer to use + * param data pointer to data + * param size size of data + */ +void liteav_av_bprint_append_data(AVBPrint *buf, const char *data, unsigned size); + +struct tm; +/** + * Append a formatted date and time to a print buffer. + * + * param buf bprint buffer to use + * param fmt date and time format string, see strftime() + * param tm broken-down time structure to translate + * + * @note due to poor design of the standard strftime function, it may + * produce poor results if the format string expands to a very long text and + * the bprint buffer is near the limit stated by the size_max option. 
+ */ +void liteav_av_bprint_strftime(AVBPrint *buf, const char *fmt, const struct tm *tm); + +/** + * Allocate bytes in the buffer for external use. + * + * @param[in] buf buffer structure + * @param[in] size required size + * @param[out] mem pointer to the memory area + * @param[out] actual_size size of the memory area after allocation; + * can be larger or smaller than size + */ +void liteav_av_bprint_get_buffer(AVBPrint *buf, unsigned size, + unsigned char **mem, unsigned *actual_size); + +/** + * Reset the string to "" but keep internal allocated data. + */ +void liteav_av_bprint_clear(AVBPrint *buf); + +/** + * Test if the print buffer is complete (not truncated). + * + * It may have been truncated due to a memory allocation failure + * or the size_max limit (compare size and size_max if necessary). + */ +static inline int av_bprint_is_complete(const AVBPrint *buf) +{ + return buf->len < buf->size; +} + +/** + * Finalize a print buffer. + * + * The print buffer can no longer be used afterwards, + * but the len and size fields are still valid. + * + * @arg[out] ret_str if not NULL, used to return a permanent copy of the + * buffer contents, or NULL if memory allocation fails; + * if NULL, the buffer is discarded and freed + * @return 0 for success or error code (probably AVERROR(ENOMEM)) + */ +int liteav_av_bprint_finalize(AVBPrint *buf, char **ret_str); + +/** + * Escape the content in src and append it to dstbuf. + * + * @param dstbuf already inited destination bprint buffer + * @param src string containing the text to escape + * @param special_chars string containing the special characters which + * need to be escaped, can be NULL + * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros. + * Any unknown value for mode will be considered equivalent to + * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without + * notice. 
+ * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_* macros + */ +void liteav_av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_chars, + enum AVEscapeMode mode, int flags); + +#endif /* AVUTIL_BPRINT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/bswap.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/bswap.h new file mode 100644 index 0000000..6254af7 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/bswap.h @@ -0,0 +1,109 @@ +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * byte swapping routines + */ + +#ifndef AVUTIL_BSWAP_H +#define AVUTIL_BSWAP_H + +#include <stdint.h> +#include "libavutil/avconfig.h" +#include "attributes.h" + +#ifdef HAVE_AV_CONFIG_H + +#include "config.h" + +#if ARCH_AARCH64 +# include "aarch64/bswap.h" +#elif ARCH_ARM +# include "arm/bswap.h" +#elif ARCH_AVR32 +# include "avr32/bswap.h" +#elif ARCH_SH4 +# include "sh4/bswap.h" +#elif ARCH_X86 +# include "x86/bswap.h" +#endif + +#endif /* HAVE_AV_CONFIG_H */ + +#define AV_BSWAP16C(x) (((x) << 8 & 0xff00) | ((x) >> 8 & 0x00ff)) +#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16)) +#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32)) + +#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x) + +#ifndef av_bswap16 +static av_always_inline av_const uint16_t av_bswap16(uint16_t x) +{ + x= (x>>8) | (x<<8); + return x; +} +#endif + +#ifndef av_bswap32 +static av_always_inline av_const uint32_t av_bswap32(uint32_t x) +{ + return AV_BSWAP32C(x); +} +#endif + +#ifndef av_bswap64 +static inline uint64_t av_const av_bswap64(uint64_t x) +{ + return (uint64_t)av_bswap32((uint32_t)x) << 32 | av_bswap32((uint32_t)(x >> 32)); +} +#endif + +// be2ne ... big-endian to native-endian +// le2ne ... 
little-endian to native-endian + +#if AV_HAVE_BIGENDIAN +#define av_be2ne16(x) (x) +#define av_be2ne32(x) (x) +#define av_be2ne64(x) (x) +#define av_le2ne16(x) av_bswap16(x) +#define av_le2ne32(x) av_bswap32(x) +#define av_le2ne64(x) av_bswap64(x) +#define AV_BE2NEC(s, x) (x) +#define AV_LE2NEC(s, x) AV_BSWAPC(s, x) +#else +#define av_be2ne16(x) av_bswap16(x) +#define av_be2ne32(x) av_bswap32(x) +#define av_be2ne64(x) av_bswap64(x) +#define av_le2ne16(x) (x) +#define av_le2ne32(x) (x) +#define av_le2ne64(x) (x) +#define AV_BE2NEC(s, x) AV_BSWAPC(s, x) +#define AV_LE2NEC(s, x) (x) +#endif + +#define AV_BE2NE16C(x) AV_BE2NEC(16, x) +#define AV_BE2NE32C(x) AV_BE2NEC(32, x) +#define AV_BE2NE64C(x) AV_BE2NEC(64, x) +#define AV_LE2NE16C(x) AV_LE2NEC(16, x) +#define AV_LE2NE32C(x) AV_LE2NEC(32, x) +#define AV_LE2NE64C(x) AV_LE2NEC(64, x) + +#endif /* AVUTIL_BSWAP_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/buffer.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/buffer.h new file mode 100644 index 0000000..c26fb4d --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/buffer.h @@ -0,0 +1,292 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_buffer + * refcounted data buffer API + */ + +#ifndef AVUTIL_BUFFER_H +#define AVUTIL_BUFFER_H + +#include <stdint.h> + +/** + * @defgroup lavu_buffer AVBuffer + * @ingroup lavu_data + * + * @{ + * AVBuffer is an API for reference-counted data buffers. + * + * There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer + * represents the data buffer itself; it is opaque and not meant to be accessed + * by the caller directly, but only through AVBufferRef. However, the caller may + * e.g. compare two AVBuffer pointers to check whether two different references + * are describing the same data buffer. AVBufferRef represents a single + * reference to an AVBuffer and it is the object that may be manipulated by the + * caller directly. + * + * There are two functions provided for creating a new AVBuffer with a single + * reference -- liteav_av_buffer_alloc() to just allocate a new buffer, and + * liteav_av_buffer_create() to wrap an existing array in an AVBuffer. From an existing + * reference, additional references may be created with liteav_av_buffer_ref(). + * Use liteav_av_buffer_unref() to free a reference (this will automatically free the + * data once all the references are freed). + * + * The convention throughout this API and the rest of FFmpeg is such that the + * buffer is considered writable if there exists only one reference to it (and + * it has not been marked as read-only). The liteav_av_buffer_is_writable() function is + * provided to check whether this is true and liteav_av_buffer_make_writable() will + * automatically create a new writable buffer when necessary. 
+ * Of course nothing prevents the calling code from violating this convention, + * however that is safe only when all the existing references are under its + * control. + * + * @note Referencing and unreferencing the buffers is thread-safe and thus + * may be done from multiple threads simultaneously without any need for + * additional locking. + * + * @note Two different references to the same buffer can point to different + * parts of the buffer (i.e. their AVBufferRef.data will not be equal). + */ + +/** + * A reference counted buffer type. It is opaque and is meant to be used through + * references (AVBufferRef). + */ +typedef struct AVBuffer AVBuffer; + +/** + * A reference to a data buffer. + * + * The size of this struct is not a part of the public ABI and it is not meant + * to be allocated directly. + */ +typedef struct AVBufferRef { + AVBuffer *buffer; + + /** + * The data buffer. It is considered writable if and only if + * this is the only reference to the buffer, in which case + * liteav_av_buffer_is_writable() returns 1. + */ + uint8_t *data; + /** + * Size of data in bytes. + */ + int size; +} AVBufferRef; + +/** + * Allocate an AVBuffer of the given size using liteav_av_malloc(). + * + * @return an AVBufferRef of given size or NULL when out of memory + */ +AVBufferRef *liteav_av_buffer_alloc(int size); + +/** + * Same as liteav_av_buffer_alloc(), except the returned buffer will be initialized + * to zero. + */ +AVBufferRef *liteav_av_buffer_allocz(int size); + +/** + * Always treat the buffer as read-only, even when it has only one + * reference. + */ +#define AV_BUFFER_FLAG_READONLY (1 << 0) + +/** + * Create an AVBuffer from an existing array. + * + * If this function is successful, data is owned by the AVBuffer. The caller may + * only access data through the returned AVBufferRef and references derived from + * it. + * If this function fails, data is left untouched. 
+ * @param data data array + * @param size size of data in bytes + * @param free a callback for freeing this buffer's data + * @param opaque parameter to be got for processing or passed to free + * @param flags a combination of AV_BUFFER_FLAG_* + * + * @return an AVBufferRef referring to data on success, NULL on failure. + */ +AVBufferRef *liteav_av_buffer_create(uint8_t *data, int size, + void (*free)(void *opaque, uint8_t *data), + void *opaque, int flags); + +/** + * Default free callback, which calls liteav_av_free() on the buffer data. + * This function is meant to be passed to liteav_av_buffer_create(), not called + * directly. + */ +void liteav_av_buffer_default_free(void *opaque, uint8_t *data); + +/** + * Create a new reference to an AVBuffer. + * + * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on + * failure. + */ +AVBufferRef *liteav_av_buffer_ref(AVBufferRef *buf); + +/** + * Free a given reference and automatically free the buffer if there are no more + * references to it. + * + * @param buf the reference to be freed. The pointer is set to NULL on return. + */ +void liteav_av_buffer_unref(AVBufferRef **buf); + +/** + * @return 1 if the caller may write to the data referred to by buf (which is + * true if and only if buf is the only reference to the underlying AVBuffer). + * Return 0 otherwise. + * A positive answer is valid until liteav_av_buffer_ref() is called on buf. + */ +int liteav_av_buffer_is_writable(const AVBufferRef *buf); + +/** + * @return the opaque parameter set by liteav_av_buffer_create. + */ +void *liteav_av_buffer_get_opaque(const AVBufferRef *buf); + +int liteav_av_buffer_get_ref_count(const AVBufferRef *buf); + +/** + * Create a writable reference from a given buffer reference, avoiding data copy + * if possible. + * + * @param buf buffer reference to make writable. On success, buf is either left + * untouched, or it is unreferenced and a new writable AVBufferRef is + * written in its place. 
On failure, buf is left untouched. + * @return 0 on success, a negative AVERROR on failure. + */ +int liteav_av_buffer_make_writable(AVBufferRef **buf); + +/** + * Reallocate a given buffer. + * + * @param buf a buffer reference to reallocate. On success, buf will be + * unreferenced and a new reference with the required size will be + * written in its place. On failure buf will be left untouched. *buf + * may be NULL, then a new buffer is allocated. + * @param size required new buffer size. + * @return 0 on success, a negative AVERROR on failure. + * + * @note the buffer is actually reallocated with liteav_av_realloc() only if it was + * initially allocated through liteav_av_buffer_realloc(NULL) and there is only one + * reference to it (i.e. the one passed to this function). In all other cases + * a new buffer is allocated and the data is copied. + */ +int liteav_av_buffer_realloc(AVBufferRef **buf, int size); + +/** + * @} + */ + +/** + * @defgroup lavu_bufferpool AVBufferPool + * @ingroup lavu_data + * + * @{ + * AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers. + * + * Frequently allocating and freeing large buffers may be slow. AVBufferPool is + * meant to solve this in cases when the caller needs a set of buffers of the + * same size (the most obvious use case being buffers for raw video or audio + * frames). + * + * At the beginning, the user must call liteav_av_buffer_pool_init() to create the + * buffer pool. Then whenever a buffer is needed, call liteav_av_buffer_pool_get() to + * get a reference to a new buffer, similar to liteav_av_buffer_alloc(). This new + * reference works in all aspects the same way as the one created by + * liteav_av_buffer_alloc(). However, when the last reference to this buffer is + * unreferenced, it is returned to the pool instead of being freed and will be + * reused for subsequent liteav_av_buffer_pool_get() calls. 
+ * + * When the caller is done with the pool and no longer needs to allocate any new + * buffers, liteav_av_buffer_pool_uninit() must be called to mark the pool as freeable. + * Once all the buffers are released, it will automatically be freed. + * + * Allocating and releasing buffers with this API is thread-safe as long as + * either the default alloc callback is used, or the user-supplied one is + * thread-safe. + */ + +/** + * The buffer pool. This structure is opaque and not meant to be accessed + * directly. It is allocated with liteav_av_buffer_pool_init() and freed with + * liteav_av_buffer_pool_uninit(). + */ +typedef struct AVBufferPool AVBufferPool; + +/** + * Allocate and initialize a buffer pool. + * + * @param size size of each buffer in this pool + * @param alloc a function that will be used to allocate new buffers when the + * pool is empty. May be NULL, then the default allocator will be used + * (liteav_av_buffer_alloc()). + * @return newly created buffer pool on success, NULL on error. + */ +AVBufferPool *liteav_av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size)); + +/** + * Allocate and initialize a buffer pool with a more complex allocator. + * + * @param size size of each buffer in this pool + * @param opaque arbitrary user data used by the allocator + * @param alloc a function that will be used to allocate new buffers when the + * pool is empty. + * @param pool_free a function that will be called immediately before the pool + * is freed. I.e. after liteav_av_buffer_pool_uninit() is called + * by the caller and all the frames are returned to the pool + * and freed. It is intended to uninitialize the user opaque + * data. + * @return newly created buffer pool on success, NULL on error. + */ +AVBufferPool *liteav_av_buffer_pool_init2(int size, void *opaque, + AVBufferRef* (*alloc)(void *opaque, int size), + void (*pool_free)(void *opaque)); + +/** + * Mark the pool as being available for freeing. 
It will actually be freed only + * once all the allocated buffers associated with the pool are released. Thus it + * is safe to call this function while some of the allocated buffers are still + * in use. + * + * @param pool pointer to the pool to be freed. It will be set to NULL. + */ +void liteav_av_buffer_pool_uninit(AVBufferPool **pool); + +/** + * Allocate a new AVBuffer, reusing an old buffer from the pool when available. + * This function may be called simultaneously from multiple threads. + * + * @return a reference to the new buffer on success, NULL on error. + */ +AVBufferRef *liteav_av_buffer_pool_get(AVBufferPool *pool); + +/** + * @} + */ + +#endif /* AVUTIL_BUFFER_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/camellia.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/camellia.h new file mode 100644 index 0000000..9f2a6e4 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/camellia.h @@ -0,0 +1,71 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * An implementation of the CAMELLIA algorithm as mentioned in RFC3713 + * Copyright (c) 2014 Supraja Meedinti + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CAMELLIA_H +#define AVUTIL_CAMELLIA_H + +#include <stdint.h> + + +/** + * @file + * @brief Public header for libavutil CAMELLIA algorithm + * @defgroup lavu_camellia CAMELLIA + * @ingroup lavu_crypto + * @{ + */ + +extern const int liteav_av_camellia_size; + +struct AVCAMELLIA; + +/** + * Allocate an AVCAMELLIA context + * To free the struct: liteav_av_free(ptr) + */ +struct AVCAMELLIA *liteav_av_camellia_alloc(void); + +/** + * Initialize an AVCAMELLIA context. + * + * @param ctx an AVCAMELLIA context + * @param key a key of 16, 24, 32 bytes used for encryption/decryption + * @param key_bits number of keybits: possible are 128, 192, 256 + */ +int liteav_av_camellia_init(struct AVCAMELLIA *ctx, const uint8_t *key, int key_bits); + +/** + * Encrypt or decrypt a buffer using a previously initialized context + * + * @param ctx an AVCAMELLIA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 16 byte blocks + * @paran iv initialization vector for CBC mode, NULL for ECB mode + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_camellia_crypt(struct AVCAMELLIA *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t* iv, int decrypt); + +/** + * @} + */ +#endif /* AVUTIL_CAMELLIA_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/cast5.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/cast5.h new file mode 100644 index 0000000..62378cb --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/cast5.h @@ -0,0 +1,81 @@ +#include 
"third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * An implementation of the CAST128 algorithm as mentioned in RFC2144 + * Copyright (c) 2014 Supraja Meedinti + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CAST5_H +#define AVUTIL_CAST5_H + +#include <stdint.h> + + +/** + * @file + * @brief Public header for libavutil CAST5 algorithm + * @defgroup lavu_cast5 CAST5 + * @ingroup lavu_crypto + * @{ + */ + +extern const int liteav_av_cast5_size; + +struct AVCAST5; + +/** + * Allocate an AVCAST5 context + * To free the struct: liteav_av_free(ptr) + */ +struct AVCAST5 *liteav_av_cast5_alloc(void); +/** + * Initialize an AVCAST5 context. 
+ * + * @param ctx an AVCAST5 context + * @param key a key of 5,6,...16 bytes used for encryption/decryption + * @param key_bits number of keybits: possible are 40,48,...,128 + * @return 0 on success, less than 0 on failure + */ +int liteav_av_cast5_init(struct AVCAST5 *ctx, const uint8_t *key, int key_bits); + +/** + * Encrypt or decrypt a buffer using a previously initialized context, ECB mode only + * + * @param ctx an AVCAST5 context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_cast5_crypt(struct AVCAST5 *ctx, uint8_t *dst, const uint8_t *src, int count, int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context + * + * @param ctx an AVCAST5 context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, NULL for ECB mode + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_cast5_crypt2(struct AVCAST5 *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); +/** + * @} + */ +#endif /* AVUTIL_CAST5_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/channel_layout.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/channel_layout.h new file mode 100644 index 0000000..62a327b --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/channel_layout.h @@ -0,0 +1,233 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * Copyright (c) 2008 Peter Ross + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CHANNEL_LAYOUT_H +#define AVUTIL_CHANNEL_LAYOUT_H + +#include <stdint.h> + +/** + * @file + * audio channel layout utility functions + */ + +/** + * @addtogroup lavu_audio + * @{ + */ + +/** + * @defgroup channel_masks Audio channel masks + * + * A channel layout is a 64-bits integer with a bit set for every channel. + * The number of bits set must be equal to the number of channels. + * The value 0 means that the channel layout is not known. + * @note this data structure is not powerful enough to handle channels + * combinations that have the same channel multiple times, such as + * dual-mono. 
+ * + * @{ + */ +#define AV_CH_FRONT_LEFT 0x00000001 +#define AV_CH_FRONT_RIGHT 0x00000002 +#define AV_CH_FRONT_CENTER 0x00000004 +#define AV_CH_LOW_FREQUENCY 0x00000008 +#define AV_CH_BACK_LEFT 0x00000010 +#define AV_CH_BACK_RIGHT 0x00000020 +#define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040 +#define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080 +#define AV_CH_BACK_CENTER 0x00000100 +#define AV_CH_SIDE_LEFT 0x00000200 +#define AV_CH_SIDE_RIGHT 0x00000400 +#define AV_CH_TOP_CENTER 0x00000800 +#define AV_CH_TOP_FRONT_LEFT 0x00001000 +#define AV_CH_TOP_FRONT_CENTER 0x00002000 +#define AV_CH_TOP_FRONT_RIGHT 0x00004000 +#define AV_CH_TOP_BACK_LEFT 0x00008000 +#define AV_CH_TOP_BACK_CENTER 0x00010000 +#define AV_CH_TOP_BACK_RIGHT 0x00020000 +#define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix. +#define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT. +#define AV_CH_WIDE_LEFT 0x0000000080000000ULL +#define AV_CH_WIDE_RIGHT 0x0000000100000000ULL +#define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL +#define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL +#define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL + +/** Channel mask value used for AVCodecContext.request_channel_layout + to indicate that the user requests the channel order of the decoder output + to be the native codec channel order. 
*/ +#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL + +/** + * @} + * @defgroup channel_mask_c Audio channel layouts + * @{ + * */ +#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER) +#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT) +#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER) +#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_2_2 (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) +#define AV_CH_LAYOUT_QUAD (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_5POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) +#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_5POINT0_BACK (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_5POINT1_BACK (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT0_FRONT (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1_FRONT (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_7POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_7POINT0_FRONT (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_7POINT1_WIDE 
(AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_HEXADECAGONAL (AV_CH_LAYOUT_OCTAGONAL|AV_CH_WIDE_LEFT|AV_CH_WIDE_RIGHT|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT|AV_CH_TOP_BACK_CENTER|AV_CH_TOP_FRONT_CENTER|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT) +#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT) + +enum AVMatrixEncoding { + AV_MATRIX_ENCODING_NONE, + AV_MATRIX_ENCODING_DOLBY, + AV_MATRIX_ENCODING_DPLII, + AV_MATRIX_ENCODING_DPLIIX, + AV_MATRIX_ENCODING_DPLIIZ, + AV_MATRIX_ENCODING_DOLBYEX, + AV_MATRIX_ENCODING_DOLBYHEADPHONE, + AV_MATRIX_ENCODING_NB +}; + +/** + * Return a channel layout id that matches name, or 0 if no match is found. + * + * name can be one or several of the following notations, + * separated by '+' or '|': + * - the name of an usual channel layout (mono, stereo, 4.0, quad, 5.0, + * 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix); + * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC, + * SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR); + * - a number of channels, in decimal, followed by 'c', yielding + * the default channel layout for that number of channels (@see + * liteav_av_get_default_channel_layout); + * - a channel layout mask, in hexadecimal starting with "0x" (see the + * AV_CH_* macros). + * + * Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7" + */ +uint64_t liteav_av_get_channel_layout(const char *name); + +/** + * Return a channel layout and the number of channels based on the specified name. + * + * This function is similar to (@see liteav_av_get_channel_layout), but can also parse + * unknown channel layout specifications. 
+ * + * @param[in] name channel layout specification string + * @param[out] channel_layout parsed channel layout (0 if unknown) + * @param[out] nb_channels number of channels + * + * @return 0 on success, AVERROR(EINVAL) if the parsing fails. + */ +int liteav_av_get_extended_channel_layout(const char *name, uint64_t* channel_layout, int* nb_channels); + +/** + * Return a description of a channel layout. + * If nb_channels is <= 0, it is guessed from the channel_layout. + * + * @param buf put here the string containing the channel layout + * @param buf_size size in bytes of the buffer + */ +void liteav_av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout); + +struct AVBPrint; +/** + * Append a description of a channel layout to a bprint buffer. + */ +void liteav_av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout); + +/** + * Return the number of channels in the channel layout. + */ +int liteav_av_get_channel_layout_nb_channels(uint64_t channel_layout); + +/** + * Return default channel layout for a given number of channels. + */ +int64_t liteav_av_get_default_channel_layout(int nb_channels); + +/** + * Get the index of a channel in channel_layout. + * + * @param channel a channel layout describing exactly one channel which must be + * present in channel_layout. + * + * @return index of channel in channel_layout on success, a negative AVERROR + * on error. + */ +int liteav_av_get_channel_layout_channel_index(uint64_t channel_layout, + uint64_t channel); + +/** + * Get the channel with the given index in channel_layout. + */ +uint64_t liteav_av_channel_layout_extract_channel(uint64_t channel_layout, int index); + +/** + * Get the name of a given channel. + * + * @return channel name on success, NULL on error. + */ +const char *liteav_av_get_channel_name(uint64_t channel); + +/** + * Get the description of a given channel. 
+ * + * @param channel a channel layout with a single channel + * @return channel description on success, NULL on error + */ +const char *liteav_av_get_channel_description(uint64_t channel); + +/** + * Get the value and name of a standard channel layout. + * + * @param[in] index index in an internal list, starting at 0 + * @param[out] layout channel layout mask + * @param[out] name name of the layout + * @return 0 if the layout exists, + * <0 if index is beyond the limits + */ +int liteav_av_get_standard_channel_layout(unsigned index, uint64_t *layout, + const char **name); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_CHANNEL_LAYOUT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/common.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/common.h new file mode 100644 index 0000000..ded9c8c --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/common.h @@ -0,0 +1,561 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * common internal and external API header + */ + +#ifndef AVUTIL_COMMON_H +#define AVUTIL_COMMON_H + +#if defined(__cplusplus) && !defined(__STDC_CONSTANT_MACROS) && !defined(UINT64_C) +#error missing -D__STDC_CONSTANT_MACROS / #define __STDC_CONSTANT_MACROS +#endif + +#include <errno.h> +#include <inttypes.h> +#include <limits.h> +#include <math.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "attributes.h" +#include "macros.h" +#include "version.h" +#include "libavutil/avconfig.h" + +#if AV_HAVE_BIGENDIAN +# define AV_NE(be, le) (be) +#else +# define AV_NE(be, le) (le) +#endif + +//rounded division & shift +#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b)) +/* assume b>0 */ +#define ROUNDED_DIV(a,b) (((a)>=0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b)) +/* Fast a/(1<<b) rounded toward +inf. Assume a>=0 and b>=0 */ +#define AV_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \ + : ((a) + (1<<(b)) - 1) >> (b)) +/* Backwards compat. */ +#define FF_CEIL_RSHIFT AV_CEIL_RSHIFT + +#define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b)) +#define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b)) + +/** + * Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they + * are not representable as absolute values of their type. This is the same + * as with *abs() + * @see FFNABS() + */ +#define FFABS(a) ((a) >= 0 ? (a) : (-(a))) +#define FFSIGN(a) ((a) > 0 ? 1 : -1) + +/** + * Negative Absolute value. + * this works for all integers of all types. + * As with many macros, this evaluates its argument twice, it thus must not have + * a sideeffect, that is FFNABS(x++) has undefined behavior. + */ +#define FFNABS(a) ((a) <= 0 ? 
(a) : (-(a))) + +/** + * Comparator. + * For two numerical expressions x and y, gives 1 if x > y, -1 if x < y, and 0 + * if x == y. This is useful for instance in a qsort comparator callback. + * Furthermore, compilers are able to optimize this to branchless code, and + * there is no risk of overflow with signed types. + * As with many macros, this evaluates its argument multiple times, it thus + * must not have a side-effect. + */ +#define FFDIFFSIGN(x,y) (((x)>(y)) - ((x)<(y))) + +#define FFMAX(a,b) ((a) > (b) ? (a) : (b)) +#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c) +#define FFMIN(a,b) ((a) > (b) ? (b) : (a)) +#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c) + +#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0) +#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0])) + +/* misc math functions */ + +#ifdef HAVE_AV_CONFIG_H +# include "config.h" +# include "intmath.h" +#endif + +/* Pull in unguarded fallback defines at the end of this file. */ +#include "common.h" + +#ifndef liteav_av_log2 +av_const int liteav_av_log2(unsigned v); +#endif + +#ifndef liteav_av_log2_16bit +av_const int liteav_av_log2_16bit(unsigned v); +#endif + +/** + * Clip a signed integer value into the amin-amax range. + * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const int av_clip_c(int a, int amin, int amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a signed 64bit integer value into the amin-amax range. 
+ * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin, int64_t amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a signed integer value into the 0-255 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const uint8_t av_clip_uint8_c(int a) +{ + if (a&(~0xFF)) return (~a)>>31; + else return a; +} + +/** + * Clip a signed integer value into the -128,127 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int8_t av_clip_int8_c(int a) +{ + if ((a+0x80U) & ~0xFF) return (a>>31) ^ 0x7F; + else return a; +} + +/** + * Clip a signed integer value into the 0-65535 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const uint16_t av_clip_uint16_c(int a) +{ + if (a&(~0xFFFF)) return (~a)>>31; + else return a; +} + +/** + * Clip a signed integer value into the -32768,32767 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int16_t av_clip_int16_c(int a) +{ + if ((a+0x8000U) & ~0xFFFF) return (a>>31) ^ 0x7FFF; + else return a; +} + +/** + * Clip a signed 64-bit integer value into the -2147483648,2147483647 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a) +{ + if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (int32_t)((a>>63) ^ 0x7FFFFFFF); + else return (int32_t)a; +} + +/** + * Clip a signed integer into the -(2^p),(2^p-1) range. 
+ * @param a value to clip + * @param p bit position to clip at + * @return clipped value + */ +static av_always_inline av_const int av_clip_intp2_c(int a, int p) +{ + if (((unsigned)a + (1 << p)) & ~((2 << p) - 1)) + return (a >> 31) ^ ((1 << p) - 1); + else + return a; +} + +/** + * Clip a signed integer to an unsigned power of two range. + * @param a value to clip + * @param p bit position to clip at + * @return clipped value + */ +static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p) +{ + if (a & ~((1<<p) - 1)) return (~a) >> 31 & ((1<<p) - 1); + else return a; +} + +/** + * Clear high bits from an unsigned integer starting with specific bit position + * @param a value to clip + * @param p bit position to clip at + * @return clipped value + */ +static av_always_inline av_const unsigned av_mod_uintp2_c(unsigned a, unsigned p) +{ + return a & ((1 << p) - 1); +} + +/** + * Add two signed 32-bit values with saturation. + * + * @param a one value + * @param b another value + * @return sum with signed saturation + */ +static av_always_inline int av_sat_add32_c(int a, int b) +{ + return av_clipl_int32((int64_t)a + b); +} + +/** + * Add a doubled value to another value with saturation at both stages. + * + * @param a first value + * @param b value doubled and added to a + * @return sum sat(a + sat(2*b)) with signed saturation + */ +static av_always_inline int av_sat_dadd32_c(int a, int b) +{ + return av_sat_add32(a, av_sat_add32(b, b)); +} + +/** + * Subtract two signed 32-bit values with saturation. + * + * @param a one value + * @param b another value + * @return difference with signed saturation + */ +static av_always_inline int av_sat_sub32_c(int a, int b) +{ + return av_clipl_int32((int64_t)a - b); +} + +/** + * Subtract a doubled value from another value with saturation at both stages. 
+ * + * @param a first value + * @param b value doubled and subtracted from a + * @return difference sat(a - sat(2*b)) with signed saturation + */ +static av_always_inline int av_sat_dsub32_c(int a, int b) +{ + return av_sat_sub32(a, av_sat_add32(b, b)); +} + +/** + * Clip a float value into the amin-amax range. + * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const float av_clipf_c(float a, float amin, float amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a double value into the amin-amax range. + * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const double av_clipd_c(double a, double amin, double amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** Compute ceil(log2(x)). 
+ * @param x value used to compute ceil(log2(x)) + * @return computed ceiling of log2(x) + */ +static av_always_inline av_const int av_ceil_log2_c(int x) +{ + return liteav_av_log2((x - 1) << 1); +} + +/** + * Count number of bits set to one in x + * @param x value to count bits of + * @return the number of bits set to one in x + */ +static av_always_inline av_const int av_popcount_c(uint32_t x) +{ + x -= (x >> 1) & 0x55555555; + x = (x & 0x33333333) + ((x >> 2) & 0x33333333); + x = (x + (x >> 4)) & 0x0F0F0F0F; + x += x >> 8; + return (x + (x >> 16)) & 0x3F; +} + +/** + * Count number of bits set to one in x + * @param x value to count bits of + * @return the number of bits set to one in x + */ +static av_always_inline av_const int av_popcount64_c(uint64_t x) +{ + return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32)); +} + +static av_always_inline av_const int av_parity_c(uint32_t v) +{ + return av_popcount(v) & 1; +} + +#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24)) +#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24)) + +/** + * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form. + * + * @param val Output value, must be an lvalue of type uint32_t. + * @param GET_BYTE Expression reading one byte from the input. + * Evaluated up to 7 times (4 for the currently + * assigned Unicode range). With a memory buffer + * input, this could be *ptr++. + * @param ERROR Expression to be evaluated on invalid input, + * typically a goto statement. + * + * @warning ERROR should not contain a loop control statement which + * could interact with the internal while loop, and should force an + * exit from the macro code (e.g. through a goto or a return) in order + * to prevent undefined results. 
+ */ +#define GET_UTF8(val, GET_BYTE, ERROR)\ + val= (GET_BYTE);\ + {\ + uint32_t top = (val & 128) >> 1;\ + if ((val & 0xc0) == 0x80 || val >= 0xFE)\ + ERROR\ + while (val & top) {\ + int tmp= (GET_BYTE) - 128;\ + if(tmp>>6)\ + ERROR\ + val= (val<<6) + tmp;\ + top <<= 5;\ + }\ + val &= (top << 1) - 1;\ + } + +/** + * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form. + * + * @param val Output value, must be an lvalue of type uint32_t. + * @param GET_16BIT Expression returning two bytes of UTF-16 data converted + * to native byte order. Evaluated one or two times. + * @param ERROR Expression to be evaluated on invalid input, + * typically a goto statement. + */ +#define GET_UTF16(val, GET_16BIT, ERROR)\ + val = GET_16BIT;\ + {\ + unsigned int hi = val - 0xD800;\ + if (hi < 0x800) {\ + val = GET_16BIT - 0xDC00;\ + if (val > 0x3FFU || hi > 0x3FFU)\ + ERROR\ + val += (hi<<10) + 0x10000;\ + }\ + }\ + +/** + * @def PUT_UTF8(val, tmp, PUT_BYTE) + * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long). + * @param val is an input-only argument and should be of type uint32_t. It holds + * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If + * val is given as a function it is executed only once. + * @param tmp is a temporary variable and should be of type uint8_t. It + * represents an intermediate value during conversion that is to be + * output by PUT_BYTE. + * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination. + * It could be a function or a statement, and uses tmp as the input byte. + * For example, PUT_BYTE could be "*output++ = tmp;" PUT_BYTE will be + * executed up to 4 times for values in the valid UTF-8 range and up to + * 7 times in the general case, depending on the length of the converted + * Unicode character. 
+ */ +#define PUT_UTF8(val, tmp, PUT_BYTE)\ + {\ + int bytes, shift;\ + uint32_t in = val;\ + if (in < 0x80) {\ + tmp = in;\ + PUT_BYTE\ + } else {\ + bytes = (liteav_av_log2(in) + 4) / 5;\ + shift = (bytes - 1) * 6;\ + tmp = (256 - (256 >> bytes)) | (in >> shift);\ + PUT_BYTE\ + while (shift >= 6) {\ + shift -= 6;\ + tmp = 0x80 | ((in >> shift) & 0x3f);\ + PUT_BYTE\ + }\ + }\ + } + +/** + * @def PUT_UTF16(val, tmp, PUT_16BIT) + * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes). + * @param val is an input-only argument and should be of type uint32_t. It holds + * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If + * val is given as a function it is executed only once. + * @param tmp is a temporary variable and should be of type uint16_t. It + * represents an intermediate value during conversion that is to be + * output by PUT_16BIT. + * @param PUT_16BIT writes the converted UTF-16 data to any proper destination + * in desired endianness. It could be a function or a statement, and uses tmp + * as the input byte. For example, PUT_BYTE could be "*output++ = tmp;" + * PUT_BYTE will be executed 1 or 2 times depending on input character. + */ +#define PUT_UTF16(val, tmp, PUT_16BIT)\ + {\ + uint32_t in = val;\ + if (in < 0x10000) {\ + tmp = in;\ + PUT_16BIT\ + } else {\ + tmp = 0xD800 | ((in - 0x10000) >> 10);\ + PUT_16BIT\ + tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\ + PUT_16BIT\ + }\ + }\ + + + +#include "mem.h" + +#ifdef HAVE_AV_CONFIG_H +# include "internal.h" +#endif /* HAVE_AV_CONFIG_H */ + +#endif /* AVUTIL_COMMON_H */ + +/* + * The following definitions are outside the multiple inclusion guard + * to ensure they are immediately available in intmath.h. 
+ */ + +#ifndef av_ceil_log2 +# define av_ceil_log2 av_ceil_log2_c +#endif +#ifndef av_clip +# define av_clip av_clip_c +#endif +#ifndef av_clip64 +# define av_clip64 av_clip64_c +#endif +#ifndef av_clip_uint8 +# define av_clip_uint8 av_clip_uint8_c +#endif +#ifndef av_clip_int8 +# define av_clip_int8 av_clip_int8_c +#endif +#ifndef av_clip_uint16 +# define av_clip_uint16 av_clip_uint16_c +#endif +#ifndef av_clip_int16 +# define av_clip_int16 av_clip_int16_c +#endif +#ifndef av_clipl_int32 +# define av_clipl_int32 av_clipl_int32_c +#endif +#ifndef av_clip_intp2 +# define av_clip_intp2 av_clip_intp2_c +#endif +#ifndef av_clip_uintp2 +# define av_clip_uintp2 av_clip_uintp2_c +#endif +#ifndef av_mod_uintp2 +# define av_mod_uintp2 av_mod_uintp2_c +#endif +#ifndef av_sat_add32 +# define av_sat_add32 av_sat_add32_c +#endif +#ifndef av_sat_dadd32 +# define av_sat_dadd32 av_sat_dadd32_c +#endif +#ifndef av_sat_sub32 +# define av_sat_sub32 av_sat_sub32_c +#endif +#ifndef av_sat_dsub32 +# define av_sat_dsub32 av_sat_dsub32_c +#endif +#ifndef av_clipf +# define av_clipf av_clipf_c +#endif +#ifndef av_clipd +# define av_clipd av_clipd_c +#endif +#ifndef av_popcount +# define av_popcount av_popcount_c +#endif +#ifndef av_popcount64 +# define av_popcount64 av_popcount64_c +#endif +#ifndef av_parity +# define av_parity av_parity_c +#endif diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/cpu.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/cpu.h new file mode 100644 index 0000000..decf084 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/cpu.h @@ -0,0 +1,131 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2000, 2001, 2002 Fabrice Bellard + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CPU_H +#define AVUTIL_CPU_H + +#include <stddef.h> + +#include "attributes.h" + +#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */ + + /* lower 16 bits - CPU features */ +#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX +#define AV_CPU_FLAG_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext +#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext +#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW +#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions +#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions +#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster + ///< than regular MMX/SSE (e.g. Core1) +#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt +#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions +#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster + ///< than regular MMX/SSE (e.g. 
Core1) +#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions +#define AV_CPU_FLAG_SSSE3SLOW 0x4000000 ///< SSSE3 supported, but usually not faster +#define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower +#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions +#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions +#define AV_CPU_FLAG_AESNI 0x80000 ///< Advanced Encryption Standard functions +#define AV_CPU_FLAG_AVX 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used +#define AV_CPU_FLAG_AVXSLOW 0x8000000 ///< AVX supported, but slow when using YMM registers (e.g. Bulldozer) +#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions +#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions +#define AV_CPU_FLAG_CMOV 0x1000 ///< supports cmov instruction +#define AV_CPU_FLAG_AVX2 0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used +#define AV_CPU_FLAG_FMA3 0x10000 ///< Haswell FMA3 functions +#define AV_CPU_FLAG_BMI1 0x20000 ///< Bit Manipulation Instruction Set 1 +#define AV_CPU_FLAG_BMI2 0x40000 ///< Bit Manipulation Instruction Set 2 +#define AV_CPU_FLAG_AVX512 0x100000 ///< AVX-512 functions: requires OS support even if YMM/ZMM registers aren't used + +#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard +#define AV_CPU_FLAG_VSX 0x0002 ///< ISA 2.06 +#define AV_CPU_FLAG_POWER8 0x0004 ///< ISA 2.07 + +#define AV_CPU_FLAG_ARMV5TE (1 << 0) +#define AV_CPU_FLAG_ARMV6 (1 << 1) +#define AV_CPU_FLAG_ARMV6T2 (1 << 2) +#define AV_CPU_FLAG_VFP (1 << 3) +#define AV_CPU_FLAG_VFPV3 (1 << 4) +#define AV_CPU_FLAG_NEON (1 << 5) +#define AV_CPU_FLAG_ARMV8 (1 << 6) +#define AV_CPU_FLAG_VFP_VM (1 << 7) ///< VFPv2 vector mode, deprecated in ARMv7-A and unavailable in various CPUs implementations +#define AV_CPU_FLAG_SETEND (1 <<16) + +/** + * Return the flags which specify extensions supported by the CPU. 
+ * The returned value is affected by liteav_av_force_cpu_flags() if that was used + * before. So liteav_av_get_cpu_flags() can easily be used in an application to + * detect the enabled cpu flags. + */ +int liteav_av_get_cpu_flags(void); + +/** + * Disables cpu detection and forces the specified flags. + * -1 is a special case that disables forcing of specific flags. + */ +void liteav_av_force_cpu_flags(int flags); + +/** + * Set a mask on flags returned by liteav_av_get_cpu_flags(). + * This function is mainly useful for testing. + * Please use liteav_av_force_cpu_flags() and liteav_av_get_cpu_flags() instead which are more flexible + */ +attribute_deprecated void liteav_av_set_cpu_flags_mask(int mask); + +/** + * Parse CPU flags from a string. + * + * The returned flags contain the specified flags as well as related unspecified flags. + * + * This function exists only for compatibility with libav. + * Please use liteav_av_parse_cpu_caps() when possible. + * @return a combination of AV_CPU_* flags, negative on error. + */ +attribute_deprecated +int liteav_av_parse_cpu_flags(const char *s); + +/** + * Parse CPU caps from a string and update the given AV_CPU_* flags based on that. + * + * @return negative on error. + */ +int liteav_av_parse_cpu_caps(unsigned *flags, const char *s); + +/** + * @return the number of logical CPU cores present. + */ +int liteav_av_cpu_count(void); + +/** + * Get the maximum data alignment that may be required by FFmpeg. + * + * Note that this is affected by the build configuration and the CPU flags mask, + * so e.g. if the CPU supports AVX, but libavutil has been built with + * --disable-avx or the AV_CPU_FLAG_AVX flag has been disabled through + * liteav_av_set_cpu_flags_mask(), then this function will behave as if AVX is not + * present. 
+ */ +size_t liteav_av_cpu_max_align(void); + +#endif /* AVUTIL_CPU_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/crc.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/crc.h new file mode 100644 index 0000000..e399b0d --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/crc.h @@ -0,0 +1,101 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_crc32 + * Public header for CRC hash function implementation. + */ + +#ifndef AVUTIL_CRC_H +#define AVUTIL_CRC_H + +#include <stdint.h> +#include <stddef.h> +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_crc32 CRC + * @ingroup lavu_hash + * CRC (Cyclic Redundancy Check) hash function implementation. + * + * This module supports numerous CRC polynomials, in addition to the most + * widely used CRC-32-IEEE. See @ref AVCRCId for a list of available + * polynomials. 
+ * + * @{ + */ + +typedef uint32_t AVCRC; + +typedef enum { + AV_CRC_8_ATM, + AV_CRC_16_ANSI, + AV_CRC_16_CCITT, + AV_CRC_32_IEEE, + AV_CRC_32_IEEE_LE, /*< reversed bitorder version of AV_CRC_32_IEEE */ + AV_CRC_16_ANSI_LE, /*< reversed bitorder version of AV_CRC_16_ANSI */ + AV_CRC_24_IEEE, + AV_CRC_8_EBU, + AV_CRC_MAX, /*< Not part of public API! Do not use outside libavutil. */ +}AVCRCId; + +/** + * Initialize a CRC table. + * @param ctx must be an array of size sizeof(AVCRC)*257 or sizeof(AVCRC)*1024 + * @param le If 1, the lowest bit represents the coefficient for the highest + * exponent of the corresponding polynomial (both for poly and + * actual CRC). + * If 0, you must swap the CRC parameter and the result of liteav_av_crc + * if you need the standard representation (can be simplified in + * most cases to e.g. bswap16): + * av_bswap32(crc << (32-bits)) + * @param bits number of bits for the CRC + * @param poly generator polynomial without the x**bits coefficient, in the + * representation as specified by le + * @param ctx_size size of ctx in bytes + * @return <0 on failure + */ +int liteav_av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size); + +/** + * Get an initialized standard CRC table. + * @param crc_id ID of a standard CRC + * @return a pointer to the CRC table or NULL on failure + */ +const AVCRC *liteav_av_crc_get_table(AVCRCId crc_id); + +/** + * Calculate the CRC of a block. 
+ * @param crc CRC of previous blocks if any or initial value for CRC + * @return CRC updated with the data from the given block + * + * @see liteav_av_crc_init() "le" parameter + */ +uint32_t liteav_av_crc(const AVCRC *ctx, uint32_t crc, + const uint8_t *buffer, size_t length) av_pure; + +/** + * @} + */ + +#endif /* AVUTIL_CRC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/des.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/des.h new file mode 100644 index 0000000..b98dd9a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/des.h @@ -0,0 +1,78 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * DES encryption/decryption + * Copyright (c) 2007 Reimar Doeffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_DES_H +#define AVUTIL_DES_H + +#include <stdint.h> + +/** + * @defgroup lavu_des DES + * @ingroup lavu_crypto + * @{ + */ + +typedef struct AVDES { + uint64_t round_keys[3][16]; + int triple_des; +} AVDES; + +/** + * Allocate an AVDES context. 
+ */ +AVDES *liteav_av_des_alloc(void); + +/** + * @brief Initializes an AVDES context. + * + * @param key_bits must be 64 or 192 + * @param decrypt 0 for encryption/CBC-MAC, 1 for decryption + * @return zero on success, negative value otherwise + */ +int liteav_av_des_init(struct AVDES *d, const uint8_t *key, int key_bits, int decrypt); + +/** + * @brief Encrypts / decrypts using the DES algorithm. + * + * @param count number of 8 byte blocks + * @param dst destination array, can be equal to src, must be 8-byte aligned + * @param src source array, can be equal to dst, must be 8-byte aligned, may be NULL + * @param iv initialization vector for CBC mode, if NULL then ECB will be used, + * must be 8-byte aligned + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_des_crypt(struct AVDES *d, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); + +/** + * @brief Calculates CBC-MAC using the DES algorithm. + * + * @param count number of 8 byte blocks + * @param dst destination array, can be equal to src, must be 8-byte aligned + * @param src source array, can be equal to dst, must be 8-byte aligned, may be NULL + */ +void liteav_av_des_mac(struct AVDES *d, uint8_t *dst, const uint8_t *src, int count); + +/** + * @} + */ + +#endif /* AVUTIL_DES_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/dict.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/dict.h new file mode 100644 index 0000000..7794958 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/dict.h @@ -0,0 +1,201 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Public dictionary API. + * @deprecated + * AVDictionary is provided for compatibility with libav. It is both in + * implementation as well as API inefficient. It does not scale and is + * extremely slow with large dictionaries. + * It is recommended that new code uses our tree container from tree.c/h + * where applicable, which uses AVL trees to achieve O(log n) performance. + */ + +#ifndef AVUTIL_DICT_H +#define AVUTIL_DICT_H + +#include <stdint.h> + +#include "version.h" + +/** + * @addtogroup lavu_dict AVDictionary + * @ingroup lavu_data + * + * @brief Simple key:value store + * + * @{ + * Dictionaries are used for storing key:value pairs. To create + * an AVDictionary, simply pass an address of a NULL pointer to + * liteav_av_dict_set(). NULL can be used as an empty dictionary wherever + * a pointer to an AVDictionary is required. + * Use liteav_av_dict_get() to retrieve an entry or iterate over all + * entries and finally liteav_av_dict_free() to free the dictionary + * and all its contents. 
+ * + @code + AVDictionary *d = NULL; // "create" an empty dictionary + AVDictionaryEntry *t = NULL; + + liteav_av_dict_set(&d, "foo", "bar", 0); // add an entry + + char *k = liteav_av_strdup("key"); // if your strings are already allocated, + char *v = liteav_av_strdup("value"); // you can avoid copying them like this + liteav_av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL); + + while (t = liteav_av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) { + <....> // iterate over all entries in d + } + liteav_av_dict_free(&d); + @endcode + */ + +#define AV_DICT_MATCH_CASE 1 /**< Only get an entry with exact-case key match. Only relevant in liteav_av_dict_get(). */ +#define AV_DICT_IGNORE_SUFFIX 2 /**< Return first entry in a dictionary whose first part corresponds to the search key, + ignoring the suffix of the found key string. Only relevant in liteav_av_dict_get(). */ +#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been + allocated with liteav_av_malloc() or another memory allocation function. */ +#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been + allocated with liteav_av_malloc() or another memory allocation function. */ +#define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries. +#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no + delimiter is added, the strings are simply concatenated. */ +#define AV_DICT_MULTIKEY 64 /**< Allow to store several equal keys in the dictionary */ + +typedef struct AVDictionaryEntry { + char *key; + char *value; +} AVDictionaryEntry; + +typedef struct AVDictionary AVDictionary; + +/** + * Get a dictionary entry with matching key. + * + * The returned entry key or value must not be changed, or it will + * cause undefined behavior. + * + * To iterate through all the dictionary entries, you can set the matching key + * to the null string "" and set the AV_DICT_IGNORE_SUFFIX flag. 
+ * + * @param prev Set to the previous matching element to find the next. + * If set to NULL the first matching element is returned. + * @param key matching key + * @param flags a collection of AV_DICT_* flags controlling how the entry is retrieved + * @return found entry or NULL in case no matching entry was found in the dictionary + */ +AVDictionaryEntry *liteav_av_dict_get(const AVDictionary *m, const char *key, + const AVDictionaryEntry *prev, int flags); + +/** + * Get number of entries in dictionary. + * + * @param m dictionary + * @return number of entries in dictionary + */ +int liteav_av_dict_count(const AVDictionary *m); + +/** + * Set the given entry in *pm, overwriting an existing entry. + * + * Note: If AV_DICT_DONT_STRDUP_KEY or AV_DICT_DONT_STRDUP_VAL is set, + * these arguments will be freed on error. + * + * Warning: Adding a new entry to a dictionary invalidates all existing entries + * previously returned with liteav_av_dict_get. + * + * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL + * a dictionary struct is allocated and put in *pm. + * @param key entry key to add to *pm (will either be av_strduped or added as a new key depending on flags) + * @param value entry value to add to *pm (will be av_strduped or added as a new key depending on flags). + * Passing a NULL value will cause an existing entry to be deleted. + * @return >= 0 on success otherwise an error code <0 + */ +int liteav_av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags); + +/** + * Convenience wrapper for liteav_av_dict_set that converts the value to a string + * and stores it. + * + * Note: If AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error. + */ +int liteav_av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags); + +/** + * Parse the key/value pairs list and add the parsed entries to a dictionary. + * + * In case of failure, all the successfully set entries are stored in + * *pm. 
You may need to manually free the created dictionary. + * + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other + * @param flags flags to use when adding to dictionary. + * AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL + * are ignored since the key/value tokens will always + * be duplicated. + * @return 0 on success, negative AVERROR code on failure + */ +int liteav_av_dict_parse_string(AVDictionary **pm, const char *str, + const char *key_val_sep, const char *pairs_sep, + int flags); + +/** + * Copy entries from one AVDictionary struct into another. + * @param dst pointer to a pointer to a AVDictionary struct. If *dst is NULL, + * this function will allocate a struct for you and put it in *dst + * @param src pointer to source AVDictionary struct + * @param flags flags to use when setting entries in *dst + * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag + * @return 0 on success, negative AVERROR code on failure. If dst was allocated + * by this function, callers should free the associated memory. + */ +int liteav_av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags); + +/** + * Free all the memory allocated for an AVDictionary struct + * and all keys and values. + */ +void liteav_av_dict_free(AVDictionary **m); + +/** + * Get dictionary entries as a string. + * + * Create a string containing dictionary's entries. + * Such string may be passed back to liteav_av_dict_parse_string(). + * @note String is escaped with backslashes ('\'). + * + * @param[in] m dictionary + * @param[out] buffer Pointer to buffer that will be allocated with string containg entries. + * Buffer must be freed by the caller when is no longer needed. 
+ * @param[in] key_val_sep character used to separate key from value + * @param[in] pairs_sep character used to separate two pairs from each other + * @return >= 0 on success, negative on error + * @warning Separators cannot be neither '\\' nor '\0'. They also cannot be the same. + */ +int liteav_av_dict_get_string(const AVDictionary *m, char **buffer, + const char key_val_sep, const char pairs_sep); + +/** + * @} + */ + +#endif /* AVUTIL_DICT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/display.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/display.h new file mode 100644 index 0000000..c492b96 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/display.h @@ -0,0 +1,115 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2014 Vittorio Giovara <vittorio.giovara@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Display matrix + */ + +#ifndef AVUTIL_DISPLAY_H +#define AVUTIL_DISPLAY_H + +#include <stdint.h> +#include "common.h" + +/** + * @addtogroup lavu_video + * @{ + * + * @defgroup lavu_video_display Display transformation matrix functions + * @{ + */ + +/** + * @addtogroup lavu_video_display + * The display transformation matrix specifies an affine transformation that + * should be applied to video frames for correct presentation. It is compatible + * with the matrices stored in the ISO/IEC 14496-12 container format. + * + * The data is a 3x3 matrix represented as a 9-element array: + * + * @code{.unparsed} + * | a b u | + * (a, b, u, c, d, v, x, y, w) -> | c d v | + * | x y w | + * @endcode + * + * All numbers are stored in native endianness, as 16.16 fixed-point values, + * except for u, v and w, which are stored as 2.30 fixed-point values. + * + * The transformation maps a point (p, q) in the source (pre-transformation) + * frame to the point (p', q') in the destination (post-transformation) frame as + * follows: + * + * @code{.unparsed} + * | a b u | + * (p, q, 1) . | c d v | = z * (p', q', 1) + * | x y w | + * @endcode + * + * The transformation can also be more explicitly written in components as + * follows: + * + * @code{.unparsed} + * p' = (a * p + c * q + x) / z; + * q' = (b * p + d * q + y) / z; + * z = u * p + v * q + w + * @endcode + */ + +/** + * Extract the rotation component of the transformation matrix. + * + * @param matrix the transformation matrix + * @return the angle (in degrees) by which the transformation rotates the frame + * counterclockwise. The angle will be in range [-180.0, 180.0], + * or NaN if the matrix is singular. 
+ * + * @note floating point numbers are inherently inexact, so callers are + * recommended to round the return value to nearest integer before use. + */ +double liteav_av_display_rotation_get(const int32_t matrix[9]); + +/** + * Initialize a transformation matrix describing a pure counterclockwise + * rotation by the specified angle (in degrees). + * + * @param matrix an allocated transformation matrix (will be fully overwritten + * by this function) + * @param angle rotation angle in degrees. + */ +void liteav_av_display_rotation_set(int32_t matrix[9], double angle); + +/** + * Flip the input matrix horizontally and/or vertically. + * + * @param matrix an allocated transformation matrix + * @param hflip whether the matrix should be flipped horizontally + * @param vflip whether the matrix should be flipped vertically + */ +void liteav_av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_DISPLAY_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/dovi_meta.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/dovi_meta.h new file mode 100644 index 0000000..51a6533 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/dovi_meta.h @@ -0,0 +1,71 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2020 Vacing Fang <vacingfang@tencent.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * DOVI configuration + */ + + +#ifndef AVUTIL_DOVI_META_H +#define AVUTIL_DOVI_META_H + +#include <stdint.h> +#include <stddef.h> + +/* + * DOVI configuration + * ref: dolby-vision-bitstreams-within-the-iso-base-media-file-format-v2.1.2 + dolby-vision-bitstreams-in-mpeg-2-transport-stream-multiplex-v1.2 + * @code + * uint8_t dv_version_major, the major version number that the stream complies with + * uint8_t dv_version_minor, the minor version number that the stream complies with + * uint8_t dv_profile, the Dolby Vision profile + * uint8_t dv_level, the Dolby Vision level + * uint8_t rpu_present_flag + * uint8_t el_present_flag + * uint8_t bl_present_flag + * uint8_t dv_bl_signal_compatibility_id + * @endcode + * + * @note The struct must be allocated with liteav_av_dovi_alloc() and + * its size is not a part of the public ABI. + */ +typedef struct AVDOVIDecoderConfigurationRecord { + uint8_t dv_version_major; + uint8_t dv_version_minor; + uint8_t dv_profile; + uint8_t dv_level; + uint8_t rpu_present_flag; + uint8_t el_present_flag; + uint8_t bl_present_flag; + uint8_t dv_bl_signal_compatibility_id; +} AVDOVIDecoderConfigurationRecord; + +/** + * Allocate a AVDOVIDecoderConfigurationRecord structure and initialize its + * fields to default values. 
+ * + * @return the newly allocated struct or NULL on failure + */ +AVDOVIDecoderConfigurationRecord *liteav_av_dovi_alloc(size_t *size); + +#endif /* AVUTIL_DOVI_META_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/downmix_info.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/downmix_info.h new file mode 100644 index 0000000..c42bd0a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/downmix_info.h @@ -0,0 +1,116 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2014 Tim Walker <tdskywalker@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_DOWNMIX_INFO_H +#define AVUTIL_DOWNMIX_INFO_H + +#include "frame.h" + +/** + * @file + * audio downmix medatata + */ + +/** + * @addtogroup lavu_audio + * @{ + */ + +/** + * @defgroup downmix_info Audio downmix metadata + * @{ + */ + +/** + * Possible downmix types. + */ +enum AVDownmixType { + AV_DOWNMIX_TYPE_UNKNOWN, /**< Not indicated. */ + AV_DOWNMIX_TYPE_LORO, /**< Lo/Ro 2-channel downmix (Stereo). 
*/ + AV_DOWNMIX_TYPE_LTRT, /**< Lt/Rt 2-channel downmix, Dolby Surround compatible. */ + AV_DOWNMIX_TYPE_DPLII, /**< Lt/Rt 2-channel downmix, Dolby Pro Logic II compatible. */ + AV_DOWNMIX_TYPE_NB /**< Number of downmix types. Not part of ABI. */ +}; + +/** + * This structure describes optional metadata relevant to a downmix procedure. + * + * All fields are set by the decoder to the value indicated in the audio + * bitstream (if present), or to a "sane" default otherwise. + */ +typedef struct AVDownmixInfo { + /** + * Type of downmix preferred by the mastering engineer. + */ + enum AVDownmixType preferred_downmix_type; + + /** + * Absolute scale factor representing the nominal level of the center + * channel during a regular downmix. + */ + double center_mix_level; + + /** + * Absolute scale factor representing the nominal level of the center + * channel during an Lt/Rt compatible downmix. + */ + double center_mix_level_ltrt; + + /** + * Absolute scale factor representing the nominal level of the surround + * channels during a regular downmix. + */ + double surround_mix_level; + + /** + * Absolute scale factor representing the nominal level of the surround + * channels during an Lt/Rt compatible downmix. + */ + double surround_mix_level_ltrt; + + /** + * Absolute scale factor representing the level at which the LFE data is + * mixed into L/R channels during downmixing. + */ + double lfe_mix_level; +} AVDownmixInfo; + +/** + * Get a frame's AV_FRAME_DATA_DOWNMIX_INFO side data for editing. + * + * If the side data is absent, it is created and added to the frame. + * + * @param frame the frame for which the side data is to be obtained or created + * + * @return the AVDownmixInfo structure to be edited by the caller, or NULL if + * the structure cannot be allocated. 
+ */ +AVDownmixInfo *liteav_av_downmix_info_update_side_data(AVFrame *frame); + +/** + * @} + */ + +/** + * @} + */ + +#endif /* AVUTIL_DOWNMIX_INFO_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/encryption_info.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/encryption_info.h new file mode 100644 index 0000000..f7ee2ee --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/encryption_info.h @@ -0,0 +1,206 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/** + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_ENCRYPTION_INFO_H +#define AVUTIL_ENCRYPTION_INFO_H + +#include <stddef.h> +#include <stdint.h> + +typedef struct AVSubsampleEncryptionInfo { + /** The number of bytes that are clear. */ + unsigned int bytes_of_clear_data; + + /** + * The number of bytes that are protected. If using pattern encryption, + * the pattern applies to only the protected bytes; if not using pattern + * encryption, all these bytes are encrypted. 
+ */ + unsigned int bytes_of_protected_data; +} AVSubsampleEncryptionInfo; + +/** + * This describes encryption info for a packet. This contains frame-specific + * info for how to decrypt the packet before passing it to the decoder. + * + * The size of this struct is not part of the public ABI. + */ +typedef struct AVEncryptionInfo { + /** The fourcc encryption scheme, in big-endian byte order. */ + uint32_t scheme; + + /** + * Only used for pattern encryption. This is the number of 16-byte blocks + * that are encrypted. + */ + uint32_t crypt_byte_block; + + /** + * Only used for pattern encryption. This is the number of 16-byte blocks + * that are clear. + */ + uint32_t skip_byte_block; + + /** + * The ID of the key used to encrypt the packet. This should always be + * 16 bytes long, but may be changed in the future. + */ + uint8_t *key_id; + uint32_t key_id_size; + + /** + * The initialization vector. This may have been zero-filled to be the + * correct block size. This should always be 16 bytes long, but may be + * changed in the future. + */ + uint8_t *iv; + uint32_t iv_size; + + /** + * An array of subsample encryption info specifying how parts of the sample + * are encrypted. If there are no subsamples, then the whole sample is + * encrypted. + */ + AVSubsampleEncryptionInfo *subsamples; + uint32_t subsample_count; +} AVEncryptionInfo; + +/** + * This describes info used to initialize an encryption key system. + * + * The size of this struct is not part of the public ABI. + */ +typedef struct AVEncryptionInitInfo { + /** + * A unique identifier for the key system this is for, can be NULL if it + * is not known. This should always be 16 bytes, but may change in the + * future. + */ + uint8_t* system_id; + uint32_t system_id_size; + + /** + * An array of key IDs this initialization data is for. All IDs are the + * same length. Can be NULL if there are no known key IDs. + */ + uint8_t** key_ids; + /** The number of key IDs. 
*/ + uint32_t num_key_ids; + /** + * The number of bytes in each key ID. This should always be 16, but may + * change in the future. + */ + uint32_t key_id_size; + + /** + * Key-system specific initialization data. This data is copied directly + * from the file and the format depends on the specific key system. This + * can be NULL if there is no initialization data; in that case, there + * will be at least one key ID. + */ + uint8_t* data; + uint32_t data_size; + + /** + * An optional pointer to the next initialization info in the list. + */ + struct AVEncryptionInitInfo *next; +} AVEncryptionInitInfo; + +/** + * Allocates an AVEncryptionInfo structure and sub-pointers to hold the given + * number of subsamples. This will allocate pointers for the key ID, IV, + * and subsample entries, set the size members, and zero-initialize the rest. + * + * @param subsample_count The number of subsamples. + * @param key_id_size The number of bytes in the key ID, should be 16. + * @param iv_size The number of bytes in the IV, should be 16. + * + * @return The new AVEncryptionInfo structure, or NULL on error. + */ +AVEncryptionInfo *liteav_av_encryption_info_alloc(uint32_t subsample_count, uint32_t key_id_size, uint32_t iv_size); + +/** + * Allocates an AVEncryptionInfo structure with a copy of the given data. + * @return The new AVEncryptionInfo structure, or NULL on error. + */ +AVEncryptionInfo *liteav_av_encryption_info_clone(const AVEncryptionInfo *info); + +/** + * Frees the given encryption info object. This MUST NOT be used to free the + * side-data data pointer, that should use normal side-data methods. + */ +void liteav_av_encryption_info_free(AVEncryptionInfo *info); + +/** + * Creates a copy of the AVEncryptionInfo that is contained in the given side + * data. The resulting object should be passed to liteav_av_encryption_info_free() + * when done. + * + * @return The new AVEncryptionInfo structure, or NULL on error. 
+ */ +AVEncryptionInfo *liteav_av_encryption_info_get_side_data(const uint8_t *side_data, size_t side_data_size); + +/** + * Allocates and initializes side data that holds a copy of the given encryption + * info. The resulting pointer should be either freed using liteav_av_free or given + * to liteav_av_packet_add_side_data(). + * + * @return The new side-data pointer, or NULL. + */ +uint8_t *liteav_av_encryption_info_add_side_data( + const AVEncryptionInfo *info, size_t *side_data_size); + + +/** + * Allocates an AVEncryptionInitInfo structure and sub-pointers to hold the + * given sizes. This will allocate pointers and set all the fields. + * + * @return The new AVEncryptionInitInfo structure, or NULL on error. + */ +AVEncryptionInitInfo *liteav_av_encryption_init_info_alloc( + uint32_t system_id_size, uint32_t num_key_ids, uint32_t key_id_size, uint32_t data_size); + +/** + * Frees the given encryption init info object. This MUST NOT be used to free + * the side-data data pointer, that should use normal side-data methods. + */ +void liteav_av_encryption_init_info_free(AVEncryptionInitInfo* info); + +/** + * Creates a copy of the AVEncryptionInitInfo that is contained in the given + * side data. The resulting object should be passed to + * liteav_av_encryption_init_info_free() when done. + * + * @return The new AVEncryptionInitInfo structure, or NULL on error. + */ +AVEncryptionInitInfo *liteav_av_encryption_init_info_get_side_data( + const uint8_t* side_data, size_t side_data_size); + +/** + * Allocates and initializes side data that holds a copy of the given encryption + * init info. The resulting pointer should be either freed using liteav_av_free or + * given to liteav_av_packet_add_side_data(). + * + * @return The new side-data pointer, or NULL. 
+ */ +uint8_t *liteav_av_encryption_init_info_add_side_data( + const AVEncryptionInitInfo *info, size_t *side_data_size); + +#endif /* AVUTIL_ENCRYPTION_INFO_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/error.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/error.h new file mode 100644 index 0000000..269fb1c --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/error.h @@ -0,0 +1,134 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * error code definitions + */ + +#ifndef AVUTIL_ERROR_H +#define AVUTIL_ERROR_H + +#include <errno.h> +#include <stddef.h> + +/** + * @addtogroup lavu_error + * + * @{ + */ + + +/* error handling */ +#if EDOM > 0 +#define AVERROR(e) (-(e)) ///< Returns a negative error code from a POSIX error code, to return from library functions. +#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value. +#else +/* Some platforms have E* and errno already negated. 
*/ +#define AVERROR(e) (e) +#define AVUNERROR(e) (e) +#endif + +#define FFERRTAG(a, b, c, d) (-(int)MKTAG(a, b, c, d)) + +#define AVERROR_BSF_NOT_FOUND FFERRTAG(0xF8,'B','S','F') ///< Bitstream filter not found +#define AVERROR_BUG FFERRTAG( 'B','U','G','!') ///< Internal bug, also see AVERROR_BUG2 +#define AVERROR_BUFFER_TOO_SMALL FFERRTAG( 'B','U','F','S') ///< Buffer too small +#define AVERROR_DECODER_NOT_FOUND FFERRTAG(0xF8,'D','E','C') ///< Decoder not found +#define AVERROR_DEMUXER_NOT_FOUND FFERRTAG(0xF8,'D','E','M') ///< Demuxer not found +#define AVERROR_ENCODER_NOT_FOUND FFERRTAG(0xF8,'E','N','C') ///< Encoder not found +#define AVERROR_EOF FFERRTAG( 'E','O','F',' ') ///< End of file +#define AVERROR_EXIT FFERRTAG( 'E','X','I','T') ///< Immediate exit was requested; the called function should not be restarted +#define AVERROR_EXTERNAL FFERRTAG( 'E','X','T',' ') ///< Generic error in an external library +#define AVERROR_FILTER_NOT_FOUND FFERRTAG(0xF8,'F','I','L') ///< Filter not found +#define AVERROR_INVALIDDATA FFERRTAG( 'I','N','D','A') ///< Invalid data found when processing input +#define AVERROR_MUXER_NOT_FOUND FFERRTAG(0xF8,'M','U','X') ///< Muxer not found +#define AVERROR_OPTION_NOT_FOUND FFERRTAG(0xF8,'O','P','T') ///< Option not found +#define AVERROR_PATCHWELCOME FFERRTAG( 'P','A','W','E') ///< Not yet implemented in FFmpeg, patches welcome +#define AVERROR_PROTOCOL_NOT_FOUND FFERRTAG(0xF8,'P','R','O') ///< Protocol not found + +#define AVERROR_STREAM_NOT_FOUND FFERRTAG(0xF8,'S','T','R') ///< Stream not found +/** + * This is semantically identical to AVERROR_BUG + * it has been introduced in Libav after our AVERROR_BUG and with a modified value. + */ +#define AVERROR_BUG2 FFERRTAG( 'B','U','G',' ') +#define AVERROR_UNKNOWN FFERRTAG( 'U','N','K','N') ///< Unknown error, typically from an external library +#define AVERROR_EXPERIMENTAL (-0x2bb2afa8) ///< Requested feature is flagged experimental. 
Set strict_std_compliance if you really want to use it. +#define AVERROR_INPUT_CHANGED (-0x636e6701) ///< Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED) +#define AVERROR_OUTPUT_CHANGED (-0x636e6702) ///< Output changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_INPUT_CHANGED) +/* HTTP & RTSP errors */ +#define AVERROR_HTTP_BAD_REQUEST FFERRTAG(0xF8,'4','0','0') +#define AVERROR_HTTP_UNAUTHORIZED FFERRTAG(0xF8,'4','0','1') +#define AVERROR_HTTP_FORBIDDEN FFERRTAG(0xF8,'4','0','3') +#define AVERROR_HTTP_NOT_FOUND FFERRTAG(0xF8,'4','0','4') +#define AVERROR_HTTP_OTHER_4XX FFERRTAG(0xF8,'4','X','X') +#define AVERROR_HTTP_SERVER_ERROR FFERRTAG(0xF8,'5','X','X') + +#define AV_ERROR_MAX_STRING_SIZE 64 + +#define AVERROR_NETERROR FFERRTAG( 'N','E','T','E') ///< Net Error + +/** + * Define this error, to find the stream error which may be caused by downloading component's giving some wrong data. + */ +#define AVERROR_STRMERROR FFERRTAG( 'S','T','M','E') ///< Stream Error + +/** + * Put a description of the AVERROR code errnum in errbuf. + * In case of failure the global variable errno is set to indicate the + * error. Even in case of failure liteav_av_strerror() will print a generic + * error message indicating the errnum provided to errbuf. + * + * @param errnum error code to describe + * @param errbuf buffer to which description is written + * @param errbuf_size the size in bytes of errbuf + * @return 0 on success, a negative value if a description for errnum + * cannot be found + */ +int liteav_av_strerror(int errnum, char *errbuf, size_t errbuf_size); + +/** + * Fill the provided buffer with a string containing an error string + * corresponding to the AVERROR code errnum. 
+ * + * @param errbuf a buffer + * @param errbuf_size size in bytes of errbuf + * @param errnum error code to describe + * @return the buffer in input, filled with the error description + * @see liteav_av_strerror() + */ +static inline char *av_make_error_string(char *errbuf, size_t errbuf_size, int errnum) +{ + liteav_av_strerror(errnum, errbuf, errbuf_size); + return errbuf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. + */ +#define av_err2str(errnum) \ + av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum) + +/** + * @} + */ + +#endif /* AVUTIL_ERROR_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/eval.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/eval.h new file mode 100644 index 0000000..33b1169 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/eval.h @@ -0,0 +1,114 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simple arithmetic expression evaluator + */ + +#ifndef AVUTIL_EVAL_H +#define AVUTIL_EVAL_H + +#include "avutil.h" + +typedef struct AVExpr AVExpr; + +/** + * Parse and evaluate an expression. + * Note, this is significantly slower than liteav_av_expr_eval(). + * + * @param res a pointer to a double where is put the result value of + * the expression, or NAN in case of error + * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)" + * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0} + * @param const_values a zero terminated array of values for the identifiers from const_names + * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers + * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument + * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers + * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments + * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 + * @param log_ctx parent logging context + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int liteav_av_expr_parse_and_eval(double *res, const char *s, + const char * const *const_names, const double *const_values, + const char * const *func1_names, double (* const *funcs1)(void *, double), + const char * const *func2_names, double (* const *funcs2)(void *, double, double), + void *opaque, int log_offset, void *log_ctx); + +/** + * Parse an expression. 
+ * + * @param expr a pointer where is put an AVExpr containing the parsed + * value in case of successful parsing, or NULL otherwise. + * The pointed to AVExpr must be freed with liteav_av_expr_free() by the user + * when it is not needed anymore. + * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)" + * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0} + * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers + * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument + * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers + * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments + * @param log_ctx parent logging context + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int liteav_av_expr_parse(AVExpr **expr, const char *s, + const char * const *const_names, + const char * const *func1_names, double (* const *funcs1)(void *, double), + const char * const *func2_names, double (* const *funcs2)(void *, double, double), + int log_offset, void *log_ctx); + +/** + * Evaluate a previously parsed expression. + * + * @param const_values a zero terminated array of values for the identifiers from liteav_av_expr_parse() const_names + * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 + * @return the value of the expression + */ +double liteav_av_expr_eval(AVExpr *e, const double *const_values, void *opaque); + +/** + * Free a parsed expression previously created with liteav_av_expr_parse(). + */ +void liteav_av_expr_free(AVExpr *e); + +/** + * Parse the string in numstr and return its value as a double. 
If + * the string is empty, contains only whitespaces, or does not contain + * an initial substring that has the expected syntax for a + * floating-point number, no conversion is performed. In this case, + * returns a value of zero and the value returned in tail is the value + * of numstr. + * + * @param numstr a string representing a number, may contain one of + * the International System number postfixes, for example 'K', 'M', + * 'G'. If 'i' is appended after the postfix, powers of 2 are used + * instead of powers of 10. The 'B' postfix multiplies the value by + * 8, and can be appended after another postfix or used alone. This + * allows using for example 'KB', 'MiB', 'G' and 'B' as postfix. + * @param tail if non-NULL puts here the pointer to the char next + * after the last parsed character + */ +double liteav_av_strtod(const char *numstr, char **tail); + +#endif /* AVUTIL_EVAL_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/ffversion.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/ffversion.h new file mode 100644 index 0000000..57873ec --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/ffversion.h @@ -0,0 +1,5 @@ +/* Automatically generated by version.sh, do not manually edit! 
*/ +#ifndef AVUTIL_FFVERSION_H +#define AVUTIL_FFVERSION_H +#define FFMPEG_VERSION "332c5c1-4.3.1" +#endif /* AVUTIL_FFVERSION_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/fifo.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/fifo.h new file mode 100644 index 0000000..e13ddd1 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/fifo.h @@ -0,0 +1,180 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * a very simple circular buffer FIFO implementation + */ + +#ifndef AVUTIL_FIFO_H +#define AVUTIL_FIFO_H + +#include <stdint.h> +#include "avutil.h" +#include "attributes.h" + +typedef struct AVFifoBuffer { + uint8_t *buffer; + uint8_t *rptr, *wptr, *end; + uint32_t rndx, wndx; +} AVFifoBuffer; + +/** + * Initialize an AVFifoBuffer. + * @param size of FIFO + * @return AVFifoBuffer or NULL in case of memory allocation failure + */ +AVFifoBuffer *liteav_av_fifo_alloc(unsigned int size); + +/** + * Initialize an AVFifoBuffer. 
+ * @param nmemb number of elements + * @param size size of the single element + * @return AVFifoBuffer or NULL in case of memory allocation failure + */ +AVFifoBuffer *liteav_av_fifo_alloc_array(size_t nmemb, size_t size); + +/** + * Free an AVFifoBuffer. + * @param f AVFifoBuffer to free + */ +void liteav_av_fifo_free(AVFifoBuffer *f); + +/** + * Free an AVFifoBuffer and reset pointer to NULL. + * @param f AVFifoBuffer to free + */ +void liteav_av_fifo_freep(AVFifoBuffer **f); + +/** + * Reset the AVFifoBuffer to the state right after liteav_av_fifo_alloc, in particular it is emptied. + * @param f AVFifoBuffer to reset + */ +void liteav_av_fifo_reset(AVFifoBuffer *f); + +/** + * Return the amount of data in bytes in the AVFifoBuffer, that is the + * amount of data you can read from it. + * @param f AVFifoBuffer to read from + * @return size + */ +int liteav_av_fifo_size(const AVFifoBuffer *f); + +/** + * Return the amount of space in bytes in the AVFifoBuffer, that is the + * amount of data you can write into it. + * @param f AVFifoBuffer to write into + * @return size + */ +int liteav_av_fifo_space(const AVFifoBuffer *f); + +/** + * Feed data at specific position from an AVFifoBuffer to a user-supplied callback. + * Similar as av_fifo_gereric_read but without discarding data. + * @param f AVFifoBuffer to read from + * @param offset offset from current read position + * @param buf_size number of bytes to read + * @param func generic read function + * @param dest data destination + */ +int liteav_av_fifo_generic_peek_at(AVFifoBuffer *f, void *dest, int offset, int buf_size, void (*func)(void*, void*, int)); + +/** + * Feed data from an AVFifoBuffer to a user-supplied callback. + * Similar as av_fifo_gereric_read but without discarding data. 
+ * @param f AVFifoBuffer to read from + * @param buf_size number of bytes to read + * @param func generic read function + * @param dest data destination + */ +int liteav_av_fifo_generic_peek(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int)); + +/** + * Feed data from an AVFifoBuffer to a user-supplied callback. + * @param f AVFifoBuffer to read from + * @param buf_size number of bytes to read + * @param func generic read function + * @param dest data destination + */ +int liteav_av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int)); + +/** + * Feed data from a user-supplied callback to an AVFifoBuffer. + * @param f AVFifoBuffer to write to + * @param src data source; non-const since it may be used as a + * modifiable context by the function defined in func + * @param size number of bytes to write + * @param func generic write function; the first parameter is src, + * the second is dest_buf, the third is dest_buf_size. + * func must return the number of bytes written to dest_buf, or <= 0 to + * indicate no more data available to write. + * If func is NULL, src is interpreted as a simple byte array for source data. + * @return the number of bytes written to the FIFO + */ +int liteav_av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int)); + +/** + * Resize an AVFifoBuffer. + * In case of reallocation failure, the old FIFO is kept unchanged. + * + * @param f AVFifoBuffer to resize + * @param size new AVFifoBuffer size in bytes + * @return <0 for failure, >=0 otherwise + */ +int liteav_av_fifo_realloc2(AVFifoBuffer *f, unsigned int size); + +/** + * Enlarge an AVFifoBuffer. + * In case of reallocation failure, the old FIFO is kept unchanged. + * The new fifo size may be larger than the requested size. 
+ * + * @param f AVFifoBuffer to resize + * @param additional_space the amount of space in bytes to allocate in addition to liteav_av_fifo_size() + * @return <0 for failure, >=0 otherwise + */ +int liteav_av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space); + +/** + * Read and discard the specified amount of data from an AVFifoBuffer. + * @param f AVFifoBuffer to read from + * @param size amount of data to read in bytes + */ +void liteav_av_fifo_drain(AVFifoBuffer *f, int size); + +/** + * Return a pointer to the data stored in a FIFO buffer at a certain offset. + * The FIFO buffer is not modified. + * + * @param f AVFifoBuffer to peek at, f must be non-NULL + * @param offs an offset in bytes, its absolute value must be less + * than the used buffer size or the returned pointer will + * point outside to the buffer data. + * The used buffer size can be checked with liteav_av_fifo_size(). + */ +static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs) +{ + uint8_t *ptr = f->rptr + offs; + if (ptr >= f->end) + ptr = f->buffer + (ptr - f->end); + else if (ptr < f->buffer) + ptr = f->end - (f->buffer - ptr); + return ptr; +} + +#endif /* AVUTIL_FIFO_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/file.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/file.h new file mode 100644 index 0000000..a3050de --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/file.h @@ -0,0 +1,72 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_FILE_H +#define AVUTIL_FILE_H + +#include <stdint.h> + +#include "avutil.h" + +/** + * @file + * Misc file utilities. + */ + +/** + * Read the file with name filename, and put its content in a newly + * allocated buffer or map it with mmap() when available. + * In case of success set *bufptr to the read or mmapped buffer, and + * *size to the size in bytes of the buffer in *bufptr. + * Unlike mmap this function succeeds with zero sized files, in this + * case *bufptr will be set to NULL and *size will be set to 0. + * The returned buffer must be released with liteav_av_file_unmap(). + * + * @param log_offset loglevel offset used for logging + * @param log_ctx context used for logging + * @return a non negative number in case of success, a negative value + * corresponding to an AVERROR error code in case of failure + */ +av_warn_unused_result +int liteav_av_file_map(const char *filename, uint8_t **bufptr, size_t *size, + int log_offset, void *log_ctx); + +/** + * Unmap or free the buffer bufptr created by liteav_av_file_map(). + * + * @param size size in bytes of bufptr, must be the same as returned + * by liteav_av_file_map() + */ +void liteav_av_file_unmap(uint8_t *bufptr, size_t size); + +/** + * Wrapper to work around the lack of mkstemp() on mingw. + * Also, tries to create file in /tmp first, if possible. + * *prefix can be a character constant; *filename will be allocated internally. 
+ * @return file descriptor of opened file (or negative value corresponding to an + * AVERROR code on error) + * and opened file name in **filename. + * @note On very old libcs it is necessary to set a secure umask before + * calling this, liteav_av_tempfile() can't call umask itself as it is used in + * libraries and could interfere with the calling application. + * @deprecated as fd numbers cannot be passed saftely between libs on some platforms + */ +int liteav_av_tempfile(const char *prefix, char **filename, int log_offset, void *log_ctx); + +#endif /* AVUTIL_FILE_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/frame.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/frame.h new file mode 100644 index 0000000..2701a70 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/frame.h @@ -0,0 +1,902 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_frame + * reference-counted frame API + */ + +#ifndef AVUTIL_FRAME_H +#define AVUTIL_FRAME_H + +#include <stddef.h> +#include <stdint.h> + +#include "avutil.h" +#include "buffer.h" +#include "dict.h" +#include "rational.h" +#include "samplefmt.h" +#include "pixfmt.h" +#include "version.h" + + +/** + * @defgroup lavu_frame AVFrame + * @ingroup lavu_data + * + * @{ + * AVFrame is an abstraction for reference-counted raw multimedia data. + */ + +enum AVFrameSideDataType { + /** + * The data is the AVPanScan struct defined in libavcodec. + */ + AV_FRAME_DATA_PANSCAN, + /** + * ATSC A53 Part 4 Closed Captions. + * A53 CC bitstream is stored as uint8_t in AVFrameSideData.data. + * The number of bytes of CC data is AVFrameSideData.size. + */ + AV_FRAME_DATA_A53_CC, + /** + * Stereoscopic 3d metadata. + * The data is the AVStereo3D struct defined in libavutil/stereo3d.h. + */ + AV_FRAME_DATA_STEREO3D, + /** + * The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h. + */ + AV_FRAME_DATA_MATRIXENCODING, + /** + * Metadata relevant to a downmix procedure. + * The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h. + */ + AV_FRAME_DATA_DOWNMIX_INFO, + /** + * ReplayGain information in the form of the AVReplayGain struct. + */ + AV_FRAME_DATA_REPLAYGAIN, + /** + * This side data contains a 3x3 transformation matrix describing an affine + * transformation that needs to be applied to the frame for correct + * presentation. + * + * See libavutil/display.h for a detailed description of the data. + */ + AV_FRAME_DATA_DISPLAYMATRIX, + /** + * Active Format Description data consisting of a single byte as specified + * in ETSI TS 101 154 using AVActiveFormatDescription enum. 
+ */ + AV_FRAME_DATA_AFD, + /** + * Motion vectors exported by some codecs (on demand through the export_mvs + * flag set in the libavcodec AVCodecContext flags2 option). + * The data is the AVMotionVector struct defined in + * libavutil/motion_vector.h. + */ + AV_FRAME_DATA_MOTION_VECTORS, + /** + * Recommmends skipping the specified number of samples. This is exported + * only if the "skip_manual" AVOption is set in libavcodec. + * This has the same format as AV_PKT_DATA_SKIP_SAMPLES. + * @code + * u32le number of samples to skip from start of this packet + * u32le number of samples to skip from end of this packet + * u8 reason for start skip + * u8 reason for end skip (0=padding silence, 1=convergence) + * @endcode + */ + AV_FRAME_DATA_SKIP_SAMPLES, + /** + * This side data must be associated with an audio frame and corresponds to + * enum AVAudioServiceType defined in avcodec.h. + */ + AV_FRAME_DATA_AUDIO_SERVICE_TYPE, + /** + * Mastering display metadata associated with a video frame. The payload is + * an AVMasteringDisplayMetadata type and contains information about the + * mastering display color volume. + */ + AV_FRAME_DATA_MASTERING_DISPLAY_METADATA, + /** + * The GOP timecode in 25 bit timecode format. Data format is 64-bit integer. + * This is set on the first frame of a GOP that has a temporal reference of 0. + */ + AV_FRAME_DATA_GOP_TIMECODE, + + /** + * The data represents the AVSphericalMapping structure defined in + * libavutil/spherical.h. + */ + AV_FRAME_DATA_SPHERICAL, + + /** + * Content light level (based on CTA-861.3). This payload contains data in + * the form of the AVContentLightMetadata struct. + */ + AV_FRAME_DATA_CONTENT_LIGHT_LEVEL, + + /** + * The data contains an ICC profile as an opaque octet buffer following the + * format described by ISO 15076-1 with an optional name defined in the + * metadata key entry "name". 
+ */ + AV_FRAME_DATA_ICC_PROFILE, + +#if FF_API_FRAME_QP + /** + * Implementation-specific description of the format of AV_FRAME_QP_TABLE_DATA. + * The contents of this side data are undocumented and internal; use + * liteav_av_frame_set_qp_table() and liteav_av_frame_get_qp_table() to access this in a + * meaningful way instead. + */ + AV_FRAME_DATA_QP_TABLE_PROPERTIES, + + /** + * Raw QP table data. Its format is described by + * AV_FRAME_DATA_QP_TABLE_PROPERTIES. Use liteav_av_frame_set_qp_table() and + * liteav_av_frame_get_qp_table() to access this instead. + */ + AV_FRAME_DATA_QP_TABLE_DATA, +#endif + + /** + * Timecode which conforms to SMPTE ST 12-1. The data is an array of 4 uint32_t + * where the first uint32_t describes how many (1-3) of the other timecodes are used. + * The timecode format is described in the liteav_av_timecode_get_smpte_from_framenum() + * function in libavutil/timecode.c. + */ + AV_FRAME_DATA_S12M_TIMECODE, +}; + +enum AVActiveFormatDescription { + AV_AFD_SAME = 8, + AV_AFD_4_3 = 9, + AV_AFD_16_9 = 10, + AV_AFD_14_9 = 11, + AV_AFD_4_3_SP_14_9 = 13, + AV_AFD_16_9_SP_14_9 = 14, + AV_AFD_SP_4_3 = 15, +}; + + +/** + * Structure to hold side data for an AVFrame. + * + * sizeof(AVFrameSideData) is not a part of the public ABI, so new fields may be added + * to the end with a minor bump. + */ +typedef struct AVFrameSideData { + enum AVFrameSideDataType type; + uint8_t *data; + int size; + AVDictionary *metadata; + AVBufferRef *buf; +} AVFrameSideData; + +/** + * This structure describes decoded (raw) audio or video data. + * + * AVFrame must be allocated using liteav_av_frame_alloc(). Note that this only + * allocates the AVFrame itself, the buffers for the data must be managed + * through other means (see below). + * AVFrame must be freed with liteav_av_frame_free(). + * + * AVFrame is typically allocated once and then reused multiple times to hold + * different data (e.g. a single AVFrame to hold frames received from a + * decoder). 
In such a case, liteav_av_frame_unref() will free any references held by + * the frame and reset it to its original clean state before it + * is reused again. + * + * The data described by an AVFrame is usually reference counted through the + * AVBuffer API. The underlying buffer references are stored in AVFrame.buf / + * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at + * least one reference is set, i.e. if AVFrame.buf[0] != NULL. In such a case, + * every single data plane must be contained in one of the buffers in + * AVFrame.buf or AVFrame.extended_buf. + * There may be a single buffer for all the data, or one separate buffer for + * each plane, or anything in between. + * + * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added + * to the end with a minor bump. + * + * Fields can be accessed through AVOptions, the name string used, matches the + * C structure field name for fields accessible through AVOptions. The AVClass + * for AVFrame can be obtained from avcodec_get_frame_class() + */ +typedef struct AVFrame { +#define AV_NUM_DATA_POINTERS 8 + /** + * pointer to the picture/channel planes. + * This might be different from the first allocated byte + * + * Some decoders access areas outside 0,0 - width,height, please + * see avcodec_align_dimensions2(). Some filters and swscale can read + * up to 16 bytes beyond the planes, if these filters are to be used, + * then 16 extra bytes must be allocated. + * + * NOTE: Except for hwaccel formats, pointers not needed by the format + * MUST be set to NULL. + */ + uint8_t *data[AV_NUM_DATA_POINTERS]; + + /** + * For video, size in bytes of each picture line. + * For audio, size in bytes of each plane. + * + * For audio, only linesize[0] may be set. For planar audio, each channel + * plane must be the same size. + * + * For video the linesizes should be multiples of the CPUs alignment + * preference, this is 16 or 32 for modern desktop CPUs. 
+ * Some code requires such alignment other code can be slower without + * correct alignment, for yet other it makes no difference. + * + * @note The linesize may be larger than the size of usable data -- there + * may be extra padding present for performance reasons. + */ + int linesize[AV_NUM_DATA_POINTERS]; + + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. + * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. + * + * Note: Both data and extended_data should always be set in a valid frame, + * but for planar audio with more channels that can fit in data, + * extended_data must be used in order to access all channels. + */ + uint8_t **extended_data; + + /** + * @name Video dimensions + * Video frames only. The coded dimensions (in pixels) of the video frame, + * i.e. the size of the rectangle that contains some well-defined values. + * + * @note The part of the frame intended for display/presentation is further + * restricted by the @ref cropping "Cropping rectangle". + * @{ + */ + int width, height; + /** + * @} + */ + + /** + * number of audio samples (per channel) described by this frame + */ + int nb_samples; + + /** + * format of the frame, -1 if unknown or unset + * Values correspond to enum AVPixelFormat for video frames, + * enum AVSampleFormat for audio) + */ + int format; + + /** + * 1 -> keyframe, 0-> not + */ + int key_frame; + + /** + * Picture type of the frame. + */ + enum AVPictureType pict_type; + + /** + * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified. + */ + AVRational sample_aspect_ratio; + + /** + * Presentation timestamp in time_base units (time when frame should be shown to user). 
+ */ + int64_t pts; + +#if FF_API_PKT_PTS + /** + * PTS copied from the AVPacket that was decoded to produce this frame. + * @deprecated use the pts field instead + */ + attribute_deprecated + int64_t pkt_pts; +#endif + + /** + * DTS copied from the AVPacket that triggered returning this frame. (if frame threading isn't used) + * This is also the Presentation time of this AVFrame calculated from + * only AVPacket.dts values without pts values. + */ + int64_t pkt_dts; + + /** + * picture number in bitstream order + */ + int coded_picture_number; + /** + * picture number in display order + */ + int display_picture_number; + + /** + * quality (between 1 (good) and FF_LAMBDA_MAX (bad)) + */ + int quality; + + /** + * for some private data of the user + */ + void *opaque; + +#if FF_API_ERROR_FRAME + /** + * @deprecated unused + */ + attribute_deprecated + uint64_t error[AV_NUM_DATA_POINTERS]; +#endif + + /** + * When decoding, this signals how much the picture must be delayed. + * extra_delay = repeat_pict / (2*fps) + */ + int repeat_pict; + + /** + * The content of the picture is interlaced. + */ + int interlaced_frame; + + /** + * If the content is interlaced, is top field displayed first. + */ + int top_field_first; + + /** + * Tell user application that palette has changed from previous frame. + */ + int palette_has_changed; + + /** + * reordered opaque 64 bits (generally an integer or a double precision float + * PTS but can be anything). + * The user sets AVCodecContext.reordered_opaque to represent the input at + * that time, + * the decoder reorders values as needed and sets AVFrame.reordered_opaque + * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque + * @deprecated in favor of pkt_pts + */ + int64_t reordered_opaque; + + /** + * Sample rate of the audio data. + */ + int sample_rate; + + /** + * Channel layout of the audio data. 
+ */ + uint64_t channel_layout; + + /** + * AVBuffer references backing the data for this frame. If all elements of + * this array are NULL, then this frame is not reference counted. This array + * must be filled contiguously -- if buf[i] is non-NULL then buf[j] must + * also be non-NULL for all j < i. + * + * There may be at most one AVBuffer per data plane, so for video this array + * always contains all the references. For planar audio with more than + * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in + * this array. Then the extra AVBufferRef pointers are stored in the + * extended_buf array. + */ + AVBufferRef *buf[AV_NUM_DATA_POINTERS]; + + /** + * For planar audio which requires more than AV_NUM_DATA_POINTERS + * AVBufferRef pointers, this array will hold all the references which + * cannot fit into AVFrame.buf. + * + * Note that this is different from AVFrame.extended_data, which always + * contains all the pointers. This array only contains the extra pointers, + * which cannot fit into AVFrame.buf. + * + * This array is always allocated using liteav_av_malloc() by whoever constructs + * the frame. It is freed in liteav_av_frame_unref(). + */ + AVBufferRef **extended_buf; + /** + * Number of elements in extended_buf. + */ + int nb_extended_buf; + + AVFrameSideData **side_data; + int nb_side_data; + +/** + * @defgroup lavu_frame_flags AV_FRAME_FLAGS + * @ingroup lavu_frame + * Flags describing additional frame properties. + * + * @{ + */ + +/** + * The frame data may be corrupted, e.g. due to decoding errors. + */ +#define AV_FRAME_FLAG_CORRUPT (1 << 0) +/** + * A flag to mark the frames which need to be decoded, but shouldn't be output. + */ +#define AV_FRAME_FLAG_DISCARD (1 << 2) +/** + * @} + */ + + /** + * Frame flags, a combination of @ref lavu_frame_flags + */ + int flags; + + /** + * MPEG vs JPEG YUV range. 
+ * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorRange color_range; + + enum AVColorPrimaries color_primaries; + + enum AVColorTransferCharacteristic color_trc; + + /** + * YUV colorspace type. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorSpace colorspace; + + enum AVChromaLocation chroma_location; + + /** + * frame timestamp estimated using various heuristics, in stream time base + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int64_t best_effort_timestamp; + + /** + * reordered pos from the last AVPacket that has been input into the decoder + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_pos; + + /** + * duration of the corresponding packet, expressed in + * AVStream->time_base units, 0 if unknown. + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_duration; + + /** + * metadata. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + AVDictionary *metadata; + + /** + * decode error flags of the frame, set to a combination of + * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there + * were errors during the decoding. + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int decode_error_flags; +#define FF_DECODE_ERROR_INVALID_BITSTREAM 1 +#define FF_DECODE_ERROR_MISSING_REFERENCE 2 + + /** + * number of audio channels, only used for audio. + * - encoding: unused + * - decoding: Read by user. + */ + int channels; + + /** + * size of the corresponding packet containing the compressed + * frame. + * It is set to a negative value if unknown. + * - encoding: unused + * - decoding: set by libavcodec, read by user. 
+ */ + int pkt_size; + +#if FF_API_FRAME_QP + /** + * QP table + */ + attribute_deprecated + int8_t *qscale_table; + /** + * QP store stride + */ + attribute_deprecated + int qstride; + + attribute_deprecated + int qscale_type; + + attribute_deprecated + AVBufferRef *qp_table_buf; +#endif + /** + * For hwaccel-format frames, this should be a reference to the + * AVHWFramesContext describing the frame. + */ + AVBufferRef *hw_frames_ctx; + + /** + * AVBufferRef for free use by the API user. FFmpeg will never check the + * contents of the buffer ref. FFmpeg calls liteav_av_buffer_unref() on it when + * the frame is unreferenced. liteav_av_frame_copy_props() calls create a new + * reference with liteav_av_buffer_ref() for the target frame's opaque_ref field. + * + * This is unrelated to the opaque field, although it serves a similar + * purpose. + */ + AVBufferRef *opaque_ref; + + /** + * @anchor cropping + * @name Cropping + * Video frames only. The number of pixels to discard from the the + * top/bottom/left/right border of the frame to obtain the sub-rectangle of + * the frame intended for presentation. + * @{ + */ + size_t crop_top; + size_t crop_bottom; + size_t crop_left; + size_t crop_right; + /** + * @} + */ + + /** + * AVBufferRef for internal use by a single libav* library. + * Must not be used to transfer data between libraries. + * Has to be NULL when ownership of the frame leaves the respective library. + * + * Code outside the FFmpeg libs should never check or change the contents of the buffer ref. + * + * FFmpeg calls liteav_av_buffer_unref() on it when the frame is unreferenced. + * liteav_av_frame_copy_props() calls create a new reference with liteav_av_buffer_ref() + * for the target frame's private_ref field. + */ + AVBufferRef *private_ref; +} AVFrame; + +#if FF_API_FRAME_GET_SET +/** + * Accessors for some AVFrame fields. These used to be provided for ABI + * compatibility, and do not need to be used anymore. 
+ */ +attribute_deprecated +int64_t liteav_av_frame_get_best_effort_timestamp(const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val); +attribute_deprecated +int64_t liteav_av_frame_get_pkt_duration (const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_pkt_duration (AVFrame *frame, int64_t val); +attribute_deprecated +int64_t liteav_av_frame_get_pkt_pos (const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_pkt_pos (AVFrame *frame, int64_t val); +attribute_deprecated +int64_t liteav_av_frame_get_channel_layout (const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_channel_layout (AVFrame *frame, int64_t val); +attribute_deprecated +int liteav_av_frame_get_channels (const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_channels (AVFrame *frame, int val); +attribute_deprecated +int liteav_av_frame_get_sample_rate (const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_sample_rate (AVFrame *frame, int val); +attribute_deprecated +AVDictionary *liteav_av_frame_get_metadata (const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_metadata (AVFrame *frame, AVDictionary *val); +attribute_deprecated +int liteav_av_frame_get_decode_error_flags (const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_decode_error_flags (AVFrame *frame, int val); +attribute_deprecated +int liteav_av_frame_get_pkt_size(const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_pkt_size(AVFrame *frame, int val); +#if FF_API_FRAME_QP +attribute_deprecated +int8_t *liteav_av_frame_get_qp_table(AVFrame *f, int *stride, int *type); +attribute_deprecated +int liteav_av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type); +#endif +attribute_deprecated +enum AVColorSpace liteav_av_frame_get_colorspace(const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_colorspace(AVFrame 
*frame, enum AVColorSpace val); +attribute_deprecated +enum AVColorRange liteav_av_frame_get_color_range(const AVFrame *frame); +attribute_deprecated +void liteav_av_frame_set_color_range(AVFrame *frame, enum AVColorRange val); +#endif + +/** + * Get the name of a colorspace. + * @return a static string identifying the colorspace; can be NULL. + */ +const char *liteav_av_get_colorspace_name(enum AVColorSpace val); + +/** + * Allocate an AVFrame and set its fields to default values. The resulting + * struct must be freed using liteav_av_frame_free(). + * + * @return An AVFrame filled with default values or NULL on failure. + * + * @note this only allocates the AVFrame itself, not the data buffers. Those + * must be allocated through other means, e.g. with liteav_av_frame_get_buffer() or + * manually. + */ +AVFrame *liteav_av_frame_alloc(void); + +/** + * Free the frame and any dynamically allocated objects in it, + * e.g. extended_data. If the frame is reference counted, it will be + * unreferenced first. + * + * @param frame frame to be freed. The pointer will be set to NULL. + */ +void liteav_av_frame_free(AVFrame **frame); + +/** + * Set up a new reference to the data described by the source frame. + * + * Copy frame properties from src to dst and create a new reference for each + * AVBufferRef from src. + * + * If src is not reference counted, new buffers are allocated and the data is + * copied. + * + * @warning: dst MUST have been either unreferenced with liteav_av_frame_unref(dst), + * or newly allocated with liteav_av_frame_alloc() before calling this + * function, or undefined behavior will occur. + * + * @return 0 on success, a negative AVERROR on error + */ +int liteav_av_frame_ref(AVFrame *dst, const AVFrame *src); + +/** + * Create a new frame that references the same data as src. + * + * This is a shortcut for liteav_av_frame_alloc()+liteav_av_frame_ref(). + * + * @return newly created AVFrame on success, NULL on error. 
+ */ +AVFrame *liteav_av_frame_clone(const AVFrame *src); + +/** + * Unreference all the buffers referenced by frame and reset the frame fields. + */ +void liteav_av_frame_unref(AVFrame *frame); + +/** + * Move everything contained in src to dst and reset src. + * + * @warning: dst is not unreferenced, but directly overwritten without reading + * or deallocating its contents. Call liteav_av_frame_unref(dst) manually + * before calling this function to ensure that no memory is leaked. + */ +void liteav_av_frame_move_ref(AVFrame *dst, AVFrame *src); + +/** + * Allocate new buffer(s) for audio or video data. + * + * The following fields must be set on frame before calling this function: + * - format (pixel format for video, sample format for audio) + * - width and height for video + * - nb_samples and channel_layout for audio + * + * This function will fill AVFrame.data and AVFrame.buf arrays and, if + * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf. + * For planar formats, one buffer will be allocated for each plane. + * + * @warning: if frame already has been allocated, calling this function will + * leak memory. In addition, undefined behavior can occur in certain + * cases. + * + * @param frame frame in which to store the new buffers. + * @param align Required buffer size alignment. If equal to 0, alignment will be + * chosen automatically for the current CPU. It is highly + * recommended to pass 0 here unless you know what you are doing. + * + * @return 0 on success, a negative AVERROR on error. + */ +int liteav_av_frame_get_buffer(AVFrame *frame, int align); + +/** + * Check if the frame data is writable. + * + * @return A positive value if the frame data is writable (which is true if and + * only if each of the underlying buffers has only one reference, namely the one + * stored in this frame). Return 0 otherwise. 
+ * + * If 1 is returned the answer is valid until liteav_av_buffer_ref() is called on any + * of the underlying AVBufferRefs (e.g. through liteav_av_frame_ref() or directly). + * + * @see liteav_av_frame_make_writable(), liteav_av_buffer_is_writable() + */ +int liteav_av_frame_is_writable(AVFrame *frame); + +/** + * Ensure that the frame data is writable, avoiding data copy if possible. + * + * Do nothing if the frame is writable, allocate new buffers and copy the data + * if it is not. + * + * @return 0 on success, a negative AVERROR on error. + * + * @see liteav_av_frame_is_writable(), liteav_av_buffer_is_writable(), + * liteav_av_buffer_make_writable() + */ +int liteav_av_frame_make_writable(AVFrame *frame); + +/** + * Copy the frame data from src to dst. + * + * This function does not allocate anything, dst must be already initialized and + * allocated with the same parameters as src. + * + * This function only copies the frame data (i.e. the contents of the data / + * extended data arrays), not any other properties. + * + * @return >= 0 on success, a negative AVERROR on error. + */ +int liteav_av_frame_copy(AVFrame *dst, const AVFrame *src); + +/** + * Copy only "metadata" fields from src to dst. + * + * Metadata for the purpose of this function are those fields that do not affect + * the data layout in the buffers. E.g. pts, sample rate (for audio) or sample + * aspect ratio (for video), but not width/height or channel layout. + * Side data is also copied. + */ +int liteav_av_frame_copy_props(AVFrame *dst, const AVFrame *src); + +/** + * Get the buffer reference a given data plane is stored in. + * + * @param plane index of the data plane of interest in frame->extended_data. + * + * @return the buffer reference that contains the plane or NULL if the input + * frame is not valid. + */ +AVBufferRef *liteav_av_frame_get_plane_buffer(AVFrame *frame, int plane); + +/** + * Add a new side data to a frame. 
+ * + * @param frame a frame to which the side data should be added + * @param type type of the added side data + * @param size size of the side data + * + * @return newly added side data on success, NULL on error + */ +AVFrameSideData *liteav_av_frame_new_side_data(AVFrame *frame, + enum AVFrameSideDataType type, + int size); + +/** + * Add a new side data to a frame from an existing AVBufferRef + * + * @param frame a frame to which the side data should be added + * @param type the type of the added side data + * @param buf an AVBufferRef to add as side data. The ownership of + * the reference is transferred to the frame. + * + * @return newly added side data on success, NULL on error. On failure + * the frame is unchanged and the AVBufferRef remains owned by + * the caller. + */ +AVFrameSideData *liteav_av_frame_new_side_data_from_buf(AVFrame *frame, + enum AVFrameSideDataType type, + AVBufferRef *buf); + +/** + * @return a pointer to the side data of a given type on success, NULL if there + * is no side data with such type in this frame. + */ +AVFrameSideData *liteav_av_frame_get_side_data(const AVFrame *frame, + enum AVFrameSideDataType type); + +/** + * If side data of the supplied type exists in the frame, free it and remove it + * from the frame. + */ +void liteav_av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type); + + +/** + * Flags for frame cropping. + */ +enum { + /** + * Apply the maximum possible cropping, even if it requires setting the + * AVFrame.data[] entries to unaligned pointers. Passing unaligned data + * to FFmpeg API is generally not allowed, and causes undefined behavior + * (such as crashes). You can pass unaligned data only to FFmpeg APIs that + * are explicitly documented to accept it. Use this flag only if you + * absolutely know what you are doing. + */ + AV_FRAME_CROP_UNALIGNED = 1 << 0, +}; + +/** + * Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ + * crop_bottom fields. 
If cropping is successful, the function will adjust the + * data pointers and the width/height fields, and set the crop fields to 0. + * + * In all cases, the cropping boundaries will be rounded to the inherent + * alignment of the pixel format. In some cases, such as for opaque hwaccel + * formats, the left/top cropping is ignored. The crop fields are set to 0 even + * if the cropping was rounded or ignored. + * + * @param frame the frame which should be cropped + * @param flags Some combination of AV_FRAME_CROP_* flags, or 0. + * + * @return >= 0 on success, a negative AVERROR on error. If the cropping fields + * were invalid, AVERROR(ERANGE) is returned, and nothing is changed. + */ +int liteav_av_frame_apply_cropping(AVFrame *frame, int flags); + +/** + * @return a string identifying the side data type + */ +const char *liteav_av_frame_side_data_name(enum AVFrameSideDataType type); + +/** + * @} + */ + +#endif /* AVUTIL_FRAME_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hash.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hash.h new file mode 100644 index 0000000..5e29000 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hash.h @@ -0,0 +1,270 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2013 Reimar Döffinger <Reimar.Doeffinger@gmx.de> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_hash_generic + * Generic hashing API + */ + +#ifndef AVUTIL_HASH_H +#define AVUTIL_HASH_H + +#include <stdint.h> + +#include "version.h" + +/** + * @defgroup lavu_hash Hash Functions + * @ingroup lavu_crypto + * Hash functions useful in multimedia. + * + * Hash functions are widely used in multimedia, from error checking and + * concealment to internal regression testing. libavutil has efficient + * implementations of a variety of hash functions that may be useful for + * FFmpeg and other multimedia applications. + * + * @{ + * + * @defgroup lavu_hash_generic Generic Hashing API + * An abstraction layer for all hash functions supported by libavutil. + * + * If your application needs to support a wide range of different hash + * functions, then the Generic Hashing API is for you. It provides a generic, + * reusable API for @ref lavu_hash "all hash functions" implemented in libavutil. + * If you just need to use one particular hash function, use the @ref lavu_hash + * "individual hash" directly. 
+ * + * @section Sample Code + * + * A basic template for using the Generic Hashing API follows: + * + * @code + * struct AVHashContext *ctx = NULL; + * const char *hash_name = NULL; + * uint8_t *output_buf = NULL; + * + * // Select from a string returned by liteav_av_hash_names() + * hash_name = ...; + * + * // Allocate a hash context + * ret = liteav_av_hash_alloc(&ctx, hash_name); + * if (ret < 0) + * return ret; + * + * // Initialize the hash context + * liteav_av_hash_init(ctx); + * + * // Update the hash context with data + * while (data_left) { + * liteav_av_hash_update(ctx, data, size); + * } + * + * // Now we have no more data, so it is time to finalize the hash and get the + * // output. But we need to first allocate an output buffer. Note that you can + * // use any memory allocation function, including malloc(), not just + * // liteav_av_malloc(). + * output_buf = liteav_av_malloc(liteav_av_hash_get_size(ctx)); + * if (!output_buf) + * return AVERROR(ENOMEM); + * + * // Finalize the hash context. + * // You can use any of the liteav_av_hash_final*() functions provided, for other + * // output formats. If you do so, be sure to adjust the memory allocation + * // above. See the function documentation below for the exact amount of extra + * // memory needed. + * liteav_av_hash_final(ctx, output_buffer); + * + * // Free the context + * liteav_av_hash_freep(&ctx); + * @endcode + * + * @section Hash Function-Specific Information + * If the CRC32 hash is selected, the #AV_CRC_32_IEEE polynomial will be + * used. + * + * If the Murmur3 hash is selected, the default seed will be used. See @ref + * lavu_murmur3_seedinfo "Murmur3" for more information. + * + * @{ + */ + +/** + * @example ffhash.c + * This example is a simple command line application that takes one or more + * arguments. It demonstrates a typical use of the hashing API with allocation, + * initialization, updating, and finalizing. 
+ */ + +struct AVHashContext; + +/** + * Allocate a hash context for the algorithm specified by name. + * + * @return >= 0 for success, a negative error code for failure + * + * @note The context is not initialized after a call to this function; you must + * call liteav_av_hash_init() to do so. + */ +int liteav_av_hash_alloc(struct AVHashContext **ctx, const char *name); + +/** + * Get the names of available hash algorithms. + * + * This function can be used to enumerate the algorithms. + * + * @param[in] i Index of the hash algorithm, starting from 0 + * @return Pointer to a static string or `NULL` if `i` is out of range + */ +const char *liteav_av_hash_names(int i); + +/** + * Get the name of the algorithm corresponding to the given hash context. + */ +const char *liteav_av_hash_get_name(const struct AVHashContext *ctx); + +/** + * Maximum value that liteav_av_hash_get_size() will currently return. + * + * You can use this if you absolutely want or need to use static allocation for + * the output buffer and are fine with not supporting hashes newly added to + * libavutil without recompilation. + * + * @warning + * Adding new hashes with larger sizes, and increasing the macro while doing + * so, will not be considered an ABI change. To prevent your code from + * overflowing a buffer, either dynamically allocate the output buffer with + * liteav_av_hash_get_size(), or limit your use of the Hashing API to hashes that are + * already in FFmpeg during the time of compilation. + */ +#define AV_HASH_MAX_SIZE 64 + +/** + * Get the size of the resulting hash value in bytes. + * + * The maximum value this function will currently return is available as macro + * #AV_HASH_MAX_SIZE. + * + * @param[in] ctx Hash context + * @return Size of the hash value in bytes + */ +int liteav_av_hash_get_size(const struct AVHashContext *ctx); + +/** + * Initialize or reset a hash context. 
+ * + * @param[in,out] ctx Hash context + */ +void liteav_av_hash_init(struct AVHashContext *ctx); + +/** + * Update a hash context with additional data. + * + * @param[in,out] ctx Hash context + * @param[in] src Data to be added to the hash context + * @param[in] len Size of the additional data + */ +#if FF_API_CRYPTO_SIZE_T +void liteav_av_hash_update(struct AVHashContext *ctx, const uint8_t *src, int len); +#else +void liteav_av_hash_update(struct AVHashContext *ctx, const uint8_t *src, size_t len); +#endif + +/** + * Finalize a hash context and compute the actual hash value. + * + * The minimum size of `dst` buffer is given by liteav_av_hash_get_size() or + * #AV_HASH_MAX_SIZE. The use of the latter macro is discouraged. + * + * It is not safe to update or finalize a hash context again, if it has already + * been finalized. + * + * @param[in,out] ctx Hash context + * @param[out] dst Where the final hash value will be stored + * + * @see liteav_av_hash_final_bin() provides an alternative API + */ +void liteav_av_hash_final(struct AVHashContext *ctx, uint8_t *dst); + +/** + * Finalize a hash context and store the actual hash value in a buffer. + * + * It is not safe to update or finalize a hash context again, if it has already + * been finalized. + * + * If `size` is smaller than the hash size (given by liteav_av_hash_get_size()), the + * hash is truncated; if size is larger, the buffer is padded with 0. + * + * @param[in,out] ctx Hash context + * @param[out] dst Where the final hash value will be stored + * @param[in] size Number of bytes to write to `dst` + */ +void liteav_av_hash_final_bin(struct AVHashContext *ctx, uint8_t *dst, int size); + +/** + * Finalize a hash context and store the hexadecimal representation of the + * actual hash value as a string. + * + * It is not safe to update or finalize a hash context again, if it has already + * been finalized. + * + * The string is always 0-terminated. 
+ * + * If `size` is smaller than `2 * hash_size + 1`, where `hash_size` is the + * value returned by liteav_av_hash_get_size(), the string will be truncated. + * + * @param[in,out] ctx Hash context + * @param[out] dst Where the string will be stored + * @param[in] size Maximum number of bytes to write to `dst` + */ +void liteav_av_hash_final_hex(struct AVHashContext *ctx, uint8_t *dst, int size); + +/** + * Finalize a hash context and store the Base64 representation of the + * actual hash value as a string. + * + * It is not safe to update or finalize a hash context again, if it has already + * been finalized. + * + * The string is always 0-terminated. + * + * If `size` is smaller than AV_BASE64_SIZE(hash_size), where `hash_size` is + * the value returned by liteav_av_hash_get_size(), the string will be truncated. + * + * @param[in,out] ctx Hash context + * @param[out] dst Where the final hash value will be stored + * @param[in] size Maximum number of bytes to write to `dst` + */ +void liteav_av_hash_final_b64(struct AVHashContext *ctx, uint8_t *dst, int size); + +/** + * Free hash context and set hash context pointer to `NULL`. + * + * @param[in,out] ctx Pointer to hash context + */ +void liteav_av_hash_freep(struct AVHashContext **ctx); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_HASH_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hmac.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hmac.h new file mode 100644 index 0000000..5007cdf --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hmac.h @@ -0,0 +1,101 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2012 Martin Storsjo + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HMAC_H +#define AVUTIL_HMAC_H + +#include <stdint.h> + +#include "version.h" +/** + * @defgroup lavu_hmac HMAC + * @ingroup lavu_crypto + * @{ + */ + +enum AVHMACType { + AV_HMAC_MD5, + AV_HMAC_SHA1, + AV_HMAC_SHA224, + AV_HMAC_SHA256, + AV_HMAC_SHA384, + AV_HMAC_SHA512, +}; + +typedef struct AVHMAC AVHMAC; + +/** + * Allocate an AVHMAC context. + * @param type The hash function used for the HMAC. + */ +AVHMAC *liteav_av_hmac_alloc(enum AVHMACType type); + +/** + * Free an AVHMAC context. + * @param ctx The context to free, may be NULL + */ +void liteav_av_hmac_free(AVHMAC *ctx); + +/** + * Initialize an AVHMAC context with an authentication key. + * @param ctx The HMAC context + * @param key The authentication key + * @param keylen The length of the key, in bytes + */ +void liteav_av_hmac_init(AVHMAC *ctx, const uint8_t *key, unsigned int keylen); + +/** + * Hash data with the HMAC. + * @param ctx The HMAC context + * @param data The data to hash + * @param len The length of the data, in bytes + */ +void liteav_av_hmac_update(AVHMAC *ctx, const uint8_t *data, unsigned int len); + +/** + * Finish hashing and output the HMAC digest. 
+ * @param ctx The HMAC context + * @param out The output buffer to write the digest into + * @param outlen The length of the out buffer, in bytes + * @return The number of bytes written to out, or a negative error code. + */ +int liteav_av_hmac_final(AVHMAC *ctx, uint8_t *out, unsigned int outlen); + +/** + * Hash an array of data with a key. + * @param ctx The HMAC context + * @param data The data to hash + * @param len The length of the data, in bytes + * @param key The authentication key + * @param keylen The length of the key, in bytes + * @param out The output buffer to write the digest into + * @param outlen The length of the out buffer, in bytes + * @return The number of bytes written to out, or a negative error code. + */ +int liteav_av_hmac_calc(AVHMAC *ctx, const uint8_t *data, unsigned int len, + const uint8_t *key, unsigned int keylen, + uint8_t *out, unsigned int outlen); + +/** + * @} + */ + +#endif /* AVUTIL_HMAC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext.h new file mode 100644 index 0000000..e97a9f4 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext.h @@ -0,0 +1,585 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_H +#define AVUTIL_HWCONTEXT_H + +#include "buffer.h" +#include "frame.h" +#include "log.h" +#include "pixfmt.h" + +enum AVHWDeviceType { + AV_HWDEVICE_TYPE_NONE, + AV_HWDEVICE_TYPE_VDPAU, + AV_HWDEVICE_TYPE_CUDA, + AV_HWDEVICE_TYPE_VAAPI, + AV_HWDEVICE_TYPE_DXVA2, + AV_HWDEVICE_TYPE_QSV, + AV_HWDEVICE_TYPE_VIDEOTOOLBOX, + AV_HWDEVICE_TYPE_D3D11VA, + AV_HWDEVICE_TYPE_DRM, + AV_HWDEVICE_TYPE_OPENCL, + AV_HWDEVICE_TYPE_MEDIACODEC, +}; + +typedef struct AVHWDeviceInternal AVHWDeviceInternal; + +/** + * This struct aggregates all the (hardware/vendor-specific) "high-level" state, + * i.e. state that is not tied to a concrete processing configuration. + * E.g., in an API that supports hardware-accelerated encoding and decoding, + * this struct will (if possible) wrap the state that is common to both encoding + * and decoding and from which specific instances of encoders or decoders can be + * derived. + * + * This struct is reference-counted with the AVBuffer mechanism. The + * liteav_av_hwdevice_ctx_alloc() constructor yields a reference, whose data field + * points to the actual AVHWDeviceContext. Further objects derived from + * AVHWDeviceContext (such as AVHWFramesContext, describing a frame pool with + * specific properties) will hold an internal reference to it. After all the + * references are released, the AVHWDeviceContext itself will be freed, + * optionally invoking a user-specified callback for uninitializing the hardware + * state. + */ +typedef struct AVHWDeviceContext { + /** + * A class for logging. Set by liteav_av_hwdevice_ctx_alloc(). + */ + const AVClass *av_class; + + /** + * Private data used internally by libavutil. Must not be accessed in any + * way by the caller. 
+ */ + AVHWDeviceInternal *internal; + + /** + * This field identifies the underlying API used for hardware access. + * + * This field is set when this struct is allocated and never changed + * afterwards. + */ + enum AVHWDeviceType type; + + /** + * The format-specific data, allocated and freed by libavutil along with + * this context. + * + * Should be cast by the user to the format-specific context defined in the + * corresponding header (hwcontext_*.h) and filled as described in the + * documentation before calling liteav_av_hwdevice_ctx_init(). + * + * After calling liteav_av_hwdevice_ctx_init() this struct should not be modified + * by the caller. + */ + void *hwctx; + + /** + * This field may be set by the caller before calling liteav_av_hwdevice_ctx_init(). + * + * If non-NULL, this callback will be called when the last reference to + * this context is unreferenced, immediately before it is freed. + * + * @note when other objects (e.g an AVHWFramesContext) are derived from this + * struct, this callback will be invoked after all such child objects + * are fully uninitialized and their respective destructors invoked. + */ + void (*free)(struct AVHWDeviceContext *ctx); + + /** + * Arbitrary user data, to be used e.g. by the free() callback. + */ + void *user_opaque; +} AVHWDeviceContext; + +typedef struct AVHWFramesInternal AVHWFramesInternal; + +/** + * This struct describes a set or pool of "hardware" frames (i.e. those with + * data not located in normal system memory). All the frames in the pool are + * assumed to be allocated in the same way and interchangeable. + * + * This struct is reference-counted with the AVBuffer mechanism and tied to a + * given AVHWDeviceContext instance. The liteav_av_hwframe_ctx_alloc() constructor + * yields a reference, whose data field points to the actual AVHWFramesContext + * struct. + */ +typedef struct AVHWFramesContext { + /** + * A class for logging. 
+ */ + const AVClass *av_class; + + /** + * Private data used internally by libavutil. Must not be accessed in any + * way by the caller. + */ + AVHWFramesInternal *internal; + + /** + * A reference to the parent AVHWDeviceContext. This reference is owned and + * managed by the enclosing AVHWFramesContext, but the caller may derive + * additional references from it. + */ + AVBufferRef *device_ref; + + /** + * The parent AVHWDeviceContext. This is simply a pointer to + * device_ref->data provided for convenience. + * + * Set by libavutil in liteav_av_hwframe_ctx_init(). + */ + AVHWDeviceContext *device_ctx; + + /** + * The format-specific data, allocated and freed automatically along with + * this context. + * + * Should be cast by the user to the format-specific context defined in the + * corresponding header (hwframe_*.h) and filled as described in the + * documentation before calling liteav_av_hwframe_ctx_init(). + * + * After any frames using this context are created, the contents of this + * struct should not be modified by the caller. + */ + void *hwctx; + + /** + * This field may be set by the caller before calling liteav_av_hwframe_ctx_init(). + * + * If non-NULL, this callback will be called when the last reference to + * this context is unreferenced, immediately before it is freed. + */ + void (*free)(struct AVHWFramesContext *ctx); + + /** + * Arbitrary user data, to be used e.g. by the free() callback. + */ + void *user_opaque; + + /** + * A pool from which the frames are allocated by liteav_av_hwframe_get_buffer(). + * This field may be set by the caller before calling liteav_av_hwframe_ctx_init(). + * The buffers returned by calling liteav_av_buffer_pool_get() on this pool must + * have the properties described in the documentation in the corresponding hw + * type's header (hwcontext_*.h). The pool will be freed strictly before + * this struct's free() callback is invoked. 
+ * + * This field may be NULL, then libavutil will attempt to allocate a pool + * internally. Note that certain device types enforce pools allocated at + * fixed size (frame count), which cannot be extended dynamically. In such a + * case, initial_pool_size must be set appropriately. + */ + AVBufferPool *pool; + + /** + * Initial size of the frame pool. If a device type does not support + * dynamically resizing the pool, then this is also the maximum pool size. + * + * May be set by the caller before calling liteav_av_hwframe_ctx_init(). Must be + * set if pool is NULL and the device type does not support dynamic pools. + */ + int initial_pool_size; + + /** + * The pixel format identifying the underlying HW surface type. + * + * Must be a hwaccel format, i.e. the corresponding descriptor must have the + * AV_PIX_FMT_FLAG_HWACCEL flag set. + * + * Must be set by the user before calling liteav_av_hwframe_ctx_init(). + */ + enum AVPixelFormat format; + + /** + * The pixel format identifying the actual data layout of the hardware + * frames. + * + * Must be set by the caller before calling liteav_av_hwframe_ctx_init(). + * + * @note when the underlying API does not provide the exact data layout, but + * only the colorspace/bit depth, this field should be set to the fully + * planar version of that format (e.g. for 8-bit 420 YUV it should be + * AV_PIX_FMT_YUV420P, not AV_PIX_FMT_NV12 or anything else). + */ + enum AVPixelFormat sw_format; + + /** + * The allocated dimensions of the frames in this pool. + * + * Must be set by the user before calling liteav_av_hwframe_ctx_init(). + */ + int width, height; +} AVHWFramesContext; + +/** + * Look up an AVHWDeviceType by name. + * + * @param name String name of the device type (case-insensitive). + * @return The type from enum AVHWDeviceType, or AV_HWDEVICE_TYPE_NONE if + * not found. + */ +enum AVHWDeviceType liteav_av_hwdevice_find_type_by_name(const char *name); + +/** Get the string name of an AVHWDeviceType. 
+ * + * @param type Type from enum AVHWDeviceType. + * @return Pointer to a static string containing the name, or NULL if the type + * is not valid. + */ +const char *liteav_av_hwdevice_get_type_name(enum AVHWDeviceType type); + +/** + * Iterate over supported device types. + * + * @param type AV_HWDEVICE_TYPE_NONE initially, then the previous type + * returned by this function in subsequent iterations. + * @return The next usable device type from enum AVHWDeviceType, or + * AV_HWDEVICE_TYPE_NONE if there are no more. + */ +enum AVHWDeviceType liteav_av_hwdevice_iterate_types(enum AVHWDeviceType prev); + +/** + * Allocate an AVHWDeviceContext for a given hardware type. + * + * @param type the type of the hardware device to allocate. + * @return a reference to the newly created AVHWDeviceContext on success or NULL + * on failure. + */ +AVBufferRef *liteav_av_hwdevice_ctx_alloc(enum AVHWDeviceType type); + +/** + * Finalize the device context before use. This function must be called after + * the context is filled with all the required information and before it is + * used in any way. + * + * @param ref a reference to the AVHWDeviceContext + * @return 0 on success, a negative AVERROR code on failure + */ +int liteav_av_hwdevice_ctx_init(AVBufferRef *ref); + +/** + * Open a device of the specified type and create an AVHWDeviceContext for it. + * + * This is a convenience function intended to cover the simple cases. Callers + * who need to fine-tune device creation/management should open the device + * manually and then wrap it in an AVHWDeviceContext using + * liteav_av_hwdevice_ctx_alloc()/liteav_av_hwdevice_ctx_init(). + * + * The returned context is already initialized and ready for use, the caller + * should not call liteav_av_hwdevice_ctx_init() on it. The user_opaque/free fields of + * the created AVHWDeviceContext are set by this function and should not be + * touched by the caller. 
+ * + * @param device_ctx On success, a reference to the newly-created device context + * will be written here. The reference is owned by the caller + * and must be released with liteav_av_buffer_unref() when no longer + * needed. On failure, NULL will be written to this pointer. + * @param type The type of the device to create. + * @param device A type-specific string identifying the device to open. + * @param opts A dictionary of additional (type-specific) options to use in + * opening the device. The dictionary remains owned by the caller. + * @param flags currently unused + * + * @return 0 on success, a negative AVERROR code on failure. + */ +int liteav_av_hwdevice_ctx_create(AVBufferRef **device_ctx, enum AVHWDeviceType type, + const char *device, AVDictionary *opts, int flags); + +/** + * Create a new device of the specified type from an existing device. + * + * If the source device is a device of the target type or was originally + * derived from such a device (possibly through one or more intermediate + * devices of other types), then this will return a reference to the + * existing device of the same type as is requested. + * + * Otherwise, it will attempt to derive a new device from the given source + * device. If direct derivation to the new type is not implemented, it will + * attempt the same derivation from each ancestor of the source device in + * turn looking for an implemented derivation method. + * + * @param dst_ctx On success, a reference to the newly-created + * AVHWDeviceContext. + * @param type The type of the new device to create. + * @param src_ctx A reference to an existing AVHWDeviceContext which will be + * used to create the new device. + * @param flags Currently unused; should be set to zero. + * @return Zero on success, a negative AVERROR code on failure. 
+ */ +int liteav_av_hwdevice_ctx_create_derived(AVBufferRef **dst_ctx, + enum AVHWDeviceType type, + AVBufferRef *src_ctx, int flags); + + +/** + * Allocate an AVHWFramesContext tied to a given device context. + * + * @param device_ctx a reference to a AVHWDeviceContext. This function will make + * a new reference for internal use, the one passed to the + * function remains owned by the caller. + * @return a reference to the newly created AVHWFramesContext on success or NULL + * on failure. + */ +AVBufferRef *liteav_av_hwframe_ctx_alloc(AVBufferRef *device_ctx); + +/** + * Finalize the context before use. This function must be called after the + * context is filled with all the required information and before it is attached + * to any frames. + * + * @param ref a reference to the AVHWFramesContext + * @return 0 on success, a negative AVERROR code on failure + */ +int liteav_av_hwframe_ctx_init(AVBufferRef *ref); + +/** + * Allocate a new frame attached to the given AVHWFramesContext. + * + * @param hwframe_ctx a reference to an AVHWFramesContext + * @param frame an empty (freshly allocated or unreffed) frame to be filled with + * newly allocated buffers. + * @param flags currently unused, should be set to zero + * @return 0 on success, a negative AVERROR code on failure + */ +int liteav_av_hwframe_get_buffer(AVBufferRef *hwframe_ctx, AVFrame *frame, int flags); + +/** + * Copy data to or from a hw surface. At least one of dst/src must have an + * AVHWFramesContext attached. + * + * If src has an AVHWFramesContext attached, then the format of dst (if set) + * must use one of the formats returned by liteav_av_hwframe_transfer_get_formats(src, + * AV_HWFRAME_TRANSFER_DIRECTION_FROM). + * If dst has an AVHWFramesContext attached, then the format of src must use one + * of the formats returned by liteav_av_hwframe_transfer_get_formats(dst, + * AV_HWFRAME_TRANSFER_DIRECTION_TO) + * + * dst may be "clean" (i.e. 
with data/buf pointers unset), in which case the + * data buffers will be allocated by this function using liteav_av_frame_get_buffer(). + * If dst->format is set, then this format will be used, otherwise (when + * dst->format is AV_PIX_FMT_NONE) the first acceptable format will be chosen. + * + * The two frames must have matching allocated dimensions (i.e. equal to + * AVHWFramesContext.width/height), since not all device types support + * transferring a sub-rectangle of the whole surface. The display dimensions + * (i.e. AVFrame.width/height) may be smaller than the allocated dimensions, but + * also have to be equal for both frames. When the display dimensions are + * smaller than the allocated dimensions, the content of the padding in the + * destination frame is unspecified. + * + * @param dst the destination frame. dst is not touched on failure. + * @param src the source frame. + * @param flags currently unused, should be set to zero + * @return 0 on success, a negative AVERROR error code on failure. + */ +int liteav_av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags); + +enum AVHWFrameTransferDirection { + /** + * Transfer the data from the queried hw frame. + */ + AV_HWFRAME_TRANSFER_DIRECTION_FROM, + + /** + * Transfer the data to the queried hw frame. + */ + AV_HWFRAME_TRANSFER_DIRECTION_TO, +}; + +/** + * Get a list of possible source or target formats usable in + * liteav_av_hwframe_transfer_data(). + * + * @param hwframe_ctx the frame context to obtain the information for + * @param dir the direction of the transfer + * @param formats the pointer to the output format list will be written here. + * The list is terminated with AV_PIX_FMT_NONE and must be freed + * by the caller when no longer needed using liteav_av_free(). + * If this function returns successfully, the format list will + * have at least one item (not counting the terminator). + * On failure, the contents of this pointer are unspecified. 
+ * @param flags currently unused, should be set to zero + * @return 0 on success, a negative AVERROR code on failure. + */ +int liteav_av_hwframe_transfer_get_formats(AVBufferRef *hwframe_ctx, + enum AVHWFrameTransferDirection dir, + enum AVPixelFormat **formats, int flags); + + +/** + * This struct describes the constraints on hardware frames attached to + * a given device with a hardware-specific configuration. This is returned + * by liteav_av_hwdevice_get_hwframe_constraints() and must be freed by + * liteav_av_hwframe_constraints_free() after use. + */ +typedef struct AVHWFramesConstraints { + /** + * A list of possible values for format in the hw_frames_ctx, + * terminated by AV_PIX_FMT_NONE. This member will always be filled. + */ + enum AVPixelFormat *valid_hw_formats; + + /** + * A list of possible values for sw_format in the hw_frames_ctx, + * terminated by AV_PIX_FMT_NONE. Can be NULL if this information is + * not known. + */ + enum AVPixelFormat *valid_sw_formats; + + /** + * The minimum size of frames in this hw_frames_ctx. + * (Zero if not known.) + */ + int min_width; + int min_height; + + /** + * The maximum size of frames in this hw_frames_ctx. + * (INT_MAX if not known / no limit.) + */ + int max_width; + int max_height; +} AVHWFramesConstraints; + +/** + * Allocate a HW-specific configuration structure for a given HW device. + * After use, the user must free all members as required by the specific + * hardware structure being used, then free the structure itself with + * liteav_av_free(). + * + * @param device_ctx a reference to the associated AVHWDeviceContext. + * @return The newly created HW-specific configuration structure on + * success or NULL on failure. + */ +void *liteav_av_hwdevice_hwconfig_alloc(AVBufferRef *device_ctx); + +/** + * Get the constraints on HW frames given a device and the HW-specific + * configuration to be used with that device. 
If no HW-specific + * configuration is provided, returns the maximum possible capabilities + * of the device. + * + * @param ref a reference to the associated AVHWDeviceContext. + * @param hwconfig a filled HW-specific configuration structure, or NULL + * to return the maximum possible capabilities of the device. + * @return AVHWFramesConstraints structure describing the constraints + * on the device, or NULL if not available. + */ +AVHWFramesConstraints *liteav_av_hwdevice_get_hwframe_constraints(AVBufferRef *ref, + const void *hwconfig); + +/** + * Free an AVHWFrameConstraints structure. + * + * @param constraints The (filled or unfilled) AVHWFrameConstraints structure. + */ +void liteav_av_hwframe_constraints_free(AVHWFramesConstraints **constraints); + + +/** + * Flags to apply to frame mappings. + */ +enum { + /** + * The mapping must be readable. + */ + AV_HWFRAME_MAP_READ = 1 << 0, + /** + * The mapping must be writeable. + */ + AV_HWFRAME_MAP_WRITE = 1 << 1, + /** + * The mapped frame will be overwritten completely in subsequent + * operations, so the current frame data need not be loaded. Any values + * which are not overwritten are unspecified. + */ + AV_HWFRAME_MAP_OVERWRITE = 1 << 2, + /** + * The mapping must be direct. That is, there must not be any copying in + * the map or unmap steps. Note that performance of direct mappings may + * be much lower than normal memory. + */ + AV_HWFRAME_MAP_DIRECT = 1 << 3, +}; + +/** + * Map a hardware frame. + * + * This has a number of different possible effects, depending on the format + * and origin of the src and dst frames. On input, src should be a usable + * frame with valid buffers and dst should be blank (typically as just created + * by liteav_av_frame_alloc()). src should have an associated hwframe context, and + * dst may optionally have a format and associated hwframe context. 
+ * + * If src was created by mapping a frame from the hwframe context of dst, + * then this function undoes the mapping - dst is replaced by a reference to + * the frame that src was originally mapped from. + * + * If both src and dst have an associated hwframe context, then this function + * attempts to map the src frame from its hardware context to that of dst and + * then fill dst with appropriate data to be usable there. This will only be + * possible if the hwframe contexts and associated devices are compatible - + * given compatible devices, liteav_av_hwframe_ctx_create_derived() can be used to + * create a hwframe context for dst in which mapping should be possible. + * + * If src has a hwframe context but dst does not, then the src frame is + * mapped to normal memory and should thereafter be usable as a normal frame. + * If the format is set on dst, then the mapping will attempt to create dst + * with that format and fail if it is not possible. If format is unset (is + * AV_PIX_FMT_NONE) then dst will be mapped with whatever the most appropriate + * format to use is (probably the sw_format of the src hwframe context). + * + * A return value of AVERROR(ENOSYS) indicates that the mapping is not + * possible with the given arguments and hwframe setup, while other return + * values indicate that it failed somehow. + * + * @param dst Destination frame, to contain the mapping. + * @param src Source frame, to be mapped. + * @param flags Some combination of AV_HWFRAME_MAP_* flags. + * @return Zero on success, negative AVERROR code on failure. + */ +int liteav_av_hwframe_map(AVFrame *dst, const AVFrame *src, int flags); + + +/** + * Create and initialise an AVHWFramesContext as a mapping of another existing + * AVHWFramesContext on a different device. + * + * liteav_av_hwframe_ctx_init() should not be called after this. + * + * @param derived_frame_ctx On success, a reference to the newly created + * AVHWFramesContext. 
+ * @param derived_device_ctx A reference to the device to create the new + * AVHWFramesContext on. + * @param source_frame_ctx A reference to an existing AVHWFramesContext + * which will be mapped to the derived context. + * @param flags Some combination of AV_HWFRAME_MAP_* flags, defining the + * mapping parameters to apply to frames which are allocated + * in the derived device. + * @return Zero on success, negative AVERROR code on failure. + */ +int liteav_av_hwframe_ctx_create_derived(AVBufferRef **derived_frame_ctx, + enum AVPixelFormat format, + AVBufferRef *derived_device_ctx, + AVBufferRef *source_frame_ctx, + int flags); + +#endif /* AVUTIL_HWCONTEXT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_cuda.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_cuda.h new file mode 100644 index 0000000..81a0552 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_cuda.h @@ -0,0 +1,52 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + + +#ifndef AVUTIL_HWCONTEXT_CUDA_H +#define AVUTIL_HWCONTEXT_CUDA_H + +#ifndef CUDA_VERSION +#include <cuda.h> +#endif + +#include "pixfmt.h" + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_CUDA. + * + * This API supports dynamic frame pools. AVHWFramesContext.pool must return + * AVBufferRefs whose data pointer is a CUdeviceptr. + */ + +typedef struct AVCUDADeviceContextInternal AVCUDADeviceContextInternal; + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVCUDADeviceContext { + CUcontext cuda_ctx; + CUstream stream; + AVCUDADeviceContextInternal *internal; +} AVCUDADeviceContext; + +/** + * AVHWFramesContext.hwctx is currently not used + */ + +#endif /* AVUTIL_HWCONTEXT_CUDA_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_d3d11va.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_d3d11va.h new file mode 100644 index 0000000..0eb694a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_d3d11va.h @@ -0,0 +1,170 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_D3D11VA_H +#define AVUTIL_HWCONTEXT_D3D11VA_H + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_D3D11VA. + * + * The default pool implementation will be fixed-size if initial_pool_size is + * set (and allocate elements from an array texture). Otherwise it will allocate + * individual textures. Be aware that decoding requires a single array texture. + * + * Using sw_format==AV_PIX_FMT_YUV420P has special semantics, and maps to + * DXGI_FORMAT_420_OPAQUE. liteav_av_hwframe_transfer_data() is not supported for + * this format. Refer to MSDN for details. + * + * liteav_av_hwdevice_ctx_create() for this device type supports a key named "debug" + * for the AVDictionary entry. If this is set to any value, the device creation + * code will try to load various supported D3D debugging layers. + */ + +#include <d3d11.h> +#include <stdint.h> + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVD3D11VADeviceContext { + /** + * Device used for texture creation and access. This can also be used to + * set the libavcodec decoding device. + * + * Must be set by the user. This is the only mandatory field - the other + * device context fields are set from this and are available for convenience. + * + * Deallocating the AVHWDeviceContext will always release this interface, + * and it does not matter whether it was user-allocated. + */ + ID3D11Device *device; + + /** + * If unset, this will be set from the device field on init. + * + * Deallocating the AVHWDeviceContext will always release this interface, + * and it does not matter whether it was user-allocated. 
+ */ + ID3D11DeviceContext *device_context; + + /** + * If unset, this will be set from the device field on init. + * + * Deallocating the AVHWDeviceContext will always release this interface, + * and it does not matter whether it was user-allocated. + */ + ID3D11VideoDevice *video_device; + + /** + * If unset, this will be set from the device_context field on init. + * + * Deallocating the AVHWDeviceContext will always release this interface, + * and it does not matter whether it was user-allocated. + */ + ID3D11VideoContext *video_context; + + /** + * Callbacks for locking. They protect accesses to device_context and + * video_context calls. They also protect access to the internal staging + * texture (for liteav_av_hwframe_transfer_data() calls). They do NOT protect + * access to hwcontext or decoder state in general. + * + * If unset on init, the hwcontext implementation will set them to use an + * internal mutex. + * + * The underlying lock must be recursive. lock_ctx is for free use by the + * locking implementation. + */ + void (*lock)(void *lock_ctx); + void (*unlock)(void *lock_ctx); + void *lock_ctx; +} AVD3D11VADeviceContext; + +/** + * D3D11 frame descriptor for pool allocation. + * + * In user-allocated pools, AVHWFramesContext.pool must return AVBufferRefs + * with the data pointer pointing at an object of this type describing the + * planes of the frame. + * + * This has no use outside of custom allocation, and AVFrame AVBufferRef do not + * necessarily point to an instance of this struct. + */ +typedef struct AVD3D11FrameDescriptor { + /** + * The texture in which the frame is located. The reference count is + * managed by the AVBufferRef, and destroying the reference will release + * the interface. + * + * Normally stored in AVFrame.data[0]. + */ + ID3D11Texture2D *texture; + + /** + * The index into the array texture element representing the frame, or 0 + * if the texture is not an array texture. 
+ * + * Normally stored in AVFrame.data[1] (cast from intptr_t). + */ + intptr_t index; +} AVD3D11FrameDescriptor; + +/** + * This struct is allocated as AVHWFramesContext.hwctx + */ +typedef struct AVD3D11VAFramesContext { + /** + * The canonical texture used for pool allocation. If this is set to NULL + * on init, the hwframes implementation will allocate and set an array + * texture if initial_pool_size > 0. + * + * The only situation when the API user should set this is: + * - the user wants to do manual pool allocation (setting + * AVHWFramesContext.pool), instead of letting AVHWFramesContext + * allocate the pool + * - of an array texture + * - and wants it to use it for decoding + * - this has to be done before calling liteav_av_hwframe_ctx_init() + * + * Deallocating the AVHWFramesContext will always release this interface, + * and it does not matter whether it was user-allocated. + * + * This is in particular used by the libavcodec D3D11VA hwaccel, which + * requires a single array texture. It will create ID3D11VideoDecoderOutputView + * objects for each array texture element on decoder initialization. + */ + ID3D11Texture2D *texture; + + /** + * D3D11_TEXTURE2D_DESC.BindFlags used for texture creation. The user must + * at least set D3D11_BIND_DECODER if the frames context is to be used for + * video decoding. + * This field is ignored/invalid if a user-allocated texture is provided. + */ + UINT BindFlags; + + /** + * D3D11_TEXTURE2D_DESC.MiscFlags used for texture creation. + * This field is ignored/invalid if a user-allocated texture is provided. 
+ */ + UINT MiscFlags; +} AVD3D11VAFramesContext; + +#endif /* AVUTIL_HWCONTEXT_D3D11VA_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_drm.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_drm.h new file mode 100644 index 0000000..42709f2 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_drm.h @@ -0,0 +1,169 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_DRM_H +#define AVUTIL_HWCONTEXT_DRM_H + +#include <stddef.h> +#include <stdint.h> + +/** + * @file + * API-specific header for AV_HWDEVICE_TYPE_DRM. + * + * Internal frame allocation is not currently supported - all frames + * must be allocated by the user. Thus AVHWFramesContext is always + * NULL, though this may change if support for frame allocation is + * added in future. + */ + +enum { + /** + * The maximum number of layers/planes in a DRM frame. + */ + AV_DRM_MAX_PLANES = 4 +}; + +/** + * DRM object descriptor. + * + * Describes a single DRM object, addressing it as a PRIME file + * descriptor. 
+ */ +typedef struct AVDRMObjectDescriptor { + /** + * DRM PRIME fd for the object. + */ + int fd; + /** + * Total size of the object. + * + * (This includes any parts not which do not contain image data.) + */ + size_t size; + /** + * Format modifier applied to the object (DRM_FORMAT_MOD_*). + * + * If the format modifier is unknown then this should be set to + * DRM_FORMAT_MOD_INVALID. + */ + uint64_t format_modifier; +} AVDRMObjectDescriptor; + +/** + * DRM plane descriptor. + * + * Describes a single plane of a layer, which is contained within + * a single object. + */ +typedef struct AVDRMPlaneDescriptor { + /** + * Index of the object containing this plane in the objects + * array of the enclosing frame descriptor. + */ + int object_index; + /** + * Offset within that object of this plane. + */ + ptrdiff_t offset; + /** + * Pitch (linesize) of this plane. + */ + ptrdiff_t pitch; +} AVDRMPlaneDescriptor; + +/** + * DRM layer descriptor. + * + * Describes a single layer within a frame. This has the structure + * defined by its format, and will contain one or more planes. + */ +typedef struct AVDRMLayerDescriptor { + /** + * Format of the layer (DRM_FORMAT_*). + */ + uint32_t format; + /** + * Number of planes in the layer. + * + * This must match the number of planes required by format. + */ + int nb_planes; + /** + * Array of planes in this layer. + */ + AVDRMPlaneDescriptor planes[AV_DRM_MAX_PLANES]; +} AVDRMLayerDescriptor; + +/** + * DRM frame descriptor. + * + * This is used as the data pointer for AV_PIX_FMT_DRM_PRIME frames. + * It is also used by user-allocated frame pools - allocating in + * AVHWFramesContext.pool must return AVBufferRefs which contain + * an object of this type. + * + * The fields of this structure should be set such it can be + * imported directly by EGL using the EGL_EXT_image_dma_buf_import + * and EGL_EXT_image_dma_buf_import_modifiers extensions. 
+ * (Note that the exact layout of a particular format may vary between + * platforms - we only specify that the same platform should be able + * to import it.) + * + * The total number of planes must not exceed AV_DRM_MAX_PLANES, and + * the order of the planes by increasing layer index followed by + * increasing plane index must be the same as the order which would + * be used for the data pointers in the equivalent software format. + */ +typedef struct AVDRMFrameDescriptor { + /** + * Number of DRM objects making up this frame. + */ + int nb_objects; + /** + * Array of objects making up the frame. + */ + AVDRMObjectDescriptor objects[AV_DRM_MAX_PLANES]; + /** + * Number of layers in the frame. + */ + int nb_layers; + /** + * Array of layers in the frame. + */ + AVDRMLayerDescriptor layers[AV_DRM_MAX_PLANES]; +} AVDRMFrameDescriptor; + +/** + * DRM device. + * + * Allocated as AVHWDeviceContext.hwctx. + */ +typedef struct AVDRMDeviceContext { + /** + * File descriptor of DRM device. + * + * This is used as the device to create frames on, and may also be + * used in some derivation and mapping operations. + * + * If no device is required, set to -1. + */ + int fd; +} AVDRMDeviceContext; + +#endif /* AVUTIL_HWCONTEXT_DRM_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_dxva2.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_dxva2.h new file mode 100644 index 0000000..e1b79bc --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_dxva2.h @@ -0,0 +1,75 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + + +#ifndef AVUTIL_HWCONTEXT_DXVA2_H +#define AVUTIL_HWCONTEXT_DXVA2_H + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_DXVA2. + * + * Only fixed-size pools are supported. + * + * For user-allocated pools, AVHWFramesContext.pool must return AVBufferRefs + * with the data pointer set to a pointer to IDirect3DSurface9. + */ + +#include <d3d9.h> +#include <dxva2api.h> + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVDXVA2DeviceContext { + IDirect3DDeviceManager9 *devmgr; +} AVDXVA2DeviceContext; + +/** + * This struct is allocated as AVHWFramesContext.hwctx + */ +typedef struct AVDXVA2FramesContext { + /** + * The surface type (e.g. DXVA2_VideoProcessorRenderTarget or + * DXVA2_VideoDecoderRenderTarget). Must be set by the caller. + */ + DWORD surface_type; + + /** + * The surface pool. When an external pool is not provided by the caller, + * this will be managed (allocated and filled on init, freed on uninit) by + * libavutil. + */ + IDirect3DSurface9 **surfaces; + int nb_surfaces; + + /** + * Certain drivers require the decoder to be destroyed before the surfaces. + * To allow internally managed pools to work properly in such cases, this + * field is provided. + * + * If it is non-NULL, libavutil will call IDirectXVideoDecoder_Release() on + * it just before the internal surface pool is freed. + * + * This is for convenience only. Some code uses other methods to manage the + * decoder reference. 
+ */ + IDirectXVideoDecoder *decoder_to_release; +} AVDXVA2FramesContext; + +#endif /* AVUTIL_HWCONTEXT_DXVA2_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_mediacodec.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_mediacodec.h new file mode 100644 index 0000000..101a980 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_mediacodec.h @@ -0,0 +1,36 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_MEDIACODEC_H +#define AVUTIL_HWCONTEXT_MEDIACODEC_H + +/** + * MediaCodec details. + * + * Allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVMediaCodecDeviceContext { + /** + * android/view/Surface handle, to be filled by the user. + * + * This is the default surface used by decoders on this device. 
+ */ + void *surface; +} AVMediaCodecDeviceContext; + +#endif /* AVUTIL_HWCONTEXT_MEDIACODEC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_qsv.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_qsv.h new file mode 100644 index 0000000..b98d611 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_qsv.h @@ -0,0 +1,53 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_QSV_H +#define AVUTIL_HWCONTEXT_QSV_H + +#include <mfx/mfxvideo.h> + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_QSV. + * + * This API does not support dynamic frame pools. AVHWFramesContext.pool must + * contain AVBufferRefs whose data pointer points to an mfxFrameSurface1 struct. 
+ */ + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVQSVDeviceContext { + mfxSession session; +} AVQSVDeviceContext; + +/** + * This struct is allocated as AVHWFramesContext.hwctx + */ +typedef struct AVQSVFramesContext { + mfxFrameSurface1 *surfaces; + int nb_surfaces; + + /** + * A combination of MFX_MEMTYPE_* describing the frame pool. + */ + int frame_type; +} AVQSVFramesContext; + +#endif /* AVUTIL_HWCONTEXT_QSV_H */ + diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_vaapi.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_vaapi.h new file mode 100644 index 0000000..46b6be7 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_vaapi.h @@ -0,0 +1,118 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_VAAPI_H +#define AVUTIL_HWCONTEXT_VAAPI_H + +#include <va/va.h> + +/** + * @file + * API-specific header for AV_HWDEVICE_TYPE_VAAPI. 
+ * + * Dynamic frame pools are supported, but note that any pool used as a render + * target is required to be of fixed size in order to be be usable as an + * argument to vaCreateContext(). + * + * For user-allocated pools, AVHWFramesContext.pool must return AVBufferRefs + * with the data pointer set to a VASurfaceID. + */ + +enum { + /** + * The quirks field has been set by the user and should not be detected + * automatically by liteav_av_hwdevice_ctx_init(). + */ + AV_VAAPI_DRIVER_QUIRK_USER_SET = (1 << 0), + /** + * The driver does not destroy parameter buffers when they are used by + * vaRenderPicture(). Additional code will be required to destroy them + * separately afterwards. + */ + AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS = (1 << 1), + + /** + * The driver does not support the VASurfaceAttribMemoryType attribute, + * so the surface allocation code will not try to use it. + */ + AV_VAAPI_DRIVER_QUIRK_ATTRIB_MEMTYPE = (1 << 2), + + /** + * The driver does not support surface attributes at all. + * The surface allocation code will never pass them to surface allocation, + * and the results of the vaQuerySurfaceAttributes() call will be faked. + */ + AV_VAAPI_DRIVER_QUIRK_SURFACE_ATTRIBUTES = (1 << 3), +}; + +/** + * VAAPI connection details. + * + * Allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVVAAPIDeviceContext { + /** + * The VADisplay handle, to be filled by the user. + */ + VADisplay display; + /** + * Driver quirks to apply - this is filled by liteav_av_hwdevice_ctx_init(), + * with reference to a table of known drivers, unless the + * AV_VAAPI_DRIVER_QUIRK_USER_SET bit is already present. The user + * may need to refer to this field when performing any later + * operations using VAAPI with the same VADisplay. + */ + unsigned int driver_quirks; +} AVVAAPIDeviceContext; + +/** + * VAAPI-specific data associated with a frame pool. + * + * Allocated as AVHWFramesContext.hwctx. 
+ */ +typedef struct AVVAAPIFramesContext { + /** + * Set by the user to apply surface attributes to all surfaces in + * the frame pool. If null, default settings are used. + */ + VASurfaceAttrib *attributes; + int nb_attributes; + /** + * The surfaces IDs of all surfaces in the pool after creation. + * Only valid if AVHWFramesContext.initial_pool_size was positive. + * These are intended to be used as the render_targets arguments to + * vaCreateContext(). + */ + VASurfaceID *surface_ids; + int nb_surfaces; +} AVVAAPIFramesContext; + +/** + * VAAPI hardware pipeline configuration details. + * + * Allocated with liteav_av_hwdevice_hwconfig_alloc(). + */ +typedef struct AVVAAPIHWConfig { + /** + * ID of a VAAPI pipeline configuration. + */ + VAConfigID config_id; +} AVVAAPIHWConfig; + +#endif /* AVUTIL_HWCONTEXT_VAAPI_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_vdpau.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_vdpau.h new file mode 100644 index 0000000..1b7ea1e --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_vdpau.h @@ -0,0 +1,44 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_VDPAU_H +#define AVUTIL_HWCONTEXT_VDPAU_H + +#include <vdpau/vdpau.h> + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_VDPAU. + * + * This API supports dynamic frame pools. AVHWFramesContext.pool must return + * AVBufferRefs whose data pointer is a VdpVideoSurface. + */ + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVVDPAUDeviceContext { + VdpDevice device; + VdpGetProcAddress *get_proc_address; +} AVVDPAUDeviceContext; + +/** + * AVHWFramesContext.hwctx is currently not used + */ + +#endif /* AVUTIL_HWCONTEXT_VDPAU_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_videotoolbox.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_videotoolbox.h new file mode 100644 index 0000000..4f77b9d --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/hwcontext_videotoolbox.h @@ -0,0 +1,55 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_VIDEOTOOLBOX_H +#define AVUTIL_HWCONTEXT_VIDEOTOOLBOX_H + +#include <stdint.h> + +#include <VideoToolbox/VideoToolbox.h> + +#include "pixfmt.h" + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_VIDEOTOOLBOX. + * + * This API currently does not support frame allocation, as the raw VideoToolbox + * API does allocation, and FFmpeg itself never has the need to allocate frames. + * + * If the API user sets a custom pool, AVHWFramesContext.pool must return + * AVBufferRefs whose data pointer is a CVImageBufferRef or CVPixelBufferRef. + * + * Currently AVHWDeviceContext.hwctx and AVHWFramesContext.hwctx are always + * NULL. + */ + +/** + * Convert a VideoToolbox (actually CoreVideo) format to AVPixelFormat. + * Returns AV_PIX_FMT_NONE if no known equivalent was found. + */ +enum AVPixelFormat liteav_av_map_videotoolbox_format_to_pixfmt(uint32_t cv_fmt); + +/** + * Convert an AVPixelFormat to a VideoToolbox (actually CoreVideo) format. + * Returns 0 if no known equivalent was found. + */ +uint32_t liteav_av_map_videotoolbox_format_from_pixfmt(enum AVPixelFormat pix_fmt); + +#endif /* AVUTIL_HWCONTEXT_VIDEOTOOLBOX_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/imgutils.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/imgutils.h new file mode 100644 index 0000000..fc0216b --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/imgutils.h @@ -0,0 +1,278 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_IMGUTILS_H +#define AVUTIL_IMGUTILS_H + +/** + * @file + * misc image utilities + * + * @addtogroup lavu_picture + * @{ + */ + +#include "avutil.h" +#include "pixdesc.h" +#include "rational.h" + +/** + * Compute the max pixel step for each plane of an image with a + * format described by pixdesc. + * + * The pixel step is the distance in bytes between the first byte of + * the group of bytes which describe a pixel component and the first + * byte of the successive group in the same plane for the same + * component. + * + * @param max_pixsteps an array which is filled with the max pixel step + * for each plane. Since a plane may contain different pixel + * components, the computed max_pixsteps[plane] is relative to the + * component in the plane with the max pixel step. + * @param max_pixstep_comps an array which is filled with the component + * for each plane which has the max pixel step. May be NULL. + */ +void liteav_av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4], + const AVPixFmtDescriptor *pixdesc); + +/** + * Compute the size of an image line with format pix_fmt and width + * width for the plane plane. 
+ * + * @return the computed size in bytes + */ +int liteav_av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane); + +/** + * Fill plane linesizes for an image with pixel format pix_fmt and + * width width. + * + * @param linesizes array to be filled with the linesize for each plane + * @return >= 0 in case of success, a negative error code otherwise + */ +int liteav_av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width); + +/** + * Fill plane data pointers for an image with pixel format pix_fmt and + * height height. + * + * @param data pointers array to be filled with the pointer for each image plane + * @param ptr the pointer to a buffer which will contain the image + * @param linesizes the array containing the linesize for each + * plane, should be filled by liteav_av_image_fill_linesizes() + * @return the size in bytes required for the image buffer, a negative + * error code in case of failure + */ +int liteav_av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, + uint8_t *ptr, const int linesizes[4]); + +/** + * Allocate an image with size w and h and pixel format pix_fmt, and + * fill pointers and linesizes accordingly. + * The allocated image buffer has to be freed by using + * liteav_av_freep(&pointers[0]). + * + * @param align the value to use for buffer size alignment + * @return the size in bytes required for the image buffer, a negative + * error code in case of failure + */ +int liteav_av_image_alloc(uint8_t *pointers[4], int linesizes[4], + int w, int h, enum AVPixelFormat pix_fmt, int align); + +/** + * Copy image plane from src to dst. + * That is, copy "height" number of lines of "bytewidth" bytes each. + * The first byte of each successive line is separated by *_linesize + * bytes. + * + * bytewidth must be contained by both absolute values of dst_linesize + * and src_linesize, otherwise the function behavior is undefined. 
+ * + * @param dst_linesize linesize for the image plane in dst + * @param src_linesize linesize for the image plane in src + */ +void liteav_av_image_copy_plane(uint8_t *dst, int dst_linesize, + const uint8_t *src, int src_linesize, + int bytewidth, int height); + +/** + * Copy image in src_data to dst_data. + * + * @param dst_linesizes linesizes for the image in dst_data + * @param src_linesizes linesizes for the image in src_data + */ +void liteav_av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], + const uint8_t *src_data[4], const int src_linesizes[4], + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Copy image data located in uncacheable (e.g. GPU mapped) memory. Where + * available, this function will use special functionality for reading from such + * memory, which may result in greatly improved performance compared to plain + * liteav_av_image_copy(). + * + * The data pointers and the linesizes must be aligned to the maximum required + * by the CPU architecture. + * + * @note The linesize parameters have the type ptrdiff_t here, while they are + * int for liteav_av_image_copy(). + * @note On x86, the linesizes currently need to be aligned to the cacheline + * size (i.e. 64) to get improved performance. + */ +void liteav_av_image_copy_uc_from(uint8_t *dst_data[4], const ptrdiff_t dst_linesizes[4], + const uint8_t *src_data[4], const ptrdiff_t src_linesizes[4], + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Setup the data pointers and linesizes based on the specified image + * parameters and the provided array. + * + * The fields of the given image are filled in by using the src + * address which points to the image data buffer. Depending on the + * specified pixel format, one or multiple image data pointers and + * line sizes will be set. 
If a planar format is specified, several + * pointers will be set pointing to the different picture planes and + * the line sizes of the different planes will be stored in the + * lines_sizes array. Call with src == NULL to get the required + * size for the src buffer. + * + * To allocate the buffer and fill in the dst_data and dst_linesize in + * one call, use liteav_av_image_alloc(). + * + * @param dst_data data pointers to be filled in + * @param dst_linesize linesizes for the image in dst_data to be filled in + * @param src buffer which will contain or contains the actual image data, can be NULL + * @param pix_fmt the pixel format of the image + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @param align the value used in src for linesize alignment + * @return the size in bytes required for src, a negative error code + * in case of failure + */ +int liteav_av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], + const uint8_t *src, + enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Return the size in bytes of the amount of data required to store an + * image with the given parameters. + * + * @param pix_fmt the pixel format of the image + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @param align the assumed linesize alignment + * @return the buffer size in bytes, a negative error code in case of failure + */ +int liteav_av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Copy image data from an image into a buffer. + * + * liteav_av_image_get_buffer_size() can be used to compute the required size + * for the buffer to fill. 
+ * + * @param dst a buffer into which picture data will be copied + * @param dst_size the size in bytes of dst + * @param src_data pointers containing the source image data + * @param src_linesize linesizes for the image in src_data + * @param pix_fmt the pixel format of the source image + * @param width the width of the source image in pixels + * @param height the height of the source image in pixels + * @param align the assumed linesize alignment for dst + * @return the number of bytes written to dst, or a negative value + * (error code) on error + */ +int liteav_av_image_copy_to_buffer(uint8_t *dst, int dst_size, + const uint8_t * const src_data[4], const int src_linesize[4], + enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Check if the given dimension of an image is valid, meaning that all + * bytes of the image can be addressed with a signed int. + * + * @param w the width of the picture + * @param h the height of the picture + * @param log_offset the offset to sum to the log level for logging with log_ctx + * @param log_ctx the parent logging context, it may be NULL + * @return >= 0 if valid, a negative error code otherwise + */ +int liteav_av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx); + +/** + * Check if the given dimension of an image is valid, meaning that all + * bytes of a plane of an image with the specified pix_fmt can be addressed + * with a signed int. + * + * @param w the width of the picture + * @param h the height of the picture + * @param max_pixels the maximum number of pixels the user wants to accept + * @param pix_fmt the pixel format, can be AV_PIX_FMT_NONE if unknown. 
+ * @param log_offset the offset to sum to the log level for logging with log_ctx + * @param log_ctx the parent logging context, it may be NULL + * @return >= 0 if valid, a negative error code otherwise + */ +int liteav_av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx); + +/** + * Check if the given sample aspect ratio of an image is valid. + * + * It is considered invalid if the denominator is 0 or if applying the ratio + * to the image size would make the smaller dimension less than 1. If the + * sar numerator is 0, it is considered unknown and will return as valid. + * + * @param w width of the image + * @param h height of the image + * @param sar sample aspect ratio of the image + * @return 0 if valid, a negative AVERROR code otherwise + */ +int liteav_av_image_check_sar(unsigned int w, unsigned int h, AVRational sar); + +/** + * Overwrite the image data with black. This is suitable for filling a + * sub-rectangle of an image, meaning the padding between the right most pixel + * and the left most pixel on the next line will not be overwritten. For some + * formats, the image size might be rounded up due to inherent alignment. + * + * If the pixel format has alpha, the alpha is cleared to opaque. + * + * This can return an error if the pixel format is not supported. Normally, all + * non-hwaccel pixel formats should be supported. + * + * Passing NULL for dst_data is allowed. Then the function returns whether the + * operation would have succeeded. (It can return an error if the pix_fmt is + * not supported.) 
+ * + * @param dst_data data pointers to destination image + * @param dst_linesize linesizes for the destination image + * @param pix_fmt the pixel format of the image + * @param range the color range of the image (important for colorspaces such as YUV) + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @return 0 if the image data was cleared, a negative AVERROR code otherwise + */ +int liteav_av_image_fill_black(uint8_t *dst_data[4], const ptrdiff_t dst_linesize[4], + enum AVPixelFormat pix_fmt, enum AVColorRange range, + int width, int height); + +/** + * @} + */ + + +#endif /* AVUTIL_IMGUTILS_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/intfloat.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/intfloat.h new file mode 100644 index 0000000..fe3d7ec --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/intfloat.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2011 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTFLOAT_H +#define AVUTIL_INTFLOAT_H + +#include <stdint.h> +#include "attributes.h" + +union av_intfloat32 { + uint32_t i; + float f; +}; + +union av_intfloat64 { + uint64_t i; + double f; +}; + +/** + * Reinterpret a 32-bit integer as a float. + */ +static av_always_inline float av_int2float(uint32_t i) +{ + union av_intfloat32 v; + v.i = i; + return v.f; +} + +/** + * Reinterpret a float as a 32-bit integer. + */ +static av_always_inline uint32_t av_float2int(float f) +{ + union av_intfloat32 v; + v.f = f; + return v.i; +} + +/** + * Reinterpret a 64-bit integer as a double. + */ +static av_always_inline double av_int2double(uint64_t i) +{ + union av_intfloat64 v; + v.i = i; + return v.f; +} + +/** + * Reinterpret a double as a 64-bit integer. + */ +static av_always_inline uint64_t av_double2int(double f) +{ + union av_intfloat64 v; + v.f = f; + return v.i; +} + +#endif /* AVUTIL_INTFLOAT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/intreadwrite.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/intreadwrite.h new file mode 100644 index 0000000..67c763b --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/intreadwrite.h @@ -0,0 +1,629 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTREADWRITE_H +#define AVUTIL_INTREADWRITE_H + +#include <stdint.h> +#include "libavutil/avconfig.h" +#include "attributes.h" +#include "bswap.h" + +typedef union { + uint64_t u64; + uint32_t u32[2]; + uint16_t u16[4]; + uint8_t u8 [8]; + double f64; + float f32[2]; +} av_alias av_alias64; + +typedef union { + uint32_t u32; + uint16_t u16[2]; + uint8_t u8 [4]; + float f32; +} av_alias av_alias32; + +typedef union { + uint16_t u16; + uint8_t u8 [2]; +} av_alias av_alias16; + +/* + * Arch-specific headers can provide any combination of + * AV_[RW][BLN](16|24|32|48|64) and AV_(COPY|SWAP|ZERO)(64|128) macros. + * Preprocessor symbols must be defined, even if these are implemented + * as inline functions. + * + * R/W means read/write, B/L/N means big/little/native endianness. + * The following macros require aligned access, compared to their + * unaligned variants: AV_(COPY|SWAP|ZERO)(64|128), AV_[RW]N[8-64]A. + * Incorrect usage may range from abysmal performance to crash + * depending on the platform. + * + * The unaligned variants are AV_[RW][BLN][8-64] and AV_COPY*U. 
+ */ + +#ifdef HAVE_AV_CONFIG_H + +#include "config.h" + +#if ARCH_ARM +# include "arm/intreadwrite.h" +#elif ARCH_AVR32 +# include "avr32/intreadwrite.h" +#elif ARCH_MIPS +# include "mips/intreadwrite.h" +#elif ARCH_PPC +# include "ppc/intreadwrite.h" +#elif ARCH_TOMI +# include "tomi/intreadwrite.h" +#elif ARCH_X86 +# include "x86/intreadwrite.h" +#endif + +#endif /* HAVE_AV_CONFIG_H */ + +/* + * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers. + */ + +#if AV_HAVE_BIGENDIAN + +# if defined(AV_RN16) && !defined(AV_RB16) +# define AV_RB16(p) AV_RN16(p) +# elif !defined(AV_RN16) && defined(AV_RB16) +# define AV_RN16(p) AV_RB16(p) +# endif + +# if defined(AV_WN16) && !defined(AV_WB16) +# define AV_WB16(p, v) AV_WN16(p, v) +# elif !defined(AV_WN16) && defined(AV_WB16) +# define AV_WN16(p, v) AV_WB16(p, v) +# endif + +# if defined(AV_RN24) && !defined(AV_RB24) +# define AV_RB24(p) AV_RN24(p) +# elif !defined(AV_RN24) && defined(AV_RB24) +# define AV_RN24(p) AV_RB24(p) +# endif + +# if defined(AV_WN24) && !defined(AV_WB24) +# define AV_WB24(p, v) AV_WN24(p, v) +# elif !defined(AV_WN24) && defined(AV_WB24) +# define AV_WN24(p, v) AV_WB24(p, v) +# endif + +# if defined(AV_RN32) && !defined(AV_RB32) +# define AV_RB32(p) AV_RN32(p) +# elif !defined(AV_RN32) && defined(AV_RB32) +# define AV_RN32(p) AV_RB32(p) +# endif + +# if defined(AV_WN32) && !defined(AV_WB32) +# define AV_WB32(p, v) AV_WN32(p, v) +# elif !defined(AV_WN32) && defined(AV_WB32) +# define AV_WN32(p, v) AV_WB32(p, v) +# endif + +# if defined(AV_RN48) && !defined(AV_RB48) +# define AV_RB48(p) AV_RN48(p) +# elif !defined(AV_RN48) && defined(AV_RB48) +# define AV_RN48(p) AV_RB48(p) +# endif + +# if defined(AV_WN48) && !defined(AV_WB48) +# define AV_WB48(p, v) AV_WN48(p, v) +# elif !defined(AV_WN48) && defined(AV_WB48) +# define AV_WN48(p, v) AV_WB48(p, v) +# endif + +# if defined(AV_RN64) && !defined(AV_RB64) +# define AV_RB64(p) AV_RN64(p) +# elif !defined(AV_RN64) && defined(AV_RB64) 
+# define AV_RN64(p) AV_RB64(p) +# endif + +# if defined(AV_WN64) && !defined(AV_WB64) +# define AV_WB64(p, v) AV_WN64(p, v) +# elif !defined(AV_WN64) && defined(AV_WB64) +# define AV_WN64(p, v) AV_WB64(p, v) +# endif + +#else /* AV_HAVE_BIGENDIAN */ + +# if defined(AV_RN16) && !defined(AV_RL16) +# define AV_RL16(p) AV_RN16(p) +# elif !defined(AV_RN16) && defined(AV_RL16) +# define AV_RN16(p) AV_RL16(p) +# endif + +# if defined(AV_WN16) && !defined(AV_WL16) +# define AV_WL16(p, v) AV_WN16(p, v) +# elif !defined(AV_WN16) && defined(AV_WL16) +# define AV_WN16(p, v) AV_WL16(p, v) +# endif + +# if defined(AV_RN24) && !defined(AV_RL24) +# define AV_RL24(p) AV_RN24(p) +# elif !defined(AV_RN24) && defined(AV_RL24) +# define AV_RN24(p) AV_RL24(p) +# endif + +# if defined(AV_WN24) && !defined(AV_WL24) +# define AV_WL24(p, v) AV_WN24(p, v) +# elif !defined(AV_WN24) && defined(AV_WL24) +# define AV_WN24(p, v) AV_WL24(p, v) +# endif + +# if defined(AV_RN32) && !defined(AV_RL32) +# define AV_RL32(p) AV_RN32(p) +# elif !defined(AV_RN32) && defined(AV_RL32) +# define AV_RN32(p) AV_RL32(p) +# endif + +# if defined(AV_WN32) && !defined(AV_WL32) +# define AV_WL32(p, v) AV_WN32(p, v) +# elif !defined(AV_WN32) && defined(AV_WL32) +# define AV_WN32(p, v) AV_WL32(p, v) +# endif + +# if defined(AV_RN48) && !defined(AV_RL48) +# define AV_RL48(p) AV_RN48(p) +# elif !defined(AV_RN48) && defined(AV_RL48) +# define AV_RN48(p) AV_RL48(p) +# endif + +# if defined(AV_WN48) && !defined(AV_WL48) +# define AV_WL48(p, v) AV_WN48(p, v) +# elif !defined(AV_WN48) && defined(AV_WL48) +# define AV_WN48(p, v) AV_WL48(p, v) +# endif + +# if defined(AV_RN64) && !defined(AV_RL64) +# define AV_RL64(p) AV_RN64(p) +# elif !defined(AV_RN64) && defined(AV_RL64) +# define AV_RN64(p) AV_RL64(p) +# endif + +# if defined(AV_WN64) && !defined(AV_WL64) +# define AV_WL64(p, v) AV_WN64(p, v) +# elif !defined(AV_WN64) && defined(AV_WL64) +# define AV_WN64(p, v) AV_WL64(p, v) +# endif + +#endif /* !AV_HAVE_BIGENDIAN */ + 
+/* + * Define AV_[RW]N helper macros to simplify definitions not provided + * by per-arch headers. + */ + +#if defined(__GNUC__) + +union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias; +union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias; +union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias; + +# define AV_RN(s, p) (((const union unaligned_##s *) (p))->l) +# define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v)) + +#elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_X64) || defined(_M_ARM64)) && AV_HAVE_FAST_UNALIGNED + +# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p))) +# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v)) + +#elif AV_HAVE_FAST_UNALIGNED + +# define AV_RN(s, p) (((const av_alias##s*)(p))->u##s) +# define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v)) + +#else + +#ifndef AV_RB16 +# define AV_RB16(x) \ + ((((const uint8_t*)(x))[0] << 8) | \ + ((const uint8_t*)(x))[1]) +#endif +#ifndef AV_WB16 +# define AV_WB16(p, val) do { \ + uint16_t d = (val); \ + ((uint8_t*)(p))[1] = (d); \ + ((uint8_t*)(p))[0] = (d)>>8; \ + } while(0) +#endif + +#ifndef AV_RL16 +# define AV_RL16(x) \ + ((((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL16 +# define AV_WL16(p, val) do { \ + uint16_t d = (val); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + } while(0) +#endif + +#ifndef AV_RB32 +# define AV_RB32(x) \ + (((uint32_t)((const uint8_t*)(x))[0] << 24) | \ + (((const uint8_t*)(x))[1] << 16) | \ + (((const uint8_t*)(x))[2] << 8) | \ + ((const uint8_t*)(x))[3]) +#endif +#ifndef AV_WB32 +# define AV_WB32(p, val) do { \ + uint32_t d = (val); \ + ((uint8_t*)(p))[3] = (d); \ + ((uint8_t*)(p))[2] = (d)>>8; \ + ((uint8_t*)(p))[1] = (d)>>16; \ + ((uint8_t*)(p))[0] = (d)>>24; \ + } while(0) +#endif + +#ifndef AV_RL32 +# define AV_RL32(x) \ + (((uint32_t)((const uint8_t*)(x))[3] << 24) | \ + (((const uint8_t*)(x))[2] << 16) | \ + 
(((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL32 +# define AV_WL32(p, val) do { \ + uint32_t d = (val); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + } while(0) +#endif + +#ifndef AV_RB64 +# define AV_RB64(x) \ + (((uint64_t)((const uint8_t*)(x))[0] << 56) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 48) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[5] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[6] << 8) | \ + (uint64_t)((const uint8_t*)(x))[7]) +#endif +#ifndef AV_WB64 +# define AV_WB64(p, val) do { \ + uint64_t d = (val); \ + ((uint8_t*)(p))[7] = (d); \ + ((uint8_t*)(p))[6] = (d)>>8; \ + ((uint8_t*)(p))[5] = (d)>>16; \ + ((uint8_t*)(p))[4] = (d)>>24; \ + ((uint8_t*)(p))[3] = (d)>>32; \ + ((uint8_t*)(p))[2] = (d)>>40; \ + ((uint8_t*)(p))[1] = (d)>>48; \ + ((uint8_t*)(p))[0] = (d)>>56; \ + } while(0) +#endif + +#ifndef AV_RL64 +# define AV_RL64(x) \ + (((uint64_t)((const uint8_t*)(x))[7] << 56) | \ + ((uint64_t)((const uint8_t*)(x))[6] << 48) | \ + ((uint64_t)((const uint8_t*)(x))[5] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ + (uint64_t)((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL64 +# define AV_WL64(p, val) do { \ + uint64_t d = (val); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + ((uint8_t*)(p))[4] = (d)>>32; \ + ((uint8_t*)(p))[5] = (d)>>40; \ + ((uint8_t*)(p))[6] = (d)>>48; \ + ((uint8_t*)(p))[7] = (d)>>56; \ + } while(0) +#endif + +#if AV_HAVE_BIGENDIAN +# define AV_RN(s, p) AV_RB##s(p) +# define AV_WN(s, p, v) AV_WB##s(p, v) +#else +# define 
AV_RN(s, p) AV_RL##s(p) +# define AV_WN(s, p, v) AV_WL##s(p, v) +#endif + +#endif /* HAVE_FAST_UNALIGNED */ + +#ifndef AV_RN16 +# define AV_RN16(p) AV_RN(16, p) +#endif + +#ifndef AV_RN32 +# define AV_RN32(p) AV_RN(32, p) +#endif + +#ifndef AV_RN64 +# define AV_RN64(p) AV_RN(64, p) +#endif + +#ifndef AV_WN16 +# define AV_WN16(p, v) AV_WN(16, p, v) +#endif + +#ifndef AV_WN32 +# define AV_WN32(p, v) AV_WN(32, p, v) +#endif + +#ifndef AV_WN64 +# define AV_WN64(p, v) AV_WN(64, p, v) +#endif + +#if AV_HAVE_BIGENDIAN +# define AV_RB(s, p) AV_RN##s(p) +# define AV_WB(s, p, v) AV_WN##s(p, v) +# define AV_RL(s, p) av_bswap##s(AV_RN##s(p)) +# define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v)) +#else +# define AV_RB(s, p) av_bswap##s(AV_RN##s(p)) +# define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v)) +# define AV_RL(s, p) AV_RN##s(p) +# define AV_WL(s, p, v) AV_WN##s(p, v) +#endif + +#define AV_RB8(x) (((const uint8_t*)(x))[0]) +#define AV_WB8(p, d) do { ((uint8_t*)(p))[0] = (d); } while(0) + +#define AV_RL8(x) AV_RB8(x) +#define AV_WL8(p, d) AV_WB8(p, d) + +#ifndef AV_RB16 +# define AV_RB16(p) AV_RB(16, p) +#endif +#ifndef AV_WB16 +# define AV_WB16(p, v) AV_WB(16, p, v) +#endif + +#ifndef AV_RL16 +# define AV_RL16(p) AV_RL(16, p) +#endif +#ifndef AV_WL16 +# define AV_WL16(p, v) AV_WL(16, p, v) +#endif + +#ifndef AV_RB32 +# define AV_RB32(p) AV_RB(32, p) +#endif +#ifndef AV_WB32 +# define AV_WB32(p, v) AV_WB(32, p, v) +#endif + +#ifndef AV_RL32 +# define AV_RL32(p) AV_RL(32, p) +#endif +#ifndef AV_WL32 +# define AV_WL32(p, v) AV_WL(32, p, v) +#endif + +#ifndef AV_RB64 +# define AV_RB64(p) AV_RB(64, p) +#endif +#ifndef AV_WB64 +# define AV_WB64(p, v) AV_WB(64, p, v) +#endif + +#ifndef AV_RL64 +# define AV_RL64(p) AV_RL(64, p) +#endif +#ifndef AV_WL64 +# define AV_WL64(p, v) AV_WL(64, p, v) +#endif + +#ifndef AV_RB24 +# define AV_RB24(x) \ + ((((const uint8_t*)(x))[0] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[2]) +#endif +#ifndef AV_WB24 +# define 
AV_WB24(p, d) do { \ + ((uint8_t*)(p))[2] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[0] = (d)>>16; \ + } while(0) +#endif + +#ifndef AV_RL24 +# define AV_RL24(x) \ + ((((const uint8_t*)(x))[2] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL24 +# define AV_WL24(p, d) do { \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + } while(0) +#endif + +#ifndef AV_RB48 +# define AV_RB48(x) \ + (((uint64_t)((const uint8_t*)(x))[0] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 8) | \ + (uint64_t)((const uint8_t*)(x))[5]) +#endif +#ifndef AV_WB48 +# define AV_WB48(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[5] = (d); \ + ((uint8_t*)(p))[4] = (d)>>8; \ + ((uint8_t*)(p))[3] = (d)>>16; \ + ((uint8_t*)(p))[2] = (d)>>24; \ + ((uint8_t*)(p))[1] = (d)>>32; \ + ((uint8_t*)(p))[0] = (d)>>40; \ + } while(0) +#endif + +#ifndef AV_RL48 +# define AV_RL48(x) \ + (((uint64_t)((const uint8_t*)(x))[5] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ + (uint64_t)((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL48 +# define AV_WL48(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + ((uint8_t*)(p))[4] = (d)>>32; \ + ((uint8_t*)(p))[5] = (d)>>40; \ + } while(0) +#endif + +/* + * The AV_[RW]NA macros access naturally aligned data + * in a type-safe way. 
+ */ + +#define AV_RNA(s, p) (((const av_alias##s*)(p))->u##s) +#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v)) + +#ifndef AV_RN16A +# define AV_RN16A(p) AV_RNA(16, p) +#endif + +#ifndef AV_RN32A +# define AV_RN32A(p) AV_RNA(32, p) +#endif + +#ifndef AV_RN64A +# define AV_RN64A(p) AV_RNA(64, p) +#endif + +#ifndef AV_WN16A +# define AV_WN16A(p, v) AV_WNA(16, p, v) +#endif + +#ifndef AV_WN32A +# define AV_WN32A(p, v) AV_WNA(32, p, v) +#endif + +#ifndef AV_WN64A +# define AV_WN64A(p, v) AV_WNA(64, p, v) +#endif + +/* + * The AV_COPYxxU macros are suitable for copying data to/from unaligned + * memory locations. + */ + +#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s)); + +#ifndef AV_COPY16U +# define AV_COPY16U(d, s) AV_COPYU(16, d, s) +#endif + +#ifndef AV_COPY32U +# define AV_COPY32U(d, s) AV_COPYU(32, d, s) +#endif + +#ifndef AV_COPY64U +# define AV_COPY64U(d, s) AV_COPYU(64, d, s) +#endif + +#ifndef AV_COPY128U +# define AV_COPY128U(d, s) \ + do { \ + AV_COPY64U(d, s); \ + AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8); \ + } while(0) +#endif + +/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be + * naturally aligned. They may be implemented using MMX, + * so emms_c() must be called before using any float code + * afterwards. 
+ */ + +#define AV_COPY(n, d, s) \ + (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n) + +#ifndef AV_COPY16 +# define AV_COPY16(d, s) AV_COPY(16, d, s) +#endif + +#ifndef AV_COPY32 +# define AV_COPY32(d, s) AV_COPY(32, d, s) +#endif + +#ifndef AV_COPY64 +# define AV_COPY64(d, s) AV_COPY(64, d, s) +#endif + +#ifndef AV_COPY128 +# define AV_COPY128(d, s) \ + do { \ + AV_COPY64(d, s); \ + AV_COPY64((char*)(d)+8, (char*)(s)+8); \ + } while(0) +#endif + +#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b)) + +#ifndef AV_SWAP64 +# define AV_SWAP64(a, b) AV_SWAP(64, a, b) +#endif + +#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0) + +#ifndef AV_ZERO16 +# define AV_ZERO16(d) AV_ZERO(16, d) +#endif + +#ifndef AV_ZERO32 +# define AV_ZERO32(d) AV_ZERO(32, d) +#endif + +#ifndef AV_ZERO64 +# define AV_ZERO64(d) AV_ZERO(64, d) +#endif + +#ifndef AV_ZERO128 +# define AV_ZERO128(d) \ + do { \ + AV_ZERO64(d); \ + AV_ZERO64((char*)(d)+8); \ + } while(0) +#endif + +#endif /* AVUTIL_INTREADWRITE_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/lfg.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/lfg.h new file mode 100644 index 0000000..2a92441 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/lfg.h @@ -0,0 +1,72 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Lagged Fibonacci PRNG + * Copyright (c) 2008 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LFG_H +#define AVUTIL_LFG_H + +#include <stdint.h> + +typedef struct AVLFG { + unsigned int state[64]; + int index; +} AVLFG; + +void liteav_av_lfg_init(AVLFG *c, unsigned int seed); + +/** + * Seed the state of the ALFG using binary data. + * + * Return value: 0 on success, negative value (AVERROR) on failure. + */ +int liteav_av_lfg_init_from_data(AVLFG *c, const uint8_t *data, unsigned int length); + +/** + * Get the next random unsigned 32-bit number using an ALFG. + * + * Please also consider a simple LCG like state= state*1664525+1013904223, + * it may be good enough and faster for your specific use case. + */ +static inline unsigned int av_lfg_get(AVLFG *c){ + c->state[c->index & 63] = c->state[(c->index-24) & 63] + c->state[(c->index-55) & 63]; + return c->state[c->index++ & 63]; +} + +/** + * Get the next random unsigned 32-bit number using a MLFG. + * + * Please also consider av_lfg_get() above, it is faster. + */ +static inline unsigned int av_mlfg_get(AVLFG *c){ + unsigned int a= c->state[(c->index-55) & 63]; + unsigned int b= c->state[(c->index-24) & 63]; + return c->state[c->index++ & 63] = 2*a*b+a+b; +} + +/** + * Get the next two numbers generated by a Box-Muller Gaussian + * generator using the random numbers issued by lfg. 
+ * + * @param out array where the two generated numbers are placed + */ +void liteav_av_bmg_get(AVLFG *lfg, double out[2]); + +#endif /* AVUTIL_LFG_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/log.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/log.h new file mode 100644 index 0000000..9396fcc --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/log.h @@ -0,0 +1,411 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LOG_H +#define AVUTIL_LOG_H + +#include <stdarg.h> +#include "avutil.h" +#include "attributes.h" +#include "version.h" + +typedef enum { + AV_CLASS_CATEGORY_NA = 0, + AV_CLASS_CATEGORY_INPUT, + AV_CLASS_CATEGORY_OUTPUT, + AV_CLASS_CATEGORY_MUXER, + AV_CLASS_CATEGORY_DEMUXER, + AV_CLASS_CATEGORY_ENCODER, + AV_CLASS_CATEGORY_DECODER, + AV_CLASS_CATEGORY_FILTER, + AV_CLASS_CATEGORY_BITSTREAM_FILTER, + AV_CLASS_CATEGORY_SWSCALER, + AV_CLASS_CATEGORY_SWRESAMPLER, + AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT = 40, + AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT, + AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT, + AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT, + AV_CLASS_CATEGORY_DEVICE_OUTPUT, + AV_CLASS_CATEGORY_DEVICE_INPUT, + AV_CLASS_CATEGORY_NB ///< not part of ABI/API +}AVClassCategory; + +#define AV_IS_INPUT_DEVICE(category) \ + (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT) || \ + ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT) || \ + ((category) == AV_CLASS_CATEGORY_DEVICE_INPUT)) + +#define AV_IS_OUTPUT_DEVICE(category) \ + (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT) || \ + ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT) || \ + ((category) == AV_CLASS_CATEGORY_DEVICE_OUTPUT)) + +struct AVOptionRanges; + +/** + * Describe the class of an AVClass context structure. That is an + * arbitrary struct of which the first field is a pointer to an + * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.). + */ +typedef struct AVClass { + /** + * The name of the class; usually it is the same name as the + * context structure type to which the AVClass is associated. + */ + const char* class_name; + + /** + * A pointer to a function which returns the name of a context + * instance ctx associated with the class. 
+ */ + const char* (*item_name)(void* ctx); + + /** + * a pointer to the first option specified in the class if any or NULL + * + * @see av_set_default_options() + */ + const struct AVOption *option; + + /** + * LIBAVUTIL_VERSION with which this structure was created. + * This is used to allow fields to be added without requiring major + * version bumps everywhere. + */ + + int version; + + /** + * Offset in the structure where log_level_offset is stored. + * 0 means there is no such variable + */ + int log_level_offset_offset; + + /** + * Offset in the structure where a pointer to the parent context for + * logging is stored. For example a decoder could pass its AVCodecContext + * to eval as such a parent context, which an liteav_av_log() implementation + * could then leverage to display the parent context. + * The offset can be NULL. + */ + int parent_log_context_offset; + + /** + * Return next AVOptions-enabled child or NULL + */ + void* (*child_next)(void *obj, void *prev); + + /** + * Return an AVClass corresponding to the next potential + * AVOptions-enabled child. + * + * The difference between child_next and this is that + * child_next iterates over _already existing_ objects, while + * child_class_next iterates over _all possible_ children. + */ + const struct AVClass* (*child_class_next)(const struct AVClass *prev); + + /** + * Category used for visualization (like color) + * This is only set if the category is equal for all objects using this class. + * available since version (51 << 16 | 56 << 8 | 100) + */ + AVClassCategory category; + + /** + * Callback to return the category. + * available since version (51 << 16 | 59 << 8 | 100) + */ + AVClassCategory (*get_category)(void* ctx); + + /** + * Callback to return the supported/allowed ranges. 
+ * available since version (52.12) + */ + int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags); +} AVClass; + +/** + * @addtogroup lavu_log + * + * @{ + * + * @defgroup lavu_log_constants Logging Constants + * + * @{ + */ + +/** + * Print no output. + */ +#define AV_LOG_QUIET -8 + +/** + * Something went really wrong and we will crash now. + */ +#define AV_LOG_PANIC 0 + +/** + * Something went wrong and recovery is not possible. + * For example, no header was found for a format which depends + * on headers or an illegal combination of parameters is used. + */ +#define AV_LOG_FATAL 8 + +/** + * Something went wrong and cannot losslessly be recovered. + * However, not all future data is affected. + */ +#define AV_LOG_ERROR 16 + +/** + * Something somehow does not look correct. This may or may not + * lead to problems. An example would be the use of '-vstrict -2'. + */ +#define AV_LOG_WARNING 24 + +/** + * Standard information. + */ +#define AV_LOG_INFO 32 + +/** + * Detailed information. + */ +#define AV_LOG_VERBOSE 40 + +/** + * Stuff which is only useful for libav* developers. + */ +#define AV_LOG_DEBUG 48 + +/** + * Extremely verbose debugging, useful for libav* development. + */ +#define AV_LOG_TRACE 56 + +#define AV_LOG_MAX_OFFSET (AV_LOG_TRACE - AV_LOG_QUIET) + +/** + * @} + */ + +/** + * Sets additional colors for extended debugging sessions. + * @code + liteav_av_log(ctx, AV_LOG_DEBUG|AV_LOG_C(134), "Message in purple\n"); + @endcode + * Requires 256color terminal support. Uses outside debugging is not + * recommended. + */ +#define AV_LOG_C(x) ((x) << 8) + +/** + * Send the specified message to the log if the level is less than or equal + * to the current av_log_level. By default, all logging messages are sent to + * stderr. This behavior can be altered by setting a different logging callback + * function. 
+ * @see liteav_av_log_set_callback + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct or NULL if general log. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + */ +void liteav_av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4); + + +/** + * Send the specified message to the log if the level is less than or equal + * to the current av_log_level. By default, all logging messages are sent to + * stderr. This behavior can be altered by setting a different logging callback + * function. + * @see liteav_av_log_set_callback + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + * @param vl The arguments referenced by the format string. + */ +void liteav_av_vlog(void *avcl, int level, const char *fmt, va_list vl); + +/** + * Get the current log level + * + * @see lavu_log_constants + * + * @return Current log level + */ +int liteav_av_log_get_level(void); + +/** + * Set the log level + * + * @see lavu_log_constants + * + * @param level Logging level + */ +void liteav_av_log_set_level(int level); + +/** + * Set the logging callback + * + * @note The callback must be thread safe, even if the application does not use + * threads itself as some codecs are multithreaded. + * + * @see liteav_av_log_default_callback + * + * @param callback A logging function with a compatible signature. 
+ */ +void liteav_av_log_set_callback(void (*callback)(void*, int, const char*, va_list)); + +/** + * Default logging callback + * + * It prints the message to stderr, optionally colorizing it. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + * @param vl The arguments referenced by the format string. + */ +void liteav_av_log_default_callback(void *avcl, int level, const char *fmt, + va_list vl); + +/** + * Return the context name + * + * @param ctx The AVClass context + * + * @return The AVClass class_name + */ +const char* liteav_av_default_item_name(void* ctx); +AVClassCategory liteav_av_default_get_category(void *ptr); + +/** + * Format a line of log the same way as the default callback. + * @param line buffer to receive the formatted line + * @param line_size size of the buffer + * @param print_prefix used to store whether the prefix must be printed; + * must point to a persistent integer initially set to 1 + */ +void liteav_av_log_format_line(void *ptr, int level, const char *fmt, va_list vl, + char *line, int line_size, int *print_prefix); + +/** + * Format a line of log the same way as the default callback. 
+ * @param line buffer to receive the formatted line; + * may be NULL if line_size is 0 + * @param line_size size of the buffer; at most line_size-1 characters will + * be written to the buffer, plus one null terminator + * @param print_prefix used to store whether the prefix must be printed; + * must point to a persistent integer initially set to 1 + * @return Returns a negative value if an error occurred, otherwise returns + * the number of characters that would have been written for a + * sufficiently large buffer, not including the terminating null + * character. If the return value is not less than line_size, it means + * that the log message was truncated to fit the buffer. + */ +int liteav_av_log_format_line2(void *ptr, int level, const char *fmt, va_list vl, + char *line, int line_size, int *print_prefix); + +/** + * Skip repeated messages, this requires the user app to use liteav_av_log() instead of + * (f)printf as the 2 would otherwise interfere and lead to + * "Last message repeated x times" messages below (f)printf messages with some + * bad luck. + * Also to receive the last, "last repeated" line if any, the user app must + * call liteav_av_log(NULL, AV_LOG_QUIET, "%s", ""); at the end + */ +#define AV_LOG_SKIP_REPEATED 1 + +/** + * Include the log severity in messages originating from codecs. 
+ * + * Results in messages such as: + * [rawvideo @ 0xDEADBEEF] [error] encode did not produce valid pts + */ +#define AV_LOG_PRINT_LEVEL 2 + +void liteav_av_log_set_flags(int arg); +int liteav_av_log_get_flags(void); + +enum FFmpegMsgType { + FFMPEG_MSG_TYPE_DATAREPORT, +}; + +enum FFmpegDataReportType { + FFMPEG_DATAREPORT_TYPE_NETERROR = 0, //Some Net Error happened, will send last error in tcp.c to app + FFMPEG_DATAREPORT_TYPE_BYTES, //Size that we got from net this time + FFMPEG_DATAREPORT_TYPE_REDIRECTIP, //The redirected ip + FFMPEG_DATAREPORT_TYPE_SVRCONNECTED, //The time when svr is connected + FFMPEG_DATAREPORT_TYPE_DURERROR, //This ts's duration is different from m3u8's defination + FFMPEG_DATAREPORT_TYPE_M3U8ERROR, //This ts's m3u8 is wrong, so we cannot trust its seq_no, and sometimes it may skip 1 Ts_file. + FFMPEG_DATAREPORT_TYPE_TCPCONNECTTIME, //Time(in micro seconds) taken for a TCP connection. It's reported for every successful TCP connection. + FFMPEG_DATAREPORT_TYPE_M3U8DATETIME, //The value of the #EXT-X-PROGRAM-DATE-TIME tag for the current segment + FFMPEG_DATAREPORT_TYPE_M3U8ADTIME, //The value of the #EXT-QQHLS-AD tag for the current segment + + FFMPEG_DATAREPORT_TYPE_GETSTREAMDATATIME, //get stream data at the probe data + FFMPEG_DATAREPORT_TYPE_TCPRECVERROR, //tcp recv error + FFMPEG_DATAREPORT_TYPE_REGISTER_ALL_FAIL // av_reigister_all fail + +}; + +enum FFmpegNetErrorType { + NETERROR_TYPE_GETADDR = 0x00010000, + NETERROR_TYPE_OPENSOCKET = 0x00020000, + NETERROR_TYPE_BINDFAIL = 0x00030000, + NETERROR_TYPE_LISTENFAIL = 0x00040000, + NETERROR_TYPE_POLLFAIL = 0x00050000, + NETERROR_TYPE_ACCEPTFAIL = 0x00060000, + NETERROR_TYPE_RECV = 0x00070000, + NETERROR_TYPE_READTIMEOUT = 0x00080000, + NETERROR_TYPE_SEND = 0x00090000, + NETERROR_TYPE_WRITETIMEOUT = 0x000A0000, + NETERROR_TYPE_OPENTIMEOUT = 0x000B0000, + NETERROR_TYPE_OTHER = 0x40000000, +}; + +typedef void (*av_msg_callback_t)(int, int, const char*, int, void*); + +// This function is 
a little tricky. +// There's no simple way to send a customized message to the caller with a specific context, +// so we take the AVIOInterruptCB->opaque as the context. But AVIOInterruptCB is defined in +// avformat module, to avoid compiling errors, we define a macro here to access the opaque field. +// +#define INTERRUPT_CB_OPAQUE(pCb) (pCb ? pCb->opaque : NULL) +void liteav_av_msg(int nMsgType, int nSubType, const char* pucMsgBuf, int nBufSize, void* pContext); + +void liteav_av_msg_set_callback(av_msg_callback_t cb); + +/** + * @} + */ + +#endif /* AVUTIL_LOG_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/lzo.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/lzo.h new file mode 100644 index 0000000..c034039 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/lzo.h @@ -0,0 +1,66 @@ +/* + * LZO 1x decompression + * copyright (c) 2006 Reimar Doeffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LZO_H +#define AVUTIL_LZO_H + +/** + * @defgroup lavu_lzo LZO + * @ingroup lavu_crypto + * + * @{ + */ + +#include <stdint.h> + +/** @name Error flags returned by av_lzo1x_decode + * @{ */ +/// end of the input buffer reached before decoding finished +#define AV_LZO_INPUT_DEPLETED 1 +/// decoded data did not fit into output buffer +#define AV_LZO_OUTPUT_FULL 2 +/// a reference to previously decoded data was wrong +#define AV_LZO_INVALID_BACKPTR 4 +/// a non-specific error in the compressed bitstream +#define AV_LZO_ERROR 8 +/** @} */ + +#define AV_LZO_INPUT_PADDING 8 +#define AV_LZO_OUTPUT_PADDING 12 + +/** + * @brief Decodes LZO 1x compressed data. + * @param out output buffer + * @param outlen size of output buffer, number of bytes left are returned here + * @param in input buffer + * @param inlen size of input buffer, number of bytes left are returned here + * @return 0 on success, otherwise a combination of the error flags above + * + * Make sure all buffers are appropriately padded, in must provide + * AV_LZO_INPUT_PADDING, out must provide AV_LZO_OUTPUT_PADDING additional bytes. + */ +int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen); + +/** + * @} + */ + +#endif /* AVUTIL_LZO_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/macros.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/macros.h new file mode 100644 index 0000000..2007ee5 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/macros.h @@ -0,0 +1,50 @@ +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu + * Utility Preprocessor macros + */ + +#ifndef AVUTIL_MACROS_H +#define AVUTIL_MACROS_H + +/** + * @addtogroup preproc_misc Preprocessor String Macros + * + * String manipulation macros + * + * @{ + */ + +#define AV_STRINGIFY(s) AV_TOSTRING(s) +#define AV_TOSTRING(s) #s + +#define AV_GLUE(a, b) a ## b +#define AV_JOIN(a, b) AV_GLUE(a, b) + +/** + * @} + */ + +#define AV_PRAGMA(s) _Pragma(#s) + +#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1)) + +#endif /* AVUTIL_MACROS_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/mastering_display_metadata.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/mastering_display_metadata.h new file mode 100644 index 0000000..03670cc --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/mastering_display_metadata.h @@ -0,0 +1,129 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2016 Neil Birkbeck <neil.birkbeck@gmail.com> + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MASTERING_DISPLAY_METADATA_H +#define AVUTIL_MASTERING_DISPLAY_METADATA_H + +#include "frame.h" +#include "rational.h" + + +/** + * Mastering display metadata capable of representing the color volume of + * the display used to master the content (SMPTE 2086:2014). + * + * To be used as payload of a AVFrameSideData or AVPacketSideData with the + * appropriate type. + * + * @note The struct should be allocated with liteav_av_mastering_display_metadata_alloc() + * and its size is not a part of the public ABI. + */ +typedef struct AVMasteringDisplayMetadata { + /** + * CIE 1931 xy chromaticity coords of color primaries (r, g, b order). + */ + AVRational display_primaries[3][2]; + + /** + * CIE 1931 xy chromaticity coords of white point. + */ + AVRational white_point[2]; + + /** + * Min luminance of mastering display (cd/m^2). + */ + AVRational min_luminance; + + /** + * Max luminance of mastering display (cd/m^2). + */ + AVRational max_luminance; + + /** + * Flag indicating whether the display primaries (and white point) are set. + */ + int has_primaries; + + /** + * Flag indicating whether the luminance (min_ and max_) have been set. 
+ */ + int has_luminance; + +} AVMasteringDisplayMetadata; + +/** + * Allocate an AVMasteringDisplayMetadata structure and set its fields to + * default values. The resulting struct can be freed using liteav_av_freep(). + * + * @return An AVMasteringDisplayMetadata filled with default values or NULL + * on failure. + */ +AVMasteringDisplayMetadata *liteav_av_mastering_display_metadata_alloc(void); + +/** + * Allocate a complete AVMasteringDisplayMetadata and add it to the frame. + * + * @param frame The frame which side data is added to. + * + * @return The AVMasteringDisplayMetadata structure to be filled by caller. + */ +AVMasteringDisplayMetadata *liteav_av_mastering_display_metadata_create_side_data(AVFrame *frame); + +/** + * Content light level needed by to transmit HDR over HDMI (CTA-861.3). + * + * To be used as payload of a AVFrameSideData or AVPacketSideData with the + * appropriate type. + * + * @note The struct should be allocated with liteav_av_content_light_metadata_alloc() + * and its size is not a part of the public ABI. + */ +typedef struct AVContentLightMetadata { + /** + * Max content light level (cd/m^2). + */ + unsigned MaxCLL; + + /** + * Max average light level per frame (cd/m^2). + */ + unsigned MaxFALL; +} AVContentLightMetadata; + +/** + * Allocate an AVContentLightMetadata structure and set its fields to + * default values. The resulting struct can be freed using liteav_av_freep(). + * + * @return An AVContentLightMetadata filled with default values or NULL + * on failure. + */ +AVContentLightMetadata *liteav_av_content_light_metadata_alloc(size_t *size); + +/** + * Allocate a complete AVContentLightMetadata and add it to the frame. + * + * @param frame The frame which side data is added to. + * + * @return The AVContentLightMetadata structure to be filled by caller. 
+ */ +AVContentLightMetadata *liteav_av_content_light_metadata_create_side_data(AVFrame *frame); + +#endif /* AVUTIL_MASTERING_DISPLAY_METADATA_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/mathematics.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/mathematics.h new file mode 100644 index 0000000..085fb2d --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/mathematics.h @@ -0,0 +1,243 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2005-2012 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @addtogroup lavu_math + * Mathematical utilities for working with timestamp and time base. 
+ */ + +#ifndef AVUTIL_MATHEMATICS_H +#define AVUTIL_MATHEMATICS_H + +#include <stdint.h> +#include <math.h> +#include "attributes.h" +#include "rational.h" +#include "intfloat.h" + +#ifndef M_E +#define M_E 2.7182818284590452354 /* e */ +#endif +#ifndef M_LN2 +#define M_LN2 0.69314718055994530942 /* log_e 2 */ +#endif +#ifndef M_LN10 +#define M_LN10 2.30258509299404568402 /* log_e 10 */ +#endif +#ifndef M_LOG2_10 +#define M_LOG2_10 3.32192809488736234787 /* log_2 10 */ +#endif +#ifndef M_PHI +#define M_PHI 1.61803398874989484820 /* phi / golden ratio */ +#endif +#ifndef M_PI +#define M_PI 3.14159265358979323846 /* pi */ +#endif +#ifndef M_PI_2 +#define M_PI_2 1.57079632679489661923 /* pi/2 */ +#endif +#ifndef M_SQRT1_2 +#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */ +#endif +#ifndef M_SQRT2 +#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */ +#endif +#ifndef NAN +#define NAN av_int2float(0x7fc00000) +#endif +#ifndef INFINITY +#define INFINITY av_int2float(0x7f800000) +#endif + +/** + * @addtogroup lavu_math + * + * @{ + */ + +/** + * Rounding methods. + */ +enum AVRounding { + AV_ROUND_ZERO = 0, ///< Round toward zero. + AV_ROUND_INF = 1, ///< Round away from zero. + AV_ROUND_DOWN = 2, ///< Round toward -infinity. + AV_ROUND_UP = 3, ///< Round toward +infinity. + AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero. + /** + * Flag telling rescaling functions to pass `INT64_MIN`/`MAX` through + * unchanged, avoiding special cases for #AV_NOPTS_VALUE. + * + * Unlike other values of the enumeration AVRounding, this value is a + * bitmask that must be used in conjunction with another value of the + * enumeration through a bitwise OR, in order to set behavior for normal + * cases. 
+ * + * @code{.c} + * liteav_av_rescale_rnd(3, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX); + * // Rescaling 3: + * // Calculating 3 * 1 / 2 + * // 3 / 2 is rounded up to 2 + * // => 2 + * + * liteav_av_rescale_rnd(AV_NOPTS_VALUE, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX); + * // Rescaling AV_NOPTS_VALUE: + * // AV_NOPTS_VALUE == INT64_MIN + * // AV_NOPTS_VALUE is passed through + * // => AV_NOPTS_VALUE + * @endcode + */ + AV_ROUND_PASS_MINMAX = 8192, +}; + +/** + * Compute the greatest common divisor of two integer operands. + * + * @param a,b Operands + * @return GCD of a and b up to sign; if a >= 0 and b >= 0, return value is >= 0; + * if a == 0 and b == 0, returns 0. + */ +int64_t av_const liteav_av_gcd(int64_t a, int64_t b); + +/** + * Rescale a 64-bit integer with rounding to nearest. + * + * The operation is mathematically equivalent to `a * b / c`, but writing that + * directly can overflow. + * + * This function is equivalent to liteav_av_rescale_rnd() with #AV_ROUND_NEAR_INF. + * + * @see liteav_av_rescale_rnd(), liteav_av_rescale_q(), liteav_av_rescale_q_rnd() + */ +int64_t liteav_av_rescale(int64_t a, int64_t b, int64_t c) av_const; + +/** + * Rescale a 64-bit integer with specified rounding. + * + * The operation is mathematically equivalent to `a * b / c`, but writing that + * directly can overflow, and does not support different rounding methods. + * + * @see liteav_av_rescale(), liteav_av_rescale_q(), liteav_av_rescale_q_rnd() + */ +int64_t liteav_av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd) av_const; + +/** + * Rescale a 64-bit integer by 2 rational numbers. + * + * The operation is mathematically equivalent to `a * bq / cq`. + * + * This function is equivalent to liteav_av_rescale_q_rnd() with #AV_ROUND_NEAR_INF. 
+ * + * @see liteav_av_rescale(), liteav_av_rescale_rnd(), liteav_av_rescale_q_rnd() + */ +int64_t liteav_av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const; + +/** + * Rescale a 64-bit integer by 2 rational numbers with specified rounding. + * + * The operation is mathematically equivalent to `a * bq / cq`. + * + * @see liteav_av_rescale(), liteav_av_rescale_rnd(), liteav_av_rescale_q() + */ +int64_t liteav_av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, + enum AVRounding rnd) av_const; + +/** + * Compare two timestamps each in its own time base. + * + * @return One of the following values: + * - -1 if `ts_a` is before `ts_b` + * - 1 if `ts_a` is after `ts_b` + * - 0 if they represent the same position + * + * @warning + * The result of the function is undefined if one of the timestamps is outside + * the `int64_t` range when represented in the other's timebase. + */ +int liteav_av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b); + +/** + * Compare the remainders of two integer operands divided by a common divisor. + * + * In other words, compare the least significant `log2(mod)` bits of integers + * `a` and `b`. + * + * @code{.c} + * liteav_av_compare_mod(0x11, 0x02, 0x10) < 0 // since 0x11 % 0x10 (0x1) < 0x02 % 0x10 (0x2) + * liteav_av_compare_mod(0x11, 0x02, 0x20) > 0 // since 0x11 % 0x20 (0x11) > 0x02 % 0x20 (0x02) + * @endcode + * + * @param a,b Operands + * @param mod Divisor; must be a power of 2 + * @return + * - a negative value if `a % mod < b % mod` + * - a positive value if `a % mod > b % mod` + * - zero if `a % mod == b % mod` + */ +int64_t liteav_av_compare_mod(uint64_t a, uint64_t b, uint64_t mod); + +/** + * Rescale a timestamp while preserving known durations. + * + * This function is designed to be called per audio packet to scale the input + * timestamp to a different time base. 
Compared to a simple liteav_av_rescale_q() + * call, this function is robust against possible inconsistent frame durations. + * + * The `last` parameter is a state variable that must be preserved for all + * subsequent calls for the same stream. For the first call, `*last` should be + * initialized to #AV_NOPTS_VALUE. + * + * @param[in] in_tb Input time base + * @param[in] in_ts Input timestamp + * @param[in] fs_tb Duration time base; typically this is finer-grained + * (greater) than `in_tb` and `out_tb` + * @param[in] duration Duration till the next call to this function (i.e. + * duration of the current packet/frame) + * @param[in,out] last Pointer to a timestamp expressed in terms of + * `fs_tb`, acting as a state variable + * @param[in] out_tb Output timebase + * @return Timestamp expressed in terms of `out_tb` + * + * @note In the context of this function, "duration" is in term of samples, not + * seconds. + */ +int64_t liteav_av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb); + +/** + * Add a value to a timestamp. + * + * This function guarantees that when the same value is repeatly added that + * no accumulation of rounding errors occurs. 
+ * + * @param[in] ts Input timestamp + * @param[in] ts_tb Input timestamp time base + * @param[in] inc Value to be added + * @param[in] inc_tb Time base of `inc` + */ +int64_t liteav_av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb, int64_t inc); + + +/** + * @} + */ + +#endif /* AVUTIL_MATHEMATICS_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/md5.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/md5.h new file mode 100644 index 0000000..b55c0f3 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/md5.h @@ -0,0 +1,99 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_md5 + * Public header for MD5 hash function implementation. + */ + +#ifndef AVUTIL_MD5_H +#define AVUTIL_MD5_H + +#include <stddef.h> +#include <stdint.h> + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_md5 MD5 + * @ingroup lavu_hash + * MD5 hash function implementation. 
+ * + * @{ + */ + +extern const int liteav_av_md5_size; + +struct AVMD5; + +/** + * Allocate an AVMD5 context. + */ +struct AVMD5 *liteav_av_md5_alloc(void); + +/** + * Initialize MD5 hashing. + * + * @param ctx pointer to the function context (of size liteav_av_md5_size) + */ +void liteav_av_md5_init(struct AVMD5 *ctx); + +/** + * Update hash value. + * + * @param ctx hash function context + * @param src input data to update hash with + * @param len input data length + */ +#if FF_API_CRYPTO_SIZE_T +void liteav_av_md5_update(struct AVMD5 *ctx, const uint8_t *src, int len); +#else +void liteav_av_md5_update(struct AVMD5 *ctx, const uint8_t *src, size_t len); +#endif + +/** + * Finish hashing and output digest value. + * + * @param ctx hash function context + * @param dst buffer where output digest value is stored + */ +void liteav_av_md5_final(struct AVMD5 *ctx, uint8_t *dst); + +/** + * Hash an array of data. + * + * @param dst The output buffer to write the digest into + * @param src The data to hash + * @param len The length of the data, in bytes + */ +#if FF_API_CRYPTO_SIZE_T +void liteav_av_md5_sum(uint8_t *dst, const uint8_t *src, const int len); +#else +void liteav_av_md5_sum(uint8_t *dst, const uint8_t *src, size_t len); +#endif + +/** + * @} + */ + +#endif /* AVUTIL_MD5_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/mem.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/mem.h new file mode 100644 index 0000000..1a1a26a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/mem.h @@ -0,0 +1,701 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_mem + * Memory handling functions + */ + +#ifndef AVUTIL_MEM_H +#define AVUTIL_MEM_H + +#include <limits.h> +#include <stdint.h> + +#include "attributes.h" +#include "error.h" +#include "avutil.h" + +/** + * @addtogroup lavu_mem + * Utilities for manipulating memory. + * + * FFmpeg has several applications of memory that are not required of a typical + * program. For example, the computing-heavy components like video decoding and + * encoding can be sped up significantly through the use of aligned memory. + * + * However, for each of FFmpeg's applications of memory, there might not be a + * recognized or standardized API for that specific use. Memory alignment, for + * instance, varies wildly depending on operating systems, architectures, and + * compilers. Hence, this component of @ref libavutil is created to make + * dealing with memory consistently possible on all platforms. + * + * @{ + * + * @defgroup lavu_mem_macros Alignment Macros + * Helper macros for declaring aligned variables. + * @{ + */ + +/** + * @def DECLARE_ALIGNED(n,t,v) + * Declare a variable that is aligned in memory. 
+ * + * @code{.c} + * DECLARE_ALIGNED(16, uint16_t, aligned_int) = 42; + * DECLARE_ALIGNED(32, uint8_t, aligned_array)[128]; + * + * // The default-alignment equivalent would be + * uint16_t aligned_int = 42; + * uint8_t aligned_array[128]; + * @endcode + * + * @param n Minimum alignment in bytes + * @param t Type of the variable (or array element) + * @param v Name of the variable + */ + +/** + * @def DECLARE_ASM_ALIGNED(n,t,v) + * Declare an aligned variable appropriate for use in inline assembly code. + * + * @code{.c} + * DECLARE_ASM_ALIGNED(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008); + * @endcode + * + * @param n Minimum alignment in bytes + * @param t Type of the variable (or array element) + * @param v Name of the variable + */ + +/** + * @def DECLARE_ASM_CONST(n,t,v) + * Declare a static constant aligned variable appropriate for use in inline + * assembly code. + * + * @code{.c} + * DECLARE_ASM_CONST(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008); + * @endcode + * + * @param n Minimum alignment in bytes + * @param t Type of the variable (or array element) + * @param v Name of the variable + */ + +#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C) + #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v + #define DECLARE_ASM_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v + #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v +#elif defined(__DJGPP__) + #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (FFMIN(n, 16)))) v + #define DECLARE_ASM_ALIGNED(n,t,v) t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v + #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v +#elif defined(__GNUC__) || defined(__clang__) + #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v + #define DECLARE_ASM_ALIGNED(n,t,v) t av_used __attribute__ ((aligned (n))) v + #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned 
(n))) v +#elif defined(_MSC_VER) + #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v + #define DECLARE_ASM_ALIGNED(n,t,v) __declspec(align(n)) t v + #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v +#else + #define DECLARE_ALIGNED(n,t,v) t v + #define DECLARE_ASM_ALIGNED(n,t,v) t v + #define DECLARE_ASM_CONST(n,t,v) static const t v +#endif + +/** + * @} + */ + +/** + * @defgroup lavu_mem_attrs Function Attributes + * Function attributes applicable to memory handling functions. + * + * These function attributes can help compilers emit more useful warnings, or + * generate better code. + * @{ + */ + +/** + * @def av_malloc_attrib + * Function attribute denoting a malloc-like function. + * + * @see <a href="https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-g_t_0040code_007bmalloc_007d-function-attribute-3251">Function attribute `malloc` in GCC's documentation</a> + */ + +#if AV_GCC_VERSION_AT_LEAST(3,1) + #define av_malloc_attrib __attribute__((__malloc__)) +#else + #define av_malloc_attrib +#endif + +/** + * @def av_alloc_size(...) + * Function attribute used on a function that allocates memory, whose size is + * given by the specified parameter(s). + * + * @code{.c} + * void *liteav_av_malloc(size_t size) av_alloc_size(1); + * void *liteav_av_calloc(size_t nmemb, size_t size) av_alloc_size(1, 2); + * @endcode + * + * @param ... One or two parameter indexes, separated by a comma + * + * @see <a href="https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-g_t_0040code_007balloc_005fsize_007d-function-attribute-3220">Function attribute `alloc_size` in GCC's documentation</a> + */ + +#if AV_GCC_VERSION_AT_LEAST(4,3) + #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__))) +#else + #define av_alloc_size(...) +#endif + +/** + * @} + */ + +/** + * @defgroup lavu_mem_funcs Heap Management + * Functions responsible for allocating, freeing, and copying memory. 
+ * + * All memory allocation functions have a built-in upper limit of `INT_MAX` + * bytes. This may be changed with liteav_av_max_alloc(), although exercise extreme + * caution when doing so. + * + * @{ + */ + +/** + * Allocate a memory block with alignment suitable for all memory accesses + * (including vectors if available on the CPU). + * + * @param size Size in bytes for the memory block to be allocated + * @return Pointer to the allocated block, or `NULL` if the block cannot + * be allocated + * @see liteav_av_mallocz() + */ +void *liteav_av_malloc(size_t size) av_malloc_attrib av_alloc_size(1); + +/** + * Allocate a memory block with alignment suitable for all memory accesses + * (including vectors if available on the CPU) and zero all the bytes of the + * block. + * + * @param size Size in bytes for the memory block to be allocated + * @return Pointer to the allocated block, or `NULL` if it cannot be allocated + * @see liteav_av_malloc() + */ +void *liteav_av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1); + +/** + * Allocate a memory block for an array with liteav_av_malloc(). + * + * The allocated memory will have size `size * nmemb` bytes. + * + * @param nmemb Number of element + * @param size Size of a single element + * @return Pointer to the allocated block, or `NULL` if the block cannot + * be allocated + * @see liteav_av_malloc() + */ +av_alloc_size(1, 2) void *liteav_av_malloc_array(size_t nmemb, size_t size); + +/** + * Allocate a memory block for an array with liteav_av_mallocz(). + * + * The allocated memory will have size `size * nmemb` bytes. + * + * @param nmemb Number of elements + * @param size Size of the single element + * @return Pointer to the allocated block, or `NULL` if the block cannot + * be allocated + * + * @see liteav_av_mallocz() + * @see liteav_av_malloc_array() + */ +av_alloc_size(1, 2) void *liteav_av_mallocz_array(size_t nmemb, size_t size); + +/** + * Non-inlined equivalent of liteav_av_mallocz_array(). 
+ * + * Created for symmetry with the calloc() C function. + */ +void *liteav_av_calloc(size_t nmemb, size_t size) av_malloc_attrib; + +/** + * Allocate, reallocate, or free a block of memory. + * + * If `ptr` is `NULL` and `size` > 0, allocate a new block. If `size` is + * zero, free the memory block pointed to by `ptr`. Otherwise, expand or + * shrink that block of memory according to `size`. + * + * @param ptr Pointer to a memory block already allocated with + * liteav_av_realloc() or `NULL` + * @param size Size in bytes of the memory block to be allocated or + * reallocated + * + * @return Pointer to a newly-reallocated block or `NULL` if the block + * cannot be reallocated or the function is used to free the memory block + * + * @warning Unlike liteav_av_malloc(), the returned pointer is not guaranteed to be + * correctly aligned. + * @see liteav_av_fast_realloc() + * @see liteav_av_reallocp() + */ +void *liteav_av_realloc(void *ptr, size_t size) av_alloc_size(2); + +/** + * Allocate, reallocate, or free a block of memory through a pointer to a + * pointer. + * + * If `*ptr` is `NULL` and `size` > 0, allocate a new block. If `size` is + * zero, free the memory block pointed to by `*ptr`. Otherwise, expand or + * shrink that block of memory according to `size`. + * + * @param[in,out] ptr Pointer to a pointer to a memory block already allocated + * with liteav_av_realloc(), or a pointer to `NULL`. The pointer + * is updated on success, or freed on failure. + * @param[in] size Size in bytes for the memory block to be allocated or + * reallocated + * + * @return Zero on success, an AVERROR error code on failure + * + * @warning Unlike liteav_av_malloc(), the allocated memory is not guaranteed to be + * correctly aligned. + */ +av_warn_unused_result +int liteav_av_reallocp(void *ptr, size_t size); + +/** + * Allocate, reallocate, or free a block of memory. 
+ * + * This function does the same thing as liteav_av_realloc(), except: + * - It takes two size arguments and allocates `nelem * elsize` bytes, + * after checking the result of the multiplication for integer overflow. + * - It frees the input block in case of failure, thus avoiding the memory + * leak with the classic + * @code{.c} + * buf = realloc(buf); + * if (!buf) + * return -1; + * @endcode + * pattern. + */ +void *liteav_av_realloc_f(void *ptr, size_t nelem, size_t elsize); + +/** + * Allocate, reallocate, or free an array. + * + * If `ptr` is `NULL` and `nmemb` > 0, allocate a new block. If + * `nmemb` is zero, free the memory block pointed to by `ptr`. + * + * @param ptr Pointer to a memory block already allocated with + * liteav_av_realloc() or `NULL` + * @param nmemb Number of elements in the array + * @param size Size of the single element of the array + * + * @return Pointer to a newly-reallocated block or NULL if the block + * cannot be reallocated or the function is used to free the memory block + * + * @warning Unlike liteav_av_malloc(), the allocated memory is not guaranteed to be + * correctly aligned. + * @see liteav_av_reallocp_array() + */ +av_alloc_size(2, 3) void *liteav_av_realloc_array(void *ptr, size_t nmemb, size_t size); + +/** + * Allocate, reallocate, or free an array through a pointer to a pointer. + * + * If `*ptr` is `NULL` and `nmemb` > 0, allocate a new block. If `nmemb` is + * zero, free the memory block pointed to by `*ptr`. + * + * @param[in,out] ptr Pointer to a pointer to a memory block already + * allocated with liteav_av_realloc(), or a pointer to `NULL`. + * The pointer is updated on success, or freed on failure. + * @param[in] nmemb Number of elements + * @param[in] size Size of the single element + * + * @return Zero on success, an AVERROR error code on failure + * + * @warning Unlike liteav_av_malloc(), the allocated memory is not guaranteed to be + * correctly aligned. 
+ */ +av_alloc_size(2, 3) int liteav_av_reallocp_array(void *ptr, size_t nmemb, size_t size); + +/** + * Reallocate the given buffer if it is not large enough, otherwise do nothing. + * + * If the given buffer is `NULL`, then a new uninitialized buffer is allocated. + * + * If the given buffer is not large enough, and reallocation fails, `NULL` is + * returned and `*size` is set to 0, but the original buffer is not changed or + * freed. + * + * A typical use pattern follows: + * + * @code{.c} + * uint8_t *buf = ...; + * uint8_t *new_buf = liteav_av_fast_realloc(buf, ¤t_size, size_needed); + * if (!new_buf) { + * // Allocation failed; clean up original buffer + * liteav_av_freep(&buf); + * return AVERROR(ENOMEM); + * } + * @endcode + * + * @param[in,out] ptr Already allocated buffer, or `NULL` + * @param[in,out] size Pointer to current size of buffer `ptr`. `*size` is + * changed to `min_size` in case of success or 0 in + * case of failure + * @param[in] min_size New size of buffer `ptr` + * @return `ptr` if the buffer is large enough, a pointer to newly reallocated + * buffer if the buffer was not large enough, or `NULL` in case of + * error + * @see liteav_av_realloc() + * @see liteav_av_fast_malloc() + */ +void *liteav_av_fast_realloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Allocate a buffer, reusing the given one if large enough. + * + * Contrary to liteav_av_fast_realloc(), the current buffer contents might not be + * preserved and on error the old buffer is freed, thus no special handling to + * avoid memleaks is necessary. + * + * `*ptr` is allowed to be `NULL`, in which case allocation always happens if + * `size_needed` is greater than 0. + * + * @code{.c} + * uint8_t *buf = ...; + * liteav_av_fast_malloc(&buf, ¤t_size, size_needed); + * if (!buf) { + * // Allocation failed; buf already freed + * return AVERROR(ENOMEM); + * } + * @endcode + * + * @param[in,out] ptr Pointer to pointer to an already allocated buffer. 
+ * `*ptr` will be overwritten with pointer to new + * buffer on success or `NULL` on failure + * @param[in,out] size Pointer to current size of buffer `*ptr`. `*size` is + * changed to `min_size` in case of success or 0 in + * case of failure + * @param[in] min_size New size of buffer `*ptr` + * @see liteav_av_realloc() + * @see liteav_av_fast_mallocz() + */ +void liteav_av_fast_malloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Allocate and clear a buffer, reusing the given one if large enough. + * + * Like liteav_av_fast_malloc(), but all newly allocated space is initially cleared. + * Reused buffer is not cleared. + * + * `*ptr` is allowed to be `NULL`, in which case allocation always happens if + * `size_needed` is greater than 0. + * + * @param[in,out] ptr Pointer to pointer to an already allocated buffer. + * `*ptr` will be overwritten with pointer to new + * buffer on success or `NULL` on failure + * @param[in,out] size Pointer to current size of buffer `*ptr`. `*size` is + * changed to `min_size` in case of success or 0 in + * case of failure + * @param[in] min_size New size of buffer `*ptr` + * @see liteav_av_fast_malloc() + */ +void liteav_av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size); + +/** + * Free a memory block which has been allocated with a function of liteav_av_malloc() + * or liteav_av_realloc() family. + * + * @param ptr Pointer to the memory block which should be freed. + * + * @note `ptr = NULL` is explicitly allowed. + * @note It is recommended that you use liteav_av_freep() instead, to prevent leaving + * behind dangling pointers. + * @see liteav_av_freep() + */ +void liteav_av_free(void *ptr); + +/** + * Free a memory block which has been allocated with a function of liteav_av_malloc() + * or liteav_av_realloc() family, and set the pointer pointing to it to `NULL`. 
+ * + * @code{.c} + * uint8_t *buf = liteav_av_malloc(16); + * liteav_av_free(buf); + * // buf now contains a dangling pointer to freed memory, and accidental + * // dereference of buf will result in a use-after-free, which may be a + * // security risk. + * + * uint8_t *buf = liteav_av_malloc(16); + * liteav_av_freep(&buf); + * // buf is now NULL, and accidental dereference will only result in a + * // NULL-pointer dereference. + * @endcode + * + * @param ptr Pointer to the pointer to the memory block which should be freed + * @note `*ptr = NULL` is safe and leads to no action. + * @see liteav_av_free() + */ +void liteav_av_freep(void *ptr); + +/** + * Duplicate a string. + * + * @param s String to be duplicated + * @return Pointer to a newly-allocated string containing a + * copy of `s` or `NULL` if the string cannot be allocated + * @see liteav_av_strndup() + */ +char *liteav_av_strdup(const char *s) av_malloc_attrib; + +/** + * Duplicate a substring of a string. + * + * @param s String to be duplicated + * @param len Maximum length of the resulting string (not counting the + * terminating byte) + * @return Pointer to a newly-allocated string containing a + * substring of `s` or `NULL` if the string cannot be allocated + */ +char *liteav_av_strndup(const char *s, size_t len) av_malloc_attrib; + +/** + * Duplicate a buffer with liteav_av_malloc(). + * + * @param p Buffer to be duplicated + * @param size Size in bytes of the buffer copied + * @return Pointer to a newly allocated buffer containing a + * copy of `p` or `NULL` if the buffer cannot be allocated + */ +void *liteav_av_memdup(const void *p, size_t size); + +/** + * Overlapping memcpy() implementation. + * + * @param dst Destination buffer + * @param back Number of bytes back to start copying (i.e. 
the initial size of + * the overlapping window); must be > 0 + * @param cnt Number of bytes to copy; must be >= 0 + * + * @note `cnt > back` is valid, this will copy the bytes we just copied, + * thus creating a repeating pattern with a period length of `back`. + */ +void liteav_av_memcpy_backptr(uint8_t *dst, int back, int cnt); + +/** + * @} + */ + +/** + * @defgroup lavu_mem_dynarray Dynamic Array + * + * Utilities to make an array grow when needed. + * + * Sometimes, the programmer would want to have an array that can grow when + * needed. The libavutil dynamic array utilities fill that need. + * + * libavutil supports two systems of appending elements onto a dynamically + * allocated array, the first one storing the pointer to the value in the + * array, and the second storing the value directly. In both systems, the + * caller is responsible for maintaining a variable containing the length of + * the array, as well as freeing of the array after use. + * + * The first system stores pointers to values in a block of dynamically + * allocated memory. Since only pointers are stored, the function does not need + * to know the size of the type. Both liteav_av_dynarray_add() and + * liteav_av_dynarray_add_nofree() implement this system. + * + * @code + * type **array = NULL; //< an array of pointers to values + * int nb = 0; //< a variable to keep track of the length of the array + * + * type to_be_added = ...; + * type to_be_added2 = ...; + * + * liteav_av_dynarray_add(&array, &nb, &to_be_added); + * if (nb == 0) + * return AVERROR(ENOMEM); + * + * liteav_av_dynarray_add(&array, &nb, &to_be_added2); + * if (nb == 0) + * return AVERROR(ENOMEM); + * + * // Now: + * // nb == 2 + * // &to_be_added == array[0] + * // &to_be_added2 == array[1] + * + * liteav_av_freep(&array); + * @endcode + * + * The second system stores the value directly in a block of memory. As a + * result, the function has to know the size of the type. 
liteav_av_dynarray2_add() + * implements this mechanism. + * + * @code + * type *array = NULL; //< an array of values + * int nb = 0; //< a variable to keep track of the length of the array + * + * type to_be_added = ...; + * type to_be_added2 = ...; + * + * type *addr = liteav_av_dynarray2_add((void **)&array, &nb, sizeof(*array), NULL); + * if (!addr) + * return AVERROR(ENOMEM); + * memcpy(addr, &to_be_added, sizeof(to_be_added)); + * + * // Shortcut of the above. + * type *addr = liteav_av_dynarray2_add((void **)&array, &nb, sizeof(*array), + * (const void *)&to_be_added2); + * if (!addr) + * return AVERROR(ENOMEM); + * + * // Now: + * // nb == 2 + * // to_be_added == array[0] + * // to_be_added2 == array[1] + * + * liteav_av_freep(&array); + * @endcode + * + * @{ + */ + +/** + * Add the pointer to an element to a dynamic array. + * + * The array to grow is supposed to be an array of pointers to + * structures, and the element to add must be a pointer to an already + * allocated structure. + * + * The array is reallocated when its size reaches powers of 2. + * Therefore, the amortized cost of adding an element is constant. + * + * In case of success, the pointer to the array is updated in order to + * point to the new grown array, and the number pointed to by `nb_ptr` + * is incremented. + * In case of failure, the array is freed, `*tab_ptr` is set to `NULL` and + * `*nb_ptr` is set to 0. + * + * @param[in,out] tab_ptr Pointer to the array to grow + * @param[in,out] nb_ptr Pointer to the number of elements in the array + * @param[in] elem Element to add + * @see liteav_av_dynarray_add_nofree(), liteav_av_dynarray2_add() + */ +void liteav_av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem); + +/** + * Add an element to a dynamic array. + * + * Function has the same functionality as liteav_av_dynarray_add(), + * but it doesn't free memory on fails. It returns error code + * instead and leave current buffer untouched. 
+ * + * @return >=0 on success, negative otherwise + * @see liteav_av_dynarray_add(), liteav_av_dynarray2_add() + */ +av_warn_unused_result +int liteav_av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem); + +/** + * Add an element of size `elem_size` to a dynamic array. + * + * The array is reallocated when its number of elements reaches powers of 2. + * Therefore, the amortized cost of adding an element is constant. + * + * In case of success, the pointer to the array is updated in order to + * point to the new grown array, and the number pointed to by `nb_ptr` + * is incremented. + * In case of failure, the array is freed, `*tab_ptr` is set to `NULL` and + * `*nb_ptr` is set to 0. + * + * @param[in,out] tab_ptr Pointer to the array to grow + * @param[in,out] nb_ptr Pointer to the number of elements in the array + * @param[in] elem_size Size in bytes of an element in the array + * @param[in] elem_data Pointer to the data of the element to add. If + * `NULL`, the space of the newly added element is + * allocated but left uninitialized. + * + * @return Pointer to the data of the element to copy in the newly allocated + * space + * @see liteav_av_dynarray_add(), liteav_av_dynarray_add_nofree() + */ +void *liteav_av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size, + const uint8_t *elem_data); + +/** + * @} + */ + +/** + * @defgroup lavu_mem_misc Miscellaneous Functions + * + * Other functions related to memory allocation. + * + * @{ + */ + +/** + * Multiply two `size_t` values checking for overflow. + * + * @param[in] a,b Operands of multiplication + * @param[out] r Pointer to the result of the operation + * @return 0 on success, AVERROR(EINVAL) on overflow + */ +static inline int av_size_mult(size_t a, size_t b, size_t *r) +{ + size_t t = a * b; + /* Hack inspired from glibc: don't try the division if nelem and elsize + * are both less than sqrt(SIZE_MAX). 
*/ + if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b) + return AVERROR(EINVAL); + *r = t; + return 0; +} + +/** + * Set the maximum size that may be allocated in one block. + * + * The value specified with this function is effective for all libavutil's @ref + * lavu_mem_funcs "heap management functions." + * + * By default, the max value is defined as `INT_MAX`. + * + * @param max Value to be set as the new maximum size + * + * @warning Exercise extreme caution when using this function. Don't touch + * this if you do not understand the full consequence of doing so. + */ +void liteav_av_max_alloc(size_t max); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_MEM_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/motion_vector.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/motion_vector.h new file mode 100644 index 0000000..ec29556 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/motion_vector.h @@ -0,0 +1,57 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MOTION_VECTOR_H +#define AVUTIL_MOTION_VECTOR_H + +#include <stdint.h> + +typedef struct AVMotionVector { + /** + * Where the current macroblock comes from; negative value when it comes + * from the past, positive value when it comes from the future. + * XXX: set exact relative ref frame reference instead of a +/- 1 "direction". + */ + int32_t source; + /** + * Width and height of the block. + */ + uint8_t w, h; + /** + * Absolute source position. Can be outside the frame area. + */ + int16_t src_x, src_y; + /** + * Absolute destination position. Can be outside the frame area. + */ + int16_t dst_x, dst_y; + /** + * Extra flag information. + * Currently unused. + */ + uint64_t flags; + /** + * Motion vector + * src_x = dst_x + motion_x / motion_scale + * src_y = dst_y + motion_y / motion_scale + */ + int32_t motion_x, motion_y; + uint16_t motion_scale; +} AVMotionVector; + +#endif /* AVUTIL_MOTION_VECTOR_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/murmur3.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/murmur3.h new file mode 100644 index 0000000..0225105 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/murmur3.h @@ -0,0 +1,121 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2013 Reimar Döffinger <Reimar.Doeffinger@gmx.de> + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_murmur3 + * Public header for MurmurHash3 hash function implementation. + */ + +#ifndef AVUTIL_MURMUR3_H +#define AVUTIL_MURMUR3_H + +#include <stdint.h> + +#include "version.h" + +/** + * @defgroup lavu_murmur3 Murmur3 + * @ingroup lavu_hash + * MurmurHash3 hash function implementation. + * + * MurmurHash3 is a non-cryptographic hash function, of which three + * incompatible versions were created by its inventor Austin Appleby: + * + * - 32-bit output + * - 128-bit output for 32-bit platforms + * - 128-bit output for 64-bit platforms + * + * FFmpeg only implements the last variant: 128-bit output designed for 64-bit + * platforms. Even though the hash function was designed for 64-bit platforms, + * the function in reality works on 32-bit systems too, only with reduced + * performance. + * + * @anchor lavu_murmur3_seedinfo + * By design, MurmurHash3 requires a seed to operate. In response to this, + * libavutil provides two functions for hash initiation, one that requires a + * seed (liteav_av_murmur3_init_seeded()) and one that uses a fixed arbitrary integer + * as the seed, and therefore does not (liteav_av_murmur3_init()). 
+ * + * To make hashes comparable, you should provide the same seed for all calls to + * this hash function -- if you are supplying one yourself, that is. + * + * @{ + */ + +/** + * Allocate an AVMurMur3 hash context. + * + * @return Uninitialized hash context or `NULL` in case of error + */ +struct AVMurMur3 *liteav_av_murmur3_alloc(void); + +/** + * Initialize or reinitialize an AVMurMur3 hash context with a seed. + * + * @param[out] c Hash context + * @param[in] seed Random seed + * + * @see liteav_av_murmur3_init() + * @see @ref lavu_murmur3_seedinfo "Detailed description" on a discussion of + * seeds for MurmurHash3. + */ +void liteav_av_murmur3_init_seeded(struct AVMurMur3 *c, uint64_t seed); + +/** + * Initialize or reinitialize an AVMurMur3 hash context. + * + * Equivalent to liteav_av_murmur3_init_seeded() with a built-in seed. + * + * @param[out] c Hash context + * + * @see liteav_av_murmur3_init_seeded() + * @see @ref lavu_murmur3_seedinfo "Detailed description" on a discussion of + * seeds for MurmurHash3. + */ +void liteav_av_murmur3_init(struct AVMurMur3 *c); + +/** + * Update hash context with new data. + * + * @param[out] c Hash context + * @param[in] src Input data to update hash with + * @param[in] len Number of bytes to read from `src` + */ +#if FF_API_CRYPTO_SIZE_T +void liteav_av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, int len); +#else +void liteav_av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, size_t len); +#endif + +/** + * Finish hashing and output digest value. 
+ * + * @param[in,out] c Hash context + * @param[out] dst Buffer where output digest value is stored + */ +void liteav_av_murmur3_final(struct AVMurMur3 *c, uint8_t dst[16]); + +/** + * @} + */ + +#endif /* AVUTIL_MURMUR3_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/opt.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/opt.h new file mode 100644 index 0000000..9948bae --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/opt.h @@ -0,0 +1,867 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * AVOptions + * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_OPT_H +#define AVUTIL_OPT_H + +/** + * @file + * AVOptions + */ + +#include "rational.h" +#include "avutil.h" +#include "dict.h" +#include "log.h" +#include "pixfmt.h" +#include "samplefmt.h" +#include "version.h" + +/** + * @defgroup avoptions AVOptions + * @ingroup lavu_data + * @{ + * AVOptions provide a generic system to declare options on arbitrary structs + * ("objects"). 
An option can have a help text, a type and a range of possible + * values. Options may then be enumerated, read and written to. + * + * @section avoptions_implement Implementing AVOptions + * This section describes how to add AVOptions capabilities to a struct. + * + * All AVOptions-related information is stored in an AVClass. Therefore + * the first member of the struct should be a pointer to an AVClass describing it. + * The option field of the AVClass must be set to a NULL-terminated static array + * of AVOptions. Each AVOption must have a non-empty name, a type, a default + * value and for number-type AVOptions also a range of allowed values. It must + * also declare an offset in bytes from the start of the struct, where the field + * associated with this AVOption is located. Other fields in the AVOption struct + * should also be set when applicable, but are not required. + * + * The following example illustrates an AVOptions-enabled struct: + * @code + * typedef struct test_struct { + * const AVClass *class; + * int int_opt; + * char *str_opt; + * uint8_t *bin_opt; + * int bin_len; + * } test_struct; + * + * static const AVOption test_options[] = { + * { "test_int", "This is a test option of int type.", offsetof(test_struct, int_opt), + * AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX }, + * { "test_str", "This is a test option of string type.", offsetof(test_struct, str_opt), + * AV_OPT_TYPE_STRING }, + * { "test_bin", "This is a test option of binary type.", offsetof(test_struct, bin_opt), + * AV_OPT_TYPE_BINARY }, + * { NULL }, + * }; + * + * static const AVClass test_class = { + * .class_name = "test class", + * .item_name = liteav_av_default_item_name, + * .option = test_options, + * .version = LIBAVUTIL_VERSION_INT, + * }; + * @endcode + * + * Next, when allocating your struct, you must ensure that the AVClass pointer + * is set to the correct value. Then, liteav_av_opt_set_defaults() can be called to + * initialize defaults. 
After that the struct is ready to be used with the + * AVOptions API. + * + * When cleaning up, you may use the liteav_av_opt_free() function to automatically + * free all the allocated string and binary options. + * + * Continuing with the above example: + * + * @code + * test_struct *alloc_test_struct(void) + * { + * test_struct *ret = liteav_av_mallocz(sizeof(*ret)); + * ret->class = &test_class; + * liteav_av_opt_set_defaults(ret); + * return ret; + * } + * void free_test_struct(test_struct **foo) + * { + * liteav_av_opt_free(*foo); + * liteav_av_freep(foo); + * } + * @endcode + * + * @subsection avoptions_implement_nesting Nesting + * It may happen that an AVOptions-enabled struct contains another + * AVOptions-enabled struct as a member (e.g. AVCodecContext in + * libavcodec exports generic options, while its priv_data field exports + * codec-specific options). In such a case, it is possible to set up the + * parent struct to export a child's options. To do that, simply + * implement AVClass.child_next() and AVClass.child_class_next() in the + * parent struct's AVClass. + * Assuming that the test_struct from above now also contains a + * child_struct field: + * + * @code + * typedef struct child_struct { + * AVClass *class; + * int flags_opt; + * } child_struct; + * static const AVOption child_opts[] = { + * { "test_flags", "This is a test option of flags type.", + * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX }, + * { NULL }, + * }; + * static const AVClass child_class = { + * .class_name = "child class", + * .item_name = liteav_av_default_item_name, + * .option = child_opts, + * .version = LIBAVUTIL_VERSION_INT, + * }; + * + * void *child_next(void *obj, void *prev) + * { + * test_struct *t = obj; + * if (!prev && t->child_struct) + * return t->child_struct; + * return NULL + * } + * const AVClass child_class_next(const AVClass *prev) + * { + * return prev ? 
NULL : &child_class; + * } + * @endcode + * Putting child_next() and child_class_next() as defined above into + * test_class will now make child_struct's options accessible through + * test_struct (again, proper setup as described above needs to be done on + * child_struct right after it is created). + * + * From the above example it might not be clear why both child_next() + * and child_class_next() are needed. The distinction is that child_next() + * iterates over actually existing objects, while child_class_next() + * iterates over all possible child classes. E.g. if an AVCodecContext + * was initialized to use a codec which has private options, then its + * child_next() will return AVCodecContext.priv_data and finish + * iterating. OTOH child_class_next() on AVCodecContext.av_class will + * iterate over all available codecs with private options. + * + * @subsection avoptions_implement_named_constants Named constants + * It is possible to create named constants for options. Simply set the unit + * field of the option the constants should apply to a string and + * create the constants themselves as options of type AV_OPT_TYPE_CONST + * with their unit field set to the same string. + * Their default_val field should contain the value of the named + * constant. + * For example, to add some named constants for the test_flags option + * above, put the following into the child_opts array: + * @code + * { "test_flags", "This is a test option of flags type.", + * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, "test_unit" }, + * { "flag1", "This is a flag with value 16", 0, AV_OPT_TYPE_CONST, { .i64 = 16 }, 0, 0, "test_unit" }, + * @endcode + * + * @section avoptions_use Using AVOptions + * This section deals with accessing options in an AVOptions-enabled struct. + * Such structs in FFmpeg are e.g. AVCodecContext in libavcodec or + * AVFormatContext in libavformat. 
+ * + * @subsection avoptions_use_examine Examining AVOptions + * The basic functions for examining options are liteav_av_opt_next(), which iterates + * over all options defined for one object, and liteav_av_opt_find(), which searches + * for an option with the given name. + * + * The situation is more complicated with nesting. An AVOptions-enabled struct + * may have AVOptions-enabled children. Passing the AV_OPT_SEARCH_CHILDREN flag + * to liteav_av_opt_find() will make the function search children recursively. + * + * For enumerating there are basically two cases. The first is when you want to + * get all options that may potentially exist on the struct and its children + * (e.g. when constructing documentation). In that case you should call + * liteav_av_opt_child_class_next() recursively on the parent struct's AVClass. The + * second case is when you have an already initialized struct with all its + * children and you want to get all options that can be actually written or read + * from it. In that case you should call liteav_av_opt_child_next() recursively (and + * liteav_av_opt_next() on each result). + * + * @subsection avoptions_use_get_set Reading and writing AVOptions + * When setting options, you often have a string read directly from the + * user. In such a case, simply passing it to liteav_av_opt_set() is enough. For + * non-string type options, liteav_av_opt_set() will parse the string according to the + * option type. + * + * Similarly liteav_av_opt_get() will read any option type and convert it to a string + * which will be returned. Do not forget that the string is allocated, so you + * have to free it with liteav_av_free(). + * + * In some cases it may be more convenient to put all options into an + * AVDictionary and call liteav_av_opt_set_dict() on it. A specific case of this + * are the format/codec open functions in lavf/lavc which take a dictionary + * filled with option as a parameter. 
This makes it possible to set some options + * that cannot be set otherwise, since e.g. the input file format is not known + * before the file is actually opened. + */ + +enum AVOptionType{ + AV_OPT_TYPE_FLAGS, + AV_OPT_TYPE_INT, + AV_OPT_TYPE_INT64, + AV_OPT_TYPE_DOUBLE, + AV_OPT_TYPE_FLOAT, + AV_OPT_TYPE_STRING, + AV_OPT_TYPE_RATIONAL, + AV_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length + AV_OPT_TYPE_DICT, + AV_OPT_TYPE_UINT64, + AV_OPT_TYPE_CONST, + AV_OPT_TYPE_IMAGE_SIZE, ///< offset must point to two consecutive integers + AV_OPT_TYPE_PIXEL_FMT, + AV_OPT_TYPE_SAMPLE_FMT, + AV_OPT_TYPE_VIDEO_RATE, ///< offset must point to AVRational + AV_OPT_TYPE_DURATION, + AV_OPT_TYPE_COLOR, + AV_OPT_TYPE_CHANNEL_LAYOUT, + AV_OPT_TYPE_BOOL, +}; + +/** + * AVOption + */ +typedef struct AVOption { + const char *name; + + /** + * short English help text + * @todo What about other languages? + */ + const char *help; + + /** + * The offset relative to the context structure where the option + * value is stored. It should be 0 for named constants. + */ + int offset; + enum AVOptionType type; + + /** + * the default value for scalar options + */ + union { + int64_t i64; + double dbl; + const char *str; + /* TODO those are unused now */ + AVRational q; + } default_val; + double min; ///< minimum valid value for the option + double max; ///< maximum valid value for the option + + int flags; +#define AV_OPT_FLAG_ENCODING_PARAM 1 ///< a generic parameter which can be set by the user for muxing or encoding +#define AV_OPT_FLAG_DECODING_PARAM 2 ///< a generic parameter which can be set by the user for demuxing or decoding +#define AV_OPT_FLAG_AUDIO_PARAM 8 +#define AV_OPT_FLAG_VIDEO_PARAM 16 +#define AV_OPT_FLAG_SUBTITLE_PARAM 32 +/** + * The option is intended for exporting values to the caller. + */ +#define AV_OPT_FLAG_EXPORT 64 +/** + * The option may not be set through the AVOptions API, only read. 
+ * This flag only makes sense when AV_OPT_FLAG_EXPORT is also set. + */ +#define AV_OPT_FLAG_READONLY 128 +#define AV_OPT_FLAG_BSF_PARAM (1<<8) ///< a generic parameter which can be set by the user for bit stream filtering +#define AV_OPT_FLAG_RUNTIME_PARAM (1<<15) ///< a generic parameter which can be set by the user at runtime +#define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering +#define AV_OPT_FLAG_DEPRECATED (1<<17) ///< set if option is deprecated, users should refer to AVOption.help text for more information +//FIXME think about enc-audio, ... style flags + + /** + * The logical unit to which the option belongs. Non-constant + * options and corresponding named constants share the same + * unit. May be NULL. + */ + const char *unit; +} AVOption; + +/** + * A single allowed range of values, or a single allowed value. + */ +typedef struct AVOptionRange { + const char *str; + /** + * Value range. + * For string ranges this represents the min/max length. + * For dimensions this represents the min/max pixel count or width/height in multi-component case. + */ + double value_min, value_max; + /** + * Value's component range. + * For string this represents the unicode range for chars, 0-127 limits to ASCII. + */ + double component_min, component_max; + /** + * Range flag. + * If set to 1 the struct encodes a range, if set to 0 a single value. + */ + int is_range; +} AVOptionRange; + +/** + * List of AVOptionRange structs. + */ +typedef struct AVOptionRanges { + /** + * Array of option ranges. + * + * Most of option types use just one component. + * Following describes multi-component option types: + * + * AV_OPT_TYPE_IMAGE_SIZE: + * component index 0: range of pixel count (width * height). + * component index 1: range of width. + * component index 2: range of height. 
+ * + * @note To obtain multi-component version of this structure, user must + * provide AV_OPT_MULTI_COMPONENT_RANGE to liteav_av_opt_query_ranges or + * liteav_av_opt_query_ranges_default function. + * + * Multi-component range can be read as in following example: + * + * @code + * int range_index, component_index; + * AVOptionRanges *ranges; + * AVOptionRange *range[3]; //may require more than 3 in the future. + * liteav_av_opt_query_ranges(&ranges, obj, key, AV_OPT_MULTI_COMPONENT_RANGE); + * for (range_index = 0; range_index < ranges->nb_ranges; range_index++) { + * for (component_index = 0; component_index < ranges->nb_components; component_index++) + * range[component_index] = ranges->range[ranges->nb_ranges * component_index + range_index]; + * //do something with range here. + * } + * liteav_av_opt_freep_ranges(&ranges); + * @endcode + */ + AVOptionRange **range; + /** + * Number of ranges per component. + */ + int nb_ranges; + /** + * Number of componentes. + */ + int nb_components; +} AVOptionRanges; + +/** + * Show the obj options. + * + * @param req_flags requested flags for the options to show. Show only the + * options for which it is opt->flags & req_flags. + * @param rej_flags rejected flags for the options to show. Show only the + * options for which it is !(opt->flags & req_flags). + * @param av_log_obj log context to use for showing the options + */ +int liteav_av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags); + +/** + * Set the values of all AVOption fields to their default values. + * + * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass) + */ +void liteav_av_opt_set_defaults(void *s); + +/** + * Set the values of all AVOption fields to their default values. Only these + * AVOption fields for which (opt->flags & mask) == flags will have their + * default applied to s. 
+ * + * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass) + * @param mask combination of AV_OPT_FLAG_* + * @param flags combination of AV_OPT_FLAG_* + */ +void liteav_av_opt_set_defaults2(void *s, int mask, int flags); + +/** + * Parse the key/value pairs list in opts. For each key/value pair + * found, stores the value in the field in ctx that is named like the + * key. ctx must be an AVClass context, storing is done using + * AVOptions. + * + * @param opts options string to parse, may be NULL + * @param key_val_sep a 0-terminated list of characters used to + * separate key from value + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other + * @return the number of successfully set key/value pairs, or a negative + * value corresponding to an AVERROR code in case of error: + * AVERROR(EINVAL) if opts cannot be parsed, + * the error code issued by liteav_av_opt_set() if a key/value pair + * cannot be set + */ +int liteav_av_set_options_string(void *ctx, const char *opts, + const char *key_val_sep, const char *pairs_sep); + +/** + * Parse the key-value pairs list in opts. For each key=value pair found, + * set the value of the corresponding option in ctx. 
+ * + * @param ctx the AVClass object to set options on + * @param opts the options string, key-value pairs separated by a + * delimiter + * @param shorthand a NULL-terminated array of options names for shorthand + * notation: if the first field in opts has no key part, + * the key is taken from the first element of shorthand; + * then again for the second, etc., until either opts is + * finished, shorthand is finished or a named option is + * found; after that, all options must be named + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value, for example '=' + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other, for example ':' or ',' + * @return the number of successfully set key=value pairs, or a negative + * value corresponding to an AVERROR code in case of error: + * AVERROR(EINVAL) if opts cannot be parsed, + * the error code issued by av_set_string3() if a key/value pair + * cannot be set + * + * Options names must use only the following characters: a-z A-Z 0-9 - . / _ + * Separators must use characters distinct from option names and from each + * other. + */ +int liteav_av_opt_set_from_string(void *ctx, const char *opts, + const char *const *shorthand, + const char *key_val_sep, const char *pairs_sep); +/** + * Free all allocated objects in obj. + */ +void liteav_av_opt_free(void *obj); + +/** + * Check whether a particular flag is set in a flags field. + * + * @param field_name the name of the flag field option + * @param flag_name the name of the flag to check + * @return non-zero if the flag is set, zero if the flag isn't set, + * isn't of the right type, or the flags field doesn't exist. + */ +int liteav_av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name); + +/** + * Set all the options from a given dictionary on an object. + * + * @param obj a struct whose first element is a pointer to AVClass + * @param options options to process. 
This dictionary will be freed and replaced + * by a new one containing all options not found in obj. + * Of course this new dictionary needs to be freed by caller + * with liteav_av_dict_free(). + * + * @return 0 on success, a negative AVERROR if some option was found in obj, + * but could not be set. + * + * @see liteav_av_dict_copy() + */ +int liteav_av_opt_set_dict(void *obj, struct AVDictionary **options); + + +/** + * Set all the options from a given dictionary on an object. + * + * @param obj a struct whose first element is a pointer to AVClass + * @param options options to process. This dictionary will be freed and replaced + * by a new one containing all options not found in obj. + * Of course this new dictionary needs to be freed by caller + * with liteav_av_dict_free(). + * @param search_flags A combination of AV_OPT_SEARCH_*. + * + * @return 0 on success, a negative AVERROR if some option was found in obj, + * but could not be set. + * + * @see liteav_av_dict_copy() + */ +int liteav_av_opt_set_dict2(void *obj, struct AVDictionary **options, int search_flags); + +/** + * Extract a key-value pair from the beginning of a string. 
+ * + * @param ropts pointer to the options string, will be updated to + * point to the rest of the string (one of the pairs_sep + * or the final NUL) + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value, for example '=' + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other, for example ':' or ',' + * @param flags flags; see the AV_OPT_FLAG_* values below + * @param rkey parsed key; must be freed using liteav_av_free() + * @param rval parsed value; must be freed using liteav_av_free() + * + * @return >=0 for success, or a negative value corresponding to an + * AVERROR code in case of error; in particular: + * AVERROR(EINVAL) if no key is present + * + */ +int liteav_av_opt_get_key_value(const char **ropts, + const char *key_val_sep, const char *pairs_sep, + unsigned flags, + char **rkey, char **rval); + +enum { + + /** + * Accept to parse a value without a key; the key will then be returned + * as NULL. + */ + AV_OPT_FLAG_IMPLICIT_KEY = 1, +}; + +/** + * @defgroup opt_eval_funcs Evaluating option strings + * @{ + * This group of functions can be used to evaluate option strings + * and get numbers out of them. They do the same thing as liteav_av_opt_set(), + * except the result is written into the caller-supplied pointer. + * + * @param obj a struct whose first element is a pointer to AVClass. + * @param o an option for which the string is to be evaluated. + * @param val string to be evaluated. + * @param *_out value of the string will be written here. + * + * @return 0 on success, a negative number on failure. 
+ */ +int liteav_av_opt_eval_flags (void *obj, const AVOption *o, const char *val, int *flags_out); +int liteav_av_opt_eval_int (void *obj, const AVOption *o, const char *val, int *int_out); +int liteav_av_opt_eval_int64 (void *obj, const AVOption *o, const char *val, int64_t *int64_out); +int liteav_av_opt_eval_float (void *obj, const AVOption *o, const char *val, float *float_out); +int liteav_av_opt_eval_double(void *obj, const AVOption *o, const char *val, double *double_out); +int liteav_av_opt_eval_q (void *obj, const AVOption *o, const char *val, AVRational *q_out); +/** + * @} + */ + +#define AV_OPT_SEARCH_CHILDREN (1 << 0) /**< Search in possible children of the + given object first. */ +/** + * The obj passed to liteav_av_opt_find() is fake -- only a double pointer to AVClass + * instead of a required pointer to a struct containing AVClass. This is + * useful for searching for options without needing to allocate the corresponding + * object. + */ +#define AV_OPT_SEARCH_FAKE_OBJ (1 << 1) + +/** + * In liteav_av_opt_get, return NULL if the option has a pointer type and is set to NULL, + * rather than returning an empty string. + */ +#define AV_OPT_ALLOW_NULL (1 << 2) + +/** + * Allows liteav_av_opt_query_ranges and liteav_av_opt_query_ranges_default to return more than + * one component for certain option types. + * @see AVOptionRanges for details. + */ +#define AV_OPT_MULTI_COMPONENT_RANGE (1 << 12) + +/** + * Look for an option in an object. Consider only options which + * have all the specified flags set. + * + * @param[in] obj A pointer to a struct whose first element is a + * pointer to an AVClass. + * Alternatively a double pointer to an AVClass, if + * AV_OPT_SEARCH_FAKE_OBJ search flag is set. + * @param[in] name The name of the option to look for. + * @param[in] unit When searching for named constants, name of the unit + * it belongs to. + * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG). 
+ * @param search_flags A combination of AV_OPT_SEARCH_*. + * + * @return A pointer to the option found, or NULL if no option + * was found. + * + * @note Options found with AV_OPT_SEARCH_CHILDREN flag may not be settable + * directly with liteav_av_opt_set(). Use special calls which take an options + * AVDictionary (e.g. avformat_open_input()) to set options found with this + * flag. + */ +const AVOption *liteav_av_opt_find(void *obj, const char *name, const char *unit, + int opt_flags, int search_flags); + +/** + * Look for an option in an object. Consider only options which + * have all the specified flags set. + * + * @param[in] obj A pointer to a struct whose first element is a + * pointer to an AVClass. + * Alternatively a double pointer to an AVClass, if + * AV_OPT_SEARCH_FAKE_OBJ search flag is set. + * @param[in] name The name of the option to look for. + * @param[in] unit When searching for named constants, name of the unit + * it belongs to. + * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG). + * @param search_flags A combination of AV_OPT_SEARCH_*. + * @param[out] target_obj if non-NULL, an object to which the option belongs will be + * written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present + * in search_flags. This parameter is ignored if search_flags contain + * AV_OPT_SEARCH_FAKE_OBJ. + * + * @return A pointer to the option found, or NULL if no option + * was found. + */ +const AVOption *liteav_av_opt_find2(void *obj, const char *name, const char *unit, + int opt_flags, int search_flags, void **target_obj); + +/** + * Iterate over all AVOptions belonging to obj. + * + * @param obj an AVOptions-enabled struct or a double pointer to an + * AVClass describing it. 
+ * @param prev result of the previous call to liteav_av_opt_next() on this object + * or NULL + * @return next AVOption or NULL + */ +const AVOption *liteav_av_opt_next(const void *obj, const AVOption *prev); + +/** + * Iterate over AVOptions-enabled children of obj. + * + * @param prev result of a previous call to this function or NULL + * @return next AVOptions-enabled child or NULL + */ +void *liteav_av_opt_child_next(void *obj, void *prev); + +/** + * Iterate over potential AVOptions-enabled children of parent. + * + * @param prev result of a previous call to this function or NULL + * @return AVClass corresponding to next potential child or NULL + */ +const AVClass *liteav_av_opt_child_class_next(const AVClass *parent, const AVClass *prev); + +/** + * @defgroup opt_set_funcs Option setting functions + * @{ + * Those functions set the field of obj with the given name to value. + * + * @param[in] obj A struct whose first element is a pointer to an AVClass. + * @param[in] name the name of the field to set + * @param[in] val The value to set. In case of liteav_av_opt_set() if the field is not + * of a string type, then the given string is parsed. + * SI postfixes and some named scalars are supported. + * If the field is of a numeric type, it has to be a numeric or named + * scalar. Behavior with more than one scalar and +- infix operators + * is undefined. + * If the field is of a flags type, it has to be a sequence of numeric + * scalars or named flags separated by '+' or '-'. Prefixing a flag + * with '+' causes it to be set without affecting the other flags; + * similarly, '-' unsets a flag. + * @param search_flags flags passed to liteav_av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN + * is passed here, then the option may be set on a child of obj. 
+ * + * @return 0 if the value has been set, or an AVERROR code in case of + * error: + * AVERROR_OPTION_NOT_FOUND if no matching option exists + * AVERROR(ERANGE) if the value is out of range + * AVERROR(EINVAL) if the value is not valid + */ +int liteav_av_opt_set (void *obj, const char *name, const char *val, int search_flags); +int liteav_av_opt_set_int (void *obj, const char *name, int64_t val, int search_flags); +int liteav_av_opt_set_double (void *obj, const char *name, double val, int search_flags); +int liteav_av_opt_set_q (void *obj, const char *name, AVRational val, int search_flags); +int liteav_av_opt_set_bin (void *obj, const char *name, const uint8_t *val, int size, int search_flags); +int liteav_av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_flags); +int liteav_av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags); +int liteav_av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags); +int liteav_av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags); +int liteav_av_opt_set_channel_layout(void *obj, const char *name, int64_t ch_layout, int search_flags); +/** + * @note Any old dictionary present is discarded and replaced with a copy of the new one. The + * caller still owns val is and responsible for freeing it. + */ +int liteav_av_opt_set_dict_val(void *obj, const char *name, const AVDictionary *val, int search_flags); + +/** + * Set a binary option to an integer list. + * + * @param obj AVClass object to set options on + * @param name name of the binary option + * @param val pointer to an integer list (must have the correct type with + * regard to the contents of the list) + * @param term list terminator (usually 0 or -1) + * @param flags search flags + */ +#define av_opt_set_int_list(obj, name, val, term, flags) \ + (av_int_list_length(val, term) > INT_MAX / sizeof(*(val)) ? 
\ + AVERROR(EINVAL) : \ + liteav_av_opt_set_bin(obj, name, (const uint8_t *)(val), \ + av_int_list_length(val, term) * sizeof(*(val)), flags)) + +/** + * @} + */ + +/** + * @defgroup opt_get_funcs Option getting functions + * @{ + * Those functions get a value of the option with the given name from an object. + * + * @param[in] obj a struct whose first element is a pointer to an AVClass. + * @param[in] name name of the option to get. + * @param[in] search_flags flags passed to liteav_av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN + * is passed here, then the option may be found in a child of obj. + * @param[out] out_val value of the option will be written here + * @return >=0 on success, a negative error code otherwise + */ +/** + * @note the returned string will be liteav_av_malloc()ed and must be liteav_av_free()ed by the caller + * + * @note if AV_OPT_ALLOW_NULL is set in search_flags in liteav_av_opt_get, and the option has + * AV_OPT_TYPE_STRING or AV_OPT_TYPE_BINARY and is set to NULL, *out_val will be set + * to NULL instead of an allocated empty string. 
+ */ +int liteav_av_opt_get (void *obj, const char *name, int search_flags, uint8_t **out_val); +int liteav_av_opt_get_int (void *obj, const char *name, int search_flags, int64_t *out_val); +int liteav_av_opt_get_double (void *obj, const char *name, int search_flags, double *out_val); +int liteav_av_opt_get_q (void *obj, const char *name, int search_flags, AVRational *out_val); +int liteav_av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_out, int *h_out); +int liteav_av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt); +int liteav_av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt); +int liteav_av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val); +int liteav_av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *ch_layout); +/** + * @param[out] out_val The returned dictionary is a copy of the actual value and must + * be freed with liteav_av_dict_free() by the caller + */ +int liteav_av_opt_get_dict_val(void *obj, const char *name, int search_flags, AVDictionary **out_val); +/** + * @} + */ +/** + * Gets a pointer to the requested field in a struct. + * This function allows accessing a struct even when its fields are moved or + * renamed since the application making the access has been compiled, + * + * @returns a pointer to the field, it can be cast to the correct type and read + * or written to. + */ +void *liteav_av_opt_ptr(const AVClass *avclass, void *obj, const char *name); + +/** + * Free an AVOptionRanges struct and set it to NULL. + */ +void liteav_av_opt_freep_ranges(AVOptionRanges **ranges); + +/** + * Get a list of allowed ranges for the given option. + * + * The returned list may depend on other fields in obj like for example profile. 
+ * + * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored + * AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance + * AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, @see AVOptionRanges + * + * The result must be freed with liteav_av_opt_freep_ranges. + * + * @return number of compontents returned on success, a negative errro code otherwise + */ +int liteav_av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags); + +/** + * Copy options from src object into dest object. + * + * Options that require memory allocation (e.g. string or binary) are malloc'ed in dest object. + * Original memory allocated for such options is freed unless both src and dest options points to the same memory. + * + * @param dest Object to copy from + * @param src Object to copy into + * @return 0 on success, negative on error + */ +int liteav_av_opt_copy(void *dest, const void *src); + +/** + * Get a default list of allowed ranges for the given option. + * + * This list is constructed without using the AVClass.query_ranges() callback + * and can be used as fallback from within the callback. + * + * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored + * AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance + * AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, @see AVOptionRanges + * + * The result must be freed with av_opt_free_ranges. + * + * @return number of compontents returned on success, a negative errro code otherwise + */ +int liteav_av_opt_query_ranges_default(AVOptionRanges **, void *obj, const char *key, int flags); + +/** + * Check if given option is set to its default value. + * + * Options o must belong to the obj. This function must not be called to check child's options state. 
+ * @see liteav_av_opt_is_set_to_default_by_name(). + * + * @param obj AVClass object to check option on + * @param o option to be checked + * @return >0 when option is set to its default, + * 0 when option is not set its default, + * <0 on error + */ +int liteav_av_opt_is_set_to_default(void *obj, const AVOption *o); + +/** + * Check if given option is set to its default value. + * + * @param obj AVClass object to check option on + * @param name option name + * @param search_flags combination of AV_OPT_SEARCH_* + * @return >0 when option is set to its default, + * 0 when option is not set its default, + * <0 on error + */ +int liteav_av_opt_is_set_to_default_by_name(void *obj, const char *name, int search_flags); + + +#define AV_OPT_SERIALIZE_SKIP_DEFAULTS 0x00000001 ///< Serialize options that are not set to default values only. +#define AV_OPT_SERIALIZE_OPT_FLAGS_EXACT 0x00000002 ///< Serialize options that exactly match opt_flags only. + +/** + * Serialize object's options. + * + * Create a string containing object's serialized options. + * Such string may be passed back to liteav_av_opt_set_from_string() in order to restore option values. + * A key/value or pairs separator occurring in the serialized value or + * name string are escaped through the liteav_av_escape() function. + * + * @param[in] obj AVClass object to serialize + * @param[in] opt_flags serialize options with all the specified flags set (AV_OPT_FLAG) + * @param[in] flags combination of AV_OPT_SERIALIZE_* flags + * @param[out] buffer Pointer to buffer that will be allocated with string containg serialized options. + * Buffer must be freed by the caller when is no longer needed. + * @param[in] key_val_sep character used to separate key from value + * @param[in] pairs_sep character used to separate two pairs from each other + * @return >= 0 on success, negative on error + * @warning Separators cannot be neither '\\' nor '\0'. They also cannot be the same. 
+ */ +int liteav_av_opt_serialize(void *obj, int opt_flags, int flags, char **buffer, + const char key_val_sep, const char pairs_sep); +/** + * @} + */ + +#endif /* AVUTIL_OPT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/parseutils.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/parseutils.h new file mode 100644 index 0000000..5da32c4 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/parseutils.h @@ -0,0 +1,194 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PARSEUTILS_H +#define AVUTIL_PARSEUTILS_H + +#include <time.h> + +#include "rational.h" + +/** + * @file + * misc parsing utilities + */ + +/** + * Parse str and store the parsed ratio in q. + * + * Note that a ratio with infinite (1/0) or negative value is + * considered valid, so you should check on the returned value if you + * want to exclude those values. + * + * The undefined value can be expressed using the "0:0" string. 
+ * + * @param[in,out] q pointer to the AVRational which will contain the ratio + * @param[in] str the string to parse: it has to be a string in the format + * num:den, a float number or an expression + * @param[in] max the maximum allowed numerator and denominator + * @param[in] log_offset log level offset which is applied to the log + * level of log_ctx + * @param[in] log_ctx parent logging context + * @return >= 0 on success, a negative error code otherwise + */ +int liteav_av_parse_ratio(AVRational *q, const char *str, int max, + int log_offset, void *log_ctx); + +#define av_parse_ratio_quiet(rate, str, max) \ + liteav_av_parse_ratio(rate, str, max, AV_LOG_MAX_OFFSET, NULL) + +/** + * Parse str and put in width_ptr and height_ptr the detected values. + * + * @param[in,out] width_ptr pointer to the variable which will contain the detected + * width value + * @param[in,out] height_ptr pointer to the variable which will contain the detected + * height value + * @param[in] str the string to parse: it has to be a string in the format + * width x height or a valid video size abbreviation. + * @return >= 0 on success, a negative error code otherwise + */ +int liteav_av_parse_video_size(int *width_ptr, int *height_ptr, const char *str); + +/** + * Parse str and store the detected values in *rate. + * + * @param[in,out] rate pointer to the AVRational which will contain the detected + * frame rate + * @param[in] str the string to parse: it has to be a string in the format + * rate_num / rate_den, a float number or a valid video rate abbreviation + * @return >= 0 on success, a negative error code otherwise + */ +int liteav_av_parse_video_rate(AVRational *rate, const char *str); + +/** + * Put the RGBA values that correspond to color_string in rgba_color. + * + * @param color_string a string specifying a color. 
It can be the name of + * a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence, + * possibly followed by "@" and a string representing the alpha + * component. + * The alpha component may be a string composed by "0x" followed by an + * hexadecimal number or a decimal number between 0.0 and 1.0, which + * represents the opacity value (0x00/0.0 means completely transparent, + * 0xff/1.0 completely opaque). + * If the alpha component is not specified then 0xff is assumed. + * The string "random" will result in a random color. + * @param slen length of the initial part of color_string containing the + * color. It can be set to -1 if color_string is a null terminated string + * containing nothing else than the color. + * @return >= 0 in case of success, a negative value in case of + * failure (for example if color_string cannot be parsed). + */ +int liteav_av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, + void *log_ctx); + +/** + * Get the name of a color from the internal table of hard-coded named + * colors. + * + * This function is meant to enumerate the color names recognized by + * liteav_av_parse_color(). + * + * @param color_idx index of the requested color, starting from 0 + * @param rgbp if not NULL, will point to a 3-elements array with the color value in RGB + * @return the color name string or NULL if color_idx is not in the array + */ +const char *liteav_av_get_known_color_name(int color_idx, const uint8_t **rgb); + +/** + * Parse timestr and return in *time a corresponding number of + * microseconds. + * + * @param timeval puts here the number of microseconds corresponding + * to the string in timestr. If the string represents a duration, it + * is the number of microseconds contained in the time interval. If + * the string is a date, is the number of microseconds since 1st of + * January, 1970 up to the time of the parsed date. If timestr cannot + * be successfully parsed, set *time to INT64_MIN. 
+ + * @param timestr a string representing a date or a duration. + * - If a date the syntax is: + * @code + * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH:MM:SS[.m...]]]}|{HHMMSS[.m...]]]}}[Z] + * now + * @endcode + * If the value is "now" it takes the current time. + * Time is local time unless Z is appended, in which case it is + * interpreted as UTC. + * If the year-month-day part is not specified it takes the current + * year-month-day. + * - If a duration the syntax is: + * @code + * [-][HH:]MM:SS[.m...] + * [-]S+[.m...] + * @endcode + * @param duration flag which tells how to interpret timestr, if not + * zero timestr is interpreted as a duration, otherwise as a date + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int liteav_av_parse_time(int64_t *timeval, const char *timestr, int duration); + +/** + * Attempt to find a specific tag in a URL. + * + * syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done. + * Return 1 if found. + */ +int liteav_av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info); + +/** + * Simplified version of strptime + * + * Parse the input string p according to the format string fmt and + * store its results in the structure dt. + * This implementation supports only a subset of the formats supported + * by the standard strptime(). + * + * The supported input field descriptors are listed below. 
+ * - %H: the hour as a decimal number, using a 24-hour clock, in the + * range '00' through '23' + * - %J: hours as a decimal number, in the range '0' through INT_MAX + * - %M: the minute as a decimal number, using a 24-hour clock, in the + * range '00' through '59' + * - %S: the second as a decimal number, using a 24-hour clock, in the + * range '00' through '59' + * - %Y: the year as a decimal number, using the Gregorian calendar + * - %m: the month as a decimal number, in the range '1' through '12' + * - %d: the day of the month as a decimal number, in the range '1' + * through '31' + * - %T: alias for '%H:%M:%S' + * - %%: a literal '%' + * + * @return a pointer to the first character not processed in this function + * call. In case the input string contains more characters than + * required by the format string the return value points right after + * the last consumed input character. In case the whole input string + * is consumed the return value points to the null byte at the end of + * the string. On failure NULL is returned. + */ +char *liteav_av_small_strptime(const char *p, const char *fmt, struct tm *dt); + +/** + * Convert the decomposed UTC time in tm to a time_t value. + */ +time_t liteav_av_timegm(struct tm *tm); + +#endif /* AVUTIL_PARSEUTILS_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/pixdesc.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/pixdesc.h new file mode 100644 index 0000000..07e10c4 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/pixdesc.h @@ -0,0 +1,441 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * pixel format descriptor + * Copyright (c) 2009 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PIXDESC_H +#define AVUTIL_PIXDESC_H + +#include <inttypes.h> + +#include "attributes.h" +#include "pixfmt.h" +#include "version.h" + +typedef struct AVComponentDescriptor { + /** + * Which of the 4 planes contains the component. + */ + int plane; + + /** + * Number of elements between 2 horizontally consecutive pixels. + * Elements are bits for bitstream formats, bytes otherwise. + */ + int step; + + /** + * Number of elements before the component of the first pixel. + * Elements are bits for bitstream formats, bytes otherwise. + */ + int offset; + + /** + * Number of least significant bits that must be shifted away + * to get the value. + */ + int shift; + + /** + * Number of bits in the component. + */ + int depth; + +#if FF_API_PLUS1_MINUS1 + /** deprecated, use step instead */ + attribute_deprecated int step_minus1; + + /** deprecated, use depth instead */ + attribute_deprecated int depth_minus1; + + /** deprecated, use offset instead */ + attribute_deprecated int offset_plus1; +#endif +} AVComponentDescriptor; + +/** + * Descriptor that unambiguously describes how the bits of a pixel are + * stored in the up to 4 data planes of an image. It also stores the + * subsampling factors and number of components. 
+ * + * @note This is separate of the colorspace (RGB, YCbCr, YPbPr, JPEG-style YUV + * and all the YUV variants) AVPixFmtDescriptor just stores how values + * are stored not what these values represent. + */ +typedef struct AVPixFmtDescriptor { + const char *name; + uint8_t nb_components; ///< The number of components each pixel has, (1-4) + + /** + * Amount to shift the luma width right to find the chroma width. + * For YV12 this is 1 for example. + * chroma_width = AV_CEIL_RSHIFT(luma_width, log2_chroma_w) + * The note above is needed to ensure rounding up. + * This value only refers to the chroma components. + */ + uint8_t log2_chroma_w; + + /** + * Amount to shift the luma height right to find the chroma height. + * For YV12 this is 1 for example. + * chroma_height= AV_CEIL_RSHIFT(luma_height, log2_chroma_h) + * The note above is needed to ensure rounding up. + * This value only refers to the chroma components. + */ + uint8_t log2_chroma_h; + + /** + * Combination of AV_PIX_FMT_FLAG_... flags. + */ + uint64_t flags; + + /** + * Parameters that describe how pixels are packed. + * If the format has 1 or 2 components, then luma is 0. + * If the format has 3 or 4 components: + * if the RGB flag is set then 0 is red, 1 is green and 2 is blue; + * otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V. + * + * If present, the Alpha channel is always the last component. + */ + AVComponentDescriptor comp[4]; + + /** + * Alternative comma-separated names. + */ + const char *alias; +} AVPixFmtDescriptor; + +/** + * Pixel format is big-endian. + */ +#define AV_PIX_FMT_FLAG_BE (1 << 0) +/** + * Pixel format has a palette in data[1], values are indexes in this palette. + */ +#define AV_PIX_FMT_FLAG_PAL (1 << 1) +/** + * All values of a component are bit-wise packed end to end. + */ +#define AV_PIX_FMT_FLAG_BITSTREAM (1 << 2) +/** + * Pixel format is an HW accelerated format. 
+ */ +#define AV_PIX_FMT_FLAG_HWACCEL (1 << 3) +/** + * At least one pixel component is not in the first data plane. + */ +#define AV_PIX_FMT_FLAG_PLANAR (1 << 4) +/** + * The pixel format contains RGB-like data (as opposed to YUV/grayscale). + */ +#define AV_PIX_FMT_FLAG_RGB (1 << 5) + +/** + * The pixel format is "pseudo-paletted". This means that it contains a + * fixed palette in the 2nd plane but the palette is fixed/constant for each + * PIX_FMT. This allows interpreting the data as if it was PAL8, which can + * in some cases be simpler. Or the data can be interpreted purely based on + * the pixel format without using the palette. + * An example of a pseudo-paletted format is AV_PIX_FMT_GRAY8 + * + * @deprecated This flag is deprecated, and will be removed. When it is removed, + * the extra palette allocation in AVFrame.data[1] is removed as well. Only + * actual paletted formats (as indicated by AV_PIX_FMT_FLAG_PAL) will have a + * palette. Starting with FFmpeg versions which have this flag deprecated, the + * extra "pseudo" palette is already ignored, and API users are not required to + * allocate a palette for AV_PIX_FMT_FLAG_PSEUDOPAL formats (it was required + * before the deprecation, though). + */ +#define AV_PIX_FMT_FLAG_PSEUDOPAL (1 << 6) + +/** + * The pixel format has an alpha channel. This is set on all formats that + * support alpha in some way, including AV_PIX_FMT_PAL8. The alpha is always + * straight, never pre-multiplied. + * + * If a codec or a filter does not support alpha, it should set all alpha to + * opaque, or use the equivalent pixel formats without alpha component, e.g. + * AV_PIX_FMT_RGB0 (or AV_PIX_FMT_RGB24 etc.) instead of AV_PIX_FMT_RGBA. + */ +#define AV_PIX_FMT_FLAG_ALPHA (1 << 7) + +/** + * The pixel format is following a Bayer pattern + */ +#define AV_PIX_FMT_FLAG_BAYER (1 << 8) + +/** + * The pixel format contains IEEE-754 floating point values. 
Precision (double, + * single, or half) should be determined by the pixel size (64, 32, or 16 bits). + */ +#define AV_PIX_FMT_FLAG_FLOAT (1 << 9) + +/** + * Return the number of bits per pixel used by the pixel format + * described by pixdesc. Note that this is not the same as the number + * of bits per sample. + * + * The returned number of bits refers to the number of bits actually + * used for storing the pixel information, that is padding bits are + * not counted. + */ +int liteav_av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc); + +/** + * Return the number of bits per pixel for the pixel format + * described by pixdesc, including any padding or unused bits. + */ +int liteav_av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc); + +/** + * @return a pixel format descriptor for provided pixel format or NULL if + * this pixel format is unknown. + */ +const AVPixFmtDescriptor *liteav_av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt); + +/** + * Iterate over all pixel format descriptors known to libavutil. + * + * @param prev previous descriptor. NULL to get the first descriptor. + * + * @return next descriptor or NULL after the last descriptor + */ +const AVPixFmtDescriptor *liteav_av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev); + +/** + * @return an AVPixelFormat id described by desc, or AV_PIX_FMT_NONE if desc + * is not a valid pointer to a pixel format descriptor. + */ +enum AVPixelFormat liteav_av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc); + +/** + * Utility function to access log2_chroma_w log2_chroma_h from + * the pixel format AVPixFmtDescriptor. 
+ * + * @param[in] pix_fmt the pixel format + * @param[out] h_shift store log2_chroma_w (horizontal/width shift) + * @param[out] v_shift store log2_chroma_h (vertical/height shift) + * + * @return 0 on success, AVERROR(ENOSYS) on invalid or unknown pixel format + */ +int liteav_av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, + int *h_shift, int *v_shift); + +/** + * @return number of planes in pix_fmt, a negative AVERROR if pix_fmt is not a + * valid pixel format. + */ +int liteav_av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt); + +/** + * @return the name for provided color range or NULL if unknown. + */ +const char *liteav_av_color_range_name(enum AVColorRange range); + +/** + * @return the AVColorRange value for name or an AVError if not found. + */ +int liteav_av_color_range_from_name(const char *name); + +/** + * @return the name for provided color primaries or NULL if unknown. + */ +const char *liteav_av_color_primaries_name(enum AVColorPrimaries primaries); + +/** + * @return the AVColorPrimaries value for name or an AVError if not found. + */ +int liteav_av_color_primaries_from_name(const char *name); + +/** + * @return the name for provided color transfer or NULL if unknown. + */ +const char *liteav_av_color_transfer_name(enum AVColorTransferCharacteristic transfer); + +/** + * @return the AVColorTransferCharacteristic value for name or an AVError if not found. + */ +int liteav_av_color_transfer_from_name(const char *name); + +/** + * @return the name for provided color space or NULL if unknown. + */ +const char *liteav_av_color_space_name(enum AVColorSpace space); + +/** + * @return the AVColorSpace value for name or an AVError if not found. + */ +int liteav_av_color_space_from_name(const char *name); + +/** + * @return the name for provided chroma location or NULL if unknown. + */ +const char *liteav_av_chroma_location_name(enum AVChromaLocation location); + +/** + * @return the AVChromaLocation value for name or an AVError if not found. 
+ */ +int liteav_av_chroma_location_from_name(const char *name); + +/** + * Return the pixel format corresponding to name. + * + * If there is no pixel format with name name, then looks for a + * pixel format with the name corresponding to the native endian + * format of name. + * For example in a little-endian system, first looks for "gray16", + * then for "gray16le". + * + * Finally if no pixel format has been found, returns AV_PIX_FMT_NONE. + */ +enum AVPixelFormat liteav_av_get_pix_fmt(const char *name); + +/** + * Return the short name for a pixel format, NULL in case pix_fmt is + * unknown. + * + * @see liteav_av_get_pix_fmt(), liteav_av_get_pix_fmt_string() + */ +const char *liteav_av_get_pix_fmt_name(enum AVPixelFormat pix_fmt); + +/** + * Print in buf the string corresponding to the pixel format with + * number pix_fmt, or a header if pix_fmt is negative. + * + * @param buf the buffer where to write the string + * @param buf_size the size of buf + * @param pix_fmt the number of the pixel format to print the + * corresponding info string, or a negative value to print the + * corresponding header. + */ +char *liteav_av_get_pix_fmt_string(char *buf, int buf_size, + enum AVPixelFormat pix_fmt); + +/** + * Read a line from an image, and write the values of the + * pixel format component c to dst. + * + * @param data the array containing the pointers to the planes of the image + * @param linesize the array containing the linesizes of the image + * @param desc the pixel format descriptor for the image + * @param x the horizontal coordinate of the first pixel to read + * @param y the vertical coordinate of the first pixel to read + * @param w the width of the line to read, that is the number of + * values to write to dst + * @param read_pal_component if not zero and the format is a paletted + * format writes the values corresponding to the palette + * component c in data[1] to dst, rather than the palette indexes in + * data[0]. 
The behavior is undefined if the format is not paletted. + * @param dst_element_size size of elements in dst array (2 or 4 byte) + */ +void liteav_av_read_image_line2(void *dst, const uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w, int read_pal_component, + int dst_element_size); + +void liteav_av_read_image_line(uint16_t *dst, const uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w, int read_pal_component); + +/** + * Write the values from src to the pixel format component c of an + * image line. + * + * @param src array containing the values to write + * @param data the array containing the pointers to the planes of the + * image to write into. It is supposed to be zeroed. + * @param linesize the array containing the linesizes of the image + * @param desc the pixel format descriptor for the image + * @param x the horizontal coordinate of the first pixel to write + * @param y the vertical coordinate of the first pixel to write + * @param w the width of the line to write, that is the number of + * values to write to the image line + * @param src_element_size size of elements in src array (2 or 4 byte) + */ +void liteav_av_write_image_line2(const void *src, uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w, int src_element_size); + +void liteav_av_write_image_line(const uint16_t *src, uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w); + +/** + * Utility function to swap the endianness of a pixel format. 
+ * + * @param[in] pix_fmt the pixel format + * + * @return pixel format with swapped endianness if it exists, + * otherwise AV_PIX_FMT_NONE + */ +enum AVPixelFormat liteav_av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt); + +#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */ +#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */ +#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */ +#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */ +#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */ +#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */ + +/** + * Compute what kind of losses will occur when converting from one specific + * pixel format to another. + * When converting from one pixel format to another, information loss may occur. + * For example, when converting from RGB24 to GRAY, the color information will + * be lost. Similarly, other losses occur when converting from some formats to + * other formats. These losses can involve loss of chroma, but also loss of + * resolution, loss of color depth, loss due to the color space conversion, loss + * of the alpha bits or loss due to color quantization. + * av_get_fix_fmt_loss() informs you about the various types of losses + * which will occur when converting from one pixel format to another. + * + * @param[in] dst_pix_fmt destination pixel format + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @return Combination of flags informing you what kind of losses will occur + * (maximum loss for an invalid dst_pix_fmt). + */ +int liteav_av_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, + enum AVPixelFormat src_pix_fmt, + int has_alpha); + +/** + * Compute what kind of losses will occur when converting from one specific + * pixel format to another. 
+ * When converting from one pixel format to another, information loss may occur. + * For example, when converting from RGB24 to GRAY, the color information will + * be lost. Similarly, other losses occur when converting from some formats to + * other formats. These losses can involve loss of chroma, but also loss of + * resolution, loss of color depth, loss due to the color space conversion, loss + * of the alpha bits or loss due to color quantization. + * av_get_fix_fmt_loss() informs you about the various types of losses + * which will occur when converting from one pixel format to another. + * + * @param[in] dst_pix_fmt destination pixel format + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @return Combination of flags informing you what kind of losses will occur + * (maximum loss for an invalid dst_pix_fmt). + */ +enum AVPixelFormat liteav_av_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); + +#endif /* AVUTIL_PIXDESC_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/pixfmt.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/pixfmt.h new file mode 100644 index 0000000..6815f8d --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/pixfmt.h @@ -0,0 +1,542 @@ +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PIXFMT_H +#define AVUTIL_PIXFMT_H + +/** + * @file + * pixel format definitions + */ + +#include "libavutil/avconfig.h" +#include "version.h" + +#define AVPALETTE_SIZE 1024 +#define AVPALETTE_COUNT 256 + +/** + * Pixel format. + * + * @note + * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA + * color is put together as: + * (A << 24) | (R << 16) | (G << 8) | B + * This is stored as BGRA on little-endian CPU architectures and ARGB on + * big-endian CPUs. + * + * @note + * If the resolution is not a multiple of the chroma subsampling factor + * then the chroma plane resolution must be rounded up. + * + * @par + * When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized + * image data is stored in AVFrame.data[0]. The palette is transported in + * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is + * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is + * also endian-specific). Note also that the individual RGB32 palette + * components stored in AVFrame.data[1] should be in the range 0..255. + * This is important as many custom PAL8 video codecs that were designed + * to run on the IBM VGA graphics adapter use 6-bit palette components. + * + * @par + * For all the 8 bits per pixel formats, an RGB32 palette is in data[1] like + * for pal8. This palette is filled in automatically by the function + * allocating the picture. 
+ */ +enum AVPixelFormat { + AV_PIX_FMT_NONE = -1, + AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) + AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr + AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB... + AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR... + AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) + AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) + AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) + AV_PIX_FMT_GRAY8, ///< Y , 8bpp + AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb + AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb + AV_PIX_FMT_PAL8, ///< 8 bits with AV_PIX_FMT_RGB32 palette + AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range + AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range + AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range + AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 + AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 + AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) + AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb) + AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) + AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, 
(msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) + AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) + AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped + + AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB... + AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA... + AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR... + AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA... + + AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian + AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian + AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) + AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range + AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) + AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian + AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian + + AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian + AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian + AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined + AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined + + AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian + AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian + AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), 
big-endian , X=unused/undefined + AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined + +#if FF_API_VAAPI + /** @name Deprecated pixel formats */ + /**@{*/ + AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers + AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers + AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a VASurfaceID + /**@}*/ + AV_PIX_FMT_VAAPI = AV_PIX_FMT_VAAPI_VLD, +#else + /** + * Hardware acceleration through VA-API, data[3] contains a + * VASurfaceID. + */ + AV_PIX_FMT_VAAPI, +#endif + + AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer + + AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined + AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined + AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined + AV_PIX_FMT_BGR444BE, ///< 
packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined + AV_PIX_FMT_YA8, ///< 8 bits gray, 8 bits alpha + + AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8 + AV_PIX_FMT_GRAY8A= AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8 + + AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian + AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian + + /** + * The following 12 formats have the disadvantage of needing 1 format for each bit depth. + * Notice that each 9/10 bits sample is stored in 16 bits with extra padding. + * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better. + */ + AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), 
big-endian + AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp + AV_PIX_FMT_GBR24P = AV_PIX_FMT_GBRP, // alias for #AV_PIX_FMT_GBRP + AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian + AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian + AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian + AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian + AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian + AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian + AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) + AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) + AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian + AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian + AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian + AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian + AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian + AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian + AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) + AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A 
samples, big-endian) + AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) + AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + + AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface + + AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + + AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as 
big-endian + AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + + AV_PIX_FMT_YVYU422, ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb + + AV_PIX_FMT_YA16BE, ///< 16 bits gray, 16 bits alpha (big-endian) + AV_PIX_FMT_YA16LE, ///< 16 bits gray, 16 bits alpha (little-endian) + + AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp + AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian + AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian + /** + * HW acceleration through QSV, data[3] contains a pointer to the + * mfxFrameSurface1 structure. + */ + AV_PIX_FMT_QSV, + /** + * HW acceleration though MMAL, data[3] contains a pointer to the + * MMAL_BUFFER_HEADER_T structure. + */ + AV_PIX_FMT_MMAL, + + AV_PIX_FMT_D3D11VA_VLD, ///< HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer + + /** + * HW acceleration through CUDA. data[i] contain CUdeviceptr pointers + * exactly as for system memory frames. + */ + AV_PIX_FMT_CUDA, + + AV_PIX_FMT_0RGB, ///< packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined + AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined + AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined + AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGRXBGRX... 
X=unused/undefined + + AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian + AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian + AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian + AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian + AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range + + AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */ + 
AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */ + + AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing + + AV_PIX_FMT_YUV440P10LE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian + AV_PIX_FMT_YUV440P10BE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian + AV_PIX_FMT_YUV440P12LE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian + AV_PIX_FMT_YUV440P12BE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian + AV_PIX_FMT_AYUV64LE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian + AV_PIX_FMT_AYUV64BE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian + + AV_PIX_FMT_VIDEOTOOLBOX, ///< hardware decoding through Videotoolbox + + AV_PIX_FMT_P010LE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, little-endian + AV_PIX_FMT_P010BE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, big-endian 
+ + AV_PIX_FMT_GBRAP12BE, ///< planar GBR 4:4:4:4 48bpp, big-endian + AV_PIX_FMT_GBRAP12LE, ///< planar GBR 4:4:4:4 48bpp, little-endian + + AV_PIX_FMT_GBRAP10BE, ///< planar GBR 4:4:4:4 40bpp, big-endian + AV_PIX_FMT_GBRAP10LE, ///< planar GBR 4:4:4:4 40bpp, little-endian + + AV_PIX_FMT_MEDIACODEC, ///< hardware decoding through MediaCodec + + AV_PIX_FMT_GRAY12BE, ///< Y , 12bpp, big-endian + AV_PIX_FMT_GRAY12LE, ///< Y , 12bpp, little-endian + AV_PIX_FMT_GRAY10BE, ///< Y , 10bpp, big-endian + AV_PIX_FMT_GRAY10LE, ///< Y , 10bpp, little-endian + + AV_PIX_FMT_P016LE, ///< like NV12, with 16bpp per component, little-endian + AV_PIX_FMT_P016BE, ///< like NV12, with 16bpp per component, big-endian + + /** + * Hardware surfaces for Direct3D11. + * + * This is preferred over the legacy AV_PIX_FMT_D3D11VA_VLD. The new D3D11 + * hwaccel API and filtering support AV_PIX_FMT_D3D11 only. + * + * data[0] contains a ID3D11Texture2D pointer, and data[1] contains the + * texture array index of the frame as intptr_t if the ID3D11Texture2D is + * an array texture (or always 0 if it's a normal texture). + */ + AV_PIX_FMT_D3D11, + + AV_PIX_FMT_GRAY9BE, ///< Y , 9bpp, big-endian + AV_PIX_FMT_GRAY9LE, ///< Y , 9bpp, little-endian + + AV_PIX_FMT_GBRPF32BE, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian + AV_PIX_FMT_GBRPF32LE, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian + AV_PIX_FMT_GBRAPF32BE, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian + AV_PIX_FMT_GBRAPF32LE, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian + + /** + * DRM-managed buffers exposed through PRIME buffer sharing. + * + * data[0] points to an AVDRMFrameDescriptor. + */ + AV_PIX_FMT_DRM_PRIME, + /** + * Hardware surfaces for OpenCL. + * + * data[i] contain 2D image objects (typed in C as cl_mem, used + * in OpenCL as image2d_t) for each plane of the surface. 
+ */ + AV_PIX_FMT_OPENCL, + + AV_PIX_FMT_GRAY14BE, ///< Y , 14bpp, big-endian + AV_PIX_FMT_GRAY14LE, ///< Y , 14bpp, little-endian + + AV_PIX_FMT_GRAYF32BE, ///< IEEE-754 single precision Y, 32bpp, big-endian + AV_PIX_FMT_GRAYF32LE, ///< IEEE-754 single precision Y, 32bpp, little-endian + + AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions +}; + +#if AV_HAVE_BIGENDIAN +# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be +#else +# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le +#endif + +#define AV_PIX_FMT_RGB32 AV_PIX_FMT_NE(ARGB, BGRA) +#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR) +#define AV_PIX_FMT_BGR32 AV_PIX_FMT_NE(ABGR, RGBA) +#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB) +#define AV_PIX_FMT_0RGB32 AV_PIX_FMT_NE(0RGB, BGR0) +#define AV_PIX_FMT_0BGR32 AV_PIX_FMT_NE(0BGR, RGB0) + +#define AV_PIX_FMT_GRAY9 AV_PIX_FMT_NE(GRAY9BE, GRAY9LE) +#define AV_PIX_FMT_GRAY10 AV_PIX_FMT_NE(GRAY10BE, GRAY10LE) +#define AV_PIX_FMT_GRAY12 AV_PIX_FMT_NE(GRAY12BE, GRAY12LE) +#define AV_PIX_FMT_GRAY14 AV_PIX_FMT_NE(GRAY14BE, GRAY14LE) +#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE) +#define AV_PIX_FMT_YA16 AV_PIX_FMT_NE(YA16BE, YA16LE) +#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE) +#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE) +#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE) +#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE) +#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE) +#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE) +#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE) +#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE) +#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE) +#define AV_PIX_FMT_BGRA64 AV_PIX_FMT_NE(BGRA64BE, BGRA64LE) + +#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE) +#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , 
YUV422P9LE) +#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE) +#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE) +#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE) +#define AV_PIX_FMT_YUV440P10 AV_PIX_FMT_NE(YUV440P10BE, YUV440P10LE) +#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE) +#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE) +#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE) +#define AV_PIX_FMT_YUV440P12 AV_PIX_FMT_NE(YUV440P12BE, YUV440P12LE) +#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE) +#define AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE) +#define AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE) +#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE) +#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE) +#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE) +#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE) + +#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE) +#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE) +#define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE) +#define AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE) +#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE) +#define AV_PIX_FMT_GBRAP10 AV_PIX_FMT_NE(GBRAP10BE, GBRAP10LE) +#define AV_PIX_FMT_GBRAP12 AV_PIX_FMT_NE(GBRAP12BE, GBRAP12LE) +#define AV_PIX_FMT_GBRAP16 AV_PIX_FMT_NE(GBRAP16BE, GBRAP16LE) + +#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE, BAYER_BGGR16LE) +#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE, BAYER_RGGB16LE) +#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE, BAYER_GBRG16LE) +#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE, BAYER_GRBG16LE) + +#define AV_PIX_FMT_GBRPF32 AV_PIX_FMT_NE(GBRPF32BE, GBRPF32LE) +#define AV_PIX_FMT_GBRAPF32 AV_PIX_FMT_NE(GBRAPF32BE, 
GBRAPF32LE) + +#define AV_PIX_FMT_GRAYF32 AV_PIX_FMT_NE(GRAYF32BE, GRAYF32LE) + +#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE) +#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE) +#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE) +#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE) +#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE) +#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE) +#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE) +#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE) +#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE) + +#define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE) +#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE) +#define AV_PIX_FMT_AYUV64 AV_PIX_FMT_NE(AYUV64BE, AYUV64LE) +#define AV_PIX_FMT_P010 AV_PIX_FMT_NE(P010BE, P010LE) +#define AV_PIX_FMT_P016 AV_PIX_FMT_NE(P016BE, P016LE) + +/** + * Chromaticity coordinates of the source primaries. + * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.1. 
+ */ +enum AVColorPrimaries { + AVCOL_PRI_RESERVED0 = 0, + AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B + AVCOL_PRI_UNSPECIFIED = 2, + AVCOL_PRI_RESERVED = 3, + AVCOL_PRI_BT470M = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20) + + AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM + AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above + AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C + AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020 + AVCOL_PRI_SMPTE428 = 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ) + AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428, + AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3 + AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3 + AVCOL_PRI_JEDEC_P22 = 22, ///< JEDEC P22 phosphors + AVCOL_PRI_NB ///< Not part of ABI +}; + +/** + * Color Transfer Characteristic. + * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.2. 
+ */ +enum AVColorTransferCharacteristic { + AVCOL_TRC_RESERVED0 = 0, + AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361 + AVCOL_TRC_UNSPECIFIED = 2, + AVCOL_TRC_RESERVED = 3, + AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM + AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG + AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC + AVCOL_TRC_SMPTE240M = 7, + AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics" + AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)" + AVCOL_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)" + AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4 + AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut + AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC) + AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10-bit system + AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12-bit system + AVCOL_TRC_SMPTE2084 = 16, ///< SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems + AVCOL_TRC_SMPTEST2084 = AVCOL_TRC_SMPTE2084, + AVCOL_TRC_SMPTE428 = 17, ///< SMPTE ST 428-1 + AVCOL_TRC_SMPTEST428_1 = AVCOL_TRC_SMPTE428, + AVCOL_TRC_ARIB_STD_B67 = 18, ///< ARIB STD-B67, known as "Hybrid log-gamma" + AVCOL_TRC_NB ///< Not part of ABI +}; + +/** + * YUV colorspace type. + * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.3. 
+ */ +enum AVColorSpace { + AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB) + AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B + AVCOL_SPC_UNSPECIFIED = 2, + AVCOL_SPC_RESERVED = 3, + AVCOL_SPC_FCC = 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20) + AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 + AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + AVCOL_SPC_SMPTE240M = 7, ///< functionally identical to above + AVCOL_SPC_YCGCO = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16 + AVCOL_SPC_YCOCG = AVCOL_SPC_YCGCO, + AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system + AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system + AVCOL_SPC_SMPTE2085 = 11, ///< SMPTE 2085, Y'D'zD'x + AVCOL_SPC_CHROMA_DERIVED_NCL = 12, ///< Chromaticity-derived non-constant luminance system + AVCOL_SPC_CHROMA_DERIVED_CL = 13, ///< Chromaticity-derived constant luminance system + AVCOL_SPC_ICTCP = 14, ///< ITU-R BT.2100-0, ICtCp + AVCOL_SPC_NB ///< Not part of ABI +}; + +/** + * MPEG vs JPEG YUV range. + */ +enum AVColorRange { + AVCOL_RANGE_UNSPECIFIED = 0, + AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges + AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges + AVCOL_RANGE_NB ///< Not part of ABI +}; + +/** + * Location of chroma samples. + * + * Illustration showing the location of the first (top left) chroma sample of the + * image, the left shows only luma, the right + * shows the location of the chroma sample, the 2 could be imagined to overlay + * each other but are drawn separately due to limitations of ASCII + * + * 1st 2nd 1st 2nd horizontal luma sample positions + * v v v v + * ______ ______ + *1st luma line > |X X ... |3 4 X ... 
X are luma samples, + * | |1 2 1-6 are possible chroma positions + *2nd luma line > |X X ... |5 6 X ... 0 is undefined/unknown position + */ +enum AVChromaLocation { + AVCHROMA_LOC_UNSPECIFIED = 0, + AVCHROMA_LOC_LEFT = 1, ///< MPEG-2/4 4:2:0, H.264 default for 4:2:0 + AVCHROMA_LOC_CENTER = 2, ///< MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0 + AVCHROMA_LOC_TOPLEFT = 3, ///< ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2 + AVCHROMA_LOC_TOP = 4, + AVCHROMA_LOC_BOTTOMLEFT = 5, + AVCHROMA_LOC_BOTTOM = 6, + AVCHROMA_LOC_NB ///< Not part of ABI +}; + +#endif /* AVUTIL_PIXFMT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/pthread_helper.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/pthread_helper.h new file mode 100644 index 0000000..c31040e --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/pthread_helper.h @@ -0,0 +1,39 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2009 Baptiste Coudurier <qoroliang@tencent.com> + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PTHREAD_HELPER_H +#define AVUTIL_PTHREAD_HELPER_H + +/** + * Wait for a task + * + * @param poll_max_count poll max count + * @param poll_interval_time poll interval time, in microsecond + * + * @return poll count + * + */ + +int liteav_ff_wait_thread(int poll_max_count, int poll_interval_time, int *running); + +#ifdef _WIN32 +unsigned long pthread_self(); +#endif + +#endif diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/random_seed.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/random_seed.h new file mode 100644 index 0000000..7a26962 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/random_seed.h @@ -0,0 +1,44 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2009 Baptiste Coudurier <baptiste.coudurier@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_RANDOM_SEED_H +#define AVUTIL_RANDOM_SEED_H + +#include <stdint.h> +/** + * @addtogroup lavu_crypto + * @{ + */ + +/** + * Get a seed to use in conjunction with random functions. + * This function tries to provide a good seed at a best effort bases. + * Its possible to call this function multiple times if more bits are needed. + * It can be quite slow, which is why it should only be used as seed for a faster + * PRNG. The quality of the seed depends on the platform. + */ +uint32_t liteav_av_get_random_seed(void); + +/** + * @} + */ + +#endif /* AVUTIL_RANDOM_SEED_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/rational.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/rational.h new file mode 100644 index 0000000..d3c038a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/rational.h @@ -0,0 +1,215 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * rational numbers + * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_math_rational + * Utilties for rational number calculation. + * @author Michael Niedermayer <michaelni@gmx.at> + */ + +#ifndef AVUTIL_RATIONAL_H +#define AVUTIL_RATIONAL_H + +#include <stdint.h> +#include <limits.h> +#include "attributes.h" + +/** + * @defgroup lavu_math_rational AVRational + * @ingroup lavu_math + * Rational number calculation. + * + * While rational numbers can be expressed as floating-point numbers, the + * conversion process is a lossy one, so are floating-point operations. On the + * other hand, the nature of FFmpeg demands highly accurate calculation of + * timestamps. This set of rational number utilities serves as a generic + * interface for manipulating rational numbers as pairs of numerators and + * denominators. + * + * Many of the functions that operate on AVRational's have the suffix `_q`, in + * reference to the mathematical symbol "ℚ" (Q) which denotes the set of all + * rational numbers. + * + * @{ + */ + +/** + * Rational number (pair of numerator and denominator). + */ +typedef struct AVRational{ + int num; ///< Numerator + int den; ///< Denominator +} AVRational; + +/** + * Create an AVRational. + * + * Useful for compilers that do not support compound literals. + * + * @note The return value is not reduced. + * @see liteav_av_reduce() + */ +static inline AVRational av_make_q(int num, int den) +{ + AVRational r = { num, den }; + return r; +} + +/** + * Compare two rationals. 
+ * + * @param a First rational + * @param b Second rational + * + * @return One of the following values: + * - 0 if `a == b` + * - 1 if `a > b` + * - -1 if `a < b` + * - `INT_MIN` if one of the values is of the form `0 / 0` + */ +static inline int av_cmp_q(AVRational a, AVRational b){ + const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den; + + if(tmp) return (int)((tmp ^ a.den ^ b.den)>>63)|1; + else if(b.den && a.den) return 0; + else if(a.num && b.num) return (a.num>>31) - (b.num>>31); + else return INT_MIN; +} + +/** + * Convert an AVRational to a `double`. + * @param a AVRational to convert + * @return `a` in floating-point form + * @see liteav_av_d2q() + */ +static inline double av_q2d(AVRational a){ + return a.num / (double) a.den; +} + +/** + * Reduce a fraction. + * + * This is useful for framerate calculations. + * + * @param[out] dst_num Destination numerator + * @param[out] dst_den Destination denominator + * @param[in] num Source numerator + * @param[in] den Source denominator + * @param[in] max Maximum allowed values for `dst_num` & `dst_den` + * @return 1 if the operation is exact, 0 otherwise + */ +int liteav_av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max); + +/** + * Multiply two rationals. + * @param b First rational + * @param c Second rational + * @return b*c + */ +AVRational liteav_av_mul_q(AVRational b, AVRational c) av_const; + +/** + * Divide one rational by another. + * @param b First rational + * @param c Second rational + * @return b/c + */ +AVRational liteav_av_div_q(AVRational b, AVRational c) av_const; + +/** + * Add two rationals. + * @param b First rational + * @param c Second rational + * @return b+c + */ +AVRational liteav_av_add_q(AVRational b, AVRational c) av_const; + +/** + * Subtract one rational from another. + * @param b First rational + * @param c Second rational + * @return b-c + */ +AVRational liteav_av_sub_q(AVRational b, AVRational c) av_const; + +/** + * Invert a rational. 
+ * @param q value + * @return 1 / q + */ +static av_always_inline AVRational av_inv_q(AVRational q) +{ + AVRational r = { q.den, q.num }; + return r; +} + +/** + * Convert a double precision floating point number to a rational. + * + * In case of infinity, the returned value is expressed as `{1, 0}` or + * `{-1, 0}` depending on the sign. + * + * @param d `double` to convert + * @param max Maximum allowed numerator and denominator + * @return `d` in AVRational form + * @see av_q2d() + */ +AVRational liteav_av_d2q(double d, int max) av_const; + +/** + * Find which of the two rationals is closer to another rational. + * + * @param q Rational to be compared against + * @param q1,q2 Rationals to be tested + * @return One of the following values: + * - 1 if `q1` is nearer to `q` than `q2` + * - -1 if `q2` is nearer to `q` than `q1` + * - 0 if they have the same distance + */ +int liteav_av_nearer_q(AVRational q, AVRational q1, AVRational q2); + +/** + * Find the value in a list of rationals nearest a given reference rational. + * + * @param q Reference rational + * @param q_list Array of rationals terminated by `{0, 0}` + * @return Index of the nearest value found in the array + */ +int liteav_av_find_nearest_q_idx(AVRational q, const AVRational* q_list); + +/** + * Convert an AVRational to a IEEE 32-bit `float` expressed in fixed-point + * format. + * + * @param q Rational to be converted + * @return Equivalent floating-point value, expressed as an unsigned 32-bit + * integer. + * @note The returned value is platform-indepedant. 
+ */ +uint32_t liteav_av_q2intfloat(AVRational q); + +/** + * @} + */ + +#endif /* AVUTIL_RATIONAL_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/rc4.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/rc4.h new file mode 100644 index 0000000..7eeb575 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/rc4.h @@ -0,0 +1,67 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * RC4 encryption/decryption/pseudo-random number generator + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_RC4_H +#define AVUTIL_RC4_H + +#include <stdint.h> + +/** + * @defgroup lavu_rc4 RC4 + * @ingroup lavu_crypto + * @{ + */ + +typedef struct AVRC4 { + uint8_t state[256]; + int x, y; +} AVRC4; + +/** + * Allocate an AVRC4 context. + */ +AVRC4 *liteav_av_rc4_alloc(void); + +/** + * @brief Initializes an AVRC4 context. 
+ * + * @param key_bits must be a multiple of 8 + * @param decrypt 0 for encryption, 1 for decryption, currently has no effect + * @return zero on success, negative value otherwise + */ +int liteav_av_rc4_init(struct AVRC4 *d, const uint8_t *key, int key_bits, int decrypt); + +/** + * @brief Encrypts / decrypts using the RC4 algorithm. + * + * @param count number of bytes + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst, may be NULL + * @param iv not (yet) used for RC4, should be NULL + * @param decrypt 0 for encryption, 1 for decryption, not (yet) used + */ +void liteav_av_rc4_crypt(struct AVRC4 *d, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_RC4_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/replaygain.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/replaygain.h new file mode 100644 index 0000000..b49bf1a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/replaygain.h @@ -0,0 +1,50 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_REPLAYGAIN_H +#define AVUTIL_REPLAYGAIN_H + +#include <stdint.h> + +/** + * ReplayGain information (see + * http://wiki.hydrogenaudio.org/index.php?title=ReplayGain_1.0_specification). + * The size of this struct is a part of the public ABI. + */ +typedef struct AVReplayGain { + /** + * Track replay gain in microbels (divide by 100000 to get the value in dB). + * Should be set to INT32_MIN when unknown. + */ + int32_t track_gain; + /** + * Peak track amplitude, with 100000 representing full scale (but values + * may overflow). 0 when unknown. + */ + uint32_t track_peak; + /** + * Same as track_gain, but for the whole album. + */ + int32_t album_gain; + /** + * Same as track_peak, but for the whole album, + */ + uint32_t album_peak; +} AVReplayGain; + +#endif /* AVUTIL_REPLAYGAIN_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/ripemd.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/ripemd.h new file mode 100644 index 0000000..2137d35 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/ripemd.h @@ -0,0 +1,88 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at> + * Copyright (C) 2013 James Almer <jamrial@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_ripemd + * Public header for RIPEMD hash function implementation. + */ + +#ifndef AVUTIL_RIPEMD_H +#define AVUTIL_RIPEMD_H + +#include <stdint.h> + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_ripemd RIPEMD + * @ingroup lavu_hash + * RIPEMD hash function implementation. + * + * @{ + */ + +extern const int liteav_av_ripemd_size; + +struct AVRIPEMD; + +/** + * Allocate an AVRIPEMD context. + */ +struct AVRIPEMD *liteav_av_ripemd_alloc(void); + +/** + * Initialize RIPEMD hashing. + * + * @param context pointer to the function context (of size liteav_av_ripemd_size) + * @param bits number of bits in digest (128, 160, 256 or 320 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int liteav_av_ripemd_init(struct AVRIPEMD* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +#if FF_API_CRYPTO_SIZE_T +void liteav_av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, unsigned int len); +#else +void liteav_av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, size_t len); +#endif + +/** + * Finish hashing and output digest value. 
+ * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void liteav_av_ripemd_final(struct AVRIPEMD* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_RIPEMD_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/samplefmt.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/samplefmt.h new file mode 100644 index 0000000..118f25a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/samplefmt.h @@ -0,0 +1,273 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_SAMPLEFMT_H +#define AVUTIL_SAMPLEFMT_H + +#include <stdint.h> + +#include "avutil.h" +#include "attributes.h" + +/** + * @addtogroup lavu_audio + * @{ + * + * @defgroup lavu_sampfmts Audio sample formats + * + * Audio sample format enumeration and related convenience functions. + * @{ + */ + +/** + * Audio sample formats + * + * - The data described by the sample format is always in native-endian order. 
+ * Sample values can be expressed by native C types, hence the lack of a signed + * 24-bit sample format even though it is a common raw audio data format. + * + * - The floating-point formats are based on full volume being in the range + * [-1.0, 1.0]. Any values outside this range are beyond full volume level. + * + * - The data layout as used in liteav_av_samples_fill_arrays() and elsewhere in FFmpeg + * (such as AVFrame in libavcodec) is as follows: + * + * @par + * For planar sample formats, each audio channel is in a separate data plane, + * and linesize is the buffer size, in bytes, for a single plane. All data + * planes must be the same size. For packed sample formats, only the first data + * plane is used, and samples for each channel are interleaved. In this case, + * linesize is the buffer size, in bytes, for the 1 plane. + * + */ +enum AVSampleFormat { + AV_SAMPLE_FMT_NONE = -1, + AV_SAMPLE_FMT_U8, ///< unsigned 8 bits + AV_SAMPLE_FMT_S16, ///< signed 16 bits + AV_SAMPLE_FMT_S32, ///< signed 32 bits + AV_SAMPLE_FMT_FLT, ///< float + AV_SAMPLE_FMT_DBL, ///< double + + AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar + AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar + AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar + AV_SAMPLE_FMT_FLTP, ///< float, planar + AV_SAMPLE_FMT_DBLP, ///< double, planar + AV_SAMPLE_FMT_S64, ///< signed 64 bits + AV_SAMPLE_FMT_S64P, ///< signed 64 bits, planar + + AV_SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if linking dynamically +}; + +/** + * Return the name of sample_fmt, or NULL if sample_fmt is not + * recognized. + */ +const char *liteav_av_get_sample_fmt_name(enum AVSampleFormat sample_fmt); + +/** + * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE + * on error. + */ +enum AVSampleFormat liteav_av_get_sample_fmt(const char *name); + +/** + * Return the planar<->packed alternative form of the given sample format, or + * AV_SAMPLE_FMT_NONE on error. 
If the passed sample_fmt is already in the + * requested planar/packed format, the format returned is the same as the + * input. + */ +enum AVSampleFormat liteav_av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt, int planar); + +/** + * Get the packed alternative form of the given sample format. + * + * If the passed sample_fmt is already in packed format, the format returned is + * the same as the input. + * + * @return the packed alternative form of the given sample format or + AV_SAMPLE_FMT_NONE on error. + */ +enum AVSampleFormat liteav_av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt); + +/** + * Get the planar alternative form of the given sample format. + * + * If the passed sample_fmt is already in planar format, the format returned is + * the same as the input. + * + * @return the planar alternative form of the given sample format or + AV_SAMPLE_FMT_NONE on error. + */ +enum AVSampleFormat liteav_av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt); + +/** + * Generate a string corresponding to the sample format with + * sample_fmt, or a header if sample_fmt is negative. + * + * @param buf the buffer where to write the string + * @param buf_size the size of buf + * @param sample_fmt the number of the sample format to print the + * corresponding info string, or a negative value to print the + * corresponding header. + * @return the pointer to the filled buffer or NULL if sample_fmt is + * unknown or in case of other errors + */ +char *liteav_av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt); + +/** + * Return number of bytes per sample. + * + * @param sample_fmt the sample format + * @return number of bytes per sample or zero if unknown for the given + * sample format + */ +int liteav_av_get_bytes_per_sample(enum AVSampleFormat sample_fmt); + +/** + * Check if the sample format is planar. 
+ * + * @param sample_fmt the sample format to inspect + * @return 1 if the sample format is planar, 0 if it is interleaved + */ +int liteav_av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt); + +/** + * Get the required buffer size for the given audio parameters. + * + * @param[out] linesize calculated linesize, may be NULL + * @param nb_channels the number of channels + * @param nb_samples the number of samples in a single channel + * @param sample_fmt the sample format + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return required buffer size, or negative error code on failure + */ +int liteav_av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, + enum AVSampleFormat sample_fmt, int align); + +/** + * @} + * + * @defgroup lavu_sampmanip Samples manipulation + * + * Functions that manipulate audio samples + * @{ + */ + +/** + * Fill plane data pointers and linesize for samples with sample + * format sample_fmt. + * + * The audio_data array is filled with the pointers to the samples data planes: + * for planar, set the start point of each channel's data within the buffer, + * for packed, set the start point of the entire buffer only. + * + * The value pointed to by linesize is set to the aligned size of each + * channel's data buffer for planar layout, or to the aligned size of the + * buffer for all channels for packed layout. + * + * The buffer in buf must be big enough to contain all the samples + * (use liteav_av_samples_get_buffer_size() to compute its minimum size), + * otherwise the audio_data pointers will point to invalid data. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. 
+ * + * @param[out] audio_data array to be filled with the pointer for each channel + * @param[out] linesize calculated linesize, may be NULL + * @param buf the pointer to a buffer containing the samples + * @param nb_channels the number of channels + * @param nb_samples the number of samples in a single channel + * @param sample_fmt the sample format + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return >=0 on success or a negative error code on failure + * @todo return minimum size in bytes required for the buffer in case + * of success at the next bump + */ +int liteav_av_samples_fill_arrays(uint8_t **audio_data, int *linesize, + const uint8_t *buf, + int nb_channels, int nb_samples, + enum AVSampleFormat sample_fmt, int align); + +/** + * Allocate a samples buffer for nb_samples samples, and fill data pointers and + * linesize accordingly. + * The allocated samples buffer can be freed by using liteav_av_freep(&audio_data[0]) + * Allocated data will be initialized to silence. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param[out] audio_data array to be filled with the pointer for each channel + * @param[out] linesize aligned size for audio buffer(s), may be NULL + * @param nb_channels number of audio channels + * @param nb_samples number of samples per channel + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return >=0 on success or a negative error code on failure + * @todo return the size of the allocated buffer in case of success at the next bump + * @see liteav_av_samples_fill_arrays() + * @see liteav_av_samples_alloc_array_and_samples() + */ +int liteav_av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels, + int nb_samples, enum AVSampleFormat sample_fmt, int align); + +/** + * Allocate a data pointers array, samples buffer for nb_samples + * samples, and fill data pointers and linesize accordingly. 
+ * + * This is the same as liteav_av_samples_alloc(), but also allocates the data + * pointers array. + * + * @see liteav_av_samples_alloc() + */ +int liteav_av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels, + int nb_samples, enum AVSampleFormat sample_fmt, int align); + +/** + * Copy samples from src to dst. + * + * @param dst destination array of pointers to data planes + * @param src source array of pointers to data planes + * @param dst_offset offset in samples at which the data will be written to dst + * @param src_offset offset in samples at which the data will be read from src + * @param nb_samples number of samples to be copied + * @param nb_channels number of audio channels + * @param sample_fmt audio sample format + */ +int liteav_av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset, + int src_offset, int nb_samples, int nb_channels, + enum AVSampleFormat sample_fmt); + +/** + * Fill an audio buffer with silence. + * + * @param audio_data array of pointers to data planes + * @param offset offset in samples at which to start filling + * @param nb_samples number of samples to fill + * @param nb_channels number of audio channels + * @param sample_fmt audio sample format + */ +int liteav_av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples, + int nb_channels, enum AVSampleFormat sample_fmt); + +/** + * @} + * @} + */ +#endif /* AVUTIL_SAMPLEFMT_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/sha.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/sha.h new file mode 100644 index 0000000..3eed047 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/sha.h @@ -0,0 +1,96 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at> + * + 
* This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_sha + * Public header for SHA-1 & SHA-256 hash function implementations. + */ + +#ifndef AVUTIL_SHA_H +#define AVUTIL_SHA_H + +#include <stddef.h> +#include <stdint.h> + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_sha SHA + * @ingroup lavu_hash + * SHA-1 and SHA-256 (Secure Hash Algorithm) hash function implementations. + * + * This module supports the following SHA hash functions: + * + * - SHA-1: 160 bits + * - SHA-224: 224 bits, as a variant of SHA-2 + * - SHA-256: 256 bits, as a variant of SHA-2 + * + * @see For SHA-384, SHA-512, and variants thereof, see @ref lavu_sha512. + * + * @{ + */ + +extern const int liteav_av_sha_size; + +struct AVSHA; + +/** + * Allocate an AVSHA context. + */ +struct AVSHA *liteav_av_sha_alloc(void); + +/** + * Initialize SHA-1 or SHA-2 hashing. + * + * @param context pointer to the function context (of size liteav_av_sha_size) + * @param bits number of bits in digest (SHA-1 - 160 bits, SHA-2 224 or 256 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int liteav_av_sha_init(struct AVSHA* context, int bits); + +/** + * Update hash value. 
+ * + * @param ctx hash function context + * @param data input data to update hash with + * @param len input data length + */ +#if FF_API_CRYPTO_SIZE_T +void liteav_av_sha_update(struct AVSHA *ctx, const uint8_t *data, unsigned int len); +#else +void liteav_av_sha_update(struct AVSHA *ctx, const uint8_t *data, size_t len); +#endif + +/** + * Finish hashing and output digest value. + * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void liteav_av_sha_final(struct AVSHA* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_SHA_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/sha512.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/sha512.h new file mode 100644 index 0000000..0975da1 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/sha512.h @@ -0,0 +1,98 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at> + * Copyright (C) 2013 James Almer <jamrial@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_sha512 + * Public header for SHA-512 implementation. + */ + +#ifndef AVUTIL_SHA512_H +#define AVUTIL_SHA512_H + +#include <stddef.h> +#include <stdint.h> + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_sha512 SHA-512 + * @ingroup lavu_hash + * SHA-512 (Secure Hash Algorithm) hash function implementations. + * + * This module supports the following SHA-2 hash functions: + * + * - SHA-512/224: 224 bits + * - SHA-512/256: 256 bits + * - SHA-384: 384 bits + * - SHA-512: 512 bits + * + * @see For SHA-1, SHA-256, and variants thereof, see @ref lavu_sha. + * + * @{ + */ + +extern const int liteav_av_sha512_size; + +struct AVSHA512; + +/** + * Allocate an AVSHA512 context. + */ +struct AVSHA512 *liteav_av_sha512_alloc(void); + +/** + * Initialize SHA-2 512 hashing. + * + * @param context pointer to the function context (of size liteav_av_sha512_size) + * @param bits number of bits in digest (224, 256, 384 or 512 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int liteav_av_sha512_init(struct AVSHA512* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +#if FF_API_CRYPTO_SIZE_T +void liteav_av_sha512_update(struct AVSHA512* context, const uint8_t* data, unsigned int len); +#else +void liteav_av_sha512_update(struct AVSHA512* context, const uint8_t* data, size_t len); +#endif + +/** + * Finish hashing and output digest value. 
+ * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void liteav_av_sha512_final(struct AVSHA512* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_SHA512_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/spherical.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/spherical.h new file mode 100644 index 0000000..18a4ce0 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/spherical.h @@ -0,0 +1,233 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2016 Vittorio Giovara <vittorio.giovara@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Spherical video + */ + +#ifndef AVUTIL_SPHERICAL_H +#define AVUTIL_SPHERICAL_H + +#include <stddef.h> +#include <stdint.h> + +/** + * @addtogroup lavu_video + * @{ + * + * @defgroup lavu_video_spherical Spherical video mapping + * @{ + */ + +/** + * @addtogroup lavu_video_spherical + * A spherical video file contains surfaces that need to be mapped onto a + * sphere. 
Depending on how the frame was converted, a different distortion + * transformation or surface recomposition function needs to be applied before + * the video should be mapped and displayed. + */ + +/** + * Projection of the video surface(s) on a sphere. + */ +enum AVSphericalProjection { + /** + * Video represents a sphere mapped on a flat surface using + * equirectangular projection. + */ + AV_SPHERICAL_EQUIRECTANGULAR, + + /** + * Video frame is split into 6 faces of a cube, and arranged on a + * 3x2 layout. Faces are oriented upwards for the front, left, right, + * and back faces. The up face is oriented so the top of the face is + * forwards and the down face is oriented so the top of the face is + * to the back. + */ + AV_SPHERICAL_CUBEMAP, + + /** + * Video represents a portion of a sphere mapped on a flat surface + * using equirectangular projection. The @ref bounding fields indicate + * the position of the current video in a larger surface. + */ + AV_SPHERICAL_EQUIRECTANGULAR_TILE, +}; + +/** + * This structure describes how to handle spherical videos, outlining + * information about projection, initial layout, and any other view modifier. + * + * @note The struct must be allocated with liteav_av_spherical_alloc() and + * its size is not a part of the public ABI. + */ +typedef struct AVSphericalMapping { + /** + * Projection type. + */ + enum AVSphericalProjection projection; + + /** + * @name Initial orientation + * @{ + * There fields describe additional rotations applied to the sphere after + * the video frame is mapped onto it. The sphere is rotated around the + * viewer, who remains stationary. The order of transformation is always + * yaw, followed by pitch, and finally by roll. + * + * The coordinate system matches the one defined in OpenGL, where the + * forward vector (z) is coming out of screen, and it is equivalent to + * a rotation matrix of R = r_y(yaw) * r_x(pitch) * r_z(roll). 
+ * + * A positive yaw rotates the portion of the sphere in front of the viewer + * toward their right. A positive pitch rotates the portion of the sphere + * in front of the viewer upwards. A positive roll tilts the portion of + * the sphere in front of the viewer to the viewer's right. + * + * These values are exported as 16.16 fixed point. + * + * See this equirectangular projection as example: + * + * @code{.unparsed} + * Yaw + * -180 0 180 + * 90 +-------------+-------------+ 180 + * | | | up + * P | | | y| forward + * i | ^ | | /z + * t 0 +-------------X-------------+ 0 Roll | / + * c | | | | / + * h | | | 0|/_____right + * | | | x + * -90 +-------------+-------------+ -180 + * + * X - the default camera center + * ^ - the default up vector + * @endcode + */ + int32_t yaw; ///< Rotation around the up vector [-180, 180]. + int32_t pitch; ///< Rotation around the right vector [-90, 90]. + int32_t roll; ///< Rotation around the forward vector [-180, 180]. + /** + * @} + */ + + /** + * @name Bounding rectangle + * @anchor bounding + * @{ + * These fields indicate the location of the current tile, and where + * it should be mapped relative to the original surface. They are + * exported as 0.32 fixed point, and can be converted to classic + * pixel values with av_spherical_bounds(). 
+ * + * @code{.unparsed} + * +----------------+----------+ + * | |bound_top | + * | +--------+ | + * | bound_left |tile | | + * +<---------->| |<--->+bound_right + * | +--------+ | + * | | | + * | bound_bottom| | + * +----------------+----------+ + * @endcode + * + * If needed, the original video surface dimensions can be derived + * by adding the current stream or frame size to the related bounds, + * like in the following example: + * + * @code{c} + * original_width = tile->width + bound_left + bound_right; + * original_height = tile->height + bound_top + bound_bottom; + * @endcode + * + * @note These values are valid only for the tiled equirectangular + * projection type (@ref AV_SPHERICAL_EQUIRECTANGULAR_TILE), + * and should be ignored in all other cases. + */ + uint32_t bound_left; ///< Distance from the left edge + uint32_t bound_top; ///< Distance from the top edge + uint32_t bound_right; ///< Distance from the right edge + uint32_t bound_bottom; ///< Distance from the bottom edge + /** + * @} + */ + + /** + * Number of pixels to pad from the edge of each cube face. + * + * @note This value is valid for only for the cubemap projection type + * (@ref AV_SPHERICAL_CUBEMAP), and should be ignored in all other + * cases. + */ + uint32_t padding; +} AVSphericalMapping; + +/** + * Allocate a AVSphericalVideo structure and initialize its fields to default + * values. + * + * @return the newly allocated struct or NULL on failure + */ +AVSphericalMapping *liteav_av_spherical_alloc(size_t *size); + +/** + * Convert the @ref bounding fields from an AVSphericalVideo + * from 0.32 fixed point to pixels. + * + * @param map The AVSphericalVideo map to read bound values from. + * @param width Width of the current frame or stream. + * @param height Height of the current frame or stream. + * @param left Pixels from the left edge. + * @param top Pixels from the top edge. + * @param right Pixels from the right edge. + * @param bottom Pixels from the bottom edge. 
+ */ +void liteav_av_spherical_tile_bounds(const AVSphericalMapping *map, + size_t width, size_t height, + size_t *left, size_t *top, + size_t *right, size_t *bottom); + +/** + * Provide a human-readable name of a given AVSphericalProjection. + * + * @param projection The input AVSphericalProjection. + * + * @return The name of the AVSphericalProjection, or "unknown". + */ +const char *liteav_av_spherical_projection_name(enum AVSphericalProjection projection); + +/** + * Get the AVSphericalProjection form a human-readable name. + * + * @param name The input string. + * + * @return The AVSphericalProjection value, or -1 if not found. + */ +int liteav_av_spherical_from_name(const char *name); +/** + * @} + * @} + */ + +#endif /* AVUTIL_SPHERICAL_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/stereo3d.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/stereo3d.h new file mode 100644 index 0000000..74c0421 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/stereo3d.h @@ -0,0 +1,234 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2013 Vittorio Giovara <vittorio.giovara@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Stereoscopic video + */ + +#ifndef AVUTIL_STEREO3D_H +#define AVUTIL_STEREO3D_H + +#include <stdint.h> + +#include "frame.h" + +/** + * @addtogroup lavu_video + * @{ + * + * @defgroup lavu_video_stereo3d Stereo3D types and functions + * @{ + */ + +/** + * @addtogroup lavu_video_stereo3d + * A stereoscopic video file consists in multiple views embedded in a single + * frame, usually describing two views of a scene. This file describes all + * possible codec-independent view arrangements. + * */ + +/** + * List of possible 3D Types + */ +enum AVStereo3DType { + /** + * Video is not stereoscopic (and metadata has to be there). + */ + AV_STEREO3D_2D, + + /** + * Views are next to each other. + * + * @code{.unparsed} + * LLLLRRRR + * LLLLRRRR + * LLLLRRRR + * ... + * @endcode + */ + AV_STEREO3D_SIDEBYSIDE, + + /** + * Views are on top of each other. + * + * @code{.unparsed} + * LLLLLLLL + * LLLLLLLL + * RRRRRRRR + * RRRRRRRR + * @endcode + */ + AV_STEREO3D_TOPBOTTOM, + + /** + * Views are alternated temporally. + * + * @code{.unparsed} + * frame0 frame1 frame2 ... + * LLLLLLLL RRRRRRRR LLLLLLLL + * LLLLLLLL RRRRRRRR LLLLLLLL + * LLLLLLLL RRRRRRRR LLLLLLLL + * ... ... ... + * @endcode + */ + AV_STEREO3D_FRAMESEQUENCE, + + /** + * Views are packed in a checkerboard-like structure per pixel. + * + * @code{.unparsed} + * LRLRLRLR + * RLRLRLRL + * LRLRLRLR + * ... + * @endcode + */ + AV_STEREO3D_CHECKERBOARD, + + /** + * Views are next to each other, but when upscaling + * apply a checkerboard pattern. 
+ * + * @code{.unparsed} + * LLLLRRRR L L L L R R R R + * LLLLRRRR => L L L L R R R R + * LLLLRRRR L L L L R R R R + * LLLLRRRR L L L L R R R R + * @endcode + */ + AV_STEREO3D_SIDEBYSIDE_QUINCUNX, + + /** + * Views are packed per line, as if interlaced. + * + * @code{.unparsed} + * LLLLLLLL + * RRRRRRRR + * LLLLLLLL + * ... + * @endcode + */ + AV_STEREO3D_LINES, + + /** + * Views are packed per column. + * + * @code{.unparsed} + * LRLRLRLR + * LRLRLRLR + * LRLRLRLR + * ... + * @endcode + */ + AV_STEREO3D_COLUMNS, +}; + +/** + * List of possible view types. + */ +enum AVStereo3DView { + /** + * Frame contains two packed views. + */ + AV_STEREO3D_VIEW_PACKED, + + /** + * Frame contains only the left view. + */ + AV_STEREO3D_VIEW_LEFT, + + /** + * Frame contains only the right view. + */ + AV_STEREO3D_VIEW_RIGHT, +}; + +/** + * Inverted views, Right/Bottom represents the left view. + */ +#define AV_STEREO3D_FLAG_INVERT (1 << 0) + +/** + * Stereo 3D type: this structure describes how two videos are packed + * within a single video surface, with additional information as needed. + * + * @note The struct must be allocated with liteav_av_stereo3d_alloc() and + * its size is not a part of the public ABI. + */ +typedef struct AVStereo3D { + /** + * How views are packed within the video. + */ + enum AVStereo3DType type; + + /** + * Additional information about the frame packing. + */ + int flags; + + /** + * Determines which views are packed. + */ + enum AVStereo3DView view; +} AVStereo3D; + +/** + * Allocate an AVStereo3D structure and set its fields to default values. + * The resulting struct can be freed using liteav_av_freep(). + * + * @return An AVStereo3D filled with default values or NULL on failure. + */ +AVStereo3D *liteav_av_stereo3d_alloc(void); + +/** + * Allocate a complete AVFrameSideData and add it to the frame. + * + * @param frame The frame which side data is added to. + * + * @return The AVStereo3D structure to be filled by caller. 
+ */ +AVStereo3D *liteav_av_stereo3d_create_side_data(AVFrame *frame); + +/** + * Provide a human-readable name of a given stereo3d type. + * + * @param type The input stereo3d type value. + * + * @return The name of the stereo3d value, or "unknown". + */ +const char *liteav_av_stereo3d_type_name(unsigned int type); + +/** + * Get the AVStereo3DType form a human-readable name. + * + * @param name The input string. + * + * @return The AVStereo3DType value, or -1 if not found. + */ +int liteav_av_stereo3d_from_name(const char *name); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_STEREO3D_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/tea.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/tea.h new file mode 100644 index 0000000..5d618c0 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/tea.h @@ -0,0 +1,72 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * A 32-bit implementation of the TEA algorithm + * Copyright (c) 2015 Vesselin Bontchev + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TEA_H +#define AVUTIL_TEA_H + +#include <stdint.h> + +/** + * @file + * @brief Public header for libavutil TEA algorithm + * @defgroup lavu_tea TEA + * @ingroup lavu_crypto + * @{ + */ + +extern const int liteav_av_tea_size; + +struct AVTEA; + +/** + * Allocate an AVTEA context + * To free the struct: liteav_av_free(ptr) + */ +struct AVTEA *liteav_av_tea_alloc(void); + +/** + * Initialize an AVTEA context. + * + * @param ctx an AVTEA context + * @param key a key of 16 bytes used for encryption/decryption + * @param rounds the number of rounds in TEA (64 is the "standard") + */ +void liteav_av_tea_init(struct AVTEA *ctx, const uint8_t key[16], int rounds); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVTEA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_tea_crypt(struct AVTEA *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_TEA_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/threadmessage.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/threadmessage.h new file mode 100644 index 0000000..f6a9b31 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/threadmessage.h @@ -0,0 +1,116 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is 
part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_THREADMESSAGE_H +#define AVUTIL_THREADMESSAGE_H + +typedef struct AVThreadMessageQueue AVThreadMessageQueue; + +typedef enum AVThreadMessageFlags { + + /** + * Perform non-blocking operation. + * If this flag is set, send and recv operations are non-blocking and + * return AVERROR(EAGAIN) immediately if they can not proceed. + */ + AV_THREAD_MESSAGE_NONBLOCK = 1, + +} AVThreadMessageFlags; + +/** + * Allocate a new message queue. + * + * @param mq pointer to the message queue + * @param nelem maximum number of elements in the queue + * @param elsize size of each element in the queue + * @return >=0 for success; <0 for error, in particular AVERROR(ENOSYS) if + * lavu was built without thread support + */ +int liteav_av_thread_message_queue_alloc(AVThreadMessageQueue **mq, + unsigned nelem, + unsigned elsize); + +/** + * Free a message queue. + * + * The message queue must no longer be in use by another thread. + */ +void liteav_av_thread_message_queue_free(AVThreadMessageQueue **mq); + +/** + * Send a message on the queue. + */ +int liteav_av_thread_message_queue_send(AVThreadMessageQueue *mq, + void *msg, + unsigned flags); + +/** + * Receive a message from the queue. 
+ */ +int liteav_av_thread_message_queue_recv(AVThreadMessageQueue *mq, + void *msg, + unsigned flags); + +/** + * Set the sending error code. + * + * If the error code is set to non-zero, liteav_av_thread_message_queue_send() will + * return it immediately. Conventional values, such as AVERROR_EOF or + * AVERROR(EAGAIN), can be used to cause the sending thread to stop or + * suspend its operation. + */ +void liteav_av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, + int err); + +/** + * Set the receiving error code. + * + * If the error code is set to non-zero, liteav_av_thread_message_queue_recv() will + * return it immediately when there are no longer available messages. + * Conventional values, such as AVERROR_EOF or AVERROR(EAGAIN), can be used + * to cause the receiving thread to stop or suspend its operation. + */ +void liteav_av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, + int err); + +/** + * Set the optional free message callback function which will be called if an + * operation is removing messages from the queue. + */ +void liteav_av_thread_message_queue_set_free_func(AVThreadMessageQueue *mq, + void (*free_func)(void *msg)); + +/** + * Return the current number of messages in the queue. + * + * @return the current number of messages or AVERROR(ENOSYS) if lavu was built + * without thread support + */ +int liteav_av_thread_message_queue_nb_elems(AVThreadMessageQueue *mq); + +/** + * Flush the message queue + * + * This function is mostly equivalent to reading and free-ing every message + * except that it will be done in a single operation (no lock/unlock between + * reads). 
+ */ +void liteav_av_thread_message_flush(AVThreadMessageQueue *mq); + +#endif /* AVUTIL_THREADMESSAGE_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/time.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/time.h new file mode 100644 index 0000000..37d5bf4 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/time.h @@ -0,0 +1,57 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2000-2003 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TIME_H +#define AVUTIL_TIME_H + +#include <stdint.h> + +/** + * Get the current time in microseconds. + */ +int64_t liteav_av_gettime(void); + +/** + * Get the current time in microseconds since some unspecified starting point. + * On platforms that support it, the time comes from a monotonic clock + * This property makes this time source ideal for measuring relative time. + * The returned values may not be monotonic on platforms where a monotonic + * clock is not available. 
+ */ +int64_t liteav_av_gettime_relative(void); + +/** + * Indicates with a boolean result if the liteav_av_gettime_relative() time source + * is monotonic. + */ +int liteav_av_gettime_relative_is_monotonic(void); + +/** + * Sleep for a period of time. Although the duration is expressed in + * microseconds, the actual delay may be rounded to the precision of the + * system timer. + * + * @param usec Number of microseconds to sleep. + * @return zero on success or (negative) error code. + */ +int liteav_av_usleep(unsigned usec); + +#endif /* AVUTIL_TIME_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/timecode.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/timecode.h new file mode 100644 index 0000000..adc8c46 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/timecode.h @@ -0,0 +1,141 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (c) 2006 Smartjog S.A.S, Baptiste Coudurier <baptiste.coudurier@gmail.com> + * Copyright (c) 2011-2012 Smartjog S.A.S, Clément Bœsch <clement.boesch@smartjog.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Timecode helpers header + */ + +#ifndef AVUTIL_TIMECODE_H +#define AVUTIL_TIMECODE_H + +#include <stdint.h> +#include "rational.h" + +#define AV_TIMECODE_STR_SIZE 23 + +enum AVTimecodeFlag { + AV_TIMECODE_FLAG_DROPFRAME = 1<<0, ///< timecode is drop frame + AV_TIMECODE_FLAG_24HOURSMAX = 1<<1, ///< timecode wraps after 24 hours + AV_TIMECODE_FLAG_ALLOWNEGATIVE = 1<<2, ///< negative time values are allowed +}; + +typedef struct { + int start; ///< timecode frame start (first base frame number) + uint32_t flags; ///< flags such as drop frame, +24 hours support, ... + AVRational rate; ///< frame rate in rational form + unsigned fps; ///< frame per second; must be consistent with the rate field +} AVTimecode; + +/** + * Adjust frame number for NTSC drop frame time code. + * + * @param framenum frame number to adjust + * @param fps frame per second, 30 or 60 + * @return adjusted frame number + * @warning adjustment is only valid in NTSC 29.97 and 59.94 + */ +int liteav_av_timecode_adjust_ntsc_framenum2(int framenum, int fps); + +/** + * Convert frame number to SMPTE 12M binary representation. + * + * @param tc timecode data correctly initialized + * @param framenum frame number + * @return the SMPTE binary representation + * + * @note Frame number adjustment is automatically done in case of drop timecode, + * you do NOT have to call liteav_av_timecode_adjust_ntsc_framenum2(). + * @note The frame number is relative to tc->start. + * @note Color frame (CF), binary group flags (BGF) and biphase mark polarity + * correction (PC) bits are set to zero. + */ +uint32_t liteav_av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum); + +/** + * Load timecode string in buf. 
+ * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tc timecode data correctly initialized + * @param framenum frame number + * @return the buf parameter + * + * @note Timecode representation can be a negative timecode and have more than + * 24 hours, but will only be honored if the flags are correctly set. + * @note The frame number is relative to tc->start. + */ +char *liteav_av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum); + +/** + * Get the timecode string from the SMPTE timecode format. + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tcsmpte the 32-bit SMPTE timecode + * @param prevent_df prevent the use of a drop flag when it is known the DF bit + * is arbitrary + * @return the buf parameter + */ +char *liteav_av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df); + +/** + * Get the timecode string from the 25-bit timecode format (MPEG GOP format). + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tc25bit the 25-bits timecode + * @return the buf parameter + */ +char *liteav_av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit); + +/** + * Init a timecode struct with the passed parameters. + * + * @param log_ctx a pointer to an arbitrary struct of which the first field + * is a pointer to an AVClass struct (used for liteav_av_log) + * @param tc pointer to an allocated AVTimecode + * @param rate frame rate in rational form + * @param flags miscellaneous flags such as drop frame, +24 hours, ... + * (see AVTimecodeFlag) + * @param frame_start the first frame number + * @return 0 on success, AVERROR otherwise + */ +int liteav_av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start, void *log_ctx); + +/** + * Parse timecode representation (hh:mm:ss[:;.]ff). 
+ * + * @param log_ctx a pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct (used for liteav_av_log). + * @param tc pointer to an allocated AVTimecode + * @param rate frame rate in rational form + * @param str timecode string which will determine the frame start + * @return 0 on success, AVERROR otherwise + */ +int liteav_av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx); + +/** + * Check if the timecode feature is available for the given frame rate + * + * @return 0 if supported, <0 otherwise + */ +int liteav_av_timecode_check_frame_rate(AVRational rate); + +#endif /* AVUTIL_TIMECODE_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/timestamp.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/timestamp.h new file mode 100644 index 0000000..e082f01 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/timestamp.h @@ -0,0 +1,78 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * timestamp utils, mostly useful for debugging/logging purposes + */ + +#ifndef AVUTIL_TIMESTAMP_H +#define AVUTIL_TIMESTAMP_H + +#include "common.h" + +#if defined(__cplusplus) && !defined(__STDC_FORMAT_MACROS) && !defined(PRId64) +#error missing -D__STDC_FORMAT_MACROS / #define __STDC_FORMAT_MACROS +#endif + +#define AV_TS_MAX_STRING_SIZE 32 + +/** + * Fill the provided buffer with a string containing a timestamp + * representation. + * + * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE + * @param ts the timestamp to represent + * @return the buffer in input + */ +static inline char *av_ts_make_string(char *buf, int64_t ts) +{ + if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); + else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%" PRId64, ts); + return buf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. + */ +#define av_ts2str(ts) av_ts_make_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts) + +/** + * Fill the provided buffer with a string containing a timestamp time + * representation. + * + * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE + * @param ts the timestamp to represent + * @param tb the timebase of the timestamp + * @return the buffer in input + */ +static inline char *av_ts_make_time_string(char *buf, int64_t ts, AVRational *tb) +{ + if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); + else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%.6g", av_q2d(*tb) * ts); + return buf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. 
+ */ +#define av_ts2timestr(ts, tb) av_ts_make_time_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts, tb) + +#endif /* AVUTIL_TIMESTAMP_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/tree.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/tree.h new file mode 100644 index 0000000..de0c881 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/tree.h @@ -0,0 +1,139 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * A tree container. + * @author Michael Niedermayer <michaelni@gmx.at> + */ + +#ifndef AVUTIL_TREE_H +#define AVUTIL_TREE_H + +#include "attributes.h" +#include "version.h" + +/** + * @addtogroup lavu_tree AVTree + * @ingroup lavu_data + * + * Low-complexity tree container + * + * Insertion, removal, finding equal, largest which is smaller than and + * smallest which is larger than, all have O(log n) worst-case complexity. 
+ * @{ + */ + + +struct AVTreeNode; +extern const int liteav_av_tree_node_size; + +/** + * Allocate an AVTreeNode. + */ +struct AVTreeNode *liteav_av_tree_node_alloc(void); + +/** + * Find an element. + * @param root a pointer to the root node of the tree + * @param next If next is not NULL, then next[0] will contain the previous + * element and next[1] the next element. If either does not exist, + * then the corresponding entry in next is unchanged. + * @param cmp compare function used to compare elements in the tree, + * API identical to that of Standard C's qsort + * It is guaranteed that the first and only the first argument to cmp() + * will be the key parameter to liteav_av_tree_find(), thus it could if the + * user wants, be a different type (like an opaque context). + * @return An element with cmp(key, elem) == 0 or NULL if no such element + * exists in the tree. + */ +void *liteav_av_tree_find(const struct AVTreeNode *root, void *key, + int (*cmp)(const void *key, const void *b), void *next[2]); + +/** + * Insert or remove an element. + * + * If *next is NULL, then the supplied element will be removed if it exists. + * If *next is non-NULL, then the supplied element will be inserted, unless + * it already exists in the tree. + * + * @param rootp A pointer to a pointer to the root node of the tree; note that + * the root node can change during insertions, this is required + * to keep the tree balanced. + * @param key pointer to the element key to insert in the tree + * @param next Used to allocate and free AVTreeNodes. For insertion the user + * must set it to an allocated and zeroed object of at least + * liteav_av_tree_node_size bytes size. liteav_av_tree_insert() will set it to + * NULL if it has been consumed. + * For deleting elements *next is set to NULL by the user and + * liteav_av_tree_insert() will set it to the AVTreeNode which was + * used for the removed element. 
+ * This allows the use of flat arrays, which have + * lower overhead compared to many malloced elements. + * You might want to define a function like: + * @code + * void *tree_insert(struct AVTreeNode **rootp, void *key, + * int (*cmp)(void *key, const void *b), + * AVTreeNode **next) + * { + * if (!*next) + * *next = liteav_av_mallocz(liteav_av_tree_node_size); + * return liteav_av_tree_insert(rootp, key, cmp, next); + * } + * void *tree_remove(struct AVTreeNode **rootp, void *key, + * int (*cmp)(void *key, const void *b, AVTreeNode **next)) + * { + * liteav_av_freep(next); + * return liteav_av_tree_insert(rootp, key, cmp, next); + * } + * @endcode + * @param cmp compare function used to compare elements in the tree, API identical + * to that of Standard C's qsort + * @return If no insertion happened, the found element; if an insertion or + * removal happened, then either key or NULL will be returned. + * Which one it is depends on the tree state and the implementation. You + * should make no assumptions that it's one or the other in the code. + */ +void *liteav_av_tree_insert(struct AVTreeNode **rootp, void *key, + int (*cmp)(const void *key, const void *b), + struct AVTreeNode **next); + +void liteav_av_tree_destroy(struct AVTreeNode *t); + +/** + * Apply enu(opaque, &elem) to all the elements in the tree in a given range. + * + * @param cmp a comparison function that returns < 0 for an element below the + * range, > 0 for an element above the range and == 0 for an + * element inside the range + * + * @note The cmp function should use the same ordering used to construct the + * tree. 
+ */ +void liteav_av_tree_enumerate(struct AVTreeNode *t, void *opaque, + int (*cmp)(void *opaque, void *elem), + int (*enu)(void *opaque, void *elem)); + +/** + * @} + */ + +#endif /* AVUTIL_TREE_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/twofish.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/twofish.h new file mode 100644 index 0000000..eeb1057 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/twofish.h @@ -0,0 +1,71 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * An implementation of the TwoFish algorithm + * Copyright (c) 2015 Supraja Meedinti + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TWOFISH_H +#define AVUTIL_TWOFISH_H + +#include <stdint.h> + + +/** + * @file + * @brief Public header for libavutil TWOFISH algorithm + * @defgroup lavu_twofish TWOFISH + * @ingroup lavu_crypto + * @{ + */ + +extern const int liteav_av_twofish_size; + +struct AVTWOFISH; + +/** + * Allocate an AVTWOFISH context + * To free the struct: liteav_av_free(ptr) + */ +struct AVTWOFISH *liteav_av_twofish_alloc(void); + +/** + * Initialize an AVTWOFISH context. + * + * @param ctx an AVTWOFISH context + * @param key a key of size ranging from 1 to 32 bytes used for encryption/decryption + * @param key_bits number of keybits: 128, 192, 256 If less than the required, padded with zeroes to nearest valid value; return value is 0 if key_bits is 128/192/256, -1 if less than 0, 1 otherwise + */ +int liteav_av_twofish_init(struct AVTWOFISH *ctx, const uint8_t *key, int key_bits); + +/** + * Encrypt or decrypt a buffer using a previously initialized context + * + * @param ctx an AVTWOFISH context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 16 byte blocks + * @param iv initialization vector for CBC mode, NULL for ECB mode + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_twofish_crypt(struct AVTWOFISH *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t* iv, int decrypt); + +/** + * @} + */ +#endif /* AVUTIL_TWOFISH_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/tx.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/tx.h new file mode 100644 index 0000000..8b13dd4 --- /dev/null +++ 
b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/tx.h @@ -0,0 +1,82 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TX_H +#define AVUTIL_TX_H + +#include <stdint.h> +#include <stddef.h> + +typedef struct AVTXContext AVTXContext; + +typedef struct AVComplexFloat { + float re, im; +} AVComplexFloat; + +enum AVTXType { + /** + * Standard complex to complex FFT with sample data type AVComplexFloat. + * Scaling currently unsupported + */ + AV_TX_FLOAT_FFT = 0, + /** + * Standard MDCT with sample data type of float and a scale type of + * float. Length is the frame size, not the window size (which is 2x frame) + */ + AV_TX_FLOAT_MDCT = 1, +}; + +/** + * Function pointer to a function to perform the transform. + * + * @note Using a different context than the one allocated during liteav_av_tx_init() + * is not allowed. 
+ * + * @param s the transform context + * @param out the output array + * @param in the input array + * @param stride the input or output stride (depending on transform direction) + * in bytes, currently implemented for all MDCT transforms + */ +typedef void (*av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride); + +/** + * Initialize a transform context with the given configuration + * Currently power of two lengths from 4 to 131072 are supported, along with + * any length decomposable to a power of two and either 3, 5 or 15. + * + * @param ctx the context to allocate, will be NULL on error + * @param tx pointer to the transform function pointer to set + * @param type type the type of transform + * @param inv whether to do an inverse or a forward transform + * @param len the size of the transform in samples + * @param scale pointer to the value to scale the output if supported by type + * @param flags currently unused + * + * @return 0 on success, negative error code on failure + */ +int liteav_av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, + int inv, int len, const void *scale, uint64_t flags); + +/** + * Frees a context and sets ctx to NULL, does nothing when ctx == NULL + */ +void liteav_av_tx_uninit(AVTXContext **ctx); + +#endif /* AVUTIL_TX_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/version.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/version.h new file mode 100644 index 0000000..8f6da6a --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/version.h @@ -0,0 +1,139 @@ +/* + * copyright (c) 2003 Fabrice Bellard + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu + * Libavutil version macros + */ + +#ifndef AVUTIL_VERSION_H +#define AVUTIL_VERSION_H + +#include "macros.h" + +/** + * @addtogroup version_utils + * + * Useful to check and match library version in order to maintain + * backward compatibility. + * + * The FFmpeg libraries follow a versioning scheme very similar to + * Semantic Versioning (http://semver.org/) + * The difference is that the component called PATCH is called MICRO in FFmpeg + * and its value is reset to 100 instead of 0 to keep it above or equal to 100. + * Also we do not increase MICRO for every bugfix or change in git master. + * + * Prior to FFmpeg 3.2 point releases did not change any lib version number to + * avoid aliasing different git master checkouts. + * Starting with FFmpeg 3.2, the released library versions will occupy + * a separate MAJOR.MINOR that is not used on the master development branch. + * That is if we branch a release of master 55.10.123 we will bump to 55.11.100 + * for the release and master will continue at 55.12.100 after it. Each new + * point release will then bump the MICRO improving the usefulness of the lib + * versions. 
+ * + * @{ + */ + +#define AV_VERSION_INT(a, b, c) ((a)<<16 | (b)<<8 | (c)) +#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c +#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c) + +/** + * Extract version components from the full ::AV_VERSION_INT int as returned + * by functions like ::avformat_version() and ::avcodec_version() + */ +#define AV_VERSION_MAJOR(a) ((a) >> 16) +#define AV_VERSION_MINOR(a) (((a) & 0x00FF00) >> 8) +#define AV_VERSION_MICRO(a) ((a) & 0xFF) + +/** + * @} + */ + +/** + * @defgroup lavu_ver Version and Build diagnostics + * + * Macros and function useful to check at compiletime and at runtime + * which version of libavutil is in use. + * + * @{ + */ + +#define LIBAVUTIL_VERSION_MAJOR 56 +#define LIBAVUTIL_VERSION_MINOR 22 +#define LIBAVUTIL_VERSION_MICRO 100 + +#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ + LIBAVUTIL_VERSION_MINOR, \ + LIBAVUTIL_VERSION_MICRO) +#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \ + LIBAVUTIL_VERSION_MINOR, \ + LIBAVUTIL_VERSION_MICRO) +#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT + +#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION) + +/** + * @defgroup lavu_depr_guards Deprecation Guards + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + * + * @note, when bumping the major version it is recommended to manually + * disable each FF_API_* in its own commit instead of disabling them all + * at once through the bump. This improves the git bisect-ability of the change. 
+ * + * @{ + */ + +#ifndef FF_API_VAAPI +#define FF_API_VAAPI (LIBAVUTIL_VERSION_MAJOR < 57) +#endif +#ifndef FF_API_FRAME_QP +#define FF_API_FRAME_QP (LIBAVUTIL_VERSION_MAJOR < 57) +#endif +#ifndef FF_API_PLUS1_MINUS1 +#define FF_API_PLUS1_MINUS1 (LIBAVUTIL_VERSION_MAJOR < 57) +#endif +#ifndef FF_API_ERROR_FRAME +#define FF_API_ERROR_FRAME (LIBAVUTIL_VERSION_MAJOR < 57) +#endif +#ifndef FF_API_PKT_PTS +#define FF_API_PKT_PTS (LIBAVUTIL_VERSION_MAJOR < 57) +#endif +#ifndef FF_API_CRYPTO_SIZE_T +#define FF_API_CRYPTO_SIZE_T (LIBAVUTIL_VERSION_MAJOR < 57) +#endif +#ifndef FF_API_FRAME_GET_SET +#define FF_API_FRAME_GET_SET (LIBAVUTIL_VERSION_MAJOR < 57) +#endif +#ifndef FF_API_PSEUDOPAL +#define FF_API_PSEUDOPAL (LIBAVUTIL_VERSION_MAJOR < 57) +#endif + + +/** + * @} + * @} + */ + +#endif /* AVUTIL_VERSION_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/xtea.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/xtea.h new file mode 100644 index 0000000..66a6842 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libavutil/xtea.h @@ -0,0 +1,95 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * A 32-bit implementation of the XTEA algorithm + * Copyright (c) 2012 Samuel Pitoiset + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_XTEA_H +#define AVUTIL_XTEA_H + +#include <stdint.h> + +/** + * @file + * @brief Public header for libavutil XTEA algorithm + * @defgroup lavu_xtea XTEA + * @ingroup lavu_crypto + * @{ + */ + +typedef struct AVXTEA { + uint32_t key[16]; +} AVXTEA; + +/** + * Allocate an AVXTEA context. + */ +AVXTEA *liteav_av_xtea_alloc(void); + +/** + * Initialize an AVXTEA context. + * + * @param ctx an AVXTEA context + * @param key a key of 16 bytes used for encryption/decryption, + * interpreted as big endian 32 bit numbers + */ +void liteav_av_xtea_init(struct AVXTEA *ctx, const uint8_t key[16]); + +/** + * Initialize an AVXTEA context. + * + * @param ctx an AVXTEA context + * @param key a key of 16 bytes used for encryption/decryption, + * interpreted as little endian 32 bit numbers + */ +void liteav_av_xtea_le_init(struct AVXTEA *ctx, const uint8_t key[16]); + +/** + * Encrypt or decrypt a buffer using a previously initialized context, + * in big endian format. + * + * @param ctx an AVXTEA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_xtea_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context, + * in little endian format. 
+ * + * @param ctx an AVXTEA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void liteav_av_xtea_le_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_XTEA_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswresample/swresample.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswresample/swresample.h new file mode 100644 index 0000000..cb7e7b7 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswresample/swresample.h @@ -0,0 +1,581 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2011-2013 Michael Niedermayer (michaelni@gmx.at) + * + * This file is part of libswresample + * + * libswresample is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * libswresample is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with libswresample; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWRESAMPLE_SWRESAMPLE_H +#define SWRESAMPLE_SWRESAMPLE_H + +/** + * @file + * @ingroup lswr + * libswresample public header + */ + +/** + * @defgroup lswr libswresample + * @{ + * + * Audio resampling, sample format conversion and mixing library. + * + * Interaction with lswr is done through SwrContext, which is + * allocated with swr_alloc() or liteav_swr_alloc_set_opts(). It is opaque, so all parameters + * must be set with the @ref avoptions API. + * + * The first thing you will need to do in order to use lswr is to allocate + * SwrContext. This can be done with swr_alloc() or liteav_swr_alloc_set_opts(). If you + * are using the former, you must set options through the @ref avoptions API. + * The latter function provides the same feature, but it allows you to set some + * common options in the same statement. + * + * For example the following code will setup conversion from planar float sample + * format to interleaved signed 16-bit integer, downsampling from 48kHz to + * 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing + * matrix). This is using the swr_alloc() function. 
+ * @code + * SwrContext *swr = swr_alloc(); + * liteav_av_opt_set_channel_layout(swr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0); + * liteav_av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); + * liteav_av_opt_set_int(swr, "in_sample_rate", 48000, 0); + * liteav_av_opt_set_int(swr, "out_sample_rate", 44100, 0); + * liteav_av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0); + * liteav_av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); + * @endcode + * + * The same job can be done using liteav_swr_alloc_set_opts() as well: + * @code + * SwrContext *swr = liteav_swr_alloc_set_opts(NULL, // we're allocating a new context + * AV_CH_LAYOUT_STEREO, // out_ch_layout + * AV_SAMPLE_FMT_S16, // out_sample_fmt + * 44100, // out_sample_rate + * AV_CH_LAYOUT_5POINT1, // in_ch_layout + * AV_SAMPLE_FMT_FLTP, // in_sample_fmt + * 48000, // in_sample_rate + * 0, // log_offset + * NULL); // log_ctx + * @endcode + * + * Once all values have been set, it must be initialized with liteav_swr_init(). If + * you need to change the conversion parameters, you can change the parameters + * using @ref AVOptions, as described above in the first example; or by using + * liteav_swr_alloc_set_opts(), but with the first argument the allocated context. + * You must then call liteav_swr_init() again. + * + * The conversion itself is done by repeatedly calling liteav_swr_convert(). + * Note that the samples may get buffered in swr if you provide insufficient + * output space or if sample rate conversion is done, which requires "future" + * samples. Samples that do not require future input can be retrieved at any + * time by using liteav_swr_convert() (in_count can be set to 0). + * At the end of conversion the resampling buffer can be flushed by calling + * liteav_swr_convert() with NULL in and 0 in_count. 
+ * + * The samples used in the conversion process can be managed with the libavutil + * @ref lavu_sampmanip "samples manipulation" API, including liteav_av_samples_alloc() + * function used in the following example. + * + * The delay between input and output, can at any time be found by using + * liteav_swr_get_delay(). + * + * The following code demonstrates the conversion loop assuming the parameters + * from above and caller-defined functions get_input() and handle_output(): + * @code + * uint8_t **input; + * int in_samples; + * + * while (get_input(&input, &in_samples)) { + * uint8_t *output; + * int out_samples = liteav_av_rescale_rnd(liteav_swr_get_delay(swr, 48000) + + * in_samples, 44100, 48000, AV_ROUND_UP); + * liteav_av_samples_alloc(&output, NULL, 2, out_samples, + * AV_SAMPLE_FMT_S16, 0); + * out_samples = liteav_swr_convert(swr, &output, out_samples, + * input, in_samples); + * handle_output(output, out_samples); + * liteav_av_freep(&output); + * } + * @endcode + * + * When the conversion is finished, the conversion + * context and everything associated with it must be freed with liteav_swr_free(). + * A liteav_swr_close() function is also available, but it exists mainly for + * compatibility with libavresample, and is not required to be called. + * + * There will be no memory leak if the data is not completely flushed before + * liteav_swr_free(). + */ + +#include <stdint.h> +#include "libavutil/channel_layout.h" +#include "libavutil/frame.h" +#include "libavutil/samplefmt.h" + +#include "libswresample/version.h" + +/** + * @name Option constants + * These constants are used for the @ref avoptions interface for lswr. + * @{ + * + */ + +#define SWR_FLAG_RESAMPLE 1 ///< Force resampling even if equal sample rate +//TODO use int resample ? +//long term TODO can we enable this dynamically? 
+ +/** Dithering algorithms */ +enum SwrDitherType { + SWR_DITHER_NONE = 0, + SWR_DITHER_RECTANGULAR, + SWR_DITHER_TRIANGULAR, + SWR_DITHER_TRIANGULAR_HIGHPASS, + + SWR_DITHER_NS = 64, ///< not part of API/ABI + SWR_DITHER_NS_LIPSHITZ, + SWR_DITHER_NS_F_WEIGHTED, + SWR_DITHER_NS_MODIFIED_E_WEIGHTED, + SWR_DITHER_NS_IMPROVED_E_WEIGHTED, + SWR_DITHER_NS_SHIBATA, + SWR_DITHER_NS_LOW_SHIBATA, + SWR_DITHER_NS_HIGH_SHIBATA, + SWR_DITHER_NB, ///< not part of API/ABI +}; + +/** Resampling Engines */ +enum SwrEngine { + SWR_ENGINE_SWR, /**< SW Resampler */ + SWR_ENGINE_SOXR, /**< SoX Resampler */ + SWR_ENGINE_NB, ///< not part of API/ABI +}; + +/** Resampling Filter Types */ +enum SwrFilterType { + SWR_FILTER_TYPE_CUBIC, /**< Cubic */ + SWR_FILTER_TYPE_BLACKMAN_NUTTALL, /**< Blackman Nuttall windowed sinc */ + SWR_FILTER_TYPE_KAISER, /**< Kaiser windowed sinc */ +}; + +/** + * @} + */ + +/** + * The libswresample context. Unlike libavcodec and libavformat, this structure + * is opaque. This means that if you would like to set options, you must use + * the @ref avoptions API and cannot directly set values to members of the + * structure. + */ +typedef struct SwrContext SwrContext; + +/** + * Get the AVClass for SwrContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see liteav_av_opt_find(). + * @return the AVClass of SwrContext + */ +const AVClass *swr_get_class(void); + +/** + * @name SwrContext constructor functions + * @{ + */ + +/** + * Allocate SwrContext. + * + * If you use this function you will need to set the parameters (manually or + * with liteav_swr_alloc_set_opts()) before calling liteav_swr_init(). + * + * @see liteav_swr_alloc_set_opts(), liteav_swr_init(), liteav_swr_free() + * @return NULL on error, allocated context otherwise + */ +struct SwrContext *swr_alloc(void); + +/** + * Initialize context after user parameters have been set. + * @note The context must be configured using the AVOption API. 
+ * + * @see liteav_av_opt_set_int() + * @see liteav_av_opt_set_dict() + * + * @param[in,out] s Swr context to initialize + * @return AVERROR error code in case of failure. + */ +int liteav_swr_init(struct SwrContext *s); + +/** + * Check whether an swr context has been initialized or not. + * + * @param[in] s Swr context to check + * @see liteav_swr_init() + * @return positive if it has been initialized, 0 if not initialized + */ +int liteav_swr_is_initialized(struct SwrContext *s); + +/** + * Allocate SwrContext if needed and set/reset common parameters. + * + * This function does not require s to be allocated with swr_alloc(). On the + * other hand, swr_alloc() can use liteav_swr_alloc_set_opts() to set the parameters + * on the allocated context. + * + * @param s existing Swr context if available, or NULL if not + * @param out_ch_layout output channel layout (AV_CH_LAYOUT_*) + * @param out_sample_fmt output sample format (AV_SAMPLE_FMT_*). + * @param out_sample_rate output sample rate (frequency in Hz) + * @param in_ch_layout input channel layout (AV_CH_LAYOUT_*) + * @param in_sample_fmt input sample format (AV_SAMPLE_FMT_*). + * @param in_sample_rate input sample rate (frequency in Hz) + * @param log_offset logging level offset + * @param log_ctx parent logging context, can be NULL + * + * @see liteav_swr_init(), liteav_swr_free() + * @return NULL on error, allocated context otherwise + */ +struct SwrContext *liteav_swr_alloc_set_opts(struct SwrContext *s, + int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, + int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, + int resampleUseSimd, + int log_offset, void *log_ctx); + +/** + * @} + * + * @name SwrContext destructor functions + * @{ + */ + +/** + * Free the given SwrContext and set the pointer to NULL. 
+ * + * @param[in] s a pointer to a pointer to Swr context + */ +void liteav_swr_free(struct SwrContext **s); + +/** + * Closes the context so that liteav_swr_is_initialized() returns 0. + * + * The context can be brought back to life by running liteav_swr_init(), + * liteav_swr_init() can also be used without liteav_swr_close(). + * This function is mainly provided for simplifying the usecase + * where one tries to support libavresample and libswresample. + * + * @param[in,out] s Swr context to be closed + */ +void liteav_swr_close(struct SwrContext *s); + +/** + * @} + * + * @name Core conversion functions + * @{ + */ + +/** Convert audio. + * + * in and in_count can be set to 0 to flush the last few samples out at the + * end. + * + * If more input is provided than output space, then the input will be buffered. + * You can avoid this buffering by using liteav_swr_get_out_samples() to retrieve an + * upper bound on the required number of output samples for the given number of + * input samples. Conversion will run directly without copying whenever possible. + * + * @param s allocated Swr context, with parameters set + * @param out output buffers, only the first one need be set in case of packed audio + * @param out_count amount of space available for output in samples per channel + * @param in input buffers, only the first one need to be set in case of packed audio + * @param in_count number of input samples available in one channel + * + * @return number of samples output per channel, negative value on error + */ +int liteav_swr_convert(struct SwrContext *s, uint8_t **out, int out_count, + const uint8_t **in , int in_count); + +/** + * Convert the next timestamp from input to output + * timestamps are in 1/(in_sample_rate * out_sample_rate) units. + * + * @note There are 2 slightly differently behaving modes. 
+ * @li When automatic timestamp compensation is not used, (min_compensation >= FLT_MAX) + * in this case timestamps will be passed through with delays compensated + * @li When automatic timestamp compensation is used, (min_compensation < FLT_MAX) + * in this case the output timestamps will match output sample numbers. + * See ffmpeg-resampler(1) for the two modes of compensation. + * + * @param s[in] initialized Swr context + * @param pts[in] timestamp for the next input sample, INT64_MIN if unknown + * @see liteav_swr_set_compensation(), liteav_swr_drop_output(), and liteav_swr_inject_silence() are + * function used internally for timestamp compensation. + * @return the output timestamp for the next output sample + */ +int64_t liteav_swr_next_pts(struct SwrContext *s, int64_t pts); + +/** + * @} + * + * @name Low-level option setting functions + * These functions provide a means to set low-level options that is not possible + * with the AVOption API. + * @{ + */ + +/** + * Activate resampling compensation ("soft" compensation). This function is + * internally called when needed in liteav_swr_next_pts(). + * + * @param[in,out] s allocated Swr context. If it is not initialized, + * or SWR_FLAG_RESAMPLE is not set, liteav_swr_init() is + * called with the flag set. + * @param[in] sample_delta delta in PTS per sample + * @param[in] compensation_distance number of samples to compensate for + * @return >= 0 on success, AVERROR error codes if: + * @li @c s is NULL, + * @li @c compensation_distance is less than 0, + * @li @c compensation_distance is 0 but sample_delta is not, + * @li compensation unsupported by resampler, or + * @li liteav_swr_init() fails when called. + */ +int liteav_swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance); + +/** + * Set a customized input channel mapping. 
+ * + * @param[in,out] s allocated Swr context, not yet initialized + * @param[in] channel_map customized input channel mapping (array of channel + * indexes, -1 for a muted channel) + * @return >= 0 on success, or AVERROR error code in case of failure. + */ +int liteav_swr_set_channel_mapping(struct SwrContext *s, const int *channel_map); + +/** + * Generate a channel mixing matrix. + * + * This function is the one used internally by libswresample for building the + * default mixing matrix. It is made public just as a utility function for + * building custom matrices. + * + * @param in_layout input channel layout + * @param out_layout output channel layout + * @param center_mix_level mix level for the center channel + * @param surround_mix_level mix level for the surround channel(s) + * @param lfe_mix_level mix level for the low-frequency effects channel + * @param rematrix_maxval if 1.0, coefficients will be normalized to prevent + * overflow. if INT_MAX, coefficients will not be + * normalized. + * @param[out] matrix mixing coefficients; matrix[i + stride * o] is + * the weight of input channel i in output channel o. + * @param stride distance between adjacent input channels in the + * matrix array + * @param matrix_encoding matrixed stereo downmix mode (e.g. dplii) + * @param log_ctx parent logging context, can be NULL + * @return 0 on success, negative AVERROR code on failure + */ +int liteav_swr_build_matrix(uint64_t in_layout, uint64_t out_layout, + double center_mix_level, double surround_mix_level, + double lfe_mix_level, double rematrix_maxval, + double rematrix_volume, double *matrix, + int stride, enum AVMatrixEncoding matrix_encoding, + void *log_ctx); + +/** + * Set a customized remix matrix. 
+ * + * @param s allocated Swr context, not yet initialized + * @param matrix remix coefficients; matrix[i + stride * o] is + * the weight of input channel i in output channel o + * @param stride offset between lines of the matrix + * @return >= 0 on success, or AVERROR error code in case of failure. + */ +int liteav_swr_set_matrix(struct SwrContext *s, const double *matrix, int stride); + +/** + * @} + * + * @name Sample handling functions + * @{ + */ + +/** + * Drops the specified number of output samples. + * + * This function, along with liteav_swr_inject_silence(), is called by liteav_swr_next_pts() + * if needed for "hard" compensation. + * + * @param s allocated Swr context + * @param count number of samples to be dropped + * + * @return >= 0 on success, or a negative AVERROR code on failure + */ +int liteav_swr_drop_output(struct SwrContext *s, int count); + +/** + * Injects the specified number of silence samples. + * + * This function, along with liteav_swr_drop_output(), is called by liteav_swr_next_pts() + * if needed for "hard" compensation. + * + * @param s allocated Swr context + * @param count number of samples to be dropped + * + * @return >= 0 on success, or a negative AVERROR code on failure + */ +int liteav_swr_inject_silence(struct SwrContext *s, int count); + +/** + * Gets the delay the next input sample will experience relative to the next output sample. + * + * Swresample can buffer data if more input has been provided than available + * output space, also converting between sample rates needs a delay. + * This function returns the sum of all such delays. + * The exact delay is not necessarily an integer value in either input or + * output sample rate. Especially when downsampling by a large value, the + * output sample rate may be a poor choice to represent the delay, similarly + * for upsampling and the input sample rate. 
+ * + * @param s swr context + * @param base timebase in which the returned delay will be: + * @li if it's set to 1 the returned delay is in seconds + * @li if it's set to 1000 the returned delay is in milliseconds + * @li if it's set to the input sample rate then the returned + * delay is in input samples + * @li if it's set to the output sample rate then the returned + * delay is in output samples + * @li if it's the least common multiple of in_sample_rate and + * out_sample_rate then an exact rounding-free delay will be + * returned + * @returns the delay in 1 / @c base units. + */ +int64_t liteav_swr_get_delay(struct SwrContext *s, int64_t base); + +/** + * Find an upper bound on the number of samples that the next liteav_swr_convert + * call will output, if called with in_samples of input samples. This + * depends on the internal state, and anything changing the internal state + * (like further liteav_swr_convert() calls) may change the number of samples + * liteav_swr_get_out_samples() returns for the same number of input samples. + * + * @param in_samples number of input samples. + * @note any call to liteav_swr_inject_silence(), liteav_swr_convert(), liteav_swr_next_pts() + * or liteav_swr_set_compensation() invalidates this limit + * @note it is recommended to pass the correct available buffer size + * to all functions like liteav_swr_convert() even if liteav_swr_get_out_samples() + * indicates that less would be used. + * @returns an upper bound on the number of samples that the next liteav_swr_convert + * will output or a negative value to indicate an error + */ +int liteav_swr_get_out_samples(struct SwrContext *s, int in_samples); + +/** + * @} + * + * @name Configuration accessors + * @{ + */ + +/** + * Return the @ref LIBSWRESAMPLE_VERSION_INT constant. + * + * This is useful to check if the build-time libswresample has the same version + * as the run-time one. 
+ * + * @returns the unsigned int-typed version + */ +unsigned liteav_swresample_version(void); + +/** + * Return the swr build-time configuration. + * + * @returns the build-time @c ./configure flags + */ +const char *liteav_swresample_configuration(void); + +/** + * Return the swr license. + * + * @returns the license of libswresample, determined at build-time + */ +const char *liteav_swresample_license(void); + +/** + * @} + * + * @name AVFrame based API + * @{ + */ + +/** + * Convert the samples in the input AVFrame and write them to the output AVFrame. + * + * Input and output AVFrames must have channel_layout, sample_rate and format set. + * + * If the output AVFrame does not have the data pointers allocated the nb_samples + * field will be set using liteav_av_frame_get_buffer() + * is called to allocate the frame. + * + * The output AVFrame can be NULL or have fewer allocated samples than required. + * In this case, any remaining samples not written to the output will be added + * to an internal FIFO buffer, to be returned at the next call to this function + * or to liteav_swr_convert(). + * + * If converting sample rate, there may be data remaining in the internal + * resampling delay buffer. liteav_swr_get_delay() tells the number of + * remaining samples. To get this data as output, call this function or + * liteav_swr_convert() with NULL input. + * + * If the SwrContext configuration does not match the output and + * input AVFrame settings the conversion does not take place and depending on + * which AVFrame is not matching AVERROR_OUTPUT_CHANGED, AVERROR_INPUT_CHANGED + * or the result of a bitwise-OR of them is returned. + * + * @see swr_delay() + * @see liteav_swr_convert() + * @see liteav_swr_get_delay() + * + * @param swr audio resample context + * @param output output AVFrame + * @param input input AVFrame + * @return 0 on success, AVERROR on failure or nonmatching + * configuration. 
+ */ +int liteav_swr_convert_frame(SwrContext *swr, + AVFrame *output, const AVFrame *input); + +/** + * Configure or reconfigure the SwrContext using the information + * provided by the AVFrames. + * + * The original resampling context is reset even on failure. + * The function calls liteav_swr_close() internally if the context is open. + * + * @see liteav_swr_close(); + * + * @param swr audio resample context + * @param output output AVFrame + * @param input input AVFrame + * @return 0 on success, AVERROR on failure. + */ +int liteav_swr_config_frame(SwrContext *swr, const AVFrame *out, const AVFrame *in); + +/** + * @} + * @} + */ + +#endif /* SWRESAMPLE_SWRESAMPLE_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswresample/version.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswresample/version.h new file mode 100644 index 0000000..8555d55 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswresample/version.h @@ -0,0 +1,45 @@ +/* + * Version macros. + * + * This file is part of libswresample + * + * libswresample is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * libswresample is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with libswresample; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWRESAMPLE_VERSION_H +#define SWRESAMPLE_VERSION_H + +/** + * @file + * Libswresample version macros + */ + +#include "libavutil/avutil.h" + +#define LIBSWRESAMPLE_VERSION_MAJOR 3 +#define LIBSWRESAMPLE_VERSION_MINOR 3 +#define LIBSWRESAMPLE_VERSION_MICRO 100 + +#define LIBSWRESAMPLE_VERSION_INT AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \ + LIBSWRESAMPLE_VERSION_MINOR, \ + LIBSWRESAMPLE_VERSION_MICRO) +#define LIBSWRESAMPLE_VERSION AV_VERSION(LIBSWRESAMPLE_VERSION_MAJOR, \ + LIBSWRESAMPLE_VERSION_MINOR, \ + LIBSWRESAMPLE_VERSION_MICRO) +#define LIBSWRESAMPLE_BUILD LIBSWRESAMPLE_VERSION_INT + +#define LIBSWRESAMPLE_IDENT "SwR" AV_STRINGIFY(LIBSWRESAMPLE_VERSION) + +#endif /* SWRESAMPLE_VERSION_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswscale/swscale.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswscale/swscale.h new file mode 100644 index 0000000..312a4fc --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswscale/swscale.h @@ -0,0 +1,337 @@ +#include "third_party/ffmpeg/ffmpeg_rename_defines.h" // add by source_replacer.py +/* + * Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWSCALE_SWSCALE_H +#define SWSCALE_SWSCALE_H + +/** + * @file + * @ingroup libsws + * external API header + */ + +#include <stdint.h> + +#include "libavutil/avutil.h" +#include "libavutil/log.h" +#include "libavutil/pixfmt.h" +#include "version.h" + +/** + * @defgroup libsws libswscale + * Color conversion and scaling library. + * + * @{ + * + * Return the LIBSWSCALE_VERSION_INT constant. + */ +unsigned liteav_swscale_version(void); + +/** + * Return the libswscale build-time configuration. + */ +const char *liteav_swscale_configuration(void); + +/** + * Return the libswscale license. 
+ */ +const char *liteav_swscale_license(void); + +/* values for the flags, the stuff on the command line is different */ +#define SWS_FAST_BILINEAR 1 +#define SWS_BILINEAR 2 +#define SWS_BICUBIC 4 +#define SWS_X 8 +#define SWS_POINT 0x10 +#define SWS_AREA 0x20 +#define SWS_BICUBLIN 0x40 +#define SWS_GAUSS 0x80 +#define SWS_SINC 0x100 +#define SWS_LANCZOS 0x200 +#define SWS_SPLINE 0x400 + +#define SWS_SRC_V_CHR_DROP_MASK 0x30000 +#define SWS_SRC_V_CHR_DROP_SHIFT 16 + +#define SWS_PARAM_DEFAULT 123456 + +#define SWS_PRINT_INFO 0x1000 + +//the following 3 flags are not completely implemented +//internal chrominance subsampling info +#define SWS_FULL_CHR_H_INT 0x2000 +//input subsampling info +#define SWS_FULL_CHR_H_INP 0x4000 +#define SWS_DIRECT_BGR 0x8000 +#define SWS_ACCURATE_RND 0x40000 +#define SWS_BITEXACT 0x80000 +#define SWS_ERROR_DIFFUSION 0x800000 + +#define SWS_MAX_REDUCE_CUTOFF 0.002 + +#define SWS_CS_ITU709 1 +#define SWS_CS_FCC 4 +#define SWS_CS_ITU601 5 +#define SWS_CS_ITU624 5 +#define SWS_CS_SMPTE170M 5 +#define SWS_CS_SMPTE240M 7 +#define SWS_CS_DEFAULT 5 +#define SWS_CS_BT2020 9 + +/** + * Return a pointer to yuv<->rgb coefficients for the given colorspace + * suitable for liteav_sws_setColorspaceDetails(). + * + * @param colorspace One of the SWS_CS_* macros. If invalid, + * SWS_CS_DEFAULT is used. + */ +const int *liteav_sws_getCoefficients(int colorspace); + +// when used for filters they must have an odd number of elements +// coeffs cannot be shared between vectors +typedef struct SwsVector { + double *coeff; ///< pointer to the list of coefficients + int length; ///< number of coefficients in the vector +} SwsVector; + +// vectors can be shared +typedef struct SwsFilter { + SwsVector *lumH; + SwsVector *lumV; + SwsVector *chrH; + SwsVector *chrV; +} SwsFilter; + +struct SwsContext; + +/** + * Return a positive value if pix_fmt is a supported input format, 0 + * otherwise. 
+ */ +int liteav_sws_isSupportedInput(enum AVPixelFormat pix_fmt); + +/** + * Return a positive value if pix_fmt is a supported output format, 0 + * otherwise. + */ +int liteav_sws_isSupportedOutput(enum AVPixelFormat pix_fmt); + +/** + * @param[in] pix_fmt the pixel format + * @return a positive value if an endianness conversion for pix_fmt is + * supported, 0 otherwise. + */ +int liteav_sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt); + +/** + * Allocate an empty SwsContext. This must be filled and passed to + * liteav_sws_init_context(). For filling see AVOptions, options.c and + * liteav_sws_setColorspaceDetails(). + */ +struct SwsContext *liteav_sws_alloc_context(void); + +/** + * Initialize the swscaler context sws_context. + * + * @return zero or positive value on success, a negative value on + * error + */ +av_warn_unused_result +int liteav_sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter); + +/** + * Free the swscaler context swsContext. + * If swsContext is NULL, then does nothing. + */ +void liteav_sws_freeContext(struct SwsContext *swsContext); + +/** + * Allocate and return an SwsContext. You need it to perform + * scaling/conversion operations using liteav_sws_scale(). 
+ * + * @param srcW the width of the source image + * @param srcH the height of the source image + * @param srcFormat the source image format + * @param dstW the width of the destination image + * @param dstH the height of the destination image + * @param dstFormat the destination image format + * @param flags specify which algorithm and options to use for rescaling + * @param param extra parameters to tune the used scaler + * For SWS_BICUBIC param[0] and [1] tune the shape of the basis + * function, param[0] tunes f(1) and param[1] f´(1) + * For SWS_GAUSS param[0] tunes the exponent and thus cutoff + * frequency + * For SWS_LANCZOS param[0] tunes the width of the window function + * @return a pointer to an allocated context, or NULL in case of error + * @note this function is to be removed after a saner alternative is + * written + */ +struct SwsContext *liteav_sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat, + int dstW, int dstH, enum AVPixelFormat dstFormat, + int flags, SwsFilter *srcFilter, + SwsFilter *dstFilter, const double *param); + +/** + * Scale the image slice in srcSlice and put the resulting scaled + * slice in the image in dst. A slice is a sequence of consecutive + * rows in an image. + * + * Slices have to be provided in sequential order, either in + * top-bottom or bottom-top order. If slices are provided in + * non-sequential order the behavior of the function is undefined. 
+ * + * @param c the scaling context previously created with + * liteav_sws_getContext() + * @param srcSlice the array containing the pointers to the planes of + * the source slice + * @param srcStride the array containing the strides for each plane of + * the source image + * @param srcSliceY the position in the source image of the slice to + * process, that is the number (counted starting from + * zero) in the image of the first row of the slice + * @param srcSliceH the height of the source slice, that is the number + * of rows in the slice + * @param dst the array containing the pointers to the planes of + * the destination image + * @param dstStride the array containing the strides for each plane of + * the destination image + * @return the height of the output slice + */ +int liteav_sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], + const int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *const dst[], const int dstStride[]); + +/** + * @param dstRange flag indicating the while-black range of the output (1=jpeg / 0=mpeg) + * @param srcRange flag indicating the while-black range of the input (1=jpeg / 0=mpeg) + * @param table the yuv2rgb coefficients describing the output yuv space, normally liteav_ff_yuv2rgb_coeffs[x] + * @param inv_table the yuv2rgb coefficients describing the input yuv space, normally liteav_ff_yuv2rgb_coeffs[x] + * @param brightness 16.16 fixed point brightness correction + * @param contrast 16.16 fixed point contrast correction + * @param saturation 16.16 fixed point saturation correction + * @return -1 if not supported + */ +int liteav_sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4], + int srcRange, const int table[4], int dstRange, + int brightness, int contrast, int saturation); + +/** + * @return -1 if not supported + */ +int liteav_sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, + int *srcRange, int **table, int *dstRange, + int *brightness, int *contrast, int *saturation); + 
+/** + * Allocate and return an uninitialized vector with length coefficients. + */ +SwsVector *liteav_sws_allocVec(int length); + +/** + * Return a normalized Gaussian curve used to filter stuff + * quality = 3 is high quality, lower is lower quality. + */ +SwsVector *liteav_sws_getGaussianVec(double variance, double quality); + +/** + * Scale all the coefficients of a by the scalar value. + */ +void liteav_sws_scaleVec(SwsVector *a, double scalar); + +/** + * Scale all the coefficients of a so that their sum equals height. + */ +void liteav_sws_normalizeVec(SwsVector *a, double height); + +#if FF_API_SWS_VECTOR +attribute_deprecated SwsVector *liteav_sws_getConstVec(double c, int length); +attribute_deprecated SwsVector *liteav_sws_getIdentityVec(void); +attribute_deprecated void liteav_sws_convVec(SwsVector *a, SwsVector *b); +attribute_deprecated void liteav_sws_addVec(SwsVector *a, SwsVector *b); +attribute_deprecated void liteav_sws_subVec(SwsVector *a, SwsVector *b); +attribute_deprecated void liteav_sws_shiftVec(SwsVector *a, int shift); +attribute_deprecated SwsVector *liteav_sws_cloneVec(SwsVector *a); +attribute_deprecated void liteav_sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level); +#endif + +void liteav_sws_freeVec(SwsVector *a); + +SwsFilter *liteav_sws_getDefaultFilter(float lumaGBlur, float chromaGBlur, + float lumaSharpen, float chromaSharpen, + float chromaHShift, float chromaVShift, + int verbose); +void liteav_sws_freeFilter(SwsFilter *filter); + +/** + * Check if context can be reused, otherwise reallocate a new one. + * + * If context is NULL, just calls liteav_sws_getContext() to get a new + * context. Otherwise, checks if the parameters are the ones already + * saved in context. If that is the case, returns the current + * context. Otherwise, frees context and gets a new context with + * the new parameters. + * + * Be warned that srcFilter and dstFilter are not checked, they + * are assumed to remain the same. 
+ */ +struct SwsContext *liteav_sws_getCachedContext(struct SwsContext *context, + int srcW, int srcH, enum AVPixelFormat srcFormat, + int dstW, int dstH, enum AVPixelFormat dstFormat, + int flags, SwsFilter *srcFilter, + SwsFilter *dstFilter, const double *param); + +/** + * Convert an 8-bit paletted frame into a frame with a color depth of 32 bits. + * + * The output frame will have the same packed format as the palette. + * + * @param src source frame buffer + * @param dst destination frame buffer + * @param num_pixels number of pixels to convert + * @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src + */ +void liteav_sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette); + +/** + * Convert an 8-bit paletted frame into a frame with a color depth of 24 bits. + * + * With the palette format "ABCD", the destination frame ends up with the format "ABC". + * + * @param src source frame buffer + * @param dst destination frame buffer + * @param num_pixels number of pixels to convert + * @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src + */ +void liteav_sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette); + +/** + * Get the AVClass for swsContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see liteav_av_opt_find(). 
+ */ +const AVClass *liteav_sws_get_class(void); + +/** + * @} + */ + +#endif /* SWSCALE_SWSCALE_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswscale/version.h b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswscale/version.h new file mode 100644 index 0000000..f1bed09 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Headers/libswscale/version.h @@ -0,0 +1,53 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWSCALE_VERSION_H +#define SWSCALE_VERSION_H + +/** + * @file + * swscale version macros + */ + +#include "libavutil/version.h" + +#define LIBSWSCALE_VERSION_MAJOR 5 +#define LIBSWSCALE_VERSION_MINOR 3 +#define LIBSWSCALE_VERSION_MICRO 100 + +#define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \ + LIBSWSCALE_VERSION_MINOR, \ + LIBSWSCALE_VERSION_MICRO) +#define LIBSWSCALE_VERSION AV_VERSION(LIBSWSCALE_VERSION_MAJOR, \ + LIBSWSCALE_VERSION_MINOR, \ + LIBSWSCALE_VERSION_MICRO) +#define LIBSWSCALE_BUILD LIBSWSCALE_VERSION_INT + +#define LIBSWSCALE_IDENT "SwS" AV_STRINGIFY(LIBSWSCALE_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. 
+ */ + +#ifndef FF_API_SWS_VECTOR +#define FF_API_SWS_VECTOR (LIBSWSCALE_VERSION_MAJOR < 6) +#endif + +#endif /* SWSCALE_VERSION_H */ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Info.plist b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Info.plist new file mode 100644 index 0000000..e20a2d6 Binary files /dev/null and b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Info.plist differ diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Modules/module.modulemap b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Modules/module.modulemap new file mode 100644 index 0000000..b5be796 --- /dev/null +++ b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/Modules/module.modulemap @@ -0,0 +1,6 @@ +framework module TXFFmpeg { + umbrella header "TXFFmpeg.h" + + export * + module * { export * } +} diff --git a/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/TXFFmpeg b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/TXFFmpeg new file mode 100755 index 0000000..f30c0a0 Binary files /dev/null and b/HHVDoctorSDK/TXFFmpeg.xcframework/ios-x86_64-simulator/TXFFmpeg.framework/TXFFmpeg differ diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/.DS_Store b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/.DS_Store new file mode 100644 index 0000000..db9f358 Binary files /dev/null and b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/.DS_Store differ diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/ITRTCAudioPacketListener.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/ITRTCAudioPacketListener.h new file mode 100644 index 0000000..3930f24 --- /dev/null +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/ITRTCAudioPacketListener.h @@ -0,0 +1,34 @@ +// Copyright (c) 2021 Tencent. All rights reserved. 
+ +#ifndef SDK_TRTC_INCLUDE_ITRTCAUDIOPACKETLISTENER_H_ +#define SDK_TRTC_INCLUDE_ITRTCAUDIOPACKETLISTENER_H_ + +#include <stdio.h> + +#include "TXLiteAVBuffer.h" +#include "TXLiteAVSymbolExport.h" + +#ifdef __cplusplus + +namespace liteav { +struct LITEAV_EXPORT TRTCAudioPacket { + const char* userId; + liteav::TXLiteAVBuffer* extraData; +}; + +class LITEAV_EXPORT ITRTCAudioPacketListener { + public: + virtual ~ITRTCAudioPacketListener() {} + + // 网络层接收到音频数据包 + virtual bool onRecvAudioPacket(TRTCAudioPacket& data) { return false; } + + // 网络层即将发送的音频数据包 + virtual bool onSendAudioPacket(TRTCAudioPacket& data) { return false; } +}; + +} // namespace liteav + +#endif // __cplusplus + +#endif // SDK_TRTC_INCLUDE_ITRTCAUDIOPACKETLISTENER_H_ diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloud.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloud.h index 0e3a2f0..dc5cc81 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloud.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloud.h @@ -1,11 +1,10 @@ -/* +// Copyright (c) 2021 Tencent. All rights reserved. 
+ +/** * Module: TRTCCloud @ TXLiteAVSDK - * - * Function: 腾讯云视频通话功能的主要接口类 - * - * Version: 8.6.10094 + * Function: 腾讯云 TRTC 主功能接口 + * Version: <:Version:> */ - #import <Foundation/Foundation.h> #import <VideoToolbox/VideoToolbox.h> #import "TRTCCloudDelegate.h" @@ -14,1265 +13,1224 @@ #import "TXAudioEffectManager.h" #import "TXDeviceManager.h" +NS_ASSUME_NONNULL_BEGIN + /// @defgroup TRTCCloud_ios TRTCCloud -/// 腾讯云视频通话功能的主要接口类 +/// 腾讯云 TRTC 主功能接口 /// @{ -@interface TRTCCloud : NSObject - -// 请使用 +sharedIntance 方法 -+ (instancetype)new __attribute__((unavailable("Use +sharedInstance instead"))); -- (instancetype)init __attribute__((unavailable("Use +sharedInstance instead"))); - +LITEAV_EXPORT @interface TRTCCloud : NSObject ///////////////////////////////////////////////////////////////////////////////// // -// SDK 基础函数 +// 创建实例和事件回调 // ///////////////////////////////////////////////////////////////////////////////// - -/// @name 创建与销毁 +/// @name 创建实例和事件回调 /// @{ /** -* 创建 TRTCCloud 单例 -*/ + * 1.1 创建 TRTCCloud 实例(单例模式) + */ + (instancetype)sharedInstance; /** -* 销毁 TRTCCloud 单例 -*/ + * 1.2 销毁 TRTCCloud 实例(单例模式) + */ + (void)destroySharedIntance; /** -* 设置回调接口 TRTCCloudDelegate -* -* 您可以通过 TRTCCloudDelegate 获得来自 SDK 的各种状态通知,详见 TRTCCloudDelegate.h 中的定义 -*/ -@property (nonatomic, weak) id<TRTCCloudDelegate> delegate; + * 1.3 设置 TRTC 事件回调 + * + * 您可以通过 {@link TRTCCloudDelegate} 获得来自 SDK 的各类事件通知(比如:错误码,警告码,音视频状态参数等)。 + */ +@property(nonatomic, weak, nullable) id<TRTCCloudDelegate> delegate; /** -* 设置驱动 TRTCCloudDelegate 回调的队列 -* -* SDK 默认会采用 Main Queue 作为驱动 TRTCCloudDelegate。如果您不指定自己的 delegateQueue, -* SDK 的 TRTCCloudDelegate 回调都将由 Main Queue 来调用。此时您在 TRTCCloudDelegate 的回调函数里操作 UI 是线程安全的。 -*/ -@property (nonatomic, strong) dispatch_queue_t delegateQueue; + * 1.4 设置驱动 TRTCCloudDelegate 事件回调的队列 + * + * 如果您不指定自己的 delegateQueue,SDK 默认会采用 MainQueue 作为驱动 {@link TRTCCloudDelegate} 事件回调的队列。 + * 也就是当您不设置 delegateQueue 属性时,{@link TRTCCloudDelegate} 中的所有回调函数都是由 MainQueue 来驱动的。 + 
* @note 如果您指定了自己的 delegateQueue,请不要在 {@link TRTCCloudDelegate} 回调函数中操作 UI,否则会引发线程安全问题。 + */ +@property(nonatomic, strong) dispatch_queue_t delegateQueue; /// @} - ///////////////////////////////////////////////////////////////////////////////// // -// (一)房间相关接口函数 +// 房间相关接口函数 // ///////////////////////////////////////////////////////////////////////////////// -#pragma mark - 房间相关接口函数 /// @name 房间相关接口函数 /// @{ /** - * 1.1 进入房间 - * - * 调用接口后,您会收到来自 TRTCCloudDelegate 中的 onEnterRoom(result) 回调: - * - 如果加入成功,result 会是一个正数(result > 0),表示加入房间的时间消耗,单位是毫秒(ms)。 - * - 如果加入失败,result 会是一个负数(result < 0),表示进房失败的错误码。 - * - * 进房失败的错误码含义请参见[错误码](https://cloud.tencent.com/document/product/647/32257)。 - * - * - 【视频通话】{@link TRTCAppSceneVideoCall}:<br> - * 能力:支持720P、1080P高清画质,单个房间最多支持300人同时在线,最高支持50人同时发言。<br> - * 适用:[1对1视频通话]、[300人视频会议]、[在线问诊]、[视频聊天]、[远程面试]等。<br> - * - 【语音通话】{@link TRTCAppSceneAudioCall}:<br> - * 能力:支持 48kHz,支持双声道。单个房间最多支持300人同时在线,最高支持50人同时发言。<br> - * 适用:[[1对1语音通话]、[300人语音会议]、[语音聊天]、[语音会议]、[在线狼人杀]等。<br> - * - 【连麦直播】{@link TRTCAppSceneLIVE}:<br> - * 能力:支持平滑上下麦,切换过程无需等待,主播延时小于300ms;支持十万级别观众同时播放,播放延时低至1000ms。<br> - * 适用:[视频低延时直播]、[十万人互动课堂]、[视频直播 PK]、[视频相亲房]、[互动课堂]、[远程培训]、[超大型会议]等。<br> - * - 【语聊房】{@link TRTCAppSceneVoiceChatRoom}:<br> - * 能力:支持平滑上下麦,切换过程无需等待,主播延时小于300ms;支持十万级别观众同时播放,播放延时低至1000ms。<br> - * 适用:[语聊房]、[语音直播连麦]、[K 歌房]、[FM 电台]等。<br> - * - * @param param 进房参数,请参考 TRTCParams - * @param scene 应用场景,目前支持视频通话(VideoCall)、连麦直播(Live)、语音通话(AudioCall)、语聊房(VoiceChatRoom)四种场景。 + * 2.1 进入房间 * + * TRTC 的所有用户都需要进入房间才能“发布”或“订阅”音视频流,“发布”是指将自己的音频和视频推送到云端,“订阅”是指从云端拉取房间里其他用户的音视频流。 + * 调用该接口时,您需要指定您的应用场景 {@link TRTCAppScene} 以获取最佳的音视频传输体验,这些场景可以分成两大类: + * **实时通话:** + * 包括 {@link TRTCAppSceneVideoCall} 和 {@link TRTCAppSceneAudioCall} 两个可选项,分别是视频通话和语音通话,该模式适合 1对1 的音视频通话,或者参会人数在 300 人以内的在线会议。 + * **在线直播:** + * 包括 {@link TRTCAppSceneLIVE} 和 {@link TRTCAppSceneVoiceChatRoom} 两个可选项,分别是视频直播和语音直播,该模式适合十万人以内的在线直播场景,但需要您在接下来介绍的 TRTCParams 参数中指定 **角色(role)** 这个字段,也就是将房间中的用户区分为 **主播** + * ({@link 
TRTCRoleAnchor}) 和 **观众** ({@link TRTCRoleAudience}) 两种不同的角色。 调用该接口后,您会收到来自 {@link TRTCCloudDelegate} 中的 onEnterRoom(result) 回调: + * - 如果进房成功,参数 result 会是一个正数(result > 0),表示从函数调用到进入房间所花费的时间,单位是毫秒(ms)。 + * - 如果进房失败,参数 result 会是一个负数(result < 0),表示进房失败的[错误码](https://cloud.tencent.com/document/product/647/32257)。 + * @param param 进房参数,用于指定用户的身份、角色和安全票据等信息,详情请参考 {@link TRTCParams} 。 + * @param scene 应用场景,用于指定您的业务场景,同一个房间内的所有用户需要设定相同的 {@link TRTCAppScene}。 * @note - * 1. 当 scene 选择为 TRTCAppSceneLIVE 或 TRTCAppSceneVoiceChatRoom 时,您必须通过 TRTCParams 中的 role 字段指定当前用户的角色。<br> - * 2. 不管进房是否成功,enterRoom 都必须与 exitRoom 配对使用,在调用 exitRoom 前再次调用 enterRoom 函数会导致不可预期的错误问题。 + * 1. 同一个房间内的所有用户需要设定相同的 scene。不同的 scene 会导致偶现的异常问题。 + * 2. 当您指定参数 scene 为 {@link TRTCAppSceneLIVE} 或 {@link TRTCAppSceneVoiceChatRoom} 时,您必须通过 {@link TRTCParams} 中的 “role” 字段为当前用户设定他/她在房间中的角色。 + * 3. 请您尽量保证 {@link enterRoom} 与 {@link exitRoom} 前后配对使用,即保证”先退出前一个房间再进入下一个房间”,否则会导致很多异常问题。 */ - (void)enterRoom:(TRTCParams *)param appScene:(TRTCAppScene)scene; /** - * 1.2 离开房间 - * - * 调用 exitRoom() 接口会执行退出房间的相关逻辑,例如释放音视频设备资源和编解码器资源等。 - * 待资源释放完毕,SDK 会通过 TRTCCloudDelegate 中的 onExitRoom() 回调通知到您。 + * 2.2 离开房间 * - * 如果您要再次调用 enterRoom() 或者切换到其他的音视频 SDK,请等待 onExitRoom() 回调到来之后再执行相关操作。 - * 否则可能会遇到摄像头或麦克风(例如 iOS 里的 AudioSession)被占用等各种异常问题。 + * 调用该接口会让用户离开自己所在的音视频房间,并释放摄像头、麦克风、扬声器等设备资源。 + * 等资源释放完毕之后,SDK 会通过 {@link TRTCCloudDelegate} 中的 onExitRoom() 回调向您通知。 + * 如果您要再次调用 {@link enterRoom} 或者切换到其他的供应商的 SDK,建议等待 onExitRoom() 回调到来之后再执行之后的操作,以避免摄像头或麦克风被占用的问题。 */ - (void)exitRoom; - /** - * 1.3 切换角色,仅适用于直播场景(TRTCAppSceneLIVE 和 TRTCAppSceneVoiceChatRoom) + * 2.3 切换角色 * - * 在直播场景下,一个用户可能需要在“观众”和“主播”之间来回切换。 - * 您可以在进房前通过 TRTCParams 中的 role 字段确定角色,也可以通过 switchRole 在进房后切换角色。 - * - * @param role 目标角色,默认为主播: - * - {@link TRTCRoleAnchor} 主播,可以上行视频和音频,一个房间里最多支持50个主播同时上行音视频。 - * - {@link TRTCRoleAudience} 观众,只能观看,不能上行视频和音频,一个房间里的观众人数没有上限。 + * 调用本接口可以实现用户在“主播”和“观众”两种角色之间来回切换。 + * 由于视频直播和语音聊天室需要支持多达10万名观众同时观看,所以设定了“只有主播才能发布自己的音视频”的规则。 + * 
因此,当有些观众希望发布自己的音视频流(以便能跟主播互动)时,就需要先把自己的角色切换成“主播”。 + * 您可以在进入房间时通过 {@link TRTCParams} 中的 role 字段事先确定用户的角色,也可以在进入房间后通过 switchRole 接口动态切换角色。 + * @param role 角色,默认为“主播”: + * - {@link TRTCRoleAnchor} :主播,可以发布自己的音视频,同一个房间里最多支持50个主播同时发布音视频。 + * - {@link TRTCRoleAudience} :观众,不能发布自己的音视频流,只能观看房间中其他主播的音视频。如果要发布自己的音视频,需要先通过 {@link switchRole} 切换成“主播”,同一个房间内同时最多可以容纳 10 万名观众。 + * @note + * 1. 该接口仅适用于视频直播({@link TRTCAppSceneLIVE})和语音聊天室({@link TRTCAppSceneVoiceChatRoom})这两个场景。 + * 2. 如果您在 {@link enterRoom} 时指定的 scene 为 {@link TRTCAppSceneVideoCall} 或 {@link TRTCAppSceneAudioCall},请不要调用这个接口。 */ --(void)switchRole:(TRTCRoleType)role; - +- (void)switchRole:(TRTCRoleType)role; /** - * 1.4 请求跨房通话(主播 PK) + * 2.4 切换角色(支持设置权限位) * - * TRTC 中两个不同音视频房间中的主播,可以通过“跨房通话”功能拉通连麦通话功能。使用此功能时, - * 两个主播无需退出各自原来的直播间即可进行“连麦 PK”。 + * 调用本接口可以实现用户在“主播”和“观众”两种角色之间来回切换。 + * 由于视频直播和语音聊天室需要支持多达10万名观众同时观看,所以设定了“只有主播才能发布自己的音视频”的规则。 + * 因此,当有些观众希望发布自己的音视频流(以便能跟主播互动)时,就需要先把自己的角色切换成“主播”。 + * 您可以在进入房间时通过 {@link TRTCParams} 中的 role 字段事先确定用户的角色,也可以在进入房间后通过 switchRole 接口动态切换角色。 + * @param role 角色,默认为“主播”: + * - {@link TRTCRoleAnchor} :主播,可以发布自己的音视频,同一个房间里最多支持50个主播同时发布音视频。 + * - {@link TRTCRoleAudience} :观众,不能发布自己的音视频流,只能观看房间中其他主播的音视频。如果要发布自己的音视频,需要先通过 {@link switchRole} 切换成“主播”,同一个房间内同时最多可以容纳 10 万名观众。 + * @param privateMapKey 用于权限控制的权限票据,当您希望某个房间只能让特定的 userId 进入或者上行视频时,需要使用 privateMapKey 进行权限保护。 + * - 仅建议有高级别安全需求的客户使用,更多详情请参见 [开启高级权限控制](https://cloud.tencent.com/document/product/647/32240)。 + * @note + * 1. 该接口仅适用于视频直播({@link TRTCAppSceneLIVE})和语音聊天室({@link TRTCAppSceneVoiceChatRoom})这两个场景。 + * 2. 
如果您在 {@link enterRoom} 时指定的 scene 为 {@link TRTCAppSceneVideoCall} 或 {@link TRTCAppSceneAudioCall},请不要调用这个接口。 + */ +- (void)switchRole:(TRTCRoleType)role privateMapKey:(NSString *)privateMapKey; + +/** + * 2.5 切换房间 * - * 例如:当房间“001”中的主播 A 通过 connectOtherRoom() 跟房间“002”中的主播 B 拉通跨房通话后, - * 房间“001”中的用户都会收到主播 B 的 onUserEnter(B) 回调和 onUserVideoAvailable(B,YES) 回调。 - * 房间“002”中的用户都会收到主播 A 的 onUserEnter(A) 回调和 onUserVideoAvailable(A,YES) 回调。 + * 使用该接口可以让用户可以快速从一个房间切换到另一个房间。 + * - 如果用户的身份是“观众”,该接口的调用效果等同于 exitRoom(当前房间) + enterRoom(新的房间)。 + * - 如果用户的身份是“主播”,该接口在切换房间的同时还会保持自己的音视频发布状态,因此在房间切换过程中,摄像头的预览和声音的采集都不会中断。 * - * 简言之,跨房通话的本质,就是把两个不同房间中的主播相互分享,让每个房间里的观众都能看到两个主播。 + * 该接口适用于在线教育场景中,监课老师在多个房间中进行快速切换的场景。在该场景下使用 switchRoom 可以获得比 exitRoom+enterRoom 更好的流畅性和更少的代码量。 + * 接口调用结果会通过 {@link TRTCCloudDelegate} 中的 onSwitchRoom(errCode, errMsg) 回调。 * + * @param config 房间参数,详情请参考 {@link TRTCSwitchRoomConfig} 。 + * @note 由于对老版本 SDK 兼容的需求,参数 config 中同时包含 roomId 与 strRoomId 两个参数,这两个参数的填写格外讲究,请注意如下事项: + * 1. 若您选用 strRoomId,则 roomId 需要填写为0。若两者都填,将优先选用 roomId。 + * 2. 
所有房间需要同时使用 strRoomId 或同时使用 roomId,不可混用,否则将会出现很多预期之外的 bug。 + */ +- (void)switchRoom:(TRTCSwitchRoomConfig *)config; + +/** + * 2.6 请求跨房通话 + * + * 默认情况下,只有同一个房间中的用户之间可以进行音视频通话,不同的房间之间的音视频流是相互隔离的。 + * 但您可以通过调用该接口,将另一个房间中某个主播音视频流发布到自己所在的房间中,与此同时,该接口也会将自己的音视频流发布到目标主播的房间中。 + * 也就是说,您可以使用该接口让身处两个不同房间中的主播进行跨房间的音视频流分享,从而让每个房间中的观众都能观看到这两个主播的音视频。该功能可以用来实现主播之间的 PK 功能。 + * 跨房通话的请求结果会通过 {@link TRTCCloudDelegate} 中的 onConnectOtherRoom() 回调通知给您。 + * 例如:当房间“101”中的主播 A 通过 connectOtherRoom() 跟房间“102”中的主播 B 建立跨房通话后, + * - 房间“101”中的用户都会收到主播 B 的 onRemoteUserEnterRoom(B) 和 onUserVideoAvailable(B,YES) 这两个事件回调,即房间“101”中的用户都可以订阅主播 B 的音视频。 + * - 房间“102”中的用户都会收到主播 A 的 onRemoteUserEnterRoom(A) 和 onUserVideoAvailable(A,YES) 这两个事件回调,即房间“102”中的用户都可以订阅主播 A 的音视频。 + *  + * 跨房通话的参数考虑到后续扩展字段的兼容性问题,暂时采用了 JSON 格式的参数: + * **情况一:数字房间号** + * 如果房间“101”中的主播 A 要跟房间“102”中的主播 B 连麦,那么主播 A 调用该接口时需要传入:{"roomId": 102, "userId": "userB"} + * 示例代码如下: * <pre> - * 房间 001 房间 002 - * ------------- ------------ - * 跨房通话前:| 主播 A | | 主播 B | - * | 观众 U V W | | 观众 X Y Z | - * ------------- ------------ - * - * 房间 001 房间 002 - * ------------- ------------ - * 跨房通话后:| 主播 A B | | 主播 B A | - * | 观众 U V W | | 观众 X Y Z | - * ------------- ------------ + * NSMutableDictionaryjsonDict = [[NSMutableDictionary alloc] init]; + * [jsonDict setObject:@(102) forKey:@"roomId"]; + * [jsonDict setObject:@"userB" forKey:@"userId"]; + * NSData* jsonData = [NSJSONSerialization dataWithJSONObject:jsonDict options:NSJSONWritingPrettyPrinted error:nil]; + * NSString* jsonString = [[NSString alloc] initWithData:jsonData encoding:NSUTF8StringEncoding]; + * [trtc connectOtherRoom:jsonString]; * </pre> * - * 跨房通话的参数考虑到后续扩展字段的兼容性问题,暂时采用了 JSON 格式的参数,要求至少包含两个字段: - * - roomId:房间“001”中的主播 A 要跟房间“002”中的主播 B 连麦,主播 A 调用 connectOtherRoom() 时 roomId 应指定为“002”。 - * - userId:房间“001”中的主播 A 要跟房间“002”中的主播 B 连麦,主播 A 调用 connectOtherRoom() 时 userId 应指定为 B 的 userId。 - * - * 跨房通话的请求结果会通过 TRTCCloudDelegate 中的 onConnectOtherRoom() 回调通知给您。 - * + * **情况二:字符串房间号** + * 
如果您使用的是字符串房间号,务必请将 json 中的 “roomId” 替换成 “strRoomId”: {"strRoomId": "102", "userId": "userB"} + * 示例代码如下: * <pre> - * NSMutableDictionary * jsonDict = [[NSMutableDictionary alloc] init]; - * [jsonDict setObject:@(002) forKey:@"roomId"]; + * NSMutableDictionaryjsonDict = [[NSMutableDictionary alloc] init]; + * [jsonDict setObject:@"102" forKey:@"strRoomId"]; * [jsonDict setObject:@"userB" forKey:@"userId"]; * NSData* jsonData = [NSJSONSerialization dataWithJSONObject:jsonDict options:NSJSONWritingPrettyPrinted error:nil]; * NSString* jsonString = [[NSString alloc] initWithData:jsonData encoding:NSUTF8StringEncoding]; * [trtc connectOtherRoom:jsonString]; * </pre> * - * @param param JSON 字符串连麦参数,roomId 代表目标房间号,userId 代表目标用户 ID。 - * - **/ + * @param param 需要你传入 JSON 格式的字符串参数,roomId 代表数字格式的房间号,strRoomId 代表字符串格式的房间号,userId 代表目标主播的用户ID。 + */ - (void)connectOtherRoom:(NSString *)param; /** - * 1.5 退出跨房通话 + * 2.7 退出跨房通话 * - * 跨房通话的退出结果会通过 TRTCCloudDelegate 中的 onDisconnectOtherRoom() 回调通知给您。 - **/ + * 退出结果会通过 **TRTCCloudDelegate** 中的 {@link onDisconnectOtherRoom} 回调通知给您。 + */ - (void)disconnectOtherRoom; /** - * 1.6 设置音视频数据接收模式,需要在进房前设置才能生效 + * 2.8 设置订阅模式(需要在进入房前设置才能生效) * - * 为实现进房秒开的绝佳体验,SDK 默认进房后自动接收音视频。即在您进房成功的同时,您将立刻收到远端所有用户的音视频数据。 - * 若您没有调用 startRemoteView,视频数据将自动超时取消。 - * 若您主要用于语音聊天等没有自动接收视频数据需求的场景,您可以根据实际需求选择接收模式,以免产生预期之外的视频时长费用。 + * 您可以通过该接口在“自动订阅”和“手动订阅”两种模式下进行切换: + * - 自动订阅:默认模式,用户在进入房间后会立刻接收到该房间中的音视频流,音频会自动播放,视频会自动开始解码(依然需要您通过 {@link startRemoteView} 接口绑定渲染控件)。 + * - 手动订阅:在用户进入房间后,需要手动调用 {@link startRemoteView} 接口才能启动视频流的订阅和解码,需要手动调用 {@link muteRemoteAudio} (NO) 接口才能启动声音的播放。 * - * @param autoRecvAudio YES:自动接收音频数据;NO:需要调用 muteRemoteAudio 进行请求或取消。默认值:YES - * @param autoRecvVideo YES:自动接收视频数据;NO:需要调用 startRemoteView/stopRemoteView 进行请求或取消。默认值:YES - * - * @note 需要在进房前设置才能生效。 - **/ + * 在绝大多数场景下,用户进入房间后都会订阅房间中所有主播的音视频流,因此 TRTC 默认采用了自动订阅模式,以求得最佳的“秒开体验”。 + * 如果您的应用场景中每个房间同时会有很多路音视频流在发布,而每个用户只想选择性地订阅其中的 1-2 路,则推荐使用“手动订阅”模式以节省流量费用。 + * @param autoRecvAudio 
YES:自动订阅音频;NO:需手动调用 muteRemoteAudio(NO) 订阅音频。默认值:YES。 + * @param autoRecvVideo YES:自动订阅视频;NO:需手动调用 startRemoteView 订阅视频。默认值:YES。 + * @note + * 1. 需要在进入房间前调用该接口进行设置才能生效。 + * 2. 在自动订阅模式下,如果用户在进入房间后没有调用 {@link startRemoteView} 订阅视频流,SDK 会自动停止订阅视频流,以便达到节省流量的目的。 + */ - (void)setDefaultStreamRecvMode:(BOOL)autoRecvAudio video:(BOOL)autoRecvVideo; /** -* 1.7 创建子 TRTCCloud 实例 -* -* 子 TRTCCloud 实例用于进入其他房间,观看其他房间主播的音视频流,还可以在不同的房间之间切换推送音视频流。 -* -* 此接口主要应用于类似超级小班课这种需要进入多个房间推拉流的场景。 -* -* <pre> -* TRTCCloud *mainCloud = [TRTCCloud sharedInstance]; -* // 1、mainCloud 进房并开始推送音视频流。 -* // 2、创建子 TRTCCloud 实例并进入其他房间。 -* TRTCCloud *subCloud = [mainCloud createSubCloud]; -* [subCloud enterRoom:params appScene:scene)]; -* -* // 3、切换房间推送音视频流。 -* // 3.1、mainCloud 停止推送音视频流。 -* [mainCloud switchRole:TRTCRoleAudience]; -* [mainCloud muteLocalVideo:true]; -* [mainCloud muteLocalAudio:true]; -* // 3.2、subCLoud 推送音视频流。 -* [subCloud switchRole:TRTCRoleAnchor]; -* [subCloud muteLocalVideo:false]; -* [subCloud muteLocalAudio:false]; -* -* // 4、subCLoud 退房。 -* [subCloud exitRoom]; -* -* // 5、销毁 subCLoud。 -* [mainCloud destroySubCloud:subCloud]; -* </pre> -* -* @return 子 TRTCCloud 实例 -* @note -* - 同一个用户,可以使用同一个 userId 进入多个不同 roomId 的房间。 -* - 两台手机不可以同时使用同一个 userId 进入同一个 roomId 的房间。 -* - 通过 createSubCloud 接口创建出来的子房间 TRTCCloud 实例有一个能力限制:不能调用子实例中与本地音视频 -* 相关的接口(除了 switchRole、muteLocalVideo 和 muteLocalAudio 之外), 设置美颜等接口请使用 -* 原 TRTCCloud 实例对象。 -* - 同一个用户,同时只能在一个 TRTCCloud 实例中推流,在不同房间同时推流会引发云端的状态混乱,导致各种 bug。 -*/ -- (TRTCCloud *)createSubCloud; - -/** - * 1.8 销毁子 TRTCCloud 实例 + * 2.9 创建子房间示例(用于多房间并发观看) + * + * TRTCCloud 一开始被设计成单例模式,限制了多房间并发观看的能力。 + * 通过调用该接口,您可以创建出多个 TRTCCloud 实例,以便同时进入多个不同的房间观看音视频流。 + * 但需要注意的是,由于摄像头和麦克风还是只有一份,因此您只能同时在一个 TRTCCloud 实例中以“主播”的身份存在,也就是您只能同时在一个 TRTCCloud 实例中发布自己的音视频流。 + * 该功能主要用于在线教育场景中一种被称为“超级小班课”的业务场景中,用于解决“每个 TRTC 的房间中最多只能有 50 人同时发布自己音视频流”的限制。 + * 示例代码如下: + * <pre> + * TRTCCloud *mainCloud = [TRTCCloud sharedInstance]; + * [mainCloud enterRoom:params1 
appScene:TRTCAppSceneLIVE)]; + * //... + * //Switch the role from "anchor" to "audience" in your own room + * [mainCloud switchRole:TRTCRoleAudience]; + * [mainCloud muteLocalVideo:YES]; + * [mainCloud muteLocalAudio:YES]; + * //... + * //Use subcloud to enter another room and switch the role from "audience" to "anchor" + * TRTCCloud *subCloud = [mainCloud createSubCloud]; + * [subCloud enterRoom:params2 appScene:TRTCAppSceneLIVE)]; + * [subCloud switchRole:TRTCRoleAnchor]; + * [subCloud muteLocalVideo:NO]; + * [subCloud muteLocalAudio:NO]; + * //... + * //Exit from new room and release it. + * [subCloud exitRoom]; + * [mainCloud destroySubCloud:subCloud]; + * </pre> + * + * @note + * - 同一个用户,可以使用同一个 userId 进入多个不同 roomId 的房间。 + * - 两台不同的终端设备不可以同时使用同一个 userId 进入同一个 roomId 的房间。 + * - 同一个用户,同时只能在一个 TRTCCloud 实例中推流,在不同房间同时推流会引发云端的状态混乱,导致各种 bug。 + * - 通过 createSubCloud 接口创建出来的 TRTCCloud 实例有一个能力限制:不能调用子实例中与本地音视频相关的接口(除 switchRole、muteLocalVideo 和 muteLocalAudio 之外), 设置美颜等接口请使用原 TRTCCloud 实例对象。 + * @return 子 TRTCCloud 实例 */ -- (void)destroySubCloud:(TRTCCloud *)subCloud; +- (TRTCCloud *)createSubCloud; /** - * 1.9 切换房间 + * 2.10 销毁子房间示例 * - * 调用接口后,会退出原来的房间,并且停止原来房间的音视频数据发送和所有远端用户的音视频播放,但不会停止本地视频的预览。 - * 进入新房间成功后,会自动恢复原来的音视频数据发送状态。 - * - * 接口调用结果会通过 TRTCCloudDelegate 中的 onSwitchRoom(errCode, errMsg) 回调。 + * @param subCloud 子房间实例 */ -- (void)switchRoom:(TRTCSwitchRoomConfig *)config; - -/// @} +- (void)destroySubCloud:(TRTCCloud *)subCloud; ///////////////////////////////////////////////////////////////////////////////// // -// (二)CDN 相关接口函数 +// CDN 相关接口函数 // ///////////////////////////////////////////////////////////////////////////////// -#pragma mark - CDN 相关接口函数 - -/// @name CDN 相关接口函数 -/// @{ /** - * 2.1 开始向腾讯云的直播 CDN 推流 + * 3.1 开始向腾讯云直播 CDN 上发布音视频流 * - * 该接口会指定当前用户的音视频流在腾讯云 CDN 所对应的 StreamId,进而可以指定当前用户的 CDN 播放地址。 - * - * 例如:如果我们采用如下代码设置当前用户的主画面 StreamId 为 user_stream_001,那么该用户主画面对应的 CDN 播放地址为: + * 该接口会向 TRTC 服务器发送指令,要求其将当前用户的音视频流旁路到直播 CDN 上。 + * 您可以通过参数 streamId 
设定直播流的 StreamId,从而可以指定该用户的音视频流对应在直播 CDN 上的播放地址。 + * 例如:您可以通过该接口将当前用户的直播流 ID 指定为 user_stream_001,那么该用户音视频流对应的 CDN 播放地址为: * “http://yourdomain/live/user_stream_001.flv”,其中 yourdomain 为您自己备案的播放域名, - * 您可以在直播[控制台](https://console.cloud.tencent.com/live) 配置您的播放域名,腾讯云不提供默认的播放域名。 - * + * 您可以在[直播控制台](https://console.cloud.tencent.com/live) 配置您的播放域名,腾讯云不提供默认的播放域名。 * <pre> - * TRTCCloud *trtcCloud = [TRTCCloud sharedInstance]; - * [trtcCloud enterRoom:params appScene:TRTCAppSceneLIVE]; - * [trtcCloud startLocalPreview:frontCamera view:localView]; - * [trtcCloud startLocalAudio]; - * [trtcCloud startPublishing: @"user_stream_001" type:TRTCVideoStreamTypeBig]; - * + * TRTCCloud *trtcCloud = [TRTCCloud sharedInstance]; + * [trtcCloud enterRoom:params appScene:TRTCAppSceneLIVE]; + * [trtcCloud startLocalPreview:frontCamera view:localView]; + * [trtcCloud startLocalAudio]; + * [trtcCloud startPublishing: @"user_stream_001" type:TRTCVideoStreamTypeBig]; * </pre> * * 您也可以在设置 enterRoom 的参数 TRTCParams 时指定 streamId, 而且我们更推荐您采用这种方案。 - * * @param streamId 自定义流 ID。 - * @param type 仅支持TRTCVideoStreamTypeBig 和 TRTCVideoStreamTypeSub。 - * @note 您需要先在实时音视频 [控制台](https://console.cloud.tencent.com/rav/) 中的功能配置页开启“启用旁路推流”才能生效。 - * - 若您选择“指定流旁路”,则您可以通过该接口将对应音视频流推送到腾讯云 CDN 且指定为填写的流 ID。 - * - 若您选择“全局自动旁路”,则您可以通过该接口调整默认的流 ID。 -*/ -- (void)startPublishing:(NSString *)streamId type:(TRTCVideoStreamType)type; + * @param streamType 仅支持 {@link TRTCVideoStreamTypeBig} 和 {@link TRTCVideoStreamTypeSub}。 + * @note 您需要提前在 [实时音视频控制台](https://console.cloud.tencent.com/trtc/) 中的功能配置页面上开启“启用旁路推流”才能生效。 + * - 若您选择“指定流旁路”,则您可以通过该接口将对应音视频流推送到腾讯云 CDN 且指定为填写的流 ID。 + * - 若您选择“全局自动旁路”,则您可以通过该接口调整默认的流 ID。 + */ +- (void)startPublishing:(NSString *)streamId type:(TRTCVideoStreamType)streamType; /** - * 2.2 停止向腾讯云的直播 CDN 推流 + * 3.2 停止向腾讯云直播 CDN 上发布音视频流 */ - (void)stopPublishing; /** - * 2.3 开始向友商云的直播 CDN 转推 + * 3.3 开始向非腾讯云 CDN 上发布音视频流 * - * 该接口跟 startPublishing() 类似,但 startPublishCDNStream() 支持向非腾讯云的直播 CDN 转推。 - * @param 
param CDN 转推参数,请参考 TRTCCloudDef.h 中关于 TRTCPublishCDNParam 的介绍。 - * @note 使用 startPublishing() 绑定腾讯云直播 CDN 不收取额外的费用,但使用 startPublishCDNStream() 绑定非腾讯云直播 CDN 需要收取转推费用。 + * 该接口跟 startPublishing 功能类似,不同之处在于,startPublishing 仅支持向腾讯云的 CDN 发布,而本接口支持向非腾讯云的直播 CDN 上转推音视频流。 + * @param param CDN 转推参数,详情请参考 {@link TRTCPublishCDNParam} + * @note + * - 使用 startPublishing 接口向腾讯云的直播 CDN 上发布音视频流不会收取额外费用 + * - 使用 startPublishCDNStream 接口向非腾讯云的直播 CDN 上发布音视频流,需要收取额外的转推带宽费用。 */ -- (void)startPublishCDNStream:(TRTCPublishCDNParam*)param; +- (void)startPublishCDNStream:(TRTCPublishCDNParam *)param; /** - * 2.4 停止向非腾讯云地址转推 + * 3.4 停止向非腾讯云 CDN 上发布音视频流 */ - (void)stopPublishCDNStream; /** - * 2.5 设置云端的混流转码参数 - * - * 如果您在实时音视频 [控制台](https://console.cloud.tencent.com/trtc/) 中的功能配置页开启了“启用旁路直播”功能, - * 房间里的每一路画面都会有一个默认的直播 [CDN 地址](https://cloud.tencent.com/document/product/647/16826)。 - * - * 一个直播间中可能有不止一位主播,而且每个主播都有自己的画面和声音,但对于 CDN 观众来说,他们只需要一路直播流, - * 所以您需要将多路音视频流混成一路标准的直播流,这就需要混流转码。 - * - * 当您调用 setMixTranscodingConfig() 接口时,SDK 会向腾讯云的转码服务器发送一条指令,目的是将房间里的多路音视频流混合为一路, - * 您可以通过 mixUsers 参数来调整每一路画面的位置,以及是否只混合声音,也可以通过 videoWidth、videoHeight、videoBitrate 等参数控制混合音视频流的编码参数。 - * - * <pre> - * 【画面1】=> 解码 ====> \ - * \ - * 【画面2】=> 解码 => 画面混合 => 编码 => 【混合后的画面】 - * / - * 【画面3】=> 解码 ====> / - * - * 【声音1】=> 解码 ====> \ - * \ - * 【声音2】=> 解码 => 声音混合 => 编码 => 【混合后的声音】 - * / - * 【声音3】=> 解码 ====> / - * </pre> + * 3.5 设置云端混流的排版布局和转码参数 * + * 在一个直播间中可能同时会有多个主播发布自己的音视频流,但对于直播 CDN 上的观众而言,只需要观看一条 HTTP-FLV 或 HLS 格式的视频流即可。 + * 当您调用本接口函数时,SDK 会向腾讯云的 TRTC 混流服务器发送一条指令,混流服务器会将房间里的多路音视频流混合成一路。 + * 您可以通过 {@link TRTCTranscodingConfig} 参数来调整每一路画面的排版布局,也可以设置混合后的音视频流的各项编码参数。 * 参考文档:[云端混流转码](https://cloud.tencent.com/document/product/647/16827)。 - * - * @param config 请参考 TRTCCloudDef.h 中关于 TRTCTranscodingConfig 的介绍。如果传入 nil 则取消云端混流转码。 + *  + * @param config 如果 config 不为空,则开启云端混流,如果 config 为空则停止云端混流。详情请参考 {@link TRTCTranscodingConfig} 。 * @note 关于云端混流的注意事项: - * - 云端转码会引入一定的 CDN 观看延时,大概会增加1 - 2秒。 - * - 
调用该函数的用户,会将连麦中的多路画面混合到自己当前这路画面或者 config 中指定的 streamId 上。 - * - 请注意,若您还在房间中且不再需要混流,请务必传入 nil 进行取消,因为当您发起混流后,云端混流模块就会开始工作,不及时取消混流可能会引起不必要的计费损失。 - * - 请放心,您退房时会自动取消混流状态。 + * - 混流转码为收费功能,调用接口将产生云端混流转码费用,详见 [云端混流转码计费说明](https://cloud.tencent.com/document/product/647/49446) 。 + * - 调用该接口的用户,如果没设定 config 参数中的 streamId 字段,TRTC 会将房间中的多路画面混合到当前用户所对应的音视频流上,即 A + B => A。 + * - 调用该接口的用户,如果设定了 config 参数中的 streamId 字段,TRTC 会将房间中的多路画面混合到您指定的 streamId 上,即 A + B => streamId。 + * - 请注意,若您还在房间中且不再需要混流,请务必再次调用本接口并将 config 设置为空以进行取消,不及时取消混流可能会引起不必要的计费损失。 + * - 请放心,当您退房时 TRTC 会自动取消混流状态。 */ -- (void)setMixTranscodingConfig:(TRTCTranscodingConfig*)config; - +- (void)setMixTranscodingConfig:(nullable TRTCTranscodingConfig *)config; /// @} - ///////////////////////////////////////////////////////////////////////////////// // -// (三)视频相关接口函数 +// 视频相关接口函数 // ///////////////////////////////////////////////////////////////////////////////// -#pragma mark - 视频相关接口函数 -/// @name 视频相关接口函数 +/// @name 视频相关接口函数 /// @{ -#if TARGET_OS_IPHONE /** - * 3.1 开启本地视频的预览画面 (iOS 版本) + * 4.1 开启本地摄像头的预览画面(移动端) * * 在 enterRoom 之前调用此函数,SDK 只会开启摄像头,并一直等到您调用 enterRoom 之后才开始推流。 * 在 enterRoom 之后调用此函数,SDK 会开启摄像头并自动开始视频推流。 - * 当开始渲染首帧摄像头画面时,您会收到 TRTCCloudDelegate 中的 onFirstVideoFrame(nil) 回调。 - * - * @note 如果希望开播前预览摄像头画面并通过 BeautyManager 调节美颜参数,您可以: - * - 方案一:在调用 enterRoom 之前调用 startLocalPreview - * - 方案二:在调用 enterRoom 之后调用 startLocalPreview + muteLocalVideo(true) + * 当开始渲染首帧摄像头画面时,您会收到 {@link TRTCCloudDelegate} 中的 onCameraDidReady 回调通知。 * @param frontCamera YES:前置摄像头;NO:后置摄像头。 * @param view 承载视频画面的控件 + * @note 如果希望开播前预览摄像头画面并通过 BeautyManager 调节美颜参数,您可以: + * - 方案一:在调用 enterRoom 之前调用 startLocalPreview + * - 方案二:在调用 enterRoom 之后调用 startLocalPreview + muteLocalVideo(YES) */ -- (void)startLocalPreview:(BOOL)frontCamera view:(TXView *)view; -#elif TARGET_OS_MAC +#if TARGET_OS_IPHONE +- (void)startLocalPreview:(BOOL)frontCamera view:(nullable TXView *)view; +#endif + /** - * 3.1 开启本地视频的预览画面 (Mac 版本) + * 4.2 开启本地摄像头的预览画面(桌面端) * - 
* 在调用该方法前,可以先调用 setCurrentCameraDevice 选择使用 Mac 自带摄像头或外接摄像头。 + * 在调用该接口之前,您可以先调用 setCurrentCameraDevice 选择使用 Mac 自带摄像头或外接摄像头。 * 在 enterRoom 之前调用此函数,SDK 只会开启摄像头,并一直等到您调用 enterRoom 之后才开始推流。 * 在 enterRoom 之后调用此函数,SDK 会开启摄像头并自动开始视频推流。 - * 当开始渲染首帧摄像头画面时,您会收到 TRTCCloudDelegate 中的 onFirstVideoFrame(nil) 回调。 - * - * @note 如果希望开播前预览摄像头画面并通过 BeautyManager 调节美颜参数,您可以: - * - 方案一:在调用 enterRoom 之前调用 startLocalPreview - * - 方案二:在调用 enterRoom 之后调用 startLocalPreview + muteLocalVideo(true) - * + * 当开始渲染首帧摄像头画面时,您会收到 {@link TRTCCloudDelegate} 中的 onCameraDidReady 回调通知。 * @param view 承载视频画面的控件 + * @note 如果希望开播前预览摄像头画面并通过 BeautyManager 调节美颜参数,您可以: + * - 方案一:在调用 enterRoom 之前调用 startLocalPreview + * - 方案二:在调用 enterRoom 之后调用 startLocalPreview + muteLocalVideo(YES) */ -- (void)startLocalPreview:(TXView *)view; +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)startLocalPreview:(nullable TXView *)view; #endif /** - * 3.2 更新本地视频预览画面的窗口 - * - * @param view 承载视频画面的控件 + * 4.3 更新本地摄像头的预览画面 */ -- (void)updateLocalView:(TXView *)view; +- (void)updateLocalView:(nullable TXView *)view; /** - * 3.3 停止本地视频采集及预览 + * 4.4 停止摄像头预览 */ - (void)stopLocalPreview; /** - * 3.4 暂停/恢复推送本地的视频数据 - * - * 当暂停推送本地视频后,房间里的其它成员将会收到 onUserVideoAvailable(userId, NO) 回调通知 - * 当恢复推送本地视频后,房间里的其它成员将会收到 onUserVideoAvailable(userId, YES) 回调通知 + * 4.5 暂停/恢复发布本地的视频流 * - * @param mute YES:暂停;NO:恢复 + * 该接口可以暂停(或恢复)发布本地的视频画面,暂停之后,同一房间中的其他用户将无法继续看到自己画面。 + * 该接口在指定 TRTCVideoStreamTypeBig 时等效于 start/stopLocalPreview 这两个接口,但具有更好的响应速度。 + * 因为 start/stopLocalPreview 需要打开和关闭摄像头,而打开和关闭摄像头都是硬件设备相关的操作,非常耗时。 + * 相比之下,muteLocalVideo 只需要在软件层面对数据流进行暂停或者放行即可,因此效率更高,也更适合需要频繁打开关闭的场景。 + * 当暂停/恢复发布指定 TRTCVideoStreamTypeBig 后,同一房间中的其他用户将会收到 onUserVideoAvailable 回调通知。 + * 当暂停/恢复发布指定 TRTCVideoStreamTypeSub 后,同一房间中的其他用户将会收到 onUserSubStreamAvailable 回调通知。 + * @param streamType 要暂停/恢复的视频流类型(仅支持 {@link TRTCVideoStreamTypeBig} 和 {@link TRTCVideoStreamTypeSub}) + * @param mute YES:暂停;NO:恢复。 */ -- (void)muteLocalVideo:(BOOL)mute; +- 
(void)muteLocalVideo:(TRTCVideoStreamType)streamType mute:(BOOL)mute; /** - * 3.5 设置暂停推送本地视频时要推送的图片 - * - * 当暂停推送本地视频后,会继续推送该接口设置的图片 + * 4.6 设置本地画面被暂停期间的替代图片 * - * @param image 设置要推送的图片。 nil 表示不推送 - * @param fps 设置推送图片帧率,最小值为5,最大值为10,默认5。 + * 当您调用 muteLocalVideo(YES) 暂停本地画面时,您可以通过调用本接口设置一张替代图片,设置后,房间中的其他用户会看到这张替代图片,而不是黑屏画面。 + * @param image 设置替代图片,空值代表在 muteLocalVideo 之后不再发送视频流数据,默认值为空。 + * @param fps 设置替代图片帧率,最小值为5,最大值为10,默认5。 */ - (void)setVideoMuteImage:(TXImage *)image fps:(NSInteger)fps; /** - * 3.6 开始拉取并显示指定用户的远端画面 + * 4.7 订阅远端用户的视频流,并绑定视频渲染控件 * - * 该函数会拉取指定 userid 的视频流显示在您指定的 view 控件上,您可以通过 setRemoteRenderParams:streamType:params: 设置显示模式。 - * - 如果您提前知道房间中某个 userid 正在推流,可以直接调用 startRemoteView 显示该用户的远端画面。 - * - 如果您不知道房间中有哪些用户开启了视频,可以在 enterRoom 后等待来自 SDK 的 onUserVideoAvailable(userId, true) 回调通知。 - * 调用 startRemoteView 只是启动拉取,此时画面还需要加载,当加载完毕后 TRTCCloudListener 会通过 onFirstVideoFrame(userId) 通知您。 + * 调用该接口可以让 SDK 拉取指定 userid 的视频流,并渲染到参数 view 指定的渲染控件上。您可以通过 {@link setRemoteRenderParams} 设置画面的显示模式。 + * - 如果您已经知道房间中有视频流的用户的 userid,可以直接调用 startRemoteView 订阅该用户的画面。 + * - 如果您不知道房间中有哪些用户在发布视频,您可以在 enterRoom 之后等待来自 {@link onUserVideoAvailable} 的通知。 * - * @param userId 指定远端用户的 userId + * 调用本接口只是启动视频流的拉取,此时画面还需要加载和缓冲,当缓冲完毕后您会收到来自 {@link onFirstVideoFrame} 的通知。 + * @param userId 指定远端用户的 ID。 * @param streamType 指定要观看 userId 的视频流类型: - * - 高清大画面:TRTCVideoStreamTypeBig - * - 低清小画面:TRTCVideoStreamTypeSmall - * - 辅流(屏幕分享):TRTCVideoStreamTypeSub - * @param view 承载视频画面的控件 + * - 高清大画面:{@link TRTCVideoStreamTypeBig} + * - 低清小画面:{@link TRTCVideoStreamTypeSmall}(需要远端用户通过 {@link enableEncSmallVideoStream} 开启双路编码后才有效果) + * - 辅流画面(常用于屏幕分享):{@link TRTCVideoStreamTypeSub} + * + * @param view 用于承载视频画面的渲染控件 * @note 注意几点规则需要您关注: - * 1. SDK 支持同时观看某 userid 的大画面和辅路,或者小画面和辅路,但不支持同时观看大画面和小画面。 - * 2. 只有当指定的 userid 通过 enableEncSmallVideoStream 开启双路编码后,才能观看该用户的小画面。 - * 3. 如果该用户的小画面不存在,则默认切换到大画面。 + * 1. SDK 支持同时观看某 userid 的大画面和辅路画面,或者同时观看某 userid 的小画面和辅路画面,但不支持同时观看大画面和小画面。 + * 2. 
只有当指定的 userid 通过 {@link enableEncSmallVideoStream} 开启双路编码后,才能观看该用户的小画面。 + * 3. 当指定的 userid 的小画面不存在时,SDK 默认切换到该用户的大画面。 */ -- (void)startRemoteView:(NSString *)userId streamType:(TRTCVideoStreamType)streamType view:(TXView *)view; +- (void)startRemoteView:(NSString *)userId streamType:(TRTCVideoStreamType)streamType view:(nullable TXView *)view; /** -* 3.7 更新远端视频画面的窗口 -* -* @param view 承载视频画面的控件 -* @param type 要设置预览窗口的流类型(TRTCVideoStreamTypeBig、TRTCVideoStreamTypeSub) -* @param userId 对方的用户标识 -*/ -- (void)updateRemoteView:(TXView *)view streamType:(TRTCVideoStreamType)type forUser:(NSString *)userId; + * 4.8 更新远端用户的视频渲染控件 + * + * 该接口可用于更新远端视频画面的渲染控件,常被用于切换显示区域的交互场景中。 + * @param view 承载视频画面的控件 + * @param streamType 要设置预览窗口的流类型(仅支持 {@link TRTCVideoStreamTypeBig} 和 {@link TRTCVideoStreamTypeSub}) + * @param userId 指定远端用户的 ID。 + */ +- (void)updateRemoteView:(nullable TXView *)view streamType:(TRTCVideoStreamType)streamType forUser:(NSString *)userId; /** - * 3.8 停止显示远端视频画面,同时不再拉取该远端用户的视频数据流 - * - * 调用此接口后,SDK 会停止接收该用户的远程视频流,同时会清理相关的视频显示资源。 + * 4.9 停止订阅远端用户的视频流,并释放渲染控件 * - * @param userId 指定远端用户的 userId - * @param streamType 指定要停止观看的 userId 的视频流类型: - * - 高清大画面:TRTCVideoStreamTypeBig - * - 低清小画面:TRTCVideoStreamTypeSmall - * - 辅流(屏幕分享):TRTCVideoStreamTypeSub - + * 调用此接口会让 SDK 停止接收该用户的视频流,并释放该路视频流的解码和渲染资源。 + * @param userId 指定远端用户的 ID。 + * @param streamType 指定要观看 userId 的视频流类型: + * - 高清大画面:{@link TRTCVideoStreamTypeBig} + * - 低清小画面:{@link TRTCVideoStreamTypeSmall} + * - 辅流画面(常用于屏幕分享):{@link TRTCVideoStreamTypeSub} */ - (void)stopRemoteView:(NSString *)userId streamType:(TRTCVideoStreamType)streamType; /** - * 3.9 停止显示所有远端视频画面,同时不再拉取远端用户的视频数据流 + * 4.10 停止订阅所有远端用户的视频流,并释放全部渲染资源 * - * @note 如果有屏幕分享的画面在显示,则屏幕分享的画面也会一并被关闭。 + * 调用此接口会让 SDK 停止接收所有来自远端的视频流,并释放全部的解码和渲染资源。 + * @note 如果当前有正在显示的辅路画面(屏幕分享)也会一并被停止。 */ - (void)stopAllRemoteView; /** - * 3.10 暂停/恢复接收指定的远端视频流 - * - * 该接口仅暂停/恢复接收指定的远端用户的视频流,但并不释放显示资源,视频画面会冻屏在 mute 前的最后一帧。 + * 4.11 暂停/恢复订阅远端用户的视频流 * - * @param userId 对方的用户标识 - 
* @param mute 是否暂停接收 - * @note 您在 enterRoom 之前或之后调用此 API 均会进入屏蔽状态,屏蔽状态在您调用 exitRoom 之后会被重置为 false。 + * 该接口仅暂停/恢复接收指定用户的视频流,但并不释放显示资源,视频画面会被冻屏在接口调用时的最后一帧。 + * @param userId 指定远端用户的 ID。 + * @param streamType 要暂停/恢复的视频流类型(仅支持 {@link TRTCVideoStreamTypeBig} 和 {@link TRTCVideoStreamTypeSub})。 + * @param mute 是否暂停接收。 + * @note 该接口支持您在进入房间(enterRoom)前调用,暂停状态会在退出房间(exitRoom)在之后会被重置。 */ -- (void)muteRemoteVideoStream:(NSString*)userId mute:(BOOL)mute; +- (void)muteRemoteVideoStream:(NSString *)userId streamType:(TRTCVideoStreamType)streamType mute:(BOOL)mute; /** - * 3.11 暂停/恢复接收所有远端视频流 - * - * 该接口仅暂停/恢复接收所有远端用户的视频流,但并不释放显示资源,视频画面会冻屏在 mute 前的最后一帧。 + * 4.12 暂停/恢复订阅所有远端用户的视频流 * - * @param mute 是否暂停接收 - * @note 您在 enterRoom 之前或之后调用此 API 均会进入屏蔽状态,屏蔽状态在您调用 exitRoom 之后会被重置为 false。 + * 该接口仅暂停/恢复接收所有用户的视频流,但并不释放显示资源,视频画面会被冻屏在接口调用时的最后一帧。 + * @param mute 是否暂停接收 + * @note 该接口支持您在进入房间(enterRoom)前调用,暂停状态会在退出房间(exitRoom)在之后会被重置。 */ - (void)muteAllRemoteVideoStreams:(BOOL)mute; /** - * 3.12 设置视频编码器相关参数 - * - * 该设置决定了远端用户看到的画面质量(同时也是云端录制出的视频文件的画面质量) + * 4.13 设置视频编码器的编码参数 * - * @param param 视频编码参数,详情请参考 TRTCCloudDef.h 中的 TRTCVideoEncParam 定义 + * 该设置能够决定远端用户看到的画面质量,同时也能决定云端录制出的视频文件的画面质量。 + * @param param 用于设置视频编码器的相关参数,详情请参考 {@link TRTCVideoEncParam}。 */ -- (void)setVideoEncoderParam:(TRTCVideoEncParam*)param; +- (void)setVideoEncoderParam:(TRTCVideoEncParam *)param; /** - * 3.13 设置网络流控相关参数 + * 4.14 设置网络质量控制的相关参数 * - * 该设置决定 SDK 在各种网络环境下的调控策略(例如弱网下选择“保清晰”或“保流畅”) - * - * @param param 网络流控参数,详情请参考 TRTCCloudDef.h 中的 TRTCNetworkQosParam 定义 + * 该设置决定在差网络环境下的质量调控策略,如“画质优先”或“流畅优先”等策略。 + * @param param 用于设置网络质量控制的相关参数,详情请参考 {@link TRTCNetworkQosParam}。 */ -- (void)setNetworkQosParam:(TRTCNetworkQosParam*)param; +- (void)setNetworkQosParam:(TRTCNetworkQosParam *)param; /** - * 3.14 本地图像的渲染设置 + * 4.15 设置本地画面的渲染参数 * - * @param type 视频线路,可以设置为主路(TRTCVideoStreamTypeBig)或者辅路(TRTCVideoStreamTypeSub) - * @param params 参见 TRTCCouldDef.h 中对 TRTCRenderParams 的定义 + * 可设置的参数包括有:画面的旋转角度、填充模式以及左右镜像等。 + * @param 
params 画面渲染参数,详情请参考 {@link TRTCRenderParams}。 */ - (void)setLocalRenderParams:(TRTCRenderParams *)params; /** - * 3.15 远端图像的渲染设置 + * 4.16 设置远端画面的渲染模式 * - * @param userId 用户 ID - * @param type 视频线路,可以设置为主路(TRTCVideoStreamTypeBig)或者辅路(TRTCVideoStreamTypeSub) - * @param params 参见 TRTCCouldDef.h 中对 TRTCRenderParams 的定义 + * 可设置的参数包括有:画面的旋转角度、填充模式以及左右镜像等。 + * @param userId 指定远端用户的 ID。 + * @param streamType 可以设置为主路画面(TRTCVideoStreamTypeBig)或辅路画面(TRTCVideoStreamTypeSub) + * @param params 画面渲染参数,详情请参考 {@link TRTCRenderParams}。 */ -- (void)setRemoteRenderParams:(NSString *)userId streamType:(TRTCVideoStreamType)type params:(TRTCRenderParams *)params; +- (void)setRemoteRenderParams:(NSString *)userId streamType:(TRTCVideoStreamType)streamType params:(TRTCRenderParams *)params; /** - * 3.16 设置视频编码输出的画面方向,即设置远端用户观看到的和服务器录制的画面方向 + * 4.17 设置视频编码器输出的画面方向 * - * 在 iPad、iPhone 等设备180度旋转时,由于摄像头的采集方向没有变,所以对方看到的画面是上下颠倒的, - * 在这种情况下,您可以通过该接口将 SDK 输出到对方的画面旋转180度,这样可以可以确保对方看到的画面依然正常。 - * - * @param rotation 目前支持0和180两个旋转角度,默认值:TRTCVideoRotation_0 + * 该设置不影响本地画面的预览方向,但会影响房间中其他用户所观看到(以及云端录制文件)的画面方向。 + * 当用户将手机或 Pad 上下颠倒时,由于摄像头的采集方向没有变,所以房间中其他用户所看到的画面会变成上下颠倒的, + * 在这种情况下,您可以通过调用该接口将 SDK 编码出的画面方向旋转180度,如此一来,房间中其他用户所看到的画面可保持正常的方向。 + * 如果您希望实现上述这种友好的交互体验,我们更推荐您直接调用 {@link setGSensorMode} 实现更加智能的方向适配,无需您手动调用本接口。 + * @param rotation 目前支持0和180两个旋转角度,默认值:TRTCVideoRotation_0,即不旋转。 */ - (void)setVideoEncoderRotation:(TRTCVideoRotation)rotation; /** - * 3.17 设置编码器输出的画面镜像模式 - * - * 该接口不改变本地摄像头的预览画面,但会改变另一端用户看到的(以及服务器录制的)画面效果。 + * 4.18 设置编码器输出的画面镜像模式 * + * 该设置不影响本地画面的镜像模式,但会影响房间中其他用户所观看到(以及云端录制文件)的镜像模式。 * @param mirror 是否开启远端镜像,YES:开启远端画面镜像;NO:关闭远端画面镜像,默认值:NO。 */ - (void)setVideoEncoderMirror:(BOOL)mirror; /** - * 3.18 设置重力感应的适应模式 + * 4.19 设置重力感应的适配模式 * - * @param mode 重力感应模式,详情请参考 TRTCGSensorMode 的定义,默认值:TRTCGSensorMode_UIAutoLayout + * 您可以通过本接口实现如下这种友好的交互体验: + * 当用户将手机或 Pad 上下颠倒时,由于摄像头的采集方向没有变,所以房间中其他用户所看到的画面会变成上下颠倒的, + * 在这种情况下,您可以通过调用该接口让 SDK 根据设备陀螺仪的朝向自动调整本地画面和编码器输出画面的旋转方向,以使远端观众可以看到正常朝向的画面。 + * 
@param mode 重力感应模式,详情请参考 {@link TRTCGSensorMode},默认值:TRTCGSensorMode_UIAutoLayout。 */ -- (void)setGSensorMode:(TRTCGSensorMode) mode; +- (void)setGSensorMode:(TRTCGSensorMode)mode; /** - * 3.19 开启大小画面双路编码模式 - * - * 如果当前用户是房间中的主要角色(例如主播、老师、主持人等),并且使用 PC 或者 Mac 环境,可以开启该模式。 - * 开启该模式后,当前用户会同时输出【高清大画面】和【低清小画面】两路视频流(但只有一路音频流)。 - * 对于开启该模式的当前用户,会占用更多的网络带宽,并且会更加消耗 CPU 计算资源。 - * - * 对于同一房间的远程观众而言: - * - 如果下行网络很好,可以选择观看【高清大画面】 - * - 如果下行网络较差,可以选择观看【低清小画面】 - * - * @note 双路编码开启后,会消耗更多的 CPU 和 网络带宽,所以对于 iMac、Windows 或者高性能 Pad 可以考虑开启,但请不要在手机端开启。 + * 4.20 开启大小画面双路编码模式 * + * 开启双路编码模式后,当前用户的编码器会同时输出【高清大画面】和【低清小画面】两路视频流(但只有一路音频流)。 + * 如此以来,房间中的其他用户就可以根据自身的网络情况或屏幕大小选择订阅【高清大画面】或是【低清小画面】。 * @param enable 是否开启小画面编码,默认值:NO * @param smallVideoEncParam 小流的视频参数 - * @return 0:成功;-1:大画面已经是最低画质 + * @return 0:成功;-1:当前大画面已被设置为较低画质,开启双路编码已无必要。 + * @note 双路编码开启后,会消耗更多的 CPU 和 网络带宽,所以 Mac、Windows 或者高性能 Pad 可以考虑开启,不建议手机端开启。 */ -- (int)enableEncSmallVideoStream:(BOOL)enable withQuality:(TRTCVideoEncParam*)smallVideoEncParam; +- (int)enableEncSmallVideoStream:(BOOL)enable withQuality:(TRTCVideoEncParam *)smallVideoEncParam; /** - * 3.20 切换指定远端用户的大小画面 - * @note - * 1. 此功能需要该 userId 通过 enableEncSmallVideoStream 提前开启双路编码模式。 - * 如果该 userId 没有开启双路编码模式,则此操作将无任何反应。 - * 2. 
在不通过此接口进行设置的情况下,startRemoteView 默认观看的画面为大画面。 - * - * @param userId 用于指定要观看的 userId - * @param type 视频流类型,即选择看大画面或小画面,默认为大画面 - */ -- (void)setRemoteVideoStreamType:(NSString*)userId type:(TRTCVideoStreamType)type; - -/** -* 3.21 视频画面截图 -* -* 截取本地、远程主路和远端辅流的视频画面,并通过 UIImage(iOS) 或 NSImage(macOS) 对象返回给您。 -* -* @param userId 用户 ID,nil 表示截取本地视频画面。 -* @param type 视频流类型,支持主路画面(TRTCVideoStreamTypeBig,一般用于摄像头)和 辅路画面(TRTCVideoStreamTypeSub,一般用于屏幕分享)。 -* @prara sourceType 截图画面来源,支持视频流(TRTCSnapshotSourceTypeStream)和视频渲染画面(TRTCSnapshotSourceTypeView) -* @param completionBlock 画面截取后的回调。 -* -* @note 设置 userId = nil,代表截取当前用户的本地画面,目前本地画面仅支持截取主路画面(TRTCVideoStreamTypeBig)。 -*/ -- (void)snapshotVideo:(NSString *)userId - type:(TRTCVideoStreamType)type - sourceType:(TRTCSnapshotSourceType)sourceType - completionBlock:(void (^)(TXImage *image))completionBlock; + * 4.21 切换指定远端用户的大小画面 + * + * 当房间中某个主播开启了双路编码之后,房间中其他用户通过 {@link startRemoteView} 订阅到的画面默认会是【高清大画面】。 + * 您可以通过此接口选定希望订阅的画面是大画面还是小画面,该接口在 {@link startRemoteView} 之前和之后调用均可生效。 + * @param userId 指定远端用户的 ID。 + * @param streamType 视频流类型,即选择看大画面还是小画面,默认为大画面。 + * @note 此功能需要目标用户已经通过 {@link enableEncSmallVideoStream} 提前开启了双路编码模式,否则此调用无实际效果。 + */ +- (void)setRemoteVideoStreamType:(NSString *)userId type:(TRTCVideoStreamType)streamType; -/// @} +/** + * 4.22 视频画面截图 + * + * 您可以通过本接口截取本地的视频画面,远端用户的主路画面以及远端用户的辅路(屏幕分享)画面。 + * @param userId 用户 ID,如指定空置表示截取本地的视频画面。 + * @param streamType 视频流类型,可选择截取主路画面({@link TRTCVideoStreamTypeBig},常用于摄像头)或辅路画面({@link TRTCVideoStreamTypeSub},常用于屏幕分享)。 + * @param sourceType 画面来源,可选择截取视频流画面({@link TRTCSnapshotSourceTypeStream})或视频渲染画面({@link TRTCSnapshotSourceTypeView}),前者一般更清晰。 + * @note Windows 平台目前仅支持截取 {@link TRTCSnapshotSourceTypeStream} 来源的视频画面。 + */ +- (void)snapshotVideo:(nullable NSString *)userId type:(TRTCVideoStreamType)streamType sourceType:(TRTCSnapshotSourceType)sourceType completionBlock:(void (^)(TXImage *image))completionBlock; +/// @} 
///////////////////////////////////////////////////////////////////////////////// // -// (四)音频相关接口函数 +// 音频相关接口函数 // ///////////////////////////////////////////////////////////////////////////////// -#pragma mark - 音频相关接口函数 -/// @name 音频相关接口函数 +/// @name 音频相关接口函数 /// @{ /** - * 4.1 开启本地音频的采集和上行 - * - * SDK 默认不采集声音,您需要调用该函数启动麦克风采集,并将音频数据传输给房间里的其他用户。 + * 5.1 开启本地音频的采集和发布 * + * SDK 默认不开启麦克风,当用户需要发布本地音频时,需要调用该接口开启麦克风采集,并将音频编码并发布到当前的房间中。 + * 开启本地音频的采集和发布后,房间中的其他用户会收到 {@link onUserAudioAvailable}(userId, YES) 的通知。 * @param quality 声音音质 - * - {@link TRTCCloudDef#TRTCAudioQualitySpeech}, 流畅:采样率:16k;单声道;音频裸码率:16kbps;适合语音通话为主的场景,比如在线会议,语音通话。 - * - {@link TRTCCloudDef#TRTCAudioQualityDefault},默认:采样率:48k;单声道;音频裸码率:50kbps;SDK 默认的音频质量,如无特殊需求推荐选择之。 - * - {@link TRTCCloudDef#TRTCAudioQualityMusic},高音质:采样率:48k;双声道 + 全频带;音频裸码率:128kbps;适合需要高保真传输音乐的场景,比如K歌、音乐直播等。 - * - * @note 该函数会检查麦克风的使用权限,如果当前 App 没有麦克风权限,SDK 会向用户申请开启。 + * - {@link TRTCAudioQualitySpeech},流畅:采样率:16k;单声道;音频裸码率:16kbps;适合语音通话为主的场景,比如在线会议,语音通话。 + * - {@link TRTCAudioQualityDefault},默认:采样率:48k;单声道;音频裸码率:50kbps;SDK 默认的音频质量,如无特殊需求推荐选择之。 + * - {@link TRTCAudioQualityMusic},高音质:采样率:48k;双声道 + 全频带;音频裸码率:128kbps;适合需要高保真传输音乐的场景,比如在线K歌、音乐直播等。 + * @note 该函数会检查麦克风的使用权限,如果当前 App 没有麦克风权限,SDK 会自动向用户申请麦克风使用权限。 */ - (void)startLocalAudio:(TRTCAudioQuality)quality; /** - * 4.2 关闭本地音频的采集和上行 + * 5.2 停止本地音频的采集和发布 * - * 当关闭本地音频的采集和上行,房间里的其它成员会收到 onUserAudioAvailable(NO) 回调通知。 + * 停止本地音频的采集和发布后,房间中的其他用户会收到 {@link onUserAudioAvailable}(userId, NO) 的通知。 */ - (void)stopLocalAudio; /** - * 4.3 静音/取消静音本地的音频 - * - * 当静音本地音频后,房间里的其它成员会收到 onUserAudioAvailable(userId, NO) 回调通知。 - * 当取消静音本地音频后,房间里的其它成员会收到 onUserAudioAvailable(userId, YES) 回调通知。 + * 5.3 暂停/恢复发布本地的音频流 * - * 与 stopLocalAudio 不同之处在于,muteLocalAudio:YES 并不会停止发送音视频数据,而是继续发送码率极低的静音包。 - * 由于 MP4 等视频文件格式,对于音频的连续性是要求很高的,使用 stopLocalAudio 会导致录制出的 MP4 不易播放。 - * 因此在对录制质量要求很高的场景中,建议选择 muteLocalAudio,从而录制出兼容性更好的 MP4 文件。 - * - * @param mute YES:静音;NO:取消静音 + * 当您暂停发布本地音频流之后,房间中的其他他用户会收到 {@link 
onUserAudioAvailable}(userId, NO) 的通知。 + * 当您恢复发布本地音频流之后,房间中的其他他用户会收到 {@link onUserAudioAvailable}(userId, YES) 的通知。 + * 与 {@link stopLocalAudio} 的不同之处在于,muteLocalAudio(YES) 并不会释放麦克风权限,而是继续发送码率极低的静音包。 + * 这对于需要云端录制的场景非常适用,因为 MP4 等格式的视频文件,对于音频数据的连续性要求很高,使用 {@link stopLocalAudio} 会导致录制出的 MP4 文件不易播放。 + * 因此在对录制文件的质量要求较高的场景中,建议选择 muteLocalAudio 而不建议使用 stopLocalAudio。 + * @param mute YES:静音;NO:恢复。 */ - (void)muteLocalAudio:(BOOL)mute; /** - * 4.4 静音/取消静音指定的远端用户的声音 - * - * @param userId 对方的用户 ID - * @param mute YES:静音;NO:取消静音 + * 5.4 暂停/恢复播放远端的音频流 * - * @note 静音时会停止接收该用户的远端音频流并停止播放,取消静音时会自动拉取该用户的远端音频流并进行播放。 - * 您在 enterRoom 之前或之后调用此 API 均会进入屏蔽状态,屏蔽状态在您调用 exitRoom 之后会被重置为 false。 + * 当您静音某用户的远端音频时,SDK 会停止播放指定用户的声音,同时也会停止拉取该用户的音频数据数据。 + * @param userId 用于指定远端用户的 ID。 + * @param mute YES:静音;NO:取消静音。 + * @note 在进入房间(enterRoom)之前或之后调用本接口均生效,静音状态在退出房间(exitRoom) 之后会被重置为 NO。 */ - (void)muteRemoteAudio:(NSString *)userId mute:(BOOL)mute; /** - * 4.5 静音/取消静音所有用户的声音 + * 5.5 暂停/恢复播放所有远端用户的音频流 * - * @param mute YES:静音;NO:取消静音 - * - * @note 静音时会停止接收所有用户的远端音频流并停止播放,取消静音时会自动拉取所有用户的远端音频流并进行播放。 - * 您在 enterRoom 之前或之后调用此 API 均会进入屏蔽状态,屏蔽状态在您调用 exitRoom 之后会被重置为 false。 + * 当您静音所有用户的远端音频时,SDK 会停止播放所有来自远端的音频流,同时也会停止拉取所有用户的音频数据。 + * @param mute YES:静音;NO:取消静音。 + * @note 在进入房间(enterRoom)之前或之后调用本接口均生效,静音状态在退出房间(exitRoom) 之后会被重置为 NO。 */ - (void)muteAllRemoteAudio:(BOOL)mute; /** - * 4.6 设置音频路由 - * - * 微信和手机 QQ 视频通话功能的免提模式就是基于音频路由实现的。 - * 一般手机都有两个扬声器,一个是位于顶部的听筒扬声器,声音偏小;一个是位于底部的立体声扬声器,声音偏大。 - * 设置音频路由的作用就是决定声音使用哪个扬声器播放。 + * 5.6 设置音频路由 * - * @param route 音频路由,即声音由哪里输出(扬声器、听筒),默认值:TRTCAudioModeSpeakerphone + * 设置“音频路由”,即设置声音是从手机的扬声器还是从听筒中播放出来,因此该接口仅适用于手机等移动端设备。 + * 手机有两个扬声器:一个是位于手机顶部的听筒,一个是位于手机底部的立体声扬声器。 + * 设置音频路由为听筒时,声音比较小,只有将耳朵凑近才能听清楚,隐私性较好,适合用于接听电话。 + * 设置音频路由为扬声器时,声音比较大,不用将手机贴脸也能听清,因此可以实现“免提”的功能。 + * @param route 音频路由,即声音由哪里输出(扬声器、听筒),默认值:TRTCAudioModeSpeakerphone。 */ - (void)setAudioRoute:(TRTCAudioRoute)route; /** - * 4.7 设置某个远程用户的播放音量 - * - * @param userId 远程用户 ID - * @param volume 
音量大小,100为原始音量,范围是:[0 ~ 150],默认值为100 + * 5.7 设定某一个远端用户的声音播放音量 * - * @note 如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 + * 您可以通过 setRemoteAudioVolume(userId, 0) 将某一个远端用户的声音静音。 + * @param userId 用于指定远端用户的 ID。 + * @param volume 音量大小,取值范围为0 - 100,默认值:100。 + * @note 如果将 volume 设置成 100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 */ - (void)setRemoteAudioVolume:(NSString *)userId volume:(int)volume; /** - * 4.8 设置 SDK 采集音量。 + * 5.8 设定本地音频的采集音量 * - * @param volume 音量大小,100为原始音量,范围是:[0 ~ 150],默认值为100 - * - * @note 如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 + * @param volume 音量大小,取值范围为0 - 100;默认值:100 + * @note 如果将 volume 设置成 100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 */ - (void)setAudioCaptureVolume:(NSInteger)volume; /** - * 4.9 获取 SDK 采集音量 + * 5.9 获取本地音频的采集音量 */ - (NSInteger)getAudioCaptureVolume; /** - * 4.10 设置 SDK 播放音量。 + * 5.10 设定远端音频的播放音量 * - * @param volume 音量大小,100为原始音量,范围是:[0 ~ 150],默认值为100 + * 该接口会控制 SDK 最终交给系统播放的声音音量,调节效果会影响到本地音频录制文件的音量大小,但不会影响到耳返的音量大小。 * - * @note - * 1. 该函数会控制最终交给系统播放的声音音量,会影响录制本地音频文件的音量大小,但不会影响耳返的音量。<br> - * 2. 
如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 + * @param volume 音量大小,取值范围为0 - 100,默认值:100。 + * @note 如果将 volume 设置成 100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 */ - (void)setAudioPlayoutVolume:(NSInteger)volume; /** - * 4.11 获取 SDK 播放音量 + * 5.11 获取远端音频的播放音量 */ - (NSInteger)getAudioPlayoutVolume; /** - * 4.12 启用音量大小提示 + * 5.12 启用音量大小提示 * - * 开启此功能后,SDK 会在 onUserVoiceVolume() 中反馈对每一路声音音量大小值的评估。 - * 如需打开此功能,请在 startLocalAudio() 之前调用。 - * - * @note Demo 中有一个音量大小的提示条,就是基于这个接口实现的。 - * @param interval 设置 onUserVoiceVolume 回调的触发间隔,单位为ms,最小间隔为100ms,如果小于等于0则会关闭回调,建议设置为300ms; + * 开启此功能后,SDK 会在 {@link TRTCCloudDelegate} 中的 {@link onUserVoiceVolume} 回调中反馈远端音频的音量大小。 + * @note 如需打开此功能,请在 startLocalAudio 之前调用才可以生效。 + * @param interval 设置 onUserVoiceVolume 回调的触发间隔,单位为ms,最小间隔为100ms,如果小于等于 0 则会关闭回调,建议设置为300ms; */ - (void)enableAudioVolumeEvaluation:(NSUInteger)interval; /** - * 4.13 开始录音 - * - * 该方法调用后, SDK 会将通话过程中的所有音频(包括本地音频,远端音频,BGM 等)录制到一个文件里。 - * 无论是否进房,调用该接口都生效。 - * 如果调用 exitRoom 时还在录音,录音会自动停止。 + * 5.13 开始录音 * - * @param param 录音参数,请参考 TRTCAudioRecordingParams - * @return 0:成功;-1:录音已开始;-2:文件或目录创建失败;-3:后缀指定的音频格式不支持 + * 当您调用该接口后, SDK 会将本地和远端的所有音频(包括本地音频,远端音频,背景音乐和音效等)混合并录制到一个本地文件中。 + * 该接口在进入房间前后调用均可生效,如果录制任务在退出房间前尚未通过 stopAudioRecording 停止,则退出房间后录制任务会自动被停止。 + * @param param 录音参数,请参考 {@link TRTCAudioRecordingParams} + * @return 0:成功;-1:录音已开始;-2:文件或目录创建失败;-3:后缀指定的音频格式不支持。 */ -- (int)startAudioRecording:(TRTCAudioRecordingParams*) param; +- (int)startAudioRecording:(TRTCAudioRecordingParams *)param; /** - * 4.14 停止录音 + * 5.14 停止录音 * - * 如果调用 exitRoom 时还在录音,录音会自动停止。 + * 如果录制任务在退出房间前尚未通过本接口停止,则退出房间后录音任务会自动被停止。 */ - (void)stopAudioRecording; -#if TARGET_OS_IPHONE /** - * 4.15 开启本地媒体录制(iOS) - * - * 开启后把直播过程中的音视频数据录制存储到本地文件。 - * 应用场景: - * 1. 不推流情况下,通过调用 startLocalPreview 预览画面后,进行录制。 - * 2. 
在推流的同时进行录制,把直播的全程录制保存到本地文件。 - * - * @param params 录制参数,请参考 {@link TRTCCloudDef#TRTCLocalRecordingParams} + * 5.15 开启本地媒体录制 * + * 开启后把直播过程中的音视和视频内容录制到本地的一个文件中。 + * @param params 录制参数,请参考 {@link TRTCLocalRecordingParams} */ - (void)startLocalRecording:(TRTCLocalRecordingParams *)params; /** - * 4.16 停止录制 + * 5.16 停止本地媒体录制 * - * 如果调用 exitRoom 时还在录制,录制会自动停止。 + * 如果录制任务在退出房间前尚未通过本接口停止,则退出房间后录音任务会自动被停止。 */ - (void)stopLocalRecording; -#endif - -#if !TARGET_OS_IPHONE && TARGET_OS_MAC /** - * 4.17 开始录制系统声音,仅适用 Mac 平台 - * - * 开启系统声卡采集,并将其混入上行音频流中,从而可以直播当前 Mac 系统的声音(如电影播出的声音)。 - * - * @note - * 1. 此功能需要为用户的 Mac 系统安装虚拟音频设备插件,安装完成后,SDK 会从已经安装的虚拟音频设备中采集声音。 - * 2. SDK 会自动从网络上下载合适的插件进行安装,但是下载速度可能比较慢,如果您希望加速这个过程,可以将虚拟音频插件文件打包到您 App Bundle 的 Resources 目录下。 + * 5.18 设置远端音频流智能并发播放策略 * + * 设置远端音频流智能并发播放策略,适用于上麦人数比较多的场景。 + * @param params 音频并发参数,请参考 {@link TRTCAudioParallelParams} */ -- (void)startSystemAudioLoopback; - -/** - * 4.18 停止录制系统声音,仅适用 Mac 平台 - */ -- (void)stopSystemAudioLoopback; - -/** - * 4.19 设置系统声音采集的音量,仅适用 Mac 平台 - * - * @param volume 设置的音量大小,范围是:[0 ~ 150],默认值为100 - * - */ -- (void)setSystemAudioLoopbackVolume:(uint32_t)volume; - -#endif +- (void)setRemoteAudioParallelParams:(TRTCAudioParallelParams *)params; /// @} - - - ///////////////////////////////////////////////////////////////////////////////// // -// (五)设备管理相关接口 +// 设备管理相关接口 // ///////////////////////////////////////////////////////////////////////////////// -#pragma mark - 设备管理接口 -/// @name 设备管理接口 +/// @name 设备管理相关接口 /// @{ /** - * 5.1 获取设备管理类 TXDeviceManager + * 6.1 获取设备管理类(TXDeviceManager) */ - (TXDeviceManager *)getDeviceManager; -/// @} - +/// @} ///////////////////////////////////////////////////////////////////////////////// // -// (六)美颜特效和图像水印 +// 美颜特效和图像水印 // ///////////////////////////////////////////////////////////////////////////////// -#pragma mark - 美颜特效和变脸特效 -/// @name 美颜特效和变脸特效 +/// @name 美颜特效和图像水印 /// @{ /** - * 6.1 获取美颜管理对象 + * 7.1 获取美颜管理类(TXBeautyManager) * * 通过美颜管理,您可以使用以下功能: - * - 
设置"美颜风格"、“美白”、“红润”、“大眼”、“瘦脸”、“V脸”、“下巴”、“短脸”、“小鼻”、“亮眼”、“白牙”、“祛眼袋”、“祛皱纹”、“祛法令纹”等美容效果。 - * - 调整“发际线”、“眼间距”、“眼角”、“嘴形”、“鼻翼”、“鼻子位置”、“嘴唇厚度”、“脸型” - * - 设置人脸挂件(素材)等动态效果 - * - 添加美妆 - * - 进行手势识别 + * - 设置"磨皮"、“美白”、“红润”等美颜特效。 + * - 设置“大眼”、“瘦脸”、“V脸”、“下巴”、“短脸”、“小鼻”、“亮眼”、“白牙”、“祛眼袋”、“祛皱纹”、“祛法令纹”等修脸特效。 + * - 设置“发际线”、“眼间距”、“眼角”、“嘴形”、“鼻翼”、“鼻子位置”、“嘴唇厚度”、“脸型”等修脸特效。 + * - 设置"眼影"、“腮红”等美妆特效。 + * - 设置动态贴纸和人脸挂件等动画特效。 */ - (TXBeautyManager *)getBeautyManager; /** - * 6.2 添加水印 + * 7.2 添加水印 * - * 水印的位置是通过 rect 来指定的,rect 的格式为 (x,y,width,height) + * 水印的位置是通过 rect 参数来指定的,rect 是一个四元组参数,其格式为 (x,y,width,height) * - x:水印的坐标,取值范围为0 - 1的浮点数。 * - y:水印的坐标,取值范围为0 - 1的浮点数。 * - width:水印的宽度,取值范围为0 - 1的浮点数。 * - height:是不用设置的,SDK 内部会根据水印图片的宽高比自动计算一个合适的高度。 * - * 例如,如果当前编码分辨率是540 × 960,rect 设置为(0.1,0.1,0.2,0.0)。 - * 那么水印的左上坐标点就是(540 × 0.1,960 × 0.1)即(54,96),水印的宽度是 540 × 0.2 = 108px,高度自动计算。 + * 参数设置举例: + * 如果当前视频的编码分辨率是 540 × 960,且 rect 参数被您设置为(0.1,0.1,0.2,0.0), + * 那么水印的左上坐标点就是(540 × 0.1,960 × 0.1)即(54,96),水印的宽度是 540 × 0.2 = 108px,水印的高度会根据水印图片的宽高比由 SDK 自动算出。 * - * @param image 水印图片,**必须使用透明底的 png 格式** - * @param streamType 如果要给辅路画面(TRTCVideoStreamTypeSub,一般用于屏幕分享)也设置水印,需要调用两次的 setWatermark。 + * @param image 水印图片,**必须使用透明底色的 png 格式** + * @param streamType 指定给哪一路画面设置水印,详情请参考{@link TRTCVideoStreamType}。 * @param rect 水印相对于编码分辨率的归一化坐标,x,y,width,height 取值范围0 - 1。 + * @note 如果您要给主画面(一般为摄像头)和辅路画面(一般用作屏幕分享)同时设置水印,需要调用该接口两次,并设定不同的 streamType。 */ -- (void)setWatermark:(TXImage*)image streamType:(TRTCVideoStreamType)streamType rect:(CGRect)rect; +- (void)setWatermark:(nullable TXImage *)image streamType:(TRTCVideoStreamType)streamType rect:(CGRect)rect; /// @} - - ///////////////////////////////////////////////////////////////////////////////// // -// (七)音乐特效和人声特效 +// 背景音乐和声音特效 // ///////////////////////////////////////////////////////////////////////////////// -#pragma mark - 音乐特效和人声特效 -/// @name 音乐特效和人声特效 +/// @name 背景音乐和声音特效 /// @{ /** -* 7.1 获取音效管理类 TXAudioEffectManager -* -* 该模块是整个 SDK 的音效管理模块,支持如下功能: -* - 
耳机耳返:麦克风捕捉的声音实时通过耳机播放。 -* - 混响效果:KTV、小房间、大会堂、低沉、洪亮... -* - 变声特效:萝莉、大叔、重金属、外国人... -* - 背景音乐:支持在线音乐和本地音乐,支持变速、变调等特效、支持原生和伴奏并播放和循环播放。 -* - 短音效:鼓掌声、欢笑声等简短的音效文件,对于小于10秒的文件,请将 isShortFile 参数设置为 YES。 -*/ + * 8.1 获取音效管理类(TXAudioEffectManager) + * + * TXAudioEffectManager 是音效管理接口,您可以通过该接口实现如下功能: + * - 背景音乐:支持在线音乐和本地音乐,支持变速、变调等特效、支持原生和伴奏并播放和循环播放。 + * - 耳机耳返:麦克风捕捉的声音实时通过耳机播放,常用于音乐直播。 + * - 混响效果:KTV、小房间、大会堂、低沉、洪亮... + * - 变声特效:萝莉、大叔、重金属... + * - 短音效:鼓掌声、欢笑声等简短的音效文件(对于小于10秒的文件,请将 isShortFile 参数设置为 YES)。 + */ - (TXAudioEffectManager *)getAudioEffectManager; -/// @} - -///////////////////////////////////////////////////////////////////////////////// -// -// (八)屏幕分享相关接口函数 -// -///////////////////////////////////////////////////////////////////////////////// -#pragma mark - 屏幕分享相关接口函数 -/// @name 屏幕分享相关接口函数 -/// @{ - -#if TARGET_OS_IPHONE /** - * 8.1 开始应用内的屏幕分享(该接口仅支持 iOS 13.0 及以上的 iPhone 和 iPad) + * 8.2 开启系统声音采集(仅适用于 Mac 系统) * - * iPhone 屏幕分享的推荐配置参数: - * - 分辨率(videoResolution): 1280 x 720 - * - 帧率(videoFps): 10 FPS - * - 码率(videoBitrate): 1600 kbps - * - 分辨率自适应(enableAdjustRes): NO - * - * @param encParams 设置屏幕分享时的编码参数,推荐采用上述推荐配置,如果您指定 encParams 为 nil,则使用您调用 startScreenCapture 之前的编码参数设置。 + * 该接口会从电脑的声卡中采集音频数据,并将其混入到 SDK 当前的音频数据流中,从而使房间中的其他用户也能听到主播的电脑所播放出的声音。 + * 在线教育场景中,老师可以使用此功能让 SDK 采集教学影片中的声音,并广播给同房间中的学生。 + * 音乐直播场景中,主播可以使用此功能让 SDK 采集音乐播放器中的音乐,从而为自己的直播间增加背景音乐。 + * @note + * 1. 此功能需要为用户的 Mac 系统安装虚拟音频设备插件,安装完成后,SDK 会从已经安装的虚拟音频设备中采集声音。 + * 2. 
SDK 会自动从网络上下载合适的插件进行安装,但是下载速度可能比较慢,如果您希望加速这个过程,可以将虚拟音频插件文件打包到您 App Bundle 的 Resources 目录下。 */ -- (void)startScreenCaptureInApp:(TRTCVideoEncParam *)encParams API_AVAILABLE(ios(13.0)); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)startSystemAudioLoopback; +#endif /** - * 8.2 开始全系统的屏幕分享(该接口支持 iOS 11.0 及以上的 iPhone 和 iPad) - * - * 该接口支持共享整个 iOS 系统的屏幕,可以实现类似腾讯会议的全系统级的屏幕分享。 - * 但是实现复杂度要比 startScreenCaptureInApp 略繁琐一些,需要参考文档为 App 实现一个 Replaykit 扩展模块。 - * - * 参考文档:[屏幕录制]https://cloud.tencent.com/document/product/647/32249 - * - * iPhone 屏幕分享的推荐配置参数: - * - 分辨率(videoResolution): 1280 x 720 - * - 帧率(videoFps): 10 FPS - * - 码率(videoBitrate): 1600 kbps - * - 分辨率自适应(enableAdjustRes): NO + * 8.3 停止系统声音采集(仅适用于桌面系统和 Android 系统) + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)stopSystemAudioLoopback; +#endif + +/** + * 8.4 设置系统声音的采集音量 * - * @param encParams 设置屏幕分享时的编码参数,推荐采用上述推荐配置,如果您指定 encParams 为 nil,则使用您调用 startScreenCapture 之前的编码参数设置。 - * @param appGroup 主 App 与 Broadcast 共享的 Application Group Identifier,可以指定为 nil,但按照文档设置会使功能更加可靠。 + * @param volume 设置的音量大小,范围是:[0 ~ 150],默认值为100。 */ -- (void)startScreenCaptureByReplaykit:(TRTCVideoEncParam *)encParams - appGroup:(NSString *)appGroup API_AVAILABLE(ios(11.0)); +- (void)setSystemAudioLoopbackVolume:(uint32_t)volume; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// 屏幕分享相关接口 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 屏幕分享相关接口 +/// @{ /** - * 8.3 开始应用内的屏幕分享(该接口仅支持 iOS 13.0 及以上的 iPhone 和 iPad) + * 9.1 开始应用内的屏幕分享(仅支持 iOS 13.0 及以上系统) * - * iPhone 屏幕分享的推荐配置参数: - * - 分辨率(videoResolution): 1280 x 720 - * - 帧率(videoFps): 10 FPS - * - 码率(videoBitrate): 1600 kbps - * - 分辨率自适应(enableAdjustRes): NO + * 该接口会抓取当前应用内的实时屏幕内容并将其分享给同房间中的其他用户,适用于 13.0 以上的 iOS 系统。 + * 如果您希望抓取整个 iOS 系统的屏幕内容(而不受当前应用的限制),推荐使用 {@link startScreenCaptureByReplaykit}。 + * iPhone 推荐的屏幕分享视频编码参数({@link TRTCVideoEncParam}): + * - 分辨率(videoResolution): 1280 x 720 + * 
- 帧率(videoFps): 10 FPS + * - 码率(videoBitrate): 1600 kbps + * - 分辨率自适应(enableAdjustRes): NO * * @param streamType 屏幕分享使用的线路,可以设置为主路(TRTCVideoStreamTypeBig)或者辅路(TRTCVideoStreamTypeSub)。 - * @param encParams 设置屏幕分享时的编码参数,推荐采用上述推荐配置,如果您指定 encParams 为 nil,则使用您调用 startScreenCapture 之前的编码参数设置。 + * @param encParams 设置屏幕分享时的视频编码参数,推荐采用上述推荐配置。 + * 如果您指定 encParams 为 nil,SDK 会使用您在调用 startScreenCapture 接口之前所设置的视频编码参数。 */ +#if TARGET_OS_IPHONE - (void)startScreenCaptureInApp:(TRTCVideoStreamType)streamType encParam:(TRTCVideoEncParam *)encParams API_AVAILABLE(ios(13.0)); +#endif /** - * 8.4 开始全系统的屏幕分享(该接口支持 iOS 11.0 及以上的 iPhone 和 iPad) + * 9.1 开始全系统的屏幕分享(仅支持 iOS 11.0 及以上系统) * - * 该接口支持共享整个 iOS 系统的屏幕,可以实现类似腾讯会议的全系统级的屏幕分享。 - * 但是实现复杂度要比 startScreenCaptureInApp 略繁琐一些,需要参考文档为 App 实现一个 Replaykit 扩展模块。 + * 该接口支持抓取整个 iOS 系统的屏幕,可以实现类似腾讯会议的全系统级的屏幕分享。 + * 但对接步骤要比 {@link startScreenCaptureInApp} 略繁琐一些,需要为您的应用实现一个 Replaykit 扩展模块。 + * 参考文档:[实时屏幕分享(iOS)](https://cloud.tencent.com/document/product/647/45750) + * iPhone 推荐的屏幕分享视频编码参数({@link TRTCVideoEncParam}): + * - 分辨率(videoResolution): 1280 x 720 + * - 帧率(videoFps): 10 FPS + * - 码率(videoBitrate): 1600 kbps + * - 分辨率自适应(enableAdjustRes): NO * - * 参考文档:[屏幕录制]https://cloud.tencent.com/document/product/647/32249 - * - * iPhone 屏幕分享的推荐配置参数: - * - 分辨率(videoResolution): 1280 x 720 - * - 帧率(videoFps): 10 FPS - * - 码率(videoBitrate): 1600 kbps - * - 分辨率自适应(enableAdjustRes): NO - * - * @param streamType 屏幕分享使用的线路,可以设置为主路(TRTCVideoStreamTypeBig)或者辅路(TRTCVideoStreamTypeSub),默认使用辅路。 - * @param encParams 设置屏幕分享时的编码参数,推荐采用上述推荐配置,如果您指定 encParams 为 nil,则使用您调用 startScreenCapture 之前的编码参数设置。 - * @param appGroup 主 App 与 Broadcast 共享的 Application Group Identifier,可以指定为 nil,但按照文档设置会使功能更加可靠。 + * @param streamType 屏幕分享使用的线路,可以设置为主路(TRTCVideoStreamTypeBig)或者辅路(TRTCVideoStreamTypeSub)。 + * @param encParams 设置屏幕分享时的视频编码参数,推荐采用上述推荐配置。如果您指定 encParams 为 nil,SDK 会使用您在调用 startScreenCapture 接口之前所设置的视频编码参数。 + * @param appGroup 用于指定您的应用与录屏进程共享的 Application Group 
Identifier,您可以指定该参数为 nil,但推荐您按照文档指示进行设置,从而获得更好的可靠性。 */ -- (void)startScreenCaptureByReplaykit:(TRTCVideoStreamType)streamType encParam:(TRTCVideoEncParam *)encParams - appGroup:(NSString *)appGroup API_AVAILABLE(ios(11.0)); - -#elif TARGET_OS_MAC +#if TARGET_OS_IPHONE +- (void)startScreenCaptureByReplaykit:(TRTCVideoStreamType)streamType encParam:(TRTCVideoEncParam *)encParams appGroup:(NSString *)appGroup API_AVAILABLE(ios(11.0)); +#endif /** - * 8.3 开始桌面端屏幕分享(该接口仅支持 Mac OS 桌面系统) + * 9.1 开始桌面端屏幕分享(该接口仅支持桌面系统) * - * @param view 渲染控件所在的父控件,可以设置为 nil,表示不显示屏幕分享的预览效果。 - * @param streamType 屏幕分享使用的线路,可以设置为主路(TRTCVideoStreamTypeBig)或者辅路(TRTCVideoStreamTypeSub),默认使用辅路。 + * 该接口可以抓取整个 Mac OS 系统的屏幕内容,或抓取您指定的某个应用的窗口内容,并将其分享给同房间中的其他用户。 + * @param view 渲染控件所在的父控件,可以设置为空值,表示不显示屏幕分享的预览效果。 + * @param streamType 屏幕分享使用的线路,可以设置为主路(TRTCVideoStreamTypeBig)或者辅路(TRTCVideoStreamTypeSub),推荐使用辅路。 * @param encParam 屏幕分享的画面编码参数,SDK 会优先使用您通过此接口设置的编码参数: - * - 如果 encParam 设置为 nil,且您已通过 setSubStreamEncoderParam 设置过辅路视频编码参数,SDK 将使用您设置过的辅路编码参数进行屏幕分享。 - * - 如果 encParam 设置为 nil,且您未通过 setSubStreamEncoderParam 设置过辅路视频编码参数,SDK 将自适应选择最佳的编码参数进行屏幕分享。 + * - 如果您设置 encParam 为 nil,且您已通过 setSubStreamEncoderParam 设置过辅路视频编码参数,SDK 将使用您设置过的辅路编码参数进行屏幕分享。 + * - 如果您设置 encParam 为 nil,且您未通过 setSubStreamEncoderParam 设置过辅路视频编码参数,SDK 将自动选择一个最佳的编码参数进行屏幕分享。 * - * @note 一个用户同时最多只能上传一条主路(TRTCVideoStreamTypeBig)画面和一条辅路(TRTCVideoStreamTypeSub)画面, - * 默认情况下,屏幕分享使用辅路画面,如果使用主路画面,建议您提前停止摄像头采集(stopLocalPreview)避免相互冲突。 + * @note + * 1. 同一个用户同时最多只能发布一路主路({@link TRTCVideoStreamTypeBig})画面和一路辅路({@link TRTCVideoStreamTypeSub})画面。 + * 2. 默认情况下,屏幕分享使用辅路画面。如果使用主路做屏幕分享,您需要提前停止摄像头采集({@link stopLocalPreview})以避免相互冲突。 + * 3. 同一个房间中同时只能有一个用户使用辅路做屏幕分享,也就是说,同一个房间中同时只允许一个用户开启辅路。 + * 4. 
当房间中已经有其他用户在使用辅路分享屏幕时,此时调用该接口会收到来自 {@link TRTCCloudDelegate} 的 onError(ERR_SERVER_CENTER_ANOTHER_USER_PUSH_SUB_VIDEO) 回调。 */ -- (void)startScreenCapture:(NSView *)view streamType:(TRTCVideoStreamType)streamType encParam:(TRTCVideoEncParam *)encParam; +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)startScreenCapture:(nullable NSView *)view streamType:(TRTCVideoStreamType)streamType encParam:(nullable TRTCVideoEncParam *)encParam; #endif /** - * 8.4 停止屏幕采集 - * - * @return 0:成功;<0:失败 + * 9.2 停止屏幕分享 */ - (int)stopScreenCapture API_AVAILABLE(ios(11.0)); /** - * 8.5 暂停屏幕分享 - * - * @return 0:成功;<0:失败 + * 9.3 暂停屏幕分享 */ - (int)pauseScreenCapture API_AVAILABLE(ios(11.0)); /** - * 8.6 恢复屏幕分享 - * - * @return 0:成功;<0:失败 + * 9.4 恢复屏幕分享 */ - (int)resumeScreenCapture API_AVAILABLE(ios(11.0)); -#if !TARGET_OS_IPHONE && TARGET_OS_MAC /** - * 8.7 枚举可分享的屏幕窗口,仅支持 Mac OS 平台,建议在 startScreenCapture 之前调用 - * - * 如果您要给您的 App 增加屏幕分享功能,一般需要先显示一个窗口选择界面,这样用户可以选择希望分享的窗口。 - * 通过下列函数,您可以获得可分享窗口的 ID、类型、窗口名称以及缩略图。 - * 获取上述信息后,您就可以实现一个窗口选择界面。您也可以使用 Demo 源码中已经实现好的窗口选择界面。 - * - * @note 返回的列表中包括屏幕和应用窗口,屏幕会在列表的前面几个元素中。 + * 9.5 枚举可分享的屏幕和窗口(该接口仅支持 Mac OS 系统) * + * 当您在对接桌面端系统的屏幕分享功能时,一般都需要展示一个选择分享目标的界面,这样用户能够使用这个界面选择是分享整个屏幕还是某个窗口。 + * 通过本接口,您就可以查询到当前系统中可用于分享的窗口的 ID、名称以及缩略图。我们在 Demo 中提供了一份默认的界面实现供您参考。 * @param thumbnailSize 指定要获取的窗口缩略图大小,缩略图可用于绘制在窗口选择界面上 * @param iconSize 指定要获取的窗口图标大小 * @return 窗口列表包括屏幕 + * @note 返回的列表中包含屏幕和应用窗口,屏幕是列表中的第一个元素。如果用户有多个显示器,那么每个显示器都是一个分享目标。 */ -- (NSArray<TRTCScreenCaptureSourceInfo*>*)getScreenCaptureSourcesWithThumbnailSize:(CGSize)thumbnailSize iconSize:(CGSize)iconSize; +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (NSArray<TRTCScreenCaptureSourceInfo *> *)getScreenCaptureSourcesWithThumbnailSize:(CGSize)thumbnailSize iconSize:(CGSize)iconSize; +#endif /** - * 8.8 设置屏幕分享参数,仅支持 Mac OS 平台,该方法在屏幕分享过程中也可以调用 - * - * 如果您期望在屏幕分享的过程中,切换想要分享的窗口,可以再次调用这个函数,无需重新开启屏幕分享。 + * 9.6 选取要分享的屏幕或窗口(该接口仅支持 Mac OS 系统) * + * 当您通过 getScreenCaptureSources 获取到可以分享的屏幕和窗口之后,您可以调用该接口选定期望分享的目标屏幕或目标窗口。 + * 
在屏幕分享的过程中,您也可以随时调用该接口以切换分享目标。 * @param screenSource 指定分享源 - * @param rect 指定捕获的区域 + * @param rect 指定捕获的区域(设定该参数为 CGRectZero:当分享目标是某个窗口时则分享整个窗口,当分享目标是桌面时则分享整个桌面) * @param capturesCursor 是否捕获鼠标光标 * @param highlight 是否高亮正在分享的窗口 - * @note 当 rect 传递 CGRectZero 时:若分享目标是某一个 Mac 窗口时则默认分享整个窗口,当分享目标是 Mac 桌面时则默认分享整个桌面 - * */ -- (void)selectScreenCaptureTarget:(TRTCScreenCaptureSourceInfo *)screenSource - rect:(CGRect)rect - capturesCursor:(BOOL)capturesCursor - highlight:(BOOL)highlight; - +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)selectScreenCaptureTarget:(TRTCScreenCaptureSourceInfo *)screenSource rect:(CGRect)rect capturesCursor:(BOOL)capturesCursor highlight:(BOOL)highlight; #endif /** - * 8.9 设置辅路视频的编码器参数,适用 iOS、Mac 平台 - * - setVideoEncoderParam() 用于设置主路画面(TRTCVideoStreamTypeBig,一般用于摄像头)的编码参数。 - * - setSubStreamEncoderParam() 用于设置辅路画面(TRTCVideoStreamTypeSub,一般用于屏幕分享或者自定义辅路)的编码参数。 - * 该设置决定远端用户看到的画面质量,同时也是云端录制出的视频文件的画面质量。 + * 9.7 设置屏幕分享(即辅路)的视频编码参数(桌面系统和移动系统均已支持) * - * @param param 辅流编码参数,详情请参考 TRTCCloudDef.h 中的 TRTCVideoEncParam 定义 - * @note 即使使用主路传输屏幕分享的数据(在调用 startScreenCapture 时设置 type = TRTCVideoStreamTypeBig),依然要使用此接口更新屏幕分享或者自定义辅路的编码参数。 + * 该接口可以设定远端用户所看到的屏幕分享(即辅路)的画面质量,同时也能决定云端录制出的视频文件中屏幕分享的画面质量。 + * 请注意如下两个接口的差异: + * - {@link setVideoEncoderParam} 用于设置主路画面({@link TRTCVideoStreamTypeBig},一般用于摄像头)的视频编码参数。 + * - {@link setSubStreamEncoderParam} 用于设置辅路画面({@link TRTCVideoStreamTypeSub},一般用于屏幕分享)的视频编码参数。 + * + * @param param 辅流编码参数,详情请参考 {@link TRTCVideoEncParam}。 + * @note 即使您使用主路传输屏幕分享(在调用 startScreenCapture 时设置 type=TRTCVideoStreamTypeBig),依然要使用 {@link setSubStreamEncoderParam} 设定屏幕分享的编码参数,而不要使用 {@link setVideoEncoderParam} 。 */ - (void)setSubStreamEncoderParam:(TRTCVideoEncParam *)param; - -#if !TARGET_OS_IPHONE && TARGET_OS_MAC - /** - * 8.10 设置屏幕分享的混音音量大小,仅适用 Mac 平台 + * 9.8 设置屏幕分享时的混音音量大小(该接口仅支持桌面系统) * - * 数值越高,辅路音量的占比越高,麦克风音量占比越小。不推荐将该参数值设置过大,数值太大容易压制麦克风的声音。 - * - * @param volume 设置的音量大小,范围0 - 100 + * 
这个数值越高,屏幕分享音量的占比就越高,麦克风音量占比就越小,所以不推荐设置得太大,否则麦克风的声音就被压制了。 + * @param volume 设置的混音音量大小,范围0 - 100。 */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC - (void)setSubStreamMixVolume:(NSInteger)volume; +#endif /** - * 8.11 将指定窗口加入屏幕分享的排除列表中,加入排除列表中的窗口不会被分享出去,仅适用 Mac 平台 + * 9.9 将指定窗口加入屏幕分享的排除列表中(该接口仅支持桌面系统) * + * 加入排除列表中的窗口不会被分享出去,常见的用法是将某个应用的窗口加入到排除列表中以避免隐私问题。 * 支持启动屏幕分享前设置过滤窗口,也支持屏幕分享过程中动态添加过滤窗口。 - * - * @param windowID 不希望分享出去的窗口ID + * @param windowID 不希望分享出去的窗口 + * @note + * 1. 该接口只有在 {@link TRTCScreenCaptureSourceInfo} 中的 type 指定为 {@link TRTCScreenCaptureSourceTypeScreen} 时生效,即只有在分享整个屏幕内容时,排除指定窗口的功能才生效。 + * 2. 使用该接口添加到排除列表中的窗口会在退出房间后被 SDK 自动清除。 + * 3. Mac 平台下请传入窗口 ID(即 CGWindowID),您可以通过 {@link TRTCScreenCaptureSourceInfo} 中的 sourceId 成员获得。 */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC - (void)addExcludedShareWindow:(NSInteger)windowID; +#endif /** - * 8.12 将指定窗口从屏幕分享的排除列表中移除,仅适用 Mac 平台 + * 9.10 将指定窗口从屏幕分享的排除列表中移除(该接口仅支持桌面系统) * - * @param windowID 不希望分享出去的窗口ID + * @param windowID */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC - (void)removeExcludedShareWindow:(NSInteger)windowID; +#endif /** - * 8.13 将所有窗口从屏幕分享的排除列表中移除,仅适用 Mac 平台 + * 9.11 将所有窗口从屏幕分享的排除列表中移除(该接口仅支持桌面系统) */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC - (void)removeAllExcludedShareWindows; +#endif /** - * 8.14 将指定窗口加入窗口分享的包含列表中,加入包含列表中的窗口会被一起分享出去,仅适用 Mac 平台 + * 9.12 将指定窗口加入屏幕分享的包含列表中(该接口仅支持桌面系统) * - * 支持启动屏幕分享前设置指定窗口,也支持屏幕分享过程中动态添加指定窗口。 - * - * @param windowID 希望分享出去的窗口ID + * 该接口只有在 {@link TRTCScreenCaptureSourceInfo} 中的 type 指定为 {@link TRTCScreenCaptureSourceTypeWindow} 时生效。即只有在分享窗口内容时,额外包含指定窗口的功能才生效。 + * 您在 {@link startScreenCapture} 之前和之后调用均可。 + * @param windowID 希望被分享出去的窗口(Windows 平台下为窗口句柄: HWND) + * @note 通过该方法添加到包含列表中的窗口,会在退出房间后被 SDK 自动清除。 */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC - (void)addIncludedShareWindow:(NSInteger)windowID; +#endif /** - * 8.15 将指定窗口从窗口分享的包含列表中移除,仅适用 Mac 平台 + * 9.13 将指定窗口从屏幕分享的包含列表中移除(该接口仅支持桌面系统) * - * @param windowID 不希望分享出去的窗口ID + * 该接口只有在 {@link TRTCScreenCaptureSourceInfo} 中的 type 指定为 
{@link TRTCScreenCaptureSourceTypeWindow} 时生效。 + * 即只有在分享窗口内容时,额外包含指定窗口的功能才生效。 + * @param windowID 希望被分享出去的窗口(Mac 平台: 窗口 ID;Windows 平台: HWND) */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC - (void)removeIncludedShareWindow:(NSInteger)windowID; +#endif /** - * 8.16 将所有窗口从窗口分享的包含列表中移除,仅适用 Mac 平台 + * 9.14 将全部窗口从屏幕分享的包含列表中移除(该接口仅支持桌面系统) + * + * 该接口只有在 {@link TRTCScreenCaptureSourceInfo} 中的 type 指定为 {@link TRTCScreenCaptureSourceTypeWindow} 时生效。 + * 即只有在分享窗口内容时,额外包含指定窗口的功能才生效。 */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC - (void)removeAllIncludedShareWindows; - #endif -/// @} +/// @} ///////////////////////////////////////////////////////////////////////////////// // -// (九)自定义采集和渲染 +// 自定义采集和自定义渲染 // ///////////////////////////////////////////////////////////////////////////////// -#pragma mark - 自定义采集和渲染 -/// @name 自定义采集和渲染 +/// @name 自定义采集和自定义渲染 /// @{ + /** - * 9.1 启用自定义视频采集模式 - * - * 开启该模式后,SDK 不再运行原有视频流上的采集流程,只保留编码和发送能力。 - * 您需要用 sendCustomVideoData:frame: 不断地向 SDK 指定 streamType 塞入自己采集的视频画面。 + * 10.1 启用/关闭视频自定义采集模式 * - * @param streamType 视频流类型: - * - 高清大画面:TRTCVideoStreamTypeBig - * - 辅流:TRTCVideoStreamTypeSub - * @param enable 是否启用,默认值:NO + * 开启该模式后,SDK 不在运行原有的视频采集流程,即不再继续从摄像头采集数据和美颜,而是只保留视频编码和发送能力。 + * 您需要通过 {@link sendCustomVideoData} 不断地向 SDK 塞入自己采集的视频画面。 + * @param streamType 用于指定视频流类型,{@link TRTCVideoStreamTypeBig}:高清大画面;{@link TRTCVideoStreamTypeSub}:辅路画面。 + * @param enable 是否启用,默认值:NO。 */ - (void)enableCustomVideoCapture:(TRTCVideoStreamType)streamType enable:(BOOL)enable; /** - * 9.2 向 SDK 中指定 streamType 投送自己采集的视频数据 + * 10.2 向 SDK 投送自己采集的视频帧 * - * TRTCVideoFrame 推荐下列填写方式(其他字段不需要填写): - * - pixelFormat:推荐选择 TRTCVideoPixelFormat_NV12。 - * - bufferType:推荐选择 TRTCVideoBufferType_PixelBuffer。 - * - pixelBuffer:iOS/Mac 平台上常用的视频数据格式。 + * 使用此接口可以向 SDK 投送自己采集的视频帧,SDK 会将视频帧进行编码并通过自身的网络模块传输出去。 + * 参数 {@link TRTCVideoFrame} 推荐下列填写方式(其他字段不需要填写): + * - pixelFormat:推荐选择 {@link TRTCVideoPixelFormat_NV12}。 + * - bufferType:推荐选择 {@link TRTCVideoBufferType_PixelBuffer}。 + * - 
pixelBuffer:iOS/Mac OS 平台上常用的视频数据格式。 * - data:视频裸数据格式,bufferType 为 NSData 时使用。 - * - timestamp:时间戳,单位毫秒(ms)。如果 timestamp 间隔不均匀,会严重影响音画同步和录制出的 MP4 质量。 + * - timestamp:时间戳,单位为毫秒(ms),请使用视频帧在采集时被记录下来的时间戳(可以在采集到一帧视频帧之后,通过调用 {@link generateCustomPTS} 获取时间戳)。 * - width:视频图像长度,bufferType 为 NSData 时填写。 * - height:视频图像宽度,bufferType 为 NSData 时填写。 * * 参考文档:[自定义采集和渲染](https://cloud.tencent.com/document/product/647/34066)。 - * - * @param streamType 自定义视频流类型: - * - 高清大画面:TRTCVideoStreamTypeBig - * - 辅流:TRTCVideoStreamTypeSub + * @param streamType 用于指定视频流类型,{@link TRTCVideoStreamTypeBig}:高清大画面;{@link TRTCVideoStreamTypeSub}:辅路画面。 * @param frame 视频数据,支持 PixelBuffer NV12,BGRA 以及 I420 格式数据。 - * @note - SDK 内部有帧率控制逻辑,目标帧率以您在 setVideoEncoderParam (高清大画面) 或者 setSubStreamEncoderParam (辅流) 中设置的为准。 - * @note - 可以设置 frame 中的 timestamp 为 0,相当于让 SDK 自己设置时间戳,但请“均匀”地控制 sendCustomVideoData 的调用间隔,否则会导致视频帧率不稳定。 + * @note + * 1. 推荐您在采集到的一帧视频帧后,即调用 {@link generateCustomPTS} 接口获取该帧的 timestamp 数值,这样可以获得最佳的音画同步效果。 + * 2. SDK 最终编码出的视频帧率并不是由您调用本接口的频率决定的,而是由您在 {@link setVideoEncoderParam} 中所设置的 FPS 决定的。 + * 3. 
请尽量保持本接口的调用间隔是均匀的,否则会导致编码器输出帧率不稳或者音画不同步等问题。 */ - (void)sendCustomVideoData:(TRTCVideoStreamType)streamType frame:(TRTCVideoFrame *)frame; /** - * 9.3 第三方美颜的视频数据回调 + * 10.3 启用音频自定义采集模式 * - * 设置此方法后,SDK 内部会在本地渲染前,把采集到的视频纹理回调出来,用于第三方美颜处理。 - * - * @param delegate 自定义预处理回调,详见 {@link TRTCVideoFrameDelegate} - * @param pixelFormat 指定回调的像素格式 - * @param bufferType 指定回调的数据格式 - * @return 0:成功;<0:错误 + * 开启该模式后,SDK 不在运行原有的音频采集流程,即不再继续从麦克风采集音频数据,而是只保留音频编码和发送能力。 + * 您需要通过 {@link sendCustomAudioData} 不断地向 SDK 塞入自己采集的音频数据。 + * @param enable 是否启用,默认值:NO。 + * @note 由于回声抵消(AEC)需要严格的控制声音采集和播放的时间,所以开启自定义音频采集后,AEC 能力可能会失效。 */ -- (int)setLocalVideoProcessDelegete:(id<TRTCVideoFrameDelegate>)delegate pixelFormat:(TRTCVideoPixelFormat)pixelFormat bufferType:(TRTCVideoBufferType)bufferType; +- (void)enableCustomAudioCapture:(BOOL)enable; /** - * 9.4 设置本地视频的自定义渲染回调 + * 10.4 向 SDK 投送自己采集的音频数据 * - * 设置此方法后,SDK 内部会跳过原来的渲染流程,并把采集到的数据回调出来,您需要自己完成画面渲染。 - * - pixelFormat 指定回调的数据格式,例如 NV12、i420 以及 32BGRA。 - * - bufferType 指定 buffer 的类型,直接使用 PixelBuffer 效率最高;使用 NSData 相当于让 SDK 在内部做了一次内存转换,因此会有额外的性能损耗。 + * 参数 {@link TRTCAudioFrame} 推荐下列填写方式(其他字段不需要填写): + * - audioFormat:音频数据格式,仅支持 TRTCAudioFrameFormatPCM。 + * - data:音频帧 buffer。音频帧数据只支持 PCM 格式,支持[5ms ~ 100ms]帧长,推荐使用 20ms 帧长,长度计算方法:【48000采样率、单声道的帧长度:48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 + * - sampleRate:采样率,支持:16000、24000、32000、44100、48000。 + * - channel:声道数(如果是立体声,数据是交叉的),单声道:1; 双声道:2。 + * - timestamp:时间戳,单位为毫秒(ms),请使用音频帧在采集时被记录下来的时间戳(可以在采集到一帧音频帧之后,通过调用 {@link generateCustomPTS} 获取时间戳)。 * - * @param delegate 自定义渲染回调 - * @param pixelFormat 指定回调的像素格式 - * @param bufferType PixelBuffer:可以直接使用 imageWithCVImageBuffer 转成 UIImage;NSData:经过内存整理的视频数据。 - * @return 0:成功;<0:错误 + * 参考文档:[自定义采集和渲染](https://cloud.tencent.com/document/product/647/34066)。 + * @param frame 音频数据 + * @note 请您精准地按每帧时长的间隔调用本接口,数据投送间隔不均匀时极易触发声音卡顿。 */ -- (int)setLocalVideoRenderDelegate:(id<TRTCVideoRenderDelegate>)delegate pixelFormat:(TRTCVideoPixelFormat)pixelFormat 
bufferType:(TRTCVideoBufferType)bufferType; +- (void)sendCustomAudioData:(TRTCAudioFrame *)frame; + +/** + * 10.5 启用/关闭自定义音轨 + * + * 开启后,您可以通过本接口向 SDK 混入一条自定义的音轨。通过两个布尔型参数,您可以控制该音轨是否要在远端和本地播放。 + * @param enablePublish 控制混入的音轨是否要在远端播放,默认值:NO。 + * @param enablePlayout 控制混入的音轨是否要在本地播放,默认值:NO。 + * @note 如果您指定参数 enablePublish 和 enablePlayout 均为 NO,代表完全关闭您的自定义音轨。 + */ +- (void)enableMixExternalAudioFrame:(BOOL)enablePublish playout:(BOOL)enablePlayout; /** - * 9.5 设置远端视频的自定义渲染回调 + * 10.6 向 SDK 混入自定义音轨 * - * 此方法同 setLocalVideoRenderDelegate,区别在于一个是本地画面的渲染回调, 一个是远程画面的渲染回调。 + * 调用该接口之前,您需要先通过 {@link enableMixExternalAudioFrame} 开启自定义音轨,之后就可以通过本接口将自己的音轨以 PCM 格式混入到 SDK 中。 + * 理想情况下,我们期望您的代码能够以非常均匀的速度向 SDK 提供音轨数据。但我们也非常清楚,完美的调用间隔是一个巨大的挑战。 + * 所以 SDK 内部会开启一个音轨数据的缓冲区,该缓冲区的作用类似一个“蓄水池”,它能够暂存您传入的音轨数据,平抑由于接口调用间隔的抖动问题。 + * 本接口的返回值代表这个音轨缓冲区的大小,单位是毫秒(ms),比如:如果该接口返回 50,则代表当前的音轨缓冲区有 50ms 的音轨数据。因此只要您在 50ms 内再次调用本接口,SDK 就能保证您混入的音轨数据是连续的。 + * 当您调用该接口后,如果发现返回值 > 100ms,则可以等待一帧音频帧的播放时间之后再次调用;如果返回值 < 100ms,则代表缓冲区比较小,您可以再次混入一些音轨数据以确保音轨缓冲区的大小维持在“安全水位”以上。 + * 参数 {@link TRTCAudioFrame} 推荐下列填写方式(其他字段不需要填写): + * - data:音频帧 buffer。音频帧数据只支持 PCM 格式,支持[5ms ~ 100ms]帧长,推荐使用 20ms 帧长,长度计算方法:【48000采样率、单声道的帧长度:48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 + * - sampleRate:采样率,支持:16000、24000、32000、44100、48000。 + * - channel:声道数(如果是立体声,数据是交叉的),单声道:1; 双声道:2。 + * - timestamp:时间戳,单位为毫秒(ms),请使用音频帧在采集时被记录下来的时间戳(可以在获得一帧音频帧之后,通过调用 {@link generateCustomPTS} 获得时间戳)。 * - * @note 调用此函数之前,需要先调用 startRemoteView 来获取远端用户的视频流(view 设置为 nil 即可),否则不会有数据回调出来。 + * @param frame 音频数据 * - * @param userId 指定目标 userId。 - * @param delegate 自定义渲染的回调。 - * @param pixelFormat 指定回调的像素格式。 - * @param bufferType PixelBuffer:可以直接使用 imageWithCVImageBuffer 转成 UIImage;NSData:经过内存整理的视频数据。 - * @return 0:成功;<0:错误 + * @return >= 0 缓冲的长度,单位:ms。< 0 错误(-1 未启用 mixExternalAudioFrame) + * + * @note 请您精准地按每帧时长的间隔调用本接口,数据投送间隔不均匀时极易触发声音卡顿。 */ -- (int)setRemoteVideoRenderDelegate:(NSString*)userId delegate:(id<TRTCVideoRenderDelegate>)delegate 
pixelFormat:(TRTCVideoPixelFormat)pixelFormat bufferType:(TRTCVideoBufferType)bufferType; +- (int)mixExternalAudioFrame:(TRTCAudioFrame *)frame; /** - * 9.6 启用音频自定义采集模式 + * 10.7 设置推流时混入外部音频的推流音量和播放音量 * - * 开启该模式后,SDK 不在运行原有的音频采集流程,只保留编码和发送能力。 - * 您需要用 sendCustomAudioData() 不断地向 SDK 塞入自己采集的音频数据。 + * @param publishVolume 设置的推流音量大小,范围0 - 100, -1表示不改变 + * @param playoutVolume 设置的播放音量大小,范围0 - 100, -1表示不改变 + */ +- (void)setMixExternalAudioVolume:(NSInteger)publishVolume playoutVolume:(NSInteger)playoutVolume; + +/** + * 10.8 生成自定义采集时的时间戳 * - * @note 由于回声抵消(AEC)需要严格的控制声音采集和播放的时间,所以开启自定义音频采集后,AEC 能力可能会失效。 + * 本接口仅适用于自定义采集模式,用于解决音视频帧的采集时间(capture time)和投送时间(send time)不一致所导致的音画不同步问题。 + * 当您通过 {@link sendCustomVideoData} 或 {@link sendCustomAudioData} 等接口进行自定义视频或音频采集时,请按照如下操作使用该接口: + * 1. 首先,在采集到一帧视频或音频帧时,通过调用本接口获得当时的 PTS 时间戳。 + * 2. 之后可以将该视频或音频帧送入您使用的前处理模块(如第三方美颜组件,或第三方音效组件)。 + * 3. 在真正调用 {@link sendCustomVideoData} 或 {@link sendCustomAudioData} 进行投送时,请将该帧在采集时记录的 PTS 时间戳赋值给 {@link TRTCVideoFrame} 或 {@link TRTCAudioFrame} 中的 timestamp 字段。 * - * @param enable 是否启用, true:启用;false:关闭,默认值:NO + * @return 时间戳(单位:ms) */ -- (void)enableCustomAudioCapture:(BOOL)enable; ++ (uint64_t)generateCustomPTS; + +/** + * 10.9 设置第三方美颜的视频数据回调 + * + * 设置该回调之后,SDK 会把采集到的视频帧通过您设置的 delegate 回调出来,用于第三方美颜组件进行二次处理,之后 SDK 会将处理后的视频帧进行编码和发送。 + * @param delegate 自定义预处理回调,详见 {@link TRTCVideoFrameDelegate} + * @param pixelFormat 指定回调的像素格式,目前仅支持 {@link TRTCVideoPixelFormat_Texture_2D} + * @param bufferType 指定回调的数据格式,目前仅支持 {@link TRTCVideoBufferType_Texture} + * @return 0:成功;<0:错误 + */ +- (int)setLocalVideoProcessDelegete:(nullable id<TRTCVideoFrameDelegate>)delegate pixelFormat:(TRTCVideoPixelFormat)pixelFormat bufferType:(TRTCVideoBufferType)bufferType; /** - * 9.7 向 SDK 投送自己采集的音频数据 - * - * TRTCAudioFrame 推荐如下填写方式: - * - * - data:音频帧 buffer。音频帧数据只支持 PCM 格式,支持[5ms ~ 100ms]帧长,推荐使用20 ms帧长,【48000采样率、单声道的帧长度:48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 - * - sampleRate:采样率,支持:16000、24000、32000、44100、48000。 - * - 
channel:声道数(如果是立体声,数据是交叉的),单声道:1; 双声道:2。 - * - timestamp:时间戳,单位毫秒(ms)。如果 timestamp 间隔不均匀,会严重影响音画同步和录制出的 MP4 质量。 + * 10.10 设置本地视频自定义渲染回调 * + * 设置该回调之后,SDK 内部会跳过原来的渲染流程,并把采集到的数据回调出来,您需要自己完成画面渲染。 + * - pixelFormat 指定回调的数据格式,例如 NV12、i420 以及 32BGRA。 + * - bufferType 指定 buffer 的类型,直接使用 PixelBuffer 效率最高;使用 NSData 相当于让 SDK 在内部做了一次内存转换,因此会有额外的性能损耗。 * 参考文档:[自定义采集和渲染](https://cloud.tencent.com/document/product/647/34066)。 - * - * @param frame 音频数据 - * @note 可以设置 frame 中的 timestamp 为 0,相当于让 SDK 自己设置时间戳,但请“均匀”地控制 sendCustomAudioData 的调用间隔,否则会导致声音断断续续。 + * @param delegate 自定义渲染回调 + * @param pixelFormat 指定回调的像素格式 + * @param bufferType PixelBuffer:可以直接使用 imageWithCVImageBuffer 转成 UIImage;NSData:经过内存整理的视频数据。 + * @return 0:成功;<0:错误 */ -- (void)sendCustomAudioData:(TRTCAudioFrame *)frame; +- (int)setLocalVideoRenderDelegate:(nullable id<TRTCVideoRenderDelegate>)delegate pixelFormat:(TRTCVideoPixelFormat)pixelFormat bufferType:(TRTCVideoBufferType)bufferType; /** - * 9.8 设置音频数据回调 + * 10.11 设置远端视频自定义渲染回调 * - * 设置此方法,SDK 内部会把音频数据(PCM 格式)回调出来,包括: - * - onCapturedRawAudioFrame:本地麦克风采集到的原始音频数据回调 - * - onLocalProcessedAudioFrame:本地采集并经过音频模块前处理后的音频数据回调 - * - onRemoteUserAudioFrame:混音前的每一路远程用户的音频数据 - * - onMixedPlayAudioFrame:各路音频数据混合后送入扬声器播放的音频数据 + * 设置该回调之后,SDK 内部会跳过原来的渲染流程,并把采集到的数据回调出来,您需要自己完成画面渲染。 + * - pixelFormat 指定回调的数据格式,例如 NV12、i420 以及 32BGRA。 + * - bufferType 指定 buffer 的类型,直接使用 PixelBuffer 效率最高;使用 NSData 相当于让 SDK 在内部做了一次内存转换,因此会有额外的性能损耗。 * - * @param delegate 音频数据回调,delegate = nil 则停止回调数据 + * 参考文档:[自定义采集和渲染](https://cloud.tencent.com/document/product/647/34066)。 + * @param userId 指定远端用户的 ID + * @param delegate 自定义渲染回调 + * @param pixelFormat 指定回调的像素格式 + * @param bufferType PixelBuffer:可以直接使用 imageWithCVImageBuffer 转成 UIImage;NSData:经过内存整理的视频数据。 + * @return 0:成功;<0:错误 + * @note 调用此函数之前,需要先调用 startRemoteView(nil) 来获取远端用户的视频流(view 设置为 nil 即可),否则不会有数据回调出来。 */ -- (void)setAudioFrameDelegate:(id<TRTCAudioFrameDelegate>)delegate; +- (int)setRemoteVideoRenderDelegate:(NSString *)userId 
delegate:(nullable id<TRTCVideoRenderDelegate>)delegate pixelFormat:(TRTCVideoPixelFormat)pixelFormat bufferType:(TRTCVideoBufferType)bufferType; /** - * 9.9 生成自定义采集时间戳 + * 10.12 设置音频数据自定义回调 * - * 此函数仅适合自定义视频采集时使用,当您的 App 自己或由第三方美颜 SDK 调用摄像头 API 采集视频时,由于可能引入一些耗时的外部操作(比如美颜),这会导致视频的节奏和 SDK 内部的音频节奏不一致,进而导致音画不同步。 - * 为避免发生音画不同步的问题,请按照如下步骤正确使用该接口: - * 1. 在调用系统相机 API 采集到一帧视频时,额外调用一次 generateCustomPTS 获得 pts 时间戳。 - * 2. 在调用 sendCustomVideoData: 时,将该帧采集时记录的 pts 时间戳赋值给入参 TRTCVideoFrame 中的 timestamp 字段。 + * 设置该回调之后,SDK 内部会把音频数据(PCM 格式)回调出来,包括: + * - {@link onCapturedRawAudioFrame}:本地麦克风采集到的原始音频数据回调 + * - {@link onLocalProcessedAudioFrame}:本地采集并经过音频模块前处理后的音频数据回调 + * - {@link onRemoteUserAudioFrame}:混音前的每一路远程用户的音频数据 + * - {@link onMixedPlayAudioFrame}:将各路音频混合之后并最终要由系统播放出的音频数据回调 * - * @return 时间戳(单位:ms) + * @note 设置回调为空即代表停止自定义音频回调,反之,设置回调不为空则代表启动自定义音频回调。 */ -+ (uint64_t)generateCustomPTS; +- (void)setAudioFrameDelegate:(nullable id<TRTCAudioFrameDelegate>)delegate; /** - * 9.10 设置本地麦克风采集回调出来的 AudioFrame 格式 + * 10.13 设置本地麦克风采集出的原始音频帧回调格式 * - * 设置 onCapturedRawAudioFrame 回调出来的 AudioFrame 的格式 + * 本接口用于设置 {@link onCapturedRawAudioFrame} 回调出来的 AudioFrame 的格式: * - sampleRate:采样率,支持:16000、32000、44100、48000。 * - channel:声道数(如果是立体声,数据是交叉的),单声道:1; 双声道:2。 * - samplesPerCall:采样点数,定义回调数据帧长。帧长必须为 10ms 的整数倍。 - * 如果希望用毫秒数计算回调帧长,则将毫秒数转换成采样点数的公式为:采样点数 = 毫秒数 * 采样率 / 1000; - * 举例:48000 采样率希望回调 20ms 帧长的数据,则采样点数应该填: 960 = 20 * 48000 / 1000; - * 注意,最终回调的帧长度是以字节为单位,采样点数转换成字节数的计算公式为:字节数 = 采样点数 * channel * 2(位宽) - * 举例:48000 采样率,双声道,20ms 帧长,采样点数为 960,字节数为 3840 = 960 * 2 * 2 * + * 如果希望用毫秒数计算回调帧长,则将毫秒数转换成采样点数的公式为:采样点数 = 毫秒数 * 采样率 / 1000; + * 举例:48000 采样率希望回调 20ms 帧长的数据,则采样点数应该填: 960 = 20 * 48000 / 1000; + * 注意,最终回调的帧长度是以字节为单位,采样点数转换成字节数的计算公式为:字节数 = 采样点数 * channel * 2(位宽) + * 举例:48000 采样率,双声道,20ms 帧长,采样点数为 960,字节数为 3840 = 960 * 2 * 2 * @param format 音频数据回调格式。 * @return 0:成功;<0:错误 */ - (int)setCapturedRawAudioFrameDelegateFormat:(TRTCAudioFrameDelegateFormat *)format; /** - * 9.11 
设置本地采集并经过音频模块前处理后的音频数据回调出来的 AudioFrame 格式 + * 10.14 设置经过前处理后的本地音频帧回调格式 * - * 设置 onLocalProcessedAudioFrame 回调出来的AudioFrame的格式 - * - sampleRate:采样率,支持:16000、32000、44100、48000。 - * - channel:声道数(如果是立体声,数据是交叉的),单声道:1; 双声道:2。 + * 本接口用于设置 {@link onLocalProcessedAudioFrame} 回调出来的 AudioFrame 的格式: + * - sampleRate:采样率,支持:16000、32000、44100、48000。 + * - channel:声道数(如果是立体声,数据是交叉的),单声道:1; 双声道:2。 * - samplesPerCall:采样点数,定义回调数据帧长。帧长必须为 10ms 的整数倍。 - * 如果希望用毫秒数计算回调帧长,则将毫秒数转换成采样点数的公式为:采样点数 = 毫秒数 * 采样率 / 1000; - * 举例:48000 采样率希望回调20ms帧长的数据,则采样点数应该填: 960 = 20 * 48000 / 1000; - * 注意,最终回调的帧长度是以字节为单位,采样点数转换成字节数的计算公式为:字节数 = 采样点数 * channel * 2(位宽) - * 举例:48000 采样率,双声道,20ms 帧长,采样点数为 960,字节数为 3840 = 960 * 2 * 2 + * + * 如果希望用毫秒数计算回调帧长,则将毫秒数转换成采样点数的公式为:采样点数 = 毫秒数 * 采样率 / 1000; + * 举例:48000 采样率希望回调20ms帧长的数据,则采样点数应该填: 960 = 20 * 48000 / 1000; + * 注意,最终回调的帧长度是以字节为单位,采样点数转换成字节数的计算公式为:字节数 = 采样点数 * channel * 2(位宽) + * 举例:48000 采样率,双声道,20ms 帧长,采样点数为 960,字节数为 3840 = 960 * 2 * 2 * * @param format 音频数据回调格式。 * @return 0:成功;<0:错误 @@ -1280,1054 +1238,926 @@ - (int)setLocalProcessedAudioFrameDelegateFormat:(TRTCAudioFrameDelegateFormat *)format; /** - * 9.12 设置送入扬声器播放的音频数据回调的 AudioFrame 格式 + * 10.15 设置最终要由系统播放出的音频帧回调格式 * - * 设置 onMixedPlayAudioFrame 回调出来的 AudioFrame 格式 + * 本接口用于设置 {@link onMixedPlayAudioFrame} 回调出来的 AudioFrame 的格式: * - sampleRate:采样率,支持:16000、32000、44100、48000。 * - channel:声道数(如果是立体声,数据是交叉的),单声道:1; 双声道:2。 * - samplesPerCall:采样点数,定义回调数据帧长。帧长必须为 10ms 的整数倍。 - * 如果希望用毫秒数计算回调帧长,则将毫秒数转换成采样点数的公式为:采样点数 = 毫秒数 * 采样率 / 1000; - * 举例:48000 采样率希望回调 20ms 帧长的数据,则采样点数应该填: 960 = 20 * 48000 / 1000; - * 注意,最终回调的帧长度是以字节为单位,采样点数转换成字节数的计算公式为:字节数 = 采样点数 * channel * 2(位宽) - * 举例:48000 采样率,双声道,20ms 帧长,采样点数为 960,字节数为3840 = 960 * 2 * 2 * + * 如果希望用毫秒数计算回调帧长,则将毫秒数转换成采样点数的公式为:采样点数 = 毫秒数 * 采样率 / 1000; + * 举例:48000 采样率希望回调20ms帧长的数据,则采样点数应该填: 960 = 20 * 48000 / 1000; + * 注意,最终回调的帧长度是以字节为单位,采样点数转换成字节数的计算公式为:字节数 = 采样点数 * channel * 2(位宽) + * 举例:48000 采样率,双声道,20ms 帧长,采样点数为 960,字节数为 3840 = 960 * 2 * 2 * @param format 音频数据回调格式。 
* @return 0:成功;<0:错误 */ - (int)setMixedPlayAudioFrameDelegateFormat:(TRTCAudioFrameDelegateFormat *)format; /** - * 9.13 控制外部音频是否要混入推流和混入播放 + * 10.16 开启音频自定义播放 * - * 您可以通过 mixExternalAudioFrame: 增加一路音频混合到推流的音频流,同时可以支持在本地播放 - * - * @param enablePublish 是否混入推流 YES:混入推流;NO:不混入推流,默认值:NO - * @param enablePlayout 是否混入本地播放 YES:混入播放;NO:不混入播放,默认值:NO - * @note enablePublish = NO, enablePlayout = NO 时,表示完全关闭这个额外的音频流,即不推流,也不播放 + * 如果您需要外接一些特定的音频设备,或者希望自己掌控音频的播放逻辑,您可以通过该接口启用音频自定义播放。 + * 启用音频自定义播放后,SDK 将不再调用系统的音频接口播放数据,您需要通过 {@link getCustomAudioRenderingFrame} 获取 SDK 要播放的音频帧并自行播放。 + * @param enable 是否启用音频自定义播放,默认为关闭状态。 + * @note 需要您在进入房间前设置才能生效,暂不支持进入房间后再设置。 */ -- (void)enableMixExternalAudioFrame:(BOOL)enablePublish playout:(BOOL)enablePlayout; +- (void)enableCustomAudioRendering:(BOOL)enable; /** - * 9.14 向 SDK 投送自己附加的音频数据 + * 10.17 获取可播放的音频数据 * - * TRTCAudioFrame 推荐如下填写方式: - * - data:音频帧 buffer。音频帧数据只支持 PCM 格式,支持[5ms ~ 100ms]帧长,推荐使用20 ms帧长,【48000采样率、单声道的帧长度:48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 - * - sampleRate:采样率,支持:16000、24000、32000、44100、48000。 - * - channel:声道数(如果是立体声,数据是交叉的),单声道:1; 双声道:2。 - * - timestamp:时间戳,单位毫秒(ms)。如果 timestamp 间隔不均匀,会严重影响音画同步和录制出的 MP4 质量。 + * 调用该接口之前,您需要先通过 {@link enableCustomAudioRendering} 开启音频自定义播放。 + * 参数 {@link TRTCAudioFrame} 推荐下列填写方式(其他字段不需要填写): + * - sampleRate:采样率,必填,支持 16000、24000、32000、44100、48000。 + * - channel:声道数,必填,单声道请填1,双声道请填2,双声道时数据是交叉的。 + * - data:用于获取音频数据的 buffer。需要您根据一帧音频帧的帧长度分配好 data 的内存大小。 + * 获取的 PCM 数据支持 10ms 或 20ms 两种帧长,推荐使用 20ms 的帧长。 + * 计算公式为:48000采样率、单声道、且播放时长为 20ms 的一帧音频帧的 buffer 大小为 48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节。 + * + * @param audioFrame 音频数据帧。 + * @note + * 1. 参数 audioFrame 中的 sampleRate、channel 需提前设置好,同时分配好所需读取帧长的 data 空间。 + * 2. SDK 内部会根据 sampleRate 和 channel 自动填充 data 数据。 + * 3. 
建议由系统的音频播放线程直接驱动该函数的调用,在播放完一帧音频之后,即调用该接口获取下一帧可播放的音频数据。 * - * @param frame 音频数据 - * @note 可以设置 frame 中的 timestamp 为 0,相当于让 SDK 自己设置时间戳,但请“均匀”地控制 mixExternalAudioFrame 的调用间隔,否则会导致声音断断续续。 */ -- (void)mixExternalAudioFrame:(TRTCAudioFrame *)frame; -/// @} +- (void)getCustomAudioRenderingFrame:(TRTCAudioFrame *)audioFrame; +/// @} ///////////////////////////////////////////////////////////////////////////////// // -// (十)自定义消息发送 +// 自定义消息发送接口 // ///////////////////////////////////////////////////////////////////////////////// -#pragma mark - 自定义消息发送 -/// @name 自定义消息发送 +/// @name 自定义消息发送接口 /// @{ /** - * 10.1 发送自定义消息给房间内所有用户 - * - * 该接口可以借助音视频数据通道向当前房间里的其他用户广播您自定义的数据,但因为复用了音视频数据通道, - * 请务必严格控制自定义消息的发送频率和消息体的大小,否则会影响音视频数据的质量控制逻辑,造成不确定性的问题。 + * 11.1 使用 UDP 通道发送自定义消息给房间内所有用户 * - * @param cmdID 消息 ID,取值范围为1 - 10 - * @param data 待发送的消息,最大支持 1KB(1000 字节)的数据大小 - * @param reliable 是否可靠发送,可靠发送的代价是会引入一定的延时,因为接收端要暂存一段时间的数据来等待重传 - * @param ordered 是否要求有序,即是否要求接收端接收的数据顺序和发送端发送的顺序一致,这会带来一定的接收延时,因为在接收端需要暂存并排序这些消息。 + * 该接口可以让您借助 TRTC 的 UDP 通道,向当前房间里的其他用户广播自定义数据,已达到传输信令的目的。 + * TRTC 中的 UDP 通道原本设计用来传输音视频数据的,该接口的原理是将您要发送的信令伪装成音视频数据包,与原本要发送的音视频数据一并发送出去。 + * 房间中的其他用户可以通过 {@link TRTCCloudDelegate} 中的 onRecvCustomCmdMsg 回调接收消息。 + * @param cmdID 消息 ID,取值范围为1 - 10。 + * @param data 待发送的消息,单个消息的最大长度被限制为 1KB。 + * @param reliable 是否可靠发送,可靠发送可以获得更高的发送成功率,但可靠发送比不可靠发送会带来更大的接收延迟。 + * @param ordered 是否要求有序,即是否要求接收端的数据包顺序和发送端的数据包顺序一致(这会带来一定的接收延时)。 * @return YES:消息已经发出;NO:消息发送失败。 - * - * @note 本接口有以下限制: - * - 发送消息到房间内所有用户(暂时不支持 Web/小程序端),每秒最多能发送30条消息。 - * - 每个包最大为 1KB,超过则很有可能会被中间路由器或者服务器丢弃。 - * - 每个客户端每秒最多能发送总计 8KB 数据。 - * - 将 reliable 和 ordered 同时设置为 YES 或 NO,暂不支持交叉设置。 - * - 强烈建议不同类型的消息使用不同的 cmdID,这样可以在要求有序的情况下减小消息时延。 + * @note + * 1. 发送消息到房间内所有用户(暂时不支持 Web/小程序端),每秒最多能发送30条消息。 + * 2. 每个包最大为 1KB,超过则很有可能会被中间路由器或者服务器丢弃。 + * 3. 每个客户端每秒最多能发送总计 8KB 数据。 + * 4. 请将 reliable 和 ordered 同时设置为 YES 或同时设置为 NO,暂不支持交叉设置。 + * 5. 
强烈建议您将不同类型的消息设定为不同的 cmdID,这样可以在要求有序的情况下减小消息时延。 */ - (BOOL)sendCustomCmdMsg:(NSInteger)cmdID data:(NSData *)data reliable:(BOOL)reliable ordered:(BOOL)ordered; /** - * 10.2 将小数据量的自定义数据嵌入视频帧中 - * - * 与 sendCustomCmdMsg 的原理不同,sendSEIMsg 是将数据直接塞入视频数据头中。因此,即使视频帧被旁路到了直播 CDN 上, - * 这些数据也会一直存在。由于需要把数据嵌入视频帧中,建议尽量控制数据大小,推荐使用几个字节大小的数据。 - * - * 最常见的用法是把自定义的时间戳(timstamp)用 sendSEIMsg 嵌入视频帧中,实现消息和画面的完美对齐。 + * 11.2 使用 SEI 通道发送自定义消息给房间内所有用户 * + * 该接口可以让您借助 TRTC 的 SEI 通道,向当前房间里的其他用户广播自定义数据,已达到传输信令的目的。 + * 视频帧的头部有一个叫做 SEI 的头部数据块,该接口的原理就是利用这个被称为 SEI 的头部数据块,将您要发送的自定义信令嵌入其中,使其同视频帧一并发送出去。 + * 因此,与 {@link sendCustomCmdMsg} 相比,SEI 通道传输的信令具有更好的兼容性:信令可以伴随着视频帧一直传输到直播 CDN 上。 + * 不过,由于视频帧头部的数据块不能太大,建议您使用该接口时,尽量将信令控制在几个字节的大小。 + * 最常见的用法是把自定义的时间戳(timestamp)用本接口嵌入视频帧中,实现消息和画面的完美对齐(比如:教育场景下的课件和视频信号的对齐)。 + * 房间中的其他用户可以通过 {@link TRTCCloudDelegate} 中的 onRecvSEIMsg 回调接收消息。 * @param data 待发送的数据,最大支持 1KB(1000字节)的数据大小 * @param repeatCount 发送数据次数 * @return YES:消息已通过限制,等待后续视频帧发送;NO:消息被限制发送 - * * @note 本接口有以下限制: - * - 数据在接口调用完后不会被即时发送出去,而是从下一帧视频帧开始带在视频帧中发送。 - * - 发送消息到房间内所有用户,每秒最多能发送 30 条消息(与 sendCustomCmdMsg 共享限制)。 - * - 每个包最大为 1KB,若发送大量数据,会导致视频码率增大,可能导致视频画质下降甚至卡顿(与 sendCustomCmdMsg 共享限制)。 - * - 每个客户端每秒最多能发送总计8KB数据(与 sendCustomCmdMsg 共享限制)。 - * - 若指定多次发送(repeatCount > 1),则数据会被带在后续的连续 repeatCount 个视频帧中发送出去,同样会导致视频码率增大。 - * - 如果 repeatCount > 1,多次发送,接收消息 onRecvSEIMsg 回调也可能会收到多次相同的消息,需要去重。 + * 1. 数据在接口调用完后不会被即时发送出去,而是从下一帧视频帧开始带在视频帧中发送。 + * 2. 发送消息到房间内所有用户,每秒最多能发送 30 条消息(与 sendCustomCmdMsg 共享限制)。 + * 3. 每个包最大为 1KB,若发送大量数据,会导致视频码率增大,可能导致视频画质下降甚至卡顿(与 sendCustomCmdMsg 共享限制)。 + * 4. 每个客户端每秒最多能发送总计8KB数据(与 sendCustomCmdMsg 共享限制)。 + * 5. 若指定多次发送(repeatCount > 1),则数据会被带在后续的连续 repeatCount 个视频帧中发送出去,同样会导致视频码率增大。 + * 6. 
如果 repeatCount > 1,多次发送,接收消息 onRecvSEIMsg 回调也可能会收到多次相同的消息,需要去重。 */ -- (BOOL)sendSEIMsg:(NSData *)data repeatCount:(int)repeatCount; +- (BOOL)sendSEIMsg:(NSData *)data repeatCount:(int)repeatCount; /// @} - - ///////////////////////////////////////////////////////////////////////////////// // -// (十一)设备和网络测试 +// 网络测试接口 // ///////////////////////////////////////////////////////////////////////////////// -#pragma mark - 设备和网络测试 -/// @name 设备和网络测试 +/// @name 网络测试接口 /// @{ /** - * 11.1 开始进行网络测速(视频通话期间请勿测试,以免影响通话质量) - * - * 测速结果将会用于优化 SDK 接下来的服务器选择策略,因此推荐您在用户首次通话前先进行一次测速,这将有助于我们选择最佳的服务器。 - * 同时,如果测试结果非常不理想,您可以通过醒目的 UI 提示用户选择更好的网络。 - * - * @note 测速本身会消耗一定的流量,所以也会产生少量额外的流量费用。 + * 12.1 开始进行网速测试(进入房间前使用) * - * @param sdkAppId 应用标识 - * @param userId 用户标识 - * @param userSig 用户签名 - * @param completion 测试回调,会分多次回调 + * @param params 测速选项 + * @return 接口调用结果,< 0:失败 + * @note + * 1. 测速过程将产生少量的基础服务费用,详见 [计费概述 > 基础服务](https://cloud.tencent.com/document/product/647/17157#.E5.9F.BA.E7.A1.80.E6.9C.8D.E5.8A.A1) 文档说明。 + * 2. 请在进入房间前进行网速测试,在房间中网速测试会影响正常的音视频传输效果,而且由于干扰过多,网速测试结果也不准确。 + * 3. 
同一时间只允许一项网速测试任务运行。 */ -- (void)startSpeedTest:(uint32_t)sdkAppId userId:(NSString *)userId userSig:(NSString *)userSig completion:(void(^)(TRTCSpeedTestResult* result, NSInteger completedCount, NSInteger totalCount))completion; +- (int)startSpeedTest:(TRTCSpeedTestParams *)params; /** - * 11.2 停止服务器测速 + * 12.2 停止网络测速 */ - (void)stopSpeedTest; /// @} - ///////////////////////////////////////////////////////////////////////////////// // -// (十二)Log 相关接口函数 +// 调试相关接口 // ///////////////////////////////////////////////////////////////////////////////// -/// @name Log 相关接口函数 +/// @name 调试相关接口 /// @{ -#pragma mark - LOG 相关接口函数 /** - * 12.1 获取 SDK 版本信息 + * 13.1 获取 SDK 版本信息 */ + (NSString *)getSDKVersion; /** - * 12.2 设置 Log 输出级别 + * 13.2 设置 Log 输出级别 * - * @param level 参见 TRTCLogLevel,默认值:TRTC_LOG_LEVEL_NULL + * @param level 参见 {@link TRTCLogLevel},默认值:{@link TRTCLogLevelNone} */ + (void)setLogLevel:(TRTCLogLevel)level; /** - * 12.3 启用或禁用控制台日志打印 + * 13.3 启用/禁用控制台日志打印 * - * @param enabled 指定是否启用,默认为禁止状态 + * @param enabled 指定是否启用,默认:禁止状态 */ + (void)setConsoleEnabled:(BOOL)enabled; /** - * 12.4 启用或禁用 Log 的本地压缩。 + * 13.4 启用/禁用日志的本地压缩 * * 开启压缩后,Log 存储体积明显减小,但需要腾讯云提供的 Python 脚本解压后才能阅读。 * 禁用压缩后,Log 采用明文存储,可以直接用记事本打开阅读,但占用空间较大。 - * - * @param enabled 指定是否启用,默认为启动状态 + * @param enabled 指定是否启用,默认为启动状态 */ + (void)setLogCompressEnabled:(BOOL)enabled; /** - * 12.5 修改日志保存路径 + * 13.5 设置本地日志的保存路径 + * + * 通过该接口您可以更改 SDK 本地日志的默认存储路径,SDK 默认的本地日志的存储位置: + * - Windows 平台:在 C:/Users/[系统用户名]/AppData/Roaming/liteav/log,即 %appdata%/liteav/log 下。 + * - iOS 或 Mac 平台:在 sandbox Documents/log 下。 + * - Android 平台:在 /app私有目录/files/log/liteav/ 下。 * - * @note 日志文件默认保存在 sandbox Documents/log 下,如需修改,必须在所有方法前调用。 - * @param path 存储日志路径 + * @note 请务必在所有其他接口之前调用,并且保证您指定的目录是存在的,并且您的应用程序拥有对该目录的读写权限。 + * @param path 存储日志的路径 */ + (void)setLogDirPath:(NSString *)path; /** - * 12.6 设置日志回调 + * 13.6 设置日志回调 */ -+ (void)setLogDelegate:(id<TRTCLogDelegate>)logDelegate; ++ (void)setLogDelegate:(nullable 
id<TRTCLogDelegate>)logDelegate; /** - * 12.7 显示仪表盘 + * 13.7 显示仪表盘 * - * 仪表盘是状态统计和事件消息浮层 view,方便调试。 - * @param showType 0:不显示;1:显示精简版;2:显示全量版 + * “仪表盘”是位于视频渲染控件之上的一个半透明的调试信息浮层,用于展示音视频信息和事件信息,便于对接和调试。 + * @param showType 0:不显示;1:显示精简版(仅显示音视频信息);2:显示完整版(包含音视频信息和事件信息)。 */ - (void)showDebugView:(NSInteger)showType; /** - * 12.8 设置仪表盘的边距 + * 13.8 设置仪表盘的边距 * - * 必须在 showDebugView 调用前设置才会生效 + * 用于调整仪表盘在视频渲染控件中的位置,必须在 showDebugView 之前调用才能生效。 * @param userId 用户 ID - * @param margin 仪表盘内边距,注意这里是基于 parentView 的百分比,margin 的取值范围是0 - 1 + * @param margin 仪表盘内边距,注意这里是基于 parentView 的百分比,margin 的取值范围是0 - 1。 */ - (void)setDebugViewMargin:(NSString *)userId margin:(TXEdgeInsets)margin; - /** - * 12.9 调用实验性 API 接口 - * - * @note 该接口用于调用一些实验性功能 - * @param jsonStr 接口及参数描述的 JSON 字符串 + * 13.9 调用实验性接口 */ -- (void)callExperimentalAPI:(NSString*)jsonStr; +- (void)callExperimentalAPI:(NSString *)jsonStr; /// @} - ///////////////////////////////////////////////////////////////////////////////// // -// (十三)弃用接口(建议使用对应的新接口) +// 弃用接口(建议使用对应的新接口) // ///////////////////////////////////////////////////////////////////////////////// -#pragma mark - 弃用接口(建议使用对应的新接口) -/// @name 弃用接口(建议使用对应的新接口) +/// @name 弃用接口(建议使用对应的新接口) /// @{ /** * 设置麦克风的音量大小 * - * @deprecated v6.9 版本弃用 - * 播放背景音乐混音时使用,用来控制麦克风音量大小。 - * - * @param volume 音量大小,100 为原始音量,范围是:[0 ~ 150],默认值为 100 - * - * @note 如果要将 volume 设置为大于 100 的数值,需要进行特殊配置,请联系技术支持。 + * @deprecated v6.9 版本开始不推荐使用,建议使用 {@link setAudioCaptureVolume} 替代之。 */ - (void)setMicVolumeOnMixing:(NSInteger)volume __attribute__((deprecated("use setAudioCaptureVolume instead"))); /** * 设置美颜、美白以及红润效果级别 * - * SDK 内部集成两套风格不同的磨皮算法,一套我们取名叫“光滑”,适用于美女秀场,效果比较明显。 - * 另一套我们取名“自然”,磨皮算法更多地保留了面部细节,主观感受上会更加自然。 - * - * @deprecated v6.9 版本弃用,请使用 TXBeautyManager 设置美颜功能 - * @param beautyStyle 美颜风格,光滑或者自然,光滑风格磨皮更加明显,适合娱乐场景。 - * @param beautyLevel 美颜级别,取值范围 0 - 9; 0表示关闭,1 - 9 值越大,效果越明显。 - * @param whitenessLevel 美白级别,取值范围 0 - 9;0表示关闭,1 - 9 值越大,效果越明显。 - * @param ruddinessLevel 红润级别,取值范围 0 - 9;0表示关闭,1 - 9 
值越大,效果越明显。 + * @deprecated v6.9 版本开始不推荐使用,建议使用 {@link getBeautyManager} 替代之。 */ -- (void)setBeautyStyle:(TRTCBeautyStyle)beautyStyle beautyLevel:(NSInteger)beautyLevel - whitenessLevel:(NSInteger)whitenessLevel ruddinessLevel:(NSInteger)ruddinessLevel - __attribute__((deprecated("use getBeautyManager instead"))); +- (void)setBeautyStyle:(TRTCBeautyStyle)beautyStyle beautyLevel:(NSInteger)beautyLevel whitenessLevel:(NSInteger)whitenessLevel ruddinessLevel:(NSInteger)ruddinessLevel __attribute__((deprecated("use getBeautyManager instead"))); -#if TARGET_OS_IPHONE /** - * 设置大眼级别,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置大眼级别 * - * @deprecated v6.9 版本弃用,请使用 TXBeautyManager 设置美颜功能 - * @param eyeScaleLevel 大眼级别,取值范围 0 - 9;0表示关闭,1 - 9 值越大,效果越明显。 + * @deprecated v6.9 版本开始不推荐使用,建议使用 {@link getBeautyManager} 替代之。 */ +#if TARGET_OS_IPHONE - (void)setEyeScaleLevel:(float)eyeScaleLevel __attribute__((deprecated("use getBeautyManager instead"))); +#endif /** - * 设置瘦脸级别,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置瘦脸级别 * - * @deprecated v6.9 版本弃用,请使用 TXBeautyManager 设置美颜功能 - * @param faceScaleLevel 瘦脸级别,取值范围 0 - 9;0表示关闭,1 - 9 值越大,效果越明显。 + * @deprecated v6.9 版本开始不推荐使用,建议使用 {@link getBeautyManager} 替代之。 */ +#if TARGET_OS_IPHONE - (void)setFaceScaleLevel:(float)faceScaleLevel __attribute__((deprecated("use getBeautyManager instead"))); +#endif /** - * 14.5设置 V 脸级别,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置 V 脸级别 * - * @deprecated v6.9 版本弃用,请使用 TXBeautyManager 设置美颜功能 - * @param faceVLevel V脸级别,取值范围 0 - 9;0表示关闭,1 - 9 值越大,效果越明显。 + * @deprecated v6.9 版本开始不推荐使用,建议使用 {@link getBeautyManager} 替代之。 */ +#if TARGET_OS_IPHONE - (void)setFaceVLevel:(float)faceVLevel __attribute__((deprecated("use getBeautyManager instead"))); +#endif /** - * 设置下巴拉伸或收缩,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置下巴拉伸或收缩幅度 * 
- * @deprecated v6.9 版本弃用,请使用 TXBeautyManager 设置美颜功能 - * @param chinLevel 下巴拉伸或收缩级别,取值范围 -9 - 9;0 表示关闭,小于 0 表示收缩,大于 0 表示拉伸。 + * @deprecated v6.9 版本开始不推荐使用,建议使用 {@link getBeautyManager} 替代之。 */ +#if TARGET_OS_IPHONE - (void)setChinLevel:(float)chinLevel __attribute__((deprecated("use getBeautyManager instead"))); +#endif /** - * 设置短脸级别,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置短脸级别 * - * @deprecated v6.9 版本弃用,请使用 TXBeautyManager 设置美颜功能 - * @param faceShortlevel 短脸级别,取值范围 0 - 9;0 表示关闭,1 - 9 值越大,效果越明显。 + * @deprecated v6.9 版本开始不推荐使用,建议使用 {@link getBeautyManager} 替代之。 */ +#if TARGET_OS_IPHONE - (void)setFaceShortLevel:(float)faceShortlevel __attribute__((deprecated("use getBeautyManager instead"))); +#endif /** - * 设置瘦鼻级别,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置瘦鼻级别 * - * @deprecated v6.9 版本弃用,请使用 TXBeautyManager 设置美颜功能 - * @param noseSlimLevel 瘦鼻级别,取值范围 0 - 9;0 表示关闭,1 - 9 值越大,效果越明显。 + * @deprecated v6.9 版本开始不推荐使用,建议使用 {@link getBeautyManager} 替代之。 */ +#if TARGET_OS_IPHONE - (void)setNoseSlimLevel:(float)noseSlimLevel __attribute__((deprecated("use getBeautyManager instead"))); +#endif /** - * 选择使用哪一款 AI 动效挂件,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置动效贴纸 * - * @deprecated v6.9 版本弃用,请使用 TXBeautyManager 设置美颜功能 - * @param tmplPath 动效文件路径 + * @deprecated v6.9 版本开始不推荐使用,建议使用 {@link getBeautyManager} 替代之。 */ +#if TARGET_OS_IPHONE - (void)selectMotionTmpl:(NSString *)tmplPath __attribute__((deprecated("use getBeautyManager instead"))); +#endif /** - * 设置动效静音,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 - * - * 部分挂件本身会有声音特效,通过此 API 可以关闭特效播放时所带的声音效果。 + * 设置动效静音 * - * @deprecated v6.9 版本弃用,请使用 TXBeautyManager 设置美颜功能 - * @param motionMute YES:静音;NO:不静音。 + * @deprecated v6.9 版本开始不推荐使用,建议使用 {@link getBeautyManager} 替代之。 */ +#if TARGET_OS_IPHONE - (void)setMotionMute:(BOOL)motionMute 
__attribute__((deprecated("use getBeautyManager instead"))); +#endif -#elif TARGET_OS_MAC /** * 启动屏幕分享 * - * @deprecated v7.2 版本弃用,请使用 startScreenCapture:streamType:encParam: 启动屏幕分享 - * @param view 渲染控件所在的父控件 + * @deprecated v7.2 版本开始不推荐使用,建议使用 startScreenCapture:streamType:encParam: 替代之。 */ -- (void)startScreenCapture:(NSView *)view __attribute__((deprecated("use startScreenCapture:streamType:encParam: instead"))); - +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)startScreenCapture:(nullable NSView *)view __attribute__((deprecated("use startScreenCapture:streamType:encParam: instead"))); #endif /** - * 设置指定素材滤镜特效 + * 设置色彩滤镜效果 * - * @deprecated v7.2 版本弃用,请使用 TXBeautyManager 设置素材滤镜 - * @param image 指定素材,即颜色查找表图片。**必须使用 png 格式** + * @deprecated v7.2 版本开始不推荐使用,建议使用 {@link getBeautyManager} 替代之。 */ - (void)setFilter:(TXImage *)image __attribute__((deprecated("use getBeautyManager instead"))); /** - * 设置滤镜浓度 - * - * 在美女秀场等应用场景里,滤镜浓度的要求会比较高,以便更加突显主播的差异。 - * 我们默认的滤镜浓度是0.5,如果您觉得滤镜效果不明显,可以使用下面的接口进行调节。 + * 设置色彩滤镜浓度 * - * @deprecated v7.2 版本弃用,请使用 TXBeautyManager setFilterStrength 接口 - * @param concentration 从 0 到 1,越大滤镜效果越明显,默认值为0.5。 + * @deprecated v7.2 版本开始不推荐使用,建议使用 {@link getBeautyManager} 替代之。 */ - (void)setFilterConcentration:(float)concentration __attribute__((deprecated("use getBeautyManager instead"))); /** - * 设置绿幕背景视频(企业版有效,其它版本设置此参数无效) + * 设置绿幕背景视频 * - * 此处的绿幕功能并非智能抠背,需要被拍摄者的背后有一块绿色的幕布来辅助产生特效 - * - * @deprecated v7.2 版本弃用,请使用 TXBeautyManager 设置绿幕背景视频 - * @param file 视频文件路径。支持 MP4; nil 表示关闭特效。 + * @deprecated v7.2 版本开始不推荐使用,建议使用 {@link getBeautyManager} 替代之。 */ - (void)setGreenScreenFile:(NSURL *)file __attribute__((deprecated("use getBeautyManager instead"))); /** * 启动播放背景音乐 * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager startPlayMusic 接口,支持并发播放多个 BGM - * @param path 音乐文件路径,支持的文件格式:aac, mp3, m4a。 - * @param beginNotify 音乐播放开始的回调通知 - * @param progressNotify 音乐播放的进度通知,单位毫秒 - * @param completeNotify 音乐播放结束的回调通知 + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link 
getAudioEffectManager} 替代之。 */ -- (void) playBGM:(NSString *)path - withBeginNotify:(void (^)(NSInteger errCode))beginNotify -withProgressNotify:(void (^)(NSInteger progressMS, NSInteger durationMS))progressNotify - andCompleteNotify:(void (^)(NSInteger errCode))completeNotify - __attribute__((deprecated("use getAudioEffectManager instead"))); +- (void)playBGM:(NSString *)path + withBeginNotify:(void (^)(NSInteger errCode))beginNotify + withProgressNotify:(void (^)(NSInteger progressMS, NSInteger durationMS))progressNotify + andCompleteNotify:(void (^)(NSInteger errCode))completeNotify __attribute__((deprecated("use getAudioEffectManager instead"))); /** * 停止播放背景音乐 * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager stopPlayMusic 接口 + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link getAudioEffectManager} 替代之。 */ - (void)stopBGM __attribute__((deprecated("use getAudioEffectManager instead"))); /** - * 暂停播放背景音乐 + * 停止播放背景音乐 * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager pausePlayMusic 接口 + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link getAudioEffectManager} 替代之。 */ - (void)pauseBGM __attribute__((deprecated("use getAudioEffectManager instead"))); /** - * 继续播放背景音乐 + * 停止播放背景音乐 * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager resumePlayMusic 接口 + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link getAudioEffectManager} 替代之。 */ - (void)resumeBGM __attribute__((deprecated("use getAudioEffectManager instead"))); /** - * 获取音乐文件总时长,单位毫秒 + * 获取背景音乐总时长(单位:毫秒) * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager getMusicDurationInMS 接口 - * @param path 音乐文件路径,如果 path 为空,那么返回当前正在播放的 music 时长。 - * @return 成功返回时长,失败返回 -1 + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link TXAudioEffectManager} 中的 {@link getMusicDurationInMS} 替代之。 */ -- (NSInteger)getBGMDuration:(NSString *)path __attribute__((deprecated("use getAudioEffectManager instead"))); +- (NSInteger)getBGMDuration:(NSString *)path __attribute__((deprecated("use TXAudioEffectManager#getMusicDurationInMS instead"))); /** - * 设置 BGM 播放进度 + * 
设置背景音乐的播放进度 * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager seekMusicToPosInMS 接口 - * @param pos 单位毫秒 - * @return 0:成功;-1:失败 + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link TXAudioEffectManager} 中的 {@link seekMusicToPosInMS} 替代之。 */ -- (int)setBGMPosition:(NSInteger)pos __attribute__((deprecated("use getAudioEffectManager instead"))); +- (int)setBGMPosition:(NSInteger)pos __attribute__((deprecated("use TXAudioEffectManager#seekMusicToPosInMS instead"))); /** - * 设置背景音乐播放音量的大小 - * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager setMusicVolume 接口 - * 播放背景音乐混音时使用,用来控制背景音乐播放音量的大小, - * 该接口会同时控制远端播放音量的大小和本地播放音量的大小, - * 因此调用该接口后,setBGMPlayoutVolume 和 setBGMPublishVolume 设置的音量值会被覆盖 + * 设置背景音乐的音量大小 * - * @param volume 音量大小,100 为原始音量,范围是:[0 ~ 150],默认值为 100 - * - * @note 如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link TXAudioEffectManager} 中的 {@link setMusicVolume} 替代之。 */ -- (void)setBGMVolume:(NSInteger)volume __attribute__((deprecated("use getAudioEffectManager instead"))); +- (void)setBGMVolume:(NSInteger)volume __attribute__((deprecated("use TXAudioEffectManager#setMusicVolume instead"))); /** - * 设置背景音乐本地播放音量的大小 - * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager setMusicPlayoutVolume 接口 - * 播放背景音乐混音时使用,用来控制背景音乐在本地播放时的音量大小。 + * 设置背景音乐的本地播放音量 * - * @param volume 音量大小,100 为原始音量,范围是:[0 ~ 150],默认值为 100 - * - * @note 如果要将 volume 设置为大于 100 的数值,需要进行特殊配置,请联系技术支持。 + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link TXAudioEffectManager} 中的 {@link setMusicPlayoutVolume} 替代之。 */ -- (void)setBGMPlayoutVolume:(NSInteger)volume __attribute__((deprecated("use getAudioEffectManager instead"))); +- (void)setBGMPlayoutVolume:(NSInteger)volume __attribute__((deprecated("use TXAudioEffectManager#setMusicPlayoutVolume instead"))); /** - * 设置背景音乐远端播放音量的大小 - * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager setMusicPublishVolume 接口 - * 播放背景音乐混音时使用,用来控制背景音乐在远端播放时的音量大小。 + * 设置背景音乐的远端播放音量 * - * @param volume 音量大小,100 为原始音量,范围是:[0 ~ 150],默认值为 100 - * - * @note 
如果要将 volume 设置为大于 100 的数值,需要进行特殊配置,请联系技术支持。 + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link TXAudioEffectManager} 中的 {@link setBGMPublishVolume} 替代之。 */ -- (void)setBGMPublishVolume:(NSInteger)volume __attribute__((deprecated("use getAudioEffectManager instead"))); +- (void)setBGMPublishVolume:(NSInteger)volume __attribute__((deprecated("use TXAudioEffectManager#setBGMPublishVolume instead"))); /** - * 设置混响效果,目前仅支持 iOS + * 设置混响效果 * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager setVoiceReverbType 接口 - * @param reverbType 混响类型,详情请参见 TXReverbType + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link TXAudioEffectManager} 中的 {@link setVoiceReverbType} 替代之。 */ -- (void)setReverbType:(TRTCReverbType)reverbType __attribute__((deprecated("use getAudioEffectManager instead"))); +- (void)setReverbType:(TRTCReverbType)reverbType __attribute__((deprecated("use TXAudioEffectManager#setVoiceReverbType instead"))); /** - * 设置变声类型,目前仅支持 iOS + * 设置变声类型 * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager setVoiceChangerType 接口 - * @param voiceChangerType 变声类型,详情请参见 TXVoiceChangerType + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link TXAudioEffectManager} 中的 {@link setVoiceChangerType} 替代之。 */ -- (void)setVoiceChangerType:(TRTCVoiceChangerType)voiceChangerType __attribute__((deprecated("use getAudioEffectManager instead"))); +- (void)setVoiceChangerType:(TRTCVoiceChangerType)voiceChangerType __attribute__((deprecated("use TXAudioEffectManager#setVoiceChangerType instead"))); /** * 播放音效 * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager startPlayMusic 接口 - * 每个音效都需要您指定具体的 ID,您可以通过该 ID 对音效的开始、停止、音量等进行设置。 - * 支持的文件格式:aac, mp3, m4a。 - * - * @note 若您想同时播放多个音效,请分配不同的 ID 进行播放。因为使用同一个 ID 播放不同音效,SDK 会先停止播放旧的音效,再播放新的音效。 - * - * @param effect 音效 + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link TXAudioEffectManager} 中的 {@link startPlayMusic} 替代之。 */ -- (void)playAudioEffect:(TRTCAudioEffectParam*)effect __attribute__((deprecated("use getAudioEffectManager instead"))); +- (void)playAudioEffect:(TRTCAudioEffectParam 
*)effect __attribute__((deprecated("use TXAudioEffectManager#startPlayMusic instead"))); /** * 设置音效音量 * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager setMusicPublishVolume / setMusicPlayoutVolume 接口 - * @note 该操作会覆盖通过 setAllAudioEffectsVolume 指定的整体音效音量。 - * - * @param effectId 音效 ID - * @param volume 音量大小,100 为原始音量,范围是:[0 ~ 150],默认值为 100 - * - * @note 如果要将 volume 设置为大于 100 的数值,需要进行特殊配置,请联系技术支持。 + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link TXAudioEffectManager} 中的 {@link setMusicPublishVolume} 和 {@link setMusicPlayoutVolume} 替代之。 */ -- (void)setAudioEffectVolume:(int)effectId volume:(int) volume __attribute__((deprecated("use getAudioEffectManager instead"))); +- (void)setAudioEffectVolume:(int)effectId volume:(int)volume __attribute__((deprecated("use setMusicPublishVolume/setMusicPlayoutVolume instead"))); /** - * 停止音效 + * 停止播放音效 * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager stopPlayMusic 接口 - * @param effectId 音效 ID + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link TXAudioEffectManager} 中的 {@link stopPlayMusic} 替代之。 */ -- (void)stopAudioEffect:(int)effectId __attribute__((deprecated("use getAudioEffectManager instead"))); +- (void)stopAudioEffect:(int)effectId __attribute__((deprecated("use TXAudioEffectManager#stopPlayMusic instead"))); /** * 停止所有音效 * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager stopPlayMusic 接口 + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link TXAudioEffectManager} 中的 {@link stopPlayMusic} 替代之。 */ -- (void)stopAllAudioEffects __attribute__((deprecated("use getAudioEffectManager instead"))); +- (void)stopAllAudioEffects __attribute__((deprecated("use TXAudioEffectManager#stopPlayMusic instead"))); /** * 设置所有音效音量 * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager setMusicPublishVolume / setMusicPlayoutVolume 接口 - * @note 该操作会覆盖通过 setAudioEffectVolume 指定的单独音效音量。 - * - * @param volume 音量大小,100 为原始音量,范围是:[0 ~ 150],默认值为 100 - * - * @note 如果要将 volume 设置为大于 100 的数值,需要进行特殊配置,请联系技术支持。 + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link TXAudioEffectManager} 
中的 {@link setMusicPublishVolume} 和{@link setMusicPlayoutVolume} 替代之。 */ -- (void)setAllAudioEffectsVolume:(int)volume __attribute__((deprecated("use getAudioEffectManager instead"))); +- (void)setAllAudioEffectsVolume:(int)volume __attribute__((deprecated("use setMusicPublishVolume/setMusicPlayoutVolume instead"))); /** * 暂停音效 * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager pausePlayMusic 接口 - * @param effectId 音效 ID + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link TXAudioEffectManager} 中的 {@link pauseAudioEffect} 替代之。 */ -- (void)pauseAudioEffect:(int)effectId __attribute__((deprecated("use getAudioEffectManager instead"))); +- (void)pauseAudioEffect:(int)effectId __attribute__((deprecated("use TXAudioEffectManager#pauseAudioEffect instead"))); /** - * 恢复音效 + * 暂停音效 * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager resumePlayMusic 接口 - * @param effectId 音效 ID + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link TXAudioEffectManager} 中的 {@link resumePlayMusic} 替代之。 */ -- (void)resumeAudioEffect:(int)effectId __attribute__((deprecated("use getAudioEffectManager instead"))); +- (void)resumeAudioEffect:(int)effectId __attribute__((deprecated("use TXAudioEffectManager#resumePlayMusic instead"))); -#if TARGET_OS_IPHONE /** - * 开启耳返 - * - * @deprecated v7.3 版本弃用,请使用 TXAudioEffectManager setVoiceEarMonitor 接口 - * 开启后会在耳机里听到自己的声音。 + * 开启(或关闭)耳返 * - * @note 仅在戴耳机时有效 - * - * @param enable YES:开启;NO:关闭,默认值:NO + * @deprecated v7.3 版本开始不推荐使用,建议使用 {@link TXAudioEffectManager} 中的 {@link setVoiceEarMonitor} 替代之。 */ -- (void)enableAudioEarMonitoring:(BOOL)enable __attribute__((deprecated("use getAudioEffectManager instead"))); +#if TARGET_OS_IPHONE +- (void)enableAudioEarMonitoring:(BOOL)enable __attribute__((deprecated("use TXAudioEffectManager#setVoiceEarMonitor instead"))); #endif /** * 开始显示远端视频画面 * - * @deprecated v8.0 版本弃用,请使用 startRemoteView:streamType:view 接口 - * 在收到 SDK 的 onUserVideoAvailable(userid, YES) 通知时,可以获知该远程用户开启了视频, - * 此后调用 startRemoteView(userid) 接口加载该用户的远程画面,此时可以用 
loading 动画优化加载过程中的等待体验。 - * 待该用户的首帧画面开始显示时,您会收到 onFirstVideoFrame(userId) 事件回调。 - * - * @param userId 对方的用户标识 - * @param view 承载视频画面的控件 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link startRemoteView}:streamType:view: 替代之。 */ - (void)startRemoteView:(NSString *)userId view:(TXView *)view __attribute__((deprecated("use startRemoteView:streamType:view: instead"))); /** * 停止显示远端视频画面,同时不再拉取该远端用户的视频数据流 * - * @deprecated v8.0 版本弃用,请使用 stopRemoteView:streamType: 接口 - * 调用此接口后,SDK 会停止接收该用户的远程视频流,同时会清理相关的视频显示资源。 - * - * @param userId 对方的用户标识 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link stopRemoteView}:streamType: 替代之。 */ - (void)stopRemoteView:(NSString *)userId __attribute__((deprecated("use stopRemoteView:streamType: instead"))); /** * 设置远端图像的渲染模式 * - * @deprecated v8.0 版本弃用,请使用 setRemoteRenderParams:streamType:params: 接口 - * @param userId 用户 ID - * @param mode 填充(画面可能会被拉伸裁剪)或适应(画面可能会有黑边),默认值:TRTCVideoFillMode_Fill + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link setRemoteRenderParams}:streamType:params 替代之。 */ -- (void)setRemoteViewFillMode:(NSString*)userId mode:(TRTCVideoFillMode)mode __attribute__((deprecated("use setRemoteRenderParams:streamType:params: instead"))); - +- (void)setRemoteViewFillMode:(NSString *)userId mode:(TRTCVideoFillMode)mode __attribute__((deprecated("use setRemoteRenderParams:streamType:params: instead"))); /** * 设置远端图像的顺时针旋转角度 * - * @deprecated v8.0 版本弃用,请使用 setRemoteRenderParams:streamType:params: 接口 - * @param userId 用户 ID - * @param rotation 支持90、180以及270旋转角度,默认值:TRTCVideoRotation_0 - */ -- (void)setRemoteViewRotation:(NSString*)userId rotation:(TRTCVideoRotation)rotation __attribute__((deprecated("use setRemoteRenderParams:streamType:params: instead"))); - -/** - * 开始显示远端用户的辅路画面(TRTCVideoStreamTypeSub,一般用于屏幕分享) - * - * @deprecated v8.0 版本弃用,请使用 startRemoteView:streamType:view 接口 - * - startRemoteView() 用于显示主路画面(TRTCVideoStreamTypeBig,一般用于摄像头)。 - * - startRemoteSubStreamView() 用于显示辅路画面(TRTCVideoStreamTypeSub,一般用于屏幕分享)。 - * - * @param userId 对方的用户标识 - * 
@param view 渲染控件 - * @note 请在 onUserSubStreamAvailable 回调后再调用这个接口。 - */ -- (void)startRemoteSubStreamView:(NSString *)userId view:(TXView *)view __attribute__((deprecated("use startRemoteView:type:view: instead"))); - -/** - * 停止显示远端用户的辅路画面(TRTCVideoStreamTypeSub,一般用于屏幕分享)。 - * - * @deprecated v8.0 版本弃用,请使用 stopRemoteView:streamType: 接口 - * @param userId 对方的用户标识 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link setRemoteRenderParams}:streamType:params: 替代之。 */ -- (void)stopRemoteSubStreamView:(NSString *)userId __attribute__((deprecated("use stopRemoteView:streamType: instead"))); +- (void)setRemoteViewRotation:(NSString *)userId rotation:(TRTCVideoRotation)rotation __attribute__((deprecated("use setRemoteRenderParams:streamType:params: instead"))); /** * 设置本地图像的渲染模式 * - * @deprecated v8.0 版本弃用,请使用 setLocalRenderParams: 接口 - * @param mode 填充(画面可能会被拉伸裁剪)或适应(画面可能会有黑边),默认值:TRTCVideoFillMode_Fill + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link setLocalRenderParams} 替代之。 */ -- (void)setLocalViewFillMode:(TRTCVideoFillMode)mode __attribute__((deprecated("use setLocalRenderParams: instead"))); +- (void)setLocalViewFillMode:(TRTCVideoFillMode)mode __attribute__((deprecated("use setLocalRenderParams instead"))); /** * 设置本地图像的顺时针旋转角度 * - * @deprecated v8.0 版本弃用,请使用 setLocalRenderParams: 接口 - * @param rotation 支持90、180以及270旋转角度,默认值:TRTCVideoRotation_0 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link setLocalRenderParams} 替代之。 */ -- (void)setLocalViewRotation:(TRTCVideoRotation)rotation __attribute__((deprecated("use setLocalRenderParams: instead"))); +- (void)setLocalViewRotation:(TRTCVideoRotation)rotation __attribute__((deprecated("use setLocalRenderParams instead"))); -#if TARGET_OS_IPHONE /** - * 设置本地摄像头预览画面的镜像模式(iOS) + * 设置本地摄像头预览画面的镜像模式 * - * @deprecated v8.0 版本弃用,请使用 setLocalRenderParams: 接口 - * @param mirror 镜像模式,默认值:TRTCLocalVideoMirrorType_Auto + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link setLocalRenderParams} 替代之。 */ +#if TARGET_OS_IPHONE - 
(void)setLocalViewMirror:(TRTCLocalVideoMirrorType)mirror __attribute__((deprecated("use setLocalRenderParams: instead"))); #elif TARGET_OS_MAC +- (void)setLocalViewMirror:(BOOL)mirror __attribute__((deprecated("use setLocalRenderParams: instead"))); +#endif /** - * 设置本地摄像头预览画面的镜像模式(Mac) + * 开始显示远端用户的辅路画面 * - * @deprecated v8.0 版本弃用,请使用 setLocalRenderParams: 接口 - * @param mirror 镜像模式,默认值:YES + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link startRemoteView}:streamType:view: 替代之。 */ -- (void)setLocalViewMirror:(BOOL)mirror __attribute__((deprecated("use setLocalRenderParams: instead"))); -#endif +- (void)startRemoteSubStreamView:(NSString *)userId view:(TXView *)view __attribute__((deprecated("use startRemoteView:type:view: instead"))); /** - * 设置辅路画面(TRTCVideoStreamTypeSub,一般用于屏幕分享)的显示模式 + * 停止显示远端用户的辅路画面 * - * @deprecated v8.0 版本弃用,请使用 setRemoteRenderParams:streamType:params: 接口 - * - setRemoteViewFillMode() 用于设置远端主路画面(TRTCVideoStreamTypeBig,一般用于摄像头)的显示模式。 - * - setRemoteSubStreamViewFillMode() 用于设置远端辅路画面(TRTCVideoStreamTypeSub,一般用于屏幕分享)的显示模式。 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link stopRemoteView}:streamType: 替代之。 + */ +- (void)stopRemoteSubStreamView:(NSString *)userId __attribute__((deprecated("use stopRemoteView:streamType: instead"))); + +/** + * 设置辅路画面的填充模式 * - * @param userId 用户的 ID - * @param mode 填充(画面可能会被拉伸裁剪)或适应(画面可能会有黑边),默认值:TRTCVideoFillMode_Fit + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link setRemoteRenderParams}:streamType:params: 替代之。 */ - (void)setRemoteSubStreamViewFillMode:(NSString *)userId mode:(TRTCVideoFillMode)mode __attribute__((deprecated("use setRemoteRenderParams:streamType:params: instead"))); /** - * 设置辅路画面(TRTCVideoStreamTypeSub,一般用于屏幕分享)的顺时针旋转角度 + * 设置辅路画面的顺时针旋转角度 * - * @deprecated v8.0 版本弃用,请使用 setRemoteRenderParams:streamType:params: 接口 - * - setRemoteViewRotation() 用于设置远端主路画面(TRTCVideoStreamTypeBig,一般用于摄像头)的旋转角度。 - * - setRemoteSubStreamViewRotation() 用于设置远端辅路画面(TRTCVideoStreamTypeSub,一般用于屏幕分享)的旋转角度。 - * - * @param userId 用户 ID - * 
@param rotation 支持 90、180、270 旋转角度 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link setRemoteRenderParams}:streamType:params: 替代之。 */ -- (void)setRemoteSubStreamViewRotation:(NSString*)userId rotation:(TRTCVideoRotation)rotation __attribute__((deprecated("use setRemoteRenderParams:streamType:params: instead"))); +- (void)setRemoteSubStreamViewRotation:(NSString *)userId rotation:(TRTCVideoRotation)rotation __attribute__((deprecated("use setRemoteRenderParams:streamType:params: instead"))); /** - * 设定观看方优先选择的视频质量 + * 设定优先观看大画面还是小画面 * - * @deprecated v8.0 版本弃用,请使用 startRemoteView:streamType:view: 接口 - * 低端设备推荐优先选择低清晰度的小画面。 - * 如果对方没有开启双路视频模式,则此操作无效。 - * - * @param type 默认观看大画面或小画面,默认为大画面 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link startRemoteView}:streamType:view: 替代之。 */ -- (void)setPriorRemoteVideoStreamType:(TRTCVideoStreamType)type __attribute__((deprecated("use startRemoteView:streamType:view: instead"))); +- (void)setPriorRemoteVideoStreamType:(TRTCVideoStreamType)streamType __attribute__((deprecated("use startRemoteView:streamType:view: instead"))); /** * 设置音频质量 * - * @deprecated v8.0 版本弃用,请使用 startLocalAudio(quality) 接口 - * 主播端的音质越高,观众端的听感越好,但传输所依赖的带宽也就越高,在带宽有限的场景下也更容易出现卡顿。 - * - * - {@link TRTCCloudDef#TRTCAudioQualitySpeech}, 流畅:采样率:16k;单声道;音频裸码率:16kbps;适合语音通话为主的场景,比如在线会议,语音通话。 - * - {@link TRTCCloudDef#TRTCAudioQualityDefault},默认:采样率:48k;单声道;音频裸码率:50kbps;SDK 默认的音频质量,如无特殊需求推荐选择之。 - * - {@link TRTCCloudDef#TRTCAudioQualityMusic},高音质:采样率:48k;双声道 + 全频带;音频裸码率:128kbps;适合需要高保真传输音乐的场景,比如K歌、音乐直播等。 - * @note 该方法需要在 startLocalAudio 之前进行设置,否则不会生效。 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link startLocalAudio}:quality 替代之。 */ - (void)setAudioQuality:(TRTCAudioQuality)quality __attribute__((deprecated("use startLocalAudio(quality) instead"))); /** - * 开启本地音频的采集和上行 - * - * @deprecated v8.0 版本弃用,请使用 startLocalAudio(quality) 接口 - * 该函数会启动麦克风采集,并将音频数据传输给房间里的其他用户。 - * SDK 不会默认开启本地音频采集和上行,您需要调用该函数开启,否则房间里的其他用户将无法听到您的声音。 + * 设置音频质量 * - * @note 该函数会检查麦克风的使用权限,如果当前 App 没有麦克风权限,SDK 
会向用户申请开启。 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link startLocalAudio}:quality 替代之。 */ - (void)startLocalAudio __attribute__((deprecated("use startLocalAudio(quality) instead"))); -#if TARGET_OS_IPHONE - /** * 切换摄像头 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager switchCamera 接口 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link switchCamera} 接口替代之。 */ -- (void)switchCamera __attribute__((deprecated("use getDeviceManager instead"))); +#if TARGET_OS_IPHONE +- (void)switchCamera __attribute__((deprecated("use TXDeviceManager#switchCamera instead"))); +#endif /** * 查询当前摄像头是否支持缩放 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager isCameraZoomSupported 接口 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link isCameraZoomSupported} 接口替代之。 */ -- (BOOL)isCameraZoomSupported __attribute__((deprecated("use getDeviceManager instead"))); +#if TARGET_OS_IPHONE +- (BOOL)isCameraZoomSupported __attribute__((deprecated("use TXDeviceManager#isCameraZoomSupported instead"))); +#endif /** - * 查询是否支持开关闪光灯(手电筒模式) + * 设置摄像头缩放倍数(焦距) * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager isCameraTorchSupported 接口 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link setCameraZoomRatio} 接口替代之。 */ -- (BOOL)isCameraTorchSupported __attribute__((deprecated("use getDeviceManager instead"))); - +#if TARGET_OS_IPHONE +- (void)setZoom:(CGFloat)distance __attribute__((deprecated("use TXDeviceManager#setCameraZoomRatio instead"))); +#endif /** - * 查询是否支持设置焦点 + * 查询是否支持开关闪光灯 * - * @deprecated v8.0 版本弃用 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link isCameraTorchSupported} 接口替代之。 */ -- (BOOL)isCameraFocusPositionInPreviewSupported __attribute__((deprecated)); +#if TARGET_OS_IPHONE +- (BOOL)isCameraTorchSupported __attribute__((deprecated("use TXDeviceManager#isCameraTorchSupported instead"))); +#endif /** - * 设置摄像头焦点 + * 开关/关闭闪光灯 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager setCameraFocusPosition 接口 - * @param touchPoint 对焦位置 + * 
@deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link enableCameraTorch} 接口替代之。 */ -- (void)setFocusPosition:(CGPoint)touchPoint __attribute__((deprecated("use getDeviceManager instead"))); +#if TARGET_OS_IPHONE +- (BOOL)enbaleTorch:(BOOL)enable __attribute__((deprecated("use TXDeviceManager#enableCameraTorch instead"))); +#endif /** - * 查询是否支持自动识别人脸位置 + * 查询摄像头是否支持设置焦点 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager isAutoFocusEnabled 接口 + * @deprecated v8.0 版本开始不推荐使用。 */ -- (BOOL)isCameraAutoFocusFaceModeSupported __attribute__((deprecated("use getDeviceManager instead"))); +#if TARGET_OS_IPHONE +- (BOOL)isCameraFocusPositionInPreviewSupported __attribute__((deprecated)); +#endif /** - * 自动识别人脸位置 + * 设置摄像头焦点坐标位置 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager enableCameraAutoFocus 接口 - * @param enable YES:开启;NO:关闭,默认值:YES + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link setCameraFocusPosition} 接口替代之。 */ -- (void)enableAutoFaceFoucs:(BOOL)enable __attribute__((deprecated("use getDeviceManager instead"))); +#if TARGET_OS_IPHONE +- (void)setFocusPosition:(CGPoint)touchPoint __attribute__((deprecated("use TXDeviceManager#setCameraFocusPosition instead"))); +#endif /** - * 设置摄像头缩放因子(焦距) - * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager setCameraZoomRatio 接口 - * 取值范围1 - 5,取值为 1 表示最远视角(正常镜头),取值为 5 表示最近视角(放大镜头)。 - * 最大值推荐为 5,若超过 5,视频数据会变得模糊不清。 + * 查询是否支持自动识别人脸位置 * - * @param distance 取值范围为1 - 5,数值越大,焦距越远 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link isAutoFocusEnabled} 接口替代之。 */ -- (void)setZoom:(CGFloat)distance __attribute__((deprecated("use getDeviceManager instead"))); +#if TARGET_OS_IPHONE +- (BOOL)isCameraAutoFocusFaceModeSupported __attribute__((deprecated("use TXDeviceManager#isAutoFocusEnabled instead"))); +#endif /** - * 开关闪光灯 + * 开启/关闭人脸跟踪对焦 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager enableCameraTorch 接口 - * @param enable YES:开启;NO:关闭,默认值:NO + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link 
TXDeviceManager} 中的 {@link enableCameraAutoFocus} 接口替代之。 */ -- (BOOL)enbaleTorch:(BOOL)enable __attribute__((deprecated("use getDeviceManager instead"))); - +#if TARGET_OS_IPHONE +- (void)enableAutoFaceFoucs:(BOOL)enable __attribute__((deprecated("use TXDeviceManager#enableCameraAutoFocus instead"))); #endif /** - * 设置通话时使用的系统音量类型 - * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager setSystemVolumeType 接口 - * 智能手机一般具备两种系统音量类型,即通话音量类型和媒体音量类型。 - * - 通话音量:手机专门为通话场景设计的音量类型,使用手机自带的回声抵消功能,音质相比媒体音量类型较差, - * 无法通过音量按键将音量调成零,但是支持蓝牙耳机上的麦克风。 - * - * - 媒体音量:手机专门为音乐场景设计的音量类型,音质相比于通话音量类型要好,通过通过音量按键可以将音量调成零。 - * 使用媒体音量类型时,如果要开启回声抵消(AEC)功能,SDK 会开启内置的声学处理算法对声音进行二次处理。 - * 在媒体音量模式下,蓝牙耳机无法使用自带的麦克风采集声音,只能使用手机上的麦克风进行声音采集。 - * - * SDK 目前提供了三种系统音量类型的控制模式,分别为: - * - {@link TRTCSystemVolumeTypeAuto}: - * “麦上通话,麦下媒体”,即主播上麦时使用通话音量,观众不上麦则使用媒体音量,适合在线直播场景。 - * 如果您在 enterRoom 时选择的场景为 {@link TRTCAppSceneLIVE} 或 {@link TRTCAppSceneVoiceChatRoom},SDK 会自动选择该模式。 - * - * - {@link TRTCSystemVolumeTypeVOIP}: - * 通话全程使用通话音量,适合多人会议场景。 - * 如果您在 enterRoom 时选择的场景为 {@link TRTCAppSceneVideoCall} 或 {@link TRTCAppSceneAudioCall},SDK 会自动选择该模式。 - * - * - {@link TRTCSystemVolumeTypeMedia}: - * 通话全程使用媒体音量,不常用,适合个别有特殊需求(如主播外接声卡)的应用场景。 - * - * @note - * 1. 需要在调用 startLocalAudio() 之前调用该接口。<br> - * 2. 
如无特殊需求,不推荐您自行设置,您只需通过 enterRoom 设置好适合您的场景,SDK 内部会自动选择相匹配的音量类型。 + * 开始进行摄像头测试 * - * @param type 系统音量类型,如无特殊需求,不推荐您自行设置。 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link startCameraDeviceTest} 接口替代之。 */ -- (void)setSystemVolumeType:(TRTCSystemVolumeType)type __attribute__((deprecated("use getDeviceManager instead"))); - #if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)startCameraDeviceTestInView:(NSView *)view __attribute__((deprecated("use TXDeviceManager#startCameraDeviceTest instead"))); +#endif /** * 开始进行摄像头测试 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager startCameraDeviceTest 接口 - * @note 在测试过程中可以使用 setCurrentCameraDevice 接口切换摄像头。 - * @param view 预览控件所在的父控件 - */ -- (void)startCameraDeviceTestInView:(NSView *)view __attribute__((deprecated("use getDeviceManager instead"))); - -/** - * 结束视频测试预览 - * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager stopCameraDeviceTest 接口 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link stopCameraDeviceTest} 接口替代之。 */ -- (void)stopCameraDeviceTest __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)stopCameraDeviceTest __attribute__((deprecated("use TXDeviceManager#stopCameraDeviceTest instead"))); +#endif /** * 开始进行麦克风测试 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager startMicDeviceTest 接口 - * 该方法测试麦克风是否能正常工作,volume 的取值范围为0 - 100。 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link startMicDeviceTest} 接口替代之。 */ -- (void)startMicDeviceTest:(NSInteger)interval testEcho:(void (^)(NSInteger volume))testEcho __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)startMicDeviceTest:(NSInteger)interval testEcho:(void (^)(NSInteger volume))testEcho __attribute__((deprecated("use TXDeviceManager#startMicDeviceTest instead"))); +#endif /** - * 停止麦克风测试 + * 开始进行麦克风测试 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager stopMicDeviceTest 接口 + * @deprecated v8.0 版本开始不推荐使用,建议使用 
{@link TXDeviceManager} 中的 {@link stopMicDeviceTest} 接口替代之。 */ -- (void)stopMicDeviceTest __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)stopMicDeviceTest __attribute__((deprecated("use TXDeviceManager#stopMicDeviceTest instead"))); +#endif /** - * 开始扬声器测试 + * 开始进行扬声器测试 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager startSpeakerDeviceTest 接口 - * 该方法播放指定的音频文件测试播放设备是否能正常工作。如果能听到声音,说明播放设备能正常工作。 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link startSpeakerDeviceTest} 接口替代之。 */ -- (void)startSpeakerDeviceTest:(NSString*)audioFilePath onVolumeChanged:(void (^)(NSInteger volume, BOOL isLastFrame))volumeBlock __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)startSpeakerDeviceTest:(NSString *)audioFilePath onVolumeChanged:(void (^)(NSInteger volume, BOOL isLastFrame))volumeBlock __attribute__((deprecated("use TXDeviceManager#startSpeakerDeviceTest instead"))); +#endif /** - * 停止扬声器测试 + * 停止进行扬声器测试 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager stopSpeakerDeviceTest 接口 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link stopSpeakerDeviceTest} 接口替代之。 */ -- (void)stopSpeakerDeviceTest __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)stopSpeakerDeviceTest __attribute__((deprecated("use TXDeviceManager#stopSpeakerDeviceTest instead"))); +#endif /** * 获取麦克风设备列表 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager getDevicesList:type: 接口 - * Mac 主机本身自带一个质量很好的麦克风,但它也允许用户外接其他的麦克风,而且很多 USB 摄像头上也自带麦克风。 - * 如果您希望用户选择自己外接的麦克风,可以提供一个多麦克风选择的功能。 - * - * @return 麦克风设备列表,第一项为当前系统默认设备 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link getDevicesList} 接口替代之。 */ -- (NSArray<TRTCMediaDeviceInfo*>*)getMicDevicesList __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (NSArray<TRTCMediaDeviceInfo *> 
*)getMicDevicesList __attribute__((deprecated("use TXDeviceManager#getDevicesList instead"))); +#endif /** * 获取当前的麦克风设备 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager getCurrentDevice 接口 - * @return 当前麦克风设备信息 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link getCurrentDevice} 接口替代之。 */ -- (TRTCMediaDeviceInfo*)getCurrentMicDevice __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (TRTCMediaDeviceInfo *)getCurrentMicDevice __attribute__((deprecated("use TXDeviceManager#getCurrentDevice instead"))); +#endif /** - * 设置要使用的麦克风 + * 选定当前使用的麦克风 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager setCurrentDevice 接口 - * @param deviceId 从 getMicDevicesList 中得到的设备 ID - * @return 0:成功;<0:失败 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link setCurrentDevice} 接口替代之。 */ -- (int)setCurrentMicDevice:(NSString*)deviceId __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (int)setCurrentMicDevice:(NSString *)deviceId __attribute__((deprecated("use TXDeviceManager#setCurrentDevice instead"))); +#endif /** - * 获取当前麦克风设备音量 + * 获取当前麦克风的设备音量 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager getCurrentDeviceVolume 接口 - * @return 麦克风音量 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link getCurrentDeviceVolume} 接口替代之。 */ -- (float)getCurrentMicDeviceVolume __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (float)getCurrentMicDeviceVolume __attribute__((deprecated("use TXDeviceManager#getCurrentDeviceVolume instead"))); +#endif /** - * 设置麦克风设备的音量 + * 设置当前麦克风的设备音量 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager setCurrentDeviceVolume 接口 - * 该接口的功能是调节系统采集音量,如果用户直接调节 Mac 系统设置的采集音量时,该接口的设置结果会被用户的操作所覆盖。 - * - * @param volume 麦克风音量值,范围0 - 100 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link setCurrentDeviceVolume} 接口替代之。 */ -- 
(void)setCurrentMicDeviceVolume:(NSInteger)volume __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)setCurrentMicDeviceVolume:(NSInteger)volume __attribute__((deprecated("use TXDeviceManager#setCurrentDeviceVolume instead"))); +#endif /** - * 设置系统当前麦克风设备的静音状态 + * 获取系统当前麦克风设备是否静音 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager setCurrentDeviceMute 接口 - * @param mute 设置为 YES 时,麦克风设备静音 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link getCurrentDeviceMute} 接口替代之。 */ -- (void)setCurrentMicDeviceMute:(BOOL)mute __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (BOOL)getCurrentMicDeviceMute __attribute__((deprecated("use TXDeviceManager#getCurrentDeviceMute instead"))); +#endif /** - * 获取系统当前麦克风设备是否静音 + * 设置系统当前麦克风设备的静音状态 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager getCurrentDeviceMute 接口 - * @return 静音状态 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link setCurrentDeviceMute} 接口替代之。 */ -- (BOOL)getCurrentMicDeviceMute __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)setCurrentMicDeviceMute:(BOOL)mute __attribute__((deprecated("use TXDeviceManager#setCurrentDeviceMute instead"))); +#endif /** * 获取扬声器设备列表 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager getDevicesList:type: 接口 - * @return 扬声器设备列表,第一项为当前系统默认设备 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link getDevicesList} 接口替代之。 */ -- (NSArray<TRTCMediaDeviceInfo*>*)getSpeakerDevicesList __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (NSArray<TRTCMediaDeviceInfo *> *)getSpeakerDevicesList __attribute__((deprecated("use TXDeviceManager#getDevicesList instead"))); +#endif /** * 获取当前的扬声器设备 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager getCurrentDevice 接口 - * @return 当前扬声器设备信息 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link 
TXDeviceManager} 中的 {@link getCurrentDevice} 接口替代之。 */ -- (TRTCMediaDeviceInfo*)getCurrentSpeakerDevice __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (TRTCMediaDeviceInfo *)getCurrentSpeakerDevice __attribute__((deprecated("use TXDeviceManager#getCurrentDevice instead"))); +#endif /** * 设置要使用的扬声器 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager setCurrentDevice 接口 - * @param deviceId 从 getSpeakerDevicesList 中得到的设备 ID - * @return 0:成功;<0:失败 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link setCurrentDevice} 接口替代之。 */ -- (int)setCurrentSpeakerDevice:(NSString*)deviceId __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (int)setCurrentSpeakerDevice:(NSString *)deviceId __attribute__((deprecated("use TXDeviceManager#setCurrentDevice instead"))); +#endif /** - * 当前扬声器设备音量 + * 获取当前扬声器的设备音量 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager getCurrentDeviceVolume 接口 - * @return 扬声器音量 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link getCurrentDeviceVolume} 接口替代之。 */ -- (float)getCurrentSpeakerDeviceVolume __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (float)getCurrentSpeakerDeviceVolume __attribute__((deprecated("use TXDeviceManager#getCurrentDeviceVolume instead"))); +#endif /** - * 设置当前扬声器音量 - * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager setCurrentDeviceVolume 接口 - * 该接口的功能是调节系统播放音量,如果用户直接调节 Mac 系统设置的播放音量时,该接口的设置结果会被用户的操作所覆盖。 + * 设置当前扬声器的设备音量 * - * @param volume 设置的扬声器音量,范围0 - 100 - * @return 0:成功;<0:失败 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link setCurrentDeviceVolume} 接口替代之。 */ -- (int)setCurrentSpeakerDeviceVolume:(NSInteger)volume __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (int)setCurrentSpeakerDeviceVolume:(NSInteger)volume __attribute__((deprecated("use 
TXDeviceManager#setCurrentDeviceVolume instead"))); +#endif /** - * 设置系统当前扬声器设备的静音状态 + * 获取系统当前扬声器设备是否静音 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager setCurrentDeviceMute 接口 - * @param mute 设置为 YES 时,扬声器设备静音 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link getCurrentDeviceMute} 接口替代之。 */ -- (void)setCurrentSpeakerDeviceMute:(BOOL)mute __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (BOOL)getCurrentSpeakerDeviceMute __attribute__((deprecated("use TXDeviceManager#getCurrentDeviceMute instead"))); +#endif /** - * 获取系统当前扬声器设备是否静音 + * 设置系统当前扬声器设备的静音状态 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager getCurrentDeviceMute 接口 - * @return 静音状态 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link setCurrentDeviceMute} 接口替代之。 */ -- (BOOL)getCurrentSpeakerDeviceMute __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)setCurrentSpeakerDeviceMute:(BOOL)mute __attribute__((deprecated("use TXDeviceManager#setCurrentDeviceMute instead"))); +#endif /** * 获取摄像头设备列表 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager getDevicesList 接口 - * Mac 主机本身自带一个摄像头,也允许插入 USB 摄像头。 - * 如果您希望用户选择自己外接的摄像头,可以提供一个多摄像头选择的功能。 - * - * @return 摄像头设备列表,第一项为当前系统默认设备 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link getDevicesList} 接口替代之。 */ -- (NSArray<TRTCMediaDeviceInfo *> *)getCameraDevicesList __attribute__((deprecated("use getDeviceManager instead"))); +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (NSArray<TRTCMediaDeviceInfo *> *)getCameraDevicesList __attribute__((deprecated("use TXDeviceManager#getDevicesList instead"))); +#endif /** * 获取当前使用的摄像头 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager getCurrentDevice 接口 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link getCurrentDevice} 接口替代之。 */ -- (TRTCMediaDeviceInfo *)getCurrentCameraDevice __attribute__((deprecated("use getDeviceManager instead"))); +#if 
!TARGET_OS_IPHONE && TARGET_OS_MAC +- (TRTCMediaDeviceInfo *)getCurrentCameraDevice __attribute__((deprecated("use TXDeviceManager#getCurrentDevice instead"))); +#endif /** - * 设置要使用的摄像头 + * 选定当前要使用的摄像头 * - * @deprecated v8.0 版本弃用,请使用 TXDeviceManager setCurrentDevice 接口 - * @param deviceId 从 getCameraDevicesList 中得到的设备 ID - * @return 0:成功;-1:失败 + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link TXDeviceManager} 中的 {@link setCurrentDevice} 接口替代之。 */ -- (int)setCurrentCameraDevice:(NSString *)deviceId __attribute__((deprecated("use getDeviceManager instead"))); - +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (int)setCurrentCameraDevice:(NSString *)deviceId __attribute__((deprecated("use TXDeviceManager#setCurrentDevice instead"))); #endif /** - * 设置要使用的摄像头 + * 设置系统音量类型 + * + * @deprecated v8.0 版本开始不推荐使用,建议使用 {@link startLocalAudio}(quality) 替代之,通过 quality 参数来决策音质。 + */ +- (void)setSystemVolumeType:(TRTCSystemVolumeType)type __attribute__((deprecated("use startLocalAudio:quality instead"))); + +/** + * 视频截图 * - * @deprecated v8.2 版本弃用,请使用 snapshotVideo:type:sourceType:completionBlock 接口 + * @deprecated v8.2 版本开始不推荐使用,建议使用 {@link snapshotVideo}:type:sourceType:completionBlock 替代之。 */ -- (void)snapshotVideo:(NSString *)userId - type:(TRTCVideoStreamType)type - completionBlock:(void (^)(TXImage *image))completionBlock __attribute__((deprecated("use snapshotVideo:type:sourceType:completionBlock instead"))); +- (void)snapshotVideo:(NSString *)userId type:(TRTCVideoStreamType)streamType completionBlock:(void (^)(TXImage *image))completionBlock __attribute__((deprecated("use snapshotVideo:type:sourceType:completionBlock instead"))); /** * 启用视频自定义采集模式 * - * @deprecated v8.5 版本弃用,请使用 enableCustomVideoCapture:enable: 接口 - * 开启该模式后,SDK 不再运行原有的视频采集流程,只保留编码和发送能力。 - * 您需要用 sendCustomVideoData() 不断地向 SDK 塞入自己采集的视频画面。 + * @deprecated v8.5 版本开始不推荐使用,建议使用 {@link enableCustomVideoCapture}(streamType, enable) 接口替代之。 + */ +- (void)enableCustomVideoCapture:(BOOL)enable __attribute__((deprecated("use 
enableCustomVideoCapture:enable instead"))); + +/** + * 投送自己采集的视频数据 * - * @param enable 是否启用,默认值:NO + * @deprecated v8.5 版本开始不推荐使用,建议使用 {@link sendCustomVideoData}(streamType, TRTCVideoFrame) 接口替代之。 */ -- (void)enableCustomVideoCapture:(BOOL)enable __attribute__((deprecated("use enableCustomVideoCapture:enable: instead"))); +- (void)sendCustomVideoData:(TRTCVideoFrame *)frame __attribute__((deprecated("use sendCustomVideoData:frame: instead"))); /** - * 向 SDK 投送自己采集的视频数据 + * 开始应用内的屏幕分享(iOS) * - * @deprecated v8.5 版本弃用,请使用 sendCustomVideoData:frame: 接口 - * TRTCVideoFrame 推荐下列填写方式(其他字段不需要填写): - * - pixelFormat:推荐选择 TRTCVideoPixelFormat_NV12。 - * - bufferType:推荐选择 TRTCVideoBufferType_PixelBuffer。 - * - pixelBuffer:iOS 平台上常用的视频数据格式。 - * - data:视频裸数据格式,bufferType 为 NSData 时使用。 - * - timestamp:如果 timestamp 间隔不均匀,会严重影响音画同步和录制出的 MP4 质量。 - * - width:视频图像长度,bufferType 为 NSData 时填写。 - * - height:视频图像宽度,bufferType 为 NSData 时填写。 + * @deprecated v8.6 版本开始不推荐使用,建议使用 {@link startScreenCaptureInApp}:encParam: 接口替代之。 + */ +- (void)startScreenCaptureInApp:(TRTCVideoEncParam *)encParams __attribute__((deprecated("use startScreenCaptureInApp:encParam: instead"))); + +/** + * 开始全系统的屏幕分享(iOS) * - * 参考文档:[自定义采集和渲染](https://cloud.tencent.com/document/product/647/34066)。 + * @deprecated v8.6 版本开始不推荐使用,建议使用 {@link startScreenCaptureByReplaykit}:encParam:appGroup: 接口替代之。 + */ +- (void)startScreenCaptureByReplaykit:(TRTCVideoEncParam *)encParams appGroup:(NSString *)appGroup __attribute__((deprecated("use startScreenCaptureByReplaykit:encParam:appGroup: instead"))); + +/** + * 暂停/恢复发布本地的视频流 * - * @param frame 视频数据,支持 PixelBuffer NV12,BGRA 以及 I420 格式数据。 - * @note - SDK 内部有帧率控制逻辑,目标帧率以您在 setVideoEncoderParam 中设置的为准,太快会自动丢帧,太慢则会自动补帧。 - * @note - 可以设置 frame 中的 timestamp 为 0,相当于让 SDK 自己设置时间戳,但请“均匀”地控制 sendCustomVideoData 的调用间隔,否则会导致视频帧率不稳定。 + * @deprecated v8.9 版本开始不推荐使用,建议使用 {@link muteLocalVideo}(streamType, mute) 接口替代之。 + */ +- (void)muteLocalVideo:(BOOL)mute __attribute__((deprecated("use 
muteLocalVideo:streamType:mute: instead"))); + +/** + * 暂停 / 恢复订阅远端用户的视频流 * + * @deprecated v8.9 版本开始不推荐使用,建议使用 {@link muteRemoteVideoStream}(userId, streamType, mute) 接口替代之。 */ -- (void)sendCustomVideoData:(TRTCVideoFrame *)frame __attribute__((deprecated("use sendCustomVideoData:frame: instead"))); +- (void)muteRemoteVideoStream:(NSString *)userId mute:(BOOL)mute __attribute__((deprecated("use muteRemoteVideoStream:userid,streamType:mute: instead"))); -/// @} +/** + * 开始进行网络测速(进入房间前使用) + * + * @deprecated v9.2 版本开始不推荐使用,建议使用 {@link startSpeedTest}(params) 接口替代之。 + */ +- (void)startSpeedTest:(uint32_t)sdkAppId + userId:(NSString *)userId + userSig:(NSString *)userSig + completion:(void (^)(TRTCSpeedTestResult *result, NSInteger completedCount, NSInteger totalCount))completion __attribute__((deprecated("use startSpeedTest: instead"))); +/// @} @end -///@} +/// @} +NS_ASSUME_NONNULL_END diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloudDef.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloudDef.h index 4cd1707..446f9e6 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloudDef.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloudDef.h @@ -1,1083 +1,1533 @@ -/* - * Module: TRTC 关键类型定义 - * - * Function: 分辨率、质量等级等枚举和常量值的定义 - * - */ - -#import <Foundation/Foundation.h> -#if TARGET_OS_IPHONE || TARGET_OS_SIMULATOR -#import <UIKit/UIKit.h> -typedef UIView TXView; -typedef UIImage TXImage; -typedef UIEdgeInsets TXEdgeInsets; -#elif TARGET_OS_MAC -#import <AppKit/AppKit.h> -typedef NSView TXView; -typedef NSImage TXImage; -typedef NSEdgeInsets TXEdgeInsets; -#endif -#import "TXDeviceManager.h" - -///@defgroup TRTCCloudDef_ios 关键类型定义 -///腾讯云视频通话功能的关键类型定义 -///@{ - -///////////////////////////////////////////////////////////////////////////////// -// -// 【(一)视频相关枚举值定义】 -// -///////////////////////////////////////////////////////////////////////////////// - -/** - * 1.1 视频分辨率 - * - * 此处仅定义横屏分辨率,如需使用竖屏分辨率(例如360 × 
640),需要同时指定 TRTCVideoResolutionMode 为 Portrait。 - */ -typedef NS_ENUM(NSInteger, TRTCVideoResolution) { - // 宽高比1:1 - TRTCVideoResolution_120_120 = 1, ///< [C] 建议码率 VideoCall:80kbps LIVE:120kbps - TRTCVideoResolution_160_160 = 3, ///< [C] 建议码率 VideoCall:100kbps LIVE:150kbps - TRTCVideoResolution_270_270 = 5, ///< [C] 建议码率 VideoCall:200kbps LIVE:120kbps - TRTCVideoResolution_480_480 = 7, ///< [C] 建议码率 VideoCall:350kbps LIVE:120kbps - - // 宽高比4:3 - TRTCVideoResolution_160_120 = 50, ///< [C] 建议码率 VideoCall:100kbps LIVE:150kbps - TRTCVideoResolution_240_180 = 52, ///< [C] 建议码率 VideoCall:150kbps LIVE:225kbps - TRTCVideoResolution_280_210 = 54, ///< [C] 建议码率 VideoCall:200kbps LIVE:300kbps - TRTCVideoResolution_320_240 = 56, ///< [C] 建议码率 VideoCall:250kbps LIVE:375kbps - TRTCVideoResolution_400_300 = 58, ///< [C] 建议码率 VideoCall:300kbps LIVE:450kbps - TRTCVideoResolution_480_360 = 60, ///< [C] 建议码率 VideoCall:400kbps LIVE:600kbps - TRTCVideoResolution_640_480 = 62, ///< [C] 建议码率 VideoCall:600kbps LIVE:900kbps - TRTCVideoResolution_960_720 = 64, ///< [C] 建议码率 VideoCall:1000kbps LIVE:1500kbps - - // 宽高比16:9 - TRTCVideoResolution_160_90 = 100, ///< [C] 建议码率 VideoCall:150kbps LIVE:250kbps - TRTCVideoResolution_256_144 = 102, ///< [C] 建议码率 VideoCall:200kbps LIVE:300kbps - TRTCVideoResolution_320_180 = 104, ///< [C] 建议码率 VideoCall:250kbps LIVE:400kbps - TRTCVideoResolution_480_270 = 106, ///< [C] 建议码率 VideoCall:350kbps LIVE:550kbps - TRTCVideoResolution_640_360 = 108, ///< [C] 建议码率 VideoCall:550kbps LIVE:900kbps - TRTCVideoResolution_960_540 = 110, ///< [C] 建议码率 VideoCall:850kbps LIVE:1300kbps - TRTCVideoResolution_1280_720 = 112, ///< [C] 建议码率 VideoCall:1200kbps LIVE:1800kbps - TRTCVideoResolution_1920_1080 = 114, ///< [S] 建议码率 VideoCall:2000kbps LIVE:3000kbps -}; - -/** - * 1.2 视频宽高比模式 - * - * - 横屏分辨率:TRTCVideoResolution_640_360 + TRTCVideoResolutionModeLandscape = 640 × 360 - * - 竖屏分辨率:TRTCVideoResolution_640_360 + TRTCVideoResolutionModePortrait = 360 × 640 - */ -typedef 
NS_ENUM(NSInteger, TRTCVideoResolutionMode) { - TRTCVideoResolutionModeLandscape = 0, ///< 横屏分辨率 - TRTCVideoResolutionModePortrait = 1, ///< 竖屏分辨率 -}; - - -/** - * 1.3 视频流类型 - * - * TRTC 内部有三种不同的音视频流,分别是: - * - 主画面:最常用的一条线路,一般用来传输摄像头的视频数据。 - * - 小画面:跟主画面的内容相同,但是分辨率和码率更低。 - * - 辅流画面:一般用于屏幕分享,以及远程播片(例如老师放一段视频给学生)。 - * - * @note - 如果主播的上行网络和性能比较好,则可以同时送出大小两路画面。 - * @note - SDK 不支持单独开启小画面,小画面必须依附于主画面而存在。 - */ - -typedef NS_ENUM(NSInteger, TRTCVideoStreamType) { - TRTCVideoStreamTypeBig = 0, ///< 主画面视频流 - TRTCVideoStreamTypeSmall = 1, ///< 小画面视频流 - TRTCVideoStreamTypeSub = 2, ///< 辅流(屏幕分享) - -}; - -/** - * 1.4 视频画面填充模式 - * - * 如果画面的显示分辨率不等于画面的原始分辨率,就需要您设置画面的填充模式: - * - TRTCVideoFillMode_Fill,图像铺满屏幕,超出显示视窗的视频部分将被裁剪,画面显示可能不完整。 - * - TRTCVideoFillMode_Fit,图像长边填满屏幕,短边区域会被填充黑色,画面的内容完整。 - */ -typedef NS_ENUM(NSInteger, TRTCVideoFillMode) { - TRTCVideoFillMode_Fill = 0, ///< 图像铺满屏幕,超出显示视窗的视频部分将被裁剪 - TRTCVideoFillMode_Fit = 1, ///< 图像长边填满屏幕,短边区域会被填充黑色 -}; - -/** - * 1.5 视频画面旋转方向 - * - * TRTC SDK 提供了对本地和远程画面的旋转角度设置 API,下列的旋转角度都是指顺时针方向的。 - */ -typedef NS_ENUM(NSInteger, TRTCVideoRotation) { - TRTCVideoRotation_0 = 0, ///< 不旋转 - TRTCVideoRotation_90 = 1, ///< 顺时针旋转90度 - TRTCVideoRotation_180 = 2, ///< 顺时针旋转180度 - TRTCVideoRotation_270 = 3, ///< 顺时针旋转270度 -}; - -/** - * 1.6 美颜(磨皮)算法 - * - * TRTC SDK 内置多种不同的磨皮算法,您可以选择最适合您产品定位的方案。 - */ -typedef NS_ENUM(NSInteger, TRTCBeautyStyle) { - TRTCBeautyStyleSmooth = 0, ///< 光滑,适用于美女秀场,效果比较明显。 - TRTCBeautyStyleNature = 1, ///< 自然,磨皮算法更多地保留了面部细节,主观感受上会更加自然。 - TRTCBeautyStylePitu = 2, ///< 由上海优图实验室提供的美颜算法,磨皮效果介于光滑和自然之间,比光滑保留更多皮肤细节,比自然磨皮程度更高。 -}; - -/** - * 1.7 视频像素格式 - * - * TRTC SDK 提供针对视频的自定义采集和自定义渲染功能,在自定义采集功能中,您可以用下列枚举值描述您采集的视频像素格式。 - * 在自定义渲染功能中,您可以指定您期望 SDK 回调的视频像素格式。 - */ -typedef NS_ENUM(NSInteger, TRTCVideoPixelFormat) { - TRTCVideoPixelFormat_Unknown = 0, ///< 未知 - TRTCVideoPixelFormat_I420 = 1, ///< YUV420P I420 - TRTCVideoPixelFormat_NV12 = 5, ///< YUV420SP NV12 - TRTCVideoPixelFormat_32BGRA = 6, ///< BGRA8888 - 
TRTCVideoPixelFormat_Texture_2D = 7, ///< Texture -}; - -/** - * 1.8 视频数据包装格式 - * - * 在自定义采集和自定义渲染功能,您需要用到下列枚举值来指定您希望以什么类型的容器来包装视频数据。 - * - PixelBuffer:直接使用效率最高,iOS 系统提供了众多 API 获取或处理 PixelBuffer。 - * - NSData:仅用于自定义渲染,SDK 帮您做了一次 PixelBuffer 到 NSData 的内存拷贝工作,会有一定的性能消耗。 - */ -typedef NS_ENUM(NSInteger, TRTCVideoBufferType) { - TRTCVideoBufferType_Unknown = 0, ///< 未知 - TRTCVideoBufferType_PixelBuffer = 1, ///< 直接使用效率最高,iOS 系统提供了众多 API 获取或处理 PixelBuffer。 - TRTCVideoBufferType_NSData = 2, ///< 仅用于自定义渲染,SDK 帮您做了一次 PixelBuffer 到 NSData 的内存拷贝工作,会有一定的性能消耗。 - TRTCVideoBufferType_Texture = 3, ///< 用于自定义渲染的 texture -}; - -/** - * 1.9 本地视频预览镜像类型 - * - * iOS 的本地画面提供下列设置模式 - */ -typedef NS_ENUM(NSUInteger, TRTCVideoMirrorType) { - TRTCVideoMirrorTypeAuto = 0, ///< 前置摄像头镜像,后置摄像头不镜像 - TRTCVideoMirrorTypeEnable = 1, ///< 前后置摄像头画面均镜像 - TRTCVideoMirrorTypeDisable = 2, ///< 前后置摄像头画面均不镜像 -}; - -/** - * 1.10 视频截图来源 - */ -typedef NS_ENUM(NSUInteger, TRTCSnapshotSourceType) { - TRTCSnapshotSourceTypeStream = 0, ///< 从视频流上截取视频画面 - TRTCSnapshotSourceTypeView = 1, ///< 从渲染 View 上截取视频画面 -}; - -/** - * 1.12 视频渲染设置 - */ -@interface TRTCRenderParams : NSObject - -/// 【字段含义】画面朝向 -/// 【推荐取值】支持90、180以及270旋转角度,默认值:TRTCVideoRotation_0 -@property (nonatomic) TRTCVideoRotation rotation; - -/// 【字段含义】画面填充模式 -/// 【推荐取值】填充(画面可能会被拉伸裁剪)或适应(画面可能会有黑边),默认值:TRTCVideoFillMode_Fill -@property (nonatomic) TRTCVideoFillMode fillMode; - -/// 【字段含义】画面镜像模式 -/// 【推荐取值】默认值:TRTCVideoMirrorType_Auto -@property (nonatomic) TRTCVideoMirrorType mirrorType; - -@end -///////////////////////////////////////////////////////////////////////////////// -// -// 【(二)网络相关枚举值定义】 -// -///////////////////////////////////////////////////////////////////////////////// - -/** - * 2.1 应用场景 - * - * TRTC 可用于视频会议和在线直播等多种应用场景,针对不同的应用场景,TRTC SDK 的内部会进行不同的优化配置: - * - TRTCAppSceneVideoCall :视频通话场景,适合[1对1视频通话]、[300人视频会议]、[在线问诊]、[视频聊天]、[远程面试]等。 - * - TRTCAppSceneLIVE :视频互动直播,适合[视频低延时直播]、[十万人互动课堂]、[视频直播 PK]、[视频相亲房]、[互动课堂]、[远程培训]、[超大型会议]等。 - * - 
TRTCAppSceneAudioCall :语音通话场景,适合[1对1语音通话]、[300人语音会议]、[语音聊天]、[语音会议]、[在线狼人杀]等。 - * - TRTCAppSceneVoiceChatRoom:语音互动直播,适合:[语音低延时直播]、[语音直播连麦]、[语聊房]、[K 歌房]、[FM 电台]等。 - */ -typedef NS_ENUM(NSInteger, TRTCAppScene) { - /// 视频通话场景,支持720P、1080P高清画质,单个房间最多支持300人同时在线,最高支持50人同时发言。<br> - /// 适合:[1对1视频通话]、[300人视频会议]、[在线问诊]、[视频聊天]、[远程面试]等。 - TRTCAppSceneVideoCall = 0, - - /// 视频互动直播,支持平滑上下麦,切换过程无需等待,主播延时小于300ms;支持十万级别观众同时播放,播放延时低至1000ms。<br> - /// 适合:[视频低延时直播]、[十万人互动课堂]、[视频直播 PK]、[视频相亲房]、[互动课堂]、[远程培训]、[超大型会议]等。<br> - /// 注意:此场景下,您必须通过 TRTCParams 中的 role 字段指定当前用户的角色。 - TRTCAppSceneLIVE = 1, - - /// 语音通话场景,支持 48kHz,支持双声道。单个房间最多支持300人同时在线,最高支持50人同时发言。<br> - /// 适合:[1对1语音通话]、[300人语音会议]、[语音聊天]、[语音会议]、[在线狼人杀]等。 - TRTCAppSceneAudioCall = 2, - - /// 语音互动直播,支持平滑上下麦,切换过程无需等待,主播延时小于300ms;支持十万级别观众同时播放,播放延时低至1000ms。<br> - /// 适合:[语音低延时直播]、[语音直播连麦]、[语聊房]、[K 歌房]、[FM 电台]等。<br> - /// 注意:此场景下,您必须通过 TRTCParams 中的 role 字段指定当前用户的角色。 - TRTCAppSceneVoiceChatRoom = 3, -}; - -/** - * 2.2 角色,仅适用于直播场景(TRTCAppSceneLIVE 和 TRTCAppSceneVoiceChatRoom) - * - * 在直播场景中,多数用户仅为观众,个别用户是主播,这种角色区分有利于 TRTC 进行更好的定向优化。 - * - * - Anchor:主播,可以上行视频和音频,一个房间里最多支持50个主播同时上行音视频。 - * - Audience:观众,只能观看,不能上行视频和音频,一个房间里的观众人数没有上限。 - */ -typedef NS_ENUM(NSInteger, TRTCRoleType) { - TRTCRoleAnchor = 20, ///< 主播 - TRTCRoleAudience = 21, ///< 观众 -}; - -/** - * 2.3 流控模式 - * - * TRTC SDK 内部需要时刻根据网络情况调整内部的编解码器和网络模块,以便能够对网络的变化做出反应。 - * 为了支持快速算法升级,SDK 内部设置了两种不同的流控模式: - * - ModeServer:云端控制,默认模式,推荐选择。 - * - ModeClient:本地控制,用于 SDK 开发内部调试,客户请勿使用。 - * - * @note 推荐您使用云端控制,这样每当我们升级 Qos 算法时,您无需升级 SDK 即可体验更好的效果。 - */ -typedef NS_ENUM(NSInteger, TRTCQosControlMode) -{ - TRTCQosControlModeClient, ///< 客户端控制(用于 SDK 开发内部调试,客户请勿使用) - TRTCQosControlModeServer, ///< 云端控制 (默认) -}; - -/** - * 2.4 画质偏好 - * - * 指当 TRTC SDK 在遇到弱网络环境时,您期望“保清晰”或“保流畅”,两种模式均会优先保障声音数据的传输。 - * - * - Smooth:弱网下优先流畅性,当用户网络较差的时候画面也会比较模糊。 - * - Clear:默认值,弱网下优先清晰度,当用户网络较差的时候会出现卡顿,但画面清晰度不会大幅缩水。 - */ -typedef NS_ENUM(NSInteger, TRTCVideoQosPreference) -{ - TRTCVideoQosPreferenceSmooth = 1, 
///< 弱网下保流畅 - TRTCVideoQosPreferenceClear = 2, ///< 弱网下保清晰,默认值 -}; - -/** - * 2.5 网络质量 - * - * TRTC SDK 对网络质量定义了六种不同的级别,Excellent 表示最好,Down 表示不可用。 - */ -typedef NS_ENUM(NSInteger, TRTCQuality) { - TRTCQuality_Unknown = 0, ///< 未定义 - TRTCQuality_Excellent = 1, ///< 最好 - TRTCQuality_Good = 2, ///< 好 - TRTCQuality_Poor = 3, ///< 一般 - TRTCQuality_Bad = 4, ///< 差 - TRTCQuality_Vbad = 5, ///< 很差 - TRTCQuality_Down = 6, ///< 不可用 -}; - -///////////////////////////////////////////////////////////////////////////////// -// -// 【(三)声音相关枚举值定义】 -// -///////////////////////////////////////////////////////////////////////////////// - -/** - * 3.1 音频采样率 - * - * 音频采样率用来衡量声音的保真程度,采样率越高保真程度越好,如果您的应用场景有音乐的存在,推荐使用 TRTCAudioSampleRate48000。 - */ -typedef NS_ENUM(NSInteger, TRTCAudioSampleRate) { - TRTCAudioSampleRate16000 = 16000, ///< 16k采样率 - TRTCAudioSampleRate32000 = 32000, ///< 32采样率 - TRTCAudioSampleRate44100 = 44100, ///< 44.1k采样率 - TRTCAudioSampleRate48000 = 48000, ///< 48k采样率 -}; - -/** - * 3.2 声音音质 - * - * 音频音质用来衡量声音的保真程度,TRTCAudioQualitySpeech 适用于通话场景,TRTCAudioQualityMusic 适用于高音质音乐场景。 - */ -typedef NS_ENUM(NSInteger, TRTCAudioQuality) { - /// 流畅音质:采样率:16k;单声道;音频裸码率:16kbps;适合语音通话为主的场景,比如在线会议,语音通话。 - TRTCAudioQualitySpeech = 1, - /// 默认音质:采样率:48k;单声道;音频裸码率:50kbps;SDK 默认的音频质量,如无特殊需求推荐选择之。 - TRTCAudioQualityDefault = 2, - /// 高音质:采样率:48k;双声道 + 全频带;音频裸码率:128kbps;适合需要高保真传输音乐的场景,比如K歌、音乐直播等。 - TRTCAudioQualityMusic = 3, -}; - -/** - * 3.3 声音播放模式(音频路由) - * - * 微信和手机 QQ 里的视频通话功能,都有一个免提模式,开启后就不用把手机贴在耳朵上,这个功能就是基于音频路由实现的。 - * 一般手机都有两个扬声器,设置音频路由的作用就是要决定声音从哪个扬声器播放出来: - * - Speakerphone:扬声器,位于手机底部,声音偏大,适合外放音乐。 - * - Earpiece:听筒,位于手机顶部,声音偏小,适合通话。 - */ -typedef NS_ENUM(NSInteger, TRTCAudioRoute) { - TRTCAudioModeSpeakerphone = 0, ///< 扬声器 - TRTCAudioModeEarpiece = 1, ///< 听筒 -}; - -/** - * 3.4 声音混响模式 - * - * 该枚举值应用于直播场景中的混响模式,主要用于秀场直播中。 - */ -typedef NS_ENUM(NSInteger, TRTCReverbType) { - TRTCReverbType_0 = 0, ///< 关闭混响 - TRTCReverbType_1 = 1, ///< KTV - TRTCReverbType_2 = 2, ///< 小房间 - 
TRTCReverbType_3 = 3, ///< 大会堂 - TRTCReverbType_4 = 4, ///< 低沉 - TRTCReverbType_5 = 5, ///< 洪亮 - TRTCReverbType_6 = 6, ///< 金属声 - TRTCReverbType_7 = 7, ///< 磁性 -}; - -/** - * 3.5 变声模式 - * - * 该枚举值应用于直播场景中的变声模式,主要用于秀场直播中。 - */ -typedef NS_ENUM(NSInteger, TRTCVoiceChangerType) { - TRTCVoiceChangerType_0 = 0, ///< 关闭变声 - TRTCVoiceChangerType_1 = 1, ///< 熊孩子 - TRTCVoiceChangerType_2 = 2, ///< 萝莉 - TRTCVoiceChangerType_3 = 3, ///< 大叔 - TRTCVoiceChangerType_4 = 4, ///< 重金属 - TRTCVoiceChangerType_5 = 5, ///< 感冒 - TRTCVoiceChangerType_6 = 6, ///< 外国人 - TRTCVoiceChangerType_7 = 7, ///< 困兽 - TRTCVoiceChangerType_8 = 8, ///< 死肥仔 - TRTCVoiceChangerType_9 = 9, ///< 强电流 - TRTCVoiceChangerType_10 = 10, ///< 重机械 - TRTCVoiceChangerType_11 = 11, ///< 空灵 -}; - -/** - * 3.6 系统音量类型 - * - * 智能手机一般具备两种系统音量类型,即通话音量类型和媒体音量类型。 - * - 通话音量:手机专门为通话场景设计的音量类型,使用手机自带的回声抵消功能,音质相比媒体音量类型较差, - * 无法通过音量按键将音量调成零,但是支持蓝牙耳机上的麦克风。 - * - * - 媒体音量:手机专门为音乐场景设计的音量类型,音质相比于通话音量类型要好,通过通过音量按键可以将音量调成零。 - * 使用媒体音量类型时,如果要开启回声抵消(AEC)功能,SDK 会开启内置的声学处理算法对声音进行二次处理。 - * 在媒体音量模式下,蓝牙耳机无法使用自带的麦克风采集声音,只能使用手机上的麦克风进行声音采集。 - * - * SDK 目前提供了三种系统音量类型的控制模式,分别为: - * - Auto:“麦上通话,麦下媒体”,即主播上麦时使用通话音量,观众不上麦则使用媒体音量,适合在线直播场景。 - * 如果您在 enterRoom 时选择的场景为 TRTCAppSceneLIVE 或 TRTCAppSceneVoiceChatRoom,SDK 会自动选择该模式。 - * - * - VOIP:全程使用通话音量,适合多人会议场景。 - * 如果您在 enterRoom 时选择的场景为 TRTCAppSceneVideoCall 或 TRTCAppSceneAudioCall,SDK 会自动选择该模式。 - * - * - Media:通话全程使用媒体音量,不常用,适合个别有特殊需求(如主播外接声卡)的应用场景。 - * - */ -typedef NS_ENUM(NSInteger, TRTCSystemVolumeType) { - /// “麦上通话,麦下媒体”,即主播上麦时使用通话音量,观众不上麦则使用媒体音量,适合在线直播场景。<br> - /// 如果您在 enterRoom 时选择的场景为 TRTCAppSceneLIVE 或 TRTCAppSceneVoiceChatRoom,SDK 会自动选择该模式。 - TRTCSystemVolumeTypeAuto = 0, - - /// 通话全程使用媒体音量,不常用,适合个别有特殊需求(如主播外接声卡)的应用场景。 - TRTCSystemVolumeTypeMedia = 1, - - /// 全程使用通话音量,适合多人会议场景。<br> - /// 如果您在 enterRoom 时选择的场景为 TRTCAppSceneVideoCall 或 TRTCAppSceneAudioCall 会自动选择该模式。 - TRTCSystemVolumeTypeVOIP = 2, -}; - -#pragma mark - - 
-///////////////////////////////////////////////////////////////////////////////// -// -// 【(四)更多枚举值定义】 -// -///////////////////////////////////////////////////////////////////////////////// - -/** - * 4.1 Log 级别 - * - * 不同的日志等级定义了不同的详实程度和日志数量,推荐一般情况下将日志等级设置为:TRTCLogLevelInfo。 - */ -typedef NS_ENUM(NSInteger, TRTCLogLevel) { - TRTCLogLevelVerbose = 0, ///< 输出所有级别的 Log - TRTCLogLevelDebug = 1, ///< 输出 DEBUG,INFO,WARNING,ERROR 和 FATAL 级别的 Log - TRTCLogLevelInfo = 2, ///< 输出 INFO,WARNING,ERROR 和 FATAL 级别的 Log - TRTCLogLevelWarn = 3, ///< 只输出WARNING,ERROR 和 FATAL 级别的 Log - TRTCLogLevelError = 4, ///< 只输出ERROR 和 FATAL 级别的 Log - TRTCLogLevelFatal = 5, ///< 只输出 FATAL 级别的 Log - TRTCLogLevelNone = 6, ///< 不输出任何 SDK Log -}; - -/** - * 4.2 重力感应开关 - * - * 此配置仅适用于 iOS 和 iPad 等移动设备: - * - Disable:Mac 平台的默认值,视频上行的画面(也就是房间里的其它用户看到的当前用户的画面)不会跟随重力感应方向而自动调整。 - * - UIAutoLayout:iPhone 和 iPad 平台的默认值,视频上行的画面(也就是房间里的其它用户看到的当前用户的画面)会跟随当前界面的状态栏方向而自动调整。 - * - UIFixLayout:待废弃,效果等同于 UIAutoLayout。 - */ -typedef NS_ENUM(NSInteger, TRTCGSensorMode) { - TRTCGSensorMode_Disable = 0, ///< 关闭重力感应,Mac 平台的默认值。 - TRTCGSensorMode_UIAutoLayout = 1, ///< 开启重力感应,iPhone 和 iPad 平台的默认值。 - TRTCGSensorMode_UIFixLayout = 2 ///< 待废弃,效果等同于 UIAutoLayout。 -}; - -#if TARGET_OS_MAC && !TARGET_OS_IPHONE -#pragma mark - - -/** - * 4.3 屏幕分享目标类型(仅 Mac) - * - * 该枚举值主要用于 SDK 区分屏幕分享的目标(某一个窗口或整个屏幕)。 - */ -typedef NS_ENUM(NSInteger, TRTCScreenCaptureSourceType) { - TRTCScreenCaptureSourceTypeUnknown = -1, ///< 未定义 - TRTCScreenCaptureSourceTypeWindow = 0, ///< 该分享目标是某一个Mac窗口 - TRTCScreenCaptureSourceTypeScreen = 1, ///< 该分享目标是整个Mac桌面 -}; -#endif - -#pragma mark - - -/** - * 4.4 混流参数配置模式 - * - */ -typedef NS_ENUM(NSInteger, TRTCTranscodingConfigMode) { - /// 非法值 - TRTCTranscodingConfigMode_Unknown = 0, - - /// 全手动模式,灵活性最高,可以自由组合出各种混流方案,但易用性最差。 - /// 此模式下,您需要填写 TRTCTranscodingConfig 中的所有参数,并需要监听 TRTCCloudDelegate 中的 onUserVideoAvailable() 和 onUserAudioAvailable() 回调, - /// 以便根据当前房间中各个上麦用户的音视频状态不断地调整 mixUsers 参数,否则会导致混流失败。 - 
TRTCTranscodingConfigMode_Manual = 1, - - /// 纯音频模式,适用于语音通话(AudioCall)和语音聊天室(VoiceChatRoom)等纯音频场景。 - /// 只需要在进房后通过 setMixTranscodingConfig() 接口设置一次,之后 SDK 就会自动把房间内所有上麦用户的声音混流到当前用户的直播流上。 - /// 此模式下,您无需设置 TRTCTranscodingConfig 中的 mixUsers 参数,只需设置 audioSampleRate、audioBitrate 和 audioChannels 等参数。 - TRTCTranscodingConfigMode_Template_PureAudio = 2, - - /// 预排版模式,通过占位符提前对各路画面进行排布 - /// 此模式下,您依然需要设置 mixUsers 参数,但可以将 userId 设置为占位符,可选的占位符有: - /// - "$PLACE_HOLDER_REMOTE$" : 指代远程用户的画面,可以设置多个。 - /// - "$PLACE_HOLDER_LOCAL_MAIN$" : 指代本地摄像头画面,只允许设置一个。 - /// - "$PLACE_HOLDER_LOCAL_SUB$" : 指代本地屏幕分享画面,只允许设置一个。 - /// 但是您可以不需要监听 TRTCCloudDelegate 中的 onUserVideoAvailable() 和 onUserAudioAvailable() 回调进行实时调整, - /// 只需要在进房成功后调用一次 setMixTranscodingConfig() 即可,之后 SDK 会自动将真实的 userId 补位到您设置的占位符上。 - TRTCTranscodingConfigMode_Template_PresetLayout = 3, - - /// 屏幕分享模式,适用于在线教育场景等以屏幕分享为主的应用场景,仅支持 Windows 和 Mac 两个平台的 SDK。 - /// SDK 会先根据您(通过 videoWidth 和 videoHeight 参数)设置的目标分辨率构建一张画布, - /// 当老师未开启屏幕分享时,SDK 会将摄像头画面等比例拉伸绘制到该画布上;当老师开启屏幕分享之后,SDK 会将屏幕分享画面绘制到同样的画布上。 - /// 这样操作的目的是为了确保混流模块的输出分辨率一致,避免课程回放和网页观看的花屏问题(网页播放器不支持可变分辨率)。 - /// 同时,连麦学生的声音会被默认混合到老师的音视频流中。 - /// - /// 由于教学模式下的视频内容以屏幕分享为主,因此同时传输摄像头画面和屏幕分享画面是非常浪费带宽的。 - /// 推荐的做法是直接将摄像头画面通过 setLocalVideoRenderCallback 接口自定义绘制到当前屏幕上。 - /// 在该模式下,您无需设置 TRTCTranscodingConfig 中的 mixUsers 参数,SDK 不会混合学生的画面,以免干扰屏幕分享的效果。 - /// - /// 您可以将 TRTCTranscodingConfig 中的 width × height 设为 0px × 0px,SDK 会自动根据用户当前屏幕的宽高比计算出一个合适的分辨率: - /// - 如果老师当前屏幕宽度 <= 1920px,SDK 会使用老师当前屏幕的实际分辨率。 - /// - 如果老师当前屏幕宽度 > 1920px,SDK 会根据当前屏幕宽高比,选择 1920x1080(16:9)、1920x1200(16:10)、1920x1440(4:3) 三种分辨率中的一种。 - TRTCTranscodingConfigMode_Template_ScreenSharing = 4, -}; - -/** - * 4.5 媒体录制类型 - */ -typedef NS_ENUM(NSUInteger, TRTCRecordType) { - TRTCRecordTypeAudio = 0, ///< 仅录制音频 - TRTCRecordTypeVideo = 1, ///< 仅录制视频 - TRTCRecordTypeBoth = 2, ///< 同时录制音频、视频 -}; - -/** - * 4.6 混流输入类型 - * - */ -typedef NS_ENUM(NSUInteger, TRTCMixInputType) { - /// 不指定,根据 pureAudio 值决定混流输入类型 - 
TRTCMixInputTypeUndefined = 0, - /// 混入音视频 - TRTCMixInputTypeAudioVideo = 1, - /// 只混入视频 - TRTCMixInputTypePureVideo = 2, - /// 只混入音频 - TRTCMixInputTypePureAudio = 3, -}; - -///////////////////////////////////////////////////////////////////////////////// -// -// 【(五)TRTC 核心类型定义】 -// -///////////////////////////////////////////////////////////////////////////////// -#pragma mark - - -/** - * 5.1 进房相关参数 - * - * 只有该参数填写正确,才能顺利调用 enterRoom 进入 roomId 或者 strRoomId 所指定的音视频房间。 - */ -@interface TRTCParams : NSObject - -///【字段含义】应用标识 [必填],腾讯云基于 sdkAppId 完成计费统计。 -///【推荐取值】在 [实时音视频控制台](https://console.cloud.tencent.com/rav/) 创建应用后可以在账号信息页面中得到该 ID -@property (nonatomic, assign) UInt32 sdkAppId; - -///【字段含义】用户标识 [必填],当前用户的 userId,相当于登录用户名。 -///【推荐取值】限制长度为32字节,只允许包含大小写英文字母(a-zA-Z)、数字(0-9)及下划线和连词符。 -@property (nonatomic, copy, nonnull) NSString* userId; - -///【字段含义】用户签名 [必填],当前 userId 对应的验证签名,相当于登录密码。 -///【推荐取值】具体计算方法请参见 [如何计算UserSig](https://cloud.tencent.com/document/product/647/17275)。 -@property (nonatomic, copy, nonnull) NSString* userSig; - -///【字段含义】数字房间号码,在同一个房间里的用户(userId)可以彼此看到对方并进行视频通话 -///【推荐取值】取值范围:1 - 4294967294。 -///【特别说明】roomId 与 strRoomId 必填一个,若您选用 strRoomId,则 roomId 需要填写为0。若两者都填,将优先选用 roomId。 -/// 请注意,同一个 sdkAppId 互通时,请务必选用同一种房间号码类型,避免影响互通。 -@property (nonatomic, assign) UInt32 roomId; - -///【字段含义】字符串房间号码,在同一个房间里的用户(userId)可以彼此看到对方并进行视频通话。 -///【推荐取值】限制长度为64字节。以下为支持的字符集范围(共 89 个字符): -/// -大小写英文字母(a-zA-Z); -/// -数字(0-9); -/// -空格、"!"、"#"、"$"、"%"、"&"、"("、")"、"+"、"-"、":"、";"、"<"、"="、"."、">"、"?"、"@"、"["、"]"、"^"、"_"、" {"、"}"、"|"、"~"、","。 -///【特别说明】roomId 与 strRoomId 必填一个,若您选用 strRoomId,则 roomId 需要填写为0。若两者都填,将优先选用 roomId。 -/// 请注意,同一个 sdkAppId 互通时,请务必选用同一种房间号码类型,避免影响互通。 -@property (nonatomic, copy, nonnull) NSString* strRoomId; - -///【字段含义】直播场景下的角色,仅适用于直播场景(TRTCAppSceneLIVE 和 TRTCAppSceneVoiceChatRoom),通话场景下指定无效。 -///【推荐取值】默认值:主播(TRTCRoleAnchor) -@property (nonatomic, assign) TRTCRoleType role; - -///【字段含义】绑定腾讯云直播 CDN 流 ID[非必填],设置之后,您就可以在腾讯云直播 CDN 
上通过标准直播方案(FLV或HLS)播放该用户的音视频流。 -///【推荐取值】限制长度为64字节,可以不填写,一种推荐的方案是使用 “sdkappid_roomid_userid_main” 作为 streamid,这样比较好辨认且不会在您的多个应用中发生冲突。 -///【特殊说明】要使用腾讯云直播 CDN,您需要先在[控制台](https://console.cloud.tencent.com/trtc/) 中的功能配置页开启“启用旁路推流”开关。 -///【参考文档】[CDN 旁路直播](https://cloud.tencent.com/document/product/647/16826)。 -@property (nonatomic, copy, nullable) NSString* streamId; - -///【字段含义】云端录制开关,用于指定是否要在云端将该用户的音视频流录制成指定格式的文件。 -/// 方案一:手动录制 -/// - 需要在“[控制台](https://console.cloud.tencent.com/trtc) => 应用管理 => 云端录制配置”中开启云端录制。 -/// - 设置“录制形式”为“手动录制”。 -/// - 设置手动录制后,在一个 TRTC 房间中只有设置了 userDefineRecordId 参数的用户才会在云端录制出视频文件,不指定该参数的用户不会产生录制行为。 -/// - 文件会以 “userDefineRecordId_起始时间_结束时间” 的格式命名。 -/// -/// 方案二:自动录制 -/// - 需要在“[控制台](https://console.cloud.tencent.com/trtc) => 应用管理 => 云端录制配置”中开启云端录制。 -/// - 设置“录制形式”为“自动录制”。 -/// - 设置自动录制后,在一个 TRTC 房间中的任何一个有音视频上行的用户,均会在云端录制出视频文件。 -/// - 文件会以 “userDefineRecordId_起始时间_结束时间” 的格式命名,如果不指定 userDefineRecordId,则文件会以 streamid 命名。 -/// -///【推荐取值】限制长度为64字节,只允许包含大小写英文字母(a-zA-Z)、数字(0-9)及下划线和连词符。 -///【参考文档】[云端录制](https://cloud.tencent.com/document/product/647/16823)。 -@property (nonatomic, copy, nullable) NSString* userDefineRecordId; - -///【字段含义】房间签名 [非必填],当您希望某个房间只能让特定的 userId 进入时,需要使用 privateMapKey 进行权限保护。 -///【推荐取值】仅建议有高级别安全需求的客户使用,更多详情请参见 [进房权限保护](https://cloud.tencent.com/document/product/647/32240)。 -@property (nonatomic, copy, nullable) NSString* privateMapKey; - -///【字段含义】业务数据 [非必填],部分高级特性才需要用到此字段。 -///【推荐取值】不建议使用 -@property (nonatomic, copy, nullable) NSString* bussInfo; -@end - -/// 回调音频帧数据格式 -@interface TRTCAudioFrameDelegateFormat : NSObject -/// 采样率,可以是16000 32000 44100 48000 -@property (nonatomic, assign) TRTCAudioSampleRate sampleRate; -/// 声道数,可以是1或者2 -@property (nonatomic, assign) int channels; -/// 采样点数,必须是 sampleRate/100 的整数倍 -@property (nonatomic, assign) int samplesPerCall; -@end - -#pragma mark - - -/** - * 5.2 视频编码参数 - * - * 该设置决定远端用户看到的画面质量(同时也是云端录制出的视频文件的画面质量)。 - */ -@interface TRTCVideoEncParam : NSObject - -///【字段含义】视频分辨率 -///【推荐取值】 
-/// - 视频通话建议选择360 × 640及以下分辨率,resMode 选择 Portrait。 -/// - 手机直播建议选择540 × 960,resMode 选择 Portrait。 -/// - Windows 和 Mac 建议选择640 × 360 及以上分辨率,resMode 选择 Landscape。 -///【特别说明】 TRTCVideoResolution 默认只有横屏模式的分辨率,例如640 × 360。 -/// 如需使用竖屏分辨率,请指定 resMode 为 Portrait,例如640 × 360结合 Portrait 则为360 × 640。 -@property (nonatomic, assign) TRTCVideoResolution videoResolution; - -///【字段含义】分辨率模式(横屏分辨率 - 竖屏分辨率) -///【推荐取值】手机直播建议选择 Portrait,Windows 和 Mac 建议选择 Landscape。 -///【特别说明】如果 videoResolution 指定分辨率 640 × 360,resMode 指定模式为 Portrait,则最终编码出的分辨率为360 × 640。 -@property (nonatomic, assign) TRTCVideoResolutionMode resMode; - -///【字段含义】视频采集帧率 -///【推荐取值】15fps或20fps,5fps以下,卡顿感明显。10fps以下,会有轻微卡顿感。20fps以上,则过于浪费(电影的帧率为24fps)。 -///【特别说明】很多 Android 手机的前置摄像头并不支持15fps以上的采集帧率,部分过于突出美颜功能的 Android 手机前置摄像头的采集帧率可能低于10fps。 -@property (nonatomic, assign) int videoFps; - -///【字段含义】目标视频码率,SDK 会按照目标码率进行编码,只有在网络不佳的情况下才会主动降低视频码率。 -///【推荐取值】请参考本 TRTCVideoResolution 在各档位注释的最佳码率,也可以在此基础上适当调高。 -/// 比如 TRTCVideoResolution_1280_720 对应 1200kbps 的目标码率,您也可以设置为 1500kbps 用来获得更好的清晰度观感。 -///【特别说明】SDK 会努力按照 videoBitrate 指定的码率进行编码,只有在网络不佳的情况下才会主动降低视频码率,最低会降至 minVideoBitrate 所设定的数值。 -/// 如果您追求“允许卡顿但要保持清晰”的效果,可以设置 minVideoBitrate 为 videoBitrate 的 60%; -/// 如果您追求“允许模糊但要保持流畅”的效果,可以设置 minVideoBitrate 为 200kbps; -/// 如果您将 videoBitrate 和 minVideoBitrate 设置为同一个值,等价于关闭 SDK 的自适应调节能力。 -@property (nonatomic, assign) int videoBitrate; - -///【字段含义】最低视频码率,SDK 会在网络不佳的情况下主动降低视频码率,最低会降至 minVideoBitrate 所设定的数值。 -///【推荐取值】 -/// - 如果您追求“允许卡顿但要保持清晰”的效果,可以设置 minVideoBitrate 为 videoBitrate 的 60%; -/// - 如果您追求“允许模糊但要保持流畅”的效果,可以设置 minVideoBitrate 为 200kbps; -/// - 如果您将 videoBitrate 和 minVideoBitrate 设置为同一个值,等价于关闭 SDK 的自适应调节能力; -/// - 默认值:0,此时最低码率由 SDK 根据分辨率情况,自动设置合适的数值。 -///【特别说明】 -/// - 当您把分辨率设置的比较高时,minVideoBitrate 不适合设置的太低,否则会出现画面模糊和大范围的马赛克宏块。 -/// 比如把分辨率设置为 720p,把码率设置为 200kbps,那么编码出的画面将会出现大范围区域性马赛克。 -@property (nonatomic, assign) int minVideoBitrate; - -///【字段含义】是否允许 SDK 动态调整分辨率,开启后会对云端录制产生影响。 -///【推荐取值】 -/// - 需要开启云端录制的场景建议设置为 
NO,中途视频分辨率发生变化后,云端录制出的 MP4 在一般的播放器上都无法正常播放。 -/// - 视频通话模式,若无需云端录制,可以设置为 YES,此时 SDK 会根据当前待带宽情况自动选择合适的分辨率(仅针对 TRTCVideoStreamTypeBig 生效)。 -/// - 默认值:NO。 -///【特别说明】如有云端录制需求,请设置为 NO。 -@property (nonatomic, assign) BOOL enableAdjustRes; -@end - -#pragma mark - - -/** - * 5.3 网络流控相关参数 - * - * 网络流控相关参数,该设置决定 SDK 在各种网络环境下的调控方向(例如弱网下选择“保清晰”或“保流畅”) - */ -@interface TRTCNetworkQosParam : NSObject - -///【字段含义】弱网下是“保清晰”或“保流畅” -///【特别说明】 -/// - 弱网下保流畅:在遭遇弱网环境时,画面会变得模糊,且出现较多马赛克,但可以保持流畅不卡顿 -/// - 弱网下保清晰:在遭遇弱网环境时,画面会尽可能保持清晰,但可能会更容易出现卡顿 -@property (nonatomic, assign) TRTCVideoQosPreference preference; - -///【字段含义】视频分辨率(云端控制 - 客户端控制) -///【推荐取值】云端控制 -///【特别说明】 -/// - Server 模式(默认):云端控制模式,若无特殊原因,请直接使用该模式 -/// - Client 模式:客户端控制模式,用于 SDK 开发内部调试,客户请勿使用 -@property (nonatomic, assign) TRTCQosControlMode controlMode; -@end - -#pragma mark - - -/** - * 5.4 网络质量 - * - * 表示网络质量的好坏,通过这个数值,您可以在 UI 界面上用图标表征 userId 的通话线路质量 - */ -@interface TRTCQualityInfo : NSObject -/// 用户 ID -@property (nonatomic, copy, nullable) NSString* userId; -/// 网络质量 -@property (nonatomic, assign) TRTCQuality quality; -@end - -#pragma mark - - -/** - * 5.5 音量大小 - * - * 表示语音音量的评估大小,通过这个数值,您可以在 UI 界面上用图标表征 userId 是否有在说话 - */ -@interface TRTCVolumeInfo : NSObject <NSCopying> -/// 说话者的 userId, nil 为自己 -@property (strong, nonatomic, nullable) NSString *userId; -/// 说话者的音量, 取值范围0 - 100 -@property (assign, nonatomic) NSUInteger volume; -@end - -#if TARGET_OS_MAC && !TARGET_OS_IPHONE -#pragma mark - - -/** - * 5.6 屏幕分享目标信息(仅 Mac) - * - * 如果您要给您的 App 增加屏幕分享功能,一般需要先显示一个窗口选择界面,用户才可以选择希望分享的窗口。 - * TRTCScreenCaptureSourceInfo 主要用于定义分享窗口的 ID、类型、窗口名称以及缩略图。 - */ -@interface TRTCScreenCaptureSourceInfo : NSObject -/// 分享类型:需要某个窗口或整个屏幕 -@property (assign, nonatomic) TRTCScreenCaptureSourceType type; -/// 窗口ID -@property (copy, nonatomic, nullable) NSString * sourceId; -/// 窗口名称 -@property (copy, nonatomic, nullable) NSString * sourceName; -/// 窗口属性 -@property (nonatomic, strong, nullable) NSDictionary * extInfo; -/// 窗口缩略图 -@property 
(nonatomic, readonly, nullable) NSImage *thumbnail; -/// 窗口小图标 -@property (nonatomic, readonly, nullable) NSImage *icon; -@end -#endif - -#pragma mark - - -/** - * 5.7 网络测速结果 - * - * 您可以在用户进入房间前通过 TRTCCloud 的 startSpeedTest 接口进行测速 (注意:请不要在通话中调用), - * 测速结果会每2 - 3秒钟返回一次,每次返回一个 IP 地址的测试结果。 - * - * @note - quality 是内部通过评估算法测算出的网络质量,loss 越低,rtt 越小,得分便越高。 - * @note - upLostRate 是指上行丢包率。例如,0.3表示每向服务器发送10个数据包可能会在中途丢失3个。 - * @note - downLostRate 是指下行丢包率。例如,0.2表示每从服务器收取10个数据包可能会在中途丢失2个。 - * @note - rtt 是指当前设备到腾讯云服务器的一次网络往返时间,该值越小越好,正常数值范围是10ms - 100ms - */ -@interface TRTCSpeedTestResult : NSObject - -/// 服务器 IP 地址 -@property (strong, nonatomic, nonnull) NSString *ip; - -/// 网络质量,内部通过评估算法测算出的网络质量,loss 越低,rtt 越小,得分便越高。 -@property (nonatomic) TRTCQuality quality; - -/// 上行丢包率,范围是0 - 1.0,例如,0.3表示每向服务器发送10个数据包可能会在中途丢失3个。 -@property (nonatomic) float upLostRate; - -/// 下行丢包率,范围是0 - 1.0,例如,0.2表示每从服务器收取10个数据包可能会在中途丢失2个。 -@property (nonatomic) float downLostRate; - -/// 延迟(毫秒),指当前设备到腾讯云服务器的一次网络往返时间,该值越小越好,正常数值范围是10ms - 100ms -@property (nonatomic) uint32_t rtt; -@end - -#pragma mark - - -/** - * 5.8 视频帧信息 - * - * TRTCVideoFrame 用来描述一帧视频画面的裸数据,它可以是一帧编码前的画面,也可以是一帧解码后的画面。 - */ -@interface TRTCVideoFrame : NSObject - -///【字段含义】视频像素格式 -///【推荐取值】TRTCVideoPixelFormat_NV12 -@property (nonatomic, assign) TRTCVideoPixelFormat pixelFormat; - -///【字段含义】视频数据结构类型 -///【推荐取值】TRTCVideoBufferType_PixelBuffer -@property (nonatomic, assign) TRTCVideoBufferType bufferType; - -///【字段含义】bufferType 为 TRTCVideoBufferType_PixelBuffer 时的视频数据。 -@property (nonatomic, assign, nullable) CVPixelBufferRef pixelBuffer; - -///【字段含义】bufferType 为 TRTCVideoBufferType_NSData 时的视频数据。 -@property (nonatomic, retain, nullable) NSData* data; - -///【字段含义】视频帧的时间戳,单位毫秒 -///【推荐取值】自定义视频采集时可以设置为0,若该参数为0,SDK 会自定填充 timestamp 字段,但请“均匀”地控制 sendCustomVideoData 的调用间隔。 -@property (nonatomic, assign) uint64_t timestamp; - -///【字段含义】视频宽度 -///【推荐取值】自定义视频采集时不需要填写。 -@property (nonatomic, assign) uint32_t width; - -///【字段含义】视频高度 
-///【推荐取值】自定义视频采集时不需要填写。 -@property (nonatomic, assign) uint32_t height; - -///【字段含义】视频像素的顺时针旋转角度 -@property (nonatomic, assign) TRTCVideoRotation rotation; - -///【字段含义】视频纹理ID -@property (nonatomic, assign) GLuint textureId; - -@end - -/** - * 5.9 音频帧数据 - */ -#pragma mark - -/// 音频帧数据 -@interface TRTCAudioFrame : NSObject -/// 音频数据 -@property (nonatomic, retain, nonnull) NSData * data; -/// 采样率 -@property (nonatomic, assign) TRTCAudioSampleRate sampleRate; -/// 声道数 -@property (nonatomic, assign) int channels; -/// 时间戳,单位ms -@property (nonatomic, assign) uint64_t timestamp; -@end - -/** - * 5.10 云端混流中每一路子画面的位置信息 - * - * TRTCMixUser 用于指定每一路(即每一个 userId)视频画面的具体摆放位置 - */ -@interface TRTCMixUser : NSObject -/// 【字段含义】参与混流的 userId -@property(nonatomic, copy, nonnull) NSString * userId; -/// 【字段含义】混流的房间, 可填 nil 表示是自己所在的房间 -@property (nonatomic, copy, nullable) NSString * roomID; -/// 【字段含义】图层位置坐标以及大小,左上角为坐标原点(0,0) (绝对像素值) -@property(nonatomic, assign) CGRect rect; -/// 【字段含义】图层层次(1 - 15)不可重复 -@property(nonatomic, assign) int zOrder; -/// 【字段含义】参与混合的是主路画面(TRTCVideoStreamTypeBig)或屏幕分享(TRTCVideoStreamTypeSub)画面 -@property (nonatomic) TRTCVideoStreamType streamType; -/// 【字段含义】该用户是不是只开启了音频 -/// 【推荐取值】默认值:NO -/// 【特别说明】废弃,推荐使用 inputType -@property (nonatomic, assign) BOOL pureAudio; -/// 【字段含义】该用户的输入流类型(该字段是对 pureAudio 字段的升级) -/// 【推荐取值】 -/// - 默认值:TRTCMixInputTypeUndefined -/// - 如果您没有对 pureAudio 字段进行设置,您可以根据实际需要设置该字段 -/// - 如果您已经设置了 pureAudio 为 YES,请设置该字段为 TRTCMixInputTypeUndefined -@property (nonatomic, assign) TRTCMixInputType inputType; - -@end - -/** - * 5.11 云端混流(转码)配置 - * - * 包括最终编码质量和各路画面的摆放位置 - */ -@interface TRTCTranscodingConfig : NSObject - -///【字段含义】转码config模式 -@property(nonatomic, assign) TRTCTranscodingConfigMode mode; - -///【字段含义】腾讯云直播 AppID -///【推荐取值】请在 [实时音视频控制台](https://console.cloud.tencent.com/rav) 选择已经创建的应用,单击【帐号信息】后,在“直播信息”中获取 -@property (nonatomic) int appId; - -///【字段含义】腾讯云直播 bizid -///【推荐取值】请在 [实时音视频控制台](https://console.cloud.tencent.com/rav) 
选择已经创建的应用,单击【帐号信息】后,在“直播信息”中获取 -@property (nonatomic) int bizId; - -///【字段含义】最终转码后的视频分辨率的宽度。 -///【推荐取值】推荐值:360px ,如果你是纯音频推流,请将 width × height 设为 0px × 0px,否则混流后会携带一条画布背景的视频流。 -@property(nonatomic, assign) int videoWidth; - -///【字段含义】最终转码后的视频分辨率的高度。 -///【推荐取值】推荐值:640px ,如果你是纯音频推流,请将 width × height 设为 0px × 0px,否则混流后会携带一条画布背景的视频流。 -@property(nonatomic, assign) int videoHeight; - -///【字段含义】最终转码后的视频分辨率的码率(kbps)。 -///【推荐取值】如果填0,后台会根据videoWidth和videoHeight来估算码率,您也可以参考枚举定义TRTCVideoResolution_640_480的注释。 -@property(nonatomic, assign) int videoBitrate; - -///【字段含义】最终转码后的视频分辨率的帧率(FPS)。 -///【推荐取值】默认值:15fps,取值范围是 (0,30]。 -@property(nonatomic, assign) int videoFramerate; - -///【字段含义】最终转码后的视频分辨率的关键帧间隔(又称为 GOP)。 -///【推荐取值】默认值:2,单位为秒,取值范围是 [1,8]。 -@property(nonatomic, assign) int videoGOP; - -///【字段含义】混合后画面的底色颜色,默认为黑色,格式为十六进制数字,比如:“0x61B9F1” 代表 RGB 分别为(97,158,241)。 -///【推荐取值】默认值:0x000000,黑色 -@property(nonatomic, assign) int backgroundColor; - -///【字段含义】混合后画面的背景图。 -///【推荐取值】默认值:nil,即不设置背景图 -///【特别说明】背景图需要您事先在 “[控制台](https://console.cloud.tencent.com/trtc) => 应用管理 => 功能配置 => 素材管理” 中上传, -/// 上传成功后可以获得对应的“图片ID”,然后将“图片ID”转换成字符串类型并设置到 backgroundImage 里即可。 -/// 例如:假设“图片ID” 为 63,可以设置 backgroundImage = @"63"; -@property(nonatomic, copy, nullable) NSString *backgroundImage; - -///【字段含义】最终转码后的音频采样率。 -///【推荐取值】默认值:48000Hz。支持12000HZ、16000HZ、22050HZ、24000HZ、32000HZ、44100HZ、48000HZ。 -@property(nonatomic, assign) int audioSampleRate; - -///【字段含义】最终转码后的音频码率。 -///【推荐取值】默认值:64kbps,取值范围是 [32,192]。 -@property(nonatomic, assign) int audioBitrate; - -///【字段含义】最终转码后的音频声道数 -///【推荐取值】默认值:1。取值范围为 [1,2] 中的整型。 -@property(nonatomic, assign) int audioChannels; - -///【字段含义】每一路子画面的位置信息 -@property(nonatomic, copy, nonnull) NSArray<TRTCMixUser *> * mixUsers; - -///【字段含义】输出到 CDN 上的直播流 ID -/// 如不设置该参数,SDK 会执行默认逻辑,即房间里的多路流会混合到该接口调用者的视频流上,也就是 A+B =>A; -/// 如果设置该参数,SDK 会将房间里的多路流混合到您指定的直播流 ID 上,也就是 A+B =>C。 -///【推荐取值】默认值:nil,即房间里的多路流会混合到该接口调用者的视频流上。 -@property(nonatomic, copy, nullable) NSString *streamId; - -@end - 
-#pragma mark - - -/** - * 5.12 CDN 旁路推流参数 - */ -@interface TRTCPublishCDNParam : NSObject -/// 腾讯云 AppID,请在 [实时音视频控制台](https://console.cloud.tencent.com/rav) 选择已经创建的应用,单击【帐号信息】,在“直播信息”中获取 -@property (nonatomic) int appId; - -/// 腾讯云直播 bizid,请在 [实时音视频控制台](https://console.cloud.tencent.com/rav) 选择已经创建的应用,单击【帐号信息】,在“直播信息”中获取 -@property (nonatomic) int bizId; - -/// 旁路转推的 URL -@property (nonatomic, strong, nonnull) NSString * url; -@end - -/** - * 5.13 录音参数 - * - * 请正确填写参数,确保录音文件顺利生成。 - */ -@interface TRTCAudioRecordingParams : NSObject - -///【字段含义】文件路径(必填),录音文件的保存路径。该路径需要用户自行指定,请确保路径存在且可写。 -///【特别说明】该路径需精确到文件名及格式后缀,格式后缀决定录音文件的格式,目前支持的格式有 PCM、WAV 和 AAC。 -/// 例如,指定路径为 path/to/audio.aac,则会生成一个 AAC 格式的文件。 -/// 请指定一个有读写权限的合法路径,否则录音文件无法生成。 -@property (nonatomic, strong, nonnull) NSString * filePath; -@end - -/** - * 5.14 音效 - * - */ -@interface TRTCAudioEffectParam : NSObject - -+ (_Nonnull instancetype)new __attribute__((unavailable("Use -initWith:(int)effectId path:(NSString * )path instead"))); -- (_Nonnull instancetype)init __attribute__((unavailable("Use -initWith:(int)effectId path:(NSString *)path instead"))); - -/// 【字段含义】音效 ID -/// 【特别说明】SDK 允许播放多路音效,因此需要音效 ID 进行标记,用于控制音效的开始、停止、音量等 -@property(nonatomic, assign) int effectId; - -/// 【字段含义】音效文件路径,支持的文件格式:aac, mp3, m4a。 -@property(nonatomic, copy, nonnull) NSString * path; - -/// 【字段含义】循环播放次数 -/// 【推荐取值】取值范围为0 - 任意正整数,默认值:0。0表示播放音效一次;1表示播放音效两次;以此类推 -@property(nonatomic, assign) int loopCount; - -/// 【字段含义】音效是否上行 -/// 【推荐取值】YES:音效在本地播放的同时,会上行至云端,因此远端用户也能听到该音效;NO:音效不会上行至云端,因此只能在本地听到该音效。默认值:NO -@property(nonatomic, assign) BOOL publish; - -/// 【字段含义】音效音量 -/// 【推荐取值】取值范围为0 - 100;默认值:100 -@property(nonatomic, assign) int volume; - -- (_Nonnull instancetype)initWith:(int)effectId path:(NSString * _Nonnull)path; -@end - -#pragma mark - -/** - * 5.15 切换房间 - */ -@interface TRTCSwitchRoomConfig : NSObject - -///【字段含义】数字房间号码 [选填],在同一个房间内的用户可以看到彼此并进行视频通话。 -///【推荐取值】取值范围:1 - 4294967294。 -///【特别说明】roomId 和 strRoomId 
必须并且只能填一个。若两者都填,则优先选择 roomId。 -@property (nonatomic, assign) UInt32 roomId; - -///【字段含义】字符串房间号码 [选填],在同一个房间内的用户可以看到彼此并进行视频通话。 -///【特别说明】roomId 和 strRoomId 必须并且只能填一个。若两者都填,则优先选择 roomId。 -@property (nonatomic, copy, nullable) NSString *strRoomId; - -///【字段含义】用户签名 [选填],当前 userId 对应的验证签名,相当于登录密码。不填时,SDK 会继续使用旧的 userSig, -/// 但用户必须保证旧的 userSig 仍在有效期内,否则会造成进房失败等后果。 -///【推荐取值】具体计算方法请参见 [如何计算UserSig](https://cloud.tencent.com/document/product/647/17275)。 -@property (nonatomic, copy, nullable) NSString *userSig; - -///【字段含义】房间签名 [选填],当您希望某个房间只能让特定的 userId 进入时,需要使用 privateMapKey 进行权限保护。 -///【推荐取值】仅建议有高级别安全需求的客户使用,更多详情请参见 [进房权限保护](https://cloud.tencent.com/document/product/647/32240)。 -@property (nonatomic, copy, nullable) NSString *privateMapKey; - -@end - - -typedef NS_ENUM(NSUInteger, TRTCLocalVideoMirrorType) { - TRTCLocalVideoMirrorType_Auto = TRTCVideoMirrorTypeAuto, - TRTCLocalVideoMirrorType_Enable = TRTCVideoMirrorTypeEnable, - TRTCLocalVideoMirrorType_Disable = TRTCVideoMirrorTypeDisable, -} __attribute__((deprecated("use TRTCVideoMirrorType instead"))); - -/** - * 5.16 本地媒体文件录制参数 - * - * 请正确填写参数,确保录制文件顺利生成。 - */ -@interface TRTCLocalRecordingParams : NSObject - -///【字段含义】文件路径(必填),录制的文件地址,请自行指定,确保路径有读写权限且合法,否则录制文件无法生成。 -///【特别说明】该路径需精确到文件名及格式后缀,格式后缀决定录制文件的格式,目前支持的格式只有 mp4。 -/// iOS建议在沙盒目录 Document或Library/Caches 中指定存放路径。 -///【示例代码】在cache目录下录制 example.mp4 文件 -/// NSArray * path = NSSearchPathForDirectoriesInDomains(NSCachesDirectory, NSUserDomainMask, YES); -/// NSString * cachePath = [path lastObject]; -/// NSString * filePath = [cachePath stringByAppendingPathComponent:@"example.mp4"]; -@property(nonatomic, copy, nonnull) NSString *filePath; - -///【字段含义】媒体录制类型,默认值:TRTCRecordTypeBoth,为同时录制音频和视频。 -@property(nonatomic, assign) TRTCRecordType recordType; - -///interval 录制信息更新频率,单位毫秒,有效范围:1000-10000。默认值为-1,表示不回调。 -@property(nonatomic, assign) int interval; - -@end - -#if TARGET_OS_MAC && !TARGET_OS_IPHONE -#pragma mark - -/** - * 设备类型(仅 Mac) - * - * @deprecated - * 在 
Mac 上,每一种类型的设备都可能有多个,TRTC SDK 的 Mac 版本提供了一系列函数用来操作这些设备。 - */ -typedef NS_ENUM(NSInteger, TRTCMediaDeviceType) { - TRTCMediaDeviceTypeUnknown = -1, ///< 未定义 - - TRTCMediaDeviceTypeAudioInput = 0, ///< 麦克风 - TRTCMediaDeviceTypeAudioOutput = 1, ///< 扬声器或听筒 - TRTCMediaDeviceTypeVideoCamera = 2, ///< 摄像头 - - TRTCMediaDeviceTypeVideoWindow = 3, ///< 某个窗口(用于屏幕分享) - TRTCMediaDeviceTypeVideoScreen = 4, ///< 整个屏幕(用于屏幕分享) -} __attribute__((deprecated("use TXMediaDeviceType instead"))); - -/** - * 媒体设备描述 - * - * @deprecated - * 在 Mac 上,每一种类型的设备都可能有多个,TRTC SDK 的 Mac 版本提供了一系列函数用来操作这些设备。 - */ -typedef TXMediaDeviceInfo TRTCMediaDeviceInfo __attribute__((deprecated("use TXMediaDeviceInfo instead"))); - -#endif - -/// @} +// Copyright (c) 2021 Tencent. All rights reserved. + +/** + * Module: TRTC 关键类型定义 + * Function: 分辨率、质量等级等枚举和常量值的定义 + */ +/// @defgroup TRTCCloudDef_ios 关键类型定义 +/// 腾讯云实时音视频的关键类型定义 +/// @{ +#import <Foundation/Foundation.h> +#import "TXLiteAVSymbolExport.h" + +///////////////////////////////////////////////////////////////////////////////// +// +// 渲染控件 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * [VIEW] 用于渲染视频画面的渲染控件 + * TRTC 中有很多需要操控视频画面的接口,这些接口都需要您指定视频渲染控件。 + * 1. ObjectiveC 接口 iOS 和 MAC + * - 在 iOS 系统中,您可以直接使用 UIView 作为视频渲染控件,SDK 会在您提供的 UIView 上绘制视频画面。 + * - 在 Mac 系统中,您可以直接使用 NSView 作为视频渲染控件,SDK 会在您提供的 NSView 上绘制视频画面。 + * 示例代码如下: + * UIView *videoView = [[UIView alloc] initWithFrame:CGRectMake(0, 0, 360, 640)]; + * [self.view addSubview:videoView]; + * [trtcCloud startLocalPreview:YES view:_localView]; + * 2. 
在 Android 平台中,您可以使用我们提供的 TXCloudVideoView 作为视频渲染控件,它支持 SurfaceView 和 TextureView 两种渲染方案。 + * - 当用于渲染本地的视频画面时:TXCloudVideoView 会优先使用 SurfaceView,该方案性能较好,但是不支持对 View 做动画或者变形特效。 + * - 当用于渲染远端的视频画面时:TXCloudVideoView 会优先使用 TextureView,该方案灵活度高,能够更好地支持动画或者变形特效。 + * 如果您希望强制使用某一种方案,可以按照如下方法进行编码: + * 用法一:强制使用 TextureView: + * TXCloudVideoView localView = findViewById(R.id.trtc_tc_cloud_view_main); + * localView.addVideoView(new TextureView(context)); + * mTRTCCloud.startLocalPreview(true, localView); + * 用法二:强制使用 SurfaceView: + * SurfaceView surfaceView = new SurfaceView(this); + * TXCloudVideoView localView = new TXCloudVideoView(surfaceView); + * mTRTCCloud.startLocalPreview(true, localView); + * 3. 全平台方案 View + * 由于全平台 C++ 接口需要使用统一的参数类型,所以您需要在调用这些接口时,将渲染控件统一转换成 TXView 类型的指针: + * - iOS 平台:您可以使用 UIView 对象作为渲染控件,在调用 C++ 接口时请传入 UIView 对象的指针(需强转为 void* 类型)。 + * - Mac 平台:您可以使用 NSView 对象作为渲染控件,在调用 C++ 接口时请传入 NSView 对象的指针(需强转为 void* 类型)。 + * - Android 平台:在调用 C++ 接口时请传入指向 TXCloudVideoView 对象的 jobject 指针(需强转为 void* 类型)。 + * - Windows 平台:您可以使用窗口句柄 HWND 作为渲染控件,在调用 C++ 接口时需要将 HWND 强转为 void* 类型。 + * 代码示例一:在 QT 下使用 C++ 全平台接口 + * QWidget *videoView; + * // The relevant code for setting the videoView is omitted here... + * getTRTCShareInstance()->startLocalPreview(reinterpret_cast<TXView>(videoView->winId())); + * 代码示例二:在 Android 平台下,通过 JNI 调用 C++ 全平台接口 + * native void nativeStartLocalPreview(String userId, int streamType, TXCloudVideoView view); + * //... 
+ * Java_com_example_test_MainActivity_nativeStartRemoteView(JNIEnv *env, jobject thiz, jstring user_id, jint stream_type, jobject view) { + * const char *user_id_chars = env->GetStringUTFChars(user_id, nullptr); + * trtc_cloud->startRemoteView(user_id_chars, (liteav::TRTCVideoStreamType)stream_type, view); + * env->ReleaseStringUTFChars(user_id, user_id_chars); + * } + */ +#if TARGET_OS_IPHONE || TARGET_OS_SIMULATOR +#import <UIKit/UIKit.h> +typedef UIView TXView; +typedef UIImage TXImage; +typedef UIEdgeInsets TXEdgeInsets; +#elif TARGET_OS_MAC +#import <AppKit/AppKit.h> +typedef NSView TXView; +typedef NSImage TXImage; +typedef NSEdgeInsets TXEdgeInsets; +#endif +#import "TXDeviceManager.h" + +///////////////////////////////////////////////////////////////////////////////// +// +// 视频相关枚举值定义 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 1.1 视频分辨率 + * + * 此处仅定义横屏分辨率(如 640 × 360),如需使用竖屏分辨率(如360 × 640),需要同时指定 TRTCVideoResolutionMode 为 Portrait。 + */ +typedef NS_ENUM(NSInteger, TRTCVideoResolution) { + + ///宽高比 1:1;分辨率 120x120;建议码率(VideoCall)80kbps; 建议码率(LIVE)120kbps。 + TRTCVideoResolution_120_120 = 1, + + ///宽高比 1:1 分辨率 160x160;建议码率(VideoCall)100kbps; 建议码率(LIVE)150kbps。 + TRTCVideoResolution_160_160 = 3, + + ///宽高比 1:1;分辨率 270x270;建议码率(VideoCall)200kbps; 建议码率(LIVE)300kbps。 + TRTCVideoResolution_270_270 = 5, + + ///宽高比 1:1;分辨率 480x480;建议码率(VideoCall)350kbps; 建议码率(LIVE)500kbps。 + TRTCVideoResolution_480_480 = 7, + + ///宽高比4:3;分辨率 160x120;建议码率(VideoCall)100kbps; 建议码率(LIVE)150kbps。 + TRTCVideoResolution_160_120 = 50, + + ///宽高比 4:3;分辨率 240x180;建议码率(VideoCall)150kbps; 建议码率(LIVE)250kbps。 + TRTCVideoResolution_240_180 = 52, + + ///宽高比 4:3;分辨率 280x210;建议码率(VideoCall)200kbps; 建议码率(LIVE)300kbps。 + TRTCVideoResolution_280_210 = 54, + + ///宽高比 4:3;分辨率 320x240;建议码率(VideoCall)250kbps; 建议码率(LIVE)375kbps。 + TRTCVideoResolution_320_240 = 56, + + ///宽高比 4:3;分辨率 400x300;建议码率(VideoCall)300kbps; 建议码率(LIVE)450kbps。 + 
TRTCVideoResolution_400_300 = 58, + + ///宽高比 4:3;分辨率 480x360;建议码率(VideoCall)400kbps; 建议码率(LIVE)600kbps。 + TRTCVideoResolution_480_360 = 60, + + ///宽高比 4:3;分辨率 640x480;建议码率(VideoCall)600kbps; 建议码率(LIVE)900kbps。 + TRTCVideoResolution_640_480 = 62, + + ///宽高比 4:3;分辨率 960x720;建议码率(VideoCall)1000kbps; 建议码率(LIVE)1500kbps。 + TRTCVideoResolution_960_720 = 64, + + ///宽高比 16:9;分辨率 160x90;建议码率(VideoCall)150kbps; 建议码率(LIVE)250kbps。 + TRTCVideoResolution_160_90 = 100, + + ///宽高比 16:9;分辨率 256x144;建议码率(VideoCall)200kbps; 建议码率(LIVE)300kbps。 + TRTCVideoResolution_256_144 = 102, + + ///宽高比 16:9;分辨率 320x180;建议码率(VideoCall)250kbps; 建议码率(LIVE)400kbps。 + TRTCVideoResolution_320_180 = 104, + + ///宽高比 16:9;分辨率 480x270;建议码率(VideoCall)350kbps; 建议码率(LIVE)550kbps。 + TRTCVideoResolution_480_270 = 106, + + ///宽高比 16:9;分辨率 640x360;建议码率(VideoCall)500kbps; 建议码率(LIVE)900kbps。 + TRTCVideoResolution_640_360 = 108, + + ///宽高比 16:9;分辨率 960x540;建议码率(VideoCall)850kbps; 建议码率(LIVE)1300kbps。 + TRTCVideoResolution_960_540 = 110, + + ///宽高比 16:9;分辨率 1280x720;建议码率(VideoCall)1200kbps; 建议码率(LIVE)1800kbps。 + TRTCVideoResolution_1280_720 = 112, + + ///宽高比 16:9;分辨率 1920x1080;建议码率(VideoCall)2000kbps; 建议码率(LIVE)3000kbps。 + TRTCVideoResolution_1920_1080 = 114, + +}; + +/** + * 1.2 视频宽高比模式 + * + * TRTCVideoResolution 中仅定义了横屏分辨率(如 640 × 360),如需使用竖屏分辨率(如360 × 640),需要同时指定 TRTCVideoResolutionMode 为 Portrait。 + */ +typedef NS_ENUM(NSInteger, TRTCVideoResolutionMode) { + + ///横屏分辨率,例如:TRTCVideoResolution_640_360 + TRTCVideoResolutionModeLandscape = 640 × 360。 + TRTCVideoResolutionModeLandscape = 0, + + ///竖屏分辨率,例如:TRTCVideoResolution_640_360 + TRTCVideoResolutionModePortrait = 360 × 640。 + TRTCVideoResolutionModePortrait = 1, + +}; + +/** + * 1.3 视频流类型 + * + * TRTC 内部有三种不同的视频流,分别是: + * - 高清大画面:一般用来传输摄像头的视频数据。 + * - 低清小画面:小画面和大画面的内容相互,但是分辨率和码率都比大画面低,因此清晰度也更低。 + * - 辅流画面:一般用于屏幕分享,同一时间在同一个房间中只允许一个用户发布辅流视频,其他用户必须要等该用户关闭之后才能发布自己的辅流。 + * @note 不支持单独开启低清小画面,小画面必须依附于大画面而存在,SDK 会自动设定低清小画面的分辨率和码率。 + */ +typedef NS_ENUM(NSInteger, 
TRTCVideoStreamType) { + + ///高清大画面,一般用来传输摄像头的视频数据。 + TRTCVideoStreamTypeBig = 0, + + ///低清小画面:小画面和大画面的内容相互,但是分辨率和码率都比大画面低,因此清晰度也更低。 + TRTCVideoStreamTypeSmall = 1, + + ///辅流画面:一般用于屏幕分享,同一时间在同一个房间中只允许一个用户发布辅流视频,其他用户必须要等该用户关闭之后才能发布自己的辅流。 + TRTCVideoStreamTypeSub = 2, + +}; + +/** + * 1.4 视频画面填充模式 + * + * 如果视频显示区域的宽高比不等于视频内容的宽高比时,就需要您指定画面的填充模式: + */ +typedef NS_ENUM(NSInteger, TRTCVideoFillMode) { + + ///填充模式:即将画面内容居中等比缩放以充满整个显示区域,超出显示区域的部分将会被裁剪掉,此模式下画面可能不完整。 + TRTCVideoFillMode_Fill = 0, + + ///适应模式:即按画面长边进行缩放以适应显示区域,短边部分会被填充为黑色,此模式下图像完整但可能留有黑边。 + TRTCVideoFillMode_Fit = 1, + +}; + +/** + * 1.5 视频画面旋转方向 + * + * TRTC 提供了对本地和远程画面的旋转角度设置 API,下列的旋转角度都是指顺时针方向的。 + */ +typedef NS_ENUM(NSInteger, TRTCVideoRotation) { + + ///不旋转 + TRTCVideoRotation_0 = 0, + + ///顺时针旋转90度 + TRTCVideoRotation_90 = 1, + + ///顺时针旋转180度 + TRTCVideoRotation_180 = 2, + + ///顺时针旋转270度 + TRTCVideoRotation_270 = 3, + +}; + +/** + * 1.6 美颜(磨皮)算法 + * + * TRTC 内置多种不同的磨皮算法,您可以选择最适合您产品定位的方案。 + */ +typedef NS_ENUM(NSInteger, TRTCBeautyStyle) { + + ///光滑,算法比较激进,磨皮效果比较明显,适用于秀场直播。 + TRTCBeautyStyleSmooth = 0, + + ///自然,算法更多地保留了面部细节,磨皮效果更加自然,适用于绝大多数直播场景。 + TRTCBeautyStyleNature = 1, + + ///优图,由优图实验室提供,磨皮效果介于光滑和自然之间,比光滑保留更多皮肤细节,比自然磨皮程度更高。 + TRTCBeautyStylePitu = 2, + +}; + +/** + * 1.7 视频像素格式 + * + * TRTC 提供针对视频的自定义采集和自定义渲染功能: + * - 在自定义采集功能中,您可以用下列枚举值描述您采集的视频像素格式。 + * - 在自定义渲染功能中,您可以指定您期望 SDK 回调出的视频像素格式。 + */ +typedef NS_ENUM(NSInteger, TRTCVideoPixelFormat) { + + ///未定义的格式 + TRTCVideoPixelFormat_Unknown = 0, + + /// YUV420P(I420) 格式 + TRTCVideoPixelFormat_I420 = 1, + + /// OpenGL 2D 纹理格式 + TRTCVideoPixelFormat_Texture_2D = 7, + + /// BGRA 格式 + TRTCVideoPixelFormat_32BGRA = 6, + + /// YUV420SP(NV12)格式 + TRTCVideoPixelFormat_NV12 = 5, + +}; + +/** + * 1.8 视频数据传递方式 + * + * 在自定义采集和自定义渲染功能,您需要用到下列枚举值来指定您希望以什么方式传递视频数据: + * - 方案一:使用内存 Buffer 传递视频数据,该方案在 iOS 效率尚可,但在 Android 系统上效率较差,Windows 暂时仅支持内存 Buffer 的传递方式。 + * - 方案二:使用 Texture 纹理传递视频数据,该方案在 iOS 和 Android 系统下均有较高的效率,Windows 暂不支持,需要您有一定的 OpenGL 编程基础。 + */ +typedef 
NS_ENUM(NSInteger, TRTCVideoBufferType) { + + ///未定义的传递方式 + TRTCVideoBufferType_Unknown = 0, + + ///使用内存 Buffer 传递视频数据,iOS: PixelBuffer;Android: 用于 JNI 层的 Direct Buffer;Win: 内存数据块。 + TRTCVideoBufferType_PixelBuffer = 1, + + ///使用内存 Buffer 传递视频数据,iOS: 经过一次额外整理后更加紧凑的 NSData 类型的内存块;Android: 用于 JAVA 层的 byte[]。 + ///该传递的方式的性能是几种方案中效率较差的一种。 + TRTCVideoBufferType_NSData = 2, + + ///使用 Texture 纹理传递视频数据 + TRTCVideoBufferType_Texture = 3, + +}; + +/** + * 1.9 视频的镜像类型 + * + * 视频的镜像是指对视频内容进行左右翻转,尤其是对本地的摄像头预览视频,开启镜像后能给主播带来熟悉的“照镜子”体验。 + */ +typedef NS_ENUM(NSUInteger, TRTCVideoMirrorType) { + + ///自动模式:如果正使用前置摄像头则开启镜像,如果是后置摄像头则不开启镜像(仅适用于移动设备)。 + TRTCVideoMirrorTypeAuto = 0, + + ///强制开启镜像,不论当前使用的是前置摄像头还是后置摄像头。 + TRTCVideoMirrorTypeEnable = 1, + + ///强制关闭镜像,不论当前使用的是前置摄像头还是后置摄像头。 + TRTCVideoMirrorTypeDisable = 2, + +}; + +/** + * 已废弃,请用 TRTCVideoMirrorType + */ +typedef NS_ENUM(NSUInteger, TRTCLocalVideoMirrorType) { + TRTCLocalVideoMirrorType_Auto = TRTCVideoMirrorTypeAuto, + TRTCLocalVideoMirrorType_Enable = TRTCVideoMirrorTypeEnable, + TRTCLocalVideoMirrorType_Disable = TRTCVideoMirrorTypeDisable, +} __attribute__((deprecated("use TRTCVideoMirrorType instead"))); + +/** + * 1.10 本地视频截图的数据源 + * + * SDK 支持从如下两种数据源中截取图片并保存成本地文件: + * - 视频流:从视频流中截取原生的视频内容,截取的内容不受渲染控件的显示控制。 + * - 渲染层:从渲染控件中截取显示的视频内容,可以做到用户所见即所得的效果,但如果显示区域过小,截取出的图片也会很小。 + */ +typedef NS_ENUM(NSUInteger, TRTCSnapshotSourceType) { + + ///从视频流中截取原生的视频内容,截取的内容不受渲染控件的显示控制。 + TRTCSnapshotSourceTypeStream = 0, + + ///从渲染控件中截取显示的视频内容,可以做到用户所见即所得的效果,但如果显示区域过小,截取出的图片也会很小。 + TRTCSnapshotSourceTypeView = 1, + +}; + +///////////////////////////////////////////////////////////////////////////////// +// +// 网络相关枚举值定义 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 2.1 应用场景 + * + * TRTC 针对常见的音视频应用场景都进行了定向优化,以满足各种垂直场景下的差异化要求,主要场景可以分为如下两类: + * - 直播(LIVE)场景:包括 LIVE 和 VoiceChatRoom,前者是音频+视频,后者是纯音频。 + * 直播场景下,用户被分成“主播”和“观众”两种角色,单个房间中同时最多支持10万人在线,适合于观众人数众多的直播场景。 + * - 实时(RTC)场景:包括 VideoCall 
和 AudioCall,前者是音频+视频,后者是纯音频。 + * 实时场景下,用户没有角色的差异,但单个房间中同时最多支持 300 人在线,适合于小范围实时通信的场景。 + */ +typedef NS_ENUM(NSInteger, TRTCAppScene) { + + ///视频通话场景,支持720P、1080P高清画质,单个房间最多支持300人同时在线,最高支持50人同时发言。 + ///适用于[1对1视频通话]、[300人视频会议]、[在线问诊]、[教育小班课]、[远程面试]等业务场景。 + TRTCAppSceneVideoCall = 0, + + ///视频互动直播,支持平滑上下麦,切换过程无需等待,主播延时小于300ms;支持十万级别观众同时播放,播放延时低至1000ms。 + ///适用于[低延时互动直播]、[大班课]、[主播PK]、[视频相亲]、[在线互动课堂]、[远程培训]、[超大型会议]等业务场景。 + ///@note 此场景下,您必须通过 TRTCParams 中的 role 字段指定当前用户的角色。 + TRTCAppSceneLIVE = 1, + + ///语音通话场景,默认采用 SPEECH 音质,单个房间最多支持300人同时在线,最高支持50人同时发言。 + ///适用于[1对1语音通话]、[300人语音会议]、[语音聊天]、[语音会议]、[在线狼人杀]等业务场景。 + TRTCAppSceneAudioCall = 2, + + ///语音互动直播,支持平滑上下麦,切换过程无需等待,主播延时小于300ms;支持十万级别观众同时播放,播放延时低至1000ms。 + ///适用于[语音俱乐部]、[在线K歌房]、[音乐直播间]、[FM电台]等业务场景。 + ///@note 此场景下,您必须通过 TRTCParams 中的 role 字段指定当前用户的角色。 + TRTCAppSceneVoiceChatRoom = 3, + +}; + +/** + * 2.2 角色 + * + * 仅适用于直播类场景(即 TRTCAppSceneLIVE 和 TRTCAppSceneVoiceChatRoom),把用户区分成两种不同的身份: + * - 主播:可以随时发布自己的音视频流,但人数有限制,同一个房间中最多只允许 50 个主播同时发布自己的音视频流。 + * - 观众:只能观看其他用户的音视频流,要发布音视频流,需要先通过 {@link switchRole} 切换成主播,同一个房间中最多能容纳10万观众。 + */ +typedef NS_ENUM(NSInteger, TRTCRoleType) { + + ///主播:可以随时发布自己的音视频流,但人数有限制,同一个房间中最多只允许 50 个主播同时发布自己的音视频流。 + TRTCRoleAnchor = 20, + + ///观众:只能观看其他用户的音视频流,要发布音视频流,需要先通过 {@link switchRole} 切换成主播,同一个房间中最多能容纳10万观众。 + TRTCRoleAudience = 21, + +}; + +/** + * 2.3 流控模式(已废弃) + */ +typedef NS_ENUM(NSInteger, TRTCQosControlMode) { + + ///本地控制,用于 SDK 开发内部调试,客户请勿使用。 + TRTCQosControlModeClient = 0, + + ///云端控制,默认模式,推荐选择。 + TRTCQosControlModeServer = 1, + +}; + +/** + * 2.4 画质偏好 + * + * TRTC 在弱网络环境下有两种调控模式:“优先保证画面清晰”或“优先保证画面流畅”,两种模式均会优先保障声音数据的传输。 + */ +typedef NS_ENUM(NSInteger, TRTCVideoQosPreference) { + + ///流畅优先:即当前网络不足以传输既清晰又流畅的画面时,优先保证画面的流畅性,代价就是画面会比较模糊且伴随有较多的马赛克。 + TRTCVideoQosPreferenceSmooth = 1, + + ///清晰优先(默认值):即当前网络不足以传输既清晰又流畅的画面时,优先保证画面的清晰度,代价就是画面会比较卡顿。 + TRTCVideoQosPreferenceClear = 2, + +}; + +/** + * 2.5 网络质量 + * + * TRTC 会每隔两秒对当前的网络质量进行评估,评估结果为六个等级:Excellent 表示最好,Down 表示最差。 + */ +typedef 
NS_ENUM(NSInteger, TRTCQuality) { + + ///未定义 + TRTCQuality_Unknown = 0, + + ///当前网络非常好 + TRTCQuality_Excellent = 1, + + ///当前网络比较好 + TRTCQuality_Good = 2, + + ///当前网络一般 + TRTCQuality_Poor = 3, + + ///当前网络较差 + TRTCQuality_Bad = 4, + + ///当前网络很差 + TRTCQuality_Vbad = 5, + + ///当前网络不满足 TRTC 的最低要求 + TRTCQuality_Down = 6, + +}; + +/** + * 2.6 视频状态类型 + * + * 该枚举类型用于视频状态变化回调接口{@link onRemoteVideoStatusUpdated},用于指定当前的视频状态。 + */ +typedef NS_ENUM(NSUInteger, TRTCAVStatusType) { + + ///停止播放 + TRTCAVStatusStopped = 0, + + ///正在播放 + TRTCAVStatusPlaying = 1, + + ///正在加载 + TRTCAVStatusLoading = 2, + +}; + +/** + * 2.7 视频状态变化原因类型 + * + * 该枚举类型用于视频状态变化回调接口{@link onRemoteVideoStatusUpdated},用于指定当前的视频状态原因。 + */ +typedef NS_ENUM(NSUInteger, TRTCAVStatusChangeReason) { + + ///缺省值 + TRTCAVStatusChangeReasonInternal = 0, + + ///网络缓冲 + TRTCAVStatusChangeReasonBufferingBegin = 1, + + ///结束缓冲 + TRTCAVStatusChangeReasonBufferingEnd = 2, + + ///本地启动视频流播放 + TRTCAVStatusChangeReasonLocalStarted = 3, + + ///本地停止视频流播放 + TRTCAVStatusChangeReasonLocalStopped = 4, + + ///远端视频流开始(或继续) + TRTCAVStatusChangeReasonRemoteStarted = 5, + + ///远端视频流停止(或中断 + TRTCAVStatusChangeReasonRemoteStopped = 6, + +}; + +///////////////////////////////////////////////////////////////////////////////// +// +// 音频相关枚举值定义 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 3.1 音频采样率 + * + * 音频采样率用来衡量声音的保真程度,采样率越高保真程度越好,如果您的应用场景有音乐的存在,推荐使用 TRTCAudioSampleRate48000。 + */ +typedef NS_ENUM(NSInteger, TRTCAudioSampleRate) { + + /// 16k采样率 + TRTCAudioSampleRate16000 = 16000, + + /// 32k采样率 + TRTCAudioSampleRate32000 = 32000, + + /// 44.1k采样率 + TRTCAudioSampleRate44100 = 44100, + + /// 48k采样率 + TRTCAudioSampleRate48000 = 48000, + +}; + +/** + * 3.2 声音音质 + * + * TRTC 提供了三种精心校调好的模式,用来满足各种垂直场景下对音质的差异化追求: + * - 人声模式(Speech):适用于以人声沟通为主的应用场景,该模式下音频传输的抗性较强,TRTC 会通过各种人声处理技术保障在弱网络环境下的流畅度最佳。 + * - 音乐模式(Music):适用于对声乐要求很苛刻的场景,该模式下音频传输的数据量很大,TRTC 会通过各项技术确保音乐信号在各频段均能获得高保真的细节还原度。 + * - 
默认模式(Default):介于 Speech 和 Music 之间的档位,对音乐的还原度比人声模式要好,但传输数据量比音乐模式要低很多,对各种场景均有不错的适应性。 + */ +typedef NS_ENUM(NSInteger, TRTCAudioQuality) { + + ///人声模式:采样率:16k;单声道;编码码率:16kbps;具备几个模式中最强的网络抗性,适合语音通话为主的场景,比如在线会议,语音通话等。 + TRTCAudioQualitySpeech = 1, + + ///默认模式:采样率:48k;单声道;编码码率:50kbps;介于 Speech 和 Music 之间的档位,SDK 默认档位,推荐选择。 + TRTCAudioQualityDefault = 2, + + ///音乐模式:采样率:48k;全频带立体声;编码码率:128kbps;适合需要高保真传输音乐的场景,比如在线K歌、音乐直播等。 + TRTCAudioQualityMusic = 3, + +}; + +/** + * 3.3 音频路由(即声音的播放模式) + * + * 音频路由,即声音是从手机的扬声器还是从听筒中播放出来,因此该接口仅适用于手机等移动端设备。 + * 手机有两个扬声器:一个是位于手机顶部的听筒,一个是位于手机底部的立体声扬声器。 + * - 设置音频路由为听筒时,声音比较小,只有将耳朵凑近才能听清楚,隐私性较好,适合用于接听电话。 + * - 设置音频路由为扬声器时,声音比较大,不用将手机贴脸也能听清,因此可以实现“免提”的功能。 + */ +typedef NS_ENUM(NSInteger, TRTCAudioRoute) { + + /// Speakerphone:使用扬声器播放(即“免提”),扬声器位于手机底部,声音偏大,适合外放音乐。 + TRTCAudioModeSpeakerphone = 0, + + /// Earpiece:使用听筒播放,听筒位于手机顶部,声音偏小,适合需要保护隐私的通话场景。 + TRTCAudioModeEarpiece = 1, + +}; + +/** + * 3.4 声音混响模式 + * + * 该枚举值应用于设定直播场景中的混响模式,常用于秀场直播中。 + */ +typedef NS_ENUM(NSInteger, TRTCReverbType) { + + ///关闭混响 + TRTCReverbType_0 = 0, + + /// KTV + TRTCReverbType_1 = 1, + + ///小房间 + TRTCReverbType_2 = 2, + + ///大会堂 + TRTCReverbType_3 = 3, + + ///低沉 + TRTCReverbType_4 = 4, + + ///洪亮 + TRTCReverbType_5 = 5, + + ///金属声 + TRTCReverbType_6 = 6, + + ///磁性 + TRTCReverbType_7 = 7, + +}; + +/** + * 3.5 变声类型 + * + * 该枚举值应用于设定直播场景中的变声模式,常用于秀场直播中。 + */ +typedef NS_ENUM(NSInteger, TRTCVoiceChangerType) { + + ///关闭变声 + TRTCVoiceChangerType_0 = 0, + + ///熊孩子 + TRTCVoiceChangerType_1 = 1, + + ///萝莉 + TRTCVoiceChangerType_2 = 2, + + ///大叔 + TRTCVoiceChangerType_3 = 3, + + ///重金属 + TRTCVoiceChangerType_4 = 4, + + ///感冒 + TRTCVoiceChangerType_5 = 5, + + ///外国人 + TRTCVoiceChangerType_6 = 6, + + ///困兽 + TRTCVoiceChangerType_7 = 7, + + ///死肥仔 + TRTCVoiceChangerType_8 = 8, + + ///强电流 + TRTCVoiceChangerType_9 = 9, + + ///重机械 + TRTCVoiceChangerType_10 = 10, + + ///空灵 + TRTCVoiceChangerType_11 = 11, + +}; + +/** + * 3.6 系统音量类型(仅适用于移动设备) + * + * 
现代智能手机中一般都具备两套系统音量类型,即“通话音量”和“媒体音量”。 + * - 通话音量:手机专门为接打电话所设计的音量类型,自带回声抵消(AEC)功能,并且支持通过蓝牙耳机上的麦克风进行拾音,缺点是音质比较一般。 + * 当您通过手机侧面的音量按键下调手机音量时,如果无法将其调至零(也就是无法彻底静音),说明您的手机当前处于通话音量。 + * - 媒体音量:手机专门为音乐场景所设计的音量类型,无法使用系统的 AEC 功能,并且不支持通过蓝牙耳机的麦克风进行拾音,但具备更好的音乐播放效果。 + * 当您通过手机侧面的音量按键下调手机音量时,如果能够将手机音量调至彻底静音,说明您的手机当前处于媒体音量。 + * SDK 目前提供了三种系统音量类型的控制模式:自动切换模式、全程通话音量模式、全程媒体音量模式。 + */ +typedef NS_ENUM(NSInteger, TRTCSystemVolumeType) { + + ///自动切换模式: + ///也被称为“麦上通话,麦下媒体”,即主播上麦时使用通话音量,观众不上麦则使用媒体音量,适合在线直播场景。 + ///如果您在 enterRoom 时选择的场景为 TRTCAppSceneLIVE 或 TRTCAppSceneVoiceChatRoom,SDK 会自动使用该模式。 + TRTCSystemVolumeTypeAuto = 0, + + ///全程媒体音量: + ///通话全程使用媒体音量,并不是非常常用的音量类型,适用于对音质要求比较苛刻的音乐场景中。 + ///如果您的用户大都使用外接设备(比如外接声卡)为主,可以使用该模式,否则请慎用。 + TRTCSystemVolumeTypeMedia = 1, + + ///全程通话音量: + ///该方案的优势在于用户在上下麦时音频模块无需切换工作模式,可以做到无缝上下麦,适合于用户需要频繁上下麦的应用场景。 + ///如果您在 enterRoom 时选择的场景为 TRTCAppSceneVideoCall 或 TRTCAppSceneAudioCall,SDK 会自动使用该模式。 + TRTCSystemVolumeTypeVOIP = 2, + +}; + +///////////////////////////////////////////////////////////////////////////////// +// +// 更多枚举值定义 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 4.1 Log 级别 + * + * 不同的日志等级定义了不同的详实程度和日志数量,推荐一般情况下将日志等级设置为:TRTCLogLevelInfo。 + */ +typedef NS_ENUM(NSInteger, TRTCLogLevel) { + + ///输出所有级别的 Log + TRTCLogLevelVerbose = 0, + + ///输出 DEBUG,INFO,WARNING,ERROR 和 FATAL 级别的 Log + TRTCLogLevelDebug = 1, + + ///输出 INFO,WARNING,ERROR 和 FATAL 级别的 Log + TRTCLogLevelInfo = 2, + + ///输出WARNING,ERROR 和 FATAL 级别的 Log + TRTCLogLevelWarn = 3, + + ///输出ERROR 和 FATAL 级别的 Log + TRTCLogLevelError = 4, + + ///仅输出 FATAL 级别的 Log + TRTCLogLevelFatal = 5, + + ///不输出任何 SDK Log + TRTCLogLevelNone = 6, + +}; + +/** + * 4.2 重力感应开关(仅适用于移动端) + */ +typedef NS_ENUM(NSInteger, TRTCGSensorMode) { + + ///不适配重力感应 + ///该模式是桌面平台上的默认值,该模式下,当前用户发布出去的视频画面不受重力感应方向变化的影响。 + TRTCGSensorMode_Disable = 0, + + ///适配重力感应 + ///该模式是移动平台上的默认值,该模式下,当前用户发布出去的视频画面会跟随设备的重力感应方向进行相应的调整,同时本地预览画面保持方向不变。 + /// SDK 目前支持的一种适配模式是:当手机或 Pad 
上下颠倒时,为了保证远端用户看到的画面方向正常,SDK 会自动将发布出去的画面上下旋转180度。 + ///如果您的 APP 的界面层开启了重力感应自适应,推荐使用 UIFixLayout 模式。 + TRTCGSensorMode_UIAutoLayout = 1, + + ///适配重力感应 + ///该模式下,当前用户发布出去的视频画面会跟随设备的重力感应方向进行相应的调整,同时本地预览画面也会进行相应的旋转适配。 + ///目前支持的一种特性是:当手机或 Pad 上下颠倒时,为了保证远端用户看到的画面方向正常,SDK 会自动将发布出去的画面上下旋转180度。 + ///如果您的 APP 的界面层不支持重力感应自适应,并且希望 SDK 的视频画面能够适配重力感应方向,推荐使用 UIFixLayout 模式。 + TRTCGSensorMode_UIFixLayout = 2, + +}; + +/** + * 4.3 屏幕分享的目标类型(仅适用于桌面端) + */ +typedef NS_ENUM(NSInteger, TRTCScreenCaptureSourceType) { + + ///未定义 + TRTCScreenCaptureSourceTypeUnknown = -1, + + ///该分享目标是某一个应用的窗口 + TRTCScreenCaptureSourceTypeWindow = 0, + + ///该分享目标是某一台显示器的屏幕 + TRTCScreenCaptureSourceTypeScreen = 1, + +}; + +/** + * 4.4 云端混流的排版模式 + * + * TRTC 的云端混流服务能够将房间中的多路音视频流混合成一路,因此您需要指定画面的排版方案,我们提供了如下几种排版模式: + */ +typedef NS_ENUM(NSInteger, TRTCTranscodingConfigMode) { + + ///未定义 + TRTCTranscodingConfigMode_Unknown = 0, + + ///全手动排版模式 + ///该模式下,您需要指定每一路画面的精确排版位置。该模式的自由度最高,但易用性也最差: + ///- 您需要填写 TRTCTranscodingConfig 中的所有参数,包括每一路画面(TRTCMixUser)的位置坐标。 + ///- 您需要监听 TRTCCloudDelegate 中的 onUserVideoAvailable() 和 onUserAudioAvailable() 事件回调,并根据当前房间中各个麦上用户的音视频状态不断地调整 mixUsers 参数。 + TRTCTranscodingConfigMode_Manual = 1, + + ///纯音频模式 + ///该模式适用于语音通话(AudioCall)和语音聊天室(VoiceChatRoom)等纯音频的应用场景。 + ///- 您只需要在进入房间后,通过 setMixTranscodingConfig() 接口设置一次,之后 SDK 就会自动把房间内所有上麦用户的声音混流到当前用户的直播流上。 + ///- 您无需设置 TRTCTranscodingConfig 中的 mixUsers 参数,只需设置 audioSampleRate、audioBitrate 和 audioChannels 等参数即可。 + TRTCTranscodingConfigMode_Template_PureAudio = 2, + + ///预排版模式 + ///最受欢迎的排版模式,因为该模式支持您通过占位符提前对各路画面的位置进行设定,之后 SDK 会自动根据房间中画面的路数动态进行适配调整。 + ///此模式下,您依然需要设置 mixUsers 参数,但可以将 userId 设置为“占位符”,可选的占位符有: + /// - "$PLACE_HOLDER_REMOTE$" : 指代远程用户的画面,可以设置多个。 + /// - "$PLACE_HOLDER_LOCAL_MAIN$" : 指代本地摄像头画面,只允许设置一个。 + /// - "$PLACE_HOLDER_LOCAL_SUB$" : 指代本地屏幕分享画面,只允许设置一个。 + ///此模式下,您不需要监听 TRTCCloudDelegate 中的 onUserVideoAvailable() 和 onUserAudioAvailable() 回调进行实时调整, + ///只需要在进房成功后调用一次 setMixTranscodingConfig() 即可,之后 SDK 会自动将真实的 userId 
补位到您设置的占位符上。 + TRTCTranscodingConfigMode_Template_PresetLayout = 3, + + ///屏幕分享模式 + ///适用于在线教育场景等以屏幕分享为主的应用场景,仅支持 Windows 和 Mac 两个平台的 SDK。 + ///该模式下,SDK 会先根据您通过 videoWidth 和 videoHeight 参数设置的目标分辨率构建一张画布, + ///- 当老师未开启屏幕分享时,SDK 会将老师的摄像头画面等比例拉伸绘制到该画布上; + ///- 当老师开启屏幕分享之后,SDK 会将屏幕分享画面绘制到同样的画布上。 + ///此种排版模式的目的是为了确保混流模块的输出分辨率一致,避免课程回放和网页观看的花屏问题(网页播放器不支持可变分辨率)。 + ///同时,连麦学生的声音也会被默认混合到老师的音视频流中。 + ///< br> + ///由于教学模式下的视频内容以屏幕分享为主,因此同时传输摄像头画面和屏幕分享画面是非常浪费带宽的。 + ///推荐的做法是直接将摄像头画面通过 setLocalVideoRenderCallback 接口自定义绘制到当前屏幕上。 + ///在该模式下,您无需设置 TRTCTranscodingConfig 中的 mixUsers 参数,SDK 不会混合学生的画面,以免干扰屏幕分享的效果。 + ///< br> + ///您可以将 TRTCTranscodingConfig 中的 width × height 设为 0px × 0px,SDK 会自动根据用户当前屏幕的宽高比计算出一个合适的分辨率: + ///- 如果老师当前屏幕宽度 <= 1920px,SDK 会使用老师当前屏幕的实际分辨率。 + ///- 如果老师当前屏幕宽度 > 1920px,SDK 会根据当前屏幕宽高比,选择 1920x1080(16:9)、1920x1200(16:10)、1920x1440(4:3) 三种分辨率中的一种。 + TRTCTranscodingConfigMode_Template_ScreenSharing = 4, + +}; + +/** + * 4.5 媒体录制类型 + * + * 该枚举类型用于本地媒体录制接口{@link startLocalRecording},用于指定是录制音视频文件还是纯音频文件。 + */ +typedef NS_ENUM(NSUInteger, TRTCRecordType) { + + ///仅录制音频 + TRTCRecordTypeAudio = 0, + + ///仅录制视频 + TRTCRecordTypeVideo = 1, + + ///同时录制音频和视频 + TRTCRecordTypeBoth = 2, + +}; + +/** + * 4.6 混流输入类型 + */ +typedef NS_ENUM(NSUInteger, TRTCMixInputType) { + + ///默认值 + ///考虑到针对老版本的兼容性,如果您指定了 inputType 为 Undefined,SDK 会根据另一个参数 pureAudio 的数值决定混流输入类型 + TRTCMixInputTypeUndefined = 0, + + ///混入音频和视频 + TRTCMixInputTypeAudioVideo = 1, + + ///只混入视频 + TRTCMixInputTypePureVideo = 2, + + ///只混入音频 + TRTCMixInputTypePureAudio = 3, + + ///混入水印 + ///此时您无需指定 userId 字段,但需要指定 image 字段,推荐使用 png 格式的图片。 + TRTCMixInputTypeWatermark = 4, + +}; + +/** + * 4.7 设备类型(仅适用于桌面平台) + * + * 该枚举值用于定义三种类型的音视频设备,即摄像头、麦克风和扬声器,以便让一套设备管理接口可以操控三种不同类型的设备。 + * 自 Ver8.0 版本开始,TRTC 在 TXDeviceManager 中重新定义了 “TXMediaDeviceType” 用于替换老版本中的 “TRTCMediaDeviceType”, + * 此处仅保留 “TRTCMediaDeviceType” 的定义,用于兼容老版本的客户代码。 + */ +#if TARGET_OS_MAC && !TARGET_OS_IPHONE +typedef NS_ENUM(NSInteger, TRTCMediaDeviceType) { + 
TRTCMediaDeviceTypeUnknown = -1, ///< undefined device type + TRTCMediaDeviceTypeAudioInput = 0, ///< microphone + TRTCMediaDeviceTypeAudioOutput = 1, ///< speaker + TRTCMediaDeviceTypeVideoCamera = 2, ///< camera + TRTCMediaDeviceTypeVideoWindow = 3, ///< windows(for screen share) + TRTCMediaDeviceTypeVideoScreen = 4, ///< screen (for screen share) +} __attribute__((deprecated("use TXDeviceManager#TXMediaDeviceType instead"))); + +typedef TXMediaDeviceInfo TRTCMediaDeviceInfo __attribute__((deprecated("use TXDeviceManager#TXMediaDeviceInfo instead"))); +#endif + +/** + * 4.11 音频录制内容类型 + * + * 该枚举类型用于音频录制接口{@link startAudioRecording},用于指定录制音频的内容。 + */ +typedef NS_ENUM(NSUInteger, TRTCAudioRecordingContent) { + + ///录制本地和远端所有音频 + TRTCAudioRecordingContentAll = 0, + + ///仅录制本地音频 + TRTCAudioRecordingContentLocal = 1, + + ///仅录制远端音频 + TRTCAudioRecordingContentRemote = 2, + +}; + +///////////////////////////////////////////////////////////////////////////////// +// +// TRTC 核心类型定义 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 5.1 进房参数 + * + * 作为 TRTC SDK 的进房参数,只有该参数填写正确,才能顺利进入 roomId 或者 strRoomId 所指定的音视频房间。 + * 由于历史原因,TRTC 支持数字和字符串两种类型的房间号,分别是 roomId 和 strRoomId。 + * 请注意:不要混用 roomId 和 strRoomId,因为它们之间是不互通的,比如数字 123 和字符串 “123” 在 TRTC 看来是两个完全不同的房间。 + */ +LITEAV_EXPORT @interface TRTCParams : NSObject + +///【字段含义】应用标识(必填),腾讯云基于 sdkAppId 完成计费统计。 +///【推荐取值】在 [实时音视频控制台](https://console.cloud.tencent.com/rav/) 创建应用后可以在账号信息页面中得到该 ID。 +@property(nonatomic, assign) UInt32 sdkAppId; + +///【字段含义】用户标识(必填),当前用户的 userId,相当于用户名,使用 UTF-8 编码。 +///【推荐取值】如果一个用户在您的帐号系统中的 ID 为“mike”,则 userId 即可设置为“mike”。 +@property(nonatomic, copy, nonnull) NSString *userId; + +///【字段含义】用户签名(必填),当前 userId 对应的验证签名,相当于使用云服务的登录密码。 +///【推荐取值】具体计算方法请参见 [如何计算UserSig](https://cloud.tencent.com/document/product/647/17275)。 +@property(nonatomic, copy, nonnull) NSString *userSig; + +///【字段含义】数字房间号,在同一个房间里的用户(userId)可以彼此看到对方并进行音视频通话。 +///【推荐取值】取值范围:1 - 4294967294。 
+///【特别说明】roomId 与 strRoomId 是互斥的,若您选用 strRoomId,则 roomId 需要填写为0。若两者都填,SDK 将优先选用 roomId。 +///【请您注意】不要混用 roomId 和 strRoomId,因为它们之间是不互通的,比如数字 123 和字符串 “123” 在 TRTC 看来是两个完全不同的房间。 +@property(nonatomic, assign) UInt32 roomId; + +///【字段含义】字符串房间号,在同一个房间里的用户(userId)可以彼此看到对方并进行音视频通话。 +///【特别说明】roomId 与 strRoomId 是互斥的,若您选用 strRoomId,则 roomId 需要填写为0。若两者都填,SDK 将优先选用 roomId。 +///【请您注意】不要混用 roomId 和 strRoomId,因为它们之间是不互通的,比如数字 123 和字符串 “123” 在 TRTC 看来是两个完全不同的房间。 +///【推荐取值】限制长度为64字节。以下为支持的字符集范围(共 89 个字符): +/// - 大小写英文字母(a-zA-Z); +/// - 数字(0-9); +/// - 空格、"!"、"#"、"$"、"%"、"&"、"("、")"、"+"、"-"、":"、";"、"<"、"="、"."、">"、"?"、"@"、"["、"]"、"^"、"_"、" {"、"}"、"|"、"~"、","。 +@property(nonatomic, copy, nonnull) NSString *strRoomId; + +///【字段含义】直播场景下的角色,仅适用于直播场景({@link TRTCAppSceneLIVE} 和{@link TRTCAppSceneVoiceChatRoom}),通话场景下指定该参数是无效的。 +///【推荐取值】默认值:主播({@link TRTCRoleAnchor})。 +@property(nonatomic, assign) TRTCRoleType role; + +///【字段含义】用于指定在腾讯云直播平台上的 streamId(选填),设置之后,您可以在腾讯云直播 CDN 上通过标准拉流方案(FLV或HLS)播放该用户的音视频流。 +///【推荐取值】限制长度为64字节,可以不填写,一种推荐的方案是使用 “sdkappid_roomid_userid_main” 作为 streamid,这中命名方式容易辨认且不会在您的多个应用中发生冲突。 +///【特殊说明】要使用腾讯云直播 CDN,您需要先在[控制台](https://console.cloud.tencent.com/trtc/) 中的功能配置页开启“启动自动旁路直播”开关。 +///【参考文档】[CDN 旁路直播](https://cloud.tencent.com/document/product/647/16826)。 +@property(nonatomic, copy, nullable) NSString *streamId; + +///【字段含义】云端录制开关(选填),用于指定是否要在云端将该用户的音视频流录制下来。 +///【参考文档】[云端录制](https://cloud.tencent.com/document/product/647/16823)。 +///【推荐取值】限制长度为64字节,只允许包含大小写英文字母(a-zA-Z)、数字(0-9)及下划线和连词符。 +/// <p> +/// 方案一:手动录制方案: +/// 1. 在“[控制台](https://console.cloud.tencent.com/trtc) => 应用管理 => 云端录制配置”中开启云端录制。 +/// 2. 设置“录制形式”为“手动录制”。 +/// 3. 设置手动录制后,在一个 TRTC 房间中只有设置了 userDefineRecordId 参数的用户才会在云端录制出视频文件,不指定该参数的用户不会产生录制行为。 +/// 4. 云端会以 “userDefineRecordId_起始时间_结束时间” 的格式命名录制下来的文件。 +/// <p> +/// 方案二:自动录制方案: +/// 1. 需要在“[控制台](https://console.cloud.tencent.com/trtc) => 应用管理 => 云端录制配置”中开启云端录制。 +/// 2. 设置“录制形式”为“自动录制”。 +/// 3. 设置自动录制后,在一个 TRTC 房间中的任何一个有音视频上行的用户,均会在云端录制出视频文件。 +/// 4. 
文件会以 “userDefineRecordId_起始时间_结束时间” 的格式命名,如果不指定 userDefineRecordId,则文件会以 “streamId_起始时间_结束时间” 命名。 +/// <br> +@property(nonatomic, copy, nullable) NSString *userDefineRecordId; + +///【字段含义】用于权限控制的权限票据(选填),当您希望某个房间只能让特定的 userId 进入时,需要使用 privateMapKey 进行权限保护。 +///【推荐取值】仅建议有高级别安全需求的客户使用,更多详情请参见 [进房权限保护](https://cloud.tencent.com/document/product/647/32240)。 +@property(nonatomic, copy, nullable) NSString *privateMapKey; + +///【字段含义】业务数据字段(选填),部分高级特性才需要用到此字段。 +///【推荐取值】请不要自行设置该字段。 +@property(nonatomic, copy, nullable) NSString *bussInfo; + +@end + +/** + * 5.2 视频编码参数 + * + * 该设置决定远端用户看到的画面质量,同时也决定了云端录制出的视频文件的画面质量。 + */ +LITEAV_EXPORT @interface TRTCVideoEncParam : NSObject + +///【字段含义】 视频分辨率 +///【特别说明】如需使用竖屏分辨率,请指定 resMode 为 Portrait,例如: 640 × 360 + Portrait = 360 × 640。 +///【推荐取值】 +/// - 手机视频通话:建议选择 360 × 640 及以下分辨率,resMode 选择 Portrait,即竖屏分辨率。 +/// - 手机在线直播:建议选择 540 × 960,resMode 选择 Portrait,即竖屏分辨率。 +/// - 桌面平台(Win + Mac):建议选择 640 × 360 及以上分辨率,resMode 选择 Landscape,即横屏分辨率。 +@property(nonatomic, assign) TRTCVideoResolution videoResolution; + +///【字段含义】分辨率模式(横屏分辨率 or 竖屏分辨率) +///【推荐取值】手机平台(iOS、Android)建议选择 Portrait,桌面平台(Windows、Mac)建议选择 Landscape。 +///【特别说明】如需使用竖屏分辨率,请指定 resMode 为 Portrait,例如: 640 × 360 + Portrait = 360 × 640。 +@property(nonatomic, assign) TRTCVideoResolutionMode resMode; + +///【字段含义】视频采集帧率 +///【推荐取值】15fps或20fps。5fps以下,卡顿感明显。10fps以下,会有轻微卡顿感。20fps以上,会浪费带宽(电影的帧率为24fps)。 +///【特别说明】部分 Android 手机的前置摄像头并不支持15fps以上的采集帧率,部分主打美颜功能的 Android 手机的前置摄像头的采集帧率可能低于10fps。 +@property(nonatomic, assign) int videoFps; + +///【字段含义】目标视频码率,SDK 会按照目标码率进行编码,只有在弱网络环境下才会主动降低视频码率。 +///【推荐取值】请参考本 TRTCVideoResolution 在各档位注释的最佳码率,也可以在此基础上适当调高。 +/// 比如:TRTCVideoResolution_1280_720 对应 1200kbps 的目标码率,您也可以设置为 1500kbps 用来获得更好的观感清晰度。 +///【特别说明】您可以通过同时设置 videoBitrate 和 minVideoBitrate 两个参数,用于约束 SDK 对视频码率的调整范围: +/// - 如果您追求“弱网络下允许卡顿但要保持清晰”的效果,可以设置 minVideoBitrate 为 videoBitrate 的 60%; +/// - 如果您追求“弱网络下允许模糊但要保持流畅”的效果,可以设置 minVideoBitrate 为一个较低的数值(比如 100kbps); +/// - 如果您将 videoBitrate 和 
minVideoBitrate 设置为同一个值,等价于关闭 SDK 对视频码率的自适应调节能力。 +@property(nonatomic, assign) int videoBitrate; + +///【字段含义】最低视频码率,SDK 会在网络不佳的情况下主动降低视频码率以保持流畅度,最低会降至 minVideoBitrate 所设定的数值。 +///【特别说明】 默认值:0,此时最低码率由 SDK 会根据您指定的分辨率,自动计算出合适的数值。 +///【推荐取值】您可以通过同时设置 videoBitrate 和 minVideoBitrate 两个参数,用于约束 SDK 对视频码率的调整范围: +/// - 如果您追求“弱网络下允许卡顿但要保持清晰”的效果,可以设置 minVideoBitrate 为 videoBitrate 的 60%; +/// - 如果您追求“弱网络下允许模糊但要保持流畅”的效果,可以设置 minVideoBitrate 为一个较低的数值(比如 100kbps); +/// - 如果您将 videoBitrate 和 minVideoBitrate 设置为同一个值,等价于关闭 SDK 对视频码率的自适应调节能力。 +@property(nonatomic, assign) int minVideoBitrate; + +///【字段含义】是否允许动态调整分辨率(开启后会对云端录制产生影响)。 +///【推荐取值】该功能适用于不需要云端录制的场景,开启后 SDK 会根据当前网络情况,智能选择出一个合适的分辨率,避免出现“大分辨率+小码率”的低效编码模式。 +///【特别说明】默认值:关闭。如有云端录制的需求,请不要开启此功能,因为如果视频分辨率发生变化后,云端录制出的 MP4 在普通的播放器上无法正常播放。 +@property(nonatomic, assign) BOOL enableAdjustRes; + +@end + +/** + * 5.3 网络流控(Qos)参数集 + * + * 网络流控相关参数,该设置决定 SDK 在弱网络环境下的调控策略(例如:“清晰优先”或“流畅优先”) + */ +LITEAV_EXPORT @interface TRTCNetworkQosParam : NSObject + +///【字段含义】清晰优先还是流畅优先 +///【推荐取值】清晰优先 +///【特别说明】该参数主要影响 TRTC 在较差网络环境下的音视频表现: +/// - 流畅优先:即当前网络不足以传输既清晰又流畅的画面时,优先保证画面的流畅性,代价就是画面会比较模糊且伴随有较多的马赛克。 +/// - 清晰优先(默认值):即当前网络不足以传输既清晰又流畅的画面时,优先保证画面的清晰度,代价就是画面会比较卡顿。 +@property(nonatomic, assign) TRTCVideoQosPreference preference; + +///【字段含义】流控模式(已废弃) +///【推荐取值】云端控制 +///【特别说明】请设置为云端控制模式(TRTCQosControlModeServer) +@property(nonatomic, assign) TRTCQosControlMode controlMode; + +@end + +/** + * 5.4 视频画面的渲染参数 + * + * 您可以通过设置此参数来控制画面的旋转角度、填充模式和左右镜像模式。 + */ +LITEAV_EXPORT @interface TRTCRenderParams : NSObject + +///【字段含义】图像的顺时针旋转角度 +///【推荐取值】支持90、180以及270旋转角度,默认值:{@link TRTCVideoRotation_0} +@property(nonatomic) TRTCVideoRotation rotation; + +///【字段含义】画面填充模式 +///【推荐取值】填充(画面可能会被拉伸裁剪)或适应(画面可能会有黑边),默认值:{@link TRTCVideoFillMode_Fill} +@property(nonatomic) TRTCVideoFillMode fillMode; + +///【字段含义】画面镜像模式 +///【推荐取值】默认值:{@link TRTCVideoMirrorType_Auto} +@property(nonatomic) TRTCVideoMirrorType mirrorType; + +@end + +/** + * 5.5 网络质量 + * + * 
表征网络质量的好坏,您可以通过该数值在用户界面上展示每个用户的网络质量。 + */ +LITEAV_EXPORT @interface TRTCQualityInfo : NSObject + +///用户 ID +@property(nonatomic, copy, nullable) NSString *userId; + +///网络质量 +@property(nonatomic, assign) TRTCQuality quality; +@end + +/** + * 5.6 音量大小 + * + * 表征语音音量的评估值,您可以通过该数值在用户界面上展示每个用户的音量大小。 + */ +LITEAV_EXPORT @interface TRTCVolumeInfo : NSObject + +///说话者的 userId, 如果 userId 为空则代表是当前用户自己。 +@property(nonatomic, copy, nullable) NSString *userId; + +///说话者的音量大小, 取值范围[0 - 100]。 +@property(assign, nonatomic) NSUInteger volume; + +@end + +/** + * 5.7 测速参数 + * + * 您可以在用户进入房间前通过 {@link startSpeedTest} 接口测试网速(注意:请不要在通话中调用)。 + */ +LITEAV_EXPORT @interface TRTCSpeedTestParams : NSObject + +///应用标识,请参考 {@link TRTCParams} 中的相关说明。 +@property(nonatomic) uint32_t sdkAppId; + +///用户标识,请参考 {@link TRTCParams} 中的相关说明。 +@property(nonatomic, copy, nonnull) NSString *userId; + +///用户签名,请参考 {@link TRTCParams} 中的相关说明。 +@property(nonatomic, copy, nonnull) NSString *userSig; + +///预期的上行带宽(kbps,取值范围: 10 ~ 5000,为 0 时不测试)。 +@property(nonatomic) NSInteger expectedUpBandwidth; + +///预期的下行带宽(kbps,取值范围: 10 ~ 5000,为 0 时不测试)。 +@property(nonatomic) NSInteger expectedDownBandwidth; +@end + +/** + * 5.8 网络测速结果 + * + * 您可以在用户进入房间前通过 {@link startSpeedTest:} 接口进行测速(注意:请不要在通话中调用)。 + */ +LITEAV_EXPORT @interface TRTCSpeedTestResult : NSObject + +///测试是否成功。 +@property(nonatomic) BOOL success; + +///带宽测试错误信息。 +@property(nonatomic, copy, nonnull) NSString *errMsg; + +///服务器 IP 地址。 +@property(nonatomic, copy, nonnull) NSString *ip; + +///内部通过评估算法测算出的网络质量,更多信息请参见 {@link TRTCQuality}。 +@property(nonatomic) TRTCQuality quality; + +///上行丢包率,取值范围是 [0 - 1.0],例如 0.3 表示每向服务器发送 10 个数据包可能会在中途丢失 3 个。 +@property(nonatomic) float upLostRate; + +///下行丢包率,取值范围是 [0 - 1.0],例如 0.2 表示每从服务器收取 10 个数据包可能会在中途丢失 2 个。 +@property(nonatomic) float downLostRate; + +///延迟(毫秒),指当前设备到 TRTC 服务器的一次网络往返时间,该值越小越好,正常数值范围是10ms - 100ms。 +@property(nonatomic) uint32_t rtt; + +///上行带宽(kbps,-1:无效值)。 +@property(nonatomic) NSInteger 
availableUpBandwidth; + +///下行带宽(kbps,-1:无效值)。 +@property(nonatomic) NSInteger availableDownBandwidth; + +@end + +/** + * 5.10 视频帧信息 + * + * TRTCVideoFrame 用来描述一帧视频画面的裸数据,也就是编码前或者解码后的视频画面数据。 + */ +LITEAV_EXPORT @interface TRTCVideoFrame : NSObject + +///【字段含义】视频的像素格式 +@property(nonatomic, assign) TRTCVideoPixelFormat pixelFormat; + +///【字段含义】视频数据结构类型 +@property(nonatomic, assign) TRTCVideoBufferType bufferType; + +///【字段含义】bufferType 为 {@link TRTCVideoBufferType_PixelBuffer} 时的视频数据,承载 iOS 平台专用的 PixelBuffer。 +@property(nonatomic, assign, nullable) CVPixelBufferRef pixelBuffer; + +///【字段含义】bufferType 为 {@link TRTCVideoBufferType_NSData} 时的视频数据,承载 NSData 类型的内存数据块。 +@property(nonatomic, retain, nullable) NSData *data; + +///【字段含义】视频纹理 ID,bufferType 为 {@link TRTCVideoBufferType_Texture} 时的视频数据,承载用于 OpenGL 渲染的纹理数据。 +@property(nonatomic, assign) GLuint textureId; + +///【字段含义】视频宽度 +@property(nonatomic, assign) uint32_t width; + +///【字段含义】视频高度 +@property(nonatomic, assign) uint32_t height; + +///【字段含义】视频帧的时间戳,单位毫秒 +///【推荐取值】自定义视频采集时可以设置为0。若该参数为0,SDK 会自定填充 timestamp 字段,但请“均匀”地控制 sendCustomVideoData 的调用间隔。 +@property(nonatomic, assign) uint64_t timestamp; + +///【字段含义】视频像素的顺时针旋转角度 +@property(nonatomic, assign) TRTCVideoRotation rotation; + +@end + +/** + * 5.11 音频帧数据 + */ +LITEAV_EXPORT @interface TRTCAudioFrame : NSObject + +///【字段含义】音频数据 +@property(nonatomic, retain, nonnull) NSData *data; + +///【字段含义】采样率 +@property(nonatomic, assign) TRTCAudioSampleRate sampleRate; + +///【字段含义】声道数 +@property(nonatomic, assign) int channels; + +///【字段含义】时间戳,单位ms +@property(nonatomic, assign) uint64_t timestamp; + +///【字段含义】音频额外数据,远端用户通过 `onLocalProcessedAudioFrame` 写入的数据会通过该字段回调 +@property(nonatomic, retain, nullable) NSData *extraData; + +@end + +/** + * 5.12 云端混流中各路画面的描述信息 + * + * TRTCMixUser 用于指定云端混流中每一路视频画面的位置、大小、图层以及流类型等信息。 + */ +LITEAV_EXPORT @interface TRTCMixUser : NSObject + +///【字段含义】用户 ID +@property(nonatomic, copy, nonnull) NSString *userId; + 
+///【字段含义】该路音视频流所在的房间号(设置为空值代表当前用户所在的房间号) +@property(nonatomic, copy, nullable) NSString *roomID; + +///【字段含义】指定该路画面的坐标区域(单位:像素) +@property(nonatomic, assign) CGRect rect; + +///【字段含义】指定该路画面的层级(取值范围:1 - 15,不可重复) +@property(nonatomic, assign) int zOrder; + +///【字段含义】指定该路画面是主路画面({@link TRTCVideoStreamTypeBig})还是辅路画面({@link TRTCVideoStreamTypeSub})。 +@property(nonatomic) TRTCVideoStreamType streamType; + +///【字段含义】指定该路流是不是只混合声音 +///【推荐取值】默认值:NO +///【特别说明】已废弃,推荐使用8.5版本开始新引入的字段:inputType。 +@property(nonatomic, assign) BOOL pureAudio; + +///【字段含义】指定该路流的混合内容(只混音频、只混视频、混合音视频、混入水印) +///【默认取值】默认值:TRTCMixInputTypeUndefined +///【特别说明】 +/// - 当指定 inputType 为 TRTCMixInputTypeUndefined 并设置 pureAudio 为 YES 时,等效于设置 inputType 为 TRTCMixInputTypePureAudio。 +/// - 当指定 inputType 为 TRTCMixInputTypeUndefined 并设置 pureAudio 为 NO 时,等效于设置 inputType 为 TRTCMixInputTypeAudioVideo。 +/// - 当指定 inputType 为 TRTCMixInputTypeWatermark 时,您可以不指定 userId 字段,但需要指定 image 字段。 +@property(nonatomic, assign) TRTCMixInputType inputType; + +///【字段含义】该画面在输出时的显示模式 +///【推荐取值】默认值:视频流默认为0。0为裁剪,1为缩放,2为缩放并显示黑底。 +///【特别说明】水印图和占位图暂时不支持设置 renderMode,默认强制拉伸处理 +@property(nonatomic, assign) int renderMode; + +///【字段含义】占位图或水印图 +/// - 占位图是指当对应 userId 混流内容为纯音频时,混合后的画面中显示的是占位图片。 +/// - 水印图是指一张贴在混合后画面中的半透明图片,这张图片会一直覆盖于混合后的画面上。 +/// - 当指定 inputType 为 TRTCMixInputTypePureAudio 时,image 为占位图,此时需要您指定 userId。 +/// - 当指定 inputType 为 TRTCMixInputTypeWatermark 时,image 为水印图,此时不需要您指定 userId。 +///【推荐取值】默认值:空值,即不设置占位图或者水印图。 +///【特别说明】 +/// - 您可以将 image 设置为控制台中的某一个素材 ID,这需要您事先在 “[控制台](https://console.cloud.tencent.com/trtc) => 应用管理 => 功能配置 => 素材管理” 中单击 [新增图片] 按钮进行上传。 +/// - 上传成功后可以获得对应的“图片ID”,然后将“图片ID”转换成字符串类型并设置给 image 字段即可(比如假设“图片ID” 为 63,可以设置 image = @"63") +/// - 您也可以将 image 设置为图片的 URL 地址,腾讯云的后台服务器会将该 URL 地址指定的图片混合到最终的画面中。 +/// - URL 链接长度限制为 512 字节。图片大小限制不超过 2MB。 +/// - 图片格式支持 png、jpg、jpeg、bmp 格式,推荐使用 png 格式的半透明图片作为水印。 +/// - image 仅在 inputType 为 TRTCMixInputTypePureAudio 或者 TRTCMixInputTypeWatermark 时才生效。 +@property(nonatomic, copy, 
nullable) NSString *image; + +@end + +/** + * 5.13 云端混流的排版布局和转码参数 + * + * 用于指定混流时各路画面的排版位置信息和云端转码的编码参数。 + */ +LITEAV_EXPORT @interface TRTCTranscodingConfig : NSObject + +///【字段含义】排版模式 +///【推荐取值】请根据您的业务场景要求自行选择,预排版模式是适用性较好的一种模式。 +@property(nonatomic, assign) TRTCTranscodingConfigMode mode; + +///【字段含义】腾讯云直播服务的 AppID +///【推荐取值】请在 [实时音视频控制台](https://console.cloud.tencent.com/trtc) 依次单击【应用管理】=>【应用信息】,并在【旁路直播信息】中获取 appid。 +@property(nonatomic) int appId; + +///【字段含义】腾讯云直播服务的 bizid +///【推荐取值】请在 [实时音视频控制台](https://console.cloud.tencent.com/trtc) 依次单击【应用管理】=>【应用信息】,并在【旁路直播信息】中获取 bizid。 +@property(nonatomic) int bizId; + +///【字段含义】指定云端转码的目标分辨率(宽度) +///【推荐取值】单位:像素值,推荐值:360,如果你只混合音频流,请将 width 和 height 均设置位 0,否则混流转码后的直播流中会有黑色背景。 +@property(nonatomic, assign) int videoWidth; + +///【字段含义】指定云端转码的目标分辨率(高度) +///【推荐取值】单位:像素值,推荐值:640,如果你只混合音频流,请将 width 和 height 均设置位 0,否则混流转码后的直播流中会有黑色背景。 +@property(nonatomic, assign) int videoHeight; + +///【字段含义】指定云端转码的目标视频码率(kbps) +///【推荐取值】如果填0,TRTC 会根据 videoWidth 和 videoHeight 估算出一个合理的码率值,您也可以参考视频分辨率枚举定义中所推荐的码率值(见注释部分)。 +@property(nonatomic, assign) int videoBitrate; + +///【字段含义】指定云端转码的目标视频帧率(FPS) +///【推荐取值】默认值:15fps,取值范围是 (0,30]。 +@property(nonatomic, assign) int videoFramerate; + +///【字段含义】指定云端转码的目标视频关键帧间隔(GOP) +///【推荐取值】默认值:2,单位为秒,取值范围是 [1,8]。 +@property(nonatomic, assign) int videoGOP; + +///【字段含义】指定混合画面的底色颜色 +///【推荐取值】默认值:0x000000 代表黑色。格式为十六进制数字,比如:“0x61B9F1” 代表 RGB 分别为(97,158,241)。 +@property(nonatomic, assign) int backgroundColor; + +///【字段含义】指定混合画面的背景图片 +///【推荐取值】默认值:空值,即不设置背景图片。 +///【特别说明】 +/// - 您可以将 image 设置为控制台中的某一个素材 ID,这需要您事先在 “[控制台](https://console.cloud.tencent.com/trtc) => 应用管理 => 功能配置 => 素材管理” 中单击 [新增图片] 按钮进行上传。 +/// - 上传成功后可以获得对应的“图片ID”,然后将“图片ID”转换成字符串类型并设置给 image 字段即可(比如假设“图片ID” 为 63,可以设置 image = @"63") +/// - 您也可以将 image 设置为图片的 URL 地址,腾讯云的后台服务器会将该 URL 地址指定的图片混合到最终的画面中。 +/// - URL 链接长度限制为 512 字节。图片大小限制不超过 2MB。 +/// - 图片格式支持 png、jpg、jpeg、bmp 格式。 +@property(nonatomic, copy, nullable) NSString *backgroundImage; + 
+///【字段含义】指定云端转码的目标音频采样率 +///【推荐取值】默认值:48000Hz。支持12000HZ、16000HZ、22050HZ、24000HZ、32000HZ、44100HZ、48000HZ。 +@property(nonatomic, assign) int audioSampleRate; + +///【字段含义】指定云端转码的目标音频码率 +///【推荐取值】默认值:64kbps,取值范围是 [32,192]。 +@property(nonatomic, assign) int audioBitrate; + +///【字段含义】指定云端转码的音频声道数 +///【推荐取值】默认值:1,代表单声道。可设定的数值只有两个数字:1-单声道,2-双声道。 +@property(nonatomic, assign) int audioChannels; + +///【字段含义】指定云端转码的输出流音频编码类型 +///【推荐取值】默认值:0,代表LC-AAC。可设定的数值只有三个数字:0 - LC-AAC,1 - HE-AAC,2 - HE-AACv2。 +///【特别说明】HE-AAC 和 HE-AACv2 支持的输出流音频采样率范围为[48000, 44100, 32000, 24000, 16000] +///【特别说明】当音频编码设置为 HE-AACv2 时,只支持输出流音频声道数为双声道。 +///【特别说明】HE-AAC 和 HE-AACv2 取值仅在输出流为您额外设置的 streamId 上时才生效。 +@property(nonatomic, assign) int audioCodec; + +///【字段含义】指定云端混流中每一路视频画面的位置、大小、图层以及流类型等信息 +///【推荐取值】该字段是一个 TRTCMixUser 类型的数组,数组中的每一个元素都用来代表每一路画面的信息。 +@property(nonatomic, copy, nonnull) NSArray<TRTCMixUser *> *mixUsers; + +///【字段含义】输出到 CDN 上的直播流 ID +///【推荐取值】默认值:空值,即房间里的多路音视频流最终会混合到接口调用者的那一路音视频流上。 +/// - 如不设置该参数,SDK 会执行默认逻辑,即房间里的多路音视频流会混合到该接口调用者的那一路音视频流上,也就是 A + B => A。 +/// - 如您设置该参数,SDK 会将房间里的多路音视频流混合到您指定的直播流上,也就是 A + B => C(C 代表您指定的 streamId)。 +@property(nonatomic, copy, nullable) NSString *streamId; + +@end + +/** + * 5.14 向非腾讯云 CDN 上发布音视频流时需设置的转推参数 + * + * TRTC 的后台服务支持通过标准 RTMP 协议,将其中的音视频流发布到第三方直播 CDN 服务商。 + * 如果您使用腾讯云直播 CDN 服务,可无需关注此参数,直接使用 {@link startPublish} 接口即可。 + */ +LITEAV_EXPORT @interface TRTCPublishCDNParam : NSObject + +///【字段含义】腾讯云直播服务的 AppID +///【推荐取值】请在 [实时音视频控制台](https://console.cloud.tencent.com/trtc) 依次单击【应用管理】=>【应用信息】,并在【旁路直播信息】中获取 appid。 +@property(nonatomic) int appId; + +///【字段含义】腾讯云直播服务的 bizid +///【推荐取值】请在 [实时音视频控制台](https://console.cloud.tencent.com/trtc) 依次单击【应用管理】=>【应用信息】,并在【旁路直播信息】中获取 bizid。 +@property(nonatomic) int bizId; + +///【字段含义】指定该路音视频流在第三方直播服务商的推流地址(RTMP 格式) +///【推荐取值】各家服务商的推流地址规则差异较大,请根据目标服务商的要求填写合法的推流 URL,TRTC 的后台服务器会按照您填写的 URL 向第三方服务商推送标准格式音视频流。 +///【特别说明】推流 URL 必须为 RTMP 格式,必须符合您的目标直播服务商的规范要求,否则目标服务商会拒绝来自 TRTC 后台服务的推流请求。 +@property(nonatomic, strong, 
nonnull) NSString *url; + +///【字段含义】需要转推的 streamId +///【推荐取值】默认值:空值。如果不填写,则默认转推调用者的旁路流。 +@property(nonatomic, strong, nonnull) NSString *streamId; + +@end + +/** + * 5.15 本地音频文件的录制参数 + * + * 该参数用于在音频录制接口 {@link startAudioRecording} 中指定录制参数。 + */ +LITEAV_EXPORT @interface TRTCAudioRecordingParams : NSObject + +///【字段含义】录音文件的保存路径(必填)。 +///【特别说明】该路径需精确到文件名及格式后缀,格式后缀用于决定录音文件的格式,目前支持的格式有 PCM、WAV 和 AAC。 +/// 例如:假如您指定路径为 "mypath/record/audio.aac",代表您希望 SDK 生成一个 AAC 格式的音频录制文件。 +/// 请您指定一个有读写权限的合法路径,否则录音文件无法生成。 +@property(nonatomic, strong, nonnull) NSString *filePath; + +///【字段含义】音频录制内容类型。 +///【特别说明】默认录制所有本地和远端音频。 +@property(nonatomic, assign) TRTCAudioRecordingContent recordingContent; + +@end + +/** + * 5.16 本地媒体文件的录制参数 + * + * 该参数用于在本地媒体文件的录制接口 {@link startLocalRecording} 中指定录制相关参数。 + * 接口 startLocalRecording 是接口 startAudioRecording 的能力加强版本,前者可以录制视频文件,后者只能录制音频文件。 + */ +LITEAV_EXPORT @interface TRTCLocalRecordingParams : NSObject + +///【字段含义】录制的文件地址(必填),请确保路径有读写权限且合法,否则录制文件无法生成。 +///【特别说明】该路径需精确到文件名及格式后缀,格式后缀用于决定录制出的文件格式,目前支持的格式暂时只有 MP4。 +/// 例如:假如您指定路径为 "mypath/record/test.mp4",代表您希望 SDK 生成一个 MP4 格式的本地视频文件。 +/// 请您指定一个有读写权限的合法路径,否则录制文件无法生成。 +@property(nonatomic, copy, nonnull) NSString *filePath; + +///【字段含义】媒体录制类型,默认值:TRTCRecordTypeBoth,即同时录制音频和视频。 +@property(nonatomic, assign) TRTCRecordType recordType; + +///【字段含义】interval 录制信息更新频率,单位毫秒,有效范围:1000-10000。默认值为-1,表示不回调。 +@property(nonatomic, assign) int interval; + +@end + +/** + * 5.17 音效参数(已废弃) + * + * TRTC 中的“音效”特指一些短暂的音频文件,通常仅有几秒钟的播放时间,比如“鼓掌声”、“欢笑声”等。 + * 该参数用于在早期版本的音效播放接口 {@link TRTCCloud#playAudioEffect} 中指定音效文件(即短音频文件)的路径和播放次数等。 + * 在 7.3 版本以后,音效接口已被新的接口 {@link TXAudioEffectManager#startPlayMusic} 所取代。 + * 您在指定 startPlayMusic 的参数 {@link TXAudioMusicParam} 时,如果将 “isShortFile” 设置为 YES,即为“音效”文件。 + */ +LITEAV_EXPORT @interface TRTCAudioEffectParam : NSObject + ++ (_Nonnull instancetype)new __attribute__((unavailable("Use -initWith:(int)effectId path:(NSString * )path instead"))); +- (_Nonnull instancetype)init 
__attribute__((unavailable("Use -initWith:(int)effectId path:(NSString *)path instead"))); + +///【字段含义】音效 ID +///【特别说明】SDK 允许播放多路音效,因此需要音效 ID 进行标记,用于控制音效的开始、停止、音量等。 +@property(nonatomic, assign) int effectId; + +///【字段含义】音效文件路径,支持的文件格式:aac, mp3, m4a。 +@property(nonatomic, copy, nonnull) NSString *path; + +///【字段含义】循环播放次数 +///【推荐取值】取值范围为0 - 任意正整数,默认值:0,表示播放音效一次;1表示播放音效两次;以此类推。 +@property(nonatomic, assign) int loopCount; + +///【字段含义】音效是否上行 +///【推荐取值】YES:音效在本地播放的同时,会上行至云端,因此远端用户也能听到该音效;NO:音效不会上行至云端,因此只能在本地听到该音效。默认值:NO +@property(nonatomic, assign) BOOL publish; + +///【字段含义】音效音量 +///【推荐取值】取值范围为0 - 100;默认值:100 +@property(nonatomic, assign) int volume; + +- (_Nonnull instancetype)initWith:(int)effectId path:(NSString *_Nonnull)path; +@end + +/** + * 5.18 房间切换参数 + * + * 该参数用于切换房间接口{@link switchRoom},可以让用户从一个房间快速切换到另一个房间。 + */ +LITEAV_EXPORT @interface TRTCSwitchRoomConfig : NSObject + +///【字段含义】数字房间号码 [选填],在同一个房间内的用户可以看到彼此并能够进行音视频通话。 +///【推荐取值】取值范围:1 - 4294967294。 +///【特别说明】roomId 和 strRoomId 必须并且只能填一个。若两者都填,则优先选择 roomId。 +@property(nonatomic, assign) UInt32 roomId; + +///【字段含义】字符串房间号码 [选填],在同一个房间内的用户可以看到彼此并能够进行音视频通话。 +///【特别说明】roomId 和 strRoomId 必须并且只能填一个。若两者都填,则优先选择 roomId。 +@property(nonatomic, copy, nullable) NSString *strRoomId; + +///【字段含义】用户签名 [选填],当前 userId 对应的验证签名,相当于登录密码。 +/// 如果您在切换房间时不指定新计算出的 userSig,SDK 会继续使用您在进入房间时(enterRoom)时所指定的 userSig。 +/// 这就需要您必须保证旧的 userSig 在切换房间的那一刻仍在签名允许的效期内,否则会导致房间切换失败。 +///【推荐取值】具体计算方法请参考 [如何计算UserSig](https://cloud.tencent.com/document/product/647/17275)。 +@property(nonatomic, copy, nullable) NSString *userSig; + +///【字段含义】用于权限控制的权限票据(选填),当您希望某个房间只能让特定的 userId 进入时,需要使用 privateMapKey 进行权限保护。 +///【推荐取值】仅建议有高级别安全需求的客户使用,更多详情请参见 [进房权限保护](https://cloud.tencent.com/document/product/647/32240)。 +@property(nonatomic, copy, nullable) NSString *privateMapKey; + +@end + +/** + * 5.19 音频自定义回调的格式参数 + * + * 该参数用于在音频自定义回调相关的接口中,设置 SDK 回调出来的音频数据的相关格式(包括采样率、声道数等)。 + */ +LITEAV_EXPORT @interface TRTCAudioFrameDelegateFormat : NSObject + 
+///【字段含义】采样率 +///【推荐取值】默认值:48000Hz。支持 16000, 32000, 44100, 48000。 +@property(nonatomic, assign) TRTCAudioSampleRate sampleRate; + +///【字段含义】声道数 +///【推荐取值】默认值:1,代表单声道。可设定的数值只有两个数字:1-单声道,2-双声道。 +@property(nonatomic, assign) int channels; + +///【字段含义】采样点数 +///【推荐取值】取值必须是 sampleRate/100 的整数倍。 +@property(nonatomic, assign) int samplesPerCall; + +@end + +/** + * 5.21 屏幕分享的目标信息(仅适用于桌面系统) + * + * 在用户进行屏幕分享时,可以选择抓取整个桌面,也可以仅抓取某个程序的窗口。 + * TRTCScreenCaptureSourceInfo 用于描述待分享目标的信息,包括 ID、名称、缩略图等,该结构体中的字段信息均是只读的。 + */ +#if TARGET_OS_MAC && !TARGET_OS_IPHONE +LITEAV_EXPORT @interface TRTCScreenCaptureSourceInfo : NSObject + +///【字段含义】采集源类型(是分享整个屏幕?还是分享某个窗口?) +@property(assign, nonatomic) TRTCScreenCaptureSourceType type; + +///【字段含义】采集源的ID,对于窗口,该字段代表窗口的 ID;对于屏幕,该字段代表显示器的 ID。 +@property(copy, nonatomic, nullable) NSString *sourceId; + +///【字段含义】采集源名称(采用 UTF8 编码) +@property(copy, nonatomic, nullable) NSString *sourceName; + +///【字段含义】窗口的扩展信息 +@property(nonatomic, strong, nullable) NSDictionary *extInfo; + +///【字段含义】分享窗口的缩略图 +@property(nonatomic, readonly, nullable) NSImage *thumbnail; + +///【字段含义】分享窗口的图标 +@property(nonatomic, readonly, nullable) NSImage *icon; + +@end +#endif + +/** + * 5.24 远端音频流智能并发播放策略的参数 + * + * 该参数用于设置远端音频流智能并发播放策略。 + */ +LITEAV_EXPORT @interface TRTCAudioParallelParams : NSObject + +///【字段含义】最大并发播放数。默认值:0 +///- 如果 maxCount > 0,且实际人数 > maxCount,会实时智能选出 maxCount 路数据进行播放,这会极大的降低性能消耗。 +///- 如果 maxCount = 0,SDK 不限制并发播放数,在上麦人数比较多的房间可能会引发性能问题。 +@property(assign, nonatomic) UInt32 maxCount; + +///【字段含义】指定用户必定能并发播放。 +///【特殊说明】指定必定并发播放的用户 ID 列表。这些用户不参与智能选择。 +/// includeUsers 的数量必须小于 maxCount,否则本次并发播放设置失效。 +/// includeUsers 仅在 maxCount > 0 时有效。当 includeUsers 生效时,参与智能并发选择的最大播放数 = maxCount - 有效 includeUsers 的数量。 +@property(nonatomic, strong, nullable) NSArray<NSString *> *includeUsers; + +@end + +/// @} diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloudDelegate.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloudDelegate.h index 
842c86d..576fe5b 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloudDelegate.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloudDelegate.h @@ -1,10 +1,12 @@ -/* +// Copyright (c) 2021 Tencent. All rights reserved. + +/** * Module: TRTCCloudDelegate @ TXLiteAVSDK - * - * Function: 腾讯云视频通话功能的事件回调接口 - * + * Function: 腾讯云实时音视频的事件回调接口 */ - +/// @defgroup TRTCCloudDelegate_ios TRTCCloudDelegate +/// 腾讯云实时音视频的事件回调接口 +/// @{ #import <Foundation/Foundation.h> #import "TRTCCloudDef.h" #import "TXLiteAVCode.h" @@ -14,58 +16,61 @@ NS_ASSUME_NONNULL_BEGIN @class TRTCCloud; @class TRTCStatistics; - -/// @defgroup TRTCCloudDelegate_ios TRTCCloudDelegate -/// 腾讯云视频通话功能的事件回调接口 -/// @{ @protocol TRTCCloudDelegate <NSObject> @optional ///////////////////////////////////////////////////////////////////////////////// // -// (一)错误事件和警告事件 +// 错误和警告事件 // ///////////////////////////////////////////////////////////////////////////////// -/// @name 错误事件和警告事件 +/// @name 错误和警告事件 /// @{ + /** - * 1.1 错误回调,表示 SDK 不可恢复的错误,一定要监听并分情况给用户适当的界面提示。 + * 1.1 错误事件回调 + * + * 错误事件,表示 SDK 抛出的不可恢复的错误,比如进入房间失败或设备开启失败等。 + * 参考文档:[错误码表](https://cloud.tencent.com/document/product/647/32257) * * @param errCode 错误码 * @param errMsg 错误信息 * @param extInfo 扩展信息字段,个别错误码可能会带额外的信息帮助定位问题 */ -- (void)onError:(TXLiteAVError)errCode errMsg:(nullable NSString *)errMsg extInfo:(nullable NSDictionary*)extInfo; +- (void)onError:(TXLiteAVError)errCode errMsg:(nullable NSString *)errMsg extInfo:(nullable NSDictionary *)extInfo; /** - * 1.2 警告回调,用于告知您一些非严重性问题,例如出现了卡顿或者可恢复的解码失败。 + * 1.2 警告事件回调 + * + * 警告事件,表示 SDK 抛出的提示性问题,比如视频出现卡顿或 CPU 使用率太高等。 + * 参考文档:[错误码表](https://cloud.tencent.com/document/product/647/32257) * * @param warningCode 警告码 * @param warningMsg 警告信息 * @param extInfo 扩展信息字段,个别警告码可能会带额外的信息帮助定位问题 */ -- (void)onWarning:(TXLiteAVWarning)warningCode warningMsg:(nullable NSString *)warningMsg extInfo:(nullable NSDictionary*)extInfo; +- (void)onWarning:(TXLiteAVWarning)warningCode 
warningMsg:(nullable NSString *)warningMsg extInfo:(nullable NSDictionary *)extInfo; /// @} - ///////////////////////////////////////////////////////////////////////////////// // -// (二)房间事件回调 +// 房间相关事件回调 // ///////////////////////////////////////////////////////////////////////////////// -/// @name 房间事件回调 +/// @name 房间相关事件回调 /// @{ + /** - * 2.1 已加入房间的回调 - * - * 调用 TRTCCloud 中的 enterRoom() 接口执行进房操作后,会收到来自 SDK 的 onEnterRoom(result) 回调: + * 2.1 进入房间成功与否的事件回调 * - * - 如果加入成功,result 会是一个正数(result > 0),代表加入房间的时间消耗,单位是毫秒(ms)。 - * - 如果加入失败,result 会是一个负数(result < 0),代表进房失败的错误码。 - * 进房失败的错误码含义请参见[错误码](https://cloud.tencent.com/document/product/647/32257)。 + * 调用 TRTCCloud 中的 enterRoom() 接口执行进房操作后,会收到来自 TRTCCloudDelegate 的 onEnterRoom(result) 回调: + * - 如果加入成功,回调 result 会是一个正数(result > 0),代表进入房间所消耗的时间,单位是毫秒(ms)。 + * - 如果加入失败,回调 result 会是一个负数(result < 0),代表失败原因的错误码。 + * 进房失败的错误码含义请参见[错误码表](https://cloud.tencent.com/document/product/647/32257)。 * - * @note 在 Ver6.6 之前的版本,只有进房成功会抛出 onEnterRoom(result) 回调,进房失败由 onError() 回调抛出。 - * 在 Ver6.6 及之后改为:进房成功返回正的 result,进房失败返回负的 result,同时进房失败也会有 onError() 回调抛出。 + * @note + * 1. 在 Ver6.6 之前的版本,只有进房成功会抛出 onEnterRoom(result) 回调,进房失败由 onError() 回调抛出。 + * 2. 
在 Ver6.6 之后的版本:无论进房成功或失败,均会抛出 onEnterRoom(result) 回调,同时进房失败也会有 onError() 回调抛出。 * * @param result result > 0 时为进房耗时(ms),result < 0 时为进房错误码。 */ @@ -75,12 +80,12 @@ NS_ASSUME_NONNULL_BEGIN * 2.2 离开房间的事件回调 * * 调用 TRTCCloud 中的 exitRoom() 接口会执行退出房间的相关逻辑,例如释放音视频设备资源和编解码器资源等。 - * 待资源释放完毕,SDK 会通过 onExitRoom() 回调通知到您。 + * 待 SDK 占用的所有资源释放完毕后,SDK 会抛出 onExitRoom() 回调通知到您。 * - * 如果您要再次调用 enterRoom() 或者切换到其他的音视频 SDK,请等待 onExitRoom() 回调到来之后再执行相关操作。 - * 否则可能会遇到音频设备(例如 iOS 里的 AudioSession)被占用等各种异常问题。 + * 如果您要再次调用 enterRoom() 或者切换到其他的音视频 SDK,请等待 onExitRoom() 回调到来后再执行相关操作。 + * 否则可能会遇到例如摄像头、麦克风设备被强占等各种异常问题。 * - * @param reason 离开房间原因,0:主动调用 exitRoom 退房;1:被服务器踢出当前房间;2:当前房间整个被解散。 + * @param reason 离开房间原因,0:主动调用 exitRoom 退出房间;1:被服务器踢出当前房间;2:当前房间整个被解散。 */ - (void)onExitRoom:(NSInteger)reason; @@ -96,320 +101,375 @@ NS_ASSUME_NONNULL_BEGIN - (void)onSwitchRole:(TXLiteAVError)errCode errMsg:(nullable NSString *)errMsg; /** - * 2.4 请求跨房通话(主播 PK)的结果回调 + * 2.4 切换房间的结果回调 * - * 调用 TRTCCloud 中的 connectOtherRoom() 接口会将两个不同房间中的主播拉通视频通话,也就是所谓的“主播PK”功能。 - * 调用者会收到 onConnectOtherRoom() 回调来获知跨房通话是否成功, - * 如果成功,两个房间中的所有用户都会收到 PK 主播的 onUserVideoAvailable() 回调。 + * 调用 TRTCCloud 中的 switchRoom() 接口可以让用户快速地从一个房间切换到另一个房间, + * 待 SDK 切换完成后,会抛出 onSwitchRoom() 事件回调。 * - * @param userId 要 PK 的目标主播 userid。 * @param errCode 错误码,ERR_NULL 代表切换成功,其他请参见[错误码](https://cloud.tencent.com/document/product/647/32257)。 * @param errMsg 错误信息。 */ -- (void)onConnectOtherRoom:(NSString*)userId errCode:(TXLiteAVError)errCode errMsg:(nullable NSString *)errMsg; +- (void)onSwitchRoom:(TXLiteAVError)errCode errMsg:(nullable NSString *)errMsg; /** - * 2.5 结束跨房通话(主播 PK)的结果回调 + * 2.5 请求跨房通话的结果回调 + * + * 调用 TRTCCloud 中的 connectOtherRoom() 接口会将两个不同房间中的主播拉通视频通话,也就是所谓的“主播PK”功能。 + * 调用者会收到 onConnectOtherRoom() 回调来获知跨房通话是否成功, + * 如果成功,两个房间中的所有用户都会收到来自另一个房间中的 PK 主播的 onUserVideoAvailable() 回调。 + * + * @param userId 要跨房通话的另一个房间中的主播的用户 ID。 + * @param errCode 错误码,ERR_NULL 代表切换成功,其他请参见[错误码](https://cloud.tencent.com/document/product/647/32257)。 + * 
@param errMsg 错误信息。 */ -- (void)onDisconnectOtherRoom:(TXLiteAVError)errCode errMsg:(nullable NSString *)errMsg; +- (void)onConnectOtherRoom:(NSString *)userId errCode:(TXLiteAVError)errCode errMsg:(nullable NSString *)errMsg; /** - * 2.6 切换房间 (switchRoom) 的结果回调 + * 2.6 结束跨房通话的结果回调 */ -- (void)onSwitchRoom:(TXLiteAVError)errCode errMsg:(nullable NSString *)errMsg; +- (void)onDisconnectOtherRoom:(TXLiteAVError)errCode errMsg:(nullable NSString *)errMsg; /// @} - ///////////////////////////////////////////////////////////////////////////////// // -// (三)成员事件回调 +// 用户相关事件回调 // ///////////////////////////////////////////////////////////////////////////////// -/// @name 成员事件回调 +/// @name 用户相关事件回调 /// @{ /** * 3.1 有用户加入当前房间 * - * 出于性能方面的考虑,在两种不同的应用场景下,该通知的行为会有差别: - * - 通话场景(TRTCAppSceneVideoCall 和 TRTCAppSceneAudioCall):该场景下用户没有角色的区别,任何用户进入房间都会触发该通知。 - * - 直播场景(TRTCAppSceneLIVE 和 TRTCAppSceneVoiceChatRoom):该场景不限制观众的数量,如果任何用户进出都抛出回调会引起很大的性能损耗,所以该场景下只有主播进入房间时才会触发该通知,观众进入房间不会触发该通知。 - * - * - * @note 注意 onRemoteUserEnterRoom 和 onRemoteUserLeaveRoom 只适用于维护当前房间里的“成员列表”,如果需要显示远程画面,建议使用监听 onUserVideoAvailable() 事件回调。 - * - * @param userId 用户标识 + * 出于性能方面的考虑,在 TRTC 两种不同的应用场景(即 AppScene,在 enterRoom 时通过第二个参数指定)下,该通知的行为会有差别: + * - 直播类场景(TRTCAppSceneLIVE 和 TRTCAppSceneVoiceChatRoom):该场景下的用户区分主播和观众两种角色,只有主播进入房间时才会触发该通知,观众进入房间时不会触发该通知。 + * - 通话类场景(TRTCAppSceneVideoCall 和 TRTCAppSceneAudioCall):该场景下的用户没有角色的区分(可认为都是主播),任何用户进入房间都会触发该通知。 + * @param userId 远端用户的用户标识 + * @note + * 1. 事件回调 onRemoteUserEnterRoom 和 onRemoteUserLeaveRoom 只适用于维护当前房间里的“用户列表”,有此事件回调不代表一定有视频画面。 + * 2. 
如果需要显示远程画面,请监听代表某个用户是否有视频画面的 onUserVideoAvailable() 事件回调。 */ - (void)onRemoteUserEnterRoom:(NSString *)userId; /** * 3.2 有用户离开当前房间 * - * 与 onRemoteUserEnterRoom 相对应,在两种不同的应用场景下,该通知的行为会有差别: - * - 通话场景(TRTCAppSceneVideoCall 和 TRTCAppSceneAudioCall):该场景下用户没有角色的区别,任何用户的离开都会触发该通知。 - * - 直播场景(TRTCAppSceneLIVE 和 TRTCAppSceneVoiceChatRoom):只有主播离开房间时才会触发该通知,观众离开房间不会触发该通知。 + * 与 onRemoteUserEnterRoom 相对应,在两种不同的应用场景(即 AppScene,在 enterRoom 时通过第二个参数指定)下,该通知的行为会有差别: + * - 直播类场景(TRTCAppSceneLIVE 和 TRTCAppSceneVoiceChatRoom):只有主播离开房间时才会触发该通知,观众离开房间不会触发该通知。 + * - 通话类场景(TRTCAppSceneVideoCall 和 TRTCAppSceneAudioCall):该场景下用户没有角色的区别,任何用户的离开都会触发该通知。 * - * @param userId 用户标识 - * @param reason 离开原因,0 表示用户主动退出房间,1 表示用户超时退出,2 表示被踢出房间。 + * @param userId 远端用户的用户标识 + * @param reason 离开原因,0表示用户主动退出房间,1表示用户超时退出,2表示被踢出房间。 */ - (void)onRemoteUserLeaveRoom:(NSString *)userId reason:(NSInteger)reason; /** - * 3.3 远端用户是否存在可播放的主路画面(一般用于摄像头) + * 3.3 某远端用户发布/取消了主路视频画面 * - * 当您收到 onUserVideoAvailable(userid, YES) 通知时,表示该路画面已经有可用的视频数据帧到达。 - * 此时,您需要调用 startRemoteView(userid) 接口加载该用户的远程画面。 - * 然后,您会收到名为 onFirstVideoFrame(userid) 的首帧画面渲染回调。 + * “主路画面”一般被用于承载摄像头画面。当您收到 onUserVideoAvailable(userId, YES) 通知时,表示该路画面已经有可播放的视频帧到达。 + * 此时,您需要调用 {@link startRemoteView} 接口订阅该用户的远程画面,订阅成功后,您会继续收到该用户的首帧画面渲染回调 onFirstVideoFrame(userid)。 * - * 当您收到 onUserVideoAvailable(userid, NO) 通知时,表示该路远程画面已被关闭, - * 可能由于该用户调用了 muteLocalVideo() 或 stopLocalPreview()。 + * 当您收到 onUserVideoAvailable(userId, NO) 通知时,表示该路远程画面已经被关闭,关闭的原因可能是该用户调用了 {@link muteLocalVideo} 或 {@link stopLocalPreview}。 * - * @param userId 用户标识 - * @param available 画面是否开启 + * @param userId 远端用户的用户标识 + * @param available 该用户是否发布(或取消发布)了主路视频画面,YES: 发布;NO:取消发布。 */ - (void)onUserVideoAvailable:(NSString *)userId available:(BOOL)available; /** - * 3.4 远端用户是否存在可播放的辅路画面(一般用于屏幕分享) + * 3.4 某远端用户发布/取消了辅路视频画面 * - * @note 显示辅路画面使用的函数是 startRemoteSubStreamView() 而非 startRemoteView()。 - * @param userId 用户标识 - * @param available 屏幕分享是否开启 + * “辅路画面”一般被用于承载屏幕分享的画面。当您收到 
onUserSubStreamAvailable(userId, YES) 通知时,表示该路画面已经有可播放的视频帧到达。 + * 此时,您需要调用 {@link startRemoteSubStreamView} 接口订阅该用户的远程画面,订阅成功后,您会继续收到该用户的首帧画面渲染回调 onFirstVideoFrame(userid)。 + * @param userId 远端用户的用户标识 + * @param available 该用户是否发布(或取消发布)了辅路视频画面,YES: 发布;NO:取消发布。 + * @note 显示辅路画面使用的函数是 {@link startRemoteSubStreamView} 而非 {@link startRemoteView}。 */ - (void)onUserSubStreamAvailable:(NSString *)userId available:(BOOL)available; /** - * 3.5 远端用户是否存在可播放的音频数据 + * 3.5 某远端用户发布/取消了自己的音频 * - * @param userId 用户标识 - * @param available 声音是否开启 + * 当您收到 onUserAudioAvailable(userId, YES) 通知时,表示该用户发布了自己的声音,此时 SDK 的表现为: + * - 在自动订阅模式下,您无需做任何操作,SDK 会自动播放该用户的声音。 + * - 在手动订阅模式下,您可以通过 {@link muteRemoteAudio}(userid, NO) 来播放该用户的声音。 + * @param userId 远端用户的用户标识 + * @param available 该用户是否发布(或取消发布)了自己的音频,YES: 发布;NO:取消发布。 + * @note SDK 默认使用自动订阅模式,您可以通过 {@link setDefaultStreamRecvMode} 设置为手动订阅,但需要在您进入房间之前调用才生效。 */ - (void)onUserAudioAvailable:(NSString *)userId available:(BOOL)available; /** - * 3.6 开始渲染本地或远程用户的首帧画面 - * - * 如果 userId == nil,代表开始渲染本地采集的摄像头画面,需要您先调用 startLocalPreview 触发。 - * 如果 userId != nil,代表开始渲染远程用户的首帧画面,需要您先调用 startRemoteView 触发。 + * 3.6 SDK 开始渲染自己本地或远端用户的首帧画面 * - * @note 只有当您调用 startLocalPreivew()、startRemoteView() 或 startRemoteSubStreamView() 之后,才会触发该回调。 - * - * @param userId 本地或远程用户 ID,如果 userId == nil 代表本地,userId != nil 代表远程。 - * @param streamType 视频流类型:摄像头或屏幕分享。 - * @param width 画面宽度 - * @param height 画面高度 + * SDK 会在渲染自己本地或远端用户的首帧画面时抛出该事件,您可以通过回调事件中的 userId 参数来判断事件来自于“本地”还是来自于“远端”。 + * - 如果 userId 为空值,代表 SDK 已经开始渲染自己本地的视频画面,不过前提是您已经调用了 {@link startLocalPreview} 或 {@link startScreenCapture}。 + * - 如果 userId 不为空,代表 SDK 已经开始渲染远端用户的视频画面,不过前提是您已经调用了 {@link startRemoteView} 订阅了该用户的视频画面。 + * @param userId 本地或远端的用户标识,如果 userId 为空值代表自己本地的首帧画面已到来,userId 不为空则代表远端用户的首帧画面已到来。 + * @param streamType 视频流类型:主路(Main)一般用于承载摄像头画面,辅路(Sub)一般用于承载屏幕分享画面。 + * @param width 画面的宽度。 + * @param height 画面的高度。 + * @note + * 1. 
只有当您调用了 {@link startLocalPreview} 或 {@link startScreenCapture} 之后,才会触发自己本地的首帧画面事件回调。 + * 2. 只有当您调用了 {@link startRemoteView} 或 {@link startRemoteSubStreamView} 之后,才会触发远端用户的首帧画面事件回调。 */ -- (void)onFirstVideoFrame:(NSString*)userId streamType:(TRTCVideoStreamType)streamType width:(int)width height:(int)height; +- (void)onFirstVideoFrame:(NSString *)userId streamType:(TRTCVideoStreamType)streamType width:(int)width height:(int)height; /** - * 3.7 开始播放远程用户的首帧音频(本地声音暂不通知) + * 3.7 SDK 开始播放远端用户的首帧音频 + * + * SDK 会在播放远端用户的首帧音频时抛出该事件,本地音频的首帧事件暂不抛出。 * - * @param userId 远程用户 ID。 + * @param userId 远端用户的用户标识 */ -- (void)onFirstAudioFrame:(NSString*)userId; +- (void)onFirstAudioFrame:(NSString *)userId; /** - * 3.8 首帧本地视频数据已经被送出 + * 3.8 自己本地的首个视频帧已被发布出去 * - * SDK 会在 enterRoom() 并 startLocalPreview() 成功后开始摄像头采集,并将采集到的画面进行编码。 - * 当 SDK 成功向云端送出第一帧视频数据后,会抛出这个回调事件。 + * 当您成功进入房间并通过 {@link startLocalPreview} 或 {@link startScreenCapture} 开启本地视频采集之后(开启采集和进入房间的先后顺序无影响), + * SDK 就会开始进行视频编码,并通过自身的网络模块向云端发布自己本地的视频数据。 + * 当 SDK 成功地向云端送出自己的第一帧视频数据帧以后,就会抛出 onSendFirstLocalVideoFrame 事件回调。 * - * @param streamType 视频流类型,主画面、小画面或辅流画面(屏幕分享) + * @param streamType 视频流类型:主路(Main)一般用于承载摄像头画面,辅路(Sub)一般用于承载屏幕分享画面。 */ -- (void)onSendFirstLocalVideoFrame: (TRTCVideoStreamType)streamType; +- (void)onSendFirstLocalVideoFrame:(TRTCVideoStreamType)streamType; /** - * 3.9 首帧本地音频数据已经被送出 + * 3.9 自己本地的首个音频帧已被发布出去 * - * SDK 会在 enterRoom() 并 startLocalAudio() 成功后开始麦克风采集,并将采集到的声音进行编码。 - * 当 SDK 成功向云端送出第一帧音频数据后,会抛出这个回调事件。 + * 当您成功进入房间并通过 {@link startLocalAudio} 开启本地音频采集之后(开启采集和进入房间的先后顺序无影响), + * SDK 就会开始进行音频编码,并通过自身的网络模块向云端发布自己本地的音频数据。 + * 当 SDK 成功地向云端送出自己的第一帧音频数据帧以后,就会抛出 onSendFirstLocalAudioFrame 事件回调。 */ - (void)onSendFirstLocalAudioFrame; /** - * 3.10 废弃接口:有主播加入当前房间 - * - * 该回调接口可以被看作是 onRemoteUserEnterRoom 的废弃版本,不推荐使用。请使用 onUserVideoAvailable 或 onRemoteUserEnterRoom 进行替代。 - * - * @note 该接口已被废弃,不推荐使用 + * 3.10 远端视频状态变化的事件回调 * + * 您可以通过此事件回调获取远端每一路画面的播放状态(包括 Playing、Loading 和 Stopped 三种状态),从而进行相应的 UI 展示。 * @param 
userId 用户标识 + * @param streamType 视频流类型:主路(Main)一般用于承载摄像头画面,辅路(Sub)一般用于承载屏幕分享画面。 + * @param status 视频状态:包括 Playing、Loading 和 Stopped 三种状态。 + * @param reason 视频状态改变的原因 + * @param extrainfo 额外信息 */ -- (void)onUserEnter:(NSString *)userId DEPRECATED_ATTRIBUTE; +- (void)onRemoteVideoStatusUpdated:(NSString *)userId streamType:(TRTCVideoStreamType)streamType streamStatus:(TRTCAVStatusType)status reason:(TRTCAVStatusChangeReason)reason extrainfo:(nullable NSDictionary *)info; /** - * 3.11 废弃接口:有主播离开当前房间 - * - * 该回调接口可以被看作是 onRemoteUserLeaveRoom 的废弃版本,不推荐使用。请使用 onUserVideoAvailable 或 onRemoteUserLeaveRoom 进行替代。 - * - * @note 该接口已被废弃,不推荐使用 + * 3.11 用户视频大小发生改变回调 * + * 当您收到 onUserVideoSizeChanged(userId, streamtype, newWidth, newHeight) 通知时,表示该路画面大小发生了调整,调整的原因可能是该用户调用了 setVideoEncoderParam 或者 setSubStreamEncoderParam 重新设置了画面尺寸。 * @param userId 用户标识 - * @param reason 离开原因。 + * @param streamType 视频流类型:主路(Main)一般用于承载摄像头画面,辅路(Sub)一般用于承载屏幕分享画面。 + * @param newWidth 视频流的宽度(像素) + * @param newHeight 视频流的高度(像素) */ -- (void)onUserExit:(NSString *)userId reason:(NSInteger)reason DEPRECATED_ATTRIBUTE; +- (void)onUserVideoSizeChanged:(NSString *)userId streamType:(TRTCVideoStreamType)streamType newWidth:(int)newWidth newHeight:(int)newHeight; /// @} - ///////////////////////////////////////////////////////////////////////////////// // -// (四)统计和质量回调 +// 网络和技术指标统计回调 // ///////////////////////////////////////////////////////////////////////////////// - -/// @name 统计和质量回调 +/// @name 网络和技术指标统计回调 /// @{ /** - * 4.1 网络质量,该回调每2秒触发一次,统计当前网络的上行和下行质量 - * - * @note userId == nil 代表自己当前的视频质量 + * 4.1 网络质量的实时统计回调 * + * 该统计回调每间隔2秒抛出一次,用于通知 SDK 感知到的当前网络的上行和下行质量。 + * SDK 会使用一组内嵌的自研算法对当前网络的延迟高低、带宽大小以及稳定情况进行评估,并计算出一个的评估结果: + * 如果评估结果为 1(Excellent) 代表当前的网络情况非常好,如果评估结果为 6(Down)代表当前网络无法支撑 TRTC 的正常通话。 * @param localQuality 上行网络质量 * @param remoteQuality 下行网络质量 + * @note 回调参数 localQuality 和 remoteQuality 中的 userId 如果为空置,代表本组数据统计的是自己本地的网络质量,否则是代表远端用户的网络质量。 */ -- (void)onNetworkQuality: 
(TRTCQualityInfo*)localQuality remoteQuality:(NSArray<TRTCQualityInfo*>*)remoteQuality; +- (void)onNetworkQuality:(TRTCQualityInfo *)localQuality remoteQuality:(NSArray<TRTCQualityInfo *> *)remoteQuality; /** - * 4.2 技术指标统计回调 + * 4.2 音视频技术指标的实时统计回调 * - * 如果您是熟悉音视频领域相关术语,可以通过这个回调获取 SDK 的所有技术指标。 - * 如果您是首次开发音视频相关项目,可以只关注 onNetworkQuality 回调。 + * 该统计回调每间隔2秒抛出一次,用于通知 SDK 内部音频、视频以及网络相关的专业技术指标,这些信息在 {@link TRTCStatistics} 均有罗列: + * - 视频统计信息:视频的分辨率(resolution)、帧率(FPS)和比特率(bitrate)等信息。 + * - 音频统计信息:音频的采样率(samplerate)、声道(channel)和比特率(bitrate)等信息。 + * - 网络统计信息:SDK 和云端一次往返(SDK => Cloud => SDK)的网络耗时(rtt)、丢包率(loss)、上行流量(sentBytes)和下行流量(receivedBytes)等信息。 * - * @param statistics 统计数据,包括本地和远程的 - * @note 每2秒回调一次 + * @param statistics 统计数据,包括自己本地的统计信息和远端用户的统计信息,详情请参考 {@link TRTCStatistics}。 + * @note 如果您只需要获知当前网络质量的好坏,并不需要花太多时间研究本统计回调,更推荐您使用 {@link onNetworkQuality} 来解决问题。 */ -- (void)onStatistics: (TRTCStatistics *)statistics; +- (void)onStatistics:(TRTCStatistics *)statistics; -/// @} +/** + * 4.3 网速测试的结果回调 + * + * 该统计回调由 {@link startSpeedTest:} 触发。 + * + * @param result 网速测试数据数据,包括丢包、往返延迟、上下行的带宽速率,详情请参考 {@link TRTCSpeedTestResult}。 + */ +- (void)onSpeedTestResult:(TRTCSpeedTestResult *)result; +/// @} ///////////////////////////////////////////////////////////////////////////////// // -// (五)服务器事件回调 +// 与云端连接情况的事件回调 // ///////////////////////////////////////////////////////////////////////////////// - -/// @name 服务器事件回调 +/// @name 与云端连接情况的事件回调 /// @{ /** - * 5.1 SDK 跟服务器的连接断开 + * 5.1 SDK 与云端的连接已经断开 + * + * SDK 会在跟云端的连接断开时抛出此事件回调,导致断开的原因大多是网络不可用或者网络切换所致,比如用户在通话中走进电梯时就可能会遇到此事件。 + * 在抛出此事件之后,SDK 会努力跟云端重新建立连接,重连过程中会抛出 {@link onTryToReconnect},连接恢复后会抛出 {@link onConnectionRecovery} 。 + * 所以,SDK 会在如下三个连接相关的事件中按如下规律切换: + * <pre> + * [onConnectionLost] =====> [onTryToReconnect] =====> [onConnectionRecovery] + * /|\ | + * |------------------------------------------------------| + * </pre> */ - (void)onConnectionLost; /** - * 5.2 SDK 尝试重新连接到服务器 + * 5.2 SDK 正在尝试重新连接到云端 + * + * SDK 
会在跟云端的连接断开时抛出 {@link onConnectionLost},之后会努力跟云端重新建立连接并抛出本事件,连接恢复后会抛出 {@link onConnectionRecovery}。 */ - (void)onTryToReconnect; /** - * 5.3 SDK 跟服务器的连接恢复 + * 5.3 SDK 与云端的连接已经恢复 + * + * SDK 会在跟云端的连接断开时抛出 {@link onConnectionLost},之后会努力跟云端重新建立连接并抛出{@link onTryToReconnect},连接恢复后会抛出本事件回调。 */ - (void)onConnectionRecovery; /// @} - ///////////////////////////////////////////////////////////////////////////////// // -// (六)硬件设备事件回调 +// 硬件设备相关事件回调 // ///////////////////////////////////////////////////////////////////////////////// - -/// @name 硬件设备事件回调 +/// @name 硬件设备相关事件回调 /// @{ /** * 6.1 摄像头准备就绪 + * + * 当您调用 {@link startLocalPreivew} 之后,SDK 会尝试启动摄像头,如果摄像头能够启动成功就会抛出本事件。 + * 如果启动失败,大概率是因为当前应用没有获得访问摄像头的权限,或者摄像头当前正在被其他程序独占使用中。 + * 您可以通过捕获 {@link onError} 事件回调获知这些异常情况并通过 UI 界面提示用户。 */ - (void)onCameraDidReady; /** * 6.2 麦克风准备就绪 + * + * 当您调用 {@link startLocalAudio} 之后,SDK 会尝试启动麦克风,如果麦克风能够启动成功就会抛出本事件。 + * 如果启动失败,大概率是因为当前应用没有获得访问麦克风的权限,或者麦克风当前正在被其他程序独占使用中。 + * 您可以通过捕获 {@link onError} 事件回调获知这些异常情况并通过 UI 界面提示用户。 */ - (void)onMicDidReady; -#if TARGET_OS_IPHONE /** - * 6.3 音频路由发生变化(仅 iOS),音频路由即声音由哪里输出(扬声器或听筒) + * 6.3 当前音频路由发生变化(仅适用于移动设备) * - * @param route 当前音频路由 - * @param fromRoute 变更前的音频路由 + * 所谓“音频路由”,是指声音是从手机的扬声器还是从听筒中播放出来,音频路由变化也就是声音的播放位置发生了变化。 + * - 当音频路由为听筒时,声音比较小,只有将耳朵凑近才能听清楚,隐私性较好,适合用于接听电话。 + * - 当音频路由为扬声器时,声音比较大,不用将手机贴脸也能听清,因此可以实现“免提”的功能。 + * + * @param route 音频路由,即声音由哪里输出(扬声器、听筒)。 + * @param fromRoute 变更前的音频路由。 */ +#if TARGET_OS_IPHONE - (void)onAudioRouteChanged:(TRTCAudioRoute)route fromRoute:(TRTCAudioRoute)fromRoute; #endif /** - * 6.4 用于提示音量大小的回调,包括每个 userId 的音量和远端总音量 + * 6.4 音量大小的反馈回调 * - * 您可以通过调用 TRTCCloud 中的 enableAudioVolumeEvaluation 接口来开关这个回调或者设置它的触发间隔。 - * 需要注意的是,调用 enableAudioVolumeEvaluation 开启音量回调后,无论频道内是否有人说话,都会按设置的时间间隔调用这个回调; - * 如果没有人说话,则 userVolumes 为空,totalVolume 为 0。 - * - * @param userVolumes 所有正在说话的房间成员的音量,取值范围 0 - 100。 - * @param totalVolume 所有远端成员的总音量, 取值范围 0 - 100。 - * @note userId 为 nil 时表示自己的音量,userVolumes 内仅包含正在说话(音量不为 0 )的用户音量信息。 + * SDK 
可以评估每一路音频的音量大小,并每隔一段时间抛出该事件回调,您可以根据音量大小在 UI 上做出相应的提示,比如“波形图”或“音量槽”。 + * 要完成这个功能, 您需要先调用 {@link enableAudioVolumeEvaluation} 开启这个能力并设定事件抛出的时间间隔。 + * 需要补充说明的是,无论当前房间中是否有人说话,SDK 都会按照您设定的时间间隔定时抛出此事件回调,只不过当没有人说话时,userVolumes 为空,totalVolume 为 0。 + * @param userVolumes 是一个数组,用于承载所有正在说话的用户的音量大小,取值范围 0 - 100。 + * @param totalVolume 所有远端用户的总音量大小, 取值范围 0 - 100。 + * @note userVolumes 为一个数组,对于数组中的每一个元素,当 userId 为空时表示本地麦克风采集的音量大小,当 userId 不为空时代表远端用户的音量大小。 */ - (void)onUserVoiceVolume:(NSArray<TRTCVolumeInfo *> *)userVolumes totalVolume:(NSInteger)totalVolume; - -#if !TARGET_OS_IPHONE && TARGET_OS_MAC /** - * 6.5 本地设备通断回调 + * 6.5 本地设备的通断状态发生变化(仅适用于桌面系统) + * + * 当本地设备(包括摄像头、麦克风以及扬声器)被插入或者拔出时,SDK 便会抛出此事件回调。 * * @param deviceId 设备 ID * @param deviceType 设备类型 - * @param state 0:设备断开;1:设备连接 + * @param state 通断状态,0:设备断开;1:设备连接。 */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC - (void)onDevice:(NSString *)deviceId type:(TRTCMediaDeviceType)deviceType stateChanged:(NSInteger)state; - +#endif /** - * 6.6 当前音频采集设备音量变化回调 + * 6.6 当前麦克风的系统采集音量发生变化 * - * @note 使用 enableAudioVolumeEvaluation(interval>0)开启,(interval == 0)关闭 + * 在 Mac 或 Windows 这样的桌面操作系统上,用户可以在设置中心找到声音相关的设置面板,并设置麦克风的采集音量大小。 + * 用户将麦克风的采集音量设置得越大,麦克风采集到的声音的原始音量也就会越大,反之就会越小。 + * 在有些型号的键盘以及笔记本电脑上,用户还可以通过按下“禁用麦克风”按钮(图标是一个话筒上上叠加了一道代表禁用的斜线)来将麦克风静音。 * - * @param volume 音量 取值范围 0 - 100 - * @param muted 当前采集音频设备是否被静音:YES 被静音了,NO 未被静音 + * 当用户通过系统设置界面或者通过键盘上的快捷键设定操作系统的麦克风采集音量时,SDK 便会抛出此事件。 + * @param volume 系统采集音量,取值范围 0 - 100,用户可以在系统的声音设置面板上进行拖拽调整。 + * @param muted 麦克风是否被用户禁用了:YES 被禁用,NO 被启用。 + * @note 您需要调用 {@link enableAudioVolumeEvaluation} 接口并设定(interval>0)开启次事件回调,设定(interval == 0)关闭此事件回调。 */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC - (void)onAudioDeviceCaptureVolumeChanged:(NSInteger)volume muted:(BOOL)muted; +#endif /** - * 6.7 当前音频播放设备音量变化回调 + * 6.7 当前系统的播放音量发生变化 * - * @note 使用 enableAudioVolumeEvaluation(interval>0)开启,(interval == 0)关闭 + * 在 Mac 或 Windows 这样的桌面操作系统上,用户可以在设置中心找到声音相关的设置面板,并设置系统的播放音量大小。 + * 
在有些型号的键盘以及笔记本电脑上,用户还可以通过按下“静音”按钮(图标是一个喇叭上叠加了一道代表禁用的斜线)来将系统静音。 * - * @param volume 音量 取值范围 0 - 100 - * @param muted 当前音频播放设备是否被静音:YES 被静音了,NO 未被静音 + * 当用户通过系统设置界面或者通过键盘上的快捷键设定操作系统的播放音量时,SDK 便会抛出此事件。 + * @param volume 系统播放音量,取值范围 0 - 100,用户可以在系统的声音设置面板上进行拖拽调整。 + * @param muted 系统是否被用户静音了:YES 被静音,NO 已恢复。 + * @note 您需要调用 {@link enableAudioVolumeEvaluation} 接口并设定(interval>0)开启次事件回调,设定(interval == 0)关闭此事件回调。 */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC - (void)onAudioDevicePlayoutVolumeChanged:(NSInteger)volume muted:(BOOL)muted; +#endif /** - * 6.8 系统声音采集结果回调 + * 6.8 系统声音采集是否被成功开启的事件回调(仅适用于 Mac 系统) * - * 系统声音采集接口 startSystemAudioLoopback 会触发这个回调 + * 在 Mac 系统上,您可以通过调用 {@link startSystemAudioLoopback} 为当前系统安装一个音频驱动,并让 SDK 通过该音频驱动捕获当前 Mac 系统播放出的声音。 + * 当用于播片教学或音乐直播中,比如老师端可以使用此功能,让 SDK 能够采集老师所播放的电影中的声音,使同房间的学生端也能听到电影中的声音。 + * SDK 会将统声音采集是否被成功开启的结果,通过本事件回调抛出,需要您关注参数中的错误码。 * - * @param err ERR_NULL 表示成功,其余值表示失败 + * @param err ERR_NULL 表示成功,其余值表示失败。 */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC - (void)onSystemAudioLoopbackError:(TXLiteAVError)err; - #endif /// @} - - ///////////////////////////////////////////////////////////////////////////////// // -// (七)自定义消息的接收回调 +// 自定义消息的接收事件回调 // ///////////////////////////////////////////////////////////////////////////////// - -/// @name 自定义消息的接收回调 +/// @name 自定义消息的接收事件回调 /// @{ /** - * 7.1 收到自定义消息回调 + * 7.1 收到自定义消息的事件回调 * - * 当房间中的某个用户使用 sendCustomCmdMsg 发送自定义消息时,房间中的其它用户可以通过 onRecvCustomCmdMsg 接口接收消息 + * 当房间中的某个用户使用 {@link sendCustomCmdMsg} 发送自定义 UDP 消息时,房间中的其它用户可以通过 onRecvCustomCmdMsg 事件回调接收到该条消息。 * * @param userId 用户标识 * @param cmdID 命令 ID @@ -419,334 +479,408 @@ NS_ASSUME_NONNULL_BEGIN - (void)onRecvCustomCmdMsgUserId:(NSString *)userId cmdID:(NSInteger)cmdID seq:(UInt32)seq message:(NSData *)message; /** - * 7.2 自定义消息丢失回调 + * 7.2 自定义消息丢失的事件回调 * - * 实时音视频使用 UDP 通道,即使设置了可靠传输(reliable),也无法确保100@%不丢失,只是丢消息概率极低,能满足常规可靠性要求。 + * 当您使用 {@link sendCustomCmdMsg} 发送自定义 UDP 消息时,即使设置了可靠传输(reliable),也无法确保100@%不丢失,只是丢消息概率极低,能满足常规可靠性要求。 * 
在发送端设置了可靠运输(reliable)后,SDK 都会通过此回调通知过去时间段内(通常为5s)传输途中丢失的自定义消息数量统计信息。 * - * @note 只有在发送端设置了可靠传输(reliable),接收方才能收到消息的丢失回调 * @param userId 用户标识 * @param cmdID 命令 ID * @param errCode 错误码 * @param missed 丢失的消息数量 + * @note 只有在发送端设置了可靠传输(reliable),接收方才能收到消息的丢失回调 */ - (void)onMissCustomCmdMsgUserId:(NSString *)userId cmdID:(NSInteger)cmdID errCode:(NSInteger)errCode missed:(NSInteger)missed; /** * 7.3 收到 SEI 消息的回调 * - * 当房间中的某个用户使用 sendSEIMsg 发送数据时,房间中的其它用户可以通过 onRecvSEIMsg 接口接收数据。 + * 当房间中的某个用户使用 {@link sendSEIMsg} 借助视频数据帧发送 SEI 消息时,房间中的其它用户可以通过 onRecvSEIMsg 事件回调接收到该条消息。 * * @param userId 用户标识 * @param message 数据 */ -- (void)onRecvSEIMsg:(NSString *)userId message:(NSData*)message; +- (void)onRecvSEIMsg:(NSString *)userId message:(NSData *)message; /// @} - ///////////////////////////////////////////////////////////////////////////////// // -// (八)CDN 旁路回调 +// CDN 相关事件回调 // ///////////////////////////////////////////////////////////////////////////////// -/// @name CDN 旁路转推回调 +/// @name CDN 相关事件回调 /// @{ - + /** - * 8.1 开始向腾讯云的直播 CDN 推流的回调,对应于 TRTCCloud 中的 startPublishing() 接口 + * 8.1 开始向腾讯云直播 CDN 上发布音视频流的事件回调 + * + * 当您调用 {@link startPublishing} 开始向腾讯云直播 CDN 上发布音视频流时,SDK 会立刻将这一指令同步给云端服务器。 + * 随后 SDK 会收到来自云端服务器的处理结果,并将指令的执行结果通过本事件回调通知给您。 * * @param err 0表示成功,其余值表示失败 * @param errMsg 具体错误原因 */ -- (void)onStartPublishing:(int)err errMsg:(NSString*)errMsg; +- (void)onStartPublishing:(int)err errMsg:(NSString *)errMsg; /** - * 8.2 停止向腾讯云的直播 CDN 推流的回调,对应于 TRTCCloud 中的 stopPublishing() 接口 + * 8.2 停止向腾讯云直播 CDN 上发布音视频流的事件回调 + * + * 当您调用 {@link stopPublishing} 停止向腾讯云直播 CDN 上发布音视频流时,SDK 会立刻将这一指令同步给云端服务器。 + * 随后 SDK 会收到来自云端服务器的处理结果,并将指令的执行结果通过本事件回调通知给您。 * * @param err 0表示成功,其余值表示失败 * @param errMsg 具体错误原因 */ -- (void)onStopPublishing:(int)err errMsg:(NSString*)errMsg; +- (void)onStopPublishing:(int)err errMsg:(NSString *)errMsg; /** - * 8.3 启动旁路推流到 CDN 完成的回调 - * - * 对应于 TRTCCloud 中的 startPublishCDNStream() 接口 + * 8.3 开始向非腾讯云 CDN 上发布音视频流的事件回调 * - * @note Start 
回调如果成功,只能说明转推请求已经成功告知给腾讯云,如果目标 CDN 有异常,还是有可能会转推失败。 + * 当您调用 {@link startPublishCDNStream} 开始向非腾讯云直播 CDN 上发布音视频流时,SDK 会立刻将这一指令同步给云端服务器。 + * 随后 SDK 会收到来自云端服务器的处理结果,并将指令的执行结果通过本事件回调通知给您。 + * @param err 0表示成功,其余值表示失败 + * @param errMsg 具体错误原因 + * @note 当您收到成功的事件回调时,只是说明您的发布指令已经同步到腾讯云后台服务器,但如果目标 CDN 厂商的服务器不接收该条视频流,依然可能导致发布失败。 */ - (void)onStartPublishCDNStream:(int)err errMsg:(NSString *)errMsg; /** - * 8.4 停止旁路推流到 CDN 完成的回调 + * 8.4 停止向非腾讯云 CDN 上发布音视频流的事件回调 * - * 对应于 TRTCCloud 中的 stopPublishCDNStream() 接口 + * 当您调用 {@link stopPublishCDNStream} 开始向非腾讯云直播 CDN 上发布音视频流时,SDK 会立刻将这一指令同步给云端服务器。 + * 随后 SDK 会收到来自云端服务器的处理结果,并将指令的执行结果通过本事件回调通知给您。 * + * @param err 0表示成功,其余值表示失败 + * @param errMsg 具体错误原因 */ - (void)onStopPublishCDNStream:(int)err errMsg:(NSString *)errMsg; /** - * 8.5 设置云端的混流转码参数的回调,对应于 TRTCCloud 中的 setMixTranscodingConfig() 接口 + * 8.5 设置云端混流的排版布局和转码参数的事件回调 * - * @param err 0表示成功,其余值表示失败 - * @param errMsg 具体错误原因 + * 当您调用 {@link setMixTranscodingConfig} 调整云端混流的排版布局和转码参数时,SDK 会立刻将这一调整指令同步给云端服务器。 + * 随后 SDK 会收到来自云端服务器的处理结果,并将指令的执行结果通过本事件回调通知给您。 + * + * @param err 错误码:0表示成功,其余值表示失败。 + * @param errMsg 具体的错误原因。 */ -- (void)onSetMixTranscodingConfig:(int)err errMsg:(NSString*)errMsg; +- (void)onSetMixTranscodingConfig:(int)err errMsg:(NSString *)errMsg; /// @} - ///////////////////////////////////////////////////////////////////////////////// // -// (九)音效回调 +// 屏幕分享相关事件回调 // ///////////////////////////////////////////////////////////////////////////////// -/// @name 音效回调 +/// @name 屏幕分享相关事件回调 /// @{ -/** - * 播放音效结束回调 - * - * @param effectId 音效 ID - * @param code 0 表示播放正常结束;其他表示异常结束 - * @note 该接口已不再维护,推荐使用 TXAudioEffectManager.startPlayMusic 及相关回调 - */ -- (void)onAudioEffectFinished:(int) effectId code:(int) code DEPRECATED_ATTRIBUTE; -/// @} -///////////////////////////////////////////////////////////////////////////////// -// -// (十)屏幕分享回调 -// -///////////////////////////////////////////////////////////////////////////////// -/// @name 屏幕分享回调 -/// @{ /** - * 10.1 
当屏幕分享开始时,SDK 会通过此回调通知 + * 9.1 屏幕分享开启的事件回调 + * + * 当您通过 {@link startScreenCapture} 等相关接口启动屏幕分享时,SDK 便会抛出此事件回调。 */ - (void)onScreenCaptureStarted; /** - * 10.2 当屏幕分享暂停时,SDK 会通过此回调通知 + * 9.2 屏幕分享暂停的事件回调 * - * @param reason 原因,0:用户主动暂停;1:屏幕窗口不可见暂停 + * 当您通过 {@link pauseScreenCapture} 暂停屏幕分享时,SDK 便会抛出此事件回调。 + * @param reason 原因。 + * - 0:用户主动暂停。 + * - 1:注意此字段的含义在 MAC 和 Windows 平台有稍微差异。屏幕窗口不可见暂停(Mac)。表示设置屏幕分享参数导致的暂停(Windows)。 + * - 2:表示屏幕分享窗口被最小化导致的暂停(仅 Windows)。 + * - 3:表示屏幕分享窗口被隐藏导致的暂停(仅 Windows)。 */ - (void)onScreenCapturePaused:(int)reason; /** - * 10.3 当屏幕分享恢复时,SDK 会通过此回调通知 + * 9.3 屏幕分享恢复的事件回调 * - * @param reason 恢复原因,0:用户主动恢复;1:屏幕窗口恢复可见从而恢复分享 + * 当您通过 {@link resumeScreenCapture} 恢复屏幕分享时,SDK 便会抛出此事件回调。 + * @param reason 恢复原因。 + * - 0:用户主动恢复。 + * - 1:注意此字段的含义在 MAC 和 Windows 平台有稍微差异。屏幕窗口恢复可见从而恢复分享(Mac)。屏幕分享参数设置完毕后自动恢复(Windows) + * - 2:表示屏幕分享窗口从最小化被恢复(仅 Windows)。 + * - 3:表示屏幕分享窗口从隐藏被恢复(仅 Windows)。 */ - (void)onScreenCaptureResumed:(int)reason; /** - * 10.4 当屏幕分享停止时,SDK 会通过此回调通知 + * 9.4 屏幕分享停止的事件回调 * - * @param reason 停止原因,0:用户主动停止;1:屏幕窗口关闭导致停止 + * 当您通过 {@link stopScreenCapture} 停止屏幕分享时,SDK 便会抛出此事件回调。 + * @param reason 停止原因,0:用户主动停止;1:屏幕窗口关闭导致停止;2:表示屏幕分享的显示屏状态变更(如接口被拔出、投影模式变更等)。 */ - (void)onScreenCaptureStoped:(int)reason; -/// @} -#if TARGET_OS_IPHONE +/// @} ///////////////////////////////////////////////////////////////////////////////// // -// (十一)媒体录制回调 +// 本地录制和本地截图的事件回调 // ///////////////////////////////////////////////////////////////////////////////// -/// @name 媒体录制回调 +/// @name 本地录制和本地截图的事件回调 /// @{ + /** - * 11.1 媒体录制回调 + * 10.1 本地录制任务已经开始的事件回调 * + * 当您调用 {@link startLocalRecording} 启动本地媒体录制任务时,SDK 会抛出该事件回调,用于通知您录制任务是否已经顺利启动。 * @param errCode 错误码 0:初始化录制成功;-1:初始化录制失败;-2: 文件后缀名有误。 * @param storagePath 录制文件存储路径 */ - (void)onLocalRecordBegin:(NSInteger)errCode storagePath:(NSString *)storagePath; /** - * 11.2 录制信息更新回调 + * 10.2 本地录制任务正在进行中的进展事件回调 + * + * 当您调用 {@link startLocalRecording} 成功启动本地媒体录制任务后,SDK 变会定时地抛出本事件回调。 + * 您可通过捕获该事件回调来获知录制任务的健康状况。 + * 您可以在 {@link 
startLocalRecording} 时设定本事件回调的抛出间隔。 * + * @param duration 已经录制的累计时长,单位毫秒 * @param storagePath 录制文件存储路径 - * @param duration 录制时长,单位毫秒 */ - (void)onLocalRecording:(NSInteger)duration storagePath:(NSString *)storagePath; /** - * 11.2 录制任务已结束 + * 10.3 本地录制任务已经结束的事件回调 * - * @param errCode 错误码 0:录制成功;-1:录制失败;-2:切换分辨率或横竖屏导致录制结束。 + * 当您调用 {@link stopLocalRecording} 停止本地媒体录制任务时,SDK 会抛出该事件回调,用于通知您录制任务的最终结果。 + * @param errCode 错误码 0:录制成功;-1:录制失败;-2:切换分辨率或横竖屏导致录制结束;-3:音频数据或者视频数据一直没有到达导致没有开始正式录制。 * @param storagePath 录制文件存储路径 */ - (void)onLocalRecordComplete:(NSInteger)errCode storagePath:(NSString *)storagePath; -///@} -#endif -@end -/// @} +/// @} ///////////////////////////////////////////////////////////////////////////////// // -// (十二)自定义视频渲染回调 +// 废弃的事件回调(建议使用对应的新回调) // ///////////////////////////////////////////////////////////////////////////////// -#pragma mark - TRTCVideoRenderDelegate -/// @addtogroup TRTCCloudDelegate_ios +/// @name 废弃的事件回调(建议使用对应的新回调) /// @{ + /** - * 视频数据帧的自定义处理回调 + * 有主播加入当前房间(已废弃) + * + * @deprecated 新版本开始不推荐使用,建议使用 {@link onRemoteUserEnterRoom} 替代之。 */ -@protocol TRTCVideoRenderDelegate <NSObject> +- (void)onUserEnter:(NSString *)userId __attribute__((deprecated("use onRemoteUserLeaveRoom instead"))); + /** - * 自定义视频渲染回调 + * 有主播离开当前房间(已废弃) * - * @param frame 待渲染的视频帧信息 - * @param userId 视频源的 userId,如果是本地视频回调(setLocalVideoRenderDelegate),该参数可以忽略 - * @param streamType 视频源类型,例如,使用摄像头画面或屏幕分享画面等 + * @deprecated 新版本开始不推荐使用,建议使用 {@link onRemoteUserLeaveRoom} 替代之。 */ -@optional -- (void) onRenderVideoFrame:(TRTCVideoFrame * _Nonnull)frame userId:(NSString* __nullable)userId streamType:(TRTCVideoStreamType)streamType; +- (void)onUserExit:(NSString *)userId reason:(NSInteger)reason __attribute__((deprecated("use onRemoteUserLeaveRoom instead"))); + +/** + * 音效播放已结束(已废弃) + * + * @deprecated 新版本开始不推荐使用,建议使用 {@link ITXAudioEffectManager} 接口替代之。 + * 新的接口中不再区分背景音乐和音效,而是统一用 {@link startPlayMusic} 取代之。 + */ +- (void)onAudioEffectFinished:(int)effectId 
code:(int)code __attribute__((deprecated("use ITXAudioEffectManager.startPlayMusic instead"))); -@end +/// @} +@end // End of class TRTCCloudDelegate ///////////////////////////////////////////////////////////////////////////////// // -// (十三)第三方美颜回调 +// 视频数据自定义回调 // ///////////////////////////////////////////////////////////////////////////////// +/// @name 视频数据自定义回调 +/// @{ + +@protocol TRTCVideoRenderDelegate <NSObject> + +/** + * 自定义视频渲染回调 + * + * 当您设置了本地或者远端的视频自定义渲染回调之后,SDK 就会将原本要交给渲染控件进行渲染的视频帧通过此回调接口抛送给您,以便于您进行自定义渲染。 + * @param frame 待渲染的视频帧信息 + * @param userId 视频源的 userId,如果是本地视频回调(setLocalVideoRenderDelegate),该参数可以忽略 + * @param streamType 频流类型:主路(Main)一般用于承载摄像头画面,辅路(Sub)一般用于承载屏幕分享画面。 + */ +@optional +- (void)onRenderVideoFrame:(TRTCVideoFrame *_Nonnull)frame userId:(NSString *__nullable)userId streamType:(TRTCVideoStreamType)streamType; + +@end // End of class TRTCVideoRenderDelegate @protocol TRTCVideoFrameDelegate <NSObject> +/** + * 用于对接第三方美颜组件的视频处理回调 + * + * 如果您选购了第三方美颜组件,就需要在 TRTCCloud 中设置第三方美颜回调,之后 TRTC 就会将原本要进行预处理的视频帧通过此回调接口抛送给您。 + * 之后您就可以将 TRTC 抛出的视频帧交给第三方美颜组件进行图像处理,由于抛出的数据是可读且可写的,因此第三方美颜的处理结果也可以同步给 TRTC 进行后续的编码和发送。 + * 情况一:美颜组件自身会产生新的纹理 + * 如果您使用的美颜组件会在处理图像的过程中产生一帧全新的纹理(用于承载处理后的图像),那请您在回调函数中将 dstFrame.textureId 设置为新纹理的 ID: + * <pre> + * uint32_t onProcessVideoFrame(TRTCVideoFrame * _Nonnull)srcFrame dstFrame:(TRTCVideoFrame * _Nonnull)dstFrame{ + * self.frameID += 1; + * dstFrame.pixelBuffer = [[FURenderer shareRenderer] renderPixelBuffer:srcFrame.pixelBuffer + * withFrameId:self.frameID + * items:self.renderItems + * itemCount:self.renderItems.count]; + * return 0; + * } + * </pre> + * + * + * 情况二:美颜组件需要您提供目标纹理 + * 如果您使用的第三方美颜模块并不生成新的纹理,而是需要您设置给该模块一个输入纹理和一个输出纹理,则可以考虑如下方案: + * ```ObjectiveC + * uint32_t onProcessVideoFrame(TRTCVideoFrame * _Nonnull)srcFrame dstFrame:(TRTCVideoFrame * _Nonnull)dstFrame{ + * thirdparty_process(srcFrame.textureId, srcFrame.width, srcFrame.height, dstFrame.textureId); + * return 0; + * } + * ``` + * ```java + * int 
onProcessVideoFrame(TRTCCloudDef.TRTCVideoFrame srcFrame, TRTCCloudDef.TRTCVideoFrame dstFrame) { + * thirdparty_process(srcFrame.texture.textureId, srcFrame.width, srcFrame.height, dstFrame.texture.textureId); + * return 0; + * } + * ``` + * @param srcFrame 用于承载 TRTC 采集到的摄像头画面 + * @param dstFrame 用于接收第三方美颜处理过的视频画面 + * @note 目前仅支持 OpenGL 纹理方案( PC 仅支持 TRTCVideoBufferType_Buffer 格式)。 + */ @optional +- (uint32_t)onProcessVideoFrame:(TRTCVideoFrame *_Nonnull)srcFrame dstFrame:(TRTCVideoFrame *_Nonnull)dstFrame; /** -* 第三方美颜的视频数据回调,需要使用 TRTCCloud 中的 setLocalVideoProcessDelegete 接口进行设置 -* -* @param srcFrame 用于承载 TRTC 采集到的摄像头画面 -* @param dstFrame 用于接收第三方美颜处理过的视频画面 -* @note 目前仅支持 OpenGL 纹理方案 -* -* 【使用相芯】 -* 由于相芯的美颜模块会在处理图像的过程中产生一个全新的纹理,因此需要您在回调函数中将 dstFrame.textureId 设置为相芯处理后的新纹理。 -* <pre> -* uint32_t onProcessVideoFrame(TRTCVideoFrame * _Nonnull)srcFrame dstFrame:(TRTCVideoFrame * _Nonnull)dstFrame -* { -* uint32_t dstTextureId = renderItemWithTexture(srcFrame.textureId, srcFrame.width, srcFrame.height); -* dstFrame.textureId = dstTextureId; -* return 0; -* } -* </pre> -* -* 【其他方案】 -* 如果您使用的第三方美颜模块并不生成新的纹理,而是需要您设置给该模块一个输入纹理和一个输出纹理,则可以考虑如下方案: -* <pre> -* uint32_t onProcessVideoFrame(TRTCVideoFrame * _Nonnull)srcFrame dstFrame:(TRTCVideoFrame * _Nonnull)dstFrame -* { -* thirdparty_process(srcFrame.textureId, srcFrame.width, srcFrame.height, dstFrame.textureId); -* return 0; -* } -* </pre> -* -**/ -- (uint32_t)onProcessVideoFrame:(TRTCVideoFrame * _Nonnull)srcFrame dstFrame:(TRTCVideoFrame * _Nonnull)dstFrame; - -/** - * SDK 内部的 OpenGL 环境的销毁通知 + * SDK 内部 OpenGL 环境被销的通知 */ +@optional - (void)onGLContextDestory; -@end +@end // End of class TRTCVideoFrameDelegate +/// @} ///////////////////////////////////////////////////////////////////////////////// // -// (十四)音频数据回调 +// 音频数据自定义回调 // ///////////////////////////////////////////////////////////////////////////////// -/** - * 声音数据帧的自定义处理回调 - */ +/// @name 音频数据自定义回调 +/// @{ + @protocol TRTCAudioFrameDelegate <NSObject> @optional 
/** - * 本地麦克风采集到的原始音频数据回调 + * 本地采集并经过音频模块前处理后的音频数据回调 * - * @param frame 音频数据 - * @note - 请不要在此回调函数中做任何耗时操作,建议直接拷贝到另一线程进行处理,否则会导致各种声音问题。 - * @note - 此接口回调出的音频数据 **不包含** 背景音、音效、混响等前处理效果,延迟极低。 - * - 此接口回调出的音频数据支持修改。 + * 当您设置完音频数据自定义回调之后,SDK 内部会把刚采集到并经过前处理(ANS、AEC、AGC)之后的数据,以 PCM 格式的形式通过本接口回调给您。 + * - 此接口回调出的音频时间帧长固定为0.02s,格式为 PCM 格式。 + * - 由时间帧长转化为字节帧长的公式为【采样率 × 时间帧长 × 声道数 × 采样点位宽】。 + * - 以 TRTC 默认的音频录制格式48000采样率、单声道、16采样点位宽为例,字节帧长为【48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 + * + * @param frame PCM 格式的音频数据帧 + * @note + * 1. 请不要在此回调函数中做任何耗时操作,由于 SDK 每隔 20ms 就要处理一帧音频数据,如果您的处理时间超过 20ms,就会导致声音异常。 + * 2. 此接口回调出的音频数据是可读写的,也就是说您可以在回调函数中同步修改音频数据,但请保证处理耗时。 + * 3. 此接口回调出的音频数据已经经过了前处理(ANS、AEC、AGC),但**不包含**背景音、音效、混响等前处理效果,延迟较低。 */ -- (void) onCapturedRawAudioFrame:(TRTCAudioFrame *)frame; +- (void)onCapturedRawAudioFrame:(TRTCAudioFrame *)frame; /** - * 本地采集并经过音频模块前处理后的音频数据回调 + * 本地采集并经过音频模块前处理、音效处理和混 BGM 后的音频数据回调 * - * @param frame 音频数据 - * @note - 请不要在此回调函数中做任何耗时操作,建议直接拷贝到另一线程进行处理,否则会导致各种声音问题。 - * @note - 此接口回调出的音频数据包含背景音、音效、混响等前处理效果,延迟较高。 - * @note - 此接口回调出的音频数据支持修改。 - * @note - 此接口回调出的音频时间帧长固定为 0.02s。 - 由时间帧长转化为字节帧长的公式为【采样率 × 时间帧长 × 声道数 × 采样点位宽】。 - 以SDK默认的音频录制格式 48000 采样率、单声道、16采样点位宽为例,字节帧长为【48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 + * 当您设置完音频数据自定义回调之后,SDK 内部会把刚采集到并经过前处理、音效处理和混 BGM 之后的数据,在最终进行网络编码之前,以 PCM 格式的形式通过本接口回调给您。 + * - 此接口回调出的音频时间帧长固定为0.02s,格式为 PCM 格式。 + * - 由时间帧长转化为字节帧长的公式为【采样率 × 时间帧长 × 声道数 × 采样点位宽】。 + * - 以 TRTC 默认的音频录制格式48000采样率、单声道、16采样点位宽为例,字节帧长为【48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 + * + * 特殊说明: + * 您可以通过设置接口中的 `TRTCAudioFrame.extraData` 字段,达到传输信令的目的。 + * 由于音频帧头部的数据块不能太大,建议您写入 `extraData` 时,尽量将信令控制在几个字节的大小,如果超过 100 个字节,写入的数据不会被发送。 + * 房间内其他用户可以通过 {@link TRTCAudioFrameDelegate} 中的 `onRemoteUserAudioFrame` 中的 `TRTCAudioFrame.extraData` 字段回调接收数据。 + * + * @param frame PCM 格式的音频数据帧 + * @note + * 1. 请不要在此回调函数中做任何耗时操作,由于 SDK 每隔 20ms 就要处理一帧音频数据,如果您的处理时间超过 20ms,就会导致声音异常。 + * 2. 此接口回调出的音频数据是可读写的,也就是说您可以在回调函数中同步修改音频数据,但请保证处理耗时。 + * 3. 
此接口回调出的数据已经经过了前处理(ANS、AEC、AGC)、音效和混 BGM 处理,声音的延迟相比于 {@link onCapturedRawAudioFrame} 要高一些。 */ -- (void) onLocalProcessedAudioFrame:(TRTCAudioFrame *)frame; +- (void)onLocalProcessedAudioFrame:(TRTCAudioFrame *)frame; /** - * 混音前的每一路远程用户的音频数据,即混音前的各路原始数据。例如,对某一路音频进行文字转换时,您必须使用该路音频的原始数据 + * 混音前的每一路远程用户的音频数据 + * + * 当您设置完音频数据自定义回调之后,SDK 内部会把远端的每一路原始数据,在最终混音之前,以 PCM 格式的形式通过本接口回调给您。 + * - 此接口回调出的音频时间帧长固定为0.02s,格式为 PCM 格式。 + * - 由时间帧长转化为字节帧长的公式为【采样率 × 时间帧长 × 声道数 × 采样点位宽】。 + * - 以 TRTC 默认的音频录制格式48000采样率、单声道、16采样点位宽为例,字节帧长为【48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 * - * @param frame 音频数据 - * @param userId 用户标识 - * @note - 此接口回调出的音频数据是只读的,不支持修改。 + * @param frame PCM 格式的音频数据帧 + * @param userId 用户标识 + * @note 此接口回调出的音频数据是只读的,不支持修改 */ -- (void) onRemoteUserAudioFrame:(TRTCAudioFrame *)frame userId:(NSString *)userId; +- (void)onRemoteUserAudioFrame:(TRTCAudioFrame *)frame userId:(NSString *)userId; /** - * 各路音频数据混合后的音频数据 + * 将各路待播放音频混合之后并在最终提交系统播放之前的数据回调 + * + * 当您设置完音频数据自定义回调之后,SDK 内部会把各路待播放的音频混合之后的音频数据,在提交系统播放之前,以 PCM 格式的形式通过本接口回调给您。 + * - 此接口回调出的音频时间帧长固定为0.02s,格式为 PCM 格式。 + * - 由时间帧长转化为字节帧长的公式为【采样率 × 时间帧长 × 声道数 × 采样点位宽】。 + * - 以 TRTC 默认的音频录制格式48000采样率、单声道、16采样点位宽为例,字节帧长为【48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 * - * @param frame 音频数据 - * @note - 请不要在此回调函数中做任何耗时操作,建议直接拷贝到另一线程进行处理,否则会导致各种声音问题。 - * @note - 此接口回调出的音频数据支持修改。 - * @note - 此接口回调出的音频时间帧长固定为 0.02s。 - 由时间帧长转化为字节帧长的公式为【采样率 × 时间帧长 × 声道数 × 采样点位宽】。 - 以SDK默认的音频播放格式 48000 采样率、双声道、16采样点位宽为例,字节帧长为【48000 × 0.02s × 2 × 16bit = 30720bit = 3840字节】。 - * @note - 此接口回调出的音频数据是各路音频播放数据的混合,不包含耳返的音频数据。 + * @param frame PCM 格式的音频数据帧 + * @note + * 1. 请不要在此回调函数中做任何耗时操作,由于 SDK 每隔 20ms 就要处理一帧音频数据,如果您的处理时间超过 20ms,就会导致声音异常。 + * 2. 此接口回调出的音频数据是可读写的,也就是说您可以在回调函数中同步修改音频数据,但请保证处理耗时。 + * 3. 
此接口回调出的是对各路待播放音频数据的混合,但其中并不包含耳返的音频数据。 */ -- (void) onMixedPlayAudioFrame:(TRTCAudioFrame *)frame; +- (void)onMixedPlayAudioFrame:(TRTCAudioFrame *)frame; /** -* SDK所有音频数据混合后的数据回调(包括采集音频数据和所有播放音频数据) -* -* @param frame 音频数据 -* @note - 此接口回调出的音频数据不支持修改。 -* @note - 此接口回调出的音频时间帧长固定为 0.02s。音频格式固定为48000采样率、单声道、16采样点位宽。 - 由时间帧长转化为字节帧长的公式为【采样率 × 时间帧长 × 声道数 × 采样点位宽】。 - 因此回调的字节帧长固定为【48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 -* @note - 此接口回调出的是SDK所有音频数据的混合数据,包括:经过特效(包括混响、变声等)处理后的本地麦克风采集数据, - 所有远程用户的数据,所有背景音和音效数据。不包括耳返数据。 -*/ -- (void) onMixedAllAudioFrame:(TRTCAudioFrame *)frame; + * SDK 所有音频混合后的音频数据(包括采集到的和待播放的) + * + * 当您设置完音频数据自定义回调之后,SDK 内部会把所有采集到的和待播放的音频数据混合起来,以 PCM 格式的形式通过本接口回调给您,便于您进行自定义录制。 + * - 此接口回调出的音频时间帧长固定为0.02s,格式为 PCM 格式。 + * - 由时间帧长转化为字节帧长的公式为【采样率 × 时间帧长 × 声道数 × 采样点位宽】。 + * - 以 TRTC 默认的音频录制格式48000采样率、单声道、16采样点位宽为例,字节帧长为【48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 + * + * @param frame PCM 格式的音频数据帧 + * @note + * 1. 此接口回调出的是SDK所有音频数据的混合数据,包括:经过 3A 前处理、特效叠加以及背景音乐混音后的本地音频,所有远端音频,但不包括耳返音频。 + * 2. 
此接口回调出的音频数据不支持修改。 + */ +- (void)onMixedAllAudioFrame:(TRTCAudioFrame *)frame; -@end +@end // End of class TRTCAudioFrameDelegate +/// @} ///////////////////////////////////////////////////////////////////////////////// // -// (十五)Log 信息回调 +// 更多事件回调接口 // ///////////////////////////////////////////////////////////////////////////////// -/** - * 日志相关回调 - * - * 建议在比较早初始化的类中设置回调委托对象,例如 AppDelegate - */ +/// @name 更多事件回调接口 +/// @{ + @protocol TRTCLogDelegate <NSObject> + +@optional + /** - * 有日志打印时的回调 + * 本地 LOG 的打印回调 * + * 如果您希望捕获 SDK 的本地日志打印行为,可以通过设置日志回调,让 SDK 将要打印的日志都通过本回调接口抛送给您。 * @param log 日志内容 - * @param level 日志等级,参见 TRTCLogLevel - * @param module 值暂无具体意义,目前为固定值 TXLiteAVSDK + * @param level 日志等级 参见 TRTC_LOG_LEVEL + * @param module 保留字段,暂无具体意义,目前为固定值 TXLiteAVSDK */ -@optional --(void) onLog:(nullable NSString*)log LogLevel:(TRTCLogLevel)level WhichModule:(nullable NSString*)module; +- (void)onLog:(nullable NSString *)log LogLevel:(TRTCLogLevel)level WhichModule:(nullable NSString *)module; -@end +@end // End of class TRTCLogDelegate /// @} - NS_ASSUME_NONNULL_END + +/// @} diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCStatistics.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCStatistics.h index adbca9e..5e62b94 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCStatistics.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TRTCStatistics.h @@ -1,135 +1,191 @@ +// Copyright (c) 2021 Tencent. All rights reserved. 
-/* - * Module: TRTCStatistics @ TXLiteAVSDK - * - * Function: 腾讯云视频通话功能的质量统计相关接口 - * +/** + * Module: TRTC 音视频统计指标(只读) + * Function: TRTC SDK 会以两秒钟一次的频率向您汇报当前实时的音视频指标(帧率、码率、卡顿情况等) */ -///@addtogroup TRTCCloudDef_ios -///@{ - -/// 自己本地的音视频统计信息 -@interface TRTCLocalStatistics : NSObject +#import "TXLiteAVSymbolExport.h" +/// @defgroup TRTCStatisic_ios TRTCStatisic +/// TRTC 音视频统计指标 +/// @{ + +///////////////////////////////////////////////////////////////////////////////// +// +// 本地的音视频统计指标 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 本地的音视频统计指标 +/// @{ + +/** + * 本地的音视频统计指标 + */ +LITEAV_EXPORT @interface TRTCLocalStatistics : NSObject -/// 视频宽度 -@property (nonatomic, assign) uint32_t width; +///【字段含义】本地视频的宽度,单位 px +@property(nonatomic, assign) uint32_t width; -/// 视频高度 -@property (nonatomic, assign) uint32_t height; +///【字段含义】本地视频的高度,单位 px +@property(nonatomic, assign) uint32_t height; -/// 帧率(fps) -@property (nonatomic, assign) uint32_t frameRate; +///【字段含义】本地视频的帧率,即每秒钟会有多少视频帧,单位:FPS +@property(nonatomic, assign) uint32_t frameRate; -/// 视频发送码率(Kbps) -@property (nonatomic, assign) uint32_t videoBitrate; +///【字段含义】远端视频的码率,即每秒钟新产生视频数据的多少,单位 Kbps +@property(nonatomic, assign) uint32_t videoBitrate; -/// 音频采样率(Hz) -@property (nonatomic, assign) uint32_t audioSampleRate; +///【字段含义】远端音频的采样率,单位 Hz +@property(nonatomic, assign) uint32_t audioSampleRate; -/// 音频发送码率(Kbps) -@property (nonatomic, assign) uint32_t audioBitrate; +///【字段含义】本地音频的码率,即每秒钟新产生音频数据的多少,单位 Kbps +@property(nonatomic, assign) uint32_t audioBitrate; -/// 流类型(大画面 | 小画面 | 辅路画面) -@property (nonatomic, assign) TRTCVideoStreamType streamType; +///【字段含义】视频流类型(高清大画面|低清小画面|辅流画面) +@property(nonatomic, assign) TRTCVideoStreamType streamType; -/// 音频设备采集状态,用于检测外接音频设备的健康度 +///【字段含义】音频设备采集状态(用于检测音频外设的健康度) /// 0:采集设备状态正常;1:检测到长时间静音;2:检测到破音;3:检测到声音异常间断。 -@property (nonatomic, assign) uint32_t audioCaptureState; +@property(nonatomic, assign) uint32_t 
audioCaptureState; + @end +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// 远端的音视频统计指标 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 远端的音视频统计指标 +/// @{ + +/** + * 远端的音视频统计指标 + */ +LITEAV_EXPORT @interface TRTCRemoteStatistics : NSObject + +///【字段含义】用户 ID +@property(nonatomic, retain) NSString* userId; -/// 远端成员的音视频统计信息 -@interface TRTCRemoteStatistics : NSObject +///【字段含义】音频流的总丢包率(%) +/// audioPacketLoss 代表音频流历经“主播 => 云端 => 观众”这样一条完整的传输链路后,最终在观众端统计到的丢包率。 +/// audioPacketLoss 越小越好,丢包率为0即表示该路音频流的所有数据均已经完整地到达了观众端。 +///如果出现了 downLoss == 0 但 audioPacketLoss != 0 的情况,说明该路音频流在“云端=>观众”这一段链路上没有出现丢包,但是在“主播=>云端”这一段链路上出现了不可恢复的丢包。 +@property(nonatomic, assign) uint32_t audioPacketLoss; -/// 用户 ID,指定是哪个用户的视频流 -@property (nonatomic, retain) NSString* userId; +///【字段含义】该路视频流的总丢包率(%) +/// videoPacketLoss 代表该路视频流历经“主播 => 云端 => 观众”这样一条完整的传输链路后,最终在观众端统计到的丢包率。 +/// videoPacketLoss 越小越好,丢包率为0即表示该路视频流的所有数据均已经完整地到达了观众端。 +///如果出现了 downLoss == 0 但 videoPacketLoss != 0 的情况,说明该路视频流在“云端=>观众”这一段链路上没有出现丢包,但是在“主播=>云端”这一段链路上出现了不可恢复的丢包。 +@property(nonatomic, assign) uint32_t videoPacketLoss; -/// 该线路的总丢包率(%) -/// 这个值越小越好,例如,丢包率为0表示网络很好。 -/// 丢包率是该线路的 userId 从上行到服务器再到下行的总丢包率。 -/// 如果 downLoss 为0,但是 finalLoss 不为0,说明该 userId 上行时出现了无法恢复的丢包。 -@property (nonatomic, assign) uint32_t finalLoss; +///【字段含义】远端视频的宽度,单位 px +@property(nonatomic, assign) uint32_t width; -/// 视频宽度 -@property (nonatomic, assign) uint32_t width; +///【字段含义】远端视频的高度,单位 px +@property(nonatomic, assign) uint32_t height; -/// 视频高度 -@property (nonatomic, assign) uint32_t height; +///【字段含义】远端视频的帧率,单位:FPS +@property(nonatomic, assign) uint32_t frameRate; -/// 接收帧率(fps) -@property (nonatomic, assign) uint32_t frameRate; +///【字段含义】远端视频的码率,单位 Kbps +@property(nonatomic, assign) uint32_t videoBitrate; -/// 视频码率(Kbps) -@property (nonatomic, assign) uint32_t videoBitrate; +///【字段含义】本地音频的采样率,单位 Hz +@property(nonatomic, assign) uint32_t 
audioSampleRate; -/// 音频采样率(Hz) -@property (nonatomic, assign) uint32_t audioSampleRate; +///【字段含义】本地音频的码率,单位 Kbps +@property(nonatomic, assign) uint32_t audioBitrate; -/// 音频码率(Kbps) -@property (nonatomic, assign) uint32_t audioBitrate; +///【字段含义】播放延迟,单位 ms +///为了避免网络抖动和网络包乱序导致的声音和画面卡顿,TRTC 会在播放端管理一个播放缓冲区,用于对接收到的网络数据包进行整理, +///该缓冲区的大小会根据当前的网络质量进行自适应调整,该缓冲区的大小折算成以毫秒为单位的时间长度,也就是 jitterBufferDelay。 +@property(nonatomic, assign) uint32_t jitterBufferDelay; -/// 播放时延(ms) -@property (nonatomic, assign) uint32_t jitterBufferDelay; +///【字段含义】端到端延迟,单位 ms +/// point2PointDelay 代表 “主播=>云端=>观众” 的延迟,更准确地说,它代表了“采集=>编码=>网络传输=>接收=>缓冲=>解码=>播放” 全链路的延迟。 +/// point2PointDelay 需要本地和远端的 SDK 均为 8.5 及以上的版本才生效,若远端用户为 8.5 以前的版本,此数值会一直为0,代表无意义。 +@property(nonatomic, assign) uint32_t point2PointDelay; -/// 端对端延迟(ms) -/// 该字段为全链路延迟统计,链路包含:采集->编码->网络传输->接收->缓冲->解码->播放 -/// 延迟以 audio 为基准进行计算。需要本地和远端均为8.5版本以上时才生效 -/// 若远端用户为低版本,对应延迟会回调为0,此时代表无效值 -@property (nonatomic, assign) uint32_t point2PointDelay; +///【字段含义】音频播放的累计卡顿时长,单位 ms +@property(nonatomic, assign) uint32_t audioTotalBlockTime; -/// 音频播放卡顿累计时长(ms) -@property (nonatomic, assign) uint32_t audioTotalBlockTime; +///【字段含义】音频播放卡顿率,单位 (%) +///音频播放卡顿率(audioBlockRate) = 音频播放的累计卡顿时长(audioTotalBlockTime) / 音频播放的总时长 +@property(nonatomic, assign) uint32_t audioBlockRate; -/// 音频播放卡顿率,音频卡顿的累计时长占音频总播放时长的百分比 (%) -@property (nonatomic, assign) uint32_t audioBlockRate; +///【字段含义】视频播放的累计卡顿时长,单位 ms +@property(nonatomic, assign) uint32_t videoTotalBlockTime; -/// 视频播放卡顿累计时长(ms) -@property (nonatomic, assign) uint32_t videoTotalBlockTime; +///【字段含义】视频播放卡顿率,单位 (%) +///视频播放卡顿率(videoBlockRate) = 视频播放的累计卡顿时长(videoTotalBlockTime) / 视频播放的总时长 +@property(nonatomic, assign) uint32_t videoBlockRate; -/// 音频播放卡顿率,视频卡顿累计时长占视频总播放时长的百分比(%) -@property (nonatomic, assign) uint32_t videoBlockRate; +///【字段含义】该路音视频流的总丢包率(%) +///已废弃,不推荐使用;建议使用 audioPacketLoss、videoPacketLoss 替代 +@property(nonatomic, assign) uint32_t finalLoss __attribute__((deprecated("Use audioPacketLoss and 
videoPacketLoss instead."))); -/// 流类型(大画面 | 小画面 | 辅路画面) -@property (nonatomic, assign) TRTCVideoStreamType streamType; +///【字段含义】视频流类型(高清大画面|低清小画面|辅流画面) +@property(nonatomic, assign) TRTCVideoStreamType streamType; @end +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// 网络和性能的汇总统计指标 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 网络和性能的汇总统计指标 +/// @{ + +/** + * 网络和性能的汇总统计指标 + */ +LITEAV_EXPORT @interface TRTCStatistics : NSObject + +///【字段含义】当前应用的 CPU 使用率,单位 (%),Android 8.0 以上不支持 +@property(nonatomic, assign) uint32_t appCpu; -/// 统计数据 -@interface TRTCStatistics : NSObject +///【字段含义】当前系统的 CPU 使用率,单位 (%),Android 8.0 以上不支持 +@property(nonatomic, assign) uint32_t systemCpu; -/// C -> S 上行丢包率(%), -/// 该值越小越好,例如,丢包率为 0 表示网络很好, -/// 丢包率为 30@% 则意味着 SDK 向服务器发送的数据包中会有 30@% 丢失在上行传输中。 -@property (nonatomic, assign) uint32_t upLoss; +///【字段含义】从 SDK 到云端的上行丢包率,单位 (%) +///该数值越小越好,如果 upLoss 为 0%,则意味着上行链路的网络质量很好,上传到云端的数据包基本不发生丢失。 +///如果 upLoss 为 30%,则意味着 SDK 向云端发送的音视频数据包中,会有 30% 丢失在传输链路中。 +@property(nonatomic, assign) uint32_t upLoss; -/// S -> C 下行丢包率(%), -/// 该值越小越好,例如,丢包率为0表示网络很好, -/// 丢包率为 30@% 则意味着 SDK 向服务器发送的数据包中会有 30@% 丢失在下行传输中。 -@property (nonatomic, assign) uint32_t downLoss; +///【字段含义】从云端到 SDK 的下行丢包率,单位 (%) +///该数值越小越好,如果 downLoss 为 0%,则意味着下行链路的网络质量很好,从云端接收的数据包基本不发生丢失。 +///如果 downLoss 为 30%,则意味着云端向 SDK 传输的音视频数据包中,会有 30% 丢失在传输链路中。 +@property(nonatomic, assign) uint32_t downLoss; -/// 当前 App 的 CPU 使用率(%) -@property (nonatomic, assign) uint32_t appCpu; +///【字段含义】从 SDK 到云端的往返延时,单位 ms +///该数值代表从 SDK 发送一个网络包到云端,再从云端回送一个网络包到 SDK 的总计耗时,也就是一个网络包经历 “SDK=>云端=>SDK” 的总耗时。 +///该数值越小越好:如果 rtt < 50ms,意味着较低的音视频通话延迟;如果 rtt > 200ms,则意味着较高的音视频通话延迟。 +///需要特别解释的是,rtt 代表 “SDK=>云端=>SDK” 的总耗时,所不需要区分 upRtt 和 downRtt。 +@property(nonatomic, assign) uint32_t rtt; -/// 当前系统的 CPU 使用率(%) -@property (nonatomic, assign) uint32_t systemCpu; +///【字段含义】从 SDK 到本地路由器的往返时延,单位 ms +///该数值代表从 SDK 
发送一个网络包到本地路由器网关,再从网关回送一个网络包到 SDK 的总计耗时,也就是一个网络包经历 “SDK=>网关=>SDK” 的总耗时。 +///该数值越小越好:如果 gatewayRtt < 50ms,意味着较低的音视频通话延迟;如果 gatewayRtt > 200ms,则意味着较高的音视频通话延迟。 +///当网络类型为蜂窝网时,该值无效。 +@property(nonatomic, assign) uint32_t gatewayRtt; -/// 延迟(毫秒), -/// 指 SDK 到腾讯云服务器的一次网络往返时间,该值越小越好。 -/// 一般低于 50ms 的 rtt 相对理想,而高于 100ms 的 rtt 会引入较大的通话延时。 -/// 由于数据上下行共享一条网络连接,所以 local 和 remote 的 rtt 相同。 -@property (nonatomic, assign) uint32_t rtt; +///【字段含义】总发送字节数(包含信令数据和音视频数据),单位:字节数(Bytes) +@property(nonatomic, assign) uint64_t sentBytes; -/// 总接收字节数(包含信令及音视频) -@property (nonatomic, assign) uint64_t receivedBytes; +///【字段含义】总接收字节数(包含信令数据和音视频数据),单位:字节数(Bytes) +@property(nonatomic, assign) uint64_t receivedBytes; -/// 总发送字节数(包含信令及音视频) -@property (nonatomic, assign) uint64_t sentBytes; +///【字段含义】本地的音视频统计信息 +///由于本地可能有三路音视频流(即高清大画面,低清小画面,以及辅流画面),因此本地的音视频统计信息是一个数组。 +@property(nonatomic, strong) NSArray<TRTCLocalStatistics*>* localStatistics; -/// 自己本地的音视频统计信息,可能有主画面、小画面以及辅路画面等多路的情况,因此是一个数组 -@property (nonatomic, strong) NSArray<TRTCLocalStatistics*>* localStatistics; +///【字段含义】远端的音视频统计信息 +///因为同时可能有多个远端用户,而且每个远端用户同时可能有多路音视频流(即高清大画面,低清小画面,以及辅流画面),因此远端的音视频统计信息是一个数组。 +@property(nonatomic, strong) NSArray<TRTCRemoteStatistics*>* remoteStatistics; -/// 远端成员的音视频统计信息,可能有主画面、小画面以及辅路画面等多路的情况,因此是一个数组 -@property (nonatomic, strong) NSArray<TRTCRemoteStatistics*>* remoteStatistics; @end -///@} +/// @} + +/// @} diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXAudioCustomProcessDelegate.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXAudioCustomProcessDelegate.h index 9155c38..baf0726 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXAudioCustomProcessDelegate.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXAudioCustomProcessDelegate.h @@ -1,10 +1,4 @@ -// -// TXAudioCustomProcessDelegate.h -// TXLiteAVSDK -// -// Created by realingzhou on 2018/1/15. -// Copyright © 2018年 Tencent. All rights reserved. -// +// Copyright © 2020 Tencent. All rights reserved. 
#ifndef TXAudioCustomProcessDelegate_h #define TXAudioCustomProcessDelegate_h @@ -21,7 +15,11 @@ * @param withBgm 回调的数据是否包含bgm,当不开启回声消除时,回调的raw pcm会包含bgm */ @optional -- (void)onRecordRawPcmData:(NSData *)data timeStamp:(unsigned long long)timeStamp sampleRate:(int)sampleRate channels:(int)channels withBgm:(BOOL)withBgm; +- (void)onRecordRawPcmData:(NSData *)data + timeStamp:(unsigned long long)timeStamp + sampleRate:(int)sampleRate + channels:(int)channels + withBgm:(BOOL)withBgm; /** * 经过特效处理的声音回调 @@ -31,7 +29,10 @@ * @param channels 声道数 */ @optional -- (void)onRecordPcmData:(NSData *)data timeStamp:(unsigned long long)timeStamp sampleRate:(int)sampleRate channels:(int)channels; +- (void)onRecordPcmData:(NSData *)data + timeStamp:(unsigned long long)timeStamp + sampleRate:(int)sampleRate + channels:(int)channels; @end diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXAudioEffectManager.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXAudioEffectManager.h index 4c0de36..fd9720a 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXAudioEffectManager.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXAudioEffectManager.h @@ -1,155 +1,331 @@ -/* -* Module: TXAudioEffectManager 音乐和人声设置类 -* -* Function: 用于音乐、短音效和人声效果功能的使用 -* -*/ +// Copyright (c) 2021 Tencent. All rights reserved. 
+/** + * Module: TRTC 背景音乐、短音效和人声特效的管理类 + * Function: 用于对背景音乐、短音效和人声特效进行设置的管理类 + */ +/// @defgroup TXAudioEffectManager_ios TXAudioEffectManager +/// TRTC 背景音乐、短音效和人声特效的管理类 +/// @{ #import <Foundation/Foundation.h> - +#import "TXLiteAVSymbolExport.h" NS_ASSUME_NONNULL_BEGIN +///////////////////////////////////////////////////////////////////////////////// +// +// 音效相关的枚举值定义 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 音效相关的枚举值定义 +/// @{ + +/** + * 1.1 混响特效 + * + * 混响特效可以作用于人声之上,通过声学算法对声音进行叠加处理,模拟出各种不同环境下的临场感受,目前支持如下几种混响效果: + * 0:关闭;1:KTV;2:小房间;3:大会堂;4:低沉;5:洪亮;6:金属声;7:磁性;8:空灵;9:录音棚;10:悠扬。 + */ +typedef NS_ENUM(NSInteger, TXVoiceReverbType) { + + ///关闭特效 + TXVoiceReverbType_0 = 0, + + /// KTV + TXVoiceReverbType_1 = 1, + + ///小房间 + TXVoiceReverbType_2 = 2, + + ///大会堂 + TXVoiceReverbType_3 = 3, + + ///低沉 + TXVoiceReverbType_4 = 4, + + ///洪亮 + TXVoiceReverbType_5 = 5, + + ///金属声 + TXVoiceReverbType_6 = 6, + + ///磁性 + TXVoiceReverbType_7 = 7, + + ///空灵 + TXVoiceReverbType_8 = 8, + + ///录音棚 + TXVoiceReverbType_9 = 9, + + ///悠扬 + TXVoiceReverbType_10 = 10, + +}; + +/** + * 1.2 变声特效 + * + * 变声特效可以作用于人声之上,通过声学算法对人声进行二次处理,以获得与原始声音所不同的音色,目前支持如下几种变声特效: + * 0:关闭;1:熊孩子;2:萝莉;3:大叔;4:重金属;5:感冒;6:外语腔;7:困兽;8:肥宅;9:强电流;10:重机械;11:空灵。 + */ +typedef NS_ENUM(NSInteger, TXVoiceChangeType) { + + ///关闭 + TXVoiceChangeType_0 = 0, + + ///熊孩子 + TXVoiceChangeType_1 = 1, + + ///萝莉 + TXVoiceChangeType_2 = 2, + + ///大叔 + TXVoiceChangeType_3 = 3, + + ///重金属 + TXVoiceChangeType_4 = 4, + + ///感冒 + TXVoiceChangeType_5 = 5, + + ///外语腔 + TXVoiceChangeType_6 = 6, + + ///困兽 + TXVoiceChangeType_7 = 7, + + ///肥宅 + TXVoiceChangeType_8 = 8, + + ///强电流 + TXVoiceChangeType_9 = 9, + + ///重机械 + TXVoiceChangeType_10 = 10, + + ///空灵 + TXVoiceChangeType_11 = 11, + +}; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// 背景音乐的播放事件回调 +// +///////////////////////////////////////////////////////////////////////////////// 
+/// @name 背景音乐的事件回调接口 +/// @{ + +// Playback progress block of background music + +/** + * 背景音乐开始播放 + */ typedef void (^TXAudioMusicStartBlock)(NSInteger errCode); + +/** + * 背景音乐的播放进度 + */ typedef void (^TXAudioMusicProgressBlock)(NSInteger progressMs, NSInteger durationMs); + +/** + * 背景音乐已经播放完毕 + */ typedef void (^TXAudioMusicCompleteBlock)(NSInteger errCode); -@class TXAudioMusicParam; -typedef NS_ENUM(NSInteger, TXVoiceChangeType); -typedef NS_ENUM(NSInteger, TXVoiceReverbType); +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// 背景音乐的播放控制信息 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 背景音乐的播放控制信息 +/// @{ + +/** + * 背景音乐的播放控制信息 + * + * 该信息用于在接口 {@link startPlayMusic} 中指定背景音乐的相关信息,包括播放 ID、文件路径和循环次数等: + * 1. 如果要多次播放同一首背景音乐,请不要每次播放都分配一个新的 ID,我们推荐使用相同的 ID。 + * 2. 若您希望同时播放多首不同的音乐,请为不同的音乐分配不同的 ID 进行播放。 + * 3. 如果使用同一个 ID 播放不同音乐,SDK 会先停止播放旧的音乐,再播放新的音乐。 + */ +LITEAV_EXPORT @interface TXAudioMusicParam : NSObject + +///【字段含义】音乐 ID <br/> +///【特殊说明】SDK 允许播放多路音乐,因此需要使用 ID 进行标记,用于控制音乐的开始、停止、音量等。 +@property(nonatomic) int32_t ID; + +///【字段含义】音效文件的完整路径或 URL 地址。支持的音频格式包括 MP3、AAC、M4A、WAV +@property(nonatomic, copy) NSString *path; + +///【字段含义】音乐循环播放的次数 <br/> +///【推荐取值】取值范围为0 - 任意正整数,默认值:0。0表示播放音乐一次;1表示播放音乐两次;以此类推 +@property(nonatomic) NSInteger loopCount; + +///【字段含义】是否将音乐传到远端 <br/> +///【推荐取值】YES:音乐在本地播放的同时,远端用户也能听到该音乐;NO:主播只能在本地听到该音乐,远端观众听不到。默认值:NO。 +@property(nonatomic) BOOL publish; + +///【字段含义】播放的是否为短音乐文件 <br/> +///【推荐取值】YES:需要重复播放的短音乐文件;NO:正常的音乐文件。默认值:NO +@property(nonatomic) BOOL isShortFile; + +///【字段含义】音乐开始播放时间点,单位:毫秒。 +@property(nonatomic) NSInteger startTimeMS; -@interface TXAudioEffectManager : NSObject +///【字段含义】音乐结束播放时间点,单位毫秒,0表示播放至文件结尾。 +@property(nonatomic) NSInteger endTimeMS; -/// TXAudioEffectManager对象不可直接创建 -/// 要通过 `TRTCCloud` 或 `TXLivePush` 的 `getAudioEffectManager` 接口获取 +@end +/// @} + +// Definition of audio effect management module +LITEAV_EXPORT 
@interface TXAudioEffectManager : NSObject + +/** + * TXAudioEffectManager对象不可直接被创建 + * 要通过 `TRTCCloud` 或 `TXLivePush` 中的 `getAudioEffectManager` 接口获取 + */ - (instancetype)init NS_UNAVAILABLE; ///////////////////////////////////////////////////////////////////////////////// // -// (一)人声相关特效函数 +// 人声相关的特效接口 // ///////////////////////////////////////////////////////////////////////////////// - -/// @name 人声相关特效函数 +/// @name 人声相关的特效接口 /// @{ - + /** * 1.1 开启耳返 * - * 开启后会在耳机里听到自己的声音。 - * - * @note 仅在戴耳机时有效,暂时仅支持部分采集延迟较低的机型 - * @param enable true:开启;false:关闭 + * 主播开启耳返后,可以在耳机里听到麦克风采集到的自己发出的声音,该特效适用于主播唱歌的应用场景中。 + * 需要您注意的是,由于蓝牙耳机的硬件延迟非常高,所以在主播佩戴蓝牙耳机时无法开启此特效,请尽量在用户界面上提示主播佩戴有线耳机。 + * 同时也需要注意,并非所有的手机开启此特效后都能达到优秀的耳返效果,我们已经对部分耳返效果不佳的手机屏蔽了该特效。 + * @note 仅在主播佩戴耳机时才能开启此特效,同时请您提示主播佩戴有线耳机。 + * @param enable YES:开启;NO:关闭。 */ - (void)enableVoiceEarMonitor:(BOOL)enable; /** * 1.2 设置耳返音量 * - * @param volume 音量大小,100为原始音量,范围是:[0 ~ 150],默认值为100 - * - * @note 如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 + * 通过该接口您可以设置耳返特效中声音的音量大小。 + * @param volume 音量大小,取值范围为0 - 100,默认值:100。 + * @note 如果将 volume 设置成 100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 */ - (void)setVoiceEarMonitorVolume:(NSInteger)volume; /** - * 1.3 设置人声的混响效果(KTV、小房间、大会堂、低沉、洪亮...) + * 1.3 设置人声的混响效果 * - * @note 设置的效果在退房后会失效,如果下次进房还需要对应特效,需要调用此接口再次设置。 + * 通过该接口您可以设置人声的混响效果,具体特效请参考枚举定义{@link TXVoiceReverbType}。 + * @note 设置的效果在退出房间后会自动失效,如果下次进房还需要对应特效,需要调用此接口再次进行设置。 */ - (void)setVoiceReverbType:(TXVoiceReverbType)reverbType; /** - * 1.4 设置人声的变声特效(萝莉、大叔、重金属、外国人...) 
+ * 1.4 设置人声的变声特效 * - * @note 设置的效果在退房后会失效,如果下次进房还需要对应特效,需要调用此接口再次设置。 + * 通过该接口您可以设置人声的变声特效,具体特效请参考枚举定义{@link TXVoiceChangeType}。 + * @note 设置的效果在退出房间后会自动失效,如果下次进房还需要对应特效,需要调用此接口再次进行设置。 */ - (void)setVoiceChangerType:(TXVoiceChangeType)changerType; /** - * 1.5 设置麦克风采集人声的音量 - * - * @param volume 音量大小,100为原始音量,范围是:[0 ~ 150],默认值为100 + * 1.5 设置语音音量 * - * @note 如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 + * 该接口可以设置语音音量的大小,一般配合音乐音量的设置接口 {@link setAllMusicVolume} 协同使用,用于调谐语音和音乐在混音前各自的音量占比。 + * @param volume 音量大小,取值范围为0 - 100,默认值:100。 + * @note 如果将 volume 设置成 100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 */ - (void)setVoiceVolume:(NSInteger)volume; -/// @} +/** + * 1.6 设置语音音调 + * + * 该接口可以设置语音音调,用于实现变调不变速的目的。 + * @param pitch 音调,取值范围为-1.0f~1.0f,默认值:0.0f。 + */ +- (void)setVoicePitch:(double)pitch; +/// @} ///////////////////////////////////////////////////////////////////////////////// // -// (二)背景音乐特效函数 +// 背景音乐的相关接口 // ///////////////////////////////////////////////////////////////////////////////// - -/// @name 人声相关特效函数 +/// @name 背景音乐的相关接口 /// @{ + /** * 2.1 开始播放背景音乐 * * 每个音乐都需要您指定具体的 ID,您可以通过该 ID 对音乐的开始、停止、音量等进行设置。 - * - * @note 若您想同时播放多个音乐,请分配不同的 ID 进行播放。 - * 如果使用同一个 ID 播放不同音乐,SDK 会先停止播放旧的音乐,再播放新的音乐。 * @param musicParam 音乐参数 * @param startBlock 播放开始回调 * @param progressBlock 播放进度回调 * @param completeBlock 播放结束回调 + * @note + * 1. 如果要多次播放同一首背景音乐,请不要每次播放都分配一个新的 ID,我们推荐使用相同的 ID。 + * 2. 若您希望同时播放多首不同的音乐,请为不同的音乐分配不同的 ID 进行播放。 + * 3. 
如果使用同一个 ID 播放不同音乐,SDK 会先停止播放旧的音乐,再播放新的音乐。 */ -- (void)startPlayMusic:(TXAudioMusicParam *)musicParam - onStart:(TXAudioMusicStartBlock _Nullable)startBlock - onProgress:(TXAudioMusicProgressBlock _Nullable)progressBlock - onComplete:(TXAudioMusicCompleteBlock _Nullable)completeBlock; +- (void)startPlayMusic:(TXAudioMusicParam *)musicParam onStart:(TXAudioMusicStartBlock _Nullable)startBlock onProgress:(TXAudioMusicProgressBlock _Nullable)progressBlock onComplete:(TXAudioMusicCompleteBlock _Nullable)completeBlock; /** * 2.2 停止播放背景音乐 * - * @param id 音乐 ID + * @param id 音乐 ID */ - (void)stopPlayMusic:(int32_t)id; /** * 2.3 暂停播放背景音乐 * - * @param id 音乐 ID + * @param id 音乐 ID */ - (void)pausePlayMusic:(int32_t)id; /** * 2.4 恢复播放背景音乐 * - * @param id 音乐 ID + * @param id 音乐 ID */ - (void)resumePlayMusic:(int32_t)id; /** - * 2.5 设置背景音乐的远端音量大小,即主播可以通过此接口设置远端观众能听到的背景音乐的音量大小。 - * - * @param id 音乐 ID - * @param volume 音量大小,100为原始音量,范围是:[0 ~ 150],默认值为100 + * 2.5 设置所有背景音乐的本地音量和远端音量的大小 * - * @note 如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 + * 该接口可以设置所有背景音乐的本地音量和远端音量。 + * - 本地音量:即主播本地可以听到的背景音乐的音量大小。 + * - 远端音量:即观众端可以听到的背景音乐的音量大小。 + * @param volume 音量大小,取值范围为0 - 100,默认值:100。 + * @note 如果将 volume 设置成 100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 */ -- (void)setMusicPublishVolume:(int32_t)id volume:(NSInteger)volume; +- (void)setAllMusicVolume:(NSInteger)volume; /** - * 2.6 设置背景音乐的本地音量大小,即主播可以通过此接口设置主播自己本地的背景音乐的音量大小。 + * 2.6 设置某一首背景音乐的远端音量的大小 * + * 该接口可以细粒度地控制每一首背景音乐的远端音量,也就是观众端可听到的背景音乐的音量大小。 * @param id 音乐 ID - * @param volume 音量大小,100为原始音量,范围是:[0 ~ 150],默认值为100 - * - * @note 如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 + * @param volume 音量大小,取值范围为0 - 100;默认值:100 + * @note 如果将 volume 设置成 100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 */ -- (void)setMusicPlayoutVolume:(int32_t)id volume:(NSInteger)volume; +- (void)setMusicPublishVolume:(int32_t)id volume:(NSInteger)volume; /** - * 2.7 设置全局背景音乐的本地和远端音量的大小 + * 2.7 设置某一首背景音乐的本地音量的大小 * - * @param 
volume 音量大小,100为原始音量,范围是:[0 ~ 150],默认值为100 - * - * @note 如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 + * 该接口可以细粒度地控制每一首背景音乐的本地音量,也就是主播本地可以听到的背景音乐的音量大小。 + * @param id 音乐 ID + * @param volume 音量大小,取值范围为0 - 100,默认值:100。 + * @note 如果将 volume 设置成 100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 */ -- (void)setAllMusicVolume:(NSInteger)volume; +- (void)setMusicPlayoutVolume:(int32_t)id volume:(NSInteger)volume; /** * 2.8 调整背景音乐的音调高低 @@ -168,7 +344,7 @@ typedef NS_ENUM(NSInteger, TXVoiceReverbType); - (void)setMusicSpeedRate:(int32_t)id speedRate:(double)speedRate; /** - * 2.10 获取背景音乐当前的播放进度(单位:毫秒) + * 2.10 获取背景音乐的播放进度(单位:毫秒) * * @param id 音乐 ID * @return 成功返回当前播放时间,单位:毫秒,失败返回-1 @@ -176,82 +352,25 @@ typedef NS_ENUM(NSInteger, TXVoiceReverbType); - (NSInteger)getMusicCurrentPosInMS:(int32_t)id; /** - * 2.11 设置背景音乐的播放进度(单位:毫秒) - * - * @note 请尽量避免频繁地调用该接口,因为该接口可能会再次读写音乐文件,耗时稍高。 - * 当配合进度条使用时,请在进度条拖动完毕的回调中调用,而避免在拖动过程中实时调用。 + * 2.11 获取背景音乐的总时长(单位:毫秒) * - * @param id 音乐 ID - * @param pts 单位: 毫秒 + * @param path 音乐文件路径。 + * @return 成功返回时长,失败返回-1 */ -- (void)seekMusicToPosInMS:(int32_t)id pts:(NSInteger)pts; +- (NSInteger)getMusicDurationInMS:(NSString *)path; /** - * 2.12 获取景音乐文件的总时长(单位:毫秒) + * 2.12 设置背景音乐的播放进度(单位:毫秒) * - * @param path 音乐文件路径,如果 path 为空,那么返回当前正在播放的 music 时长。 - * @return 成功返回时长,失败返回-1 + * @param id 音乐 ID + * @param pts 单位: 毫秒 + * @note 请尽量避免过度频繁地调用该接口,因为该接口可能会再次读写音乐文件,耗时稍高。 + * 因此,当用户拖拽音乐的播放进度条时,请在用户完成拖拽操作后再调用本接口。 + * 因为 UI 上的进度条控件往往会以很高的频率反馈用户的拖拽进度,如不做频率限制,会导致较差的用户体验。 */ -- (NSInteger)getMusicDurationInMS:(NSString *)path; +- (void)seekMusicToPosInMS:(int32_t)id pts:(NSInteger)pts; /// @} - -@end - - -@interface TXAudioMusicParam : NSObject - -/// 【字段含义】音乐 ID -/// 【特殊说明】SDK 允许播放多路音乐,因此需要音乐 ID 进行标记,用于控制音乐的开始、停止、音量等 -@property (nonatomic) int32_t ID; - -/// 【字段含义】音乐文件的绝对路径 -@property (nonatomic, copy) NSString *path; - -/// 【字段含义】音乐循环播放的次数 -/// 【推荐取值】取值范围为0 - 任意正整数,默认值:0。0表示播放音乐一次;1表示播放音乐两次;以此类推 -@property (nonatomic) NSInteger loopCount; - -/// 
【字段含义】是否将音乐传到远端 -/// 【推荐取值】YES:音乐在本地播放的同时,会上行至云端,因此远端用户也能听到该音乐;NO:音乐不会上行至云端,因此只能在本地听到该音乐。默认值:NO -@property (nonatomic) BOOL publish; - -/// 【字段含义】播放的是否为短音乐文件 -/// 【推荐取值】YES:需要重复播放的短音乐文件;NO:正常的音乐文件。默认值:NO -@property (nonatomic) BOOL isShortFile; - -/// 【字段含义】音乐开始播放时间点,单位毫秒 -@property (nonatomic) NSInteger startTimeMS; - -/// 【字段含义】音乐结束播放时间点,单位毫秒,0或者-1表示播放至文件结尾。 -@property (nonatomic) NSInteger endTimeMS; - -@end - -typedef NS_ENUM(NSInteger, TXVoiceReverbType) { - TXVoiceReverbType_0 = 0, ///< 关闭混响 - TXVoiceReverbType_1 = 1, ///< KTV - TXVoiceReverbType_2 = 2, ///< 小房间 - TXVoiceReverbType_3 = 3, ///< 大会堂 - TXVoiceReverbType_4 = 4, ///< 低沉 - TXVoiceReverbType_5 = 5, ///< 洪亮 - TXVoiceReverbType_6 = 6, ///< 金属声 - TXVoiceReverbType_7 = 7, ///< 磁性 -}; - -typedef NS_ENUM(NSInteger, TXVoiceChangeType) { - TXVoiceChangeType_0 = 0, ///< 关闭变声 - TXVoiceChangeType_1 = 1, ///< 熊孩子 - TXVoiceChangeType_2 = 2, ///< 萝莉 - TXVoiceChangeType_3 = 3, ///< 大叔 - TXVoiceChangeType_4 = 4, ///< 重金属 - TXVoiceChangeType_5 = 5, ///< 感冒 - TXVoiceChangeType_6 = 6, ///< 外国人 - TXVoiceChangeType_7 = 7, ///< 困兽 - TXVoiceChangeType_8 = 8, ///< 死肥仔 - TXVoiceChangeType_9 = 9, ///< 强电流 - TXVoiceChangeType_10 = 10, ///< 重机械 - TXVoiceChangeType_11 = 11, ///< 空灵 -}; - +@end // End of interface TXAudioEffectManager +/// @} NS_ASSUME_NONNULL_END diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXAudioRawDataDelegate.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXAudioRawDataDelegate.h index 9d834fe..c844190 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXAudioRawDataDelegate.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXAudioRawDataDelegate.h @@ -1,10 +1,4 @@ -// -// TXAudioRawDataDelegate.h -// TXLiteAVSDK -// -// Created by realingzhou on 2018/2/24. -// Copyright © 2018年 Tencent. All rights reserved. -// +// Copyright © 2020 Tencent. All rights reserved. 
#ifndef TXAudioRawDataDelegate_h #define TXAudioRawDataDelegate_h diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXBeautyManager.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXBeautyManager.h index da026e1..69e864e 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXBeautyManager.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXBeautyManager.h @@ -1,12 +1,13 @@ -/* +// Copyright (c) 2021 Tencent. All rights reserved. + +/** * Module: 美颜与图像处理参数设置类 - * * Function: 修改美颜、滤镜、绿幕等参数 - * */ - #import <Foundation/Foundation.h> #import <TargetConditionals.h> +#import "TXLiteAVSymbolExport.h" + #if TARGET_OS_IPHONE #import <UIKit/UIKit.h> typedef UIImage TXImage; @@ -18,236 +19,335 @@ typedef NSImage TXImage; NS_ASSUME_NONNULL_BEGIN /// @defgroup TXBeautyManager_ios TXBeautyManager -/// 美颜及动效参数管理 +/// 美颜与图像处理参数设置类 /// @{ /** * 美颜(磨皮)算法 - * SDK 内置了多种不同的磨皮算法,您可以选择最适合您产品定位的方案。 + * + * TRTC 内置多种不同的磨皮算法,您可以选择最适合您产品定位的方案。 */ typedef NS_ENUM(NSInteger, TXBeautyStyle) { - TXBeautyStyleSmooth = 0, ///< 光滑,适用于美女秀场,效果比较明显。 - TXBeautyStyleNature = 1, ///< 自然,磨皮算法更多地保留了面部细节,主观感受上会更加自然。 - TXBeautyStylePitu = 2 ///< 由上海优图实验室提供的美颜算法,磨皮效果介于光滑和自然之间,比光滑保留更多皮肤细节,比自然磨皮程度更高。 + + ///光滑,算法比较激进,磨皮效果比较明显,适用于秀场直播。 + TXBeautyStyleSmooth = 0, + + ///自然,算法更多地保留了面部细节,磨皮效果更加自然,适用于绝大多数直播场景。 + TXBeautyStyleNature = 1, + + ///优图,由优图实验室提供,磨皮效果介于光滑和自然之间,比光滑保留更多皮肤细节,比自然磨皮程度更高。 + TXBeautyStylePitu = 2 }; -/// 美颜及动效参数管理 -@interface TXBeautyManager : NSObject +///////////////////////////////////////////////////////////////////////////////// +// +// 美颜相关接口 +// +///////////////////////////////////////////////////////////////////////////////// + +LITEAV_EXPORT @interface TXBeautyManager : NSObject /** * 设置美颜(磨皮)算法 * - * SDK 内部集成了两套风格不同的磨皮算法,一套我们取名叫“光滑”,适用于美女秀场,效果比较明显。 - * 另一套我们取名“自然”,磨皮算法更多地保留了面部细节,主观感受上会更加自然。 - * - * @param beautyStyle 美颜风格,光滑或者自然,光滑风格磨皮更加明显,适合娱乐场景。 + * TRTC 内置多种不同的磨皮算法,您可以选择最适合您产品定位的方案: + * @param beautyStyle 
美颜风格,TXBeautyStyleSmooth:光滑;TXBeautyStyleNature:自然;TXBeautyStylePitu:优图。 */ - (void)setBeautyStyle:(TXBeautyStyle)beautyStyle; /** * 设置美颜级别 - * @param level 美颜级别,取值范围0 - 9; 0表示关闭,1 - 9值越大,效果越明显。 + * + * @param beautyLevel 美颜级别,取值范围0 - 9; 0表示关闭,9表示效果最明显。 */ -- (void)setBeautyLevel:(float)level; +- (void)setBeautyLevel:(float)beautyLevel; /** * 设置美白级别 * - * @param level 美白级别,取值范围0 - 9;0表示关闭,1 - 9值越大,效果越明显。 + * @param whitenessLevel 美白级别,取值范围0 - 9;0表示关闭,9表示效果最明显。 */ -- (void)setWhitenessLevel:(float)level; +- (void)setWhitenessLevel:(float)whitenessLevel; /** * 开启清晰度增强 - * - * @param enable YES:开启清晰度增强;NO:关闭清晰度增强。默认值:YES */ - (void)enableSharpnessEnhancement:(BOOL)enable; /** * 设置红润级别 * - * @param level 红润级别,取值范围0 - 9;0表示关闭,1 - 9值越大,效果越明显。 + * @param ruddyLevel 红润级别,取值范围0 - 9;0表示关闭,9表示效果最明显。 */ -- (void)setRuddyLevel:(float)level; +- (void)setRuddyLevel:(float)ruddyLevel; /** - * 设置指定素材滤镜特效 + * 设置色彩滤镜效果 * - * @param image 指定素材,即颜色查找表图片。**必须使用 png 格式** + * 色彩滤镜,是一副包含色彩映射关系的颜色查找表图片,您可以在我们提供的官方 Demo 中找到预先准备好的几张滤镜图片。 + * SDK 会根据该查找表中的映射关系,对摄像头采集出的原始视频画面进行二次处理,以达到预期的滤镜效果。 + * @param image 包含色彩映射关系的颜色查找表图片,必须是 png 格式。 */ - (void)setFilter:(nullable TXImage *)image; + /** - * 设置滤镜浓度 + * 设置色彩滤镜的强度 * - * 在美女秀场等应用场景里,滤镜浓度的要求会比较高,以便更加突显主播的差异。 - * 我们默认的滤镜浓度是0.5,如果您觉得滤镜效果不明显,可以使用下面的接口进行调节。 - * - * @param strength 从0到1,越大滤镜效果越明显,默认值为0.5。 + * 该数值越高,色彩滤镜的作用强度越明显,经过滤镜处理后的视频画面跟原画面的颜色差异越大。 + * 我默认的滤镜浓度是0.5,如果您觉得默认的滤镜效果不明显,可以设置为 0.5 以上的数字,最大值为1。 + * @param strength 从0到1,数值越大滤镜效果越明显,默认值为0.5。 */ - (void)setFilterStrength:(float)strength; /** - * 设置绿幕背景视频,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 - * - * 此处的绿幕功能并非智能抠背,需要被拍摄者的背后有一块绿色的幕布来辅助产生特效 + * 设置绿幕背景视频 * - * @param file 视频文件路径。支持 MP4; nil 表示关闭特效。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 此接口所开启的绿幕功能不具备智能去除背景的能力,需要被拍摄者的背后有一块绿色的幕布来辅助产生特效。 + * @param path MP4格式的视频文件路径; 设置空值表示关闭特效。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- 
(void)setGreenScreenFile:(nullable NSString *)file; +- (int)setGreenScreenFile:(nullable NSString *)path; -#if TARGET_OS_IPHONE /** - * 设置大眼级别,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置大眼级别 * - * @param level 大眼级别,取值范围0 - 9;0表示关闭,1 - 9值越大,效果越明显。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param eyeScaleLevel 大眼级别,取值范围0 - 9;0表示关闭,9表示效果最明显。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setEyeScaleLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setEyeScaleLevel:(float)eyeScaleLevel; +#endif /** - * 设置瘦脸级别,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置瘦脸级别 * - * @param level 瘦脸级别,取值范围0 - 9;0表示关闭,1 - 9值越大,效果越明显。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param faceSlimLevel 瘦脸级别,取值范围0 - 9;0表示关闭,9表示效果最明显。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setFaceSlimLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setFaceSlimLevel:(float)faceSlimLevel; +#endif /** - *设置 V 脸级别,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置 V 脸级别 * - * @param level V脸级别,取值范围0 - 9;0表示关闭,1 - 9值越大,效果越明显。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param faceVLevel V脸级别,取值范围0 - 9;0表示关闭,9表示效果最明显。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setFaceVLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setFaceVLevel:(float)faceVLevel; +#endif /** - * 设置下巴拉伸或收缩,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置下巴拉伸或收缩 * - * @param level 下巴拉伸或收缩级别,取值范围-9 - 9;0 表示关闭,小于0表示收缩,大于0表示拉伸。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param chinLevel 下巴拉伸或收缩级别,取值范围-9 - 9;0 表示关闭,小于0表示收缩,大于0表示拉伸。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setChinLevel:(float)level; +#if TARGET_OS_IPHONE 
+- (int)setChinLevel:(float)chinLevel; +#endif + /** - * 设置短脸级别,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置短脸级别 * - * @param level 短脸级别,取值范围0 - 9;0表示关闭,1 - 9值越大,效果越明显。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param faceShortLevel 短脸级别,取值范围0 - 9;0表示关闭,9表示效果最明显。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setFaceShortLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setFaceShortLevel:(float)faceShortLevel; +#endif /** - * 设置窄脸级别,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置窄脸级别 * - * @param level 短脸级别,取值范围0 - 9;0表示关闭,1 - 9值越大,效果越明显。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param level 窄脸级别,取值范围0 - 9;0表示关闭,9表示效果最明显。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setFaceNarrowLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setFaceNarrowLevel:(float)faceNarrowLevel; +#endif /** - * 设置瘦鼻级别,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置瘦鼻级别 * - * @param level 瘦鼻级别,取值范围0 - 9;0表示关闭,1 - 9值越大,效果越明显。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param noseSlimLevel 瘦鼻级别,取值范围0 - 9;0表示关闭,9表示效果最明显。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setNoseSlimLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setNoseSlimLevel:(float)noseSlimLevel; +#endif /** - * 设置亮眼 ,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置亮眼级别 * - * @param level 亮眼级别,取值范围0 - 9;0表示关闭,1 - 9值越大,效果越明显。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param eyeLightenLevel 亮眼级别,取值范围0 - 9;0表示关闭,9表示效果最明显。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setEyeLightenLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setEyeLightenLevel:(float)eyeLightenLevel; +#endif /** - * 设置白牙 ,该接口仅在 [企业版 
SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置牙齿美白级别 * - * @param level 白牙级别,取值范围0 - 9;0表示关闭,1 - 9值越大,效果越明显。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param toothWhitenLevel 白牙级别,取值范围0 - 9;0表示关闭,9表示效果最明显。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setToothWhitenLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setToothWhitenLevel:(float)toothWhitenLevel; +#endif /** - * 设置祛皱 ,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置祛皱级别 * - * @param level 祛皱级别,取值范围0 - 9;0表示关闭,1 - 9值越大,效果越明显。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param wrinkleRemoveLevel 祛皱级别,取值范围0 - 9;0表示关闭,9表示效果最明显。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setWrinkleRemoveLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setWrinkleRemoveLevel:(float)wrinkleRemoveLevel; +#endif /** - * 设置祛眼袋 ,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置祛眼袋级别 * - * @param level 祛眼袋级别,取值范围0 - 9;0表示关闭,1 - 9值越大,效果越明显。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param pounchRemoveLevel 祛眼袋级别,取值范围0 - 9;0表示关闭,9表示效果最明显。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setPounchRemoveLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setPounchRemoveLevel:(float)pounchRemoveLevel; +#endif /** - * 设置法令纹 ,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置法令纹去除级别 * - * @param level 法令纹级别,取值范围0 - 9;0表示关闭,1 - 9值越大,效果越明显。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param smileLinesRemoveLevel 法令纹级别,取值范围0 - 9;0表示关闭,9表示效果最明显。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setSmileLinesRemoveLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setSmileLinesRemoveLevel:(float)smileLinesRemoveLevel; +#endif /** - * 设置发际线 ,该接口仅在 
[企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置发际线调整级别 * - * @param level 发际线级别,取值范围-9 - 9;0表示关闭,小于0表示抬高,大于0表示降低。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param foreheadLevel 发际线级别,取值范围-9 - 9;0表示关闭,9表示效果最明显。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setForeheadLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setForeheadLevel:(float)foreheadLevel; +#endif /** - * 设置眼距 ,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置眼距. * - * @param level 眼距级别,取值范围-9 - 9;0表示关闭,小于0表示拉伸,大于0表示收缩。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param eyeDistanceLevel 眼距级别,取值范围-9 - 9;0表示关闭,小于0表示拉伸,大于0表示收缩。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setEyeDistanceLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setEyeDistanceLevel:(float)eyeDistanceLevel; +#endif /** - * 设置眼角 ,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置眼角调整级别 * - * @param level 眼角级别,取值范围-9 - 9;0表示关闭,小于0表示降低,大于0表示抬高。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param eyeAngleLevel 眼角调整级别,取值范围-9 - 9;0表示关闭,9表示效果最明显。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setEyeAngleLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setEyeAngleLevel:(float)eyeAngleLevel; +#endif /** - * 设置嘴型 ,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置嘴型调整级别 * - * @param level 嘴型级别,取值范围-9 - 9;0表示关闭,小于0表示拉伸,大于0表示收缩。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param mouthShapeLevel 嘴型级别,取值范围-9 - 9;0表示关闭,小于0表示拉伸,大于0表示收缩。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setMouthShapeLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setMouthShapeLevel:(float)mouthShapeLevel; +#endif /** - * 设置鼻翼 ,该接口仅在 [企业版 
SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 设置鼻翼调整级别 * - * @param level 鼻翼级别,取值范围-9 - 9;0表示关闭,小于0表示拉伸,大于0表示收缩。 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param noseWingLevel 鼻翼调整级别,取值范围-9 - 9;0表示关闭,小于0表示拉伸,大于0表示收缩。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setNoseWingLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setNoseWingLevel:(float)noseWingLevel; +#endif /** - * 设置鼻子位置 ,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 - * @param level 鼻子位置级别,取值范围-9 - 9;0表示关闭,小于0表示抬高,大于0表示降低。 + * 设置鼻子位置 + * + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param nosePositionLevel 鼻子位置级别,取值范围-9 - 9;0表示关闭,小于0表示抬高,大于0表示降低。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setNosePositionLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setNosePositionLevel:(float)nosePositionLevel; +#endif /** - * 设置嘴唇厚度 ,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 - * @param level 嘴唇厚度级别,取值范围-9 - 9;0表示关闭,小于0表示拉伸,大于0表示收缩。 + * 设置嘴唇厚度 + * + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param lipsThicknessLevel 嘴唇厚度级别,取值范围-9 - 9;0表示关闭,小于0表示拉伸,大于0表示收缩。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setLipsThicknessLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setLipsThicknessLevel:(float)lipsThicknessLevel; +#endif /** - * 设置脸型,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 - * @param level 美型级别,取值范围0 - 9;0表示关闭,1 - 9值越大,效果越明显。 + * 设置脸型 + * + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param faceBeautyLevel 美型级别,取值范围0 - 9;0表示关闭,1 - 9值越大,效果越明显。 + * @return 0:成功;-5:当前 License 对应 feature 不支持。 */ -- (void)setFaceBeautyLevel:(float)level; +#if TARGET_OS_IPHONE +- (int)setFaceBeautyLevel:(float)faceBeautyLevel; +#endif /** - * 选择 AI 
动效挂件,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 选择 AI 动效挂件 * - * @param tmplName 动效名称 - * @param tmplDir 动效所在目录 + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * @param tmplName 动效挂件名称 + * @param tmplDir 动效素材文件所在目录 */ +#if TARGET_OS_IPHONE - (void)setMotionTmpl:(nullable NSString *)tmplName inDir:(nullable NSString *)tmplDir; +#endif /** - * 设置动效静音,该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 + * 是否在动效素材播放时静音 * + * 该接口仅在 [企业版 SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise) 中生效 * 有些挂件本身会有声音特效,通过此 API 可以关闭这些特效播放时所带的声音效果。 - * * @param motionMute YES:静音;NO:不静音。 */ +#if TARGET_OS_IPHONE - (void)setMotionMute:(BOOL)motionMute; #endif diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXDeviceManager.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXDeviceManager.h index a3068e7..b948934 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXDeviceManager.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXDeviceManager.h @@ -1,283 +1,373 @@ -/* -* Module: TXDeviceManager 设备管理类 -* -* Function: 用于管理 iOS / Mac 的硬件设备 -* -*/ +// Copyright (c) 2021 Tencent. All rights reserved. 
-#import <Foundation/Foundation.h> +/** + * Module: TRTC 音视频设备管理模块 + * Function: 用于管理摄像头、麦克风和扬声器等音视频相关的硬件设备 + */ +/// @defgroup TXDeviceManager_ios TXDeviceManager +/// TRTC 音视频设备管理模块 +/// @{ +#import <Foundation/Foundation.h> +#import "TXLiteAVSymbolExport.h" +#if TARGET_OS_IPHONE +#import <UIKit/UIKit.h> +#elif TARGET_OS_MAC +#import <AppKit/AppKit.h> +#endif NS_ASSUME_NONNULL_BEGIN +@class AVCaptureDevice; + +///////////////////////////////////////////////////////////////////////////////// +// +// 音视频设备相关的类型定义 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 音视频设备相关的类型定义 +/// @{ +/** + * 系统音量类型(仅适用于移动设备) + * + * @deprecated v9.5 版本开始不推荐使用。 + * 现代智能手机中一般都具备两套系统音量类型,即“通话音量”和“媒体音量”。 + * - 通话音量:手机专门为接打电话所设计的音量类型,自带回声抵消(AEC)功能,并且支持通过蓝牙耳机上的麦克风进行拾音,缺点是音质比较一般。 + * 当您通过手机侧面的音量按键下调手机音量时,如果无法将其调至零(也就是无法彻底静音),说明您的手机当前出于通话音量。 + * - 媒体音量:手机专门为音乐场景所设计的音量类型,无法使用系统的 AEC 功能,并且不支持通过蓝牙耳机的麦克风进行拾音,但具备更好的音乐播放效果。 + * 当您通过手机侧面的音量按键下调手机音量时,如果能够将手机音量调至彻底静音,说明您的手机当前出于媒体音量。 + * + * SDK 目前提供了三种系统音量类型的控制模式:自动切换模式、全程通话音量模式、全程媒体音量模式。 + */ #if TARGET_OS_IPHONE +typedef NS_ENUM(NSInteger, TXSystemVolumeType) { -/// 系统音量类型 -typedef NS_ENUM(NSInteger, TXSystemVolumeType); -/// 声音播放模式(音频路由) -typedef NS_ENUM(NSInteger, TXAudioRoute); + ///自动切换模式 + TXSystemVolumeTypeAuto = 0, -#elif TARGET_OS_MAC + ///全程媒体音量 + TXSystemVolumeTypeMedia = 1, -/// 设备类型 -typedef NS_ENUM(NSInteger, TXMediaDeviceType); -/// 设备描述 -@class TXMediaDeviceInfo; + ///全程通话音量 + TXSystemVolumeTypeVOIP = 2, +}; #endif +/** + * 音频路由(即声音的播放模式) + * + * 音频路由,即声音是从手机的扬声器还是从听筒中播放出来,因此该接口仅适用于手机等移动端设备。 + * 手机有两个扬声器:一个是位于手机顶部的听筒,一个是位于手机底部的立体声扬声器。 + * - 设置音频路由为听筒时,声音比较小,只有将耳朵凑近才能听清楚,隐私性较好,适合用于接听电话。 + * - 设置音频路由为扬声器时,声音比较大,不用将手机贴脸也能听清,因此可以实现“免提”的功能。 + */ +#if TARGET_OS_IPHONE +typedef NS_ENUM(NSInteger, TXAudioRoute) { -@interface TXDeviceManager : NSObject + /// Speakerphone:使用扬声器播放(即“免提”),扬声器位于手机底部,声音偏大,适合外放音乐。 + TXAudioRouteSpeakerphone = 0, -- (instancetype)init NS_UNAVAILABLE; + /// 
Earpiece:使用听筒播放,听筒位于手机顶部,声音偏小,适合需要保护隐私的通话场景。 + TXAudioRouteEarpiece = 1, -#if TARGET_OS_IPHONE +}; +#endif + +/** + * 设备类型(仅适用于桌面平台) + * + * 该枚举值用于定义三种类型的音视频设备,即摄像头、麦克风和扬声器,以便让一套设备管理接口可以操控三种不同类型的设备。 + */ +#if TARGET_OS_MAC && !TARGET_OS_IPHONE +typedef NS_ENUM(NSInteger, TXMediaDeviceType) { + + ///未定义的设备类型 + TXMediaDeviceTypeUnknown = -1, + + ///麦克风类型设备 + TXMediaDeviceTypeAudioInput = 0, + + ///扬声器类型设备 + TXMediaDeviceTypeAudioOutput = 1, + + ///摄像头类型设备 + TXMediaDeviceTypeVideoCamera = 2, + +}; +#endif + +/** + * 设备操作 + * + * 该枚举值用于本地设备的状态变化通知{@link onDeviceChanged}。 + */ +#if TARGET_OS_MAC && !TARGET_OS_IPHONE +typedef NS_ENUM(NSInteger, TXMediaDeviceState) { + + ///设备已被插入 + TXMediaDeviceStateAdd = 0, + + ///设备已被移除 + TXMediaDeviceStateRemove = 1, + + ///设备已启用 + TXMediaDeviceStateActive = 2, + +}; +#endif + +/** + * 音视频设备的相关信息(仅适用于桌面平台) + * + * 该结构体用于描述一个音视频设备的关键信息,比如设备ID、设备名称等等,以便用户能够在用户界面上选择自己期望使用的音视频设备。 + */ +#if TARGET_OS_MAC && !TARGET_OS_IPHONE +LITEAV_EXPORT @interface TXMediaDeviceInfo : NSObject + +///设备类型 +@property(assign, nonatomic) TXMediaDeviceType type; + +///设备 id (UTF-8) +@property(copy, nonatomic, nullable) NSString *deviceId; + +///设备名称 (UTF-8) +@property(copy, nonatomic, nullable) NSString *deviceName; + +///设备属性 +@property(copy, nonatomic, nullable) NSString *deviceProperties; + +@end +#endif +/// @} + +#if TARGET_OS_MAC && !TARGET_OS_IPHONE +@protocol TXDeviceObserver <NSObject> /** - * 判断当前是否为前置摄像头 + * 本地设备的通断状态发生变化(仅适用于桌面系统) + * + * 当本地设备(包括摄像头、麦克风以及扬声器)被插入或者拔出时,SDK 便会抛出此事件回调。 + * @param deviceId 设备 ID + * @param type 设备类型 + * @param state 通断状态,0:设备已添加;1:设备已被移除;2:设备已启用。 */ +- (void)onDeviceChanged:(NSString *)deviceId type:(TXMediaDeviceType)mediaType state:(TXMediaDeviceState)mediaState; + +@end +#endif + +LITEAV_EXPORT @interface TXDeviceManager : NSObject + +///////////////////////////////////////////////////////////////////////////////// +// +// 移动端设备操作接口(iOS Android) +// 
+///////////////////////////////////////////////////////////////////////////////// +/// @name 移动端设备操作接口 +/// @{ + +/** + * 1.1 判断当前是否为前置摄像头(仅适用于移动端) + */ +#if TARGET_OS_IPHONE - (BOOL)isFrontCamera; /** - * 切换摄像头 + * 1.2 切换前置或后置摄像头(仅适用于移动端) */ - (NSInteger)switchCamera:(BOOL)frontCamera; /** - * 查询当前摄像头是否支持缩放 + * 1.3 查询当前摄像头是否支持缩放(仅适用于移动端) */ - (BOOL)isCameraZoomSupported; /** - * 查询当前摄像头支持的最大缩放比例 + * 1.3 获取摄像头的最大缩放倍数(仅适用于移动端) */ - (CGFloat)getCameraZoomMaxRatio; /** - * 设置当前摄像头的缩放比例 + * 1.4 设置摄像头的缩放倍数(仅适用于移动端) * - * @param zoomRatio 取值范围1 - 5,取值为1表示最远视角(正常镜头),取值为5表示最近视角(放大镜头)。 - * 最大值推荐为5,若超过5,视频数据会变得模糊不清。默认值为1。 + * @param zoomRatio 取值范围1 - 5,取值为1表示最远视角(正常镜头),取值为5表示最近视角(放大镜头)。最大值推荐为5,若超过5,视频数据会变得模糊不清。 */ - (NSInteger)setCameraZoomRatio:(CGFloat)zoomRatio; /** - * 查询是否支持自动识别人脸位置 + * 1.5 查询是否支持自动识别人脸位置(仅适用于移动端) */ - (BOOL)isAutoFocusEnabled; /** - * 设置人脸自动识别 + * 1.6 开启自动对焦功能(仅适用于移动端) * - * @param enabled YES: 开启;NO:关闭,默认值:YES + * 开启后,SDK 会自动检测画面中的人脸位置,并将摄像头的焦点始终对焦在人脸位置上。 */ - (NSInteger)enableCameraAutoFocus:(BOOL)enabled; /** - * 设置摄像头焦点 + * 1.7 设置摄像头的对焦位置(仅适用于移动端) * - * @param position 对焦位置 + * 您可以通过该接口实现如下交互: + * 1. 在本地摄像头的预览画面上,允许用户单击操作。 + * 2. 在用户的单击位置显示一个矩形方框,以示摄像头会在此处对焦。 + * 3. 随后将用户点击位置的坐标通过本接口传递给 SDK,之后 SDK 会操控摄像头按照用户期望的位置进行对焦。 + * @param position 对焦位置,请传入期望对焦点的坐标值 + * @return 0:操作成功;负数:操作失败。 + * @note 使用该接口的前提是先通过 {@link enableCameraAutoFocus} 关闭自动对焦功能。 */ - (NSInteger)setCameraFocusPosition:(CGPoint)position; /** - * 查询是否支持开关闪光灯(手电筒模式) + * 1.8 查询是否支持开启闪光灯(仅适用于移动端) */ - (BOOL)isCameraTorchSupported; /** - * 开关闪光灯 - * - * enabled YES:开启;NO:关闭,默认值:NO + * 1.8 开启/关闭闪光灯,也就是手电筒模式(仅适用于移动端) */ - (NSInteger)enableCameraTorch:(BOOL)enabled; /** - * 设置通话时使用的系统音量类型 - * - * @note - * 1. 需要在调用 startLocalAudio() 之前调用该接口。 - * 2. 
如无特殊需求,不推荐您自行设置,您只需通过 enterRoom 设置好适合您的场景,SDK 内部会自动选择相匹配的音量类型。 - * - * @param type 系统音量类型,参见 TXSystemVolumeType 说明。如无特殊需求,不推荐您自行设置。 - */ -- (NSInteger)setSystemVolumeType:(TXSystemVolumeType)type; - -/** - * 设置音频路由 - * - * 微信和手机 QQ 视频通话功能的免提模式就是基于音频路由实现的。 - * 一般手机都有两个扬声器,一个是位于顶部的听筒扬声器,声音偏小;一个是位于底部的立体声扬声器,声音偏大。 - * 设置音频路由的作用就是决定声音使用哪个扬声器播放。 + * 1.9 设置音频路由(仅适用于移动端) * - * @param route 音频路由,即声音由哪里输出(扬声器、听筒),默认值:TXAudioRouteSpeakerphone + * 手机有两个音频播放设备:一个是位于手机顶部的听筒,一个是位于手机底部的立体声扬声器。 + * 设置音频路由为听筒时,声音比较小,只有将耳朵凑近才能听清楚,隐私性较好,适合用于接听电话。 + * 设置音频路由为扬声器时,声音比较大,不用将手机贴脸也能听清,因此可以实现“免提”的功能。 */ - (NSInteger)setAudioRoute:(TXAudioRoute)route; +#endif -#elif TARGET_OS_MAC +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// 桌面端设备操作接口(Windows Mac) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 桌面端设备操作接口 +/// @{ /** - * 获取设备列表 + * 2.1 获取设备列表(仅适用于桌面端) * - * @param type 设备类型 + * @param type 设备类型,指定需要获取哪种设备的列表。详见 TXMediaDeviceType 定义。 + * @note + * - 使用完毕后请调用 release 方法释放资源,这样可以让 SDK 维护 ITXDeviceCollection 对象的生命周期。 + * - 不要使用 delete 释放返回的 Collection 对象,delete ITXDeviceCollection* 指针会导致异常崩溃。 + * - type 只支持 TXMediaDeviceTypeMic、TXMediaDeviceTypeSpeaker、TXMediaDeviceTypeCamera。 + * - 此接口只支持 Mac 和 Windows 平台 */ -- (NSArray<TXMediaDeviceInfo *> * _Nullable)getDevicesList:(TXMediaDeviceType)type; +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (NSArray<TXMediaDeviceInfo *> *_Nullable)getDevicesList:(TXMediaDeviceType)type; /** - * 设置要使用的设备 + * 2.2 设置当前要使用的设备(仅适用于桌面端) * - * @param type 设备类型 - * @param deviceId 从 getDevicesList 中得到的设备 ID + * @param type 设备类型,详见 TXMediaDeviceType 定义。 + * @param deviceId 设备ID,您可以通过接口 {@link getDevicesList} 获得设备 ID。 + * @return 0:操作成功;负数:操作失败。 */ - (NSInteger)setCurrentDevice:(TXMediaDeviceType)type deviceId:(NSString *)deviceId; /** - * 获取当前的设备信息 - * - * @param type 设备类型 + * 2.3 获取当前正在使用的设备(仅适用于桌面端) */ -- (TXMediaDeviceInfo * 
_Nullable)getCurrentDevice:(TXMediaDeviceType)type; +- (TXMediaDeviceInfo *_Nullable)getCurrentDevice:(TXMediaDeviceType)type; /** - * 设置当前设备的音量 + * 2.4 设置当前设备的音量(仅适用于桌面端) * - * @param volume 音量值,范围0 - 100 - * @param type 设备类型,仅支持 AudioInput 和 AudioOutput 类型。 + * 这里的音量指的是麦克风的采集音量或者扬声器的播放音量,摄像头是不支持设置音量的。 + * @param volume 音量大小,取值范围为0 - 100,默认值:100。 + * @note 如果将 volume 设置成 100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 */ - (NSInteger)setCurrentDeviceVolume:(NSInteger)volume deviceType:(TXMediaDeviceType)type; /** - * 获取当前设备的音量 + * 2.5 获取当前设备的音量(仅适用于桌面端) * - * @param type 设备类型,仅支持 AudioInput 和 AudioOutput 类型。 + * 这里的音量指的是麦克风的采集音量或者扬声器的播放音量,摄像头是不支持获取音量的。 */ - (NSInteger)getCurrentDeviceVolume:(TXMediaDeviceType)type; /** - * 设置当前设备的静音状态 + * 2.6 设置当前设备的静音状态(仅适用于桌面端) * - * @param mute 设置为 YES 时,麦克风设备静音 - * @param type 设备类型,仅支持 AudioInput 和 AudioOutput 类型。 + * 这里的音量指的是麦克风和扬声器,摄像头是不支持静音操作的。 */ - (NSInteger)setCurrentDeviceMute:(BOOL)mute deviceType:(TXMediaDeviceType)type; /** - * 获取当前设备的静音状态 + * 2.7 获取当前设备的静音状态(仅适用于桌面端) * - * @param type 设备类型,仅支持 AudioInput 和 AudioOutput 类型。 + * 这里的音量指的是麦克风和扬声器,摄像头是不支持静音操作的。 */ - (BOOL)getCurrentDeviceMute:(TXMediaDeviceType)type; /** - * 开始摄像头测试 + * 2.8 开始摄像头测试(仅适用于桌面端) * - * @note 在测试过程中可以使用 setCurrentCameraDevice 接口切换摄像头。 - * @param view 预览画面所在的父控件 + * @note 在测试过程中可以使用 {@link setCurrentDevice} 接口切换摄像头。 */ - (NSInteger)startCameraDeviceTest:(NSView *)view; /** - * 结束摄像头测试 + * 2.9 结束摄像头测试(仅适用于桌面端) */ - (NSInteger)stopCameraDeviceTest; /** - * 开始麦克风测试 + * 2.10 开始麦克风测试(仅适用于桌面端) * - * 该方法测试麦克风是否能正常工作,volume 的取值范围为0 - 100。 + * 该接口可以测试麦克风是否能正常工作,测试到的麦克风采集音量的大小,会以回调的形式通知给您,其中 volume 的取值范围为0 - 100。 + * @param interval 麦克风音量的回调间隔。 */ - (NSInteger)startMicDeviceTest:(NSInteger)interval testEcho:(void (^)(NSInteger volume))testEcho; /** - * 结束麦克风测试 + * 2.11 结束麦克风测试(仅适用于桌面端) */ - (NSInteger)stopMicDeviceTest; /** - * 开始扬声器测试 + * 2.12 开始扬声器测试(仅适用于桌面端) * - * 该方法播放指定的音频文件测试播放设备是否能正常工作。如果能听到声音,说明播放设备能正常工作。 + * 
该接口通过播放指定的音频文件,用于测试播放设备是否能正常工作。如果用户在测试时能听到声音,说明播放设备能正常工作。 + * @param filePath 声音文件的路径 */ - (NSInteger)startSpeakerDeviceTest:(NSString *)audioFilePath onVolumeChanged:(void (^)(NSInteger volume, BOOL isLastFrame))volumeBlock; /** - * 结束扬声器测试 + * 2.13 结束扬声器测试(仅适用于桌面端) */ - (NSInteger)stopSpeakerDeviceTest; -#endif - -@end - -#if TARGET_OS_IPHONE -/** - * 系统音量类型 - * - * 智能手机一般具备两种系统音量类型,即通话音量类型和媒体音量类型。 - * - 通话音量:手机专门为通话场景设计的音量类型,使用手机自带的回声抵消功能,音质相比媒体音量类型较差, - * 无法通过音量按键将音量调成零,但是支持蓝牙耳机上的麦克风。 - * - * - 媒体音量:手机专门为音乐场景设计的音量类型,音质相比于通话音量类型要好,通过通过音量按键可以将音量调成零。 - * 使用媒体音量类型时,如果要开启回声抵消(AEC)功能,SDK 会开启内置的声学处理算法对声音进行二次处理。 - * 在媒体音量模式下,蓝牙耳机无法使用自带的麦克风采集声音,只能使用手机上的麦克风进行声音采集。 - * - * SDK 目前提供了三种系统音量类型的控制模式,分别为: - * - Auto:“麦上通话,麦下媒体”,即主播上麦时使用通话音量,观众不上麦则使用媒体音量,适合在线直播场景。 - * 如果您在 enterRoom 时选择的场景为 TRTCAppSceneLIVE 或 TRTCAppSceneVoiceChatRoom,SDK 会自动选择该模式。 - * - * - VOIP:全程使用通话音量,适合多人会议场景。 - * 如果您在 enterRoom 时选择的场景为 TRTCAppSceneVideoCall 或 TRTCAppSceneAudioCall,SDK 会自动选择该模式。 - * - * - Media:通话全程使用媒体音量,不常用,适合个别有特殊需求(如主播外接声卡)的应用场景。 - * - */ -typedef NS_ENUM(NSInteger, TXSystemVolumeType) { - TXSystemVolumeTypeAuto = 0, - TXSystemVolumeTypeMedia = 1, - TXSystemVolumeTypeVOIP = 2, -}; - - /** - * 声音播放模式(音频路由) - * - * 微信和手机 QQ 里的视频通话功能,都有一个免提模式,开启后就不用把手机贴在耳朵上,这个功能就是基于音频路由实现的。 - * 一般手机都有两个扬声器,设置音频路由的作用就是要决定声音从哪个扬声器播放出来: - * - Speakerphone:扬声器,位于手机底部,声音偏大,适合外放音乐。 - * - Earpiece:听筒,位于手机顶部,声音偏小,适合通话。 + * 2.14 设备热插拔回调(仅适用于 Mac 系统) */ -typedef NS_ENUM(NSInteger, TXAudioRoute) { - TXAudioRouteSpeakerphone = 0, ///< 扬声器 - TXAudioRouteEarpiece = 1, ///< 听筒 -}; +- (void)setObserver:(nullable id<TXDeviceObserver>)observer; +#endif +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// 弃用接口(建议使用对应的新接口) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 弃用接口(建议使用对应的新接口) +/// @{ -#elif TARGET_OS_MAC /** - * 设备类型(仅 Mac) + * 设置系统音量类型(仅适用于移动端) * - * 在 Mac 上,每一种类型的设备都可能有多个,TRTC SDK 的 Mac 版本提供了一系列函数用来操作这些设备。 + * 
@deprecated v9.5 版本开始不推荐使用,建议使用 {@link TRTCCloud} 中的 startLocalAudio(quality) 接口替代之,通过 quality 参数来决策音质。 */ -typedef NS_ENUM(NSInteger, TXMediaDeviceType) { - TXMediaDeviceTypeUnknown = -1, ///< 未定义 - TXMediaDeviceTypeAudioInput = 0, ///< 麦克风 - TXMediaDeviceTypeAudioOutput = 1, ///< 扬声器或听筒 - TXMediaDeviceTypeVideoCamera = 2, ///< 摄像头 -}; - -/** - * 设备描述 - * - * 在 Mac 上,每一种类型的设备都可能有多个,TRTC SDK 的 Mac 版本提供了一系列函数用来操作这些设备。 - */ -@interface TXMediaDeviceInfo : NSObject -/// 设备类型 -@property (assign, nonatomic) TXMediaDeviceType type; -/// 设备ID -@property (copy, nonatomic, nullable) NSString *deviceId; -/// 设备名称 -@property (copy, nonatomic, nullable) NSString *deviceName; -@end - +#if TARGET_OS_IPHONE +- (NSInteger)setSystemVolumeType:(TXSystemVolumeType)type __attribute__((deprecated("use TRTCCloud#startLocalAudio:quality instead"))); #endif +/// @} +@end +/// @} NS_ASSUME_NONNULL_END diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVBuffer.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVBuffer.h index 5ca84ea..a347168 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVBuffer.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVBuffer.h @@ -1,49 +1,61 @@ +// Copyright (c) 2021 Tencent. All rights reserved. 
+#ifndef SDK_TRTC_INCLUDE_TXLITEAVBUFFER_H_ +#define SDK_TRTC_INCLUDE_TXLITEAVBUFFER_H_ -#ifndef TXLiteAVBuffer_h -#define TXLiteAVBuffer_h - +#include <stdint.h> #include <stdio.h> #include <stdlib.h> -#include <stdint.h> + +#ifdef __APPLE__ +#include "TXLiteAVSymbolExport.h" +#else +#define LITEAV_EXPORT +#endif + +#ifdef __cplusplus namespace liteav { /** * Buffer 数据类型 */ -class TXLiteAVBuffer { - -public: - virtual ~TXLiteAVBuffer() {} - - /** - * 获取 buffer 的内存地址 - */ - virtual uint8_t * data() = 0; - - /** - * 获取 buffer 的内存地址 - */ - virtual const uint8_t * cdata() const = 0; - - /** - * 获取 buffer 的内存size - */ - virtual size_t size() const = 0; - - /** - * 设置 buffe 的有效数据 size - * 如果此 size 超过当前 capacity,会造成重新分配内存,并复制数据 - */ - virtual void SetSize(size_t size) = 0; - - /** - * 确保 buffer 分配的内存空间足够,不用多次分配拷贝内存。此方法会引起内存分配,data / cdata 方法获取的指针失效 - * @param capacity buffer 预分配的内存size - */ - virtual void EnsureCapacity(size_t capacity) = 0; +class LITEAV_EXPORT TXLiteAVBuffer { + public: + virtual ~TXLiteAVBuffer() {} + + /** + * 获取 buffer 的内存地址 + */ + virtual uint8_t* data() = 0; + + /** + * 获取 buffer 的内存地址 + */ + virtual const uint8_t* cdata() const = 0; + + /** + * 获取 buffer 的内存 size + */ + virtual size_t size() const = 0; + + /** + * 设置 buffe 的有效数据 size + * 如果此 size 超过当前 capacity,会造成重新分配内存,并复制数据 + */ + virtual void SetSize(size_t size) = 0; + + /** + * 确保 buffer + * 分配的内存空间足够,不用多次分配拷贝内存。此方法会引起内存分配,data / + * cdata 方法获取的指针失效 + * @param capacity buffer 预分配的内存 size + */ + virtual void EnsureCapacity(size_t capacity) = 0; }; -} -#endif /* TXLiteAVBuffer_h */ +} // namespace liteav + +#endif // __cplusplus + +#endif // SDK_TRTC_INCLUDE_TXLITEAVBUFFER_H_ diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVCode.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVCode.h index dc4c146..3a9a47f 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVCode.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVCode.h @@ -1,3 +1,5 
@@ +// Copyright (c) 2021 Tencent. All rights reserved. + #ifndef __TXLITEAVCODE_H__ #define __TXLITEAVCODE_H__ @@ -6,303 +8,107 @@ // 错误码 // ///////////////////////////////////////////////////////////////////////////////// +// clang-format off typedef enum TXLiteAVError { ///////////////////////////////////////////////////////////////////////////////// - // // 基础错误码 - // - ///////////////////////////////////////////////////////////////////////////////// - ERR_NULL = 0, ///< 无错误 - - ///////////////////////////////////////////////////////////////////////////////// - // - // 进房(enterRoom)相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##onEnterRoom() 和 TRTCCloudDelegate##OnError() 通知 - // - ///////////////////////////////////////////////////////////////////////////////// - ERR_ROOM_ENTER_FAIL = -3301, ///< 进入房间失败 - ERR_ROOM_REQUEST_ENTER_ROOM_TIMEOUT = -3308, ///< 请求进房超时,请检查网络 - ERR_ENTER_ROOM_PARAM_NULL = -3316, ///< 进房参数为空,请检查: enterRoom:appScene: 接口调用是否传入有效的 param - ERR_SDK_APPID_INVALID = -3317, ///< 进房参数 sdkAppId 错误 - ERR_ROOM_ID_INVALID = -3318, ///< 进房参数 roomId 错误 - ERR_USER_ID_INVALID = -3319, ///< 进房参数 userID 不正确 - ERR_USER_SIG_INVALID = -3320, ///< 进房参数 userSig 不正确 - ERR_ROOM_REQUEST_ENTER_ROOM_REFUSED = -3340, ///< 请求进房拒绝,请检查:是否连续调用 enterRoom 进入相同房间 - ERR_SERVER_INFO_SERVICE_SUSPENDED = -100013, ///< 服务不可用。请检查:套餐包剩余分钟数是否大于0,腾讯云账号是否欠费 - ///////////////////////////////////////////////////////////////////////////////// - // - // 退房(exitRoom)相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 - // - ///////////////////////////////////////////////////////////////////////////////// - ERR_ROOM_REQUEST_QUIT_ROOM_TIMEOUT = -3325, ///< 请求退房超时 - + ERR_NULL = 0, ///< 无错误 + ERR_FAILED = -1, ///< 暂未归类的通用错误 + ERR_INVALID_PARAMETER = -2, ///< 调用 API 时,传入的参数不合法 + ERR_REFUSED = -3, ///< API 调用被拒绝 + ERR_NOT_SUPPORTED = -4, ///< 当前 API 不支持调用 + ERR_INVALID_LICENSE = -5, ///< license 不合法,调用失败 + ERR_REQUEST_TIMEOUT = -6, ///< 请求服务器超时 + ERR_SERVER_PROCESS_FAILED = -7, ///< 
服务器无法处理您的请求 + ERR_DISCONNECTED = -8, ///< 断开连接 + ///////////////////////////////////////////////////////////////////////////////// - // - // 设备(摄像头、麦克风、扬声器)相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 - // 区段:-6000 ~ -6999 - // + // 视频相关错误码 ///////////////////////////////////////////////////////////////////////////////// ERR_CAMERA_START_FAIL = -1301, ///< 打开摄像头失败,例如在 Windows 或 Mac 设备,摄像头的配置程序(驱动程序)异常,禁用后重新启用设备,或者重启机器,或者更新配置程序 ERR_CAMERA_NOT_AUTHORIZED = -1314, ///< 摄像头设备未授权,通常在移动设备出现,可能是权限被用户拒绝了 ERR_CAMERA_SET_PARAM_FAIL = -1315, ///< 摄像头参数设置出错(参数不支持或其它) ERR_CAMERA_OCCUPY = -1316, ///< 摄像头正在被占用中,可尝试打开其他摄像头 - ERR_MIC_START_FAIL = -1302, ///< 打开麦克风失败,例如在 Windows 或 Mac 设备,麦克风的配置程序(驱动程序)异常,禁用后重新启用设备,或者重启机器,或者更新配置程序 - ERR_MIC_NOT_AUTHORIZED = -1317, ///< 麦克风设备未授权,通常在移动设备出现,可能是权限被用户拒绝了 - ERR_MIC_SET_PARAM_FAIL = -1318, ///< 麦克风设置参数失败 - ERR_MIC_OCCUPY = -1319, ///< 麦克风正在被占用中,例如移动设备正在通话时,打开麦克风会失败 - ERR_MIC_STOP_FAIL = -1320, ///< 停止麦克风失败 - ERR_SPEAKER_START_FAIL = -1321, ///< 打开扬声器失败,例如在 Windows 或 Mac 设备,扬声器的配置程序(驱动程序)异常,禁用后重新启用设备,或者重启机器,或者更新配置程序 - ERR_SPEAKER_SET_PARAM_FAIL = -1322, ///< 扬声器设置参数失败 - ERR_SPEAKER_STOP_FAIL = -1323, ///< 停止扬声器失败 - - ///////////////////////////////////////////////////////////////////////////////// - // - // 系统声音采集相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##onSystemAudioLoopbackError() 通知 - // - ///////////////////////////////////////////////////////////////////////////////// - ERR_AUDIO_PLUGIN_START_FAIL = -1330, ///< 开启系统声音录制失败,例如音频驱动插件不可用 - ERR_AUDIO_PLUGIN_INSTALL_NOT_AUTHORIZED = -1331, ///< 安装音频驱动插件未授权 - ERR_AUDIO_PLUGIN_INSTALL_FAILED = -1332, ///< 安装音频驱动插件失败 - ///////////////////////////////////////////////////////////////////////////////// - // - // 屏幕分享相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 - // - ///////////////////////////////////////////////////////////////////////////////// ERR_SCREEN_CAPTURE_START_FAIL = -1308, ///< 开始录屏失败,如果在移动设备出现,可能是权限被用户拒绝了,如果在 Windows 或 Mac 系统的设备出现,请检查录屏接口的参数是否符合要求 
ERR_SCREEN_CAPTURE_UNSURPORT = -1309, ///< 录屏失败,在 Android 平台,需要5.0以上的系统,在 iOS 平台,需要11.0以上的系统 - ERR_SERVER_CENTER_NO_PRIVILEDGE_PUSH_SUB_VIDEO = -102015, ///< 没有权限上行辅路 - ERR_SERVER_CENTER_ANOTHER_USER_PUSH_SUB_VIDEO = -102016, ///< 其他用户正在上行辅路 ERR_SCREEN_CAPTURE_STOPPED = -7001, ///< 录屏被系统中止 + ERR_SCREEN_SHARE_NOT_AUTHORIZED = -102015, ///< 没有权限上行辅路 + ERR_SCREEN_SHRAE_OCCUPIED_BY_OTHER = -102016, ///< 其他用户正在上行辅路 - ///////////////////////////////////////////////////////////////////////////////// - // - // 编解码相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 - // - ///////////////////////////////////////////////////////////////////////////////// - ERR_VIDEO_ENCODE_FAIL = -1303, ///< 视频帧编码失败,例如 iOS 设备切换到其他应用时,硬编码器可能被系统释放,再切换回来时,硬编码器重启前,可能会抛出 + ERR_VIDEO_ENCODE_FAIL = -1303, ///< 视频帧编码失败,例如 iOS 设备切换到其他应用时,硬编码器可能被系统释放,再切换回来时,硬编码器重启前,可能会抛出 ERR_UNSUPPORTED_RESOLUTION = -1305, ///< 不支持的视频分辨率 - ERR_AUDIO_ENCODE_FAIL = -1304, ///< 音频帧编码失败,例如传入自定义音频数据,SDK 无法处理 - ERR_UNSUPPORTED_SAMPLERATE = -1306, ///< 不支持的音频采样率 + ERR_PIXEL_FORMAT_UNSUPPORTED = -1327, ///< 自定视频采集:设置的 pixel format 不支持 + ERR_BUFFER_TYPE_UNSUPPORTED = -1328, ///< 自定视频采集:设置的 buffer type 不支持 + ERR_NO_AVAILABLE_HEVC_DECODERS = -2304, ///< 找不到可用的 HEVC 解码器 ///////////////////////////////////////////////////////////////////////////////// - // - // 自定义采集相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 - // + // 音频相关错误码 ///////////////////////////////////////////////////////////////////////////////// - ERR_PIXEL_FORMAT_UNSUPPORTED = -1327, ///< 设置的 pixel format 不支持 - ERR_BUFFER_TYPE_UNSUPPORTED = -1328, ///< 设置的 buffer type 不支持 + ERR_MIC_START_FAIL = -1302, ///< 打开麦克风失败,例如在 Windows 或 Mac 设备,麦克风的配置程序(驱动程序)异常,禁用后重新启用设备,或者重启机器,或者更新配置程序 + ERR_MIC_NOT_AUTHORIZED = -1317, ///< 麦克风设备未授权,通常在移动设备出现,可能是权限被用户拒绝了 + ERR_MIC_SET_PARAM_FAIL = -1318, ///< 麦克风设置参数失败 + ERR_MIC_OCCUPY = -1319, ///< 麦克风正在被占用中,例如移动设备正在通话时,打开麦克风会失败 + ERR_MIC_STOP_FAIL = -1320, ///< 停止麦克风失败 - 
///////////////////////////////////////////////////////////////////////////////// - // - // CDN 绑定和混流相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##onStartPublishing() 和 TRTCCloudDelegate##onSetMixTranscodingConfig 通知。 - // - ///////////////////////////////////////////////////////////////////////////////// - ERR_PUBLISH_CDN_STREAM_REQUEST_TIME_OUT = -3321, ///< 旁路转推请求超时 - ERR_CLOUD_MIX_TRANSCODING_REQUEST_TIME_OUT = -3322, ///< 云端混流请求超时 - ERR_PUBLISH_CDN_STREAM_SERVER_FAILED = -3323, ///< 旁路转推回包异常 - ERR_CLOUD_MIX_TRANSCODING_SERVER_FAILED = -3324, ///< 云端混流回包异常 - ERR_ROOM_REQUEST_START_PUBLISHING_TIMEOUT = -3333, ///< 开始向腾讯云的直播 CDN 推流信令超时 - ERR_ROOM_REQUEST_START_PUBLISHING_ERROR = -3334, ///< 开始向腾讯云的直播 CDN 推流信令异常 - ERR_ROOM_REQUEST_STOP_PUBLISHING_TIMEOUT = -3335, ///< 停止向腾讯云的直播 CDN 推流信令超时 - ERR_ROOM_REQUEST_STOP_PUBLISHING_ERROR = -3336, ///< 停止向腾讯云的直播 CDN 推流信令异常 + ERR_SPEAKER_START_FAIL = -1321, ///< 打开扬声器失败,例如在 Windows 或 Mac 设备,扬声器的配置程序(驱动程序)异常,禁用后重新启用设备,或者重启机器,或者更新配置程序 + ERR_SPEAKER_SET_PARAM_FAIL = -1322, ///< 扬声器设置参数失败 + ERR_SPEAKER_STOP_FAIL = -1323, ///< 停止扬声器失败 - ///////////////////////////////////////////////////////////////////////////////// - // - // 跨房连麦(ConnectOtherRoom)相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##onConnectOtherRoom() 通知。 - // - ///////////////////////////////////////////////////////////////////////////////// - ERR_ROOM_REQUEST_CONN_ROOM_TIMEOUT = -3326, ///< 请求连麦超时 - ERR_ROOM_REQUEST_DISCONN_ROOM_TIMEOUT = -3327, ///< 请求退出连麦超时 - ERR_ROOM_REQUEST_CONN_ROOM_INVALID_PARAM = -3328, ///< 无效参数 - ERR_CONNECT_OTHER_ROOM_AS_AUDIENCE = -3330, ///< 当前是观众角色,不能请求或断开跨房连麦,需要先 switchRole() 到主播 - ERR_SERVER_CENTER_CONN_ROOM_NOT_SUPPORT = -102031, ///< 不支持跨房间连麦 - ERR_SERVER_CENTER_CONN_ROOM_REACH_MAX_NUM = -102032, ///< 达到跨房间连麦上限 - ERR_SERVER_CENTER_CONN_ROOM_REACH_MAX_RETRY_TIMES = -102033, ///< 跨房间连麦重试次数耗尽 - ERR_SERVER_CENTER_CONN_ROOM_REQ_TIMEOUT = -102034, ///< 跨房间连麦请求超时 - ERR_SERVER_CENTER_CONN_ROOM_REQ = -102035, ///< 跨房间连麦请求格式错误 - 
ERR_SERVER_CENTER_CONN_ROOM_NO_SIG = -102036, ///< 跨房间连麦无签名 - ERR_SERVER_CENTER_CONN_ROOM_DECRYPT_SIG = -102037, ///< 跨房间连麦签名解密失败 - ERR_SERVER_CENTER_CONN_ROOM_NO_KEY = -102038, ///< 未找到跨房间连麦签名解密密钥 - ERR_SERVER_CENTER_CONN_ROOM_PARSE_SIG = -102039, ///< 跨房间连麦签名解析错误 - ERR_SERVER_CENTER_CONN_ROOM_INVALID_SIG_TIME = -102040, ///< 跨房间连麦签名时间戳错误 - ERR_SERVER_CENTER_CONN_ROOM_SIG_GROUPID = -102041, ///< 跨房间连麦签名不匹配 - ERR_SERVER_CENTER_CONN_ROOM_NOT_CONNED = -102042, ///< 本房间无连麦 - ERR_SERVER_CENTER_CONN_ROOM_USER_NOT_CONNED = -102043, ///< 本用户未发起连麦 - ERR_SERVER_CENTER_CONN_ROOM_FAILED = -102044, ///< 跨房间连麦失败 - ERR_SERVER_CENTER_CONN_ROOM_CANCEL_FAILED = -102045, ///< 取消跨房间连麦失败 - ERR_SERVER_CENTER_CONN_ROOM_CONNED_ROOM_NOT_EXIST = -102046, ///< 被连麦房间不存在 - ERR_SERVER_CENTER_CONN_ROOM_CONNED_REACH_MAX_ROOM = -102047, ///< 被连麦房间达到连麦上限 - ERR_SERVER_CENTER_CONN_ROOM_CONNED_USER_NOT_EXIST = -102048, ///< 被连麦用户不存在 - ERR_SERVER_CENTER_CONN_ROOM_CONNED_USER_DELETED = -102049, ///< 被连麦用户已被删除 - ERR_SERVER_CENTER_CONN_ROOM_CONNED_USER_FULL = -102050, ///< 被连麦用户达到资源上限 - ERR_SERVER_CENTER_CONN_ROOM_INVALID_SEQ = -102051, ///< 连麦请求序号错乱 + ERR_AUDIO_PLUGIN_START_FAIL = -1330, ///< 开启系统声音录制失败,例如音频驱动插件不可用 + ERR_AUDIO_PLUGIN_INSTALL_NOT_AUTHORIZED = -1331, ///< 安装音频驱动插件未授权 + ERR_AUDIO_PLUGIN_INSTALL_FAILED = -1332, ///< 安装音频驱动插件失败 + ERR_AUDIO_ENCODE_FAIL = -1304, ///< 音频帧编码失败,例如传入自定义音频数据,SDK 无法处理 + ERR_UNSUPPORTED_SAMPLERATE = -1306, ///< 不支持的音频采样率 ///////////////////////////////////////////////////////////////////////////////// - // - // 客户无需关心的内部错误码 - // - ///////////////////////////////////////////////////////////////////////////////// - - // - Remove From Head - ERR_RTMP_PUSH_NET_DISCONNECT = -1307, ///< 直播,推流出现网络断开,且经过多次重试无法恢复 - ERR_RTMP_PUSH_INVALID_ADDRESS = -1313, ///< 直播,推流地址非法,例如不是 RTMP 协议的地址 - ERR_RTMP_PUSH_NET_ALLADDRESS_FAIL = -1324, ///< 直播,连接推流服务器失败(若支持智能选路,IP 全部失败) - ERR_RTMP_PUSH_NO_NETWORK = -1325, ///< 直播,网络不可用,请确认 Wi-Fi、移动数据或者有线网络是否正常 - ERR_RTMP_PUSH_SERVER_REFUSE = -1326, 
///< 直播,服务器拒绝连接请求,可能是该推流地址已经被占用,或者 TXSecret 校验失败,或者是过期了,或者是欠费了 - - ERR_PLAY_LIVE_STREAM_NET_DISCONNECT = -2301, ///< 直播,网络断连,且经多次重连抢救无效,可以放弃治疗,更多重试请自行重启播放 - ERR_GET_RTMP_ACC_URL_FAIL = -2302, ///< 直播,获取加速拉流的地址失败 - ERR_FILE_NOT_FOUND = -2303, ///< 播放的文件不存在 - ERR_HEVC_DECODE_FAIL = -2304, ///< H265 解码失败 - ERR_VOD_DECRYPT_FAIL = -2305, ///< 点播,音视频流解密失败 - ERR_GET_VODFILE_MEDIAINFO_FAIL = -2306, ///< 点播,获取点播文件信息失败 - ERR_PLAY_LIVE_STREAM_SWITCH_FAIL = -2307, ///< 直播,切流失败(切流可以播放不同画面大小的视频) - ERR_PLAY_LIVE_STREAM_SERVER_REFUSE = -2308, ///< 直播,服务器拒绝连接请求 - ERR_RTMP_ACC_FETCH_STREAM_FAIL = -2309, ///< 直播,RTMPACC 低延时拉流失败,且经过多次重试无法恢复 - ERR_HEVC_ENCODE_FAIL = -2310, ///< 265编码失败 - ERR_HEVC_ENCODE_NOT_SUPPORT = -2311, ///< 265编码判断不支持 - ERR_HEVC_SOFTDECODER_START_FAIL = -2312, ///< 265软解启动失败 - - ERR_ROOM_HEARTBEAT_FAIL = -3302, ///< 心跳失败,客户端定时向服务器发送数据包,告诉服务器自己活着,这个错误通常是发包超时 - ERR_ROOM_REQUEST_IP_FAIL = -3303, ///< 拉取接口机服务器地址失败 - ERR_ROOM_CONNECT_FAIL = -3304, ///< 连接接口机服务器失败 - ERR_ROOM_REQUEST_AVSEAT_FAIL = -3305, ///< 请求视频位失败 - ERR_ROOM_REQUEST_TOKEN_HTTPS_TIMEOUT = -3306, ///< 请求 token HTTPS 超时,请检查网络是否正常,或网络防火墙是否放行 HTTPS 访问 official.opensso.tencent-cloud.com:443 - ERR_ROOM_REQUEST_IP_TIMEOUT = -3307, ///< 请求 IP 和 sig 超时,请检查网络是否正常,或网络防火墙是否放行 UDP 访问下列 IP 和域名 query.tencent-cloud.com:8000 162.14.23.140:8000 162.14.7.49:8000 - ERR_ROOM_REQUEST_VIDEO_FLAG_TIMEOUT = -3309, ///< 请求视频位超时 - ERR_ROOM_REQUEST_VIDEO_DATA_ROOM_TIMEOUT = -3310, ///< 请求视频数据超时 - ERR_ROOM_REQUEST_CHANGE_ABILITY_TIMEOUT = -3311, ///< 请求修改视频能力项超时 - ERR_ROOM_REQUEST_STATUS_REPORT_TIMEOUT = -3312, ///< 请求状态上报超时 - ERR_ROOM_REQUEST_CLOSE_VIDEO_TIMEOUT = -3313, ///< 请求关闭视频超时 - ERR_ROOM_REQUEST_SET_RECEIVE_TIMEOUT = -3314, ///< 请求接收视频项超时 - ERR_ROOM_REQUEST_TOKEN_INVALID_PARAMETER = -3315, ///< 请求 token 无效参数,请检查 TRTCParams.userSig 是否填写正确 - - ERR_ROOM_REQUEST_AES_TOKEN_RETURN_ERROR = -3329, ///< 请求 AES TOKEN 时,server 返回的内容是空的 - ERR_ACCIP_LIST_EMPTY = -3331, ///< 请求接口机 IP 返回的列表为空的 - ERR_ROOM_REQUEST_SEND_JSON_CMD_TIMEOUT = 
-3332, ///< 请求发送Json 信令超时 - - // Info 服务器(查询接口机 IP), 服务器错误码,数值范围[-100000, -110000] - ERR_SERVER_INFO_UNPACKING_ERROR = -100000, ///< server 解包错误,可能请求数据被篡改 - ERR_SERVER_INFO_TOKEN_ERROR = -100001, ///< TOKEN 错误 - ERR_SERVER_INFO_ALLOCATE_ACCESS_FAILED = -100002, ///< 分配接口机错误 - ERR_SERVER_INFO_GENERATE_SIGN_FAILED = -100003, ///< 生成签名错误 - ERR_SERVER_INFO_TOKEN_TIMEOUT = -100004, ///< HTTPS token 超时 - ERR_SERVER_INFO_INVALID_COMMAND = -100005, ///< 无效的命令字 - ERR_SERVER_INFO_PRIVILEGE_FLAG_ERROR = -100006, ///< 权限位校验失败 - ERR_SERVER_INFO_GENERATE_KEN_ERROR = -100007, ///< HTTPS 请求时,生成加密 key 错误 - ERR_SERVER_INFO_GENERATE_TOKEN_ERROR = -100008, ///< HTTPS 请求时,生成 token 错误 - ERR_SERVER_INFO_DATABASE = -100009, ///< 数据库查询失败(房间相关存储信息) - ERR_SERVER_INFO_BAD_ROOMID = -100010, ///< 房间号错误 - ERR_SERVER_INFO_BAD_SCENE_OR_ROLE = -100011, ///< 场景或角色错误 - ERR_SERVER_INFO_ROOMID_EXCHANGE_FAILED = -100012, ///< 房间号转换出错 - ERR_SERVER_INFO_STRGROUP_HAS_INVALID_CHARS = -100014, ///< 房间号非法 - ERR_SERVER_INFO_LACK_SDKAPPID = -100015, ///< 非法SDKAppid - ERR_SERVER_INFO_INVALID = -100016, ///< 无效请求, 分配接口机失败 - ERR_SERVER_INFO_ECDH_GET_KEY = -100017, ///< 生成公钥失败 - ERR_SERVER_INFO_ECDH_GET_TINYID = -100018, ///< userSig 校验失败,请检查 TRTCParams.userSig 是否填写正确 - - // Access 接口机 - ERR_SERVER_ACC_TOKEN_TIMEOUT = -101000, ///< token 过期 - ERR_SERVER_ACC_SIGN_ERROR = -101001, ///< 签名错误 - ERR_SERVER_ACC_SIGN_TIMEOUT = -101002, ///< 签名超时 - ERR_SERVER_ACC_ROOM_NOT_EXIST = -101003, ///< 房间不存在 - ERR_SERVER_ACC_ROOMID = -101004, ///< 后台房间标识 roomId 错误 - ERR_SERVER_ACC_LOCATIONID = -101005, ///< 后台用户位置标识 locationId 错误 - ERR_SERVER_ACC_TOKEN_EORROR = -101006, ///< token里面的tinyid和进房信令tinyid不同 或是 进房信令没有token - - // Center 服务器(信令和流控处理等任务) - ERR_SERVER_CENTER_SYSTEM_ERROR = -102000, ///< 后台错误 - - ERR_SERVER_CENTER_INVALID_ROOMID = -102001, ///< 无效的房间 Id - ERR_SERVER_CENTER_CREATE_ROOM_FAILED = -102002, ///< 创建房间失败 - ERR_SERVER_CENTER_SIGN_ERROR = -102003, ///< 签名错误 - ERR_SERVER_CENTER_SIGN_TIMEOUT = -102004, ///< 签名过期 - 
ERR_SERVER_CENTER_ROOM_NOT_EXIST = -102005, ///< 房间不存在 - ERR_SERVER_CENTER_ADD_USER_FAILED = -102006, ///< 房间添加用户失败 - ERR_SERVER_CENTER_FIND_USER_FAILED = -102007, ///< 查找用户失败 - ERR_SERVER_CENTER_SWITCH_TERMINATION_FREQUENTLY = -102008, ///< 频繁切换终端 - ERR_SERVER_CENTER_LOCATION_NOT_EXIST = -102009, ///< locationid 错误 - ERR_SERVER_CENTER_NO_PRIVILEDGE_CREATE_ROOM = -102010, ///< 没有权限创建房间 - ERR_SERVER_CENTER_NO_PRIVILEDGE_ENTER_ROOM = -102011, ///< 没有权限进入房间 - ERR_SERVER_CENTER_INVALID_PARAMETER_SUB_VIDEO = -102012, ///< 辅路抢视频位、申请辅路请求类型参数错误 - ERR_SERVER_CENTER_NO_PRIVILEDGE_PUSH_VIDEO = -102013, ///< 没有权限上视频 - ERR_SERVER_CENTER_ROUTE_TABLE_ERROR = -102014, ///< 没有空闲路由表 - ERR_SERVER_CENTER_NOT_PUSH_SUB_VIDEO = -102017, ///< 当前用户没有上行辅路 - ERR_SERVER_CENTER_USER_WAS_DELETED = -102018, ///< 用户被删除状态 - ERR_SERVER_CENTER_NO_PRIVILEDGE_REQUEST_VIDEO = -102019, ///< 没有权限请求视频 - ERR_SERVER_CENTER_INVALID_PARAMETER = -102023, ///< 进房参数 bussInfo 错误 - ERR_SERVER_CENTER_I_FRAME_UNKNOW_TYPE = -102024, ///< 请求 I 帧未知 opType - ERR_SERVER_CENTER_I_FRAME_INVALID_PACKET = -102025, ///< 请求 I 帧包格式错误 - ERR_SERVER_CENTER_I_FRAME_DEST_USER_NOT_EXIST = -102026, ///< 请求 I 帧目标用户不存在 - ERR_SERVER_CENTER_I_FRAME_ROOM_TOO_BIG = -102027, ///< 请求 I 帧房间用户太多 - ERR_SERVER_CENTER_I_FRAME_RPS_INVALID_PARAMETER = -102028, ///< 请求 I 帧参数错误 - ERR_SERVER_CENTER_INVALID_ROOM_ID = -102029, ///< 房间号非法 - ERR_SERVER_CENTER_ROOM_ID_TOO_LONG = -102030, ///< 房间号超过限制 - ERR_SERVER_CENTER_ROOM_FULL = -102052, ///< 房间满员 - ERR_SERVER_CENTER_DECODE_JSON_FAIL = -102053, ///< JSON 串解析失败 - ERR_SERVER_CENTER_UNKNOWN_SUB_CMD = -102054, ///< 未定义命令字 - ERR_SERVER_CENTER_INVALID_ROLE = -102055, ///< 未定义角色 - ERR_SERVER_CENTER_REACH_PROXY_MAX = -102056, ///< 代理机超出限制 - ERR_SERVER_CENTER_RECORDID_STORE = -102057, ///< 无法保存用户自定义 recordId - ERR_SERVER_CENTER_PB_SERIALIZE = -102058, ///< Protobuf 序列化错误 - - ERR_SERVER_SSO_SIG_EXPIRED = -70001, ///< sig 过期,请尝试重新生成。如果是刚生成,就过期,请检查有效期填写的是否过小,或者填的 0 - ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_1 = 
-70003, ///< sig 校验失败,请确认下 sig 内容是否被截断,如缓冲区长度不够导致的内容截断 - ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_2 = -70004, ///< sig 校验失败,请确认下 sig 内容是否被截断,如缓冲区长度不够导致的内容截断 - ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_3 = -70005, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 - ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_4 = -70006, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 - ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_5 = -70007, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 - ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_6 = -70008, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 - ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_7 = -70009, ///< 用业务公钥验证 sig 失败,请确认生成的 usersig 使用的私钥和 sdkAppId 是否对应 - ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_8 = -70010, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 - ERR_SERVER_SSO_SIG_VERIFICATION_ID_NOT_MATCH = -70013, ///< sig 中 identifier 与请求时的 identifier 不匹配,请检查登录时填写的 identifier 与 sig 中的是否一致 - ERR_SERVER_SSO_APPID_NOT_MATCH = -70014, ///< sig 中 sdkAppId 与请求时的 sdkAppId 不匹配,请检查登录时填写的 sdkAppId 与 sig 中的是否一致 - ERR_SERVER_SSO_VERIFICATION_EXPIRED = -70017, ///< 内部第三方票据验证超时,请重试,如多次重试不成功,请@TLS 帐号支持,QQ 3268519604 - ERR_SERVER_SSO_VERIFICATION_FAILED = -70018, ///< 内部第三方票据验证超时,请重试,如多次重试不成功,请@TLS 帐号支持,QQ 3268519604 + // 网络相关错误码 + ///////////////////////////////////////////////////////////////////////////////// + ERR_TRTC_ENTER_ROOM_FAILED = -3301, ///< 进入房间失败,请查看 onError 中的 -3301 对应的 msg 提示确认失败原因 + ERR_TRTC_REQUEST_IP_TIMEOUT = -3307, ///< 请求 IP 和 sig 超时,请检查网络是否正常,或网络防火墙是否放行 UDP。可尝试访问下列 IP:162.14.22.165:8000 162.14.6.105:8000 和域名:default-query.trtc.tencent-cloud.com:8000 + ERR_TRTC_CONNECT_SERVER_TIMEOUT = -3308, ///< 请求进房超时,请检查是否断网或者是否开启vpn,您也可以切换4G进行测试确认 + ERR_TRTC_ROOM_PARAM_NULL = -3316, ///< 进房参数为空,请检查: enterRoom:appScene: 接口调用是否传入有效的 param + ERR_TRTC_INVALID_SDK_APPID = -3317, ///< 进房参数 sdkAppId 错误,请检查 TRTCParams.sdkAppId 是否为空 + ERR_TRTC_INVALID_ROOM_ID = -3318, ///< 进房参数 roomId 错误,请检查 TRTCParams.roomId 或 TRTCParams.strRoomId 是否为空,注意 roomId 和 strRoomId 不可混用 + ERR_TRTC_INVALID_USER_ID = -3319, ///< 进房参数 userId 不正确,请检查 TRTCParams.userId 
是否为空 + ERR_TRTC_INVALID_USER_SIG = -3320, ///< 进房参数 userSig 不正确,请检查 TRTCParams.userSig 是否为空 + ERR_TRTC_ENTER_ROOM_REFUSED = -3340, ///< 进房请求被拒绝,请检查是否连续调用 enterRoom 进入相同 Id 的房间 + ERR_TRTC_INVALID_PRIVATE_MAPKEY = -100006, ///< 您开启了高级权限控制,但参数 TRTCParams.privateMapKey 校验失败,您可参考 https://cloud.tencent.com/document/product/647/32240 进行检查 + ERR_TRTC_SERVICE_SUSPENDED = -100013, ///< 服务不可用。请检查:套餐包剩余分钟数是否大于0,腾讯云账号是否欠费。您可参考 https://cloud.tencent.com/document/product/647/50492 进行查看与配置 + ERR_TRTC_USER_SIG_CHECK_FAILED = -100018, ///< UserSig 校验失败,请检查参数 TRTCParams.userSig 是否填写正确,或是否已经过期。您可参考 https://cloud.tencent.com/document/product/647/50686 进行校验 + + ERR_TRTC_PUSH_THIRD_PARTY_CLOUD_TIMEOUT = -3321, ///< 旁路转推请求超时 + ERR_TRTC_PUSH_THIRD_PARTY_CLOUD_FAILED = -3323, ///< 旁路转推回包异常 + ERR_TRTC_MIX_TRANSCODING_TIMEOUT = -3322, ///< 云端混流请求超时 + ERR_TRTC_MIX_TRANSCODING_FAILED = -3324, ///< 云端混流回包异常 + + ERR_TRTC_START_PUBLISHING_TIMEOUT = -3333, ///< 开始向腾讯云的直播 CDN 推流信令超时 + ERR_TRTC_START_PUBLISHING_FAILED = -3334, ///< 开始向腾讯云的直播 CDN 推流信令异常 + ERR_TRTC_STOP_PUBLISHING_TIMEOUT = -3335, ///< 停止向腾讯云的直播 CDN 推流信令超时 + ERR_TRTC_STOP_PUBLISHING_FAILED = -3336, ///< 停止向腾讯云的直播 CDN 推流信令异常 + + ERR_TRTC_CONNECT_OTHER_ROOM_TIMEOUT = -3326, ///< 请求连麦超时 + ERR_TRTC_DISCONNECT_OTHER_ROOM_TIMEOUT = -3327, ///< 请求退出连麦超时 + ERR_TRTC_CONNECT_OTHER_ROOM_INVALID_PARAMETER = -3328, ///< 无效参数 + ERR_TRTC_CONNECT_OTHER_ROOM_AS_AUDIENCE = -3330, ///< 当前是观众角色,不能请求或断开跨房连麦,需要先 switchRole() 到主播 - ERR_SERVER_SSO_APPID_NOT_FOUND = -70020, ///< sdkAppId 未找到,请确认是否已经在腾讯云上配置 - ERR_SERVER_SSO_ACCOUNT_IN_BLACKLIST = -70051, ///< 帐号已被拉入黑名单,请联系 TLS 帐号支持 QQ 3268519604 - ERR_SERVER_SSO_SIG_INVALID = -70052, ///< usersig 已经失效,请重新生成,再次尝试 - ERR_SERVER_SSO_LIMITED_BY_SECURITY = -70114, ///< 安全原因被限制 - ERR_SERVER_SSO_INVALID_LOGIN_STATUS = -70221, ///< 登录状态无效,请使用 usersig 重新鉴权 - ERR_SERVER_SSO_APPID_ERROR = -70252, ///< sdkAppId 填写错误 - ERR_SERVER_SSO_TICKET_VERIFICATION_FAILED = -70346, ///< 票据校验失败,请检查各项参数是否正确 - ERR_SERVER_SSO_TICKET_EXPIRED 
= -70347, ///< 票据因过期原因校验失败 - ERR_SERVER_SSO_ACCOUNT_EXCEED_PURCHASES = -70398, ///< 创建账号数量超过已购买预付费数量限制 - ERR_SERVER_SSO_INTERNAL_ERROR = -70500, ///< 服务器内部错误,请重试 - - //秒级监控上报错误码 - ERR_REQUEST_QUERY_CONFIG_TIMEOUT = -4001, ///< 请求通用配置超时 - ERR_CUSTOM_STREAM_INVALID = -4002, ///< 自定义流id错误 - ERR_USER_DEFINE_RECORD_ID_INVALID = -4003, ///< userDefineRecordId错误 - ERR_MIX_PARAM_INVALID = -4004, ///< 混流参数校验失败 - ERR_REQUEST_ACC_BY_HOST_IP = -4005, ///< 通过域名进行0x1请求 - // - /Remove From Head } TXLiteAVError; ///////////////////////////////////////////////////////////////////////////////// // // 警告码 // -//> 不需要特别关注,但您可以根据其中某些感兴趣的警告码,对当前用户进行相应的提示 -// ///////////////////////////////////////////////////////////////////////////////// typedef enum TXLiteAVWarning { - WARNING_HW_ENCODER_START_FAIL = 1103, ///< 硬编码启动出现问题,自动切换到软编码 + ///////////////////////////////////////////////////////////////////////////////// + // 视频相关警告码 + ///////////////////////////////////////////////////////////////////////////////// + WARNING_HW_ENCODER_START_FAIL = 1103, ///< 硬编码启动出现问题,自动切换到软编码 WARNING_CURRENT_ENCODE_TYPE_CHANGED = 1104, ///< 当前编码格式, 通过key 为type获取,值为1时是265编码,值为0时是264编码 WARNING_VIDEO_ENCODER_SW_TO_HW = 1107, ///< 当前 CPU 使用率太高,无法满足软件编码需求,自动切换到硬件编码 WARNING_INSUFFICIENT_CAPTURE_FPS = 1108, ///< 摄像头采集帧率不足,部分自带美颜算法的 Android 手机上会出现 @@ -310,117 +116,65 @@ typedef enum TXLiteAVWarning WARNING_REDUCE_CAPTURE_RESOLUTION = 1110, ///< 摄像头采集分辨率被降低,以满足当前帧率和性能最优解。 WARNING_CAMERA_DEVICE_EMPTY = 1111, ///< 没有检测到可用的摄像头设备 WARNING_CAMERA_NOT_AUTHORIZED = 1112, ///< 用户未授权当前应用使用摄像头 - WARNING_MICROPHONE_DEVICE_EMPTY = 1201, ///< 没有检测到可用的麦克风设备 - WARNING_SPEAKER_DEVICE_EMPTY = 1202, ///< 没有检测到可用的扬声器设备 - WARNING_MICROPHONE_NOT_AUTHORIZED = 1203, ///< 用户未授权当前应用使用麦克风 - WARNING_MICROPHONE_DEVICE_ABNORMAL = 1204, ///< 音频采集设备不可用(例如被占用或者PC判定无效设备) - WARNING_SPEAKER_DEVICE_ABNORMAL = 1205, ///< 音频播放设备不可用(例如被占用或者PC判定无效设备) WARNING_SCREEN_CAPTURE_NOT_AUTHORIZED = 1206, ///< 用户未授权当前应用使用屏幕录制 WARNING_VIDEO_FRAME_DECODE_FAIL = 2101, 
///< 当前视频帧解码失败 - WARNING_AUDIO_FRAME_DECODE_FAIL = 2102, ///< 当前音频帧解码失败 - WARNING_VIDEO_PLAY_LAG = 2105, ///< 当前视频播放出现卡顿 WARNING_HW_DECODER_START_FAIL = 2106, ///< 硬解启动失败,采用软解码 WARNING_VIDEO_DECODER_HW_TO_SW = 2108, ///< 当前流硬解第一个 I 帧失败,SDK 自动切软解 WARNING_SW_DECODER_START_FAIL = 2109, ///< 软解码器启动失败 WARNING_VIDEO_RENDER_FAIL = 2110, ///< 视频渲染失败 - WARNING_START_CAPTURE_IGNORED = 4000, ///< 已经在采集,启动采集被忽略 + + ///////////////////////////////////////////////////////////////////////////////// + // 音频相关警告码 + ///////////////////////////////////////////////////////////////////////////////// + WARNING_MICROPHONE_DEVICE_EMPTY = 1201, ///< 没有检测到可用的麦克风设备 + WARNING_SPEAKER_DEVICE_EMPTY = 1202, ///< 没有检测到可用的扬声器设备 + WARNING_MICROPHONE_NOT_AUTHORIZED = 1203, ///< 用户未授权当前应用使用麦克风 + WARNING_MICROPHONE_DEVICE_ABNORMAL = 1204, ///< 音频采集设备不可用(例如被占用或者PC判定无效设备) + WARNING_SPEAKER_DEVICE_ABNORMAL = 1205, ///< 音频播放设备不可用(例如被占用或者PC判定无效设备) + WARNING_AUDIO_FRAME_DECODE_FAIL = 2102, ///< 当前音频帧解码失败 WARNING_AUDIO_RECORDING_WRITE_FAIL = 7001, ///< 音频录制写入文件失败 - WARNING_ROOM_DISCONNECT = 5101, ///< 网络断开连接 - WARNING_IGNORE_UPSTREAM_FOR_AUDIENCE = 6001, ///< 当前是观众角色,忽略上行音视频数据 - - // - Remove From Head - WARNING_NET_BUSY = 1101, ///< 网络状况不佳:上行带宽太小,上传数据受阻 - WARNING_RTMP_SERVER_RECONNECT = 1102, ///< 直播,网络断连, 已启动自动重连(自动重连连续失败超过三次会放弃) - WARNING_LIVE_STREAM_SERVER_RECONNECT = 2103, ///< 直播,网络断连, 已启动自动重连(自动重连连续失败超过三次会放弃) - WARNING_RECV_DATA_LAG = 2104, ///< 网络来包不稳:可能是下行带宽不足,或由于主播端出流不均匀 - WARNING_RTMP_DNS_FAIL = 3001, ///< 直播,DNS 解析失败 - WARNING_RTMP_SEVER_CONN_FAIL = 3002, ///< 直播,服务器连接失败 - WARNING_RTMP_SHAKE_FAIL = 3003, ///< 直播,与 RTMP 服务器握手失败 - WARNING_RTMP_SERVER_BREAK_CONNECT = 3004, ///< 直播,服务器主动断开 - WARNING_RTMP_READ_WRITE_FAIL = 3005, ///< 直播,RTMP 读/写失败,将会断开连接 - WARNING_RTMP_WRITE_FAIL = 3006, ///< 直播,RTMP 写失败(SDK 内部错误码,不会对外抛出) - WARNING_RTMP_READ_FAIL = 3007, ///< 直播,RTMP 读失败(SDK 内部错误码,不会对外抛出) - WARNING_RTMP_NO_DATA = 3008, ///< 直播,超过30s 没有数据发送,主动断开连接 - WARNING_PLAY_LIVE_STREAM_INFO_CONNECT_FAIL = 3009, 
///< 直播,connect 服务器调用失败(SDK 内部错误码,不会对外抛出) - WARNING_NO_STEAM_SOURCE_FAIL = 3010, ///< 直播,连接失败,该流地址无视频(SDK 内部错误码,不会对外抛出) - WARNING_ROOM_RECONNECT = 5102, ///< 网络断连,已启动自动重连 - WARNING_ROOM_NET_BUSY = 5103, ///< 网络状况不佳:上行带宽太小,上传数据受阻 - // - /Remove From Head + + ///////////////////////////////////////////////////////////////////////////////// + // 网络相关警告码 + ///////////////////////////////////////////////////////////////////////////////// + WARNING_IGNORE_UPSTREAM_FOR_AUDIENCE = 6001, ///< 当前是观众角色,不支持发布音视频,需要先切换成主播角色 } TXLiteAVWarning; -// - Remove From Head + ///////////////////////////////////////////////////////////////////////////////// // -// (三)事件列表 +// 兼容定义(用于兼容老版本的错误码定义,请在代码中尽量使用右侧的新定义) // ///////////////////////////////////////////////////////////////////////////////// - -typedef enum TXLiteAVEvent -{ - EVT_RTMP_PUSH_CONNECT_SUCC = 1001, ///< 直播,已经连接 RTMP 推流服务器 - EVT_RTMP_PUSH_BEGIN = 1002, ///< 直播,已经与 RTMP 服务器握手完毕,开始推流 - EVT_CAMERA_START_SUCC = 1003, ///< 打开摄像头成功 - EVT_SCREEN_CAPTURE_SUCC = 1004, ///< 录屏启动成功 - EVT_UP_CHANGE_RESOLUTION = 1005, ///< 上行动态调整分辨率 - EVT_UP_CHANGE_BITRATE = 1006, ///< 码率动态调整 - EVT_FIRST_FRAME_AVAILABLE = 1007, ///< 首帧画面采集完成 - EVT_START_VIDEO_ENCODER = 1008, ///< 编码器启动成功 - EVT_SNAPSHOT_COMPLETE = 1022, ///< 一帧截图完成 - EVT_CAMERA_REMOVED = 1023, ///< 摄像头设备已被移出(Windows 和 Mac 版 SDK 使用) - EVT_CAMERA_AVAILABLE = 1024, ///< 摄像头设备重新可用(Windows 和 Mac 版 SDK 使用) - EVT_CAMERA_CLOSE = 1025, ///< 关闭摄像头完成(Windows 和 Mac 版 SDK 使用) - EVT_RTMP_PUSH_PUBLISH_START = 1026, ///< 直播,与 RTMP 服务器连接后,收到 NetStream.Publish.Start 消息,表明流发布成功(SDK 内部事件,不会对外抛出) - EVT_HW_ENCODER_START_SUCC = 1027, ///< 硬编码器启动成功 - EVT_SW_ENCODER_START_SUCC = 1028, ///< 软编码器启动成功 - EVT_LOCAL_RECORD_RESULT = 1029, ///< 本地录制结果 - EVT_LOCAL_RECORD_PROGRESS = 1030, ///< 本地录制状态通知 - - EVT_PLAY_LIVE_STREAM_CONNECT_SUCC = 2001, ///< 直播,已经连接 RTMP 拉流服务器 - EVT_PLAY_LIVE_STREAM_BEGIN = 2002, ///< 直播,已经与 RTMP 服务器握手完毕,开始拉流 - EVT_RENDER_FIRST_I_FRAME = 2003, ///< 渲染首个视频数据包(IDR) - EVT_VIDEO_PLAY_BEGIN = 2004, 
///< 视频播放开始 - EVT_VIDEO_PLAY_PROGRESS = 2005, ///< 视频播放进度 - EVT_VIDEO_PLAY_END = 2006, ///< 视频播放结束 - EVT_VIDEO_PLAY_LOADING = 2007, ///< 视频播放 loading - EVT_START_VIDEO_DECODER = 2008, ///< 解码器启动 - EVT_DOWN_CHANGE_RESOLUTION = 2009, ///< 下行视频分辨率改变 - EVT_GET_VODFILE_MEDIAINFO_SUCC = 2010, ///< 点播,获取点播文件信息成功 - EVT_VIDEO_CHANGE_ROTATION = 2011, ///< 视频旋转角度发生改变 - EVT_PLAY_GET_MESSAGE = 2012, ///< 消息事件 - EVT_VOD_PLAY_PREPARED = 2013, ///< 点播,视频加载完毕 - EVT_VOD_PLAY_LOADING_END = 2014, ///< 点播,loading 结束 - EVT_PLAY_LIVE_STREAM_SWITCH_SUCC = 2015, ///< 直播,切流成功(切流可以播放不同画面大小的视频) - EVT_VOD_PLAY_TCP_CONNECT_SUCC = 2016, ///< 点播,TCP 连接成功(SDK 内部事件,不会对外抛出) - EVT_VOD_PLAY_FIRST_VIDEO_PACKET = 2017, ///< 点播,收到首帧数据(SDK 内部事件,不会对外抛出) - EVT_VOD_PLAY_DNS_RESOLVED = 2018, ///< 点播,DNS 解析完成(SDK 内部事件,不会对外抛出) - EVT_VOD_PLAY_SEEK_COMPLETE = 2019, ///< 点播,视频播放 Seek 完成(SDK 内部事件,不会对外抛出) - EVT_VIDEO_DECODER_CACHE_TOO_MANY_FRAMES = 2020, ///< 视频解码器缓存帧数过多,超过40帧(SDK 内部事件,不会对外抛出) - EVT_HW_DECODER_START_SUCC = 2021, ///< 硬解码器启动成功(SDK 内部事件,不会对外抛出) - EVT_SW_DECODER_START_SUCC = 2022, ///< 软解码器启动成功(SDK 内部事件,不会对外抛出) - EVT_AUDIO_JITTER_STATE_FIRST_LOADING = 2023, ///< 音频首次加载(SDK 内部事件,不会对外抛出) - EVT_AUDIO_JITTER_STATE_LOADING = 2024, ///< 音频正在加载(SDK 内部事件,不会对外抛出) - EVT_AUDIO_JITTER_STATE_PLAYING = 2025, ///< 音频正在播放(SDK 内部事件,不会对外抛出) - EVT_AUDIO_JITTER_STATE_FIRST_PLAY = 2026, ///< 音频首次播放(SDK 内部事件,不会对外抛出) - EVT_MIC_START_SUCC = 2027, ///< 麦克风启动成功 - EVT_PLAY_GET_METADATA = 2028, ///< 视频流MetaData事件 - EVT_MIC_RELEASE_SUCC = 2029, ///< 释放麦克风占用 - EVT_AUDIO_DEVICE_ROUTE_CHANGED = 2030, ///< 音频设备的route发生改变,即当前的输入输出设备发生改变,比如耳机被拔出 - EVT_PLAY_GET_FLVSESSIONKEY = 2031, ///< TXLivePlayer 接收到http响应头中的 flvSessionKey 信息 - - EVT_ROOM_ENTER = 1018, ///< 进入房间成功 - EVT_ROOM_EXIT = 1019, ///< 退出房间 - EVT_ROOM_USERLIST = 1020, ///< 下发房间成员列表(不包括自己) - EVT_ROOM_NEED_REENTER = 1021, ///< WiFi 切换到4G 会触发断线重连,此时需要重新进入房间(拉取最优的服务器地址) - EVT_ROOM_ENTER_FAILED = 1022, ///< 自己进入房间失败 - EVT_ROOM_USER_ENTER = 1031, ///< 进房通知 - EVT_ROOM_USER_EXIT = 
1032, ///< 退房通知 - EVT_ROOM_USER_VIDEO_STATE = 1033, ///< 视频状态位变化通知 - EVT_ROOM_USER_AUDIO_STATE = 1034, ///< 音频状态位变化通知 - - EVT_ROOM_REQUEST_IP_SUCC = 8001, ///< 拉取接口机服务器地址成功 - EVT_ROOM_CONNECT_SUCC = 8002, ///< 连接接口机服务器成功 - EVT_ROOM_REQUEST_AVSEAT_SUCC = 8003, ///< 请求视频位成功 -} TXLiteAVEvent; -// - /Remove From Head - +#define ERR_ROOM_ENTER_FAIL ERR_TRTC_ENTER_ROOM_FAILED +#define ERR_ROOM_REQUEST_IP_TIMEOUT ERR_TRTC_REQUEST_IP_TIMEOUT +#define ERR_ROOM_REQUEST_ENTER_ROOM_TIMEOUT ERR_TRTC_CONNECT_SERVER_TIMEOUT + +#define ERR_ENTER_ROOM_PARAM_NULL ERR_TRTC_ROOM_PARAM_NULL +#define ERR_SDK_APPID_INVALID ERR_TRTC_INVALID_SDK_APPID +#define ERR_ROOM_ID_INVALID ERR_TRTC_INVALID_ROOM_ID +#define ERR_USER_ID_INVALID ERR_TRTC_INVALID_USER_ID +#define ERR_USER_SIG_INVALID ERR_TRTC_INVALID_USER_SIG +#define ERR_ROOM_REQUEST_ENTER_ROOM_REFUSED ERR_TRTC_ENTER_ROOM_REFUSED +#define ERR_SERVER_INFO_PRIVILEGE_FLAG_ERROR ERR_TRTC_INVALID_PRIVATE_MAPKEY +#define ERR_SERVER_INFO_SERVICE_SUSPENDED ERR_TRTC_SERVICE_SUSPENDED +#define ERR_SERVER_INFO_ECDH_GET_TINYID ERR_TRTC_USER_SIG_CHECK_FAILED +#define ERR_SERVER_CENTER_NO_PRIVILEDGE_PUSH_SUB_VIDEO ERR_SCREEN_SHARE_NOT_AUTHORIZED +#define ERR_SERVER_CENTER_ANOTHER_USER_PUSH_SUB_VIDEO ERR_SCREEN_SHRAE_OCCUPIED_BY_OTHER +#define ERR_PUBLISH_CDN_STREAM_REQUEST_TIME_OUT ERR_TRTC_PUSH_THIRD_PARTY_CLOUD_TIMEOUT +#define ERR_PUBLISH_CDN_STREAM_SERVER_FAILED ERR_TRTC_PUSH_THIRD_PARTY_CLOUD_FAILED +#define ERR_CLOUD_MIX_TRANSCODING_REQUEST_TIME_OUT ERR_TRTC_MIX_TRANSCODING_TIMEOUT +#define ERR_CLOUD_MIX_TRANSCODING_SERVER_FAILED ERR_TRTC_MIX_TRANSCODING_FAILED + +#define ERR_ROOM_REQUEST_START_PUBLISHING_TIMEOUT ERR_TRTC_START_PUBLISHING_TIMEOUT +#define ERR_ROOM_REQUEST_START_PUBLISHING_ERROR ERR_TRTC_START_PUBLISHING_FAILED +#define ERR_ROOM_REQUEST_STOP_PUBLISHING_TIMEOUT ERR_TRTC_STOP_PUBLISHING_TIMEOUT +#define ERR_ROOM_REQUEST_STOP_PUBLISHING_ERROR ERR_TRTC_STOP_PUBLISHING_FAILED + +#define ERR_ROOM_REQUEST_CONN_ROOM_TIMEOUT 
ERR_TRTC_CONNECT_OTHER_ROOM_TIMEOUT +#define ERR_ROOM_REQUEST_DISCONN_ROOM_TIMEOUT ERR_TRTC_DISCONNECT_OTHER_ROOM_TIMEOUT +#define ERR_ROOM_REQUEST_CONN_ROOM_INVALID_PARAM ERR_TRTC_CONNECT_OTHER_ROOM_INVALID_PARAMETER +#define ERR_CONNECT_OTHER_ROOM_AS_AUDIENCE ERR_TRTC_CONNECT_OTHER_ROOM_AS_AUDIENCE + +// clang-format on #endif /* __TXLITEAVCODE_H__ */ diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVEncodedDataProcessingListener.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVEncodedDataProcessingListener.h index 33e79d2..52d301c 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVEncodedDataProcessingListener.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVEncodedDataProcessingListener.h @@ -1,54 +1,65 @@ -/* -* Module: live 编码数据回调 -* -* Function: 回调推流端编码完,和 播放端解码前的数据 -* -*/ +// Copyright (c) 2021 Tencent. All rights reserved. - -#ifndef TXLiteAVEncodedDataProcessingListener_h -#define TXLiteAVEncodedDataProcessingListener_h +#ifndef SDK_TRTC_INCLUDE_ITXLITEAVENCODEDDATAPROCESSINGLISTENER_H_ +#define SDK_TRTC_INCLUDE_ITXLITEAVENCODEDDATAPROCESSINGLISTENER_H_ #include <stdio.h> + #include "TXLiteAVBuffer.h" +#ifdef __cplusplus + namespace liteav { -struct TXLiteAVEncodedData { - const char * userId; // didEncodeVideo 和 didEncodeAudio 回调时,此字段为null; - int streamType; // 视频流类型,参考 TRTCVideoStreamType,audio时,此字段为0 - const liteav::TXLiteAVBuffer * originData; // 原始数据 - liteav::TXLiteAVBuffer * processedData; // 写回处理后的数据 +struct LITEAV_EXPORT TXLiteAVEncodedData { + // didEncodeVideo 和 didEncodeAudio 回调时,此字段为 null; + const char* userId; + + // 视频流类型,参考 TRTCVideoStreamType,audio 时,此字段为0 + int streamType; + + // 原始数据 + const liteav::TXLiteAVBuffer* originData; + + // 写回处理后的数据 + liteav::TXLiteAVBuffer* processedData; }; -class ITXLiteAVEncodedDataProcessingListener { -public: - virtual ~ITXLiteAVEncodedDataProcessingListener() {} - - /** - * 回调编码完的视频数据。 - * @note videoData.userId = nullptr - */ - virtual bool 
didEncodeVideo(TXLiteAVEncodedData & videoData) { return false; } - - /** - * 回调解码前的视频数据。 - * @note videoData.userId 表示对应的user,当userId 为 nullptr时,表示此时先接收到数据了,对应的userId还未完成同步。获取到userId之后会回调正确的userId - */ - virtual bool willDecodeVideo(TXLiteAVEncodedData & videoData) { return false; } - - /** - * 回调编码完的音频数据。 - * @note audioData.userId = nullptr - */ - virtual bool didEncodeAudio(TXLiteAVEncodedData & audioData) { return false; } - - /** - * 回调解码前的音频数据。 - * @note audioData.userId 表示对应的user,当userId 为 nullptr时,表示此时先接收到数据了,对应的userId还未完成同步。获取到userId之后会回调正确的userId - */ - virtual bool willDecodeAudio(TXLiteAVEncodedData & audioData) { return false; } +class LITEAV_EXPORT ITXLiteAVEncodedDataProcessingListener { + public: + virtual ~ITXLiteAVEncodedDataProcessingListener() {} + + /** + * 回调编码完的视频数据。 + * @note videoData.userId = nullptr + */ + virtual bool didEncodeVideo(TXLiteAVEncodedData& videoData) = 0; + + /** + * 回调解码前的视频数据。 + * @note videoData.userId 表示对应的 user,当userId 为 + * nullptr 时,表示此时先接收到数据了,对应的 userId 还未完成同步。获取到 + * userId 之后会回调正确的 userId + */ + virtual bool willDecodeVideo(TXLiteAVEncodedData& videoData) = 0; + + /** + * 回调编码完的音频数据。 + * @note audioData.userId = nullptr + */ + virtual bool didEncodeAudio(TXLiteAVEncodedData& audioData) = 0; + + /** + * 回调解码前的音频数据。 + * @note audioData.userId 表示对应的 user,当 userId 为 + * nullptr 时,表示此时先接收到数据了,对应的 userId 还未完成同步。获取到 + * userId 之后会回调正确的 userId + */ + virtual bool willDecodeAudio(TXLiteAVEncodedData& audioData) = 0; }; -} -#endif /* TXLiteAVEncodedDataProcessingListener_h */ +} // namespace liteav + +#endif // __cplusplus + +#endif // SDK_TRTC_INCLUDE_ITXLITEAVENCODEDDATAPROCESSINGLISTENER_H_ diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVSDK.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVSDK.h index eddca64..a99f284 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVSDK.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVSDK.h @@ -1,30 +1,35 @@ -// -// 
TXLiteAVSDK.h -// TXLiteAVSDK -// -// Created by alderzhang on 2017/6/9. -// Copyright © 2017年 Tencent. All rights reserved. -// - - +/* + * Copyright (c) 2022 Tencent. All Rights Reserved. + * + */ +#import <TXLiteAVSDK_TRTC/TXLiteAVCode.h> +#import <TXLiteAVSDK_TRTC/TXLiteAVSymbolExport.h> +#import <TXLiteAVSDK_TRTC/ITRTCAudioPacketListener.h> +#import <TXLiteAVSDK_TRTC/TXLiteAVBuffer.h> +#import <TXLiteAVSDK_TRTC/TXLiteAVEncodedDataProcessingListener.h> #import <TXLiteAVSDK_TRTC/TRTCCloud.h> #import <TXLiteAVSDK_TRTC/TRTCCloudDef.h> #import <TXLiteAVSDK_TRTC/TRTCCloudDelegate.h> #import <TXLiteAVSDK_TRTC/TRTCStatistics.h> -#import <TXLiteAVSDK_TRTC/TXAudioCustomProcessDelegate.h> -#import <TXLiteAVSDK_TRTC/TXAudioEffectManager.h> -#import <TXLiteAVSDK_TRTC/TXAudioRawDataDelegate.h> -#import <TXLiteAVSDK_TRTC/TXBeautyManager.h> -#import <TXLiteAVSDK_TRTC/TXDeviceManager.h> -#import <TXLiteAVSDK_TRTC/TXLiteAVCode.h> -#import <TXLiteAVSDK_TRTC/TXLiveAudioSessionDelegate.h> -#import <TXLiteAVSDK_TRTC/TXLiveBase.h> #import <TXLiteAVSDK_TRTC/TXLivePlayConfig.h> -#import <TXLiteAVSDK_TRTC/TXLivePlayListener.h> +#import <TXLiteAVSDK_TRTC/TXAudioRawDataDelegate.h> #import <TXLiteAVSDK_TRTC/TXLivePlayer.h> -#import <TXLiteAVSDK_TRTC/TXLiveRecordListener.h> -#import <TXLiteAVSDK_TRTC/TXLiveRecordTypeDef.h> -#import <TXLiteAVSDK_TRTC/TXLiveSDKEventDef.h> #import <TXLiteAVSDK_TRTC/TXLiveSDKTypeDef.h> +#import <TXLiteAVSDK_TRTC/TXLivePlayListener.h> +#import <TXLiteAVSDK_TRTC/TXLiveRecordTypeDef.h> #import <TXLiteAVSDK_TRTC/TXVideoCustomProcessDelegate.h> +#import <TXLiteAVSDK_TRTC/TXAudioCustomProcessDelegate.h> +#import <TXLiteAVSDK_TRTC/TXLiveAudioSessionDelegate.h> +#import <TXLiteAVSDK_TRTC/TXLiveRecordListener.h> +#import <TXLiteAVSDK_TRTC/TXLiveBase.h> +#import <TXLiteAVSDK_TRTC/V2TXLivePremier.h> +#import <TXLiteAVSDK_TRTC/V2TXLiveCode.h> +#import <TXLiteAVSDK_TRTC/V2TXLiveDef.h> +#import <TXLiteAVSDK_TRTC/V2TXLiveProperty.h> +#import 
<TXLiteAVSDK_TRTC/V2TXLivePlayer.h> +#import <TXLiteAVSDK_TRTC/V2TXLivePlayerObserver.h> +#import <TXLiteAVSDK_TRTC/TXDeviceManager.h> +#import <TXLiteAVSDK_TRTC/TXAudioEffectManager.h> +#import <TXLiteAVSDK_TRTC/TXBeautyManager.h> +#import <TXLiteAVSDK_TRTC/TXLiveSDKEventDef.h> diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVSymbolExport.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVSymbolExport.h new file mode 100644 index 0000000..aeb6dc6 --- /dev/null +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVSymbolExport.h @@ -0,0 +1,11 @@ +// Copyright (c) 2021 Tencent. All rights reserved. +#ifndef SDK_COMMON_APPLE_TXLITEAVSYMBOLEXPORT_H_ +#define SDK_COMMON_APPLE_TXLITEAVSYMBOLEXPORT_H_ + +#if defined(BUILD_LITEAVSDK) +#define LITEAV_EXPORT __attribute__((visibility("default"))) +#else +#define LITEAV_EXPORT +#endif + +#endif // SDK_COMMON_APPLE_TXLITEAVSYMBOLEXPORT_H_ \ No newline at end of file diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveAudioSessionDelegate.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveAudioSessionDelegate.h index 2d6ed36..aa51ed9 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveAudioSessionDelegate.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveAudioSessionDelegate.h @@ -1,8 +1,11 @@ +// Copyright (c) 2021 Tencent. All rights reserved. 
+ #ifndef TXLiveAudioSessionDelegate_h #define TXLiveAudioSessionDelegate_h #import <AVFoundation/AVFoundation.h> +NS_ASSUME_NONNULL_BEGIN @protocol TXLiveAudioSessionDelegate <NSObject> #if TARGET_OS_IPHONE @@ -10,7 +13,9 @@ - (BOOL)setActive:(BOOL)active error:(NSError **)outError; @optional -- (BOOL)setActive:(BOOL)active withOptions:(AVAudioSessionSetActiveOptions)options error:(NSError **)outError; +- (BOOL)setActive:(BOOL)active + withOptions:(AVAudioSessionSetActiveOptions)options + error:(NSError **)outError; @optional - (BOOL)setMode:(NSString *)mode error:(NSError **)outError; @@ -19,10 +24,15 @@ - (BOOL)setCategory:(NSString *)category error:(NSError **)outError; @optional -- (BOOL)setCategory:(NSString *)category withOptions:(AVAudioSessionCategoryOptions)options error:(NSError **)outError; +- (BOOL)setCategory:(NSString *)category + withOptions:(AVAudioSessionCategoryOptions)options + error:(NSError **)outError; @optional -- (BOOL)setCategory:(NSString *)category mode:(NSString *)mode options:(AVAudioSessionCategoryOptions)options error:(NSError **)outError; +- (BOOL)setCategory:(NSString *)category + mode:(NSString *)mode + options:(AVAudioSessionCategoryOptions)options + error:(NSError **)outError; @optional - (BOOL)setPreferredIOBufferDuration:(NSTimeInterval)duration error:(NSError **)outError; @@ -35,6 +45,11 @@ @optional - (BOOL)overrideOutputAudioPort:(AVAudioSessionPortOverride)portOverride error:(NSError **)outError; +@optional +- (BOOL)setPreferredInput:(nullable AVAudioSessionPortDescription *)inPort + error:(NSError **)outError; + #endif @end #endif /* TXLiveAudioSessionDelegate_h */ +NS_ASSUME_NONNULL_END diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveBase.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveBase.h index 3bf1326..77013f1 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveBase.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveBase.h @@ -1,20 +1,25 @@ +// Copyright © 2020 
Tencent. All rights reserved. + +#import "TXLiteAVSymbolExport.h" #import "TXLiveAudioSessionDelegate.h" +NS_ASSUME_NONNULL_BEGIN + typedef NS_ENUM(NSInteger, TX_Enum_Type_LogLevel) { - ///输出所有级别的log - LOGLEVEL_VERBOSE = 0, - /// 输出 DEBUG,INFO,WARNING,ERROR 和 FATAL 级别的log - LOGLEVEL_DEBUG = 1, - /// 输出 INFO,WARNING,ERROR 和 FATAL 级别的log - LOGLEVEL_INFO = 2, - /// 只输出WARNING,ERROR 和 FATAL 级别的log - LOGLEVEL_WARN = 3, - /// 只输出ERROR 和 FATAL 级别的log - LOGLEVEL_ERROR = 4, - /// 只输出 FATAL 级别的log - LOGLEVEL_FATAL = 5, - /// 不输出任何sdk log - LOGLEVEL_NULL = 6, + ///输出所有级别的log + LOGLEVEL_VERBOSE = 0, + /// 输出 DEBUG,INFO,WARNING,ERROR 和 FATAL 级别的log + LOGLEVEL_DEBUG = 1, + /// 输出 INFO,WARNING,ERROR 和 FATAL 级别的log + LOGLEVEL_INFO = 2, + /// 只输出WARNING,ERROR 和 FATAL 级别的log + LOGLEVEL_WARN = 3, + /// 只输出ERROR 和 FATAL 级别的log + LOGLEVEL_ERROR = 4, + /// 只输出 FATAL 级别的log + LOGLEVEL_FATAL = 5, + /// 不输出任何sdk log + LOGLEVEL_NULL = 6, }; @protocol TXLiveBaseDelegate <NSObject> @@ -22,20 +27,34 @@ typedef NS_ENUM(NSInteger, TX_Enum_Type_LogLevel) { /** @brief Log回调 - @discussion + @discussion 1.实现TXLiveBaseDelegate,建议在一个比较早的初始化类中如AppDelegate 2.在初始化中设置此回调,eg:[TXLiveBase sharedInstance].delegate = self; 3.level类型参见TX_Enum_Type_LogLevel 4.module值暂无具体意义,目前为固定值TXLiteAVSDK */ -- (void)onLog:(NSString*)log LogLevel:(int)level WhichModule:(NSString*)module; +- (void)onLog:(NSString *)log LogLevel:(int)level WhichModule:(NSString *)module; + +/** + * @brief NTP 校时回调,调用 TXLiveBase updateNetworkTime 后会触发 + * @param errCode 0:表示校时成功且偏差在30ms以内,1:表示校时成功但偏差可能在 30ms + * 以上,-1:表示校时失败 + */ +- (void)onUpdateNetworkTime:(int)errCode message:(NSString *)errMsg; + +/** + @brief setLicenceURL 接口回调, result = 0 成功,负数失败。 + @discussion + 需在调用 setLicenceURL 前设置 delegate + */ +- (void)onLicenceLoaded:(int)result Reason:(NSString *)reason; @end -@interface TXLiveBase : NSObject +LITEAV_EXPORT @interface TXLiveBase : NSObject /// 通过这个delegate将全部log回调给SDK使用者,由SDK使用者来决定log如何处理 -@property (nonatomic, weak) 
id<TXLiveBaseDelegate> delegate; +@property(nonatomic, weak, nullable) id<TXLiveBaseDelegate> delegate; + (instancetype)sharedInstance; @@ -46,13 +65,15 @@ typedef NS_ENUM(NSInteger, TX_Enum_Type_LogLevel) { * @param env_config 需要接入的环境,SDK 默认接入的环境是:默认正式环境。 * @return 0:成功;其他:错误 * - * @note 目标市场为中国大陆的客户请不要调用此接口,如果目标市场为海外用户,请通过技术支持联系我们,了解 env_config 的配置方法,以确保 App 遵守 GDPR 标准。 + * @note + * 目标市场为中国大陆的客户请不要调用此接口,如果目标市场为海外用户,请通过技术支持联系我们,了解 + * env_config 的配置方法,以确保 App 遵守 GDPR 标准。 */ + (int)setGlobalEnv:(const char *)env_config; -/** 设置log输出级别 +/** + * 设置 log 输出级别 * @param level 参见 LOGLEVEL - * */ + (void)setLogLevel:(TX_Enum_Type_LogLevel)level; @@ -64,27 +85,60 @@ typedef NS_ENUM(NSInteger, TX_Enum_Type_LogLevel) { + (void)setAppVersion:(NSString *)verNum; -+ (void)setAudioSessionDelegate:(id<TXLiveAudioSessionDelegate>)delegate; ++ (void)setAudioSessionDelegate:(nullable id<TXLiveAudioSessionDelegate>)delegate; -/// 获取SDK版本信息 +/** + * @brief 获取 SDK 版本信息 + * @return SDK 版本信息 + */ + (NSString *)getSDKVersionStr; -/// 获取pitu版本信息 +/** + * @brief 获取 pitu 版本信息 + * @return pitu 版本信息 + */ + (NSString *)getPituSDKVersion; -/// 设置appID,云控使用 -+ (void)setAppID:(NSString*)appID; +/** + * @brief 设置 appID,云控使用 + */ ++ (void)setAppID:(NSString *)appID; -/// 设置sdk的licence下载url和key +/** + * @brief 设置 sdk 的 Licence 下载 url 和 key + */ + (void)setLicenceURL:(NSString *)url key:(NSString *)key; -/// 设置userId,用于数据上报 +/** + * @brief 设置 userId,用于数据上报 + */ + (void)setUserId:(NSString *)userId; -/// 获取 Licence 信息 +/** + * @brief 获取 Licence 信息 + * @return Licence 信息 + */ + (NSString *)getLicenceInfo; -/// 设置HEVC外部解码器工厂实例 +/** + * @brief 设置 HEVC 外部解码器工厂实例 + */ + (void)setExternalDecoderFactory:(id)decoderFactory; +/** + * 启动 NTP 校时服务 + * + * @return 0:启动成功;< 0:启动失败 + */ ++ (NSInteger)updateNetworkTime; + +/** + * 获取 NTP 时间戳(毫秒),请在收到 onUpdateNetworkTime 回调后使用 + * + * @return NTP 时间戳(毫秒),若返回 0:未启动 NTP 校时或校时失败,请重启校时 + */ ++ (NSInteger)getNetworkTimestamp; + @end + +NS_ASSUME_NONNULL_END \ No newline at 
end of file diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayConfig.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayConfig.h index c86260b..e40e378 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayConfig.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayConfig.h @@ -1,12 +1,7 @@ -/* - * Module: TXLivePlayConfig @ TXLiteAVSDK - * - * Function: 腾讯云直播播放器的参数配置模块 - * - * Version: <:Version:> - */ +// Copyright © 2020 Tencent. All rights reserved. #import <Foundation/Foundation.h> +#import "TXLiteAVSymbolExport.h" /// @defgroup TXLivePlayConfig_ios TXLivePlayConfig /// 腾讯云直播播放器的参数配置模块 @@ -17,7 +12,7 @@ * * 主要负责 TXLivePlayer 对应的参数设置,其中绝大多数设置项在播放开始之后再设置是无效的。 */ -@interface TXLivePlayConfig : NSObject +LITEAV_EXPORT @interface TXLivePlayConfig : NSObject ///////////////////////////////////////////////////////////////////////////////// // @@ -62,24 +57,29 @@ /// 标准直播流都会在最开始的阶段有一个 MetaData 数据头,该数据头支持定制。 /// 您可以通过 TXLivePushConfig 中的 metaData 属性设置一些自定义数据,再通过 TXLivePlayListener 中的 /// onPlayEvent(EVT_PLAY_GET_METADATA) 消息接收到这些数据。 -///【特别说明】每条音视频流中只能设置一个 MetaData 数据头,除非断网重连,否则 TXLivePlayer 的 EVT_PLAY_GET_METADATA 消息也只会收到一次。 +///【特别说明】每条音视频流中只能设置一个 MetaData 数据头,除非断网重连,否则 TXLivePlayer 的 +/// EVT_PLAY_GET_METADATA 消息也只会收到一次。 @property(nonatomic, assign) BOOL enableMetaData; ///【字段含义】是否开启 HTTP 头信息回调,默认值为 @“” -/// HTTP 响应头中除了“content-length”、“content-type”等标准字段,不同云服务商还可能会添加一些非标准字段。 -/// 比如腾讯云会在直播 CDN 的 HTTP-FLV 格式的直播流中增加 “X-Tlive-SpanId” 响应头,并在其中设置一个随机字符串,用来唯一标识一次直播。 -/// -/// 如果您在使用腾讯云的直播 CDN,可以设置 flvSessionKey 为 @“X-Tlive-SpanId”,SDK 会在 HTTP 响应头里解析这个字段, -/// 并通过 TXLivePlayListener 中的 onPlayEvent(EVT_PLAY_GET_FLVSESSIONKEY) 事件通知给您的 App。 +/// HTTP +/// 响应头中除了“content-length”、“content-type”等标准字段,不同云服务商还可能会添加一些非标准字段。 +/// 比如腾讯云会在直播 CDN 的 HTTP-FLV 格式的直播流中增加 “X-Tlive-SpanId” +/// 响应头,并在其中设置一个随机字符串,用来唯一标识一次直播。 /// -///【特别说明】每条音视频流中只能解析一个 flvSessionKey,除非断网重连,否则 EVT_PLAY_GET_FLVSESSIONKEY 只会抛送一次。 
-@property(nonatomic, copy) NSString* flvSessionKey; +/// 如果您在使用腾讯云的直播 CDN,可以设置 flvSessionKey 为 @“X-Tlive-SpanId”,SDK 会在 HTTP +/// 响应头里解析这个字段, 并通过 TXLivePlayListener 中的 onPlayEvent(EVT_PLAY_GET_FLVSESSIONKEY) +/// 事件通知给您的 App。 +/// +///【特别说明】每条音视频流中只能解析一个 flvSessionKey,除非断网重连,否则 +/// EVT_PLAY_GET_FLVSESSIONKEY 只会抛送一次。 +@property(nonatomic, copy) NSString *flvSessionKey; ///【字段含义】视频渲染对象回调的视频格式,默认值:kCVPixelFormatType_420YpCbCr8Planar -///【特别说明】支持:kCVPixelFormatType_420YpCbCr8Planar 和 kCVPixelFormatType_420YpCbCr8BiPlanarFullRange +///【特别说明】支持:kCVPixelFormatType_420YpCbCr8Planar 和 +/// kCVPixelFormatType_420YpCbCr8BiPlanarFullRange @property(nonatomic, assign) OSType playerPixelFormatType; - ///////////////////////////////////////////////////////////////////////////////// // // 待废弃设置项 @@ -90,7 +90,7 @@ @property(nonatomic, assign) BOOL enableNearestIP; ///【字段含义】RTMP 传输通道的类型,待废弃,默认值为:RTMP_CHANNEL_TYPE_AUTO -@property (nonatomic, assign) int rtmpChannelType; +@property(nonatomic, assign) int rtmpChannelType; ///【字段含义】视频缓存目录,点播 MP4、HLS 有效 @property NSString *cacheFolderPath; diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayListener.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayListener.h index 8325e61..c75427e 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayListener.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayListener.h @@ -1,31 +1,25 @@ -/* - * Module: TXLivePlayListener @ TXLiteAVSDK - * - * Function: 腾讯云直播播放的回调通知 - * - * Version: <:Version:> - */ - -#import <Foundation/Foundation.h> -#import "TXLiveSDKTypeDef.h" - -/// @defgroup TXLivePlayListener_ios TXLivePlayListener -/// 腾讯云直播播放的回调通知 -/// @{ -@protocol TXLivePlayListener <NSObject> - -/** - * 直播事件通知 - * @param EvtID 参见 TXLiveSDKEventDef.h - * @param param 参见 TXLiveSDKTypeDef.h - */ -- (void)onPlayEvent:(int)EvtID withParam:(NSDictionary *)param; - -/** - * 网络状态通知 - * @param param 参见 TXLiveSDKTypeDef.h - */ -- 
(void)onNetStatus:(NSDictionary *)param; - -@end -/// @} +// Copyright (c) 2021 Tencent. All rights reserved. + +#import <Foundation/Foundation.h> +#import "TXLiveSDKTypeDef.h" + +/// @defgroup TXLivePlayListener_ios TXLivePlayListener +/// 腾讯云直播播放的回调通知 +/// @{ +@protocol TXLivePlayListener <NSObject> + +/** + * 直播事件通知 + * @param EvtID 参见 TXLiveSDKEventDef.h + * @param param 参见 TXLiveSDKTypeDef.h + */ +- (void)onPlayEvent:(int)evtID withParam:(NSDictionary *)param; + +/** + * 网络状态通知 + * @param param 参见 TXLiveSDKTypeDef.h + */ +- (void)onNetStatus:(NSDictionary *)param; + +@end +/// @} diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayer.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayer.h index 97414d8..0fe14ba 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayer.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayer.h @@ -1,20 +1,14 @@ -/* - * - * Module: TXLivePlayer @ TXLiteAVSDK - * - * Function: 腾讯云直播播放器 - * - * Version: <:Version:> - */ +// Copyright © 2020 Tencent. All rights reserved. 
#import <Foundation/Foundation.h> -#import "TXLiveSDKTypeDef.h" -#import "TXLivePlayListener.h" +#import "TXAudioRawDataDelegate.h" #import "TXLivePlayConfig.h" -#import "TXVideoCustomProcessDelegate.h" -#import "TXLiveRecordTypeDef.h" +#import "TXLivePlayListener.h" #import "TXLiveRecordListener.h" -#import "TXAudioRawDataDelegate.h" +#import "TXLiveRecordTypeDef.h" +#import "TXLiveSDKEventDef.h" +#import "TXLiveSDKTypeDef.h" +#import "TXVideoCustomProcessDelegate.h" /// @defgroup TXLivePlayer_ios TXLivePlayer /// 腾讯云直播播放器接口类 @@ -26,23 +20,25 @@ * @note 新版本的点播 SDK,推荐参考 TXVodPlayer.h */ typedef NS_ENUM(NSInteger, TX_Enum_PlayType) { - /// RTMP 直播 - PLAY_TYPE_LIVE_RTMP = 0, - /// FLV 直播 - PLAY_TYPE_LIVE_FLV = 1, - /// FLV 点播 - PLAY_TYPE_VOD_FLV = 2, - /// HLS 点播 - PLAY_TYPE_VOD_HLS = 3, - /// MP4点播 - PLAY_TYPE_VOD_MP4 = 4, - /// RTMP 直播加速播放 - PLAY_TYPE_LIVE_RTMP_ACC = 5, - /// 本地视频文件 - PLAY_TYPE_LOCAL_VIDEO = 6, +#ifndef TX_PLAY_TYPE +#define TX_PLAY_TYPE + /// RTMP 直播 + PLAY_TYPE_LIVE_RTMP = 0, + /// FLV 直播 + PLAY_TYPE_LIVE_FLV = 1, + /// FLV 点播 + PLAY_TYPE_VOD_FLV = 2, + /// HLS 点播 + PLAY_TYPE_VOD_HLS = 3, + /// MP4点播 + PLAY_TYPE_VOD_MP4 = 4, + /// RTMP 直播加速播放 + PLAY_TYPE_LIVE_RTMP_ACC = 5, + /// 本地视频文件 + PLAY_TYPE_LOCAL_VIDEO = 6, +#endif }; - /** * 视频播放器 * @@ -51,7 +47,7 @@ typedef NS_ENUM(NSInteger, TX_Enum_PlayType) { * - 针对腾讯云的拉流地址,可使用直播时移功能,能够实现直播观看与时移观看的无缝切换。 * - 支持自定义的音视频数据处理,让您可以根据项目需要处理直播流中的音视频数据后,进行渲染以及播放。 */ -@interface TXLivePlayer : NSObject +LITEAV_EXPORT @interface TXLivePlayer : NSObject ///////////////////////////////////////////////////////////////////////////////// // @@ -64,17 +60,17 @@ typedef NS_ENUM(NSInteger, TX_Enum_PlayType) { /** * 1.1 设置播放回调,见 “TXLivePlayListener.h” 文件中的详细定义 */ -@property(nonatomic, weak) id <TXLivePlayListener> delegate; +@property(nonatomic, weak) id<TXLivePlayListener> delegate; /** * 1.2 设置视频处理回调,见 “TXVideoCustomProcessDelegate.h” 文件中的详细定义 */ -@property(nonatomic, weak) id <TXVideoCustomProcessDelegate> 
videoProcessDelegate; +@property(nonatomic, weak) id<TXVideoCustomProcessDelegate> videoProcessDelegate; /** * 1.3 设置音频处理回调,见 “TXAudioRawDataDelegate.h” 文件中的详细定义 */ -@property(nonatomic, weak) id <TXAudioRawDataDelegate> audioRawDataDelegate; +@property(nonatomic, weak) id<TXAudioRawDataDelegate> audioRawDataDelegate; /** * 1.4 是否开启硬件加速,默认值:NO @@ -89,13 +85,12 @@ typedef NS_ENUM(NSInteger, TX_Enum_PlayType) { /** * 1.6 设置短视频录制回调,见 “TXLiveRecordListener.h” 文件中的详细定义 */ -@property (nonatomic, weak) id<TXLiveRecordListener> recordDelegate; +@property(nonatomic, weak) id<TXLiveRecordListener> recordDelegate; /** * 1.7 startPlay 后是否立即播放,默认 YES,只有点播有效 */ -@property (nonatomic) BOOL isAutoPlay; - +@property(nonatomic) BOOL isAutoPlay; /// @} @@ -110,7 +105,9 @@ typedef NS_ENUM(NSInteger, TX_Enum_PlayType) { /** * 2.1 创建 Video 渲染 View,该控件承载着视频内容的展示。 * - * 变更历史:1.5.2版本将参数 frame 废弃,设置此参数无效,控件大小与参数 view 的大小保持一致,如需修改控件的大小及位置,请调整父 view 的大小及位置。 参考文档:[绑定渲染界面](https://www.qcloud.com/doc/api/258/4736#step-3.3A-.E7.BB.91.E5.AE.9A.E6.B8.B2.E6.9F.93.E7.95.8C.E9.9D.A2) + * 变更历史:1.5.2版本将参数 frame 废弃,设置此参数无效,控件大小与参数 view + * 的大小保持一致,如需修改控件的大小及位置,请调整父 view 的大小及位置。 + * 参考文档:[绑定渲染界面](https://www.qcloud.com/doc/api/258/4736#step-3.3A-.E7.BB.91.E5.AE.9A.E6.B8.B2.E6.9F.93.E7.95.8C.E9.9D.A2) * * @param frame Widget 在父 view 中的 frame * @param view 父 view @@ -120,7 +117,8 @@ typedef NS_ENUM(NSInteger, TX_Enum_PlayType) { /* * 修改 VideoWidget frame - * 变更历史:1.5.2版本将此方法废弃,调用此方法无效,如需修改控件的大小及位置,请调整父 view 的大小及位置 + * 变更历史:1.5.2版本将此方法废弃,调用此方法无效,如需修改控件的大小及位置,请调整父 view + * 的大小及位置 * 参考文档:https://www.qcloud.com/doc/api/258/4736#step-3.3A-.E7.BB.91.E5.AE.9A.E6.B8.B2.E6.9F.93.E7.95.8C.E9.9D.A2 */ //- (void)resetVideoWidgetFrame:(CGRect)frame; @@ -198,6 +196,14 @@ typedef NS_ENUM(NSInteger, TX_Enum_PlayType) { */ - (void)snapshot:(void (^)(TXImage *))snapshotCompletionBlock; +/** + * 3.4 获取当前渲染帧 pts + * + * @return 0:当前未处于正在播放状态(例如:未起播) + * >0:当前渲染视频帧的 pts,处于正在播放状态 (单位: 毫秒) + */ +- (uint64_t)getCurrentRenderPts; + 
/// @} ///////////////////////////////////////////////////////////////////////////////// @@ -233,14 +239,15 @@ typedef NS_ENUM(NSInteger, TX_Enum_PlayType) { * * @param volumeEvaluationListener 音量大小回调接口,音量取值范围0 - 100 */ -- (void)setAudioVolumeEvaluationListener:(void(^)(int))volumeEvaluationListener; +- (void)setAudioVolumeEvaluationListener:(void (^)(int))volumeEvaluationListener; /** * 4.5 启用音量大小提示 * * 开启后会在 volumeEvaluationListener 中获取到 SDK 对音量大小值的评估。 * - * @param interval 决定了 volumeEvaluationListener 回调的触发间隔,单位为ms,最小间隔为100ms,如果小于等于0则会关闭回调,建议设置为300ms; + * @param interval 决定了 volumeEvaluationListener + * 回调的触发间隔,单位为ms,最小间隔为100ms,如果小于等于0则会关闭回调,建议设置为300ms; */ - (void)enableAudioVolumeEvaluation:(NSUInteger)interval; @@ -257,7 +264,8 @@ typedef NS_ENUM(NSInteger, TX_Enum_PlayType) { /** * 5.1 直播时移准备,拉取该直播流的起始播放时间。 * - * 使用时移功能需在播放开始后调用此方法,否则时移失败。时移的使用请参考文档 [超级播放器](https://cloud.tencent.com/document/product/881/20208#.E6.97.B6.E7.A7.BB.E6.92.AD.E6.94.BE) + * 使用时移功能需在播放开始后调用此方法,否则时移失败。时移的使用请参考文档 + * [超级播放器](https://cloud.tencent.com/document/product/881/20208#.E6.97.B6.E7.A7.BB.E6.92.AD.E6.94.BE) * * @warning 非腾讯云直播地址不能时移 * @@ -266,7 +274,7 @@ typedef NS_ENUM(NSInteger, TX_Enum_PlayType) { * * @return 0:OK;-1:无播放地址;-2:appId 未配置 */ -- (int)prepareLiveSeek:(NSString*)domain bizId:(NSInteger)bizId; +- (int)prepareLiveSeek:(NSString *)domain bizId:(NSInteger)bizId; /** * 5.2 停止时移播放,返回直播 @@ -357,7 +365,7 @@ typedef NS_ENUM(NSInteger, TX_Enum_PlayType) { * @note 该接口用于调用一些实验性功能 * @param jsonStr 接口及参数描述的 JSON 字符串 */ -- (void)callExperimentalAPI:(NSString*)jsonStr; +- (void)callExperimentalAPI:(NSString *)jsonStr; /// @} diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveRecordListener.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveRecordListener.h index fd86b08..cd4d238 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveRecordListener.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveRecordListener.h @@ -1,5 +1,6 @@ 
-#import "TXLiveRecordTypeDef.h" +// Copyright (c) 2021 Tencent. All rights reserved. +#import "TXLiveRecordTypeDef.h" /** * 短视频录制回调定义 @@ -10,18 +11,16 @@ /** * 短视频录制进度 */ --(void) onRecordProgress:(NSInteger)milliSecond; +- (void)onRecordProgress:(NSInteger)milliSecond; /** * 短视频录制完成 */ --(void) onRecordComplete:(TXRecordResult*)result; +- (void)onRecordComplete:(TXRecordResult*)result; /** * 短视频录制事件通知 */ --(void) onRecordEvent:(NSDictionary*)evt; +- (void)onRecordEvent:(NSDictionary*)evt; @end - - diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveRecordTypeDef.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveRecordTypeDef.h index 7159c30..e639b42 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveRecordTypeDef.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveRecordTypeDef.h @@ -1,36 +1,32 @@ +// Copyright (c) 2021 Tencent. All rights reserved. + #import <Foundation/Foundation.h> #import "TXLiveSDKTypeDef.h" /// PlayRecord 录制类型定义 -typedef NS_ENUM(NSInteger, TXRecordType) -{ - ///视频源为正在播放的视频流 - RECORD_TYPE_STREAM_SOURCE = 1, +typedef NS_ENUM(NSInteger, TXRecordType) { + ///视频源为正在播放的视频流 + RECORD_TYPE_STREAM_SOURCE = 1, }; - - /// 录制结果错误码定义 -typedef NS_ENUM(NSInteger, TXRecordResultCode) -{ - /// 录制成功(业务层主动结束录制) - RECORD_RESULT_OK = 0, - /// 录制成功(sdk自动结束录制,可能原因:1,app进入后台,2,app被闹钟或电话打断,3,网络断连接) - RECORD_RESULT_OK_INTERRUPT = 1, - /// 录制失败 - RECORD_RESULT_FAILED = 1001, +typedef NS_ENUM(NSInteger, TXRecordResultCode) { + /// 录制成功(业务层主动结束录制) + RECORD_RESULT_OK = 0, + /// 录制成功(sdk自动结束录制,可能原因:1,app进入后台,2,app被闹钟或电话打断,3,网络断连接) + RECORD_RESULT_OK_INTERRUPT = 1, + /// 录制失败 + RECORD_RESULT_FAILED = 1001, }; - /// 录制结果 -@interface TXRecordResult : NSObject +LITEAV_EXPORT @interface TXRecordResult : NSObject /// 错误码 -@property (nonatomic, assign) TXRecordResultCode retCode; +@property(nonatomic, assign) TXRecordResultCode retCode; /// 错误描述信息 -@property (nonatomic, strong) NSString* descMsg; +@property(nonatomic, strong) 
NSString* descMsg; /// 视频文件path -@property (nonatomic, strong) NSString* videoPath; +@property(nonatomic, strong) NSString* videoPath; /// 视频封面 -@property (nonatomic, strong) TXImage* coverImage; +@property(nonatomic, strong) TXImage* coverImage; @end - diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveSDKEventDef.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveSDKEventDef.h index dac1575..5039e5f 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveSDKEventDef.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveSDKEventDef.h @@ -1,103 +1,207 @@ +// Copyright © 2020 Tencent. All rights reserved. + #ifndef __TX_LIVE_SDK_EVENT_DEF_H__ #define __TX_LIVE_SDK_EVENT_DEF_H__ -#include "TXLiteAVCode.h" +///////////////////////////////////////////////////////////////////////////////// +// +// 事件码和错误码定义 +// +// 以下错误码适用于 V1 版本的 TXLivePusher 和 TXLivePlayer +// 如果您是新版本客户,推荐使用 V2 版本的 V2TXLivePusher 和 V2TXLivePlayer +// +///////////////////////////////////////////////////////////////////////////////// +// clang-format off enum EventID { - /********************************************************************************** - * 推流事件列表 - **********************************************************************************/ - PUSH_EVT_CONNECT_SUCC = EVT_RTMP_PUSH_CONNECT_SUCC, ///< 直播: 已经连接RTMP推流服务器 - PUSH_EVT_PUSH_BEGIN = EVT_RTMP_PUSH_BEGIN, ///< 直播: 已经与RTMP服务器握手完毕,开始推流 - PUSH_EVT_OPEN_CAMERA_SUCC = EVT_CAMERA_START_SUCC, ///< 打开摄像头成功 - PUSH_EVT_CHANGE_RESOLUTION = EVT_UP_CHANGE_RESOLUTION, ///< 推流动态调整分辨率 - PUSH_EVT_CHANGE_BITRATE = EVT_UP_CHANGE_BITRATE, ///< 推流动态调整码率 - PUSH_EVT_FIRST_FRAME_AVAILABLE = EVT_FIRST_FRAME_AVAILABLE, ///< 首帧画面采集完成 - PUSH_EVT_START_VIDEO_ENCODER = EVT_START_VIDEO_ENCODER, ///< 编码器启动 - PUSH_EVT_ROOM_IN = EVT_ROOM_ENTER, ///< 已经在webrtc房间里面,进房成功后通知 - PUSH_EVT_ROOM_IN_FAILED = EVT_ROOM_ENTER_FAILED, ///< 进房失败通知 - PUSH_EVT_ROOM_OUT = EVT_ROOM_EXIT, ///< 不在webrtc房间里面,进房失败或者中途退出房间时通知 - PUSH_EVT_ROOM_USERLIST = 
EVT_ROOM_USERLIST, ///< 下发webrtc房间成员列表(不包括自己) - PUSH_EVT_ROOM_NEED_REENTER = EVT_ROOM_NEED_REENTER, ///< WiFi切换到4G会触发断线重连,此时需要重新进入webrtc房间(拉取最优的服务器地址) - PUSH_EVT_ROOM_USER_ENTER = EVT_ROOM_USER_ENTER, ///< 进房通知 - PUSH_EVT_ROOM_USER_EXIT = EVT_ROOM_USER_EXIT, ///< 退房通知 - PUSH_EVT_ROOM_USER_VIDEO_STATE = EVT_ROOM_USER_VIDEO_STATE, ///< 视频状态位变化通知 - PUSH_EVT_ROOM_USER_AUDIO_STATE = EVT_ROOM_USER_AUDIO_STATE, ///< 音频状态位变化通知 - - PUSH_ERR_OPEN_CAMERA_FAIL = ERR_CAMERA_START_FAIL, ///< 打开摄像头失败 - PUSH_ERR_OPEN_MIC_FAIL = ERR_MIC_START_FAIL, ///< 打开麦克风失败 - PUSH_ERR_VIDEO_ENCODE_FAIL = ERR_VIDEO_ENCODE_FAIL, ///< 视频编码失败 - PUSH_ERR_AUDIO_ENCODE_FAIL = ERR_AUDIO_ENCODE_FAIL, ///< 音频编码失败 - PUSH_ERR_UNSUPPORTED_RESOLUTION = ERR_UNSUPPORTED_RESOLUTION, ///< 不支持的视频分辨率 - PUSH_ERR_UNSUPPORTED_SAMPLERATE = ERR_UNSUPPORTED_SAMPLERATE, ///< 不支持的音频采样率 - PUSH_ERR_NET_DISCONNECT = ERR_RTMP_PUSH_NET_DISCONNECT, ///< 网络断连,且经多次重连抢救无效,可以放弃治疗,更多重试请自行重启推流 - PUSH_ERR_AUDIO_SYSTEM_NOT_WORK = -1308, ///< 系统异常,录音失败 - PUSH_ERR_INVALID_ADDRESS = ERR_RTMP_PUSH_INVALID_ADDRESS, ///< 推流地址非法 - - PUSH_WARNING_NET_BUSY = WARNING_NET_BUSY, ///< 网络状况不佳:上行带宽太小,上传数据受阻 - PUSH_WARNING_RECONNECT = WARNING_RTMP_SERVER_RECONNECT, ///< 网络断连, 已启动自动重连 (自动重连连续失败超过三次会放弃) - PUSH_WARNING_HW_ACCELERATION_FAIL = WARNING_HW_ENCODER_START_FAIL, ///< 硬编码启动失败,采用软编码 - PUSH_WARNING_VIDEO_ENCODE_FAIL = 1104, ///< 视频编码失败,非致命错,内部会重启编码器 - PUSH_WARNING_BEAUTYSURFACE_VIEW_INIT_FAIL = 1105, ///< 视频编码码率异常,警告 - PUSH_WARNING_VIDEO_ENCODE_BITRATE_OVERFLOW = 1106, ///< 视频编码码率异常,警告 - PUSH_WARNING_DNS_FAIL = WARNING_RTMP_DNS_FAIL, ///< RTMP -DNS解析失败 - PUSH_WARNING_SEVER_CONN_FAIL = WARNING_RTMP_SEVER_CONN_FAIL, ///< RTMP服务器连接失败 - PUSH_WARNING_SHAKE_FAIL = WARNING_RTMP_SHAKE_FAIL, ///< RTMP服务器握手失败 - PUSH_WARNING_SERVER_DISCONNECT = WARNING_RTMP_SERVER_BREAK_CONNECT, ///< RTMP服务器主动断开,请检查推流地址的合法性或防盗链有效期 - PUSH_WARNING_READ_WRITE_FAIL = WARNING_RTMP_READ_WRITE_FAIL, ///< RTMP 读/写失败,将会断开连接。 - - /*内部事件*/INNER_EVT_SET_BITRATE_4_SCREEN_CAPTURE = 
100001, ///< 动态设置录屏编码码率 - /*内部事件*/INNER_EVT_BGM_PLAY_FINISH = 100002, ///< BGM播放完毕 - - - /********************************************************************************** - * 播放事件列表 - **********************************************************************************/ - PLAY_EVT_CONNECT_SUCC = EVT_PLAY_LIVE_STREAM_CONNECT_SUCC, ///< 直播,已经连接RTMP拉流服务器 - PLAY_EVT_RTMP_STREAM_BEGIN = EVT_PLAY_LIVE_STREAM_BEGIN, ///< 直播,已经与RTMP服务器握手完毕,开始拉流 - PLAY_EVT_RCV_FIRST_I_FRAME = EVT_RENDER_FIRST_I_FRAME, ///< 渲染首个视频数据包(IDR) - PLAY_EVT_RCV_FIRST_AUDIO_FRAME = EVT_AUDIO_JITTER_STATE_FIRST_PLAY, ///< 音频首次播放 - PLAY_EVT_PLAY_BEGIN = EVT_VIDEO_PLAY_BEGIN, ///< 视频播放开始 - PLAY_EVT_PLAY_PROGRESS = EVT_VIDEO_PLAY_PROGRESS, ///< 视频播放进度 - PLAY_EVT_PLAY_END = EVT_VIDEO_PLAY_END, ///< 视频播放结束 - PLAY_EVT_PLAY_LOADING = EVT_VIDEO_PLAY_LOADING, ///< 视频播放loading - PLAY_EVT_START_VIDEO_DECODER = EVT_START_VIDEO_DECODER, ///< 解码器启动 - PLAY_EVT_CHANGE_RESOLUTION = EVT_DOWN_CHANGE_RESOLUTION, ///< 视频分辨率改变 - PLAY_EVT_GET_PLAYINFO_SUCC = EVT_GET_VODFILE_MEDIAINFO_SUCC, ///< 获取点播文件信息成功 - PLAY_EVT_CHANGE_ROTATION = EVT_VIDEO_CHANGE_ROTATION, ///< MP4视频旋转角度 - PLAY_EVT_GET_MESSAGE = EVT_PLAY_GET_MESSAGE, ///< 消息事件 - PLAY_EVT_VOD_PLAY_PREPARED = EVT_VOD_PLAY_PREPARED, ///< 点播,视频加载完毕 - PLAY_EVT_VOD_LOADING_END = EVT_VOD_PLAY_LOADING_END, ///< 点播,loading结束 - PLAY_EVT_STREAM_SWITCH_SUCC = EVT_PLAY_LIVE_STREAM_SWITCH_SUCC, ///< 直播,切流成功(切流可以播放不同画面大小的视频) - PLAY_EVT_GET_METADATA = EVT_PLAY_GET_METADATA, ///< TXLivePlayer 接收到视频流中的 metadata 头信息(一条视频流仅触发一次) - PLAY_EVT_GET_FLVSESSIONKEY = EVT_PLAY_GET_FLVSESSIONKEY, ///< TXLivePlayer 接收到http响应头中的 flvSessionKey 信息 - - PLAY_ERR_NET_DISCONNECT = ERR_PLAY_LIVE_STREAM_NET_DISCONNECT, ///< 直播,网络断连,且经多次重连抢救无效,可以放弃治疗,更多重试请自行重启播放 - - PLAY_ERR_GET_RTMP_ACC_URL_FAIL = ERR_GET_RTMP_ACC_URL_FAIL, ///< 直播,获取加速拉流地址失败。这是由于您传给 liveplayer 的加速流地址中没有携带 txTime 和 txSecret 签名,或者是签名计算的不对。出现这个错误时,liveplayer 会放弃拉取加速流转而拉取 CDN 上的视频流,从而导致延迟很大。 - PLAY_ERR_FILE_NOT_FOUND = ERR_FILE_NOT_FOUND, ///< 
播放文件不存在 - PLAY_ERR_HEVC_DECODE_FAIL = ERR_HEVC_DECODE_FAIL, ///< H265解码失败 - PLAY_ERR_HLS_KEY = ERR_VOD_DECRYPT_FAIL, ///< HLS解码key获取失败 - PLAY_ERR_GET_PLAYINFO_FAIL = ERR_GET_VODFILE_MEDIAINFO_FAIL, ///< 获取点播文件信息失败 - PLAY_ERR_STREAM_SWITCH_FAIL = ERR_PLAY_LIVE_STREAM_SWITCH_FAIL, ///< 直播,切流失败(切流可以播放不同画面大小的视频) - - PLAY_WARNING_VIDEO_DECODE_FAIL = WARNING_VIDEO_FRAME_DECODE_FAIL, ///< 当前视频帧解码失败 - PLAY_WARNING_AUDIO_DECODE_FAIL = WARNING_AUDIO_FRAME_DECODE_FAIL, ///< 当前音频帧解码失败 - PLAY_WARNING_RECONNECT = WARNING_LIVE_STREAM_SERVER_RECONNECT, ///< 网络断连, 已启动自动重连 (自动重连连续失败超过三次会放弃) - PLAY_WARNING_RECV_DATA_LAG = WARNING_RECV_DATA_LAG, ///< 网络来包不稳:可能是下行带宽不足,或由于主播端出流不均匀 - PLAY_WARNING_VIDEO_PLAY_LAG = WARNING_VIDEO_PLAY_LAG, ///< 当前视频播放出现卡顿(用户直观感受) - PLAY_WARNING_HW_ACCELERATION_FAIL = WARNING_HW_DECODER_START_FAIL, ///< 硬解启动失败,采用软解 - PLAY_WARNING_VIDEO_DISCONTINUITY = 2107, ///< 当前视频帧不连续,可能丢帧 - PLAY_WARNING_FIRST_IDR_HW_DECODE_FAIL = WARNING_VIDEO_DECODER_HW_TO_SW, ///< 当前流硬解第一个I帧失败,SDK自动切软解 - PLAY_WARNING_DNS_FAIL = WARNING_RTMP_DNS_FAIL, ///< RTMP -DNS解析失败 - PLAY_WARNING_SEVER_CONN_FAIL = WARNING_RTMP_SEVER_CONN_FAIL, ///< RTMP服务器连接失败 - PLAY_WARNING_SHAKE_FAIL = WARNING_RTMP_SHAKE_FAIL, ///< RTMP服务器握手失败 - PLAY_WARNING_SERVER_DISCONNECT = WARNING_RTMP_SERVER_BREAK_CONNECT, ///< RTMP服务器主动断开 - PLAY_WARNING_READ_WRITE_FAIL = WARNING_RTMP_READ_WRITE_FAIL, ///< RTMP 读/写失败,将会断开连接。 + ///////////////////////////////////////////////////////////////////////////////// + // 公共错误码、事件码和警告码 + ///////////////////////////////////////////////////////////////////////////////// + ERR_LICENSE_CHECK_FAIL = -5, ///< license 检查失败 + + ///////////////////////////////////////////////////////////////////////////////// + // 推流相关错误码、事件码和警告码 + ///////////////////////////////////////////////////////////////////////////////// + PUSH_EVT_CONNECT_SUCC = 1001, ///< 推流事件: 已经连接RTMP推流服务器 + PUSH_EVT_PUSH_BEGIN = 1002, ///< 推流事件: 已经与RTMP服务器握手完毕,准备开始推流 + PUSH_EVT_OPEN_CAMERA_SUCC = 1003, ///< 推流事件: 打开摄像头成功 + 
PUSH_EVT_SCREEN_CAPTURE_SUCC = 1004, ///< 推流事件: 屏幕录制启动成功(用于录屏直播) + PUSH_EVT_CHANGE_RESOLUTION = 1005, ///< 推流事件: SDK 主动调整了编码分辨率以适应当前主播的网络速度 + PUSH_EVT_CHANGE_BITRATE = 1006, ///< 推流事件: SDK 主动调整了编码码率以适应当前主播的网络速度 + PUSH_EVT_FIRST_FRAME_AVAILABLE = 1007, ///< 推流事件: SDK 完成了首帧画面的采集 + PUSH_EVT_START_VIDEO_ENCODER = 1008, ///< 推流事件: 编码器已经启动 + + PUSH_EVT_CAMERA_REMOVED = 1023, ///< 推流事件: 摄像头已被移除(适用于 Windows 和 Mac OS 版) + PUSH_EVT_CAMERA_AVAILABLE = 1024, ///< 推流事件: 摄像头已经可用(适用于 Windows 和 Mac OS 版) + PUSH_EVT_CAMERA_CLOSE = 1025, ///< 推流事件: 摄像头已被关闭(适用于 Windows 和 Mac OS 版) + PUSH_EVT_HW_ENCODER_START_SUCC = 1027, ///< 推流事件: 硬编码器启动成功 + PUSH_EVT_SW_ENCODER_START_SUCC = 1028, ///< 推流事件: 软编码器启动成功 + PUSH_EVT_LOCAL_RECORD_RESULT = 1029, ///< 推流事件: 本地录制完成通知 + PUSH_EVT_LOCAL_RECORD_PROGRESS = 1030, ///< 推流事件: 本地录制状态通知 + + PUSH_EVT_ROOM_IN = 1018, ///< ROOM协议:当前用户进入房间成功 + PUSH_EVT_ROOM_OUT = 1019, ///< ROOM协议:当前用户已经离开房间 + PUSH_EVT_ROOM_USERLIST = 1020, ///< ROOM协议:返回房间中的其他用户(不包含当前用户自己) + PUSH_EVT_ROOM_NEED_REENTER = 1021, ///< ROOM协议:断开连接,需要重新进入房间 + PUSH_EVT_ROOM_IN_FAILED = 1022, ///< ROOM协议:当前用户进入房间失败 + PUSH_EVT_ROOM_USER_ENTER = 1031, ///< ROOM协议:有新的远端用户进入当前房间中 + PUSH_EVT_ROOM_USER_EXIT = 1032, ///< ROOM协议:有远端用户离开当前房间 + PUSH_EVT_ROOM_USER_VIDEO_STATE = 1033, ///< ROOM协议:远端用户的视频状态发生变化(比如摄像头的开关状态) + PUSH_EVT_ROOM_USER_AUDIO_STATE = 1034, ///< ROOM协议:远端用户的音频状态发生变化(比如麦克风的开关状态) - /*UGC*/UGC_WRITE_FILE_FAIL = 4001, ///< UGC写文件失败 + PUSH_ERR_OPEN_CAMERA_FAIL = -1301, ///< 推流错误: 摄像头开启失败 + PUSH_ERR_OPEN_MIC_FAIL = -1302, ///< 推流错误: 麦克风开启失败 + PUSH_ERR_VIDEO_ENCODE_FAIL = -1303, ///< 推流错误: 视频编码器出现不可恢复的错误 + PUSH_ERR_AUDIO_ENCODE_FAIL = -1304, ///< 推流错误: 音频编码器出现不可恢复的错误 + PUSH_ERR_UNSUPPORTED_RESOLUTION = -1305, ///< 推流错误: 您指定了 SDK 尚不支持的视频分辨率 + PUSH_ERR_UNSUPPORTED_SAMPLERATE = -1306, ///< 推流错误: 您指定了 SDK 尚不支持的音频采样率 + PUSH_ERR_NET_DISCONNECT = -1307, ///< 推流错误: 网络连接断开(已经经过三次重试并且未能重连成功) + PUSH_ERR_AUDIO_SYSTEM_NOT_WORK = -1308, ///< 推流错误: 系统状态异常,无法正常采集麦克风的声音 + PUSH_ERR_INVALID_ADDRESS = -1313, ///< 
推流错误: 您指定了不合法的推流地址 + PUSH_ERR_CONNECT_SERVER_FAILED = -1324, ///< 推流错误: 连接推流服务器失败(若支持智能选路,IP 全部失败) + PUSH_ERR_NETWORK_UNAVAIABLE = -1325, ///< 推流错误: 网络不可用,请确认 Wi-Fi、移动数据或者有线网络是否正常 + PUSH_ERR_SERVER_REFUSED = -1326, ///< 推流错误: 服务器拒绝连接请求,可能原因:推流地址非法;流地址被占用;txScrect校验失败;txTime过期;服务欠费等。 + + PUSH_WARNING_NET_BUSY = 1101, ///< 推流警告:上行网速不够用,建议提示用户改善当前的网络环境 + PUSH_WARNING_RECONNECT = 1102, ///< 推流警告:网络断连,已启动重连流程(重试失败超过三次会放弃) + PUSH_WARNING_HW_ACCELERATION_FAIL = 1103, ///< 推流警告:硬编码启动失败,SDK 已经自动切换到软编码模式 + PUSH_WARNING_VIDEO_ENCODE_FAIL = 1104, ///< 推流警告:当前视频帧未能成功编码,非致命错,SDK 内部会自行规避 + PUSH_WARNING_DNS_FAIL = 3001, ///< 推流警告:DNS 解析失败,SDK 已经启动重试流程 + PUSH_WARNING_SEVER_CONN_FAIL = 3002, ///< 推流警告:服务器连接失败,SDK 已经启动重试流程 + PUSH_WARNING_SHAKE_FAIL = 3003, ///< 推流警告:同 RTMP 服务器的握手失败,SDK 已经启动重试流程 + PUSH_WARNING_SERVER_DISCONNECT = 3004, ///< 推流警告:RTMP 服务器主动断开,请检查推流地址的合法性或防盗链有效期 + PUSH_WARNING_READ_WRITE_FAIL = 3005, ///< 推流警告:RTMP 写操作失败,当前连接将会断开 + + ///////////////////////////////////////////////////////////////////////////////// + // 播放相关错误码、事件码和警告码 + ///////////////////////////////////////////////////////////////////////////////// + PLAY_EVT_CONNECT_SUCC = 2001, ///< 播放事件: 已经连接到服务器 + PLAY_EVT_RTMP_STREAM_BEGIN = 2002, ///< 播放事件: 已经连接服务器,开始拉流 + PLAY_EVT_RCV_FIRST_I_FRAME = 2003, ///< 播放事件: 成功接受到第一个视频帧 + PLAY_EVT_RCV_FIRST_AUDIO_FRAME = 2026, ///< 播放事件: 成功接受到第一个音频帧 + PLAY_EVT_PLAY_BEGIN = 2004, ///< 播放事件: 播放已经开始 + PLAY_EVT_PLAY_PROGRESS = 2005, ///< 播放事件: 播放进度更新,点播播放器(VodPlayer)专用 + PLAY_EVT_PLAY_END = 2006, ///< 播放事件: 播放已经结束 + PLAY_EVT_PLAY_LOADING = 2007, ///< 播放事件: 数据缓冲中 + PLAY_EVT_START_VIDEO_DECODER = 2008, ///< 播放事件: 视频解码器已经启动 + PLAY_EVT_CHANGE_RESOLUTION = 2009, ///< 播放事件: 视频分辨率发生变化 + PLAY_EVT_GET_PLAYINFO_SUCC = 2010, ///< 播放事件: 成功获取到点播文件的信息,点播播放器(VodPlayer)专用 + PLAY_EVT_CHANGE_ROTATION = 2011, ///< 播放事件: MP4 视频的旋转角度发生变化,点播播放器(VodPlayer)专用 + PLAY_EVT_GET_MESSAGE = 2012, ///< 播放事件: 接收到视频流中的 SEI 消息(https://cloud.tencent.com/document/product/454/7880#Message) + 
PLAY_EVT_VOD_PLAY_PREPARED = 2013, ///< 播放事件: 视频加载完毕,点播播放器(VodPlayer)专用 + PLAY_EVT_VOD_LOADING_END = 2014, ///< 播放事件: 视频缓冲结束,点播播放器(VodPlayer)专用 + PLAY_EVT_STREAM_SWITCH_SUCC = 2015, ///< 播放事件: 已经成功完成切流(在不同清晰度的视频流之间进行切换) + PLAY_EVT_GET_METADATA = 2028, ///< 播放事件: TXLivePlayer 接收到视频流中的 metadata 头信息(一条视频流仅触发一次) + PLAY_EVT_GET_FLVSESSIONKEY = 2031, ///< 播放事件: TXLivePlayer 接收到 http 响应头中的 flvSessionKey 信息 + PLAY_EVT_AUDIO_SESSION_INTERRUPT = 2032, ///< 播放事件: Audio Session 被其他 App 中断(仅适用于 iOS 平台) + + PLAY_ERR_NET_DISCONNECT = -2301, ///< 直播错误: 网络连接断开(已经经过三次重试并且未能重连成功) + PLAY_ERR_GET_RTMP_ACC_URL_FAIL = -2302, ///< 直播错误: 获取加速流失败,可能是由于您指定的加速流URL中没有携带正确的txTime和txSecret参数,SDK会自动切换到高延迟的 CDN 地址。 + PLAY_ERR_HEVC_DECODE_FAIL = -2304, ///< 直播错误: HEVC 解码失败,并且没有找到备用的选解码器 + PLAY_ERR_STREAM_SWITCH_FAIL = -2307, ///< 直播错误: 切换直播流失败 + PLAY_ERR_STREAM_SERVER_REFUSED = -2308, ///< 直播错误:服务器拒绝连接请求,可能原因:播放地址非法;txScrect校验失败;txTime过期;服务欠费等。 + PLAY_ERR_FILE_NOT_FOUND = -2303, ///< 点播错误: 播放文件不存在 + PLAY_ERR_HLS_KEY = -2305, ///< 点播错误: HLS 解码 KEY 获取失败 + PLAY_ERR_GET_PLAYINFO_FAIL = -2306, ///< 点播错误: 获取点播文件的文件信息失败 + + PLAY_WARNING_VIDEO_DECODE_FAIL = 2101, ///< 直播警告:当前视频帧解码失败,SDK内部会尝试自动恢复 + PLAY_WARNING_AUDIO_DECODE_FAIL = 2102, ///< 直播警告:当前音频帧解码失败,SDK内部会尝试自动恢复 + PLAY_WARNING_RECONNECT = 2103, ///< 直播警告:网络断连,已启动重连流程(重试失败超过三次会放弃) + PLAY_WARNING_RECV_DATA_LAG = 2104, ///< 直播警告:音视频流拉取不稳定,可能由于网络原因所致 + PLAY_WARNING_VIDEO_PLAY_LAG = 2105, ///< 直播警告:当前视频画面出现卡顿 + PLAY_WARNING_HW_ACCELERATION_FAIL = 2106, ///< 直播警告:硬件解码失败,自动切换到软件解码 + PLAY_WARNING_VIDEO_DISCONTINUITY = 2107, ///< 直播警告:检测到视频帧不连续 + PLAY_WARNING_FIRST_IDR_HW_DECODE_FAIL = 2108, ///< 直播警告:视频硬解码失败,SDK 内部自动切换到软件解码 + PLAY_WARNING_DNS_FAIL = 3001, ///< 直播警告:DNS 解析失败,SDK 已经启动重试流程 + PLAY_WARNING_SEVER_CONN_FAIL = 3002, ///< 直播警告:服务器连接失败,SDK 已经启动重试流程 + PLAY_WARNING_SHAKE_FAIL = 3003, ///< 直播警告:同 RTMP 服务器的握手失败,SDK 已经启动重试流程 + PLAY_WARNING_SERVER_DISCONNECT = 3004, ///< 直播警告:RTMP 服务器主动断开,请检查播放地址的合法性或防盗链有效期 + PLAY_WARNING_READ_WRITE_FAIL = 3005, ///< 
直播警告:RTMP 读操作失败,当前连接将会断开 }; -#endif // __TX_LIVE_SDK_TYPE_DEF_H__ + +///////////////////////////////////////////////////////////////////////////////// +// +// 兼容定义 +// (用于兼容老版本的错误码定义,请在代码中尽量使用右侧的新定义) +// +///////////////////////////////////////////////////////////////////////////////// +#define EVT_RTMP_PUSH_CONNECT_SUCC PUSH_EVT_CONNECT_SUCC +#define EVT_RTMP_PUSH_BEGIN PUSH_EVT_PUSH_BEGIN +#define EVT_CAMERA_START_SUCC PUSH_EVT_OPEN_CAMERA_SUCC +#define EVT_SCREEN_CAPTURE_SUCC PUSH_EVT_SCREEN_CAPTURE_SUCC +#define EVT_UP_CHANGE_RESOLUTION PUSH_EVT_CHANGE_RESOLUTION +#define EVT_UP_CHANGE_BITRATE PUSH_EVT_CHANGE_BITRATE +#define EVT_FIRST_FRAME_AVAILABLE PUSH_EVT_FIRST_FRAME_AVAILABLE +#define EVT_START_VIDEO_ENCODER PUSH_EVT_START_VIDEO_ENCODER + +#define EVT_CAMERA_REMOVED PUSH_EVT_CAMERA_REMOVED +#define EVT_CAMERA_AVAILABLE PUSH_EVT_CAMERA_AVAILABLE +#define EVT_CAMERA_CLOSE PUSH_EVT_CAMERA_CLOSE +#define EVT_HW_ENCODER_START_SUCC PUSH_EVT_HW_ENCODER_START_SUCC +#define EVT_SW_ENCODER_START_SUCC PUSH_EVT_SW_ENCODER_START_SUCC +#define EVT_LOCAL_RECORD_RESULT PUSH_EVT_LOCAL_RECORD_RESULT +#define EVT_LOCAL_RECORD_PROGRESS PUSH_EVT_LOCAL_RECORD_PROGRESS + +#define EVT_ROOM_ENTER PUSH_EVT_ROOM_IN +#define EVT_ROOM_ENTER_FAILED PUSH_EVT_ROOM_IN_FAILED +#define EVT_ROOM_EXIT PUSH_EVT_ROOM_OUT +#define EVT_ROOM_USERLIST PUSH_EVT_ROOM_USERLIST +#define EVT_ROOM_NEED_REENTER PUSH_EVT_ROOM_NEED_REENTER +#define EVT_ROOM_USER_ENTER PUSH_EVT_ROOM_USER_ENTER +#define EVT_ROOM_USER_EXIT PUSH_EVT_ROOM_USER_EXIT +#define EVT_ROOM_USER_VIDEO_STATE PUSH_EVT_ROOM_USER_VIDEO_STATE +#define EVT_ROOM_USER_AUDIO_STATE PUSH_EVT_ROOM_USER_AUDIO_STATE + +#define ERR_RTMP_PUSH_NET_DISCONNECT PUSH_ERR_NET_DISCONNECT +#define ERR_RTMP_PUSH_INVALID_ADDRESS PUSH_ERR_INVALID_ADDRESS +#define ERR_RTMP_PUSH_NET_ALLADDRESS_FAIL PUSH_ERR_CONNECT_SERVER_FAILED +#define ERR_RTMP_PUSH_NO_NETWORK PUSH_ERR_NETWORK_UNAVAIABLE +#define ERR_RTMP_PUSH_SERVER_REFUSE PUSH_ERR_SERVER_REFUSED + +#define 
WARNING_NET_BUSY PUSH_WARNING_NET_BUSY +#define WARNING_RTMP_SERVER_RECONNECT PUSH_WARNING_RECONNECT +#define WARNING_RTMP_DNS_FAIL PUSH_WARNING_DNS_FAIL +#define WARNING_RTMP_SEVER_CONN_FAIL PUSH_WARNING_SEVER_CONN_FAIL +#define WARNING_RTMP_SHAKE_FAIL PUSH_WARNING_SHAKE_FAIL +#define WARNING_RTMP_SERVER_BREAK_CONNECT PUSH_WARNING_SERVER_DISCONNECT +#define WARNING_RTMP_READ_WRITE_FAIL PUSH_WARNING_READ_WRITE_FAIL + +#define EVT_PLAY_LIVE_STREAM_CONNECT_SUCC PLAY_EVT_CONNECT_SUCC +#define EVT_PLAY_LIVE_STREAM_BEGIN PLAY_EVT_RTMP_STREAM_BEGIN +#define EVT_RENDER_FIRST_I_FRAME PLAY_EVT_RCV_FIRST_I_FRAME +#define EVT_AUDIO_JITTER_STATE_FIRST_PLAY PLAY_EVT_RCV_FIRST_AUDIO_FRAME +#define EVT_VIDEO_PLAY_BEGIN PLAY_EVT_PLAY_BEGIN +#define EVT_VIDEO_PLAY_PROGRESS PLAY_EVT_PLAY_PROGRESS +#define EVT_VIDEO_PLAY_END PLAY_EVT_PLAY_END +#define EVT_VIDEO_PLAY_LOADING PLAY_EVT_PLAY_LOADING +#define EVT_START_VIDEO_DECODER PLAY_EVT_START_VIDEO_DECODER +#define EVT_DOWN_CHANGE_RESOLUTION PLAY_EVT_CHANGE_RESOLUTION +#define EVT_GET_VODFILE_MEDIAINFO_SUCC PLAY_EVT_GET_PLAYINFO_SUCC +#define EVT_VIDEO_CHANGE_ROTATION PLAY_EVT_CHANGE_ROTATION +#define EVT_PLAY_GET_MESSAGE PLAY_EVT_GET_MESSAGE +#define EVT_VOD_PLAY_PREPARED PLAY_EVT_VOD_PLAY_PREPARED +#define EVT_VOD_PLAY_LOADING_END PLAY_EVT_VOD_LOADING_END +#define EVT_PLAY_LIVE_STREAM_SWITCH_SUCC PLAY_EVT_STREAM_SWITCH_SUCC +#define EVT_PLAY_GET_METADATA PLAY_EVT_GET_METADATA +#define EVT_PLAY_GET_FLVSESSIONKEY PLAY_EVT_GET_FLVSESSIONKEY +#define EVT_AUDIO_SESSION_INTERRUPT PLAY_EVT_AUDIO_SESSION_INTERRUPT + +#define ERR_PLAY_LIVE_STREAM_NET_DISCONNECT PLAY_ERR_NET_DISCONNECT +#define ERR_GET_RTMP_ACC_URL_FAIL PLAY_ERR_GET_RTMP_ACC_URL_FAIL +#define ERR_FILE_NOT_FOUND PLAY_ERR_FILE_NOT_FOUND +#define ERR_VOD_DECRYPT_FAIL PLAY_ERR_HLS_KEY +#define ERR_GET_VODFILE_MEDIAINFO_FAIL PLAY_ERR_GET_PLAYINFO_FAIL +#define ERR_PLAY_LIVE_STREAM_SWITCH_FAIL PLAY_ERR_STREAM_SWITCH_FAIL +#define ERR_PLAY_LIVE_STREAM_SERVER_REFUSE 
PLAY_ERR_STREAM_SERVER_REFUSED + +#define WARNING_LIVE_STREAM_SERVER_RECONNECT PLAY_WARNING_RECONNECT +#define WARNING_RECV_DATA_LAG PLAY_WARNING_RECV_DATA_LAG +#define WARNING_VIDEO_PLAY_LAG PLAY_WARNING_VIDEO_PLAY_LAG + +#define EVT_SNAPSHOT_COMPLETE 1022 ///< 已经完成一帧截图 + +// clang-format on +#endif // __TX_LIVE_SDK_TYPE_DEF_H__ diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveSDKTypeDef.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveSDKTypeDef.h index 9f7ad56..4863db4 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveSDKTypeDef.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXLiveSDKTypeDef.h @@ -1,357 +1,390 @@ -#ifndef __TX_LIVE_SDK_TYPE_DEF_H__ -#define __TX_LIVE_SDK_TYPE_DEF_H__ - -#include "TXLiveSDKEventDef.h" -#import <Foundation/Foundation.h> - -#if TARGET_OS_IPHONE -#import <UIKit/UIKit.h> -typedef UIView TXView; -typedef UIImage TXImage; -typedef UIEdgeInsets TXEdgeInsets; -#elif TARGET_OS_MAC -#import <AppKit/AppKit.h> -typedef NSView TXView; -typedef NSImage TXImage; -typedef NSEdgeInsets TXEdgeInsets; -#endif - - - -///////////////////////////////////////////////////////////////////////////////// -// -// 【视频相关枚举值定义】 -// -///////////////////////////////////////////////////////////////////////////////// - -/** - * 1.1 视频分辨率 - * - * 在普通模式下,TXLivePusher 只支持三种固定的分辨率,即:360 × 640、540 × 960 以及 720 × 1280。 - * - *【如何横屏推流】 - * 如果希望使用 640 × 360、960 × 540、1280 × 720 这样的横屏分辨率,需要设置 TXLivePushConfig 中的 homeOrientation 属性, - * 并使用 TXLivePusher 中的 setRenderRotation 接口进行画面旋转。 - * - *【自定义分辨率】 - * 如果希望使用其他分辨率,可以设置 TXLivePushConfig 中的 customModeType 为 CUSTOM_MODE_VIDEO_CAPTURE, - * 自己采集 SampleBuffer 送给 TXLivePusher 的 sendVideoSampleBuffer 接口。 - * - *【建议的分辨率】 - * 手机直播场景下最常用的分辨率为 9:16 的竖屏分辨率 540 × 960。 - * 从清晰的角度,540 × 960 比 360 × 640 要清晰,同时跟 720 × 1280 相当。 - * 从性能的角度,540 × 960 可以避免前置摄像头开启 720 × 1280 的采集分辨率,对于美颜开销很大的场景能节省不少的计算量。 - */ -typedef NS_ENUM(NSInteger, TX_Enum_Type_VideoResolution) { - - /// 竖屏分辨率,宽高比为 9:16 - 
VIDEO_RESOLUTION_TYPE_360_640 = 0, ///< 建议码率 800kbps - VIDEO_RESOLUTION_TYPE_540_960 = 1, ///< 建议码率 1200kbps - VIDEO_RESOLUTION_TYPE_720_1280 = 2, ///< 建议码率 1800kbps - VIDEO_RESOLUTION_TYPE_1080_1920 = 30, ///< 建议码率 3000kbps - - - /// 如下均为内建分辨率,为 SDK 内部使用,不支持通过接口进行设置 - VIDEO_RESOLUTION_TYPE_640_360 = 3, - VIDEO_RESOLUTION_TYPE_960_540 = 4, - VIDEO_RESOLUTION_TYPE_1280_720 = 5, - VIDEO_RESOLUTION_TYPE_1920_1080 = 31, - - VIDEO_RESOLUTION_TYPE_320_480 = 6, - VIDEO_RESOLUTION_TYPE_180_320 = 7, - VIDEO_RESOLUTION_TYPE_270_480 = 8, - VIDEO_RESOLUTION_TYPE_320_180 = 9, - VIDEO_RESOLUTION_TYPE_480_270 = 10, - - VIDEO_RESOLUTION_TYPE_240_320 = 11, - VIDEO_RESOLUTION_TYPE_360_480 = 12, - VIDEO_RESOLUTION_TYPE_480_640 = 13, - VIDEO_RESOLUTION_TYPE_320_240 = 14, - VIDEO_RESOLUTION_TYPE_480_360 = 15, - VIDEO_RESOLUTION_TYPE_640_480 = 16, - - VIDEO_RESOLUTION_TYPE_480_480 = 17, - VIDEO_RESOLUTION_TYPE_270_270 = 18, - VIDEO_RESOLUTION_TYPE_160_160 = 19, -}; - -/** - * 1.2 画面质量挡位 - * - * 如果您希望调整直播的编码参数,建议您直接使用 TXLivePusher 提供的 setVideoQuality 接口。 - * 由于视频编码参数中的分辨率,码率和帧率对最终效果都有着复杂的影响,如果您之前没有相关操作经验,不建议直接修改这些编码参数。 - * 我们在 setVideoQuality 接口中提供了如下几个挡位供您选择: - * - * 1. 标清:采用 360 × 640 的分辨率,码率调控范围 300kbps - 800kbps,关闭网络自适应时的码率为 800kbps,适合网络较差的直播环境。 - * 2. 高清:采用 540 × 960 的分辨率,码率调控范围 600kbps - 1500kbps,关闭网络自适应时的码率为 1200kbps,常规手机直播的推荐挡位。 - * 3. 超清:采用 720 × 1280 的分辨率,码率调控范围 600kbps - 1800kbps,关闭网络自适应时的码率为 1800kbps,能耗高,但清晰度较标清提升并不明显。 - * 4. 连麦(大主播):主播从原来的“推流状态”进入“连麦状态”后,可以通过 setVideoQuality 接口调整自 MAIN_PUBLISHER 挡位。 - * 5. 连麦(小主播):观众从原来的“播放状态”进入“连麦状态”后,可以通过 setVideoQuality 接口调整自 SUB_PUBLISHER 挡位。 - * 6. 
视频通话:该选项后续会逐步废弃,如果您希望实现纯视频通话而非直播功能,推荐使用腾讯云 [TRTC](https://cloud.tencent.com/product/trtc) 服务。 - * - * 【推荐设置】如果您对整个平台的清晰度要求比较高,推荐使用 setVideoQuality(HIGH_DEFINITION, NO, NO) 的组合。 - * 如果您的主播有很多三四线城市的网络适配要求,推荐使用 setVideoQuality(HIGH_DEFINITION, YES, NO) 的组合。 - * - * @note 在开启硬件加速后,您可能会发现诸如 368 × 640 或者 544 × 960 这样的“不完美”分辨率。 - * 这是由于部分硬编码器要求像素能被 16 整除所致,属于正常现象,您可以通过播放端的填充模式解决“小黑边”问题。 - */ -typedef NS_ENUM(NSInteger, TX_Enum_Type_VideoQuality) { - VIDEO_QUALITY_STANDARD_DEFINITION = 1, ///< 标清:采用 360 × 640 的分辨率 - VIDEO_QUALITY_HIGH_DEFINITION = 2, ///< 高清:采用 540 × 960 的分辨率 - VIDEO_QUALITY_SUPER_DEFINITION = 3, ///< 超清:采用 720 × 1280 的分辨率 - VIDEO_QUALITY_ULTRA_DEFINITION = 7, ///< 蓝光:采用 1080 × 1920 的分辨率 - VIDEO_QUALITY_LINKMIC_MAIN_PUBLISHER = 4, ///< 连麦场景下的大主播使用 - VIDEO_QUALITY_LINKMIC_SUB_PUBLISHER = 5, ///< 连麦场景下的小主播(连麦的观众)使用 - VIDEO_QUALITY_REALTIME_VIDEOCHAT = 6, ///< 纯视频通话场景使用(已废弃) -}; - -/** - * 1.3 画面旋转方向 - */ -typedef NS_ENUM(NSInteger, TX_Enum_Type_HomeOrientation) { - HOME_ORIENTATION_RIGHT = 0, ///< HOME 键在右边,横屏模式 - HOME_ORIENTATION_DOWN = 1, ///< HOME 键在下面,手机直播中最常见的竖屏直播模式 - HOME_ORIENTATION_LEFT = 2, ///< HOME 键在左边,横屏模式 - HOME_ORIENTATION_UP = 3, ///< HOME 键在上边,竖屏直播(适合小米 MIX2) -}; - -/** - * 1.4 画面填充模式 - */ -typedef NS_ENUM(NSInteger, TX_Enum_Type_RenderMode) { - - RENDER_MODE_FILL_SCREEN = 0, ///< 图像铺满屏幕,不留黑边,如果图像宽高比不同于屏幕宽高比,部分画面内容会被裁剪掉。 - RENDER_MODE_FILL_EDGE = 1, ///< 图像适应屏幕,保持画面完整,但如果图像宽高比不同于屏幕宽高比,会有黑边的存在。 -}; - -/** - * 1.5 美颜风格 - */ -typedef NS_ENUM(NSInteger, TX_Enum_Type_BeautyStyle) { - BEAUTY_STYLE_SMOOTH = 0, ///< 光滑,磨皮程度较高,更适合秀场直播类场景下使用。 - BEAUTY_STYLE_NATURE = 1, ///< 自然,磨皮算法会最大限度保留皮肤细节。 - BEAUTY_STYLE_PITU = 2, ///< 由上海优图实验室提供的美颜算法,磨皮效果介于光滑和自然之间,比光滑保留更多皮肤细节,比自然磨皮程度更高。 -}; - -/** - * 1.6 美颜程度,取值范围1 - 9,该枚举值定义了关闭和最大值。 - */ -typedef NS_ENUM(NSInteger, TX_Enum_Type_BeautyFilterDepth) { - BEAUTY_FILTER_DEPTH_CLOSE = 0, ///< 关闭美颜 - BEAUTY_FILTER_DEPTH_MAX = 9, ///< 最大美颜强度 -}; - - -/** - * 1.6 网络自适应算法,推荐选项:AUTO_ADJUST_LIVEPUSH_STRATEGY - */ -typedef 
NS_ENUM(NSInteger, TX_Enum_Type_AutoAdjustStrategy) { - AUTO_ADJUST_NONE = -1, ///< 非法数值,用于 SDK 内部做合法性检查 - - AUTO_ADJUST_LIVEPUSH_STRATEGY = 0, ///< 最适合直播模式下的流控算法 - AUTO_ADJUST_LIVEPUSH_RESOLUTION_STRATEGY = 1, ///< 不推荐:SDK 内部会调整视频分辨率,如果有 H5 分享的需求请勿使用 - AUTO_ADJUST_REALTIME_VIDEOCHAT_STRATEGY = 5, ///< 待废弃,请使用腾讯云 TRTC 服务 - - AUTO_ADJUST_BITRATE_STRATEGY_1 = 0, ///< 已经废弃 - AUTO_ADJUST_BITRATE_RESOLUTION_STRATEGY_1 = 1, ///< 已经废弃 - AUTO_ADJUST_BITRATE_STRATEGY_2 = 2, ///< 已经废弃 - AUTO_ADJUST_BITRATE_RESOLUTION_STRATEGY_2 = 3, ///< 已经废弃 - AUTO_ADJUST_REALTIME_BITRATE_STRATEGY = 4, ///< 已经废弃 - AUTO_ADJUST_REALTIME_BITRATE_RESOLUTION_STRATEGY = 5, ///< 已经废弃 -}; - -/** - * 1.7 视频帧的数据格式(未压缩前的) - */ -typedef NS_ENUM(NSInteger, TXVideoType) { - - VIDEO_TYPE_420SP = 1, ///< Android 视频采集格式,PixelFormat.YCbCr_420_SP 17 - VIDEO_TYPE_420YpCbCr = 2, ///< iOS 视频采集格式,kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange - VIDEO_TYPE_420P = 3, ///< yuv420p格式 - VIDEO_TYPE_BGRA8888 = 4, ///< BGRA8888 - VIDEO_TYPE_RGBA8888 = 5, ///< RGBA8888 - VIDEO_TYPE_NV12 = 6, ///< NV12(iOS) -}; - -/** - * 1.8 本地视频预览镜像类型 - * - * iOS 的本地画面提供三种设置模式 - */ -typedef NS_ENUM(NSUInteger, TXLocalVideoMirrorType) { - LocalVideoMirrorType_Auto = 0, ///< 前置摄像头镜像,后置摄像头不镜像 - LocalVideoMirrorType_Enable = 1, ///< 前后置摄像头画面均镜像 - LocalVideoMirrorType_Disable = 2, ///< 前后置摄像头画面均不镜像 -}; - -///////////////////////////////////////////////////////////////////////////////// -// -// 【音频相关枚举值定义】 -// -///////////////////////////////////////////////////////////////////////////////// - -/** - * 2.1 音频采样率 - */ -typedef NS_ENUM(NSInteger, TX_Enum_Type_AudioSampleRate) { - - AUDIO_SAMPLE_RATE_8000 = 0, ///< 8k采样率 - AUDIO_SAMPLE_RATE_16000 = 1, ///< 16k采样率 - AUDIO_SAMPLE_RATE_32000 = 2, ///< 32k采样率 - AUDIO_SAMPLE_RATE_44100 = 3, ///< 44.1k采样率 - AUDIO_SAMPLE_RATE_48000 = 4, ///< 48k采样率 -}; - -/** - * 2.2 混响类型 - */ -typedef NS_ENUM(NSInteger, TXReverbType) { - REVERB_TYPE_0 = 0, ///< 关闭混响 - REVERB_TYPE_1 = 1, ///< KTV - REVERB_TYPE_2 = 2, 
///< 小房间 - REVERB_TYPE_3 = 3, ///< 大会堂 - REVERB_TYPE_4 = 4, ///< 低沉 - REVERB_TYPE_5 = 5, ///< 洪亮 - REVERB_TYPE_6 = 6, ///< 金属声 - REVERB_TYPE_7 = 7, ///< 磁性 -}; - -/** - * 2.3 变声选项 - */ -typedef NS_ENUM(NSInteger, TXVoiceChangerType) { - - VOICECHANGER_TYPE_0 = 0, ///< 关闭变声 - VOICECHANGER_TYPE_1 = 1, ///< 熊孩子 - VOICECHANGER_TYPE_2 = 2, ///< 萝莉 - VOICECHANGER_TYPE_3 = 3, ///< 大叔 - VOICECHANGER_TYPE_4 = 4, ///< 重金属 - VOICECHANGER_TYPE_5 = 5, ///< 感冒 - VOICECHANGER_TYPE_6 = 6, ///< 外国人 - VOICECHANGER_TYPE_7 = 7, ///< 困兽 - VOICECHANGER_TYPE_8 = 8, ///< 死肥仔 - VOICECHANGER_TYPE_9 = 9, ///< 强电流 - VOICECHANGER_TYPE_10 = 10, ///< 重机械 - VOICECHANGER_TYPE_11 = 11, ///< 空灵 -}; - -/** - * 2.4 声音播放模式(音频路由) - * - * 一般手机都有两个扬声器,设置音频路由的作用就是要决定声音从哪个扬声器播放出来。 - * - Speakerphone:扬声器,位于手机底部,声音偏大,适合外放音乐。 - * - Earpiece:听筒,位于手机顶部,声音偏小,适合通话。 - */ -typedef NS_ENUM(NSInteger, TXAudioRouteType) { - AUDIO_ROUTE_SPEAKER = 0, ///< 扬声器,位于手机底部,声音偏大,适合外放音乐。 - AUDIO_ROUTE_RECEIVER = 1, ///< 听筒,位于手机顶部,声音偏小,适合通话。 -}; - -/** - * 2.5 系统音量类型 - * - * 该枚举值用于控制推流过程中使用何种系统音量类型 - */ -typedef NS_ENUM(NSInteger, TXSystemAudioVolumeType) { - SYSTEM_AUDIO_VOLUME_TYPE_AUTO = 0, ///< 默认类型,SDK会自动选择合适的音量类型 - SYSTEM_AUDIO_VOLUME_TYPE_MEDIA = 1, ///< 仅使用媒体音量,SDK不再使用通话音量 - SYSTEM_AUDIO_VOLUME_TYPE_VOIP = 2, ///< 仅使用通话音量,SDK一直使用通话音量 -}; - -/** - * 2.6 推流用网络通道(待废弃) - */ -typedef NS_ENUM(NSInteger, TX_Enum_Type_RTMPChannel) { - - RTMP_CHANNEL_TYPE_AUTO = 0, ///< 自动:推腾讯云使用加速协议,推友商云使用标准 RTMP 协议。 - RTMP_CHANNEL_TYPE_STANDARD = 1, ///< 标准 RTMP 协议 - RTMP_CHANNEL_TYPE_PRIVATE = 2, ///< 腾讯云专属加速协议 -}; - - -/** - * 2.7 屏幕采集源(用于录屏推流) - */ -#if TARGET_OS_OSX -typedef NS_ENUM(NSInteger, TXCaptureVideoInputSource) { - TXCaptureVideoInputSourceCamera, - TXCaptureVideoInputSourceScreen, - TXCaptureVideoInputSourceWindow -}; -#endif - - - - -///////////////////////////////////////////////////////////////////////////////// -// -// 【状态通知字段名 onNetStatus】 -// -///////////////////////////////////////////////////////////////////////////////// - 
-/** - * TXLivePushListener 和 TXLivePlayListener 的 onNetStatus() 会以 2s 一次的时间间隔,定时通知网络状态和内部指标, - * 这些数值采用 key-value 的组织格式,其中 key 值的定义如下: - */ - -#define NET_STATUS_CPU_USAGE @"CPU_USAGE" ///> 进程 CPU 占用率 -#define NET_STATUS_CPU_USAGE_D @"CPU_USAGE_DEVICE" ///> 系统 CPU 占用率 - -#define NET_STATUS_VIDEO_WIDTH @"VIDEO_WIDTH" ///> 视频分辨率宽度 -#define NET_STATUS_VIDEO_HEIGHT @"VIDEO_HEIGHT" ///> 视频分辨率高度 -#define NET_STATUS_VIDEO_FPS @"VIDEO_FPS" ///> 视频帧率:也就是视频编码器每秒生产了多少帧画面。 -#define NET_STATUS_VIDEO_GOP @"VIDEO_GOP" ///> 关键帧间隔:即每两个关键帧(I帧)间隔时长,单位:秒。 -#define NET_STATUS_VIDEO_BITRATE @"VIDEO_BITRATE" ///> 视频码率:即视频编码器每秒生产了多少视频数据,单位:kbps。 -#define NET_STATUS_AUDIO_BITRATE @"AUDIO_BITRATE" ///> 音频码率:即音频编码器每秒生产了多少音频数据,单位:kbps。 -#define NET_STATUS_NET_SPEED @"NET_SPEED" ///> 传输速度:即每秒钟发送或接收了多少字节的数据。 - -#define NET_STATUS_VIDEO_CACHE @"VIDEO_CACHE" ///> TXLivePusher:主播端堆积的视频帧数;TXLivePlayer:播放端缓冲的视频总时长。 -#define NET_STATUS_AUDIO_CACHE @"AUDIO_CACHE" ///> TXLivePusher:主播端堆积的音频帧数;TXLivePlayer:播放端缓冲的音频总时长。 -#define NET_STATUS_VIDEO_DROP @"VIDEO_DROP" ///> TXLivePusher:主播端主动丢弃的视频帧数;TXLivePlayer: N/A。 -#define NET_STATUS_AUDIO_DROP @"AUDIO_DROP" ///> 暂未使用 - -#define NET_STATUS_V_DEC_CACHE_SIZE @"V_DEC_CACHE_SIZE" ///> TXLivePlayer:播放端解码器中缓存的视频帧数(Android 端硬解码时存在)。 -#define NET_STATUS_V_SUM_CACHE_SIZE @"V_SUM_CACHE_SIZE" ///> TXLivePlayer:播放端缓冲的总视频帧数,该数值越大,播放延迟越高。 -#define NET_STATUS_AV_PLAY_INTERVAL @"AV_PLAY_INTERVAL" ///> TXLivePlayer:音画同步错位时间(播放),单位 ms,此数值越小,音画同步越好。 -#define NET_STATUS_AV_RECV_INTERVAL @"AV_RECV_INTERVAL" ///> TXLivePlayer:音画同步错位时间(网络),单位 ms,此数值越小,音画同步越好。 -#define NET_STATUS_AUDIO_CACHE_THRESHOLD @"AUDIO_CACHE_THRESHOLD" ///> TXLivePlayer:音频缓冲时长阈值,缓冲超过该阈值后,播放器会开始调控延时。 - -#define NET_STATUS_AUDIO_INFO @"AUDIO_INFO" ///> 音频信息:包括采样率信息和声道数信息 -#define NET_STATUS_NET_JITTER @"NET_JITTER" ///> 网络抖动:数值越大表示抖动越大,网络越不稳定 -#define NET_STATUS_QUALITY_LEVEL @"NET_QUALITY_LEVEL" ///> 网络质量:0:未定义 1:最好 2:好 3:一般 4:差 5:很差 6:不可用 -#define NET_STATUS_SERVER_IP @"SERVER_IP" ///> 连接的Server IP地址 - - 
-///////////////////////////////////////////////////////////////////////////////// -// -// 【事件通知字段名 onPushEvent onPlayEvent】 -// -///////////////////////////////////////////////////////////////////////////////// - - -/** - * 腾讯云 LiteAVSDK 通过 TXLivePushListener 中的 onPushEvent(),TXLivePlayListener 中的 onPlayEvent() 向您通知内部错误、警告和事件: - * - 错误:严重且不可恢复的错误,会中断 SDK 的正常逻辑。 - * - 警告:非致命性的提醒和警告,可以不理会。 - * - 事件:SDK 的流程和状态通知,比如开始推流,开始播放,等等。 - * - * 这些数值采用 key-value 的组织格式,其中 key 值的定义如下: - */ -#define EVT_MSG @"EVT_MSG" ///> 事件ID -#define EVT_TIME @"EVT_TIME" ///> 事件发生的UTC毫秒时间戳 -#define EVT_UTC_TIME @"EVT_UTC_TIME" ///> 事件发生的UTC毫秒时间戳(兼容性) -#define EVT_BLOCK_DURATION @"EVT_BLOCK_DURATION" ///> 卡顿时间(毫秒) -#define EVT_PARAM1 @"EVT_PARAM1" ///> 事件参数1 -#define EVT_PARAM2 @"EVT_PARAM2" ///> 事件参数2 -#define EVT_GET_MSG @"EVT_GET_MSG" ///> 消息内容,收到PLAY_EVT_GET_MESSAGE事件时,通过该字段获取消息内容 -#define EVT_PLAY_PROGRESS @"EVT_PLAY_PROGRESS" ///> 点播:视频播放进度 -#define EVT_PLAY_DURATION @"EVT_PLAY_DURATION" ///> 点播:视频总时长 -#define EVT_PLAYABLE_DURATION @"PLAYABLE_DURATION" ///> 点播:视频可播放时长 -#define EVT_PLAY_COVER_URL @"EVT_PLAY_COVER_URL" ///> 点播:视频封面 -#define EVT_PLAY_URL @"EVT_PLAY_URL" ///> 点播:视频播放地址 -#define EVT_PLAY_NAME @"EVT_PLAY_NAME" ///> 点播:视频名称 -#define EVT_PLAY_DESCRIPTION @"EVT_PLAY_DESCRIPTION" ///> 点播:视频简介 - -#define STREAM_ID @"STREAM_ID" - -#endif +// Copyright © 2020 Tencent. All rights reserved. 
+ +#ifndef __TX_LIVE_SDK_TYPE_DEF_H__ +#define __TX_LIVE_SDK_TYPE_DEF_H__ + +#import <Foundation/Foundation.h> +#import "TXLiteAVSymbolExport.h" +#import "TXLiveSDKEventDef.h" +#if TARGET_OS_IPHONE +#import <UIKit/UIKit.h> +typedef UIView TXView; +typedef UIImage TXImage; +typedef UIEdgeInsets TXEdgeInsets; +#elif TARGET_OS_MAC +#import <AppKit/AppKit.h> +typedef NSView TXView; +typedef NSImage TXImage; +typedef NSEdgeInsets TXEdgeInsets; +#endif + +///////////////////////////////////////////////////////////////////////////////// +// +// 【视频相关枚举值定义】 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 1.1 视频分辨率 + * + * 在普通模式下,TXLivePusher 只支持三种固定的分辨率,即:360 × 640、540 × 960 以及 720 × 1280。 + * + *【如何横屏推流】 + * 如果希望使用 640 × 360、960 × 540、1280 × 720 这样的横屏分辨率,需要设置 TXLivePushConfig 中的 + *homeOrientation 属性, 并使用 TXLivePusher 中的 setRenderRotation 接口进行画面旋转。 + * + *【自定义分辨率】 + * 如果希望使用其他分辨率,可以设置 TXLivePushConfig 中的 customModeType 为 + *CUSTOM_MODE_VIDEO_CAPTURE, 自己采集 SampleBuffer 送给 TXLivePusher 的 sendVideoSampleBuffer + *接口。 + * + *【建议的分辨率】 + * 手机直播场景下最常用的分辨率为 9:16 的竖屏分辨率 540 × 960。 + * 从清晰的角度,540 × 960 比 360 × 640 要清晰,同时跟 720 × 1280 相当。 + * 从性能的角度,540 × 960 可以避免前置摄像头开启 720 × 1280 + *的采集分辨率,对于美颜开销很大的场景能节省不少的计算量。 + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_VideoResolution) { + + /// 竖屏分辨率,宽高比为 9:16 + VIDEO_RESOLUTION_TYPE_360_640 = 0, ///< 建议码率 800kbps + VIDEO_RESOLUTION_TYPE_540_960 = 1, ///< 建议码率 1200kbps + VIDEO_RESOLUTION_TYPE_720_1280 = 2, ///< 建议码率 1800kbps + VIDEO_RESOLUTION_TYPE_1080_1920 = 30, ///< 建议码率 3000kbps + + /// 如下均为内建分辨率,为 SDK 内部使用,不支持通过接口进行设置 + VIDEO_RESOLUTION_TYPE_640_360 = 3, + VIDEO_RESOLUTION_TYPE_960_540 = 4, + VIDEO_RESOLUTION_TYPE_1280_720 = 5, + VIDEO_RESOLUTION_TYPE_1920_1080 = 31, + + VIDEO_RESOLUTION_TYPE_320_480 = 6, + VIDEO_RESOLUTION_TYPE_180_320 = 7, + VIDEO_RESOLUTION_TYPE_270_480 = 8, + VIDEO_RESOLUTION_TYPE_320_180 = 9, + VIDEO_RESOLUTION_TYPE_480_270 = 10, + + VIDEO_RESOLUTION_TYPE_240_320 = 
11, + VIDEO_RESOLUTION_TYPE_360_480 = 12, + VIDEO_RESOLUTION_TYPE_480_640 = 13, + VIDEO_RESOLUTION_TYPE_320_240 = 14, + VIDEO_RESOLUTION_TYPE_480_360 = 15, + VIDEO_RESOLUTION_TYPE_640_480 = 16, + + VIDEO_RESOLUTION_TYPE_480_480 = 17, + VIDEO_RESOLUTION_TYPE_270_270 = 18, + VIDEO_RESOLUTION_TYPE_160_160 = 19, +}; + +/** + * 1.2 画面质量挡位 + * + * 如果您希望调整直播的编码参数,建议您直接使用 TXLivePusher 提供的 setVideoQuality 接口。 + * 由于视频编码参数中的分辨率,码率和帧率对最终效果都有着复杂的影响,如果您之前没有相关操作经验,不建议直接修改这些编码参数。 + * 我们在 setVideoQuality 接口中提供了如下几个挡位供您选择: + * + * 1. 标清:采用 360 × 640 的分辨率,码率调控范围 300kbps - 800kbps,关闭网络自适应时的码率为 + * 800kbps,适合网络较差的直播环境。 + * 2. 高清:采用 540 × 960 的分辨率,码率调控范围 600kbps - 1500kbps,关闭网络自适应时的码率为 + * 1200kbps,常规手机直播的推荐挡位。 + * 3. 超清:采用 720 × 1280 的分辨率,码率调控范围 600kbps - 1800kbps,关闭网络自适应时的码率为 + * 1800kbps,能耗高,但清晰度较标清提升并不明显。 + * 4. 连麦(大主播):主播从原来的“推流状态”进入“连麦状态”后,可以通过 setVideoQuality 接口调整自 + * MAIN_PUBLISHER 挡位。 + * 5. 连麦(小主播):观众从原来的“播放状态”进入“连麦状态”后,可以通过 setVideoQuality 接口调整自 + * SUB_PUBLISHER 挡位。 + * 6. 视频通话:该选项后续会逐步废弃,如果您希望实现纯视频通话而非直播功能,推荐使用腾讯云 + * [TRTC](https://cloud.tencent.com/product/trtc) 服务。 + * + * 【推荐设置】如果您对整个平台的清晰度要求比较高,推荐使用 setVideoQuality(HIGH_DEFINITION, NO, NO) + * 的组合。 如果您的主播有很多三四线城市的网络适配要求,推荐使用 setVideoQuality(HIGH_DEFINITION, + * YES, NO) 的组合。 + * + * @note 在开启硬件加速后,您可能会发现诸如 368 × 640 或者 544 × 960 这样的“不完美”分辨率。 + * 这是由于部分硬编码器要求像素能被 16 + * 整除所致,属于正常现象,您可以通过播放端的填充模式解决“小黑边”问题。 + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_VideoQuality) { + VIDEO_QUALITY_STANDARD_DEFINITION = 1, ///< 标清:采用 360 × 640 的分辨率 + VIDEO_QUALITY_HIGH_DEFINITION = 2, ///< 高清:采用 540 × 960 的分辨率 + VIDEO_QUALITY_SUPER_DEFINITION = 3, ///< 超清:采用 720 × 1280 的分辨率 + VIDEO_QUALITY_ULTRA_DEFINITION = 7, ///< 蓝光:采用 1080 × 1920 的分辨率 + VIDEO_QUALITY_LINKMIC_MAIN_PUBLISHER = 4, ///< 连麦场景下的大主播使用 + VIDEO_QUALITY_LINKMIC_SUB_PUBLISHER = 5, ///< 连麦场景下的小主播(连麦的观众)使用 + VIDEO_QUALITY_REALTIME_VIDEOCHAT = 6, ///< 纯视频通话场景使用(已废弃) +}; + +/** + * 1.3 画面旋转方向 + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_HomeOrientation) { + 
HOME_ORIENTATION_RIGHT = 0, ///< HOME 键在右边,横屏模式 + HOME_ORIENTATION_DOWN = 1, ///< HOME 键在下面,手机直播中最常见的竖屏直播模式 + HOME_ORIENTATION_LEFT = 2, ///< HOME 键在左边,横屏模式 + HOME_ORIENTATION_UP = 3, ///< HOME 键在上边,竖屏直播(适合小米 MIX2) +}; + +/** + * 1.4 画面填充模式 + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_RenderMode) { + + RENDER_MODE_FILL_SCREEN = + 0, ///< 图像铺满屏幕,不留黑边,如果图像宽高比不同于屏幕宽高比,部分画面内容会被裁剪掉。 + RENDER_MODE_FILL_EDGE = + 1, ///< 图像适应屏幕,保持画面完整,但如果图像宽高比不同于屏幕宽高比,会有黑边的存在。 +}; + +/** + * 1.5 美颜风格 + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_BeautyStyle) { + BEAUTY_STYLE_SMOOTH = 0, ///< 光滑,磨皮程度较高,更适合秀场直播类场景下使用。 + BEAUTY_STYLE_NATURE = 1, ///< 自然,磨皮算法会最大限度保留皮肤细节。 + BEAUTY_STYLE_PITU = + 2, ///< 由上海优图实验室提供的美颜算法,磨皮效果介于光滑和自然之间,比光滑保留更多皮肤细节,比自然磨皮程度更高。 +}; + +/** + * 1.6 美颜程度,取值范围1 - 9,该枚举值定义了关闭和最大值。 + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_BeautyFilterDepth) { + BEAUTY_FILTER_DEPTH_CLOSE = 0, ///< 关闭美颜 + BEAUTY_FILTER_DEPTH_MAX = 9, ///< 最大美颜强度 +}; + +/** + * 1.6 网络自适应算法,推荐选项:AUTO_ADJUST_LIVEPUSH_STRATEGY + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_AutoAdjustStrategy) { + AUTO_ADJUST_NONE = -1, ///< 非法数值,用于 SDK 内部做合法性检查 + + AUTO_ADJUST_LIVEPUSH_STRATEGY = 0, ///< 最适合直播模式下的流控算法 + AUTO_ADJUST_LIVEPUSH_RESOLUTION_STRATEGY = + 1, ///< 不推荐:SDK 内部会调整视频分辨率,如果有 H5 分享的需求请勿使用 + AUTO_ADJUST_REALTIME_VIDEOCHAT_STRATEGY = 5, ///< 待废弃,请使用腾讯云 TRTC 服务 + + AUTO_ADJUST_BITRATE_STRATEGY_1 = 0, ///< 已经废弃 + AUTO_ADJUST_BITRATE_RESOLUTION_STRATEGY_1 = 1, ///< 已经废弃 + AUTO_ADJUST_BITRATE_STRATEGY_2 = 2, ///< 已经废弃 + AUTO_ADJUST_BITRATE_RESOLUTION_STRATEGY_2 = 3, ///< 已经废弃 + AUTO_ADJUST_REALTIME_BITRATE_STRATEGY = 4, ///< 已经废弃 + AUTO_ADJUST_REALTIME_BITRATE_RESOLUTION_STRATEGY = 5, ///< 已经废弃 +}; + +/** + * 1.7 视频帧的数据格式(未压缩前的) + */ +typedef NS_ENUM(NSInteger, TXVideoType) { + + VIDEO_TYPE_420SP = 1, ///< Android 视频采集格式,PixelFormat.YCbCr_420_SP 17 + VIDEO_TYPE_420YpCbCr = 2, ///< iOS 视频采集格式,kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange + VIDEO_TYPE_420P = 3, ///< yuv420p格式 + VIDEO_TYPE_BGRA8888 = 4, ///< 
BGRA8888 + VIDEO_TYPE_RGBA8888 = 5, ///< RGBA8888 + VIDEO_TYPE_NV12 = 6, ///< NV12(iOS) +}; + +/** + * 1.8 本地视频预览镜像类型 + * + * iOS 的本地画面提供三种设置模式 + */ +typedef NS_ENUM(NSUInteger, TXLocalVideoMirrorType) { + LocalVideoMirrorType_Auto = 0, ///< 前置摄像头镜像,后置摄像头不镜像 + LocalVideoMirrorType_Enable = 1, ///< 前后置摄像头画面均镜像 + LocalVideoMirrorType_Disable = 2, ///< 前后置摄像头画面均不镜像 +}; + +///////////////////////////////////////////////////////////////////////////////// +// +// 【音频相关枚举值定义】 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 2.1 音频采样率 + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_AudioSampleRate) { + + AUDIO_SAMPLE_RATE_8000 = 0, ///< 8k采样率 + AUDIO_SAMPLE_RATE_16000 = 1, ///< 16k采样率 + AUDIO_SAMPLE_RATE_32000 = 2, ///< 32k采样率 + AUDIO_SAMPLE_RATE_44100 = 3, ///< 44.1k采样率 + AUDIO_SAMPLE_RATE_48000 = 4, ///< 48k采样率 +}; + +/** + * 2.2 混响类型 + */ +typedef NS_ENUM(NSInteger, TXReverbType) { + REVERB_TYPE_0 = 0, ///< 关闭混响 + REVERB_TYPE_1 = 1, ///< KTV + REVERB_TYPE_2 = 2, ///< 小房间 + REVERB_TYPE_3 = 3, ///< 大会堂 + REVERB_TYPE_4 = 4, ///< 低沉 + REVERB_TYPE_5 = 5, ///< 洪亮 + REVERB_TYPE_6 = 6, ///< 金属声 + REVERB_TYPE_7 = 7, ///< 磁性 +}; + +/** + * 2.3 变声选项 + */ +typedef NS_ENUM(NSInteger, TXVoiceChangerType) { + + VOICECHANGER_TYPE_0 = 0, ///< 关闭变声 + VOICECHANGER_TYPE_1 = 1, ///< 熊孩子 + VOICECHANGER_TYPE_2 = 2, ///< 萝莉 + VOICECHANGER_TYPE_3 = 3, ///< 大叔 + VOICECHANGER_TYPE_4 = 4, ///< 重金属 + VOICECHANGER_TYPE_5 = 5, ///< 感冒 + VOICECHANGER_TYPE_6 = 6, ///< 外国人 + VOICECHANGER_TYPE_7 = 7, ///< 困兽 + VOICECHANGER_TYPE_8 = 8, ///< 死肥仔 + VOICECHANGER_TYPE_9 = 9, ///< 强电流 + VOICECHANGER_TYPE_10 = 10, ///< 重机械 + VOICECHANGER_TYPE_11 = 11, ///< 空灵 +}; + +/** + * 2.4 声音播放模式(音频路由) + * + * 一般手机都有两个扬声器,设置音频路由的作用就是要决定声音从哪个扬声器播放出来。 + * - Speakerphone:扬声器,位于手机底部,声音偏大,适合外放音乐。 + * - Earpiece:听筒,位于手机顶部,声音偏小,适合通话。 + */ +typedef NS_ENUM(NSInteger, TXAudioRouteType) { + AUDIO_ROUTE_SPEAKER = 0, ///< 扬声器,位于手机底部,声音偏大,适合外放音乐。 + AUDIO_ROUTE_RECEIVER = 1, ///< 
听筒,位于手机顶部,声音偏小,适合通话。 +}; + +/** + * 2.5 系统音量类型 + * + * 该枚举值用于控制推流过程中使用何种系统音量类型 + */ +typedef NS_ENUM(NSInteger, TXSystemAudioVolumeType) { + SYSTEM_AUDIO_VOLUME_TYPE_AUTO = 0, ///< 默认类型,SDK会自动选择合适的音量类型 + SYSTEM_AUDIO_VOLUME_TYPE_MEDIA = 1, ///< 仅使用媒体音量,SDK不再使用通话音量 + SYSTEM_AUDIO_VOLUME_TYPE_VOIP = 2, ///< 仅使用通话音量,SDK一直使用通话音量 +}; + +/** + * 2.6 推流用网络通道(待废弃) + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_RTMPChannel) { +#ifndef TX_RTMP_CHANNEL +#define TX_RTMP_CHANNEL + RTMP_CHANNEL_TYPE_AUTO = 0, ///< 自动:推腾讯云使用加速协议,推友商云使用标准 RTMP 协议。 + RTMP_CHANNEL_TYPE_STANDARD = 1, ///< 标准 RTMP 协议 + RTMP_CHANNEL_TYPE_PRIVATE = 2, ///< 腾讯云专属加速协议 +#endif +}; + +/** + * 2.7 屏幕采集源(用于录屏推流) + */ +#if TARGET_OS_OSX +typedef NS_ENUM(NSInteger, TXCaptureVideoInputSource) { + TXCaptureVideoInputSourceCamera, + TXCaptureVideoInputSourceScreen, + TXCaptureVideoInputSourceWindow +}; +#endif + +///////////////////////////////////////////////////////////////////////////////// +// +// 【状态通知字段名 onNetStatus】 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * TXLivePushListener 和 TXLivePlayListener 的 onNetStatus() 会以 2s + * 一次的时间间隔,定时通知网络状态和内部指标, 这些数值采用 key-value 的组织格式,其中 key + * 值的定义如下: + */ +#ifndef TXE_NET_STATUS +#define TXE_NET_STATUS + +#define NET_STATUS_CPU_USAGE @"CPU_USAGE" ///> 进程 CPU 占用率 +#define NET_STATUS_CPU_USAGE_D @"CPU_USAGE_DEVICE" ///> 系统 CPU 占用率 + +#define NET_STATUS_VIDEO_WIDTH @"VIDEO_WIDTH" ///> 视频分辨率宽度 +#define NET_STATUS_VIDEO_HEIGHT @"VIDEO_HEIGHT" ///> 视频分辨率高度 +#define NET_STATUS_VIDEO_FPS @"VIDEO_FPS" ///> 视频帧率:也就是视频编码器每秒生产了多少帧画面。 +#define NET_STATUS_VIDEO_GOP @"VIDEO_GOP" ///> 关键帧间隔:即每两个关键帧(I帧)间隔时长,单位:秒。 +#define NET_STATUS_VIDEO_BITRATE \ + @"VIDEO_BITRATE" ///> 视频码率:即视频编码器每秒生产了多少视频数据,单位:kbps。 +#define NET_STATUS_AUDIO_BITRATE \ + @"AUDIO_BITRATE" ///> 音频码率:即音频编码器每秒生产了多少音频数据,单位:kbps。 +#define NET_STATUS_NET_SPEED @"NET_SPEED" ///> 传输速度:即每秒钟发送或接收了多少字节的数据。 + +#define NET_STATUS_VIDEO_CACHE \ + @"VIDEO_CACHE" ///> 
TXLivePusher:主播端堆积的视频帧数;TXLivePlayer:播放端缓冲的视频总时长。 +#define NET_STATUS_AUDIO_CACHE \ + @"AUDIO_CACHE" ///> TXLivePusher:主播端堆积的音频帧数;TXLivePlayer:播放端缓冲的音频总时长。 +#define NET_STATUS_VIDEO_DROP \ + @"VIDEO_DROP" ///> TXLivePusher:主播端主动丢弃的视频帧数;TXLivePlayer: N/A。 +#define NET_STATUS_AUDIO_DROP @"AUDIO_DROP" ///> 暂未使用 + +#define NET_STATUS_V_DEC_CACHE_SIZE \ + @"V_DEC_CACHE_SIZE" ///> TXLivePlayer:播放端解码器中缓存的视频帧数(Android 端硬解码时存在)。 +#define NET_STATUS_V_SUM_CACHE_SIZE \ + @"V_SUM_CACHE_SIZE" ///> TXLivePlayer:播放端缓冲的总视频帧数,该数值越大,播放延迟越高。 +#define NET_STATUS_AV_PLAY_INTERVAL \ + @"AV_PLAY_INTERVAL" ///> TXLivePlayer:音画同步错位时间(播放),单位 + /// ms,此数值越小,音画同步越好。 +#define NET_STATUS_AV_RECV_INTERVAL \ + @"AV_RECV_INTERVAL" ///> TXLivePlayer:音画同步错位时间(网络),单位 + /// ms,此数值越小,音画同步越好。 +#define NET_STATUS_AUDIO_CACHE_THRESHOLD \ + @"AUDIO_CACHE_THRESHOLD" ///> + /// TXLivePlayer:音频缓冲时长阈值,缓冲超过该阈值后,播放器会开始调控延时。 +#define NET_STATUS_AUDIO_BLOCK_TIME @"AUDIO_BLOCK_TIME" ///> 拉流专用:音频卡顿时长,单位ms +#define NET_STATUS_AUDIO_INFO @"AUDIO_INFO" ///> 音频信息:包括采样率信息和声道数信息 +#define NET_STATUS_NET_JITTER @"NET_JITTER" ///> 网络抖动:数值越大表示抖动越大,网络越不稳定 +#define NET_STATUS_QUALITY_LEVEL \ + @"NET_QUALITY_LEVEL" ///> 网络质量:0:未定义 1:最好 2:好 3:一般 4:差 5:很差 6:不可用 +#define NET_STATUS_SERVER_IP @"SERVER_IP" ///> 连接的Server IP地址 +#endif + +///////////////////////////////////////////////////////////////////////////////// +// +// 【事件通知字段名 onPushEvent onPlayEvent】 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 腾讯云 LiteAVSDK 通过 TXLivePushListener 中的 onPushEvent(),TXLivePlayListener 中的 + * onPlayEvent() 向您通知内部错误、警告和事件: + * - 错误:严重且不可恢复的错误,会中断 SDK 的正常逻辑。 + * - 警告:非致命性的提醒和警告,可以不理会。 + * - 事件:SDK 的流程和状态通知,比如开始推流,开始播放,等等。 + * + * 这些数值采用 key-value 的组织格式,其中 key 值的定义如下: + */ +#ifndef TX_EVT +#define TX_EVT + +#define EVT_MSG @"EVT_MSG" ///> 事件ID +#define EVT_TIME @"EVT_TIME" ///> 事件发生的UTC毫秒时间戳 +#define EVT_UTC_TIME @"EVT_UTC_TIME" ///> 事件发生的UTC毫秒时间戳(兼容性) +#define EVT_BLOCK_DURATION 
@"EVT_BLOCK_DURATION" ///> 卡顿时间(毫秒) +#define EVT_PARAM1 @"EVT_PARAM1" ///> 事件参数1 +#define EVT_PARAM2 @"EVT_PARAM2" ///> 事件参数2 +#define EVT_GET_MSG \ + @"EVT_GET_MSG" ///> 消息内容,收到PLAY_EVT_GET_MESSAGE事件时,通过该字段获取消息内容 +#define EVT_PLAY_PROGRESS @"EVT_PLAY_PROGRESS" ///> 点播:视频播放进度 +#define EVT_PLAY_DURATION @"EVT_PLAY_DURATION" ///> 点播:视频总时长 +#define EVT_PLAYABLE_DURATION @"PLAYABLE_DURATION" ///> 点播:视频可播放时长 +#define EVT_PLAY_COVER_URL @"EVT_PLAY_COVER_URL" ///> 点播:视频封面 +#define EVT_PLAY_URL @"EVT_PLAY_URL" ///> 点播:视频播放地址 +#define EVT_PLAY_NAME @"EVT_PLAY_NAME" ///> 点播:视频名称 +#define EVT_PLAY_DESCRIPTION @"EVT_PLAY_DESCRIPTION" ///> 点播:视频简介 + +#define STREAM_ID @"STREAM_ID" + +#endif + +#endif diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXVideoCustomProcessDelegate.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXVideoCustomProcessDelegate.h index 98be4fa..4128951 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXVideoCustomProcessDelegate.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/TXVideoCustomProcessDelegate.h @@ -1,10 +1,4 @@ -// -// TXVideoCustomProcessDelegate.h -// TXRTMPSDK -// -// Created by annidyfeng on 2017/3/20. -// -// +// Copyright © 2020 Tencent. All rights reserved. #import <Foundation/Foundation.h> #if TARGET_OS_IPHONE @@ -24,7 +18,8 @@ * @param width 纹理的宽度 * @param height 纹理的高度 * @return 返回给SDK的纹理 - * 说明:SDK回调出来的纹理类型是GL_TEXTURE_2D,接口返回给SDK的纹理类型也必须是GL_TEXTURE_2D; 该回调在SDK美颜之后. 纹理格式为GL_RGBA + * 说明:SDK回调出来的纹理类型是GL_TEXTURE_2D,接口返回给SDK的纹理类型也必须是GL_TEXTURE_2D; + * 该回调在SDK美颜之后. 纹理格式为GL_RGBA */ - (GLuint)onPreProcessTexture:(GLuint)texture width:(CGFloat)width height:(CGFloat)height; diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveCode.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveCode.h new file mode 100644 index 0000000..e1ca5b3 --- /dev/null +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveCode.h @@ -0,0 +1,124 @@ +// +// Copyright © 2020 Tencent. 
All rights reserved. +// +// Module: V2TXLive +// + +#import <Foundation/Foundation.h> + +/// @defgroup V2TXLiveCode_ios V2TXLiveCode +/// 腾讯云直播服务(LVB)错误码和警告码的定义。 +/// @{ + +///////////////////////////////////////////////////////////////////////////////// +// +// V2 错误码和警告码 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * V2 错误码和警告码 + */ +typedef NS_ENUM(NSInteger, V2TXLiveCode) { + + /// 没有错误 + V2TXLIVE_OK = 0, + + /// 暂未归类的通用错误 + V2TXLIVE_ERROR_FAILED = -1, + + ///调用 API 时,传入的参数不合法 + V2TXLIVE_ERROR_INVALID_PARAMETER = -2, + + /// API 调用被拒绝 + V2TXLIVE_ERROR_REFUSED = -3, + + /// 当前 API 不支持调用 + V2TXLIVE_ERROR_NOT_SUPPORTED = -4, + + /// license 不合法,调用失败 + V2TXLIVE_ERROR_INVALID_LICENSE = -5, + + /// 请求服务器超时 + V2TXLIVE_ERROR_REQUEST_TIMEOUT = -6, + + /// 服务器无法处理您的请求 + V2TXLIVE_ERROR_SERVER_PROCESS_FAILED = -7, + + /// 连接断开 + V2TXLIVE_ERROR_DISCONNECTED = -8, + + /// 找不到可用的 HEVC 解码器 + V2TXLIVE_ERROR_NO_AVAILABLE_HEVC_DECODERS = -2304, + + ///////////////////////////////////////////////////////////////////////////////// + // + // 网络相关的警告码 + // + ///////////////////////////////////////////////////////////////////////////////// + + /** + * 网络相关的警告码 + */ + /// 网络状况不佳:上行带宽太小,上传数据受阻 + V2TXLIVE_WARNING_NETWORK_BUSY = 1101, + + /// 当前视频播放出现卡顿 + V2TXLIVE_WARNING_VIDEO_BLOCK = 2105, + + ///////////////////////////////////////////////////////////////////////////////// + // + // 摄像头相关的警告码 + // + ///////////////////////////////////////////////////////////////////////////////// + + /** + * 摄像头相关的警告码 + */ + ///摄像头打开失败 + V2TXLIVE_WARNING_CAMERA_START_FAILED = -1301, + + /// 摄像头正在被占用中,可尝试打开其他摄像头 + V2TXLIVE_WARNING_CAMERA_OCCUPIED = -1316, + + /// 摄像头设备未授权,通常在移动设备出现,可能是权限被用户拒绝了 + V2TXLIVE_WARNING_CAMERA_NO_PERMISSION = -1314, + + ///////////////////////////////////////////////////////////////////////////////// + // + // 麦克风相关的警告码 + // + ///////////////////////////////////////////////////////////////////////////////// + + /** + * 
麦克风相关的警告码 + */ + ///麦克风打开失败 + V2TXLIVE_WARNING_MICROPHONE_START_FAILED = -1302, + + /// 麦克风正在被占用中,例如移动设备正在通话时,打开麦克风会失败 + V2TXLIVE_WARNING_MICROPHONE_OCCUPIED = -1319, + + /// 麦克风设备未授权,通常在移动设备出现,可能是权限被用户拒绝了 + V2TXLIVE_WARNING_MICROPHONE_NO_PERMISSION = -1317, + + ///////////////////////////////////////////////////////////////////////////////// + // + // 屏幕分享相关警告码 + // + ///////////////////////////////////////////////////////////////////////////////// + + /** + * 屏幕分享相关警告码 + */ + /// 当前系统不支持屏幕分享 + V2TXLIVE_WARNING_SCREEN_CAPTURE_NOT_SUPPORTED = -1309, + + /// 开始录屏失败,如果在移动设备出现,可能是权限被用户拒绝了 + V2TXLIVE_WARNING_SCREEN_CAPTURE_START_FAILED = -1308, + + /// 录屏被系统中断 + V2TXLIVE_WARNING_SCREEN_CAPTURE_INTERRUPTED = -7001, + +}; +/// @} diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveDef.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveDef.h new file mode 100644 index 0000000..f917f31 --- /dev/null +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveDef.h @@ -0,0 +1,603 @@ +// +// Copyright © 2020 Tencent. All rights reserved. 
+// +// Module: V2TXLive +// +/// @defgroup V2TXLiveDef_ios V2TXLiveDef +/// 腾讯云直播服务(LVB)关键类型定义 +/// @{ +#import "V2TXLiveCode.h" +#import "TXLiteAVSymbolExport.h" + +#if TARGET_OS_IPHONE +#import <UIKit/UIKit.h> +typedef UIView TXView; +typedef UIImage TXImage; +#elif TARGET_OS_MAC +#import <AppKit/AppKit.h> +typedef NSView TXView; +typedef NSImage TXImage; +#endif + +///////////////////////////////////////////////////////////////////////////////// +// +// 支持协议 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 支持协议 + */ +typedef NS_ENUM(NSUInteger, V2TXLiveMode) { + + /// 支持协议: RTMP + V2TXLiveMode_RTMP, + + ///支持协议: TRTC + V2TXLiveMode_RTC + +}; + +///////////////////////////////////////////////////////////////////////////////// +// +// (一)视频相关类型定义 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 视频相关类型定义 +/// @{ + +/** + * 视频分辨率 + */ +typedef NS_ENUM(NSInteger, V2TXLiveVideoResolution) { + + ///分辨率 160*160,码率范围:100Kbps ~ 150Kbps,帧率:15fps + V2TXLiveVideoResolution160x160, + + ///分辨率 270*270,码率范围:200Kbps ~ 300Kbps,帧率:15fps + V2TXLiveVideoResolution270x270, + + ///分辨率 480*480,码率范围:350Kbps ~ 525Kbps,帧率:15fps + V2TXLiveVideoResolution480x480, + + ///分辨率 320*240,码率范围:250Kbps ~ 375Kbps,帧率:15fps + V2TXLiveVideoResolution320x240, + + /// 分辨率 480*360,码率范围:400Kbps ~ 600Kbps,帧率:15fps + V2TXLiveVideoResolution480x360, + + /// 分辨率 640*480,码率范围:600Kbps ~ 900Kbps,帧率:15fps + V2TXLiveVideoResolution640x480, + + ///分辨率 320*180,码率范围:250Kbps ~ 400Kbps,帧率:15fps + V2TXLiveVideoResolution320x180, + + /// 分辨率 480*270,码率范围:350Kbps ~ 550Kbps,帧率:15fps + V2TXLiveVideoResolution480x270, + + ///分辨率 640*360,码率范围:500Kbps ~ 900Kbps,帧率:15fps + V2TXLiveVideoResolution640x360, + + /// 分辨率 960*540,码率范围:800Kbps ~ 1500Kbps,帧率:15fps + V2TXLiveVideoResolution960x540, + + /// 分辨率 1280*720,码率范围:1000Kbps ~ 1800Kbps,帧率:15fps + V2TXLiveVideoResolution1280x720, + + /// 分辨率 1920*1080,码率范围:2500Kbps ~ 
3000Kbps,帧率:15fps + V2TXLiveVideoResolution1920x1080 + +}; + +/** + * 视频宽高比模式 + * + * @info 视频宽高比模式 + * @note + * - 横屏模式下的分辨率: V2TXLiveVideoResolution640x360 + V2TXLiveVideoResolutionModeLandscape = 640 × 360 + * - 竖屏模式下的分辨率: V2TXLiveVideoResolution640x360 + V2TXLiveVideoResolutionModePortrait = 360 × 640 + */ +typedef NS_ENUM(NSInteger, V2TXLiveVideoResolutionMode) { + + /// 横屏模式 + V2TXLiveVideoResolutionModeLandscape = 0, + + /// 竖屏模式 + V2TXLiveVideoResolutionModePortrait = 1, + +}; + +/** + * 视频编码参数。 + * + * 该设置决定远端用户看到的画面质量。 + */ +LITEAV_EXPORT @interface V2TXLiveVideoEncoderParam : NSObject + +///【字段含义】 视频分辨率 +///【特别说明】如需使用竖屏分辨率,请指定 videoResolutionMode 为 Portrait,例如: 640 × 360 + Portrait = 360 × 640。 +///【推荐取值】 +/// - 桌面平台(Win + Mac):建议选择 640 × 360 及以上分辨率,videoResolutionMode 选择 Landscape,即横屏分辨率。 +@property(nonatomic, assign) V2TXLiveVideoResolution videoResolution; + +///【字段含义】分辨率模式(横屏分辨率 or 竖屏分辨率) +///【推荐取值】桌面平台(Windows、Mac)建议选择 Landscape。 +///【特别说明】如需使用竖屏分辨率,请指定 resMode 为 Portrait,例如: 640 × 360 + Portrait = 360 × 640。 +@property(nonatomic, assign) V2TXLiveVideoResolutionMode videoResolutionMode; + +///【字段含义】视频采集帧率 +///【推荐取值】15fps 或 20fps。5fps 以下,卡顿感明显。10fps 以下,会有轻微卡顿感。20fps 以上,会浪费带宽(电影的帧率为 24fps)。 +@property(nonatomic, assign) int videoFps; + +///【字段含义】目标视频码率,SDK 会按照目标码率进行编码,只有在弱网络环境下才会主动降低视频码率。 +///【推荐取值】请参考 V2TXLiveVideoResolution 在各档位注释的最佳码率,也可以在此基础上适当调高。 +/// 比如:V2TXLiveVideoResolution1280x720 对应 1200kbps 的目标码率,您也可以设置为 1500kbps 用来获得更好的观感清晰度。 +///【特别说明】您可以通过同时设置 videoBitrate 和 minVideoBitrate 两个参数,用于约束 SDK 对视频码率的调整范围: +/// - 如果您将 videoBitrate 和 minVideoBitrate 设置为同一个值,等价于关闭 SDK 对视频码率的自适应调节能力。 +@property(nonatomic, assign) int videoBitrate; + +///【字段含义】最低视频码率,SDK 会在网络不佳的情况下主动降低视频码率以保持流畅度,最低会降至 minVideoBitrate 所设定的数值。 +///【推荐取值】您可以通过同时设置 videoBitrate 和 minVideoBitrate 两个参数,用于约束 SDK 对视频码率的调整范围: +/// - 如果您将 videoBitrate 和 minVideoBitrate 设置为同一个值,等价于关闭 SDK 对视频码率的自适应调节能力。 +@property(nonatomic, assign) int minVideoBitrate; + +- (instancetype 
_Nonnull)initWith:(V2TXLiveVideoResolution)resolution; +@end + +/** + * 本地摄像头镜像类型 + */ +typedef NS_ENUM(NSInteger, V2TXLiveMirrorType) { + + ///系统默认镜像类型,前置摄像头镜像,后置摄像头不镜像 + V2TXLiveMirrorTypeAuto, + + /// 前置摄像头和后置摄像头,都切换为镜像模式 + V2TXLiveMirrorTypeEnable, + + ///前置摄像头和后置摄像头,都切换为非镜像模式 + V2TXLiveMirrorTypeDisable + +}; + +/** + * 视频画面填充模式 + */ +typedef NS_ENUM(NSInteger, V2TXLiveFillMode) { + + /// 图像铺满屏幕,超出显示视窗的视频部分将被裁剪,画面显示可能不完整 + V2TXLiveFillModeFill, + + /// 图像长边填满屏幕,短边区域会被填充黑色,画面的内容完整 + V2TXLiveFillModeFit, + + /// 图像拉伸铺满,因此长度和宽度可能不会按比例变化 + V2TXLiveFillModeScaleFill + +}; + +/** + * 视频画面顺时针旋转角度 + */ +typedef NS_ENUM(NSInteger, V2TXLiveRotation) { + + /// 不旋转 + V2TXLiveRotation0, + + /// 顺时针旋转90度 + V2TXLiveRotation90, + + /// 顺时针旋转180度 + V2TXLiveRotation180, + + /// 顺时针旋转270度 + V2TXLiveRotation270 + +}; + +/** + * 视频帧的像素格式 + */ +typedef NS_ENUM(NSInteger, V2TXLivePixelFormat) { + + /// 未知 + V2TXLivePixelFormatUnknown, + + /// YUV420P I420 + V2TXLivePixelFormatI420, + + /// YUV420SP NV12 + V2TXLivePixelFormatNV12, + + /// BGRA8888 + V2TXLivePixelFormatBGRA32, + + /// OpenGL 2D 纹理 + V2TXLivePixelFormatTexture2D + +}; + +/** + * 视频数据包装格式 + * + * @info 视频数据包装格式。 + * @note 在自定义采集和自定义渲染功能,您需要用到下列枚举值来指定您希望以什么样的格式来包装视频数据。 + * - PixelBuffer:直接使用效率最高,iOS 系统提供了众多 API 获取或处理 PixelBuffer + * - NSData: 当使用自定义渲染时,PixelBuffer拷贝一次到NSData。当使用自定义采集时,NSData拷贝一次到PixelBuffer。因此,性能会受到一定程度的影响 + */ +typedef NS_ENUM(NSInteger, V2TXLiveBufferType) { + + /// 未知 + V2TXLiveBufferTypeUnknown, + + /// 直接使用效率最高,iOS 系统提供了众多 API 获取或处理 PixelBuffer + V2TXLiveBufferTypePixelBuffer, + + ///会有一定的性能消耗,SDK 内部是直接处理 PixelBuffer 的,所以会存在 NSData 和 PixelBuffer 之间类型转换所产生的内存拷贝开销 + V2TXLiveBufferTypeNSData, + + ///直接操作纹理 ID,性能最好 + V2TXLiveBufferTypeTexture + +}; + +/** + * 视频帧信息 + * + * @info 视频帧信息。 + * V2TXLiveVideoFrame 用来描述一帧视频画面的裸数据,它可以是一帧编码前的画面,也可以是一帧解码后的画面。 + * @note 自定义采集和自定义渲染时使用。自定义采集时,需要使用 V2TXLiveVideoFrame 来包装待发送的视频帧;自定义渲染时,会返回经过 V2TXLiveVideoFrame 包装的视频帧。 + */ +LITEAV_EXPORT @interface V2TXLiveVideoFrame 
: NSObject + +/// 【字段含义】视频帧像素格式 +/// 【推荐取值】V2TXLivePixelFormatNV12 +@property(nonatomic, assign) V2TXLivePixelFormat pixelFormat; + +/// 【字段含义】视频数据包装格式 +/// 【推荐取值】V2TXLiveBufferTypePixelBuffer +@property(nonatomic, assign) V2TXLiveBufferType bufferType; + +/// 【字段含义】bufferType 为 V2TXLiveBufferTypeNSData 时的视频数据 +@property(nonatomic, strong, nullable) NSData *data; + +/// 【字段含义】bufferType 为 V2TXLiveBufferTypePixelBuffer 时的视频数据 +@property(nonatomic, assign, nullable) CVPixelBufferRef pixelBuffer; + +/// 【字段含义】视频宽度 +@property(nonatomic, assign) NSUInteger width; + +/// 【字段含义】视频高度 +@property(nonatomic, assign) NSUInteger height; + +/// 【字段含义】视频帧的顺时针旋转角度 +@property(nonatomic, assign) V2TXLiveRotation rotation; + +/// 【字段含义】视频纹理ID +@property(nonatomic, assign) GLuint textureId; + +@end + +///////////////////////////////////////////////////////////////////////////////// +// +// (二)音频相关类型定义 +// +///////////////////////////////////////////////////////////////////////////////// + +/// @name 音频相关类型定义 +/// @{ + +/** + * 声音音质 + */ +typedef NS_ENUM(NSInteger, V2TXLiveAudioQuality) { + + /// 语音音质:采样率:16k;单声道;音频码率:16kbps;适合语音通话为主的场景,比如在线会议,语音通话 + V2TXLiveAudioQualitySpeech, + + /// 默认音质:采样率:48k;单声道;音频码率:50kbps;SDK 默认的音频质量,如无特殊需求推荐选择之 + V2TXLiveAudioQualityDefault, + + /// 音乐音质:采样率:48k;双声道 + 全频带;音频码率:128kbps;适合需要高保真传输音乐的场景,比如K歌、音乐直播等 + V2TXLiveAudioQualityMusic + +}; + +/** + * 音频帧数据 + */ +LITEAV_EXPORT @interface V2TXLiveAudioFrame : NSObject + +/// 【字段含义】音频数据 +@property(nonatomic, strong, nullable) NSData *data; + +/// 【字段含义】采样率 +@property(nonatomic, assign) int sampleRate; + +/// 【字段含义】声道数 +@property(nonatomic, assign) int channel; + +@end + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// (三)推流器和播放器的一些统计指标数据定义 +// +///////////////////////////////////////////////////////////////////////////////// + +/// @name 推流器和播放器的一些统计指标数据定义 +/// @{ + +/** + * 推流器的统计数据 + */ +LITEAV_EXPORT @interface V2TXLivePusherStatistics : NSObject + +/// 
【字段含义】当前 App 的 CPU 使用率(%) +@property(nonatomic, assign) NSUInteger appCpu; + +/// 【字段含义】当前系统的 CPU 使用率(%) +@property(nonatomic, assign) NSUInteger systemCpu; + +/// 【字段含义】视频宽度 +@property(nonatomic, assign) NSUInteger width; + +/// 【字段含义】视频高度 +@property(nonatomic, assign) NSUInteger height; + +/// 【字段含义】帧率(fps) +@property(nonatomic, assign) NSUInteger fps; + +/// 【字段含义】视频码率(Kbps) +@property(nonatomic, assign) NSUInteger videoBitrate; + +/// 【字段含义】音频码率(Kbps) +@property(nonatomic, assign) NSUInteger audioBitrate; + +@end + +/** + * 播放器的统计数据 + */ +LITEAV_EXPORT @interface V2TXLivePlayerStatistics : NSObject + +/// 【字段含义】当前 App 的 CPU 使用率(%) +@property(nonatomic, assign) NSUInteger appCpu; + +/// 【字段含义】当前系统的 CPU 使用率(%) +@property(nonatomic, assign) NSUInteger systemCpu; + +/// 【字段含义】视频宽度 +@property(nonatomic, assign) NSUInteger width; + +/// 【字段含义】视频高度 +@property(nonatomic, assign) NSUInteger height; + +/// 【字段含义】帧率(fps) +@property(nonatomic, assign) NSUInteger fps; + +/// 【字段含义】视频码率(Kbps) +@property(nonatomic, assign) NSUInteger videoBitrate; + +/// 【字段含义】音频码率(Kbps) +@property(nonatomic, assign) NSUInteger audioBitrate; + +@end +/// @} + +///////////////////////////////////////////////////////////////////////////////// +// +// (四)连接状态相关枚举值定义 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 连接状态相关枚举值定义 +/// @{ + +/** + * 直播流的连接状态 + */ +typedef NS_ENUM(NSInteger, V2TXLivePushStatus) { + + /// 与服务器断开连接 + V2TXLivePushStatusDisconnected, + + /// 正在连接服务器 + V2TXLivePushStatusConnecting, + + /// 连接服务器成功 + V2TXLivePushStatusConnectSuccess, + + /// 重连服务器中 + V2TXLivePushStatusReconnecting, + +}; +/// @} + +/** + * 声音播放模式(音频路由) + */ +typedef NS_ENUM(NSInteger, V2TXAudioRoute) { + + /// 扬声器 + V2TXAudioModeSpeakerphone, + + /// 听筒 + V2TXAudioModeEarpiece, + +}; + +/** + * 混流输入类型配置 + */ +typedef NS_ENUM(NSInteger, V2TXLiveMixInputType) { + + /// 混入音视频 + V2TXLiveMixInputTypeAudioVideo, + + /// 只混入视频 + V2TXLiveMixInputTypePureVideo, + + 
/// 只混入音频 + V2TXLiveMixInputTypePureAudio, + +}; + +/** + * 云端混流中每一路子画面的位置信息 + */ +LITEAV_EXPORT @interface V2TXLiveMixStream : NSObject + +/// 【字段含义】参与混流的 userId +@property(nonatomic, copy, nonnull) NSString *userId; + +/// 【字段含义】参与混流的 userId 所在对应的推流 streamId,nil 表示当前推流 streamId +@property(nonatomic, copy, nullable) NSString *streamId; + +/// 【字段含义】图层位置 x 坐标(绝对像素值) +@property(nonatomic, assign) NSInteger x; + +/// 【字段含义】图层位置 y 坐标(绝对像素值) +@property(nonatomic, assign) NSInteger y; + +/// 【字段含义】图层位置宽度(绝对像素值) +@property(nonatomic, assign) NSInteger width; + +/// 【字段含义】图层位置高度(绝对像素值) +@property(nonatomic, assign) NSInteger height; + +/// 【字段含义】图层层次(1 - 15)不可重复 +@property(nonatomic, assign) NSUInteger zOrder; + +/// 【字段含义】该直播流的输入类型 +@property(nonatomic, assign) V2TXLiveMixInputType inputType; + +@end + +/** + * 云端混流(转码)配置 + */ +LITEAV_EXPORT @interface V2TXLiveTranscodingConfig : NSObject + +/// 【字段含义】最终转码后的视频分辨率的宽度 +/// 【推荐取值】推荐值:360px,如果你是纯音频推流,请将 width × height 设为 0px × 0px,否则混流后会携带一条画布背景的视频流 +@property(nonatomic, assign) NSUInteger videoWidth; + +/// 【字段含义】最终转码后的视频分辨率的高度 +/// 【推荐取值】推荐值:640px,如果你是纯音频推流,请将 width × height 设为 0px × 0px,否则混流后会携带一条画布背景的视频流 +@property(nonatomic, assign) NSUInteger videoHeight; + +/// 【字段含义】最终转码后的视频分辨率的码率(kbps) +/// 【推荐取值】如果填0,后台会根据 videoWidth 和 videoHeight 来估算码率,您也可以参考枚举定义 V2TXLiveVideoResolution 的注释 +@property(nonatomic, assign) NSUInteger videoBitrate; + +/// 【字段含义】最终转码后的视频分辨率的帧率(FPS) +/// 【推荐取值】默认值:15fps,取值范围是 (0,30] +@property(nonatomic, assign) NSUInteger videoFramerate; + +/// 【字段含义】最终转码后的视频分辨率的关键帧间隔(又称为 GOP) +/// 【推荐取值】默认值:2,单位为秒,取值范围是 [1,8] +@property(nonatomic, assign) NSUInteger videoGOP; + +/// 【字段含义】混合后画面的底色颜色,默认为黑色,格式为十六进制数字,比如:“0x61B9F1” 代表 RGB 分别为(97,158,241) +/// 【推荐取值】默认值:0x000000,黑色 +@property(nonatomic, assign) NSUInteger backgroundColor; + +/// 【字段含义】混合后画面的背景图 +/// 【推荐取值】默认值:nil,即不设置背景图 +/// 【特别说明】背景图需要您事先在 “[控制台](https://console.cloud.tencent.com/trtc) => 应用管理 => 功能配置 => 素材管理” 中上传, +/// 
上传成功后可以获得对应的“图片ID”,然后将“图片ID”转换成字符串类型并设置到 backgroundImage 里即可。 +/// 例如:假设“图片ID” 为 63,可以设置 backgroundImage = "63"; +@property(nonatomic, copy, nullable) NSString *backgroundImage; + +/// 【字段含义】最终转码后的音频采样率 +/// 【推荐取值】默认值:48000Hz。支持12000HZ、16000HZ、22050HZ、24000HZ、32000HZ、44100HZ、48000HZ +@property(nonatomic, assign) NSUInteger audioSampleRate; + +/// 【字段含义】最终转码后的音频码率 +/// 【推荐取值】默认值:64kbps,取值范围是 [32,192],单位:kbps +@property(nonatomic, assign) NSUInteger audioBitrate; + +/// 【字段含义】最终转码后的音频声道数 +/// 【推荐取值】默认值:1。取值范围为 [1,2] 中的整型 +@property(nonatomic, assign) NSUInteger audioChannels; + +/// 【字段含义】每一路子画面的位置信息 +@property(nonatomic, copy, nonnull) NSArray<V2TXLiveMixStream *> *mixStreams; + +/// 【字段含义】输出到 CDN 上的直播流 ID +/// 如不设置该参数,SDK 会执行默认逻辑,即房间里的多路流会混合到该接口调用者的视频流上,也就是 A + B => A; +/// 如果设置该参数,SDK 会将房间里的多路流混合到您指定的直播流 ID 上,也就是 A + B => C。 +/// 【推荐取值】默认值:nil,即房间里的多路流会混合到该接口调用者的视频流上。 +@property(nonatomic, copy, nullable) NSString *outputStreamId; + +@end + +///////////////////////////////////////////////////////////////////////////////// +// +// (五) 公共配置组件 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 公共配置组件有关的枚举值的定义 +/// @{ + +/** + * 日志级别枚举值 + */ +typedef NS_ENUM(NSInteger, V2TXLiveLogLevel) { + + /// 输出所有级别的 log + V2TXLiveLogLevelAll = 0, + + /// 输出 DEBUG,INFO,WARNING,ERROR 和 FATAL 级别的 log + V2TXLiveLogLevelDebug = 1, + + /// 输出 INFO,WARNING,ERROR 和 FATAL 级别的 log + V2TXLiveLogLevelInfo = 2, + + /// 只输出 WARNING,ERROR 和 FATAL 级别的 log + V2TXLiveLogLevelWarning = 3, + + /// 只输出 ERROR 和 FATAL 级别的 log + V2TXLiveLogLevelError = 4, + + /// 只输出 FATAL 级别的 log + V2TXLiveLogLevelFatal = 5, + + /// 不输出任何 sdk log + V2TXLiveLogLevelNULL = 6, + +}; + +/** + * Log配置 + */ +LITEAV_EXPORT @interface V2TXLiveLogConfig : NSObject + +/// 【字段含义】设置 Log 级别 +/// 【推荐取值】默认值:V2TXLiveLogLevelAll +@property(nonatomic, assign) V2TXLiveLogLevel logLevel; + +/// 【字段含义】是否通过 V2TXLivePremierObserver 接收要打印的 Log 信息 +/// 【特殊说明】如果您希望自己实现 Log 写入,可以打开此开关,Log 信息会通过 
V2TXLivePremierObserver#onLog 回调给您。 +/// 【推荐取值】默认值:NO +@property(nonatomic, assign) BOOL enableObserver; + +/// 【字段含义】是否允许 SDK 在编辑器(XCoder、Android Studio、Visual Studio 等)的控制台上打印 Log +/// 【推荐取值】默认值:NO +@property(nonatomic, assign) BOOL enableConsole; + +/// 【字段含义】是否启用本地 Log 文件 +/// 【特殊说明】如非特殊需要,请不要关闭本地 Log 文件,否则腾讯云技术团队将无法在出现问题时进行跟踪和定位。 +/// 【推荐取值】默认值:YES +@property(nonatomic, assign) BOOL enableLogFile; + +/// 【字段含义】设置本地 Log 的存储目录,默认 Log 存储位置: +/// iOS & Mac: sandbox Documents/log +@property(nonatomic, copy, nullable) NSString *logPath; + +@end +/// @} + +/// @} +/// @} diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePlayer.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePlayer.h new file mode 100644 index 0000000..1c4125d --- /dev/null +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePlayer.h @@ -0,0 +1,232 @@ +// +// Copyright © 2020 Tencent. All rights reserved. +// +// Module: V2TXLive +// + +#import "V2TXLivePlayerObserver.h" +#import "TXLiteAVSymbolExport.h" + +/// @defgroup V2TXLivePlayer_ios V2TXLivePlayer +/// 腾讯云直播播放器。<br/> +/// 主要负责从指定的直播流地址拉取音视频数据,并进行解码和本地渲染播放。 +/// +/// 播放器包含如下能力: +/// - 支持 RTMP, HTTP-FLV, TRTC 以及 WebRTC; +/// - 屏幕截图,可以截取当前直播流的视频画面; +/// - 延时调节,可以设置播放器缓存自动调整的最小和最大时间; +/// - 自定义的视频数据处理,您可以根据项目需要处理直播流中的视频数据后,再进行渲染以及播放。 +/// +/// @{ + +@protocol V2TXLivePlayer <NSObject> + +///////////////////////////////////////////////////////////////////////////////// +// +// 播放器相关接口 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 设置播放器回调。 + * + * 通过设置回调,可以监听 V2TXLivePlayer 播放器的一些回调事件, + * 包括播放器状态、播放音量回调、音视频首帧回调、统计数据、警告和错误信息等。 + * + * @param observer 播放器的回调目标对象,更多信息请查看 {@link V2TXLivePlayerObserver} + */ +- (void)setObserver:(id<V2TXLivePlayerObserver>)observer; + +/** + * 设置播放器的视频渲染 View。 该控件负责显示视频内容。 + * + * @param view 播放器渲染 View + * @return 返回值 {@link V2TXLiveCode} + * - V2TXLIVE_OK:成功 + */ +- (V2TXLiveCode)setRenderView:(TXView *)view; + +/** + * 
设置播放器画面的旋转角度。 + * + * @param rotation 旋转角度 {@link V2TXLiveRotation} + * - V2TXLiveRotation0【默认值】: 0度, 不旋转 + * - V2TXLiveRotation90: 顺时针旋转90度 + * - V2TXLiveRotation180: 顺时针旋转180度 + * - V2TXLiveRotation270: 顺时针旋转270度 + * @return 返回值 {@link V2TXLiveCode} + * - V2TXLIVE_OK: 成功 + */ +- (V2TXLiveCode)setRenderRotation:(V2TXLiveRotation)rotation; + +/** + * 设置画面的填充模式。 + * + * @param mode 画面填充模式 {@link V2TXLiveFillMode}。 + * - V2TXLiveFillModeFill 【默认值】: 图像铺满屏幕,不留黑边,如果图像宽高比不同于屏幕宽高比,部分画面内容会被裁剪掉 + * - V2TXLiveFillModeFit: 图像适应屏幕,保持画面完整,但如果图像宽高比不同于屏幕宽高比,会有黑边的存在 + * - V2TXLiveFillModeScaleFill: 图像拉伸铺满,因此长度和宽度可能不会按比例变化 + * @return 返回值 {@link V2TXLiveCode} + * - V2TXLIVE_OK: 成功 + */ +- (V2TXLiveCode)setRenderFillMode:(V2TXLiveFillMode)mode; + +/** + * 开始播放音视频流。 + * + * @param url 音视频流的播放地址,支持 RTMP, HTTP-FLV, TRTC。 + * @return 返回值 {@link V2TXLiveCode} + * - V2TXLIVE_OK: 操作成功,开始连接并播放 + * - V2TXLIVE_ERROR_INVALID_PARAMETER: 操作失败,url 不合法 + * - V2TXLIVE_ERROR_REFUSED: RTC 不支持同一设备上同时推拉同一个 StreamId。 + */ +- (V2TXLiveCode)startPlay:(NSString *)url; + +/** + * 停止播放音视频流。 + * + * @return 返回值 {@link V2TXLiveCode} + * - V2TXLIVE_OK: 成功 + */ +- (V2TXLiveCode)stopPlay; + +/** + * 播放器是否正在播放中。 + * + * @return 是否正在播放 + * - 1: 正在播放中 + * - 0: 已经停止播放 + */ +- (int)isPlaying; + +/** + * 暂停播放器的音频流。 + * + * @return 返回值 {@link V2TXLiveCode} + * - V2TXLIVE_OK: 成功 + */ +- (V2TXLiveCode)pauseAudio; + +/** + * 恢复播放器的音频流。 + * + * @return 返回值 {@link V2TXLiveCode} + * - V2TXLIVE_OK: 成功 + */ +- (V2TXLiveCode)resumeAudio; + +/** + * 暂停播放器的视频流。 + * + * @return 返回值 {@link V2TXLiveCode} + * - V2TXLIVE_OK: 成功 + */ +- (V2TXLiveCode)pauseVideo; + +/** + * 恢复播放器的视频流。 + * + * @return 返回值 {@link V2TXLiveCode} + * - V2TXLIVE_OK: 成功 + */ +- (V2TXLiveCode)resumeVideo; + +/** + * 设置播放器音量。 + * + * @param volume 音量大小,取值范围0 - 100。【默认值】: 100 + * @return 返回值 {@link V2TXLiveCode} + * - V2TXLIVE_OK: 成功 + */ +- (V2TXLiveCode)setPlayoutVolume:(NSUInteger)volume; + +/** + * 设置播放器缓存自动调整的最小和最大时间 ( 单位:秒 )。 + * + * @param minTime 
缓存自动调整的最小时间,取值需要大于0。【默认值】:1 + * @param maxTime 缓存自动调整的最大时间,取值需要大于0。【默认值】:5 + * @return 返回值 {@link V2TXLiveCode} + * - V2TXLIVE_OK: 成功 + * - V2TXLIVE_ERROR_INVALID_PARAMETER: 操作失败,minTime 和 maxTime 需要大于0 + * - V2TXLIVE_ERROR_REFUSED: 播放器处于播放状态,不支持修改缓存策略 + */ +- (V2TXLiveCode)setCacheParams:(CGFloat)minTime maxTime:(CGFloat)maxTime; + +/** + * 直播流无缝切换,支持 FLV 和 LEB。 + * + * @param newUrl 新的拉流地址。 + */ +- (V2TXLiveCode)switchStream:(NSString *)newUrl; + +/** + * 启用播放音量大小提示。 + * + * 开启后可以在 {@link onPlayoutVolumeUpdate} 回调中获取到 SDK 对音量大小值的评估。 + * + * @param intervalMs 决定了 onPlayoutVolumeUpdate 回调的触发间隔,单位为ms,最小间隔为100ms,如果小于等于0则会关闭回调,建议设置为300ms;【默认值】:0,不开启 + * @return 返回值 {@link V2TXLiveCode} + * - V2TXLIVE_OK: 成功 + */ +- (V2TXLiveCode)enableVolumeEvaluation:(NSUInteger)intervalMs; + +/** + * 截取播放过程中的视频画面。 + * + * @return 返回值 {@link V2TXLiveCode} + * - V2TXLIVE_OK: 成功 + * - V2TXLIVE_ERROR_REFUSED: 播放器处于停止状态,不允许调用截图操作 + */ +- (V2TXLiveCode)snapshot; + +/** + * 开启/关闭对视频帧的监听回调。 + * + * SDK 在您开启次此开关后将不再渲染视频画面,您可以通过 V2TXLivePlayerObserver 获得视频帧,并执行自定义的渲染逻辑。 + * + * @param enable 是否开启自定义渲染。【默认值】:NO + * @param pixelFormat 自定义渲染回调的视频像素格式 {@link V2TXLivePixelFormat}。 + * @param bufferType 自定义渲染回调的视频数据格式 {@link V2TXLiveBufferType}。 + * @return 返回值 {@link V2TXLiveCode} + * - V2TXLIVE_OK: 成功 + * - V2TXLIVE_ERROR_NOT_SUPPORTED: 像素格式或者数据格式不支持 + */ +- (V2TXLiveCode)enableObserveVideoFrame:(BOOL)enable pixelFormat:(V2TXLivePixelFormat)pixelFormat bufferType:(V2TXLiveBufferType)bufferType; + +/** + * 开启接收 SEI 消息 + * + * @param enable YES: 开启接收 SEI 消息; NO: 关闭接收 SEI 消息。【默认值】: NO + * @param payloadType 指定接收 SEI 消息的 payloadType,支持 5、242,请与发送端的 payloadType 保持一致。 + * + * @return 返回值 {@link V2TXLiveCode} + * - V2TXLIVE_OK: 成功 + */ +- (V2TXLiveCode)enableReceiveSeiMessage:(BOOL)enable payloadType:(int)payloadType; + +/** + * 是否显示播放器状态信息的调试浮层。 + * + * @param isShow 是否显示。【默认值】:NO + */ +- (void)showDebugView:(BOOL)isShow; + +/** + * 调用 V2TXLivePlayer 的高级 API 接口。 + * + * @note 该接口用于调用一些高级功能。 + * @param 
key 高级 API 对应的 key, 详情请参考 {@link V2TXLiveProperty} 定义。 + * @param value 调用 key 所对应的高级 API 时,需要的参数。 + * @return 返回值 {@link V2TXLiveCode} + * - V2TXLIVE_OK: 成功 + * - V2TXLIVE_ERROR_INVALID_PARAMETER: 操作失败,key 不允许为 nil + */ +- (V2TXLiveCode)setProperty:(NSString *)key value:(NSObject *)value; + +@end + +/// @} + +LITEAV_EXPORT @interface V2TXLivePlayer : NSObject<V2TXLivePlayer> + +@end diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePlayerObserver.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePlayerObserver.h new file mode 100644 index 0000000..6079abb --- /dev/null +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePlayerObserver.h @@ -0,0 +1,144 @@ +// +// Copyright © 2020 Tencent. All rights reserved. +// +// Module: V2TXLive +// + +#import "V2TXLiveDef.h" + +@protocol V2TXLivePlayer; + +/// @defgroup V2TXLivePlayerObserver_ios V2TXLivePlayerObserver +/// 腾讯云直播的播放器回调通知。<br/> +/// 可以接收 V2TXLivePlayer 播放器的一些回调通知,包括播放器状态、播放音量回调、音视频首帧回调、统计数据、警告和错误信息等。 +/// @{ + +@protocol V2TXLivePlayerObserver <NSObject> + +@optional + +///////////////////////////////////////////////////////////////////////////////// +// +// 直播播放器事件回调 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 直播播放器错误通知,播放器出现错误时,会回调该通知 + * + * @param player 回调该通知的播放器对象 + * @param code 错误码 {@link V2TXLiveCode} + * @param msg 错误信息 + * @param extraInfo 扩展信息 + */ +- (void)onError:(id<V2TXLivePlayer>)player code:(V2TXLiveCode)code message:(NSString *)msg extraInfo:(NSDictionary *)extraInfo; + +/** + * 直播播放器警告通知 + * + * @param player 回调该通知的播放器对象 + * @param code 警告码 {@link V2TXLiveCode} + * @param msg 警告信息 + * @param extraInfo 扩展信息 + */ +- (void)onWarning:(id<V2TXLivePlayer>)player code:(V2TXLiveCode)code message:(NSString *)msg extraInfo:(NSDictionary *)extraInfo; + +/** + * 直播播放器分辨率变化通知 + * + * @param player 回调该通知的播放器对象 + * @param width 视频宽 + * @param height 视频高 + */ +- 
(void)onVideoResolutionChanged:(id<V2TXLivePlayer>)player width:(NSInteger)width height:(NSInteger)height; + +/** + * 已经成功连接到服务器 + * + * @param player 回调该通知的播放器对象 + * @param extraInfo 扩展信息 + */ +- (void)onConnected:(id<V2TXLivePlayer>)player extraInfo:(NSDictionary *)extraInfo; + +/** + * 视频播放事件 + * + * @param player 回调该通知的播放器对象 + * @param firstPlay 第一次播放标志 + * @param extraInfo 扩展信息 + */ +- (void)onVideoPlaying:(id<V2TXLivePlayer>)player firstPlay:(BOOL)firstPlay extraInfo:(NSDictionary *)extraInfo; + +/** + * 音频播放事件 + * + * @param player 回调该通知的播放器对象 + * @param firstPlay 第一次播放标志 + * @param extraInfo 扩展信息 + */ +- (void)onAudioPlaying:(id<V2TXLivePlayer>)player firstPlay:(BOOL)firstPlay extraInfo:(NSDictionary *)extraInfo; + +/** + * 视频加载事件 + * + * @param player 回调该通知的播放器对象 + * @param extraInfo 扩展信息 + */ +- (void)onVideoLoading:(id<V2TXLivePlayer>)player extraInfo:(NSDictionary *)extraInfo; + +/** + * 音频加载事件 + * + * @param player 回调该通知的播放器对象 + * @param extraInfo 扩展信息 + */ +- (void)onAudioLoading:(id<V2TXLivePlayer>)player extraInfo:(NSDictionary *)extraInfo; + +/** + * 播放器音量大小回调 + * + * @param player 回调该通知的播放器对象 + * @param volume 音量大小 + * @note 调用 [enableVolumeEvaluation](@ref V2TXLivePlayer#enableVolumeEvaluation:) 开启播放音量大小提示之后,会收到这个回调通知。 + */ +- (void)onPlayoutVolumeUpdate:(id<V2TXLivePlayer>)player volume:(NSInteger)volume; + +/** + * 直播播放器统计数据回调 + * + * @param player 回调该通知的播放器对象 + * @param statistics 播放器统计数据 {@link V2TXLivePlayerStatistics} + */ +- (void)onStatisticsUpdate:(id<V2TXLivePlayer>)player statistics:(V2TXLivePlayerStatistics *)statistics; + +/** + * 截图回调 + * + * @note 调用 {@link snapshot} 截图之后,会收到这个回调通知 + * @param player 回调该通知的播放器对象 + * @param image 已截取的视频画面 + */ +- (void)onSnapshotComplete:(id<V2TXLivePlayer>)player image:(TXImage *)image; + +/** + * 自定义视频渲染回调 + * + * @param player 回调该通知的播放器对象 + * @param videoFrame 视频帧数据 {@link V2TXLiveVideoFrame} + * @note 需要您调用 {@link enableObserveVideoFrame} 开启回调开关 + */ +- 
(void)onRenderVideoFrame:(id<V2TXLivePlayer>)player frame:(V2TXLiveVideoFrame *)videoFrame; + +/** + * 收到 SEI 消息的回调,发送端通过 {@link V2TXLivePusher} 中的 `sendSeiMessage` 来发送 SEI 消息。 + * + * @note 调用 {@link V2TXLivePlayer} 中的 `enableReceiveSeiMessage` 开启接收 SEI 消息之后,会收到这个回调通知 + * + * @param player 回调该通知的播放器对象。 + * @param payloadType 回调数据的SEI payloadType + * @param data 数据 + */ +- (void)onReceiveSeiMessage:(id<V2TXLivePlayer>)player payloadType:(int)payloadType data:(NSData *)data; + +@end +/// @} diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePremier.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePremier.h new file mode 100644 index 0000000..6ec3080 --- /dev/null +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePremier.h @@ -0,0 +1,101 @@ +// +// Copyright © 2020 Tencent. All rights reserved. +// +// Module: V2TXLive +// +#import "V2TXLiveDef.h" +#import "TXLiteAVSymbolExport.h" +NS_ASSUME_NONNULL_BEGIN + +/// @defgroup V2TXLivePremier_ios V2TXLivePremier +/// +/// @{ + +///////////////////////////////////////////////////////////////////////////////// +// +// V2TXLive 高级接口 +// +///////////////////////////////////////////////////////////////////////////////// + +@protocol V2TXLivePremierObserver; +@protocol V2TXLivePremier <NSObject> + +/** + * 获取 SDK 版本号 + */ ++ (NSString *)getSDKVersionStr; + +/** + * 设置 V2TXLivePremier 回调接口 + */ ++ (void)setObserver:(id<V2TXLivePremierObserver>)observer; + +/** + * 设置 Log 的配置信息 + */ ++ (V2TXLiveCode)setLogConfig:(V2TXLiveLogConfig *)config; + +/** + * 设置 SDK 接入环境 + * + * @note 如您的应用无特殊需求,请不要调用此接口进行设置。 + * @param env 目前支持 “default” 和 “GDPR” 两个参数 + * - default:默认环境,SDK 会在全球寻找最佳接入点进行接入。 + * - GDPR:所有音视频数据和质量统计数据都不会经过中国大陆地区的服务器。 + */ ++ (V2TXLiveCode)setEnvironment:(const char *)env; + +/** + * 设置 SDK 的授权 License + * + * 文档地址:https://cloud.tencent.com/document/product/454/34750 + * @param url licence的地址 + * @param key licence的秘钥 + */ +#if TARGET_OS_IPHONE ++ (void)setLicence:(NSString *)url 
key:(NSString *)key; +#endif + +/** + * 设置 SDK sock5 代理配置 + * + * @param host sock5 代理服务器的地址 + * @param port sock5 代理服务器的端口 + * @param username sock5 代理服务器的验证的用户名 + * @param password sock5 代理服务器的验证的密码 + */ ++ (V2TXLiveCode)setSocks5Proxy:(NSString *)host port:(NSInteger)port username:(NSString *)username password:(NSString *)password; + +@end + +///////////////////////////////////////////////////////////////////////////////// +// +// V2TXLive 高级回调接口 +// +///////////////////////////////////////////////////////////////////////////////// + +@protocol V2TXLivePremierObserver <NSObject> +@optional + +/** + * 自定义 Log 输出回调接口 + */ +- (void)onLog:(V2TXLiveLogLevel)level log:(NSString *)log; + +/** + * setLicence 接口回调 + * + * @param result 设置 licence 结果 0 成功,负数失败 + * @param reason 设置 licence 失败原因 + */ +- (void)onLicenceLoaded:(int)result Reason:(NSString *)reason; + +@end + +LITEAV_EXPORT @interface V2TXLivePremier : NSObject<V2TXLivePremier> + +@end + +NS_ASSUME_NONNULL_END + +/// @} diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveProperty.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveProperty.h new file mode 100644 index 0000000..9b5d329 --- /dev/null +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveProperty.h @@ -0,0 +1,52 @@ +// +// Copyright © 2022 Tencent. All rights reserved. 
+// +// Module: V2TXLive +// + +/// @defgroup V2TXLiveProperty_ios V2TXLiveProperty +/// V2TXLive setProperty 支持的 key +/// +/// @{ +#import <Foundation/Foundation.h> + +NS_ASSUME_NONNULL_BEGIN + +#define V2PropertyType NSString* + +/// 开启/关闭硬件加速【RTMP协议,拉流】 +/// 默认值:true +/// Value:true/false +FOUNDATION_EXTERN V2PropertyType kV2EnableHardwareAcceleration; + +/// 设置重连次数,【RTMP协议,拉流】 +/// 默认值:3 +/// Value:int +FOUNDATION_EXTERN V2PropertyType kV2MaxNumberOfReconnection; + +/// 设置重连间隔【RTMP协议,拉流】 +/// 单位:秒 +/// 默认值:3 +/// Value:int +FOUNDATION_EXTERN V2PropertyType kV2SecondsBetweenReconnection; + +/// 设置自定义编码参数【RTMP/RTC协议,推流】 +/// Value:JSON 字符串 +/// 例如: +/// ```json +///{ +/// "videoWidth":360, +/// "videoHeight":640, +/// "videoFps":15, +/// "videoBitrate":1000, +/// "minVideoBitrate":1000 +///} +///``` +FOUNDATION_EXTERN V2PropertyType kV2SetVideoQualityEx; + +@interface V2TXLiveProperty : NSObject + +@end + +NS_ASSUME_NONNULL_END +/// @} diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITRTCCloud.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITRTCCloud.h index 9fb4fed..a36aa47 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITRTCCloud.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITRTCCloud.h @@ -1,1408 +1,1511 @@ -#ifndef __ITRTCCLOUD_H__ -#define __ITRTCCLOUD_H__ -/* - * Module: ITRTCCloud @ TXLiteAVSDK - * - * SDK VERSION 8.6.10094 - * - * Function: 腾讯云视频通话功能的主要接口类 - * - * 创建/使用/销毁 ITRTCCloud 对象的示例代码: - * <pre> - * ITRTCCloud *trtcCloud = getTRTCShareInstance(); - * if(trtcCloud) { - * std::string version(trtcCloud->getSDKVersion()); - * } - * // - * // - * destroyTRTCShareInstance(); - * trtcCloud = nullptr; - * </pre> - */ - -#include "TRTCCloudCallback.h" -#include "TRTCTypeDef.h" -#include "ITXAudioEffectManager.h" -#include "ITXDeviceManager.h" -#ifdef _WIN32 -#include "IDeprecatedTRTCCloud.h" -#include "TXLiteAVBase.h" -#endif // _WIN32 - - -namespace trtc { 
-class ITRTCCloud; -} - -/// @defgroup ITRTCCloud_cplusplus ITRTCCloud -/// 腾讯云视频通话功能的主要接口类 -/// @{ -extern "C" { - /// @name 创建与销毁 ITRTCCloud 单例 - /// @{ - -#ifdef __ANDROID__ - /** - * @brief 用于动态加载 dll 时,获取 ITRTCCloud 对象指针。 - * - * @return 返回 ITRTCCloud 单例对象的指针,注意:delete ITRTCCloud* 会编译错误,需要调用 destroyTRTCCloud 释放单例指针对象。 - * @param context Android 上下文,内部会转为 ApplicationContext 用于系统 API 调用,如果传入的 context 为空,内部会自动获取当前进程的 ApplicationContext - * @note 本接口仅适用于 Android 平台 - */ - TRTC_API trtc::ITRTCCloud* getTRTCShareInstance(void *context); -#else - /** - * @brief 用于动态加载 dll 时,获取 ITRTCCloud 对象指针。 - * - * @return 返回 ITRTCCloud 单例对象的指针,注意:delete ITRTCCloud* 会编译错误,需要调用 destroyTRTCCloud 释放单例指针对象。 - * @note 本接口适用于 Windows、Mac、iOS 平台 - */ - TRTC_API trtc::ITRTCCloud* getTRTCShareInstance(); -#endif - /** - * @brief 释放 ITRTCCloud 单例对象。 - */ - TRTC_API void destroyTRTCShareInstance(); - /// @} -} - -namespace trtc { - -class ITRTCCloud -#ifdef _WIN32 - : public IDeprecatedTRTCCloud -#endif // _WIN32 -{ - protected: - virtual ~ITRTCCloud(){}; - -public: - ///////////////////////////////////////////////////////////////////////////////// - // - - // 设置 TRTCCloudCallback 回调 - // - ///////////////////////////////////////////////////////////////////////////////// - /// @name 设置 ITRTCCloudCallback 回调 - /// @{ - /** - * 设置回调接口 ITRTCCloudCallback - * - * 您可以通过 ITRTCCloudCallback 获得来自 SDK 的各种状态通知,详见 ITRTCCloudCallback.h 中的定义 - * - * @param callback 事件回调指针 - */ - virtual void addCallback(ITRTCCloudCallback* callback) = 0; - - /** - * 移除事件回调 - * - * @param callback 事件回调指针 - */ - virtual void removeCallback(ITRTCCloudCallback* callback) = 0; - /// @} - - ///////////////////////////////////////////////////////////////////////////////// - // - // (一)房间相关接口函数 - // - ///////////////////////////////////////////////////////////////////////////////// - /// @name 房间相关接口函数 - /// @{ - /** - * 1.1 进入房间 - * - * 调用接口后,您会收到来自 ITRTCCloudCallback 中的 onEnterRoom(result) 回调: - * - 如果加入成功,result 会是一个正数(result 
> 0),表示加入房间的时间消耗,单位是毫秒(ms)。 - * - 如果加入失败,result 会是一个负数(result < 0),表示进房失败的错误码。 - * - * 进房失败的错误码含义请参见[错误码](https://cloud.tencent.com/document/product/647/32257)。 - * - * - {@link TRTCAppSceneVideoCall}:<br> - * 视频通话场景,支持720P、1080P高清画质,单个房间最多支持300人同时在线,最高支持50人同时发言。<br> - * 适合:[1对1视频通话]、[300人视频会议]、[在线问诊]、[远程面试]等。<br> - * - {@link TRTCAppSceneAudioCall}:<br> - * 语音通话场景,支持 48kHz,支持双声道。单个房间最多支持300人同时在线,最高支持50人同时发言。<br> - * 适合:[1对1语音通话]、[300人语音会议]、[在线狼人杀]、[语音聊天室]等。<br> - * - {@link TRTCAppSceneLIVE}:<br> - * 视频互动直播,支持平滑上下麦,切换过程无需等待,主播延时小于300ms;支持十万级别观众同时播放,播放延时低至1000ms。<br> - * 适合:[在线互动课堂]、[互动直播]、[视频相亲]、[远程培训]、[超大型会议]等。<br> - * - {@link TRTCAppSceneVoiceChatRoom}:<br> - * 语音互动直播,支持平滑上下麦,切换过程无需等待,主播延时小于300ms;支持十万级别观众同时播放,播放延时低至1000ms。<br> - * 适合:[语聊房]、[K 歌房]、[FM 电台]等。<br> - * - * @param param 进房参数,请参考 trtc::TRTCParams - * @param scene 应用场景,目前支持视频通话(VideoCall)、在线直播(Live)、语音通话(AudioCall)、语音聊天室(VoiceChatRoom)四种场景。 - * - * @note - * 1. 当 scene 选择为 TRTCAppSceneLIVE 或 TRTCAppSceneVoiceChatRoom 时,您必须通过 TRTCParams 中的 role 字段指定当前用户的角色。<br> - * 2. 
不管进房是否成功,enterRoom 都必须与 exitRoom 配对使用,在调用 exitRoom 前再次调用 enterRoom 函数会导致不可预期的错误问题。 - */ - virtual void enterRoom(const TRTCParams& params, TRTCAppScene scene) = 0; - - /** - * 1.2 离开房间 - * - * 调用 exitRoom() 接口会执行退出房间的相关逻辑,例如释放音视频设备资源和编解码器资源等。 - * 待资源释放完毕,SDK 会通过 ITRTCCloudCallback 中的 onExitRoom() 回调通知您。 - * - * 如果您要再次调用 enterRoom() 或者切换到其他的音视频 SDK,请等待 onExitRoom() 回调到来后再执行相关操作。 - * 否则可能会遇到如摄像头、麦克风设备被强占等各种异常问题。 - */ - virtual void exitRoom() = 0; - - /** - * 1.3 切换角色,仅适用于直播场景(TRTCAppSceneLIVE 和 TRTCAppSceneVoiceChatRoom) - * - * 在直播场景下,一个用户可能需要在“观众”和“主播”之间来回切换。 - * 您可以在进房前通过 TRTCParams 中的 role 字段确定角色,也可以通过 switchRole 在进房后切换角色。 - * - * @param role 目标角色,默认为主播: - * - {@link TRTCRoleAnchor} 主播,可以上行视频和音频,一个房间里最多支持50个主播同时上行音视频。 - * - {@link TRTCRoleAudience} 观众,只能观看,不能上行视频和音频,一个房间里的观众人数没有上限。 - */ - virtual void switchRole(TRTCRoleType role) = 0; - - /** - * 1.4 请求跨房通话(主播 PK) - * - * TRTC 中两个不同音视频房间中的主播,可以通过“跨房通话”功能拉通连麦通话功能。使用此功能时, - * 两个主播无需退出各自原来的直播间即可进行“连麦 PK”。 - * - * 例如:当房间“001”中的主播 A 通过 connectOtherRoom() 跟房间“002”中的主播 B 拉通跨房通话后, - * 房间“001”中的用户都会收到主播 B 的 onUserEnter(B) 回调和 onUserVideoAvailable(B,true) 回调。 - * 房间“002”中的用户都会收到主播 A 的 onUserEnter(A) 回调和 onUserVideoAvailable(A,true) 回调。 - * - * 简言之,跨房通话的本质,就是把两个不同房间中的主播相互分享,让每个房间里的观众都能看到两个主播。 - * - * <pre> - * 房间 001 房间 002 - * ------------- ------------ - * 跨房通话前:| 主播 A | | 主播 B | - * | 观众 U V W | | 观众 X Y Z | - * ------------- ------------ - * - * 房间 001 房间 002 - * ------------- ------------ - * 跨房通话后:| 主播 A B | | 主播 B A | - * | 观众 U V W | | 观众 X Y Z | - * ------------- ------------ - * </pre> - * - * 跨房通话的参数考虑到后续扩展字段的兼容性问题,暂时采用了 JSON 格式的参数,要求至少包含两个字段: - * - roomId:房间“001”中的主播 A 要跟房间“002”中的主播 B 连麦,主播 A 调用 connectOtherRoom() 时 roomId 应指定为“002”。 - * - userId:房间“001”中的主播 A 要跟房间“002”中的主播 B 连麦,主播 A 调用 connectOtherRoom() 时 userId 应指定为 B 的 userId。 - * - * 跨房通话的请求结果会通过 ITRTCCloudCallback 中的 onConnectOtherRoom() 回调通知给您。 - * - * <pre> - * //此处用到 jsoncpp 库来格式化 JSON 字符串 - * Json::Value jsonObj; - * jsonObj["roomId"] = 002; - * 
jsonObj["userId"] = "userB"; - * Json::FastWriter writer; - * std::string params = writer.write(jsonObj); - * trtc.ConnectOtherRoom(params.c_str()); - * </pre> - * - * @note 如果进房时,使用的是字符串房间号码,上面的 roomId 也需要相应改为 strRoomId。 - * <pre> - * //此处用到 jsoncpp 库来格式化 JSON 字符串 - * Json::Value jsonObj; - * jsonObj["strRoomId"] = "002"; - * jsonObj["userId"] = "userB"; - * Json::FastWriter writer; - * std::string params = writer.write(jsonObj); - * trtc.ConnectOtherRoom(params.c_str()); - * </pre> - * - * @param params JSON 字符串连麦参数,roomId 代表目标房间号,userId 代表目标用户 ID。 - * - */ - virtual void connectOtherRoom(const char* params) = 0; - - /** - * 1.5 关闭跨房连麦 - * - * 跨房通话的退出结果会通过 ITRTCCloudCallback 中的 onDisconnectOtherRoom() 回调通知给您。 - */ - virtual void disconnectOtherRoom() = 0; - - /** - * 1.6 设置音视频数据接收模式,需要在进房前设置才能生效 - * - * 为实现进房秒开的绝佳体验,SDK 默认进房后自动接收音视频。即在您进房成功的同时,您将立刻收到远端所有用户的音视频数据。 - * 若您没有调用 startRemoteView,视频数据将自动超时取消。 - * 若您主要用于语音聊天等没有自动接收视频数据需求的场景,您可以根据实际需求选择接收模式,以免产生预期之外的视频时长费用。 - * - * @param autoRecvAudio true:自动接收音频数据;false:需要调用 muteRemoteAudio 进行请求或取消。默认值:true - * @param autoRecvVideo true:自动接收视频数据;false:需要调用 startRemoteView/stopRemoteView 进行请求或取消。默认值:true - * - * @note 需要在进房前设置才能生效。 - */ - virtual void setDefaultStreamRecvMode(bool autoRecvAudio, bool autoRecvVideo) = 0; - -#if _WIN32||__APPLE__ - /** - * 1.7 创建子 TRTCCloud 实例 - * - * 子 TRTCCloud 实例用于进入其他房间,观看其他房间主播的音视频流,还可以在不同的房间之间切换推送音视频流。 - * - * 此接口主要应用于类似超级小班课这种需要进入多个房间推拉流的场景。 - * - * <pre> - * ITRTCCloud *mainCloud = getTRTCShareInstance(); - * // 1、mainCloud 进房并开始推送音视频流。 - * // 2、创建子 TRTCCloud 实例并进入其他房间。 - * ITRTCCloud *subCloud = mainCloud->createSubCloud(); - * subCloud->enterRoom(params, scene); - * - * // 3、切换房间推送音视频流。 - * // 3.1、mainCloud 停止推送音视频流。 - * mainCloud->switchRole(TRTCRoleAudience); - * mainCloud->muteLocalVideo(true); - * mainCloud->muteLocalAudio(true); - * // 3.2、subCLoud 推送音视频流。 - * subCloud->switchRole(TRTCRoleAnchor); - * subCloud->muteLocalVideo(false); - * subCloud->muteLocalAudio(false); - * 
- * // 4、subCLoud 退房。 - * subCloud->exitRoom(); - * - * // 5、销毁 subCLoud。 - * mainCloud->destroySubCloud(subCloud); - * </pre> - * - * @return 子 TRTCCloud 实例 - * @note - * - 此方法目前仅支持 Windows、iOS、Mac 平台 - * - 同一个用户,可以使用同一个 userId 进入多个不同 roomId 的房间。 - * - 两台手机不可以同时使用同一个 userId 进入同一个 roomId 的房间。 - * - 通过 createSubCloud 接口创建出来的子房间 TRTCCloud 实例有一个能力限制:不能调用子实例中与本地音视频 - * 相关的接口(除了 switchRole、muteLocalVideo 和 muteLocalAudio 之外), 设置美颜等接口请使用 - * 原 TRTCCloud 实例对象。 - * - 同一个用户,同时只能在一个 TRTCCloud 实例中推流,在不同房间同时推流会引发云端的状态混乱,导致各种 bug。 - */ - virtual ITRTCCloud* createSubCloud() = 0; - - /** - * 1.8 销毁子 TRTCCloud 实例 - * - * @note 此方法目前仅支持 Windows、iOS、Mac 平台 - */ - virtual void destroySubCloud(ITRTCCloud *cloud) = 0; -#endif - - /** - * 1.9 切换房间 - * - * 调用该接口后,用户会先退出原来的房间并快速进入 TRTCSwitchRoomConfig 中指定的新房间: - * 相比于直接调用 exitRoom + enterRoom 的方式,switchRoom 接口对主播更加友好,因为 switchRoom - * 不会停止主播端视频的采集和预览。 - * 接口调用结果会通过 ITRTCCloudCallback 中的 onSwitchRoom(errCode, errMsg) 回调通知给您。 - * - * @param config 房间切换参数,请参考 {@link TRTCSwitchRoomConfig} - */ - virtual void switchRoom(const TRTCSwitchRoomConfig& config) = 0; - /// @} - - ///////////////////////////////////////////////////////////////////////////////// - // - // (二)CDN 相关接口函数 - // - ///////////////////////////////////////////////////////////////////////////////// - /// @name CDN 相关接口函数 - /// @{ - /** - * 2.1 开始向腾讯云的直播 CDN 推流 - * - * 该接口会指定当前用户的音视频流在腾讯云 CDN 所对应的 StreamId,进而可以指定当前用户的 CDN 播放地址。 - * - * 例如:如果我们采用如下代码设置当前用户的主画面 StreamId 为 user_stream_001,那么该用户主画面对应的 CDN 播放地址为: - * “http://yourdomain/live/user_stream_001.flv”,其中 yourdomain 为您自己备案的播放域名, - * 您可以在直播[控制台](https://console.cloud.tencent.com/live) 配置您的播放域名,腾讯云不提供默认的播放域名。 - * - * <pre> - * ITRTCCloud *trtcCloud = getTRTCShareInstance(); - * trtcCloud->enterRoom(params, TRTCAppSceneLIVE); - * trtcCloud->startLocalPreview(TXView); - * trtcCloud->startLocalAudio(TRTCAudioQuality); - * trtcCloud->startPublishing("user_stream_001", TRTCVideoStreamTypeBig); - * </pre> - * - * 您也可以在设置 enterRoom 
的参数 TRTCParams 时指定 streamId, 而且我们更推荐您采用这种方案。 - * - * @param streamId 自定义流 ID。 - * @param type 仅支持 TRTCVideoStreamTypeBig 和 TRTCVideoStreamTypeSub。 - * @note 您需要先在实时音视频 [控制台](https://console.cloud.tencent.com/rav/) 中的功能配置页开启“启用旁路推流”才能生效。 - * - 若您选择“指定流旁路”,则您可以通过该接口将对应音视频流推送到腾讯云 CDN 且指定为填写的流 ID。 - * - 若您选择“全局自动旁路”,则您可以通过该接口调整默认的流 ID。 - */ - virtual void startPublishing(const char* streamId, TRTCVideoStreamType type) = 0; - - /** - * 2.2 停止向腾讯云的直播 CDN 推流 - */ - virtual void stopPublishing() = 0; - - /** - * 2.3 开始向友商云的直播 CDN 转推 - * - * 该接口跟 startPublishing() 类似,但 startPublishCDNStream() 支持向非腾讯云的直播 CDN 转推。 - * @param param CDN 转推参数,请参考 TRTCTypeDef.h 中关于 TRTCPublishCDNParam 的介绍。 - * @note 使用 startPublishing() 绑定腾讯云直播 CDN 不收取额外的费用,但使用 startPublishCDNStream() 绑定非腾讯云直播 CDN 需要收取转推费用。 - */ - virtual void startPublishCDNStream(const TRTCPublishCDNParam& param) = 0; - - /** - * 2.4 停止向非腾讯云地址转推 - */ - virtual void stopPublishCDNStream() = 0; - - /** - * 2.5 设置云端的混流转码参数 - * - * 如果您在实时音视频 [控制台](https://console.cloud.tencent.com/trtc/) 中的功能配置页开启了“启动自动旁路直播”功能, - * 房间里的每一路画面都会有一个默认的直播 [CDN 地址](https://cloud.tencent.com/document/product/647/16826)。 - * - * 一个直播间中可能有不止一位主播,而且每个主播都有自己的画面和声音,但对于 CDN 观众来说,他们只需要一路直播流, - * 所以您需要将多路音视频流混成一路标准的直播流,这就需要混流转码。 - * - * 当您调用 setMixTranscodingConfig() 接口时,SDK 会向腾讯云的转码服务器发送一条指令,目的是将房间里的多路音视频流混合为一路, - * 您可以通过 mixUsers 参数来调整每一路画面的位置,以及是否只混合声音,也可以通过 videoWidth、videoHeight、videoBitrate 等参数控制混合音视频流的编码参数。 - * - * <pre> - * 【画面1】=> 解码 ====> \ - * \ - * 【画面2】=> 解码 => 画面混合 => 编码 => 【混合后的画面】 - * / - * 【画面3】=> 解码 ====> / - * - * 【声音1】=> 解码 ====> \ - * \ - * 【声音2】=> 解码 => 声音混合 => 编码 => 【混合后的声音】 - * / - * 【声音3】=> 解码 ====> / - * </pre> - * - * 参考文档:[云端混流转码](https://cloud.tencent.com/document/product/647/16827)。 - * - * @param config 请参考 TRTCTypeDef.h 中关于 TRTCTranscodingConfig 的介绍。如果传入 nullptr 则取消云端混流转码。 - * @note 关于云端混流的注意事项: - * - 云端转码会引入一定的 CDN 观看延时,大概会增加1 - 2秒。 - * - 调用该函数的用户,会将连麦中的多路画面混合到自己当前这路画面或者 config 中指定的 streamId 上。 - * - 请注意,若您还在房间中且不再需要混流,请务必传入 
nullptr 进行取消,因为当您发起混流后,云端混流模块就会开始工作,不及时取消混流可能会引起不必要的计费损失。 - * - 请放心,您退房时会自动取消混流状态。 - */ - virtual void setMixTranscodingConfig(TRTCTranscodingConfig* config) = 0; - /// @} - - ///////////////////////////////////////////////////////////////////////////////// - // - // (三)视频相关接口函数 - // - ///////////////////////////////////////////////////////////////////////////////// - /// @name 视频相关接口函数 - /// @{ -#if TARGET_PLATFORM_DESKTOP - /** - * 3.1 开启本地视频的预览画面(Windows、 Mac版本) - * - * 这个接口会启动默认的摄像头,可以通过 ITXDeviceManager::setCurrentDevice 接口选用其他摄像头 - * 当开始渲染首帧摄像头画面时,您会收到 ITRTCCloudCallback 中的 onFirstVideoFrame(nullptr) 回调。 - * - * @param rendView 承载预览画面的控件 - */ - virtual void startLocalPreview(TXView rendView) = 0; -#elif TARGET_PLATFORM_PHONE - /** - * 3.2 开启本地视频的预览画面 (iOS、 Android版本) - * 在 enterRoom 之前调用此函数,SDK 只会开启摄像头,并一直等到您调用 enterRoom 之后才开始推流。 - * 在 enterRoom 之后调用此函数,SDK 会开启摄像头并自动开始视频推流。 - * 当开始渲染首帧摄像头画面时,您会收到 ITRTCCloudCallback 中的 onFirstVideoFrame(null) 回调。 - * - * @note 如果希望开播前预览摄像头画面并调节美颜参数,您可以: - * - 方案一:在调用 enterRoom 之前调用 startLocalPreview - * - 方案二:在调用 enterRoom 之后调用 startLocalPreview + muteLocalVideo(true) - * @param frontCamera YES:前置摄像头;NO:后置摄像头。 - * @param rendView 承载视频画面的控件 - */ - virtual void startLocalPreview(bool frontCamera, TXView rendView) = 0; -#endif - - /** - * 3.3 更新本地视频预览画面的窗口 - * - * @param rendView 承载预览画面的控件 - */ - virtual void updateLocalView(TXView rendView) = 0; - - /** - * 3.4 停止本地视频采集及预览 - */ - virtual void stopLocalPreview() = 0; - - /** - * 3.5 暂停/恢复推送本地的视频数据 - * - * 当暂停推送本地视频后,房间里的其它成员将会收到 onUserVideoAvailable(userId, false) 回调通知 - * 当恢复推送本地视频后,房间里的其它成员将会收到 onUserVideoAvailable(userId, true) 回调通知 - * - * @param mute true:暂停;false:恢复 - */ - virtual void muteLocalVideo(bool mute) = 0; - - /** - * 3.6 开始拉取并显示指定用户的远端画面 - * - * 该函数会拉取指定 userid 的视频流显示在您指定的 view 控件上,您可以通过 setRemoteRenderParams 设置显示模式。 - * - 如果您提前知道房间中某个 userid 正在推流,可以直接调用 startRemoteView 显示该用户的远端画面。 - * - 如果您不知道房间中有哪些用户开启了视频,可以在 enterRoom 后等待来自 SDK 的 onUserVideoAvailable(userId, 
true) 回调通知。 - * 调用 startRemoteView 只是启动拉取,此时画面还需要加载,当加载完毕后 ITRTCCloudCallback 会通过 onFirstVideoFrame(userId) 通知您。 - * - * @param userId 指定远端用户的 userId - * @param streamType 指定要观看 userId 的视频流类型: - * - 高清大画面:({@link TRTCVideoStreamTypeBig}) - * - 低清大画面:({@link TRTCVideoStreamTypeSmall}) - * - 辅流(屏幕分享):({@link TRTCVideoStreamTypeSub}) - * @param rendView 承载视频画面的控件 - * - * @note 注意几点规则需要您关注:<br> - * 1. SDK 支持同时观看某 userid 的大画面和辅路,或者小画面和辅路,但不支持同时观看大画面和小画面。<br> - * 2. 只有当指定的 userid 通过 enableEncSmallVideoStream 开启双路编码后,才能观看该用户的小画面。<br> - * 3. 如果该用户的小画面不存在,则默认切换到大画面。 - */ - virtual void startRemoteView(const char* userId, TRTCVideoStreamType streamType, TXView rendView) = 0; - - /** - * 3.7 更新远端视频渲染的窗口 - * - * @param userId 对方的用户标识 - * @param streamType 要设置预览窗口的流类型(TRTCVideoStreamTypeBig、TRTCVideoStreamTypeSub) - * @param rendView 承载预览画面的控件 - */ - virtual void updateRemoteView(const char* userId, TRTCVideoStreamType streamType, TXView rendView) = 0; - - /** - * 3.8 停止显示远端视频画面,同时不再拉取该远端用户的视频数据流 - * - * 调用此接口后,SDK 会停止接收该用户的远程视频流,同时会清理相关的视频显示资源。 - * - * @param userId 指定远端用户的 userId - * @param streamType 指定要停止观看的 userId 的视频流类型: - * - 高清大画面:({@link TRTCVideoStreamTypeBig}) - * - 低清大画面:({@link TRTCVideoStreamTypeSmall}) - * - 辅流(屏幕分享):({@link TRTCVideoStreamTypeSub}) - */ - virtual void stopRemoteView(const char* userId, TRTCVideoStreamType streamType) = 0; - - /** - * 3.9 停止显示所有远端视频画面,同时不再拉取远端用户的视频数据流 - * - * @note 如果有屏幕分享的画面在显示,则屏幕分享的画面也会一并被关闭。 - */ - virtual void stopAllRemoteView() = 0; - - /** - * 3.10 暂停/恢复接收指定的远端视频流 - * - * 该接口仅暂停/恢复接收指定的远端用户的视频流,但并不释放显示资源,所以如果暂停,视频画面会冻屏在 mute 前的最后一帧。 - * - * @param userId 对方的用户标识 - * @param mute 是否暂停接收 - * @note 您在 enterRoom 之前或之后调用此 API 均能生效,在您调用 exitRoom 之后会被重置为 false。 - */ - virtual void muteRemoteVideoStream(const char* userId, bool mute) = 0; - - /** - * 3.11 暂停/恢复接收所有远端视频流 - * - * 该接口仅暂停/恢复接收所有远端用户的视频流,但并不释放显示资源,所以如果暂停,视频画面会冻屏在 mute 前的最后一帧。 - * - * @param mute 是否暂停接收 - * @note 您在 enterRoom 之前或之后调用此 API 均能生效,在您调用 exitRoom 之后会被重置为 
false。 - */ - virtual void muteAllRemoteVideoStreams(bool mute) = 0; - - /** - * 3.12 设置视频编码器相关参数 - * - * 该设置决定了远端用户看到的画面质量(同时也是云端录制出的视频文件的画面质量) - * - * @param params 视频编码参数,详情请参考 TRTCTypeDef.h 中的 TRTCVideoEncParam 定义 - */ - virtual void setVideoEncoderParam(const TRTCVideoEncParam& params) = 0; - - /** - * 3.13 设置网络流控相关参数 - * - * 该设置决定了 SDK 在各种网络环境下的调控策略(例如弱网下是“保清晰”还是“保流畅”) - * - * @param params 网络流控参数,详情请参考 TRTCTypeDef.h 中的 TRTCNetworkQosParam 定义 - */ - virtual void setNetworkQosParam(const TRTCNetworkQosParam& params) = 0; - - /** - * 3.14 设置本地图像(主流)的渲染参数 - * - * @param params 本地图像的参数,详情请参考 TRTCTypeDef.h 中的 TRTCRenderParams 定义 - */ - virtual void setLocalRenderParams(const TRTCRenderParams ¶ms) = 0; - - /** - * 3.15 设置视频编码输出的画面方向,即设置远端用户观看到的和服务器录制的画面方向 - * - * @param rotation 目前支持 TRTCVideoRotation0 和 TRTCVideoRotation180 旋转角度,默认值:TRTCVideoRotation0 - */ - virtual void setVideoEncoderRotation(TRTCVideoRotation rotation) = 0; - - /** - * 3.16 设置编码器输出的画面镜像模式 - * - * 该接口不改变本地摄像头的预览画面,但会改变另一端用户看到的(以及服务器录制的)画面效果。 - * - * @param mirror 是否开启远端镜像, true:远端画面镜像;false:远端画面非镜像。默认值:false - */ - virtual void setVideoEncoderMirror(bool mirror) = 0; - - /** - * 3.17 设置远端图像的渲染模式 - * - * @param userId 对应的远端视频流用户ID - * @param streamType 远端图像的视频流类型,详见 TRTCVideoStreamType 定义 - * @param param 远端图像的参数,详情请参考 TRTCTypeDef.h 中的 TRTCRenderParams 定义 - */ - virtual void setRemoteRenderParams(const char* userId, TRTCVideoStreamType streamType, const TRTCRenderParams ¶ms) = 0; - - /** - * 3.18 开启大小画面双路编码模式 - * - * 如果当前用户是房间中的主要角色(例如主播、老师、主持人等),并且使用 PC 或者 Mac 环境,可以开启该模式。 - * 开启该模式后,当前用户会同时输出【高清】和【低清】两路视频流(但只有一路音频流)。 - * 对于开启该模式的当前用户,会占用更多的网络带宽,并且会更加消耗 CPU 计算资源。 - * - * 对于同一房间的远程观众而言: - * - 如果用户的下行网络很好,可以选择观看【高清】画面 - * - 如果用户的下行网络较差,可以选择观看【低清】画面 - * - * @param enable 是否开启小画面编码,默认值:false - * @param smallVideoParam 小流的视频参数 - */ - virtual void enableSmallVideoStream(bool enable, const TRTCVideoEncParam& smallVideoParam) = 0; - - /** - * 3.19 选定观看指定 userId 的大画面还是小画面 - * - * 此功能需要该 userId 通过 
enableEncSmallVideoStream 提前开启双路编码模式。 - * 如果该 userId 没有开启双路编码模式,则此操作无效。 - * - * @param userId 用户 ID - * @param type 视频流类型,即选择看大画面还是小画面,默认为 TRTCVideoStreamTypeBig - */ - virtual void setRemoteVideoStreamType(const char* userId, TRTCVideoStreamType type) = 0; - -#if _WIN32 || __APPLE__ - /** - * 3.20 视频画面截图 - * - * 截取本地主路、本地辅路、远程主路和远端辅流的视频画面,并通过 ITRTCCloudCallback 的 onSnapshotComplete 回调返回截图画面的数据给您。 - * - * @param userId 用户 ID,空字符串表示截取本地画面 - * @param streamType 视频流类型,支持摄像头画面({@link TRTCVideoStreamTypeBig})和屏幕分享画面({@link TRTCVideoStreamTypeSub}) - * @param sourceType 视频截图来源类型,Windows 端只支持 {@link TRTCSnapshotSourceTypeStream} - */ - virtual void snapshotVideo(const char* userId, TRTCVideoStreamType streamType, TRTCSnapshotSourceType sourceType) = 0; -#endif - /// @} - - ///////////////////////////////////////////////////////////////////////////////// - // - // (四)音频相关接口函数 - // - ///////////////////////////////////////////////////////////////////////////////// - /// @name 音频相关接口函数 - /// @{ - /** - * 4.1 开启本地音频的采集和上行 - * - * 该函数会启动麦克风采集,并将音频数据传输给房间里的其他用户。 - * SDK 并不会默认开启本地的音频上行,也就说,如果您不调用这个函数,房间里的其他用户就听不到您的声音。 - * @param quality 声音质量,参见 TRTCAudioQuality - * @note TRTC SDK 并不会默认打开本地的麦克风采集。 - */ - virtual void startLocalAudio(TRTCAudioQuality quality) = 0; - - /** - * 4.2 关闭本地音频的采集和上行 - * - * 当关闭本地音频的采集和上行,房间里的其它成员会收到 onUserAudioAvailable(false) 回调通知。 - */ - virtual void stopLocalAudio() = 0; - - /** - * 4.3 静音/取消静音本地的音频 - * - * 当静音本地音频后,房间里的其它成员会收到 onUserAudioAvailable(userId, false) 回调通知。 - * 当取消静音本地音频后,房间里的其它成员会收到 onUserAudioAvailable(userId, true) 回调通知。 - * - * 与 stopLocalAudio 不同之处在于,muteLocalAudio(true) 并不会停止发送音视频数据,而是继续发送码率极低的静音包。 - * 由于 MP4 等视频文件格式,对于音频的连续性是要求很高的,使用 stopLocalAudio 会导致录制出的 MP4 不易播放。 - * 因此在对录制质量要求很高的场景中,建议选择 muteLocalAudio,从而录制出兼容性更好的 MP4 文件。 - * - * @param mute true:静音;false:取消静音 - */ - virtual void muteLocalAudio(bool mute) = 0; - - /** - * 4.4 静音/取消静音指定的远端用户的声音 - * - * @param userId 用户 ID - * @param mute true:静音;false:取消静音 - * - * @note - * - 
静音时会停止接收该用户的远端音频流并停止播放,取消静音时会自动拉取该用户的远端音频流并进行播放。 - * - 您在 enterRoom 之前或之后调用此 API 均能生效,在您调用 exitRoom 之后会被重置为 false。 - */ - virtual void muteRemoteAudio(const char* userId, bool mute) = 0; - - /** - * 4.5 静音/取消静音所有用户的声音 - * - * @param mute true:静音;false:取消静音 - * - * @note - * - 静音时会停止接收所有用户的远端音频流并停止播放,取消静音时会自动拉取所有用户的远端音频流并进行播放。 - * - 您在 enterRoom 之前或之后调用此 API 均能生效,在您调用 exitRoom 之后会被重置为 false。 - */ - virtual void muteAllRemoteAudio(bool mute) = 0; - - /** - * 4.6 设置某个远程用户的播放音量 - * - * @param userId 远程用户 ID - * @param volume 音量大小,100为原始音量,范围是:[0 ~ 150],默认值为100 - * - * @note 如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 - */ - virtual void setRemoteAudioVolume(const char *userId, int volume) = 0; - - /** - * 4.7 设置 SDK 采集音量。 - * - * @param volume 音量大小,100为原始音量,范围是:[0 ~ 150],默认值为100 - * - * @note 如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 - */ - virtual void setAudioCaptureVolume(int volume) = 0; - - /** - * 4.8 获取 SDK 采集音量 - */ - virtual int getAudioCaptureVolume() = 0; - - /** - * 4.9 设置 SDK 播放音量。 - * - * @param volume 音量大小,100为原始音量,范围是:[0 ~ 150],默认值为100 - * - * @note - * 1. 该函数会控制最终交给系统播放的声音音量,会影响录制本地音频文件的音量大小,但不会影响耳返的音量。<br> - * 2. 
如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 - */ - virtual void setAudioPlayoutVolume(int volume) = 0; - - /** - * 4.10 获取 SDK 播放音量 - */ - virtual int getAudioPlayoutVolume() = 0; - - /** - * 4.11 启用或关闭音量大小提示 - * - * 开启此功能后,SDK 会在 onUserVoiceVolume() 中反馈对每一路声音音量大小值的评估。 - * 我们在 Demo 中有一个音量大小的提示条,就是基于这个接口实现的。 - * 如希望打开此功能,请在 startLocalAudio() 之前调用。 - * - * @param interval 设置 onUserVoiceVolume 回调的触发间隔,单位为ms,最小间隔为100ms,如果小于等于0则会关闭回调,建议设置为300ms - */ - virtual void enableAudioVolumeEvaluation(uint32_t interval) = 0; - - /** - * 4.12 开始录音 - * - * 该方法调用后, SDK 会将通话过程中的所有音频(包括本地音频,远端音频,BGM等)录制到一个文件里。 - * 无论是否进房,调用该接口都生效。 - * 如果调用 exitRoom 时还在录音,录音会自动停止。 - * - * @param audioRecordingParams 录音参数,请参考 TRTCAudioRecordingParams - * @return 0:成功;-1:录音已开始;-2:文件或目录创建失败;-3:后缀指定的音频格式不支持 - */ - virtual int startAudioRecording(const TRTCAudioRecordingParams& audioRecordingParams) = 0; - - /** - * 4.13 停止录音 - * - * 如果调用 exitRoom 时还在录音,录音会自动停止。 - */ - virtual void stopAudioRecording() = 0; -#if _WIN32 - /** - * 4.14 开启本地录制 - * - * 开启后把直播过程中的音视频数据录制存储到本地文件。 - * 应用场景: - * 1. 不推流情况下,通过调用 startLocalPreview 预览画面后,进行录制。 - * 2. 
在推流的同时进行录制,把直播的全程录制保存到本地文件。 - * - * @param params 录制参数,请参考 {@link TRTCLocalRecordingParams} - * - */ - virtual void startLocalRecording(const TRTCLocalRecordingParams& params) = 0; - /** - * 4.15 停止本地录制 - * - * 如果调用 exitRoom 时还在录制,录制会自动停止。 - */ - virtual void stopLocalRecording() = 0; -#endif - /// @} - - ///////////////////////////////////////////////////////////////////////////////// - // - // (五)设备相关接口函数 - // - ///////////////////////////////////////////////////////////////////////////////// - /// @name 设备相关接口函数 - /// @{ - /** - * 5.1 获取设备管理模块 - * - * @return ITXDeviceManager 设备管理类 - */ - virtual ITXDeviceManager *getDeviceManager() = 0; - /// @} - - ///////////////////////////////////////////////////////////////////////////////// - // - // (六)美颜特效和图像水印 - // - ///////////////////////////////////////////////////////////////////////////////// - /// @name 美颜特效和图像水印 - /// @{ - /** - * 6.1 设置美颜、美白、红润效果级别 - * - * SDK 内部集成了两套风格不同的磨皮算法,一套我们取名叫“光滑”,适用于美女秀场,效果比较明显。 - * 另一套我们取名“自然”,磨皮算法更多地保留了面部细节,主观感受上会更加自然。 - * - * @param style 美颜风格,光滑或者自然,光滑风格磨皮更加明显,适合娱乐场景。 - * @param beautyLevel 美颜级别,取值范围0 - 9,0表示关闭,1 - 9值越大,效果越明显 - * @param whitenessLevel 美白级别,取值范围0 - 9,0表示关闭,1 - 9值越大,效果越明显 - * @param ruddinessLevel 红润级别,取值范围0 - 9,0表示关闭,1 - 9值越大,效果越明显,该参数暂未生效 - */ - virtual void setBeautyStyle(TRTCBeautyStyle style, uint32_t beautyLevel, uint32_t whitenessLevel, uint32_t ruddinessLevel) = 0; - - /** - * 6.2 设置水印 - * - * 水印的位置是通过 xOffset, yOffset, fWidthRatio 来指定的。 - * - xOffset:水印的坐标,取值范围为0 - 1的浮点数。 - * - yOffset:水印的坐标,取值范围为0 - 1的浮点数。 - * - fWidthRatio:水印的大小比例,取值范围为0 - 1的浮点数。 - * - * @param streamType 要设置水印的流类型(TRTCVideoStreamTypeBig、TRTCVideoStreamTypeSub) - * @param srcData 水印图片源数据(传 nullptr 表示去掉水印) - * @param srcType 水印图片源数据类型 - * @param nWidth 水印图片像素宽度(源数据为文件路径时忽略该参数) - * @param nHeight 水印图片像素高度(源数据为文件路径时忽略该参数) - * @param xOffset 水印显示的左上角 x 轴偏移 - * @param yOffset 水印显示的左上角 y 轴偏移 - * @param fWidthRatio 水印显示的宽度占画面宽度比例(水印按该参数等比例缩放显示) - * @note 只支持主路视频流 - */ - virtual void 
setWaterMark(TRTCVideoStreamType streamType, const char* srcData, TRTCWaterMarkSrcType srcType, uint32_t nWidth, uint32_t nHeight, float xOffset, float yOffset, float fWidthRatio) = 0; - /// @} - - ///////////////////////////////////////////////////////////////////////////////// - // - // (七)音乐特效和人声特效 - // - ///////////////////////////////////////////////////////////////////////////////// - /// @name 音乐特效和人声特效 - /// @{ - /** - * 7.1 获取音效管理类 ITXAudioEffectManager - * - * 该模块是整个 SDK 的音效管理模块,支持如下功能: - * - 耳机耳返:麦克风捕捉的声音实时通过耳机播放。 - * - 混响效果:KTV、小房间、大会堂、低沉、洪亮... - * - 变声特效:萝莉、大叔、重金属、外国人... - * - 背景音乐:支持在线音乐和本地音乐,支持变速、变调等特效、支持原生和伴奏并播放和循环播放。 - * - 短音效:鼓掌声、欢笑声等简短的音效文件,对于小于10秒的文件,请将 isShortFile 参数设置为 YES。 - */ - virtual ITXAudioEffectManager* getAudioEffectManager() = 0; - -#if TARGET_PLATFORM_DESKTOP - /** - * 7.2 打开系统声音采集 - * - * 开启后可以采集整个操作系统的播放声音(path 为空)或某一个播放器(path 不为空)的声音, - * 并将其混入到当前麦克风采集的声音中一起发送到云端。 - * - * - * @param path - * - path 为空,代表采集整个操作系统的声音。( Windows 平台) - * - path 填写 exe 程序(如 QQ音乐)所在的路径,将会启动此程序并只采集此程序的声音。( Windows 平台,采集程序声音仅支持32位 SDK ) - * - path 默认为空,其他值未定义。( Mac 平台) - * - * @note 此接口目前仅适用于 Windows 、 Mac 平台 - */ - virtual void startSystemAudioLoopback(const char* path = nullptr) = 0; - - /** - * 7.3 关闭系统声音采集。 - * - * @note 此接口目前仅适用于 Windows 、 Mac 平台 - */ - virtual void stopSystemAudioLoopback() = 0; - - /** - * 7.4 设置系统声音采集的音量。 - * - * @param volume 音量大小,100为原始音量,取值0 - 150,默认值为100 - * - * @note - * 1. 此接口目前仅适用于 Windows 、 Mac 平台。<br> - * 2. 
如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 - */ - virtual void setSystemAudioLoopbackVolume(uint32_t volume) = 0; -#endif - /// @} - - ///////////////////////////////////////////////////////////////////////////////// - // - // (八)屏幕分享相关接口函数 - // - ///////////////////////////////////////////////////////////////////////////////// - /// @name 屏幕分享相关接口函数 - /// @{ -#if TARGET_PLATFORM_DESKTOP - /** - * 8.1 启动屏幕分享 - * - * @param rendView 承载预览画面的控件,可以设置为 nullptr,表示不显示屏幕分享的预览效果。 - * @param type 屏幕分享使用的线路,可以设置为主路(TRTCVideoStreamTypeBig)或者辅路(TRTCVideoStreamTypeSub),默认使用辅路。 - * @param params 屏幕分享的画面编码参数,SDK 会优先使用您通过此接口设置的编码参数: - * - 如果 params 设置为 nullptr,且您已通过 setSubStreamEncoderParam 设置过辅路视频编码参数,SDK 将使用您设置过的辅路编码参数进行屏幕分享。 - * - 如果 params 设置为 nullptr,且您未通过 setSubStreamEncoderParam 设置过辅路视频编码参数,SDK 将自适应选择最佳的编码参数进行屏幕分享。 - * - * @note 一个用户同时最多只能上传一条主路(TRTCVideoStreamTypeBig)画面和一条辅路(TRTCVideoStreamTypeSub)画面, - * 默认情况下,屏幕分享使用辅路画面,如果使用主路画面,建议您提前停止摄像头采集(stopLocalPreview)避免相互冲突。 - */ - virtual void startScreenCapture(TXView rendView, TRTCVideoStreamType type, TRTCVideoEncParam* params) = 0; - - /** - * 8.2 停止屏幕采集 - */ - virtual void stopScreenCapture() = 0; - - /** - * 8.3 暂停屏幕分享 - */ - virtual void pauseScreenCapture() = 0; - - /** - * 8.4 恢复屏幕分享 - */ - virtual void resumeScreenCapture() = 0; - - /** - * 8.5 枚举可分享的屏幕窗口,建议在 startScreenCapture 之前调用 - * - * 如果您要给您的 App 增加屏幕分享功能,一般需要先显示一个窗口选择界面,这样用户可以选择希望分享的窗口。 - * 通过如下函数,您可以获得可分享窗口的 ID、类型、窗口名称以及缩略图。 - * 拿到这些信息后,您就可以实现一个窗口选择界面,当然,您也可以使用我们在 Demo 源码中已经实现好的一个界面。 - * - * @note - * - 返回的列表中包括屏幕和应用窗口,屏幕会在列表的前面几个元素中。 - * - delete ITRTCScreenCaptureSourceList* 指针会导致编译错误,SDK 维护 ITRTCScreenCaptureSourceList 对象的生命周期。 - * - 获取完屏幕窗口列表后请手动调用 ITRTCScreenCaptureSourceList 的 release 方法释放资源,否则可能会引起内存泄漏。 - * - Windows 平台 v8.3 版本后获取窗口列表默认携带最小化窗口,且最小化窗口的缩略图数据默认填充窗口图标数据 - * - * @param thumbSize 指定要获取的窗口缩略图大小,缩略图可用于绘制在窗口选择界面上 - * @param iconSize 指定要获取的窗口图标大小 - * - * @return 窗口列表包括屏幕 - */ - virtual ITRTCScreenCaptureSourceList* getScreenCaptureSources(const SIZE 
&thumbSize, const SIZE &iconSize) = 0; - - /** - * 8.6 设置屏幕分享参数,该方法在屏幕分享过程中也可以调用 - * - * 如果您期望在屏幕分享的过程中,切换想要分享的窗口,可以再次调用这个函数而不需要重新开启屏幕分享。 - * - * 支持如下四种情况: - * - 共享整个屏幕:sourceInfoList 中 type 为 Screen 的 source,captureRect 设为 { 0, 0, 0, 0 } - * - 共享指定区域:sourceInfoList 中 type 为 Screen 的 source,captureRect 设为非 nullptr,例如 { 100, 100, 300, 300 } - * - 共享整个窗口:sourceInfoList 中 type 为 Window 的 source,captureRect 设为 { 0, 0, 0, 0 } - * - 共享窗口区域:sourceInfoList 中 type 为 Window 的 source,captureRect 设为非 nullptr,例如 { 100, 100, 300, 300 } - * - * - * @param source 指定分享源 - * @param captureRect 指定捕获的区域 - * @param property 指定屏幕分享目标的属性,包括捕获鼠标,高亮捕获窗口等,详情参考TRTCScreenCaptureProperty 定义 - * @note 设置高亮边框颜色、宽度参数在 Mac 平台不生效 - * - */ - virtual void selectScreenCaptureTarget(const TRTCScreenCaptureSourceInfo &source, const RECT& captureRect, const TRTCScreenCaptureProperty &property) = 0; - - /** - * 8.7 设置屏幕分享的编码器参数 - * - setVideoEncoderParam() 用于设置远端主路画面(TRTCVideoStreamTypeBig,一般用于摄像头)的编码参数。 - * - setSubStreamEncoderParam() 用于设置远端辅路画面(TRTCVideoStreamTypeSub,一般用于屏幕分享)的编码参数。 - * 该设置决定远端用户看到的画面质量,同时也是云端录制出的视频文件的画面质量。 - * - * @param params 辅流编码参数,详情请参考 TRTCTypeDef.h 中的 TRTCVideoEncParam 定义 - * @note 即使使用主路传输屏幕分享的数据(在调用 startScreenCapture 时设置 type=TRTCVideoStreamTypeBig),依然要使用此接口更新屏幕分享的编码参数。 - */ - virtual void setSubStreamEncoderParam(const TRTCVideoEncParam& params) = 0; - - /** - * 8.8 设置屏幕分享的混音音量大小 - * - * 这个数值越高,屏幕分享音量的占比就越高,麦克风音量占比就越小,所以不推荐设置得太大,否则麦克风的声音就被压制了。 - * - * @param volume 设置的混音音量大小,范围0 - 100 - */ - virtual void setSubStreamMixVolume(uint32_t volume) = 0; - - /** - * 8.9 将指定窗口加入屏幕分享的排除列表中,加入排除列表中的窗口不会被分享出去 - * - * 支持启动屏幕分享前设置过滤窗口,也支持屏幕分享过程中动态添加过滤窗口。 - * - * @param window 不希望分享出去的窗口 - * @note - * - 该方法只有在 TRTCScreenCaptureSourceInfo 中的 type 指定为 TRTCScreenCaptureSourceTypeScreen 时生效,即分享屏幕时生效 - * - 该方法添加的窗口列表会在退房后清除 - * - Mac 平台下请传入窗口 ID(即 CGWindowID),您可以通过 TRTCScreenCaptureSourceInfo 中的 sourceId 成员获得。 - */ - virtual void addExcludedShareWindow(TXView window) = 0; - - /** - * 8.10 
将指定窗口从屏幕分享的排除列表中移除 - * - * @param window 不希望分享出去的窗口 - * - * @note - * - 该方法只有在 TRTCScreenCaptureSourceInfo 中的 type 指定为 TRTCScreenCaptureSourceTypeScreen 时生效,即分享屏幕时生效 - * - Mac 平台下请传入窗口 ID(即 CGWindowID),您可以通过 TRTCScreenCaptureSourceInfo 中的 sourceId 成员获得。 - */ - virtual void removeExcludedShareWindow(TXView window) = 0; - - /** - * 8.11 将所有窗口从屏幕分享的排除列表中移除 - * - * @note 该方法只有在 TRTCScreenCaptureSourceInfo 中的 type 指定为 TRTCScreenCaptureSourceTypeScreen 时生效,即分享屏幕时生效 - */ - virtual void removeAllExcludedShareWindow() = 0; - - /** - * 8.12 将指定窗口加入屏幕分享的包含列表中,加入包含列表中的窗口如果在采集窗口区域内会被分享出去 - * - * 支持启动屏幕分享前设置包含的窗口,也支持屏幕分享过程中动态添加包含的窗口。 - * - * @param window 希望被分享出去的窗口 - * @note - * - 该方法只有在 TRTCScreenCaptureSourceInfo 中的 type 指定为 TRTCScreenCaptureSourceTypeWindow 时生效,即分享窗口时生效 - * - 该方法添加的窗口列表会在退房后清除 - * - Mac 平台下请传入窗口 ID(即 CGWindowID),您可以通过 TRTCScreenCaptureSourceInfo 中的 sourceId 成员获得 - */ - virtual void addIncludedShareWindow(TXView window) = 0; - - /** - * 8.13 将指定窗口从屏幕分享的包含列表中移除 - * - * @param window 希望被分享出去的窗口 - * @note - * - 该方法只有在 TRTCScreenCaptureSourceInfo 中的 type 指定为 TRTCScreenCaptureSourceTypeWindow 时生效,即分享窗口时生效 - * - Mac 平台下请传入窗口 ID(即 CGWindowID),您可以通过 TRTCScreenCaptureSourceInfo 中的 sourceId 成员获得 - */ - virtual void removeIncludedShareWindow(TXView window) = 0; - - /** - * 8.14 将所有窗口从屏幕分享的包含列表中移除 - * - * @note 该方法只有在 TRTCScreenCaptureSourceInfo 中的 type 指定为 TRTCScreenCaptureSourceTypeWindow 时生效,即分享窗口时生效 - */ - virtual void removeAllIncludedShareWindow() = 0; -#endif - /// @} - - ///////////////////////////////////////////////////////////////////////////////// - // - // (九)自定义采集和渲染 - // - ///////////////////////////////////////////////////////////////////////////////// - /// @name 自定义采集和渲染 - /// @{ -#ifdef _WIN32 - /** - * 9.1 启用视频自定义采集模式 - * - * 开启该模式后,SDK 不再运行原有视频流上的采集流程,只保留编码和发送能力。 - * 您需要用 sendCustomVideoData() 不断地向 SDK 塞入自己采集的视频画面。 - * - * @param type 视频流类型: - * - 高清大画面:({@link TRTCVideoStreamTypeBig}) - * - 辅流(屏幕分享):({@link TRTCVideoStreamTypeSub}) - * @param enable 
是否启用,默认值:false - */ - virtual void enableCustomVideoCapture(TRTCVideoStreamType type, bool enable) = 0; - - /** - * 9.2 向 SDK 投送自己采集的视频数据 - * - * TRTCVideoFrame 推荐如下填写方式(其他字段不需要填写): - * - pixelFormat: Windows、Android 平台仅支持 TRTCVideoPixelFormat_I420 - * iOS、Mac 平台支持 TRTCVideoPixelFormat_I420 和 TRTCVideoPixelFormat_BGRA32 - * - bufferType:仅支持 TRTCVideoBufferType_Buffer。 - * - data:视频帧 buffer。 - * - length:视频帧数据长度,I420 格式下,其值等于:width × height × 3 / 2。 - * - width:视频图像长度。 - * - height:视频图像宽度。 - * - timestamp:时间戳,单位毫秒(ms)。如果 timestamp 间隔不均匀,会严重影响音画同步和录制出的 MP4 质量。 - * - * 参考文档:[自定义采集和渲染](https://cloud.tencent.com/document/product/647/34066)。 - * @param type 指定视频流类型: - * - 高清大画面:({@link TRTCVideoStreamTypeBig}) - * - 辅流(屏幕分享):({@link TRTCVideoStreamTypeSub}) - * @param frame 视频数据,支持 I420 格式数据。 - * @note - SDK 内部有帧率控制逻辑,目标帧率以您在 setVideoEncoderParam (高清大画面) 或者 setSubStreamEncoderParam (辅流) 中设置的为准。 - * - 可以设置 frame 中的 timestamp 为 0,相当于让 SDK 自己设置时间戳,但请“均匀”地控制 sendCustomVideoData 的调用间隔,否则会导致视频帧率不稳定。 - * - Windows 平台目前仅支持传入 TRTCVideoPixelFormat_I420 格式的视频帧 - */ - virtual void sendCustomVideoData(TRTCVideoStreamType type, TRTCVideoFrame* frame) = 0; -#else - /** - * 9.1 启用视频自定义采集模式 - * - * 开启该模式后,SDK 不再运行原有的视频采集流程,只保留编码和发送能力。 - * 您需要用 sendCustomVideoData() 不断地向 SDK 塞入自己采集的视频画面。 - * - * @param enable 是否启用,默认值:false - */ - virtual void enableCustomVideoCapture(bool enable) = 0; - - /** - * 9.2 向 SDK 投送自己采集的视频数据 - * - * TRTCVideoFrame 推荐如下填写方式(其他字段不需要填写): - * - pixelFormat: Windows、Android 平台仅支持 TRTCVideoPixelFormat_I420 - * iOS、Mac 平台支持 TRTCVideoPixelFormat_I420 和 TRTCVideoPixelFormat_BGRA32 - * - bufferType:仅支持 TRTCVideoBufferType_Buffer。 - * - data:视频帧 buffer。 - * - length:视频帧数据长度,I420 格式下,其值等于:width × height × 3 / 2。 - * - width:视频图像长度。 - * - height:视频图像宽度。 - * - timestamp:时间戳,单位毫秒(ms)。如果 timestamp 间隔不均匀,会严重影响音画同步和录制出的 MP4 质量。 - * - * 参考文档:[自定义采集和渲染](https://cloud.tencent.com/document/product/647/34066)。 - * - * @param frame 视频数据,支持 I420 格式数据。 - * @note - SDK 内部有帧率控制逻辑,目标帧率以您在 
setVideoEncoderParam 中设置的为准,太快会自动丢帧,太慢则会自动补帧。 - * - 可以设置 frame 中的 timestamp 为 0,相当于让 SDK 自己设置时间戳,但请“均匀”地控制 sendCustomVideoData 的调用间隔,否则会导致视频帧率不稳定。 - * - iOS、Mac平台目前仅支持传入 TRTCVideoPixelFormat_I420 或 TRTCVideoPixelFormat_BGRA32 格式的视频帧 - * - Android 平台目前仅支持传入 TRTCVideoPixelFormat_I420 格式的视频帧 - */ - virtual void sendCustomVideoData(TRTCVideoFrame* frame) = 0; -#endif - - /** - * 9.3 启用音频自定义采集模式 - * 开启该模式后,SDK 停止运行原有的音频采集流程,只保留编码和发送能力。 - * 您需要用 sendCustomAudioData() 不断地向 SDK 塞入自己采集的音频数据。 - * - * @param enable 是否启用,默认值:false - */ - virtual void enableCustomAudioCapture(bool enable) = 0; - - /** - * 9.4 向 SDK 投送自己采集的音频数据 - * - * TRTCAudioFrame 推荐如下填写方式: - * - audioFormat:音频数据格式,仅支持 TRTCAudioFrameFormatPCM。 - * - data:音频帧 buffer。 - * - length:音频帧数据长度,支持[5ms ~ 100ms]帧长,推荐使用20 ms帧长,【48000采样率、单声道的帧长度:48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 - * - sampleRate:采样率,支持:16000、24000、32000、44100、48000。 - * - channel:声道数(如果是立体声,数据是交叉的),单声道:1; 双声道:2。 - * - timestamp:时间戳,单位毫秒(ms)。如果 timestamp 间隔不均匀,会严重影响音画同步和录制出的 MP4 质量。 - * - * 参考文档:[自定义采集和渲染](https://cloud.tencent.com/document/product/647/34066)。 - * - * @param frame 音频数据 - * @note 可以设置 frame 中的 timestamp 为 0,相当于让 SDK 自己设置时间戳,但请“均匀”地控制 sendCustomAudioData 的调用间隔,否则会导致声音断断续续。 - */ - virtual void sendCustomAudioData(TRTCAudioFrame* frame) = 0; -#ifdef _WIN32 - /** - * 9.5 控制外部音频是否要混入推流和混入播放 - * - * 通过 mixExternalAudioFrame() 增加一路音频混合到推流的音频流,同时可以支持本地播放 - * - * @param enablePublish 是否混入推流 true:混入推流;false:不混入推流,默认值:false - * @param enablePlayout 是否混入本地播放 true:混入播放;false:不混入播放,默认值:false - * @note enablePublish = false, enablePlayout = false 时,表示完全关闭这个额外的音频流,即不推流,也不播放 - */ - virtual void enableMixExternalAudioFrame(bool enablePublish, bool enablePlayout) = 0; - - /** - * 9.6 向 SDK 发送自定义辅流音频数据 - * - * TRTCAudioFrame 推荐如下填写方式(其他字段不需要填写): - * - audioFormat:仅支持 TRTCAudioFrameFormatPCM。 - * - data:音频帧 buffer。 - * - length:音频帧数据长度,推荐每帧20ms采样数。【PCM格式、48000采样率、单声道的帧长度:48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 - * - 
sampleRate:采样率,仅支持48000。 - * - channel:频道数量(如果是立体声,数据是交叉的),单声道:1; 双声道:2。 - * - timestamp:时间戳,单位毫秒(ms)。如果 timestamp 间隔不均匀,会严重影响音画同步和录制出的 MP4 质量。 - * - * @param frame 音频数据 - * @note 可以设置 frame 中的 timestamp 为 0,相当于让 SDK 自己设置时间戳,但请“均匀”地控制 mixExternalAudioFrame 的调用间隔,否则会导致声音断断续续。 - */ - virtual void mixExternalAudioFrame(TRTCAudioFrame* frame) = 0; -#endif - /** - * 9.7 设置本地视频自定义渲染 - * - * @note - 设置此方法,SDK 内部会把采集到的数据回调出来,SDK 跳过 TXView 渲染逻辑 - * - 调用 setLocalVideoRenderCallback(TRTCVideoPixelFormat_Unknown, TRTCVideoBufferType_Unknown, nullptr) 停止回调。 - * - iOS、Mac、Windows 平台目前仅支持回调 TRTCVideoPixelFormat_I420 或 TRTCVideoPixelFormat_BGRA32 像素格式的视频帧 - * - Android 平台目前仅支持回调 TRTCVideoPixelFormat_I420 或 TRTCVideoPixelFormat_RGBA32 像素格式的视频帧 - * @param pixelFormat 指定回调的像素格式 - * @param bufferType 指定视频数据结构类型,目前只支持 TRTCVideoBufferType_Buffer - * @param callback 自定义渲染回调 - * @return 0:成功;<0:错误 - */ - virtual int setLocalVideoRenderCallback(TRTCVideoPixelFormat pixelFormat, TRTCVideoBufferType bufferType, ITRTCVideoRenderCallback* callback) = 0; - - /** - * 9.8 设置远端视频自定义渲染 - * - * 此方法同 setLocalVideoRenderDelegate,区别在于一个是本地画面的渲染回调, 一个是远程画面的渲染回调。 - * - * @note - 设置此方法,SDK 内部会把远端的数据解码后回调出来,SDK 跳过 TXView 渲染逻辑 - * - 调用 setRemoteVideoRenderCallback(userId, TRTCVideoPixelFormat_Unknown, TRTCVideoBufferType_Unknown, nullptr) 停止回调。 - * - iOS、Mac、Windows 平台目前仅支持回调 TRTCVideoPixelFormat_I420 或 TRTCVideoPixelFormat_BGRA32 像素格式的视频帧 - * - Android 平台目前仅支持回调 TRTCVideoPixelFormat_I420 或 TRTCVideoPixelFormat_RGBA32 像素格式的视频帧 - * @param userId 用户标识 - * @param pixelFormat 指定回调的像素格式 - * @param bufferType 指定视频数据结构类型,目前只支持 TRTCVideoBufferType_Buffer - * @param callback 自定义渲染回调 - * @return 0:成功;<0:错误 - */ - virtual int setRemoteVideoRenderCallback(const char* userId, TRTCVideoPixelFormat pixelFormat, TRTCVideoBufferType bufferType, ITRTCVideoRenderCallback* callback) = 0; - - /** - * 9.9 设置音频数据回调 - * - * 设置此方法,SDK 内部会把声音模块的数据(PCM 格式)回调出来,包括: - * - onCapturedAudioFrame:本机麦克风采集到的音频数据 - * - 
onPlayAudioFrame:混音前的每一路远程用户的音频数据 - * - onMixedPlayAudioFrame:各路音频数据混合后送入扬声器播放的音频数据 - * - * @param callback 声音帧数据(PCM 格式)的回调,callback = nullptr 则停止回调数据 - * @return 0:成功;<0:错误 - */ - virtual int setAudioFrameCallback(ITRTCAudioFrameCallback* callback) = 0; - /** - * 9.8 生成自定义采集时间戳 - * - * 此函数仅适合自定义视频采集时使用,当您的 App 自己或由第三方美颜 SDK 调用摄像头 API 采集视频时,由于可能引入一些耗时的外部操作(比如美颜),这会导致视频的节奏和 SDK 内部的音频节奏不一致,进而导致音画不同步。 - * 为避免发生音画不同步的问题,请按照如下步骤正确使用该接口: - * 1. 在调用系统相机 API 采集到一帧视频时,额外调用一次 generateCustomPTS() 获得 pts 时间戳。 - * 2. 在调用 {@link sendCustomVideoData()} 时,将该帧采集时记录的 pts 时间戳赋值给入参 TRTCVideoFrame 中的 timestamp 字段。 - * - * @return 时间戳(单位:ms) - */ - virtual uint64_t generateCustomPTS() = 0; - /// @} - - ///////////////////////////////////////////////////////////////////////////////// - // - // (十)自定义消息发送 - // - ///////////////////////////////////////////////////////////////////////////////// - /// @name 自定义消息发送 - /// @{ - /** - * 10.1 发送自定义消息给房间内所有用户 - * - * 该接口可以借助音视频数据通道向当前房间里的其他用户广播您自定义的数据,但因为复用了音视频数据通道, - * 请务必严格控制自定义消息的发送频率和消息体的大小,否则会影响音视频数据的质量控制逻辑,造成不确定性的问题。 - * - * @param cmdId 消息 ID,取值范围为1 - 10 - * @param data 待发送的消息,最大支持1KB(1000字节)的数据大小 - * @param dataSize 待发送的数据大小 - * @param reliable 是否可靠发送,可靠发送的代价是会引入一定的延时,因为接收端要暂存一段时间的数据来等待重传 - * @param ordered 是否要求有序,即是否要求接收端接收的数据顺序和发送端发送的顺序一致,这会带来一定的接收延时,因为在接收端需要暂存并排序这些消息 - * @return true:消息已经发出;false:消息发送失败 - * - * @note 本接口有以下限制: - * - 发送消息到房间内所有用户(暂时不支持 Web/小程序端),每秒最多能发送30条消息。 - * - 每个包最大为1KB,超过则很有可能会被中间路由器或者服务器丢弃。 - * - 每个客户端每秒最多能发送总计8KB数据。 - * - 将 reliable 和 ordered 同时设置为 true 或 false,暂不支持交叉设置。 - * - 强烈建议不同类型的消息使用不同的 cmdID,这样可以在要求有序的情况下减小消息时延。 - */ - virtual bool sendCustomCmdMsg(uint32_t cmdId, const uint8_t* data, uint32_t dataSize, bool reliable, bool ordered) = 0; - - /** - * 10.2 将小数据量的自定义数据嵌入视频帧中 - * - * 跟 sendCustomCmdMsg 的原理不同,sendSEIMsg 是将数据直接塞入视频数据头中。因此,即使视频帧被旁路到了直播 CDN 上, - * 这些数据也会一直存在。但是由于要把数据嵌入视频帧中,所以数据本身不能太大,推荐几个字节就好。 - * - * 最常见的用法是把自定义的时间戳(timstamp)用 sendSEIMsg 嵌入视频帧中,这种方案的最大好处就是可以实现消息和画面的完美对齐。 - * - * @param data 
待发送的数据,最大支持1kb(1000字节)的数据大小 - * @param dataSize 待发送的数据大小 - * @param repeatCount 发送数据次数 - * @return true:消息已通过限制,等待后续视频帧发送;false:消息被限制发送 - * - * @note 本接口有以下限制: - * - 数据在接口调用完后不会被即时发送出去,而是从下一帧视频帧开始带在视频帧中发送。 - * - 发送消息到房间内所有用户,每秒最多能发送30条消息(与 sendCustomCmdMsg 共享限制)。 - * - 每个包最大为1KB,若发送大量数据,会导致视频码率增大,可能导致视频画质下降甚至卡顿(与 sendCustomCmdMsg 共享限制)。 - * - 每个客户端每秒最多能发送总计8KB数据(与 sendCustomCmdMsg 共享限制)。 - * - 若指定多次发送(repeatCount > 1),则数据会被带在后续的连续 repeatCount 个视频帧中发送出去,同样会导致视频码率增大。 - * - 如果 repeatCount > 1,多次发送,接收消息 onRecvSEIMsg 回调也可能会收到多次相同的消息,需要去重。 - */ - virtual bool sendSEIMsg(const uint8_t* data, uint32_t dataSize, int32_t repeatCount) = 0; - /// @} - - ///////////////////////////////////////////////////////////////////////////////// - // - // (十一)设备和网络测试 - // - ///////////////////////////////////////////////////////////////////////////////// - /// @name 设备和网络测试 - /// @{ - /** - * 11.1 开始进行网络测速(视频通话期间请勿测试,以免影响通话质量) - * - * 测速结果将会用于优化 SDK 接下来的服务器选择策略,因此推荐您在用户首次通话前先进行一次测速,这将有助于我们选择最佳的服务器。 - * 同时,如果测试结果非常不理想,您可以通过醒目的 UI 提示用户选择更好的网络。 - * - * @note 测速本身会消耗一定的流量,所以也会产生少量额外的流量费用。 - * - * @param sdkAppId 应用标识 - * @param userId 用户标识 - * @param userSig 用户签名 - */ - virtual void startSpeedTest(uint32_t sdkAppId, const char* userId, const char* userSig) = 0; - - /** - * 11.2 停止网络测速 - */ - virtual void stopSpeedTest() = 0; - /// @} - - ///////////////////////////////////////////////////////////////////////////////// - // - // (十二)LOG 相关接口函数 - // - ///////////////////////////////////////////////////////////////////////////////// - /// @name LOG 相关接口函数 - /// @{ - /** - * 12.1 获取 SDK 版本信息 - * - * @return UTF-8 编码的版本号。 - */ - virtual const char* getSDKVersion() = 0; - - /** - * 12.2 设置 Log 输出级别 - * - * @param level 参见 TRTCLogLevel,默认值:TRTCLogLevelNone - */ - virtual void setLogLevel(TRTCLogLevel level) = 0; - - /** - * 12.3 启用或禁用控制台日志打印 - * - * @param enabled 指定是否启用,默认为禁止状态 - */ - virtual void setConsoleEnabled(bool enabled) = 0; - - /** - * 12.4 启用或禁用 Log 的本地压缩 - * - * 开启压缩后,Log 
存储体积明显减小,但需要腾讯云提供的 Python 脚本解压后才能阅读。 - * 禁用压缩后,Log 采用明文存储,可以直接用记事本打开阅读,但占用空间较大。 - * - * @param enabled 指定是否启用,默认为禁止状态 - */ - virtual void setLogCompressEnabled(bool enabled) = 0; - - /** - * 12.5 设置日志保存路径 - * - * @note 日志文件默认保存位置: - * - Windows 平台:在 C:/Users/[系统用户名]/AppData/Roaming/Tencent/liteav/log,即 %appdata%/Tencent/liteav/log 下 - * - iOS 或 Mac 平台:在 sandbox Documents/log 下 - * - Android 平台:在 /app私有目录/files/log/tencent/liteav/ 下 - * @note 如需修改,必须在所有方法前调用,并且保证目录存在及应用有目录的读写权限。 - * @param path 存储日志的文件夹,请使用 UTF-8 编码 - */ - virtual void setLogDirPath(const char* path) = 0; - - /** - * 12.6 设置日志回调 - * - * @param callback 日志回调 - */ - virtual void setLogCallback(ITRTCLogCallback* callback) = 0; - - /** - * 12.7 显示仪表盘 - * - * 仪表盘是状态统计和事件消息浮层 view,方便调试。 - * - * @param showType 0:不显示;1:显示精简版;2:显示全量版,默认为不显示 - */ - virtual void showDebugView(int showType) = 0; - - /** - * 12.8 调用实验性 API 接口 - * - * @note 该接口用于调用一些实验性功能 - * @param jsonStr 接口及参数描述的 JSON 字符串 - */ - virtual void callExperimentalAPI(const char *jsonStr) = 0; - /// @} - - ///////////////////////////////////////////////////////////////////////////////// - // - // (十三)Windows 专有废弃方法 - // - ///////////////////////////////////////////////////////////////////////////////// - /// @name Windows 专有废弃方法 - /// @{ -#ifdef _WIN32 - using IDeprecatedTRTCCloud::startLocalAudio; - using IDeprecatedTRTCCloud::startRemoteView; - using IDeprecatedTRTCCloud::startScreenCapture; - using IDeprecatedTRTCCloud::stopRemoteView; - using IDeprecatedTRTCCloud::selectScreenCaptureTarget; - using IDeprecatedTRTCCloud::enableCustomVideoCapture; - using IDeprecatedTRTCCloud::sendCustomVideoData; -#endif // _WIN32 - /// @} -}; -/// @} -} - -#endif /* __ITRTCCLOUD_H__ */ +/** + * Module: TRTCCloud @ TXLiteAVSDK + * Function: 腾讯云 TRTC 主功能接口 + * Version: <:Version:> + */ +#ifndef __ITRTCCLOUD_H__ +#define __ITRTCCLOUD_H__ +#include "TRTCCloudCallback.h" +#include "TRTCTypeDef.h" +#include "ITXAudioEffectManager.h" +#include "ITXDeviceManager.h" 
+#ifdef _WIN32 +#include "IDeprecatedTRTCCloud.h" +#include "TXLiteAVBase.h" +#endif + +/// @defgroup TRTCCloud_cplusplus TRTCCloud +/// 腾讯云 TRTC 主功能接口 +/// @{ +namespace liteav { +class ITRTCCloud; +} + +/// Export the following C-style interface to facilitate “LoadLibrary()” +/// You can use the following methods to create and destroy TRTCCloud instance: +/// <pre> +/// ITRTCCloud *trtcCloud = getTRTCShareInstance(); +/// if(trtcCloud) { +/// std::string version(trtcCloud->getSDKVersion()); +/// } +/// // +/// // +/// destroyTRTCShareInstance(); +/// trtcCloud = nullptr; +/// </pre> +/// +extern "C" { +/// @name Exported C function +/// @{ +#ifdef __ANDROID__ +TRTC_API liteav::ITRTCCloud* getTRTCShareInstance(void* context); +#else +TRTC_API liteav::ITRTCCloud* getTRTCShareInstance(); +#endif +TRTC_API void destroyTRTCShareInstance(); +/// @} +} +namespace liteav { + +class ITRTCCloud +#ifdef _WIN32 + : public IDeprecatedTRTCCloud +#endif // _WIN32 +{ + protected: + virtual ~ITRTCCloud() { + } + + public: +///////////////////////////////////////////////////////////////////////////////// +// +// 创建实例和事件回调 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 创建实例和事件回调 +/// @{ + +/** + * 1.1 创建 TRTCCloud 实例(单例模式) + * + * @param context 仅适用于 Android 平台,SDK 内部会将其转化为 Android 平台的 ApplicationContext 用于调用 Androud System API。 + * 如果传入的 context 参数为空,SDK 内部会自动获取当前进程的 ApplicationContext。 + * @note + * 1. 如果您使用 delete ITRTCCloud* 会导致编译错误,请使用 destroyTRTCCloud 释放对象指针。 + * 2. 在 Windows、Mac 和 iOS 平台上,请调用 getTRTCShareInstance() 接口。 + * 3. 
在 Android 平台上,请调用 getTRTCShareInstance(void *context) 接口。 + */ +#ifdef __ANDROID__ + TRTC_API static liteav::ITRTCCloud* getTRTCShareInstance(void* context); +#else + TRTC_API static liteav::ITRTCCloud* getTRTCShareInstance(); +#endif + + /** + * 1.2 销毁 TRTCCloud 实例(单例模式) + */ + TRTC_API static void destroyTRTCShareInstance(); + + /** + * 1.3 设置 TRTC 事件回调 + * + * 您可以通过 {@link TRTCCloudDelegate} 获得来自 SDK 的各类事件通知(比如:错误码,警告码,音视频状态参数等)。 + * @param listener 回调实例 + */ + virtual void addCallback(ITRTCCloudCallback* callback) = 0; + + /** + * 1.4 移除 TRTC 事件回调 + * + * @param callback + */ + virtual void removeCallback(ITRTCCloudCallback* callback) = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // 房间相关接口函数 + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name 房间相关接口函数 + /// @{ + + /** + * 2.1 进入房间 + * + * TRTC 的所有用户都需要进入房间才能“发布”或“订阅”音视频流,“发布”是指将自己的音频和视频推送到云端,“订阅”是指从云端拉取房间里其他用户的音视频流。 + * 调用该接口时,您需要指定您的应用场景 {@link TRTCAppScene} 以获取最佳的音视频传输体验,这些场景可以分成两大类: + * **实时通话:** + * 包括 {@link TRTCAppSceneVideoCall} 和 {@link TRTCAppSceneAudioCall} 两个可选项,分别是视频通话和语音通话,该模式适合 1对1 的音视频通话,或者参会人数在 300 人以内的在线会议。 + * **在线直播:** + * 包括 {@link TRTCAppSceneLIVE} 和 {@link TRTCAppSceneVoiceChatRoom} 两个可选项,分别是视频直播和语音直播,该模式适合十万人以内的在线直播场景,但需要您在接下来介绍的 TRTCParams 参数中指定 **角色(role)** 这个字段,也就是将房间中的用户区分为 + * **主播** ({@link TRTCRoleAnchor}) 和 **观众** ({@link TRTCRoleAudience}) 两种不同的角色。 调用该接口后,您会收到来自 {@link TRTCCloudDelegate} 中的 onEnterRoom(result) 回调: + * - 如果进房成功,参数 result 会是一个正数(result > 0),表示从函数调用到进入房间所花费的时间,单位是毫秒(ms)。 + * - 如果进房失败,参数 result 会是一个负数(result < 0),表示进房失败的[错误码](https://cloud.tencent.com/document/product/647/32257)。 + * @param param 进房参数,用于指定用户的身份、角色和安全票据等信息,详情请参考 {@link TRTCParams} 。 + * @param scene 应用场景,用于指定您的业务场景,同一个房间内的所有用户需要设定相同的 {@link TRTCAppScene}。 + * @note + * 1. 同一个房间内的所有用户需要设定相同的 scene。不同的 scene 会导致偶现的异常问题。 + * 2. 
当您指定参数 scene 为 {@link TRTCAppSceneLIVE} 或 {@link TRTCAppSceneVoiceChatRoom} 时,您必须通过 {@link TRTCParams} 中的 “role” 字段为当前用户设定他/她在房间中的角色。 + * 3. 请您尽量保证 {@link enterRoom} 与 {@link exitRoom} 前后配对使用,即保证”先退出前一个房间再进入下一个房间”,否则会导致很多异常问题。 + */ + virtual void enterRoom(const TRTCParams& param, TRTCAppScene scene) = 0; + + /** + * 2.2 离开房间 + * + * 调用该接口会让用户离开自己所在的音视频房间,并释放摄像头、麦克风、扬声器等设备资源。 + * 等资源释放完毕之后,SDK 会通过 {@link TRTCCloudDelegate} 中的 onExitRoom() 回调向您通知。 + * 如果您要再次调用 {@link enterRoom} 或者切换到其他的供应商的 SDK,建议等待 onExitRoom() 回调到来之后再执行之后的操作,以避免摄像头或麦克风被占用的问题。 + */ + virtual void exitRoom() = 0; + + /** + * 2.3 切换角色 + * + * 调用本接口可以实现用户在“主播”和“观众”两种角色之间来回切换。 + * 由于视频直播和语音聊天室需要支持多达10万名观众同时观看,所以设定了“只有主播才能发布自己的音视频”的规则。 + * 因此,当有些观众希望发布自己的音视频流(以便能跟主播互动)时,就需要先把自己的角色切换成“主播”。 + * 您可以在进入房间时通过 {@link TRTCParams} 中的 role 字段事先确定用户的角色,也可以在进入房间后通过 switchRole 接口动态切换角色。 + * @param role 角色,默认为“主播”: + * - {@link TRTCRoleAnchor} :主播,可以发布自己的音视频,同一个房间里最多支持50个主播同时发布音视频。 + * - {@link TRTCRoleAudience} :观众,不能发布自己的音视频流,只能观看房间中其他主播的音视频。如果要发布自己的音视频,需要先通过 {@link switchRole} 切换成“主播”,同一个房间内同时最多可以容纳 10 万名观众。 + * @note + * 1. 该接口仅适用于视频直播({@link TRTCAppSceneLIVE})和语音聊天室({@link TRTCAppSceneVoiceChatRoom})这两个场景。 + * 2. 
如果您在 {@link enterRoom} 时指定的 scene 为 {@link TRTCAppSceneVideoCall} 或 {@link TRTCAppSceneAudioCall},请不要调用这个接口。 + */ + virtual void switchRole(TRTCRoleType role) = 0; + + /** + * 2.4 切换角色(支持设置权限位) + * + * 调用本接口可以实现用户在“主播”和“观众”两种角色之间来回切换。 + * 由于视频直播和语音聊天室需要支持多达10万名观众同时观看,所以设定了“只有主播才能发布自己的音视频”的规则。 + * 因此,当有些观众希望发布自己的音视频流(以便能跟主播互动)时,就需要先把自己的角色切换成“主播”。 + * 您可以在进入房间时通过 {@link TRTCParams} 中的 role 字段事先确定用户的角色,也可以在进入房间后通过 switchRole 接口动态切换角色。 + * @param role 角色,默认为“主播”: + * - {@link TRTCRoleAnchor} :主播,可以发布自己的音视频,同一个房间里最多支持50个主播同时发布音视频。 + * - {@link TRTCRoleAudience} :观众,不能发布自己的音视频流,只能观看房间中其他主播的音视频。如果要发布自己的音视频,需要先通过 {@link switchRole} 切换成“主播”,同一个房间内同时最多可以容纳 10 万名观众。 + * @param privateMapKey 用于权限控制的权限票据,当您希望某个房间只能让特定的 userId 进入或者上行视频时,需要使用 privateMapKey 进行权限保护。 + * - 仅建议有高级别安全需求的客户使用,更多详情请参见 [开启高级权限控制](https://cloud.tencent.com/document/product/647/32240)。 + * @note + * 1. 该接口仅适用于视频直播({@link TRTCAppSceneLIVE})和语音聊天室({@link TRTCAppSceneVoiceChatRoom})这两个场景。 + * 2. 如果您在 {@link enterRoom} 时指定的 scene 为 {@link TRTCAppSceneVideoCall} 或 {@link TRTCAppSceneAudioCall},请不要调用这个接口。 + */ + virtual void switchRole(TRTCRoleType role, const char* privateMapKey) = 0; + + /** + * 2.5 切换房间 + * + * 使用该接口可以让用户可以快速从一个房间切换到另一个房间。 + * - 如果用户的身份是“观众”,该接口的调用效果等同于 exitRoom(当前房间) + enterRoom(新的房间)。 + * - 如果用户的身份是“主播”,该接口在切换房间的同时还会保持自己的音视频发布状态,因此在房间切换过程中,摄像头的预览和声音的采集都不会中断。 + * + * 该接口适用于在线教育场景中,监课老师在多个房间中进行快速切换的场景。在该场景下使用 switchRoom 可以获得比 exitRoom+enterRoom 更好的流畅性和更少的代码量。 + * 接口调用结果会通过 {@link TRTCCloudDelegate} 中的 onSwitchRoom(errCode, errMsg) 回调。 + * + * @param config 房间参数,详情请参考 {@link TRTCSwitchRoomConfig} 。 + * @note 由于对老版本 SDK 兼容的需求,参数 config 中同时包含 roomId 与 strRoomId 两个参数,这两个参数的填写格外讲究,请注意如下事项: + * 1. 若您选用 strRoomId,则 roomId 需要填写为0。若两者都填,将优先选用 roomId。 + * 2. 
所有房间需要同时使用 strRoomId 或同时使用 roomId,不可混用,否则将会出现很多预期之外的 bug。 + */ + virtual void switchRoom(const TRTCSwitchRoomConfig& config) = 0; + + /** + * 2.6 请求跨房通话 + * + * 默认情况下,只有同一个房间中的用户之间可以进行音视频通话,不同的房间之间的音视频流是相互隔离的。 + * 但您可以通过调用该接口,将另一个房间中某个主播音视频流发布到自己所在的房间中,与此同时,该接口也会将自己的音视频流发布到目标主播的房间中。 + * 也就是说,您可以使用该接口让身处两个不同房间中的主播进行跨房间的音视频流分享,从而让每个房间中的观众都能观看到这两个主播的音视频。该功能可以用来实现主播之间的 PK 功能。 + * 跨房通话的请求结果会通过 {@link TRTCCloudDelegate} 中的 onConnectOtherRoom() 回调通知给您。 + * 例如:当房间“101”中的主播 A 通过 connectOtherRoom() 跟房间“102”中的主播 B 建立跨房通话后, + * - 房间“101”中的用户都会收到主播 B 的 onRemoteUserEnterRoom(B) 和 onUserVideoAvailable(B,true) 这两个事件回调,即房间“101”中的用户都可以订阅主播 B 的音视频。 + * - 房间“102”中的用户都会收到主播 A 的 onRemoteUserEnterRoom(A) 和 onUserVideoAvailable(A,true) 这两个事件回调,即房间“102”中的用户都可以订阅主播 A 的音视频。 + *  + * 跨房通话的参数考虑到后续扩展字段的兼容性问题,暂时采用了 JSON 格式的参数: + * **情况一:数字房间号** + * 如果房间“101”中的主播 A 要跟房间“102”中的主播 B 连麦,那么主播 A 调用该接口时需要传入:{"roomId": 102, "userId": "userB"} + * 示例代码如下: + * <pre> + * Json::Value jsonObj; + * jsonObj["roomId"] = 102; + * jsonObj["userId"] = "userB"; + * Json::FastWriter writer; + * std::string params = writer.write(jsonObj); + * trtc.ConnectOtherRoom(params.c_str()); + * </pre> + * + * **情况二:字符串房间号** + * 如果您使用的是字符串房间号,务必请将 json 中的 “roomId” 替换成 “strRoomId”: {"strRoomId": "102", "userId": "userB"} + * 示例代码如下: + * <pre> + * Json::Value jsonObj; + * jsonObj["strRoomId"] = "102"; + * jsonObj["userId"] = "userB"; + * Json::FastWriter writer; + * std::string params = writer.write(jsonObj); + * trtc.ConnectOtherRoom(params.c_str()); + * </pre> + * + * @param param 需要你传入 JSON 格式的字符串参数,roomId 代表数字格式的房间号,strRoomId 代表字符串格式的房间号,userId 代表目标主播的用户ID。 + */ + virtual void connectOtherRoom(const char* param) = 0; + + /** + * 2.7 退出跨房通话 + * + * 退出结果会通过 **TRTCCloudDelegate** 中的 {@link onDisconnectOtherRoom} 回调通知给您。 + */ + virtual void disconnectOtherRoom() = 0; + + /** + * 2.8 设置订阅模式(需要在进入房前设置才能生效) + * + * 您可以通过该接口在“自动订阅”和“手动订阅”两种模式下进行切换: + * - 自动订阅:默认模式,用户在进入房间后会立刻接收到该房间中的音视频流,音频会自动播放,视频会自动开始解码(依然需要您通过 {@link 
startRemoteView} 接口绑定渲染控件)。 + * - 手动订阅:在用户进入房间后,需要手动调用 {@link startRemoteView} 接口才能启动视频流的订阅和解码,需要手动调用 {@link muteRemoteAudio} (false) 接口才能启动声音的播放。 + * + * 在绝大多数场景下,用户进入房间后都会订阅房间中所有主播的音视频流,因此 TRTC 默认采用了自动订阅模式,以求得最佳的“秒开体验”。 + * 如果您的应用场景中每个房间同时会有很多路音视频流在发布,而每个用户只想选择性地订阅其中的 1-2 路,则推荐使用“手动订阅”模式以节省流量费用。 + * @param autoRecvAudio true:自动订阅音频;false:需手动调用 muteRemoteAudio(false) 订阅音频。默认值:true。 + * @param autoRecvVideo true:自动订阅视频;false:需手动调用 startRemoteView 订阅视频。默认值:true。 + * @note + * 1. 需要在进入房间前调用该接口进行设置才能生效。 + * 2. 在自动订阅模式下,如果用户在进入房间后没有调用 {@link startRemoteView} 订阅视频流,SDK 会自动停止订阅视频流,以便达到节省流量的目的。 + */ + virtual void setDefaultStreamRecvMode(bool autoRecvAudio, bool autoRecvVideo) = 0; + +/** + * 2.9 创建子房间示例(用于多房间并发观看) + * + * TRTCCloud 一开始被设计成单例模式,限制了多房间并发观看的能力。 + * 通过调用该接口,您可以创建出多个 TRTCCloud 实例,以便同时进入多个不同的房间观看音视频流。 + * 但需要注意的是,由于摄像头和麦克风还是只有一份,因此您只能同时在一个 TRTCCloud 实例中以“主播”的身份存在,也就是您只能同时在一个 TRTCCloud 实例中发布自己的音视频流。 + * 该功能主要用于在线教育场景中一种被称为“超级小班课”的业务场景中,用于解决“每个 TRTC 的房间中最多只能有 50 人同时发布自己音视频流”的限制。 + * 示例代码如下: + * <pre> + * ITRTCCloud *mainCloud = getTRTCShareInstance(); + * mainCloud->enterRoom(params1, TRTCAppSceneLIVE); + * //... + * //Switch the role from "anchor" to "audience" in your own room + * mainCloud->switchRole(TRTCRoleAudience); + * mainCloud->muteLocalVideo(true); + * mainCloud->muteLocalAudio(true); + * //... + * //Use subcloud to enter another room and switch the role from "audience" to "anchor" + * ITRTCCloud *subCloud = mainCloud->createSubCloud(); + * subCloud->enterRoom(params2, TRTCAppSceneLIVE); + * subCloud->switchRole(TRTCRoleAnchor); + * subCloud->muteLocalVideo(false); + * subCloud->muteLocalAudio(false); + * //... + * //Exit from new room and release it. 
+ * subCloud->exitRoom(); + * mainCloud->destroySubCloud(subCloud); + * </pre> + * + * @note + * - 同一个用户,可以使用同一个 userId 进入多个不同 roomId 的房间。 + * - 两台不同的终端设备不可以同时使用同一个 userId 进入同一个 roomId 的房间。 + * - 同一个用户,同时只能在一个 TRTCCloud 实例中推流,在不同房间同时推流会引发云端的状态混乱,导致各种 bug。 + * - 通过 createSubCloud 接口创建出来的 TRTCCloud 实例有一个能力限制:不能调用子实例中与本地音视频相关的接口(除 switchRole、muteLocalVideo 和 muteLocalAudio 之外), 设置美颜等接口请使用原 TRTCCloud 实例对象。 + * @return 子 TRTCCloud 实例 + */ +#if _WIN32 || __APPLE__ + virtual ITRTCCloud* createSubCloud() = 0; +#endif + +/** + * 2.10 销毁子房间示例 + * + * @param subCloud 子房间实例 + */ +#if _WIN32 || __APPLE__ + virtual void destroySubCloud(ITRTCCloud* subCloud) = 0; +#endif + + ///////////////////////////////////////////////////////////////////////////////// + // + // CDN 相关接口函数 + // + ///////////////////////////////////////////////////////////////////////////////// + + /** + * 3.1 开始向腾讯云直播 CDN 上发布音视频流 + * + * 该接口会向 TRTC 服务器发送指令,要求其将当前用户的音视频流旁路到直播 CDN 上。 + * 您可以通过参数 streamId 设定直播流的 StreamId,从而可以指定该用户的音视频流对应在直播 CDN 上的播放地址。 + * 例如:您可以通过该接口将当前用户的直播流 ID 指定为 user_stream_001,那么该用户音视频流对应的 CDN 播放地址为: + * “http://yourdomain/live/user_stream_001.flv”,其中 yourdomain 为您自己备案的播放域名, + * 您可以在[直播控制台](https://console.cloud.tencent.com/live) 配置您的播放域名,腾讯云不提供默认的播放域名。 + * 您也可以在设置 enterRoom 的参数 TRTCParams 时指定 streamId, 而且我们更推荐您采用这种方案。 + * @param streamId 自定义流 ID。 + * @param streamType 仅支持 {@link TRTCVideoStreamTypeBig} 和 {@link TRTCVideoStreamTypeSub}。 + * @note 您需要提前在 [实时音视频控制台](https://console.cloud.tencent.com/trtc/) 中的功能配置页面上开启“启用旁路推流”才能生效。 + * - 若您选择“指定流旁路”,则您可以通过该接口将对应音视频流推送到腾讯云 CDN 且指定为填写的流 ID。 + * - 若您选择“全局自动旁路”,则您可以通过该接口调整默认的流 ID。 + */ + virtual void startPublishing(const char* streamId, TRTCVideoStreamType streamType) = 0; + + /** + * 3.2 停止向腾讯云直播 CDN 上发布音视频流 + */ + virtual void stopPublishing() = 0; + + /** + * 3.3 开始向非腾讯云 CDN 上发布音视频流 + * + * 该接口跟 startPublishing 功能类似,不同之处在于,startPublishing 仅支持向腾讯云的 CDN 发布,而本接口支持向非腾讯云的直播 CDN 上转推音视频流。 + * @param param CDN 转推参数,详情请参考 {@link TRTCPublishCDNParam} + 
* @note + * - 使用 startPublishing 接口向腾讯云的直播 CDN 上发布音视频流不会收取额外费用 + * - 使用 startPublishCDNStream 接口向非腾讯云的直播 CDN 上发布音视频流,需要收取额外的转推带宽费用。 + */ + virtual void startPublishCDNStream(const TRTCPublishCDNParam& param) = 0; + + /** + * 3.4 停止向非腾讯云 CDN 上发布音视频流 + */ + virtual void stopPublishCDNStream() = 0; + + /** + * 3.5 设置云端混流的排版布局和转码参数 + * + * 在一个直播间中可能同时会有多个主播发布自己的音视频流,但对于直播 CDN 上的观众而言,只需要观看一条 HTTP-FLV 或 HLS 格式的视频流即可。 + * 当您调用本接口函数时,SDK 会向腾讯云的 TRTC 混流服务器发送一条指令,混流服务器会将房间里的多路音视频流混合成一路。 + * 您可以通过 {@link TRTCTranscodingConfig} 参数来调整每一路画面的排版布局,也可以设置混合后的音视频流的各项编码参数。 + * 参考文档:[云端混流转码](https://cloud.tencent.com/document/product/647/16827)。 + *  + * @param config 如果 config 不为空,则开启云端混流,如果 config 为空则停止云端混流。详情请参考 {@link TRTCTranscodingConfig} 。 + * @note 关于云端混流的注意事项: + * - 混流转码为收费功能,调用接口将产生云端混流转码费用,详见 [云端混流转码计费说明](https://cloud.tencent.com/document/product/647/49446) 。 + * - 调用该接口的用户,如果没设定 config 参数中的 streamId 字段,TRTC 会将房间中的多路画面混合到当前用户所对应的音视频流上,即 A + B => A。 + * - 调用该接口的用户,如果设定了 config 参数中的 streamId 字段,TRTC 会将房间中的多路画面混合到您指定的 streamId 上,即 A + B => streamId。 + * - 请注意,若您还在房间中且不再需要混流,请务必再次调用本接口并将 config 设置为空以进行取消,不及时取消混流可能会引起不必要的计费损失。 + * - 请放心,当您退房时 TRTC 会自动取消混流状态。 + */ + virtual void setMixTranscodingConfig(TRTCTranscodingConfig* config) = 0; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// 视频相关接口函数 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 视频相关接口函数 +/// @{ + +/** + * 4.1 开启本地摄像头的预览画面(移动端) + * + * 在 enterRoom 之前调用此函数,SDK 只会开启摄像头,并一直等到您调用 enterRoom 之后才开始推流。 + * 在 enterRoom 之后调用此函数,SDK 会开启摄像头并自动开始视频推流。 + * 当开始渲染首帧摄像头画面时,您会收到 {@link TRTCCloudDelegate} 中的 onCameraDidReady 回调通知。 + * @param frontCamera true:前置摄像头;false:后置摄像头。 + * @param view 承载视频画面的控件 + * @note 如果希望开播前预览摄像头画面并通过 BeautyManager 调节美颜参数,您可以: + * - 方案一:在调用 enterRoom 之前调用 startLocalPreview + * - 方案二:在调用 enterRoom 之后调用 startLocalPreview + muteLocalVideo(true) + */ +#if TARGET_PLATFORM_PHONE + virtual void 
startLocalPreview(bool frontCamera, TXView view) = 0; +#endif + +/** + * 4.2 开启本地摄像头的预览画面(桌面端) + * + * 在调用该接口之前,您可以先调用 setCurrentCameraDevice 选择使用 Mac 自带摄像头或外接摄像头。 + * 在 enterRoom 之前调用此函数,SDK 只会开启摄像头,并一直等到您调用 enterRoom 之后才开始推流。 + * 在 enterRoom 之后调用此函数,SDK 会开启摄像头并自动开始视频推流。 + * 当开始渲染首帧摄像头画面时,您会收到 {@link TRTCCloudDelegate} 中的 onCameraDidReady 回调通知。 + * @param view 承载视频画面的控件 + * @note 如果希望开播前预览摄像头画面并通过 BeautyManager 调节美颜参数,您可以: + * - 方案一:在调用 enterRoom 之前调用 startLocalPreview + * - 方案二:在调用 enterRoom 之后调用 startLocalPreview + muteLocalVideo(true) + */ +#if TARGET_PLATFORM_DESKTOP + virtual void startLocalPreview(TXView view) = 0; +#endif + + /** + * 4.3 更新本地摄像头的预览画面 + */ + virtual void updateLocalView(TXView view) = 0; + + /** + * 4.4 停止摄像头预览 + */ + virtual void stopLocalPreview() = 0; + + /** + * 4.5 暂停/恢复发布本地的视频流 + * + * 该接口可以暂停(或恢复)发布本地的视频画面,暂停之后,同一房间中的其他用户将无法继续看到自己画面。 + * 该接口在指定 TRTCVideoStreamTypeBig 时等效于 start/stopLocalPreview 这两个接口,但具有更好的响应速度。 + * 因为 start/stopLocalPreview 需要打开和关闭摄像头,而打开和关闭摄像头都是硬件设备相关的操作,非常耗时。 + * 相比之下,muteLocalVideo 只需要在软件层面对数据流进行暂停或者放行即可,因此效率更高,也更适合需要频繁打开关闭的场景。 + * 当暂停/恢复发布指定 TRTCVideoStreamTypeBig 后,同一房间中的其他用户将会收到 onUserVideoAvailable 回调通知。 + * 当暂停/恢复发布指定 TRTCVideoStreamTypeSub 后,同一房间中的其他用户将会收到 onUserSubStreamAvailable 回调通知。 + * @param streamType 要暂停/恢复的视频流类型(仅支持 {@link TRTCVideoStreamTypeBig} 和 {@link TRTCVideoStreamTypeSub}) + * @param mute true:暂停;false:恢复。 + */ + virtual void muteLocalVideo(TRTCVideoStreamType streamType, bool mute) = 0; + + /** + * 4.6 设置本地画面被暂停期间的替代图片 + * + * 当您调用 muteLocalVideo(true) 暂停本地画面时,您可以通过调用本接口设置一张替代图片,设置后,房间中的其他用户会看到这张替代图片,而不是黑屏画面。 + * @param image 设置替代图片,空值代表在 muteLocalVideo 之后不再发送视频流数据,默认值为空。 + * @param fps 设置替代图片帧率,最小值为5,最大值为10,默认5。 + */ + virtual void setVideoMuteImage(TRTCImageBuffer* image, int fps) = 0; + + /** + * 4.7 订阅远端用户的视频流,并绑定视频渲染控件 + * + * 调用该接口可以让 SDK 拉取指定 userid 的视频流,并渲染到参数 view 指定的渲染控件上。您可以通过 {@link setRemoteRenderParams} 设置画面的显示模式。 + * - 如果您已经知道房间中有视频流的用户的 userid,可以直接调用 startRemoteView 订阅该用户的画面。 
+ * - 如果您不知道房间中有哪些用户在发布视频,您可以在 enterRoom 之后等待来自 {@link onUserVideoAvailable} 的通知。 + * + * 调用本接口只是启动视频流的拉取,此时画面还需要加载和缓冲,当缓冲完毕后您会收到来自 {@link onFirstVideoFrame} 的通知。 + * @param userId 指定远端用户的 ID。 + * @param streamType 指定要观看 userId 的视频流类型: + * - 高清大画面:{@link TRTCVideoStreamTypeBig} + * - 低清小画面:{@link TRTCVideoStreamTypeSmall}(需要远端用户通过 {@link enableEncSmallVideoStream} 开启双路编码后才有效果) + * - 辅流画面(常用于屏幕分享):{@link TRTCVideoStreamTypeSub} + * + * @param view 用于承载视频画面的渲染控件 + * @note 注意几点规则需要您关注: + * 1. SDK 支持同时观看某 userid 的大画面和辅路画面,或者同时观看某 userid 的小画面和辅路画面,但不支持同时观看大画面和小画面。 + * 2. 只有当指定的 userid 通过 {@link enableEncSmallVideoStream} 开启双路编码后,才能观看该用户的小画面。 + * 3. 当指定的 userid 的小画面不存在时,SDK 默认切换到该用户的大画面。 + */ + virtual void startRemoteView(const char* userId, TRTCVideoStreamType streamType, TXView view) = 0; + + /** + * 4.8 更新远端用户的视频渲染控件 + * + * 该接口可用于更新远端视频画面的渲染控件,常被用于切换显示区域的交互场景中。 + * @param view 承载视频画面的控件 + * @param streamType 要设置预览窗口的流类型(仅支持 {@link TRTCVideoStreamTypeBig} 和 {@link TRTCVideoStreamTypeSub}) + * @param userId 指定远端用户的 ID。 + */ + virtual void updateRemoteView(const char* userId, TRTCVideoStreamType streamType, TXView view) = 0; + + /** + * 4.9 停止订阅远端用户的视频流,并释放渲染控件 + * + * 调用此接口会让 SDK 停止接收该用户的视频流,并释放该路视频流的解码和渲染资源。 + * @param userId 指定远端用户的 ID。 + * @param streamType 指定要观看 userId 的视频流类型: + * - 高清大画面:{@link TRTCVideoStreamTypeBig} + * - 低清小画面:{@link TRTCVideoStreamTypeSmall} + * - 辅流画面(常用于屏幕分享):{@link TRTCVideoStreamTypeSub} + */ + virtual void stopRemoteView(const char* userId, TRTCVideoStreamType streamType) = 0; + + /** + * 4.10 停止订阅所有远端用户的视频流,并释放全部渲染资源 + * + * 调用此接口会让 SDK 停止接收所有来自远端的视频流,并释放全部的解码和渲染资源。 + * @note 如果当前有正在显示的辅路画面(屏幕分享)也会一并被停止。 + */ + virtual void stopAllRemoteView() = 0; + + /** + * 4.11 暂停/恢复订阅远端用户的视频流 + * + * 该接口仅暂停/恢复接收指定用户的视频流,但并不释放显示资源,视频画面会被冻屏在接口调用时的最后一帧。 + * @param userId 指定远端用户的 ID。 + * @param streamType 要暂停/恢复的视频流类型(仅支持 {@link TRTCVideoStreamTypeBig} 和 {@link TRTCVideoStreamTypeSub})。 + * @param mute 是否暂停接收。 + * @note 
该接口支持您在进入房间(enterRoom)前调用,暂停状态会在退出房间(exitRoom)在之后会被重置。 + */ + virtual void muteRemoteVideoStream(const char* userId, TRTCVideoStreamType streamType, bool mute) = 0; + + /** + * 4.12 暂停/恢复订阅所有远端用户的视频流 + * + * 该接口仅暂停/恢复接收所有用户的视频流,但并不释放显示资源,视频画面会被冻屏在接口调用时的最后一帧。 + * @param mute 是否暂停接收 + * @note 该接口支持您在进入房间(enterRoom)前调用,暂停状态会在退出房间(exitRoom)在之后会被重置。 + */ + virtual void muteAllRemoteVideoStreams(bool mute) = 0; + + /** + * 4.13 设置视频编码器的编码参数 + * + * 该设置能够决定远端用户看到的画面质量,同时也能决定云端录制出的视频文件的画面质量。 + * @param param 用于设置视频编码器的相关参数,详情请参考 {@link TRTCVideoEncParam}。 + */ + virtual void setVideoEncoderParam(const TRTCVideoEncParam& param) = 0; + + /** + * 4.14 设置网络质量控制的相关参数 + * + * 该设置决定在差网络环境下的质量调控策略,如“画质优先”或“流畅优先”等策略。 + * @param param 用于设置网络质量控制的相关参数,详情请参考 {@link TRTCNetworkQosParam}。 + */ + virtual void setNetworkQosParam(const TRTCNetworkQosParam& param) = 0; + + /** + * 4.15 设置本地画面的渲染参数 + * + * 可设置的参数包括有:画面的旋转角度、填充模式以及左右镜像等。 + * @param params 画面渲染参数,详情请参考 {@link TRTCRenderParams}。 + */ + virtual void setLocalRenderParams(const TRTCRenderParams& params) = 0; + + /** + * 4.16 设置远端画面的渲染模式 + * + * 可设置的参数包括有:画面的旋转角度、填充模式以及左右镜像等。 + * @param userId 指定远端用户的 ID。 + * @param streamType 可以设置为主路画面(TRTCVideoStreamTypeBig)或辅路画面(TRTCVideoStreamTypeSub) + * @param params 画面渲染参数,详情请参考 {@link TRTCRenderParams}。 + */ + virtual void setRemoteRenderParams(const char* userId, TRTCVideoStreamType streamType, const TRTCRenderParams& params) = 0; + + /** + * 4.17 设置视频编码器输出的画面方向 + * + * 该设置不影响本地画面的预览方向,但会影响房间中其他用户所观看到(以及云端录制文件)的画面方向。 + * 当用户将手机或 Pad 上下颠倒时,由于摄像头的采集方向没有变,所以房间中其他用户所看到的画面会变成上下颠倒的, + * 在这种情况下,您可以通过调用该接口将 SDK 编码出的画面方向旋转180度,如此一来,房间中其他用户所看到的画面可保持正常的方向。 + * 如果您希望实现上述这种友好的交互体验,我们更推荐您直接调用 {@link setGSensorMode} 实现更加智能的方向适配,无需您手动调用本接口。 + * @param rotation 目前支持0和180两个旋转角度,默认值:TRTCVideoRotation_0,即不旋转。 + */ + virtual void setVideoEncoderRotation(TRTCVideoRotation rotation) = 0; + + /** + * 4.18 设置编码器输出的画面镜像模式 + * + * 该设置不影响本地画面的镜像模式,但会影响房间中其他用户所观看到(以及云端录制文件)的镜像模式。 + * @param mirror 
是否开启远端镜像,true:开启远端画面镜像;false:关闭远端画面镜像,默认值:false。 + */ + virtual void setVideoEncoderMirror(bool mirror) = 0; + + /** + * 4.20 开启大小画面双路编码模式 + * + * 开启双路编码模式后,当前用户的编码器会同时输出【高清大画面】和【低清小画面】两路视频流(但只有一路音频流)。 + * 如此以来,房间中的其他用户就可以根据自身的网络情况或屏幕大小选择订阅【高清大画面】或是【低清小画面】。 + * @param enable 是否开启小画面编码,默认值:false + * @param smallVideoEncParam 小流的视频参数 + * @return 0:成功;-1:当前大画面已被设置为较低画质,开启双路编码已无必要。 + * @note 双路编码开启后,会消耗更多的 CPU 和 网络带宽,所以 Mac、Windows 或者高性能 Pad 可以考虑开启,不建议手机端开启。 + */ + virtual void enableSmallVideoStream(bool enable, const TRTCVideoEncParam& smallVideoEncParam) = 0; + + /** + * 4.21 切换指定远端用户的大小画面 + * + * 当房间中某个主播开启了双路编码之后,房间中其他用户通过 {@link startRemoteView} 订阅到的画面默认会是【高清大画面】。 + * 您可以通过此接口选定希望订阅的画面是大画面还是小画面,该接口在 {@link startRemoteView} 之前和之后调用均可生效。 + * @param userId 指定远端用户的 ID。 + * @param streamType 视频流类型,即选择看大画面还是小画面,默认为大画面。 + * @note 此功能需要目标用户已经通过 {@link enableEncSmallVideoStream} 提前开启了双路编码模式,否则此调用无实际效果。 + */ + virtual void setRemoteVideoStreamType(const char* userId, TRTCVideoStreamType streamType) = 0; + +/** + * 4.22 视频画面截图 + * + * 您可以通过本接口截取本地的视频画面,远端用户的主路画面以及远端用户的辅路(屏幕分享)画面。 + * @param userId 用户 ID,如指定空置表示截取本地的视频画面。 + * @param streamType 视频流类型,可选择截取主路画面({@link TRTCVideoStreamTypeBig},常用于摄像头)或辅路画面({@link TRTCVideoStreamTypeSub},常用于屏幕分享)。 + * @param sourceType 画面来源,可选择截取视频流画面({@link TRTCSnapshotSourceTypeStream})或视频渲染画面({@link TRTCSnapshotSourceTypeView}),前者一般更清晰。 + * @note Windows 平台目前仅支持截取 {@link TRTCSnapshotSourceTypeStream} 来源的视频画面。 + */ +#if _WIN32 || __APPLE__ + virtual void snapshotVideo(const char* userId, TRTCVideoStreamType streamType, TRTCSnapshotSourceType sourceType) = 0; +#endif + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // 音频相关接口函数 + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name 音频相关接口函数 + /// @{ + + /** + * 5.1 开启本地音频的采集和发布 + * + * SDK 默认不开启麦克风,当用户需要发布本地音频时,需要调用该接口开启麦克风采集,并将音频编码并发布到当前的房间中。 + * 开启本地音频的采集和发布后,房间中的其他用户会收到 {@link 
onUserAudioAvailable}(userId, true) 的通知。 + * @param quality 声音音质 + * - {@link TRTCAudioQualitySpeech},流畅:采样率:16k;单声道;音频裸码率:16kbps;适合语音通话为主的场景,比如在线会议,语音通话。 + * - {@link TRTCAudioQualityDefault},默认:采样率:48k;单声道;音频裸码率:50kbps;SDK 默认的音频质量,如无特殊需求推荐选择之。 + * - {@link TRTCAudioQualityMusic},高音质:采样率:48k;双声道 + 全频带;音频裸码率:128kbps;适合需要高保真传输音乐的场景,比如在线K歌、音乐直播等。 + * @note 该函数会检查麦克风的使用权限,如果当前 App 没有麦克风权限,SDK 会自动向用户申请麦克风使用权限。 + */ + virtual void startLocalAudio(TRTCAudioQuality quality) = 0; + + /** + * 5.2 停止本地音频的采集和发布 + * + * 停止本地音频的采集和发布后,房间中的其他用户会收到 {@link onUserAudioAvailable}(userId, false) 的通知。 + */ + virtual void stopLocalAudio() = 0; + + /** + * 5.3 暂停/恢复发布本地的音频流 + * + * 当您暂停发布本地音频流之后,房间中的其他他用户会收到 {@link onUserAudioAvailable}(userId, false) 的通知。 + * 当您恢复发布本地音频流之后,房间中的其他他用户会收到 {@link onUserAudioAvailable}(userId, true) 的通知。 + * 与 {@link stopLocalAudio} 的不同之处在于,muteLocalAudio(true) 并不会释放麦克风权限,而是继续发送码率极低的静音包。 + * 这对于需要云端录制的场景非常适用,因为 MP4 等格式的视频文件,对于音频数据的连续性要求很高,使用 {@link stopLocalAudio} 会导致录制出的 MP4 文件不易播放。 + * 因此在对录制文件的质量要求较高的场景中,建议选择 muteLocalAudio 而不建议使用 stopLocalAudio。 + * @param mute true:静音;false:恢复。 + */ + virtual void muteLocalAudio(bool mute) = 0; + + /** + * 5.4 暂停/恢复播放远端的音频流 + * + * 当您静音某用户的远端音频时,SDK 会停止播放指定用户的声音,同时也会停止拉取该用户的音频数据数据。 + * @param userId 用于指定远端用户的 ID。 + * @param mute true:静音;false:取消静音。 + * @note 在进入房间(enterRoom)之前或之后调用本接口均生效,静音状态在退出房间(exitRoom) 之后会被重置为 false。 + */ + virtual void muteRemoteAudio(const char* userId, bool mute) = 0; + + /** + * 5.5 暂停/恢复播放所有远端用户的音频流 + * + * 当您静音所有用户的远端音频时,SDK 会停止播放所有来自远端的音频流,同时也会停止拉取所有用户的音频数据。 + * @param mute true:静音;false:取消静音。 + * @note 在进入房间(enterRoom)之前或之后调用本接口均生效,静音状态在退出房间(exitRoom) 之后会被重置为 false。 + */ + virtual void muteAllRemoteAudio(bool mute) = 0; + + /** + * 5.7 设定某一个远端用户的声音播放音量 + * + * 您可以通过 setRemoteAudioVolume(userId, 0) 将某一个远端用户的声音静音。 + * @param userId 用于指定远端用户的 ID。 + * @param volume 音量大小,取值范围为0 - 100,默认值:100。 + * @note 如果将 volume 设置成 100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 + */ + 
virtual void setRemoteAudioVolume(const char* userId, int volume) = 0; + + /** + * 5.8 设定本地音频的采集音量 + * + * @param volume 音量大小,取值范围为0 - 100;默认值:100 + * @note 如果将 volume 设置成 100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 + */ + virtual void setAudioCaptureVolume(int volume) = 0; + + /** + * 5.9 获取本地音频的采集音量 + */ + virtual int getAudioCaptureVolume() = 0; + + /** + * 5.10 设定远端音频的播放音量 + * + * 该接口会控制 SDK 最终交给系统播放的声音音量,调节效果会影响到本地音频录制文件的音量大小,但不会影响到耳返的音量大小。 + * + * @param volume 音量大小,取值范围为0 - 100,默认值:100。 + * @note 如果将 volume 设置成 100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 + */ + virtual void setAudioPlayoutVolume(int volume) = 0; + + /** + * 5.11 获取远端音频的播放音量 + */ + virtual int getAudioPlayoutVolume() = 0; + + /** + * 5.12 启用音量大小提示 + * + * 开启此功能后,SDK 会在 {@link TRTCCloudDelegate} 中的 {@link onUserVoiceVolume} 回调中反馈远端音频的音量大小。 + * @note 如需打开此功能,请在 startLocalAudio 之前调用才可以生效。 + * @param interval 设置 onUserVoiceVolume 回调的触发间隔,单位为ms,最小间隔为100ms,如果小于等于 0 则会关闭回调,建议设置为300ms; + */ + virtual void enableAudioVolumeEvaluation(uint32_t interval) = 0; + + /** + * 5.13 开始录音 + * + * 当您调用该接口后, SDK 会将本地和远端的所有音频(包括本地音频,远端音频,背景音乐和音效等)混合并录制到一个本地文件中。 + * 该接口在进入房间前后调用均可生效,如果录制任务在退出房间前尚未通过 stopAudioRecording 停止,则退出房间后录制任务会自动被停止。 + * @param param 录音参数,请参考 {@link TRTCAudioRecordingParams} + * @return 0:成功;-1:录音已开始;-2:文件或目录创建失败;-3:后缀指定的音频格式不支持。 + */ + virtual int startAudioRecording(const TRTCAudioRecordingParams& param) = 0; + + /** + * 5.14 停止录音 + * + * 如果录制任务在退出房间前尚未通过本接口停止,则退出房间后录音任务会自动被停止。 + */ + virtual void stopAudioRecording() = 0; + +/** + * 5.15 开启本地媒体录制 + * + * 开启后把直播过程中的音视和视频内容录制到本地的一个文件中。 + * @param params 录制参数,请参考 {@link TRTCLocalRecordingParams} + */ +#if _WIN32 + virtual void startLocalRecording(const TRTCLocalRecordingParams& params) = 0; +#endif + +/** + * 5.16 停止本地媒体录制 + * + * 如果录制任务在退出房间前尚未通过本接口停止,则退出房间后录音任务会自动被停止。 + */ +#if _WIN32 + virtual void stopLocalRecording() = 0; +#endif + + /** + * 5.18 设置远端音频流智能并发播放策略 + * + * 
设置远端音频流智能并发播放策略,适用于上麦人数比较多的场景。 + * @param params 音频并发参数,请参考 {@link TRTCAudioParallelParams} + */ + virtual void setRemoteAudioParallelParams(const TRTCAudioParallelParams& params) = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // 设备管理相关接口 + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name 设备管理相关接口 + /// @{ + + /** + * 6.1 获取设备管理类(TXDeviceManager) + */ + virtual ITXDeviceManager* getDeviceManager() = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // 美颜特效和图像水印 + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name 美颜特效和图像水印 + /// @{ + + /** + * 7.1 设置美颜、美白、红润等特效 + * + * SDK 内部集成了两套风格不同的磨皮算法: + * -“光滑”:算法比较激进,磨皮效果比较明显,适用于秀场直播。 + * -“自然”:算法更多地保留了面部细节,磨皮效果更加自然,适用于绝大多数直播场景。 + * @param style 磨皮算法,有“光滑”和“自然”两种算法。 + * @param beautyLevel 美颜级别,取值范围0 - 9,0表示关闭,1 - 9值越大,效果越明显。 + * @param whitenessLevel 美白级别,取值范围0 - 9,0表示关闭,1 - 9值越大,效果越明显。 + * @param ruddinessLevel 红润级别,取值范围0 - 9,0表示关闭,1 - 9值越大,效果越明显。 + */ + virtual void setBeautyStyle(TRTCBeautyStyle style, uint32_t beautyLevel, uint32_t whitenessLevel, uint32_t ruddinessLevel) = 0; + + /** + * 7.2 添加水印 + * + * 水印的位置是通过 xOffset, yOffset, fWidthRatio 来指定的。 + * - xOffset:水印的坐标,取值范围为0 - 1的浮点数。 + * - yOffset:水印的坐标,取值范围为0 - 1的浮点数。 + * - fWidthRatio:水印的大小比例,取值范围为0 - 1的浮点数。 + * + * @param streamType 要设置水印的流类型(TRTCVideoStreamTypeBig、TRTCVideoStreamTypeSub) + * @param srcData 水印图片源数据(传 nullptr 表示去掉水印) + * @param srcType 水印图片源数据类型 + * @param nWidth 水印图片像素宽度(源数据为文件路径时忽略该参数) + * @param nHeight 水印图片像素高度(源数据为文件路径时忽略该参数) + * @param xOffset 水印显示的左上角 x 轴偏移 + * @param yOffset 水印显示的左上角 y 轴偏移 + * @param fWidthRatio 水印显示的宽度占画面宽度比例(水印按该参数等比例缩放显示) + * @note 本接口只支持给主路视频添加图片水印 + */ + virtual void setWaterMark(TRTCVideoStreamType streamType, const char* srcData, TRTCWaterMarkSrcType srcType, uint32_t nWidth, uint32_t nHeight, float xOffset, 
float yOffset, float fWidthRatio) = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // 背景音乐和声音特效 + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name 背景音乐和声音特效 + /// @{ + + /** + * 8.1 获取音效管理类(TXAudioEffectManager) + * + * TXAudioEffectManager 是音效管理接口,您可以通过该接口实现如下功能: + * - 背景音乐:支持在线音乐和本地音乐,支持变速、变调等特效、支持原生和伴奏并播放和循环播放。 + * - 耳机耳返:麦克风捕捉的声音实时通过耳机播放,常用于音乐直播。 + * - 混响效果:KTV、小房间、大会堂、低沉、洪亮... + * - 变声特效:萝莉、大叔、重金属... + * - 短音效:鼓掌声、欢笑声等简短的音效文件(对于小于10秒的文件,请将 isShortFile 参数设置为 true)。 + */ + virtual ITXAudioEffectManager* getAudioEffectManager() = 0; + +/** + * 8.2 开启系统声音采集(仅适用于桌面系统) + * + * 该接口会从电脑的声卡中采集音频数据,并将其混入到 SDK 当前的音频数据流中,从而使房间中的其他用户也能听到主播的电脑所播放出的声音。 + * 在线教育场景中,老师可以使用此功能让 SDK 采集教学影片中的声音,并广播给同房间中的学生。 + * 音乐直播场景中,主播可以使用此功能让 SDK 采集音乐播放器中的音乐,从而为自己的直播间增加背景音乐。 + * @param deviceName + * - 您可以指定该参数为空置(nullptr),代表让 SDK 采集整个系统的声音。 + * @note + * 在 Windows 平台下,您也可以将参数 deviceName 设置为某个应用程序的可执行文件(如 QQMuisc.exe)的绝对路径,此时 SDK 只会采集该应用程序的声音(仅支持 32 位版本的 SDK)。 + * 您也可以指定该参数为某个扬声器设备的名称来采集特定扬声器声音(通过接口 {@link TXDeviceManager} 中的 getDevicesList 接口,可以获取类型为 {@link TXMediaDeviceTypeSpeaker} 的扬声器设备)。 + */ +#if TARGET_PLATFORM_DESKTOP + virtual void startSystemAudioLoopback(const char* deviceName = nullptr) = 0; +#endif + +/** + * 8.3 停止系统声音采集(仅适用于桌面系统和 Android 系统) + */ +#if TARGET_PLATFORM_DESKTOP + virtual void stopSystemAudioLoopback() = 0; +#endif + +/** + * 8.4 设置系统声音的采集音量 + * + * @param volume 设置的音量大小,范围是:[0 ~ 150],默认值为100。 + */ +#if TARGET_PLATFORM_DESKTOP || TARGET_OS_IPHONE + virtual void setSystemAudioLoopbackVolume(uint32_t volume) = 0; +#endif + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // 屏幕分享相关接口 + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name 屏幕分享相关接口 + /// @{ + + /** + * 9.1 开始桌面端屏幕分享(该接口仅支持桌面系统) + * + * 该接口可以抓取整个 Mac OS 系统的屏幕内容,或抓取您指定的某个应用的窗口内容,并将其分享给同房间中的其他用户。 + * 
@param view 渲染控件所在的父控件,可以设置为空值,表示不显示屏幕分享的预览效果。 + * @param streamType 屏幕分享使用的线路,可以设置为主路(TRTCVideoStreamTypeBig)或者辅路(TRTCVideoStreamTypeSub),推荐使用辅路。 + * @param encParam 屏幕分享的画面编码参数,SDK 会优先使用您通过此接口设置的编码参数: + * - 如果您设置 encParam 为 nil,且您已通过 setSubStreamEncoderParam 设置过辅路视频编码参数,SDK 将使用您设置过的辅路编码参数进行屏幕分享。 + * - 如果您设置 encParam 为 nil,且您未通过 setSubStreamEncoderParam 设置过辅路视频编码参数,SDK 将自动选择一个最佳的编码参数进行屏幕分享。 + * + * @note + * 1. 同一个用户同时最多只能发布一路主路({@link TRTCVideoStreamTypeBig})画面和一路辅路({@link TRTCVideoStreamTypeSub})画面。 + * 2. 默认情况下,屏幕分享使用辅路画面。如果使用主路做屏幕分享,您需要提前停止摄像头采集({@link stopLocalPreview})以避免相互冲突。 + * 3. 同一个房间中同时只能有一个用户使用辅路做屏幕分享,也就是说,同一个房间中同时只允许一个用户开启辅路。 + * 4. 当房间中已经有其他用户在使用辅路分享屏幕时,此时调用该接口会收到来自 {@link TRTCCloudDelegate} 的 onError(ERR_SERVER_CENTER_ANOTHER_USER_PUSH_SUB_VIDEO) 回调。 + */ + virtual void startScreenCapture(TXView view, TRTCVideoStreamType streamType, TRTCVideoEncParam* encParam) = 0; + + /** + * 9.2 停止屏幕分享 + */ + virtual void stopScreenCapture() = 0; + + /** + * 9.3 暂停屏幕分享 + */ + virtual void pauseScreenCapture() = 0; + + /** + * 9.4 恢复屏幕分享 + */ + virtual void resumeScreenCapture() = 0; + +/** + * 9.5 枚举可分享的屏幕和窗口(该接口仅支持桌面系统) + * + * 当您在对接桌面端系统的屏幕分享功能时,一般都需要展示一个选择分享目标的界面,这样用户能够使用这个界面选择是分享整个屏幕还是某个窗口。 + * 通过本接口,您就可以查询到当前系统中可用于分享的窗口的 ID、名称以及缩略图。我们在 Demo 中提供了一份默认的界面实现供您参考。 + * @param thumbnailSize 指定要获取的窗口缩略图大小,缩略图可用于绘制在窗口选择界面上 + * @param iconSize 指定要获取的窗口图标大小 + * @return 窗口列表包括屏幕 + * @note + * 1. 返回的列表中包含屏幕和应用窗口,屏幕是列表中的第一个元素。如果用户有多个显示器,那么每个显示器都是一个分享目标。 + * 2. 
请不要使用 delete ITRTCScreenCaptureSourceList* 删除 SourceList,这很容易导致崩溃,请使用 ITRTCScreenCaptureSourceList 中的 release 方法释放列表。 + */ +#if TARGET_PLATFORM_DESKTOP + virtual ITRTCScreenCaptureSourceList* getScreenCaptureSources(const SIZE& thumbnailSize, const SIZE& iconSize) = 0; +#endif + +/** + * 9.6 选取要分享的屏幕或窗口(该接口仅支持桌面系统) + * + * 当您通过 getScreenCaptureSources 获取到可以分享的屏幕和窗口之后,您可以调用该接口选定期望分享的目标屏幕或目标窗口。 + * 在屏幕分享的过程中,您也可以随时调用该接口以切换分享目标。 + * 支持如下四种情况: + * - 共享整个屏幕:sourceInfoList 中 type 为 Screen 的 source,captureRect 设为 { 0, 0, 0, 0 } + * - 共享指定区域:sourceInfoList 中 type 为 Screen 的 source,captureRect 设为非 nullptr,例如 { 100, 100, 300, 300 } + * - 共享整个窗口:sourceInfoList 中 type 为 Window 的 source,captureRect 设为 { 0, 0, 0, 0 } + * - 共享窗口区域:sourceInfoList 中 type 为 Window 的 source,captureRect 设为非 nullptr,例如 { 100, 100, 300, 300 } + * @param source 指定分享源 + * @param captureRect 指定捕获的区域 + * @param property 指定屏幕分享目标的属性,包括捕获鼠标,高亮捕获窗口等,详情参考TRTCScreenCaptureProperty 定义 + * @note 设置高亮边框颜色、宽度参数在 Mac 平台不生效。 + */ +#if TARGET_PLATFORM_DESKTOP + virtual void selectScreenCaptureTarget(const TRTCScreenCaptureSourceInfo& source, const RECT& captureRect, const TRTCScreenCaptureProperty& property) = 0; +#endif + + /** + * 9.7 设置屏幕分享(即辅路)的视频编码参数(桌面系统和移动系统均已支持) + * + * 该接口可以设定远端用户所看到的屏幕分享(即辅路)的画面质量,同时也能决定云端录制出的视频文件中屏幕分享的画面质量。 + * 请注意如下两个接口的差异: + * - {@link setVideoEncoderParam} 用于设置主路画面({@link TRTCVideoStreamTypeBig},一般用于摄像头)的视频编码参数。 + * - {@link setSubStreamEncoderParam} 用于设置辅路画面({@link TRTCVideoStreamTypeSub},一般用于屏幕分享)的视频编码参数。 + * + * @param param 辅流编码参数,详情请参考 {@link TRTCVideoEncParam}。 + * @note 即使您使用主路传输屏幕分享(在调用 startScreenCapture 时设置 type=TRTCVideoStreamTypeBig),依然要使用 {@link setSubStreamEncoderParam} 设定屏幕分享的编码参数,而不要使用 {@link setVideoEncoderParam} 。 + */ + virtual void setSubStreamEncoderParam(const TRTCVideoEncParam& param) = 0; + +/** + * 9.8 设置屏幕分享时的混音音量大小(该接口仅支持桌面系统) + * + * 这个数值越高,屏幕分享音量的占比就越高,麦克风音量占比就越小,所以不推荐设置得太大,否则麦克风的声音就被压制了。 + * @param volume 设置的混音音量大小,范围0 - 100。 + */ +#if TARGET_PLATFORM_DESKTOP 
+ virtual void setSubStreamMixVolume(uint32_t volume) = 0; +#endif + +/** + * 9.9 将指定窗口加入屏幕分享的排除列表中(该接口仅支持桌面系统) + * + * 加入排除列表中的窗口不会被分享出去,常见的用法是将某个应用的窗口加入到排除列表中以避免隐私问题。 + * 支持启动屏幕分享前设置过滤窗口,也支持屏幕分享过程中动态添加过滤窗口。 + * @param windowID 不希望分享出去的窗口 + * @note + * 1. 该接口只有在 {@link TRTCScreenCaptureSourceInfo} 中的 type 指定为 {@link TRTCScreenCaptureSourceTypeScreen} 时生效,即只有在分享整个屏幕内容时,排除指定窗口的功能才生效。 + * 2. 使用该接口添加到排除列表中的窗口会在退出房间后被 SDK 自动清除。 + * 3. Mac 平台下请传入窗口 ID(即 CGWindowID),您可以通过 {@link TRTCScreenCaptureSourceInfo} 中的 sourceId 成员获得。 + */ +#if TARGET_PLATFORM_DESKTOP + virtual void addExcludedShareWindow(TXView windowID) = 0; +#endif + +/** + * 9.10 将指定窗口从屏幕分享的排除列表中移除(该接口仅支持桌面系统) + * + * @param windowID + */ +#if TARGET_PLATFORM_DESKTOP + virtual void removeExcludedShareWindow(TXView windowID) = 0; +#endif + +/** + * 9.11 将所有窗口从屏幕分享的排除列表中移除(该接口仅支持桌面系统) + */ +#if TARGET_PLATFORM_DESKTOP + virtual void removeAllExcludedShareWindow() = 0; +#endif + +/** + * 9.12 将指定窗口加入屏幕分享的包含列表中(该接口仅支持桌面系统) + * + * 该接口只有在 {@link TRTCScreenCaptureSourceInfo} 中的 type 指定为 {@link TRTCScreenCaptureSourceTypeWindow} 时生效。即只有在分享窗口内容时,额外包含指定窗口的功能才生效。 + * 您在 {@link startScreenCapture} 之前和之后调用均可。 + * @param windowID 希望被分享出去的窗口(Windows 平台下为窗口句柄: HWND) + * @note 通过该方法添加到包含列表中的窗口,会在退出房间后被 SDK 自动清除。 + */ +#if TARGET_PLATFORM_DESKTOP + virtual void addIncludedShareWindow(TXView windowID) = 0; +#endif + +/** + * 9.13 将指定窗口从屏幕分享的包含列表中移除(该接口仅支持桌面系统) + * + * 该接口只有在 {@link TRTCScreenCaptureSourceInfo} 中的 type 指定为 {@link TRTCScreenCaptureSourceTypeWindow} 时生效。 + * 即只有在分享窗口内容时,额外包含指定窗口的功能才生效。 + * @param windowID 希望被分享出去的窗口(Mac 平台: 窗口 ID;Windows 平台: HWND) + */ +#if TARGET_PLATFORM_DESKTOP + virtual void removeIncludedShareWindow(TXView windowID) = 0; +#endif + +/** + * 9.14 将全部窗口从屏幕分享的包含列表中移除(该接口仅支持桌面系统) + * + * 该接口只有在 {@link TRTCScreenCaptureSourceInfo} 中的 type 指定为 {@link TRTCScreenCaptureSourceTypeWindow} 时生效。 + * 即只有在分享窗口内容时,额外包含指定窗口的功能才生效。 + */ +#if TARGET_PLATFORM_DESKTOP + virtual void removeAllIncludedShareWindow() 
= 0; +#endif + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // 自定义采集和自定义渲染 + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name 自定义采集和自定义渲染 + /// @{ + + /** + * 10.1 启用/关闭视频自定义采集模式 + * + * 开启该模式后,SDK 不在运行原有的视频采集流程,即不再继续从摄像头采集数据和美颜,而是只保留视频编码和发送能力。 + * 您需要通过 {@link sendCustomVideoData} 不断地向 SDK 塞入自己采集的视频画面。 + * @param streamType 用于指定视频流类型,{@link TRTCVideoStreamTypeBig}:高清大画面;{@link TRTCVideoStreamTypeSub}:辅路画面。 + * @param enable 是否启用,默认值:false。 + */ + virtual void enableCustomVideoCapture(TRTCVideoStreamType streamType, bool enable) = 0; + + /** + * 10.2 向 SDK 投送自己采集的视频帧 + * + * 使用此接口可以向 SDK 投送自己采集的视频帧,SDK 会将视频帧进行编码并通过自身的网络模块传输出去。 + * 参数 {@link TRTCVideoFrame} 推荐下列填写方式(其他字段不需要填写): + * - pixelFormat:Windows 和 Android 平台仅支持 {@link TRTCVideoPixelFormat_I420},iOS 和 Mac平台支持 {@link TRTCVideoPixelFormat_I420} 和 {@link TRTCVideoPixelFormat_BGRA32}。 + * - bufferType:推荐选择 {@link TRTCVideoBufferType_Buffer}。 + * - data:用于承载视频帧数据的 buffer。 + * - length:视频帧数据长度,如果 pixelFormat 设定为 I420 格式,length 可以按照如下公式计算:length = width × height × 3 / 2。 + * - width:视频图像的宽度,如 640 px。 + * - height:视频图像的高度,如 480 px。 + * - timestamp:时间戳,单位为毫秒(ms),请使用视频帧在采集时被记录下来的时间戳(可以在采集到一帧视频帧之后,通过调用 {@link generateCustomPTS} 获取时间戳)。 + * + * 参考文档:[自定义采集和渲染](https://cloud.tencent.com/document/product/647/34066)。 + * @param streamType 用于指定视频流类型,{@link TRTCVideoStreamTypeBig}:高清大画面;{@link TRTCVideoStreamTypeSub}:辅路画面。 + * @param frame 视频数据,支持 I420 格式数据。 + * @note + * 1. 推荐您在采集到的一帧视频帧后,即调用 {@link generateCustomPTS} 接口获取该帧的 timestamp 数值,这样可以获得最佳的音画同步效果。 + * 2. SDK 最终编码出的视频帧率并不是由您调用本接口的频率决定的,而是由您在 {@link setVideoEncoderParam} 中所设置的 FPS 决定的。 + * 3. 请尽量保持本接口的调用间隔是均匀的,否则会导致编码器输出帧率不稳或者音画不同步等问题。 + * 4. iOS 和 Mac平台目前支持传入 {@link TRTCVideoPixelFormat_I420} 或 {@link TRTCVideoPixelFormat_BGRA32} 格式的视频帧。 + * 5. 
Windows 和 Android 平台目前仅支持传入 {@link TRTCVideoPixelFormat_I420} 格式的视频帧。 + */ + virtual void sendCustomVideoData(TRTCVideoStreamType streamType, TRTCVideoFrame* frame) = 0; + + /** + * 10.3 启用音频自定义采集模式 + * + * 开启该模式后,SDK 不在运行原有的音频采集流程,即不再继续从麦克风采集音频数据,而是只保留音频编码和发送能力。 + * 您需要通过 {@link sendCustomAudioData} 不断地向 SDK 塞入自己采集的音频数据。 + * @param enable 是否启用,默认值:false。 + * @note 由于回声抵消(AEC)需要严格的控制声音采集和播放的时间,所以开启自定义音频采集后,AEC 能力可能会失效。 + */ + virtual void enableCustomAudioCapture(bool enable) = 0; + + /** + * 10.4 向 SDK 投送自己采集的音频数据 + * + * 参数 {@link TRTCAudioFrame} 推荐下列填写方式(其他字段不需要填写): + * - audioFormat:音频数据格式,仅支持 TRTCAudioFrameFormatPCM。 + * - data:音频帧 buffer。音频帧数据只支持 PCM 格式,支持[5ms ~ 100ms]帧长,推荐使用 20ms 帧长,长度计算方法:【48000采样率、单声道的帧长度:48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 + * - sampleRate:采样率,支持:16000、24000、32000、44100、48000。 + * - channel:声道数(如果是立体声,数据是交叉的),单声道:1; 双声道:2。 + * - timestamp:时间戳,单位为毫秒(ms),请使用音频帧在采集时被记录下来的时间戳(可以在采集到一帧音频帧之后,通过调用 {@link generateCustomPTS} 获取时间戳)。 + * + * 参考文档:[自定义采集和渲染](https://cloud.tencent.com/document/product/647/34066)。 + * @param frame 音频数据 + * @note 请您精准地按每帧时长的间隔调用本接口,数据投送间隔不均匀时极易触发声音卡顿。 + */ + virtual void sendCustomAudioData(TRTCAudioFrame* frame) = 0; + + /** + * 10.5 启用/关闭自定义音轨 + * + * 开启后,您可以通过本接口向 SDK 混入一条自定义的音轨。通过两个布尔型参数,您可以控制该音轨是否要在远端和本地播放。 + * @param enablePublish 控制混入的音轨是否要在远端播放,默认值:false。 + * @param enablePlayout 控制混入的音轨是否要在本地播放,默认值:false。 + * @note 如果您指定参数 enablePublish 和 enablePlayout 均为 false,代表完全关闭您的自定义音轨。 + */ + virtual void enableMixExternalAudioFrame(bool enablePublish, bool enablePlayout) = 0; + + /** + * 10.6 向 SDK 混入自定义音轨 + * + * 调用该接口之前,您需要先通过 {@link enableMixExternalAudioFrame} 开启自定义音轨,之后就可以通过本接口将自己的音轨以 PCM 格式混入到 SDK 中。 + * 理想情况下,我们期望您的代码能够以非常均匀的速度向 SDK 提供音轨数据。但我们也非常清楚,完美的调用间隔是一个巨大的挑战。 + * 所以 SDK 内部会开启一个音轨数据的缓冲区,该缓冲区的作用类似一个“蓄水池”,它能够暂存您传入的音轨数据,平抑由于接口调用间隔的抖动问题。 + * 本接口的返回值代表这个音轨缓冲区的大小,单位是毫秒(ms),比如:如果该接口返回 50,则代表当前的音轨缓冲区有 50ms 的音轨数据。因此只要您在 50ms 内再次调用本接口,SDK 就能保证您混入的音轨数据是连续的。 + * 当您调用该接口后,如果发现返回值 > 
100ms,则可以等待一帧音频帧的播放时间之后再次调用;如果返回值 < 100ms,则代表缓冲区比较小,您可以再次混入一些音轨数据以确保音轨缓冲区的大小维持在“安全水位”以上。 + * 参数 {@link TRTCAudioFrame} 推荐下列填写方式(其他字段不需要填写): + * - data:音频帧 buffer。音频帧数据只支持 PCM 格式,支持[5ms ~ 100ms]帧长,推荐使用 20ms 帧长,长度计算方法:【48000采样率、单声道的帧长度:48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 + * - sampleRate:采样率,支持:16000、24000、32000、44100、48000。 + * - channel:声道数(如果是立体声,数据是交叉的),单声道:1; 双声道:2。 + * - timestamp:时间戳,单位为毫秒(ms),请使用音频帧在采集时被记录下来的时间戳(可以在获得一帧音频帧之后,通过调用 {@link generateCustomPTS} 获得时间戳)。 + * + * @param frame 音频数据 + * + * @return >= 0 缓冲的长度,单位:ms。< 0 错误(-1 未启用 mixExternalAudioFrame) + * + * @note 请您精准地按每帧时长的间隔调用本接口,数据投送间隔不均匀时极易触发声音卡顿。 + */ + virtual int mixExternalAudioFrame(TRTCAudioFrame* frame) = 0; + + /** + * 10.7 设置推流时混入外部音频的推流音量和播放音量 + * + * @param publishVolume 设置的推流音量大小,范围0 - 100, -1表示不改变 + * @param playoutVolume 设置的播放音量大小,范围0 - 100, -1表示不改变 + */ + virtual void setMixExternalAudioVolume(int publishVolume, int playoutVolume) = 0; + + /** + * 10.8 生成自定义采集时的时间戳 + * + * 本接口仅适用于自定义采集模式,用于解决音视频帧的采集时间(capture time)和投送时间(send time)不一致所导致的音画不同步问题。 + * 当您通过 {@link sendCustomVideoData} 或 {@link sendCustomAudioData} 等接口进行自定义视频或音频采集时,请按照如下操作使用该接口: + * 1. 首先,在采集到一帧视频或音频帧时,通过调用本接口获得当时的 PTS 时间戳。 + * 2. 之后可以将该视频或音频帧送入您使用的前处理模块(如第三方美颜组件,或第三方音效组件)。 + * 3. 
在真正调用 {@link sendCustomVideoData} 或 {@link sendCustomAudioData} 进行投送时,请将该帧在采集时记录的 PTS 时间戳赋值给 {@link TRTCVideoFrame} 或 {@link TRTCAudioFrame} 中的 timestamp 字段。 + * + * @return 时间戳(单位:ms) + */ + virtual uint64_t generateCustomPTS() = 0; + + /** + * 10.9 设置第三方美颜的视频数据回调 + * + * 设置该回调之后,SDK 会把采集到的视频帧通过您设置的 callback 回调出来,用于第三方美颜组件进行二次处理,之后 SDK 会将处理后的视频帧进行编码和发送。 + * @param pixelFormat 指定回调的像素格式,出于数据处理效率的考虑,目前仅支持 OpenGL 纹理格式数据。 + * @param bufferType 指定视频数据结构类型,出于数据处理效率的考虑,目前仅支持 OpenGL 纹理格式数据。 + * @param callback 自定义渲染回调,详见 {@link ITRTCVideoFrameCallback} + * @return 0:成功;<0:错误 + */ + virtual int setLocalVideoProcessCallback(TRTCVideoPixelFormat pixelFormat, TRTCVideoBufferType bufferType, ITRTCVideoFrameCallback* callback) = 0; + + /** + * 10.10 设置本地视频自定义渲染回调 + * + * 设置该回调之后,SDK 内部会跳过原来的渲染流程,并把采集到的数据回调出来,您需要自己完成画面渲染。 + * - 您可以通过调用 setLocalVideoRenderCallback(TRTCVideoPixelFormat_Unknown, TRTCVideoBufferType_Unknown, nullptr) 停止回调。 + * - iOS、Mac、Windows 平台目前仅支持回调 {@link TRTCVideoPixelFormat_I420} 或 {@link TRTCVideoPixelFormat_BGRA32} 像素格式的视频帧。 + * - Android 平台目前仅支持传入 {@link TRTCVideoPixelFormat_I420} 像素格式的视频帧。 + * + * @param pixelFormat 指定回调的像素格式 + * @param bufferType 指定视频数据结构类型,目前只支持 {@link TRTCVideoBufferType_Buffer} + * @param callback 自定义渲染回调 + * @return 0:成功;<0:错误 + */ + virtual int setLocalVideoRenderCallback(TRTCVideoPixelFormat pixelFormat, TRTCVideoBufferType bufferType, ITRTCVideoRenderCallback* callback) = 0; + + /** + * 10.11 设置远端视频自定义渲染回调 + * + * 设置该回调之后,SDK 内部会跳过原来的渲染流程,并把采集到的数据回调出来,您需要自己完成画面渲染。 + * - 您可以通过调用 setLocalVideoRenderCallback(TRTCVideoPixelFormat_Unknown, TRTCVideoBufferType_Unknown, nullptr) 停止回调。 + * - iOS、Mac、Windows 平台目前仅支持回调 {@link TRTCVideoPixelFormat_I420} 或 {@link TRTCVideoPixelFormat_BGRA32} 像素格式的视频帧。 + * - Android 平台目前仅支持传入 {@link TRTCVideoPixelFormat_I420} 像素格式的视频帧。 + * + * @note 实际使用时,需要先调用 startRemoteView(userid, nullptr) 来获取远端用户的视频流(view 设置为 nullptr 即可),否则不会有数据回调出来。 + * @param userId 远端用户id + * @param pixelFormat 指定回调的像素格式 + * @param 
bufferType 指定视频数据结构类型,目前只支持 {@link TRTCVideoBufferType_Buffer} + * @param callback 自定义渲染回调 + * @return 0:成功;<0:错误 + */ + virtual int setRemoteVideoRenderCallback(const char* userId, TRTCVideoPixelFormat pixelFormat, TRTCVideoBufferType bufferType, ITRTCVideoRenderCallback* callback) = 0; + + /** + * 10.12 设置音频数据自定义回调 + * + * 设置该回调之后,SDK 内部会把音频数据(PCM 格式)回调出来,包括: + * - {@link onCapturedRawAudioFrame}:本地麦克风采集到的原始音频数据回调 + * - {@link onLocalProcessedAudioFrame}:本地采集并经过音频模块前处理后的音频数据回调 + * - {@link onRemoteUserAudioFrame}:混音前的每一路远程用户的音频数据 + * - {@link onMixedPlayAudioFrame}:将各路音频混合之后并最终要由系统播放出的音频数据回调 + * + * @note 设置回调为空即代表停止自定义音频回调,反之,设置回调不为空则代表启动自定义音频回调。 + */ + virtual int setAudioFrameCallback(ITRTCAudioFrameCallback* callback) = 0; + + /** + * 10.13 设置本地麦克风采集出的原始音频帧回调格式 + * + * 本接口用于设置 {@link onCapturedRawAudioFrame} 回调出来的 AudioFrame 的格式: + * - sampleRate:采样率,支持:16000、32000、44100、48000。 + * - channel:声道数(如果是立体声,数据是交叉的),单声道:1; 双声道:2。 + * - samplesPerCall:采样点数,定义回调数据帧长。帧长必须为 10ms 的整数倍。 + * + * 如果希望用毫秒数计算回调帧长,则将毫秒数转换成采样点数的公式为:采样点数 = 毫秒数 * 采样率 / 1000; + * 举例:48000 采样率希望回调 20ms 帧长的数据,则采样点数应该填: 960 = 20 * 48000 / 1000; + * 注意,最终回调的帧长度是以字节为单位,采样点数转换成字节数的计算公式为:字节数 = 采样点数 * channel * 2(位宽) + * 举例:48000 采样率,双声道,20ms 帧长,采样点数为 960,字节数为 3840 = 960 * 2 * 2 + * @param format 音频数据回调格式。 + * @return 0:成功;<0:错误 + */ + virtual int setCapturedRawAudioFrameCallbackFormat(TRTCAudioFrameCallbackFormat* format) = 0; + + /** + * 10.14 设置经过前处理后的本地音频帧回调格式 + * + * 本接口用于设置 {@link onLocalProcessedAudioFrame} 回调出来的 AudioFrame 的格式: + * - sampleRate:采样率,支持:16000、32000、44100、48000。 + * - channel:声道数(如果是立体声,数据是交叉的),单声道:1; 双声道:2。 + * - samplesPerCall:采样点数,定义回调数据帧长。帧长必须为 10ms 的整数倍。 + * + * 如果希望用毫秒数计算回调帧长,则将毫秒数转换成采样点数的公式为:采样点数 = 毫秒数 * 采样率 / 1000; + * 举例:48000 采样率希望回调20ms帧长的数据,则采样点数应该填: 960 = 20 * 48000 / 1000; + * 注意,最终回调的帧长度是以字节为单位,采样点数转换成字节数的计算公式为:字节数 = 采样点数 * channel * 2(位宽) + * 举例:48000 采样率,双声道,20ms 帧长,采样点数为 960,字节数为 3840 = 960 * 2 * 2 + * + * @param format 音频数据回调格式。 + * @return 0:成功;<0:错误 + */ + 
virtual int setLocalProcessedAudioFrameCallbackFormat(TRTCAudioFrameCallbackFormat* format) = 0; + + /** + * 10.15 设置最终要由系统播放出的音频帧回调格式 + * + * 本接口用于设置 {@link onMixedPlayAudioFrame} 回调出来的 AudioFrame 的格式: + * - sampleRate:采样率,支持:16000、32000、44100、48000。 + * - channel:声道数(如果是立体声,数据是交叉的),单声道:1; 双声道:2。 + * - samplesPerCall:采样点数,定义回调数据帧长。帧长必须为 10ms 的整数倍。 + * + * 如果希望用毫秒数计算回调帧长,则将毫秒数转换成采样点数的公式为:采样点数 = 毫秒数 * 采样率 / 1000; + * 举例:48000 采样率希望回调20ms帧长的数据,则采样点数应该填: 960 = 20 * 48000 / 1000; + * 注意,最终回调的帧长度是以字节为单位,采样点数转换成字节数的计算公式为:字节数 = 采样点数 * channel * 2(位宽) + * 举例:48000 采样率,双声道,20ms 帧长,采样点数为 960,字节数为 3840 = 960 * 2 * 2 + * @param format 音频数据回调格式。 + * @return 0:成功;<0:错误 + */ + virtual int setMixedPlayAudioFrameCallbackFormat(TRTCAudioFrameCallbackFormat* format) = 0; + + /** + * 10.16 开启音频自定义播放 + * + * 如果您需要外接一些特定的音频设备,或者希望自己掌控音频的播放逻辑,您可以通过该接口启用音频自定义播放。 + * 启用音频自定义播放后,SDK 将不再调用系统的音频接口播放数据,您需要通过 {@link getCustomAudioRenderingFrame} 获取 SDK 要播放的音频帧并自行播放。 + * @param enable 是否启用音频自定义播放,默认为关闭状态。 + * @note 需要您在进入房间前设置才能生效,暂不支持进入房间后再设置。 + */ + virtual void enableCustomAudioRendering(bool enable) = 0; + + /** + * 10.17 获取可播放的音频数据 + * + * 调用该接口之前,您需要先通过 {@link enableCustomAudioRendering} 开启音频自定义播放。 + * 参数 {@link TRTCAudioFrame} 推荐下列填写方式(其他字段不需要填写): + * - sampleRate:采样率,必填,支持 16000、24000、32000、44100、48000。 + * - channel:声道数,必填,单声道请填1,双声道请填2,双声道时数据是交叉的。 + * - data:用于获取音频数据的 buffer。需要您根据一帧音频帧的帧长度分配好 data 的内存大小。 + * 获取的 PCM 数据支持 10ms 或 20ms 两种帧长,推荐使用 20ms 的帧长。 + * 计算公式为:48000采样率、单声道、且播放时长为 20ms 的一帧音频帧的 buffer 大小为 48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节。 + * + * @param audioFrame 音频数据帧。 + * @note + * 1. 参数 audioFrame 中的 sampleRate、channel 需提前设置好,同时分配好所需读取帧长的 data 空间。 + * 2. SDK 内部会根据 sampleRate 和 channel 自动填充 data 数据。 + * 3. 
建议由系统的音频播放线程直接驱动该函数的调用,在播放完一帧音频之后,即调用该接口获取下一帧可播放的音频数据。 + * + */ + virtual void getCustomAudioRenderingFrame(TRTCAudioFrame* audioFrame) = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // 自定义消息发送接口 + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name 自定义消息发送接口 + /// @{ + + /** + * 11.1 使用 UDP 通道发送自定义消息给房间内所有用户 + * + * 该接口可以让您借助 TRTC 的 UDP 通道,向当前房间里的其他用户广播自定义数据,已达到传输信令的目的。 + * TRTC 中的 UDP 通道原本设计用来传输音视频数据的,该接口的原理是将您要发送的信令伪装成音视频数据包,与原本要发送的音视频数据一并发送出去。 + * 房间中的其他用户可以通过 {@link TRTCCloudDelegate} 中的 onRecvCustomCmdMsg 回调接收消息。 + * @param cmdID 消息 ID,取值范围为1 - 10。 + * @param data 待发送的消息,单个消息的最大长度被限制为 1KB。 + * @param reliable 是否可靠发送,可靠发送可以获得更高的发送成功率,但可靠发送比不可靠发送会带来更大的接收延迟。 + * @param ordered 是否要求有序,即是否要求接收端的数据包顺序和发送端的数据包顺序一致(这会带来一定的接收延时)。 + * @return true:消息已经发出;false:消息发送失败。 + * @note + * 1. 发送消息到房间内所有用户(暂时不支持 Web/小程序端),每秒最多能发送30条消息。 + * 2. 每个包最大为 1KB,超过则很有可能会被中间路由器或者服务器丢弃。 + * 3. 每个客户端每秒最多能发送总计 8KB 数据。 + * 4. 请将 reliable 和 ordered 同时设置为 true 或同时设置为 false,暂不支持交叉设置。 + * 5. 强烈建议您将不同类型的消息设定为不同的 cmdID,这样可以在要求有序的情况下减小消息时延。 + */ + virtual bool sendCustomCmdMsg(uint32_t cmdId, const uint8_t* data, uint32_t dataSize, bool reliable, bool ordered) = 0; + + /** + * 11.2 使用 SEI 通道发送自定义消息给房间内所有用户 + * + * 该接口可以让您借助 TRTC 的 SEI 通道,向当前房间里的其他用户广播自定义数据,已达到传输信令的目的。 + * 视频帧的头部有一个叫做 SEI 的头部数据块,该接口的原理就是利用这个被称为 SEI 的头部数据块,将您要发送的自定义信令嵌入其中,使其同视频帧一并发送出去。 + * 因此,与 {@link sendCustomCmdMsg} 相比,SEI 通道传输的信令具有更好的兼容性:信令可以伴随着视频帧一直传输到直播 CDN 上。 + * 不过,由于视频帧头部的数据块不能太大,建议您使用该接口时,尽量将信令控制在几个字节的大小。 + * 最常见的用法是把自定义的时间戳(timestamp)用本接口嵌入视频帧中,实现消息和画面的完美对齐(比如:教育场景下的课件和视频信号的对齐)。 + * 房间中的其他用户可以通过 {@link TRTCCloudDelegate} 中的 onRecvSEIMsg 回调接收消息。 + * @param data 待发送的数据,最大支持 1KB(1000字节)的数据大小 + * @param repeatCount 发送数据次数 + * @return true:消息已通过限制,等待后续视频帧发送;false:消息被限制发送 + * @note 本接口有以下限制: + * 1. 数据在接口调用完后不会被即时发送出去,而是从下一帧视频帧开始带在视频帧中发送。 + * 2. 发送消息到房间内所有用户,每秒最多能发送 30 条消息(与 sendCustomCmdMsg 共享限制)。 + * 3. 
每个包最大为 1KB,若发送大量数据,会导致视频码率增大,可能导致视频画质下降甚至卡顿(与 sendCustomCmdMsg 共享限制)。 + * 4. 每个客户端每秒最多能发送总计8KB数据(与 sendCustomCmdMsg 共享限制)。 + * 5. 若指定多次发送(repeatCount > 1),则数据会被带在后续的连续 repeatCount 个视频帧中发送出去,同样会导致视频码率增大。 + * 6. 如果 repeatCount > 1,多次发送,接收消息 onRecvSEIMsg 回调也可能会收到多次相同的消息,需要去重。 + */ + virtual bool sendSEIMsg(const uint8_t* data, uint32_t dataSize, int32_t repeatCount) = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // 网络测试接口 + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name 网络测试接口 + /// @{ + + /** + * 12.1 开始进行网速测试(进入房间前使用) + * + * @param params 测速选项 + * @return 接口调用结果,< 0:失败 + * @note + * 1. 测速过程将产生少量的基础服务费用,详见 [计费概述 > 基础服务](https://cloud.tencent.com/document/product/647/17157#.E5.9F.BA.E7.A1.80.E6.9C.8D.E5.8A.A1) 文档说明。 + * 2. 请在进入房间前进行网速测试,在房间中网速测试会影响正常的音视频传输效果,而且由于干扰过多,网速测试结果也不准确。 + * 3. 同一时间只允许一项网速测试任务运行。 + */ + virtual int startSpeedTest(const TRTCSpeedTestParams& params) = 0; + + /** + * 12.2 停止网络测速 + */ + virtual void stopSpeedTest() = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // 调试相关接口 + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name 调试相关接口 + /// @{ + + /** + * 13.1 获取 SDK 版本信息 + */ + virtual const char* getSDKVersion() = 0; + + /** + * 13.2 设置 Log 输出级别 + * + * @param level 参见 {@link TRTCLogLevel},默认值:{@link TRTCLogLevelNone} + */ + virtual void setLogLevel(TRTCLogLevel level) = 0; + + /** + * 13.3 启用/禁用控制台日志打印 + * + * @param enabled 指定是否启用,默认:禁止状态 + */ + virtual void setConsoleEnabled(bool enabled) = 0; + + /** + * 13.4 启用/禁用日志的本地压缩 + * + * 开启压缩后,Log 存储体积明显减小,但需要腾讯云提供的 Python 脚本解压后才能阅读。 + * 禁用压缩后,Log 采用明文存储,可以直接用记事本打开阅读,但占用空间较大。 + * @param enabled 指定是否启用,默认为启动状态 + */ + virtual void setLogCompressEnabled(bool enabled) = 0; + + /** + * 13.5 设置本地日志的保存路径 + * + * 通过该接口您可以更改 SDK 本地日志的默认存储路径,SDK 默认的本地日志的存储位置: + * - Windows 平台:在 
C:/Users/[系统用户名]/AppData/Roaming/liteav/log,即 %appdata%/liteav/log 下。 + * - iOS 或 Mac 平台:在 sandbox Documents/log 下。 + * - Android 平台:在 /app私有目录/files/log/liteav/ 下。 + * + * @note 请务必在所有其他接口之前调用,并且保证您指定的目录是存在的,并且您的应用程序拥有对该目录的读写权限。 + * @param path 存储日志的路径 + */ + virtual void setLogDirPath(const char* path) = 0; + + /** + * 13.6 设置日志回调 + */ + virtual void setLogCallback(ITRTCLogCallback* callback) = 0; + + /** + * 13.7 显示仪表盘 + * + * “仪表盘”是位于视频渲染控件之上的一个半透明的调试信息浮层,用于展示音视频信息和事件信息,便于对接和调试。 + * @param showType 0:不显示;1:显示精简版(仅显示音视频信息);2:显示完整版(包含音视频信息和事件信息)。 + */ + virtual void showDebugView(int showType) = 0; + +/** + * 13.9 调用实验性接口 + */ +#ifdef _WIN32 + virtual const char* callExperimentalAPI(const char* jsonStr) = 0; +#else + virtual void callExperimentalAPI(const char* jsonStr) = 0; +#endif + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// 弃用接口(建议使用对应的新接口) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 弃用接口(建议使用对应的新接口) +/// @{ + +/** + * 启用视频自定义采集模式 + * + * @deprecated v8.5 版本开始不推荐使用,建议使用 {@link enableCustomVideoCapture}(streamType, enable) 接口替代之。 + */ +#ifndef _WIN32 + virtual void enableCustomVideoCapture(bool enable) = 0; +#endif + +/** + * 投送自己采集的视频数据 + * + * @deprecated v8.5 版本开始不推荐使用,建议使用 {@link sendCustomVideoData}(streamType, TRTCVideoFrame) 接口替代之。 + */ +#ifndef _WIN32 + virtual void sendCustomVideoData(TRTCVideoFrame* frame) = 0; +#endif + +/** + * 暂停/恢复发布本地的视频流 + * + * @deprecated v8.9 版本开始不推荐使用,建议使用 {@link muteLocalVideo}(streamType, mute) 接口替代之。 + */ +#ifndef _WIN32 + virtual void muteLocalVideo(bool mute) = 0; +#endif + +/** + * 暂停 / 恢复订阅远端用户的视频流 + * + * @deprecated v8.9 版本开始不推荐使用,建议使用 {@link muteRemoteVideoStream}(userId, streamType, mute) 接口替代之。 + */ +#ifndef _WIN32 + virtual void muteRemoteVideoStream(const char* userId, bool mute) = 0; +#endif + +/** + * 开始进行网络测速(进入房间前使用) + * + * @deprecated v9.2 版本开始不推荐使用,建议使用 {@link startSpeedTest}(params) 接口替代之。 + */ 
+#ifdef __APPLE__ + virtual void startSpeedTest(uint32_t sdkAppId, const char* userId, const char* userSig) __attribute__((deprecated("use startSpeedTest:params instead"))) = 0; +#elif !defined(_WIN32) + virtual void startSpeedTest(uint32_t sdkAppId, const char* userId, const char* userSig) = 0; +#endif + +#ifdef _WIN32 + using IDeprecatedTRTCCloud::enableCustomVideoCapture; + using IDeprecatedTRTCCloud::muteLocalVideo; + using IDeprecatedTRTCCloud::muteRemoteVideoStream; + using IDeprecatedTRTCCloud::selectScreenCaptureTarget; + using IDeprecatedTRTCCloud::sendCustomVideoData; + using IDeprecatedTRTCCloud::startLocalAudio; + using IDeprecatedTRTCCloud::startRemoteView; + using IDeprecatedTRTCCloud::startScreenCapture; + using IDeprecatedTRTCCloud::startSpeedTest; + using IDeprecatedTRTCCloud::stopRemoteView; +#endif + /// @} +}; +} // namespace liteav +/// @} + +#endif /* __ITRTCCLOUD_H__ */ diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITRTCStatistics.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITRTCStatistics.h new file mode 100644 index 0000000..af249af --- /dev/null +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITRTCStatistics.h @@ -0,0 +1,225 @@ +/** + * Module: TRTC 音视频统计指标(只读) + * Function: TRTC SDK 会以两秒钟一次的频率向您汇报当前实时的音视频指标(帧率、码率、卡顿情况等) + */ +/// @defgroup TRTCStatistic_cplusplus TRTCStatisic +/// Tencent Cloud TRTC :audio, video and network related statistical indicators +/// @{ + +#ifndef __TRTCSTATISTIC_H__ +#define __TRTCSTATISTIC_H__ +namespace liteav { + +///////////////////////////////////////////////////////////////////////////////// +// +// 本地的音视频统计指标 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 本地的音视频统计指标 +/// @{ + +/** + * 本地的音视频统计指标 + */ +struct TRTCLocalStatistics { + ///【字段含义】本地视频的宽度,单位 px + uint32_t width; + + ///【字段含义】本地视频的高度,单位 px + uint32_t height; + + ///【字段含义】本地视频的帧率,即每秒钟会有多少视频帧,单位:FPS + uint32_t frameRate; + + 
///【字段含义】远端视频的码率,即每秒钟新产生视频数据的多少,单位 Kbps + uint32_t videoBitrate; + + ///【字段含义】远端音频的采样率,单位 Hz + uint32_t audioSampleRate; + + ///【字段含义】本地音频的码率,即每秒钟新产生音频数据的多少,单位 Kbps + uint32_t audioBitrate; + + ///【字段含义】视频流类型(高清大画面|低清小画面|辅流画面) + TRTCVideoStreamType streamType; + + ///【字段含义】音频设备采集状态(用于检测音频外设的健康度) + /// 0:采集设备状态正常;1:检测到长时间静音;2:检测到破音;3:检测到声音异常间断。 + uint32_t audioCaptureState; + + TRTCLocalStatistics() : width(0), height(0), frameRate(0), videoBitrate(0), audioSampleRate(0), audioBitrate(0), streamType(TRTCVideoStreamTypeBig), audioCaptureState(0) { + } +}; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// 远端的音视频统计指标 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 远端的音视频统计指标 +/// @{ + +/** + * 远端的音视频统计指标 + */ +struct TRTCRemoteStatistics { + ///【字段含义】用户 ID + const char* userId; + + ///【字段含义】音频流的总丢包率(%) + /// audioPacketLoss 代表音频流历经“主播 => 云端 => 观众”这样一条完整的传输链路后,最终在观众端统计到的丢包率。 + /// audioPacketLoss 越小越好,丢包率为0即表示该路音频流的所有数据均已经完整地到达了观众端。 + ///如果出现了 downLoss == 0 但 audioPacketLoss != 0 的情况,说明该路音频流在“云端=>观众”这一段链路上没有出现丢包,但是在“主播=>云端”这一段链路上出现了不可恢复的丢包。 + uint32_t audioPacketLoss; + + ///【字段含义】该路视频流的总丢包率(%) + /// videoPacketLoss 代表该路视频流历经“主播 => 云端 => 观众”这样一条完整的传输链路后,最终在观众端统计到的丢包率。 + /// videoPacketLoss 越小越好,丢包率为0即表示该路视频流的所有数据均已经完整地到达了观众端。 + ///如果出现了 downLoss == 0 但 videoPacketLoss != 0 的情况,说明该路视频流在“云端=>观众”这一段链路上没有出现丢包,但是在“主播=>云端”这一段链路上出现了不可恢复的丢包。 + uint32_t videoPacketLoss; + + ///【字段含义】远端视频的宽度,单位 px + uint32_t width; + + ///【字段含义】远端视频的高度,单位 px + uint32_t height; + + ///【字段含义】远端视频的帧率,单位:FPS + uint32_t frameRate; + + ///【字段含义】远端视频的码率,单位 Kbps + uint32_t videoBitrate; + + ///【字段含义】本地音频的采样率,单位 Hz + uint32_t audioSampleRate; + + ///【字段含义】本地音频的码率,单位 Kbps + uint32_t audioBitrate; + + ///【字段含义】播放延迟,单位 ms + ///为了避免网络抖动和网络包乱序导致的声音和画面卡顿,TRTC 会在播放端管理一个播放缓冲区,用于对接收到的网络数据包进行整理, + ///该缓冲区的大小会根据当前的网络质量进行自适应调整,该缓冲区的大小折算成以毫秒为单位的时间长度,也就是 jitterBufferDelay。 + uint32_t jitterBufferDelay; + + 
///【字段含义】端到端延迟,单位 ms + /// point2PointDelay 代表 “主播=>云端=>观众” 的延迟,更准确地说,它代表了“采集=>编码=>网络传输=>接收=>缓冲=>解码=>播放” 全链路的延迟。 + /// point2PointDelay 需要本地和远端的 SDK 均为 8.5 及以上的版本才生效,若远端用户为 8.5 以前的版本,此数值会一直为0,代表无意义。 + uint32_t point2PointDelay; + + ///【字段含义】音频播放的累计卡顿时长,单位 ms + uint32_t audioTotalBlockTime; + + ///【字段含义】音频播放卡顿率,单位 (%) + ///音频播放卡顿率(audioBlockRate) = 音频播放的累计卡顿时长(audioTotalBlockTime) / 音频播放的总时长 + uint32_t audioBlockRate; + + ///【字段含义】视频播放的累计卡顿时长,单位 ms + uint32_t videoTotalBlockTime; + + ///【字段含义】视频播放卡顿率,单位 (%) + ///视频播放卡顿率(videoBlockRate) = 视频播放的累计卡顿时长(videoTotalBlockTime) / 视频播放的总时长 + uint32_t videoBlockRate; + + ///【字段含义】该路音视频流的总丢包率(%) + ///已废弃,不推荐使用;建议使用 audioPacketLoss、videoPacketLoss 替代 + uint32_t finalLoss; + + ///【字段含义】视频流类型(高清大画面|低清小画面|辅流画面) + TRTCVideoStreamType streamType; + + TRTCRemoteStatistics() + : userId(nullptr), + audioPacketLoss(0), + videoPacketLoss(0), + width(0), + height(0), + frameRate(0), + videoBitrate(0), + audioSampleRate(0), + audioBitrate(0), + jitterBufferDelay(0), + point2PointDelay(0), + audioTotalBlockTime(0), + audioBlockRate(0), + videoTotalBlockTime(0), + videoBlockRate(0), + finalLoss(0), + streamType(TRTCVideoStreamTypeBig) { + } +}; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// 网络和性能的汇总统计指标 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 网络和性能的汇总统计指标 +/// @{ + +/** + * 网络和性能的汇总统计指标 + */ +struct TRTCStatistics { + ///【字段含义】当前应用的 CPU 使用率,单位 (%),Android 8.0 以上不支持 + uint32_t appCpu; + + ///【字段含义】当前系统的 CPU 使用率,单位 (%),Android 8.0 以上不支持 + uint32_t systemCpu; + + ///【字段含义】从 SDK 到云端的上行丢包率,单位 (%) + ///该数值越小越好,如果 upLoss 为 0%,则意味着上行链路的网络质量很好,上传到云端的数据包基本不发生丢失。 + ///如果 upLoss 为 30%,则意味着 SDK 向云端发送的音视频数据包中,会有 30% 丢失在传输链路中。 + uint32_t upLoss; + + ///【字段含义】从云端到 SDK 的下行丢包率,单位 (%) + ///该数值越小越好,如果 downLoss 为 0%,则意味着下行链路的网络质量很好,从云端接收的数据包基本不发生丢失。 + ///如果 downLoss 为 30%,则意味着云端向 SDK 传输的音视频数据包中,会有 30% 丢失在传输链路中。 + uint32_t downLoss; + + ///【字段含义】从 SDK 
到云端的往返延时,单位 ms + ///该数值代表从 SDK 发送一个网络包到云端,再从云端回送一个网络包到 SDK 的总计耗时,也就是一个网络包经历 “SDK=>云端=>SDK” 的总耗时。 + ///该数值越小越好:如果 rtt < 50ms,意味着较低的音视频通话延迟;如果 rtt > 200ms,则意味着较高的音视频通话延迟。 + ///需要特别解释的是,rtt 代表 “SDK=>云端=>SDK” 的总耗时,所不需要区分 upRtt 和 downRtt。 + uint32_t rtt; + + ///【字段含义】从 SDK 到本地路由器的往返时延,单位 ms + ///该数值代表从 SDK 发送一个网络包到本地路由器网关,再从网关回送一个网络包到 SDK 的总计耗时,也就是一个网络包经历 “SDK=>网关=>SDK” 的总耗时。 + ///该数值越小越好:如果 gatewayRtt < 50ms,意味着较低的音视频通话延迟;如果 gatewayRtt > 200ms,则意味着较高的音视频通话延迟。 + ///当网络类型为蜂窝网时,该值无效。 + uint32_t gatewayRtt; + + ///【字段含义】总发送字节数(包含信令数据和音视频数据),单位:字节数(Bytes) + uint32_t sentBytes; + + ///【字段含义】总接收字节数(包含信令数据和音视频数据),单位:字节数(Bytes) + uint32_t receivedBytes; + + ///【字段含义】本地的音视频统计信息 + ///由于本地可能有三路音视频流(即高清大画面,低清小画面,以及辅流画面),因此本地的音视频统计信息是一个数组。 + TRTCLocalStatistics* localStatisticsArray; + + ///【字段含义】数组 localStatisticsArray 的大小 + uint32_t localStatisticsArraySize; + + ///【字段含义】远端的音视频统计信息 + ///因为同时可能有多个远端用户,而且每个远端用户同时可能有多路音视频流(即高清大画面,低清小画面,以及辅流画面),因此远端的音视频统计信息是一个数组。 + TRTCRemoteStatistics* remoteStatisticsArray; + + ///【字段含义】数组 remoteStatisticsArray 的大小 + uint32_t remoteStatisticsArraySize; + + TRTCStatistics() : appCpu(0), systemCpu(0), upLoss(0), downLoss(0), rtt(0), gatewayRtt(0), sentBytes(0), receivedBytes(0), localStatisticsArray(nullptr), localStatisticsArraySize(0), remoteStatisticsArray(nullptr), remoteStatisticsArraySize(0) { + } +}; +/// @} + +} // namespace liteav + +#ifdef _WIN32 +using namespace liteav; +#endif + +#endif /* __TRTCSTATISTIC_H__ */ +/// @} diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITXAudioEffectManager.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITXAudioEffectManager.h index 082860d..71d1892 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITXAudioEffectManager.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITXAudioEffectManager.h @@ -1,62 +1,189 @@ +/** + * Module: TRTC 背景音乐、短音效和人声特效的管理类 + * Function: 用于对背景音乐、短音效和人声特效进行设置的管理类 + */ +/// @defgroup 
TXAudioEffectManager_cplusplus TXAudioEffectManager +/// Tencent Cloud Audio Effect Management Module +/// @{ + #ifndef __ITXAUDIOEFFECTMANAGER_H__ #define __ITXAUDIOEFFECTMANAGER_H__ -namespace trtc { +namespace liteav { -/// @defgroup ITXAudioEffectManager_cplusplus ITXAudioEffectManager -/// 腾讯云视频通话功能音乐和人声设置接口 +class ITXMusicPlayObserver; +class AudioMusicParam; + +///////////////////////////////////////////////////////////////////////////////// +// +// 音效相关的枚举值定义 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 音效相关的枚举值定义 /// @{ -enum TXVoiceReverbType -{ - TXLiveVoiceReverbType_0 = 0, ///< 关闭混响 - TXLiveVoiceReverbType_1 = 1, ///< KTV - TXLiveVoiceReverbType_2 = 2, ///< 小房间 - TXLiveVoiceReverbType_3 = 3, ///< 大会堂 - TXLiveVoiceReverbType_4 = 4, ///< 低沉 - TXLiveVoiceReverbType_5 = 5, ///< 洪亮 - TXLiveVoiceReverbType_6 = 6, ///< 金属声 - TXLiveVoiceReverbType_7 = 7, ///< 磁性 + +/** + * 1.1 混响特效 + * + * 混响特效可以作用于人声之上,通过声学算法对声音进行叠加处理,模拟出各种不同环境下的临场感受,目前支持如下几种混响效果: + * 0:关闭;1:KTV;2:小房间;3:大会堂;4:低沉;5:洪亮;6:金属声;7:磁性;8:空灵;9:录音棚;10:悠扬。 + */ +enum TXVoiceReverbType { + + ///关闭特效 + TXLiveVoiceReverbType_0 = 0, + + /// KTV + TXLiveVoiceReverbType_1 = 1, + + ///小房间 + TXLiveVoiceReverbType_2 = 2, + + ///大会堂 + TXLiveVoiceReverbType_3 = 3, + + ///低沉 + TXLiveVoiceReverbType_4 = 4, + + ///洪亮 + TXLiveVoiceReverbType_5 = 5, + + ///金属声 + TXLiveVoiceReverbType_6 = 6, + + ///磁性 + TXLiveVoiceReverbType_7 = 7, + + ///空灵 + TXLiveVoiceReverbType_8 = 8, + + ///录音棚 + TXLiveVoiceReverbType_9 = 9, + + ///悠扬 + TXLiveVoiceReverbType_10 = 10, + }; +/** + * 1.2 变声特效 + * + * 变声特效可以作用于人声之上,通过声学算法对人声进行二次处理,以获得与原始声音所不同的音色,目前支持如下几种变声特效: + * 0:关闭;1:熊孩子;2:萝莉;3:大叔;4:重金属;5:感冒;6:外语腔;7:困兽;8:肥宅;9:强电流;10:重机械;11:空灵。 + */ +enum TXVoiceChangerType { + + ///关闭 + TXVoiceChangerType_0 = 0, + + ///熊孩子 + TXVoiceChangerType_1 = 1, + + ///萝莉 + TXVoiceChangerType_2 = 2, + + ///大叔 + TXVoiceChangerType_3 = 3, + + ///重金属 + TXVoiceChangerType_4 = 4, + + ///感冒 + TXVoiceChangerType_5 = 
5, + + ///外语腔 + TXVoiceChangerType_6 = 6, + + ///困兽 + TXVoiceChangerType_7 = 7, + + ///肥宅 + TXVoiceChangerType_8 = 8, + + ///强电流 + TXVoiceChangerType_9 = 9, + + ///重机械 + TXVoiceChangerType_10 = 10, + + ///空灵 + TXVoiceChangerType_11 = 11, + +}; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// 背景音乐的播放事件回调 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 背景音乐的事件回调接口 +/// @{ + +// Playback progress block of background music class ITXMusicPlayObserver { -public: - virtual ~ITXMusicPlayObserver() {} + public: + virtual ~ITXMusicPlayObserver() { + } - /// 背景音乐开始播放 - virtual void onStart(int id,int errCode) = 0; + /** + * 背景音乐开始播放 + */ + virtual void onStart(int id, int errCode) = 0; - /// 背景音乐的播放进度 - virtual void onPlayProgress(int id,long curPtsMS,long durationMS) = 0; + /** + * 背景音乐的播放进度 + */ + virtual void onPlayProgress(int id, long curPtsMS, long durationMS) = 0; - /// 背景音乐已播放完毕 - virtual void onComplete(int id,int errCode) = 0; + /** + * 背景音乐已经播放完毕 + */ + virtual void onComplete(int id, int errCode) = 0; }; +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// 背景音乐的播放控制信息 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 背景音乐的播放控制信息 +/// @{ + +/** + * 背景音乐的播放控制信息 + * + * 该信息用于在接口 {@link startPlayMusic} 中指定背景音乐的相关信息,包括播放 ID、文件路径和循环次数等: + * 1. 如果要多次播放同一首背景音乐,请不要每次播放都分配一个新的 ID,我们推荐使用相同的 ID。 + * 2. 若您希望同时播放多首不同的音乐,请为不同的音乐分配不同的 ID 进行播放。 + * 3. 
如果使用同一个 ID 播放不同音乐,SDK 会先停止播放旧的音乐,再播放新的音乐。 + */ class AudioMusicParam { -public: - /// 【字段含义】音乐 ID - /// 【特殊说明】SDK 允许播放多路音乐,因此需要音乐 ID 进行标记,用于控制音乐的开始、停止、音量等 + public: + ///【字段含义】音乐 ID <br/> + ///【特殊说明】SDK 允许播放多路音乐,因此需要使用 ID 进行标记,用于控制音乐的开始、停止、音量等。 int id; - /// 【字段含义】音乐文件的绝对路径 + ///【字段含义】音效文件的完整路径或 URL 地址。支持的音频格式包括 MP3、AAC、M4A、WAV char* path; - /// 【字段含义】音乐循环播放的次数 - /// 【推荐取值】取值范围为0 - 任意正整数,默认值:0。0表示播放音乐一次;1表示播放音乐两次;以此类推 + ///【字段含义】音乐循环播放的次数 <br/> + ///【推荐取值】取值范围为0 - 任意正整数,默认值:0。0表示播放音乐一次;1表示播放音乐两次;以此类推 int loopCount; - /// 【字段含义】是否将音乐传到远端 - /// 【推荐取值】YES:音乐在本地播放的同时,会上行至云端,因此远端用户也能听到该音乐;NO:音乐不会上行至云端,因此只能在本地听到该音乐。默认值:NO + ///【字段含义】是否将音乐传到远端 <br/> + ///【推荐取值】true:音乐在本地播放的同时,远端用户也能听到该音乐;false:主播只能在本地听到该音乐,远端观众听不到。默认值:false。 bool publish; - /// 【字段含义】播放的是否为短音乐文件 - /// 【推荐取值】YES:需要重复播放的短音乐文件;NO:正常的音乐文件。默认值:NO + ///【字段含义】播放的是否为短音乐文件 <br/> + ///【推荐取值】true:需要重复播放的短音乐文件;false:正常的音乐文件。默认值:false bool isShortFile; - /// 【字段含义】音乐开始播放时间点,单位毫秒 + ///【字段含义】音乐开始播放时间点,单位:毫秒。 long startTimeMS; - /// 【字段含义】音乐结束播放时间点,单位毫秒,0表示播放至文件结尾。 + ///【字段含义】音乐结束播放时间点,单位毫秒,0表示播放至文件结尾。 long endTimeMS; AudioMusicParam(int id_, char* path_) { @@ -69,166 +196,197 @@ public: endTimeMS = 0; } }; +/// @} +// Definition of audio effect management module +class ITXAudioEffectManager { + protected: + ITXAudioEffectManager() { + } + virtual ~ITXAudioEffectManager() { + } -class ITXAudioEffectManager -{ -protected: - ITXAudioEffectManager() {} - virtual ~ITXAudioEffectManager() {} - -public: -///////////////////////////////////////////////////////////////////////////////// -// -// (一)人声相关特效函数 -// -///////////////////////////////////////////////////////////////////////////////// -/// @name 人声相关特效函数 -/// @{ + public: + ///////////////////////////////////////////////////////////////////////////////// + // + // 人声相关的特效接口 + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name 人声相关的特效接口 + /// @{ /** - * 1.1 设置人声的混响效果(KTV、小房间、大会堂、低沉、洪亮...) 
+ * 1.3 设置人声的混响效果 * - * @note 设置的效果在退房后会失效,如果下次进房还需要对应特效,需要调用此接口再次设置。 + * 通过该接口您可以设置人声的混响效果,具体特效请参考枚举定义{@link TXVoiceReverbType}。 + * @note 设置的效果在退出房间后会自动失效,如果下次进房还需要对应特效,需要调用此接口再次进行设置。 */ virtual void setVoiceReverbType(TXVoiceReverbType type) = 0; /** - * 1.2 设置麦克风采集人声的音量 + * 1.4 设置人声的变声特效 * - * @param volume 音量大小,100为原始音量,取值范围为0 - 150;默认值:100 + * 通过该接口您可以设置人声的变声特效,具体特效请参考枚举定义{@link TXVoiceChangeType}。 + * @note 设置的效果在退出房间后会自动失效,如果下次进房还需要对应特效,需要调用此接口再次进行设置。 + */ + virtual void setVoiceChangerType(TXVoiceChangerType type) = 0; + + /** + * 1.5 设置语音音量 * - * @note 如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 + * 该接口可以设置语音音量的大小,一般配合音乐音量的设置接口 {@link setAllMusicVolume} 协同使用,用于调谐语音和音乐在混音前各自的音量占比。 + * @param volume 音量大小,取值范围为0 - 100,默认值:100。 + * @note 如果将 volume 设置成 100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 */ virtual void setVoiceCaptureVolume(int volume) = 0; -/// @} -///////////////////////////////////////////////////////////////////////////////// -// -// (二)背景音乐特效函数 -// -///////////////////////////////////////////////////////////////////////////////// + /** + * 1.6 设置语音音调 + * + * 该接口可以设置语音音调,用于实现变调不变速的目的。 + * @param pitch 音调,取值范围为-1.0f~1.0f,默认值:0.0f。 + */ + virtual void setVoicePitch(double pitch) = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // 背景音乐的相关接口 + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name 背景音乐的相关接口 + /// @{ -/// @name 背景音乐特效函数 -/// @{ /** - * 2.1 设置背景音乐的播放进度回调接口 + * 2.0 设置背景音乐的事件回调接口 * + * 请在播放背景音乐之前使用该接口设置播放事件回调,以便感知背景音乐的播放进度。 * @param musicId 音乐 ID * @param observer 具体参考 ITXMusicPlayObserver 中定义接口 */ virtual void setMusicObserver(int musicId, ITXMusicPlayObserver* observer) = 0; /** - * 2.2 开始播放背景音乐 + * 2.1 开始播放背景音乐 * * 每个音乐都需要您指定具体的 ID,您可以通过该 ID 对音乐的开始、停止、音量等进行设置。 - * - * @note 若您想同时播放多个音乐,请分配不同的 ID 进行播放。 - * 如果使用同一个 ID 播放不同音乐,SDK 会先停止播放旧的音乐,再播放新的音乐。 * @param musicParam 音乐参数 + * @param startBlock 播放开始回调 + * @param 
progressBlock 播放进度回调 + * @param completeBlock 播放结束回调 + * @note + * 1. 如果要多次播放同一首背景音乐,请不要每次播放都分配一个新的 ID,我们推荐使用相同的 ID。 + * 2. 若您希望同时播放多首不同的音乐,请为不同的音乐分配不同的 ID 进行播放。 + * 3. 如果使用同一个 ID 播放不同音乐,SDK 会先停止播放旧的音乐,再播放新的音乐。 */ virtual void startPlayMusic(AudioMusicParam musicParam) = 0; /** - * 2.3 停止播放背景音乐 + * 2.2 停止播放背景音乐 * * @param id 音乐 ID */ virtual void stopPlayMusic(int id) = 0; /** - * 2.4 暂停播放背景音乐 + * 2.3 暂停播放背景音乐 * * @param id 音乐 ID */ virtual void pausePlayMusic(int id) = 0; /** - * 2.5 恢复播放背景音乐 + * 2.4 恢复播放背景音乐 * * @param id 音乐 ID */ virtual void resumePlayMusic(int id) = 0; /** - * 2.6 设置背景音乐的远端音量大小,即主播可以通过此接口设置远端观众能听到的背景音乐的音量大小。 + * 2.5 设置所有背景音乐的本地音量和远端音量的大小 * - * @param id 音乐 ID - * @param volume 音量大小,100为原始音量,取值范围为0 - 150;默认值:100 - * - * @note 如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 + * 该接口可以设置所有背景音乐的本地音量和远端音量。 + * - 本地音量:即主播本地可以听到的背景音乐的音量大小。 + * - 远端音量:即观众端可以听到的背景音乐的音量大小。 + * @param volume 音量大小,取值范围为0 - 100,默认值:100。 + * @note 如果将 volume 设置成 100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 */ - virtual void setMusicPublishVolume(int id, int volume) = 0; + virtual void setAllMusicVolume(int volume) = 0; /** - * 2.7 设置背景音乐的本地音量大小,即主播可以通过此接口设置主播自己本地的背景音乐的音量大小。 - * - * @param id 音乐 ID - * @param volume 音量大小,100为原始音量,取值范围为0 - 150;默认值:100 + * 2.6 设置某一首背景音乐的远端音量的大小 * - * @note 如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 + * 该接口可以细粒度地控制每一首背景音乐的远端音量,也就是观众端可听到的背景音乐的音量大小。 + * @param id 音乐 ID + * @param volume 音量大小,取值范围为0 - 100;默认值:100 + * @note 如果将 volume 设置成 100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 */ - virtual void setMusicPlayoutVolume(int id, int volume) = 0; + virtual void setMusicPublishVolume(int id, int volume) = 0; /** - * 2.8 设置全局背景音乐的本地和远端音量的大小 - * - * @param volume 音量大小,100为原始音量,取值范围为0 - 150;默认值:100 + * 2.7 设置某一首背景音乐的本地音量的大小 * - * @note 如果要将 volume 设置为大于100的数值,需要进行特殊配置,请联系技术支持。 + * 该接口可以细粒度地控制每一首背景音乐的本地音量,也就是主播本地可以听到的背景音乐的音量大小。 + * @param id 音乐 ID + * @param volume 音量大小,取值范围为0 - 100,默认值:100。 + * @note 如果将 volume 设置成 
100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 */ - virtual void setAllMusicVolume(int volume) = 0; + virtual void setMusicPlayoutVolume(int id, int volume) = 0; /** - * 2.9 调整背景音乐的音调高低 + * 2.8 调整背景音乐的音调高低 * - * @param id 音乐 ID + * @param id 音乐 ID * @param pitch 音调,默认值是0.0f,范围是:[-1 ~ 1] 之间的浮点数; */ virtual void setMusicPitch(int id, float pitch) = 0; /** - * 2.10 调整背景音乐的变速效果 + * 2.9 调整背景音乐的变速效果 * - * @param id 音乐 ID + * @param id 音乐 ID * @param speedRate 速度,默认值是1.0f,范围是:[0.5 ~ 2] 之间的浮点数; */ virtual void setMusicSpeedRate(int id, float speedRate) = 0; /** - * 2.11 获取背景音乐当前的播放进度(单位:毫秒) + * 2.10 获取背景音乐的播放进度(单位:毫秒) * - * @param id 音乐 ID + * @param id 音乐 ID * @return 成功返回当前播放时间,单位:毫秒,失败返回-1 */ virtual long getMusicCurrentPosInMS(int id) = 0; /** - * 2.12 设置背景音乐的播放进度(单位:毫秒) - * - * @note 请尽量避免频繁地调用该接口,因为该接口可能会再次读写音乐文件,耗时稍高。 - * 当配合进度条使用时,请在进度条拖动完毕的回调中调用,而避免在拖动过程中实时调用。 + * 2.11 获取背景音乐的总时长(单位:毫秒) * - * @param id 音乐 ID - * @param pts 单位: 毫秒 + * @param path 音乐文件路径。 + * @return 成功返回时长,失败返回-1 */ - virtual void seekMusicToPosInTime(int id, int pts) = 0; + virtual long getMusicDurationInMS(char* path) = 0; /** - * 2.13 获取景音乐文件的总时长(单位:毫秒) + * 2.12 设置背景音乐的播放进度(单位:毫秒) * - * @param path 音乐文件路径,如果 path 为空,那么返回当前正在播放的 music 时长。 - * @return 成功返回时长,失败返回-1 + * @param id 音乐 ID + * @param pts 单位: 毫秒 + * @note 请尽量避免过度频繁地调用该接口,因为该接口可能会再次读写音乐文件,耗时稍高。 + * 因此,当用户拖拽音乐的播放进度条时,请在用户完成拖拽操作后再调用本接口。 + * 因为 UI 上的进度条控件往往会以很高的频率反馈用户的拖拽进度,如不做频率限制,会导致较差的用户体验。 */ - virtual long getMusicDurationInMS(char* path) = 0; + virtual void seekMusicToPosInTime(int id, int pts) = 0; + /// @} }; -/// @} -} +} // End of namespace liteav + +// 9.0 开始 C++ 接口将声明在 liteav 命名空间下,为兼容之前的使用方式,将 trtc 作为 liteav 的别名 +// namespace trtc = liteav; #ifdef _WIN32 -using namespace trtc; +using namespace liteav; #endif #endif /* __ITXAUDIOEFFECTMANAGER_H__ */ + +/// @} diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITXDeviceManager.h 
b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITXDeviceManager.h index c981f41..f5fcac4 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITXDeviceManager.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITXDeviceManager.h @@ -1,3 +1,11 @@ +/** + * Module: TRTC 音视频设备管理模块 + * Function: 用于管理摄像头、麦克风和扬声器等音视频相关的硬件设备 + */ +/// @defgroup TXDeviceManager_cplusplus TXDeviceManager +/// Tencent Cloud Device Management Module +/// @{ + #ifndef __ITXDEVICEMANAGER_H__ #define __ITXDEVICEMANAGER_H__ @@ -6,73 +14,170 @@ #include <TargetConditionals.h> #endif -namespace trtc { +namespace liteav { class ITRTCVideoRenderCallback; -/// @defgroup ITXDeviceManager_cplusplus ITXDeviceManager -/// 腾讯云视频通话功能的设备管理接口类 +///////////////////////////////////////////////////////////////////////////////// +// +// 音视频设备相关的类型定义 +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 音视频设备相关的类型定义 /// @{ + /** - * 系统音量类型(仅适用于移动端设备) + * 系统音量类型(仅适用于移动设备) * - * 智能手机一般具备两种系统音量类型,即通话音量类型和媒体音量类型。 - * - 通话音量:手机专门为通话场景设计的音量类型,使用手机自带的回声抵消功能,音质相比媒体音量类型较差, - * 无法通过音量按键将音量调成零,但是支持蓝牙耳机上的麦克风。 + * @deprecated v9.5 版本开始不推荐使用。 + * 现代智能手机中一般都具备两套系统音量类型,即“通话音量”和“媒体音量”。 + * - 通话音量:手机专门为接打电话所设计的音量类型,自带回声抵消(AEC)功能,并且支持通过蓝牙耳机上的麦克风进行拾音,缺点是音质比较一般。 + * 当您通过手机侧面的音量按键下调手机音量时,如果无法将其调至零(也就是无法彻底静音),说明您的手机当前出于通话音量。 + * - 媒体音量:手机专门为音乐场景所设计的音量类型,无法使用系统的 AEC 功能,并且不支持通过蓝牙耳机的麦克风进行拾音,但具备更好的音乐播放效果。 + * 当您通过手机侧面的音量按键下调手机音量时,如果能够将手机音量调至彻底静音,说明您的手机当前出于媒体音量。 * - * - 媒体音量:手机专门为音乐场景设计的音量类型,音质相比于通话音量类型要好,通过通过音量按键可以将音量调成零。 - * 使用媒体音量类型时,如果要开启回声抵消(AEC)功能,SDK 会开启内置的声学处理算法对声音进行二次处理。 - * 在媒体音量模式下,蓝牙耳机无法使用自带的麦克风采集声音,只能使用手机上的麦克风进行声音采集。 + * SDK 目前提供了三种系统音量类型的控制模式:自动切换模式、全程通话音量模式、全程媒体音量模式。 + */ +enum TXSystemVolumeType { + + ///自动切换模式 + TXSystemVolumeTypeAuto = 0, + + ///全程媒体音量 + TXSystemVolumeTypeMedia = 1, + + ///全程通话音量 + TXSystemVolumeTypeVOIP = 2, + +}; + +/** + * 音频路由(即声音的播放模式) * - * SDK 目前提供了三种系统音量类型的控制模式,分别为: - * - 
Auto:“麦上通话,麦下媒体”,即主播上麦时使用通话音量,观众不上麦则使用媒体音量,适合在线直播场景。 - * 如果您在 enterRoom 时选择的场景为 TRTCAppSceneLIVE 或 TRTCAppSceneVoiceChatRoom,SDK 会自动选择该模式。 + * 音频路由,即声音是从手机的扬声器还是从听筒中播放出来,因此该接口仅适用于手机等移动端设备。 + * 手机有两个扬声器:一个是位于手机顶部的听筒,一个是位于手机底部的立体声扬声器。 + * - 设置音频路由为听筒时,声音比较小,只有将耳朵凑近才能听清楚,隐私性较好,适合用于接听电话。 + * - 设置音频路由为扬声器时,声音比较大,不用将手机贴脸也能听清,因此可以实现“免提”的功能。 + */ +enum TXAudioRoute { + + /// Speakerphone:使用扬声器播放(即“免提”),扬声器位于手机底部,声音偏大,适合外放音乐。 + TXAudioRouteSpeakerphone = 0, + + /// Earpiece:使用听筒播放,听筒位于手机顶部,声音偏小,适合需要保护隐私的通话场景。 + TXAudioRouteEarpiece = 1, + +}; + +/** + * 设备类型(仅适用于桌面平台) * - * - VOIP:全程使用通话音量,适合多人会议场景。 - * 如果您在 enterRoom 时选择的场景为 TRTCAppSceneVideoCall 或 TRTCAppSceneAudioCall,SDK 会自动选择该模式。 + * 该枚举值用于定义三种类型的音视频设备,即摄像头、麦克风和扬声器,以便让一套设备管理接口可以操控三种不同类型的设备。 + */ +enum TXMediaDeviceType { + + ///未定义的设备类型 + TXMediaDeviceTypeUnknown = -1, + + ///麦克风类型设备 + TXMediaDeviceTypeMic = 0, + + ///扬声器类型设备 + TXMediaDeviceTypeSpeaker = 1, + + ///摄像头类型设备 + TXMediaDeviceTypeCamera = 2, + +}; + +/** + * 设备操作 * - * - Media:通话全程使用媒体音量,不常用,适合个别有特殊需求(如主播外接声卡)的应用场景。 + * 该枚举值用于本地设备的状态变化通知{@link onDeviceChanged}。 + */ +enum TXMediaDeviceState { + + ///设备已被插入 + TXMediaDeviceStateAdd = 0, + + ///设备已被移除 + TXMediaDeviceStateRemove = 1, + + ///设备已启用 + TXMediaDeviceStateActive = 2, + +}; + +/** + * 摄像头采集偏好 * + * 该枚举类型用于摄像头采集参数设置。 */ -enum TXSystemVolumeType -{ - /// “麦上通话,麦下媒体”,即主播上麦时使用通话音量,观众不上麦则使用媒体音量,适合在线直播场景。<br> - /// 如果您在 enterRoom 时选择的场景为 TRTCAppSceneLIVE 或 TRTCAppSceneVoiceChatRoom,SDK 会自动选择该模式。 - TXSystemVolumeTypeAuto = 0, - - /// 通话全程使用媒体音量,不常用,适合个别有特殊需求(如主播外接声卡)的应用场景。 - TXSystemVolumeTypeMedia = 1, - - /// 全程使用通话音量,适合多人会议场景。<br> - /// 如果您在 enterRoom 时选择的场景为 TRTCAppSceneVideoCall 或 TRTCAppSceneAudioCall 会自动选择该模式。 - TXSystemVolumeTypeVOIP = 2, +#ifdef _WIN32 +enum TXCameraCaptureMode { + + ///自动调整采集参数。 + /// SDK 根据实际的采集设备性能及网络情况,选择合适的摄像头输出参数,在设备性能及视频预览质量之间,维持平衡。 + TXCameraResolutionStrategyAuto = 0, + + ///优先保证设备性能。 + /// SDK 根据用户设置编码器的分辨率和帧率,选择最接近的摄像头输出参数,从而保证设备性能。 + TXCameraResolutionStrategyPerformance 
= 1, + + ///优先保证视频预览质量。 + /// SDK选择较高的摄像头输出参数,从而提高预览视频的质量。在这种情况下,会消耗更多的 CPU 及内存做视频前处理。 + TXCameraResolutionStrategyHighQuality = 2, + + ///允许用户设置本地摄像头采集的视频宽高。 + TXCameraCaptureManual = 3, + }; /** - * 声音播放路由(仅适用于移动端设备) + * 摄像头采集参数 * - * 一般手机都有两个扬声器,设置音频路由的作用就是要决定声音从哪个扬声器播放出来: - * - Speakerphone:扬声器,位于手机底部,声音偏大,适合外放音乐。 - * - Earpiece:听筒,位于手机顶部,声音偏小,适合通话。 + * 该设置能决定本地预览图像画质。 */ -enum TXAudioRoute { - TXAudioRouteSpeakerphone = 0, ///< 扬声器 - TXAudioRouteEarpiece = 1, ///< 听筒 +struct TXCameraCaptureParam { + ///【字段含义】摄像头采集偏好 + TXCameraCaptureMode mode; + + ///【字段含义】采集图像长度 + int width; + + ///【字段含义】采集图像宽度 + int height; + + TXCameraCaptureParam() : mode(TXCameraResolutionStrategyAuto), width(640), height(360) { + } }; +#endif /** - * 设备类型 + * 音视频设备的相关信息(仅适用于桌面平台) + * + * 该结构体用于描述一个音视频设备的关键信息,比如设备ID、设备名称等等,以便用户能够在用户界面上选择自己期望使用的音视频设备。 */ -enum TXMediaDeviceType -{ - TXMediaDeviceTypeUnknown = -1, ///< 未知类型 - TXMediaDeviceTypeMic = 0, ///< 麦克风 - TXMediaDeviceTypeSpeaker = 1, ///< 扬声器或听筒 - TXMediaDeviceTypeCamera = 2, ///< 摄像头 +class ITXDeviceInfo { + protected: + virtual ~ITXDeviceInfo() { + } + + public: + /// release function, don't use delete!!! 
+ virtual void release() = 0; + + ///设备 id (UTF-8) + virtual const char* getDevicePID() = 0; + + ///设备名称 (UTF-8) + virtual const char* getDeviceName() = 0; }; /** - * 设备列表 + * 设备信息列表(仅适用于桌面平台) + * + * 此结构体的作用相当于 std::vector<ITXDeviceInfo>,用于解决不同版本的 STL 容器的二进制兼容问题。 */ class ITXDeviceCollection { protected: @@ -80,323 +185,309 @@ class ITXDeviceCollection { } public: - /** - * @return 设备个数 - */ + ///设备数量 virtual uint32_t getCount() = 0; - /** - * @return 设备名称,字符编码格式是UTF-8 - */ + ///设备名字 (UTF-8),index 为设备索引,值为 [0,getCount)。返回值为设备名称 (UTF-8) virtual const char* getDeviceName(uint32_t index) = 0; - /** - * @return 设备PID,字符编码格式是UTF-8 - */ + ///设备唯一标识 (UTF-8) index 为设备索引,值为 [0,getCount) virtual const char* getDevicePID(uint32_t index) = 0; - /** - * @brief 遍历完设备后,调用release释放资源。 - */ + ///设备信息(json格式) + ///@note + /// - 示例:{"SupportedResolution":[{"width":640,"height":480},{"width":320,"height":240}]} + /// param index 设备索引,值为 [0,getCount),return 返回 json 格式的设备信息 + virtual const char* getDeviceProperties(uint32_t index) = 0; + + ///释放设备列表,请不要使用 delete 释放资源 !!! 
virtual void release() = 0; }; +/// @} -/** - * 设备 Item 信息 - */ -class ITXDeviceInfo { +#if (__APPLE__ && TARGET_OS_MAC && !TARGET_OS_IPHONE) || _WIN32 +class ITXDeviceObserver { + public: + virtual ~ITXDeviceObserver() { + } + + /** + * 本地设备的通断状态发生变化(仅适用于桌面系统) + * + * 当本地设备(包括摄像头、麦克风以及扬声器)被插入或者拔出时,SDK 便会抛出此事件回调。 + * @param deviceId 设备 ID + * @param type 设备类型 + * @param state 通断状态,0:设备已添加;1:设备已被移除;2:设备已启用。 + */ + virtual void onDeviceChanged(const char* deviceId, TXMediaDeviceType type, TXMediaDeviceState state) { + } + +}; // End of class ITXDeviceObserver +#endif + +class ITXDeviceManager { protected: - virtual ~ITXDeviceInfo() { + ITXDeviceManager() { + } + virtual ~ITXDeviceManager() { } public: +///////////////////////////////////////////////////////////////////////////////// +// +// 移动端设备操作接口(iOS Android) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 移动端设备操作接口 +/// @{ + +/** + * 1.1 判断当前是否为前置摄像头(仅适用于移动端) + */ +#if __ANDROID__ || (__APPLE__ && TARGET_OS_IOS) + virtual bool isFrontCamera() = 0; + /** - * @return 设备名称,字符编码格式是UTF-8 + * 1.2 切换前置或后置摄像头(仅适用于移动端) */ - virtual const char* getDeviceName() = 0; + virtual int switchCamera(bool frontCamera) = 0; /** - * @return 设备PID,字符编码格式是UTF-8 + * 1.3 获取摄像头的最大缩放倍数(仅适用于移动端) */ - virtual const char* getDevicePID() = 0; + virtual float getCameraZoomMaxRatio() = 0; /** - * @brief 获取完设备信息后,调用release释放资源。 + * 1.4 设置摄像头的缩放倍数(仅适用于移动端) + * + * @param zoomRatio 取值范围1 - 5,取值为1表示最远视角(正常镜头),取值为5表示最近视角(放大镜头)。最大值推荐为5,若超过5,视频数据会变得模糊不清。 */ - virtual void release() = 0; -}; + virtual int setCameraZoomRatio(float zoomRatio) = 0; -class ITXDeviceManager { -protected: - ITXDeviceManager() {} - virtual ~ITXDeviceManager() {} + /** + * 1.5 查询是否支持自动识别人脸位置(仅适用于移动端) + */ + virtual bool isAutoFocusEnabled() = 0; -public: - -#if (__APPLE__ && TARGET_OS_MAC && !TARGET_OS_IPHONE) || _WIN32 /** - * 获取设备列表 + * 1.6 开启自动对焦功能(仅适用于移动端) * - * @param type 设备类型,指定需要获取哪种设备的列表。详见 TXMediaDeviceType 定义。 + * 
开启后,SDK 会自动检测画面中的人脸位置,并将摄像头的焦点始终对焦在人脸位置上。 + */ + virtual int enableCameraAutoFocus(bool enabled) = 0; + + /** + * 1.7 设置摄像头的对焦位置(仅适用于移动端) + * + * 您可以通过该接口实现如下交互: + * 1. 在本地摄像头的预览画面上,允许用户单击操作。 + * 2. 在用户的单击位置显示一个矩形方框,以示摄像头会在此处对焦。 + * 3. 随后将用户点击位置的坐标通过本接口传递给 SDK,之后 SDK 会操控摄像头按照用户期望的位置进行对焦。 + * @param position 对焦位置,请传入期望对焦点的坐标值 + * @return 0:操作成功;负数:操作失败。 + * @note 使用该接口的前提是先通过 {@link enableCameraAutoFocus} 关闭自动对焦功能。 + */ + virtual int setCameraFocusPosition(float x, float y) = 0; + + /** + * 1.8 开启/关闭闪光灯,也就是手电筒模式(仅适用于移动端) + */ + virtual int enableCameraTorch(bool enabled) = 0; + + /** + * 1.9 设置音频路由(仅适用于移动端) * - * @note - delete ITXDeviceCollection* 指针会导致编译错误,SDK 维护 ITXDeviceCollection 对象的生命周期 - * - 使用完毕后请调用 release 方法释放资源 - * - type 只支持 TXMediaDeviceTypeMic、TXMediaDeviceTypeSpeaker、TXMediaDeviceTypeCamera - * - 此接口只支持 Mac 和 Windows 平台 + * 手机有两个音频播放设备:一个是位于手机顶部的听筒,一个是位于手机底部的立体声扬声器。 + * 设置音频路由为听筒时,声音比较小,只有将耳朵凑近才能听清楚,隐私性较好,适合用于接听电话。 + * 设置音频路由为扬声器时,声音比较大,不用将手机贴脸也能听清,因此可以实现“免提”的功能。 */ + virtual int setAudioRoute(TXAudioRoute route) = 0; +#endif + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// 桌面端设备操作接口(Windows Mac) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 桌面端设备操作接口 +/// @{ + +/** + * 2.1 获取设备列表(仅适用于桌面端) + * + * @param type 设备类型,指定需要获取哪种设备的列表。详见 TXMediaDeviceType 定义。 + * @note + * - 使用完毕后请调用 release 方法释放资源,这样可以让 SDK 维护 ITXDeviceCollection 对象的生命周期。 + * - 不要使用 delete 释放返回的 Collection 对象,delete ITXDeviceCollection* 指针会导致异常崩溃。 + * - type 只支持 TXMediaDeviceTypeMic、TXMediaDeviceTypeSpeaker、TXMediaDeviceTypeCamera。 + * - 此接口只支持 Mac 和 Windows 平台 + */ +#if (__APPLE__ && TARGET_OS_MAC && !TARGET_OS_IPHONE) || _WIN32 virtual ITXDeviceCollection* getDevicesList(TXMediaDeviceType type) = 0; /** - * 指定当前设备 + * 2.2 设置当前要使用的设备(仅适用于桌面端) * - * @param type 设备类型,根据设备类型指定当前设备。详见 TXMediaDeviceType 定义。 - * @param deviceId 从 getDevicesList 中得到的设备 ID - * @return 0:操作成功 负数:失败 - * @note - 
type 只支持 TXMediaDeviceTypeMic、TXMediaDeviceTypeSpeaker、TXMediaDeviceTypeCamera - * - 此接口只支持 Mac 和 Windows 平台 + * @param type 设备类型,详见 TXMediaDeviceType 定义。 + * @param deviceId 设备ID,您可以通过接口 {@link getDevicesList} 获得设备 ID。 + * @return 0:操作成功;负数:操作失败。 */ virtual int setCurrentDevice(TXMediaDeviceType type, const char* deviceId) = 0; /** - * 获取当前使用的设备 - * - * @param type 设备类型,根据设备类型获取当前设备信息。详见 TXMediaDeviceType 定义。 - * @return ITRTCDeviceInfo 设备信息,能获取设备 ID 和设备名称 - * @note 此接口只支持 Mac 和 Windows 平台 + * 2.3 获取当前正在使用的设备(仅适用于桌面端) */ virtual ITXDeviceInfo* getCurrentDevice(TXMediaDeviceType type) = 0; /** - * 设置当前设备的音量 + * 2.4 设置当前设备的音量(仅适用于桌面端) * - * @param type 设备类型,根据设备类型获取当前设备音量。详见 TXMediaDeviceType 定义。 - * @param volume 音量大小 - * @return 0:操作成功 负数:失败 - * @note - type 只支持 TXMediaDeviceTypeMic、TXMediaDeviceTypeSpeaker - * - 此接口只支持 Mac 和 Windows 平台 + * 这里的音量指的是麦克风的采集音量或者扬声器的播放音量,摄像头是不支持设置音量的。 + * @param volume 音量大小,取值范围为0 - 100,默认值:100。 + * @note 如果将 volume 设置成 100 之后感觉音量还是太小,可以将 volume 最大设置成 150,但超过 100 的 volume 会有爆音的风险,请谨慎操作。 */ virtual int setCurrentDeviceVolume(TXMediaDeviceType type, uint32_t volume) = 0; /** - * 获取当前设备的音量 - * - * @param type 设备类型,根据设备类型获取当前设备音量。详见 TXMediaDeviceType 定义。 + * 2.5 获取当前设备的音量(仅适用于桌面端) * - * @note - type 只支持 TXMediaDeviceTypeMic、TXMediaDeviceTypeSpeaker - * - 此接口只支持 Mac 和 Windows 平台 + * 这里的音量指的是麦克风的采集音量或者扬声器的播放音量,摄像头是不支持获取音量的。 */ virtual uint32_t getCurrentDeviceVolume(TXMediaDeviceType type) = 0; /** - * 设置当前设备是否静音 + * 2.6 设置当前设备的静音状态(仅适用于桌面端) * - * @param type 设备类型,根据设备类型设置当前设备状态。详见 TXMediaDeviceType 定义。 - * @param mute 是否静音/禁画 - * @return 0:操作成功 负数:失败 - * @note - type 只支持 TXMediaDeviceTypeMic、TXMediaDeviceTypeSpeaker - * - 此接口只支持 Mac 和 Windows 平台 + * 这里的音量指的是麦克风和扬声器,摄像头是不支持静音操作的。 */ virtual int setCurrentDeviceMute(TXMediaDeviceType type, bool mute) = 0; /** - * 查询当前设备是否静音 + * 2.7 获取当前设备的静音状态(仅适用于桌面端) * - * @param type 设备类型,根据设备类型获取当前设备状态。详见 TXMediaDeviceType 定义。 - * @return true : 当前设备已静音;false : 当前设备未静音 - * @note type 只支持 
TXMediaDeviceTypeMic、TXMediaDeviceTypeSpeaker + * 这里的音量指的是麦克风和扬声器,摄像头是不支持静音操作的。 */ virtual bool getCurrentDeviceMute(TXMediaDeviceType type) = 0; /** - * 开始摄像头测试 + * 2.8 开始摄像头测试(仅适用于桌面端) * - * @param view 预览控件所在的父控件 - * @return 0:操作成功 负数:失败 - * @note - 在测试过程中可以使用 setCurrentCameraDevice 接口切换摄像头。 - * - 此接口只支持 Mac 和 Windows 平台 + * @note 在测试过程中可以使用 {@link setCurrentDevice} 接口切换摄像头。 */ virtual int startCameraDeviceTest(void* view) = 0; -#ifdef _WIN32 /** - * 开始进行摄像头测试 - * 会触发 onFirstVideoFrame 回调接口 - * - * @param callback 摄像头预览自定义渲染画面回调 - * @return 0:操作成功 负数:失败 - * @note - 在测试过程中可以使用 setCurrentCameraDevice 接口切换摄像头。 - * - 此接口只支持 Windows 平台 - */ - virtual int startCameraDeviceTest(ITRTCVideoRenderCallback* callback) = 0; -#endif - - /** - * 结束摄像头测试 - * @return 0:操作成功 负数:失败 - * @note 此接口只支持 Mac 和 Windows 平台 + * 2.9 结束摄像头测试(仅适用于桌面端) */ virtual int stopCameraDeviceTest() = 0; /** - * 开始麦克风测试 + * 2.10 开始麦克风测试(仅适用于桌面端) * - * @param interval 音量回调间隔 - * @return 0:操作成功 负数:失败 - * @note 此接口只支持 Mac 和 Windows 平台 + * 该接口可以测试麦克风是否能正常工作,测试到的麦克风采集音量的大小,会以回调的形式通知给您,其中 volume 的取值范围为0 - 100。 + * @param interval 麦克风音量的回调间隔。 */ virtual int startMicDeviceTest(uint32_t interval) = 0; /** - * 结束麦克风测试 - * @return 0:操作成功 负数:失败 - * @note 此接口只支持 Mac 和 Windows 平台 + * 2.11 结束麦克风测试(仅适用于桌面端) */ virtual int stopMicDeviceTest() = 0; /** - * 开始扬声器测试 + * 2.12 开始扬声器测试(仅适用于桌面端) * - * 该方法播放指定的音频文件测试播放设备是否能正常工作。如果能听到声音,说明播放设备能正常工作。 + * 该接口通过播放指定的音频文件,用于测试播放设备是否能正常工作。如果用户在测试时能听到声音,说明播放设备能正常工作。 * @param filePath 声音文件的路径 - * @return 0:操作成功 负数:失败 - * @note 此接口只支持 Mac 和 Windows 平台 */ virtual int startSpeakerDeviceTest(const char* filePath) = 0; /** - * 停止扬声器测试 - * @return 0:操作成功 负数:失败 - * @note 此接口只支持 Mac 和 Windows 平台 + * 2.13 结束扬声器测试(仅适用于桌面端) */ virtual int stopSpeakerDeviceTest() = 0; +#endif +/** + * 2.14 开始摄像头测试(仅适用于 Windows 系统) + * + * 该接口支持自定义渲染,即您可以通过接 ITRTCVideoRenderCallback 回调接口接管摄像头的渲染画面。 + */ #ifdef _WIN32 - /** - * 设置 Windows 系统音量合成器中当前进程的音量 - * - * @param volume 音量值,取值范围[0~100] - * @return 0:成功 - */ - 
virtual int setApplicationPlayVolume(int volume) = 0; - - /** - * 获取 Windows 系统音量合成器中当前进程的音量 - * - * @return 返回音量值,取值范围[0~100] - */ - virtual int getApplicationPlayVolume() = 0; - - /** - * 设置 Windows 系统音量合成器中当前进程的静音状态 - * - * @param bMute 是否设置为静音状态 - * @return 0 设置成功 - */ - virtual int setApplicationMuteState(bool bMute) = 0; - - /** - * 获取 Windows 系统音量合成器中当前进程的静音状态 - * - * @return 返回静音状态 - */ - virtual bool getApplicationMuteState() = 0; + virtual int startCameraDeviceTest(ITRTCVideoRenderCallback* callback) = 0; #endif - -#elif __ANDROID__ || (__APPLE__ && TARGET_OS_IOS) - /** - * 切换摄像头 - * - * @param frontCamera YES:切换到前置摄像头 NO:切换到后置摄像头 - * @return 0:操作成功 负数:失败 - * @note 此接口只支持 Android 和 iOS 平台 - */ - virtual int switchCamera(bool frontCamera) = 0; - /** - * 当前是否为前置摄像头 - * @note 此接口只支持 Android 和 iOS 平台 - */ - virtual bool isFrontCamera() = 0; +/** + * 2.15 设置 Windows 系统音量合成器中当前进程的音量(仅适用于 Windows 系统) + */ +#ifdef _WIN32 + virtual int setApplicationPlayVolume(int volume) = 0; +#endif - /** - * 获取摄像头最大缩放倍数 - * @note 此接口只支持 Android 和 iOS 平台 - */ - virtual float getCameraZoomMaxRatio() = 0; +/** + * 2.16 获取 Windows 系统音量合成器中当前进程的音量(仅适用于 Windows 系统) + */ +#ifdef _WIN32 + virtual int getApplicationPlayVolume() = 0; +#endif - /** - * 设置摄像头缩放倍数 - * - * @param zoomRatio 缩放倍数 - * @return 0:操作成功 负数:失败 - * @note 此接口只支持 Android 和 iOS 平台 - */ - virtual int setCameraZoomRatio(float zoomRatio) = 0; +/** + * 2.17 设置 Windows 系统音量合成器中当前进程的静音状态(仅适用于 Windows 系统) + */ +#ifdef _WIN32 + virtual int setApplicationMuteState(bool bMute) = 0; +#endif - /** - * 设置是否自动识别人脸位置 - * - * @param enabled YES:开启;NO:关闭,默认值:YES - * @return 0:操作成功 负数:失败 - * @note 此接口只支持 Android 和 iOS 平台 - */ - virtual int enableCameraAutoFocus (bool enabled) = 0; +/** + * 2.18 获取 Windows 系统音量合成器中当前进程的静音状态(仅适用于 Windows 系统) + */ +#ifdef _WIN32 + virtual bool getApplicationMuteState() = 0; +#endif - /** - * 查询是否支持自动识别人脸位置 - * @note 此接口只支持 Android 和 iOS 平台 - */ - virtual bool isAutoFocusEnabled () = 0; +/** + * 2.19 
设置摄像头采集偏好 + */ +#ifdef _WIN32 + virtual void setCameraCapturerParam(const TXCameraCaptureParam& params) = 0; +#endif - /** - * 设置摄像头焦点 - * - * @param x 焦点横坐标 - * @param y 焦点纵坐标 - * @return 0:操作成功 负数:失败 - * @note 此接口只支持 Android 和 iOS 平台 - */ - virtual int setCameraFocusPosition (float x, float y) = 0; +/** + * 2.20 设置 onDeviceChanged 事件回调 + */ +#if (__APPLE__ && TARGET_OS_MAC && !TARGET_OS_IPHONE) || _WIN32 + virtual void setDeviceObserver(ITXDeviceObserver* observer) = 0; +#endif - /** - * 设置是否开启闪光灯 - * - * @param enabled YES:开启;NO:关闭,默认值:NO - * @return 0:操作成功 负数:失败 - * @note 此接口只支持 Android 和 iOS 平台 - */ - virtual int enableCameraTorch (bool enabled) = 0; +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// 弃用接口(建议使用对应的新接口) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 弃用接口(建议使用对应的新接口) +/// @{ - /** - * 设置通话时使用的系统音量类型 - * - * @note - * 1. 需要在调用 startLocalAudio() 之前调用该接口。<br> - * 2. 如无特殊需求,不推荐您自行设置,您只需通过 enterRoom 设置好适合您的场景,SDK 内部会自动选择相匹配的音量类型。 - * - * @param type 系统音量类型,如无特殊需求,不推荐您自行设置。 - * @return 0:操作成功 负数:失败 - * @note 此接口只支持 Android 和 iOS 平台 - */ - virtual int setSystemVolumeType (TXSystemVolumeType type) = 0; +/** + * 设置系统音量类型(仅适用于移动端) + * + * @deprecated v9.5 版本开始不推荐使用,建议使用 {@link TRTCCloud} 中的 startLocalAudio(quality) 接口替代之,通过 quality 参数来决策音质。 + */ +#if __ANDROID__ || (__APPLE__ && TARGET_OS_IOS) + virtual int setSystemVolumeType(TXSystemVolumeType type) = 0; +#endif - /** - * 设置设置音频路由 - * - * 微信和手机 QQ 视频通话功能的免提模式就是基于音频路由实现的。 - * 一般手机都有两个扬声器,一个是位于顶部的听筒扬声器,声音偏小;一个是位于底部的立体声扬声器,声音偏大。 - * 设置音频路由的作用就是决定声音使用哪个扬声器播放。 - * - * @param route 音频路由,即声音由哪里输出(扬声器、听筒),默认值:TXAudioRouteSpeakerphone - * @return 0:操作成功 负数:失败 - * @note 此接口只支持 Android 和 iOS 平台 - */ - virtual int setAudioRoute (TXAudioRoute route) = 0; - + /// @} +}; // End of class ITXDeviceManager +} // namespace liteav + +// 9.0 开始 C++ 接口将声明在 liteav 命名空间下,为兼容之前的使用方式,将 trtc 作为 liteav 的别名 +// namespace trtc = liteav; 
+ +#ifdef _WIN32 +using namespace liteav; #endif -}; +#endif /* __ITXDEVICEMANAGER_H__ */ /// @} -} - -#endif /* ITXDeviceManager_h */ diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TRTCCloudCallback.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TRTCCloudCallback.h index 4f0ab84..dd0823d 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TRTCCloudCallback.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TRTCCloudCallback.h @@ -1,74 +1,77 @@ - /* - * Module: TRTCCloudCallback @ TXLiteAVSDK - * - * Function: 腾讯云视频通话功能的回调接口类,若想从C++代码中获取到TRTC SDK的回调,请继承此类并调用 ITRTCCloud::addCallback(TRTCCloudCallback* callback)设置观察者 - * +/** + * Module: TRTCCloudDelegate @ TXLiteAVSDK + * Function: 腾讯云实时音视频的事件回调接口 */ - +/// @defgroup TRTCCloudCallback_cplusplus TRTCCloudCallback +/// 腾讯云实时音视频的事件回调接口 +/// @{ #ifndef __TRTCCLOUDCALLBACK_H__ #define __TRTCCLOUDCALLBACK_H__ #include "TRTCTypeDef.h" #include "ITXDeviceManager.h" #include "TXLiteAVCode.h" +#include "ITRTCStatistics.h" -namespace trtc { +namespace liteav { -/// @defgroup TRTCCloudCallback_cplusplus TRTCCloudCallback -/// 腾讯云视频通话功能的回调接口类 -/// @{ -/** - * 腾讯云视频通话功能的回调接口类 - */ -class ITRTCCloudCallback -{ -public: - virtual ~ITRTCCloudCallback() {} +class ITRTCCloudCallback { + public: + virtual ~ITRTCCloudCallback() { + } ///////////////////////////////////////////////////////////////////////////////// // - // (一)错误事件和警告事件 + // 错误和警告事件 // ///////////////////////////////////////////////////////////////////////////////// - /// @name 错误事件和警告事件 + /// @name 错误和警告事件 /// @{ + /** - * 1.1 错误回调:SDK 不可恢复的错误,一定要监听,并分情况给用户适当的界面提示。 + * 1.1 错误事件回调 + * + * 错误事件,表示 SDK 抛出的不可恢复的错误,比如进入房间失败或设备开启失败等。 + * 参考文档:[错误码表](https://cloud.tencent.com/document/product/647/32257) * - * @param errCode 错误码 - * @param errMsg 错误信息 - * @param extraInfo 扩展信息字段,个别错误码可能会带额外的信息帮助定位问题 + * @param errCode 错误码 + * @param errMsg 错误信息 + * @param extInfo 扩展信息字段,个别错误码可能会带额外的信息帮助定位问题 
*/ virtual void onError(TXLiteAVError errCode, const char* errMsg, void* extraInfo) = 0; /** - * 1.2 警告回调:用于告知您一些非严重性问题,例如出现了卡顿或者可恢复的解码失败。 + * 1.2 警告事件回调 + * + * 警告事件,表示 SDK 抛出的提示性问题,比如视频出现卡顿或 CPU 使用率太高等。 + * 参考文档:[错误码表](https://cloud.tencent.com/document/product/647/32257) * * @param warningCode 警告码 * @param warningMsg 警告信息 - * @param extraInfo 扩展信息字段,个别警告码可能会带额外的信息帮助定位问题 + * @param extInfo 扩展信息字段,个别警告码可能会带额外的信息帮助定位问题 */ virtual void onWarning(TXLiteAVWarning warningCode, const char* warningMsg, void* extraInfo) = 0; - /// @} + /// @} ///////////////////////////////////////////////////////////////////////////////// // - // (二)房间事件回调 + // 房间相关事件回调 // ///////////////////////////////////////////////////////////////////////////////// - /// @name 房间事件回调 + /// @name 房间相关事件回调 /// @{ + /** - * 2.1 已加入房间的回调 - * - * 调用 TRTCCloud 中的 enterRoom() 接口执行进房操作后,会收到来自 SDK 的 onEnterRoom(result) 回调: + * 2.1 进入房间成功与否的事件回调 * - * - 如果加入成功,result 会是一个正数(result > 0),代表加入房间的时间消耗,单位是毫秒(ms)。 - * - 如果加入失败,result 会是一个负数(result < 0),代表进房失败的错误码。 - * 进房失败的错误码含义请参见[错误码](https://cloud.tencent.com/document/product/647/32257)。 + * 调用 TRTCCloud 中的 enterRoom() 接口执行进房操作后,会收到来自 TRTCCloudDelegate 的 onEnterRoom(result) 回调: + * - 如果加入成功,回调 result 会是一个正数(result > 0),代表进入房间所消耗的时间,单位是毫秒(ms)。 + * - 如果加入失败,回调 result 会是一个负数(result < 0),代表失败原因的错误码。 + * 进房失败的错误码含义请参见[错误码表](https://cloud.tencent.com/document/product/647/32257)。 * - * @note 在 Ver6.6 之前的版本,只有进房成功会抛出 onEnterRoom(result) 回调,进房失败由 onError() 回调抛出。 - * 在 Ver6.6 及之后改为:进房成功返回正的 result,进房失败返回负的 result,同时进房失败也会有 onError() 回调抛出。 + * @note + * 1. 在 Ver6.6 之前的版本,只有进房成功会抛出 onEnterRoom(result) 回调,进房失败由 onError() 回调抛出。 + * 2. 
在 Ver6.6 之后的版本:无论进房成功或失败,均会抛出 onEnterRoom(result) 回调,同时进房失败也会有 onError() 回调抛出。 * * @param result result > 0 时为进房耗时(ms),result < 0 时为进房错误码。 */ @@ -78,12 +81,12 @@ public: * 2.2 离开房间的事件回调 * * 调用 TRTCCloud 中的 exitRoom() 接口会执行退出房间的相关逻辑,例如释放音视频设备资源和编解码器资源等。 - * 待资源释放完毕,SDK 会通过 onExitRoom() 回调通知到您。 + * 待 SDK 占用的所有资源释放完毕后,SDK 会抛出 onExitRoom() 回调通知到您。 * * 如果您要再次调用 enterRoom() 或者切换到其他的音视频 SDK,请等待 onExitRoom() 回调到来后再执行相关操作。 * 否则可能会遇到例如摄像头、麦克风设备被强占等各种异常问题。 * - * @param reason 离开房间原因,0:主动调用 exitRoom 退房;1:被服务器踢出当前房间;2:当前房间整个被解散。 + * @param reason 离开房间原因,0:主动调用 exitRoom 退出房间;1:被服务器踢出当前房间;2:当前房间整个被解散。 */ virtual void onExitRoom(int reason) = 0; @@ -96,666 +99,913 @@ public: * @param errCode 错误码,ERR_NULL 代表切换成功,其他请参见[错误码](https://cloud.tencent.com/document/product/647/32257)。 * @param errMsg 错误信息。 */ - virtual void onSwitchRole(TXLiteAVError errCode, const char* errMsg) {} + virtual void onSwitchRole(TXLiteAVError errCode, const char* errMsg) { + } /** - * 2.4 请求跨房通话(主播 PK)的结果回调 + * 2.4 切换房间的结果回调 * - * 调用 TRTCCloud 中的 connectOtherRoom() 接口会将两个不同房间中的主播拉通视频通话,也就是所谓的“主播PK”功能。 - * 调用者会收到 onConnectOtherRoom() 回调来获知跨房通话是否成功, - * 如果成功,两个房间中的所有用户都会收到 PK 主播的 onUserVideoAvailable() 回调。 + * 调用 TRTCCloud 中的 switchRoom() 接口可以让用户快速地从一个房间切换到另一个房间, + * 待 SDK 切换完成后,会抛出 onSwitchRoom() 事件回调。 * - * @param userId 要 PK 的目标主播 userId。 * @param errCode 错误码,ERR_NULL 代表切换成功,其他请参见[错误码](https://cloud.tencent.com/document/product/647/32257)。 * @param errMsg 错误信息。 */ - virtual void onConnectOtherRoom(const char* userId, TXLiteAVError errCode, const char* errMsg) {} + virtual void onSwitchRoom(TXLiteAVError errCode, const char* errMsg) { + } /** - * 2.5 结束跨房通话(主播 PK)的结果回调 + * 2.5 请求跨房通话的结果回调 + * + * 调用 TRTCCloud 中的 connectOtherRoom() 接口会将两个不同房间中的主播拉通视频通话,也就是所谓的“主播PK”功能。 + * 调用者会收到 onConnectOtherRoom() 回调来获知跨房通话是否成功, + * 如果成功,两个房间中的所有用户都会收到来自另一个房间中的 PK 主播的 onUserVideoAvailable() 回调。 + * + * @param userId 要跨房通话的另一个房间中的主播的用户 ID。 + * @param errCode 错误码,ERR_NULL 
代表切换成功,其他请参见[错误码](https://cloud.tencent.com/document/product/647/32257)。 + * @param errMsg 错误信息。 */ - virtual void onDisconnectOtherRoom(TXLiteAVError errCode, const char* errMsg) {} + virtual void onConnectOtherRoom(const char* userId, TXLiteAVError errCode, const char* errMsg) { + } /** - * 2.6 切换房间 (switchRoom) 的结果回调 + * 2.6 结束跨房通话的结果回调 */ - virtual void onSwitchRoom(TXLiteAVError errCode, const char* errMsg) {} - /// @} + virtual void onDisconnectOtherRoom(TXLiteAVError errCode, const char* errMsg) { + } + /// @} ///////////////////////////////////////////////////////////////////////////////// // - // (三)成员事件回调 + // 用户相关事件回调 // ///////////////////////////////////////////////////////////////////////////////// - /// @name 成员事件回调 + /// @name 用户相关事件回调 /// @{ + /** * 3.1 有用户加入当前房间 * - * 出于性能方面的考虑,在两种不同的应用场景下,该通知的行为会有差别: - * - 通话场景(TRTCAppSceneVideoCall 和 TRTCAppSceneAudioCall):该场景下用户没有角色的区别,任何用户进入房间都会触发该通知。 - * - 直播场景(TRTCAppSceneLIVE 和 TRTCAppSceneVoiceChatRoom):该场景不限制观众的数量,如果任何用户进出都抛出回调会引起很大的性能损耗,所以该场景下只有主播进入房间时才会触发该通知,观众进入房间不会触发该通知。 - * - * - * @note 注意 onRemoteUserEnterRoom 和 onRemoteUserLeaveRoom 只适用于维护当前房间里的“成员列表”,如果需要显示远程画面,建议使用监听 onUserVideoAvailable() 事件回调。 - * - * @param userId 用户标识 + * 出于性能方面的考虑,在 TRTC 两种不同的应用场景(即 AppScene,在 enterRoom 时通过第二个参数指定)下,该通知的行为会有差别: + * - 直播类场景(TRTCAppSceneLIVE 和 TRTCAppSceneVoiceChatRoom):该场景下的用户区分主播和观众两种角色,只有主播进入房间时才会触发该通知,观众进入房间时不会触发该通知。 + * - 通话类场景(TRTCAppSceneVideoCall 和 TRTCAppSceneAudioCall):该场景下的用户没有角色的区分(可认为都是主播),任何用户进入房间都会触发该通知。 + * @param userId 远端用户的用户标识 + * @note + * 1. 事件回调 onRemoteUserEnterRoom 和 onRemoteUserLeaveRoom 只适用于维护当前房间里的“用户列表”,有此事件回调不代表一定有视频画面。 + * 2. 
如果需要显示远程画面,请监听代表某个用户是否有视频画面的 onUserVideoAvailable() 事件回调。 */ - virtual void onRemoteUserEnterRoom(const char* userId) {} + virtual void onRemoteUserEnterRoom(const char* userId) { + } /** * 3.2 有用户离开当前房间 * - * 与 onRemoteUserEnterRoom 相对应,在两种不同的应用场景下,该通知的行为会有差别: - * - 通话场景(TRTCAppSceneVideoCall 和 TRTCAppSceneAudioCall):该场景下用户没有角色的区别,任何用户的离开都会触发该通知。 - * - 直播场景(TRTCAppSceneLIVE 和 TRTCAppSceneVoiceChatRoom):只有主播离开房间时才会触发该通知,观众离开房间不会触发该通知。 + * 与 onRemoteUserEnterRoom 相对应,在两种不同的应用场景(即 AppScene,在 enterRoom 时通过第二个参数指定)下,该通知的行为会有差别: + * - 直播类场景(TRTCAppSceneLIVE 和 TRTCAppSceneVoiceChatRoom):只有主播离开房间时才会触发该通知,观众离开房间不会触发该通知。 + * - 通话类场景(TRTCAppSceneVideoCall 和 TRTCAppSceneAudioCall):该场景下用户没有角色的区别,任何用户的离开都会触发该通知。 * - * @param userId 用户标识 + * @param userId 远端用户的用户标识 * @param reason 离开原因,0表示用户主动退出房间,1表示用户超时退出,2表示被踢出房间。 */ - virtual void onRemoteUserLeaveRoom(const char* userId, int reason) {} + virtual void onRemoteUserLeaveRoom(const char* userId, int reason) { + } /** - * 3.3 用户是否开启摄像头视频 + * 3.3 某远端用户发布/取消了主路视频画面 * - * 当您收到 onUserVideoAvailable(userId, YES) 通知时,表示该路画面已经有可用的视频数据帧到达。 - * 此时,您需要调用 startRemoteView(userId) 接口加载该用户的远程画面。 - * 然后,您还会收到名为 onFirstVideoFrame(userId) 的首帧画面渲染回调。 + * “主路画面”一般被用于承载摄像头画面。当您收到 onUserVideoAvailable(userId, true) 通知时,表示该路画面已经有可播放的视频帧到达。 + * 此时,您需要调用 {@link startRemoteView} 接口订阅该用户的远程画面,订阅成功后,您会继续收到该用户的首帧画面渲染回调 onFirstVideoFrame(userid)。 * - * 当您收到 onUserVideoAvailable(userId, NO) 通知时,表示该路远程画面已被关闭, - * 可能由于该用户调用了 muteLocalVideo() 或 stopLocalPreview()。 + * 当您收到 onUserVideoAvailable(userId, false) 通知时,表示该路远程画面已经被关闭,关闭的原因可能是该用户调用了 {@link muteLocalVideo} 或 {@link stopLocalPreview}。 * - * @param userId 用户标识 - * @param available 画面是否开启 + * @param userId 远端用户的用户标识 + * @param available 该用户是否发布(或取消发布)了主路视频画面,true: 发布;false:取消发布。 */ - virtual void onUserVideoAvailable(const char* userId, bool available) {} + virtual void onUserVideoAvailable(const char* userId, bool available) { + } /** - * 3.4 用户是否开启屏幕分享 + * 3.4 某远端用户发布/取消了辅路视频画面 * - * @param userId 用户标识 - 
* @param available 屏幕分享是否开启 + * “辅路画面”一般被用于承载屏幕分享的画面。当您收到 onUserSubStreamAvailable(userId, true) 通知时,表示该路画面已经有可播放的视频帧到达。 + * 此时,您需要调用 {@link startRemoteSubStreamView} 接口订阅该用户的远程画面,订阅成功后,您会继续收到该用户的首帧画面渲染回调 onFirstVideoFrame(userid)。 + * @param userId 远端用户的用户标识 + * @param available 该用户是否发布(或取消发布)了辅路视频画面,true: 发布;false:取消发布。 + * @note 显示辅路画面使用的函数是 {@link startRemoteSubStreamView} 而非 {@link startRemoteView}。 */ - virtual void onUserSubStreamAvailable(const char* userId, bool available) {} + virtual void onUserSubStreamAvailable(const char* userId, bool available) { + } /** - * 3.5 用户是否开启音频上行 + * 3.5 某远端用户发布/取消了自己的音频 * - * @param userId 用户标识 - * @param available 声音是否开启 + * 当您收到 onUserAudioAvailable(userId, true) 通知时,表示该用户发布了自己的声音,此时 SDK 的表现为: + * - 在自动订阅模式下,您无需做任何操作,SDK 会自动播放该用户的声音。 + * - 在手动订阅模式下,您可以通过 {@link muteRemoteAudio}(userid, false) 来播放该用户的声音。 + * @param userId 远端用户的用户标识 + * @param available 该用户是否发布(或取消发布)了自己的音频,true: 发布;false:取消发布。 + * @note SDK 默认使用自动订阅模式,您可以通过 {@link setDefaultStreamRecvMode} 设置为手动订阅,但需要在您进入房间之前调用才生效。 */ - virtual void onUserAudioAvailable(const char* userId, bool available) {} + virtual void onUserAudioAvailable(const char* userId, bool available) { + } /** - * 3.6 开始渲染本地或远程用户的首帧画面 + * 3.6 SDK 开始渲染自己本地或远端用户的首帧画面 * - * 如果 userId 为 null,表示开始渲染本地采集的摄像头画面,需要您先调用 startLocalPreview 触发。 - * 如果 userId 不为 null,表示开始渲染远程用户的首帧画面,需要您先调用 startRemoteView 触发。 + * SDK 会在渲染自己本地或远端用户的首帧画面时抛出该事件,您可以通过回调事件中的 userId 参数来判断事件来自于“本地”还是来自于“远端”。 + * - 如果 userId 为空值,代表 SDK 已经开始渲染自己本地的视频画面,不过前提是您已经调用了 {@link startLocalPreview} 或 {@link startScreenCapture}。 + * - 如果 userId 不为空,代表 SDK 已经开始渲染远端用户的视频画面,不过前提是您已经调用了 {@link startRemoteView} 订阅了该用户的视频画面。 + * @param userId 本地或远端的用户标识,如果 userId 为空值代表自己本地的首帧画面已到来,userId 不为空则代表远端用户的首帧画面已到来。 + * @param streamType 视频流类型:主路(Main)一般用于承载摄像头画面,辅路(Sub)一般用于承载屏幕分享画面。 + * @param width 画面的宽度。 + * @param height 画面的高度。 + * @note + * 1. 只有当您调用了 {@link startLocalPreview} 或 {@link startScreenCapture} 之后,才会触发自己本地的首帧画面事件回调。 + * 2. 
只有当您调用了 {@link startRemoteView} 或 {@link startRemoteSubStreamView} 之后,才会触发远端用户的首帧画面事件回调。 + */ + virtual void onFirstVideoFrame(const char* userId, const TRTCVideoStreamType streamType, const int width, const int height) { + } + + /** + * 3.7 SDK 开始播放远端用户的首帧音频 * - * @note 只有当您调用 startLocalPreview()、startRemoteView() 或 startRemoteSubStreamView() 之后,才会触发该回调。 + * SDK 会在播放远端用户的首帧音频时抛出该事件,本地音频的首帧事件暂不抛出。 * - * @param userId 本地或远程用户 ID,如果 userId == null 代表本地,userId != null 代表远程。 - * @param streamType 视频流类型:摄像头或屏幕分享。 - * @param width 画面宽度 - * @param height 画面高度 + * @param userId 远端用户的用户标识 */ - virtual void onFirstVideoFrame(const char* userId, const TRTCVideoStreamType streamType, const int width, const int height) {} + virtual void onFirstAudioFrame(const char* userId) { + } /** - * 3.7 开始播放远程用户的首帧音频(本地声音暂不通知) + * 3.8 自己本地的首个视频帧已被发布出去 * - * @param userId 远程用户 ID。 + * 当您成功进入房间并通过 {@link startLocalPreview} 或 {@link startScreenCapture} 开启本地视频采集之后(开启采集和进入房间的先后顺序无影响), + * SDK 就会开始进行视频编码,并通过自身的网络模块向云端发布自己本地的视频数据。 + * 当 SDK 成功地向云端送出自己的第一帧视频数据帧以后,就会抛出 onSendFirstLocalVideoFrame 事件回调。 + * + * @param streamType 视频流类型:主路(Main)一般用于承载摄像头画面,辅路(Sub)一般用于承载屏幕分享画面。 */ - virtual void onFirstAudioFrame(const char* userId) {} + virtual void onSendFirstLocalVideoFrame(const TRTCVideoStreamType streamType) { + } /** - * 3.8 首帧本地视频数据已经被送出 + * 3.9 自己本地的首个音频帧已被发布出去 * - * SDK 会在 enterRoom() 并 startLocalPreview() 成功后开始摄像头采集,并将采集到的画面进行编码。 - * 当 SDK 成功向云端送出第一帧视频数据后,会抛出这个回调事件。 + * 当您成功进入房间并通过 {@link startLocalAudio} 开启本地音频采集之后(开启采集和进入房间的先后顺序无影响), + * SDK 就会开始进行音频编码,并通过自身的网络模块向云端发布自己本地的音频数据。 + * 当 SDK 成功地向云端送出自己的第一帧音频数据帧以后,就会抛出 onSendFirstLocalAudioFrame 事件回调。 + */ + virtual void onSendFirstLocalAudioFrame() { + } + + /** + * 3.10 远端视频状态变化的事件回调 * - * @param streamType 视频流类型,主画面、小画面或辅流画面(屏幕分享) + * 您可以通过此事件回调获取远端每一路画面的播放状态(包括 Playing、Loading 和 Stopped 三种状态),从而进行相应的 UI 展示。 + * @param userId 用户标识 + * @param streamType 视频流类型:主路(Main)一般用于承载摄像头画面,辅路(Sub)一般用于承载屏幕分享画面。 + * @param status 视频状态:包括 Playing、Loading 和 
Stopped 三种状态。 + * @param reason 视频状态改变的原因 + * @param extrainfo 额外信息 */ - virtual void onSendFirstLocalVideoFrame(const TRTCVideoStreamType streamType) {} + virtual void onRemoteVideoStatusUpdated(const char* userId, TRTCVideoStreamType streamType, TRTCAVStatusType status, TRTCAVStatusChangeReason reason, void* extrainfo) { + } /** - * 3.9 首帧本地音频数据已经被送出 + * 3.11 用户视频大小发生改变回调 * - * SDK 会在 enterRoom() 并 startLocalAudio() 成功后开始麦克风采集,并将采集到的声音进行编码。 - * 当 SDK 成功向云端送出第一帧音频数据后,会抛出这个回调事件。 + * 当您收到 onUserVideoSizeChanged(userId, streamtype, newWidth, newHeight) 通知时,表示该路画面大小发生了调整,调整的原因可能是该用户调用了 setVideoEncoderParam 或者 setSubStreamEncoderParam 重新设置了画面尺寸。 + * @param userId 用户标识 + * @param streamType 视频流类型:主路(Main)一般用于承载摄像头画面,辅路(Sub)一般用于承载屏幕分享画面。 + * @param newWidth 视频流的宽度(像素) + * @param newHeight 视频流的高度(像素) */ - virtual void onSendFirstLocalAudioFrame() {} - /// @} + virtual void onUserVideoSizeChanged(const char* userId, TRTCVideoStreamType streamType, int newWidth, int newHeight) { + } + /// @} ///////////////////////////////////////////////////////////////////////////////// // - // (四)统计和质量回调 + // 网络和技术指标统计回调 // ///////////////////////////////////////////////////////////////////////////////// - /// @name 统计和质量回调 + /// @name 网络和技术指标统计回调 /// @{ + /** - * 4.1 网络质量:该回调每2秒触发一次,统计当前网络的上行和下行质量 - * - * @note userId == null 代表自己当前的视频质量 + * 4.1 网络质量的实时统计回调 * + * 该统计回调每间隔2秒抛出一次,用于通知 SDK 感知到的当前网络的上行和下行质量。 + * SDK 会使用一组内嵌的自研算法对当前网络的延迟高低、带宽大小以及稳定情况进行评估,并计算出一个的评估结果: + * 如果评估结果为 1(Excellent) 代表当前的网络情况非常好,如果评估结果为 6(Down)代表当前网络无法支撑 TRTC 的正常通话。 * @param localQuality 上行网络质量 * @param remoteQuality 下行网络质量 - * @param remoteQualityCount 下行网络质量的数组大小 + * @note 回调参数 localQuality 和 remoteQuality 中的 userId 如果为空置,代表本组数据统计的是自己本地的网络质量,否则是代表远端用户的网络质量。 */ - virtual void onNetworkQuality(TRTCQualityInfo localQuality, TRTCQualityInfo* remoteQuality, uint32_t remoteQualityCount) {} + virtual void onNetworkQuality(TRTCQualityInfo localQuality, TRTCQualityInfo* remoteQuality, uint32_t remoteQualityCount) { + } /** 
- * 4.2 技术指标统计回调 + * 4.2 音视频技术指标的实时统计回调 * - * 如果您是熟悉音视频领域相关术语,可以通过这个回调获取 SDK 的所有技术指标。 - * 如果您是首次开发音视频相关项目,可以只关注 onNetworkQuality 回调。 + * 该统计回调每间隔2秒抛出一次,用于通知 SDK 内部音频、视频以及网络相关的专业技术指标,这些信息在 {@link TRTCStatistics} 均有罗列: + * - 视频统计信息:视频的分辨率(resolution)、帧率(FPS)和比特率(bitrate)等信息。 + * - 音频统计信息:音频的采样率(samplerate)、声道(channel)和比特率(bitrate)等信息。 + * - 网络统计信息:SDK 和云端一次往返(SDK => Cloud => SDK)的网络耗时(rtt)、丢包率(loss)、上行流量(sentBytes)和下行流量(receivedBytes)等信息。 * - * @param statis 统计数据,包括本地和远程的 - * @note 每2秒回调一次 + * @param statistics 统计数据,包括自己本地的统计信息和远端用户的统计信息,详情请参考 {@link TRTCStatistics}。 + * @note 如果您只需要获知当前网络质量的好坏,并不需要花太多时间研究本统计回调,更推荐您使用 {@link onNetworkQuality} 来解决问题。 */ - virtual void onStatistics(const TRTCStatistics& statis) {} - /// @} + virtual void onStatistics(const TRTCStatistics& statistics) { + } + /** + * 4.3 网速测试的结果回调 + * + * 该统计回调由 {@link startSpeedTest:} 触发。 + * + * @param result 网速测试数据数据,包括丢包、往返延迟、上下行的带宽速率,详情请参考 {@link TRTCSpeedTestResult}。 + */ + virtual void onSpeedTestResult(const TRTCSpeedTestResult& result) { + } + /// @} ///////////////////////////////////////////////////////////////////////////////// // - // (五)服务器事件回调 + // 与云端连接情况的事件回调 // ///////////////////////////////////////////////////////////////////////////////// - - /// @name 服务器事件回调 + /// @name 与云端连接情况的事件回调 /// @{ - /** - * 5.1 SDK 跟服务器的连接断开 - */ - virtual void onConnectionLost() {} /** - * 5.2 SDK 尝试重新连接到服务器 + * 5.1 SDK 与云端的连接已经断开 + * + * SDK 会在跟云端的连接断开时抛出此事件回调,导致断开的原因大多是网络不可用或者网络切换所致,比如用户在通话中走进电梯时就可能会遇到此事件。 + * 在抛出此事件之后,SDK 会努力跟云端重新建立连接,重连过程中会抛出 {@link onTryToReconnect},连接恢复后会抛出 {@link onConnectionRecovery} 。 + * 所以,SDK 会在如下三个连接相关的事件中按如下规律切换: + * <pre> + * [onConnectionLost] =====> [onTryToReconnect] =====> [onConnectionRecovery] + * /|\ | + * |------------------------------------------------------| + * </pre> */ - virtual void onTryToReconnect() {} + virtual void onConnectionLost() { + } /** - * 5.3 SDK 跟服务器的连接恢复 + * 5.2 SDK 正在尝试重新连接到云端 + * + * SDK 会在跟云端的连接断开时抛出 {@link 
onConnectionLost},之后会努力跟云端重新建立连接并抛出本事件,连接恢复后会抛出 {@link onConnectionRecovery}。 */ - virtual void onConnectionRecovery() {} + virtual void onTryToReconnect() { + } /** - * 5.4 服务器测速的回调,SDK 对多个服务器 IP 做测速,每个 IP 的测速结果通过这个回调通知 + * 5.3 SDK 与云端的连接已经恢复 * - * @param currentResult 当前完成的测速结果 - * @param finishedCount 已完成测速的服务器数量 - * @param totalCount 需要测速的服务器总数量 + * SDK 会在跟云端的连接断开时抛出 {@link onConnectionLost},之后会努力跟云端重新建立连接并抛出{@link onTryToReconnect},连接恢复后会抛出本事件回调。 */ - virtual void onSpeedTest(const TRTCSpeedTestResult& currentResult, uint32_t finishedCount, uint32_t totalCount) {} - /// @} + virtual void onConnectionRecovery() { + } + /// @} ///////////////////////////////////////////////////////////////////////////////// // - // (六)硬件设备事件回调 + // 硬件设备相关事件回调 // ///////////////////////////////////////////////////////////////////////////////// - /// @name 硬件设备事件回调 + /// @name 硬件设备相关事件回调 /// @{ + /** * 6.1 摄像头准备就绪 + * + * 当您调用 {@link startLocalPreivew} 之后,SDK 会尝试启动摄像头,如果摄像头能够启动成功就会抛出本事件。 + * 如果启动失败,大概率是因为当前应用没有获得访问摄像头的权限,或者摄像头当前正在被其他程序独占使用中。 + * 您可以通过捕获 {@link onError} 事件回调获知这些异常情况并通过 UI 界面提示用户。 */ - virtual void onCameraDidReady() {} + virtual void onCameraDidReady() { + } /** * 6.2 麦克风准备就绪 + * + * 当您调用 {@link startLocalAudio} 之后,SDK 会尝试启动麦克风,如果麦克风能够启动成功就会抛出本事件。 + * 如果启动失败,大概率是因为当前应用没有获得访问麦克风的权限,或者麦克风当前正在被其他程序独占使用中。 + * 您可以通过捕获 {@link onError} 事件回调获知这些异常情况并通过 UI 界面提示用户。 */ - virtual void onMicDidReady() {} + virtual void onMicDidReady() { + } /** - * 6.3 用于提示音量大小的回调,包括每个 userId 的音量和远端总音量 - * - * 您可以通过调用 TRTCCloud 中的 enableAudioVolumeEvaluation 接口来开关这个回调或者设置它的触发间隔。 - * 需要注意的是,调用 enableAudioVolumeEvaluation 开启音量回调后,无论频道内是否有人说话,都会按设置的时间间隔调用这个回调, - * 如果没有人说话,则 userVolumes 为空,totalVolume 为0。 + * 6.4 音量大小的反馈回调 * - * @param userVolumes 所有正在说话的房间成员的音量,取值范围0 - 100。 - * @param userVolumesCount 房间成员数量 - * @param totalVolume 所有远端成员的总音量, 取值范围0 - 100。 - * @note userId 为 null 时表示自己的音量,userVolumes 内仅包含正在说话(音量不为0)的用户音量信息。 + * SDK 可以评估每一路音频的音量大小,并每隔一段时间抛出该事件回调,您可以根据音量大小在 UI 上做出相应的提示,比如“波形图”或“音量槽”。 
+ * 要完成这个功能, 您需要先调用 {@link enableAudioVolumeEvaluation} 开启这个能力并设定事件抛出的时间间隔。 + * 需要补充说明的是,无论当前房间中是否有人说话,SDK 都会按照您设定的时间间隔定时抛出此事件回调,只不过当没有人说话时,userVolumes 为空,totalVolume 为 0。 + * @param userVolumes 是一个数组,用于承载所有正在说话的用户的音量大小,取值范围 0 - 100。 + * @param totalVolume 所有远端用户的总音量大小, 取值范围 0 - 100。 + * @note userVolumes 为一个数组,对于数组中的每一个元素,当 userId 为空时表示本地麦克风采集的音量大小,当 userId 不为空时代表远端用户的音量大小。 */ - virtual void onUserVoiceVolume(TRTCVolumeInfo* userVolumes, uint32_t userVolumesCount, uint32_t totalVolume) {} + virtual void onUserVoiceVolume(TRTCVolumeInfo* userVolumes, uint32_t userVolumesCount, uint32_t totalVolume) { + } +/** + * 6.5 本地设备的通断状态发生变化(仅适用于桌面系统) + * + * 当本地设备(包括摄像头、麦克风以及扬声器)被插入或者拔出时,SDK 便会抛出此事件回调。 + * + * @param deviceId 设备 ID + * @param deviceType 设备类型 + * @param state 通断状态,0:设备已添加;1:设备已被移除;1:设备已启用。 + */ #if TARGET_PLATFORM_DESKTOP - /** - * 6.4 本地设备通断回调 - * - * @param deviceId 设备 ID - * @param type 设备类型 - * @param state 事件类型 - */ - virtual void onDeviceChange(const char* deviceId, TRTCDeviceType type, TRTCDeviceState state) {} + virtual void onDeviceChange(const char* deviceId, TRTCDeviceType type, TRTCDeviceState state) { + } +#endif - /** - * 6.5 麦克风测试音量回调 - * - * 麦克风测试接口 startMicDeviceTest 会触发这个回调 - * - * @param volume 音量值,取值范围0 - 100 - */ - virtual void onTestMicVolume(uint32_t volume) {} +/** + * 6.6 当前麦克风的系统采集音量发生变化 + * + * 在 Mac 或 Windows 这样的桌面操作系统上,用户可以在设置中心找到声音相关的设置面板,并设置麦克风的采集音量大小。 + * 用户将麦克风的采集音量设置得越大,麦克风采集到的声音的原始音量也就会越大,反之就会越小。 + * 在有些型号的键盘以及笔记本电脑上,用户还可以通过按下“禁用麦克风”按钮(图标是一个话筒上上叠加了一道代表禁用的斜线)来将麦克风静音。 + * + * 当用户通过系统设置界面或者通过键盘上的快捷键设定操作系统的麦克风采集音量时,SDK 便会抛出此事件。 + * @param volume 系统采集音量,取值范围 0 - 100,用户可以在系统的声音设置面板上进行拖拽调整。 + * @param muted 麦克风是否被用户禁用了:true 被禁用,false 被启用。 + * @note 您需要调用 {@link enableAudioVolumeEvaluation} 接口并设定(interval>0)开启次事件回调,设定(interval == 0)关闭此事件回调。 + */ +#if TARGET_PLATFORM_DESKTOP + virtual void onAudioDeviceCaptureVolumeChanged(uint32_t volume, bool muted) { + } +#endif - /** - * 6.6 扬声器测试音量回调 - * - * 扬声器测试接口 startSpeakerDeviceTest 
会触发这个回调 - * - * @param volume 音量值,取值范围0 - 100 - */ - virtual void onTestSpeakerVolume(uint32_t volume) {} +/** + * 6.7 当前系统的播放音量发生变化 + * + * 在 Mac 或 Windows 这样的桌面操作系统上,用户可以在设置中心找到声音相关的设置面板,并设置系统的播放音量大小。 + * 在有些型号的键盘以及笔记本电脑上,用户还可以通过按下“静音”按钮(图标是一个喇叭上叠加了一道代表禁用的斜线)来将系统静音。 + * + * 当用户通过系统设置界面或者通过键盘上的快捷键设定操作系统的播放音量时,SDK 便会抛出此事件。 + * @param volume 系统播放音量,取值范围 0 - 100,用户可以在系统的声音设置面板上进行拖拽调整。 + * @param muted 系统是否被用户静音了:true 被静音,false 已恢复。 + * @note 您需要调用 {@link enableAudioVolumeEvaluation} 接口并设定(interval>0)开启次事件回调,设定(interval == 0)关闭此事件回调。 + */ +#if TARGET_PLATFORM_DESKTOP + virtual void onAudioDevicePlayoutVolumeChanged(uint32_t volume, bool muted) { + } +#endif - /** - * 6.7 当前音频采集设备音量变化通知 - * - * @note 使用 enableAudioVolumeEvaluation(interval>0)开启,(interval==0)关闭 - * - * @param volume 音量值,取值范围0 - 100 - * @param muted 当前采集音频设备是否被静音,true:静音;false:取消静音 - */ - virtual void onAudioDeviceCaptureVolumeChanged(uint32_t volume, bool muted) {} +/** + * 6.8 系统声音采集是否被成功开启的事件回调(仅适用于 Mac 系统) + * + * 在 Mac 系统上,您可以通过调用 {@link startSystemAudioLoopback} 为当前系统安装一个音频驱动,并让 SDK 通过该音频驱动捕获当前 Mac 系统播放出的声音。 + * 当用于播片教学或音乐直播中,比如老师端可以使用此功能,让 SDK 能够采集老师所播放的电影中的声音,使同房间的学生端也能听到电影中的声音。 + * SDK 会将统声音采集是否被成功开启的结果,通过本事件回调抛出,需要您关注参数中的错误码。 + * + * @param err ERR_NULL 表示成功,其余值表示失败。 + */ +#if TARGET_PLATFORM_MAC + virtual void onSystemAudioLoopbackError(TXLiteAVError errCode) { + } +#endif - /** - * 6.8 当前音频播放设备音量变化通知 - * - * @note 使用 enableAudioVolumeEvaluation(interval>0)开启,(interval==0)关闭 - * - * @param volume 音量值,取值范围0 - 100 - * @param muted 当前音频播放设备是否被静音,true:静音;false:取消静音 - */ - virtual void onAudioDevicePlayoutVolumeChanged(uint32_t volume, bool muted) {} +/** + * 6.9 测试麦克风时的音量回调 + * + * 当您调用 {@link startMicDeviceTest} 测试麦克风是否正常工作时,SDK 会不断地抛出此回调,参数中的 volume 代表当前麦克风采集到的音量大小。 + * 如果在测试期间 volume 出现了大小波动的情况,说明麦克风状态健康;如果 volume 的数值始终是 0,说明麦克风的状态异常,需要提示用户进行更换。 + * + * @param volume 麦克风采集到的音量值,取值范围0 - 100 + */ +#if TARGET_PLATFORM_DESKTOP + virtual void onTestMicVolume(uint32_t volume) { + } #endif - -#if 
TARGET_PLATFORM_MAC - /** - * 6.9 系统声音采集结果回调 - * - * 系统声音采集接口 startSystemAudioLoopback 会触发这个回调 - * - * @param errCode ERR_NULL 表示成功,其余值表示失败 - */ - virtual void onSystemAudioLoopbackError(TXLiteAVError errCode) {} + +/** + * 6.10 测试扬声器时的音量回调 + * + * 当您调用 {@link startSpeakerDeviceTest} 测试扬声器是否正常工作时,SDK 会不断地抛出此回调。 + * 参数中的 volume 代表的是 SDK 提交给系统扬声器去播放的声音的音量值大小,如果该数值持续变化,但用户反馈听不到声音,则说明扬声器状态异常。 + * + * @param volume SDK 提交给扬声器去播放的声音的音量,取值范围0 - 100 + */ +#if TARGET_PLATFORM_DESKTOP + virtual void onTestSpeakerVolume(uint32_t volume) { + } #endif - /// @} + /// @} ///////////////////////////////////////////////////////////////////////////////// // - // (七)自定义消息的接收回调 + // 自定义消息的接收事件回调 // ///////////////////////////////////////////////////////////////////////////////// - /// @name 自定义消息的接收回调 + /// @name 自定义消息的接收事件回调 /// @{ + /** - * 7.1 收到自定义消息回调 + * 7.1 收到自定义消息的事件回调 * - * 当房间中的某个用户使用 sendCustomCmdMsg 发送自定义消息时,房间中的其它用户可以通过 onRecvCustomCmdMsg 接口接收消息 + * 当房间中的某个用户使用 {@link sendCustomCmdMsg} 发送自定义 UDP 消息时,房间中的其它用户可以通过 onRecvCustomCmdMsg 事件回调接收到该条消息。 * * @param userId 用户标识 * @param cmdID 命令 ID * @param seq 消息序号 * @param message 消息数据 - * @param messageSize 消息数据大小 */ - virtual void onRecvCustomCmdMsg(const char* userId, int32_t cmdID, uint32_t seq, const uint8_t* message, uint32_t messageSize) {} + virtual void onRecvCustomCmdMsg(const char* userId, int32_t cmdID, uint32_t seq, const uint8_t* message, uint32_t messageSize) { + } /** - * 7.2 自定义消息丢失回调 + * 7.2 自定义消息丢失的事件回调 * - * 实时音视频使用 UDP 通道,即使设置了可靠传输(reliable)也无法确保100@%不丢失,只是丢消息概率极低,能满足常规可靠性要求。 - * 在发送端设置了可靠传输(reliable)后,SDK 都会通过此回调通知过去时间段内(通常为5s)传输途中丢失的自定义消息数量统计信息。 + * 当您使用 {@link sendCustomCmdMsg} 发送自定义 UDP 消息时,即使设置了可靠传输(reliable),也无法确保100@%不丢失,只是丢消息概率极低,能满足常规可靠性要求。 + * 在发送端设置了可靠运输(reliable)后,SDK 都会通过此回调通知过去时间段内(通常为5s)传输途中丢失的自定义消息数量统计信息。 * - * @note 只有在发送端设置了可靠传输(reliable),接收方才能收到消息的丢失回调 * @param userId 用户标识 * @param cmdID 命令 ID * @param errCode 错误码 * @param missed 丢失的消息数量 + * @note 只有在发送端设置了可靠传输(reliable),接收方才能收到消息的丢失回调 */ - 
virtual void onMissCustomCmdMsg(const char* userId, int32_t cmdID, int32_t errCode, int32_t missed) {} + virtual void onMissCustomCmdMsg(const char* userId, int32_t cmdID, int32_t errCode, int32_t missed) { + } /** * 7.3 收到 SEI 消息的回调 * - * 当房间中的某个用户使用 sendSEIMsg 发送数据时,房间中的其它用户可以通过 onRecvSEIMsg 接口接收数据。 + * 当房间中的某个用户使用 {@link sendSEIMsg} 借助视频数据帧发送 SEI 消息时,房间中的其它用户可以通过 onRecvSEIMsg 事件回调接收到该条消息。 * * @param userId 用户标识 * @param message 数据 - * @param messageSize 数据大小 */ - virtual void onRecvSEIMsg(const char* userId, const uint8_t* message, uint32_t messageSize) {}; - /// @} + virtual void onRecvSEIMsg(const char* userId, const uint8_t* message, uint32_t messageSize) { + } + /// @} ///////////////////////////////////////////////////////////////////////////////// // - // (八)CDN 旁路转推回调 + // CDN 相关事件回调 // ///////////////////////////////////////////////////////////////////////////////// - /// @name CDN 旁路转推回调 + /// @name CDN 相关事件回调 /// @{ + /** - * 8.1 开始向腾讯云的直播 CDN 推流的回调,对应于 TRTCCloud 中的 startPublishing() 接口 + * 8.1 开始向腾讯云直播 CDN 上发布音视频流的事件回调 + * + * 当您调用 {@link startPublishing} 开始向腾讯云直播 CDN 上发布音视频流时,SDK 会立刻将这一指令同步给云端服务器。 + * 随后 SDK 会收到来自云端服务器的处理结果,并将指令的执行结果通过本事件回调通知给您。 * * @param err 0表示成功,其余值表示失败 * @param errMsg 具体错误原因 */ - virtual void onStartPublishing(int err, const char *errMsg) {}; + virtual void onStartPublishing(int err, const char* errMsg) { + } /** - * 8.2 停止向腾讯云的直播 CDN 推流的回调,对应于 TRTCCloud 中的 stopPublishing() 接口 + * 8.2 停止向腾讯云直播 CDN 上发布音视频流的事件回调 + * + * 当您调用 {@link stopPublishing} 停止向腾讯云直播 CDN 上发布音视频流时,SDK 会立刻将这一指令同步给云端服务器。 + * 随后 SDK 会收到来自云端服务器的处理结果,并将指令的执行结果通过本事件回调通知给您。 * * @param err 0表示成功,其余值表示失败 * @param errMsg 具体错误原因 */ - virtual void onStopPublishing(int err, const char *errMsg) {}; + virtual void onStopPublishing(int err, const char* errMsg) { + } /** - * 8.3 启动旁路推流到 CDN 完成的回调 - * - * 对应于 TRTCCloud 中的 startPublishCDNStream() 接口 + * 8.3 开始向非腾讯云 CDN 上发布音视频流的事件回调 * - * @note Start 回调如果成功,只能说明转推请求已经成功告知给腾讯云,如果目标 CDN 有异常,还是有可能会转推失败。 + * 当您调用 {@link 
startPublishCDNStream} 开始向非腾讯云直播 CDN 上发布音视频流时,SDK 会立刻将这一指令同步给云端服务器。 + * 随后 SDK 会收到来自云端服务器的处理结果,并将指令的执行结果通过本事件回调通知给您。 + * @param err 0表示成功,其余值表示失败 + * @param errMsg 具体错误原因 + * @note 当您收到成功的事件回调时,只是说明您的发布指令已经同步到腾讯云后台服务器,但如果目标 CDN 厂商的服务器不接收该条视频流,依然可能导致发布失败。 */ - virtual void onStartPublishCDNStream(int errCode, const char* errMsg) {}; + virtual void onStartPublishCDNStream(int errCode, const char* errMsg) { + } /** - * 8.4 停止旁路推流到 CDN 完成的回调 + * 8.4 停止向非腾讯云 CDN 上发布音视频流的事件回调 * - * 对应于 TRTCCloud 中的 stopPublishCDNStream() 接口 + * 当您调用 {@link stopPublishCDNStream} 开始向非腾讯云直播 CDN 上发布音视频流时,SDK 会立刻将这一指令同步给云端服务器。 + * 随后 SDK 会收到来自云端服务器的处理结果,并将指令的执行结果通过本事件回调通知给您。 * + * @param err 0表示成功,其余值表示失败 + * @param errMsg 具体错误原因 */ - virtual void onStopPublishCDNStream(int errCode, const char* errMsg) {}; + virtual void onStopPublishCDNStream(int errCode, const char* errMsg) { + } /** - * 8.5 设置云端的混流转码参数的回调,对应于 TRTCCloud 中的 setMixTranscodingConfig() 接口 + * 8.5 设置云端混流的排版布局和转码参数的事件回调 * - * @param errCode 0表示成功,其余值表示失败 - * @param errMsg 具体错误原因 + * 当您调用 {@link setMixTranscodingConfig} 调整云端混流的排版布局和转码参数时,SDK 会立刻将这一调整指令同步给云端服务器。 + * 随后 SDK 会收到来自云端服务器的处理结果,并将指令的执行结果通过本事件回调通知给您。 + * + * @param err 错误码:0表示成功,其余值表示失败。 + * @param errMsg 具体的错误原因。 */ - virtual void onSetMixTranscodingConfig(int errCode, const char* errMsg) {}; - /// @} + virtual void onSetMixTranscodingConfig(int err, const char* errMsg) { + } + /// @} ///////////////////////////////////////////////////////////////////////////////// // - // (九)屏幕分享回调 - // + // 屏幕分享相关事件回调 // ///////////////////////////////////////////////////////////////////////////////// - /// @name 屏幕分享回调 + /// @name 屏幕分享相关事件回调 /// @{ -#ifdef _WIN32 - /** - * 9.1 当屏幕分享窗口被遮挡无法正常捕获时,SDK 会通过此回调通知,可在此回调里通知用户移开遮挡窗口 - * 目前只支持Windows平台 - */ - virtual void onScreenCaptureCovered() {}; -#endif - /** - * 9.2 当屏幕分享开始时,SDK 会通过此回调通知 - */ - virtual void onScreenCaptureStarted() {}; /** - * 9.3 当屏幕分享暂停时,SDK 会通过此回调通知 + * 9.1 屏幕分享开启的事件回调 * - * @param reason 
停止原因,0:表示用户主动暂停;1:表示设置屏幕分享参数导致的暂停;2:表示屏幕分享窗口被最小化导致的暂停;3:表示屏幕分享窗口被隐藏导致的暂停 + * 当您通过 {@link startScreenCapture} 等相关接口启动屏幕分享时,SDK 便会抛出此事件回调。 */ - virtual void onScreenCapturePaused(int reason) {}; + virtual void onScreenCaptureStarted() { + } /** - * 9.4 当屏幕分享恢复时,SDK 会通过此回调通知 + * 9.2 屏幕分享暂停的事件回调 * - * @param reason 停止原因,0:表示用户主动恢复,1:表示屏幕分享参数设置完毕后自动恢复;2:表示屏幕分享窗口从最小化被恢复;3:表示屏幕分享窗口从隐藏被恢复 + * 当您通过 {@link pauseScreenCapture} 暂停屏幕分享时,SDK 便会抛出此事件回调。 + * @param reason 原因。 + * - 0:用户主动暂停。 + * - 1:注意此字段的含义在 MAC 和 Windows 平台有稍微差异。屏幕窗口不可见暂停(Mac)。表示设置屏幕分享参数导致的暂停(Windows)。 + * - 2:表示屏幕分享窗口被最小化导致的暂停(仅 Windows)。 + * - 3:表示屏幕分享窗口被隐藏导致的暂停(仅 Windows)。 */ - virtual void onScreenCaptureResumed(int reason) {}; + virtual void onScreenCapturePaused(int reason) { + } /** - * 9.5 当屏幕分享停止时,SDK 会通过此回调通知 + * 9.3 屏幕分享恢复的事件回调 * - * @param reason 停止原因,0:表示用户主动停止;1:表示屏幕分享窗口被关闭 + * 当您通过 {@link resumeScreenCapture} 恢复屏幕分享时,SDK 便会抛出此事件回调。 + * @param reason 恢复原因。 + * - 0:用户主动恢复。 + * - 1:注意此字段的含义在 MAC 和 Windows 平台有稍微差异。屏幕窗口恢复可见从而恢复分享(Mac)。屏幕分享参数设置完毕后自动恢复(Windows) + * - 2:表示屏幕分享窗口从最小化被恢复(仅 Windows)。 + * - 3:表示屏幕分享窗口从隐藏被恢复(仅 Windows)。 */ - virtual void onScreenCaptureStoped(int reason) {}; - /// @} - - - ///////////////////////////////////////////////////////////////////////////////// - // - // (十)截图回调 - // - ///////////////////////////////////////////////////////////////////////////////// - /// @name 截图回调 - /// @{ - /** - * 10.1 截图完成时回调 - * - * @param userId 用户 ID,空字符串表示截取本地画面 - * @param type 视频流类型 - * @param data 截图数据,为 nullptr 表示截图失败 - * @param length 截图数据长度,对于BGRA32而言,length = width * height * 4 - * @param width 截图画面的宽度 - * @param height 截图画面的高度 - * @param format 截图数据格式,目前只支持 TRTCVideoPixelFormat_BGRA32 - */ - virtual void onSnapshotComplete(const char* userId, TRTCVideoStreamType type, char* data, - uint32_t length, uint32_t width, uint32_t height, - TRTCVideoPixelFormat format) { + virtual void onScreenCaptureResumed(int reason) { } - /// @} - 
///////////////////////////////////////////////////////////////////////////////// - // - // (十一)本地录制回调 - // - ///////////////////////////////////////////////////////////////////////////////// - /// @name 本地录制回调 - /// @{ + /** - * 11.1 录制任务已经开始 + * 9.4 屏幕分享停止的事件回调 * - * @param errCode 错误码 0:初始化录制成功;-1:初始化录制失败;-2: 文件后缀名有误。 - - * @param storagePath 录制文件存储路径 + * 当您通过 {@link stopScreenCapture} 停止屏幕分享时,SDK 便会抛出此事件回调。 + * @param reason 停止原因,0:用户主动停止;1:屏幕窗口关闭导致停止;2:表示屏幕分享的显示屏状态变更(如接口被拔出、投影模式变更等)。 */ - virtual void onLocalRecordBegin(int errCode, const char* storagePath) {} + virtual void onScreenCaptureStoped(int reason) { + } - /** - * 11.2 录制任务进行中 - * @param duration 已经录制的累计时长,单位毫秒 - * @param storagePath 录制文件存储路径 - */ - virtual void onLocalRecording(long duration, const char* storagePath) {} - /** - * 11.3 录制任务已结束 - * - * @param errCode 错误码 0:录制成功;-1:录制失败;-2:切换分辨率或横竖屏导致录制结束。 +/** + * 9.5 屏幕分享的目标窗口被遮挡的事件回调(仅适用于 Windows 操作系统) + * + * 当屏幕分享的目标窗口被遮挡无法正常捕获时,SDK 会抛出此事件回调,你可以在捕获到该事件回调后,通过 UI 上的一些变化来提示用户移开遮盖窗口。 + */ +#ifdef _WIN32 + virtual void onScreenCaptureCovered() { + } +#endif - * @param storagePath 录制文件存储路径 - */ - virtual void onLocalRecordComplete(int errCode, const char* storagePath) {} /// @} - ///////////////////////////////////////////////////////////////////////////////// // - // (十二)Windows 专有废弃方法 + // 本地录制和本地截图的事件回调 // ///////////////////////////////////////////////////////////////////////////////// - /// @name Windows 专有废弃方法 + /// @name 本地录制和本地截图的事件回调 /// @{ -#ifdef _WIN32 + /** - * 11.1 废弃接口:有主播加入当前房间 - * - * 该回调接口可以被看作是 onRemoteUserEnterRoom 的废弃版本,不推荐使用。请使用 onUserVideoAvailable 或 onRemoteUserEnterRoom 进行替代。 + * 10.1 本地录制任务已经开始的事件回调 * - * @note 该接口已被废弃,不推荐使用 - * - * @param userId 用户标识 + * 当您调用 {@link startLocalRecording} 启动本地媒体录制任务时,SDK 会抛出该事件回调,用于通知您录制任务是否已经顺利启动。 + * @param errCode 错误码 0:初始化录制成功;-1:初始化录制失败;-2: 文件后缀名有误。 + * @param storagePath 录制文件存储路径 */ - virtual __declspec(deprecated("use onRemoteUserEnterRoom instead")) - void onUserEnter(const char* userId) 
{} + virtual void onLocalRecordBegin(int errCode, const char* storagePath) { + } /** - * 11.2 废弃接口:有主播离开当前房间 + * 10.2 本地录制任务正在进行中的进展事件回调 * - * 该回调接口可以被看作是 onRemoteUserLeaveRoom 的废弃版本,不推荐使用。请使用 onUserVideoAvailable 或 onRemoteUserLeaveRoom 进行替代。 + * 当您调用 {@link startLocalRecording} 成功启动本地媒体录制任务后,SDK 变会定时地抛出本事件回调。 + * 您可通过捕获该事件回调来获知录制任务的健康状况。 + * 您可以在 {@link startLocalRecording} 时设定本事件回调的抛出间隔。 * - * @note 该接口已被废弃,不推荐使用 - * - * @param userId 用户标识 - * @param reason 离开原因。 + * @param duration 已经录制的累计时长,单位毫秒 + * @param storagePath 录制文件存储路径 */ - virtual __declspec(deprecated("use onRemoteUserLeaveRoom instead")) - void onUserExit(const char* userId, int reason) {} + virtual void onLocalRecording(long duration, const char* storagePath) { + } /** - * 11.3 废弃接口:播放音效结束回调 + * 10.3 本地录制任务已经结束的事件回调 * - * @param effectId 音效id - * @param code 0表示播放正常结束;其他表示异常结束 + * 当您调用 {@link stopLocalRecording} 停止本地媒体录制任务时,SDK 会抛出该事件回调,用于通知您录制任务的最终结果。 + * @param errCode 错误码 0:录制成功;-1:录制失败;-2:切换分辨率或横竖屏导致录制结束;-3:音频数据或者视频数据一直没有到达导致没有开始正式录制。 + * @param storagePath 录制文件存储路径 */ - virtual __declspec(deprecated("use ITXAudioEffectManager.startPlayMusic instead")) - void onAudioEffectFinished(int effectId, int code) {}; + virtual void onLocalRecordComplete(int errCode, const char* storagePath) { + } /** - * 11.4 废弃接口:开始播放背景音乐 + * 10.4 本地截图完成的事件回调 * - * @param errCode 错误码 + * @param userId 用户标识,如果 userId 为空字符串,则代表截取的是本地画面。 + * @param type 视频流类型 + * @param data 截图数据,为 nullptr 表示截图失败 + * @param length 截图数据长度,对于BGRA32而言,length = width * height * 4 + * @param width 截图画面的宽度 + * @param height 截图画面的高度 + * @param format 截图数据格式,目前只支持 TRTCVideoPixelFormat_BGRA32 + * @param bmp 截图结果,如果 bmp 为 null 代表本次截图操作失败。 + * @note 全平台 C++ 接口和 Java 接口在参数上是不一样的,C++ 接口用 7 个参数描述一个截图画面,Java 接口只用一个 Bitmap 描述一个截图画面 */ - virtual __declspec(deprecated("use ITXAudioEffectManager.ITXMusicPlayObserver instead")) - void onPlayBGMBegin(TXLiteAVError errCode) {} + virtual void onSnapshotComplete(const char* userId, TRTCVideoStreamType type, 
char* data, uint32_t length, uint32_t width, uint32_t height, TRTCVideoPixelFormat format) { + } - /** - * 11.5 废弃接口:播放背景音乐的进度 - * - * @param progressMS 已播放时间 - * @param durationMS 总时间 - */ - virtual __declspec(deprecated("use ITXAudioEffectManager.ITXMusicPlayObserver instead")) - void onPlayBGMProgress(uint32_t progressMS, uint32_t durationMS) {} +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// 废弃的事件回调(建议使用对应的新回调) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name 废弃的事件回调(建议使用对应的新回调) +/// @{ + +/** + * 有主播加入当前房间(已废弃) + * + * @deprecated 新版本开始不推荐使用,建议使用 {@link onRemoteUserEnterRoom} 替代之。 + */ +#ifdef _WIN32 + virtual __declspec(deprecated("use onRemoteUserEnterRoom instead")) void onUserEnter(const char* userId) { + } +#endif + +/** + * 有主播离开当前房间(已废弃) + * + * @deprecated 新版本开始不推荐使用,建议使用 {@link onRemoteUserLeaveRoom} 替代之。 + */ +#ifdef _WIN32 + virtual __declspec(deprecated("use onRemoteUserLeaveRoom instead")) void onUserExit(const char* userId, int reason) { + } +#endif + +/** + * 音效播放已结束(已废弃) + * + * @deprecated 新版本开始不推荐使用,建议使用 {@link ITXAudioEffectManager} 接口替代之。 + * 新的接口中不再区分背景音乐和音效,而是统一用 {@link startPlayMusic} 取代之。 + */ +#ifdef _WIN32 + virtual __declspec(deprecated("use ITXAudioEffectManager.ITXMusicPlayObserver instead")) void onAudioEffectFinished(int effectId, int code) { + } +#endif + +/** + * 开始播放背景音乐(已废弃) + * + * @deprecated 新版本开始不推荐使用,建议使用 {@link ITXMusicPlayObserver} 接口替代之。 + * 新的接口中不再区分背景音乐和音效,而是统一用 {@link startPlayMusic} 取代之。 + */ +#ifdef _WIN32 + virtual __declspec(deprecated("use ITXAudioEffectManager.ITXMusicPlayObserver instead")) void onPlayBGMBegin(TXLiteAVError errCode) { + } +#endif + +/** + * 背景音乐的播放进度回调(已废弃) + * + * @deprecated 新版本开始不推荐使用,建议使用 {@link ITXMusicPlayObserver} 接口替代之。 + * 新的接口中不再区分背景音乐和音效,而是统一用 {@link startPlayMusic} 取代之。 + */ +#ifdef _WIN32 + virtual __declspec(deprecated("use ITXAudioEffectManager.ITXMusicPlayObserver instead")) 
void onPlayBGMProgress(uint32_t progressMS, uint32_t durationMS) { + } +#endif + +/** + * 背景音乐播放已经结束(已废弃) + * + * @deprecated 新版本开始不推荐使用,建议使用 {@link ITXMusicPlayObserver} 接口替代之。 + * 新的接口中不再区分背景音乐和音效,而是统一用 {@link startPlayMusic} 取代之。 + */ +#ifdef _WIN32 + virtual __declspec(deprecated("use ITXAudioEffectManager.ITXMusicPlayObserver instead")) void onPlayBGMComplete(TXLiteAVError errCode) { + } +#endif + +/** + * 服务器测速的结果回调(已废弃) + * + * @deprecated 新版本开始不推荐使用,建议使用 {@link onSpeedTestResult:} 接口替代之。 + */ +#ifdef _WIN32 + virtual __declspec(deprecated("use onSpeedTestResult instead")) void onSpeedTest(const TRTCSpeedTestResult& currentResult, uint32_t finishedCount, uint32_t totalCount) { + } +#elif defined(__APPLE__) + virtual void onSpeedTest(const TRTCSpeedTestResult& currentResult, uint32_t finishedCount, uint32_t totalCount) { + } + __attribute__((deprecated("use onSpeedTestResult instead"))); +#else + virtual void onSpeedTest(const TRTCSpeedTestResult& currentResult, uint32_t finishedCount, uint32_t totalCount) { + } +#endif - /** - * 11.6 废弃接口:播放背景音乐结束 - * - * @param errCode 错误码 - */ - virtual __declspec(deprecated("use ITXAudioEffectManager.ITXMusicPlayObserver instead")) - void onPlayBGMComplete(TXLiteAVError errCode) {} -#endif // _WIN32 /// @} -}; +}; // End of interface ITRTCCloudCallback ///////////////////////////////////////////////////////////////////////////////// // -// (十三)自定义视频渲染回调 +// 视频数据自定义回调 // ///////////////////////////////////////////////////////////////////////////////// +/// @name 视频数据自定义回调 +/// @{ + +class ITRTCVideoRenderCallback { + public: + virtual ~ITRTCVideoRenderCallback() { + } -/// 自定义视频渲染回调 -class ITRTCVideoRenderCallback -{ -public: - virtual ~ITRTCVideoRenderCallback() {} /** - * 12.1 自定义视频渲染回调 - * - * 可以通过 setLocalVideoRenderCallback 和 setRemoteVideoRenderCallback 接口设置自定义渲染回调 - * - * @param userId 用户标识 - * @param streamType 流类型:即摄像头还是屏幕分享 - * @param frame 视频帧数据 + * 自定义视频渲染回调 * - * @note - 
在iOS和Mac平台上回调的视频帧为TRTCVideoBufferType_Buffer类型 + * 当您设置了本地或者远端的视频自定义渲染回调之后,SDK 就会将原本要交给渲染控件进行渲染的视频帧通过此回调接口抛送给您,以便于您进行自定义渲染。 + * @param frame 待渲染的视频帧信息 + * @param userId 视频源的 userId,如果是本地视频回调(setLocalVideoRenderDelegate),该参数可以忽略 + * @param streamType 频流类型:主路(Main)一般用于承载摄像头画面,辅路(Sub)一般用于承载屏幕分享画面。 */ - virtual void onRenderVideoFrame(const char* userId, TRTCVideoStreamType streamType, TRTCVideoFrame* frame) {} -}; + virtual void onRenderVideoFrame(const char* userId, TRTCVideoStreamType streamType, TRTCVideoFrame* frame) { + } + +}; // End of interface ITRTCVideoRenderCallback + +class ITRTCVideoFrameCallback { + public: + virtual ~ITRTCVideoFrameCallback() { + } + /** + * 用于对接第三方美颜组件的视频处理回调 + * + * 如果您选购了第三方美颜组件,就需要在 TRTCCloud 中设置第三方美颜回调,之后 TRTC 就会将原本要进行预处理的视频帧通过此回调接口抛送给您。 + * 之后您就可以将 TRTC 抛出的视频帧交给第三方美颜组件进行图像处理,由于抛出的数据是可读且可写的,因此第三方美颜的处理结果也可以同步给 TRTC 进行后续的编码和发送。 + * 情况一:美颜组件自身会产生新的纹理 + * 如果您使用的美颜组件会在处理图像的过程中产生一帧全新的纹理(用于承载处理后的图像),那请您在回调函数中将 dstFrame.textureId 设置为新纹理的 ID: + * + * 情况二:美颜组件需要您提供目标纹理 + * 如果您使用的第三方美颜模块并不生成新的纹理,而是需要您设置给该模块一个输入纹理和一个输出纹理,则可以考虑如下方案: + * ```ObjectiveC + * uint32_t onProcessVideoFrame(TRTCVideoFrame * _Nonnull)srcFrame dstFrame:(TRTCVideoFrame * _Nonnull)dstFrame{ + * thirdparty_process(srcFrame.textureId, srcFrame.width, srcFrame.height, dstFrame.textureId); + * return 0; + * } + * ``` + * ```java + * int onProcessVideoFrame(TRTCCloudDef.TRTCVideoFrame srcFrame, TRTCCloudDef.TRTCVideoFrame dstFrame) { + * thirdparty_process(srcFrame.texture.textureId, srcFrame.width, srcFrame.height, dstFrame.texture.textureId); + * return 0; + * } + * ``` + * @param srcFrame 用于承载 TRTC 采集到的摄像头画面 + * @param dstFrame 用于接收第三方美颜处理过的视频画面 + * @note 目前仅支持 OpenGL 纹理方案( PC 仅支持 TRTCVideoBufferType_Buffer 格式)。 + */ + virtual int onProcessVideoFrame(TRTCVideoFrame* srcFrame, TRTCVideoFrame* dstFrame) { + return 0; + } + +}; // End of class ITRTCVideoFrameCallback + +/// @} ///////////////////////////////////////////////////////////////////////////////// // -// (十四)音频数据回调 
+// 音频数据自定义回调 // ///////////////////////////////////////////////////////////////////////////////// +/// @name 音频数据自定义回调 +/// @{ + +class ITRTCAudioFrameCallback { + public: + virtual ~ITRTCAudioFrameCallback() { + } -/// 音频数据回调 -class ITRTCAudioFrameCallback -{ -public: - virtual ~ITRTCAudioFrameCallback() {} /** - * 13.1 本地麦克风采集到的音频数据回调 + * 本地采集并经过音频模块前处理后的音频数据回调 + * + * 当您设置完音频数据自定义回调之后,SDK 内部会把刚采集到并经过前处理(ANS、AEC、AGC)之后的数据,以 PCM 格式的形式通过本接口回调给您。 + * - 此接口回调出的音频时间帧长固定为0.02s,格式为 PCM 格式。 + * - 由时间帧长转化为字节帧长的公式为【采样率 × 时间帧长 × 声道数 × 采样点位宽】。 + * - 以 TRTC 默认的音频录制格式48000采样率、单声道、16采样点位宽为例,字节帧长为【48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 * - * @param frame 音频数据 - * @note - 请不要在此回调函数中做任何耗时操作,建议直接拷贝到另一线程进行处理,否则会导致各种声音问题。 - * @note - 此接口回调出的音频数据支持修改。 - * @note - 此接口回调出的音频时间帧长固定为0.02s。 - 由时间帧长转化为字节帧长的公式为【采样率 × 时间帧长 × 声道数 × 采样点位宽】。 - 以SDK默认的音频录制格式48000采样率、单声道、16采样点位宽为例,字节帧长为【48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 - * @note - 此接口回调出的音频数据包含背景音、音效、混响等前处理效果。 + * @param frame PCM 格式的音频数据帧 + * @note + * 1. 请不要在此回调函数中做任何耗时操作,由于 SDK 每隔 20ms 就要处理一帧音频数据,如果您的处理时间超过 20ms,就会导致声音异常。 + * 2. 此接口回调出的音频数据是可读写的,也就是说您可以在回调函数中同步修改音频数据,但请保证处理耗时。 + * 3. 
此接口回调出的音频数据已经经过了前处理(ANS、AEC、AGC),但**不包含**背景音、音效、混响等前处理效果,延迟较低。 */ - virtual void onCapturedAudioFrame(TRTCAudioFrame *frame) {}; + virtual void onCapturedRawAudioFrame(TRTCAudioFrame* frame) { + } -#if TARGET_PLATFORM_PHONE /** - * 13.2 本地采集并经过音频模块前处理后的音频数据回调 + * 本地采集并经过音频模块前处理、音效处理和混 BGM 后的音频数据回调 * - * @param frame 音频数据 - * @note - 请不要在此回调函数中做任何耗时操作,建议直接拷贝到另一线程进行处理,否则会导致各种声音问题。 - * @note - 此接口回调出的音频数据包含背景音、音效、混响等前处理效果,延迟较高。 - * @note - 此接口回调出的音频数据支持修改。 - * @note - 此接口回调出的音频时间帧长固定为0.02s。 - 由时间帧长转化为字节帧长的公式为【采样率 × 时间帧长 × 声道数 × 采样点位宽】。 - 以SDK默认的音频录制格式48000采样率、单声道、16采样点位宽为例,字节帧长为【48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 + * 当您设置完音频数据自定义回调之后,SDK 内部会把刚采集到并经过前处理、音效处理和混 BGM 之后的数据,在最终进行网络编码之前,以 PCM 格式的形式通过本接口回调给您。 + * - 此接口回调出的音频时间帧长固定为0.02s,格式为 PCM 格式。 + * - 由时间帧长转化为字节帧长的公式为【采样率 × 时间帧长 × 声道数 × 采样点位宽】。 + * - 以 TRTC 默认的音频录制格式48000采样率、单声道、16采样点位宽为例,字节帧长为【48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 + * + * 特殊说明: + * 您可以通过设置接口中的 `TRTCAudioFrame.extraData` 字段,达到传输信令的目的。 + * 由于音频帧头部的数据块不能太大,建议您写入 `extraData` 时,尽量将信令控制在几个字节的大小,如果超过 100 个字节,写入的数据不会被发送。 + * 房间内其他用户可以通过 {@link TRTCAudioFrameDelegate} 中的 `onRemoteUserAudioFrame` 中的 `TRTCAudioFrame.extraData` 字段回调接收数据。 + * + * @param frame PCM 格式的音频数据帧 + * @note + * 1. 请不要在此回调函数中做任何耗时操作,由于 SDK 每隔 20ms 就要处理一帧音频数据,如果您的处理时间超过 20ms,就会导致声音异常。 + * 2. 此接口回调出的音频数据是可读写的,也就是说您可以在回调函数中同步修改音频数据,但请保证处理耗时。 + * 3. 
此接口回调出的数据已经经过了前处理(ANS、AEC、AGC)、音效和混 BGM 处理,声音的延迟相比于 {@link onCapturedRawAudioFrame} 要高一些。 */ - virtual void onLocalProcessedAudioFrame(TRTCAudioFrame *frame) {}; -#endif + virtual void onLocalProcessedAudioFrame(TRTCAudioFrame* frame) { + } /** - * 13.3 混音前的每一路远程用户的音频数据(例如您要对某一路的语音进行文字转换,必须要使用这里的原始数据,而不是混音之后的数据) + * 混音前的每一路远程用户的音频数据 + * + * 当您设置完音频数据自定义回调之后,SDK 内部会把远端的每一路原始数据,在最终混音之前,以 PCM 格式的形式通过本接口回调给您。 + * - 此接口回调出的音频时间帧长固定为0.02s,格式为 PCM 格式。 + * - 由时间帧长转化为字节帧长的公式为【采样率 × 时间帧长 × 声道数 × 采样点位宽】。 + * - 以 TRTC 默认的音频录制格式48000采样率、单声道、16采样点位宽为例,字节帧长为【48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 * - * @param frame 音频数据 - * @param userId 用户标识 - * @note - 请不要在此回调函数中做任何耗时操作,建议直接拷贝到另一线程进行处理,否则会导致各种声音问题。 - * - 此接口回调出的音频数据是只读的,不支持修改。 + * @param frame PCM 格式的音频数据帧 + * @param userId 用户标识 + * @note 此接口回调出的音频数据是只读的,不支持修改 */ - virtual void onPlayAudioFrame(TRTCAudioFrame *frame, const char* userId) {}; + virtual void onPlayAudioFrame(TRTCAudioFrame* frame, const char* userId) { + } /** - * 13.4 各路音频数据混合后送入喇叭播放的音频数据 + * 将各路待播放音频混合之后并在最终提交系统播放之前的数据回调 + * + * 当您设置完音频数据自定义回调之后,SDK 内部会把各路待播放的音频混合之后的音频数据,在提交系统播放之前,以 PCM 格式的形式通过本接口回调给您。 + * - 此接口回调出的音频时间帧长固定为0.02s,格式为 PCM 格式。 + * - 由时间帧长转化为字节帧长的公式为【采样率 × 时间帧长 × 声道数 × 采样点位宽】。 + * - 以 TRTC 默认的音频录制格式48000采样率、单声道、16采样点位宽为例,字节帧长为【48000 × 0.02s × 1 × 16bit = 15360bit = 1920字节】。 * - * @param frame 音频数据 - * @note - 请不要在此回调函数中做任何耗时操作,建议直接拷贝到另一线程进行处理,否则会导致各种声音问题。 - * @note - 此接口回调出的音频数据支持修改。 - * @note - 此接口回调出的音频时间帧长固定为0.02s。 - 由时间帧长转化为字节帧长的公式为【采样率 × 时间帧长 × 声道数 × 采样点位宽】。 - 以SDK默认的音频播放格式48000采样率、双声道、16采样点位宽为例,字节帧长为【48000 × 0.02s × 2 × 16bit = 30720bit = 3840字节】。 - * @note - 此接口回调出的音频数据是各路音频播放数据的混合,不包含耳返的音频数据。 + * @param frame PCM 格式的音频数据帧 + * @note + * 1. 请不要在此回调函数中做任何耗时操作,由于 SDK 每隔 20ms 就要处理一帧音频数据,如果您的处理时间超过 20ms,就会导致声音异常。 + * 2. 此接口回调出的音频数据是可读写的,也就是说您可以在回调函数中同步修改音频数据,但请保证处理耗时。 + * 3. 
此接口回调出的是对各路待播放音频数据的混合,但其中并不包含耳返的音频数据。 */ - virtual void onMixedPlayAudioFrame(TRTCAudioFrame *frame) {}; -}; + virtual void onMixedPlayAudioFrame(TRTCAudioFrame* frame) { + } + +}; // End of interface ITRTCAudioFrameCallback +/// @} ///////////////////////////////////////////////////////////////////////////////// // -// (十五)Log 信息回调 +// 更多事件回调接口 // ///////////////////////////////////////////////////////////////////////////////// +/// @name 更多事件回调接口 +/// @{ + +class ITRTCLogCallback { + public: + virtual ~ITRTCLogCallback() { + } -/// 日志相关回调 -class ITRTCLogCallback -{ -public: - virtual ~ITRTCLogCallback() {} /** - * 14.1 有日志打印时的回调 + * 本地 LOG 的打印回调 * + * 如果您希望捕获 SDK 的本地日志打印行为,可以通过设置日志回调,让 SDK 将要打印的日志都通过本回调接口抛送给您。 * @param log 日志内容 - * @param level 日志等级 参见 TRTCLogLevel - * @param module 暂无具体意义,目前为固定值 TXLiteAVSDK + * @param level 日志等级 参见 TRTC_LOG_LEVEL + * @param module 保留字段,暂无具体意义,目前为固定值 TXLiteAVSDK */ - virtual void onLog(const char* log, TRTCLogLevel level, const char* module) {} -}; + virtual void onLog(const char* log, TRTCLogLevel level, const char* module) { + } -/// @} -} +}; // End of interface ITRTCLogCallback -#endif /* __TRTCENGINECALLBACK_H__ */ +/// @} +} /* namespace liteav*/ +#endif /* __TRTCCLOUDCALLBACK_H__ */ +/// @} diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TRTCTypeDef.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TRTCTypeDef.h index e4f83cc..3669f41 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TRTCTypeDef.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TRTCTypeDef.h @@ -1,10 +1,10 @@ -/* +/** * Module: TRTC 关键类型定义 - * * Function: 分辨率、质量等级等枚举和常量值的定义 - * */ - +/// @defgroup TRTCCloudDef_cplusplus 关键类型定义 +/// 腾讯云实时音视频的关键类型定义 +/// @{ #ifndef __TRTCTYPEDEF_H__ #define __TRTCTYPEDEF_H__ @@ -31,50 +31,12 @@ #define TRTC_API #endif -#define TARGET_PLATFORM_DESKTOP (__APPLE__ && TARGET_OS_MAC && !TARGET_OS_IPHONE) || _WIN32 -#define 
TARGET_PLATFORM_PHONE __ANDROID__ || (__APPLE__ && TARGET_OS_IOS) -#define TARGET_PLATFORM_MAC __APPLE__ && TARGET_OS_MAC && !TARGET_OS_IPHONE - -namespace trtc { - -/** - * 渲染控件 - * - * TXView 根据编译平台的不同,做不同的类型转换,来保证兼容性 - * Windows 平台:请传入渲染控件的 HWND; - * iOS 平台:请传入 UIView 对象的指针(需强转为 void * 类型); - * Mac 平台:请传入 NSView 对象的指针(需强转为 void * 类型); - * Android 平台:请传入指向 TXCloudVideoView 对象的 jobject 指针(需强转为 void * 类型); - * - * iOS、Windows、Mac 代码示例: - * <pre> - * //以基于 QT 通过 C++ 接口进行 iOS、Windows、Mac 开发,调用 startRemoteView 为例 - * QWidget *videoView; //此处省略设置 videoView 属性的代码 - * getTRTCShareInstance()->startRemoteView(userId, TRTCVideoStreamTypeBig, reinterpret_cast<TXView>(videoView->winId())); - * <pre> - * - * Android 代码示例: - * <pre> - * //以基于 Android Studio 通过 C++ 接口进行 Android 开发,调用 startRemoteView 为例 - * //声明 native 方法,形参类型为 TXCloudVideoView - * native void nativeStartRemoteView(String userId, int streamType, TXCloudVideoView view); - * //在 native 方法的实现中,C++ 层可以获取到 jobject 类型的 TXCloudVideoView 对象 - * Java_com_example_test_MainActivity_nativeStartRemoteView(JNIEnv *env, jobject thiz, jstring user_id, jint stream_type, jobject view) { - * const char *user_id_chars = env->GetStringUTFChars(user_id, nullptr); - * trtc_cloud->startRemoteView(user_id_chars, (trtc::TRTCVideoStreamType)stream_type, view); - * env->ReleaseStringUTFChars(user_id, user_id_chars); - * } - * <pre> - */ -#ifdef _WIN32 -typedef HWND TXView; -#else -typedef void * TXView; -#endif +#define TARGET_PLATFORM_DESKTOP ((__APPLE__ && TARGET_OS_MAC && !TARGET_OS_IPHONE) || _WIN32) +#define TARGET_PLATFORM_PHONE (__ANDROID__ || (__APPLE__ && TARGET_OS_IOS)) +#define TARGET_PLATFORM_MAC (__APPLE__ && TARGET_OS_MAC && !TARGET_OS_IPHONE) -/** - * 窗口尺寸和位置结构体 - */ +namespace liteav { +/// @{ #ifndef _WIN32 struct RECT { int left = 0; @@ -82,1317 +44,1502 @@ struct RECT { int right = 0; int bottom = 0; }; - -struct SIZE -{ +struct SIZE { long width = 0; long height = 0; }; #endif -/// @defgroup TRTCTypeDef_cplusplus 关键类型定义 
-/// 腾讯云视频通话功能的关键类型定义 -/// @{ ///////////////////////////////////////////////////////////////////////////////// // -// 【(一)视频相关枚举值定义】 +// 渲染控件 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * [VIEW] 用于渲染视频画面的渲染控件 + * TRTC 中有很多需要操控视频画面的接口,这些接口都需要您指定视频渲染控件。 + * 1. ObjectiveC 接口 iOS 和 MAC + * - 在 iOS 系统中,您可以直接使用 UIView 作为视频渲染控件,SDK 会在您提供的 UIView 上绘制视频画面。 + * - 在 Mac 系统中,您可以直接使用 NSView 作为视频渲染控件,SDK 会在您提供的 NSView 上绘制视频画面。 + * 示例代码如下: + * UIView *videoView = [[UIView alloc] initWithFrame:CGRectMake(0, 0, 360, 640)]; + * [self.view addSubview:videoView]; + * [trtcCloud startLocalPreview:YES view:_localView]; + * 2. 在 Android 平台中,您可以使用我们提供的 TXCloudVideoView 作为视频渲染控件,它支持 SurfaceView 和 TextureView 两种渲染方案。 + * - 当用于渲染本地的视频画面时:TXCloudVideoView 会优先使用 SurfaceView,该方案性能较好,但是不支持对 View 做动画或者变形特效。 + * - 当用于渲染远端的视频画面时:TXCloudVideoView 会优先使用 TextureView,该方案灵活度高,能够更好地支持动画或者变形特效。 + * 如果您希望强制使用某一种方案,可以按照如下方法进行编码: + * 用法一:强制使用 TextureView: + * TXCloudVideoView localView = findViewById(R.id.trtc_tc_cloud_view_main); + * localView.addVideoView(new TextureView(context)); + * mTRTCCloud.startLocalPreview(true, localView); + * 用法二:强制使用 SurfaceView: + * SurfaceView surfaceView = new SurfaceView(this); + * TXCloudVideoView localView = new TXCloudVideoView(surfaceView); + * mTRTCCloud.startLocalPreview(true, localView); + * 3. 全平台方案 View + * 由于全平台 C++ 接口需要使用统一的参数类型,所以您需要在调用这些接口时,将渲染控件统一转换成 TXView 类型的指针: + * - iOS 平台:您可以使用 UIView 对象作为渲染控件,在调用 C++ 接口时请传入 UIView 对象的指针(需强转为 void* 类型)。 + * - Mac 平台:您可以使用 NSView 对象作为渲染控件,在调用 C++ 接口时请传入 NSView 对象的指针(需强转为 void* 类型)。 + * - Android 平台:在调用 C++ 接口时请传入指向 TXCloudVideoView 对象的 jobject 指针(需强转为 void* 类型)。 + * - Windows 平台:您可以使用窗口句柄 HWND 作为渲染控件,在调用 C++ 接口时需要将 HWND 强转为 void* 类型。 + * 代码示例一:在 QT 下使用 C++ 全平台接口 + * QWidget *videoView; + * // The relevant code for setting the videoView is omitted here... 
+ * getTRTCShareInstance()->startLocalPreview(reinterpret_cast<TXView>(videoView->winId())); + * 代码示例二:在 Android 平台下,通过 JNI 调用 C++ 全平台接口 + * native void nativeStartLocalPreview(String userId, int streamType, TXCloudVideoView view); + * //... + * Java_com_example_test_MainActivity_nativeStartRemoteView(JNIEnv *env, jobject thiz, jstring user_id, jint stream_type, jobject view) { + * const char *user_id_chars = env->GetStringUTFChars(user_id, nullptr); + * trtc_cloud->startRemoteView(user_id_chars, (liteav::TRTCVideoStreamType)stream_type, view); + * env->ReleaseStringUTFChars(user_id, user_id_chars); + * } + */ +#ifdef _WIN32 +// Windows: HWND +typedef HWND TXView; +#else +// iOS: UIView; Mac OS: NSView; Android: jobject of TXCloudVideoView +typedef void *TXView; +#endif + +///////////////////////////////////////////////////////////////////////////////// +// +// 视频相关枚举值定义 // ///////////////////////////////////////////////////////////////////////////////// /** * 1.1 视频分辨率 * - * 此处仅定义横屏分辨率,如需使用竖屏分辨率(例如360 × 640),需要同时指定 TRTCVideoResolutionMode 为 Portrait。 + * 此处仅定义横屏分辨率(如 640 × 360),如需使用竖屏分辨率(如360 × 640),需要同时指定 TRTCVideoResolutionMode 为 Portrait。 */ -enum TRTCVideoResolution -{ - // 宽高比1:1 - TRTCVideoResolution_120_120 = 1, ///< [C] 建议码率80kbps - TRTCVideoResolution_160_160 = 3, ///< [C] 建议码率100kbps - TRTCVideoResolution_270_270 = 5, ///< [C] 建议码率200kbps - TRTCVideoResolution_480_480 = 7, ///< [C] 建议码率350kbps - - // 宽高比4:3 - TRTCVideoResolution_160_120 = 50, ///< [C] 建议码率100kbps - TRTCVideoResolution_240_180 = 52, ///< [C] 建议码率150kbps - TRTCVideoResolution_280_210 = 54, ///< [C] 建议码率200kbps - TRTCVideoResolution_320_240 = 56, ///< [C] 建议码率250kbps - TRTCVideoResolution_400_300 = 58, ///< [C] 建议码率300kbps - TRTCVideoResolution_480_360 = 60, ///< [C] 建议码率400kbps - TRTCVideoResolution_640_480 = 62, ///< [C] 建议码率600kbps - TRTCVideoResolution_960_720 = 64, ///< [C] 建议码率1000kbps - - // 宽高比16:9 - TRTCVideoResolution_160_90 = 100, ///< [C] 建议码率150kbps - 
TRTCVideoResolution_256_144 = 102, ///< [C] 建议码率200kbps - TRTCVideoResolution_320_180 = 104, ///< [C] 建议码率250kbps - TRTCVideoResolution_480_270 = 106, ///< [C] 建议码率350kbps - TRTCVideoResolution_640_360 = 108, ///< [C] 建议码率550kbps - TRTCVideoResolution_960_540 = 110, ///< [C] 建议码率850kbps - TRTCVideoResolution_1280_720 = 112, ///< [C] 摄像头采集 - 建议码率1200kbps - ///< [S] 屏幕分享 - 建议码率:低清:1000kbps 高清:1600kbps - TRTCVideoResolution_1920_1080 = 114, ///< [S] 屏幕分享 - 建议码率2000kbps +enum TRTCVideoResolution { + + ///宽高比 1:1;分辨率 120x120;建议码率(VideoCall)80kbps; 建议码率(LIVE)120kbps。 + TRTCVideoResolution_120_120 = 1, + + ///宽高比 1:1 分辨率 160x160;建议码率(VideoCall)100kbps; 建议码率(LIVE)150kbps。 + TRTCVideoResolution_160_160 = 3, + + ///宽高比 1:1;分辨率 270x270;建议码率(VideoCall)200kbps; 建议码率(LIVE)300kbps。 + TRTCVideoResolution_270_270 = 5, + + ///宽高比 1:1;分辨率 480x480;建议码率(VideoCall)350kbps; 建议码率(LIVE)500kbps。 + TRTCVideoResolution_480_480 = 7, + + ///宽高比4:3;分辨率 160x120;建议码率(VideoCall)100kbps; 建议码率(LIVE)150kbps。 + TRTCVideoResolution_160_120 = 50, + + ///宽高比 4:3;分辨率 240x180;建议码率(VideoCall)150kbps; 建议码率(LIVE)250kbps。 + TRTCVideoResolution_240_180 = 52, + + ///宽高比 4:3;分辨率 280x210;建议码率(VideoCall)200kbps; 建议码率(LIVE)300kbps。 + TRTCVideoResolution_280_210 = 54, + + ///宽高比 4:3;分辨率 320x240;建议码率(VideoCall)250kbps; 建议码率(LIVE)375kbps。 + TRTCVideoResolution_320_240 = 56, + + ///宽高比 4:3;分辨率 400x300;建议码率(VideoCall)300kbps; 建议码率(LIVE)450kbps。 + TRTCVideoResolution_400_300 = 58, + + ///宽高比 4:3;分辨率 480x360;建议码率(VideoCall)400kbps; 建议码率(LIVE)600kbps。 + TRTCVideoResolution_480_360 = 60, + + ///宽高比 4:3;分辨率 640x480;建议码率(VideoCall)600kbps; 建议码率(LIVE)900kbps。 + TRTCVideoResolution_640_480 = 62, + + ///宽高比 4:3;分辨率 960x720;建议码率(VideoCall)1000kbps; 建议码率(LIVE)1500kbps。 + TRTCVideoResolution_960_720 = 64, + + ///宽高比 16:9;分辨率 160x90;建议码率(VideoCall)150kbps; 建议码率(LIVE)250kbps。 + TRTCVideoResolution_160_90 = 100, + + ///宽高比 16:9;分辨率 256x144;建议码率(VideoCall)200kbps; 建议码率(LIVE)300kbps。 + TRTCVideoResolution_256_144 = 102, + + ///宽高比 
16:9;分辨率 320x180;建议码率(VideoCall)250kbps; 建议码率(LIVE)400kbps。 + TRTCVideoResolution_320_180 = 104, + + ///宽高比 16:9;分辨率 480x270;建议码率(VideoCall)350kbps; 建议码率(LIVE)550kbps。 + TRTCVideoResolution_480_270 = 106, + + ///宽高比 16:9;分辨率 640x360;建议码率(VideoCall)500kbps; 建议码率(LIVE)900kbps。 + TRTCVideoResolution_640_360 = 108, + + ///宽高比 16:9;分辨率 960x540;建议码率(VideoCall)850kbps; 建议码率(LIVE)1300kbps。 + TRTCVideoResolution_960_540 = 110, + + ///宽高比 16:9;分辨率 1280x720;建议码率(VideoCall)1200kbps; 建议码率(LIVE)1800kbps。 + TRTCVideoResolution_1280_720 = 112, + + ///宽高比 16:9;分辨率 1920x1080;建议码率(VideoCall)2000kbps; 建议码率(LIVE)3000kbps。 + TRTCVideoResolution_1920_1080 = 114, + }; /** - * 1.2 视频分辨率模式 + * 1.2 视频宽高比模式 * - * - 横屏分辨率:TRTCVideoResolution_640_360 + TRTCVideoResolutionModeLandscape = 640 × 360 - * - 竖屏分辨率:TRTCVideoResolution_640_360 + TRTCVideoResolutionModePortrait = 360 × 640 + * TRTCVideoResolution 中仅定义了横屏分辨率(如 640 × 360),如需使用竖屏分辨率(如360 × 640),需要同时指定 TRTCVideoResolutionMode 为 Portrait。 */ -enum TRTCVideoResolutionMode -{ - TRTCVideoResolutionModeLandscape = 0, ///< 横屏分辨率 - TRTCVideoResolutionModePortrait = 1, ///< 竖屏分辨率 +enum TRTCVideoResolutionMode { + + ///横屏分辨率,例如:TRTCVideoResolution_640_360 + TRTCVideoResolutionModeLandscape = 640 × 360。 + TRTCVideoResolutionModeLandscape = 0, + + ///竖屏分辨率,例如:TRTCVideoResolution_640_360 + TRTCVideoResolutionModePortrait = 360 × 640。 + TRTCVideoResolutionModePortrait = 1, + }; /** * 1.3 视频流类型 * - * TRTC 内部有三种不同的音视频流,分别为: - * - 主画面:最常用的一条线路,一般用来传输摄像头的视频数据。 - * - 小画面:跟主画面的内容相同,但是分辨率和码率更低。 - * - 辅流画面:一般用于屏幕分享或远程播片(例如老师播放视频给学生观看)。 - * - * @note - 如果主播的上行网络和性能比较好,则可以同时送出大小两路画面。 - * @note - SDK 不支持单独开启小画面,小画面必须依附于主画面而存在。 + * TRTC 内部有三种不同的视频流,分别是: + * - 高清大画面:一般用来传输摄像头的视频数据。 + * - 低清小画面:小画面和大画面的内容相互,但是分辨率和码率都比大画面低,因此清晰度也更低。 + * - 辅流画面:一般用于屏幕分享,同一时间在同一个房间中只允许一个用户发布辅流视频,其他用户必须要等该用户关闭之后才能发布自己的辅流。 + * @note 不支持单独开启低清小画面,小画面必须依附于大画面而存在,SDK 会自动设定低清小画面的分辨率和码率。 */ -enum TRTCVideoStreamType -{ - TRTCVideoStreamTypeBig = 0, ///< 主画面视频流 - TRTCVideoStreamTypeSmall = 1, 
///< 小画面视频流 - TRTCVideoStreamTypeSub = 2, ///< 辅流(屏幕分享) +enum TRTCVideoStreamType { + + ///高清大画面,一般用来传输摄像头的视频数据。 + TRTCVideoStreamTypeBig = 0, + + ///低清小画面:小画面和大画面的内容相互,但是分辨率和码率都比大画面低,因此清晰度也更低。 + TRTCVideoStreamTypeSmall = 1, + + ///辅流画面:一般用于屏幕分享,同一时间在同一个房间中只允许一个用户发布辅流视频,其他用户必须要等该用户关闭之后才能发布自己的辅流。 + TRTCVideoStreamTypeSub = 2, + }; /** * 1.4 视频画面填充模式 * - * 如果画面的显示分辨率不等于画面的原始分辨率,就需要您设置画面的填充模式: - * - TRTCVideoFillMode_Fill,图像铺满屏幕,超出显示视窗的视频部分将被裁剪,画面显示可能不完整。 - * - TRTCVideoFillMode_Fit,图像长边填满屏幕,短边区域会被填充黑色,但画面的内容肯定是完整的。 + * 如果视频显示区域的宽高比不等于视频内容的宽高比时,就需要您指定画面的填充模式: */ -enum TRTCVideoFillMode -{ - /// 图像铺满屏幕,超出显示视窗的视频部分将被裁剪 +enum TRTCVideoFillMode { + + ///填充模式:即将画面内容居中等比缩放以充满整个显示区域,超出显示区域的部分将会被裁剪掉,此模式下画面可能不完整。 TRTCVideoFillMode_Fill = 0, - /// 图像长边填满屏幕,短边区域会被填充黑色 + + ///适应模式:即按画面长边进行缩放以适应显示区域,短边部分会被填充为黑色,此模式下图像完整但可能留有黑边。 TRTCVideoFillMode_Fit = 1, + }; /** * 1.5 视频画面旋转方向 * - * TRTC SDK 提供了对本地和远程画面的旋转角度设置 API,下列的旋转角度都是指顺时针方向的。 - * + * TRTC 提供了对本地和远程画面的旋转角度设置 API,下列的旋转角度都是指顺时针方向的。 */ -enum TRTCVideoRotation -{ - TRTCVideoRotation0 = 0, ///< 顺时针旋转0度 - TRTCVideoRotation90 = 1, ///< 顺时针旋转90度 - TRTCVideoRotation180 = 2, ///< 顺时针旋转180度 - TRTCVideoRotation270 = 3, ///< 顺时针旋转270度 +enum TRTCVideoRotation { + + ///不旋转 + TRTCVideoRotation0 = 0, + + ///顺时针旋转90度 + TRTCVideoRotation90 = 1, + + ///顺时针旋转180度 + TRTCVideoRotation180 = 2, + + ///顺时针旋转270度 + TRTCVideoRotation270 = 3, + }; /** * 1.6 美颜(磨皮)算法 * - * TRTC SDK 内置多种不同的磨皮算法,您可以选择最适合您产品定位的方案。 + * TRTC 内置多种不同的磨皮算法,您可以选择最适合您产品定位的方案。 */ -enum TRTCBeautyStyle -{ - TRTCBeautyStyleSmooth = 0, ///< 光滑,适用于美女秀场,效果比较明显。 - TRTCBeautyStyleNature = 1, ///< 自然,磨皮算法更多地保留了面部细节,主观感受上会更加自然。 +enum TRTCBeautyStyle { + + ///光滑,算法比较激进,磨皮效果比较明显,适用于秀场直播。 + TRTCBeautyStyleSmooth = 0, + + ///自然,算法更多地保留了面部细节,磨皮效果更加自然,适用于绝大多数直播场景。 + TRTCBeautyStyleNature = 1, + }; /** * 1.7 视频像素格式 * - * TRTC SDK 提供针对视频的自定义采集和自定义渲染功能,在自定义采集功能中,您可以用下列枚举值描述您采集的视频像素格式。 - * 在自定义渲染功能中,您可以指定您期望 SDK 回调的视频像素格式。 + * TRTC 提供针对视频的自定义采集和自定义渲染功能: + * - 在自定义采集功能中,您可以用下列枚举值描述您采集的视频像素格式。 + * - 
在自定义渲染功能中,您可以指定您期望 SDK 回调出的视频像素格式。 */ -enum TRTCVideoPixelFormat -{ +enum TRTCVideoPixelFormat { + + ///未定义的格式 TRTCVideoPixelFormat_Unknown = 0, - TRTCVideoPixelFormat_I420 = 1, ///< I420 - TRTCVideoPixelFormat_Texture_2D = 2, ///< OpenGL 2D 纹理 - TRTCVideoPixelFormat_BGRA32 = 3, ///< BGRA32 - TRTCVideoPixelFormat_RGBA32 = 5, ///< RGBA32 -}; + /// YUV420P(I420) 格式 + TRTCVideoPixelFormat_I420 = 1, + + /// OpenGL 2D 纹理格式 + TRTCVideoPixelFormat_Texture_2D = 2, + + /// BGRA 格式 + TRTCVideoPixelFormat_BGRA32 = 3, + + /// RGBA 格式 + TRTCVideoPixelFormat_RGBA32 = 5, + +}; /** - * 1.8 视频数据包装格式 + * 1.8 视频数据传递方式 + * + * 在自定义采集和自定义渲染功能,您需要用到下列枚举值来指定您希望以什么方式传递视频数据: + * - 方案一:使用内存 Buffer 传递视频数据,该方案在 iOS 效率尚可,但在 Android 系统上效率较差,Windows 暂时仅支持内存 Buffer 的传递方式。 + * - 方案二:使用 Texture 纹理传递视频数据,该方案在 iOS 和 Android 系统下均有较高的效率,Windows 暂不支持,需要您有一定的 OpenGL 编程基础。 */ -enum TRTCVideoBufferType -{ +enum TRTCVideoBufferType { + + ///未定义的传递方式 TRTCVideoBufferType_Unknown = 0, - TRTCVideoBufferType_Buffer = 1, ///< 二进制Buffer类型 - TRTCVideoBufferType_Texture = 3, ///< 纹理类型 + + ///使用内存 Buffer 传递视频数据,iOS: PixelBuffer;Android: 用于 JNI 层的 Direct Buffer;Win: 内存数据块。 + TRTCVideoBufferType_Buffer = 1, + + ///使用 Texture 纹理传递视频数据 + TRTCVideoBufferType_Texture = 3, + }; /** - * 1.9 画面渲染镜像类型 + * 1.9 视频的镜像类型 * - * TRTC 的画面镜像提供下列设置模式 + * 视频的镜像是指对视频内容进行左右翻转,尤其是对本地的摄像头预览视频,开启镜像后能给主播带来熟悉的“照镜子”体验。 */ -enum TRTCVideoMirrorType -{ +enum TRTCVideoMirrorType { + +///自动模式:如果正使用前置摄像头则开启镜像,如果是后置摄像头则不开启镜像(仅适用于移动设备)。 #if TARGET_PLATFORM_PHONE - TRTCVideoMirrorType_Auto = 0, ///< 只适用于移动端,本地预览时,前置摄像头镜像,后置摄像头不镜像 + TRTCVideoMirrorType_Auto = 0, #endif - TRTCVideoMirrorType_Enable = 1, ///< 所有画面均镜像 - TRTCVideoMirrorType_Disable = 2, ///< 所有画面均不镜像 + + ///强制开启镜像,不论当前使用的是前置摄像头还是后置摄像头。 + TRTCVideoMirrorType_Enable = 1, + + ///强制关闭镜像,不论当前使用的是前置摄像头还是后置摄像头。 + TRTCVideoMirrorType_Disable = 2, + }; /** - * 1.10 视频截图来源 + * 1.10 本地视频截图的数据源 + * + * SDK 支持从如下两种数据源中截取图片并保存成本地文件: + * - 视频流:从视频流中截取原生的视频内容,截取的内容不受渲染控件的显示控制。 + * - 
渲染层:从渲染控件中截取显示的视频内容,可以做到用户所见即所得的效果,但如果显示区域过小,截取出的图片也会很小。 */ -enum TRTCSnapshotSourceType -{ - TRTCSnapshotSourceTypeStream = 0, ///< 从视频流上截取视频画面 - TRTCSnapshotSourceTypeView = 1, ///< 从渲染 View 上截取视频画面 +enum TRTCSnapshotSourceType { + + ///从视频流中截取原生的视频内容,截取的内容不受渲染控件的显示控制。 + TRTCSnapshotSourceTypeStream = 0, + + ///从渲染控件中截取显示的视频内容,可以做到用户所见即所得的效果,但如果显示区域过小,截取出的图片也会很小。 + TRTCSnapshotSourceTypeView = 1, + }; ///////////////////////////////////////////////////////////////////////////////// // -// 【(二)网络相关枚举值定义】 +// 网络相关枚举值定义 // ///////////////////////////////////////////////////////////////////////////////// /** * 2.1 应用场景 * - * TRTC 可用于视频会议和在线直播等多种应用场景,针对不同的应用场景,TRTC SDK 的内部会进行不同的优化配置: - * - TRTCAppSceneVideoCall :视频通话场景,适合[1对1视频通话]、[300人视频会议]、[在线问诊]、[视频聊天]、[远程面试]等。 - * - TRTCAppSceneLIVE :视频互动直播,适合[视频低延时直播]、[十万人互动课堂]、[视频直播 PK]、[视频相亲房]、[互动课堂]、[远程培训]、[超大型会议]等。 - * - TRTCAppSceneAudioCall :语音通话场景,适合[1对1语音通话]、[300人语音会议]、[语音聊天]、[语音会议]、[在线狼人杀]等。 - * - TRTCAppSceneVoiceChatRoom:语音互动直播,适合:[语音低延时直播]、[语音直播连麦]、[语聊房]、[K 歌房]、[FM 电台]等。 + * TRTC 针对常见的音视频应用场景都进行了定向优化,以满足各种垂直场景下的差异化要求,主要场景可以分为如下两类: + * - 直播(LIVE)场景:包括 LIVE 和 VoiceChatRoom,前者是音频+视频,后者是纯音频。 + * 直播场景下,用户被分成“主播”和“观众”两种角色,单个房间中同时最多支持10万人在线,适合于观众人数众多的直播场景。 + * - 实时(RTC)场景:包括 VideoCall 和 AudioCall,前者是音频+视频,后者是纯音频。 + * 实时场景下,用户没有角色的差异,但单个房间中同时最多支持 300 人在线,适合于小范围实时通信的场景。 */ -enum TRTCAppScene -{ - /// 视频通话场景,支持720P、1080P高清画质,单个房间最多支持300人同时在线,最高支持50人同时发言。<br> - /// 适合:[1对1视频通话]、[300人视频会议]、[在线问诊]、[视频聊天]、[远程面试]等。 - TRTCAppSceneVideoCall = 0, - - /// 视频互动直播,支持平滑上下麦,切换过程无需等待,主播延时小于300ms;支持十万级别观众同时播放,播放延时低至1000ms。<br> - /// 适合:[视频低延时直播]、[十万人互动课堂]、[视频直播 PK]、[视频相亲房]、[互动课堂]、[远程培训]、[超大型会议]等。<br> - /// 注意:此场景下,您必须通过 TRTCParams 中的 role 字段指定当前用户的角色。 - TRTCAppSceneLIVE = 1, - - /// 语音通话场景,支持 48kHz,支持双声道。单个房间最多支持300人同时在线,最高支持50人同时发言。<br> - /// 适合:[1对1语音通话]、[300人语音会议]、[语音聊天]、[语音会议]、[在线狼人杀]等。 - TRTCAppSceneAudioCall = 2, - - /// 语音互动直播,支持平滑上下麦,切换过程无需等待,主播延时小于300ms;支持十万级别观众同时播放,播放延时低至1000ms。<br> - /// 适合:[语音低延时直播]、[语音直播连麦]、[语聊房]、[K 歌房]、[FM 
电台]等。<br> - /// 注意:此场景下,您必须通过 TRTCParams 中的 role 字段指定当前用户的角色。 +enum TRTCAppScene { + + ///视频通话场景,支持720P、1080P高清画质,单个房间最多支持300人同时在线,最高支持50人同时发言。 + ///适用于[1对1视频通话]、[300人视频会议]、[在线问诊]、[教育小班课]、[远程面试]等业务场景。 + TRTCAppSceneVideoCall = 0, + + ///视频互动直播,支持平滑上下麦,切换过程无需等待,主播延时小于300ms;支持十万级别观众同时播放,播放延时低至1000ms。 + ///适用于[低延时互动直播]、[大班课]、[主播PK]、[视频相亲]、[在线互动课堂]、[远程培训]、[超大型会议]等业务场景。 + ///@note 此场景下,您必须通过 TRTCParams 中的 role 字段指定当前用户的角色。 + TRTCAppSceneLIVE = 1, + + ///语音通话场景,默认采用 SPEECH 音质,单个房间最多支持300人同时在线,最高支持50人同时发言。 + ///适用于[1对1语音通话]、[300人语音会议]、[语音聊天]、[语音会议]、[在线狼人杀]等业务场景。 + TRTCAppSceneAudioCall = 2, + + ///语音互动直播,支持平滑上下麦,切换过程无需等待,主播延时小于300ms;支持十万级别观众同时播放,播放延时低至1000ms。 + ///适用于[语音俱乐部]、[在线K歌房]、[音乐直播间]、[FM电台]等业务场景。 + ///@note 此场景下,您必须通过 TRTCParams 中的 role 字段指定当前用户的角色。 TRTCAppSceneVoiceChatRoom = 3, + }; /** - * 2.2 角色,仅适用于直播场景(TRTCAppSceneLIVE 和 TRTCAppSceneVoiceChatRoom) + * 2.2 角色 * - * 在直播场景中,多数用户仅为观众,个别用户是主播,这种角色区分有利于 TRTC 进行更好的定向优化。 - * - * - Anchor:主播,可以上行视频和音频,一个房间里最多支持50个主播同时上行音视频。 - * - Audience:观众,只能观看,不能上行视频和音频,一个房间里的观众人数没有上限。 + * 仅适用于直播类场景(即 TRTCAppSceneLIVE 和 TRTCAppSceneVoiceChatRoom),把用户区分成两种不同的身份: + * - 主播:可以随时发布自己的音视频流,但人数有限制,同一个房间中最多只允许 50 个主播同时发布自己的音视频流。 + * - 观众:只能观看其他用户的音视频流,要发布音视频流,需要先通过 {@link switchRole} 切换成主播,同一个房间中最多能容纳10万观众。 */ -enum TRTCRoleType -{ - TRTCRoleAnchor = 20, ///< 主播 - TRTCRoleAudience = 21, ///< 观众 +enum TRTCRoleType { + + ///主播:可以随时发布自己的音视频流,但人数有限制,同一个房间中最多只允许 50 个主播同时发布自己的音视频流。 + TRTCRoleAnchor = 20, + + ///观众:只能观看其他用户的音视频流,要发布音视频流,需要先通过 {@link switchRole} 切换成主播,同一个房间中最多能容纳10万观众。 + TRTCRoleAudience = 21, + }; /** - * 2.3 流控模式 - * - * TRTC SDK 内部需要时刻根据网络情况调整内部的编解码器和网络模块,以便能够对网络的变化做出反应。 - * 为了支持快速算法升级,SDK 内部设置了两种不同的流控模式: - * - ModeServer:云端控制,默认模式,推荐选择。 - * - ModeClient:本地控制,用于 SDK 开发内部调试,客户请勿使用。 - * - * @note 推荐您使用云端控制,这样每当我们升级 Qos 算法时,您无需升级 SDK 即可体验更好的效果。 + * 2.3 流控模式(已废弃) */ -enum TRTCQosControlMode -{ - TRTCQosControlModeClient, ///< 客户端控制(用于 SDK 开发内部调试,客户请勿使用) - TRTCQosControlModeServer, ///< 云端控制 (默认) +enum TRTCQosControlMode { + + 
///本地控制,用于 SDK 开发内部调试,客户请勿使用。 + TRTCQosControlModeClient = 0, + + ///云端控制,默认模式,推荐选择。 + TRTCQosControlModeServer = 1, + }; /** * 2.4 画质偏好 * - * 指当 TRTC SDK 在遇到弱网络环境时,您期望“保清晰”或“保流畅”: - * - * - Smooth:弱网下保流畅。即在遭遇弱网环境时首先确保声音的流畅和优先发送,画面会变得模糊且会有较多马赛克,但可以保持流畅不卡顿。 - * - Clear:弱网下保清晰。即在遭遇弱网环境时,画面会尽可能保持清晰,但可能会更容易出现卡顿。 + * TRTC 在弱网络环境下有两种调控模式:“优先保证画面清晰”或“优先保证画面流畅”,两种模式均会优先保障声音数据的传输。 */ -enum TRTCVideoQosPreference -{ - TRTCVideoQosPreferenceSmooth = 1, ///< 弱网下保流畅 - TRTCVideoQosPreferenceClear = 2, ///< 弱网下保清晰 +enum TRTCVideoQosPreference { + + ///流畅优先:即当前网络不足以传输既清晰又流畅的画面时,优先保证画面的流畅性,代价就是画面会比较模糊且伴随有较多的马赛克。 + TRTCVideoQosPreferenceSmooth = 1, + + ///清晰优先(默认值):即当前网络不足以传输既清晰又流畅的画面时,优先保证画面的清晰度,代价就是画面会比较卡顿。 + TRTCVideoQosPreferenceClear = 2, + }; /** * 2.5 网络质量 * - * TRTC SDK 对网络质量定义了六种不同的级别,Excellent 表示最好,Down 表示不可用。 + * TRTC 会每隔两秒对当前的网络质量进行评估,评估结果为六个等级:Excellent 表示最好,Down 表示最差。 */ -enum TRTCQuality -{ - TRTCQuality_Unknown = 0, ///< 未定义 - TRTCQuality_Excellent = 1, ///< 最好 - TRTCQuality_Good = 2, ///< 好 - TRTCQuality_Poor = 3, ///< 一般 - TRTCQuality_Bad = 4, ///< 差 - TRTCQuality_Vbad = 5, ///< 很差 - TRTCQuality_Down = 6, ///< 不可用 +enum TRTCQuality { + + ///未定义 + TRTCQuality_Unknown = 0, + + ///当前网络非常好 + TRTCQuality_Excellent = 1, + + ///当前网络比较好 + TRTCQuality_Good = 2, + + ///当前网络一般 + TRTCQuality_Poor = 3, + + ///当前网络较差 + TRTCQuality_Bad = 4, + + ///当前网络很差 + TRTCQuality_Vbad = 5, + + ///当前网络不满足 TRTC 的最低要求 + TRTCQuality_Down = 6, + }; /** - * 2.6 混流输入类型 + * 2.6 视频状态类型 + * + * 该枚举类型用于视频状态变化回调接口{@link onRemoteVideoStatusUpdated},用于指定当前的视频状态。 */ -enum TRTCMixInputType -{ - /// 不指定,根据pureAudio值决定混流类型 - TRTCMixInputTypeUndefined = 0, - /// 混入音视频 - TRTCMixInputTypeAudioVideo = 1, - /// 只混入视频 - TRTCMixInputTypePureVideo = 2, - /// 只混入音频 - TRTCMixInputTypePureAudio = 3, -}; +enum TRTCAVStatusType { -///////////////////////////////////////////////////////////////////////////////// -// -// 【(三)声音相关枚举值定义】 -// -///////////////////////////////////////////////////////////////////////////////// + 
///停止播放 + TRTCAVStatusStopped = 0, + + ///正在播放 + TRTCAVStatusPlaying = 1, + + ///正在加载 + TRTCAVStatusLoading = 2, + +}; /** - * 3.1 音频帧的格式 + * 2.7 视频状态变化原因类型 * + * 该枚举类型用于视频状态变化回调接口{@link onRemoteVideoStatusUpdated},用于指定当前的视频状态原因。 */ -enum TRTCAudioFrameFormat -{ - TRTCAudioFrameFormatNone = 0, - TRTCAudioFrameFormatPCM, ///< PCM,每个采样点占16bit数据量。 +enum TRTCAVStatusChangeReason { + + ///缺省值 + TRTCAVStatusChangeReasonInternal = 0, + + ///网络缓冲 + TRTCAVStatusChangeReasonBufferingBegin = 1, + + ///结束缓冲 + TRTCAVStatusChangeReasonBufferingEnd = 2, + + ///本地启动视频流播放 + TRTCAVStatusChangeReasonLocalStarted = 3, + + ///本地停止视频流播放 + TRTCAVStatusChangeReasonLocalStopped = 4, + + ///远端视频流开始(或继续) + TRTCAVStatusChangeReasonRemoteStarted = 5, + + ///远端视频流停止(或中断 + TRTCAVStatusChangeReasonRemoteStopped = 6, + }; +///////////////////////////////////////////////////////////////////////////////// +// +// 音频相关枚举值定义 +// +///////////////////////////////////////////////////////////////////////////////// + /** * 3.2 声音音质 * - * 音频音质用来衡量声音的保真程度,TRTCAudioQualitySpeech 适用于通话场景,TRTCAudioQualityMusic 适用于高音质音乐场景。 + * TRTC 提供了三种精心校调好的模式,用来满足各种垂直场景下对音质的差异化追求: + * - 人声模式(Speech):适用于以人声沟通为主的应用场景,该模式下音频传输的抗性较强,TRTC 会通过各种人声处理技术保障在弱网络环境下的流畅度最佳。 + * - 音乐模式(Music):适用于对声乐要求很苛刻的场景,该模式下音频传输的数据量很大,TRTC 会通过各项技术确保音乐信号在各频段均能获得高保真的细节还原度。 + * - 默认模式(Default):介于 Speech 和 Music 之间的档位,对音乐的还原度比人声模式要好,但传输数据量比音乐模式要低很多,对各种场景均有不错的适应性。 */ -enum TRTCAudioQuality -{ - /// 流畅音质:采样率:16k;单声道;音频裸码率:16kbps;适合语音通话为主的场景,比如在线会议,语音通话。 +enum TRTCAudioQuality { + + ///人声模式:采样率:16k;单声道;编码码率:16kbps;具备几个模式中最强的网络抗性,适合语音通话为主的场景,比如在线会议,语音通话等。 TRTCAudioQualitySpeech = 1, - /// 默认音质:采样率:48k;单声道;音频裸码率:50kbps;SDK 默认的音频质量,如无特殊需求推荐选择之。 + + ///默认模式:采样率:48k;单声道;编码码率:50kbps;介于 Speech 和 Music 之间的档位,SDK 默认档位,推荐选择。 TRTCAudioQualityDefault = 2, - /// 高音质:采样率:48k;双声道 + 全频带;音频裸码率:128kbps;适合需要高保真传输音乐的场景,比如K歌、音乐直播等。 + + ///音乐模式:采样率:48k;全频带立体声;编码码率:128kbps;适合需要高保真传输音乐的场景,比如在线K歌、音乐直播等。 TRTCAudioQualityMusic = 3, + +}; + +/** + * 3.7 音频帧的内容格式 + */ +enum 
TRTCAudioFrameFormat { + + /// None + TRTCAudioFrameFormatNone = 0, + + /// PCM 格式的音频数据 + TRTCAudioFrameFormatPCM, + }; ///////////////////////////////////////////////////////////////////////////////// // -// 【(四)更多枚举值定义】 +// 更多枚举值定义 // ///////////////////////////////////////////////////////////////////////////////// /** * 4.1 Log 级别 + * + * 不同的日志等级定义了不同的详实程度和日志数量,推荐一般情况下将日志等级设置为:TRTCLogLevelInfo。 */ -enum TRTCLogLevel -{ - TRTCLogLevelVerbose = 0, ///< 输出所有级别的 Log - TRTCLogLevelDebug = 1, ///< 输出 DEBUG,INFO,WARNING,ERROR 和 FATAL 级别的 Log - TRTCLogLevelInfo = 2, ///< 输出 INFO,WARNING,ERROR 和 FATAL 级别的 Log - TRTCLogLevelWarn = 3, ///< 只输出WARNING,ERROR 和 FATAL 级别的 Log - TRTCLogLevelError = 4, ///< 只输出ERROR 和 FATAL 级别的 Log - TRTCLogLevelFatal = 5, ///< 只输出 FATAL 级别的 Log - TRTCLogLevelNone = 6, ///< 不输出任何 SDK Log +enum TRTCLogLevel { + + ///输出所有级别的 Log + TRTCLogLevelVerbose = 0, + + ///输出 DEBUG,INFO,WARNING,ERROR 和 FATAL 级别的 Log + TRTCLogLevelDebug = 1, + + ///输出 INFO,WARNING,ERROR 和 FATAL 级别的 Log + TRTCLogLevelInfo = 2, + + ///输出WARNING,ERROR 和 FATAL 级别的 Log + TRTCLogLevelWarn = 3, + + ///输出ERROR 和 FATAL 级别的 Log + TRTCLogLevelError = 4, + + ///仅输出 FATAL 级别的 Log + TRTCLogLevelFatal = 5, + + ///不输出任何 SDK Log + TRTCLogLevelNone = 6, + }; /** - * 4.2 设备操作 + * 4.3 屏幕分享的目标类型(仅适用于桌面端) */ -enum TRTCDeviceState -{ - TRTCDeviceStateAdd = 0, ///< 添加设备 - TRTCDeviceStateRemove = 1, ///< 移除设备 - TRTCDeviceStateActive = 2, ///< 设备已启用 +enum TRTCScreenCaptureSourceType { + + ///未定义 + TRTCScreenCaptureSourceTypeUnknown = -1, + + ///该分享目标是某一个应用的窗口 + TRTCScreenCaptureSourceTypeWindow = 0, + + ///该分享目标是某一台显示器的屏幕 + TRTCScreenCaptureSourceTypeScreen = 1, + + ///该分享目标是用户自定义的数据源 + TRTCScreenCaptureSourceTypeCustom = 2, + }; /** - * 4.3 设备类型 + * 4.4 云端混流的排版模式 * - * 以下定义仅用于兼容原有接口,具体定义参见 ITXDeviceManager.h 文件 + * TRTC 的云端混流服务能够将房间中的多路音视频流混合成一路,因此您需要指定画面的排版方案,我们提供了如下几种排版模式: + */ +enum TRTCTranscodingConfigMode { + + ///未定义 + TRTCTranscodingConfigMode_Unknown = 0, + + ///全手动排版模式 + 
///该模式下,您需要指定每一路画面的精确排版位置。该模式的自由度最高,但易用性也最差: + ///- 您需要填写 TRTCTranscodingConfig 中的所有参数,包括每一路画面(TRTCMixUser)的位置坐标。 + ///- 您需要监听 TRTCCloudDelegate 中的 onUserVideoAvailable() 和 onUserAudioAvailable() 事件回调,并根据当前房间中各个麦上用户的音视频状态不断地调整 mixUsers 参数。 + TRTCTranscodingConfigMode_Manual = 1, + + ///纯音频模式 + ///该模式适用于语音通话(AudioCall)和语音聊天室(VoiceChatRoom)等纯音频的应用场景。 + ///- 您只需要在进入房间后,通过 setMixTranscodingConfig() 接口设置一次,之后 SDK 就会自动把房间内所有上麦用户的声音混流到当前用户的直播流上。 + ///- 您无需设置 TRTCTranscodingConfig 中的 mixUsers 参数,只需设置 audioSampleRate、audioBitrate 和 audioChannels 等参数即可。 + TRTCTranscodingConfigMode_Template_PureAudio = 2, + + ///预排版模式 + ///最受欢迎的排版模式,因为该模式支持您通过占位符提前对各路画面的位置进行设定,之后 SDK 会自动根据房间中画面的路数动态进行适配调整。 + ///此模式下,您依然需要设置 mixUsers 参数,但可以将 userId 设置为“占位符”,可选的占位符有: + /// - "$PLACE_HOLDER_REMOTE$" : 指代远程用户的画面,可以设置多个。 + /// - "$PLACE_HOLDER_LOCAL_MAIN$" : 指代本地摄像头画面,只允许设置一个。 + /// - "$PLACE_HOLDER_LOCAL_SUB$" : 指代本地屏幕分享画面,只允许设置一个。 + ///此模式下,您不需要监听 TRTCCloudDelegate 中的 onUserVideoAvailable() 和 onUserAudioAvailable() 回调进行实时调整, + ///只需要在进房成功后调用一次 setMixTranscodingConfig() 即可,之后 SDK 会自动将真实的 userId 补位到您设置的占位符上。 + TRTCTranscodingConfigMode_Template_PresetLayout = 3, + + ///屏幕分享模式 + ///适用于在线教育场景等以屏幕分享为主的应用场景,仅支持 Windows 和 Mac 两个平台的 SDK。 + ///该模式下,SDK 会先根据您通过 videoWidth 和 videoHeight 参数设置的目标分辨率构建一张画布, + ///- 当老师未开启屏幕分享时,SDK 会将老师的摄像头画面等比例拉伸绘制到该画布上; + ///- 当老师开启屏幕分享之后,SDK 会将屏幕分享画面绘制到同样的画布上。 + ///此种排版模式的目的是为了确保混流模块的输出分辨率一致,避免课程回放和网页观看的花屏问题(网页播放器不支持可变分辨率)。 + ///同时,连麦学生的声音也会被默认混合到老师的音视频流中。 + ///< br> + ///由于教学模式下的视频内容以屏幕分享为主,因此同时传输摄像头画面和屏幕分享画面是非常浪费带宽的。 + ///推荐的做法是直接将摄像头画面通过 setLocalVideoRenderCallback 接口自定义绘制到当前屏幕上。 + ///在该模式下,您无需设置 TRTCTranscodingConfig 中的 mixUsers 参数,SDK 不会混合学生的画面,以免干扰屏幕分享的效果。 + ///< br> + ///您可以将 TRTCTranscodingConfig 中的 width × height 设为 0px × 0px,SDK 会自动根据用户当前屏幕的宽高比计算出一个合适的分辨率: + ///- 如果老师当前屏幕宽度 <= 1920px,SDK 会使用老师当前屏幕的实际分辨率。 + ///- 如果老师当前屏幕宽度 > 1920px,SDK 会根据当前屏幕宽高比,选择 1920x1080(16:9)、1920x1200(16:10)、1920x1440(4:3) 三种分辨率中的一种。 + TRTCTranscodingConfigMode_Template_ScreenSharing = 4, + 
+}; + +/** + * 4.5 媒体录制类型 + * + * 该枚举类型用于本地媒体录制接口{@link startLocalRecording},用于指定是录制音视频文件还是纯音频文件。 + */ +enum TRTCLocalRecordType { + + ///仅录制音频 + TRTCLocalRecordType_Audio = 0, + + ///仅录制视频 + TRTCLocalRecordType_Video = 1, + + ///同时录制音频和视频 + TRTCLocalRecordType_Both = 2, + +}; + +/** + * 4.6 混流输入类型 + */ +enum TRTCMixInputType { + + ///默认值 + ///考虑到针对老版本的兼容性,如果您指定了 inputType 为 Undefined,SDK 会根据另一个参数 pureAudio 的数值决定混流输入类型 + TRTCMixInputTypeUndefined = 0, + + ///混入音频和视频 + TRTCMixInputTypeAudioVideo = 1, + + ///只混入视频 + TRTCMixInputTypePureVideo = 2, + + ///只混入音频 + TRTCMixInputTypePureAudio = 3, + + ///混入水印 + ///此时您无需指定 userId 字段,但需要指定 image 字段,推荐使用 png 格式的图片。 + TRTCMixInputTypeWatermark = 4, + +}; + +/** + * 4.7 设备类型(仅适用于桌面平台) + * + * 该枚举值用于定义三种类型的音视频设备,即摄像头、麦克风和扬声器,以便让一套设备管理接口可以操控三种不同类型的设备。 + * 自 Ver8.0 版本开始,TRTC 在 TXDeviceManager 中重新定义了 “TXMediaDeviceType” 用于替换老版本中的 “TRTCMediaDeviceType”, + * 此处仅保留 “TRTCMediaDeviceType” 的定义,用于兼容老版本的客户代码。 */ typedef TXMediaDeviceType TRTCDeviceType; -#define TRTCDeviceTypeUnknow TXMediaDeviceTypeUnknown -#define TRTCDeviceTypeMic TXMediaDeviceTypeMic +#define TRTCDeviceTypeUnknow TXMediaDeviceTypeUnknown +#define TRTCDeviceTypeMic TXMediaDeviceTypeMic #define TRTCDeviceTypeSpeaker TXMediaDeviceTypeSpeaker -#define TRTCDeviceTypeCamera TXMediaDeviceTypeCamera +#define TRTCDeviceTypeCamera TXMediaDeviceTypeCamera /** - * 4.4 水印图片的源类型 + * 4.8 水印图片的源类型 */ -enum TRTCWaterMarkSrcType -{ - TRTCWaterMarkSrcTypeFile = 0, ///< 图片文件路径,支持 BMP、GIF、JPEG、PNG、TIFF、Exif、WMF 和 EMF 文件格式 - TRTCWaterMarkSrcTypeBGRA32 = 1, ///< BGRA32格式内存块 - TRTCWaterMarkSrcTypeRGBA32 = 2, ///< RGBA32格式内存块 +enum TRTCWaterMarkSrcType { + + ///图片文件路径,支持 BMP、GIF、JPEG、PNG、TIFF、Exif、WMF 和 EMF 文件格式 + TRTCWaterMarkSrcTypeFile = 0, + + /// BGRA32格式内存块 + TRTCWaterMarkSrcTypeBGRA32 = 1, + + /// RGBA32格式内存块 + TRTCWaterMarkSrcTypeRGBA32 = 2, + }; /** - * 4.5 屏幕分享目标信息 + * 4.9 设备操作 + * + * 该枚举值用于本地设备的状态变化通知{@link onDeviceChange}。 */ -enum TRTCScreenCaptureSourceType -{ - 
TRTCScreenCaptureSourceTypeUnknown = -1, - TRTCScreenCaptureSourceTypeWindow = 0, ///< 该分享目标是某一个窗口 - TRTCScreenCaptureSourceTypeScreen = 1, ///< 该分享目标是整个桌面 - TRTCScreenCaptureSourceTypeCustom = 2, +typedef TXMediaDeviceState TRTCDeviceState; +#define TRTCDeviceStateAdd TXMediaDeviceStateAdd +#define TRTCDeviceStateRemove TXMediaDeviceStateRemove +#define TRTCDeviceStateActive TXMediaDeviceStateActive + +/** + * 4.11 音频录制内容类型 + * + * 该枚举类型用于音频录制接口{@link startAudioRecording},用于指定录制音频的内容。 + */ +enum TRTCAudioRecordingContent { + + ///录制本地和远端所有音频 + TRTCAudioRecordingContentAll = 0, + + ///仅录制本地音频 + TRTCAudioRecordingContentLocal = 1, + + ///仅录制远端音频 + TRTCAudioRecordingContentRemote = 2, + }; ///////////////////////////////////////////////////////////////////////////////// // -// 【(五)TRTC 核心类型定义】 +// TRTC 核心类型定义 // ///////////////////////////////////////////////////////////////////////////////// /** - * 5.1 进房相关参数 + * 5.1 进房参数 * - * 只有该参数填写正确,才能顺利调用 enterRoom 进入 roomId 所指定的音视频房间。 + * 作为 TRTC SDK 的进房参数,只有该参数填写正确,才能顺利进入 roomId 或者 strRoomId 所指定的音视频房间。 + * 由于历史原因,TRTC 支持数字和字符串两种类型的房间号,分别是 roomId 和 strRoomId。 + * 请注意:不要混用 roomId 和 strRoomId,因为它们之间是不互通的,比如数字 123 和字符串 “123” 在 TRTC 看来是两个完全不同的房间。 */ -struct TRTCParams -{ - - ///【字段含义】应用标识(必填),腾讯视频云基于 sdkAppId 完成计费统计。 - ///【推荐取值】在 [实时音视频控制台](https://console.cloud.tencent.com/rav/) 创建应用后可在帐号信息页面中得到该 ID。 +struct TRTCParams { + ///【字段含义】应用标识(必填),腾讯云基于 sdkAppId 完成计费统计。 + ///【推荐取值】在 [实时音视频控制台](https://console.cloud.tencent.com/rav/) 创建应用后可以在账号信息页面中得到该 ID。 uint32_t sdkAppId; ///【字段含义】用户标识(必填),当前用户的 userId,相当于用户名,使用 UTF-8 编码。 - ///【推荐取值】如果一个用户在您的帐号系统中的 ID 为“abc”,则 userId 即可设置为“abc”。 - const char * userId; + ///【推荐取值】如果一个用户在您的帐号系统中的 ID 为“mike”,则 userId 即可设置为“mike”。 + const char *userId; - ///【字段含义】用户签名(必填),当前 userId 对应的验证签名,相当于登录密码。 + ///【字段含义】用户签名(必填),当前 userId 对应的验证签名,相当于使用云服务的登录密码。 ///【推荐取值】具体计算方法请参见 [如何计算UserSig](https://cloud.tencent.com/document/product/647/17275)。 - const char * userSig; + const char *userSig; - 
///【字段含义】房间号码(必填),在同一个房间内的用户可以看到彼此并进行视频通话。 - ///【推荐取值】您可以自定义设置该参数值,但不可重复。如果您的用户帐号 ID (userId)为数字类型,可直接使用创建者的用户 ID 作为 roomId。 + ///【字段含义】数字房间号,在同一个房间里的用户(userId)可以彼此看到对方并进行音视频通话。 + ///【推荐取值】取值范围:1 - 4294967294。 + ///【特别说明】roomId 与 strRoomId 是互斥的,若您选用 strRoomId,则 roomId 需要填写为0。若两者都填,SDK 将优先选用 roomId。 + ///【请您注意】不要混用 roomId 和 strRoomId,因为它们之间是不互通的,比如数字 123 和字符串 “123” 在 TRTC 看来是两个完全不同的房间。 uint32_t roomId; - ///【字段含义】字符串房间号码,在同一个房间里的用户(userId)可以彼此看到对方并进行视频通话。 + ///【字段含义】字符串房间号,在同一个房间里的用户(userId)可以彼此看到对方并进行音视频通话。 + ///【特别说明】roomId 与 strRoomId 是互斥的,若您选用 strRoomId,则 roomId 需要填写为0。若两者都填,SDK 将优先选用 roomId。 + ///【请您注意】不要混用 roomId 和 strRoomId,因为它们之间是不互通的,比如数字 123 和字符串 “123” 在 TRTC 看来是两个完全不同的房间。 ///【推荐取值】限制长度为64字节。以下为支持的字符集范围(共 89 个字符): - /// -大小写英文字母(a-zA-Z); - /// -数字(0-9); - /// -空格、"!"、"#"、"$"、"%"、"&"、"("、")"、"+"、"-"、":"、";"、"<"、"="、"."、">"、"?"、"@"、"["、"]"、"^"、"_"、" {"、"}"、"|"、"~"、","。 - ///【特殊说明】roomId 与 strRoomId 必填一个,若您选用 strRoomId,则 roomId 需要填写为0。若两者都填,将优先选用 roomId。 - const char* strRoomId; - - ///【字段含义】直播场景下的角色,仅适用于直播场景(TRTCAppSceneLIVE 和 TRTCAppSceneVoiceChatRoom),视频通话场景下指定无效。 - ///【推荐取值】默认值:主播(TRTCRoleAnchor) + /// - 大小写英文字母(a-zA-Z); + /// - 数字(0-9); + /// - 空格、"!"、"#"、"$"、"%"、"&"、"("、")"、"+"、"-"、":"、";"、"<"、"="、"."、">"、"?"、"@"、"["、"]"、"^"、"_"、" {"、"}"、"|"、"~"、","。 + const char *strRoomId; + + ///【字段含义】直播场景下的角色,仅适用于直播场景({@link TRTCAppSceneLIVE} 和{@link TRTCAppSceneVoiceChatRoom}),通话场景下指定该参数是无效的。 + ///【推荐取值】默认值:主播({@link TRTCRoleAnchor})。 TRTCRoleType role; - ///【字段含义】绑定腾讯云直播 CDN 流 ID[非必填],设置之后,您就可以在腾讯云直播 CDN 上通过标准直播方案(FLV或HLS)播放该用户的音视频流。 - ///【推荐取值】限制长度为64字节,可以不填写,一种推荐的方案是使用 “sdkappid_roomid_userid_main” 作为 streamid,这样比较好辨认且不会在您的多个应用中发生冲突。 + ///【字段含义】用于指定在腾讯云直播平台上的 streamId(选填),设置之后,您可以在腾讯云直播 CDN 上通过标准拉流方案(FLV或HLS)播放该用户的音视频流。 + ///【推荐取值】限制长度为64字节,可以不填写,一种推荐的方案是使用 “sdkappid_roomid_userid_main” 作为 streamid,这中命名方式容易辨认且不会在您的多个应用中发生冲突。 ///【特殊说明】要使用腾讯云直播 CDN,您需要先在[控制台](https://console.cloud.tencent.com/trtc/) 中的功能配置页开启“启动自动旁路直播”开关。 ///【参考文档】[CDN 
旁路直播](https://cloud.tencent.com/document/product/647/16826)。 - const char * streamId; + const char *streamId; - ///【字段含义】设置云端录制完成后的回调消息中的 "userdefinerecordid" 字段内容,便于您更方便的识别录制回调。 - ///【推荐取值】限制长度为64字节,只允许包含大小写英文字母(a-zA-Z)、数字(0-9)及下划线和连词符。 + ///【字段含义】云端录制开关(选填),用于指定是否要在云端将该用户的音视频流录制下来。 ///【参考文档】[云端录制](https://cloud.tencent.com/document/product/647/16823)。 - const char * userDefineRecordId; - - ///【字段含义】房间签名(非必填),当您希望某个房间只能让特定的 userId 进入时,需要使用 privateMapKey 进行权限保护。 + ///【推荐取值】限制长度为64字节,只允许包含大小写英文字母(a-zA-Z)、数字(0-9)及下划线和连词符。 + /// <p> + /// 方案一:手动录制方案: + /// 1. 在“[控制台](https://console.cloud.tencent.com/trtc) => 应用管理 => 云端录制配置”中开启云端录制。 + /// 2. 设置“录制形式”为“手动录制”。 + /// 3. 设置手动录制后,在一个 TRTC 房间中只有设置了 userDefineRecordId 参数的用户才会在云端录制出视频文件,不指定该参数的用户不会产生录制行为。 + /// 4. 云端会以 “userDefineRecordId_起始时间_结束时间” 的格式命名录制下来的文件。 + /// <p> + /// 方案二:自动录制方案: + /// 1. 需要在“[控制台](https://console.cloud.tencent.com/trtc) => 应用管理 => 云端录制配置”中开启云端录制。 + /// 2. 设置“录制形式”为“自动录制”。 + /// 3. 设置自动录制后,在一个 TRTC 房间中的任何一个有音视频上行的用户,均会在云端录制出视频文件。 + /// 4. 
文件会以 “userDefineRecordId_起始时间_结束时间” 的格式命名,如果不指定 userDefineRecordId,则文件会以 “streamId_起始时间_结束时间” 命名。 + /// <br> + const char *userDefineRecordId; + + ///【字段含义】用于权限控制的权限票据(选填),当您希望某个房间只能让特定的 userId 进入时,需要使用 privateMapKey 进行权限保护。 ///【推荐取值】仅建议有高级别安全需求的客户使用,更多详情请参见 [进房权限保护](https://cloud.tencent.com/document/product/647/32240)。 - const char * privateMapKey; - - ///【字段含义】业务数据(非必填),部分高级特性才需要使用该字段。 - ///【推荐取值】不建议使用 - const char * businessInfo; - - - TRTCParams() - : sdkAppId(0) - , roomId(0) - , strRoomId(nullptr) - , userId(nullptr) - , userSig(nullptr) - , role(TRTCRoleAnchor) - , privateMapKey(nullptr) - , businessInfo(nullptr) - , userDefineRecordId(nullptr) - , streamId(nullptr) - { + const char *privateMapKey; + + ///【字段含义】业务数据字段(选填),部分高级特性才需要用到此字段。 + ///【推荐取值】请不要自行设置该字段。 + const char *businessInfo; + TRTCParams() : sdkAppId(0), userId(nullptr), userSig(nullptr), roomId(0), strRoomId(nullptr), role(TRTCRoleAnchor), streamId(nullptr), userDefineRecordId(nullptr), privateMapKey(nullptr), businessInfo(nullptr) { } }; /** * 5.2 视频编码参数 * - * 该设置决定了远端用户看到的画面质量(同时也是云端录制出的视频文件的画面质量)。 + * 该设置决定远端用户看到的画面质量,同时也决定了云端录制出的视频文件的画面质量。 */ -struct TRTCVideoEncParam -{ +struct TRTCVideoEncParam { ///【字段含义】 视频分辨率 + ///【特别说明】如需使用竖屏分辨率,请指定 resMode 为 Portrait,例如: 640 × 360 + Portrait = 360 × 640。 ///【推荐取值】 - /// - 视频通话建议选择 360 × 640 及以下分辨率,resMode 选择 Portrait。 - /// - 手机直播建议选择 540 × 960,resMode 选择 Portrait。 - /// - Windows 和 Mac 建议选择 640 × 360 及以上分辨率,resMode 选择 Landscape。 - ///【特别说明】 - /// TRTCVideoResolution 默认只能横屏模式的分辨率,例如 640 × 360。 - /// 如需使用竖屏分辨率,请指定 resMode 为 Portrait,例如 640 × 360 结合 Portrait 则为 360 × 640。 + /// - 手机视频通话:建议选择 360 × 640 及以下分辨率,resMode 选择 Portrait,即竖屏分辨率。 + /// - 手机在线直播:建议选择 540 × 960,resMode 选择 Portrait,即竖屏分辨率。 + /// - 桌面平台(Win + Mac):建议选择 640 × 360 及以上分辨率,resMode 选择 Landscape,即横屏分辨率。 TRTCVideoResolution videoResolution; - ///【字段含义】分辨率模式(横屏分辨率 - 竖屏分辨率) - ///【推荐取值】手机直播建议选择 Portrait,Windows 和 Mac 建议选择 Landscape。 - ///【特别说明】如果 videoResolution 指定分辨率 640 × 360,resMode 指定模式为 
Portrait,则最终编码出的分辨率为360 × 640。 + ///【字段含义】分辨率模式(横屏分辨率 or 竖屏分辨率) + ///【推荐取值】手机平台(iOS、Android)建议选择 Portrait,桌面平台(Windows、Mac)建议选择 Landscape。 + ///【特别说明】如需使用竖屏分辨率,请指定 resMode 为 Portrait,例如: 640 × 360 + Portrait = 360 × 640。 TRTCVideoResolutionMode resMode; ///【字段含义】视频采集帧率 - ///【推荐取值】15fps 或 20fps。5fps 以下,卡顿感明显。10fps 以下,会有轻微卡顿感。20fps 以上,则过于浪费(电影的帧率为 24fps)。 - ///【特别说明】很多 Android 手机的前置摄像头并不支持 15fps 以上的采集帧率,部分过于突出美颜功能的 Android 手机前置摄像头的采集帧率可能低于 10fps。 + ///【推荐取值】15fps或20fps。5fps以下,卡顿感明显。10fps以下,会有轻微卡顿感。20fps以上,会浪费带宽(电影的帧率为24fps)。 + ///【特别说明】部分 Android 手机的前置摄像头并不支持15fps以上的采集帧率,部分主打美颜功能的 Android 手机的前置摄像头的采集帧率可能低于10fps。 uint32_t videoFps; - ///【字段含义】视频上行码率 - ///【推荐取值】推荐设置请参考本文件前半部分 TRTCVideoResolution 定义处的注释说明 - ///【特别说明】码率太低会导致视频中出现大量马赛克 + ///【字段含义】目标视频码率,SDK 会按照目标码率进行编码,只有在弱网络环境下才会主动降低视频码率。 + ///【推荐取值】请参考本 TRTCVideoResolution 在各档位注释的最佳码率,也可以在此基础上适当调高。 + /// 比如:TRTCVideoResolution_1280_720 对应 1200kbps 的目标码率,您也可以设置为 1500kbps 用来获得更好的观感清晰度。 + ///【特别说明】您可以通过同时设置 videoBitrate 和 minVideoBitrate 两个参数,用于约束 SDK 对视频码率的调整范围: + /// - 如果您追求“弱网络下允许卡顿但要保持清晰”的效果,可以设置 minVideoBitrate 为 videoBitrate 的 60%; + /// - 如果您追求“弱网络下允许模糊但要保持流畅”的效果,可以设置 minVideoBitrate 为一个较低的数值(比如 100kbps); + /// - 如果您将 videoBitrate 和 minVideoBitrate 设置为同一个值,等价于关闭 SDK 对视频码率的自适应调节能力。 uint32_t videoBitrate; - ///【字段含义】最低视频码率,SDK 会在网络不佳的情况下主动降低视频码率,最低会降至 - /// minVideoBitrate 所设定的数值。 【推荐取值】 - /// - 如果您追求“允许卡顿但要保持清晰”的效果,可以设置 minVideoBitrate 为 videoBitrate 的 - /// 60%; - /// - 如果您追求“允许模糊但要保持流畅”的效果,可以设置 minVideoBitrate 为 200kbps; - /// - 如果您将 videoBitrate 和 minVideoBitrate 设置为同一个值,等价于关闭 SDK - /// 的自适应调节能力; - /// - 默认值:0,此时最低码率由 SDK 根据分辨率情况,自动设置合适的数值。 - ///【特别说明】 - /// - 当您把分辨率设置的比较高时,minVideoBitrate - /// 不适合设置的太低,否则会出现画面模糊和大范围的马赛克宏块。 - /// 比如把分辨率设置为 720p,把码率设置为 - /// 200kbps,那么编码出的画面将会出现大范围区域性马赛克。 + ///【字段含义】最低视频码率,SDK 会在网络不佳的情况下主动降低视频码率以保持流畅度,最低会降至 minVideoBitrate 所设定的数值。 + ///【特别说明】 默认值:0,此时最低码率由 SDK 会根据您指定的分辨率,自动计算出合适的数值。 + ///【推荐取值】您可以通过同时设置 videoBitrate 和 minVideoBitrate 两个参数,用于约束 SDK 对视频码率的调整范围: + /// - 
如果您追求“弱网络下允许卡顿但要保持清晰”的效果,可以设置 minVideoBitrate 为 videoBitrate 的 60%; + /// - 如果您追求“弱网络下允许模糊但要保持流畅”的效果,可以设置 minVideoBitrate 为一个较低的数值(比如 100kbps); + /// - 如果您将 videoBitrate 和 minVideoBitrate 设置为同一个值,等价于关闭 SDK 对视频码率的自适应调节能力。 uint32_t minVideoBitrate; - ///【字段含义】是否允许调整分辨率 - ///【推荐取值】 - /// - 手机直播建议选择 false。 - /// - 视频通话模式,若更关注流畅性,建议选择 true,此时若遇到带宽有限的弱网,SDK 会自动降低分辨率以保障更好的流畅度(仅针对 TRTCVideoStreamTypeBig 生效)。 - /// - 默认值:false。 - ///【特别说明】若有录制需求,选择 true 时,请确保通话过程中,调整分辨率不会影响您的录制效果。 + ///【字段含义】是否允许动态调整分辨率(开启后会对云端录制产生影响)。 + ///【推荐取值】该功能适用于不需要云端录制的场景,开启后 SDK 会根据当前网络情况,智能选择出一个合适的分辨率,避免出现“大分辨率+小码率”的低效编码模式。 + ///【特别说明】默认值:关闭。如有云端录制的需求,请不要开启此功能,因为如果视频分辨率发生变化后,云端录制出的 MP4 在普通的播放器上无法正常播放。 bool enableAdjustRes; - TRTCVideoEncParam() - : videoResolution(TRTCVideoResolution_640_360) - , resMode(TRTCVideoResolutionModeLandscape) - , videoFps(15) - , videoBitrate(550) - , enableAdjustRes(false) - , minVideoBitrate(0) - { - + TRTCVideoEncParam() : videoResolution(TRTCVideoResolution_640_360), resMode(TRTCVideoResolutionModeLandscape), videoFps(15), videoBitrate(550), minVideoBitrate(0), enableAdjustRes(false) { } }; /** - * 5.3 画面渲染参数 + * 5.3 网络流控(Qos)参数集 * - * 您可以通过设置此参数来控制画面的旋转、填充、镜像模式 + * 网络流控相关参数,该设置决定 SDK 在弱网络环境下的调控策略(例如:“清晰优先”或“流畅优先”) */ -struct TRTCRenderParams { - TRTCVideoRotation rotation; ///< 视频画面旋转方向,默认值为 TRTCVideoRotation0 - TRTCVideoFillMode fillMode; ///< 视频画面填充模式,默认值为 TRTCVideoFillMode_Fit - TRTCVideoMirrorType mirrorType; ///< 视频画面镜像模式,默认值为 TRTCVideoMirrorType_Disable - - TRTCRenderParams() - : rotation(TRTCVideoRotation0) - , fillMode(TRTCVideoFillMode_Fit) - , mirrorType(TRTCVideoMirrorType_Disable) { +struct TRTCNetworkQosParam { + ///【字段含义】清晰优先还是流畅优先 + ///【推荐取值】清晰优先 + ///【特别说明】该参数主要影响 TRTC 在较差网络环境下的音视频表现: + /// - 流畅优先:即当前网络不足以传输既清晰又流畅的画面时,优先保证画面的流畅性,代价就是画面会比较模糊且伴随有较多的马赛克。 + /// - 清晰优先(默认值):即当前网络不足以传输既清晰又流畅的画面时,优先保证画面的清晰度,代价就是画面会比较卡顿。 + TRTCVideoQosPreference preference; + + ///【字段含义】流控模式(已废弃) + ///【推荐取值】云端控制 + ///【特别说明】请设置为云端控制模式(TRTCQosControlModeServer) + 
TRTCQosControlMode controlMode; + + TRTCNetworkQosParam() : preference(TRTCVideoQosPreferenceClear), controlMode(TRTCQosControlModeServer) { } }; /** - * 5.4 网络流控相关参数 + * 5.4 视频画面的渲染参数 * - * 网络流控相关参数,该设置决定了SDK在各种网络环境下的调控方向(比如弱网下是“保清晰”或“保流畅”) + * 您可以通过设置此参数来控制画面的旋转角度、填充模式和左右镜像模式。 */ -struct TRTCNetworkQosParam -{ - ///【字段含义】弱网下是“保清晰”或“保流畅” - ///【特别说明】 - /// - 弱网下保流畅:在遭遇弱网环境时,画面会变得模糊,且出现较多马赛克,但可以保持流畅不卡顿。 - /// - 弱网下保清晰:在遭遇弱网环境时,画面会尽可能保持清晰,但可能容易出现卡顿 - TRTCVideoQosPreference preference; +struct TRTCRenderParams { + ///【字段含义】图像的顺时针旋转角度 + ///【推荐取值】支持90、180以及270旋转角度,默认值:{@link TRTCVideoRotation_0} + TRTCVideoRotation rotation; - ///【字段含义】视频分辨率(云端控制 - 客户端控制) - ///【推荐取值】云端控制 - ///【特别说明】 - /// - Server 模式(默认):云端控制模式,若无特殊原因,请直接使用该模式 - /// - Client 模式:客户端控制模式,用于 SDK 开发内部调试,客户请勿使用 - TRTCQosControlMode controlMode; + ///【字段含义】画面填充模式 + ///【推荐取值】填充(画面可能会被拉伸裁剪)或适应(画面可能会有黑边),默认值:{@link TRTCVideoFillMode_Fill} + TRTCVideoFillMode fillMode; - TRTCNetworkQosParam() - : preference(TRTCVideoQosPreferenceClear) - , controlMode(TRTCQosControlModeServer) - { + ///【字段含义】画面镜像模式 + ///【推荐取值】默认值:{@link TRTCVideoMirrorType_Auto} + TRTCVideoMirrorType mirrorType; + TRTCRenderParams() : rotation(TRTCVideoRotation0), fillMode(TRTCVideoFillMode_Fit), mirrorType(TRTCVideoMirrorType_Disable) { } }; /** * 5.5 网络质量 * - * 表示网络质量的好坏,通过这个数值,您可以在 UI 界面上用图标表征 userId 的通话线路质量 + * 表征网络质量的好坏,您可以通过该数值在用户界面上展示每个用户的网络质量。 */ -struct TRTCQualityInfo -{ - const char * userId; ///< 用户标识 - TRTCQuality quality; ///< 网络质量 +struct TRTCQualityInfo { + ///用户 ID + const char *userId; - TRTCQualityInfo() - : userId(nullptr) - , quality(TRTCQuality_Unknown) - { + ///网络质量 + TRTCQuality quality; + TRTCQualityInfo() : userId(nullptr), quality(TRTCQuality_Unknown) { } }; /** * 5.6 音量大小 * - * 表示语音音量的评估大小,通过这个数值,您可以在 UI 界面上用图标表征 userId 是否有在说话。 + * 表征语音音量的评估值,您可以通过该数值在用户界面上展示每个用户的音量大小。 */ -struct TRTCVolumeInfo -{ - /// 说话者的 userId,字符编码格式为 UTF-8 - const char * userId; - /// 说话者的音量, 取值范围 0 - 100 - uint32_t volume; +struct TRTCVolumeInfo 
{ + ///说话者的 userId, 如果 userId 为空则代表是当前用户自己。 + const char *userId; - TRTCVolumeInfo() - : userId(nullptr) - , volume(0) - { + ///说话者的音量大小, 取值范围[0 - 100]。 + uint32_t volume; + TRTCVolumeInfo() : userId(nullptr), volume(0) { } }; - /** - * 5.7 视频帧数据 + * 5.7 测速参数 * + * 您可以在用户进入房间前通过 {@link startSpeedTest} 接口测试网速(注意:请不要在通话中调用)。 */ -struct TRTCVideoFrame -{ - TRTCVideoPixelFormat videoFormat; ///< 视频帧的格式 - TRTCVideoBufferType bufferType; ///< 视频数据结构类型 - char * data; ///< 视频数据,字段 bufferType 是 LiteAVVideoBufferType_Buffer 时生效 - int textureId; ///< 视频纹理 ID,字段 bufferType 是 LiteAVVideoBufferType_Texture 时生效 - uint32_t length; ///< 视频数据的长度,单位是字节,对于 i420 而言, length = width * height * 3 / 2,对于BGRA32而言, length = width * height * 4 - uint32_t width; ///< 画面的宽度 - uint32_t height; ///< 画面的高度 - uint64_t timestamp; ///< 时间戳,单位 ms - TRTCVideoRotation rotation; ///< 画面旋转角度 - - TRTCVideoFrame() - : videoFormat(TRTCVideoPixelFormat_Unknown) - , bufferType(TRTCVideoBufferType_Unknown) - , data(nullptr) - , textureId(-1) - , length(0) - , width(640) - , height(360) - , timestamp(0) - , rotation(TRTCVideoRotation0) - { +struct TRTCSpeedTestParams { + ///应用标识,请参考 {@link TRTCParams} 中的相关说明。 + int sdkAppId; - } -}; + ///用户标识,请参考 {@link TRTCParams} 中的相关说明。 + const char *userId; -/** - * 5.8 音频帧数据 - * - */ -struct TRTCAudioFrame -{ - TRTCAudioFrameFormat audioFormat; ///< 音频帧的格式 - char * data; ///< 音频数据 - uint32_t length; ///< 音频数据的长度 - uint32_t sampleRate; ///< 采样率 - uint32_t channel; ///< 声道数 - uint64_t timestamp; ///< 时间戳,单位 ms - - TRTCAudioFrame() - : audioFormat(TRTCAudioFrameFormatNone) - , data(nullptr) - , length(0) - , sampleRate(48000) - , channel(1) - , timestamp(0) - { + ///用户签名,请参考 {@link TRTCParams} 中的相关说明。 + const char *userSig; + + ///预期的上行带宽(kbps,取值范围: 10 ~ 5000,为 0 时不测试)。 + int expectedUpBandwidth; + ///预期的下行带宽(kbps,取值范围: 10 ~ 5000,为 0 时不测试)。 + int expectedDownBandwidth; + + TRTCSpeedTestParams() : sdkAppId(0), userId(nullptr), userSig(nullptr), expectedUpBandwidth(0), 
expectedDownBandwidth(0) { } }; /** - * 5.9 网络测速结果 - * - * 您可以在用户进入房间前通过 TRTCCloud 的 startSpeedTest 接口进行测速 (注意:请不要在通话中调用), - * 测速结果会每2 - 3秒钟返回一次,每次返回一个 IP 地址的测试结果。 + * 5.8 网络测速结果 * - * @note - quality 是内部通过评估算法测算出的网络质量,loss 越低,rtt 越小,得分便越高。 - * @note - upLostRate 是指上行丢包率。例如,0.3表示每向服务器发送10个数据包可能会在中途丢失3个。 - * @note - downLostRate 是指下行丢包率。例如,0.2表示每从服务器收取10个数据包可能会在中途丢失2个。 - * @note - rtt 是指当前设备到腾讯云服务器的一次网络往返时间,该值越小越好,正常数值范围是10ms - 100ms + * 您可以在用户进入房间前通过 {@link startSpeedTest:} 接口进行测速(注意:请不要在通话中调用)。 */ -struct TRTCSpeedTestResult -{ - /// 服务器 IP 地址 - const char * ip; +struct TRTCSpeedTestResult { + ///测试是否成功。 + bool success; + + ///带宽测试错误信息。 + const char *errMsg; + + ///服务器 IP 地址。 + const char *ip; - /// 网络质量,内部通过评估算法测算出的网络质量,loss 越低,rtt 越小,得分便越高 + ///内部通过评估算法测算出的网络质量,更多信息请参见 {@link TRTCQuality}。 TRTCQuality quality; - /// 上行丢包率,范围是0 - 1.0,例如,0.3表示每向服务器发送10个数据包可能会在中途丢失3个。 + ///上行丢包率,取值范围是 [0 - 1.0],例如 0.3 表示每向服务器发送 10 个数据包可能会在中途丢失 3 个。 float upLostRate; - /// 下行丢包率,范围是0 - 1.0,例如,0.2表示每从服务器收取10个数据包可能会在中途丢失2个。 + ///下行丢包率,取值范围是 [0 - 1.0],例如 0.2 表示每从服务器收取 10 个数据包可能会在中途丢失 2 个。 float downLostRate; - /// 延迟(毫秒),指当前设备到腾讯云服务器的一次网络往返时间,该值越小越好,正常数值范围是10ms - 100ms + ///延迟(毫秒),指当前设备到 TRTC 服务器的一次网络往返时间,该值越小越好,正常数值范围是10ms - 100ms。 int rtt; - TRTCSpeedTestResult() - : ip(nullptr) - , quality(TRTCQuality_Unknown) - , upLostRate(0.0f) - , downLostRate(0.0f) - , rtt(0) - { + ///上行带宽(kbps,-1:无效值)。 + int availableUpBandwidth; + ///下行带宽(kbps,-1:无效值)。 + int availableDownBandwidth; + + TRTCSpeedTestResult() : success(false), errMsg(nullptr), ip(nullptr), quality(TRTCQuality_Unknown), upLostRate(0.0f), downLostRate(0.0f), rtt(0), availableUpBandwidth(0), availableDownBandwidth(0) { } }; /** - * 5.10 云端混流中每一路子画面的位置信息 + * 5.10 视频帧信息 * - * TRTCMixUser 用于指定每一路(即每一个 userId)视频画面的具体摆放位置 + * TRTCVideoFrame 用来描述一帧视频画面的裸数据,也就是编码前或者解码后的视频画面数据。 */ -struct TRTCMixUser -{ - ///【字段含义】参与混流的 userId - const char * userId; +struct TRTCVideoFrame { + ///【字段含义】视频的像素格式 + TRTCVideoPixelFormat videoFormat; - 
///【字段含义】参与混流的 roomId,跨房流传入的实际 roomId,当前房间流传入 roomId = nullptr - const char * roomId; + ///【字段含义】视频数据结构类型 + TRTCVideoBufferType bufferType; - ///【字段含义】图层位置坐标以及大小,左上角为坐标原点(0,0) (绝对像素值) - RECT rect; + ///【字段含义】bufferType 为 {@link TRTCVideoBufferType_Buffer} 时的视频数据,承载用于 C++ 层的内存数据块。 + char *data; - ///【字段含义】图层层次(1 - 15)不可重复 - int zOrder; + ///【字段含义】视频纹理 ID,bufferType 为 {@link TRTCVideoBufferType_Texture} 时的视频数据,承载用于 OpenGL 渲染的纹理数据。 + int textureId; - ///【字段含义】该用户是不是只开启了音频 - ///【推荐取值】默认值:NO - ///【特别说明】废弃,推荐使用 inputType - bool pureAudio; + ///【字段含义】视频数据的长度,单位是字节。对于 i420 而言:length = width * height * 3 / 2;对于 BGRA32 而言:length = width * height * 4。 + uint32_t length; - ///【字段含义】参与混合的是主路画面(TRTCVideoStreamTypeBig)或屏幕分享(TRTCVideoStreamTypeSub)画面 - TRTCVideoStreamType streamType; + ///【字段含义】视频宽度 + uint32_t width; - /// 【字段含义】该用户的输入流类型(该字段是对 pureAudio 字段的升级) - /// 【推荐取值】 - /// - 默认值:TRTCMixInputTypeUndefined - /// - 如果您没有对 pureAudio 字段进行设置,您可以根据实际需要设置该字段 - /// - 如果您已经设置了 pureAudio 为 YES,请设置该字段为 TRTCMixInputTypeUndefined - TRTCMixInputType inputType; - - TRTCMixUser() - : userId(nullptr) - , roomId(nullptr) - , rect() - , zOrder(0) - , pureAudio(false) - , streamType(TRTCVideoStreamTypeBig) - , inputType(TRTCMixInputTypeUndefined) - { - rect.left = 0; - rect.top = 0; - rect.right = 0; - rect.bottom = 0; + ///【字段含义】视频高度 + uint32_t height; + + ///【字段含义】视频帧的时间戳,单位毫秒 + ///【推荐取值】自定义视频采集时可以设置为0。若该参数为0,SDK 会自定填充 timestamp 字段,但请“均匀”地控制 sendCustomVideoData 的调用间隔。 + uint64_t timestamp; + + ///【字段含义】视频像素的顺时针旋转角度 + TRTCVideoRotation rotation; + + TRTCVideoFrame() : videoFormat(TRTCVideoPixelFormat_Unknown), bufferType(TRTCVideoBufferType_Unknown), data(nullptr), textureId(-1), length(0), width(640), height(360), timestamp(0), rotation(TRTCVideoRotation0) { } }; /** - * 5.11 混流参数配置模式 + * 5.11 音频帧数据 + */ +struct TRTCAudioFrame { + ///【字段含义】音频帧的格式 + TRTCAudioFrameFormat audioFormat; + + ///【字段含义】音频数据 + char *data; + + ///【字段含义】音频数据的长度 + uint32_t length; + + ///【字段含义】采样率 + uint32_t sampleRate; 
+ + ///【字段含义】声道数 + uint32_t channel; + + ///【字段含义】时间戳,单位ms + uint64_t timestamp; + + ///【字段含义】音频额外数据,远端用户通过 `onLocalProcessedAudioFrame` 写入的数据会通过该字段回调 + char *extraData; + + ///【字段含义】音频消息数据的长度 + uint32_t extraDataLength; + + TRTCAudioFrame() : audioFormat(TRTCAudioFrameFormatNone), data(nullptr), length(0), sampleRate(48000), channel(1), timestamp(0), extraData(nullptr), extraDataLength(0) { + } +}; + +/** + * 5.12 云端混流中各路画面的描述信息 * + * TRTCMixUser 用于指定云端混流中每一路视频画面的位置、大小、图层以及流类型等信息。 */ -enum TRTCTranscodingConfigMode { - /// 非法值 - TRTCTranscodingConfigMode_Unknown = 0, +struct TRTCMixUser { + ///【字段含义】用户 ID + const char *userId; - /// 全手动模式,灵活性最高,可以自由组合出各种混流方案,但易用性最差。 - /// 此模式下,您需要填写 TRTCTranscodingConfig 中的所有参数,并需要监听 TRTCCloudDelegate 中的 onUserVideoAvailable() 和 onUserAudioAvailable() 回调, - /// 以便根据当前房间中各个上麦用户的音视频状态不断地调整 mixUsers 参数,否则会导致混流失败。 - TRTCTranscodingConfigMode_Manual = 1, + ///【字段含义】该路音视频流所在的房间号(设置为空值代表当前用户所在的房间号) + const char *roomId; - /// 纯音频模式,适用于语音通话(AudioCall)和语音聊天室(VoiceChatRoom)等纯音频场景。 - /// 只需要在进房后通过 setMixTranscodingConfig() 接口设置一次,之后 SDK 就会自动把房间内所有上麦用户的声音混流到当前用户的直播流上。 - /// 此模式下,您无需设置 TRTCTranscodingConfig 中的 mixUsers 参数,只需设置 audioSampleRate、audioBitrate 和 audioChannels 等参数。 - TRTCTranscodingConfigMode_Template_PureAudio = 2, + ///【字段含义】指定该路画面的坐标区域(单位:像素) + RECT rect; - /// 预排版模式,通过占位符提前对各路画面进行排布 - /// 此模式下,您依然需要设置 mixUsers 参数,但可以将 userId 设置为占位符,可选的占位符有: - /// - "$PLACE_HOLDER_REMOTE$" : 指代远程用户的画面,可以设置多个。 - /// - "$PLACE_HOLDER_LOCAL_MAIN$" : 指代本地摄像头画面,只允许设置一个。 - /// - "$PLACE_HOLDER_LOCAL_SUB$" : 指代本地屏幕分享画面,只允许设置一个。 - /// 但是您可以不需要监听 TRTCCloudDelegate 中的 onUserVideoAvailable() 和 onUserAudioAvailable() 回调进行实时调整, - /// 只需要在进房成功后调用一次 setMixTranscodingConfig() 即可,之后 SDK 会自动将真实的 userId 补位到您设置的占位符上。 - TRTCTranscodingConfigMode_Template_PresetLayout = 3, + ///【字段含义】指定该路画面的层级(取值范围:1 - 15,不可重复) + int zOrder; - /// 屏幕分享模式,适用于在线教育场景等以屏幕分享为主的应用场景,仅支持 Windows 和 Mac 两个平台的 SDK。 - /// SDK 会先根据您(通过 videoWidth 和 videoHeight 参数)设置的目标分辨率构建一张画布, - /// 
当老师未开启屏幕分享时,SDK 会将摄像头画面等比例拉伸绘制到该画布上;当老师开启屏幕分享之后,SDK 会将屏幕分享画面绘制到同样的画布上。 - /// 这样操作的目的是为了确保混流模块的输出分辨率一致,避免课程回放和网页观看的花屏问题(网页播放器不支持可变分辨率)。 - /// - /// 由于教学模式下的视频内容以屏幕分享为主,因此同时传输摄像头画面和屏幕分享画面是非常浪费带宽的。 - /// 推荐的做法是直接将摄像头画面通过 setLocalVideoRenderCallback 接口自定义绘制到当前屏幕上。 - /// 在该模式下,您无需设置 TRTCTranscodingConfig 中的 mixUsers 参数,SDK 不会混合学生的画面,以免干扰屏幕分享的效果。 - /// - /// 您可以将 TRTCTranscodingConfig 中的 width × height 设为 0px × 0px,SDK 会自动根据用户当前屏幕的宽高比计算出一个合适的分辨率: - /// - 如果老师当前屏幕宽度 <= 1920px,SDK 会使用老师当前屏幕的实际分辨率。 - /// - 如果老师当前屏幕宽度 > 1920px,SDK 会根据当前屏幕宽高比,选择 1920x1080(16:9)、1920x1200(16:10)、1920x1440(4:3) 三种分辨率中的一种。 - TRTCTranscodingConfigMode_Template_ScreenSharing = 4, + ///【字段含义】指定该路画面是主路画面({@link TRTCVideoStreamTypeBig})还是辅路画面({@link TRTCVideoStreamTypeSub})。 + TRTCVideoStreamType streamType; + + ///【字段含义】指定该路流是不是只混合声音 + ///【推荐取值】默认值:false + ///【特别说明】已废弃,推荐使用8.5版本开始新引入的字段:inputType。 + bool pureAudio; + + ///【字段含义】指定该路流的混合内容(只混音频、只混视频、混合音视频、混入水印) + ///【默认取值】默认值:TRTCMixInputTypeUndefined + ///【特别说明】 + /// - 当指定 inputType 为 TRTCMixInputTypeUndefined 并设置 pureAudio 为 YES 时,等效于设置 inputType 为 TRTCMixInputTypePureAudio。 + /// - 当指定 inputType 为 TRTCMixInputTypeUndefined 并设置 pureAudio 为 NO 时,等效于设置 inputType 为 TRTCMixInputTypeAudioVideo。 + /// - 当指定 inputType 为 TRTCMixInputTypeWatermark 时,您可以不指定 userId 字段,但需要指定 image 字段。 + TRTCMixInputType inputType; + + ///【字段含义】该画面在输出时的显示模式 + ///【推荐取值】默认值:视频流默认为0。0为裁剪,1为缩放,2为缩放并显示黑底。 + ///【特别说明】水印图和占位图暂时不支持设置 renderMode,默认强制拉伸处理 + uint32_t renderMode; + + ///【字段含义】占位图或水印图 + /// - 占位图是指当对应 userId 混流内容为纯音频时,混合后的画面中显示的是占位图片。 + /// - 水印图是指一张贴在混合后画面中的半透明图片,这张图片会一直覆盖于混合后的画面上。 + /// - 当指定 inputType 为 TRTCMixInputTypePureAudio 时,image 为占位图,此时需要您指定 userId。 + /// - 当指定 inputType 为 TRTCMixInputTypeWatermark 时,image 为水印图,此时不需要您指定 userId。 + ///【推荐取值】默认值:空值,即不设置占位图或者水印图。 + ///【特别说明】 + /// - 您可以将 image 设置为控制台中的某一个素材 ID,这需要您事先在 “[控制台](https://console.cloud.tencent.com/trtc) => 应用管理 => 功能配置 => 素材管理” 中单击 [新增图片] 按钮进行上传。 + /// - 上传成功后可以获得对应的“图片ID”,然后将“图片ID”转换成字符串类型并设置给 image 
字段即可(比如假设“图片ID” 为 63,可以设置 image = @"63") + /// - 您也可以将 image 设置为图片的 URL 地址,腾讯云的后台服务器会将该 URL 地址指定的图片混合到最终的画面中。 + /// - URL 链接长度限制为 512 字节。图片大小限制不超过 2MB。 + /// - 图片格式支持 png、jpg、jpeg、bmp 格式,推荐使用 png 格式的半透明图片作为水印。 + /// - image 仅在 inputType 为 TRTCMixInputTypePureAudio 或者 TRTCMixInputTypeWatermark 时才生效。 + const char *image; + + TRTCMixUser() : userId(nullptr), roomId(nullptr), rect(), zOrder(0), streamType(TRTCVideoStreamTypeBig), pureAudio(false), inputType(TRTCMixInputTypeUndefined), renderMode(0), image(nullptr) { + rect.left = 0; + rect.top = 0; + rect.right = 0; + rect.bottom = 0; + } }; /** - * 5.12 云端混流(转码)配置 + * 5.13 云端混流的排版布局和转码参数 * - * 包括最终编码质量和各路画面的摆放位置 + * 用于指定混流时各路画面的排版位置信息和云端转码的编码参数。 */ -struct TRTCTranscodingConfig -{ - ///【字段含义】转码 config 模式 +struct TRTCTranscodingConfig { + ///【字段含义】排版模式 + ///【推荐取值】请根据您的业务场景要求自行选择,预排版模式是适用性较好的一种模式。 TRTCTranscodingConfigMode mode; - ///【字段含义】腾讯云直播 AppID - ///【推荐取值】请在 [实时音视频控制台](https://console.cloud.tencent.com/rav) 选择已经创建的应用,单击【帐号信息】后,在“直播信息”中获取 + ///【字段含义】腾讯云直播服务的 AppID + ///【推荐取值】请在 [实时音视频控制台](https://console.cloud.tencent.com/trtc) 依次单击【应用管理】=>【应用信息】,并在【旁路直播信息】中获取 appid。 uint32_t appId; - ///【字段含义】腾讯云直播 bizid - ///【推荐取值】请在 [实时音视频控制台](https://console.cloud.tencent.com/rav) 选择已经创建的应用,单击【帐号信息】后,在“直播信息”中获取 + ///【字段含义】腾讯云直播服务的 bizid + ///【推荐取值】请在 [实时音视频控制台](https://console.cloud.tencent.com/trtc) 依次单击【应用管理】=>【应用信息】,并在【旁路直播信息】中获取 bizid。 uint32_t bizId; - ///【字段含义】最终转码后的视频分辨率的宽度。 - ///【推荐取值】推荐值:360px ,如果你是纯音频推流,请将 width × height 设为 0px × 0px,否则混流后会携带一条画布背景的视频流。 + ///【字段含义】指定云端转码的目标分辨率(宽度) + ///【推荐取值】单位:像素值,推荐值:360,如果你只混合音频流,请将 width 和 height 均设置位 0,否则混流转码后的直播流中会有黑色背景。 uint32_t videoWidth; - ///【字段含义】最终转码后的视频分辨率的高度。 - ///【推荐取值】推荐值:640px ,如果你是纯音频推流,请将 width × height 设为 0px × 0px,否则混流后会携带一条画布背景的视频流。 + ///【字段含义】指定云端转码的目标分辨率(高度) + ///【推荐取值】单位:像素值,推荐值:640,如果你只混合音频流,请将 width 和 height 均设置位 0,否则混流转码后的直播流中会有黑色背景。 uint32_t videoHeight; - ///【字段含义】最终转码后的视频分辨率的码率(kbps)。 - 
///【推荐取值】如果填0,后台会根据videoWidth和videoHeight来估算码率,您也可以参考枚举定义TRTCVideoResolution_640_480的注释。 + ///【字段含义】指定云端转码的目标视频码率(kbps) + ///【推荐取值】如果填0,TRTC 会根据 videoWidth 和 videoHeight 估算出一个合理的码率值,您也可以参考视频分辨率枚举定义中所推荐的码率值(见注释部分)。 uint32_t videoBitrate; - ///【字段含义】最终转码后的视频分辨率的帧率(FPS)。 + ///【字段含义】指定云端转码的目标视频帧率(FPS) ///【推荐取值】默认值:15fps,取值范围是 (0,30]。 uint32_t videoFramerate; - ///【字段含义】最终转码后的视频分辨率的关键帧间隔(又称为 GOP)。 + ///【字段含义】指定云端转码的目标视频关键帧间隔(GOP) ///【推荐取值】默认值:2,单位为秒,取值范围是 [1,8]。 uint32_t videoGOP; - ///【字段含义】混合后画面的底色颜色,默认为黑色,格式为十六进制数字,比如:“0x61B9F1” 代表 RGB 分别为(97,158,241)。 - ///【推荐取值】默认值:0x000000,黑色 + ///【字段含义】指定混合画面的底色颜色 + ///【推荐取值】默认值:0x000000 代表黑色。格式为十六进制数字,比如:“0x61B9F1” 代表 RGB 分别为(97,158,241)。 uint32_t backgroundColor; - ///【字段含义】混合后画面的背景图。 - ///【推荐取值】默认值:null,即不设置背景图 - ///【特别说明】背景图需要您事先在 “[控制台](https://console.cloud.tencent.com/trtc) => 应用管理 => 功能配置 => 素材管理” 中上传, <br> - /// 上传成功后可以获得对应的“图片ID”,然后将“图片ID”转换成字符串类型并设置到 backgroundImage 里即可。 <br> - /// 例如:假设“图片ID” 为 63,可以设置 backgroundImage = "63"; <br> - const char * backgroundImage; - - ///【字段含义】最终转码后的音频采样率。 + ///【字段含义】指定混合画面的背景图片 + ///【推荐取值】默认值:空值,即不设置背景图片。 + ///【特别说明】 + /// - 您可以将 image 设置为控制台中的某一个素材 ID,这需要您事先在 “[控制台](https://console.cloud.tencent.com/trtc) => 应用管理 => 功能配置 => 素材管理” 中单击 [新增图片] 按钮进行上传。 + /// - 上传成功后可以获得对应的“图片ID”,然后将“图片ID”转换成字符串类型并设置给 image 字段即可(比如假设“图片ID” 为 63,可以设置 image = @"63") + /// - 您也可以将 image 设置为图片的 URL 地址,腾讯云的后台服务器会将该 URL 地址指定的图片混合到最终的画面中。 + /// - URL 链接长度限制为 512 字节。图片大小限制不超过 2MB。 + /// - 图片格式支持 png、jpg、jpeg、bmp 格式。 + const char *backgroundImage; + + ///【字段含义】指定云端转码的目标音频采样率 ///【推荐取值】默认值:48000Hz。支持12000HZ、16000HZ、22050HZ、24000HZ、32000HZ、44100HZ、48000HZ。 uint32_t audioSampleRate; - ///【字段含义】最终转码后的音频码率。 + ///【字段含义】指定云端转码的目标音频码率 ///【推荐取值】默认值:64kbps,取值范围是 [32,192]。 uint32_t audioBitrate; - ///【字段含义】最终转码后的音频声道数 - ///【推荐取值】默认值:1。取值范围为 [1,2] 中的整型。 + ///【字段含义】指定云端转码的音频声道数 + ///【推荐取值】默认值:1,代表单声道。可设定的数值只有两个数字:1-单声道,2-双声道。 uint32_t audioChannels; - ///【字段含义】每一路子画面的位置信息 - TRTCMixUser * mixUsersArray; + 
///【字段含义】指定云端转码的输出流音频编码类型 + ///【推荐取值】默认值:0,代表LC-AAC。可设定的数值只有三个数字:0 - LC-AAC,1 - HE-AAC,2 - HE-AACv2。 + ///【特别说明】HE-AAC 和 HE-AACv2 支持的输出流音频采样率范围为[48000, 44100, 32000, 24000, 16000] + ///【特别说明】当音频编码设置为 HE-AACv2 时,只支持输出流音频声道数为双声道。 + ///【特别说明】HE-AAC 和 HE-AACv2 取值仅在输出流为您额外设置的 streamId 上时才生效。 + uint32_t audioCodec; - ///【字段含义】 数组 mixUsersArray 的大小 + ///【字段含义】指定云端混流中每一路视频画面的位置、大小、图层以及流类型等信息 + ///【推荐取值】该字段是一个 TRTCMixUser 类型的数组,数组中的每一个元素都用来代表每一路画面的信息。 + TRTCMixUser *mixUsersArray; + + ///【字段含义】 数组 mixUsersArray 的元素个数 uint32_t mixUsersArraySize; ///【字段含义】输出到 CDN 上的直播流 ID - /// 如不设置该参数,SDK 会执行默认逻辑,即房间里的多路流会混合到该接口调用者的视频流上,也就是 A+B =>A; - /// 如果设置该参数,SDK 会将房间里的多路流混合到您指定的直播流 ID 上,也就是 A+B =>C。 - ///【推荐取值】默认值:null,即房间里的多路流会混合到该接口调用者的视频流上。 - const char * streamId; + ///【推荐取值】默认值:空值,即房间里的多路音视频流最终会混合到接口调用者的那一路音视频流上。 + /// - 如不设置该参数,SDK 会执行默认逻辑,即房间里的多路音视频流会混合到该接口调用者的那一路音视频流上,也就是 A + B => A。 + /// - 如您设置该参数,SDK 会将房间里的多路音视频流混合到您指定的直播流上,也就是 A + B => C(C 代表您指定的 streamId)。 + const char *streamId; TRTCTranscodingConfig() - : mode(TRTCTranscodingConfigMode_Unknown) - , appId(0) - , bizId(0) - , videoWidth(0) - , videoHeight(0) - , videoBitrate(0) - , videoFramerate(15) - , videoGOP(2) - , audioSampleRate(48000) - , audioBitrate(64) - , audioChannels(1) - , mixUsersArray(nullptr) - , mixUsersArraySize(0) - , backgroundColor(0) - , backgroundImage(nullptr) - , streamId(nullptr) - {} + : mode(TRTCTranscodingConfigMode_Unknown), + appId(0), + bizId(0), + videoWidth(0), + videoHeight(0), + videoBitrate(0), + videoFramerate(15), + videoGOP(2), + backgroundColor(0), + backgroundImage(nullptr), + audioSampleRate(48000), + audioBitrate(64), + audioChannels(1), + audioCodec(0), + mixUsersArray(nullptr), + mixUsersArraySize(0), + streamId(nullptr) { + } }; /** - * 5.13 CDN 旁路推流参数 + * 5.14 向非腾讯云 CDN 上发布音视频流时需设置的转推参数 + * + * TRTC 的后台服务支持通过标准 RTMP 协议,将其中的音视频流发布到第三方直播 CDN 服务商。 + * 如果您使用腾讯云直播 CDN 服务,可无需关注此参数,直接使用 {@link startPublish} 接口即可。 */ -struct TRTCPublishCDNParam -{ - /// 腾讯云 AppID,请在 
[实时音视频控制台](https://console.cloud.tencent.com/rav) 选择已经创建的应用,单击【帐号信息】,在“直播信息”中获取 +struct TRTCPublishCDNParam { + ///【字段含义】腾讯云直播服务的 AppID + ///【推荐取值】请在 [实时音视频控制台](https://console.cloud.tencent.com/trtc) 依次单击【应用管理】=>【应用信息】,并在【旁路直播信息】中获取 appid。 uint32_t appId; - /// 腾讯云直播 bizid,请在 [实时音视频控制台](https://console.cloud.tencent.com/rav) 选择已经创建的应用,单击【帐号信息】,在“直播信息”中获取 + ///【字段含义】腾讯云直播服务的 bizid + ///【推荐取值】请在 [实时音视频控制台](https://console.cloud.tencent.com/trtc) 依次单击【应用管理】=>【应用信息】,并在【旁路直播信息】中获取 bizid。 uint32_t bizId; - /// 旁路转推的 URL - const char * url; + ///【字段含义】指定该路音视频流在第三方直播服务商的推流地址(RTMP 格式) + ///【推荐取值】各家服务商的推流地址规则差异较大,请根据目标服务商的要求填写合法的推流 URL,TRTC 的后台服务器会按照您填写的 URL 向第三方服务商推送标准格式音视频流。 + ///【特别说明】推流 URL 必须为 RTMP 格式,必须符合您的目标直播服务商的规范要求,否则目标服务商会拒绝来自 TRTC 后台服务的推流请求。 + const char *url; - TRTCPublishCDNParam() - : url(nullptr) - , appId(0) - , bizId(0) - { + ///【字段含义】需要转推的 streamId + ///【推荐取值】默认值:空值。如果不填写,则默认转推调用者的旁路流。 + const char *streamId; + TRTCPublishCDNParam() : appId(0), bizId(0), url(nullptr), streamId(nullptr) { } }; /** - * 5.14 录音参数 + * 5.15 本地音频文件的录制参数 * - * 请正确填写参数,确保录音文件顺利生成。 + * 该参数用于在音频录制接口 {@link startAudioRecording} 中指定录制参数。 */ struct TRTCAudioRecordingParams { + ///【字段含义】录音文件的保存路径(必填)。 + ///【特别说明】该路径需精确到文件名及格式后缀,格式后缀用于决定录音文件的格式,目前支持的格式有 PCM、WAV 和 AAC。 + /// 例如:假如您指定路径为 "mypath/record/audio.aac",代表您希望 SDK 生成一个 AAC 格式的音频录制文件。 + /// 请您指定一个有读写权限的合法路径,否则录音文件无法生成。 + const char *filePath; - ///【字段含义】文件路径(必填),录音文件的保存路径。该路径需要用户自行指定,请确保路径存在且可写。 - ///【特别说明】该路径需精确到文件名及格式后缀,格式后缀决定录音文件的格式,目前支持的格式有 PCM、WAV 和 AAC。 - /// 例如,指定路径为 path/to/audio.aac,则会生成一个 AAC 格式的文件。 - /// 请指定一个有读写权限的合法路径,否则录音文件无法生成。 - const char * filePath; - - TRTCAudioRecordingParams() - : filePath(nullptr) - { + ///【字段含义】音频录制内容类型。 + ///【特别说明】默认录制所有本地和远端音频。 + TRTCAudioRecordingContent recordingContent; + TRTCAudioRecordingParams() : filePath(nullptr), recordingContent(TRTCAudioRecordingContentAll) { } }; /** - * 5.15 音效 + * 5.16 本地媒体文件的录制参数 + * + * 该参数用于在本地媒体文件的录制接口 {@link startLocalRecording} 中指定录制相关参数。 + * 接口 
startLocalRecording 是接口 startAudioRecording 的能力加强版本,前者可以录制视频文件,后者只能录制音频文件。 */ -struct TRTCAudioEffectParam { +struct TRTCLocalRecordingParams { + ///【字段含义】录制的文件地址(必填),请确保路径有读写权限且合法,否则录制文件无法生成。 + ///【特别说明】该路径需精确到文件名及格式后缀,格式后缀用于决定录制出的文件格式,目前支持的格式暂时只有 MP4。 + /// 例如:假如您指定路径为 "mypath/record/test.mp4",代表您希望 SDK 生成一个 MP4 格式的本地视频文件。 + /// 请您指定一个有读写权限的合法路径,否则录制文件无法生成。 + const char *filePath = ""; - ///【字段含义】音效 ID, - ///【特殊说明】SDK 允许播放多路音效,因此需要音效 ID 进行标记,用于控制音效的开始、停止、音量等 + ///【字段含义】媒体录制类型,默认值:TRTCRecordTypeBoth,即同时录制音频和视频。 + TRTCLocalRecordType recordType = TRTCLocalRecordType_Both; + + ///【字段含义】interval 录制信息更新频率,单位毫秒,有效范围:1000-10000。默认值为-1,表示不回调。 + int interval = -1; +}; + +/** + * 5.17 音效参数(已废弃) + * + * TRTC 中的“音效”特指一些短暂的音频文件,通常仅有几秒钟的播放时间,比如“鼓掌声”、“欢笑声”等。 + * 该参数用于在早期版本的音效播放接口 {@link TRTCCloud#playAudioEffect} 中指定音效文件(即短音频文件)的路径和播放次数等。 + * 在 7.3 版本以后,音效接口已被新的接口 {@link TXAudioEffectManager#startPlayMusic} 所取代。 + * 您在指定 startPlayMusic 的参数 {@link TXAudioMusicParam} 时,如果将 “isShortFile” 设置为 true,即为“音效”文件。 + */ +struct TRTCAudioEffectParam { + ///【字段含义】音效 ID + ///【特别说明】SDK 允许播放多路音效,因此需要音效 ID 进行标记,用于控制音效的开始、停止、音量等。 int effectId; - ///【字段含义】音效路径,支持的文件格式:aac, mp3。 - const char * path; + ///【字段含义】音效文件路径,支持的文件格式:aac, mp3, m4a。 + const char *path; ///【字段含义】循环播放次数 - ///【推荐取值】取值范围为0 - 任意正整数,默认值:0。0表示播放音效一次;1表示播放音效两次;以此类推 + ///【推荐取值】取值范围为0 - 任意正整数,默认值:0,表示播放音效一次;1表示播放音效两次;以此类推。 int loopCount; ///【字段含义】音效是否上行 - ///【推荐取值】YES:音效在本地播放的同时,会上行至云端,因此远端用户也能听到该音效;NO:音效不会上行至云端,因此只能在本地听到该音效。默认值:NO + ///【推荐取值】true:音效在本地播放的同时,会上行至云端,因此远端用户也能听到该音效;false:音效不会上行至云端,因此只能在本地听到该音效。默认值:false bool publish; ///【字段含义】音效音量 ///【推荐取值】取值范围为0 - 100;默认值:100 int volume; - TRTCAudioEffectParam(const int _effectId, const char *_path) - : loopCount(0) - , publish(false) - , volume(100) - { + TRTCAudioEffectParam(const int _effectId, const char *_path) : loopCount(0), publish(false), volume(100) { effectId = _effectId; path = _path; } }; /** - * 5.16 切换房间参数 + * 5.18 房间切换参数 + * + * 该参数用于切换房间接口{@link 
switchRoom},可以让用户从一个房间快速切换到另一个房间。 */ struct TRTCSwitchRoomConfig { - ///【字段含义】数字房间号码 [选填],在同一个房间内的用户可以看到彼此并进行视频通话。 + ///【字段含义】数字房间号码 [选填],在同一个房间内的用户可以看到彼此并能够进行音视频通话。 ///【推荐取值】取值范围:1 - 4294967294。 ///【特别说明】roomId 和 strRoomId 必须并且只能填一个。若两者都填,则优先选择 roomId。 uint32_t roomId; - ///【字段含义】字符串房间号码 [选填],在同一个房间内的用户可以看到彼此并进行视频通话。 + ///【字段含义】字符串房间号码 [选填],在同一个房间内的用户可以看到彼此并能够进行音视频通话。 ///【特别说明】roomId 和 strRoomId 必须并且只能填一个。若两者都填,则优先选择 roomId。 const char *strRoomId; - ///【字段含义】用户签名 [选填],当前 userId 对应的验证签名,相当于登录密码。不填时,SDK 会继续使用旧的 userSig, - /// 但用户必须保证旧的 userSig 仍在有效期内,否则会造成进房失败等后果。 - ///【推荐取值】具体计算方法请参见 [如何计算UserSig](https://cloud.tencent.com/document/product/647/17275)。 + ///【字段含义】用户签名 [选填],当前 userId 对应的验证签名,相当于登录密码。 + /// 如果您在切换房间时不指定新计算出的 userSig,SDK 会继续使用您在进入房间时(enterRoom)时所指定的 userSig。 + /// 这就需要您必须保证旧的 userSig 在切换房间的那一刻仍在签名允许的效期内,否则会导致房间切换失败。 + ///【推荐取值】具体计算方法请参考 [如何计算UserSig](https://cloud.tencent.com/document/product/647/17275)。 const char *userSig; - ///【字段含义】房间签名 [选填],当您希望某个房间只能让特定的 userId 进入时,需要使用 privateMapKey 进行权限保护。 + ///【字段含义】用于权限控制的权限票据(选填),当您希望某个房间只能让特定的 userId 进入时,需要使用 privateMapKey 进行权限保护。 ///【推荐取值】仅建议有高级别安全需求的客户使用,更多详情请参见 [进房权限保护](https://cloud.tencent.com/document/product/647/32240)。 const char *privateMapKey; - TRTCSwitchRoomConfig() - : roomId(0) - , strRoomId(nullptr) - , userSig(nullptr) - , privateMapKey(nullptr) - { - + TRTCSwitchRoomConfig() : roomId(0), strRoomId(nullptr), userSig(nullptr), privateMapKey(nullptr) { } }; /** - * 5.17 自己本地的音视频统计信息 + * 5.19 音频自定义回调的格式参数 + * + * 该参数用于在音频自定义回调相关的接口中,设置 SDK 回调出来的音频数据的相关格式(包括采样率、声道数等)。 */ -struct TRTCLocalStatistics -{ - uint32_t width; ///< 视频宽度 +struct TRTCAudioFrameCallbackFormat { + ///【字段含义】采样率 + ///【推荐取值】默认值:48000Hz。支持 16000, 32000, 44100, 48000。 + int sampleRate; - uint32_t height; ///< 视频高度 + ///【字段含义】声道数 + ///【推荐取值】默认值:1,代表单声道。可设定的数值只有两个数字:1-单声道,2-双声道。 + int channel; - uint32_t frameRate; ///< 帧率(fps) - - uint32_t videoBitrate; ///< 视频发送码率(Kbps) - - uint32_t audioSampleRate; ///< 音频采样率(Hz) - - uint32_t 
audioBitrate; ///< 音频发送码率(Kbps) - - TRTCVideoStreamType streamType; ///< 流类型(大画面 | 小画面 | 辅路画面) - - /// 音频设备采集状态,用于检测外接音频设备的健康度 - /// 0:采集设备状态正常;1:检测到长时间静音;2:检测到破音;3:检测到声音异常间断。 - uint32_t audioCaptureState; - - TRTCLocalStatistics() - : width(0) - , height(0) - , frameRate(0) - , videoBitrate(0) - , audioSampleRate(0) - , audioBitrate(0) - , streamType(TRTCVideoStreamTypeBig) - , audioCaptureState(0) - { + ///【字段含义】采样点数 + ///【推荐取值】取值必须是 sampleRate/100 的整数倍。 + int samplesPerCall; + TRTCAudioFrameCallbackFormat() : sampleRate(0), channel(0), samplesPerCall(0) { } }; /** - * 5.18 远端成员的音视频统计信息 + * 5.20 TRTC 屏幕分享图标信息以及 mute image 垫片 */ -struct TRTCRemoteStatistics -{ - /// 用户 ID,指定是哪个用户的视频流 - const char * userId; - - /// 该线路的总丢包率(%) - /// 这个值越小越好,例如,丢包率为0表示网络很好。 - /// 丢包率是该线路的 userId 从上行到服务器再到下行的总丢包率。 - /// 如果 downLoss 为0,但是 finalLoss 不为0,说明该 userId 上行时出现了无法恢复的丢包。 - uint32_t finalLoss; - - /// 视频宽度 - uint32_t width; - /// 视频高度 - uint32_t height; - - /// 接收帧率(fps) - uint32_t frameRate; +struct TRTCImageBuffer { + ///图像存储的内容,一般为 BGRA 结构 + const char *buffer; - /// 视频码率(Kbps) - uint32_t videoBitrate; + ///图像数据的大小 + uint32_t length; - /// 音频采样率(Hz) - uint32_t audioSampleRate; + ///图像的宽度 + uint32_t width; - /// 音频码率(Kbps) - uint32_t audioBitrate; + ///图像的高度 + uint32_t height; - /// 播放时延(ms) - uint32_t jitterBufferDelay; + TRTCImageBuffer() : buffer(nullptr), length(0), width(0), height(0) { + } +}; - /// 端对端延迟(ms) - /// 该字段为全链路延迟统计,链路包含:采集->编码->网络传输->接收->缓冲->解码->播放 - /// 延迟以 audio 为基准进行计算。需要本地和远端均为8.5版本以上时才生效 - /// 若远端用户为低版本,对应延迟会回调为0,此时代表无效值 - uint32_t point2PointDelay; +/** + * 5.21 屏幕分享的目标信息(仅适用于桌面系统) + * + * 在用户进行屏幕分享时,可以选择抓取整个桌面,也可以仅抓取某个程序的窗口。 + * TRTCScreenCaptureSourceInfo 用于描述待分享目标的信息,包括 ID、名称、缩略图等,该结构体中的字段信息均是只读的。 + */ +struct TRTCScreenCaptureSourceInfo { + ///【字段含义】采集源类型(是分享整个屏幕?还是分享某个窗口?) 
+ TRTCScreenCaptureSourceType type; - /// 音频播放卡顿累计时长(ms) - uint32_t audioTotalBlockTime; + ///【字段含义】采集源的ID,对于窗口,该字段代表窗口的 ID;对于屏幕,该字段代表显示器的 ID。 + TXView sourceId; - /// 音频播放卡顿率,音频卡顿累计时长占音频总播放时长的百分比 (%) - uint32_t audioBlockRate; + ///【字段含义】采集源名称(采用 UTF8 编码) + const char *sourceName; - /// 视频播放卡顿累计时长(ms) - uint32_t videoTotalBlockTime; + ///【字段含义】分享窗口的缩略图 + TRTCImageBuffer thumbBGRA; - /// 视频播放卡顿率,视频卡顿累计时长占音频总播放时长的百分比(%) - uint32_t videoBlockRate; + ///【字段含义】分享窗口的图标 + TRTCImageBuffer iconBGRA; - /// 流类型(大画面 | 小画面 | 辅路画面) - TRTCVideoStreamType streamType; + ///【字段含义】是否为最小化窗口 + bool isMinimizeWindow; - TRTCRemoteStatistics() - : userId(nullptr) - , finalLoss(0) - , width(0) - , height(0) - , frameRate(0) - , videoBitrate(0) - , audioSampleRate(0) - , audioBitrate(0) - , jitterBufferDelay(0) - , point2PointDelay(0) - , audioTotalBlockTime(0) - , audioBlockRate(0) - , videoTotalBlockTime(0) - , videoBlockRate(0) - , streamType(TRTCVideoStreamTypeBig) - { + ///【字段含义】是否为主显示屏(适用于多显示器的情况) + bool isMainScreen; + TRTCScreenCaptureSourceInfo() : type(TRTCScreenCaptureSourceTypeUnknown), sourceId(nullptr), sourceName(nullptr), isMinimizeWindow(false), isMainScreen(false) { } }; - -/** - * 5.19 统计数据 - */ -struct TRTCStatistics -{ - - /// C -> S 上行丢包率(%), - /// 该值越小越好,例如,丢包率为0表示网络很好, - /// 丢包率为30@%则意味着 SDK 向服务器发送的数据包中会有30@%丢失在上行传输中。 - uint32_t upLoss; - - /// S -> C 下行丢包率(%), - /// 该值越小越好,例如,丢包率为0表示网络很好, - /// 丢包率为30@%则意味着 SDK 向服务器发送的数据包中会有30@%丢失在下行传输中。 - uint32_t downLoss; - - /// 当前 App 的 CPU 使用率(%) - uint32_t appCpu; - - /// 当前系统的 CPU 使用率(%) - uint32_t systemCpu; - - /// 延迟(毫秒), - /// 指 SDK 到腾讯云服务器的一次网络往返时间,该值越小越好。 - /// 一般低于 50ms 的 rtt 相对理想,而高于 100ms 的 rtt 会引入较大的通话延时。 - /// 由于数据上下行共享一条网络连接,所以 local 和 remote 的 rtt 相同。 - uint32_t rtt; - - /// 总接收字节数(包含信令和音视频) - uint32_t receivedBytes; - - /// 总发送字节总数(包含信令和音视频) - uint32_t sentBytes; - - /// 本地的音视频统计信息,可能有主画面、小画面以及辅路画面等多路的情况,因此是一个数组 - TRTCLocalStatistics * localStatisticsArray; - - /// 数组 localStatisticsArray 的大小 - uint32_t 
localStatisticsArraySize; - - /// 远端成员的音视频统计信息,可能有主画面、小画面以及辅路画面等多路的情况,因此是一个数组 - TRTCRemoteStatistics * remoteStatisticsArray; - - /// 数组 remoteStatisticsArray 的大小 - uint32_t remoteStatisticsArraySize; - - TRTCStatistics() - : upLoss(0) - , downLoss(0) - , appCpu(0) - , systemCpu(0) - , rtt(0) - , receivedBytes(0) - , sentBytes(0) - , localStatisticsArray(nullptr) - , localStatisticsArraySize(0) - , remoteStatisticsArray(nullptr) - , remoteStatisticsArraySize(0) - { - +class ITRTCScreenCaptureSourceList { + protected: + virtual ~ITRTCScreenCaptureSourceList() { } -}; - -/** - * 5.20 用于存储屏幕分享窗口缩略图和图标的结构体 - */ -struct TRTCImageBuffer -{ - const char * buffer; ///< 图内容 - uint32_t length; ///< 图缓存大小 - uint32_t width; ///< 图宽 - uint32_t height; ///< 图高 - TRTCImageBuffer() - : buffer(nullptr) - , length(0) - , width(0) - , height(0) - {}; -}; -/** - * 5.21 屏幕分享窗口信息 - * - * 您可以通过 getScreenCaptureSources() 枚举可共享的窗口列表,列表通过 ITRTCScreenCaptureSourceList 返回 - */ -struct TRTCScreenCaptureSourceInfo { - TRTCScreenCaptureSourceType type; ///< 采集源类型 - TXView sourceId; ///< 采集源 ID;对于窗口,该字段指示窗口句柄;对于屏幕,该字段指示屏幕 ID - const char * sourceName; ///< 采集源名称,UTF8 编码 - TRTCImageBuffer thumbBGRA; ///< 缩略图内容 - TRTCImageBuffer iconBGRA; ///< 图标内容 - bool isMinimizeWindow; ///< 是否为最小化窗口,通过 getScreenCaptureSources 获取列表时的窗口状态,仅采集源为 Window 时才可用 - bool isMainScreen; ///< 是否为主屏,是否为主屏,仅采集源类型为 Screen 时才可用 - TRTCScreenCaptureSourceInfo() - : type(TRTCScreenCaptureSourceTypeUnknown) - , sourceId(nullptr) - , sourceName(nullptr) - , isMinimizeWindow(false) - , isMainScreen(false) - {}; -}; - -/** - * 5.22 屏幕分享窗口列表 - */ -class ITRTCScreenCaptureSourceList -{ -protected: - virtual ~ITRTCScreenCaptureSourceList() {} -public: - /** - * @return 窗口个数 - */ + public: virtual uint32_t getCount() = 0; - /** - * @return 窗口信息 - */ virtual TRTCScreenCaptureSourceInfo getSourceInfo(uint32_t index) = 0; - /** - * @brief 遍历完窗口列表后,调用 release 释放资源。 - */ virtual void release() = 0; }; /** - * 5.23 屏幕分享参数 + * 5.23 屏幕分享的进阶控制参数 * 
- * 您可以通过设置结构体内的参数控制屏幕分享边框的颜色、宽度、是否采集鼠标等参数 + * 该参数用于屏幕分享相关的接口{@link selectScreenCaptureTarget},用于在指定分享目标时设定一系列进阶控制参数。 + * 比如:是否采集鼠标、是否要采集子窗口、是否要在被分享目标周围绘制一个边框等。 */ struct TRTCScreenCaptureProperty { - bool enableCaptureMouse; ///< 是否采集目标内容时顺带采集鼠标,默认为 true - bool enableHighLight; ///< 是否高亮正在共享的窗口,默认为 true - bool enableHighPerformance; ///< 是否开启高性能模式(只会在分享屏幕时会生效),开启后屏幕采集性能最佳,但无法过滤远端的高亮边框,默认为 true - int highLightColor; ///< 指定高亮边框颜色,RGB 格式,传入 0 时采用默认颜色,默认颜色为 #8CBF26 - int highLightWidth; ///< 指定高亮边框的宽度,传入0时采用默认描边宽度,默认宽度为 5,最大值为 50 - bool enableCaptureChildWindow; ///< 窗口采集时是否采集子窗口(与采集窗口具有 Owner 或 Popup 属性),默认为 false - - TRTCScreenCaptureProperty() - : enableCaptureMouse(true) - , enableHighLight(true) - , enableHighPerformance(true) - , highLightColor(0) - , highLightWidth(0) - , enableCaptureChildWindow(false) - { + ///【字段含义】是否采集目标内容的同时采集鼠标,默认为 true。 + bool enableCaptureMouse; - } -}; + ///【字段含义】是否高亮正在共享的窗口(在被分享目标周围绘制一个边框),默认为 true。 + bool enableHighLight; -/** - * 5.24 设备列表和设备 Item 信息 - * - * 以下定义仅用于兼容原有接口,具体定义参见 ITXDeviceManager.h 文件 - */ -typedef ITXDeviceCollection ITRTCDeviceCollection; -typedef ITXDeviceInfo ITRTCDeviceInfo; -/** - * 5.25 本地录制参数 - * - */ -enum TRTCLocalRecordType { - /// 仅录制音频 - TRTCLocalRecordType_Audio = 0, + ///【字段含义】是否开启高性能模式(只会在分享屏幕时会生效),默认为 true。 + ///【特殊说明】开启后屏幕采集性能最佳,但会丧失抗遮挡能力,如果您同时开启 enableHighLight + enableHighPerformance,远端用户可以看到高亮的边框。 + bool enableHighPerformance; - /// 仅录制视频 - TRTCLocalRecordType_Video = 1, + ///【字段含义】指定高亮边框的颜色,RGB 格式,传入 0 时代表采用默认颜色,默认颜色为 #8CBF26。 + int highLightColor; - /// 同时录制音频、视频 - TRTCLocalRecordType_Both = 2, + ///【字段含义】指定高亮边框的宽度,传入0时采用默认描边宽度,默认宽度为 5px,您可以设置的最大值为 50。 + int highLightWidth; -}; + ///【字段含义】窗口采集时是否采集子窗口(需要子窗口与被采集窗口具有 Owner 或 Popup 属性),默认为 false。 + bool enableCaptureChildWindow; -struct TRTCLocalRecordingParams { - ///【字段含义】文件路径(必填),录制的文件地址,请自行指定,确保路径有读写权限且合法,否则录制文件无法生成。 - ///【特别说明】该路径需精确到文件名及格式后缀,格式后缀决定录制文件的格式,目前支持的格式只有 mp4。 - /// Windows建议在应用的私有数据目录中指定存放路径。 - ///【示例代码】在 %appdata%\\test 目录下录制 
example.mp4 文件 - /// std::string filePath; - /// std::wstring path; - /// wchar_t fullPath[MAX_PATH] = { 0 }; - /// ::SHGetFolderPathW(NULL, CSIDL_APPDATA, NULL, 0, fullPath); - /// path=fullPath; - /// path += L"\\test\\example.mp4"; - /// filePath = txf_wstr2utf8(path); - const char *filePath; - - ///【字段含义】媒体录制类型,默认为同时录制音频和视频。 - TRTCLocalRecordType recordType; - - ///interval 录制中事件(onLocalRecordDoing)的回调频率,单位毫秒,有效范围:1000-10000,默认为 -1 表示不回调 - int interval; - - TRTCLocalRecordingParams() - : filePath(nullptr) - , recordType(TRTCLocalRecordType_Both) - , interval(-1) - { + TRTCScreenCaptureProperty() : enableCaptureMouse(true), enableHighLight(true), enableHighPerformance(true), highLightColor(0), highLightWidth(0), enableCaptureChildWindow(false) { + } +}; +typedef ITXDeviceCollection ITRTCDeviceCollection; +typedef ITXDeviceInfo ITRTCDeviceInfo; +/** + * 5.24 远端音频流智能并发播放策略的参数 + * + * 该参数用于设置远端音频流智能并发播放策略。 + */ +struct TRTCAudioParallelParams { + ///【字段含义】最大并发播放数。默认值:0 + ///- 如果 maxCount > 0,且实际人数 > maxCount,会实时智能选出 maxCount 路数据进行播放,这会极大的降低性能消耗。 + ///- 如果 maxCount = 0,SDK 不限制并发播放数,在上麦人数比较多的房间可能会引发性能问题。 + uint32_t maxCount; + + ///【字段含义】指定用户必定能并发播放。 + ///【特殊说明】指定必定并发播放的用户 ID 列表。这些用户不参与智能选择。 + /// includeUsers 的数量必须小于 maxCount,否则本次并发播放设置失效。 + /// includeUsers 仅在 maxCount > 0 时有效。当 includeUsers 生效时,参与智能并发选择的最大播放数 = maxCount - 有效 includeUsers 的数量。 + char **includeUsers; + uint32_t includeUsersCount; + + TRTCAudioParallelParams() : maxCount(0), includeUsers(nullptr), includeUsersCount(0) { } }; + /// @} -} // namespace trtc +} // namespace liteav + +// 9.0 开始 C++ 接口将声明在 liteav 命名空间下,为兼容之前的使用方式,将 trtc 作为 liteav 的别名 +// namespace trtc = liteav; #ifdef _WIN32 -using namespace trtc; +using namespace liteav; #endif -#endif / *__TRTCCLOUDDEF_H__ * / +#endif /* __TRTCCLOUDDEF_H__ */ +/// @} diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TXLiteAVCode.h b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TXLiteAVCode.h index 
dc4c146..3a9a47f 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TXLiteAVCode.h +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TXLiteAVCode.h @@ -1,3 +1,5 @@ +// Copyright (c) 2021 Tencent. All rights reserved. + #ifndef __TXLITEAVCODE_H__ #define __TXLITEAVCODE_H__ @@ -6,303 +8,107 @@ // 错误码 // ///////////////////////////////////////////////////////////////////////////////// +// clang-format off typedef enum TXLiteAVError { ///////////////////////////////////////////////////////////////////////////////// - // // 基础错误码 - // - ///////////////////////////////////////////////////////////////////////////////// - ERR_NULL = 0, ///< 无错误 - - ///////////////////////////////////////////////////////////////////////////////// - // - // 进房(enterRoom)相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##onEnterRoom() 和 TRTCCloudDelegate##OnError() 通知 - // - ///////////////////////////////////////////////////////////////////////////////// - ERR_ROOM_ENTER_FAIL = -3301, ///< 进入房间失败 - ERR_ROOM_REQUEST_ENTER_ROOM_TIMEOUT = -3308, ///< 请求进房超时,请检查网络 - ERR_ENTER_ROOM_PARAM_NULL = -3316, ///< 进房参数为空,请检查: enterRoom:appScene: 接口调用是否传入有效的 param - ERR_SDK_APPID_INVALID = -3317, ///< 进房参数 sdkAppId 错误 - ERR_ROOM_ID_INVALID = -3318, ///< 进房参数 roomId 错误 - ERR_USER_ID_INVALID = -3319, ///< 进房参数 userID 不正确 - ERR_USER_SIG_INVALID = -3320, ///< 进房参数 userSig 不正确 - ERR_ROOM_REQUEST_ENTER_ROOM_REFUSED = -3340, ///< 请求进房拒绝,请检查:是否连续调用 enterRoom 进入相同房间 - ERR_SERVER_INFO_SERVICE_SUSPENDED = -100013, ///< 服务不可用。请检查:套餐包剩余分钟数是否大于0,腾讯云账号是否欠费 - ///////////////////////////////////////////////////////////////////////////////// - // - // 退房(exitRoom)相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 - // - ///////////////////////////////////////////////////////////////////////////////// - ERR_ROOM_REQUEST_QUIT_ROOM_TIMEOUT = -3325, ///< 请求退房超时 - + ERR_NULL = 0, ///< 无错误 + ERR_FAILED = -1, ///< 暂未归类的通用错误 + ERR_INVALID_PARAMETER = -2, ///< 调用 API 时,传入的参数不合法 + ERR_REFUSED = 
-3, ///< API 调用被拒绝 + ERR_NOT_SUPPORTED = -4, ///< 当前 API 不支持调用 + ERR_INVALID_LICENSE = -5, ///< license 不合法,调用失败 + ERR_REQUEST_TIMEOUT = -6, ///< 请求服务器超时 + ERR_SERVER_PROCESS_FAILED = -7, ///< 服务器无法处理您的请求 + ERR_DISCONNECTED = -8, ///< 断开连接 + ///////////////////////////////////////////////////////////////////////////////// - // - // 设备(摄像头、麦克风、扬声器)相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 - // 区段:-6000 ~ -6999 - // + // 视频相关错误码 ///////////////////////////////////////////////////////////////////////////////// ERR_CAMERA_START_FAIL = -1301, ///< 打开摄像头失败,例如在 Windows 或 Mac 设备,摄像头的配置程序(驱动程序)异常,禁用后重新启用设备,或者重启机器,或者更新配置程序 ERR_CAMERA_NOT_AUTHORIZED = -1314, ///< 摄像头设备未授权,通常在移动设备出现,可能是权限被用户拒绝了 ERR_CAMERA_SET_PARAM_FAIL = -1315, ///< 摄像头参数设置出错(参数不支持或其它) ERR_CAMERA_OCCUPY = -1316, ///< 摄像头正在被占用中,可尝试打开其他摄像头 - ERR_MIC_START_FAIL = -1302, ///< 打开麦克风失败,例如在 Windows 或 Mac 设备,麦克风的配置程序(驱动程序)异常,禁用后重新启用设备,或者重启机器,或者更新配置程序 - ERR_MIC_NOT_AUTHORIZED = -1317, ///< 麦克风设备未授权,通常在移动设备出现,可能是权限被用户拒绝了 - ERR_MIC_SET_PARAM_FAIL = -1318, ///< 麦克风设置参数失败 - ERR_MIC_OCCUPY = -1319, ///< 麦克风正在被占用中,例如移动设备正在通话时,打开麦克风会失败 - ERR_MIC_STOP_FAIL = -1320, ///< 停止麦克风失败 - ERR_SPEAKER_START_FAIL = -1321, ///< 打开扬声器失败,例如在 Windows 或 Mac 设备,扬声器的配置程序(驱动程序)异常,禁用后重新启用设备,或者重启机器,或者更新配置程序 - ERR_SPEAKER_SET_PARAM_FAIL = -1322, ///< 扬声器设置参数失败 - ERR_SPEAKER_STOP_FAIL = -1323, ///< 停止扬声器失败 - - ///////////////////////////////////////////////////////////////////////////////// - // - // 系统声音采集相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##onSystemAudioLoopbackError() 通知 - // - ///////////////////////////////////////////////////////////////////////////////// - ERR_AUDIO_PLUGIN_START_FAIL = -1330, ///< 开启系统声音录制失败,例如音频驱动插件不可用 - ERR_AUDIO_PLUGIN_INSTALL_NOT_AUTHORIZED = -1331, ///< 安装音频驱动插件未授权 - ERR_AUDIO_PLUGIN_INSTALL_FAILED = -1332, ///< 安装音频驱动插件失败 - ///////////////////////////////////////////////////////////////////////////////// - // - // 屏幕分享相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 - // - 
///////////////////////////////////////////////////////////////////////////////// ERR_SCREEN_CAPTURE_START_FAIL = -1308, ///< 开始录屏失败,如果在移动设备出现,可能是权限被用户拒绝了,如果在 Windows 或 Mac 系统的设备出现,请检查录屏接口的参数是否符合要求 ERR_SCREEN_CAPTURE_UNSURPORT = -1309, ///< 录屏失败,在 Android 平台,需要5.0以上的系统,在 iOS 平台,需要11.0以上的系统 - ERR_SERVER_CENTER_NO_PRIVILEDGE_PUSH_SUB_VIDEO = -102015, ///< 没有权限上行辅路 - ERR_SERVER_CENTER_ANOTHER_USER_PUSH_SUB_VIDEO = -102016, ///< 其他用户正在上行辅路 ERR_SCREEN_CAPTURE_STOPPED = -7001, ///< 录屏被系统中止 + ERR_SCREEN_SHARE_NOT_AUTHORIZED = -102015, ///< 没有权限上行辅路 + ERR_SCREEN_SHRAE_OCCUPIED_BY_OTHER = -102016, ///< 其他用户正在上行辅路 - ///////////////////////////////////////////////////////////////////////////////// - // - // 编解码相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 - // - ///////////////////////////////////////////////////////////////////////////////// - ERR_VIDEO_ENCODE_FAIL = -1303, ///< 视频帧编码失败,例如 iOS 设备切换到其他应用时,硬编码器可能被系统释放,再切换回来时,硬编码器重启前,可能会抛出 + ERR_VIDEO_ENCODE_FAIL = -1303, ///< 视频帧编码失败,例如 iOS 设备切换到其他应用时,硬编码器可能被系统释放,再切换回来时,硬编码器重启前,可能会抛出 ERR_UNSUPPORTED_RESOLUTION = -1305, ///< 不支持的视频分辨率 - ERR_AUDIO_ENCODE_FAIL = -1304, ///< 音频帧编码失败,例如传入自定义音频数据,SDK 无法处理 - ERR_UNSUPPORTED_SAMPLERATE = -1306, ///< 不支持的音频采样率 + ERR_PIXEL_FORMAT_UNSUPPORTED = -1327, ///< 自定视频采集:设置的 pixel format 不支持 + ERR_BUFFER_TYPE_UNSUPPORTED = -1328, ///< 自定视频采集:设置的 buffer type 不支持 + ERR_NO_AVAILABLE_HEVC_DECODERS = -2304, ///< 找不到可用的 HEVC 解码器 ///////////////////////////////////////////////////////////////////////////////// - // - // 自定义采集相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 - // + // 音频相关错误码 ///////////////////////////////////////////////////////////////////////////////// - ERR_PIXEL_FORMAT_UNSUPPORTED = -1327, ///< 设置的 pixel format 不支持 - ERR_BUFFER_TYPE_UNSUPPORTED = -1328, ///< 设置的 buffer type 不支持 + ERR_MIC_START_FAIL = -1302, ///< 打开麦克风失败,例如在 Windows 或 Mac 设备,麦克风的配置程序(驱动程序)异常,禁用后重新启用设备,或者重启机器,或者更新配置程序 + ERR_MIC_NOT_AUTHORIZED = -1317, ///< 麦克风设备未授权,通常在移动设备出现,可能是权限被用户拒绝了 + 
ERR_MIC_SET_PARAM_FAIL = -1318, ///< 麦克风设置参数失败 + ERR_MIC_OCCUPY = -1319, ///< 麦克风正在被占用中,例如移动设备正在通话时,打开麦克风会失败 + ERR_MIC_STOP_FAIL = -1320, ///< 停止麦克风失败 - ///////////////////////////////////////////////////////////////////////////////// - // - // CDN 绑定和混流相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##onStartPublishing() 和 TRTCCloudDelegate##onSetMixTranscodingConfig 通知。 - // - ///////////////////////////////////////////////////////////////////////////////// - ERR_PUBLISH_CDN_STREAM_REQUEST_TIME_OUT = -3321, ///< 旁路转推请求超时 - ERR_CLOUD_MIX_TRANSCODING_REQUEST_TIME_OUT = -3322, ///< 云端混流请求超时 - ERR_PUBLISH_CDN_STREAM_SERVER_FAILED = -3323, ///< 旁路转推回包异常 - ERR_CLOUD_MIX_TRANSCODING_SERVER_FAILED = -3324, ///< 云端混流回包异常 - ERR_ROOM_REQUEST_START_PUBLISHING_TIMEOUT = -3333, ///< 开始向腾讯云的直播 CDN 推流信令超时 - ERR_ROOM_REQUEST_START_PUBLISHING_ERROR = -3334, ///< 开始向腾讯云的直播 CDN 推流信令异常 - ERR_ROOM_REQUEST_STOP_PUBLISHING_TIMEOUT = -3335, ///< 停止向腾讯云的直播 CDN 推流信令超时 - ERR_ROOM_REQUEST_STOP_PUBLISHING_ERROR = -3336, ///< 停止向腾讯云的直播 CDN 推流信令异常 + ERR_SPEAKER_START_FAIL = -1321, ///< 打开扬声器失败,例如在 Windows 或 Mac 设备,扬声器的配置程序(驱动程序)异常,禁用后重新启用设备,或者重启机器,或者更新配置程序 + ERR_SPEAKER_SET_PARAM_FAIL = -1322, ///< 扬声器设置参数失败 + ERR_SPEAKER_STOP_FAIL = -1323, ///< 停止扬声器失败 - ///////////////////////////////////////////////////////////////////////////////// - // - // 跨房连麦(ConnectOtherRoom)相关错误码 - // NOTE: 通过回调函数 TRTCCloudDelegate##onConnectOtherRoom() 通知。 - // - ///////////////////////////////////////////////////////////////////////////////// - ERR_ROOM_REQUEST_CONN_ROOM_TIMEOUT = -3326, ///< 请求连麦超时 - ERR_ROOM_REQUEST_DISCONN_ROOM_TIMEOUT = -3327, ///< 请求退出连麦超时 - ERR_ROOM_REQUEST_CONN_ROOM_INVALID_PARAM = -3328, ///< 无效参数 - ERR_CONNECT_OTHER_ROOM_AS_AUDIENCE = -3330, ///< 当前是观众角色,不能请求或断开跨房连麦,需要先 switchRole() 到主播 - ERR_SERVER_CENTER_CONN_ROOM_NOT_SUPPORT = -102031, ///< 不支持跨房间连麦 - ERR_SERVER_CENTER_CONN_ROOM_REACH_MAX_NUM = -102032, ///< 达到跨房间连麦上限 - ERR_SERVER_CENTER_CONN_ROOM_REACH_MAX_RETRY_TIMES = -102033, ///< 
跨房间连麦重试次数耗尽 - ERR_SERVER_CENTER_CONN_ROOM_REQ_TIMEOUT = -102034, ///< 跨房间连麦请求超时 - ERR_SERVER_CENTER_CONN_ROOM_REQ = -102035, ///< 跨房间连麦请求格式错误 - ERR_SERVER_CENTER_CONN_ROOM_NO_SIG = -102036, ///< 跨房间连麦无签名 - ERR_SERVER_CENTER_CONN_ROOM_DECRYPT_SIG = -102037, ///< 跨房间连麦签名解密失败 - ERR_SERVER_CENTER_CONN_ROOM_NO_KEY = -102038, ///< 未找到跨房间连麦签名解密密钥 - ERR_SERVER_CENTER_CONN_ROOM_PARSE_SIG = -102039, ///< 跨房间连麦签名解析错误 - ERR_SERVER_CENTER_CONN_ROOM_INVALID_SIG_TIME = -102040, ///< 跨房间连麦签名时间戳错误 - ERR_SERVER_CENTER_CONN_ROOM_SIG_GROUPID = -102041, ///< 跨房间连麦签名不匹配 - ERR_SERVER_CENTER_CONN_ROOM_NOT_CONNED = -102042, ///< 本房间无连麦 - ERR_SERVER_CENTER_CONN_ROOM_USER_NOT_CONNED = -102043, ///< 本用户未发起连麦 - ERR_SERVER_CENTER_CONN_ROOM_FAILED = -102044, ///< 跨房间连麦失败 - ERR_SERVER_CENTER_CONN_ROOM_CANCEL_FAILED = -102045, ///< 取消跨房间连麦失败 - ERR_SERVER_CENTER_CONN_ROOM_CONNED_ROOM_NOT_EXIST = -102046, ///< 被连麦房间不存在 - ERR_SERVER_CENTER_CONN_ROOM_CONNED_REACH_MAX_ROOM = -102047, ///< 被连麦房间达到连麦上限 - ERR_SERVER_CENTER_CONN_ROOM_CONNED_USER_NOT_EXIST = -102048, ///< 被连麦用户不存在 - ERR_SERVER_CENTER_CONN_ROOM_CONNED_USER_DELETED = -102049, ///< 被连麦用户已被删除 - ERR_SERVER_CENTER_CONN_ROOM_CONNED_USER_FULL = -102050, ///< 被连麦用户达到资源上限 - ERR_SERVER_CENTER_CONN_ROOM_INVALID_SEQ = -102051, ///< 连麦请求序号错乱 + ERR_AUDIO_PLUGIN_START_FAIL = -1330, ///< 开启系统声音录制失败,例如音频驱动插件不可用 + ERR_AUDIO_PLUGIN_INSTALL_NOT_AUTHORIZED = -1331, ///< 安装音频驱动插件未授权 + ERR_AUDIO_PLUGIN_INSTALL_FAILED = -1332, ///< 安装音频驱动插件失败 + ERR_AUDIO_ENCODE_FAIL = -1304, ///< 音频帧编码失败,例如传入自定义音频数据,SDK 无法处理 + ERR_UNSUPPORTED_SAMPLERATE = -1306, ///< 不支持的音频采样率 ///////////////////////////////////////////////////////////////////////////////// - // - // 客户无需关心的内部错误码 - // - ///////////////////////////////////////////////////////////////////////////////// - - // - Remove From Head - ERR_RTMP_PUSH_NET_DISCONNECT = -1307, ///< 直播,推流出现网络断开,且经过多次重试无法恢复 - ERR_RTMP_PUSH_INVALID_ADDRESS = -1313, ///< 直播,推流地址非法,例如不是 RTMP 协议的地址 - ERR_RTMP_PUSH_NET_ALLADDRESS_FAIL = -1324, ///< 
直播,连接推流服务器失败(若支持智能选路,IP 全部失败) - ERR_RTMP_PUSH_NO_NETWORK = -1325, ///< 直播,网络不可用,请确认 Wi-Fi、移动数据或者有线网络是否正常 - ERR_RTMP_PUSH_SERVER_REFUSE = -1326, ///< 直播,服务器拒绝连接请求,可能是该推流地址已经被占用,或者 TXSecret 校验失败,或者是过期了,或者是欠费了 - - ERR_PLAY_LIVE_STREAM_NET_DISCONNECT = -2301, ///< 直播,网络断连,且经多次重连抢救无效,可以放弃治疗,更多重试请自行重启播放 - ERR_GET_RTMP_ACC_URL_FAIL = -2302, ///< 直播,获取加速拉流的地址失败 - ERR_FILE_NOT_FOUND = -2303, ///< 播放的文件不存在 - ERR_HEVC_DECODE_FAIL = -2304, ///< H265 解码失败 - ERR_VOD_DECRYPT_FAIL = -2305, ///< 点播,音视频流解密失败 - ERR_GET_VODFILE_MEDIAINFO_FAIL = -2306, ///< 点播,获取点播文件信息失败 - ERR_PLAY_LIVE_STREAM_SWITCH_FAIL = -2307, ///< 直播,切流失败(切流可以播放不同画面大小的视频) - ERR_PLAY_LIVE_STREAM_SERVER_REFUSE = -2308, ///< 直播,服务器拒绝连接请求 - ERR_RTMP_ACC_FETCH_STREAM_FAIL = -2309, ///< 直播,RTMPACC 低延时拉流失败,且经过多次重试无法恢复 - ERR_HEVC_ENCODE_FAIL = -2310, ///< 265编码失败 - ERR_HEVC_ENCODE_NOT_SUPPORT = -2311, ///< 265编码判断不支持 - ERR_HEVC_SOFTDECODER_START_FAIL = -2312, ///< 265软解启动失败 - - ERR_ROOM_HEARTBEAT_FAIL = -3302, ///< 心跳失败,客户端定时向服务器发送数据包,告诉服务器自己活着,这个错误通常是发包超时 - ERR_ROOM_REQUEST_IP_FAIL = -3303, ///< 拉取接口机服务器地址失败 - ERR_ROOM_CONNECT_FAIL = -3304, ///< 连接接口机服务器失败 - ERR_ROOM_REQUEST_AVSEAT_FAIL = -3305, ///< 请求视频位失败 - ERR_ROOM_REQUEST_TOKEN_HTTPS_TIMEOUT = -3306, ///< 请求 token HTTPS 超时,请检查网络是否正常,或网络防火墙是否放行 HTTPS 访问 official.opensso.tencent-cloud.com:443 - ERR_ROOM_REQUEST_IP_TIMEOUT = -3307, ///< 请求 IP 和 sig 超时,请检查网络是否正常,或网络防火墙是否放行 UDP 访问下列 IP 和域名 query.tencent-cloud.com:8000 162.14.23.140:8000 162.14.7.49:8000 - ERR_ROOM_REQUEST_VIDEO_FLAG_TIMEOUT = -3309, ///< 请求视频位超时 - ERR_ROOM_REQUEST_VIDEO_DATA_ROOM_TIMEOUT = -3310, ///< 请求视频数据超时 - ERR_ROOM_REQUEST_CHANGE_ABILITY_TIMEOUT = -3311, ///< 请求修改视频能力项超时 - ERR_ROOM_REQUEST_STATUS_REPORT_TIMEOUT = -3312, ///< 请求状态上报超时 - ERR_ROOM_REQUEST_CLOSE_VIDEO_TIMEOUT = -3313, ///< 请求关闭视频超时 - ERR_ROOM_REQUEST_SET_RECEIVE_TIMEOUT = -3314, ///< 请求接收视频项超时 - ERR_ROOM_REQUEST_TOKEN_INVALID_PARAMETER = -3315, ///< 请求 token 无效参数,请检查 TRTCParams.userSig 是否填写正确 - - ERR_ROOM_REQUEST_AES_TOKEN_RETURN_ERROR 
= -3329, ///< 请求 AES TOKEN 时,server 返回的内容是空的 - ERR_ACCIP_LIST_EMPTY = -3331, ///< 请求接口机 IP 返回的列表为空的 - ERR_ROOM_REQUEST_SEND_JSON_CMD_TIMEOUT = -3332, ///< 请求发送Json 信令超时 - - // Info 服务器(查询接口机 IP), 服务器错误码,数值范围[-100000, -110000] - ERR_SERVER_INFO_UNPACKING_ERROR = -100000, ///< server 解包错误,可能请求数据被篡改 - ERR_SERVER_INFO_TOKEN_ERROR = -100001, ///< TOKEN 错误 - ERR_SERVER_INFO_ALLOCATE_ACCESS_FAILED = -100002, ///< 分配接口机错误 - ERR_SERVER_INFO_GENERATE_SIGN_FAILED = -100003, ///< 生成签名错误 - ERR_SERVER_INFO_TOKEN_TIMEOUT = -100004, ///< HTTPS token 超时 - ERR_SERVER_INFO_INVALID_COMMAND = -100005, ///< 无效的命令字 - ERR_SERVER_INFO_PRIVILEGE_FLAG_ERROR = -100006, ///< 权限位校验失败 - ERR_SERVER_INFO_GENERATE_KEN_ERROR = -100007, ///< HTTPS 请求时,生成加密 key 错误 - ERR_SERVER_INFO_GENERATE_TOKEN_ERROR = -100008, ///< HTTPS 请求时,生成 token 错误 - ERR_SERVER_INFO_DATABASE = -100009, ///< 数据库查询失败(房间相关存储信息) - ERR_SERVER_INFO_BAD_ROOMID = -100010, ///< 房间号错误 - ERR_SERVER_INFO_BAD_SCENE_OR_ROLE = -100011, ///< 场景或角色错误 - ERR_SERVER_INFO_ROOMID_EXCHANGE_FAILED = -100012, ///< 房间号转换出错 - ERR_SERVER_INFO_STRGROUP_HAS_INVALID_CHARS = -100014, ///< 房间号非法 - ERR_SERVER_INFO_LACK_SDKAPPID = -100015, ///< 非法SDKAppid - ERR_SERVER_INFO_INVALID = -100016, ///< 无效请求, 分配接口机失败 - ERR_SERVER_INFO_ECDH_GET_KEY = -100017, ///< 生成公钥失败 - ERR_SERVER_INFO_ECDH_GET_TINYID = -100018, ///< userSig 校验失败,请检查 TRTCParams.userSig 是否填写正确 - - // Access 接口机 - ERR_SERVER_ACC_TOKEN_TIMEOUT = -101000, ///< token 过期 - ERR_SERVER_ACC_SIGN_ERROR = -101001, ///< 签名错误 - ERR_SERVER_ACC_SIGN_TIMEOUT = -101002, ///< 签名超时 - ERR_SERVER_ACC_ROOM_NOT_EXIST = -101003, ///< 房间不存在 - ERR_SERVER_ACC_ROOMID = -101004, ///< 后台房间标识 roomId 错误 - ERR_SERVER_ACC_LOCATIONID = -101005, ///< 后台用户位置标识 locationId 错误 - ERR_SERVER_ACC_TOKEN_EORROR = -101006, ///< token里面的tinyid和进房信令tinyid不同 或是 进房信令没有token - - // Center 服务器(信令和流控处理等任务) - ERR_SERVER_CENTER_SYSTEM_ERROR = -102000, ///< 后台错误 - - ERR_SERVER_CENTER_INVALID_ROOMID = -102001, ///< 无效的房间 Id - 
ERR_SERVER_CENTER_CREATE_ROOM_FAILED = -102002, ///< 创建房间失败 - ERR_SERVER_CENTER_SIGN_ERROR = -102003, ///< 签名错误 - ERR_SERVER_CENTER_SIGN_TIMEOUT = -102004, ///< 签名过期 - ERR_SERVER_CENTER_ROOM_NOT_EXIST = -102005, ///< 房间不存在 - ERR_SERVER_CENTER_ADD_USER_FAILED = -102006, ///< 房间添加用户失败 - ERR_SERVER_CENTER_FIND_USER_FAILED = -102007, ///< 查找用户失败 - ERR_SERVER_CENTER_SWITCH_TERMINATION_FREQUENTLY = -102008, ///< 频繁切换终端 - ERR_SERVER_CENTER_LOCATION_NOT_EXIST = -102009, ///< locationid 错误 - ERR_SERVER_CENTER_NO_PRIVILEDGE_CREATE_ROOM = -102010, ///< 没有权限创建房间 - ERR_SERVER_CENTER_NO_PRIVILEDGE_ENTER_ROOM = -102011, ///< 没有权限进入房间 - ERR_SERVER_CENTER_INVALID_PARAMETER_SUB_VIDEO = -102012, ///< 辅路抢视频位、申请辅路请求类型参数错误 - ERR_SERVER_CENTER_NO_PRIVILEDGE_PUSH_VIDEO = -102013, ///< 没有权限上视频 - ERR_SERVER_CENTER_ROUTE_TABLE_ERROR = -102014, ///< 没有空闲路由表 - ERR_SERVER_CENTER_NOT_PUSH_SUB_VIDEO = -102017, ///< 当前用户没有上行辅路 - ERR_SERVER_CENTER_USER_WAS_DELETED = -102018, ///< 用户被删除状态 - ERR_SERVER_CENTER_NO_PRIVILEDGE_REQUEST_VIDEO = -102019, ///< 没有权限请求视频 - ERR_SERVER_CENTER_INVALID_PARAMETER = -102023, ///< 进房参数 bussInfo 错误 - ERR_SERVER_CENTER_I_FRAME_UNKNOW_TYPE = -102024, ///< 请求 I 帧未知 opType - ERR_SERVER_CENTER_I_FRAME_INVALID_PACKET = -102025, ///< 请求 I 帧包格式错误 - ERR_SERVER_CENTER_I_FRAME_DEST_USER_NOT_EXIST = -102026, ///< 请求 I 帧目标用户不存在 - ERR_SERVER_CENTER_I_FRAME_ROOM_TOO_BIG = -102027, ///< 请求 I 帧房间用户太多 - ERR_SERVER_CENTER_I_FRAME_RPS_INVALID_PARAMETER = -102028, ///< 请求 I 帧参数错误 - ERR_SERVER_CENTER_INVALID_ROOM_ID = -102029, ///< 房间号非法 - ERR_SERVER_CENTER_ROOM_ID_TOO_LONG = -102030, ///< 房间号超过限制 - ERR_SERVER_CENTER_ROOM_FULL = -102052, ///< 房间满员 - ERR_SERVER_CENTER_DECODE_JSON_FAIL = -102053, ///< JSON 串解析失败 - ERR_SERVER_CENTER_UNKNOWN_SUB_CMD = -102054, ///< 未定义命令字 - ERR_SERVER_CENTER_INVALID_ROLE = -102055, ///< 未定义角色 - ERR_SERVER_CENTER_REACH_PROXY_MAX = -102056, ///< 代理机超出限制 - ERR_SERVER_CENTER_RECORDID_STORE = -102057, ///< 无法保存用户自定义 recordId - ERR_SERVER_CENTER_PB_SERIALIZE = 
-102058, ///< Protobuf 序列化错误 - - ERR_SERVER_SSO_SIG_EXPIRED = -70001, ///< sig 过期,请尝试重新生成。如果是刚生成,就过期,请检查有效期填写的是否过小,或者填的 0 - ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_1 = -70003, ///< sig 校验失败,请确认下 sig 内容是否被截断,如缓冲区长度不够导致的内容截断 - ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_2 = -70004, ///< sig 校验失败,请确认下 sig 内容是否被截断,如缓冲区长度不够导致的内容截断 - ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_3 = -70005, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 - ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_4 = -70006, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 - ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_5 = -70007, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 - ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_6 = -70008, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 - ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_7 = -70009, ///< 用业务公钥验证 sig 失败,请确认生成的 usersig 使用的私钥和 sdkAppId 是否对应 - ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_8 = -70010, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 - ERR_SERVER_SSO_SIG_VERIFICATION_ID_NOT_MATCH = -70013, ///< sig 中 identifier 与请求时的 identifier 不匹配,请检查登录时填写的 identifier 与 sig 中的是否一致 - ERR_SERVER_SSO_APPID_NOT_MATCH = -70014, ///< sig 中 sdkAppId 与请求时的 sdkAppId 不匹配,请检查登录时填写的 sdkAppId 与 sig 中的是否一致 - ERR_SERVER_SSO_VERIFICATION_EXPIRED = -70017, ///< 内部第三方票据验证超时,请重试,如多次重试不成功,请@TLS 帐号支持,QQ 3268519604 - ERR_SERVER_SSO_VERIFICATION_FAILED = -70018, ///< 内部第三方票据验证超时,请重试,如多次重试不成功,请@TLS 帐号支持,QQ 3268519604 + // 网络相关错误码 + ///////////////////////////////////////////////////////////////////////////////// + ERR_TRTC_ENTER_ROOM_FAILED = -3301, ///< 进入房间失败,请查看 onError 中的 -3301 对应的 msg 提示确认失败原因 + ERR_TRTC_REQUEST_IP_TIMEOUT = -3307, ///< 请求 IP 和 sig 超时,请检查网络是否正常,或网络防火墙是否放行 UDP。可尝试访问下列 IP:162.14.22.165:8000 162.14.6.105:8000 和域名:default-query.trtc.tencent-cloud.com:8000 + ERR_TRTC_CONNECT_SERVER_TIMEOUT = -3308, ///< 请求进房超时,请检查是否断网或者是否开启vpn,您也可以切换4G进行测试确认 + ERR_TRTC_ROOM_PARAM_NULL = -3316, ///< 进房参数为空,请检查: enterRoom:appScene: 接口调用是否传入有效的 param + ERR_TRTC_INVALID_SDK_APPID = -3317, ///< 进房参数 sdkAppId 错误,请检查 TRTCParams.sdkAppId 是否为空 + ERR_TRTC_INVALID_ROOM_ID = -3318, ///< 进房参数 
roomId 错误,请检查 TRTCParams.roomId 或 TRTCParams.strRoomId 是否为空,注意 roomId 和 strRoomId 不可混用 + ERR_TRTC_INVALID_USER_ID = -3319, ///< 进房参数 userId 不正确,请检查 TRTCParams.userId 是否为空 + ERR_TRTC_INVALID_USER_SIG = -3320, ///< 进房参数 userSig 不正确,请检查 TRTCParams.userSig 是否为空 + ERR_TRTC_ENTER_ROOM_REFUSED = -3340, ///< 进房请求被拒绝,请检查是否连续调用 enterRoom 进入相同 Id 的房间 + ERR_TRTC_INVALID_PRIVATE_MAPKEY = -100006, ///< 您开启了高级权限控制,但参数 TRTCParams.privateMapKey 校验失败,您可参考 https://cloud.tencent.com/document/product/647/32240 进行检查 + ERR_TRTC_SERVICE_SUSPENDED = -100013, ///< 服务不可用。请检查:套餐包剩余分钟数是否大于0,腾讯云账号是否欠费。您可参考 https://cloud.tencent.com/document/product/647/50492 进行查看与配置 + ERR_TRTC_USER_SIG_CHECK_FAILED = -100018, ///< UserSig 校验失败,请检查参数 TRTCParams.userSig 是否填写正确,或是否已经过期。您可参考 https://cloud.tencent.com/document/product/647/50686 进行校验 + + ERR_TRTC_PUSH_THIRD_PARTY_CLOUD_TIMEOUT = -3321, ///< 旁路转推请求超时 + ERR_TRTC_PUSH_THIRD_PARTY_CLOUD_FAILED = -3323, ///< 旁路转推回包异常 + ERR_TRTC_MIX_TRANSCODING_TIMEOUT = -3322, ///< 云端混流请求超时 + ERR_TRTC_MIX_TRANSCODING_FAILED = -3324, ///< 云端混流回包异常 + + ERR_TRTC_START_PUBLISHING_TIMEOUT = -3333, ///< 开始向腾讯云的直播 CDN 推流信令超时 + ERR_TRTC_START_PUBLISHING_FAILED = -3334, ///< 开始向腾讯云的直播 CDN 推流信令异常 + ERR_TRTC_STOP_PUBLISHING_TIMEOUT = -3335, ///< 停止向腾讯云的直播 CDN 推流信令超时 + ERR_TRTC_STOP_PUBLISHING_FAILED = -3336, ///< 停止向腾讯云的直播 CDN 推流信令异常 + + ERR_TRTC_CONNECT_OTHER_ROOM_TIMEOUT = -3326, ///< 请求连麦超时 + ERR_TRTC_DISCONNECT_OTHER_ROOM_TIMEOUT = -3327, ///< 请求退出连麦超时 + ERR_TRTC_CONNECT_OTHER_ROOM_INVALID_PARAMETER = -3328, ///< 无效参数 + ERR_TRTC_CONNECT_OTHER_ROOM_AS_AUDIENCE = -3330, ///< 当前是观众角色,不能请求或断开跨房连麦,需要先 switchRole() 到主播 - ERR_SERVER_SSO_APPID_NOT_FOUND = -70020, ///< sdkAppId 未找到,请确认是否已经在腾讯云上配置 - ERR_SERVER_SSO_ACCOUNT_IN_BLACKLIST = -70051, ///< 帐号已被拉入黑名单,请联系 TLS 帐号支持 QQ 3268519604 - ERR_SERVER_SSO_SIG_INVALID = -70052, ///< usersig 已经失效,请重新生成,再次尝试 - ERR_SERVER_SSO_LIMITED_BY_SECURITY = -70114, ///< 安全原因被限制 - ERR_SERVER_SSO_INVALID_LOGIN_STATUS = -70221, ///< 登录状态无效,请使用 usersig 重新鉴权 - 
ERR_SERVER_SSO_APPID_ERROR = -70252, ///< sdkAppId 填写错误 - ERR_SERVER_SSO_TICKET_VERIFICATION_FAILED = -70346, ///< 票据校验失败,请检查各项参数是否正确 - ERR_SERVER_SSO_TICKET_EXPIRED = -70347, ///< 票据因过期原因校验失败 - ERR_SERVER_SSO_ACCOUNT_EXCEED_PURCHASES = -70398, ///< 创建账号数量超过已购买预付费数量限制 - ERR_SERVER_SSO_INTERNAL_ERROR = -70500, ///< 服务器内部错误,请重试 - - //秒级监控上报错误码 - ERR_REQUEST_QUERY_CONFIG_TIMEOUT = -4001, ///< 请求通用配置超时 - ERR_CUSTOM_STREAM_INVALID = -4002, ///< 自定义流id错误 - ERR_USER_DEFINE_RECORD_ID_INVALID = -4003, ///< userDefineRecordId错误 - ERR_MIX_PARAM_INVALID = -4004, ///< 混流参数校验失败 - ERR_REQUEST_ACC_BY_HOST_IP = -4005, ///< 通过域名进行0x1请求 - // - /Remove From Head } TXLiteAVError; ///////////////////////////////////////////////////////////////////////////////// // // 警告码 // -//> 不需要特别关注,但您可以根据其中某些感兴趣的警告码,对当前用户进行相应的提示 -// ///////////////////////////////////////////////////////////////////////////////// typedef enum TXLiteAVWarning { - WARNING_HW_ENCODER_START_FAIL = 1103, ///< 硬编码启动出现问题,自动切换到软编码 + ///////////////////////////////////////////////////////////////////////////////// + // 视频相关警告码 + ///////////////////////////////////////////////////////////////////////////////// + WARNING_HW_ENCODER_START_FAIL = 1103, ///< 硬编码启动出现问题,自动切换到软编码 WARNING_CURRENT_ENCODE_TYPE_CHANGED = 1104, ///< 当前编码格式, 通过key 为type获取,值为1时是265编码,值为0时是264编码 WARNING_VIDEO_ENCODER_SW_TO_HW = 1107, ///< 当前 CPU 使用率太高,无法满足软件编码需求,自动切换到硬件编码 WARNING_INSUFFICIENT_CAPTURE_FPS = 1108, ///< 摄像头采集帧率不足,部分自带美颜算法的 Android 手机上会出现 @@ -310,117 +116,65 @@ typedef enum TXLiteAVWarning WARNING_REDUCE_CAPTURE_RESOLUTION = 1110, ///< 摄像头采集分辨率被降低,以满足当前帧率和性能最优解。 WARNING_CAMERA_DEVICE_EMPTY = 1111, ///< 没有检测到可用的摄像头设备 WARNING_CAMERA_NOT_AUTHORIZED = 1112, ///< 用户未授权当前应用使用摄像头 - WARNING_MICROPHONE_DEVICE_EMPTY = 1201, ///< 没有检测到可用的麦克风设备 - WARNING_SPEAKER_DEVICE_EMPTY = 1202, ///< 没有检测到可用的扬声器设备 - WARNING_MICROPHONE_NOT_AUTHORIZED = 1203, ///< 用户未授权当前应用使用麦克风 - WARNING_MICROPHONE_DEVICE_ABNORMAL = 1204, ///< 音频采集设备不可用(例如被占用或者PC判定无效设备) - 
WARNING_SPEAKER_DEVICE_ABNORMAL = 1205, ///< 音频播放设备不可用(例如被占用或者PC判定无效设备) WARNING_SCREEN_CAPTURE_NOT_AUTHORIZED = 1206, ///< 用户未授权当前应用使用屏幕录制 WARNING_VIDEO_FRAME_DECODE_FAIL = 2101, ///< 当前视频帧解码失败 - WARNING_AUDIO_FRAME_DECODE_FAIL = 2102, ///< 当前音频帧解码失败 - WARNING_VIDEO_PLAY_LAG = 2105, ///< 当前视频播放出现卡顿 WARNING_HW_DECODER_START_FAIL = 2106, ///< 硬解启动失败,采用软解码 WARNING_VIDEO_DECODER_HW_TO_SW = 2108, ///< 当前流硬解第一个 I 帧失败,SDK 自动切软解 WARNING_SW_DECODER_START_FAIL = 2109, ///< 软解码器启动失败 WARNING_VIDEO_RENDER_FAIL = 2110, ///< 视频渲染失败 - WARNING_START_CAPTURE_IGNORED = 4000, ///< 已经在采集,启动采集被忽略 + + ///////////////////////////////////////////////////////////////////////////////// + // 音频相关警告码 + ///////////////////////////////////////////////////////////////////////////////// + WARNING_MICROPHONE_DEVICE_EMPTY = 1201, ///< 没有检测到可用的麦克风设备 + WARNING_SPEAKER_DEVICE_EMPTY = 1202, ///< 没有检测到可用的扬声器设备 + WARNING_MICROPHONE_NOT_AUTHORIZED = 1203, ///< 用户未授权当前应用使用麦克风 + WARNING_MICROPHONE_DEVICE_ABNORMAL = 1204, ///< 音频采集设备不可用(例如被占用或者PC判定无效设备) + WARNING_SPEAKER_DEVICE_ABNORMAL = 1205, ///< 音频播放设备不可用(例如被占用或者PC判定无效设备) + WARNING_AUDIO_FRAME_DECODE_FAIL = 2102, ///< 当前音频帧解码失败 WARNING_AUDIO_RECORDING_WRITE_FAIL = 7001, ///< 音频录制写入文件失败 - WARNING_ROOM_DISCONNECT = 5101, ///< 网络断开连接 - WARNING_IGNORE_UPSTREAM_FOR_AUDIENCE = 6001, ///< 当前是观众角色,忽略上行音视频数据 - - // - Remove From Head - WARNING_NET_BUSY = 1101, ///< 网络状况不佳:上行带宽太小,上传数据受阻 - WARNING_RTMP_SERVER_RECONNECT = 1102, ///< 直播,网络断连, 已启动自动重连(自动重连连续失败超过三次会放弃) - WARNING_LIVE_STREAM_SERVER_RECONNECT = 2103, ///< 直播,网络断连, 已启动自动重连(自动重连连续失败超过三次会放弃) - WARNING_RECV_DATA_LAG = 2104, ///< 网络来包不稳:可能是下行带宽不足,或由于主播端出流不均匀 - WARNING_RTMP_DNS_FAIL = 3001, ///< 直播,DNS 解析失败 - WARNING_RTMP_SEVER_CONN_FAIL = 3002, ///< 直播,服务器连接失败 - WARNING_RTMP_SHAKE_FAIL = 3003, ///< 直播,与 RTMP 服务器握手失败 - WARNING_RTMP_SERVER_BREAK_CONNECT = 3004, ///< 直播,服务器主动断开 - WARNING_RTMP_READ_WRITE_FAIL = 3005, ///< 直播,RTMP 读/写失败,将会断开连接 - WARNING_RTMP_WRITE_FAIL = 3006, ///< 直播,RTMP 写失败(SDK 内部错误码,不会对外抛出) - 
WARNING_RTMP_READ_FAIL = 3007, ///< 直播,RTMP 读失败(SDK 内部错误码,不会对外抛出) - WARNING_RTMP_NO_DATA = 3008, ///< 直播,超过30s 没有数据发送,主动断开连接 - WARNING_PLAY_LIVE_STREAM_INFO_CONNECT_FAIL = 3009, ///< 直播,connect 服务器调用失败(SDK 内部错误码,不会对外抛出) - WARNING_NO_STEAM_SOURCE_FAIL = 3010, ///< 直播,连接失败,该流地址无视频(SDK 内部错误码,不会对外抛出) - WARNING_ROOM_RECONNECT = 5102, ///< 网络断连,已启动自动重连 - WARNING_ROOM_NET_BUSY = 5103, ///< 网络状况不佳:上行带宽太小,上传数据受阻 - // - /Remove From Head + + ///////////////////////////////////////////////////////////////////////////////// + // 网络相关警告码 + ///////////////////////////////////////////////////////////////////////////////// + WARNING_IGNORE_UPSTREAM_FOR_AUDIENCE = 6001, ///< 当前是观众角色,不支持发布音视频,需要先切换成主播角色 } TXLiteAVWarning; -// - Remove From Head + ///////////////////////////////////////////////////////////////////////////////// // -// (三)事件列表 +// 兼容定义(用于兼容老版本的错误码定义,请在代码中尽量使用右侧的新定义) // ///////////////////////////////////////////////////////////////////////////////// - -typedef enum TXLiteAVEvent -{ - EVT_RTMP_PUSH_CONNECT_SUCC = 1001, ///< 直播,已经连接 RTMP 推流服务器 - EVT_RTMP_PUSH_BEGIN = 1002, ///< 直播,已经与 RTMP 服务器握手完毕,开始推流 - EVT_CAMERA_START_SUCC = 1003, ///< 打开摄像头成功 - EVT_SCREEN_CAPTURE_SUCC = 1004, ///< 录屏启动成功 - EVT_UP_CHANGE_RESOLUTION = 1005, ///< 上行动态调整分辨率 - EVT_UP_CHANGE_BITRATE = 1006, ///< 码率动态调整 - EVT_FIRST_FRAME_AVAILABLE = 1007, ///< 首帧画面采集完成 - EVT_START_VIDEO_ENCODER = 1008, ///< 编码器启动成功 - EVT_SNAPSHOT_COMPLETE = 1022, ///< 一帧截图完成 - EVT_CAMERA_REMOVED = 1023, ///< 摄像头设备已被移出(Windows 和 Mac 版 SDK 使用) - EVT_CAMERA_AVAILABLE = 1024, ///< 摄像头设备重新可用(Windows 和 Mac 版 SDK 使用) - EVT_CAMERA_CLOSE = 1025, ///< 关闭摄像头完成(Windows 和 Mac 版 SDK 使用) - EVT_RTMP_PUSH_PUBLISH_START = 1026, ///< 直播,与 RTMP 服务器连接后,收到 NetStream.Publish.Start 消息,表明流发布成功(SDK 内部事件,不会对外抛出) - EVT_HW_ENCODER_START_SUCC = 1027, ///< 硬编码器启动成功 - EVT_SW_ENCODER_START_SUCC = 1028, ///< 软编码器启动成功 - EVT_LOCAL_RECORD_RESULT = 1029, ///< 本地录制结果 - EVT_LOCAL_RECORD_PROGRESS = 1030, ///< 本地录制状态通知 - - EVT_PLAY_LIVE_STREAM_CONNECT_SUCC = 2001, 
///< 直播,已经连接 RTMP 拉流服务器 - EVT_PLAY_LIVE_STREAM_BEGIN = 2002, ///< 直播,已经与 RTMP 服务器握手完毕,开始拉流 - EVT_RENDER_FIRST_I_FRAME = 2003, ///< 渲染首个视频数据包(IDR) - EVT_VIDEO_PLAY_BEGIN = 2004, ///< 视频播放开始 - EVT_VIDEO_PLAY_PROGRESS = 2005, ///< 视频播放进度 - EVT_VIDEO_PLAY_END = 2006, ///< 视频播放结束 - EVT_VIDEO_PLAY_LOADING = 2007, ///< 视频播放 loading - EVT_START_VIDEO_DECODER = 2008, ///< 解码器启动 - EVT_DOWN_CHANGE_RESOLUTION = 2009, ///< 下行视频分辨率改变 - EVT_GET_VODFILE_MEDIAINFO_SUCC = 2010, ///< 点播,获取点播文件信息成功 - EVT_VIDEO_CHANGE_ROTATION = 2011, ///< 视频旋转角度发生改变 - EVT_PLAY_GET_MESSAGE = 2012, ///< 消息事件 - EVT_VOD_PLAY_PREPARED = 2013, ///< 点播,视频加载完毕 - EVT_VOD_PLAY_LOADING_END = 2014, ///< 点播,loading 结束 - EVT_PLAY_LIVE_STREAM_SWITCH_SUCC = 2015, ///< 直播,切流成功(切流可以播放不同画面大小的视频) - EVT_VOD_PLAY_TCP_CONNECT_SUCC = 2016, ///< 点播,TCP 连接成功(SDK 内部事件,不会对外抛出) - EVT_VOD_PLAY_FIRST_VIDEO_PACKET = 2017, ///< 点播,收到首帧数据(SDK 内部事件,不会对外抛出) - EVT_VOD_PLAY_DNS_RESOLVED = 2018, ///< 点播,DNS 解析完成(SDK 内部事件,不会对外抛出) - EVT_VOD_PLAY_SEEK_COMPLETE = 2019, ///< 点播,视频播放 Seek 完成(SDK 内部事件,不会对外抛出) - EVT_VIDEO_DECODER_CACHE_TOO_MANY_FRAMES = 2020, ///< 视频解码器缓存帧数过多,超过40帧(SDK 内部事件,不会对外抛出) - EVT_HW_DECODER_START_SUCC = 2021, ///< 硬解码器启动成功(SDK 内部事件,不会对外抛出) - EVT_SW_DECODER_START_SUCC = 2022, ///< 软解码器启动成功(SDK 内部事件,不会对外抛出) - EVT_AUDIO_JITTER_STATE_FIRST_LOADING = 2023, ///< 音频首次加载(SDK 内部事件,不会对外抛出) - EVT_AUDIO_JITTER_STATE_LOADING = 2024, ///< 音频正在加载(SDK 内部事件,不会对外抛出) - EVT_AUDIO_JITTER_STATE_PLAYING = 2025, ///< 音频正在播放(SDK 内部事件,不会对外抛出) - EVT_AUDIO_JITTER_STATE_FIRST_PLAY = 2026, ///< 音频首次播放(SDK 内部事件,不会对外抛出) - EVT_MIC_START_SUCC = 2027, ///< 麦克风启动成功 - EVT_PLAY_GET_METADATA = 2028, ///< 视频流MetaData事件 - EVT_MIC_RELEASE_SUCC = 2029, ///< 释放麦克风占用 - EVT_AUDIO_DEVICE_ROUTE_CHANGED = 2030, ///< 音频设备的route发生改变,即当前的输入输出设备发生改变,比如耳机被拔出 - EVT_PLAY_GET_FLVSESSIONKEY = 2031, ///< TXLivePlayer 接收到http响应头中的 flvSessionKey 信息 - - EVT_ROOM_ENTER = 1018, ///< 进入房间成功 - EVT_ROOM_EXIT = 1019, ///< 退出房间 - EVT_ROOM_USERLIST = 1020, ///< 下发房间成员列表(不包括自己) - 
EVT_ROOM_NEED_REENTER = 1021, ///< WiFi 切换到4G 会触发断线重连,此时需要重新进入房间(拉取最优的服务器地址) - EVT_ROOM_ENTER_FAILED = 1022, ///< 自己进入房间失败 - EVT_ROOM_USER_ENTER = 1031, ///< 进房通知 - EVT_ROOM_USER_EXIT = 1032, ///< 退房通知 - EVT_ROOM_USER_VIDEO_STATE = 1033, ///< 视频状态位变化通知 - EVT_ROOM_USER_AUDIO_STATE = 1034, ///< 音频状态位变化通知 - - EVT_ROOM_REQUEST_IP_SUCC = 8001, ///< 拉取接口机服务器地址成功 - EVT_ROOM_CONNECT_SUCC = 8002, ///< 连接接口机服务器成功 - EVT_ROOM_REQUEST_AVSEAT_SUCC = 8003, ///< 请求视频位成功 -} TXLiteAVEvent; -// - /Remove From Head - +#define ERR_ROOM_ENTER_FAIL ERR_TRTC_ENTER_ROOM_FAILED +#define ERR_ROOM_REQUEST_IP_TIMEOUT ERR_TRTC_REQUEST_IP_TIMEOUT +#define ERR_ROOM_REQUEST_ENTER_ROOM_TIMEOUT ERR_TRTC_CONNECT_SERVER_TIMEOUT + +#define ERR_ENTER_ROOM_PARAM_NULL ERR_TRTC_ROOM_PARAM_NULL +#define ERR_SDK_APPID_INVALID ERR_TRTC_INVALID_SDK_APPID +#define ERR_ROOM_ID_INVALID ERR_TRTC_INVALID_ROOM_ID +#define ERR_USER_ID_INVALID ERR_TRTC_INVALID_USER_ID +#define ERR_USER_SIG_INVALID ERR_TRTC_INVALID_USER_SIG +#define ERR_ROOM_REQUEST_ENTER_ROOM_REFUSED ERR_TRTC_ENTER_ROOM_REFUSED +#define ERR_SERVER_INFO_PRIVILEGE_FLAG_ERROR ERR_TRTC_INVALID_PRIVATE_MAPKEY +#define ERR_SERVER_INFO_SERVICE_SUSPENDED ERR_TRTC_SERVICE_SUSPENDED +#define ERR_SERVER_INFO_ECDH_GET_TINYID ERR_TRTC_USER_SIG_CHECK_FAILED +#define ERR_SERVER_CENTER_NO_PRIVILEDGE_PUSH_SUB_VIDEO ERR_SCREEN_SHARE_NOT_AUTHORIZED +#define ERR_SERVER_CENTER_ANOTHER_USER_PUSH_SUB_VIDEO ERR_SCREEN_SHRAE_OCCUPIED_BY_OTHER +#define ERR_PUBLISH_CDN_STREAM_REQUEST_TIME_OUT ERR_TRTC_PUSH_THIRD_PARTY_CLOUD_TIMEOUT +#define ERR_PUBLISH_CDN_STREAM_SERVER_FAILED ERR_TRTC_PUSH_THIRD_PARTY_CLOUD_FAILED +#define ERR_CLOUD_MIX_TRANSCODING_REQUEST_TIME_OUT ERR_TRTC_MIX_TRANSCODING_TIMEOUT +#define ERR_CLOUD_MIX_TRANSCODING_SERVER_FAILED ERR_TRTC_MIX_TRANSCODING_FAILED + +#define ERR_ROOM_REQUEST_START_PUBLISHING_TIMEOUT ERR_TRTC_START_PUBLISHING_TIMEOUT +#define ERR_ROOM_REQUEST_START_PUBLISHING_ERROR ERR_TRTC_START_PUBLISHING_FAILED +#define 
ERR_ROOM_REQUEST_STOP_PUBLISHING_TIMEOUT ERR_TRTC_STOP_PUBLISHING_TIMEOUT +#define ERR_ROOM_REQUEST_STOP_PUBLISHING_ERROR ERR_TRTC_STOP_PUBLISHING_FAILED + +#define ERR_ROOM_REQUEST_CONN_ROOM_TIMEOUT ERR_TRTC_CONNECT_OTHER_ROOM_TIMEOUT +#define ERR_ROOM_REQUEST_DISCONN_ROOM_TIMEOUT ERR_TRTC_DISCONNECT_OTHER_ROOM_TIMEOUT +#define ERR_ROOM_REQUEST_CONN_ROOM_INVALID_PARAM ERR_TRTC_CONNECT_OTHER_ROOM_INVALID_PARAMETER +#define ERR_CONNECT_OTHER_ROOM_AS_AUDIENCE ERR_TRTC_CONNECT_OTHER_ROOM_AS_AUDIENCE + +// clang-format on #endif /* __TXLITEAVCODE_H__ */ diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Info.plist b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Info.plist index 13bd72e..a9c6eb0 100644 Binary files a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Info.plist and b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Info.plist differ diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Modules/module.modulemap b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Modules/module.modulemap index 920e668..5a80880 100644 --- a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Modules/module.modulemap +++ b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/Modules/module.modulemap @@ -1,12 +1,6 @@ framework module TXLiteAVSDK_TRTC { - umbrella header "TXLiteAVSDK.h" - exclude header "TXLiteAVEncodedDataProcessingListener.h" - exclude header "TXLiteAVBuffer.h" - exclude header "cpp_interface/ITRTCCloud.h" - exclude header "cpp_interface/ITXAudioEffectManager.h" - exclude header "cpp_interface/ITXDeviceManager.h" - exclude header "cpp_interface/TRTCCloudCallback.h" - exclude header "cpp_interface/TRTCTypeDef.h" - exclude header "cpp_interface/TXLiteAVCode.h" - export * + umbrella header "TXLiteAVSDK.h" + + export * + module * { export * } } diff --git a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/TXLiteAVSDK_TRTC b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/TXLiteAVSDK_TRTC index 610cbe2..44ac050 100644 Binary files a/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/TXLiteAVSDK_TRTC and 
b/HHVDoctorSDK/TXLiteAVSDK_TRTC.framework/TXLiteAVSDK_TRTC differ diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/.DS_Store b/HHVDoctorSDK/TXSoundTouch.xcframework/.DS_Store new file mode 100644 index 0000000..3bfc3ec Binary files /dev/null and b/HHVDoctorSDK/TXSoundTouch.xcframework/.DS_Store differ diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/Info.plist b/HHVDoctorSDK/TXSoundTouch.xcframework/Info.plist new file mode 100644 index 0000000..e17ae5e --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/Info.plist @@ -0,0 +1,40 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>AvailableLibraries</key> + <array> + <dict> + <key>LibraryIdentifier</key> + <string>ios-x86_64-simulator</string> + <key>LibraryPath</key> + <string>TXSoundTouch.framework</string> + <key>SupportedArchitectures</key> + <array> + <string>x86_64</string> + </array> + <key>SupportedPlatform</key> + <string>ios</string> + <key>SupportedPlatformVariant</key> + <string>simulator</string> + </dict> + <dict> + <key>LibraryIdentifier</key> + <string>ios-arm64_armv7</string> + <key>LibraryPath</key> + <string>TXSoundTouch.framework</string> + <key>SupportedArchitectures</key> + <array> + <string>arm64</string> + <string>armv7</string> + </array> + <key>SupportedPlatform</key> + <string>ios</string> + </dict> + </array> + <key>CFBundlePackageType</key> + <string>XFWK</string> + <key>XCFrameworkFormatVersion</key> + <string>1.0</string> +</dict> +</plist> diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/.DS_Store b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/.DS_Store new file mode 100644 index 0000000..e4a705e Binary files /dev/null and b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/.DS_Store differ diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/.DS_Store 
b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/.DS_Store new file mode 100644 index 0000000..fda8680 Binary files /dev/null and b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/.DS_Store differ diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/BPMDetect.h b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/BPMDetect.h new file mode 100644 index 0000000..683208d --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/BPMDetect.h @@ -0,0 +1,205 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// Beats-per-minute (BPM) detection routine. +/// +/// The beat detection algorithm works as follows: +/// - Use function 'inputSamples' to input a chunks of samples to the class for +/// analysis. It's a good idea to enter a large sound file or stream in smallish +/// chunks of around few kilosamples in order not to extinguish too much RAM memory. +/// - Input sound data is decimated to approx 500 Hz to reduce calculation burden, +/// which is basically ok as low (bass) frequencies mostly determine the beat rate. +/// Simple averaging is used for anti-alias filtering because the resulting signal +/// quality isn't of that high importance. +/// - Decimated sound data is enveloped, i.e. the amplitude shape is detected by +/// taking absolute value that's smoothed by sliding average. Signal levels that +/// are below a couple of times the general RMS amplitude level are cut away to +/// leave only notable peaks there. +/// - Repeating sound patterns (e.g. beats) are detected by calculating short-term +/// autocorrelation function of the enveloped signal. 
+/// - After whole sound data file has been analyzed as above, the bpm level is +/// detected by function 'getBpm' that finds the highest peak of the autocorrelation +/// function, calculates it's precise location and converts this reading to bpm's. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef _BPMDetect_H_ +#define _BPMDetect_H_ + +#include <vector> +#include "STTypes.h" +#include "FIFOSampleBuffer.h" + +namespace liteav_soundtouch +{ + + /// Minimum allowed BPM rate. Used to restrict accepted result above a reasonable limit. + #define MIN_BPM 45 + + /// Maximum allowed BPM rate range. Used for calculating algorithm parametrs + #define MAX_BPM_RANGE 200 + + /// Maximum allowed BPM rate range. Used to restrict accepted result below a reasonable limit. 
+ #define MAX_BPM_VALID 190 + +//////////////////////////////////////////////////////////////////////////////// + + typedef struct + { + float pos; + float strength; + } BEAT; + + + class IIR2_filter + { + double coeffs[5]; + double prev[5]; + + public: + IIR2_filter(const double *lpf_coeffs); + float update(float x); + }; + + + /// Class for calculating BPM rate for audio data. + class BPMDetect + { + protected: + /// Auto-correlation accumulator bins. + float *xcorr; + + /// Sample average counter. + int decimateCount; + + /// Sample average accumulator for FIFO-like decimation. + liteav_soundtouch::LONG_SAMPLETYPE decimateSum; + + /// Decimate sound by this coefficient to reach approx. 500 Hz. + int decimateBy; + + /// Auto-correlation window length + int windowLen; + + /// Number of channels (1 = mono, 2 = stereo) + int channels; + + /// sample rate + int sampleRate; + + /// Beginning of auto-correlation window: Autocorrelation isn't being updated for + /// the first these many correlation bins. + int windowStart; + + /// window functions for data preconditioning + float *hamw; + float *hamw2; + + // beat detection variables + int pos; + int peakPos; + int beatcorr_ringbuffpos; + int init_scaler; + float peakVal; + float *beatcorr_ringbuff; + + /// FIFO-buffer for decimated processing samples. + liteav_soundtouch::FIFOSampleBuffer *buffer; + + /// Collection of detected beat positions + //BeatCollection beats; + std::vector<BEAT> beats; + + // 2nd order low-pass-filter + IIR2_filter beat_lpf; + + /// Updates auto-correlation function for given number of decimated samples that + /// are read from the internal 'buffer' pipe (samples aren't removed from the pipe + /// though). + void updateXCorr(int process_samples /// How many samples are processed. + ); + + /// Decimates samples to approx. 500 Hz. + /// + /// \return Number of output samples. 
+ int decimate(liteav_soundtouch::SAMPLETYPE *dest, ///< Destination buffer + const liteav_soundtouch::SAMPLETYPE *src, ///< Source sample buffer + int numsamples ///< Number of source samples. + ); + + /// Calculates amplitude envelope for the buffer of samples. + /// Result is output to 'samples'. + void calcEnvelope(liteav_soundtouch::SAMPLETYPE *samples, ///< Pointer to input/output data buffer + int numsamples ///< Number of samples in buffer + ); + + /// remove constant bias from xcorr data + void removeBias(); + + // Detect individual beat positions + void updateBeatPos(int process_samples); + + + public: + /// Constructor. + BPMDetect(int numChannels, ///< Number of channels in sample data. + int sampleRate ///< Sample rate in Hz. + ); + + /// Destructor. + virtual ~BPMDetect(); + + /// Inputs a block of samples for analyzing: Envelopes the samples and then + /// updates the autocorrelation estimation. When whole song data has been input + /// in smaller blocks using this function, read the resulting bpm with 'getBpm' + /// function. + /// + /// Notice that data in 'samples' array can be disrupted in processing. + void inputSamples(const liteav_soundtouch::SAMPLETYPE *samples, ///< Pointer to input/working data buffer + int numSamples ///< Number of samples in buffer + ); + + /// Analyzes the results and returns the BPM rate. Use this function to read result + /// after whole song data has been input to the class by consecutive calls of + /// 'inputSamples' function. + /// + /// \return Beats-per-minute rate, or zero if detection failed. + float getBpm(); + + /// Get beat position arrays. Note: The array includes also really low beat detection values + /// in absence of clear strong beats. Consumer may wish to filter low values away. + /// - "pos" receive array of beat positions + /// - "values" receive array of beat detection strengths + /// - max_num indicates max.size of "pos" and "values" array. 
+ /// + /// You can query a suitable array sized by calling this with NULL in "pos" & "values". + /// + /// \return number of beats in the arrays. + int getBeats(float *pos, float *strength, int max_num); + }; +} +#endif // _BPMDetect_H_ diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/FIFOSampleBuffer.h b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/FIFOSampleBuffer.h new file mode 100644 index 0000000..0d09df4 --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/FIFOSampleBuffer.h @@ -0,0 +1,180 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// A buffer class for temporarily storaging sound samples, operates as a +/// first-in-first-out pipe. +/// +/// Samples are added to the end of the sample buffer with the 'putSamples' +/// function, and are received from the beginning of the buffer by calling +/// the 'receiveSamples' function. The class automatically removes the +/// output samples from the buffer as well as grows the storage size +/// whenever necessary. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef FIFOSampleBuffer_H +#define FIFOSampleBuffer_H + +#include "FIFOSamplePipe.h" + +namespace liteav_soundtouch +{ + +/// Sample buffer working in FIFO (first-in-first-out) principle. The class takes +/// care of storage size adjustment and data moving during input/output operations. +/// +/// Notice that in case of stereo audio, one sample is considered to consist of +/// both channel data. +class FIFOSampleBuffer : public FIFOSamplePipe +{ +private: + /// Sample buffer. + SAMPLETYPE *buffer; + + // Raw unaligned buffer memory. 'buffer' is made aligned by pointing it to first + // 16-byte aligned location of this buffer + SAMPLETYPE *bufferUnaligned; + + /// Sample buffer size in bytes + uint sizeInBytes; + + /// How many samples are currently in buffer. + uint samplesInBuffer; + + /// Channels, 1=mono, 2=stereo. + uint channels; + + /// Current position pointer to the buffer. This pointer is increased when samples are + /// removed from the pipe so that it's necessary to actually rewind buffer (move data) + /// only new data when is put to the pipe. + uint bufferPos; + + /// Rewind the buffer by moving data from position pointed by 'bufferPos' to real + /// beginning of the buffer. + void rewind(); + + /// Ensures that the buffer has capacity for at least this many samples. + void ensureCapacity(uint capacityRequirement); + + /// Returns current capacity. + uint getCapacity() const; + +public: + + /// Constructor + FIFOSampleBuffer(int numChannels = 2 ///< Number of channels, 1=mono, 2=stereo. + ///< Default is stereo. 
+ ); + + /// destructor + ~FIFOSampleBuffer(); + + /// Returns a pointer to the beginning of the output samples. + /// This function is provided for accessing the output samples directly. + /// Please be careful for not to corrupt the book-keeping! + /// + /// When using this function to output samples, also remember to 'remove' the + /// output samples from the buffer by calling the + /// 'receiveSamples(numSamples)' function + virtual SAMPLETYPE *ptrBegin(); + + /// Returns a pointer to the end of the used part of the sample buffer (i.e. + /// where the new samples are to be inserted). This function may be used for + /// inserting new samples into the sample buffer directly. Please be careful + /// not corrupt the book-keeping! + /// + /// When using this function as means for inserting new samples, also remember + /// to increase the sample count afterwards, by calling the + /// 'putSamples(numSamples)' function. + SAMPLETYPE *ptrEnd( + uint slackCapacity ///< How much free capacity (in samples) there _at least_ + ///< should be so that the caller can successfully insert the + ///< desired samples to the buffer. If necessary, the function + ///< grows the buffer size to comply with this requirement. + ); + + /// Adds 'numSamples' pcs of samples from the 'samples' memory position to + /// the sample buffer. + virtual void putSamples(const SAMPLETYPE *samples, ///< Pointer to samples. + uint numSamples ///< Number of samples to insert. + ); + + /// Adjusts the book-keeping to increase number of samples in the buffer without + /// copying any actual samples. + /// + /// This function is used to update the number of samples in the sample buffer + /// when accessing the buffer directly with 'ptrEnd' function. Please be + /// careful though! + virtual void putSamples(uint numSamples ///< Number of samples been inserted. + ); + + /// Output samples from beginning of the sample buffer. 
Copies requested samples to + /// output buffer and removes them from the sample buffer. If there are less than + /// 'numsample' samples in the buffer, returns all that available. + /// + /// \return Number of samples returned. + virtual uint receiveSamples(SAMPLETYPE *output, ///< Buffer where to copy output samples. + uint maxSamples ///< How many samples to receive at max. + ); + + /// Adjusts book-keeping so that given number of samples are removed from beginning of the + /// sample buffer without copying them anywhere. + /// + /// Used to reduce the number of samples in the buffer when accessing the sample buffer directly + /// with 'ptrBegin' function. + virtual uint receiveSamples(uint maxSamples ///< Remove this many samples from the beginning of pipe. + ); + + /// Returns number of samples currently available. + virtual uint numSamples() const; + + /// Sets number of channels, 1 = mono, 2 = stereo. + void setChannels(int numChannels); + + /// Get number of channels + int getChannels() + { + return channels; + } + + /// Returns nonzero if there aren't any samples available for outputting. + virtual int isEmpty() const; + + /// Clears all the samples. + virtual void clear(); + + /// allow trimming (downwards) amount of samples in pipeline. 
+ /// Returns adjusted amount of samples + uint adjustAmountOfSamples(uint numSamples); + + /// Add silence to end of buffer + void addSilent(uint nSamples); +}; + +} + +#endif diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/FIFOSamplePipe.h b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/FIFOSamplePipe.h new file mode 100644 index 0000000..90ae18f --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/FIFOSamplePipe.h @@ -0,0 +1,231 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// 'FIFOSamplePipe' : An abstract base class for classes that manipulate sound +/// samples by operating like a first-in-first-out pipe: New samples are fed +/// into one end of the pipe with the 'putSamples' function, and the processed +/// samples are received from the other end with the 'receiveSamples' function. +/// +/// 'FIFOProcessor' : A base class for classes the do signal processing with +/// the samples while operating like a first-in-first-out pipe. When samples +/// are input with the 'putSamples' function, the class processes them +/// and moves the processed samples to the given 'output' pipe object, which +/// may be either another processing stage, or a fifo sample buffer object. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef FIFOSamplePipe_H +#define FIFOSamplePipe_H + +#include <assert.h> +#include <stdlib.h> +#include "STTypes.h" +#include "st_export.h" + +namespace liteav_soundtouch +{ + +/// Abstract base class for FIFO (first-in-first-out) sample processing classes. +class SOUNDTOUCH_API FIFOSamplePipe +{ +protected: + + bool verifyNumberOfChannels(int nChannels) const + { + if ((nChannels > 0) && (nChannels <= SOUNDTOUCH_MAX_CHANNELS)) + { + return true; + } + ST_THROW_RT_ERROR("Error: Illegal number of channels"); + return false; + } + +public: + // virtual default destructor + virtual ~FIFOSamplePipe() {} + + + /// Returns a pointer to the beginning of the output samples. + /// This function is provided for accessing the output samples directly. + /// Please be careful for not to corrupt the book-keeping! + /// + /// When using this function to output samples, also remember to 'remove' the + /// output samples from the buffer by calling the + /// 'receiveSamples(numSamples)' function + virtual SAMPLETYPE *ptrBegin() = 0; + + /// Adds 'numSamples' pcs of samples from the 'samples' memory position to + /// the sample buffer. + virtual void putSamples(const SAMPLETYPE *samples, ///< Pointer to samples. + uint numSamples ///< Number of samples to insert. + ) = 0; + + + // Moves samples from the 'other' pipe instance to this instance. 
+ void moveSamples(FIFOSamplePipe &other ///< Other pipe instance where from the receive the data. + ) + { + int oNumSamples = other.numSamples(); + + putSamples(other.ptrBegin(), oNumSamples); + other.receiveSamples(oNumSamples); + } + + /// Output samples from beginning of the sample buffer. Copies requested samples to + /// output buffer and removes them from the sample buffer. If there are less than + /// 'numsample' samples in the buffer, returns all that available. + /// + /// \return Number of samples returned. + virtual uint receiveSamples(SAMPLETYPE *output, ///< Buffer where to copy output samples. + uint maxSamples ///< How many samples to receive at max. + ) = 0; + + /// Adjusts book-keeping so that given number of samples are removed from beginning of the + /// sample buffer without copying them anywhere. + /// + /// Used to reduce the number of samples in the buffer when accessing the sample buffer directly + /// with 'ptrBegin' function. + virtual uint receiveSamples(uint maxSamples ///< Remove this many samples from the beginning of pipe. + ) = 0; + + /// Returns number of samples currently available. + virtual uint numSamples() const = 0; + + // Returns nonzero if there aren't any samples available for outputting. + virtual int isEmpty() const = 0; + + /// Clears all the samples. + virtual void clear() = 0; + + /// allow trimming (downwards) amount of samples in pipeline. + /// Returns adjusted amount of samples + virtual uint adjustAmountOfSamples(uint numSamples) = 0; + +}; + + +/// Base-class for sound processing routines working in FIFO principle. With this base +/// class it's easy to implement sound processing stages that can be chained together, +/// so that samples that are fed into beginning of the pipe automatically go through +/// all the processing stages. +/// +/// When samples are input to this class, they're first processed and then put to +/// the FIFO pipe that's defined as output of this class. 
This output pipe can be +/// either other processing stage or a FIFO sample buffer. +class SOUNDTOUCH_API FIFOProcessor :public FIFOSamplePipe +{ +protected: + /// Internal pipe where processed samples are put. + FIFOSamplePipe *output; + + /// Sets output pipe. + void setOutPipe(FIFOSamplePipe *pOutput) + { + assert(output == NULL); + assert(pOutput != NULL); + output = pOutput; + } + + /// Constructor. Doesn't define output pipe; it has to be set be + /// 'setOutPipe' function. + FIFOProcessor() + { + output = NULL; + } + + /// Constructor. Configures output pipe. + FIFOProcessor(FIFOSamplePipe *pOutput ///< Output pipe. + ) + { + output = pOutput; + } + + /// Destructor. + virtual ~FIFOProcessor() + { + } + + /// Returns a pointer to the beginning of the output samples. + /// This function is provided for accessing the output samples directly. + /// Please be careful for not to corrupt the book-keeping! + /// + /// When using this function to output samples, also remember to 'remove' the + /// output samples from the buffer by calling the + /// 'receiveSamples(numSamples)' function + virtual SAMPLETYPE *ptrBegin() + { + return output->ptrBegin(); + } + +public: + + /// Output samples from beginning of the sample buffer. Copies requested samples to + /// output buffer and removes them from the sample buffer. If there are less than + /// 'numsample' samples in the buffer, returns all that available. + /// + /// \return Number of samples returned. + virtual uint receiveSamples(SAMPLETYPE *outBuffer, ///< Buffer where to copy output samples. + uint maxSamples ///< How many samples to receive at max. + ) + { + return output->receiveSamples(outBuffer, maxSamples); + } + + /// Adjusts book-keeping so that given number of samples are removed from beginning of the + /// sample buffer without copying them anywhere. + /// + /// Used to reduce the number of samples in the buffer when accessing the sample buffer directly + /// with 'ptrBegin' function. 
+ virtual uint receiveSamples(uint maxSamples ///< Remove this many samples from the beginning of pipe. + ) + { + return output->receiveSamples(maxSamples); + } + + /// Returns number of samples currently available. + virtual uint numSamples() const + { + return output->numSamples(); + } + + /// Returns nonzero if there aren't any samples available for outputting. + virtual int isEmpty() const + { + return output->isEmpty(); + } + + /// allow trimming (downwards) amount of samples in pipeline. + /// Returns adjusted amount of samples + virtual uint adjustAmountOfSamples(uint numSamples) + { + return output->adjustAmountOfSamples(numSamples); + } +}; + +} + +#endif diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/STTypes.h b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/STTypes.h new file mode 100644 index 0000000..3be0642 --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/STTypes.h @@ -0,0 +1,190 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// Common type definitions for SoundTouch audio processing library. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef STTypes_H +#define STTypes_H + +typedef unsigned int uint; +typedef unsigned long ulong; + +// Patch for MinGW: on Win64 long is 32-bit +#ifdef _WIN64 + typedef unsigned long long ulongptr; +#else + typedef ulong ulongptr; +#endif + + +// Helper macro for aligning pointer up to next 16-byte boundary +#define SOUNDTOUCH_ALIGN_POINTER_16(x) ( ( (ulongptr)(x) + 15 ) & ~(ulongptr)15 ) + + +#if (defined(__GNUC__) && !defined(ANDROID)) + // In GCC, include soundtouch_config.h made by config scritps. + // Skip this in Android compilation that uses GCC but without configure scripts. + #include "soundtouch_config.h" +#endif + + +namespace liteav_soundtouch +{ + /// Max allowed number of channels + #define SOUNDTOUCH_MAX_CHANNELS 16 + + /// Activate these undef's to overrule the possible sampletype + /// setting inherited from some other header file: + //#undef SOUNDTOUCH_INTEGER_SAMPLES + //#undef SOUNDTOUCH_FLOAT_SAMPLES + + /// If following flag is defined, always uses multichannel processing + /// routines also for mono and stero sound. This is for routine testing + /// purposes; output should be same with either routines, yet disabling + /// the dedicated mono/stereo processing routines will result in slower + /// runtime performance so recommendation is to keep this off. 
+ // #define USE_MULTICH_ALWAYS + + #if (defined(__SOFTFP__) && defined(ANDROID)) + // For Android compilation: Force use of Integer samples in case that + // compilation uses soft-floating point emulation - soft-fp is way too slow + #undef SOUNDTOUCH_FLOAT_SAMPLES + #define SOUNDTOUCH_INTEGER_SAMPLES 1 + #endif + + #if !(SOUNDTOUCH_INTEGER_SAMPLES || SOUNDTOUCH_FLOAT_SAMPLES) + + /// Choose either 32bit floating point or 16bit integer sampletype + /// by choosing one of the following defines, unless this selection + /// has already been done in some other file. + //// + /// Notes: + /// - In Windows environment, choose the sample format with the + /// following defines. + /// - In GNU environment, the floating point samples are used by + /// default, but integer samples can be chosen by giving the + /// following switch to the configure script: + /// ./configure --enable-integer-samples + /// However, if you still prefer to select the sample format here + /// also in GNU environment, then please #undef the INTEGER_SAMPLE + /// and FLOAT_SAMPLE defines first as in comments above. + #define SOUNDTOUCH_INTEGER_SAMPLES 1 //< 16bit integer samples + //#define SOUNDTOUCH_FLOAT_SAMPLES 1 //< 32bit float samples + + #endif + + #if (_M_IX86 || __i386__ || __x86_64__ || _M_X64) + /// Define this to allow X86-specific assembler/intrinsic optimizations. + /// Notice that library contains also usual C++ versions of each of these + /// these routines, so if you're having difficulties getting the optimized + /// routines compiled for whatever reason, you may disable these optimizations + /// to make the library compile. 
+ + #define SOUNDTOUCH_ALLOW_X86_OPTIMIZATIONS 1 + + /// In GNU environment, allow the user to override this setting by + /// giving the following switch to the configure script: + /// ./configure --disable-x86-optimizations + /// ./configure --enable-x86-optimizations=no + #ifdef SOUNDTOUCH_DISABLE_X86_OPTIMIZATIONS + #undef SOUNDTOUCH_ALLOW_X86_OPTIMIZATIONS + #endif + #else + /// Always disable optimizations when not using a x86 systems. + #undef SOUNDTOUCH_ALLOW_X86_OPTIMIZATIONS + + #endif + + // If defined, allows the SIMD-optimized routines to take minor shortcuts + // for improved performance. Undefine to require faithfully similar SIMD + // calculations as in normal C implementation. + #define SOUNDTOUCH_ALLOW_NONEXACT_SIMD_OPTIMIZATION 1 + + + #ifdef SOUNDTOUCH_INTEGER_SAMPLES + // 16bit integer sample type + typedef short SAMPLETYPE; + // data type for sample accumulation: Use 32bit integer to prevent overflows + typedef long LONG_SAMPLETYPE; + + #ifdef SOUNDTOUCH_FLOAT_SAMPLES + // check that only one sample type is defined + #error "conflicting sample types defined" + #endif // SOUNDTOUCH_FLOAT_SAMPLES + + #ifdef SOUNDTOUCH_ALLOW_X86_OPTIMIZATIONS + // Allow MMX optimizations (not available in X64 mode) + #if (!_M_X64) + #define SOUNDTOUCH_ALLOW_MMX 1 + #endif + #endif + + #else + + // floating point samples + typedef float SAMPLETYPE; + // data type for sample accumulation: Use float also here to enable + // efficient autovectorization + typedef float LONG_SAMPLETYPE; + + #ifdef SOUNDTOUCH_ALLOW_X86_OPTIMIZATIONS + // Allow SSE optimizations + #define SOUNDTOUCH_ALLOW_SSE 1 + #endif + + #endif // SOUNDTOUCH_INTEGER_SAMPLES + + #if ((SOUNDTOUCH_ALLOW_SSE) || (__SSE__) || (SOUNDTOUCH_USE_NEON)) + #if SOUNDTOUCH_ALLOW_NONEXACT_SIMD_OPTIMIZATION + #define ST_SIMD_AVOID_UNALIGNED + #endif + #endif + +} + +// define ST_NO_EXCEPTION_HANDLING switch to disable throwing std exceptions: +#define ST_NO_EXCEPTION_HANDLING 1 +#ifdef ST_NO_EXCEPTION_HANDLING + // 
Exceptions disabled. Throw asserts instead if enabled. + #include <assert.h> + #define ST_THROW_RT_ERROR(x) {assert((const char *)x);} +#else + // use c++ standard exceptions + #include <stdexcept> + #include <string> + #define ST_THROW_RT_ERROR(x) {throw std::runtime_error(x);} +#endif + +// When this #define is active, eliminates a clicking sound when the "rate" or "pitch" +// parameter setting crosses from value <1 to >=1 or vice versa during processing. +// Default is off as such crossover is untypical case and involves a slight sound +// quality compromise. +//#define SOUNDTOUCH_PREVENT_CLICK_AT_RATE_CROSSOVER 1 + +#endif diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/SoundTouch.h b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/SoundTouch.h new file mode 100644 index 0000000..90a9a74 --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/SoundTouch.h @@ -0,0 +1,349 @@ +////////////////////////////////////////////////////////////////////////////// +/// +/// SoundTouch - main class for tempo/pitch/rate adjusting routines. +/// +/// Notes: +/// - Initialize the SoundTouch object instance by setting up the sound stream +/// parameters with functions 'setSampleRate' and 'setChannels', then set +/// desired tempo/pitch/rate settings with the corresponding functions. +/// +/// - The SoundTouch class behaves like a first-in-first-out pipeline: The +/// samples that are to be processed are fed into one of the pipe by calling +/// function 'putSamples', while the ready processed samples can be read +/// from the other end of the pipeline with function 'receiveSamples'. +/// +/// - The SoundTouch processing classes require certain sized 'batches' of +/// samples in order to process the sound. 
For this reason the classes buffer +/// incoming samples until there are enough of samples available for +/// processing, then they carry out the processing step and consequently +/// make the processed samples available for outputting. +/// +/// - For the above reason, the processing routines introduce a certain +/// 'latency' between the input and output, so that the samples input to +/// SoundTouch may not be immediately available in the output, and neither +/// the amount of outputtable samples may not immediately be in direct +/// relationship with the amount of previously input samples. +/// +/// - The tempo/pitch/rate control parameters can be altered during processing. +/// Please notice though that they aren't currently protected by semaphores, +/// so in multi-thread application external semaphore protection may be +/// required. +/// +/// - This class utilizes classes 'TDStretch' for tempo change (without modifying +/// pitch) and 'RateTransposer' for changing the playback rate (that is, both +/// tempo and pitch in the same ratio) of the sound. The third available control +/// 'pitch' (change pitch but maintain tempo) is produced by a combination of +/// combining the two other controls. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef SoundTouch_H +#define SoundTouch_H + +#include "FIFOSamplePipe.h" +#include "STTypes.h" +#include "st_export.h" + +namespace liteav_soundtouch +{ + +/// Soundtouch library version string +#define SOUNDTOUCH_VERSION "2.2" + +/// SoundTouch library version id +#define SOUNDTOUCH_VERSION_ID (20200) + +// +// Available setting IDs for the 'setSetting' & 'get_setting' functions: + +/// Enable/disable anti-alias filter in pitch transposer (0 = disable) +#define SETTING_USE_AA_FILTER 0 + +/// Pitch transposer anti-alias filter length (8 .. 128 taps, default = 32) +#define SETTING_AA_FILTER_LENGTH 1 + +/// Enable/disable quick seeking algorithm in tempo changer routine +/// (enabling quick seeking lowers CPU utilization but causes a minor sound +/// quality compromising) +#define SETTING_USE_QUICKSEEK 2 + +/// Time-stretch algorithm single processing sequence length in milliseconds. This determines +/// to how long sequences the original sound is chopped in the time-stretch algorithm. +/// See "STTypes.h" or README for more information. +#define SETTING_SEQUENCE_MS 3 + +/// Time-stretch algorithm seeking window length in milliseconds for algorithm that finds the +/// best possible overlapping location. This determines from how wide window the algorithm +/// may look for an optimal joining location when mixing the sound sequences back together. +/// See "STTypes.h" or README for more information. 
+#define SETTING_SEEKWINDOW_MS 4 + +/// Time-stretch algorithm overlap length in milliseconds. When the chopped sound sequences +/// are mixed back together, to form a continuous sound stream, this parameter defines over +/// how long period the two consecutive sequences are let to overlap each other. +/// See "STTypes.h" or README for more information. +#define SETTING_OVERLAP_MS 5 + + +/// Call "getSetting" with this ID to query processing sequence size in samples. +/// This value gives approximate value of how many input samples you'll need to +/// feed into SoundTouch after initial buffering to get out a new batch of +/// output samples. +/// +/// This value does not include initial buffering at beginning of a new processing +/// stream, use SETTING_INITIAL_LATENCY to get the initial buffering size. +/// +/// Notices: +/// - This is read-only parameter, i.e. setSetting ignores this parameter +/// - This parameter value is not constant but change depending on +/// tempo/pitch/rate/samplerate settings. +#define SETTING_NOMINAL_INPUT_SEQUENCE 6 + + +/// Call "getSetting" with this ID to query nominal average processing output +/// size in samples. This value tells approcimate value how many output samples +/// SoundTouch outputs once it does DSP processing run for a batch of input samples. +/// +/// Notices: +/// - This is read-only parameter, i.e. setSetting ignores this parameter +/// - This parameter value is not constant but change depending on +/// tempo/pitch/rate/samplerate settings. +#define SETTING_NOMINAL_OUTPUT_SEQUENCE 7 + + +/// Call "getSetting" with this ID to query initial processing latency, i.e. +/// approx. how many samples you'll need to enter to SoundTouch pipeline before +/// you can expect to get first batch of ready output samples out. +/// +/// After the first output batch, you can then expect to get approx. 
+/// SETTING_NOMINAL_OUTPUT_SEQUENCE ready samples out for every +/// SETTING_NOMINAL_INPUT_SEQUENCE samples that you enter into SoundTouch. +/// +/// Example: +/// processing with parameter -tempo=5 +/// => initial latency = 5509 samples +/// input sequence = 4167 samples +/// output sequence = 3969 samples +/// +/// Accordingly, you can expect to feed in approx. 5509 samples at beginning of +/// the stream, and then you'll get out the first 3969 samples. After that, for +/// every approx. 4167 samples that you'll put in, you'll receive again approx. +/// 3969 samples out. +/// +/// This also means that average latency during stream processing is +/// INITIAL_LATENCY-OUTPUT_SEQUENCE/2, in the above example case 5509-3969/2 +/// = 3524 samples +/// +/// Notices: +/// - This is read-only parameter, i.e. setSetting ignores this parameter +/// - This parameter value is not constant but change depending on +/// tempo/pitch/rate/samplerate settings. +#define SETTING_INITIAL_LATENCY 8 + + +class SOUNDTOUCH_API SoundTouch : public FIFOProcessor +{ +private: + /// Rate transposer class instance + class RateTransposer *pRateTransposer; + + /// Time-stretch class instance + class TDStretch *pTDStretch; + + /// Virtual pitch parameter. Effective rate & tempo are calculated from these parameters. + double virtualRate; + + /// Virtual pitch parameter. Effective rate & tempo are calculated from these parameters. + double virtualTempo; + + /// Virtual pitch parameter. Effective rate & tempo are calculated from these parameters. + double virtualPitch; + + /// Flag: Has sample rate been set? + bool bSrateSet; + + /// Accumulator for how many samples in total will be expected as output vs. samples put in, + /// considering current processing settings. 
+ double samplesExpectedOut; + + /// Accumulator for how many samples in total have been read out from the processing so far + long samplesOutput; + + /// Calculates effective rate & tempo valuescfrom 'virtualRate', 'virtualTempo' and + /// 'virtualPitch' parameters. + void calcEffectiveRateAndTempo(); + +protected : + /// Number of channels + uint channels; + + /// Effective 'rate' value calculated from 'virtualRate', 'virtualTempo' and 'virtualPitch' + double rate; + + /// Effective 'tempo' value calculated from 'virtualRate', 'virtualTempo' and 'virtualPitch' + double tempo; + +public: + SoundTouch(); + virtual ~SoundTouch(); + + /// Get SoundTouch library version string + static const char *getVersionString(); + + /// Get SoundTouch library version Id + static uint getVersionId(); + + /// Sets new rate control value. Normal rate = 1.0, smaller values + /// represent slower rate, larger faster rates. + void setRate(double newRate); + + /// Sets new tempo control value. Normal tempo = 1.0, smaller values + /// represent slower tempo, larger faster tempo. + void setTempo(double newTempo); + + /// Sets new rate control value as a difference in percents compared + /// to the original rate (-50 .. +100 %) + void setRateChange(double newRate); + + /// Sets new tempo control value as a difference in percents compared + /// to the original tempo (-50 .. +100 %) + void setTempoChange(double newTempo); + + /// Sets new pitch control value. Original pitch = 1.0, smaller values + /// represent lower pitches, larger values higher pitch. + void setPitch(double newPitch); + + /// Sets pitch change in octaves compared to the original pitch + /// (-1.00 .. +1.00) + void setPitchOctaves(double newPitch); + + /// Sets pitch change in semi-tones compared to the original pitch + /// (-12 .. 
+12) + void setPitchSemiTones(int newPitch); + void setPitchSemiTones(double newPitch); + + /// Sets the number of channels, 1 = mono, 2 = stereo + void setChannels(uint numChannels); + + /// Sets sample rate. + void setSampleRate(uint srate); + + /// Get ratio between input and output audio durations, useful for calculating + /// processed output duration: if you'll process a stream of N samples, then + /// you can expect to get out N * getInputOutputSampleRatio() samples. + /// + /// This ratio will give accurate target duration ratio for a full audio track, + /// given that the the whole track is processed with same processing parameters. + /// + /// If this ratio is applied to calculate intermediate offsets inside a processing + /// stream, then this ratio is approximate and can deviate +- some tens of milliseconds + /// from ideal offset, yet by end of the audio stream the duration ratio will become + /// exact. + /// + /// Example: if processing with parameters "-tempo=15 -pitch=-3", the function + /// will return value 0.8695652... Now, if processing an audio stream whose duration + /// is exactly one million audio samples, then you can expect the processed + /// output duration be 0.869565 * 1000000 = 869565 samples. + double getInputOutputSampleRatio(); + + /// Flushes the last samples from the processing pipeline to the output. + /// Clears also the internal processing buffers. + // + /// Note: This function is meant for extracting the last samples of a sound + /// stream. This function may introduce additional blank samples in the end + /// of the sound stream, and thus it's not recommended to call this function + /// in the middle of a sound stream. + void flush(); + + /// Adds 'numSamples' pcs of samples from the 'samples' memory position into + /// the input of the object. Notice that sample rate _has_to_ be set before + /// calling this function, otherwise throws a runtime_error exception. 
+ virtual void putSamples( + const SAMPLETYPE *samples, ///< Pointer to sample buffer. + uint numSamples ///< Number of samples in buffer. Notice + ///< that in case of stereo-sound a single sample + ///< contains data for both channels. + ); + + /// Output samples from beginning of the sample buffer. Copies requested samples to + /// output buffer and removes them from the sample buffer. If there are less than + /// 'numsample' samples in the buffer, returns all that available. + /// + /// \return Number of samples returned. + virtual uint receiveSamples(SAMPLETYPE *output, ///< Buffer where to copy output samples. + uint maxSamples ///< How many samples to receive at max. + ); + + /// Adjusts book-keeping so that given number of samples are removed from beginning of the + /// sample buffer without copying them anywhere. + /// + /// Used to reduce the number of samples in the buffer when accessing the sample buffer directly + /// with 'ptrBegin' function. + virtual uint receiveSamples(uint maxSamples ///< Remove this many samples from the beginning of pipe. + ); + + /// Clears all the samples in the object's output and internal processing + /// buffers. + virtual void clear(); + + /// Changes a setting controlling the processing system behaviour. See the + /// 'SETTING_...' defines for available setting ID's. + /// + /// \return 'true' if the setting was successfully changed + bool setSetting(int settingId, ///< Setting ID number. see SETTING_... defines. + int value ///< New setting value. + ); + + /// Reads a setting controlling the processing system behaviour. See the + /// 'SETTING_...' defines for available setting ID's. + /// + /// \return the setting value. + int getSetting(int settingId ///< Setting ID number, see SETTING_... defines. + ) const; + + /// Returns number of samples currently unprocessed. 
+ virtual uint numUnprocessedSamples() const; + + /// Return number of channels + uint numChannels() const + { + return channels; + } + + /// Other handy functions that are implemented in the ancestor classes (see + /// classes 'FIFOProcessor' and 'FIFOSamplePipe') + /// + /// - receiveSamples() : Use this function to receive 'ready' processed samples from SoundTouch. + /// - numSamples() : Get number of 'ready' samples that can be received with + /// function 'receiveSamples()' + /// - isEmpty() : Returns nonzero if there aren't any 'ready' samples. + /// - clear() : Clears all samples from ready/processing buffers. +}; + +} +#endif diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/TXSoundTouch.h b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/TXSoundTouch.h new file mode 100644 index 0000000..6926ff6 --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/TXSoundTouch.h @@ -0,0 +1,11 @@ +/* + * Copyright (c) 2022 Tencent. All Rights Reserved. + * + */ + +#import <TXSoundTouch/BPMDetect.h> +#import <TXSoundTouch/FIFOSampleBuffer.h> +#import <TXSoundTouch/FIFOSamplePipe.h> +#import <TXSoundTouch/soundtouch_config.h> +#import <TXSoundTouch/SoundTouch.h> +#import <TXSoundTouch/STTypes.h> diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/soundtouch_config.h b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/soundtouch_config.h new file mode 100644 index 0000000..a8b897e --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Headers/soundtouch_config.h @@ -0,0 +1,105 @@ +/* include/soundtouch_config.h. Generated from soundtouch_config.h.in by configure. */ +/* include/soundtouch_config.h.in. Generated from configure.ac by autoheader. 
*/ + +#ifndef SoundTouchConfig_H +#define SoundTouchConfig_H + +namespace liteav_soundtouch +{ + +/* Never allow x86 optimizations in iOS simulator build */ +#define ALLOW_X86_OPTIMIZATIONS 0 + +/* Use Integer as Sample type */ +#define INTEGER_SAMPLES 1 +#define SOUNDTOUCH_INTEGER_SAMPLES 1 + +/* Use Float as Sample type */ +//#define FLOAT_SAMPLES 1 + +/* Define to 1 if you have the <dlfcn.h> header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you have the <inttypes.h> header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the `m' library (-lm). */ +#define HAVE_LIBM 1 + +/* Define to 1 if your system has a GNU libc compatible `malloc' function, and + to 0 otherwise. */ +#define HAVE_MALLOC 1 + +/* Define to 1 if you have the <memory.h> header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the <stdint.h> header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the <stdlib.h> header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the <strings.h> header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the <string.h> header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the <sys/stat.h> header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the <sys/types.h> header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the <unistd.h> header file. */ +#define HAVE_UNISTD_H 1 + +/* Use Integer as Sample type */ +/* #undef INTEGER_SAMPLES */ + +/* Define to the sub-directory in which libtool stores uninstalled libraries. + */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "soundtouch" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "http://www.surina.net/soundtouch" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "SoundTouch" + +/* Define to the full name and version of this package. 
*/ +#define PACKAGE_STRING "SoundTouch 1.4.0" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "soundtouch" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "1.4.0" + +/* Define as the return type of signal handlers (`int' or `void'). */ +#define RETSIGTYPE void + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Version number of package */ +#define VERSION "1.4.0" + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ + +/* Define to `__inline__' or `__inline' if that's what the C compiler + calls it, or to nothing if 'inline' is not supported under any name. */ +#ifndef __cplusplus +/* #undef inline */ +#endif + +/* Define to rpl_malloc if the replacement function should be used. */ +/* #undef malloc */ + +} + +#endif // SoundTouchConfig_H diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Info.plist b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Info.plist new file mode 100644 index 0000000..2416226 --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Info.plist @@ -0,0 +1,55 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>BuildMachineOSBuild</key> + <string>20F71</string> + <key>CFBundleDevelopmentRegion</key> + <string>en</string> + <key>CFBundleExecutable</key> + <string>TXSoundTouch</string> + <key>CFBundleIdentifier</key> + <string>com.tencent.liteav.SoundTouch</string> + <key>CFBundleInfoDictionaryVersion</key> + <string>6.0</string> + <key>CFBundleName</key> + <string>TXSoundTouch</string> + <key>CFBundlePackageType</key> + <string>FMWK</string> + <key>CFBundleShortVersionString</key> + <string>1.0</string> + <key>CFBundleSignature</key> + <string>????</string> + 
<key>CFBundleSupportedPlatforms</key> + <array> + <string>iPhoneOS</string> + </array> + <key>CFBundleVersion</key> + <string>1.0</string> + <key>DTCompiler</key> + <string>com.apple.compilers.llvm.clang.1_0</string> + <key>DTPlatformBuild</key> + <string>18E182</string> + <key>DTPlatformName</key> + <string>iphoneos</string> + <key>DTPlatformVersion</key> + <string>14.5</string> + <key>DTSDKBuild</key> + <string>18E182</string> + <key>DTSDKName</key> + <string>iphoneos14.5</string> + <key>DTXcode</key> + <string>1250</string> + <key>DTXcodeBuild</key> + <string>12E262</string> + <key>MinimumOSVersion</key> + <string>9.0</string> + <key>NSPrincipalClass</key> + <string></string> + <key>UIDeviceFamily</key> + <array> + <integer>1</integer> + <integer>2</integer> + </array> +</dict> +</plist> diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Modules/module.modulemap b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Modules/module.modulemap new file mode 100644 index 0000000..b269309 --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/Modules/module.modulemap @@ -0,0 +1,6 @@ +framework module TXSoundTouch { + umbrella header "TXSoundTouch.h" + + export * + module * { export * } +} diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/TXSoundTouch b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/TXSoundTouch new file mode 100755 index 0000000..7a5ff37 Binary files /dev/null and b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-arm64_armv7/TXSoundTouch.framework/TXSoundTouch differ diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/.DS_Store b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/.DS_Store new file mode 100644 index 0000000..9c2049c Binary files /dev/null and b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/.DS_Store differ diff --git 
a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/.DS_Store b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/.DS_Store new file mode 100644 index 0000000..fda8680 Binary files /dev/null and b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/.DS_Store differ diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/BPMDetect.h b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/BPMDetect.h new file mode 100644 index 0000000..683208d --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/BPMDetect.h @@ -0,0 +1,205 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// Beats-per-minute (BPM) detection routine. +/// +/// The beat detection algorithm works as follows: +/// - Use function 'inputSamples' to input a chunks of samples to the class for +/// analysis. It's a good idea to enter a large sound file or stream in smallish +/// chunks of around few kilosamples in order not to extinguish too much RAM memory. +/// - Input sound data is decimated to approx 500 Hz to reduce calculation burden, +/// which is basically ok as low (bass) frequencies mostly determine the beat rate. +/// Simple averaging is used for anti-alias filtering because the resulting signal +/// quality isn't of that high importance. +/// - Decimated sound data is enveloped, i.e. the amplitude shape is detected by +/// taking absolute value that's smoothed by sliding average. Signal levels that +/// are below a couple of times the general RMS amplitude level are cut away to +/// leave only notable peaks there. +/// - Repeating sound patterns (e.g. beats) are detected by calculating short-term +/// autocorrelation function of the enveloped signal. 
+/// - After whole sound data file has been analyzed as above, the bpm level is +/// detected by function 'getBpm' that finds the highest peak of the autocorrelation +/// function, calculates it's precise location and converts this reading to bpm's. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef _BPMDetect_H_ +#define _BPMDetect_H_ + +#include <vector> +#include "STTypes.h" +#include "FIFOSampleBuffer.h" + +namespace liteav_soundtouch +{ + + /// Minimum allowed BPM rate. Used to restrict accepted result above a reasonable limit. + #define MIN_BPM 45 + + /// Maximum allowed BPM rate range. Used for calculating algorithm parametrs + #define MAX_BPM_RANGE 200 + + /// Maximum allowed BPM rate range. Used to restrict accepted result below a reasonable limit. 
+ #define MAX_BPM_VALID 190 + +//////////////////////////////////////////////////////////////////////////////// + + typedef struct + { + float pos; + float strength; + } BEAT; + + + class IIR2_filter + { + double coeffs[5]; + double prev[5]; + + public: + IIR2_filter(const double *lpf_coeffs); + float update(float x); + }; + + + /// Class for calculating BPM rate for audio data. + class BPMDetect + { + protected: + /// Auto-correlation accumulator bins. + float *xcorr; + + /// Sample average counter. + int decimateCount; + + /// Sample average accumulator for FIFO-like decimation. + liteav_soundtouch::LONG_SAMPLETYPE decimateSum; + + /// Decimate sound by this coefficient to reach approx. 500 Hz. + int decimateBy; + + /// Auto-correlation window length + int windowLen; + + /// Number of channels (1 = mono, 2 = stereo) + int channels; + + /// sample rate + int sampleRate; + + /// Beginning of auto-correlation window: Autocorrelation isn't being updated for + /// the first these many correlation bins. + int windowStart; + + /// window functions for data preconditioning + float *hamw; + float *hamw2; + + // beat detection variables + int pos; + int peakPos; + int beatcorr_ringbuffpos; + int init_scaler; + float peakVal; + float *beatcorr_ringbuff; + + /// FIFO-buffer for decimated processing samples. + liteav_soundtouch::FIFOSampleBuffer *buffer; + + /// Collection of detected beat positions + //BeatCollection beats; + std::vector<BEAT> beats; + + // 2nd order low-pass-filter + IIR2_filter beat_lpf; + + /// Updates auto-correlation function for given number of decimated samples that + /// are read from the internal 'buffer' pipe (samples aren't removed from the pipe + /// though). + void updateXCorr(int process_samples /// How many samples are processed. + ); + + /// Decimates samples to approx. 500 Hz. + /// + /// \return Number of output samples. 
+ int decimate(liteav_soundtouch::SAMPLETYPE *dest, ///< Destination buffer + const liteav_soundtouch::SAMPLETYPE *src, ///< Source sample buffer + int numsamples ///< Number of source samples. + ); + + /// Calculates amplitude envelope for the buffer of samples. + /// Result is output to 'samples'. + void calcEnvelope(liteav_soundtouch::SAMPLETYPE *samples, ///< Pointer to input/output data buffer + int numsamples ///< Number of samples in buffer + ); + + /// remove constant bias from xcorr data + void removeBias(); + + // Detect individual beat positions + void updateBeatPos(int process_samples); + + + public: + /// Constructor. + BPMDetect(int numChannels, ///< Number of channels in sample data. + int sampleRate ///< Sample rate in Hz. + ); + + /// Destructor. + virtual ~BPMDetect(); + + /// Inputs a block of samples for analyzing: Envelopes the samples and then + /// updates the autocorrelation estimation. When whole song data has been input + /// in smaller blocks using this function, read the resulting bpm with 'getBpm' + /// function. + /// + /// Notice that data in 'samples' array can be disrupted in processing. + void inputSamples(const liteav_soundtouch::SAMPLETYPE *samples, ///< Pointer to input/working data buffer + int numSamples ///< Number of samples in buffer + ); + + /// Analyzes the results and returns the BPM rate. Use this function to read result + /// after whole song data has been input to the class by consecutive calls of + /// 'inputSamples' function. + /// + /// \return Beats-per-minute rate, or zero if detection failed. + float getBpm(); + + /// Get beat position arrays. Note: The array includes also really low beat detection values + /// in absence of clear strong beats. Consumer may wish to filter low values away. + /// - "pos" receive array of beat positions + /// - "values" receive array of beat detection strengths + /// - max_num indicates max.size of "pos" and "values" array. 
+ /// + /// You can query a suitable array sized by calling this with NULL in "pos" & "values". + /// + /// \return number of beats in the arrays. + int getBeats(float *pos, float *strength, int max_num); + }; +} +#endif // _BPMDetect_H_ diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/FIFOSampleBuffer.h b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/FIFOSampleBuffer.h new file mode 100644 index 0000000..0d09df4 --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/FIFOSampleBuffer.h @@ -0,0 +1,180 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// A buffer class for temporarily storaging sound samples, operates as a +/// first-in-first-out pipe. +/// +/// Samples are added to the end of the sample buffer with the 'putSamples' +/// function, and are received from the beginning of the buffer by calling +/// the 'receiveSamples' function. The class automatically removes the +/// output samples from the buffer as well as grows the storage size +/// whenever necessary. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef FIFOSampleBuffer_H +#define FIFOSampleBuffer_H + +#include "FIFOSamplePipe.h" + +namespace liteav_soundtouch +{ + +/// Sample buffer working in FIFO (first-in-first-out) principle. The class takes +/// care of storage size adjustment and data moving during input/output operations. +/// +/// Notice that in case of stereo audio, one sample is considered to consist of +/// both channel data. +class FIFOSampleBuffer : public FIFOSamplePipe +{ +private: + /// Sample buffer. + SAMPLETYPE *buffer; + + // Raw unaligned buffer memory. 'buffer' is made aligned by pointing it to first + // 16-byte aligned location of this buffer + SAMPLETYPE *bufferUnaligned; + + /// Sample buffer size in bytes + uint sizeInBytes; + + /// How many samples are currently in buffer. + uint samplesInBuffer; + + /// Channels, 1=mono, 2=stereo. + uint channels; + + /// Current position pointer to the buffer. This pointer is increased when samples are + /// removed from the pipe so that it's necessary to actually rewind buffer (move data) + /// only new data when is put to the pipe. + uint bufferPos; + + /// Rewind the buffer by moving data from position pointed by 'bufferPos' to real + /// beginning of the buffer. + void rewind(); + + /// Ensures that the buffer has capacity for at least this many samples. + void ensureCapacity(uint capacityRequirement); + + /// Returns current capacity. + uint getCapacity() const; + +public: + + /// Constructor + FIFOSampleBuffer(int numChannels = 2 ///< Number of channels, 1=mono, 2=stereo. + ///< Default is stereo. 
+ ); + + /// destructor + ~FIFOSampleBuffer(); + + /// Returns a pointer to the beginning of the output samples. + /// This function is provided for accessing the output samples directly. + /// Please be careful for not to corrupt the book-keeping! + /// + /// When using this function to output samples, also remember to 'remove' the + /// output samples from the buffer by calling the + /// 'receiveSamples(numSamples)' function + virtual SAMPLETYPE *ptrBegin(); + + /// Returns a pointer to the end of the used part of the sample buffer (i.e. + /// where the new samples are to be inserted). This function may be used for + /// inserting new samples into the sample buffer directly. Please be careful + /// not corrupt the book-keeping! + /// + /// When using this function as means for inserting new samples, also remember + /// to increase the sample count afterwards, by calling the + /// 'putSamples(numSamples)' function. + SAMPLETYPE *ptrEnd( + uint slackCapacity ///< How much free capacity (in samples) there _at least_ + ///< should be so that the caller can successfully insert the + ///< desired samples to the buffer. If necessary, the function + ///< grows the buffer size to comply with this requirement. + ); + + /// Adds 'numSamples' pcs of samples from the 'samples' memory position to + /// the sample buffer. + virtual void putSamples(const SAMPLETYPE *samples, ///< Pointer to samples. + uint numSamples ///< Number of samples to insert. + ); + + /// Adjusts the book-keeping to increase number of samples in the buffer without + /// copying any actual samples. + /// + /// This function is used to update the number of samples in the sample buffer + /// when accessing the buffer directly with 'ptrEnd' function. Please be + /// careful though! + virtual void putSamples(uint numSamples ///< Number of samples been inserted. + ); + + /// Output samples from beginning of the sample buffer. 
Copies requested samples to + /// output buffer and removes them from the sample buffer. If there are less than + /// 'numsample' samples in the buffer, returns all that available. + /// + /// \return Number of samples returned. + virtual uint receiveSamples(SAMPLETYPE *output, ///< Buffer where to copy output samples. + uint maxSamples ///< How many samples to receive at max. + ); + + /// Adjusts book-keeping so that given number of samples are removed from beginning of the + /// sample buffer without copying them anywhere. + /// + /// Used to reduce the number of samples in the buffer when accessing the sample buffer directly + /// with 'ptrBegin' function. + virtual uint receiveSamples(uint maxSamples ///< Remove this many samples from the beginning of pipe. + ); + + /// Returns number of samples currently available. + virtual uint numSamples() const; + + /// Sets number of channels, 1 = mono, 2 = stereo. + void setChannels(int numChannels); + + /// Get number of channels + int getChannels() + { + return channels; + } + + /// Returns nonzero if there aren't any samples available for outputting. + virtual int isEmpty() const; + + /// Clears all the samples. + virtual void clear(); + + /// allow trimming (downwards) amount of samples in pipeline. 
+ /// Returns adjusted amount of samples + uint adjustAmountOfSamples(uint numSamples); + + /// Add silence to end of buffer + void addSilent(uint nSamples); +}; + +} + +#endif diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/FIFOSamplePipe.h b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/FIFOSamplePipe.h new file mode 100644 index 0000000..90ae18f --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/FIFOSamplePipe.h @@ -0,0 +1,231 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// 'FIFOSamplePipe' : An abstract base class for classes that manipulate sound +/// samples by operating like a first-in-first-out pipe: New samples are fed +/// into one end of the pipe with the 'putSamples' function, and the processed +/// samples are received from the other end with the 'receiveSamples' function. +/// +/// 'FIFOProcessor' : A base class for classes the do signal processing with +/// the samples while operating like a first-in-first-out pipe. When samples +/// are input with the 'putSamples' function, the class processes them +/// and moves the processed samples to the given 'output' pipe object, which +/// may be either another processing stage, or a fifo sample buffer object. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef FIFOSamplePipe_H +#define FIFOSamplePipe_H + +#include <assert.h> +#include <stdlib.h> +#include "STTypes.h" +#include "st_export.h" + +namespace liteav_soundtouch +{ + +/// Abstract base class for FIFO (first-in-first-out) sample processing classes. +class SOUNDTOUCH_API FIFOSamplePipe +{ +protected: + + bool verifyNumberOfChannels(int nChannels) const + { + if ((nChannels > 0) && (nChannels <= SOUNDTOUCH_MAX_CHANNELS)) + { + return true; + } + ST_THROW_RT_ERROR("Error: Illegal number of channels"); + return false; + } + +public: + // virtual default destructor + virtual ~FIFOSamplePipe() {} + + + /// Returns a pointer to the beginning of the output samples. + /// This function is provided for accessing the output samples directly. + /// Please be careful for not to corrupt the book-keeping! + /// + /// When using this function to output samples, also remember to 'remove' the + /// output samples from the buffer by calling the + /// 'receiveSamples(numSamples)' function + virtual SAMPLETYPE *ptrBegin() = 0; + + /// Adds 'numSamples' pcs of samples from the 'samples' memory position to + /// the sample buffer. + virtual void putSamples(const SAMPLETYPE *samples, ///< Pointer to samples. + uint numSamples ///< Number of samples to insert. + ) = 0; + + + // Moves samples from the 'other' pipe instance to this instance. 
+ void moveSamples(FIFOSamplePipe &other ///< Other pipe instance where from the receive the data. + ) + { + int oNumSamples = other.numSamples(); + + putSamples(other.ptrBegin(), oNumSamples); + other.receiveSamples(oNumSamples); + } + + /// Output samples from beginning of the sample buffer. Copies requested samples to + /// output buffer and removes them from the sample buffer. If there are less than + /// 'numsample' samples in the buffer, returns all that available. + /// + /// \return Number of samples returned. + virtual uint receiveSamples(SAMPLETYPE *output, ///< Buffer where to copy output samples. + uint maxSamples ///< How many samples to receive at max. + ) = 0; + + /// Adjusts book-keeping so that given number of samples are removed from beginning of the + /// sample buffer without copying them anywhere. + /// + /// Used to reduce the number of samples in the buffer when accessing the sample buffer directly + /// with 'ptrBegin' function. + virtual uint receiveSamples(uint maxSamples ///< Remove this many samples from the beginning of pipe. + ) = 0; + + /// Returns number of samples currently available. + virtual uint numSamples() const = 0; + + // Returns nonzero if there aren't any samples available for outputting. + virtual int isEmpty() const = 0; + + /// Clears all the samples. + virtual void clear() = 0; + + /// allow trimming (downwards) amount of samples in pipeline. + /// Returns adjusted amount of samples + virtual uint adjustAmountOfSamples(uint numSamples) = 0; + +}; + + +/// Base-class for sound processing routines working in FIFO principle. With this base +/// class it's easy to implement sound processing stages that can be chained together, +/// so that samples that are fed into beginning of the pipe automatically go through +/// all the processing stages. +/// +/// When samples are input to this class, they're first processed and then put to +/// the FIFO pipe that's defined as output of this class. 
This output pipe can be +/// either other processing stage or a FIFO sample buffer. +class SOUNDTOUCH_API FIFOProcessor :public FIFOSamplePipe +{ +protected: + /// Internal pipe where processed samples are put. + FIFOSamplePipe *output; + + /// Sets output pipe. + void setOutPipe(FIFOSamplePipe *pOutput) + { + assert(output == NULL); + assert(pOutput != NULL); + output = pOutput; + } + + /// Constructor. Doesn't define output pipe; it has to be set be + /// 'setOutPipe' function. + FIFOProcessor() + { + output = NULL; + } + + /// Constructor. Configures output pipe. + FIFOProcessor(FIFOSamplePipe *pOutput ///< Output pipe. + ) + { + output = pOutput; + } + + /// Destructor. + virtual ~FIFOProcessor() + { + } + + /// Returns a pointer to the beginning of the output samples. + /// This function is provided for accessing the output samples directly. + /// Please be careful for not to corrupt the book-keeping! + /// + /// When using this function to output samples, also remember to 'remove' the + /// output samples from the buffer by calling the + /// 'receiveSamples(numSamples)' function + virtual SAMPLETYPE *ptrBegin() + { + return output->ptrBegin(); + } + +public: + + /// Output samples from beginning of the sample buffer. Copies requested samples to + /// output buffer and removes them from the sample buffer. If there are less than + /// 'numsample' samples in the buffer, returns all that available. + /// + /// \return Number of samples returned. + virtual uint receiveSamples(SAMPLETYPE *outBuffer, ///< Buffer where to copy output samples. + uint maxSamples ///< How many samples to receive at max. + ) + { + return output->receiveSamples(outBuffer, maxSamples); + } + + /// Adjusts book-keeping so that given number of samples are removed from beginning of the + /// sample buffer without copying them anywhere. + /// + /// Used to reduce the number of samples in the buffer when accessing the sample buffer directly + /// with 'ptrBegin' function. 
+ virtual uint receiveSamples(uint maxSamples ///< Remove this many samples from the beginning of pipe. + ) + { + return output->receiveSamples(maxSamples); + } + + /// Returns number of samples currently available. + virtual uint numSamples() const + { + return output->numSamples(); + } + + /// Returns nonzero if there aren't any samples available for outputting. + virtual int isEmpty() const + { + return output->isEmpty(); + } + + /// allow trimming (downwards) amount of samples in pipeline. + /// Returns adjusted amount of samples + virtual uint adjustAmountOfSamples(uint numSamples) + { + return output->adjustAmountOfSamples(numSamples); + } +}; + +} + +#endif diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/STTypes.h b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/STTypes.h new file mode 100644 index 0000000..3be0642 --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/STTypes.h @@ -0,0 +1,190 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// Common type definitions for SoundTouch audio processing library. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef STTypes_H +#define STTypes_H + +typedef unsigned int uint; +typedef unsigned long ulong; + +// Patch for MinGW: on Win64 long is 32-bit +#ifdef _WIN64 + typedef unsigned long long ulongptr; +#else + typedef ulong ulongptr; +#endif + + +// Helper macro for aligning pointer up to next 16-byte boundary +#define SOUNDTOUCH_ALIGN_POINTER_16(x) ( ( (ulongptr)(x) + 15 ) & ~(ulongptr)15 ) + + +#if (defined(__GNUC__) && !defined(ANDROID)) + // In GCC, include soundtouch_config.h made by config scritps. + // Skip this in Android compilation that uses GCC but without configure scripts. + #include "soundtouch_config.h" +#endif + + +namespace liteav_soundtouch +{ + /// Max allowed number of channels + #define SOUNDTOUCH_MAX_CHANNELS 16 + + /// Activate these undef's to overrule the possible sampletype + /// setting inherited from some other header file: + //#undef SOUNDTOUCH_INTEGER_SAMPLES + //#undef SOUNDTOUCH_FLOAT_SAMPLES + + /// If following flag is defined, always uses multichannel processing + /// routines also for mono and stero sound. This is for routine testing + /// purposes; output should be same with either routines, yet disabling + /// the dedicated mono/stereo processing routines will result in slower + /// runtime performance so recommendation is to keep this off. 
+ // #define USE_MULTICH_ALWAYS + + #if (defined(__SOFTFP__) && defined(ANDROID)) + // For Android compilation: Force use of Integer samples in case that + // compilation uses soft-floating point emulation - soft-fp is way too slow + #undef SOUNDTOUCH_FLOAT_SAMPLES + #define SOUNDTOUCH_INTEGER_SAMPLES 1 + #endif + + #if !(SOUNDTOUCH_INTEGER_SAMPLES || SOUNDTOUCH_FLOAT_SAMPLES) + + /// Choose either 32bit floating point or 16bit integer sampletype + /// by choosing one of the following defines, unless this selection + /// has already been done in some other file. + //// + /// Notes: + /// - In Windows environment, choose the sample format with the + /// following defines. + /// - In GNU environment, the floating point samples are used by + /// default, but integer samples can be chosen by giving the + /// following switch to the configure script: + /// ./configure --enable-integer-samples + /// However, if you still prefer to select the sample format here + /// also in GNU environment, then please #undef the INTEGER_SAMPLE + /// and FLOAT_SAMPLE defines first as in comments above. + #define SOUNDTOUCH_INTEGER_SAMPLES 1 //< 16bit integer samples + //#define SOUNDTOUCH_FLOAT_SAMPLES 1 //< 32bit float samples + + #endif + + #if (_M_IX86 || __i386__ || __x86_64__ || _M_X64) + /// Define this to allow X86-specific assembler/intrinsic optimizations. + /// Notice that library contains also usual C++ versions of each of these + /// these routines, so if you're having difficulties getting the optimized + /// routines compiled for whatever reason, you may disable these optimizations + /// to make the library compile. 
+ + #define SOUNDTOUCH_ALLOW_X86_OPTIMIZATIONS 1 + + /// In GNU environment, allow the user to override this setting by + /// giving the following switch to the configure script: + /// ./configure --disable-x86-optimizations + /// ./configure --enable-x86-optimizations=no + #ifdef SOUNDTOUCH_DISABLE_X86_OPTIMIZATIONS + #undef SOUNDTOUCH_ALLOW_X86_OPTIMIZATIONS + #endif + #else + /// Always disable optimizations when not using a x86 systems. + #undef SOUNDTOUCH_ALLOW_X86_OPTIMIZATIONS + + #endif + + // If defined, allows the SIMD-optimized routines to take minor shortcuts + // for improved performance. Undefine to require faithfully similar SIMD + // calculations as in normal C implementation. + #define SOUNDTOUCH_ALLOW_NONEXACT_SIMD_OPTIMIZATION 1 + + + #ifdef SOUNDTOUCH_INTEGER_SAMPLES + // 16bit integer sample type + typedef short SAMPLETYPE; + // data type for sample accumulation: Use 32bit integer to prevent overflows + typedef long LONG_SAMPLETYPE; + + #ifdef SOUNDTOUCH_FLOAT_SAMPLES + // check that only one sample type is defined + #error "conflicting sample types defined" + #endif // SOUNDTOUCH_FLOAT_SAMPLES + + #ifdef SOUNDTOUCH_ALLOW_X86_OPTIMIZATIONS + // Allow MMX optimizations (not available in X64 mode) + #if (!_M_X64) + #define SOUNDTOUCH_ALLOW_MMX 1 + #endif + #endif + + #else + + // floating point samples + typedef float SAMPLETYPE; + // data type for sample accumulation: Use float also here to enable + // efficient autovectorization + typedef float LONG_SAMPLETYPE; + + #ifdef SOUNDTOUCH_ALLOW_X86_OPTIMIZATIONS + // Allow SSE optimizations + #define SOUNDTOUCH_ALLOW_SSE 1 + #endif + + #endif // SOUNDTOUCH_INTEGER_SAMPLES + + #if ((SOUNDTOUCH_ALLOW_SSE) || (__SSE__) || (SOUNDTOUCH_USE_NEON)) + #if SOUNDTOUCH_ALLOW_NONEXACT_SIMD_OPTIMIZATION + #define ST_SIMD_AVOID_UNALIGNED + #endif + #endif + +} + +// define ST_NO_EXCEPTION_HANDLING switch to disable throwing std exceptions: +#define ST_NO_EXCEPTION_HANDLING 1 +#ifdef ST_NO_EXCEPTION_HANDLING + // 
Exceptions disabled. Throw asserts instead if enabled. + #include <assert.h> + #define ST_THROW_RT_ERROR(x) {assert((const char *)x);} +#else + // use c++ standard exceptions + #include <stdexcept> + #include <string> + #define ST_THROW_RT_ERROR(x) {throw std::runtime_error(x);} +#endif + +// When this #define is active, eliminates a clicking sound when the "rate" or "pitch" +// parameter setting crosses from value <1 to >=1 or vice versa during processing. +// Default is off as such crossover is untypical case and involves a slight sound +// quality compromise. +//#define SOUNDTOUCH_PREVENT_CLICK_AT_RATE_CROSSOVER 1 + +#endif diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/SoundTouch.h b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/SoundTouch.h new file mode 100644 index 0000000..90a9a74 --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/SoundTouch.h @@ -0,0 +1,349 @@ +////////////////////////////////////////////////////////////////////////////// +/// +/// SoundTouch - main class for tempo/pitch/rate adjusting routines. +/// +/// Notes: +/// - Initialize the SoundTouch object instance by setting up the sound stream +/// parameters with functions 'setSampleRate' and 'setChannels', then set +/// desired tempo/pitch/rate settings with the corresponding functions. +/// +/// - The SoundTouch class behaves like a first-in-first-out pipeline: The +/// samples that are to be processed are fed into one of the pipe by calling +/// function 'putSamples', while the ready processed samples can be read +/// from the other end of the pipeline with function 'receiveSamples'. +/// +/// - The SoundTouch processing classes require certain sized 'batches' of +/// samples in order to process the sound. 
For this reason the classes buffer +/// incoming samples until there are enough of samples available for +/// processing, then they carry out the processing step and consequently +/// make the processed samples available for outputting. +/// +/// - For the above reason, the processing routines introduce a certain +/// 'latency' between the input and output, so that the samples input to +/// SoundTouch may not be immediately available in the output, and neither +/// the amount of outputtable samples may not immediately be in direct +/// relationship with the amount of previously input samples. +/// +/// - The tempo/pitch/rate control parameters can be altered during processing. +/// Please notice though that they aren't currently protected by semaphores, +/// so in multi-thread application external semaphore protection may be +/// required. +/// +/// - This class utilizes classes 'TDStretch' for tempo change (without modifying +/// pitch) and 'RateTransposer' for changing the playback rate (that is, both +/// tempo and pitch in the same ratio) of the sound. The third available control +/// 'pitch' (change pitch but maintain tempo) is produced by a combination of +/// combining the two other controls. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef SoundTouch_H +#define SoundTouch_H + +#include "FIFOSamplePipe.h" +#include "STTypes.h" +#include "st_export.h" + +namespace liteav_soundtouch +{ + +/// Soundtouch library version string +#define SOUNDTOUCH_VERSION "2.2" + +/// SoundTouch library version id +#define SOUNDTOUCH_VERSION_ID (20200) + +// +// Available setting IDs for the 'setSetting' & 'get_setting' functions: + +/// Enable/disable anti-alias filter in pitch transposer (0 = disable) +#define SETTING_USE_AA_FILTER 0 + +/// Pitch transposer anti-alias filter length (8 .. 128 taps, default = 32) +#define SETTING_AA_FILTER_LENGTH 1 + +/// Enable/disable quick seeking algorithm in tempo changer routine +/// (enabling quick seeking lowers CPU utilization but causes a minor sound +/// quality compromising) +#define SETTING_USE_QUICKSEEK 2 + +/// Time-stretch algorithm single processing sequence length in milliseconds. This determines +/// to how long sequences the original sound is chopped in the time-stretch algorithm. +/// See "STTypes.h" or README for more information. +#define SETTING_SEQUENCE_MS 3 + +/// Time-stretch algorithm seeking window length in milliseconds for algorithm that finds the +/// best possible overlapping location. This determines from how wide window the algorithm +/// may look for an optimal joining location when mixing the sound sequences back together. +/// See "STTypes.h" or README for more information. 
+#define SETTING_SEEKWINDOW_MS 4 + +/// Time-stretch algorithm overlap length in milliseconds. When the chopped sound sequences +/// are mixed back together, to form a continuous sound stream, this parameter defines over +/// how long period the two consecutive sequences are let to overlap each other. +/// See "STTypes.h" or README for more information. +#define SETTING_OVERLAP_MS 5 + + +/// Call "getSetting" with this ID to query processing sequence size in samples. +/// This value gives approximate value of how many input samples you'll need to +/// feed into SoundTouch after initial buffering to get out a new batch of +/// output samples. +/// +/// This value does not include initial buffering at beginning of a new processing +/// stream, use SETTING_INITIAL_LATENCY to get the initial buffering size. +/// +/// Notices: +/// - This is read-only parameter, i.e. setSetting ignores this parameter +/// - This parameter value is not constant but change depending on +/// tempo/pitch/rate/samplerate settings. +#define SETTING_NOMINAL_INPUT_SEQUENCE 6 + + +/// Call "getSetting" with this ID to query nominal average processing output +/// size in samples. This value tells approcimate value how many output samples +/// SoundTouch outputs once it does DSP processing run for a batch of input samples. +/// +/// Notices: +/// - This is read-only parameter, i.e. setSetting ignores this parameter +/// - This parameter value is not constant but change depending on +/// tempo/pitch/rate/samplerate settings. +#define SETTING_NOMINAL_OUTPUT_SEQUENCE 7 + + +/// Call "getSetting" with this ID to query initial processing latency, i.e. +/// approx. how many samples you'll need to enter to SoundTouch pipeline before +/// you can expect to get first batch of ready output samples out. +/// +/// After the first output batch, you can then expect to get approx. 
+/// SETTING_NOMINAL_OUTPUT_SEQUENCE ready samples out for every +/// SETTING_NOMINAL_INPUT_SEQUENCE samples that you enter into SoundTouch. +/// +/// Example: +/// processing with parameter -tempo=5 +/// => initial latency = 5509 samples +/// input sequence = 4167 samples +/// output sequence = 3969 samples +/// +/// Accordingly, you can expect to feed in approx. 5509 samples at beginning of +/// the stream, and then you'll get out the first 3969 samples. After that, for +/// every approx. 4167 samples that you'll put in, you'll receive again approx. +/// 3969 samples out. +/// +/// This also means that average latency during stream processing is +/// INITIAL_LATENCY-OUTPUT_SEQUENCE/2, in the above example case 5509-3969/2 +/// = 3524 samples +/// +/// Notices: +/// - This is read-only parameter, i.e. setSetting ignores this parameter +/// - This parameter value is not constant but change depending on +/// tempo/pitch/rate/samplerate settings. +#define SETTING_INITIAL_LATENCY 8 + + +class SOUNDTOUCH_API SoundTouch : public FIFOProcessor +{ +private: + /// Rate transposer class instance + class RateTransposer *pRateTransposer; + + /// Time-stretch class instance + class TDStretch *pTDStretch; + + /// Virtual pitch parameter. Effective rate & tempo are calculated from these parameters. + double virtualRate; + + /// Virtual pitch parameter. Effective rate & tempo are calculated from these parameters. + double virtualTempo; + + /// Virtual pitch parameter. Effective rate & tempo are calculated from these parameters. + double virtualPitch; + + /// Flag: Has sample rate been set? + bool bSrateSet; + + /// Accumulator for how many samples in total will be expected as output vs. samples put in, + /// considering current processing settings. 
+ double samplesExpectedOut; + + /// Accumulator for how many samples in total have been read out from the processing so far + long samplesOutput; + + /// Calculates effective rate & tempo valuescfrom 'virtualRate', 'virtualTempo' and + /// 'virtualPitch' parameters. + void calcEffectiveRateAndTempo(); + +protected : + /// Number of channels + uint channels; + + /// Effective 'rate' value calculated from 'virtualRate', 'virtualTempo' and 'virtualPitch' + double rate; + + /// Effective 'tempo' value calculated from 'virtualRate', 'virtualTempo' and 'virtualPitch' + double tempo; + +public: + SoundTouch(); + virtual ~SoundTouch(); + + /// Get SoundTouch library version string + static const char *getVersionString(); + + /// Get SoundTouch library version Id + static uint getVersionId(); + + /// Sets new rate control value. Normal rate = 1.0, smaller values + /// represent slower rate, larger faster rates. + void setRate(double newRate); + + /// Sets new tempo control value. Normal tempo = 1.0, smaller values + /// represent slower tempo, larger faster tempo. + void setTempo(double newTempo); + + /// Sets new rate control value as a difference in percents compared + /// to the original rate (-50 .. +100 %) + void setRateChange(double newRate); + + /// Sets new tempo control value as a difference in percents compared + /// to the original tempo (-50 .. +100 %) + void setTempoChange(double newTempo); + + /// Sets new pitch control value. Original pitch = 1.0, smaller values + /// represent lower pitches, larger values higher pitch. + void setPitch(double newPitch); + + /// Sets pitch change in octaves compared to the original pitch + /// (-1.00 .. +1.00) + void setPitchOctaves(double newPitch); + + /// Sets pitch change in semi-tones compared to the original pitch + /// (-12 .. 
+12) + void setPitchSemiTones(int newPitch); + void setPitchSemiTones(double newPitch); + + /// Sets the number of channels, 1 = mono, 2 = stereo + void setChannels(uint numChannels); + + /// Sets sample rate. + void setSampleRate(uint srate); + + /// Get ratio between input and output audio durations, useful for calculating + /// processed output duration: if you'll process a stream of N samples, then + /// you can expect to get out N * getInputOutputSampleRatio() samples. + /// + /// This ratio will give accurate target duration ratio for a full audio track, + /// given that the the whole track is processed with same processing parameters. + /// + /// If this ratio is applied to calculate intermediate offsets inside a processing + /// stream, then this ratio is approximate and can deviate +- some tens of milliseconds + /// from ideal offset, yet by end of the audio stream the duration ratio will become + /// exact. + /// + /// Example: if processing with parameters "-tempo=15 -pitch=-3", the function + /// will return value 0.8695652... Now, if processing an audio stream whose duration + /// is exactly one million audio samples, then you can expect the processed + /// output duration be 0.869565 * 1000000 = 869565 samples. + double getInputOutputSampleRatio(); + + /// Flushes the last samples from the processing pipeline to the output. + /// Clears also the internal processing buffers. + // + /// Note: This function is meant for extracting the last samples of a sound + /// stream. This function may introduce additional blank samples in the end + /// of the sound stream, and thus it's not recommended to call this function + /// in the middle of a sound stream. + void flush(); + + /// Adds 'numSamples' pcs of samples from the 'samples' memory position into + /// the input of the object. Notice that sample rate _has_to_ be set before + /// calling this function, otherwise throws a runtime_error exception. 
+ virtual void putSamples( + const SAMPLETYPE *samples, ///< Pointer to sample buffer. + uint numSamples ///< Number of samples in buffer. Notice + ///< that in case of stereo-sound a single sample + ///< contains data for both channels. + ); + + /// Output samples from beginning of the sample buffer. Copies requested samples to + /// output buffer and removes them from the sample buffer. If there are less than + /// 'numsample' samples in the buffer, returns all that available. + /// + /// \return Number of samples returned. + virtual uint receiveSamples(SAMPLETYPE *output, ///< Buffer where to copy output samples. + uint maxSamples ///< How many samples to receive at max. + ); + + /// Adjusts book-keeping so that given number of samples are removed from beginning of the + /// sample buffer without copying them anywhere. + /// + /// Used to reduce the number of samples in the buffer when accessing the sample buffer directly + /// with 'ptrBegin' function. + virtual uint receiveSamples(uint maxSamples ///< Remove this many samples from the beginning of pipe. + ); + + /// Clears all the samples in the object's output and internal processing + /// buffers. + virtual void clear(); + + /// Changes a setting controlling the processing system behaviour. See the + /// 'SETTING_...' defines for available setting ID's. + /// + /// \return 'true' if the setting was successfully changed + bool setSetting(int settingId, ///< Setting ID number. see SETTING_... defines. + int value ///< New setting value. + ); + + /// Reads a setting controlling the processing system behaviour. See the + /// 'SETTING_...' defines for available setting ID's. + /// + /// \return the setting value. + int getSetting(int settingId ///< Setting ID number, see SETTING_... defines. + ) const; + + /// Returns number of samples currently unprocessed. 
+ virtual uint numUnprocessedSamples() const; + + /// Return number of channels + uint numChannels() const + { + return channels; + } + + /// Other handy functions that are implemented in the ancestor classes (see + /// classes 'FIFOProcessor' and 'FIFOSamplePipe') + /// + /// - receiveSamples() : Use this function to receive 'ready' processed samples from SoundTouch. + /// - numSamples() : Get number of 'ready' samples that can be received with + /// function 'receiveSamples()' + /// - isEmpty() : Returns nonzero if there aren't any 'ready' samples. + /// - clear() : Clears all samples from ready/processing buffers. +}; + +} +#endif diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/TXSoundTouch.h b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/TXSoundTouch.h new file mode 100644 index 0000000..6926ff6 --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/TXSoundTouch.h @@ -0,0 +1,11 @@ +/* + * Copyright (c) 2022 Tencent. All Rights Reserved. + * + */ + +#import <TXSoundTouch/BPMDetect.h> +#import <TXSoundTouch/FIFOSampleBuffer.h> +#import <TXSoundTouch/FIFOSamplePipe.h> +#import <TXSoundTouch/soundtouch_config.h> +#import <TXSoundTouch/SoundTouch.h> +#import <TXSoundTouch/STTypes.h> diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/soundtouch_config.h b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/soundtouch_config.h new file mode 100644 index 0000000..a8b897e --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Headers/soundtouch_config.h @@ -0,0 +1,105 @@ +/* include/soundtouch_config.h. Generated from soundtouch_config.h.in by configure. */ +/* include/soundtouch_config.h.in. Generated from configure.ac by autoheader. 
*/ + +#ifndef SoundTouchConfig_H +#define SoundTouchConfig_H + +namespace liteav_soundtouch +{ + +/* Never allow x86 optimizations in iOS simulator build */ +#define ALLOW_X86_OPTIMIZATIONS 0 + +/* Use Integer as Sample type */ +#define INTEGER_SAMPLES 1 +#define SOUNDTOUCH_INTEGER_SAMPLES 1 + +/* Use Float as Sample type */ +//#define FLOAT_SAMPLES 1 + +/* Define to 1 if you have the <dlfcn.h> header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you have the <inttypes.h> header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the `m' library (-lm). */ +#define HAVE_LIBM 1 + +/* Define to 1 if your system has a GNU libc compatible `malloc' function, and + to 0 otherwise. */ +#define HAVE_MALLOC 1 + +/* Define to 1 if you have the <memory.h> header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the <stdint.h> header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the <stdlib.h> header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the <strings.h> header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the <string.h> header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the <sys/stat.h> header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the <sys/types.h> header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the <unistd.h> header file. */ +#define HAVE_UNISTD_H 1 + +/* Use Integer as Sample type */ +/* #undef INTEGER_SAMPLES */ + +/* Define to the sub-directory in which libtool stores uninstalled libraries. + */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "soundtouch" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "http://www.surina.net/soundtouch" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "SoundTouch" + +/* Define to the full name and version of this package. 
*/ +#define PACKAGE_STRING "SoundTouch 1.4.0" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "soundtouch" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "1.4.0" + +/* Define as the return type of signal handlers (`int' or `void'). */ +#define RETSIGTYPE void + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Version number of package */ +#define VERSION "1.4.0" + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ + +/* Define to `__inline__' or `__inline' if that's what the C compiler + calls it, or to nothing if 'inline' is not supported under any name. */ +#ifndef __cplusplus +/* #undef inline */ +#endif + +/* Define to rpl_malloc if the replacement function should be used. */ +/* #undef malloc */ + +} + +#endif // SoundTouchConfig_H diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Info.plist b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Info.plist new file mode 100644 index 0000000..41b569c Binary files /dev/null and b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Info.plist differ diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Modules/module.modulemap b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Modules/module.modulemap new file mode 100644 index 0000000..b269309 --- /dev/null +++ b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/Modules/module.modulemap @@ -0,0 +1,6 @@ +framework module TXSoundTouch { + umbrella header "TXSoundTouch.h" + + export * + module * { export * } +} diff --git a/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/TXSoundTouch b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/TXSoundTouch new file mode 
100755 index 0000000..f083c0d Binary files /dev/null and b/HHVDoctorSDK/TXSoundTouch.xcframework/ios-x86_64-simulator/TXSoundTouch.framework/TXSoundTouch differ diff --git a/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/arm64-apple-ios.swiftsourceinfo b/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/arm64-apple-ios.swiftsourceinfo index 865db76..afd89dc 100644 Binary files a/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/arm64-apple-ios.swiftsourceinfo and b/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/arm64-apple-ios.swiftsourceinfo differ diff --git a/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/arm64.swiftsourceinfo b/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/arm64.swiftsourceinfo index 865db76..afd89dc 100644 Binary files a/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/arm64.swiftsourceinfo and b/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/arm64.swiftsourceinfo differ diff --git a/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/x86_64-apple-ios-simulator.swiftsourceinfo b/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/x86_64-apple-ios-simulator.swiftsourceinfo index aa78e9d..c0775c5 100644 Binary files a/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/x86_64-apple-ios-simulator.swiftsourceinfo and b/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/x86_64-apple-ios-simulator.swiftsourceinfo differ diff --git a/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/x86_64.swiftsourceinfo b/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/x86_64.swiftsourceinfo index aa78e9d..c0775c5 100644 Binary files 
a/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/x86_64.swiftsourceinfo and b/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/Project/x86_64.swiftsourceinfo differ diff --git a/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/arm64-apple-ios.swiftmodule b/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/arm64-apple-ios.swiftmodule index 826555f..4d55b47 100644 Binary files a/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/arm64-apple-ios.swiftmodule and b/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/arm64-apple-ios.swiftmodule differ diff --git a/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/arm64.swiftmodule b/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/arm64.swiftmodule index 826555f..4d55b47 100644 Binary files a/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/arm64.swiftmodule and b/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/arm64.swiftmodule differ diff --git a/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/x86_64-apple-ios-simulator.swiftmodule b/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/x86_64-apple-ios-simulator.swiftmodule index 0666c1e..2c80c80 100644 Binary files a/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/x86_64-apple-ios-simulator.swiftmodule and b/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/x86_64-apple-ios-simulator.swiftmodule differ diff --git a/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/x86_64.swiftmodule b/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/x86_64.swiftmodule index 0666c1e..2c80c80 100644 Binary files a/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/x86_64.swiftmodule and 
b/HHVDoctorSDK/hhVDoctorSDK.framework/Modules/hhVDoctorSDK.swiftmodule/x86_64.swiftmodule differ diff --git a/HHVDoctorSDK/hhVDoctorSDK.framework/hhVDoctorSDK b/HHVDoctorSDK/hhVDoctorSDK.framework/hhVDoctorSDK index ca99966..584404b 100644 Binary files a/HHVDoctorSDK/hhVDoctorSDK.framework/hhVDoctorSDK and b/HHVDoctorSDK/hhVDoctorSDK.framework/hhVDoctorSDK differ -- libgit2 0.24.0