diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 734dab64..67bf9f7c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,7 +17,7 @@ jobs: lint: strategy: matrix: - platform: [ ubuntu, windows ] + platform: [ubuntu, windows] runs-on: ${{ matrix.platform }}-latest steps: - name: Checkout @@ -35,7 +35,7 @@ jobs: test: strategy: matrix: - platform: [ ubuntu, windows ] + platform: [ubuntu, windows] runs-on: ${{ matrix.platform }}-latest steps: - name: Checkout @@ -50,7 +50,7 @@ jobs: build-library: strategy: matrix: - platform: [ ubuntu, windows ] + platform: [ubuntu, windows] runs-on: ${{ matrix.platform }}-latest steps: - name: Checkout @@ -65,8 +65,8 @@ jobs: test-android: strategy: matrix: - newArch: [ true, false ] - runs-on: macos-latest + newArch: [true, false] + runs-on: macos-12 env: TURBO_CACHE_DIR: .turbo/android ORG_GRADLE_PROJECT_newArchEnabled: ${{ matrix.newArch }} @@ -146,7 +146,7 @@ jobs: test-ios: strategy: matrix: - newArch: [ 1, 0 ] + newArch: [1, 0] runs-on: macos-latest env: TURBO_CACHE_DIR: .turbo/ios @@ -206,7 +206,7 @@ jobs: - name: Build example for iOS run: | - yarn turbo run detox:ios --cache-dir="${{ env.TURBO_CACHE_DIR }}" --force=true + yarn turbo run detox:ios --cache-dir="${{ env.TURBO_CACHE_DIR }}" - uses: futureware-tech/simulator-action@v1 with: diff --git a/android/build.gradle b/android/build.gradle index 560abc6c..c4f250aa 100644 --- a/android/build.gradle +++ b/android/build.gradle @@ -164,9 +164,9 @@ dependencies { //noinspection GradleDynamicVersion implementation "com.facebook.react:react-native:+" implementation fileTree(include: ['*.jar', '*.aar'], dir: 'libs') - api 'io.agora.rtc:full-sdk:4.3.0' - implementation 'io.agora.rtc:full-screen-sharing:4.3.0' - implementation 'io.agora.rtc:iris-rtc:4.3.0-build.2' + api 'io.agora.rtc:full-sdk:4.3.1' + implementation 'io.agora.rtc:full-screen-sharing:4.3.1' + implementation 'io.agora.rtc:iris-rtc:4.3.1-build.1' } if (isNewArchitectureEnabled()) { diff --git a/example/.detoxrc.js b/example/.detoxrc.js index ee94453f..0b2135b8 100644 --- a/example/.detoxrc.js +++ b/example/.detoxrc.js @@ -6,7 +6,7 @@ module.exports = { config: 'e2e/jest.config.js' }, jest: { - setupTimeout: 1200000 + setupTimeout: 2100000 } }, apps: { diff --git a/example/ios/Podfile b/example/ios/Podfile index 323f004f..76e338e6 100644 --- a/example/ios/Podfile +++ b/example/ios/Podfile @@ -62,5 +62,5 @@ target 'AgoraRtcNgExample' do end target 'ScreenShare' do - pod 'AgoraRtcEngine_iOS', '4.3.0' + pod 'AgoraRtcEngine_iOS', '4.3.1' end diff --git a/example/ios/Podfile.lock b/example/ios/Podfile.lock index b29a3e96..7fc7a031 100644 --- a/example/ios/Podfile.lock +++ b/example/ios/Podfile.lock @@ -1,36 +1,40 @@ PODS: - - AgoraIrisRTC_iOS (4.3.0-build.2) - - AgoraRtcEngine_iOS (4.3.0): - - AgoraRtcEngine_iOS/AIAEC (= 4.3.0) - - AgoraRtcEngine_iOS/AINS (= 4.3.0) - - AgoraRtcEngine_iOS/AudioBeauty (= 4.3.0) - - AgoraRtcEngine_iOS/ClearVision (= 4.3.0) - - AgoraRtcEngine_iOS/ContentInspect (= 4.3.0) - - AgoraRtcEngine_iOS/FaceCapture (= 4.3.0) - - AgoraRtcEngine_iOS/FaceDetection (= 4.3.0) - - AgoraRtcEngine_iOS/ReplayKit (= 4.3.0) - - AgoraRtcEngine_iOS/RtcBasic (= 4.3.0) - - AgoraRtcEngine_iOS/SpatialAudio (= 4.3.0) - - AgoraRtcEngine_iOS/VideoAv1CodecDec (= 4.3.0) - - AgoraRtcEngine_iOS/VideoCodecDec (= 4.3.0) - - AgoraRtcEngine_iOS/VideoCodecEnc (= 4.3.0) - - AgoraRtcEngine_iOS/VirtualBackground (= 4.3.0) - - AgoraRtcEngine_iOS/VQA (= 4.3.0) - - AgoraRtcEngine_iOS/AIAEC (4.3.0) - - 
AgoraRtcEngine_iOS/AINS (4.3.0) - - AgoraRtcEngine_iOS/AudioBeauty (4.3.0) - - AgoraRtcEngine_iOS/ClearVision (4.3.0) - - AgoraRtcEngine_iOS/ContentInspect (4.3.0) - - AgoraRtcEngine_iOS/FaceCapture (4.3.0) - - AgoraRtcEngine_iOS/FaceDetection (4.3.0) - - AgoraRtcEngine_iOS/ReplayKit (4.3.0) - - AgoraRtcEngine_iOS/RtcBasic (4.3.0) - - AgoraRtcEngine_iOS/SpatialAudio (4.3.0) - - AgoraRtcEngine_iOS/VideoAv1CodecDec (4.3.0) - - AgoraRtcEngine_iOS/VideoCodecDec (4.3.0) - - AgoraRtcEngine_iOS/VideoCodecEnc (4.3.0) - - AgoraRtcEngine_iOS/VirtualBackground (4.3.0) - - AgoraRtcEngine_iOS/VQA (4.3.0) + - AgoraIrisRTC_iOS (4.3.1-build.1) + - AgoraRtcEngine_iOS (4.3.1): + - AgoraRtcEngine_iOS/AIAEC (= 4.3.1) + - AgoraRtcEngine_iOS/AINS (= 4.3.1) + - AgoraRtcEngine_iOS/AudioBeauty (= 4.3.1) + - AgoraRtcEngine_iOS/ClearVision (= 4.3.1) + - AgoraRtcEngine_iOS/ContentInspect (= 4.3.1) + - AgoraRtcEngine_iOS/FaceCapture (= 4.3.1) + - AgoraRtcEngine_iOS/FaceDetection (= 4.3.1) + - AgoraRtcEngine_iOS/LipSync (= 4.3.1) + - AgoraRtcEngine_iOS/ReplayKit (= 4.3.1) + - AgoraRtcEngine_iOS/RtcBasic (= 4.3.1) + - AgoraRtcEngine_iOS/SpatialAudio (= 4.3.1) + - AgoraRtcEngine_iOS/VideoAv1CodecDec (= 4.3.1) + - AgoraRtcEngine_iOS/VideoAv1CodecEnc (= 4.3.1) + - AgoraRtcEngine_iOS/VideoCodecDec (= 4.3.1) + - AgoraRtcEngine_iOS/VideoCodecEnc (= 4.3.1) + - AgoraRtcEngine_iOS/VirtualBackground (= 4.3.1) + - AgoraRtcEngine_iOS/VQA (= 4.3.1) + - AgoraRtcEngine_iOS/AIAEC (4.3.1) + - AgoraRtcEngine_iOS/AINS (4.3.1) + - AgoraRtcEngine_iOS/AudioBeauty (4.3.1) + - AgoraRtcEngine_iOS/ClearVision (4.3.1) + - AgoraRtcEngine_iOS/ContentInspect (4.3.1) + - AgoraRtcEngine_iOS/FaceCapture (4.3.1) + - AgoraRtcEngine_iOS/FaceDetection (4.3.1) + - AgoraRtcEngine_iOS/LipSync (4.3.1) + - AgoraRtcEngine_iOS/ReplayKit (4.3.1) + - AgoraRtcEngine_iOS/RtcBasic (4.3.1) + - AgoraRtcEngine_iOS/SpatialAudio (4.3.1) + - AgoraRtcEngine_iOS/VideoAv1CodecDec (4.3.1) + - AgoraRtcEngine_iOS/VideoAv1CodecEnc (4.3.1) + - AgoraRtcEngine_iOS/VideoCodecDec (4.3.1) + - AgoraRtcEngine_iOS/VideoCodecEnc (4.3.1) + - AgoraRtcEngine_iOS/VirtualBackground (4.3.1) + - AgoraRtcEngine_iOS/VQA (4.3.1) - boost (1.76.0) - CocoaAsyncSocket (7.6.5) - DoubleConversion (1.1.6) @@ -407,9 +411,9 @@ PODS: - React-jsinspector (0.72.10) - React-logger (0.72.10): - glog - - react-native-agora (4.2.6): - - AgoraIrisRTC_iOS (= 4.3.0-build.2) - - AgoraRtcEngine_iOS (= 4.3.0) + - react-native-agora (4.3.0): + - AgoraIrisRTC_iOS (= 4.3.1-build.1) + - AgoraRtcEngine_iOS (= 4.3.1) - RCT-Folly (= 2021.07.22.00) - React-Core - react-native-agora-rawdata (0.1.0): @@ -549,7 +553,7 @@ PODS: - Yoga (~> 1.14) DEPENDENCIES: - - AgoraRtcEngine_iOS (= 4.3.0) + - AgoraRtcEngine_iOS (= 4.3.1) - boost (from `../node_modules/react-native/third-party-podspecs/boost.podspec`) - DoubleConversion (from `../node_modules/react-native/third-party-podspecs/DoubleConversion.podspec`) - FBLazyVector (from `../node_modules/react-native/Libraries/FBLazyVector`) @@ -747,8 +751,8 @@ EXTERNAL SOURCES: :path: "../node_modules/react-native/ReactCommon/yoga" SPEC CHECKSUMS: - AgoraIrisRTC_iOS: 2caf892fa827777fe43b6ac7d12e9b42579eb865 - AgoraRtcEngine_iOS: 267c0980c1fb97e056d05b850f8629b05b6e467a + AgoraIrisRTC_iOS: 7710d853202eca4900c2916aefc44abd5a139d4e + AgoraRtcEngine_iOS: f64be00fdda786bb7edd84ab461c31b7b4c93534 boost: 7dcd2de282d72e344012f7d6564d024930a6a440 CocoaAsyncSocket: 065fd1e645c7abab64f7a6a2007a48038fdc6a99 DoubleConversion: 5189b271737e1565bdce30deb4a08d647e3f5f54 @@ -782,7 +786,7 @@ SPEC CHECKSUMS: 
React-jsiexecutor: 45ef2ec6dcde31b90469175ec76ddac77b91dfc3
  React-jsinspector: de0198127395fec3058140a20c045167f761bb16
  React-logger: dc3a2b174d79c2da635059212747d8d929b54e06
-  react-native-agora: d485857dafe397d26f2ba2355b4b7db98508bc17
+  react-native-agora: 4f85b3cdfa151e91df4953d4b9bf4fdbb3d527ff
  react-native-agora-rawdata: 097895cdccd8fcf3cff5dffe23372f5d3c89fd31
  react-native-image-tools: 88218449791389bbf550a2c475a3b564c8233c8b
  react-native-safe-area-context: 7aa8e6d9d0f3100a820efb1a98af68aa747f9284
@@ -814,6 +818,6 @@ SPEC CHECKSUMS:
  Yoga: d0003f849d2b5224c072cef6568b540d8bb15cd3
  YogaKit: f782866e155069a2cca2517aafea43200b01fd5a

-PODFILE CHECKSUM: 130d9ef97e0ac413532866c101953098003f33da
+PODFILE CHECKSUM: a1a891aa26be28bd4953131bc93b8f452d3aff23

COCOAPODS: 1.13.0
diff --git a/react-native-agora.podspec b/react-native-agora.podspec
index f3609819..ccff16d5 100644
--- a/react-native-agora.podspec
+++ b/react-native-agora.podspec
@@ -40,8 +40,8 @@ Pod::Spec.new do |s|
     end
   end

-  s.dependency 'AgoraRtcEngine_iOS', '4.3.0'
-  s.dependency 'AgoraIrisRTC_iOS', '4.3.0-build.2'
+  s.dependency 'AgoraRtcEngine_iOS', '4.3.1'
+  s.dependency 'AgoraIrisRTC_iOS', '4.3.1-build.1'
   s.libraries = 'stdc++'
   s.framework = 'ReplayKit'
 end
diff --git a/scripts/mirror.sh b/scripts/mirror.sh
index 3409ae4a..2282ebcf 100644
--- a/scripts/mirror.sh
+++ b/scripts/mirror.sh
@@ -5,11 +5,17 @@ MY_PATH=$(realpath $(dirname "$0"))
 PROJECT_ROOT=$(realpath ${MY_PATH}/..)

 LINE="npmRegistryServer: https://registry.npmmirror.com"
-if ! grep -q "$LINE" ${PROJECT_ROOT}/.yarnrc.yml; then
-  sed -i "" "1i\\
-$LINE
-" ${PROJECT_ROOT}/.yarnrc.yml
-fi
-
-sed -i "" 's#"react-native-agora-rawdata": "github:AgoraLibrary/react-native-agora-rawdata"#"react-native-agora-rawdata": "git+https://gitee.com/agoraio-community/react-native-agora-rawdata.git"#g' ${PROJECT_ROOT}/example/package.json
-sed -i "" 's#"react-native-image-tool": "github:LichKing-2234/react-native-image-tools"#"react-native-image-tool": "git+https://gitee.com/agoraio-community/react-native-image-tools.git"#g' ${PROJECT_ROOT}/example/package.json
+# Braces group the fallback: prepend the registry line (and move the temp file
+# into place) only when the line is missing. Without them, `a || b && c`
+# parses as `(a || b) && c`, so `mv` would also run when grep already matched.
+grep -q "$LINE" ${PROJECT_ROOT}/.yarnrc.yml || { echo -e "$LINE\n$(cat ${PROJECT_ROOT}/.yarnrc.yml)" > ${PROJECT_ROOT}/.yarnrc.yml.tmp && mv ${PROJECT_ROOT}/.yarnrc.yml.tmp ${PROJECT_ROOT}/.yarnrc.yml; }
+
+echo ".yarnrc.yml updated"
+
+old1='"react-native-agora-rawdata": "github:AgoraLibrary/react-native-agora-rawdata"'
+new1='"react-native-agora-rawdata": "git+https://gitee.com/agoraio-community/react-native-agora-rawdata.git"'
+
+old2='"react-native-image-tool": "github:LichKing-2234/react-native-image-tools"'
+new2='"react-native-image-tool": "git+https://gitee.com/agoraio-community/react-native-image-tools.git"'
+
+sed "s#${old1}#${new1}#g" ${PROJECT_ROOT}/example/package.json > tmp && mv tmp ${PROJECT_ROOT}/example/package.json
+sed "s#${old2}#${new2}#g" ${PROJECT_ROOT}/example/package.json > tmp && mv tmp ${PROJECT_ROOT}/example/package.json
+
+echo "example/package.json updated"
diff --git a/scripts/terra/config/impl_config.yaml b/scripts/terra/config/impl_config.yaml
index 6a9ef9c2..b8372dd8 100644
--- a/scripts/terra/config/impl_config.yaml
+++ b/scripts/terra/config/impl_config.yaml
@@ -2,7 +2,7 @@ parsers:
   - name: RTCParser
     package: '@agoraio-extensions/terra_shared_configs'
     args:
-      sdkVersion: 4.3.0
+      sdkVersion: 4.3.1
       FixEnumConstantParser:
         skipCalEnumValue: true
diff --git a/scripts/terra/config/types_config.yaml b/scripts/terra/config/types_config.yaml
index 202af865..f6677480 100644
---
a/scripts/terra/config/types_config.yaml +++ b/scripts/terra/config/types_config.yaml @@ -2,7 +2,7 @@ parsers: - name: RTCParser package: '@agoraio-extensions/terra_shared_configs' args: - sdkVersion: 4.3.0 + sdkVersion: 4.3.1 FixEnumConstantParser: skipCalEnumValue: true diff --git a/scripts/terra/generate-code.sh b/scripts/terra/generate-code.sh index 38213683..b771f645 100644 --- a/scripts/terra/generate-code.sh +++ b/scripts/terra/generate-code.sh @@ -16,5 +16,3 @@ npm exec terra -- run \ cd ${PROJECT_ROOT} yarn build:ts-interface - -yarn lint --fix diff --git a/scripts/terra/impl.ts b/scripts/terra/impl.ts index 54ce52b5..ce7a1e2f 100644 --- a/scripts/terra/impl.ts +++ b/scripts/terra/impl.ts @@ -54,7 +54,7 @@ type ClazzMethodUserData = IrisApiIdParserUserData & { }; export function impl(parseResult: ParseResult) { - let preParseResult = deepClone(parseResult, ['parent']); + let preParseResult = deepClone(parseResult, ['parent', 'outVariable']); let cxxfiles = parseResult.nodes as CXXFile[]; //only render file which has clazz let view = cxxfiles diff --git a/src/AgoraBase.ts b/src/AgoraBase.ts index 45277def..f14afe15 100644 --- a/src/AgoraBase.ts +++ b/src/AgoraBase.ts @@ -319,13 +319,17 @@ export enum ErrorCodeType { */ ErrSetClientRoleNotAuthorized = 119, /** - * 120: Decryption fails. The user might have entered an incorrect password to join the channel. Check the entered password, or tell the user to try rejoining the channel. + * 120: Media streams decryption fails. The user might use an incorrect password to join the channel. Check the entered password, or tell the user to try rejoining the channel. */ ErrDecryptionFailed = 120, /** * 121: The user ID is invalid. */ ErrInvalidUserId = 121, + /** + * 122: Data streams decryption fails. The user might use an incorrect password to join the channel. Check the entered password, or tell the user to try rejoining the channel. + */ + ErrDatastreamDecryptionFailed = 122, /** * 123: The user is banned from the server. */ @@ -888,6 +892,28 @@ export enum VideoCodecType { VideoCodecGenericJpeg = 20, } +/** + * The camera focal length types. + */ +export enum CameraFocalLengthType { + /** + * 0: (Default) Standard lens. + */ + CameraFocalLengthDefault = 0, + /** + * 1: Wide-angle lens. + */ + CameraFocalLengthWideAngle = 1, + /** + * 2: Ultra-wide-angle lens. + */ + CameraFocalLengthUltraWide = 2, + /** + * 3: (For iOS only) Telephoto lens. + */ + CameraFocalLengthTelephoto = 3, +} + /** * @ignore */ @@ -1196,6 +1222,10 @@ export class EncodedVideoFrameInfo { * The type of video streams. See VideoStreamType. */ streamType?: VideoStreamType; + /** + * @ignore + */ + presentationMs?: number; } /** @@ -1322,6 +1352,20 @@ export class CodecCapInfo { codecLevels?: CodecCapLevels; } +/** + * Focal length information supported by the camera, including the camera direction and focal length type. + */ +export class FocalLengthInfo { + /** + * The camera direction. See CameraDirection. + */ + cameraDirection?: number; + /** + * The focal length type. See CameraFocalLengthType. + */ + focalLengthType?: CameraFocalLengthType; +} + /** * Video encoder configurations. */ @@ -1403,11 +1447,11 @@ export enum SimulcastStreamMode { */ export class SimulcastStreamConfig { /** - * The video dimension. See VideoDimensions. The default value is 160 × 120. + * The video dimension. See VideoDimensions. The default value is 50% of the high-quality video stream. 
*/ dimensions?: VideoDimensions; /** - * Video receive bitrate (Kbps), represented by an instantaneous value. The default value is 65. + * Video receive bitrate (Kbps), represented by an instantaneous value. This parameter does not need to be set. The SDK automatically matches the most suitable bitrate based on the video resolution and frame rate you set. */ kBitrate?: number; /** @@ -1465,7 +1509,7 @@ export class WatermarkRatio { */ export class WatermarkOptions { /** - * Is the watermark visible in the local preview view? true : (Default) The watermark is visible in the local preview view. false : The watermark is not visible in the local preview view. + * Whether the watermark is visible in the local preview view: true : (Default) The watermark is visible in the local preview view. false : The watermark is not visible in the local preview view. */ visibleInPreview?: boolean; /** @@ -1816,15 +1860,15 @@ export enum AudioScenarioType { */ export class VideoFormat { /** - * The width (px) of the video frame. + * The width (px) of the video frame. The default value is 960. */ width?: number; /** - * The height (px) of the video frame. + * The height (px) of the video frame. The default value is 540. */ height?: number; /** - * The video frame rate (fps). + * The video frame rate (fps). The default value is 15. */ fps?: number; } @@ -1940,6 +1984,38 @@ export enum CaptureBrightnessLevelType { CaptureBrightnessLevelDark = 2, } +/** + * Camera stabilization modes. + * + * The camera stabilization effect increases in the order of 1 < 2 < 3, and the latency will also increase accordingly. + */ +export enum CameraStabilizationMode { + /** + * -1: (Default) Camera stabilization mode off. + */ + CameraStabilizationModeOff = -1, + /** + * 0: Automatic camera stabilization. The system automatically selects a stabilization mode based on the status of the camera. However, the latency is relatively high in this mode, so it is recommended not to use this enumeration. + */ + CameraStabilizationModeAuto = 0, + /** + * 1: (Recommended) Level 1 camera stabilization. + */ + CameraStabilizationModeLevel1 = 1, + /** + * 2: Level 2 camera stabilization. + */ + CameraStabilizationModeLevel2 = 2, + /** + * 3: Level 3 camera stabilization. + */ + CameraStabilizationModeLevel3 = 3, + /** + * @ignore + */ + CameraStabilizationModeMaxLevel = 3, +} + /** * The state of the local audio. */ @@ -1999,7 +2075,7 @@ export enum LocalAudioStreamReason { */ LocalAudioStreamReasonNoPlayoutDevice = 7, /** - * 8: The local audio capture is interrupted by a system call, Siri, or alarm clock. Remind your users to end the phone call, Siri, or alarm clock if the local audio capture is required. + * 8: The local audio capture is interrupted by a system call, smart assistants, or alarm clock. Prompt your users to end the phone call, smart assistants, or alarm clock if the local audio capture is required. */ LocalAudioStreamReasonInterrupted = 8, /** @@ -2047,15 +2123,15 @@ export enum LocalVideoStreamReason { */ LocalVideoStreamReasonFailure = 1, /** - * 2: No permission to use the local video capturing device. Remind the user to grant permissions and rejoin the channel. Deprecated: This enumerator is deprecated. Please use CAMERA in the onPermissionError callback instead. + * 2: No permission to use the local video capturing device. Prompt the user to grant permissions and rejoin the channel. Deprecated: This enumerator is deprecated. Please use CAMERA in the onPermissionError callback instead. 
*/ LocalVideoStreamReasonDeviceNoPermission = 2, /** - * 3: The local video capturing device is in use. Remind the user to check whether another application occupies the camera. + * 3: The local video capturing device is in use. Prompt the user to check if the camera is being used by another app, or try to rejoin the channel. */ LocalVideoStreamReasonDeviceBusy = 3, /** - * 4: The local video capture fails. Remind your user to check whether the video capture device is working properly, whether the camera is occupied by another application, or try to rejoin the channel. + * 4: The local video capture fails. Prompt the user to check whether the video capture device is working properly, whether the camera is used by another app, or try to rejoin the channel. */ LocalVideoStreamReasonCaptureFailure = 4, /** @@ -2063,11 +2139,11 @@ export enum LocalVideoStreamReason { */ LocalVideoStreamReasonCodecNotSupport = 5, /** - * 6: (iOS only) The app is in the background. Remind the user that video capture cannot be performed normally when the app is in the background. + * 6: (iOS only) The app is in the background. Prompt the user that video capture cannot be performed normally when the app is in the background. */ LocalVideoStreamReasonCaptureInbackground = 6, /** - * 7: (iOS only) The current application window is running in Slide Over, Split View, or Picture in Picture mode, and another app is occupying the camera. Remind the user that the application cannot capture video properly when the app is running in Slide Over, Split View, or Picture in Picture mode and another app is occupying the camera. + * 7: (iOS only) The current app window is running in Slide Over, Split View, or Picture in Picture mode, and another app is occupying the camera. Prompt the user that the app cannot capture video properly when it is running in Slide Over, Split View, or Picture in Picture mode and another app is occupying the camera. */ LocalVideoStreamReasonCaptureMultipleForegroundApps = 7, /** @@ -2082,6 +2158,16 @@ export enum LocalVideoStreamReason { * @ignore */ LocalVideoStreamReasonDeviceInvalidId = 10, + /** + * 14: (Android only) Video capture is interrupted. Possible reasons include the following: + * The camera is being used by another app. Prompt the user to check if the camera is being used by another app. + * The current app has been switched to the background. You can use foreground services to notify the operating system and ensure that the app can still collect video when it switches to the background. + */ + LocalVideoStreamReasonDeviceInterrupt = 14, + /** + * 15: (Android only) The video capture device encounters an error. Prompt the user to close and restart the camera to restore functionality. If this operation does not solve the problem, check if the camera has a hardware failure. + */ + LocalVideoStreamReasonDeviceFatalError = 15, /** * @ignore */ @@ -2198,6 +2284,14 @@ export enum RemoteAudioStateReason { * 7: The remote user leaves the channel. */ RemoteAudioReasonRemoteOffline = 7, + /** + * @ignore + */ + RemoteAudioReasonNoPacketReceive = 8, + /** + * @ignore + */ + RemoteAudioReasonLocalPlayFailed = 9, } /** @@ -2639,7 +2733,7 @@ export enum RtmpStreamPublishReason { */ RtmpStreamPublishReasonInvalidAppid = 15, /** - * 16: Your project does not have permission to use streaming services. Refer to Media Push to enable the Media Push permission. + * 16: Your project does not have permission to use streaming services. 
   */
  RtmpStreamPublishReasonInvalidPrivilege = 16,
  /**
@@ -3348,7 +3442,7 @@ export class VideoCanvas {
   */
  subviewUid?: number;
  /**
-   * The video display window.
+   * The video display window. In one VideoCanvas, you can only choose to set either view or surfaceTexture. If both are set, only the settings in view take effect.
   */
  view?: any;
  /**
@@ -3396,7 +3490,7 @@ export enum LighteningContrastLevel {
  /**
-   * @ignore
+   * 0: Low contrast level.
   */
  LighteningContrastLow = 0,
  /**
@@ -3628,7 +3722,7 @@ export class SegmentationProperty {
   */
  modelType?: SegModelType;
  /**
-   * The range of accuracy for identifying green colors (different shades of green) in the view. The value range is [0,1], and the default value is 0.5. The larger the value, the wider the range of identifiable shades of green. When the value of this parameter is too large, the edge of the portrait and the green color in the portrait range are also detected. Agora recommends that you dynamically adjust the value of this parameter according to the actual effect. This parameter only takes effect when modelType is set to SegModelGreen.
+   * The accuracy range for recognizing background colors in the image. The value range is [0,1], and the default value is 0.5. The larger the value, the wider the range of identifiable shades of pure color. When the value of this parameter is too large, the edge of the portrait and the pure color in the portrait range are also detected. Agora recommends that you dynamically adjust the value of this parameter according to the actual effect. This parameter only takes effect when modelType is set to SegModelGreen.
   */
  greenCapacity?: number;
}
@@ -3773,6 +3867,10 @@ export enum AudioEffectPreset {
   * Virtual surround sound, that is, the SDK generates a simulated surround sound field on the basis of stereo channels, thereby creating a surround sound effect. If the virtual surround sound is enabled, users need to use stereo audio playback devices to hear the anticipated audio effect.
   */
  RoomAcousticsVirtualSurroundSound = 0x02010900,
+  /**
+   * The audio effect of chorus. Agora recommends using this effect in chorus scenarios to enhance the sense of depth and dimension in the vocals.
+   */
+  RoomAcousticsChorus = 0x02010d00,
  /**
   * A middle-aged man's voice. Agora recommends using this preset to process a male-sounding voice; otherwise, you may not hear the anticipated voice effect.
   */
@@ -4203,7 +4301,7 @@ export enum ChannelMediaRelayError {
   */
  RelayErrorServerErrorResponse = 1,
  /**
-   * 2: No server response. You can call leaveChannel to leave the channel. This error can also occur if your project has not enabled co-host token authentication. You can to enable the service for cohosting across channels before starting a channel media relay.
+   * 2: No server response. This error may be caused by poor network connections. If this error occurs when initiating a channel media relay, you can try again later; if this error occurs during channel media relay, you can call leaveChannel to leave the channel. This error can also occur if the channel media relay service is not enabled in the project. You can contact Agora technical support to enable the service.
   */
  RelayErrorServerNoResponse = 2,
  /**
@@ -4424,6 +4522,10 @@ export class EncryptionConfig {
   * Salt, 32 bytes in length. Agora recommends that you use OpenSSL to generate salt on the server side. See Media Stream Encryption for details. This parameter takes effect only in Aes128Gcm2 or Aes256Gcm2 encrypted mode.
In this case, ensure that this parameter is not 0. */ encryptionKdfSalt?: number[]; + /** + * Whether to enable data stream encryption: true : Enable data stream encryption. false : (Default) Disable data stream encryption. + */ + datastreamEncryptionEnabled?: boolean; } /** @@ -4435,13 +4537,21 @@ export enum EncryptionErrorType { */ EncryptionErrorInternalFailure = 0, /** - * 1: Decryption errors. Ensure that the receiver and the sender use the same encryption mode and key. + * 1: Media stream decryption error. Ensure that the receiver and the sender use the same encryption mode and key. */ EncryptionErrorDecryptionFailure = 1, /** - * 2: Encryption errors. + * 2: Media stream encryption error. */ EncryptionErrorEncryptionFailure = 2, + /** + * 3: Data stream decryption error. Ensure that the receiver and the sender use the same encryption mode and key. + */ + EncryptionErrorDatastreamDecryptionFailure = 3, + /** + * 4: Data stream encryption error. + */ + EncryptionErrorDatastreamEncryptionFailure = 4, } /** @@ -4583,21 +4693,25 @@ export class UserInfo { } /** - * The audio filter of in-ear monitoring. + * The audio filter types of in-ear monitoring. */ export enum EarMonitoringFilterType { /** - * 1<<0: Do not add an audio filter to the in-ear monitor. + * 1<<0: No audio filter added to in-ear monitoring. */ EarMonitoringFilterNone = 1 << 0, /** - * 1<<1: Add an audio filter to the in-ear monitor. If you implement functions such as voice beautifier and audio effect, users can hear the voice after adding these effects. + * 1<<1: Add vocal effects audio filter to in-ear monitoring. If you implement functions such as voice beautifier and audio effect, users can hear the voice after adding these effects. */ EarMonitoringFilterBuiltInAudioFilters = 1 << 1, /** - * 1<<2: Enable noise suppression to the in-ear monitor. + * 1<<2: Add noise suppression audio filter to in-ear monitoring. */ EarMonitoringFilterNoiseSuppression = 1 << 2, + /** + * 1<<15: Reuse the audio filter that has been processed on the sending end for in-ear monitoring. This enumerator reduces CPU usage while increasing in-ear monitoring latency, which is suitable for latency-tolerant scenarios requiring low CPU consumption. + */ + EarMonitoringFilterReusePostProcessingFilter = 1 << 15, } /** @@ -4916,7 +5030,7 @@ export class SpatialAudioParams { speaker_attenuation?: number; /** * Whether to enable the Doppler effect: When there is a relative displacement between the sound source and the receiver of the sound source, the tone heard by the receiver changes. true : Enable the Doppler effect. false : (Default) Disable the Doppler effect. - * This parameter is suitable for scenarios where the sound source is moving at high speed (for example, racing games). It is not recommended for common audio and video interactive scenarios (for example, voice chat, cohosting, or online KTV). + * This parameter is suitable for scenarios where the sound source is moving at high speed (for example, racing games). It is not recommended for common audio and video interactive scenarios (for example, voice chat, co-streaming, or online KTV). * When this parameter is enabled, Agora recommends that you set a regular period (such as 30 ms), and then call the updatePlayerPositionInfo, updateSelfPosition, and updateRemotePosition methods to continuously update the relative distance between the sound source and the receiver. 
The following factors can cause the Doppler effect to be unpredictable or the sound to be jittery: the period of updating the distance is too long, the updating period is irregular, or the distance information is lost due to network packet loss or delay. */ enable_doppler?: boolean; diff --git a/src/AgoraMediaBase.ts b/src/AgoraMediaBase.ts index 170f3fd1..3f764396 100644 --- a/src/AgoraMediaBase.ts +++ b/src/AgoraMediaBase.ts @@ -58,11 +58,11 @@ export enum VideoSourceType { */ VideoSourceTranscoded = 10, /** - * @ignore + * 11: (For Android only) The third camera. */ VideoSourceCameraThird = 11, /** - * @ignore + * 12: (For Android only) The fourth camera. */ VideoSourceCameraFourth = 12, /** @@ -73,6 +73,10 @@ export enum VideoSourceType { * @ignore */ VideoSourceScreenFourth = 14, + /** + * @ignore + */ + VideoSourceSpeechDriven = 15, /** * 100: An unknown video source. */ @@ -204,7 +208,7 @@ export enum MediaSourceType { */ SecondaryScreenSource = 5, /** - * 6. Custom video source. + * 6: Custom video source. */ CustomVideoSource = 6, /** @@ -231,6 +235,10 @@ export enum MediaSourceType { * @ignore */ TranscodedVideoSource = 12, + /** + * @ignore + */ + SpeechDrivenVideoSource = 13, /** * 100: Unknown media source. */ @@ -449,6 +457,10 @@ export enum VideoPixelFormat { * @ignore */ VideoTextureId3d11texture2d = 17, + /** + * @ignore + */ + VideoPixelI010 = 18, } /** @@ -611,6 +623,10 @@ export class ExternalVideoFrame { * @ignore */ alphaBuffer?: Uint8Array; + /** + * @ignore + */ + fillAlphaBuffer?: boolean; /** * @ignore */ @@ -636,15 +652,15 @@ export class VideoFrame { */ height?: number; /** - * For YUV data, the line span of the Y buffer; for RGBA data, the total data length. + * For YUV data, the line span of the Y buffer; for RGBA data, the total data length. When dealing with video data, it is necessary to process the offset between each line of pixel data based on this parameter, otherwise it may result in image distortion. */ yStride?: number; /** - * For YUV data, the line span of the U buffer; for RGBA data, the value is 0. + * For YUV data, the line span of the U buffer; for RGBA data, the value is 0. When dealing with video data, it is necessary to process the offset between each line of pixel data based on this parameter, otherwise it may result in image distortion. */ uStride?: number; /** - * For YUV data, the line span of the V buffer; for RGBA data, the value is 0. + * For YUV data, the line span of the V buffer; for RGBA data, the value is 0. When dealing with video data, it is necessary to process the offset between each line of pixel data based on this parameter, otherwise it may result in image distortion. */ vStride?: number; /** @@ -664,7 +680,7 @@ export class VideoFrame { */ rotation?: number; /** - * The Unix timestamp (ms) when the video frame is rendered. This timestamp can be used to guide the rendering of the video frame. It is required. + * The Unix timestamp (ms) when the video frame is rendered. This timestamp can be used to guide the rendering of the video frame. This parameter is required. */ renderTimeMs?: number; /** @@ -782,7 +798,7 @@ export class AudioFrame { */ samplesPerChannel?: number; /** - * The number of bytes per sample. The number of bytes per audio sample, which is usually 16-bit (2-byte). + * The number of bytes per sample. For PCM, this parameter is generally set to 16 bits (2 bytes). 
*/ bytesPerSample?: BytesPerSample; /** @@ -815,6 +831,10 @@ export class AudioFrame { * @ignore */ audioTrackNumber?: number; + /** + * @ignore + */ + rtpTimestamp?: number; } /** @@ -891,9 +911,6 @@ export interface IAudioFrameObserverBase { * * @param channelId The channel ID. * @param audioFrame The raw audio data. See AudioFrame. - * - * @returns - * Without practical meaning. */ onRecordAudioFrame?(channelId: string, audioFrame: AudioFrame): void; @@ -904,9 +921,6 @@ export interface IAudioFrameObserverBase { * * @param channelId The channel ID. * @param audioFrame The raw audio data. See AudioFrame. - * - * @returns - * Without practical meaning. */ onPlaybackAudioFrame?(channelId: string, audioFrame: AudioFrame): void; @@ -917,9 +931,6 @@ export interface IAudioFrameObserverBase { * * @param channelId The channel ID. * @param audioFrame The raw audio data. See AudioFrame. - * - * @returns - * Without practical meaning. */ onMixedAudioFrame?(channelId: string, audioFrame: AudioFrame): void; @@ -929,9 +940,6 @@ export interface IAudioFrameObserverBase { * In order to ensure that the obtained in-ear audio data meets the expectations, Agora recommends that you set the in-ear monitoring-ear audio data format as follows: After calling setEarMonitoringAudioFrameParameters to set the audio data format and registerAudioFrameObserver to register the audio frame observer object, the SDK calculates the sampling interval according to the parameters set in the methods, and triggers the onEarMonitoringAudioFrame callback according to the sampling interval. * * @param audioFrame The raw audio data. See AudioFrame. - * - * @returns - * Without practical meaning. */ onEarMonitoringAudioFrame?(audioFrame: AudioFrame): void; } @@ -943,12 +951,11 @@ export interface IAudioFrameObserver extends IAudioFrameObserverBase { /** * Retrieves the audio frame of a specified user before mixing. * + * Due to framework limitations, this callback does not support sending processed audio data back to the SDK. + * * @param channelId The channel ID. * @param uid The user ID of the specified user. * @param audioFrame The raw audio data. See AudioFrame. - * - * @returns - * Without practical meaning. */ onPlaybackAudioFrameBeforeMixing?( channelId: string, @@ -995,9 +1002,6 @@ export interface IAudioSpectrumObserver { * After successfully calling registerAudioSpectrumObserver to implement the onLocalAudioSpectrum callback in IAudioSpectrumObserver and calling enableAudioSpectrumMonitor to enable audio spectrum monitoring, the SDK will trigger the callback as the time interval you set to report the received remote audio data spectrum. * * @param data The audio spectrum data of the local user. See AudioSpectrumData. - * - * @returns - * Whether the spectrum data is received: true : Spectrum data is received. false : No spectrum data is received. */ onLocalAudioSpectrum?(data: AudioSpectrumData): void; @@ -1008,9 +1012,6 @@ export interface IAudioSpectrumObserver { * * @param spectrums The audio spectrum information of the remote user, see UserAudioSpectrumInfo. The number of arrays is the number of remote users monitored by the SDK. If the array is null, it means that no audio spectrum of remote users is detected. * @param spectrumNumber The number of remote users. - * - * @returns - * Whether the spectrum data is received: true : Spectrum data is received. false : No spectrum data is received. 
*/ onRemoteAudioSpectrum?( spectrums: UserAudioSpectrumInfo[], @@ -1031,9 +1032,6 @@ export interface IVideoEncodedFrameObserver { * @param imageBuffer The encoded video image buffer. * @param length The data length of the video image. * @param videoEncodedFrameInfo For the information of the encoded video frame, see EncodedVideoFrameInfo. - * - * @returns - * Without practical meaning. */ onEncodedVideoFrameReceived?( uid: number, @@ -1064,18 +1062,12 @@ export interface IVideoFrameObserver { /** * Occurs each time the SDK receives a video frame captured by local devices. * - * After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data captured by local devices. You can then pre-process the data according to your scenarios. Once the pre-processing is complete, you can directly modify videoFrame in this callback, and set the return value to true to send the modified video data to the SDK. - * The video data that this callback gets has not been pre-processed such as watermarking, cropping, and rotating. - * If the video data type you get is RGBA, the SDK does not support processing the data of the alpha channel. + * You can get raw video data collected by the local device through this callback. * * @param sourceType Video source types, including cameras, screens, or media player. See VideoSourceType. * @param videoFrame The video frame. See VideoFrame. The default value of the video frame data format obtained through this callback is as follows: * Android: I420 or RGB (GLES20.GL_TEXTURE_2D) * iOS: I420 or CVPixelBufferRef - * - * @returns - * When the video processing mode is ProcessModeReadOnly : true : Reserved for future use. false : Reserved for future use. - * When the video processing mode is ProcessModeReadWrite : true : Sets the SDK to receive the video frame. false : Sets the SDK to discard the video frame. */ onCaptureVideoFrame?( sourceType: VideoSourceType, @@ -1086,16 +1078,13 @@ export interface IVideoFrameObserver { * Occurs each time the SDK receives a video frame before encoding. * * After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data before encoding and then process the data according to your particular scenarios. After processing, you can send the processed video data back to the SDK in this callback. + * Due to framework limitations, this callback does not support sending processed video data back to the SDK. * The video data that this callback gets has been preprocessed, with its content cropped and rotated, and the image enhanced. * * @param sourceType The type of the video source. See VideoSourceType. * @param videoFrame The video frame. See VideoFrame. The default value of the video frame data format obtained through this callback is as follows: * Android: I420 or RGB (GLES20.GL_TEXTURE_2D) * iOS: I420 or CVPixelBufferRef - * - * @returns - * When the video processing mode is ProcessModeReadOnly : true : Reserved for future use. false : Reserved for future use. - * When the video processing mode is ProcessModeReadWrite : true : Sets the SDK to receive the video frame. false : Sets the SDK to discard the video frame. 
   */
  onPreEncodeVideoFrame?(
    sourceType: VideoSourceType,
    videoFrame: VideoFrame
  ): void;
@@ -1112,16 +1101,13 @@
   *
   * After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data sent from the remote end before rendering, and then process it according to the particular scenarios.
   * If the video data type you get is RGBA, the SDK does not support processing the data of the alpha channel.
+   * Due to framework limitations, this callback does not support sending processed video data back to the SDK.
   *
   * @param channelId The channel ID.
   * @param remoteUid The user ID of the remote user who sends the current video frame.
   * @param videoFrame The video frame. See VideoFrame. The default value of the video frame data format obtained through this callback is as follows:
   * Android: I420 or RGB (GLES20.GL_TEXTURE_2D)
   * iOS: I420 or CVPixelBufferRef
-   *
-   * @returns
-   * When the video processing mode is ProcessModeReadOnly : true : Reserved for future use. false : Reserved for future use.
-   * When the video processing mode is ProcessModeReadWrite : true : Sets the SDK to receive the video frame. false : Sets the SDK to discard the video frame.
   */
  onRenderVideoFrame?(
    channelId: string,
    remoteUid: number,
    videoFrame: VideoFrame
  ): void;
@@ -1247,6 +1233,50 @@
  recorderInfoUpdateInterval?: number;
}

+/**
+ * Facial information observer.
+ *
+ * You can call registerFaceInfoObserver to register or unregister the IFaceInfoObserver object.
+ */
+export interface IFaceInfoObserver {
+  /**
+   * Occurs when the facial information processed by the speech driven extension is received.
+   *
+   * @param outFaceInfo Output parameter, the JSON string of the facial information processed by the speech driven extension, including the following fields:
+   * faces: Object sequence. The collection of facial information, with each face corresponding to an object.
+   * blendshapes: Object. The collection of face capture coefficients, named according to ARKit standards, with each key-value pair representing a blendshape coefficient. The blendshape coefficient is a floating point number with a range of [0.0, 1.0].
+   * rotation: Object sequence. The rotation of the head, which includes the following three key-value pairs, with values as floating point numbers ranging from -180.0 to 180.0:
+   * pitch: Head pitch angle. A positive value means looking down, while a negative value means looking up.
+   * yaw: Head yaw angle. A positive value means turning left, while a negative value means turning right.
+   * roll: Head roll angle. A positive value means tilting to the right, while a negative value means tilting to the left.
+   * timestamp: String. The timestamp of the output result, in milliseconds.
+   * Here is an example of JSON:
+   * {
+   *   "faces":[{
+   *     "blendshapes":{
+   *       "eyeBlinkLeft":0.9, "eyeLookDownLeft":0.0, "eyeLookInLeft":0.0, "eyeLookOutLeft":0.0, "eyeLookUpLeft":0.0,
+   *       "eyeSquintLeft":0.0, "eyeWideLeft":0.0, "eyeBlinkRight":0.0, "eyeLookDownRight":0.0, "eyeLookInRight":0.0,
+   *       "eyeLookOutRight":0.0, "eyeLookUpRight":0.0, "eyeSquintRight":0.0, "eyeWideRight":0.0, "jawForward":0.0,
+   *       "jawLeft":0.0, "jawRight":0.0, "jawOpen":0.0, "mouthClose":0.0, "mouthFunnel":0.0, "mouthPucker":0.0,
+   *       "mouthLeft":0.0, "mouthRight":0.0, "mouthSmileLeft":0.0, "mouthSmileRight":0.0, "mouthFrownLeft":0.0,
+   *       "mouthFrownRight":0.0, "mouthDimpleLeft":0.0, "mouthDimpleRight":0.0, "mouthStretchLeft":0.0, "mouthStretchRight":0.0,
+   *       "mouthRollLower":0.0, "mouthRollUpper":0.0, "mouthShrugLower":0.0, "mouthShrugUpper":0.0, "mouthPressLeft":0.0,
+   *       "mouthPressRight":0.0, "mouthLowerDownLeft":0.0, "mouthLowerDownRight":0.0, "mouthUpperUpLeft":0.0, "mouthUpperUpRight":0.0,
+   *       "browDownLeft":0.0, "browDownRight":0.0, "browInnerUp":0.0, "browOuterUpLeft":0.0, "browOuterUpRight":0.0,
+   *       "cheekPuff":0.0, "cheekSquintLeft":0.0, "cheekSquintRight":0.0, "noseSneerLeft":0.0, "noseSneerRight":0.0,
+   *       "tongueOut":0.0
+   *     },
+   *     "rotation":{"pitch":30.0, "yaw":25.5, "roll":-15.5}
+   *   }],
+   *   "timestamp":"654879876546"
+   * }
+   *
+   * @returns
+   * true : Facial information JSON parsing successful. false : Facial information JSON parsing failed.
+   */
+  onFaceInfo?(outFaceInfo: string): void;
+}
+
/**
 * @ignore
 */
diff --git a/src/IAgoraLog.ts b/src/IAgoraLog.ts
index 8b25b5cc..37549b41 100644
--- a/src/IAgoraLog.ts
+++ b/src/IAgoraLog.ts
@@ -69,7 +69,7 @@ export enum LogFilterType {
 */
export class LogConfig {
  /**
-   * The complete path of the log files. Ensure that the path for the log file exists and is writable. You can use this parameter to rename the log files. The default path is:
+   * The complete path of the log files. Agora recommends using the default log directory. If you need to modify the default directory, ensure that the directory you specify exists and is writable. The default log directory is:
   * Android: /storage/emulated/0/Android/data//files/agorasdk.log.
   * iOS: App Sandbox/Library/caches/agorasdk.log.
   */
diff --git a/src/IAgoraMediaEngine.ts b/src/IAgoraMediaEngine.ts
index 3b8a2648..6756c072 100644
--- a/src/IAgoraMediaEngine.ts
+++ b/src/IAgoraMediaEngine.ts
@@ -10,6 +10,7 @@ import {
  ExternalVideoFrame,
  ExternalVideoSourceType,
  IAudioFrameObserver,
+  IFaceInfoObserver,
  IVideoEncodedFrameObserver,
  IVideoFrameObserver,
} from './AgoraMediaBase';
@@ -98,9 +99,28 @@ export abstract class IMediaEngine {
    observer: IVideoEncodedFrameObserver
  ): number;

+  /**
+   * Registers a facial information observer.
+   *
+   * You can call this method to register the onFaceInfo callback to receive the facial information processed by the Agora speech driven extension. When calling this method to register a facial information observer, you can register callbacks in the IFaceInfoObserver class as needed. After successfully registering the facial information observer, the SDK triggers the callback you have registered when it captures the facial information converted by the speech driven extension.
+   * Ensure that you call this method before joining a channel.
+   * Before calling this method, you need to make sure that the speech driven extension has been enabled by calling enableExtension.
+   *
+   * @param observer Facial information observer, see IFaceInfoObserver.
+   *
+   * @returns
+   * 0: Success.
+   * < 0: Failure.
+   */
+  abstract registerFaceInfoObserver(observer: IFaceInfoObserver): number;
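For context, a minimal TypeScript sketch of wiring up the new facial information observer documented above. This is illustrative only and not part of this patch: the `engine` handle is assumed to be an initialized IRtcEngine, and the speech driven extension is assumed to be enabled already via enableExtension.

```ts
import { IFaceInfoObserver, IRtcEngine } from 'react-native-agora';

// Assumed to exist: an initialized engine with the speech driven extension enabled.
declare const engine: IRtcEngine;

const faceInfoObserver: IFaceInfoObserver = {
  onFaceInfo: (outFaceInfo: string) => {
    try {
      // The payload is the JSON string documented above: ARKit-style
      // blendshape coefficients plus head rotation angles per face.
      const info = JSON.parse(outFaceInfo);
      for (const face of info.faces ?? []) {
        console.log('jawOpen:', face.blendshapes?.jawOpen);
        console.log('rotation:', face.rotation);
      }
    } catch (e) {
      console.warn('failed to parse face info JSON', e);
    }
  },
};

// Register before joining a channel, as the docs above require.
engine.getMediaEngine().registerFaceInfoObserver(faceInfoObserver);
```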
+
  /**
   * Pushes the external audio frame.
   *
+   * Before calling this method to push external audio data, perform the following steps:
+   * Call createCustomAudioTrack to create a custom audio track and get the audio track ID.
+   * Call joinChannel to join the channel. In ChannelMediaOptions, set publishCustomAudioTrackId to the audio track ID that you want to publish, and set publishCustomAudioTrack to true.
+   *
   * @param frame The external audio frame. See AudioFrame.
   * @param trackId The audio track ID. If you want to publish a custom external audio source, set this parameter to the ID of the corresponding custom audio track you want to publish.
   *
@@ -113,13 +133,12 @@
  /**
   * Pulls the remote audio data.
   *
-   * Before calling this method, you need to call setExternalAudioSink to notify the app to enable and set the external rendering. After a successful call of this method, the app pulls the decoded and mixed audio data for playback.
-   * This method only supports pulling data from custom audio source. If you need to pull the data captured by the SDK, do not call this method.
+   * Before calling this method, call setExternalAudioSink (enabled : true) to notify the app to enable and set the external audio rendering. After a successful call of this method, the app pulls the decoded and mixed audio data for playback.
   * Call this method after joining a channel.
-   * Once you enable the external audio sink, the app will not retrieve any audio data from the onPlaybackAudioFrame callback.
-   * The difference between this method and the onPlaybackAudioFrame callback is as follows:
+   * Both this method and onPlaybackAudioFrame callback can be used to get audio data after remote mixing. Note that after calling setExternalAudioSink to enable external audio rendering, the app no longer receives data from the onPlaybackAudioFrame callback. Therefore, you should choose between this method and the onPlaybackAudioFrame callback based on your actual business requirements. The specific distinctions between them are as follows:
+   * After calling this method, the app automatically pulls the audio data from the SDK. By setting the audio data parameters, the SDK adjusts the frame buffer to help the app handle latency, effectively avoiding audio playback jitter.
   * The SDK sends the audio data to the app through the onPlaybackAudioFrame callback. Any delay in processing the audio frames may result in audio jitter.
-   * After a successful method call, the app automatically pulls the audio data from the SDK. After setting the audio data parameters, the SDK adjusts the frame buffer and avoids problems caused by jitter in the external audio playback.
+   * This method is only used for retrieving audio data after remote mixing. If you need to get audio data from different audio processing stages such as capture and playback, you can register the corresponding callbacks by calling registerAudioFrameObserver.
   *
   * @returns
   * The AudioFrame instance, if the method call succeeds.
@@ -302,4 +321,15 @@
  abstract unregisterVideoEncodedFrameObserver(
    observer: IVideoEncodedFrameObserver
  ): number;
+
+  /**
+   * Unregisters a facial information observer.
+   *
+   * @param observer Facial information observer, see IFaceInfoObserver.
+   *
+   * @returns
+   * 0: Success.
+   * < 0: Failure.
+   */
+  abstract unregisterFaceInfoObserver(observer: IFaceInfoObserver): number;
 }
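As a usage note for the revised pullAudioFrame documentation above, here is a hedged sketch of the pull-based flow. It is not part of this patch; the sample rate, channel count, and polling interval are assumptions chosen for illustration.

```ts
import { IRtcEngine } from 'react-native-agora';

// Assumed to exist: an initialized engine that has joined a channel.
declare const engine: IRtcEngine;
const mediaEngine = engine.getMediaEngine();

// Enable the external audio sink first, per the docs above
// (48 kHz stereo is an assumed format for this example).
mediaEngine.setExternalAudioSink(true, 48000, 2);

// Pull decoded, mixed remote audio at a steady cadence; the SDK's
// frame buffer smooths out playback jitter as described above.
setInterval(() => {
  const frame = mediaEngine.pullAudioFrame();
  if (frame?.buffer) {
    // Hand frame.buffer to a custom playback pipeline here.
  }
}, 10);
```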
diff --git a/src/IAgoraMediaPlayer.ts b/src/IAgoraMediaPlayer.ts
index 3e0f3274..3d69831d 100644
--- a/src/IAgoraMediaPlayer.ts
+++ b/src/IAgoraMediaPlayer.ts
@@ -205,7 +205,16 @@
  abstract selectAudioTrack(index: number): number;

  /**
-   * @ignore
+   * Selects the audio tracks that you want to play on your local device and publish to the channel respectively.
+   *
+   * You can call this method to determine the audio track to be played on your local device and published to the channel. Before calling this method, you need to open the media file with the openWithMediaSource method and set enableMultiAudioTrack in MediaSource as true.
+   *
+   * @param playoutTrackIndex The index of audio tracks for local playback. You can obtain the index through getStreamInfo.
+   * @param publishTrackIndex The index of audio tracks to be published in the channel. You can obtain the index through getStreamInfo.
+   *
+   * @returns
+   * 0: Success.
+   * < 0: Failure.
   */
  abstract selectMultiAudioTrack(
    playoutTrackIndex: number,
diff --git a/src/IAgoraRtcEngine.ts b/src/IAgoraRtcEngine.ts
index ae2bad29..dfdd2de8 100644
--- a/src/IAgoraRtcEngine.ts
+++ b/src/IAgoraRtcEngine.ts
@@ -11,6 +11,8 @@ import {
  AudioSessionOperationRestriction,
  AudioVolumeInfo,
  BeautyOptions,
+  CameraFocalLengthType,
+  CameraStabilizationMode,
  CaptureBrightnessLevelType,
  ChannelMediaRelayConfiguration,
  ChannelMediaRelayError,
@@ -31,6 +33,7 @@ import {
  EncryptionConfig,
  EncryptionErrorType,
  ErrorCodeType,
+  FocalLengthInfo,
  HeadphoneEqualizerPreset,
  IAudioEncodedFrameObserver,
  LastmileProbeConfig,
@@ -547,6 +550,10 @@ export class RemoteAudioStats {
   * @ignore
   */
  rxAudioBytes?: number;
+  /**
+   * End-to-end audio delay (in milliseconds), which refers to the time from when the audio is captured by the remote user to when it is played by the local user.
+   */
+  e2eDelay?: number;
}

/**
@@ -816,11 +823,11 @@ export class PublisherConfiguration {
 */
export enum CameraDirection {
  /**
-   * The rear camera.
+   * 0: The rear camera.
   */
  CameraRear = 0,
  /**
-   * The front camera.
+   * 1: (Default) The front camera.
   */
  CameraFront = 1,
}
@@ -848,21 +855,37 @@ export enum CloudProxyType {
 */
export class CameraCapturerConfiguration {
  /**
-   * This parameter applies to Android and iOS only. The camera direction. See CameraDirection.
+   * (Optional) The camera direction. See CameraDirection.
   */
  cameraDirection?: CameraDirection;
+  /**
+   * (Optional) The camera focal length type. See CameraFocalLengthType.
+   * To set the focal length type of the camera, you can only specify the camera through cameraDirection; specifying it through cameraId is not supported.
+   * For iOS devices equipped with multi-lens rear cameras, such as those featuring dual-camera (wide-angle and ultra-wide-angle) or triple-camera (wide-angle, ultra-wide-angle, and telephoto), you can use one of the following methods to capture video with an ultra-wide-angle perspective:
+   * Method one: Set this parameter to CameraFocalLengthUltraWide (2) (ultra-wide lens).
+   * Method two: Set this parameter to CameraFocalLengthDefault (0) (standard lens), then call setCameraZoomFactor to set the camera's zoom factor to a value less than 1.0, with the minimum setting being 0.5. The difference is that the size of the ultra-wide angle in method one is not adjustable, whereas method two supports adjusting the camera's zoom factor freely.
+   */
+  cameraFocalLengthType?: CameraFocalLengthType;
  /**
   * @ignore
   */
  deviceId?: string;
  /**
-   * The format of the video frame. See VideoFormat.
+   * (Optional) The camera ID. The default value is the camera ID of the front camera. You can get the camera ID through the Android native system API.
+   * This parameter is for Android only.
+   * This parameter and cameraDirection are mutually exclusive in specifying the camera; you can choose one based on your needs. The differences are as follows:
+   * Specifying the camera via cameraDirection is more straightforward. You only need to indicate the camera direction (front or rear), without specifying a specific camera ID; the SDK will retrieve and confirm the actual camera ID through Android native system APIs.
+   * Specifying via cameraId allows for more precise identification of a particular camera. For devices with multiple cameras, where cameraDirection cannot recognize or access all available cameras, it is recommended to use cameraId to specify the desired camera ID directly.
   */
-  format?: VideoFormat;
+  cameraId?: string;
  /**
-   * Whether to follow the video aspect ratio set in setVideoEncoderConfiguration : true : (Default) Follow the set video aspect ratio. The SDK crops the captured video according to the set video aspect ratio and synchronously changes the local preview screen and the video frame in onCaptureVideoFrame and onPreEncodeVideoFrame. false : Do not follow the system default audio playback device. The SDK does not change the aspect ratio of the captured video frame.
+   * (Optional) Whether to follow the video aspect ratio set in setVideoEncoderConfiguration : true : (Default) Follow the set video aspect ratio. The SDK crops the captured video according to the set video aspect ratio and synchronously changes the local preview screen and the video frame in onCaptureVideoFrame and onPreEncodeVideoFrame. false : Do not follow the set video aspect ratio. The SDK does not change the aspect ratio of the captured video frame.
   */
  followEncodeDimensionRatio?: boolean;
+  /**
+   * (Optional) The format of the video frame. See VideoFormat.
+   */
+  format?: VideoFormat;
}

/**
@@ -1050,11 +1073,11 @@ export class ChannelMediaOptions {
   */
  publishSecondaryCameraTrack?: boolean;
  /**
-   * @ignore
+   * Whether to publish the video captured by the third camera: true : Publish the video captured by the third camera. false : Do not publish the video captured by the third camera. This parameter is for Android only.
   */
  publishThirdCameraTrack?: boolean;
  /**
-   * @ignore
+   * Whether to publish the video captured by the fourth camera: true : Publish the video captured by the fourth camera. false : Do not publish the video captured by the fourth camera. This parameter is for Android only.
   */
  publishFourthCameraTrack?: boolean;
  /**
@@ -1117,6 +1140,10 @@
   * @ignore
   */
  publishMixedAudioTrack?: boolean;
+  /**
+   * @ignore
+   */
+  publishLipSyncTrack?: boolean;
  /**
   * Whether to automatically subscribe to all remote audio streams when the user joins a channel: true : Subscribe to all remote audio streams. false : Do not automatically subscribe to any remote audio streams.
   */
@@ -1150,7 +1177,7 @@
   */
  channelProfile?: ChannelProfileType;
  /**
-   * @ignore
+   * Delay (in milliseconds) for sending audio frames. You can use this parameter to set the delay of the audio frames that need to be sent, to ensure audio and video synchronization.
To switch off the delay, set the value to 0. */ audioDelayMs?: number; /** @@ -1173,7 +1200,7 @@ export class ChannelMediaOptions { publishRhythmPlayerTrack?: boolean; /** * Whether to enable interactive mode: true : Enable interactive mode. Once this mode is enabled and the user role is set as audience, the user can receive remote video streams with low latency. false :Do not enable interactive mode. If this mode is disabled, the user receives the remote video streams in default settings. - * This parameter only applies to scenarios involving cohosting across channels. The cohosts need to call the joinChannelEx method to join the other host's channel as an audience member, and set isInteractiveAudience to true. + * This parameter only applies to co-streaming scenarios. The cohosts need to call the joinChannelEx method to join the other host's channel as an audience member, and set isInteractiveAudience to true. * This parameter takes effect only when the user role is ClientRoleAudience. */ isInteractiveAudience?: boolean; @@ -1549,11 +1576,7 @@ export interface IRtcEngineEventHandler { /** * Occurs when the local video stream state changes. * - * When the state of the local video stream changes (including the state of the video capture and encoding), the SDK triggers this callback to report the current state. This callback indicates the state of the local video stream, including camera capturing and video encoding, and allows you to troubleshoot issues when exceptions occur. The SDK triggers the onLocalVideoStateChanged callback with the state code of LocalVideoStreamStateFailed and error code of LocalVideoStreamReasonCaptureFailure in the following situations: - * The app switches to the background, and the system gets the camera resource. - * For Android 9 and later versions, after an app is in the background for a period, the system automatically revokes camera permissions. - * For Android 6 and later versions, if the camera is held by a third-party app for a certain duration and then released, the SDK triggers this callback and reports the onLocalVideoStateChanged (LocalVideoStreamStateCapturing, LocalVideoStreamReasonOk) callback. - * The camera starts normally, but does not output video frames for four consecutive seconds. When the camera outputs the captured video frames, if the video frames are the same for 15 consecutive frames, the SDK triggers the onLocalVideoStateChanged callback with the state code of LocalVideoStreamStateCapturing and error code of LocalVideoStreamReasonCaptureFailure. Note that the video frame duplication detection is only available for video frames with a resolution greater than 200 × 200, a frame rate greater than or equal to 10 fps, and a bitrate less than 20 Kbps. For some device models, the SDK does not trigger this callback when the state of the local video changes while the local video capturing device is in use, so you have to make your own timeout judgment. + * When the status of the local video changes, the SDK triggers this callback to report the current local video state and the reason for the state change. * * @param source The type of the video source. See VideoSourceType. * @param state The state of the local video, see LocalVideoStreamState. @@ -1694,11 +1717,11 @@ export interface IRtcEngineEventHandler { /** * Occurs when a specific remote user enables/disables the local video capturing function. * - * The SDK triggers this callback when the remote user resumes or stops capturing the video stream by calling the enableLocalVideo method. 
+ * Deprecated: This callback is deprecated. Use the following enumerations in the onRemoteVideoStateChanged callback instead: RemoteVideoStateStopped (0) with RemoteVideoStateReasonRemoteMuted (5), and RemoteVideoStateDecoding (2) with RemoteVideoStateReasonRemoteUnmuted (6). The SDK triggers this callback when the remote user resumes or stops capturing the video stream by calling the enableLocalVideo method. * * @param connection The connection information. See RtcConnection. * @param remoteUid The user ID of the remote user. - * @param enabled Whether the specified remote user enables/disables the local video capturing function: true : The video module is enabled. Other users in the channel can see the video of this remote user. false : The video module is disabled. Other users in the channel can no longer receive the video stream from this remote user, while this remote user can still receive the video streams from other users. + * @param enabled Whether the specified remote user enables/disables local video capturing: true : The video module is enabled. Other users in the channel can see the video of this remote user. false : The video module is disabled. Other users in the channel can no longer receive the video stream from this remote user, while this remote user can still receive the video streams from other users. */ onUserEnableLocalVideo?( connection: RtcConnection, @@ -1902,7 +1925,7 @@ export interface IRtcEngineEventHandler { * @param connection The connection information. See RtcConnection. * @param remoteUid The ID of the remote user sending the message. * @param streamId The stream ID of the received message. - * @param code ErrorCodeType The error code. + * @param code The error code. See ErrorCodeType. * @param missed The number of lost messages. * @param cached Number of incoming cached messages when the data stream is interrupted. */ @@ -2447,6 +2470,16 @@ export interface IRtcEngineEventHandler { layoutlist: VideoLayout[] ): void; + /** + * @ignore + */ + onAudioMetadataReceived?( + connection: RtcConnection, + uid: number, + metadata: string, + length: number + ): void; + /** * The event callback of the extension. * @@ -2467,7 +2500,7 @@ export interface IRtcEngineEventHandler { /** * Occurs when the extension is enabled. * - * After a successful call of enableExtension (true), the extension triggers this callback. + * The extension triggers this callback after it is successfully enabled. * * @param provider The name of the extension provider. * @param extension The name of the extension. @@ -2477,7 +2510,7 @@ export interface IRtcEngineEventHandler { /** * Occurs when the extension is disabled. * - * After a successful call of enableExtension (false), this callback is triggered. + * The extension triggers this callback after it is successfully destroyed. * * @param provider The name of the extension provider. * @param extension The name of the extension. @@ -2487,7 +2520,7 @@ export interface IRtcEngineEventHandler { /** * Occurs when the extension runs incorrectly. * - * When calling enableExtension (true) fails or the extension runs in error, the extension triggers this callback and reports the error code and reason. + * In case of extension enabling failure or runtime errors, the extension triggers this callback and reports the error code along with the reasons. * * @param provider The name of the extension provider. * @param extension The name of the extension.
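For readers tracking the deprecation above: the old onUserEnableLocalVideo signal maps onto two state/reason pairs of onRemoteVideoStateChanged. Below is a minimal TypeScript sketch of that mapping, not part of this PR, assuming an engine that has already been initialized elsewhere in the app:

import {
  createAgoraRtcEngine,
  RemoteVideoState,
  RemoteVideoStateReason,
} from 'react-native-agora';

// Assumption: engine.initialize({ appId: '<APP_ID>' }) has already run elsewhere.
const engine = createAgoraRtcEngine();

engine.registerEventHandler({
  onRemoteVideoStateChanged: (connection, remoteUid, state, reason) => {
    // Former onUserEnableLocalVideo(..., enabled: false):
    if (
      state === RemoteVideoState.RemoteVideoStateStopped &&
      reason === RemoteVideoStateReason.RemoteVideoStateReasonRemoteMuted
    ) {
      console.log(`remote user ${remoteUid} disabled local video capturing`);
    }
    // Former onUserEnableLocalVideo(..., enabled: true):
    if (
      state === RemoteVideoState.RemoteVideoStateDecoding &&
      reason === RemoteVideoStateReason.RemoteVideoStateReasonRemoteUnmuted
    ) {
      console.log(`remote user ${remoteUid} re-enabled local video capturing`);
    }
  },
});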
@@ -2940,38 +2973,7 @@ export abstract class IRtcEngine { ): number; /** - * Preloads a channel with token, channelId, and userAccount. - * - * When audience members need to switch between different channels frequently, calling the method can help shortening the time of joining a channel, thus reducing the time it takes for audience members to hear and see the host. As it may take a while for the SDK to preload a channel, Agora recommends that you call this method as soon as possible after obtaining the channel name and user ID to join a channel. If you join a preloaded channel, leave it and want to rejoin the same channel, you do not need to call this method unless the token for preloading the channel expires. - * Failing to preload a channel does not mean that you can't join a channel, nor will it increase the time of joining a channel. - * One IRtcEngine instance supports preloading 20 channels at most. When exceeding this limit, the latest 20 preloaded channels take effect. - * When calling this method, ensure you set the user role as audience and do not set the audio scenario as AudioScenarioChorus, otherwise, this method does not take effect. - * You also need to make sure that the User Account, channel ID and token passed in for preloading are the same as the values passed in when joining the channel, otherwise, this method does not take effect. - * - * @param token The token generated on your server for authentication. When the token for preloading channels expires, you can update the token based on the number of channels you preload. - * When preloading one channel, calling this method to pass in the new token. - * When preloading more than one channels: - * If you use a wildcard token for all preloaded channels, call updatePreloadChannelToken to update the token. When generating a wildcard token, ensure the user ID is not set as 0. - * If you use different tokens to preload different channels, call this method to pass in your user ID, channel name and the new token. - * @param channelId The channel name that you want to preload. This parameter signifies the channel in which users engage in real-time audio and video interaction. Under the premise of the same App ID, users who fill in the same channel ID enter the same channel for audio and video interaction. The string length must be less than 64 bytes. Supported characters (89 characters in total): - * All lowercase English letters: a to z. - * All uppercase English letters: A to Z. - * All numeric characters: 0 to 9. - * Space - * "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", "," - * @param userAccount The user account. This parameter is used to identify the user in the channel for real-time audio and video engagement. You need to set and manage user accounts yourself and ensure that each user account in the same channel is unique. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported characters are (89 in total): - * The 26 lowercase English letters: a to z. - * The 26 uppercase English letters: A to Z. - * All numeric characters: 0 to 9. - * Space - * "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", "," - * - * @returns - * 0: Success. - * < 0: Failure. - * -2: The parameter is invalid. For example, the User Account is empty. You need to pass in a valid parameter and join the channel again. 
- * -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine object before calling this method. - * -102: The channel name is invalid. You need to pass in a valid channel name and join the channel again. + * @ignore */ abstract preloadChannelWithUserAccount( token: string, @@ -3002,9 +3004,10 @@ export abstract class IRtcEngine { * The remote client: onUserJoined, if the user joining the channel is in the Communication profile or is a host in the Live-broadcasting profile. When the connection between the client and Agora's server is interrupted due to poor network conditions, the SDK tries reconnecting to the server. When the local client successfully rejoins the channel, the SDK triggers the onRejoinChannelSuccess callback on the local client. * This method allows users to join only one channel at a time. * Ensure that the app ID you use to generate the token is the same app ID that you pass in the initialize method; otherwise, you may fail to join the channel by token. + * If you choose the Testing Mode (using an App ID for authentication) for your project and call this method to join a channel, you will automatically exit the channel after 24 hours. * * @param token The token generated on your server for authentication. If you need to join different channels at the same time or switch between channels, Agora recommends using a wildcard token so that you don't need to apply for a new token every time joining a channel. - * @param channelId The channel name. This parameter signifies the channel in which users engage in real-time audio and video interaction. Under the premise of the same App ID, users who fill in the same channel ID enter the same channel for audio and video interaction. The string length must be less than 64 bytes. Supported characters: + * @param channelId The channel name. This parameter signifies the channel in which users engage in real-time audio and video interaction. Under the premise of the same App ID, users who fill in the same channel ID enter the same channel for audio and video interaction. The string length must be less than 64 bytes. Supported characters (89 characters in total): * All lowercase English letters: a to z. * All uppercase English letters: A to Z. * All numeric characters: 0 to 9. @@ -3174,10 +3177,7 @@ export abstract class IRtcEngine { /** * Enables the video module. * - * Call this method either before joining a channel or during a call. If this method is called before joining a channel, the call starts in the video mode; if called during a call, the audio call switches to a video call. Call disableVideo to disable the video mode. A successful call of this method triggers the onRemoteVideoStateChanged callback on the remote client. - * This method enables the internal engine and is valid after leaving the channel. - * Calling this method will reset the entire engine, resulting in a slow response time. Instead of callling this method, you can independently control a specific video module based on your actual needs using the following methods: enableLocalVideo : Whether to enable the camera to create the local video stream. muteLocalVideoStream : Whether to publish the local video stream. muteRemoteVideoStream : Whether to subscribe to and play the remote video stream. muteAllRemoteVideoStreams : Whether to subscribe to and play all remote video streams. - * A successful call of this method resets enableLocalVideo, muteRemoteVideoStream, and muteAllRemoteVideoStreams. Proceed it with caution. 
+ * The video module is disabled by default; call this method to enable it. If you need to disable the video module later, call disableVideo. * * @returns * 0: Success. * < 0: Failure. @@ -3270,15 +3270,13 @@ * Sets the image enhancement options. * * Enables or disables image enhancement, and sets the options. - * Call this method before calling enableVideo or startPreview. + * Call this method after calling enableVideo or startPreview. * This method relies on the image enhancement dynamic library libagora_clear_vision_extension.dll. If the dynamic library is deleted, the function cannot be enabled normally. * This feature has high requirements on device performance. When calling this method, the SDK automatically checks the capabilities of the current device. * * @param enabled Whether to enable the image enhancement function: true : Enable the image enhancement function. false : (Default) Disable the image enhancement function. * @param options The image enhancement options. See BeautyOptions. - * @param type Type of media source. See MediaSourceType. In this method, this parameter supports only the following two settings: - * The default value is UnknownMediaSource. - * If you want to use the second camera to capture video, set this parameter to SecondaryCameraSource. + * @param type Source type of the extension. See MediaSourceType. * * @returns * 0: Success. @@ -3304,7 +3302,7 @@ * When you use an external video source to implement custom video capture, or send an external video source to the SDK, Agora recommends using setExtensionProperty. * This method relies on the image enhancement dynamic library libagora_clear_vision_extension.dll. If the dynamic library is deleted, the function cannot be enabled normally. * - * @param enabled Whether to enable low-light enhancement: true : Enable low-light enhancement. false : (Default) Disable low-light enhancement. + * @param enabled Whether to enable low-light enhancement: true : Enable low-light enhancement. false : (Default) Disable low-light enhancement. * @param options The low-light enhancement options. See LowlightEnhanceOptions. * @param type The type of the video source. See MediaSourceType. * @@ -3371,7 +3369,7 @@ /** * Enables/Disables the virtual background. * - * The virtual background feature enables the local user to replace their original background with a static image, dynamic video, blurred background, or portrait-background segmentation to achieve picture-in-picture effect. Once the virtual background feature is enabled, all users in the channel can see the custom background. Call this method before calling enableVideo or startPreview. + * The virtual background feature enables the local user to replace their original background with a static image, dynamic video, blurred background, or portrait-background segmentation to achieve picture-in-picture effect. Once the virtual background feature is enabled, all users in the channel can see the custom background. Call this method after calling enableVideo or startPreview. * This feature has high requirements on device performance. When calling this method, the SDK automatically checks the capabilities of the current device.
Agora recommends you use virtual background on devices with the following processors: * Snapdragon 700 series 750G and later * Snapdragon 800 series 835 and later @@ -3626,7 +3624,7 @@ export abstract class IRtcEngine { /** * Sets the default video stream type to subscribe to. * - * The SDK defaults to enabling low-quality video stream adaptive mode (AutoSimulcastStream) on the sender side, which means the sender does not actively send low-quality video stream. The receiver can initiate a low-quality video stream request by calling this method, and the sender will automatically start sending low-quality video stream upon receiving the request. By default, users receive the high-quality video stream. Call this method if you want to switch to the low-quality video stream. The SDK will dynamically adjust the size of the corresponding video stream based on the size of the video window to save bandwidth and computing resources. The default aspect ratio of the low-quality video stream is the same as that of the high-quality video stream. According to the current aspect ratio of the high-quality video stream, the system will automatically allocate the resolution, frame rate, and bitrate of the low-quality video stream. Under limited network conditions, if the publisher does not disable the dual-stream mode using enableDualStreamMode (false), the receiver can choose to receive either the high-quality video stream, or the low-quality video stream. The high-quality video stream has a higher resolution and bitrate, while the low-quality video stream has a lower resolution and bitrate. + * The SDK will dynamically adjust the size of the corresponding video stream based on the size of the video window to save bandwidth and computing resources. The default aspect ratio of the low-quality video stream is the same as that of the high-quality video stream. According to the current aspect ratio of the high-quality video stream, the system will automatically allocate the resolution, frame rate, and bitrate of the low-quality video stream. The SDK defaults to enabling low-quality video stream adaptive mode (AutoSimulcastStream) on the sending end, which means the sender does not actively send low-quality video stream. The receiver with the role of the host can initiate a low-quality video stream request by calling this method, and upon receiving the request, the sending end automatically starts sending the low-quality video stream. * Call this method before joining a channel. The SDK does not support changing the default subscribed video stream type after joining a channel. * If you call both this method and setRemoteVideoStreamType, the setting of setRemoteVideoStreamType takes effect. * @@ -3655,7 +3653,11 @@ export abstract class IRtcEngine { /** * Sets the video stream type to subscribe to. * - * Under limited network conditions, if the publisher does not disable the dual-stream mode using enableDualStreamMode (false), the receiver can choose to receive either the high-quality video stream, or the low-quality video stream. The high-quality video stream has a higher resolution and bitrate, while the low-quality video stream has a lower resolution and bitrate. By default, users receive the high-quality video stream. Call this method if you want to switch to the low-quality video stream. The SDK will dynamically adjust the size of the corresponding video stream based on the size of the video window to save bandwidth and computing resources. 
The default aspect ratio of the low-quality video stream is the same as that of the high-quality video stream. According to the current aspect ratio of the high-quality video stream, the system will automatically allocate the resolution, frame rate, and bitrate of the low-quality video stream. The SDK defaults to enabling low-quality video stream adaptive mode (AutoSimulcastStream) on the sender side, which means the sender does not actively send low-quality video stream. The receiver can initiate a low-quality video stream request by calling this method, and the sender will automatically start sending low-quality video stream upon receiving the request. You can call this method either before or after joining a channel. If you call both setRemoteVideoStreamType and setRemoteDefaultVideoStreamType, the setting of setRemoteVideoStreamType takes effect. + * The SDK defaults to enabling low-quality video stream adaptive mode (AutoSimulcastStream) on the sending end, which means the sender does not actively send low-quality video stream. The receiver with the role of the host can initiate a low-quality video stream request by calling this method, and upon receiving the request, the sending end automatically starts sending the low-quality video stream. The SDK will dynamically adjust the size of the corresponding video stream based on the size of the video window to save bandwidth and computing resources. The default aspect ratio of the low-quality video stream is the same as that of the high-quality video stream. According to the current aspect ratio of the high-quality video stream, the system will automatically allocate the resolution, frame rate, and bitrate of the low-quality video stream. + * You can call this method either before or after joining a channel. + * If the publisher has already called setDualStreamMode and set mode to DisableSimulcastStream (never send low-quality video stream), calling this method will not take effect; you should call setDualStreamMode again on the sending end and adjust the settings. + * Calling this method on a receiving end with the audience role does not take effect. + * If you call both setRemoteVideoStreamType and setRemoteDefaultVideoStreamType, the settings in setRemoteVideoStreamType take effect. * * @param uid The user ID. * @param streamType The video stream type, see VideoStreamType. @@ -4089,6 +4091,22 @@ */ abstract setAudioMixingPitch(pitch: number): number; + /** + * Sets the playback speed of the current audio file. + * + * Ensure you call this method after calling startAudioMixing and receiving the onAudioMixingStateChanged callback reporting the state as AudioMixingStatePlaying. + * + * @param speed The playback speed. Agora recommends that you set this to a value between 50 and 400, defined as follows: + * 50: Half the original speed. + * 100: The original speed. + * 400: 4 times the original speed. + * + * @returns + * 0: Success. + * < 0: Failure. + */ + abstract setAudioMixingPlaybackSpeed(speed: number): number; + /** * Retrieves the volume of the audio effects. * @@ -4683,6 +4701,11 @@ */ abstract uploadLogFile(): string; + /** + * @ignore + */ + abstract writeLog(level: LogLevel, fmt: string): number; + /** * Updates the display mode of the local video view. * @@ -4759,7 +4782,7 @@ /** * Sets dual-stream mode configuration on the sender side.
 * - * The SDK defaults to enabling low-quality video stream adaptive mode (AutoSimulcastStream) on the sender side, which means the sender does not actively send low-quality video stream. The receiver can initiate a low-quality video stream request by calling setRemoteVideoStreamType, and the sender then automatically starts sending low-quality video stream upon receiving the request. + * The SDK defaults to enabling low-quality video stream adaptive mode (AutoSimulcastStream) on the sender side, which means the sender does not actively send low-quality video stream. The receiving end with the role of the host can initiate a low-quality video stream request by calling setRemoteVideoStreamType, and upon receiving the request, the sending end automatically starts sending the low-quality video stream. * If you want to modify this behavior, you can call this method and set mode to DisableSimulcastStream (never send low-quality video streams) or EnableSimulcastStream (always send low-quality video streams). * If you want to restore the default behavior after making changes, you can call this method again with mode set to AutoSimulcastStream. The difference and connection between this method and enableDualStreamMode is as follows: * When calling this method and setting mode to DisableSimulcastStream, it has the same effect as calling enableDualStreamMode and setting enabled to false. @@ -5053,9 +5076,7 @@ * @param provider The name of the extension provider. * @param extension The name of the extension. * @param enable Whether to enable the extension: true : Enable the extension. false : Disable the extension. - * @param type Type of media source. See MediaSourceType. In this method, this parameter supports only the following two settings: - * The default value is UnknownMediaSource. - * If you want to use the second camera to capture video, set this parameter to SecondaryCameraSource. + * @param type Source type of the extension. See MediaSourceType. * * @returns * 0: Success. @@ -5078,9 +5099,7 @@ * @param extension The name of the extension. * @param key The key of the extension. * @param value The value of the extension key. - * @param type Type of media source. See MediaSourceType. In this method, this parameter supports only the following two settings: - * The default value is UnknownMediaSource. - * If you want to use the second camera to capture video, set this parameter to SecondaryCameraSource. + * @param type Source type of the extension. See MediaSourceType. * * @returns * 0: Success. @@ -5141,7 +5160,7 @@ * You can call this method either before or after joining a channel. * * @param enabled Enables or disables in-ear monitoring. true : Enables in-ear monitoring. false : (Default) Disables in-ear monitoring. - * @param includeAudioFilters The audio filter of in-ear monitoring: See EarMonitoringFilterType. + * @param includeAudioFilters The audio filter types of in-ear monitoring. See EarMonitoringFilterType. * * @returns * 0: Success. @@ -5207,7 +5226,15 @@ ): number; /** - * @ignore + * Registers an extension. + * + * After the extension is loaded, you can call this method to register the extension. + * Before calling this method, you need to call loadExtensionProvider to load the extension first.
+ * For extensions external to the SDK (such as those from Extensions Marketplace and SDK Extensions), you need to call this method before calling setExtensionProperty. + * + * @param provider The name of the extension provider. + * @param extension The name of the extension. + * @param type Source type of the extension. See MediaSourceType. */ abstract registerExtension( provider: string, extension: string, @@ -5219,6 +5246,8 @@ * Sets the camera capture configuration. * * Call this method before enabling local camera capture, such as before calling startPreview and joinChannel. + * To adjust the camera focal length configuration, it is recommended to call queryCameraFocalLengthCapability first to check the device's focal length capabilities, and then configure based on the query results. + * Due to limitations on some Android devices, even if you set the focal length type according to the results returned in queryCameraFocalLengthCapability, the settings may not take effect. * * @param config The camera capture configuration. See CameraCapturerConfiguration. * @@ -5268,7 +5297,9 @@ /** * Switches between front and rear cameras. * - * This method must be called after the camera is successfully enabled, that is, after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). + * You can call this method to dynamically switch cameras based on the actual camera availability during the app's runtime, without having to restart the video stream or reconfigure the video source. + * This method must be called after the camera is successfully enabled, that is, after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). + * This method only switches the camera for the video stream captured by the first camera, that is, the video source set to VideoSourceCamera (0) when calling startCameraCapture. * * @returns * 0: Success. @@ -5330,11 +5361,12 @@ abstract isCameraAutoFocusFaceModeSupported(): boolean; /** - * Sets the camera zoom ratio. + * Sets the camera zoom factor. * - * You must call this method after enableVideo. The setting result will take effect after the camera is successfully turned on, that is, after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). + * For iOS devices equipped with multi-lens rear cameras, such as those featuring dual-camera (wide-angle and ultra-wide-angle) or triple-camera (wide-angle, ultra-wide-angle, and telephoto), you can call setCameraCapturerConfiguration first to set the cameraFocalLengthType as CameraFocalLengthDefault (0) (standard lens). Then, adjust the camera zoom factor to a value less than 1.0. This configuration allows you to capture video with an ultra-wide-angle perspective. + * You must call this method after enableVideo. The setting result will take effect after the camera is successfully turned on, that is, after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). * - * @param factor The camera zoom ratio. The value ranges between 1.0 and the maximum zoom supported by the device. You can get the maximum zoom ratio supported by the device by calling the getCameraMaxZoomFactor method. + * @param factor The camera zoom factor.
For devices that do not support ultra-wide-angle, the value ranges from 1.0 to the maximum zoom factor; for devices that support ultra-wide-angle, the value ranges from 0.5 to the maximum zoom factor. You can get the maximum zoom factor supported by the device by calling the getCameraMaxZoomFactor method. * * @returns * The camera zoom factor value, if successful. @@ -5390,7 +5422,6 @@ * Enables the camera flash. * * You must call this method after enableVideo. The setting result will take effect after the camera is successfully turned on, that is, after the SDK triggers the onLocalVideoStateChanged callback and returns the local video state as LocalVideoStreamStateCapturing (1). - * This method is for Android and iOS only. * * @param isOn Whether to turn on the camera flash: true : Turn on the flash. false : (Default) Turn off the flash. * @@ -5495,6 +5526,19 @@ */ abstract setCameraAutoExposureFaceModeEnabled(enabled: boolean): number; + /** + * Sets the camera stabilization mode. + * + * This method applies to iOS only. The camera stabilization mode is off by default. You need to call this method to turn it on and set the appropriate stabilization mode. + * + * @param mode Camera stabilization mode. See CameraStabilizationMode. + * + * @returns + * 0: Success. + * < 0: Failure. + */ + abstract setCameraStabilizationMode(mode: CameraStabilizationMode): number; + /** * Sets the default audio playback route. * @@ -5559,6 +5603,29 @@ */ abstract setRouteInCommunicationMode(route: number): number; + /** + * Checks whether the camera supports portrait center stage. + * + * This method applies to iOS only. Before calling enableCameraCenterStage to enable portrait center stage, it is recommended to call this method to check if the current device supports the feature. + * + * @returns + * true : The current camera supports the portrait center stage. false : The current camera does not support the portrait center stage. + */ + abstract isCameraCenterStageSupported(): boolean; + + /** + * Enables or disables portrait center stage. + * + * The portrait center stage feature is off by default. You need to call this method to turn it on. If you need to disable this feature, you need to call this method again and set enabled to false. This method applies to iOS only. + * + * @param enabled Whether to enable the portrait center stage: true : Enable portrait center stage. false : Disable portrait center stage. + * + * @returns + * 0: Success. + * < 0: Failure. + */ + abstract enableCameraCenterStage(enabled: boolean): number; + /** * @ignore */ @@ -5679,7 +5746,7 @@ * If you are using the custom audio source instead of the SDK to capture audio, Agora recommends you add the keep-alive processing logic to your application to avoid screen sharing stopping when the application goes to the background. * This feature requires high-performance device, and Agora recommends that you use it on iPhone X and later models. * This method relies on the iOS screen sharing dynamic library AgoraReplayKitExtension.xcframework. If the dynamic library is deleted, screen sharing cannot be enabled normally. - * On the Android platform, make sure the user has granted the app screen capture permission. + * On the Android platform, if the user has not granted the app screen capture permission, the SDK reports the onPermissionError (2) callback.
* On Android 9 and later, to avoid the application being killed by the system after going to the background, Agora recommends you add the foreground service android.permission.FOREGROUND_SERVICE to the /app/Manifests/AndroidManifest.xml file. * Due to performance limitations, screen sharing is not supported on Android TV. * Due to system limitations, if you are using Huawei phones, do not adjust the video encoding resolution of the screen sharing stream during the screen sharing, or you could experience crashes. @@ -5691,7 +5758,9 @@ export abstract class IRtcEngine { * @returns * 0: Success. * < 0: Failure. - * -2: The parameter is null. + * -2 (iOS platform): Empty parameter. + * -2 (Android platform): The system version is too low. Ensure that the Android API level is not lower than 21. + * -3 (Android platform): Unable to capture system audio. Ensure that the Android API level is not lower than 29. */ abstract startScreenCapture(captureParams: ScreenCaptureParameters2): number; @@ -5723,6 +5792,19 @@ export abstract class IRtcEngine { */ abstract queryScreenCaptureCapability(): number; + /** + * Queries the focal length capability supported by the camera. + * + * If you want to enable the wide-angle or ultra-wide-angle mode for camera capture, it is recommended to start by calling this method to check whether the device supports the required focal length capability. Then, adjust the camera's focal length configuration based on the query result by calling setCameraCapturerConfiguration, ensuring the best camera capture performance. + * + * @returns + * Returns an object containing the following properties: focalLengthInfos : An array of FocalLengthInfo objects, which contain the camera's orientation and focal length type. size : The number of focal length information items retrieved. + */ + abstract queryCameraFocalLengthCapability(): { + focalLengthInfos: FocalLengthInfo[]; + size: number; + }; + /** * Sets the screen sharing scenario. * @@ -5748,7 +5830,7 @@ export abstract class IRtcEngine { /** * Retrieves the call ID. * - * When a user joins a channel on a client, a callId is generated to identify the call from the client. Some methods, such as rate and complain, must be called after the call ends to submit feedback to the SDK. These methods require the callId parameter. Call this method after joining a channel. + * When a user joins a channel on a client, a callId is generated to identify the call from the client. You can call this method to get the callId parameter, and pass it in when calling methods such as rate and complain. Call this method after joining a channel. * * @returns * The current call ID, if the method succeeds. @@ -5851,11 +5933,6 @@ export abstract class IRtcEngine { * Starts the local video mixing. * * After calling this method, you can merge multiple video streams into one video stream locally. For example, you can merge the video streams captured by the camera, screen sharing, media player, remote video, video files, images, etc. into one video stream, and then publish the mixed video stream to the channel. - * Local video mixing requires more CPU resources. Therefore, Agora recommends enabling this function on devices with higher performance. - * If you need to mix locally captured video streams, the SDK supports the following capture combinations: - * On Android and iOS platforms, it supports video streams captured by up to 2 cameras (the device itself needs to support dual cameras or supports external cameras) + 1 screen sharing stream. 
- * If you need to mix the locally collected video streams, you need to call this method after startCameraCapture or startScreenCapture. - * If you want to publish the mixed video stream to the channel, you need to set publishTranscodedVideoTrack in ChannelMediaOptions to true when calling joinChannel or updateChannelMediaOptions. * * @param config Configuration of the local video mixing, see LocalTranscoderConfiguration. * The maximum resolution of each video stream participating in the local video mixing is 4096 × 2160. If this limit is exceeded, video mixing does not take effect. @@ -5914,7 +5991,8 @@ * You can call this method to start capturing video from one or more cameras by specifying sourceType. On the iOS platform, if you want to enable multi-camera capture, you need to call enableMultiCamera and set enabled to true before calling this method. * * @param sourceType The type of the video source. See VideoSourceType. - * On the mobile platforms, you can capture video from up to 2 cameras, provided the device has dual cameras or supports an external camera. + * On iOS devices, you can capture video from up to 2 cameras, provided the device has multiple cameras or supports external cameras. + * On Android devices, you can capture video from up to 4 cameras, provided the device has multiple cameras or supports external cameras. * @param config The configuration of the video capture. See CameraCapturerConfiguration. On the iOS platform, this parameter has no practical function. Use the config parameter in enableMultiCamera instead to set the video capture configuration. * * @returns @@ -5988,11 +6066,11 @@ abstract registerEventHandler(eventHandler: IRtcEngineEventHandler): boolean; /** - * Removes the specified callback handler. + * Removes the specified callback events. * - * This method removes the specified callback handler. For callback events that you want to listen for only once, call this method to remove the relevant callback handler after you have received them. + * You can call this method to remove all added callback events. * - * @param eventHandler The callback handler to be deleted. See IRtcEngineEventHandler. + * @param eventHandler Callback events to be removed. See IRtcEngineEventHandler. * * @returns * true : Success. false : Failure. @@ -6049,7 +6127,7 @@ * * In scenarios requiring high security, Agora recommends calling this method to enable the built-in encryption before joining a channel. All users in the same channel must use the same encryption mode and encryption key. After the user leaves the channel, the SDK automatically disables the built-in encryption. To enable the built-in encryption, call this method before the user joins the channel again. If you enable the built-in encryption, you cannot use the Media Push function. * - * @param enabled Whether to enable built-in encryption: true : Enable the built-in encryption. false : Disable the built-in encryption. + * @param enabled Whether to enable built-in encryption: true : Enable the built-in encryption. false : (Default) Disable the built-in encryption. * @param config Built-in encryption configurations. See EncryptionConfig. * * @returns @@ -6271,18 +6349,19 @@ /** * Joins the channel with a user account, and configures whether to automatically subscribe to audio or video streams after joining the channel.
 * - * This method allows a user to join the channel with the user account. After the user successfully joins the channel, the SDK triggers the following callbacks: + * To ensure smooth communication, use the same parameter type to identify the user. For example, if a user joins the channel with a user ID, then ensure all the other users use the user ID too. The same applies to the user account. If a user joins the channel with the Agora Web SDK, ensure that the ID of the user is set to the same parameter type. + * If you choose the Testing Mode (using an App ID for authentication) for your project and call this method to join a channel, you will automatically exit the channel after 24 hours. This method allows a user to join the channel with the user account. After the user successfully joins the channel, the SDK triggers the following callbacks: * The local client: onLocalUserRegistered, onJoinChannelSuccess and onConnectionStateChanged callbacks. - * The remote client: The onUserJoined callback, if the user is in the COMMUNICATION profile, and the onUserInfoUpdated callback if the user is a host in the LIVE_BROADCASTING profile. Once a user joins the channel, the user subscribes to the audio and video streams of all the other users in the channel by default, giving rise to usage and billing calculation. To stop subscribing to a specified stream or all remote streams, call the corresponding mute methods. To ensure smooth communication, use the same parameter type to identify the user. For example, if a user joins the channel with a user ID, then ensure all the other users use the user ID too. The same applies to the user account. If a user joins the channel with the Agora Web SDK, ensure that the ID of the user is set to the same parameter type. + * The remote client: The onUserJoined callback, if the user is in the COMMUNICATION profile, and the onUserInfoUpdated callback if the user is a host in the LIVE_BROADCASTING profile. Once a user joins the channel, the user subscribes to the audio and video streams of all the other users in the channel by default, giving rise to usage and billing calculation. To stop subscribing to a specified stream or all remote streams, call the corresponding mute methods. * * @param token The token generated on your server for authentication. If you need to join different channels at the same time or switch between channels, Agora recommends using a wildcard token so that you don't need to apply for a new token every time joining a channel. - * @param channelId The channel name. This parameter signifies the channel in which users engage in real-time audio and video interaction. Under the premise of the same App ID, users who fill in the same channel ID enter the same channel for audio and video interaction. The string length must be less than 64 bytes. Supported characters: + * @param channelId The channel name. This parameter signifies the channel in which users engage in real-time audio and video interaction. Under the premise of the same App ID, users who fill in the same channel ID enter the same channel for audio and video interaction. The string length must be less than 64 bytes. Supported characters (89 characters in total): + * All lowercase English letters: a to z. + * All uppercase English letters: A to Z. + * All numeric characters: 0 to 9. + * Space + * "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", "," - * @param userAccount The user account. This parameter is used to identify the user in the channel for real-time audio and video engagement. You need to set and manage user accounts yourself and ensure that each user account in the same channel is unique. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported characters are (89 in total): + * @param userAccount The user account. This parameter is used to identify the user in the channel for real-time audio and video engagement. You need to set and manage user accounts yourself and ensure that each user account in the same channel is unique. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported characters are as follows (89 in total): + * The 26 lowercase English letters: a to z. + * The 26 uppercase English letters: A to Z. + * All numeric characters: 0 to 9. @@ -6316,13 +6395,13 @@ export abstract class IRtcEngine { * The remote client: The onUserJoined callback, if the user is in the COMMUNICATION profile, and the onUserInfoUpdated callback if the user is a host in the LIVE_BROADCASTING profile. Once a user joins the channel, the user subscribes to the audio and video streams of all the other users in the channel by default, giving rise to usage and billing calculation. To stop subscribing to a specified stream or all remote streams, call the corresponding mute methods. To ensure smooth communication, use the same parameter type to identify the user. For example, if a user joins the channel with a user ID, then ensure all the other users use the user ID too. The same applies to the user account. If a user joins the channel with the Agora Web SDK, ensure that the ID of the user is set to the same parameter type. * * @param token The token generated on your server for authentication. If you need to join different channels at the same time or switch between channels, Agora recommends using a wildcard token so that you don't need to apply for a new token every time joining a channel. - * @param channelId The channel name. This parameter signifies the channel in which users engage in real-time audio and video interaction. Under the premise of the same App ID, users who fill in the same channel ID enter the same channel for audio and video interaction. The string length must be less than 64 bytes. Supported characters: + * @param channelId The channel name. This parameter signifies the channel in which users engage in real-time audio and video interaction. Under the premise of the same App ID, users who fill in the same channel ID enter the same channel for audio and video interaction. The string length must be less than 64 bytes. Supported characters (89 characters in total): + * All lowercase English letters: a to z. + * All uppercase English letters: A to Z. + * All numeric characters: 0 to 9. + * Space + * "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", "," - * @param userAccount The user account. This parameter is used to identify the user in the channel for real-time audio and video engagement. You need to set and manage user accounts yourself and ensure that each user account in the same channel is unique. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported characters are (89 in total): + * @param userAccount The user account. This parameter is used to identify the user in the channel for real-time audio and video engagement. You need to set and manage user accounts yourself and ensure that each user account in the same channel is unique. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported characters are as follows (89 in total): + * The 26 lowercase English letters: a to z. + * The 26 uppercase English letters: A to Z. + * All numeric characters: 0 to 9. @@ -6764,6 +6843,11 @@ */ abstract isFeatureAvailableOnDevice(type: FeatureType): boolean; + /** + * @ignore + */ + abstract sendAudioMetadata(metadata: string, length: number): number; + /** * @ignore */ @@ -7278,13 +7362,17 @@ export class VideoDeviceInfo { } /** - * The AudioDeviceInfo class that contains the ID and device name of the audio devices. + * The AudioDeviceInfo class that contains the ID, name and type of the audio devices. */ export class AudioDeviceInfo { /** * The device ID. */ deviceId?: string; + /** + * Output parameter; the type of the audio device, such as built-in, USB, or HDMI. + */ + deviceTypeName?: string; /** * The device name. */ diff --git a/src/IAgoraRtcEngineEx.ts b/src/IAgoraRtcEngineEx.ts index d09ab0de..1a168af8 100644 --- a/src/IAgoraRtcEngineEx.ts +++ b/src/IAgoraRtcEngineEx.ts @@ -51,6 +51,7 @@ export abstract class IRtcEngineEx extends IRtcEngine { * If you are already in a channel, you cannot rejoin it with the same user ID. * If you want to join the same channel from different devices, ensure that the user IDs are different for all devices. * Ensure that the App ID you use to generate the token is the same as the App ID used when creating the IRtcEngine instance. + * If you choose the Testing Mode (using an App ID for authentication) for your project and call this method to join a channel, you will automatically exit the channel after 24 hours. * * @param token The token generated on your server for authentication. If you need to join different channels at the same time or switch between channels, Agora recommends using a wildcard token so that you don't need to apply for a new token every time joining a channel. * @param connection The connection information. See RtcConnection. @@ -174,7 +175,9 @@ export abstract class IRtcEngineEx extends IRtcEngine { /** * Sets the video stream type to subscribe to. * - * The SDK defaults to enabling low-quality video stream adaptive mode (AutoSimulcastStream) on the sender side, which means the sender does not actively send low-quality video stream. The receiver can initiate a low-quality video stream request by calling this method, and the sender will automatically start sending low-quality video stream upon receiving the request. By default, users receive the high-quality video stream. Call this method if you want to switch to the low-quality video stream. The SDK will dynamically adjust the size of the corresponding video stream based on the size of the video window to save bandwidth and computing resources. The default aspect ratio of the low-quality video stream is the same as that of the high-quality video stream. According to the current aspect ratio of the high-quality video stream, the system will automatically allocate the resolution, frame rate, and bitrate of the low-quality video stream.
Under limited network conditions, if the publisher does not disable the dual-stream mode using enableDualStreamModeEx (false), the receiver can choose to receive either the high-quality video stream, or the low-quality video stream. The high-quality video stream has a higher resolution and bitrate, while the low-quality video stream has a lower resolution and bitrate. + * The SDK will dynamically adjust the size of the corresponding video stream based on the size of the video window to save bandwidth and computing resources. The default aspect ratio of the low-quality video stream is the same as that of the high-quality video stream. According to the current aspect ratio of the high-quality video stream, the system will automatically allocate the resolution, frame rate, and bitrate of the low-quality video stream. The SDK defaults to enabling low-quality video stream adaptive mode (AutoSimulcastStream) on the sending end, which means the sender does not actively send low-quality video stream. The receiver with the role of the host can initiate a low-quality video stream request by calling this method, and upon receiving the request, the sending end automatically starts sending the low-quality video stream. + * If the publisher has already called setDualStreamModeEx and set mode to DisableSimulcastStream (never send low-quality video stream), calling this method will not take effect; you should call setDualStreamModeEx again on the sending end and adjust the settings. + * Calling this method on a receiving end with the audience role does not take effect. * * @param uid The user ID. * @param streamType The video stream type, see VideoStreamType. @@ -489,7 +492,17 @@ export abstract class IRtcEngineEx extends IRtcEngine { abstract getConnectionStateEx(connection: RtcConnection): ConnectionStateType; /** - * @ignore + * Enables or disables the built-in encryption. + * + * All users in the same channel must use the same encryption mode and encryption key. After the user leaves the channel, the SDK automatically disables the built-in encryption. To enable the built-in encryption, call this method before the user joins the channel again. In scenarios requiring high security, Agora recommends calling this method to enable the built-in encryption before joining a channel. + * + * @param connection The connection information. See RtcConnection. + * @param enabled Whether to enable built-in encryption: true : Enable the built-in encryption. false : (Default) Disable the built-in encryption. + * @param config Built-in encryption configurations. See EncryptionConfig. + * + * @returns + * 0: Success. + * < 0: Failure. */ abstract enableEncryptionEx( connection: RtcConnection, @@ -777,7 +790,7 @@ export abstract class IRtcEngineEx extends IRtcEngine { * * After you enable dual-stream mode, you can call setRemoteVideoStreamType to choose to receive either the high-quality video stream or the low-quality video stream on the subscriber side. You can call this method to enable or disable the dual-stream mode on the publisher side. Dual streams are a pairing of a high-quality video stream and a low-quality video stream: * High-quality video stream: High bitrate, high resolution. - * Low-quality video stream: Low bitrate, low resolution.
Deprecated: This method is deprecated as of v4.2.0. Use setDualStreamModeEx instead. This method is applicable to all types of streams from the sender, including but not limited to video streams collected from cameras, screen sharing streams, and custom-collected video streams. * * @param enabled Whether to enable dual-stream mode: true : Enable dual-stream mode. false : (Default) Disable dual-stream mode. * @param streamConfig The configuration of the low-quality video stream. See SimulcastStreamConfig. When setting mode to DisableSimulcastStream, setting streamConfig will not take effect. @@ -796,7 +809,7 @@ export abstract class IRtcEngineEx extends IRtcEngine { /** * Sets the dual-stream mode on the sender side. * - * The SDK defaults to enabling low-quality video stream adaptive mode (AutoSimulcastStream) on the sender side, which means the sender does not actively send low-quality video stream. The receiver can initiate a low-quality video stream request by calling setRemoteVideoStreamTypeEx, and the sender will automatically start sending low-quality video stream upon receiving the request. + * The SDK defaults to enabling low-quality video stream adaptive mode (AutoSimulcastStream) on the sending end, which means the sender does not actively send low-quality video stream. The receiver with the role of the host can initiate a low-quality video stream request by calling setRemoteVideoStreamTypeEx, and upon receiving the request, the sending end automatically starts sending the low-quality video stream. * If you want to modify this behavior, you can call this method and set mode to DisableSimulcastStream (never send low-quality video streams) or EnableSimulcastStream (always send low-quality video streams). * If you want to restore the default behavior after making changes, you can call this method again with mode set to AutoSimulcastStream. The difference and connection between this method and enableDualStreamModeEx is as follows: * When calling this method and setting mode to DisableSimulcastStream, it has the same effect as enableDualStreamModeEx (false). @@ -891,4 +904,26 @@ export abstract class IRtcEngineEx extends IRtcEngine { connection: RtcConnection, parameters: string ): number; + + /** + * Gets the call ID with the connection ID. + * + * Call this method after joining a channel. When a user joins a channel on a client, a callId is generated to identify the call from the client. You can call this method to get the callId parameter, and pass it in when calling methods such as rate and complain. + * + * @param connection The connection information. See RtcConnection. + * + * @returns + * The current call ID, if the method succeeds. + * An empty string, if the method call fails. 
+ */ + abstract getCallIdEx(connection: RtcConnection): string; + + /** + * @ignore + */ + abstract sendAudioMetadataEx( + connection: RtcConnection, + metadata: string, + length: number + ): number; } diff --git a/src/__tests__/MediaEngineInternal.test.ts b/src/__tests__/MediaEngineInternal.test.ts index 20cedffa..d8c586b2 100644 --- a/src/__tests__/MediaEngineInternal.test.ts +++ b/src/__tests__/MediaEngineInternal.test.ts @@ -21,9 +21,13 @@ jest.mock('../specs', () => ({ test('addListener', () => { const engine = createAgoraRtcEngine().getMediaEngine(); const callback = jest.fn(); + const callback2 = jest.fn(); engine.addListener('onCaptureVideoFrame', callback); + engine.addListener('onFaceInfo', callback2); emitEvent('onCaptureVideoFrame', EVENT_PROCESSORS.IVideoFrameObserver, {}); + emitEvent('onFaceInfo', EVENT_PROCESSORS.IFaceInfoObserver, {}); expect(callback).toBeCalledTimes(1); + expect(callback2).toBeCalledTimes(1); }); test('addListenerWithSameEventTypeAndCallback', () => { @@ -48,10 +52,15 @@ test('addListenerWithSameCallback', () => { test('removeListener', () => { const engine = createAgoraRtcEngine().getMediaEngine(); const callback = jest.fn(); + const callback2 = jest.fn(); engine.addListener('onCaptureVideoFrame', callback); + engine.addListener('onFaceInfo', callback2); engine.removeListener('onCaptureVideoFrame', callback); + engine.removeListener('onFaceInfo', callback2); emitEvent('onCaptureVideoFrame', EVENT_PROCESSORS.IVideoFrameObserver, {}); + emitEvent('onFaceInfo', EVENT_PROCESSORS.IFaceInfoObserver, {}); expect(callback).not.toBeCalled(); + expect(callback2).not.toBeCalled(); }); test('removeListenerWithoutCallback', () => { @@ -79,13 +88,17 @@ test('removeAllListeners', () => { const engine = createAgoraRtcEngine().getMediaEngine(); const callback1 = jest.fn(); const callback2 = jest.fn(); + const callback3 = jest.fn(); engine.addListener('onCaptureVideoFrame', callback1); engine.addListener('onRecordAudioFrame', callback2); + engine.addListener('onFaceInfo', callback3); engine.removeAllListeners(); emitEvent('onCaptureVideoFrame', EVENT_PROCESSORS.IVideoFrameObserver, {}); emitEvent('onRecordAudioFrame', EVENT_PROCESSORS.IAudioFrameObserver, {}); + emitEvent('onFaceInfo', EVENT_PROCESSORS.IFaceInfoObserver, {}); expect(callback1).not.toBeCalled(); expect(callback2).not.toBeCalled(); + expect(callback3).not.toBeCalled(); }); import { EVENT_PROCESSORS, emitEvent } from '../internal/IrisApiEngine'; diff --git a/src/extension/IAgoraMediaEngineExtension.ts b/src/extension/IAgoraMediaEngineExtension.ts index d532270b..8d141948 100644 --- a/src/extension/IAgoraMediaEngineExtension.ts +++ b/src/extension/IAgoraMediaEngineExtension.ts @@ -1,12 +1,14 @@ import { IAudioFrameObserver, + IFaceInfoObserver, IVideoEncodedFrameObserver, IVideoFrameObserver, } from '../AgoraMediaBase'; export type IMediaEngineEvent = IAudioFrameObserver & IVideoFrameObserver & - IVideoEncodedFrameObserver; + IVideoEncodedFrameObserver & + IFaceInfoObserver; declare module '../IAgoraMediaEngine' { interface IMediaEngine { diff --git a/src/impl/AgoraMediaBaseImpl.ts b/src/impl/AgoraMediaBaseImpl.ts index b5d5ad63..81a89f89 100644 --- a/src/impl/AgoraMediaBaseImpl.ts +++ b/src/impl/AgoraMediaBaseImpl.ts @@ -3,6 +3,7 @@ import { IAudioFrameObserverBase, IAudioPcmFrameSink, IAudioSpectrumObserver, + IFaceInfoObserver, IMediaRecorderObserver, IVideoEncodedFrameObserver, IVideoFrameMetaInfo, @@ -190,6 +191,20 @@ export function processIVideoFrameObserver( } } +export function 
diff --git a/src/impl/IAgoraMediaEngineImpl.ts b/src/impl/IAgoraMediaEngineImpl.ts
index 7c13ab64..ddc9e7a5 100644
--- a/src/impl/IAgoraMediaEngineImpl.ts
+++ b/src/impl/IAgoraMediaEngineImpl.ts
@@ -9,6 +9,7 @@ import {
   ExternalVideoFrame,
   ExternalVideoSourceType,
   IAudioFrameObserver,
+  IFaceInfoObserver,
   IVideoEncodedFrameObserver,
   IVideoFrameObserver,
 } from '../AgoraMediaBase';
@@ -73,6 +74,24 @@ export class IMediaEngineImpl implements IMediaEngine {
     return 'MediaEngine_registerVideoEncodedFrameObserver_d45d579';
   }
 
+  registerFaceInfoObserver(observer: IFaceInfoObserver): number {
+    const apiType = this.getApiTypeFromRegisterFaceInfoObserver(observer);
+    const jsonParams = {
+      observer: observer,
+      toJSON: () => {
+        return {};
+      },
+    };
+    const jsonResults = callIrisApi.call(this, apiType, jsonParams);
+    return jsonResults.result;
+  }
+
+  protected getApiTypeFromRegisterFaceInfoObserver(
+    observer: IFaceInfoObserver
+  ): string {
+    return 'MediaEngine_registerFaceInfoObserver_0303ed6';
+  }
+
   pushAudioFrame(frame: AudioFrame, trackId: number = 0): number {
     const apiType = this.getApiTypeFromPushAudioFrame(frame, trackId);
     const jsonParams = {
@@ -431,6 +450,24 @@ export class IMediaEngineImpl implements IMediaEngine {
   ): string {
     return 'MediaEngine_unregisterVideoEncodedFrameObserver';
   }
+
+  unregisterFaceInfoObserver(observer: IFaceInfoObserver): number {
+    const apiType = this.getApiTypeFromUnregisterFaceInfoObserver(observer);
+    const jsonParams = {
+      observer: observer,
+      toJSON: () => {
+        return {};
+      },
+    };
+    const jsonResults = callIrisApi.call(this, apiType, jsonParams);
+    return jsonResults.result;
+  }
+
+  protected getApiTypeFromUnregisterFaceInfoObserver(
+    observer: IFaceInfoObserver
+  ): string {
+    return 'MediaEngine_unregisterFaceInfoObserver';
+  }
 }
 
 import { callIrisApi } from '../internal/IrisApiEngine';
diff --git a/src/impl/IAgoraMediaPlayerImpl.ts b/src/impl/IAgoraMediaPlayerImpl.ts
index aff053f0..eaec2138 100644
--- a/src/impl/IAgoraMediaPlayerImpl.ts
+++ b/src/impl/IAgoraMediaPlayerImpl.ts
@@ -958,7 +958,7 @@ export class IMediaPlayerImpl implements IMediaPlayer {
     key: string,
     value: number
   ): string {
-    return 'MediaPlayer_setPlayerOptionInInt';
+    return 'MediaPlayer_setPlayerOption_4d05d29';
   }
 
   setPlayerOptionInString(key: string, value: string): number {
@@ -981,7 +981,7 @@ export class IMediaPlayerImpl implements IMediaPlayer {
     key: string,
     value: string
   ): string {
-    return 'MediaPlayer_setPlayerOptionInString';
+    return 'MediaPlayer_setPlayerOption_ccad422';
   }
 }
diff --git a/src/impl/IAgoraMusicContentCenterImpl.ts b/src/impl/IAgoraMusicContentCenterImpl.ts
index ca6d8ff5..b5a3cd54 100644
--- a/src/impl/IAgoraMusicContentCenterImpl.ts
+++ b/src/impl/IAgoraMusicContentCenterImpl.ts
@@ -194,7 +194,7 @@ export class IMusicPlayerImpl extends IMediaPlayerImpl implements IMusicPlayer {
     songCode: number,
     startPos: number = 0
   ): string {
-    return 'MusicPlayer_openWithSongCode';
+    return 'MusicPlayer_open_303b92e';
   }
 }
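The observer-style counterpart to the listener API, backed by the registerFaceInfoObserver/unregisterFaceInfoObserver implementations above (a sketch; it assumes the package root re-exports IFaceInfoObserver the way it does the other AgoraMediaBase observer interfaces):

import { createAgoraRtcEngine, IFaceInfoObserver } from 'react-native-agora';

const observer: IFaceInfoObserver = {
  onFaceInfo: (outFaceInfo: string) => {
    // Parse or forward the face-detection JSON here.
  },
};
const mediaEngine = createAgoraRtcEngine().getMediaEngine();
mediaEngine.registerFaceInfoObserver(observer);
// ...later, to stop receiving callbacks:
mediaEngine.unregisterFaceInfoObserver(observer);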
diff --git a/src/impl/IAgoraRtcEngineExImpl.ts b/src/impl/IAgoraRtcEngineExImpl.ts
index 2f8b2514..db00c4a8 100644
--- a/src/impl/IAgoraRtcEngineExImpl.ts
+++ b/src/impl/IAgoraRtcEngineExImpl.ts
@@ -1512,6 +1512,59 @@ export class IRtcEngineExImpl extends IRtcEngineImpl implements IRtcEngineEx {
   ): string {
     return 'RtcEngineEx_setParametersEx_8225ea3';
   }
+
+  getCallIdEx(connection: RtcConnection): string {
+    const apiType = this.getApiTypeFromGetCallIdEx(connection);
+    const jsonParams = {
+      connection: connection,
+      toJSON: () => {
+        return {
+          connection: connection,
+        };
+      },
+    };
+    const jsonResults = callIrisApi.call(this, apiType, jsonParams);
+    const callId = jsonResults.callId;
+    return callId;
+  }
+
+  protected getApiTypeFromGetCallIdEx(connection: RtcConnection): string {
+    return 'RtcEngineEx_getCallIdEx_b13f7c4';
+  }
+
+  sendAudioMetadataEx(
+    connection: RtcConnection,
+    metadata: string,
+    length: number
+  ): number {
+    const apiType = this.getApiTypeFromSendAudioMetadataEx(
+      connection,
+      metadata,
+      length
+    );
+    const jsonParams = {
+      connection: connection,
+      metadata: metadata,
+      length: length,
+      toJSON: () => {
+        return {
+          connection: connection,
+          metadata: metadata,
+          length: length,
+        };
+      },
+    };
+    const jsonResults = callIrisApi.call(this, apiType, jsonParams);
+    return jsonResults.result;
+  }
+
+  protected getApiTypeFromSendAudioMetadataEx(
+    connection: RtcConnection,
+    metadata: string,
+    length: number
+  ): string {
+    return 'RtcEngineEx_sendAudioMetadataEx_e2bf1c4';
+  }
 }
 
 import { callIrisApi } from '../internal/IrisApiEngine';
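Sketch of the sending side this implementation enables (channelId and localUid are hypothetical; the metadata travels with the audio stream and surfaces on remote clients via the new onAudioMetadataReceived callback wired up below):

import { createAgoraRtcEngine, IRtcEngineEx, RtcConnection } from 'react-native-agora';

const engine = createAgoraRtcEngine() as IRtcEngineEx;
const connection: RtcConnection = { channelId: 'demo', localUid: 1234 };
const metadata = 'lyric-line-42';
// length mirrors the native signature, which takes the byte length explicitly.
engine.sendAudioMetadataEx(connection, metadata, metadata.length);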
diff --git a/src/impl/IAgoraRtcEngineImpl.ts b/src/impl/IAgoraRtcEngineImpl.ts
index b4c08d1b..3eb536a1 100644
--- a/src/impl/IAgoraRtcEngineImpl.ts
+++ b/src/impl/IAgoraRtcEngineImpl.ts
@@ -7,6 +7,7 @@ import {
   AudioScenarioType,
   AudioSessionOperationRestriction,
   BeautyOptions,
+  CameraStabilizationMode,
   ChannelMediaRelayConfiguration,
   ChannelProfileType,
   ClientRoleOptions,
@@ -19,6 +20,7 @@ import {
   EarMonitoringFilterType,
   EchoTestConfiguration,
   EncryptionConfig,
+  FocalLengthInfo,
   HeadphoneEqualizerPreset,
   IAudioEncodedFrameObserver,
   LastmileProbeConfig,
@@ -897,6 +899,17 @@ export function processIRtcEngineEventHandler(
       }
       break;
 
+    case 'onAudioMetadataReceived':
+      if (handler.onAudioMetadataReceived !== undefined) {
+        handler.onAudioMetadataReceived(
+          jsonParams.connection,
+          jsonParams.uid,
+          jsonParams.metadata,
+          jsonParams.length
+        );
+      }
+      break;
+
     case 'onExtensionEvent':
       if (handler.onExtensionEvent !== undefined) {
         handler.onExtensionEvent(
@@ -2686,6 +2699,24 @@ export class IRtcEngineImpl implements IRtcEngine {
     return 'RtcEngine_setAudioMixingPitch_46f8ab7';
   }
 
+  setAudioMixingPlaybackSpeed(speed: number): number {
+    const apiType = this.getApiTypeFromSetAudioMixingPlaybackSpeed(speed);
+    const jsonParams = {
+      speed: speed,
+      toJSON: () => {
+        return {
+          speed: speed,
+        };
+      },
+    };
+    const jsonResults = callIrisApi.call(this, apiType, jsonParams);
+    return jsonResults.result;
+  }
+
+  protected getApiTypeFromSetAudioMixingPlaybackSpeed(speed: number): string {
+    return 'RtcEngine_setAudioMixingPlaybackSpeed_46f8ab7';
+  }
+
   getEffectsVolume(): number {
     const apiType = this.getApiTypeFromGetEffectsVolume();
     const jsonParams = {};
@@ -3541,6 +3572,26 @@ export class IRtcEngineImpl implements IRtcEngine {
     return 'RtcEngine_uploadLogFile_66d4ecd';
   }
 
+  writeLog(level: LogLevel, fmt: string): number {
+    const apiType = this.getApiTypeFromWriteLog(level, fmt);
+    const jsonParams = {
+      level: level,
+      fmt: fmt,
+      toJSON: () => {
+        return {
+          level: level,
+          fmt: fmt,
+        };
+      },
+    };
+    const jsonResults = callIrisApi.call(this, apiType, jsonParams);
+    return jsonResults.result;
+  }
+
+  protected getApiTypeFromWriteLog(level: LogLevel, fmt: string): string {
+    return 'RtcEngine_writeLog_62889f6';
+  }
+
   setLocalRenderMode(
     renderMode: RenderModeType,
     mirrorMode: VideoMirrorModeType = VideoMirrorModeType.VideoMirrorModeAuto
@@ -4807,6 +4858,26 @@ export class IRtcEngineImpl implements IRtcEngine {
     return 'RtcEngine_setCameraAutoExposureFaceModeEnabled_5039d15';
   }
 
+  setCameraStabilizationMode(mode: CameraStabilizationMode): number {
+    const apiType = this.getApiTypeFromSetCameraStabilizationMode(mode);
+    const jsonParams = {
+      mode: mode,
+      toJSON: () => {
+        return {
+          mode: mode,
+        };
+      },
+    };
+    const jsonResults = callIrisApi.call(this, apiType, jsonParams);
+    return jsonResults.result;
+  }
+
+  protected getApiTypeFromSetCameraStabilizationMode(
+    mode: CameraStabilizationMode
+  ): string {
+    return 'RtcEngine_setCameraStabilizationMode_701b981';
+  }
+
   setDefaultAudioRouteToSpeakerphone(defaultToSpeaker: boolean): number {
     const apiType =
       this.getApiTypeFromSetDefaultAudioRouteToSpeakerphone(defaultToSpeaker);
@@ -4875,6 +4946,35 @@ export class IRtcEngineImpl implements IRtcEngine {
     return 'RtcEngine_setRouteInCommunicationMode_46f8ab7';
   }
 
+  isCameraCenterStageSupported(): boolean {
+    const apiType = this.getApiTypeFromIsCameraCenterStageSupported();
+    const jsonParams = {};
+    const jsonResults = callIrisApi.call(this, apiType, jsonParams);
+    return jsonResults.result;
+  }
+
+  protected getApiTypeFromIsCameraCenterStageSupported(): string {
+    return 'RtcEngine_isCameraCenterStageSupported';
+  }
+
+  enableCameraCenterStage(enabled: boolean): number {
+    const apiType = this.getApiTypeFromEnableCameraCenterStage(enabled);
+    const jsonParams = {
+      enabled: enabled,
+      toJSON: () => {
+        return {
+          enabled: enabled,
+        };
+      },
+    };
+    const jsonResults = callIrisApi.call(this, apiType, jsonParams);
+    return jsonResults.result;
+  }
+
+  protected getApiTypeFromEnableCameraCenterStage(enabled: boolean): string {
+    return 'RtcEngine_enableCameraCenterStage_5039d15';
+  }
+
   getScreenCaptureSources(
     thumbSize: Size,
     iconSize: Size,
@@ -5160,6 +5260,25 @@ export class IRtcEngineImpl implements IRtcEngine {
     return 'RtcEngine_queryScreenCaptureCapability';
   }
 
+  queryCameraFocalLengthCapability(): {
+    focalLengthInfos: FocalLengthInfo[];
+    size: number;
+  } {
+    const apiType = this.getApiTypeFromQueryCameraFocalLengthCapability();
+    const jsonParams = {};
+    const jsonResults = callIrisApi.call(this, apiType, jsonParams);
+    const focalLengthInfos = jsonResults.focalLengthInfos;
+    const size = jsonResults.size;
+    return {
+      focalLengthInfos,
+      size,
+    };
+  }
+
+  protected getApiTypeFromQueryCameraFocalLengthCapability(): string {
+    return 'RtcEngine_queryCameraFocalLengthCapability_2dee6af';
+  }
+
   setScreenCaptureScenario(screenScenario: ScreenScenarioType): number {
     const apiType = this.getApiTypeFromSetScreenCaptureScenario(screenScenario);
     const jsonParams = {
@@ -6656,6 +6775,29 @@ export class IRtcEngineImpl implements IRtcEngine {
     return 'RtcEngine_isFeatureAvailableOnDevice_a694b62';
   }
 
+  sendAudioMetadata(metadata: string, length: number): number {
+    const apiType = this.getApiTypeFromSendAudioMetadata(metadata, length);
+    const jsonParams = {
+      metadata: metadata,
+      length: length,
+      toJSON: () => {
+        return {
+          metadata: metadata,
+          length: length,
+        };
+      },
+    };
+    const jsonResults = callIrisApi.call(this, apiType, jsonParams);
+    return jsonResults.result;
+  }
+
+  protected getApiTypeFromSendAudioMetadata(
+    metadata: string,
+    length: number
+  ): string {
+    return 'RtcEngine_sendAudioMetadata_878f309';
+  }
+
   startScreenCaptureBySourceType(
     sourceType: VideoSourceType,
     config: ScreenCaptureConfiguration
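Sketch of the new camera controls wired up above (the CameraStabilizationMode member name follows the SDK's naming convention and should be checked against the released typings):

import { CameraStabilizationMode, createAgoraRtcEngine } from 'react-native-agora';

const engine = createAgoraRtcEngine();
if (engine.isCameraCenterStageSupported()) {
  engine.enableCameraCenterStage(true); // auto-framing on supported Apple devices
}
engine.setCameraStabilizationMode(
  CameraStabilizationMode.CameraStabilizationModeAuto
);
// Returns the list of supported focal-length combinations plus its size.
const { focalLengthInfos, size } = engine.queryCameraFocalLengthCapability();
console.log(`supported focal-length combinations: ${size}`, focalLengthInfos);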
@@ -6682,7 +6824,7 @@ export class IRtcEngineImpl implements IRtcEngine {
     sourceType: VideoSourceType,
     config: ScreenCaptureConfiguration
   ): string {
-    return 'RtcEngine_startScreenCaptureBySourceType';
+    return 'RtcEngine_startScreenCapture_9ebb320';
   }
 
   stopScreenCaptureBySourceType(sourceType: VideoSourceType): number {
@@ -6703,7 +6845,7 @@ export class IRtcEngineImpl implements IRtcEngine {
   protected getApiTypeFromStopScreenCaptureBySourceType(
     sourceType: VideoSourceType
   ): string {
-    return 'RtcEngine_stopScreenCaptureBySourceType';
+    return 'RtcEngine_stopScreenCapture_4fd718e';
   }
 
   release(sync: boolean = false): void {
@@ -6731,7 +6873,7 @@ export class IRtcEngineImpl implements IRtcEngine {
   }
 
   protected getApiTypeFromStartPreviewWithoutSourceType(): string {
-    return 'RtcEngine_startPreviewWithoutSourceType';
+    return 'RtcEngine_startPreview';
   }
 
   getAudioDeviceManager(): IAudioDeviceManager {
@@ -6742,7 +6884,7 @@ export class IRtcEngineImpl implements IRtcEngine {
   }
 
   protected getApiTypeFromGetAudioDeviceManager(): string {
-    return 'RtcEngine_getAudioDeviceManager';
+    return 'RtcEngine_queryInterface_257d192';
   }
 
   getVideoDeviceManager(): IVideoDeviceManager {
@@ -6753,7 +6895,7 @@ export class IRtcEngineImpl implements IRtcEngine {
   }
 
   protected getApiTypeFromGetVideoDeviceManager(): string {
-    return 'RtcEngine_getVideoDeviceManager';
+    return 'RtcEngine_queryInterface_257d192';
   }
 
   getMusicContentCenter(): IMusicContentCenter {
@@ -6764,7 +6906,7 @@ export class IRtcEngineImpl implements IRtcEngine {
   }
 
   protected getApiTypeFromGetMusicContentCenter(): string {
-    return 'RtcEngine_getMusicContentCenter';
+    return 'RtcEngine_queryInterface_257d192';
   }
 
   getMediaEngine(): IMediaEngine {
@@ -6775,7 +6917,7 @@ export class IRtcEngineImpl implements IRtcEngine {
   }
 
   protected getApiTypeFromGetMediaEngine(): string {
-    return 'RtcEngine_getMediaEngine';
+    return 'RtcEngine_queryInterface_257d192';
   }
 
   getLocalSpatialAudioEngine(): ILocalSpatialAudioEngine {
@@ -6786,7 +6928,7 @@ export class IRtcEngineImpl implements IRtcEngine {
   }
 
   protected getApiTypeFromGetLocalSpatialAudioEngine(): string {
-    return 'RtcEngine_getLocalSpatialAudioEngine';
+    return 'RtcEngine_queryInterface_257d192';
   }
 
   getH265Transcoder(): IH265Transcoder {
@@ -6797,7 +6939,7 @@ export class IRtcEngineImpl implements IRtcEngine {
   }
 
   protected getApiTypeFromGetH265Transcoder(): string {
-    return 'RtcEngine_getH265Transcoder';
+    return 'RtcEngine_queryInterface_257d192';
  }
 
   sendMetaData(metadata: Metadata, sourceType: VideoSourceType): number {
@@ -6855,7 +6997,7 @@ export class IRtcEngineImpl implements IRtcEngine {
   }
 
   protected getApiTypeFromDestroyRendererByView(view: any): string {
-    return 'RtcEngine_destroyRendererByView';
+    return '';
   }
 
   destroyRendererByConfig(
@@ -6888,7 +7030,7 @@ export class IRtcEngineImpl implements IRtcEngine {
     channelId?: string,
     uid: number = 0
   ): string {
-    return 'RtcEngine_destroyRendererByConfig';
+    return '';
   }
 
   unregisterAudioEncodedFrameObserver(
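The renamed iris keys above back the per-source screen-capture calls; a sketch (an empty config relies on SDK defaults, and a real app would populate ScreenCaptureConfiguration fields):

import { createAgoraRtcEngine, VideoSourceType } from 'react-native-agora';

const engine = createAgoraRtcEngine();
engine.startScreenCaptureBySourceType(
  VideoSourceType.VideoSourceScreenPrimary,
  {}
);
// ...and stop the same source when done:
engine.stopScreenCaptureBySourceType(VideoSourceType.VideoSourceScreenPrimary);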
diff --git a/src/impl/IAudioDeviceManagerImpl.ts b/src/impl/IAudioDeviceManagerImpl.ts
index 58c8da82..c006c209 100644
--- a/src/impl/IAudioDeviceManagerImpl.ts
+++ b/src/impl/IAudioDeviceManagerImpl.ts
@@ -63,7 +63,7 @@ export class IAudioDeviceManagerImpl implements IAudioDeviceManager {
   }
 
   protected getApiTypeFromGetPlaybackDeviceInfo(): string {
-    return 'AudioDeviceManager_getPlaybackDeviceInfo_5540658';
+    return 'AudioDeviceManager_getPlaybackDeviceInfo_ed3a96d';
   }
 
   setPlaybackDeviceVolume(volume: number): number {
@@ -134,7 +134,7 @@ export class IAudioDeviceManagerImpl implements IAudioDeviceManager {
   }
 
   protected getApiTypeFromGetRecordingDeviceInfo(): string {
-    return 'AudioDeviceManager_getRecordingDeviceInfo_5540658';
+    return 'AudioDeviceManager_getRecordingDeviceInfo_ed3a96d';
   }
 
   setRecordingDeviceVolume(volume: number): number {
diff --git a/src/internal/IrisApiEngine.ts b/src/internal/IrisApiEngine.ts
index 7709327e..d520f6a0 100644
--- a/src/internal/IrisApiEngine.ts
+++ b/src/internal/IrisApiEngine.ts
@@ -12,6 +12,7 @@ import {
   IAudioFrameObserver,
   IAudioPcmFrameSink,
   IAudioSpectrumObserver,
+  IFaceInfoObserver,
   IMediaRecorderObserver,
   IVideoEncodedFrameObserver,
   IVideoFrameObserver,
@@ -39,6 +40,7 @@ import {
   processIAudioFrameObserverBase,
   processIAudioPcmFrameSink,
   processIAudioSpectrumObserver,
+  processIFaceInfoObserver,
   processIMediaRecorderObserver,
   processIVideoEncodedFrameObserver,
   processIVideoFrameObserver,
@@ -127,7 +129,8 @@ type ProcessorType =
   | IDirectCdnStreamingEventHandler
   | IRtcEngineEventHandler
   | IMusicContentCenterEventHandler
-  | IH265TranscoderObserver;
+  | IH265TranscoderObserver
+  | IFaceInfoObserver;
 
 type EventProcessors = {
   IAudioFrameObserver: EventProcessor<IAudioFrameObserver>;
@@ -144,6 +147,7 @@ type EventProcessors = {
   IRtcEngineEventHandler: EventProcessor<IRtcEngineEventHandler>;
   IMusicContentCenterEventHandler: EventProcessor<IMusicContentCenterEventHandler>;
   IH265TranscoderObserver: EventProcessor<IH265TranscoderObserver>;
+  IFaceInfoObserver: EventProcessor<IFaceInfoObserver>;
 };
 
 /**
@@ -357,6 +361,12 @@ export const EVENT_PROCESSORS: EventProcessors = {
     func: [processIH265TranscoderObserver],
     handlers: () => H265TranscoderInternal._h265_transcoder_observers,
   },
+  IFaceInfoObserver: {
+    suffix: 'FaceInfoObserver_',
+    type: () => EVENT_TYPE.IMediaEngine,
+    func: [processIFaceInfoObserver],
+    handlers: () => MediaEngineInternal._face_info_observers,
+  },
 };
 
 function handleEvent({ event, data, buffers }: any) {
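The new EVENT_PROCESSORS entry is exercised by the unit tests earlier in this diff; the same jest-style helper reproduces the dispatch path in isolation (the payload below is hypothetical, and the import path is relative to src/__tests__):

import { EVENT_PROCESSORS, emitEvent } from '../internal/IrisApiEngine';

// iris emits events named 'FaceInfoObserver_onFaceInfo'; the suffix selects
// this processor, and processIFaceInfoObserver fans the payload out to every
// handler in MediaEngineInternal._face_info_observers.
emitEvent('onFaceInfo', EVENT_PROCESSORS.IFaceInfoObserver, {
  outFaceInfo: '{"faces":[]}',
});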
diff --git a/src/internal/MediaEngineInternal.ts b/src/internal/MediaEngineInternal.ts
index e8f81d29..21531928 100644
--- a/src/internal/MediaEngineInternal.ts
+++ b/src/internal/MediaEngineInternal.ts
@@ -2,6 +2,7 @@ import { createCheckers } from 'ts-interface-checker';
 
 import {
   IAudioFrameObserver,
+  IFaceInfoObserver,
   IVideoEncodedFrameObserver,
   IVideoFrameObserver,
 } from '../AgoraMediaBase';
@@ -20,6 +21,7 @@ export class MediaEngineInternal extends IMediaEngineImpl {
   static _audio_frame_observers: IAudioFrameObserver[] = [];
   static _video_frame_observers: IVideoFrameObserver[] = [];
   static _video_encoded_frame_observers: IVideoEncodedFrameObserver[] = [];
+  static _face_info_observers: IFaceInfoObserver[] = [];
 
   override registerAudioFrameObserver(observer: IAudioFrameObserver): number {
     if (
@@ -82,10 +84,30 @@ export class MediaEngineInternal extends IMediaEngineImpl {
     return super.unregisterVideoEncodedFrameObserver(observer);
   }
 
+  override registerFaceInfoObserver(observer: IFaceInfoObserver): number {
+    if (
+      !MediaEngineInternal._face_info_observers.find(
+        (value) => value === observer
+      )
+    ) {
+      MediaEngineInternal._face_info_observers.push(observer);
+    }
+    return super.registerFaceInfoObserver(observer);
+  }
+
+  override unregisterFaceInfoObserver(observer: IFaceInfoObserver): number {
+    MediaEngineInternal._face_info_observers =
+      MediaEngineInternal._face_info_observers.filter(
+        (value) => value !== observer
+      );
+    return super.unregisterFaceInfoObserver(observer);
+  }
+
   override release() {
     MediaEngineInternal._audio_frame_observers = [];
     MediaEngineInternal._video_frame_observers = [];
     MediaEngineInternal._video_encoded_frame_observers = [];
+    MediaEngineInternal._face_info_observers = [];
     this.removeAllListeners();
     super.release();
   }
@@ -120,6 +142,15 @@ export class MediaEngineInternal extends IMediaEngineImpl {
         this.registerVideoEncodedFrameObserver({});
       }
     }
+    if (
+      checkers.IFaceInfoObserver?.strictTest({
+        [eventType]: undefined,
+      })
+    ) {
+      if (MediaEngineInternal._face_info_observers.length === 0) {
+        this.registerFaceInfoObserver({});
+      }
+    }
     return true;
   }
 
diff --git a/src/internal/MediaPlayerInternal.ts b/src/internal/MediaPlayerInternal.ts
index 89d330ef..7a737213 100644
--- a/src/internal/MediaPlayerInternal.ts
+++ b/src/internal/MediaPlayerInternal.ts
@@ -283,18 +283,4 @@ export class MediaPlayerInternal extends IMediaPlayerImpl {
     );
     return super.unregisterMediaPlayerAudioSpectrumObserver(observer);
   }
-
-  protected override getApiTypeFromSetPlayerOptionInInt(
-    key: string,
-    value: number
-  ): string {
-    return 'MediaPlayer_setPlayerOption_4d05d29';
-  }
-
-  protected override getApiTypeFromSetPlayerOptionInString(
-    key: string,
-    value: string
-  ): string {
-    return 'MediaPlayer_setPlayerOption_ccad422';
-  }
 }
diff --git a/src/internal/MusicContentCenterInternal.ts b/src/internal/MusicContentCenterInternal.ts
index 6bdb1039..24b1144d 100644
--- a/src/internal/MusicContentCenterInternal.ts
+++ b/src/internal/MusicContentCenterInternal.ts
@@ -117,13 +117,6 @@ class _MusicPlayerInternal extends IMusicPlayerImpl {
   override getMediaPlayerId(): number {
     return this._mediaPlayerId;
   }
-
-  protected override getApiTypeFromOpenWithSongCode(
-    songCode: number,
-    startPos = 0
-  ): string {
-    return 'MusicPlayer_open_303b92e';
-  }
 }
 
 export class MusicPlayerInternal
diff --git a/src/internal/RtcEngineExInternal.ts b/src/internal/RtcEngineExInternal.ts
index 004c6494..da5ce002 100644
--- a/src/internal/RtcEngineExInternal.ts
+++ b/src/internal/RtcEngineExInternal.ts
@@ -79,6 +79,9 @@ export class RtcEngineExInternal extends IRtcEngineExImpl {
   override release(sync: boolean = false) {
     this._media_engine.release();
     this._local_spatial_audio_engine.release();
+    RtcEngineExInternal._event_handlers.forEach((it) => {
+      super.unregisterEventHandler(it);
+    });
     RtcEngineExInternal._event_handlers = [];
     RtcEngineExInternal._direct_cdn_streaming_event_handler = [];
     RtcEngineExInternal._metadata_observer = [];
@@ -200,6 +203,8 @@ export class RtcEngineExInternal extends IRtcEngineExImpl {
   }
 
   override registerEventHandler(eventHandler: IRtcEngineEventHandler): boolean {
+    // only call into iris when this is the first handler to be registered
+    let callIris = RtcEngineExInternal._event_handlers.length === 0;
     if (
       !RtcEngineExInternal._event_handlers.find(
         (value) => value === eventHandler
@@ -207,7 +212,7 @@ export class RtcEngineExInternal extends IRtcEngineExImpl {
     ) {
       RtcEngineExInternal._event_handlers.push(eventHandler);
     }
-    return super.registerEventHandler(eventHandler);
+    return callIris ? super.registerEventHandler(eventHandler) : true;
   }
 
   override unregisterEventHandler(
@@ -217,7 +222,9 @@ export class RtcEngineExInternal extends IRtcEngineExImpl {
       RtcEngineExInternal._event_handlers.filter(
         (value) => value !== eventHandler
       );
-    return super.unregisterEventHandler(eventHandler);
+    // only call into iris when the last handler has been removed
+    let callIris = RtcEngineExInternal._event_handlers.length === 0;
+    return callIris ? super.unregisterEventHandler(eventHandler) : true;
   }
 
   override createMediaPlayer(): IMediaPlayer {
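The two overrides above make handler registration idempotent toward iris: however many handlers the app adds, iris sees exactly one registration. A sketch of the resulting behavior:

import { createAgoraRtcEngine, IRtcEngineEventHandler } from 'react-native-agora';

const engine = createAgoraRtcEngine();
const a: IRtcEngineEventHandler = { onError: (err, msg) => console.warn(err, msg) };
const b: IRtcEngineEventHandler = { onUserJoined: (connection, uid) => console.log(uid) };
engine.registerEventHandler(a); // first handler: the call reaches iris
engine.registerEventHandler(b); // list non-empty: short-circuits, returns true
engine.unregisterEventHandler(b); // a handler remains: no iris call
engine.unregisterEventHandler(a); // last handler removed: iris unregistration runs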
@@ -314,10 +321,6 @@ export class RtcEngineExInternal extends IRtcEngineExImpl {
       : 'RtcEngine_setClientRole_b46cc48';
   }
 
-  protected override getApiTypeFromStartPreviewWithoutSourceType(): string {
-    return 'RtcEngine_startPreview';
-  }
-
   protected override getApiTypeFromEnableDualStreamMode(
     enabled: boolean,
     streamConfig?: SimulcastStreamConfig
@@ -347,14 +350,6 @@ export class RtcEngineExInternal extends IRtcEngineExImpl {
       : 'RtcEngine_joinChannelWithUserAccount_4685af9';
   }
 
-  protected override getApiTypeFromPreloadChannelWithUserAccount(
-    token: string,
-    channelId: string,
-    userAccount: string
-  ): string {
-    return 'RtcEngine_startScreenCapture_9ebb320';
-  }
-
   override getAudioDeviceManager(): IAudioDeviceManager {
     throw 'Not support';
   }
diff --git a/src/ti/AgoraMediaBase-ti.ts b/src/ti/AgoraMediaBase-ti.ts
index e6df2c03..c3fc51d5 100644
--- a/src/ti/AgoraMediaBase-ti.ts
+++ b/src/ti/AgoraMediaBase-ti.ts
@@ -36,6 +36,10 @@ export const IVideoFrameObserver = t.iface([], {
   "onTranscodedVideoFrame": t.opt(t.func("void", t.param("videoFrame", "VideoFrame"))),
 });
 
+export const IFaceInfoObserver = t.iface([], {
+  "onFaceInfo": t.opt(t.func("void", t.param("outFaceInfo", "string"))),
+});
+
 export const IMediaRecorderObserver = t.iface([], {
   "onRecorderStateChanged": t.opt(t.func("void", t.param("channelId", "string"), t.param("uid", "number"), t.param("state", "RecorderState"), t.param("reason", "RecorderReasonCode"))),
   "onRecorderInfoUpdated": t.opt(t.func("void", t.param("channelId", "string"), t.param("uid", "number"), t.param("info", "RecorderInfo"))),
@@ -48,6 +52,7 @@ const exportedTypeSuite: t.ITypeSuite = {
   IAudioSpectrumObserver,
   IVideoEncodedFrameObserver,
   IVideoFrameObserver,
+  IFaceInfoObserver,
   IMediaRecorderObserver,
 };
 export default exportedTypeSuite;
diff --git a/src/ti/IAgoraRtcEngine-ti.ts b/src/ti/IAgoraRtcEngine-ti.ts
index c82e2c61..3cf528b9 100644
--- a/src/ti/IAgoraRtcEngine-ti.ts
+++ b/src/ti/IAgoraRtcEngine-ti.ts
@@ -94,6 +94,7 @@ export const IRtcEngineEventHandler = t.iface([], {
   "onAudioPublishStateChanged": t.opt(t.func("void", t.param("channel", "string"), t.param("oldState", "StreamPublishState"), t.param("newState", "StreamPublishState"), t.param("elapseSinceLastState", "number"))),
   "onVideoPublishStateChanged": t.opt(t.func("void", t.param("source", "VideoSourceType"), t.param("channel", "string"), t.param("oldState", "StreamPublishState"), t.param("newState", "StreamPublishState"), t.param("elapseSinceLastState", "number"))),
   "onTranscodedStreamLayoutInfo": t.opt(t.func("void", t.param("connection", "RtcConnection"), t.param("uid", "number"), t.param("width", "number"), t.param("height", "number"), t.param("layoutCount", "number"), t.param("layoutlist", t.array("VideoLayout")))),
+  "onAudioMetadataReceived": t.opt(t.func("void", t.param("connection", "RtcConnection"), t.param("uid", "number"), t.param("metadata", "string"), t.param("length", "number"))),
   "onExtensionEvent": t.opt(t.func("void", t.param("provider", "string"), t.param("extension", "string"), t.param("key", "string"), t.param("value", "string"))),
   "onExtensionStarted": t.opt(t.func("void", t.param("provider", "string"), t.param("extension", "string"))),
   "onExtensionStopped": t.opt(t.func("void", t.param("provider", "string"), t.param("extension", "string"))),