From 0d47901eedc1d7d647a54da298fcbf4de9901f9c Mon Sep 17 00:00:00 2001 From: Hiroshi Horie <548776+hiroshihorie@users.noreply.github.com> Date: Tue, 31 Mar 2026 17:21:03 +0800 Subject: [PATCH 01/16] Expose audio engine APIs (#62) --- .../WebRTCModule+RTCAudioDeviceModule.h | 5 + .../WebRTCModule+RTCAudioDeviceModule.m | 242 +++++++++++++ ios/RCTWebRTC/WebRTCModule.h | 2 + ios/RCTWebRTC/WebRTCModule.m | 1 + src/AudioDeviceModule.ts | 329 ++++++++++++++++++ src/index.ts | 7 + 6 files changed, 586 insertions(+) create mode 100644 ios/RCTWebRTC/WebRTCModule+RTCAudioDeviceModule.h create mode 100644 ios/RCTWebRTC/WebRTCModule+RTCAudioDeviceModule.m create mode 100644 src/AudioDeviceModule.ts diff --git a/ios/RCTWebRTC/WebRTCModule+RTCAudioDeviceModule.h b/ios/RCTWebRTC/WebRTCModule+RTCAudioDeviceModule.h new file mode 100644 index 000000000..32fcd47f5 --- /dev/null +++ b/ios/RCTWebRTC/WebRTCModule+RTCAudioDeviceModule.h @@ -0,0 +1,5 @@ +#import "WebRTCModule.h" + +@interface WebRTCModule (RTCAudioDeviceModule) + +@end diff --git a/ios/RCTWebRTC/WebRTCModule+RTCAudioDeviceModule.m b/ios/RCTWebRTC/WebRTCModule+RTCAudioDeviceModule.m new file mode 100644 index 000000000..5fb00df2a --- /dev/null +++ b/ios/RCTWebRTC/WebRTCModule+RTCAudioDeviceModule.m @@ -0,0 +1,242 @@ +#import + +#import +#import + +#import "AudioDeviceModuleObserver.h" +#import "WebRTCModule.h" + +// The underlying `RTCAudioDeviceModule` is owned by the `RTCPeerConnectionFactory`. +// `WebRTCModule.audioDeviceModule` is a Swift wrapper around it, so we reach for the +// raw device module here when we need to call APIs that are only defined on +// `RTCAudioDeviceModule`. +#define RAW_ADM (self.peerConnectionFactory.audioDeviceModule) + +@implementation WebRTCModule (RTCAudioDeviceModule) + +#pragma mark - Recording & Playback Control + +RCT_EXPORT_METHOD(audioDeviceModuleStartPlayout + : (RCTPromiseResolveBlock)resolve rejecter + : (RCTPromiseRejectBlock)reject) { + NSInteger result = [RAW_ADM startPlayout]; + if (result == 0) { + resolve(nil); + } else { + reject(@"playout_error", [NSString stringWithFormat:@"Failed to start playout: %ld", (long)result], nil); + } +} + +RCT_EXPORT_METHOD(audioDeviceModuleStopPlayout + : (RCTPromiseResolveBlock)resolve rejecter + : (RCTPromiseRejectBlock)reject) { + NSInteger result = [RAW_ADM stopPlayout]; + if (result == 0) { + resolve(nil); + } else { + reject(@"playout_error", [NSString stringWithFormat:@"Failed to stop playout: %ld", (long)result], nil); + } +} + +RCT_EXPORT_METHOD(audioDeviceModuleStartRecording + : (RCTPromiseResolveBlock)resolve rejecter + : (RCTPromiseRejectBlock)reject) { + NSInteger result = [RAW_ADM startRecording]; + if (result == 0) { + resolve(nil); + } else { + reject(@"recording_error", [NSString stringWithFormat:@"Failed to start recording: %ld", (long)result], nil); + } +} + +RCT_EXPORT_METHOD(audioDeviceModuleStopRecording + : (RCTPromiseResolveBlock)resolve rejecter + : (RCTPromiseRejectBlock)reject) { + NSInteger result = [RAW_ADM stopRecording]; + if (result == 0) { + resolve(nil); + } else { + reject(@"recording_error", [NSString stringWithFormat:@"Failed to stop recording: %ld", (long)result], nil); + } +} + +RCT_EXPORT_METHOD(audioDeviceModuleStartLocalRecording + : (RCTPromiseResolveBlock)resolve rejecter + : (RCTPromiseRejectBlock)reject) { + NSInteger result = [RAW_ADM initAndStartRecording]; + if (result == 0) { + resolve(nil); + } else { + reject( + @"recording_error", [NSString stringWithFormat:@"Failed to start local recording: %ld", (long)result], 
nil); + } +} + +RCT_EXPORT_METHOD(audioDeviceModuleStopLocalRecording + : (RCTPromiseResolveBlock)resolve rejecter + : (RCTPromiseRejectBlock)reject) { + NSInteger result = [RAW_ADM stopRecording]; + if (result == 0) { + resolve(nil); + } else { + reject( + @"recording_error", [NSString stringWithFormat:@"Failed to stop local recording: %ld", (long)result], nil); + } +} + +#pragma mark - Microphone Control + +RCT_EXPORT_METHOD(audioDeviceModuleSetMicrophoneMuted + : (BOOL)muted resolver + : (RCTPromiseResolveBlock)resolve rejecter + : (RCTPromiseRejectBlock)reject) { + NSInteger result = [RAW_ADM setMicrophoneMuted:muted]; + if (result == 0) { + resolve(nil); + } else { + reject(@"mute_error", [NSString stringWithFormat:@"Failed to set microphone mute: %ld", (long)result], nil); + } +} + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleIsMicrophoneMuted) { + return @(RAW_ADM.isMicrophoneMuted); +} + +#pragma mark - Voice Processing + +RCT_EXPORT_METHOD(audioDeviceModuleSetVoiceProcessingEnabled + : (BOOL)enabled resolver + : (RCTPromiseResolveBlock)resolve rejecter + : (RCTPromiseRejectBlock)reject) { + NSInteger result = [RAW_ADM setVoiceProcessingEnabled:enabled]; + if (result == 0) { + resolve(nil); + } else { + reject(@"voice_processing_error", + [NSString stringWithFormat:@"Failed to set voice processing: %ld", (long)result], + nil); + } +} + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleIsVoiceProcessingEnabled) { + return @(RAW_ADM.isVoiceProcessingEnabled); +} + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleSetVoiceProcessingBypassed : (BOOL)bypassed) { + RAW_ADM.voiceProcessingBypassed = bypassed; + return nil; +} + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleIsVoiceProcessingBypassed) { + return @(RAW_ADM.isVoiceProcessingBypassed); +} + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleSetVoiceProcessingAGCEnabled : (BOOL)enabled) { + RAW_ADM.voiceProcessingAGCEnabled = enabled; + return nil; +} + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleIsVoiceProcessingAGCEnabled) { + return @(RAW_ADM.isVoiceProcessingAGCEnabled); +} + +#pragma mark - Status + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleIsPlaying) { + return @(RAW_ADM.isPlaying); +} + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleIsRecording) { + return @(RAW_ADM.isRecording); +} + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleIsEngineRunning) { + return @(RAW_ADM.isEngineRunning); +} + +#pragma mark - Advanced Features + +RCT_EXPORT_METHOD(audioDeviceModuleSetMuteMode + : (NSInteger)mode resolver + : (RCTPromiseResolveBlock)resolve rejecter + : (RCTPromiseRejectBlock)reject) { + NSInteger result = [RAW_ADM setMuteMode:(RTCAudioEngineMuteMode)mode]; + if (result == 0) { + resolve(nil); + } else { + reject(@"mute_mode_error", [NSString stringWithFormat:@"Failed to set mute mode: %ld", (long)result], nil); + } +} + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleGetMuteMode) { + return @(RAW_ADM.muteMode); +} + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleSetAdvancedDuckingEnabled : (BOOL)enabled) { + RAW_ADM.advancedDuckingEnabled = enabled; + return nil; +} + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleIsAdvancedDuckingEnabled) { + return @(RAW_ADM.isAdvancedDuckingEnabled); +} + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleSetDuckingLevel : (NSInteger)level) { + RAW_ADM.duckingLevel = level; + return nil; +} + 
+RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleGetDuckingLevel) { + return @(RAW_ADM.duckingLevel); +} + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleIsRecordingAlwaysPreparedMode) { + return @(RAW_ADM.recordingAlwaysPreparedMode); +} + +RCT_EXPORT_METHOD(audioDeviceModuleSetRecordingAlwaysPreparedMode + : (BOOL)enabled resolver + : (RCTPromiseResolveBlock)resolve rejecter + : (RCTPromiseRejectBlock)reject) { + NSInteger result = [RAW_ADM setRecordingAlwaysPreparedMode:enabled]; + if (result == 0) { + resolve(nil); + } else { + reject(@"recording_always_prepared_mode_error", + [NSString stringWithFormat:@"Failed to set recording always prepared mode: %ld", (long)result], + nil); + } +} + +RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleGetEngineAvailability) { + RTCAudioEngineAvailability availability = RAW_ADM.engineAvailability; + return @{ + @"isInputAvailable" : @(availability.isInputAvailable), + @"isOutputAvailable" : @(availability.isOutputAvailable) + }; +} + +RCT_EXPORT_METHOD(audioDeviceModuleSetEngineAvailability + : (NSDictionary *)availabilityDict resolver + : (RCTPromiseResolveBlock)resolve rejecter + : (RCTPromiseRejectBlock)reject) { + RTCAudioEngineAvailability availability; + availability.isInputAvailable = [availabilityDict[@"isInputAvailable"] boolValue]; + availability.isOutputAvailable = [availabilityDict[@"isOutputAvailable"] boolValue]; + NSInteger result = [RAW_ADM setEngineAvailability:availability]; + if (result == 0) { + resolve(nil); + } else { + reject(@"engine_availability_error", + [NSString stringWithFormat:@"Failed to set engine availability: %ld", (long)result], + nil); + } +} + +// TODO: Observer delegate "resolve" methods were skipped because our current +// `AudioDeviceModuleObserver` does not expose async JS-driven resolution hooks; +// the Swift `AudioDeviceModule` wrapper always returns success immediately. + +@end + +#undef RAW_ADM diff --git a/ios/RCTWebRTC/WebRTCModule.h b/ios/RCTWebRTC/WebRTCModule.h index 538240911..ace2ba043 100644 --- a/ios/RCTWebRTC/WebRTCModule.h +++ b/ios/RCTWebRTC/WebRTCModule.h @@ -48,6 +48,8 @@ static NSString *const kEventAudioDeviceModuleAudioProcessingStateUpdated = @"au @property(nonatomic, strong) NSMutableDictionary *localStreams; @property(nonatomic, strong) NSMutableDictionary *localTracks; +// TODO: FrameCryption is not supported by this SDK yet. These containers are +// retained so the native factory initialization keeps working unchanged. @property(nonatomic, strong) NSMutableDictionary *frameCryptors; @property(nonatomic, strong) NSMutableDictionary *keyProviders; @property(nonatomic, strong) NSMutableDictionary *dataPacketCryptors; diff --git a/ios/RCTWebRTC/WebRTCModule.m b/ios/RCTWebRTC/WebRTCModule.m index 4455e60f2..5188a7496 100644 --- a/ios/RCTWebRTC/WebRTCModule.m +++ b/ios/RCTWebRTC/WebRTCModule.m @@ -131,6 +131,7 @@ - (instancetype)init { _localStreams = [NSMutableDictionary new]; _localTracks = [NSMutableDictionary new]; + // TODO: FrameCryption is not supported yet; dictionaries left empty. 
_frameCryptors = [NSMutableDictionary new]; _keyProviders = [NSMutableDictionary new]; _dataPacketCryptors = [NSMutableDictionary new]; diff --git a/src/AudioDeviceModule.ts b/src/AudioDeviceModule.ts new file mode 100644 index 000000000..8d2ab676d --- /dev/null +++ b/src/AudioDeviceModule.ts @@ -0,0 +1,329 @@ +import { NativeModules, Platform } from 'react-native'; + +const { WebRTCModule } = NativeModules; + +export enum AudioEngineMuteMode { + Unknown = -1, + VoiceProcessing = 0, + RestartEngine = 1, + InputMixer = 2, +} + +export interface AudioEngineAvailability { + isInputAvailable: boolean; + isOutputAvailable: boolean; +} + +export const AudioEngineAvailability = { + default: { + isInputAvailable: true, + isOutputAvailable: true, + }, + none: { + isInputAvailable: false, + isOutputAvailable: false, + }, +} as const; + +/** + * Audio Device Module API for controlling audio devices and settings. + * iOS/macOS only - will throw on Android. + */ +export class AudioDeviceModule { + /** + * Start audio playback + */ + static async startPlayout(): Promise { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleStartPlayout(); + } + + /** + * Stop audio playback + */ + static async stopPlayout(): Promise { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleStopPlayout(); + } + + /** + * Start audio recording + */ + static async startRecording(): Promise { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleStartRecording(); + } + + /** + * Stop audio recording + */ + static async stopRecording(): Promise { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleStopRecording(); + } + + /** + * Initialize and start local audio recording (calls initAndStartRecording) + */ + static async startLocalRecording(): Promise { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleStartLocalRecording(); + } + + /** + * Stop local audio recording + */ + static async stopLocalRecording(): Promise { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleStopLocalRecording(); + } + + /** + * Mute or unmute the microphone + */ + static async setMicrophoneMuted(muted: boolean): Promise { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleSetMicrophoneMuted(muted); + } + + /** + * Check if microphone is currently muted + */ + static isMicrophoneMuted(): boolean { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleIsMicrophoneMuted(); + } + + /** + * Enable or disable voice processing (requires engine restart) + */ + static async setVoiceProcessingEnabled(enabled: boolean): Promise { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleSetVoiceProcessingEnabled(enabled); + } + + /** + * Check if voice processing is enabled + */ + static 
isVoiceProcessingEnabled(): boolean { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleIsVoiceProcessingEnabled(); + } + + /** + * Temporarily bypass voice processing without restarting the engine + */ + static setVoiceProcessingBypassed(bypassed: boolean): void { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + WebRTCModule.audioDeviceModuleSetVoiceProcessingBypassed(bypassed); + } + + /** + * Check if voice processing is currently bypassed + */ + static isVoiceProcessingBypassed(): boolean { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleIsVoiceProcessingBypassed(); + } + + /** + * Enable or disable Automatic Gain Control (AGC) + */ + static setVoiceProcessingAGCEnabled(enabled: boolean): void { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleSetVoiceProcessingAGCEnabled(enabled); + } + + /** + * Check if AGC is enabled + */ + static isVoiceProcessingAGCEnabled(): boolean { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleIsVoiceProcessingAGCEnabled(); + } + + /** + * Check if audio is currently playing + */ + static isPlaying(): boolean { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleIsPlaying(); + } + + /** + * Check if audio is currently recording + */ + static isRecording(): boolean { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleIsRecording(); + } + + /** + * Check if the audio engine is running + */ + static isEngineRunning(): boolean { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleIsEngineRunning(); + } + + /** + * Set the microphone mute mode + */ + static async setMuteMode(mode: AudioEngineMuteMode): Promise { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleSetMuteMode(mode); + } + + /** + * Get the current mute mode + */ + static getMuteMode(): AudioEngineMuteMode { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleGetMuteMode(); + } + + /** + * Enable or disable advanced audio ducking + */ + static setAdvancedDuckingEnabled(enabled: boolean): void { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleSetAdvancedDuckingEnabled(enabled); + } + + /** + * Check if advanced ducking is enabled + */ + static isAdvancedDuckingEnabled(): boolean { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleIsAdvancedDuckingEnabled(); + } + + /** + * Set the audio ducking level (0-100) + */ + static setDuckingLevel(level: number): void { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only 
available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleSetDuckingLevel(level); + } + + /** + * Get the current ducking level + */ + static getDuckingLevel(): number { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleGetDuckingLevel(); + } + + /** + * Check if recording always prepared mode is enabled + */ + static isRecordingAlwaysPreparedMode(): boolean { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleIsRecordingAlwaysPreparedMode(); + } + + /** + * Enable or disable recording always prepared mode + */ + static async setRecordingAlwaysPreparedMode(enabled: boolean): Promise { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleSetRecordingAlwaysPreparedMode(enabled); + } + + /** + * Get the current engine availability (input/output availability) + */ + static getEngineAvailability(): AudioEngineAvailability { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleGetEngineAvailability(); + } + + /** + * Set the engine availability (input/output availability) + */ + static async setEngineAvailability(availability: AudioEngineAvailability): Promise { + if (Platform.OS === 'android') { + throw new Error('AudioDeviceModule is only available on iOS/macOS'); + } + + return WebRTCModule.audioDeviceModuleSetEngineAvailability(availability); + } +} diff --git a/src/index.ts b/src/index.ts index 496c83b96..476898229 100644 --- a/src/index.ts +++ b/src/index.ts @@ -8,6 +8,7 @@ if (WebRTCModule === null) { }`); } +import { AudioDeviceModule, AudioEngineMuteMode, AudioEngineAvailability } from './AudioDeviceModule'; import { audioDeviceModuleEvents } from './AudioDeviceModuleEvents'; import { setupNativeEvents } from './EventEmitter'; import Logger from './Logger'; @@ -52,6 +53,9 @@ export { mediaDevices, permissions, registerGlobals, + AudioDeviceModule, + AudioEngineMuteMode, + AudioEngineAvailability, audioDeviceModuleEvents, }; @@ -83,4 +87,7 @@ function registerGlobals(): void { global.RTCRtpReceiver = RTCRtpReceiver; global.RTCRtpSender = RTCRtpSender; global.RTCErrorEvent = RTCErrorEvent; + + // Ensure audioDeviceModuleEvents is initialized and event listeners are registered + audioDeviceModuleEvents.setupListeners(); } From 154fe2204f02a0b0645dbea79185381a7106d42b Mon Sep 17 00:00:00 2001 From: Santhosh Vaiyapuri Date: Mon, 13 Apr 2026 14:04:08 +0200 Subject: [PATCH 02/16] 137.1.4-alpha.1 --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 3129d42d5..d62be1e99 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@stream-io/react-native-webrtc", - "version": "137.1.3", + "version": "137.1.4-alpha.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@stream-io/react-native-webrtc", - "version": "137.1.3", + "version": "137.1.4-alpha.1", "license": "MIT", "dependencies": { "base64-js": "1.5.1", diff --git a/package.json b/package.json index 4cee13ecb..eefb82b52 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@stream-io/react-native-webrtc", - "version": "137.1.3", + "version": "137.1.4-alpha.1", "repository": { 
"type": "git", "url": "git+https://github.com/GetStream/react-native-webrtc.git" From 68c0d9769947183162e38004dfee292a9cc5fc69 Mon Sep 17 00:00:00 2001 From: Santhosh Vaiyapuri Date: Mon, 13 Apr 2026 14:28:52 +0200 Subject: [PATCH 03/16] fix(ios): drop engine availability APIs unavailable in our WebRTC SDK Our StreamWebRTC framework exposes RTCAudioEngineState via engineState but does not provide RTCAudioEngineAvailability or -setEngineAvailability:. Remove the corresponding native bridge methods and their TS wrappers so the example app compiles. Also drop the unused AudioDeviceModuleObserver import and trim AudioEngineAvailability from the public exports. Co-Authored-By: Claude Opus 4.6 (1M context) --- .../WebRTCModule+RTCAudioDeviceModule.m | 29 ++------------ src/AudioDeviceModule.ts | 40 ++----------------- src/index.ts | 3 +- 3 files changed, 8 insertions(+), 64 deletions(-) diff --git a/ios/RCTWebRTC/WebRTCModule+RTCAudioDeviceModule.m b/ios/RCTWebRTC/WebRTCModule+RTCAudioDeviceModule.m index 5fb00df2a..9da67d118 100644 --- a/ios/RCTWebRTC/WebRTCModule+RTCAudioDeviceModule.m +++ b/ios/RCTWebRTC/WebRTCModule+RTCAudioDeviceModule.m @@ -3,7 +3,6 @@ #import #import -#import "AudioDeviceModuleObserver.h" #import "WebRTCModule.h" // The underlying `RTCAudioDeviceModule` is owned by the `RTCPeerConnectionFactory`. @@ -208,30 +207,10 @@ @implementation WebRTCModule (RTCAudioDeviceModule) } } -RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleGetEngineAvailability) { - RTCAudioEngineAvailability availability = RAW_ADM.engineAvailability; - return @{ - @"isInputAvailable" : @(availability.isInputAvailable), - @"isOutputAvailable" : @(availability.isOutputAvailable) - }; -} - -RCT_EXPORT_METHOD(audioDeviceModuleSetEngineAvailability - : (NSDictionary *)availabilityDict resolver - : (RCTPromiseResolveBlock)resolve rejecter - : (RCTPromiseRejectBlock)reject) { - RTCAudioEngineAvailability availability; - availability.isInputAvailable = [availabilityDict[@"isInputAvailable"] boolValue]; - availability.isOutputAvailable = [availabilityDict[@"isOutputAvailable"] boolValue]; - NSInteger result = [RAW_ADM setEngineAvailability:availability]; - if (result == 0) { - resolve(nil); - } else { - reject(@"engine_availability_error", - [NSString stringWithFormat:@"Failed to set engine availability: %ld", (long)result], - nil); - } -} +// TODO: `getEngineAvailability` / `setEngineAvailability` were dropped because the +// Stream WebRTC SDK does not expose `RTCAudioEngineAvailability` / `-setEngineAvailability:`. +// The closest equivalent is `RTCAudioEngineState` via `engineState`, but the +// semantics differ and the JS API isn't consumed anywhere yet. // TODO: Observer delegate "resolve" methods were skipped because our current // `AudioDeviceModuleObserver` does not expose async JS-driven resolution hooks; diff --git a/src/AudioDeviceModule.ts b/src/AudioDeviceModule.ts index 8d2ab676d..84c9923df 100644 --- a/src/AudioDeviceModule.ts +++ b/src/AudioDeviceModule.ts @@ -9,22 +9,6 @@ export enum AudioEngineMuteMode { InputMixer = 2, } -export interface AudioEngineAvailability { - isInputAvailable: boolean; - isOutputAvailable: boolean; -} - -export const AudioEngineAvailability = { - default: { - isInputAvailable: true, - isOutputAvailable: true, - }, - none: { - isInputAvailable: false, - isOutputAvailable: false, - }, -} as const; - /** * Audio Device Module API for controlling audio devices and settings. * iOS/macOS only - will throw on Android. 
@@ -305,25 +289,7 @@ export class AudioDeviceModule { return WebRTCModule.audioDeviceModuleSetRecordingAlwaysPreparedMode(enabled); } - /** - * Get the current engine availability (input/output availability) - */ - static getEngineAvailability(): AudioEngineAvailability { - if (Platform.OS === 'android') { - throw new Error('AudioDeviceModule is only available on iOS/macOS'); - } - - return WebRTCModule.audioDeviceModuleGetEngineAvailability(); - } - - /** - * Set the engine availability (input/output availability) - */ - static async setEngineAvailability(availability: AudioEngineAvailability): Promise { - if (Platform.OS === 'android') { - throw new Error('AudioDeviceModule is only available on iOS/macOS'); - } - - return WebRTCModule.audioDeviceModuleSetEngineAvailability(availability); - } + // TODO: getEngineAvailability / setEngineAvailability are not supported by the + // Stream WebRTC SDK (no RTCAudioEngineAvailability type / setEngineAvailability: + // method). Re-add if/when the native API lands. } diff --git a/src/index.ts b/src/index.ts index 476898229..7d0983b12 100644 --- a/src/index.ts +++ b/src/index.ts @@ -8,7 +8,7 @@ if (WebRTCModule === null) { }`); } -import { AudioDeviceModule, AudioEngineMuteMode, AudioEngineAvailability } from './AudioDeviceModule'; +import { AudioDeviceModule, AudioEngineMuteMode } from './AudioDeviceModule'; import { audioDeviceModuleEvents } from './AudioDeviceModuleEvents'; import { setupNativeEvents } from './EventEmitter'; import Logger from './Logger'; @@ -55,7 +55,6 @@ export { registerGlobals, AudioDeviceModule, AudioEngineMuteMode, - AudioEngineAvailability, audioDeviceModuleEvents, }; From fa3614c8b7b8d21ce8b24c289ed390c3f464fdf0 Mon Sep 17 00:00:00 2001 From: Santhosh Vaiyapuri Date: Mon, 13 Apr 2026 14:58:56 +0200 Subject: [PATCH 04/16] chore: example app --- .gitignore | 1 + examples/GumTestApp/ios/GumTestApp-tvOS/Info.plist | 2 +- .../ios/GumTestApp.xcodeproj/project.pbxproj | 10 ++++++++-- examples/GumTestApp/ios/GumTestApp/Info.plist | 2 +- 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index ba4d9bdf9..da590b420 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ WebRTC.xcframework WebRTC.dSYMs examples/GumTestApp/package-lock.json examples/GumTestApp_macOS/package-lock.json +**/.xcode.env.local *.jar *.tgz *.zip diff --git a/examples/GumTestApp/ios/GumTestApp-tvOS/Info.plist b/examples/GumTestApp/ios/GumTestApp-tvOS/Info.plist index 72946a788..9dccc86f3 100644 --- a/examples/GumTestApp/ios/GumTestApp-tvOS/Info.plist +++ b/examples/GumTestApp/ios/GumTestApp-tvOS/Info.plist @@ -36,7 +36,7 @@ NSLocationWhenInUseUsageDescription RCTNewArchEnabled - + UILaunchStoryboardName LaunchScreen UIRequiredDeviceCapabilities diff --git a/examples/GumTestApp/ios/GumTestApp.xcodeproj/project.pbxproj b/examples/GumTestApp/ios/GumTestApp.xcodeproj/project.pbxproj index d04f17a7c..7ca07461f 100644 --- a/examples/GumTestApp/ios/GumTestApp.xcodeproj/project.pbxproj +++ b/examples/GumTestApp/ios/GumTestApp.xcodeproj/project.pbxproj @@ -759,7 +759,10 @@ ); MTL_ENABLE_DEBUG_INFO = YES; ONLY_ACTIVE_ARCH = YES; - OTHER_LDFLAGS = "$(inherited) "; + OTHER_LDFLAGS = ( + "$(inherited)", + " ", + ); REACT_NATIVE_PATH = "${PODS_ROOT}/../../node_modules/react-native"; SDKROOT = iphoneos; SWIFT_ACTIVE_COMPILATION_CONDITIONS = "$(inherited) DEBUG"; @@ -816,7 +819,10 @@ "\"$(inherited)\"", ); MTL_ENABLE_DEBUG_INFO = NO; - OTHER_LDFLAGS = "$(inherited) "; + OTHER_LDFLAGS = ( + "$(inherited)", + " ", + ); REACT_NATIVE_PATH 
= "${PODS_ROOT}/../../node_modules/react-native"; SDKROOT = iphoneos; USE_HERMES = true; diff --git a/examples/GumTestApp/ios/GumTestApp/Info.plist b/examples/GumTestApp/ios/GumTestApp/Info.plist index 99880dda5..60abedb79 100644 --- a/examples/GumTestApp/ios/GumTestApp/Info.plist +++ b/examples/GumTestApp/ios/GumTestApp/Info.plist @@ -44,7 +44,7 @@ NSMicrophoneUsageDescription I NEED MICROPHONE RCTNewArchEnabled - + UILaunchStoryboardName LaunchScreen UIRequiredDeviceCapabilities From 1217607961b637a63cf340c2a04ed708396b2cb5 Mon Sep 17 00:00:00 2001 From: Santhosh Vaiyapuri Date: Mon, 13 Apr 2026 14:59:09 +0200 Subject: [PATCH 05/16] 137.1.4-alpha.2 --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index d62be1e99..a91d6e93a 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@stream-io/react-native-webrtc", - "version": "137.1.4-alpha.1", + "version": "137.1.4-alpha.2", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@stream-io/react-native-webrtc", - "version": "137.1.4-alpha.1", + "version": "137.1.4-alpha.2", "license": "MIT", "dependencies": { "base64-js": "1.5.1", diff --git a/package.json b/package.json index eefb82b52..13dc6d4bb 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@stream-io/react-native-webrtc", - "version": "137.1.4-alpha.1", + "version": "137.1.4-alpha.2", "repository": { "type": "git", "url": "git+https://github.com/GetStream/react-native-webrtc.git" From 2e2f976e221532059b17e04a079363d7ee425e4f Mon Sep 17 00:00:00 2001 From: Santhosh Vaiyapuri Date: Mon, 13 Apr 2026 16:16:31 +0200 Subject: [PATCH 06/16] android impl --- .gitignore | 1 + .../WebRTCModule/SpeechActivityDetector.java | 152 ++++++++++++++++++ .../com/oney/WebRTCModule/WebRTCModule.java | 32 ++++ src/AudioDeviceModuleEvents.ts | 10 +- 4 files changed, 191 insertions(+), 4 deletions(-) create mode 100644 android/src/main/java/com/oney/WebRTCModule/SpeechActivityDetector.java diff --git a/.gitignore b/.gitignore index da590b420..743ca6449 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,7 @@ WebRTC.dSYMs examples/GumTestApp/package-lock.json examples/GumTestApp_macOS/package-lock.json **/.xcode.env.local +**/PLAN.md *.jar *.tgz *.zip diff --git a/android/src/main/java/com/oney/WebRTCModule/SpeechActivityDetector.java b/android/src/main/java/com/oney/WebRTCModule/SpeechActivityDetector.java new file mode 100644 index 000000000..be3bd8408 --- /dev/null +++ b/android/src/main/java/com/oney/WebRTCModule/SpeechActivityDetector.java @@ -0,0 +1,152 @@ +package com.oney.WebRTCModule; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.ShortBuffer; +import java.util.Iterator; +import java.util.Map; +import java.util.TreeMap; + +/** + * Tells you when the user is talking, by measuring how loud the mic is. + * + *

+ * <p>How it works:
+ * <ol>
+ *   <li>Every ~10 ms the mic gives us a chunk of samples.</li>
+ *   <li>Convert each chunk to one "loudness" number in decibels (dB):
+ *       quiet room ≈ -60 dB, normal speech ≈ -30 to -20 dB.</li>
+ *   <li>Keep the last 600 ms of dB values in a sliding window and average them.</li>
+ *   <li>If the average crosses {@link #THRESHOLD_DB} (-45 dB) and stays on the
+ *       other side for {@link #HYSTERESIS_MS} (200 ms), flip state and fire
+ *       {@code onSpeechStarted} / {@code onSpeechEnded}. The 200 ms wait
+ *       prevents flapping when the average bounces around the threshold.</li>
+ * </ol>
+ *
+ * <p>Not "real" voice recognition. This only looks at energy/loudness,
+ * not voice features. Loud non-voice sounds (typing, door slams, music) will
+ * trigger {@code onSpeechStarted}. iOS uses Apple's hardware VAD which is
+ * smarter, but Android has no equivalent — same tradeoff stream-video-android
+ * lives with.
+ *
+ * <p>Thread-safety: single-threaded — only the WebRTC audio thread should call
+ * {@link #processBuffer}. Listener callbacks fire synchronously on that thread;
+ * the listener is responsible for dispatching to the JS thread.
+ */
+class SpeechActivityDetector {
+
+    interface Listener {
+        void onSpeechStarted();
+        void onSpeechEnded();
+    }
+
+    private static final double THRESHOLD_DB = -45.0;
+    private static final long WINDOW_MS = 600;
+    private static final long HYSTERESIS_MS = 200;
+
+    private final Listener listener;
+    private final TreeMap<Long, Double> windowEntries = new TreeMap<>();
+
+    private boolean isSpeaking = false;
+    /** Timestamp at which we first observed the candidate (opposite) state. */
+    private long candidateStateStartMs = -1;
+
+    SpeechActivityDetector(Listener listener) {
+        this.listener = listener;
+    }
+
+    /**
+     * Feed one mic chunk through the detector. Reads PCM16 LE samples from
+     * {@code audioBuffer} without mutating its position/limit. May fire a
+     * listener callback synchronously if state flips.
+     *
+     * <p>Must be called on the WebRTC audio thread, BEFORE any code that mutates
+     * {@code audioBuffer} (e.g. screen-audio mixing) — otherwise the detector
+     * sees post-mix audio and triggers on system sounds.
+     */
+    void processBuffer(ByteBuffer audioBuffer, int bytesRead) {
+        if (bytesRead <= 0) {
+            return;
+        }
+
+        // Work on a duplicate so we never mutate the caller's position/limit.
+        ByteBuffer buf = audioBuffer.duplicate();
+        buf.position(0);
+        buf.limit(bytesRead);
+        buf.order(ByteOrder.LITTLE_ENDIAN);
+        ShortBuffer shorts = buf.asShortBuffer();
+
+        int numSamples = shorts.remaining();
+        if (numSamples == 0) {
+            return;
+        }
+
+        double sumSquares = 0;
+        for (int i = 0; i < numSamples; i++) {
+            double sample = shorts.get(i);
+            sumSquares += sample * sample;
+        }
+
+        double rms = Math.sqrt(sumSquares / numSamples);
+        double db = (rms > 0) ? 20.0 * Math.log10(rms) : -100.0;
+
+        long now = System.currentTimeMillis();
+
+        // Add the new entry and prune stale ones.
+        windowEntries.put(now, db);
+        long cutoff = now - WINDOW_MS;
+        Iterator<Map.Entry<Long, Double>> it = windowEntries.entrySet().iterator();
+        while (it.hasNext()) {
+            if (it.next().getKey() < cutoff) {
+                it.remove();
+            } else {
+                break; // TreeMap is sorted — remaining entries are within the window.
+            }
+        }
+
+        // Compute window average dB.
+        double sum = 0;
+        for (double value : windowEntries.values()) {
+            sum += value;
+        }
+        double avgDb = sum / windowEntries.size();
+
+        boolean aboveThreshold = avgDb > THRESHOLD_DB;
+
+        if (aboveThreshold == isSpeaking) {
+            // State matches — reset hysteresis counter.
+            candidateStateStartMs = -1;
+        } else {
+            // State differs from current — track how long.
+            if (candidateStateStartMs < 0) {
+                candidateStateStartMs = now;
+            }
+            if (now - candidateStateStartMs >= HYSTERESIS_MS) {
+                isSpeaking = aboveThreshold;
+                candidateStateStartMs = -1;
+                if (isSpeaking) {
+                    listener.onSpeechStarted();
+                } else {
+                    listener.onSpeechEnded();
+                }
+            }
+        }
+    }
+
+    /** Wipes the sliding window and state. Call on recorder start. No event fires. */
+    void reset() {
+        windowEntries.clear();
+        isSpeaking = false;
+        candidateStateStartMs = -1;
+    }
+
+    /**
+     * Call on recorder stop. If we were in {@code started}, force-fires
+     * {@code onSpeechEnded} so JS doesn't get latched, then resets.
+ */ + void onRecordStop() { + if (isSpeaking) { + listener.onSpeechEnded(); + } + reset(); + } +} diff --git a/android/src/main/java/com/oney/WebRTCModule/WebRTCModule.java b/android/src/main/java/com/oney/WebRTCModule/WebRTCModule.java index 831232cef..089f6150a 100644 --- a/android/src/main/java/com/oney/WebRTCModule/WebRTCModule.java +++ b/android/src/main/java/com/oney/WebRTCModule/WebRTCModule.java @@ -51,6 +51,7 @@ public class WebRTCModule extends ReactContextBaseJavaModule { final Map localStreams; private final GetUserMediaImpl getUserMediaImpl; + private SpeechActivityDetector speechActivityDetector; public WebRTCModule(ReactApplicationContext reactContext) { super(reactContext); @@ -124,12 +125,32 @@ public WebRTCModule(ReactApplicationContext reactContext) { } private JavaAudioDeviceModule createAudioDeviceModule(ReactApplicationContext reactContext) { + speechActivityDetector = new SpeechActivityDetector(new SpeechActivityDetector.Listener() { + @Override + public void onSpeechStarted() { + WritableMap params = Arguments.createMap(); + params.putString("event", "started"); + sendEvent("audioDeviceModuleSpeechActivity", params); + } + + @Override + public void onSpeechEnded() { + WritableMap params = Arguments.createMap(); + params.putString("event", "ended"); + sendEvent("audioDeviceModuleSpeechActivity", params); + } + }); + return JavaAudioDeviceModule .builder(reactContext) .setUseHardwareAcousticEchoCanceler(Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) .setUseHardwareNoiseSuppressor(Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) .setUseStereoOutput(true) .setAudioBufferCallback((audioBuffer, audioFormat, channelCount, sampleRate, bytesRead, captureTimeNs) -> { + // 1. Speech activity detection on raw mic data, BEFORE any mutation. + speechActivityDetector.processBuffer(audioBuffer, bytesRead); + + // 2. Existing screen-audio mixing — mutates audioBuffer in place. if (bytesRead > 0) { WebRTCModuleOptions.ScreenAudioBytesProvider provider = WebRTCModuleOptions.getInstance().screenAudioBytesProvider; @@ -142,6 +163,17 @@ private JavaAudioDeviceModule createAudioDeviceModule(ReactApplicationContext re } return captureTimeNs; }) + .setAudioRecordStateCallback(new JavaAudioDeviceModule.AudioRecordStateCallback() { + @Override + public void onWebRtcAudioRecordStart() { + speechActivityDetector.reset(); + } + + @Override + public void onWebRtcAudioRecordStop() { + speechActivityDetector.onRecordStop(); + } + }) .createAudioDeviceModule(); } diff --git a/src/AudioDeviceModuleEvents.ts b/src/AudioDeviceModuleEvents.ts index 190a62cde..b1dff2bf6 100644 --- a/src/AudioDeviceModuleEvents.ts +++ b/src/AudioDeviceModuleEvents.ts @@ -28,7 +28,8 @@ export type AudioDeviceModuleEventData = /** * Event emitter for RTCAudioDeviceModule delegate callbacks. - * iOS/macOS only. + * Speech activity events are supported on iOS, macOS, and Android. + * Engine/audio-processing-state events remain iOS/macOS only. */ class AudioDeviceModuleEventEmitter { private eventEmitter: NativeEventEmitter | null = null; @@ -39,17 +40,18 @@ class AudioDeviceModuleEventEmitter { return; } - if (Platform.OS !== 'android' && WebRTCModule) { + if (WebRTCModule) { this.eventEmitter = new NativeEventEmitter(WebRTCModule); } } /** - * Subscribe to speech activity events (started/ended) + * Subscribe to speech activity events (started/ended). + * Supported on iOS, macOS, and Android. 
*/ addSpeechActivityListener(listener: (data: SpeechActivityEventData) => void) { if (!this.eventEmitter) { - throw new Error('AudioDeviceModuleEvents is only available on iOS/macOS'); + throw new Error('AudioDeviceModuleEvents: native module not available'); } return this.eventEmitter.addListener('audioDeviceModuleSpeechActivity', listener); From 10adae0275bb64dede0aac35e42d3c8bffb59122 Mon Sep 17 00:00:00 2001 From: Santhosh Vaiyapuri Date: Mon, 13 Apr 2026 16:16:38 +0200 Subject: [PATCH 07/16] 137.1.4-alpha.3 --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index a91d6e93a..3d223c096 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@stream-io/react-native-webrtc", - "version": "137.1.4-alpha.2", + "version": "137.1.4-alpha.3", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@stream-io/react-native-webrtc", - "version": "137.1.4-alpha.2", + "version": "137.1.4-alpha.3", "license": "MIT", "dependencies": { "base64-js": "1.5.1", diff --git a/package.json b/package.json index 13dc6d4bb..fe2fb0413 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@stream-io/react-native-webrtc", - "version": "137.1.4-alpha.2", + "version": "137.1.4-alpha.3", "repository": { "type": "git", "url": "git+https://github.com/GetStream/react-native-webrtc.git" From 3a58794eeed1dca1e09e6a006bc5afd55d93bad3 Mon Sep 17 00:00:00 2001 From: Santhosh Vaiyapuri Date: Mon, 13 Apr 2026 16:34:46 +0200 Subject: [PATCH 08/16] fix android bug --- .../java/com/oney/WebRTCModule/SpeechActivityDetector.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/android/src/main/java/com/oney/WebRTCModule/SpeechActivityDetector.java b/android/src/main/java/com/oney/WebRTCModule/SpeechActivityDetector.java index be3bd8408..a0f8bba01 100644 --- a/android/src/main/java/com/oney/WebRTCModule/SpeechActivityDetector.java +++ b/android/src/main/java/com/oney/WebRTCModule/SpeechActivityDetector.java @@ -80,9 +80,14 @@ void processBuffer(ByteBuffer audioBuffer, int bytesRead) { return; } + // Normalize int16 samples to [-1.0, 1.0] BEFORE squaring so the resulting + // dB value is dBFS (decibels relative to full scale). Without this, dB is + // computed against a 1-sample-unit reference and silence reads as ~+40, + // making the -45 dBFS threshold uncrossable from above (started would + // fire once and ended would never fire). 
double sumSquares = 0; for (int i = 0; i < numSamples; i++) { - double sample = shorts.get(i); + double sample = shorts.get(i) / (double) Short.MAX_VALUE; sumSquares += sample * sample; } From 1ee1dfa557e26eb67220746f12ec40c917d8307f Mon Sep 17 00:00:00 2001 From: Santhosh Vaiyapuri Date: Mon, 13 Apr 2026 16:35:09 +0200 Subject: [PATCH 09/16] 137.1.4-alpha.4 --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 3d223c096..b40be700f 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@stream-io/react-native-webrtc", - "version": "137.1.4-alpha.3", + "version": "137.1.4-alpha.4", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@stream-io/react-native-webrtc", - "version": "137.1.4-alpha.3", + "version": "137.1.4-alpha.4", "license": "MIT", "dependencies": { "base64-js": "1.5.1", diff --git a/package.json b/package.json index fe2fb0413..6fcb32927 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@stream-io/react-native-webrtc", - "version": "137.1.4-alpha.3", + "version": "137.1.4-alpha.4", "repository": { "type": "git", "url": "git+https://github.com/GetStream/react-native-webrtc.git" From a20e388096b1160bc91c060982fa12a2567f213d Mon Sep 17 00:00:00 2001 From: Santhosh Vaiyapuri Date: Mon, 13 Apr 2026 16:51:33 +0200 Subject: [PATCH 10/16] lower threshold --- .../WebRTCModule/SpeechActivityDetector.java | 110 +++++++++--------- 1 file changed, 53 insertions(+), 57 deletions(-) diff --git a/android/src/main/java/com/oney/WebRTCModule/SpeechActivityDetector.java b/android/src/main/java/com/oney/WebRTCModule/SpeechActivityDetector.java index a0f8bba01..c621440dc 100644 --- a/android/src/main/java/com/oney/WebRTCModule/SpeechActivityDetector.java +++ b/android/src/main/java/com/oney/WebRTCModule/SpeechActivityDetector.java @@ -3,25 +3,37 @@ import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.ShortBuffer; -import java.util.Iterator; -import java.util.Map; -import java.util.TreeMap; /** - * Tells you when the user is talking, by measuring how loud the mic is. + * Tells you when the user is talking, by watching how loud the mic is over time. * *

  * <p>How it works:
  * <ol>
  *   <li>Every ~10 ms the mic gives us a chunk of samples.</li>
  3. Convert each chunk to one "loudness" number in decibels (dB): - * quiet room ≈ -60 dB, normal speech ≈ -30 to -20 dB.
  4. - *
  5. Keep the last 600 ms of dB values in a sliding window and average them.
  6. - *
  7. If the average crosses {@link #THRESHOLD_DB} (-45 dB) and stays on the - * other side for {@link #HYSTERESIS_MS} (200 ms), flip state and fire - * {@code onSpeechStarted} / {@code onSpeechEnded}. The 200 ms wait - * prevents flapping when the average bounces around the threshold.
  8. + *
+ *   <li>Convert each chunk to one "loudness" number in dBFS (decibels relative
+ *       to full scale): quiet room ≈ -60 dB, normal speech ≈ -30 to -20 dB,
+ *       speaking close to the mic ≈ -15 to -10 dB.</li>
+ *   <li>Track two things only: when we last saw a loud chunk and
+ *       when the current run of loud chunks started.</li>
+ *   <li>Fire {@code onSpeechStarted} once we've had loud chunks for
+ *       {@link #START_CONFIRM_MS} in a row. Fire {@code onSpeechEnded} once
+ *       {@link #SILENCE_TIMEOUT_MS} has passed with no loud chunks. The
+ *       timeout is long enough to span natural between-word pauses.</li>
  * </ol>
  *

+ * <p>Why this, not a rolling dB average? Android's AGC (automatic gain
+ * control) ramps the mic gain back up the instant speech stops, amplifying
+ * room noise to -35 or -40 dB. A rolling average over that noise never drops
+ * below the threshold, so {@code onSpeechEnded} would never fire. Looking at
+ * "time since last loud peak" is immune to that — pauses between words are
+ * short, but a real stop is sustained.
+ *

+ * <p>Alignment with stream-video-android. stream-video-android's
+ * {@code SoundInputProcessor} fires only an "edge-up" callback and relies on
+ * the app layer to infer "stopped". We need the {@code ended} edge to match
+ * the iOS contract, so we add the silence-timeout inference here using the
+ * same {@code -45 dBFS} threshold they use.
+ *

Not "real" voice recognition. This only looks at energy/loudness, * not voice features. Loud non-voice sounds (typing, door slams, music) will * trigger {@code onSpeechStarted}. iOS uses Apple's hardware VAD which is @@ -39,16 +51,20 @@ interface Listener { void onSpeechEnded(); } + /** Above this dBFS level a chunk counts as "loud". Matches stream-video-android. */ private static final double THRESHOLD_DB = -45.0; - private static final long WINDOW_MS = 600; - private static final long HYSTERESIS_MS = 200; + /** Require loud chunks for this long before firing started (rejects door slams). */ + private static final long START_CONFIRM_MS = 150; + /** Fire ended after this long with no loud chunk (spans natural between-word pauses). */ + private static final long SILENCE_TIMEOUT_MS = 900; private final Listener listener; - private final TreeMap windowEntries = new TreeMap<>(); private boolean isSpeaking = false; - /** Timestamp at which we first observed the candidate (opposite) state. */ - private long candidateStateStartMs = -1; + /** Start of the current run of above-threshold chunks, or -1 if last chunk was quiet. */ + private long firstLoudMs = -1; + /** Last time any chunk was above threshold, or -1 if never (or cleared on ended). */ + private long lastLoudMs = -1; SpeechActivityDetector(Listener listener) { this.listener = listener; @@ -82,9 +98,7 @@ void processBuffer(ByteBuffer audioBuffer, int bytesRead) { // Normalize int16 samples to [-1.0, 1.0] BEFORE squaring so the resulting // dB value is dBFS (decibels relative to full scale). Without this, dB is - // computed against a 1-sample-unit reference and silence reads as ~+40, - // making the -45 dBFS threshold uncrossable from above (started would - // fire once and ended would never fire). + // computed against a 1-sample-unit reference and silence reads as ~+40. double sumSquares = 0; for (int i = 0; i < numSamples; i++) { double sample = shorts.get(i) / (double) Short.MAX_VALUE; @@ -96,52 +110,34 @@ void processBuffer(ByteBuffer audioBuffer, int bytesRead) { long now = System.currentTimeMillis(); - // Add the new entry and prune stale ones. - windowEntries.put(now, db); - long cutoff = now - WINDOW_MS; - Iterator> it = windowEntries.entrySet().iterator(); - while (it.hasNext()) { - if (it.next().getKey() < cutoff) { - it.remove(); - } else { - break; // TreeMap is sorted — remaining entries are within the window. + if (db > THRESHOLD_DB) { + // Loud chunk. Open a start window if one isn't already open, and + // remember this as the most recent loud chunk for ended timing. + lastLoudMs = now; + if (firstLoudMs < 0) { + firstLoudMs = now; } - } - - // Compute window average dB. - double sum = 0; - for (double value : windowEntries.values()) { - sum += value; - } - double avgDb = sum / windowEntries.size(); - - boolean aboveThreshold = avgDb > THRESHOLD_DB; - - if (aboveThreshold == isSpeaking) { - // State matches — reset hysteresis counter. - candidateStateStartMs = -1; - } else { - // State differs from current — track how long. - if (candidateStateStartMs < 0) { - candidateStateStartMs = now; + if (!isSpeaking && now - firstLoudMs >= START_CONFIRM_MS) { + isSpeaking = true; + listener.onSpeechStarted(); } - if (now - candidateStateStartMs >= HYSTERESIS_MS) { - isSpeaking = aboveThreshold; - candidateStateStartMs = -1; - if (isSpeaking) { - listener.onSpeechStarted(); - } else { - listener.onSpeechEnded(); - } + } else { + // Quiet chunk. Cancel any in-progress start confirmation. 
If we're + // already speaking, fire ended once the silence is long enough. + firstLoudMs = -1; + if (isSpeaking && lastLoudMs > 0 && now - lastLoudMs >= SILENCE_TIMEOUT_MS) { + isSpeaking = false; + lastLoudMs = -1; + listener.onSpeechEnded(); } } } - /** Wipes the sliding window and state. Call on recorder start. No event fires. */ + /** Wipes state. Call on recorder start. No event fires. */ void reset() { - windowEntries.clear(); isSpeaking = false; - candidateStateStartMs = -1; + firstLoudMs = -1; + lastLoudMs = -1; } /** From dcdccf95046ac8d4b1a72b68a90f2ad1812acf5a Mon Sep 17 00:00:00 2001 From: Santhosh Vaiyapuri Date: Mon, 13 Apr 2026 16:51:40 +0200 Subject: [PATCH 11/16] 137.1.4-alpha.5 --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index b40be700f..8f3a70152 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@stream-io/react-native-webrtc", - "version": "137.1.4-alpha.4", + "version": "137.1.4-alpha.5", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@stream-io/react-native-webrtc", - "version": "137.1.4-alpha.4", + "version": "137.1.4-alpha.5", "license": "MIT", "dependencies": { "base64-js": "1.5.1", diff --git a/package.json b/package.json index 6fcb32927..ba8cc44c3 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@stream-io/react-native-webrtc", - "version": "137.1.4-alpha.4", + "version": "137.1.4-alpha.5", "repository": { "type": "git", "url": "git+https://github.com/GetStream/react-native-webrtc.git" From 7c258ce4cb5602e4f7624eae60c7ff5c477d0a32 Mon Sep 17 00:00:00 2001 From: Santhosh Vaiyapuri Date: Tue, 14 Apr 2026 11:05:03 +0200 Subject: [PATCH 12/16] lint fix --- src/AudioDeviceModuleEvents.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/AudioDeviceModuleEvents.ts b/src/AudioDeviceModuleEvents.ts index b1dff2bf6..c25b74b1b 100644 --- a/src/AudioDeviceModuleEvents.ts +++ b/src/AudioDeviceModuleEvents.ts @@ -1,4 +1,4 @@ -import { NativeEventEmitter, NativeModules, Platform } from 'react-native'; +import { NativeEventEmitter, NativeModules } from 'react-native'; const { WebRTCModule } = NativeModules; From 5f9a2afdf18af991d1b1b044f96147960797504d Mon Sep 17 00:00:00 2001 From: Santhosh Vaiyapuri Date: Tue, 14 Apr 2026 11:12:44 +0200 Subject: [PATCH 13/16] code-rabbit review fix --- src/AudioDeviceModule.ts | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/AudioDeviceModule.ts b/src/AudioDeviceModule.ts index 84c9923df..1df93d5f0 100644 --- a/src/AudioDeviceModule.ts +++ b/src/AudioDeviceModule.ts @@ -253,6 +253,14 @@ export class AudioDeviceModule { throw new Error('AudioDeviceModule is only available on iOS/macOS'); } + if (typeof level !== 'number' || isNaN(level)) { + throw new TypeError(`setDuckingLevel: expected a number, got ${typeof level}`); + } + + if (!Number.isInteger(level) || level < 0 || level > 100) { + throw new RangeError(`setDuckingLevel: level must be an integer between 0 and 100, got ${level}`); + } + return WebRTCModule.audioDeviceModuleSetDuckingLevel(level); } From a1ba5f61f20b5401bfd90bc27197f2344978098b Mon Sep 17 00:00:00 2001 From: Santhosh Vaiyapuri Date: Tue, 14 Apr 2026 11:19:59 +0200 Subject: [PATCH 14/16] dont set new arch to false for iOS - example app --- examples/GumTestApp/ios/GumTestApp-tvOS/Info.plist | 2 +- examples/GumTestApp/ios/GumTestApp/Info.plist | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/examples/GumTestApp/ios/GumTestApp-tvOS/Info.plist b/examples/GumTestApp/ios/GumTestApp-tvOS/Info.plist
index 9dccc86f3..72946a788 100644
--- a/examples/GumTestApp/ios/GumTestApp-tvOS/Info.plist
+++ b/examples/GumTestApp/ios/GumTestApp-tvOS/Info.plist
@@ -36,7 +36,7 @@
   <key>NSLocationWhenInUseUsageDescription</key>
   <string></string>
   <key>RCTNewArchEnabled</key>
-  <false/>
+  <true/>
   <key>UILaunchStoryboardName</key>
   <string>LaunchScreen</string>
   <key>UIRequiredDeviceCapabilities</key>
diff --git a/examples/GumTestApp/ios/GumTestApp/Info.plist b/examples/GumTestApp/ios/GumTestApp/Info.plist
index 60abedb79..99880dda5 100644
--- a/examples/GumTestApp/ios/GumTestApp/Info.plist
+++ b/examples/GumTestApp/ios/GumTestApp/Info.plist
@@ -44,7 +44,7 @@
   <key>NSMicrophoneUsageDescription</key>
   <string>I NEED MICROPHONE</string>
   <key>RCTNewArchEnabled</key>
-  <false/>
+  <true/>
   <key>UILaunchStoryboardName</key>
   <string>LaunchScreen</string>
   <key>UIRequiredDeviceCapabilities</key>

From e7a6a5701efa41246499526b3117db12b1b90ebc Mon Sep 17 00:00:00 2001
From: Santhosh Vaiyapuri
Date: Tue, 14 Apr 2026 11:22:01 +0200
Subject: [PATCH 15/16] code-rabbit review fix

---
 examples/GumTestApp/ios/GumTestApp.xcodeproj/project.pbxproj | 2 --
 1 file changed, 2 deletions(-)

diff --git a/examples/GumTestApp/ios/GumTestApp.xcodeproj/project.pbxproj b/examples/GumTestApp/ios/GumTestApp.xcodeproj/project.pbxproj
index 7ca07461f..e5ba6d775 100644
--- a/examples/GumTestApp/ios/GumTestApp.xcodeproj/project.pbxproj
+++ b/examples/GumTestApp/ios/GumTestApp.xcodeproj/project.pbxproj
@@ -761,7 +761,6 @@
 				ONLY_ACTIVE_ARCH = YES;
 				OTHER_LDFLAGS = (
 					"$(inherited)",
-					" ",
 				);
 				REACT_NATIVE_PATH = "${PODS_ROOT}/../../node_modules/react-native";
 				SDKROOT = iphoneos;
@@ -821,7 +820,6 @@
 				MTL_ENABLE_DEBUG_INFO = NO;
 				OTHER_LDFLAGS = (
 					"$(inherited)",
-					" ",
 				);
 				REACT_NATIVE_PATH = "${PODS_ROOT}/../../node_modules/react-native";
 				SDKROOT = iphoneos;

From d4bd4e0682df3640acaa85c0e5414ca05e72b67a Mon Sep 17 00:00:00 2001
From: Santhosh Vaiyapuri
Date: Tue, 14 Apr 2026 11:31:24 +0200
Subject: [PATCH 16/16] code-rabbit review fix

---
 .../WebRTCModule+RTCAudioDeviceModule.m |  88 +++-------
 src/AudioDeviceModule.ts                | 160 ++++--------
 2 files changed, 59 insertions(+), 189 deletions(-)

diff --git a/ios/RCTWebRTC/WebRTCModule+RTCAudioDeviceModule.m b/ios/RCTWebRTC/WebRTCModule+RTCAudioDeviceModule.m
index 9da67d118..b531781c7 100644
--- a/ios/RCTWebRTC/WebRTCModule+RTCAudioDeviceModule.m
+++ b/ios/RCTWebRTC/WebRTCModule+RTCAudioDeviceModule.m
@@ -13,74 +13,54 @@

 @implementation WebRTCModule (RTCAudioDeviceModule)

+- (void)handleADMResult:(NSInteger)result
+              operation:(NSString *)op
+                   code:(NSString *)code
+                resolve:(RCTPromiseResolveBlock)resolve
+                 reject:(RCTPromiseRejectBlock)reject {
+    if (result == 0) {
+        resolve(nil);
+    } else {
+        reject(code, [NSString stringWithFormat:@"Failed to %@: %ld", op, (long)result], nil);
+    }
+}
+
 #pragma mark - Recording & Playback Control

 RCT_EXPORT_METHOD(audioDeviceModuleStartPlayout
                   : (RCTPromiseResolveBlock)resolve rejecter
                   : (RCTPromiseRejectBlock)reject) {
-    NSInteger result = [RAW_ADM startPlayout];
-    if (result == 0) {
-        resolve(nil);
-    } else {
-        reject(@"playout_error", [NSString stringWithFormat:@"Failed to start playout: %ld", (long)result], nil);
-    }
+    [self handleADMResult:[RAW_ADM startPlayout] operation:@"start playout" code:@"playout_error" resolve:resolve reject:reject];
 }

 RCT_EXPORT_METHOD(audioDeviceModuleStopPlayout
                   : (RCTPromiseResolveBlock)resolve rejecter
                   : (RCTPromiseRejectBlock)reject) {
-    NSInteger result = [RAW_ADM stopPlayout];
-    if (result == 0) {
-        resolve(nil);
-    } else {
-        reject(@"playout_error", [NSString stringWithFormat:@"Failed to stop playout: %ld", (long)result], nil);
-    }
+    [self handleADMResult:[RAW_ADM stopPlayout] operation:@"stop playout" code:@"playout_error" resolve:resolve reject:reject];
 }

 RCT_EXPORT_METHOD(audioDeviceModuleStartRecording
                   : (RCTPromiseResolveBlock)resolve rejecter
                   : (RCTPromiseRejectBlock)reject) {
-    NSInteger result = [RAW_ADM startRecording];
-    if (result == 0) {
-        resolve(nil);
-    } else {
-        reject(@"recording_error", [NSString stringWithFormat:@"Failed to start recording: %ld", (long)result], nil);
-    }
+    [self handleADMResult:[RAW_ADM startRecording] operation:@"start recording" code:@"recording_error" resolve:resolve reject:reject];
 }

 RCT_EXPORT_METHOD(audioDeviceModuleStopRecording
                   : (RCTPromiseResolveBlock)resolve rejecter
                   : (RCTPromiseRejectBlock)reject) {
-    NSInteger result = [RAW_ADM stopRecording];
-    if (result == 0) {
-        resolve(nil);
-    } else {
-        reject(@"recording_error", [NSString stringWithFormat:@"Failed to stop recording: %ld", (long)result], nil);
-    }
+    [self handleADMResult:[RAW_ADM stopRecording] operation:@"stop recording" code:@"recording_error" resolve:resolve reject:reject];
 }

 RCT_EXPORT_METHOD(audioDeviceModuleStartLocalRecording
                   : (RCTPromiseResolveBlock)resolve rejecter
                   : (RCTPromiseRejectBlock)reject) {
-    NSInteger result = [RAW_ADM initAndStartRecording];
-    if (result == 0) {
-        resolve(nil);
-    } else {
-        reject(
-            @"recording_error", [NSString stringWithFormat:@"Failed to start local recording: %ld", (long)result], nil);
-    }
+    [self handleADMResult:[RAW_ADM initAndStartRecording] operation:@"start local recording" code:@"recording_error" resolve:resolve reject:reject];
 }

 RCT_EXPORT_METHOD(audioDeviceModuleStopLocalRecording
                   : (RCTPromiseResolveBlock)resolve rejecter
                   : (RCTPromiseRejectBlock)reject) {
-    NSInteger result = [RAW_ADM stopRecording];
-    if (result == 0) {
-        resolve(nil);
-    } else {
-        reject(
-            @"recording_error", [NSString stringWithFormat:@"Failed to stop local recording: %ld", (long)result], nil);
-    }
+    [self handleADMResult:[RAW_ADM stopRecording] operation:@"stop local recording" code:@"recording_error" resolve:resolve reject:reject];
 }

 #pragma mark - Microphone Control

@@ -89,12 +69,7 @@ @implementation WebRTCModule (RTCAudioDeviceModule)
                   : (BOOL)muted resolver
                   : (RCTPromiseResolveBlock)resolve rejecter
                   : (RCTPromiseRejectBlock)reject) {
-    NSInteger result = [RAW_ADM setMicrophoneMuted:muted];
-    if (result == 0) {
-        resolve(nil);
-    } else {
-        reject(@"mute_error", [NSString stringWithFormat:@"Failed to set microphone mute: %ld", (long)result], nil);
-    }
+    [self handleADMResult:[RAW_ADM setMicrophoneMuted:muted] operation:@"set microphone mute" code:@"mute_error" resolve:resolve reject:reject];
 }

 RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleIsMicrophoneMuted) {
@@ -107,14 +82,7 @@ @implementation WebRTCModule (RTCAudioDeviceModule)
                   : (BOOL)enabled resolver
                   : (RCTPromiseResolveBlock)resolve rejecter
                   : (RCTPromiseRejectBlock)reject) {
-    NSInteger result = [RAW_ADM setVoiceProcessingEnabled:enabled];
-    if (result == 0) {
-        resolve(nil);
-    } else {
-        reject(@"voice_processing_error",
-               [NSString stringWithFormat:@"Failed to set voice processing: %ld", (long)result],
-               nil);
-    }
+    [self handleADMResult:[RAW_ADM setVoiceProcessingEnabled:enabled] operation:@"set voice processing" code:@"voice_processing_error" resolve:resolve reject:reject];
 }

 RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleIsVoiceProcessingEnabled) {
@@ -159,12 +127,7 @@ @implementation WebRTCModule (RTCAudioDeviceModule)
                   : (NSInteger)mode resolver
                   : (RCTPromiseResolveBlock)resolve rejecter
                   : (RCTPromiseRejectBlock)reject) {
-    NSInteger result = [RAW_ADM setMuteMode:(RTCAudioEngineMuteMode)mode];
-    if (result == 0) {
-        resolve(nil);
-    } else {
-        reject(@"mute_mode_error", [NSString stringWithFormat:@"Failed to set mute mode: %ld", (long)result], nil);
-    }
+    [self handleADMResult:[RAW_ADM setMuteMode:(RTCAudioEngineMuteMode)mode] operation:@"set mute mode" code:@"mute_mode_error" resolve:resolve reject:reject];
 }

 RCT_EXPORT_BLOCKING_SYNCHRONOUS_METHOD(audioDeviceModuleGetMuteMode) {
@@ -197,14 +160,7 @@ @implementation WebRTCModule (RTCAudioDeviceModule)
                   : (BOOL)enabled resolver
                   : (RCTPromiseResolveBlock)resolve rejecter
                   : (RCTPromiseRejectBlock)reject) {
-    NSInteger result = [RAW_ADM setRecordingAlwaysPreparedMode:enabled];
-    if (result == 0) {
-        resolve(nil);
-    } else {
-        reject(@"recording_always_prepared_mode_error",
-               [NSString stringWithFormat:@"Failed to set recording always prepared mode: %ld", (long)result],
-               nil);
-    }
+    [self handleADMResult:[RAW_ADM setRecordingAlwaysPreparedMode:enabled] operation:@"set recording always prepared mode" code:@"recording_always_prepared_mode_error" resolve:resolve reject:reject];
 }

 // TODO: `getEngineAvailability` / `setEngineAvailability` were dropped because the
diff --git a/src/AudioDeviceModule.ts b/src/AudioDeviceModule.ts
index 1df93d5f0..478c3e6c9 100644
--- a/src/AudioDeviceModule.ts
+++ b/src/AudioDeviceModule.ts
@@ -9,6 +9,18 @@ export enum AudioEngineMuteMode {
     InputMixer = 2,
 }

+/**
+ * Returns the native WebRTCModule after verifying the platform is iOS/macOS.
+ * Throws on Android where these audio device module APIs are not available.
+ */
+const getAudioDeviceModule = () => {
+    if (Platform.OS === 'android') {
+        throw new Error('AudioDeviceModule is only available on iOS/macOS');
+    }
+
+    return WebRTCModule;
+};
+
 /**
  * Audio Device Module API for controlling audio devices and settings.
  * iOS/macOS only - will throw on Android.
@@ -18,240 +30,154 @@ export class AudioDeviceModule {
      * Start audio playback
      */
     static async startPlayout(): Promise<void> {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleStartPlayout();
+        return getAudioDeviceModule().audioDeviceModuleStartPlayout();
     }

     /**
      * Stop audio playback
      */
     static async stopPlayout(): Promise<void> {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleStopPlayout();
+        return getAudioDeviceModule().audioDeviceModuleStopPlayout();
     }

     /**
      * Start audio recording
      */
     static async startRecording(): Promise<void> {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleStartRecording();
+        return getAudioDeviceModule().audioDeviceModuleStartRecording();
     }

     /**
      * Stop audio recording
      */
     static async stopRecording(): Promise<void> {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleStopRecording();
+        return getAudioDeviceModule().audioDeviceModuleStopRecording();
    }

     /**
      * Initialize and start local audio recording (calls initAndStartRecording)
      */
     static async startLocalRecording(): Promise<void> {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleStartLocalRecording();
+        return getAudioDeviceModule().audioDeviceModuleStartLocalRecording();
     }

     /**
      * Stop local audio recording
      */
     static async stopLocalRecording(): Promise<void> {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleStopLocalRecording();
+        return getAudioDeviceModule().audioDeviceModuleStopLocalRecording();
     }

     /**
      * Mute or unmute the microphone
      */
     static async setMicrophoneMuted(muted: boolean): Promise<void> {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleSetMicrophoneMuted(muted);
+        return getAudioDeviceModule().audioDeviceModuleSetMicrophoneMuted(muted);
     }

     /**
      * Check if microphone is currently muted
      */
     static isMicrophoneMuted(): boolean {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleIsMicrophoneMuted();
+        return getAudioDeviceModule().audioDeviceModuleIsMicrophoneMuted();
     }

     /**
      * Enable or disable voice processing (requires engine restart)
      */
     static async setVoiceProcessingEnabled(enabled: boolean): Promise<void> {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleSetVoiceProcessingEnabled(enabled);
+        return getAudioDeviceModule().audioDeviceModuleSetVoiceProcessingEnabled(enabled);
     }

     /**
      * Check if voice processing is enabled
      */
     static isVoiceProcessingEnabled(): boolean {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleIsVoiceProcessingEnabled();
+        return getAudioDeviceModule().audioDeviceModuleIsVoiceProcessingEnabled();
     }

     /**
      * Temporarily bypass voice processing without restarting the engine
      */
     static setVoiceProcessingBypassed(bypassed: boolean): void {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        WebRTCModule.audioDeviceModuleSetVoiceProcessingBypassed(bypassed);
+        getAudioDeviceModule().audioDeviceModuleSetVoiceProcessingBypassed(bypassed);
     }

     /**
      * Check if voice processing is currently bypassed
      */
     static isVoiceProcessingBypassed(): boolean {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleIsVoiceProcessingBypassed();
+        return getAudioDeviceModule().audioDeviceModuleIsVoiceProcessingBypassed();
     }

     /**
      * Enable or disable Automatic Gain Control (AGC)
      */
     static setVoiceProcessingAGCEnabled(enabled: boolean): void {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleSetVoiceProcessingAGCEnabled(enabled);
+        return getAudioDeviceModule().audioDeviceModuleSetVoiceProcessingAGCEnabled(enabled);
     }

     /**
      * Check if AGC is enabled
      */
     static isVoiceProcessingAGCEnabled(): boolean {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleIsVoiceProcessingAGCEnabled();
+        return getAudioDeviceModule().audioDeviceModuleIsVoiceProcessingAGCEnabled();
     }

     /**
      * Check if audio is currently playing
      */
     static isPlaying(): boolean {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleIsPlaying();
+        return getAudioDeviceModule().audioDeviceModuleIsPlaying();
     }

     /**
      * Check if audio is currently recording
      */
     static isRecording(): boolean {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleIsRecording();
+        return getAudioDeviceModule().audioDeviceModuleIsRecording();
     }

     /**
      * Check if the audio engine is running
      */
     static isEngineRunning(): boolean {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleIsEngineRunning();
+        return getAudioDeviceModule().audioDeviceModuleIsEngineRunning();
     }

     /**
      * Set the microphone mute mode
      */
     static async setMuteMode(mode: AudioEngineMuteMode): Promise<void> {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleSetMuteMode(mode);
+        return getAudioDeviceModule().audioDeviceModuleSetMuteMode(mode);
     }

     /**
      * Get the current mute mode
      */
     static getMuteMode(): AudioEngineMuteMode {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleGetMuteMode();
+        return getAudioDeviceModule().audioDeviceModuleGetMuteMode();
     }

     /**
      * Enable or disable advanced audio ducking
      */
     static setAdvancedDuckingEnabled(enabled: boolean): void {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleSetAdvancedDuckingEnabled(enabled);
+        return getAudioDeviceModule().audioDeviceModuleSetAdvancedDuckingEnabled(enabled);
     }

     /**
      * Check if advanced ducking is enabled
      */
     static isAdvancedDuckingEnabled(): boolean {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleIsAdvancedDuckingEnabled();
+        return getAudioDeviceModule().audioDeviceModuleIsAdvancedDuckingEnabled();
     }

     /**
      * Set the audio ducking level (0-100)
      */
     static setDuckingLevel(level: number): void {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
+        getAudioDeviceModule();

         if (typeof level !== 'number' || isNaN(level)) {
             throw new TypeError(`setDuckingLevel: expected a number, got ${typeof level}`);
@@ -268,33 +194,21 @@ export class AudioDeviceModule {
      * Get the current ducking level
      */
     static getDuckingLevel(): number {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleGetDuckingLevel();
+        return getAudioDeviceModule().audioDeviceModuleGetDuckingLevel();
     }

     /**
      * Check if recording always prepared mode is enabled
      */
     static isRecordingAlwaysPreparedMode(): boolean {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleIsRecordingAlwaysPreparedMode();
+        return getAudioDeviceModule().audioDeviceModuleIsRecordingAlwaysPreparedMode();
     }

     /**
      * Enable or disable recording always prepared mode
      */
     static async setRecordingAlwaysPreparedMode(enabled: boolean): Promise<void> {
-        if (Platform.OS === 'android') {
-            throw new Error('AudioDeviceModule is only available on iOS/macOS');
-        }
-
-        return WebRTCModule.audioDeviceModuleSetRecordingAlwaysPreparedMode(enabled);
+        return getAudioDeviceModule().audioDeviceModuleSetRecordingAlwaysPreparedMode(enabled);
     }

     // TODO: getEngineAvailability / setEngineAvailability are not supported by the
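
Usage sketch: the refactor above only moves the platform guard into getAudioDeviceModule(), so the JS-facing API is unchanged. A minimal example of calling it, assuming the library's root re-exports AudioDeviceModule and AudioEngineMuteMode (the 'react-native-webrtc' import path here is an assumption, not part of the patch):

    import { AudioDeviceModule, AudioEngineMuteMode } from 'react-native-webrtc';

    // Every call below throws on Android, so keep this code path iOS/macOS-only.
    async function prepareCallAudio(): Promise<void> {
        // Toggling voice processing requires an engine restart, so do it first.
        await AudioDeviceModule.setVoiceProcessingEnabled(true);
        await AudioDeviceModule.setMuteMode(AudioEngineMuteMode.InputMixer);
        await AudioDeviceModule.startLocalRecording();
        console.log('recording:', AudioDeviceModule.isRecording());
    }

On failure the returned promises reject with the native error codes defined above (e.g. recording_error, mute_mode_error) and a "Failed to <operation>: <status>" message.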