Skip to content
This repository was archived by the owner on Sep 18, 2025. It is now read-only.
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
85 changes: 85 additions & 0 deletions src/speech.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
*/
describe('speech function', () => {
// Stores mocked event listeners keyed by event name, so tests can fire
// recognition events (result/error/end) manually.
// NOTE(review): the `any[]` listener signature triggers the CI warning
// @typescript-eslint/no-explicit-any; `unknown[]` would satisfy the lint
// but may break assignments of concretely-typed handlers — verify call
// sites before changing.
let eventListeners: Record<string, Array<(...args: any[]) => void>> = {}

Check warning on line 13 in src/speech.test.ts

View workflow job for this annotation

GitHub Actions / lint-and-test (22.x)

Unexpected any. Specify a different type

// モックされたSpeechRecognitionインスタンス
let mockRecognitionInstance: {
Expand Down Expand Up @@ -156,6 +156,91 @@

// Assert: 期待される結果を確認
expect(mockRecognitionInstance.start).toHaveBeenCalledTimes(1)
expect(mockRecognitionInstance.start).toHaveBeenCalledWith()
})

it('When calling start method with MediaStreamTrack, it passes the track to recognition.start', () => {
  // Arrange: a live audio-track stub and the recognition wrapper under test
  const liveAudioTrack = {
    kind: 'audio',
    readyState: 'live',
  } as MediaStreamTrack
  const sut = speech({})

  // Act: start recognition with the explicit track
  sut.start(liveAudioTrack)

  // Assert: the underlying recognition received exactly that track, once
  expect(mockRecognitionInstance.start).toHaveBeenCalledTimes(1)
  expect(mockRecognitionInstance.start).toHaveBeenCalledWith(liveAudioTrack)
})

it('When calling start method with video track, it throws InvalidStateError', () => {
  // Arrange: a track of the wrong kind must be rejected before start
  const videoTrackStub = {
    kind: 'video',
    readyState: 'live',
  } as MediaStreamTrack
  const sut = speech({})
  const invokeStart = () => sut.start(videoTrackStub)

  // Act & Assert: rejected with a DOMException and the expected message,
  // and the underlying recognition is never started
  expect(invokeStart).toThrow(DOMException)
  expect(invokeStart).toThrow(
    'The provided MediaStreamTrack must be an audio track'
  )
  expect(mockRecognitionInstance.start).not.toHaveBeenCalled()
})

it('When calling start method with ended audio track, it throws InvalidStateError', () => {
  // Arrange: an audio track that is no longer live must be rejected
  const endedTrackStub = {
    kind: 'audio',
    readyState: 'ended',
  } as MediaStreamTrack
  const sut = speech({})
  const invokeStart = () => sut.start(endedTrackStub)

  // Act & Assert: rejected with a DOMException and the expected message,
  // and the underlying recognition is never started
  expect(invokeStart).toThrow(DOMException)
  expect(invokeStart).toThrow(
    'The provided MediaStreamTrack must be in "live" state'
  )
  expect(mockRecognitionInstance.start).not.toHaveBeenCalled()
})

it('When audioTrack is provided in options, it uses that track on start', () => {
  // Arrange: configure the track up front via options instead of start()
  const configuredTrack = {
    kind: 'audio',
    readyState: 'live',
  } as MediaStreamTrack
  const sut = speech({ audioTrack: configuredTrack })

  // Act: start without an explicit track
  sut.start()

  // Assert: the configured track was forwarded to the recognition engine
  expect(mockRecognitionInstance.start).toHaveBeenCalledTimes(1)
  expect(mockRecognitionInstance.start).toHaveBeenCalledWith(configuredTrack)
})

it('When both options.audioTrack and parameter audioTrack are provided, parameter takes precedence', () => {
  // Arrange: give the two tracks distinct labels. With identical shapes,
  // Jest's toHaveBeenCalledWith uses structural equality, so the original
  // assertion would pass no matter which track won — the labels make the
  // precedence check meaningful.
  const optionsTrack = {
    kind: 'audio',
    readyState: 'live',
    label: 'from-options',
  } as MediaStreamTrack
  const parameterTrack = {
    kind: 'audio',
    readyState: 'live',
    label: 'from-parameter',
  } as MediaStreamTrack
  const recognitionObj = speech({ audioTrack: optionsTrack })

  // Act: pass a track directly to start(), overriding the options one
  recognitionObj.start(parameterTrack)

  // Assert: only the parameter track (distinguished by label) was used
  expect(mockRecognitionInstance.start).toHaveBeenCalledTimes(1)
  expect(mockRecognitionInstance.start).toHaveBeenCalledWith(parameterTrack)
})

it('When calling stop method, it stops the recognition', () => {
Expand Down
30 changes: 28 additions & 2 deletions src/speech.ts
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,12 @@ export interface SpeechOptions {
* エラー時のコールバック
*/
onError?: (error: SpeechRecognitionErrorCode) => void

/**
 * MediaStreamTrack to use as the audio input source.
 * When omitted, the default microphone is used.
 */
audioTrack?: MediaStreamTrack
}

/**
Expand Down Expand Up @@ -111,9 +117,29 @@ export function speech(options: SpeechOptions = {}) {
return {
/**
* 音声認識を開始
* @param audioTrack - オプション: 使用する音声トラック
*/
start: () => {
recognition.start()
start: (audioTrack?: MediaStreamTrack) => {
  // The explicit parameter wins over the track configured in options.
  // Use `??` rather than `||`: the intent is a nullish fallback (a
  // MediaStreamTrack object can never be falsy, so behavior is unchanged,
  // but `??` states the contract precisely).
  const trackToUse = audioTrack ?? options.audioTrack

  if (trackToUse) {
    // Validate per the Web Speech API contract: only a live audio track
    // is an acceptable input source.
    if (trackToUse.kind !== 'audio') {
      throw new DOMException(
        'The provided MediaStreamTrack must be an audio track',
        'InvalidStateError'
      )
    }
    if (trackToUse.readyState !== 'live') {
      throw new DOMException(
        'The provided MediaStreamTrack must be in "live" state',
        'InvalidStateError'
      )
    }
    // @ts-expect-error - lib.dom.d.ts does not yet declare the newer
    // SpeechRecognition.start(MediaStreamTrack) overload from the spec draft
    recognition.start(trackToUse)
  } else {
    // No track supplied anywhere: fall back to the default microphone.
    recognition.start()
  }
},

/**
Expand Down
Loading