Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
54 changes: 25 additions & 29 deletions __tests__/html2/speechToSpeech/mute.unmute.html
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,11 @@
Test: Mute/Unmute functionality for Speech-to-Speech

This test validates:
1. Listening state can transition to muted and back to listening
2. Other states (idle) cannot transition to muted
3. Muted chunks contain all zeros (silent audio)
4. Uses useVoiceRecordingMuted hook via Composer pattern for mute/unmute control
1. Mute is allowed from all states except idle
2. When muted during listening, chunks contain all zeros (silent audio)
3. When unmuted, chunks contain real audio
4. Mute resets to false when recording stops
5. Uses useVoiceRecordingMuted hook for mute/unmute control
-->
<script type="module">
import { setupMockMediaDevices } from '/assets/esm/speechToSpeech/mockMediaDevices.js';
Expand Down Expand Up @@ -51,26 +52,18 @@
return bytes.every(byte => byte === 0);
}

// Helper to check if audio has non-zero data (real audio).
// Decodes the base64 payload and scans byte-by-byte; returns true on the
// first non-zero byte, false when every byte is zero (or the payload is empty).
function hasNonZeroAudio(base64Content) {
  const decoded = atob(base64Content);
  for (let index = 0; index < decoded.length; index++) {
    if (decoded.charCodeAt(index) !== 0) {
      return true;
    }
  }
  return false;
}

const audioChunks = [];
let currentVoiceState = 'idle';
let currentMicrophoneMuted = false;

// Setup Web Chat with Speech-to-Speech
const { directLine, store } = testHelpers.createDirectLineEmulator();
directLine.setCapability('getVoiceConfiguration', { sampleRate: 24000, chunkIntervalMs: 100 }, { emitEvent: false });

// Track voiceState changes
// Track voiceState and microphoneMuted changes
store.subscribe(() => {
currentVoiceState = store.getState().voice?.voiceState || 'idle';
currentMicrophoneMuted = store.getState().voice?.microphoneMuted || false;
});

// Intercept postActivity to capture outgoing voice chunks
Expand All @@ -79,7 +72,8 @@
if (activity.name === 'media.chunk' && activity.type === 'event') {
audioChunks.push({
content: activity.value?.content,
voiceState: currentVoiceState
voiceState: currentVoiceState,
microphoneMuted: currentMicrophoneMuted
});
}
return originalPostActivity(activity);
Expand All @@ -103,6 +97,7 @@

// Helper to get voice state from store
const getVoiceState = () => store.getState().voice?.voiceState;
const getMicrophoneMuted = () => store.getState().voice?.microphoneMuted;

render(
<FluentThemeProvider variant="fluent">
Expand Down Expand Up @@ -141,41 +136,42 @@
// Wait for some listening chunks
await pageConditions.became(
'At least 2 listening chunks received',
() => audioChunks.filter(c => c.voiceState === 'listening').length >= 2,
() => audioChunks.filter(c => c.voiceState === 'listening' && !c.microphoneMuted).length >= 2,
2000
);

// ===== TEST 3: Mute from listening state → muted state =====
// ===== TEST 3: Mute while listening → microphoneMuted true, voiceState stays listening =====
muteControlRef.setMuted(true);

await pageConditions.became(
'Voice state is muted',
() => getVoiceState() === 'muted',
'microphoneMuted is true',
() => getMicrophoneMuted() === true,
1000
);

expect(muteControlRef.muted).toBe(true);
expect(getVoiceState()).toBe('listening'); // voiceState stays listening

// Wait for muted chunks
await pageConditions.became(
'At least 2 muted chunks received',
() => audioChunks.filter(c => c.voiceState === 'muted').length >= 2,
() => audioChunks.filter(c => c.microphoneMuted).length >= 2,
2000
);

// ===== TEST 4: Verify muted chunks are all zeros =====
const mutedChunks = audioChunks.filter(c => c.voiceState === 'muted');
const mutedChunks = audioChunks.filter(c => c.microphoneMuted);
expect(mutedChunks.length).toBeGreaterThanOrEqual(2);
for (const chunk of mutedChunks) {
expect(isAudioAllZeros(chunk.content)).toBe(true);
}

// ===== TEST 5: Unmute → back to listening state =====
// ===== TEST 5: Unmute → microphoneMuted false =====
muteControlRef.setMuted(false);

await pageConditions.became(
'Voice state is listening after unmute',
() => getVoiceState() === 'listening',
'microphoneMuted is false after unmute',
() => getMicrophoneMuted() === false,
1000
);

Expand All @@ -190,15 +186,15 @@
);

// ===== TEST 6: Verify listening chunks contain real (non-zero) audio =====
const listeningChunks = audioChunks.filter(c => c.voiceState === 'listening');
const listeningChunks = audioChunks.filter(c => c.voiceState === 'listening' && !c.microphoneMuted);
expect(listeningChunks.length).toBeGreaterThanOrEqual(4); // At least 2 before mute + 2 after unmute

// Verify listening audio is non-zero (real audio)
for (const chunk of listeningChunks) {
expect(hasNonZeroAudio(chunk.content)).toBe(true);
expect(isAudioAllZeros(chunk.content)).toBe(false);
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks for cleaning this.

}

// ===== TEST 7: Stop recording =====
// ===== TEST 7: Stop recording → microphoneMuted resets to false =====
await host.click(micButton);

await pageConditions.became(
Expand All @@ -207,7 +203,7 @@
2000
);

expect(muteControlRef.muted).toBe(false);
expect(muteControlRef.muted).toBe(false); // microphoneMuted resets on stop
});
</script>
</body>
Expand Down
6 changes: 5 additions & 1 deletion packages/api/src/hooks/useVoiceRecordingMuted.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,14 @@ import { useDispatch, useSelector } from './internal/WebChatReduxContext';

/**
* Hook to get and set voice recording mute state in speech-to-speech mode.
*
* Mute is independent of voice state - it can be toggled at any time.
* When muted, silent audio chunks are sent instead of real audio.
* Mute resets to false when recording stops.
*/
export default function useVoiceRecordingMuted(): readonly [boolean, (muted: boolean) => void] {
const dispatch = useDispatch();
const value = useSelector(({ voice }) => voice.voiceState === 'muted');
const value = useSelector(({ voice }) => voice.microphoneMuted);

const setter = useCallback(
(muted: boolean) => {
Expand Down
Original file line number Diff line number Diff line change
@@ -1,17 +1,18 @@
import { useEffect, useCallback } from 'react';
import { useRecorder } from './useRecorder';
import usePostVoiceActivity from '../../../hooks/internal/usePostVoiceActivity';
import useVoiceRecordingMuted from '../../../hooks/useVoiceRecordingMuted';
import useVoiceState from '../../../hooks/useVoiceState';

/**
* VoiceRecorderBridge is an invisible component that bridges the Redux recording state
* with the actual microphone recording functionality.
*/
export function VoiceRecorderBridge(): null {
const [muted] = useVoiceRecordingMuted();
const [voiceState] = useVoiceState();
const postVoiceActivity = usePostVoiceActivity();

const muted = voiceState === 'muted';
// Derive recording state from voiceState - recording is active when not idle
const recording = voiceState !== 'idle';

Expand All @@ -32,17 +33,17 @@ export function VoiceRecorderBridge(): null {

const { mute, record } = useRecorder(handleAudioChunk);

useEffect(() => {
if (muted) {
return mute();
}
}, [muted, mute]);

useEffect(() => {
if (recording) {
return record();
}
}, [record, recording]);

useEffect(() => {
if (muted) {
return mute();
}
}, [muted, mute]);

return null;
}
2 changes: 1 addition & 1 deletion packages/core/src/actions/setVoiceState.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
const VOICE_SET_STATE = 'WEB_CHAT/VOICE_SET_STATE' as const;

type VoiceState = 'idle' | 'listening' | 'muted' | 'user_speaking' | 'processing' | 'bot_speaking';
// Finite set of speech-to-speech UI states; 'muted' is intentionally not a
// state here — mute is tracked independently via the microphoneMuted flag.
type VoiceState = 'idle' | 'listening' | 'user_speaking' | 'processing' | 'bot_speaking';

type VoiceSetStateAction = {
type: typeof VOICE_SET_STATE;
Expand Down
17 changes: 8 additions & 9 deletions packages/core/src/reducers/voiceActivity.ts
Original file line number Diff line number Diff line change
Expand Up @@ -24,11 +24,13 @@ type VoiceActivityActions =
| VoiceUnregisterHandlerAction;

interface VoiceActivityState {
microphoneMuted: boolean;
voiceState: VoiceState;
voiceHandlers: Map<string, VoiceHandler>;
}

const DEFAULT_STATE: VoiceActivityState = {
microphoneMuted: false,
voiceState: 'idle',
voiceHandlers: new Map()
};
Expand All @@ -39,15 +41,15 @@ export default function voiceActivity(
): VoiceActivityState {
switch (action.type) {
case VOICE_MUTE_RECORDING:
// Only allow muting when in listening state
if (state.voiceState !== 'listening') {
console.warn(`botframework-webchat: Cannot mute from "${state.voiceState}" state, must be "listening"`);
// Only allow muting when in recording state
if (state.voiceState === 'idle') {
Copy link
Copy Markdown
Contributor

@compulim compulim Apr 2, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If mute is being unassociated with voiceState, should we also remove this if-statement to untangle mute with voice state?

console.warn(`botframework-webchat: Cannot mute from "${state.voiceState}" state, must be in recording state.`);
return state;
}

return {
...state,
voiceState: 'muted'
microphoneMuted: true
};

case VOICE_REGISTER_HANDLER: {
Expand Down Expand Up @@ -87,17 +89,14 @@ export default function voiceActivity(
case VOICE_STOP_RECORDING:
return {
...state,
microphoneMuted: false,
voiceState: 'idle'
};

case VOICE_UNMUTE_RECORDING:
if (state.voiceState !== 'muted') {
console.warn(`botframework-webchat: Should not transit from "${state.voiceState}" to "listening"`);
}

return {
...state,
voiceState: 'listening'
microphoneMuted: false
};

default:
Expand Down
Loading