Camera & Microphone
Handling audio and video devices in a web application means working with MediaStream, MediaDeviceInfo, and other WebRTC API objects. We did our best to hide this complexity through a set of APIs exposed through a call instance, or a set of utility hooks.
Camera management
The SDK does its best to make working with the camera easy. We expose the following objects on the call:
Call settings
The default state of the camera is determined by the call type settings:
import { useCallStateHooks } from "@stream-io/video-react-sdk";
const { useCallSettings } = useCallStateHooks();
const settings = useCallSettings();
console.log(settings?.video.camera_default_on);
Start-stop camera
import { useCallStateHooks } from "@stream-io/video-react-sdk";
const { useCameraState } = useCallStateHooks();
const { camera, isMute } = useCameraState();
console.log(`Camera is ${isMute ? "off" : "on"}`);
await camera.toggle();
// or, alternatively
await camera.enable();
await camera.disable();
It’s always best to await calls to enable(), disable(), and toggle(). However, the SDK does its best to resolve potential race conditions: the last call always “wins”, so it’s safe to make these calls in an event handler.
Status is updated once the camera is actually enabled or disabled. Use optimisticIsMute for the “optimistic” status that is updated immediately after toggling the camera.
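For example, a toggle button can render from the optimistic value so it reacts immediately, while the rest of the UI follows the confirmed state. A minimal sketch (the button component is illustrative, not part of the SDK):
import { useCallStateHooks } from "@stream-io/video-react-sdk";

// Illustrative toggle button driven by the optimistic mute state.
export const ToggleCameraButton = () => {
  const { useCameraState } = useCallStateHooks();
  const { camera, optimisticIsMute } = useCameraState();
  return (
    <button onClick={() => camera.toggle()}>
      {optimisticIsMute ? "Turn on camera" : "Turn off camera"}
    </button>
  );
};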
List and select devices
import { useCallStateHooks } from "@stream-io/video-react-sdk";
const { useCameraState } = useCallStateHooks();
const { camera, selectedDevice, devices } = useCameraState();
console.log("current camera id:", selectedDevice);
console.log("available devices:", devices);
// find() may return undefined if no device matches the label
const preferredDevice = devices?.find((d) => d.label === "My Camera");
if (preferredDevice) await camera.select(preferredDevice.deviceId);
Camera permissions
import { useCallStateHooks } from "@stream-io/video-react-sdk";
const { useCameraState } = useCallStateHooks();
const { hasBrowserPermission } = useCameraState();
if (hasBrowserPermission) {
  console.log("User has granted camera permissions!");
} else {
  console.log("User has denied or not granted camera permissions!");
}
Lobby preview
Here is how to set up a video preview displayed before joining the call:
import { useCallStateHooks } from "@stream-io/video-react-sdk";
const { useCameraState } = useCallStateHooks();
const { camera, mediaStream } = useCameraState();
// will turn on the camera
await camera.enable();
// play the video preview
<video srcObject={mediaStream} autoPlay muted />;
Alternatively, you can use the SDK-provided VideoPreview component.
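A minimal usage sketch (the lobby component and its markup are illustrative; only VideoPreview comes from the SDK):
import { VideoPreview } from "@stream-io/video-react-sdk";

// Illustrative lobby component rendering the SDK-provided preview.
export const Lobby = () => {
  return (
    <div>
      <h2>Ready to join?</h2>
      <VideoPreview />
    </div>
  );
};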
Access to the Camera’s MediaStream
Our SDK exposes the current mediaStream instance that you can use for your own purposes (for example, local recording):
import { useCallStateHooks } from "@stream-io/video-react-sdk";
const { useCameraState } = useCallStateHooks();
const { mediaStream } = useCameraState();
// mediaStream is undefined until the camera is enabled
const [videoTrack] = mediaStream?.getVideoTracks() ?? [];
console.log("Video track", videoTrack);
Microphone management
The SDK does its best to make working with the microphone easy. We expose the following objects on the call:
Call settings
The default state of the microphone is determined by the call settings:
import { useCallStateHooks } from "@stream-io/video-react-sdk";
const { useCallSettings } = useCallStateHooks();
const settings = useCallSettings();
console.log(settings?.audio.mic_default_on);
Start-stop microphone
import { useCallStateHooks } from "@stream-io/video-react-sdk";
const { useMicrophoneState } = useCallStateHooks();
const { microphone, isMute } = useMicrophoneState();
console.log(`Microphone is ${isMute ? "off" : "on"}`);
await microphone.toggle();
// or, alternatively
await microphone.enable();
await microphone.disable();
It’s always best to await calls to enable(), disable(), and toggle(). However, the SDK does its best to resolve potential race conditions: the last call always “wins”, so it’s safe to make these calls in an event handler.
Status is updated once the microphone is actually enabled or disabled. Use optimisticIsMute for the “optimistic” status that is updated immediately after toggling the microphone.
List and select devices
import { useCallStateHooks } from "@stream-io/video-react-sdk";
const { useMicrophoneState } = useCallStateHooks();
const { microphone, selectedDevice, devices } = useMicrophoneState();
console.log("current mic-id:", selectedDevice);
console.log("available devices:", devices);
// find() may return undefined if no device matches the label
const preferredDevice = devices?.find((d) => d.label === "My Mic");
if (preferredDevice) await microphone.select(preferredDevice.deviceId);
Microphone permissions
import { useCallStateHooks } from "@stream-io/video-react-sdk";
const { useMicrophoneState } = useCallStateHooks();
const { hasBrowserPermission } = useMicrophoneState();
if (hasBrowserPermission) {
  console.log("User has granted microphone permissions!");
} else {
  console.log("User has denied or not granted microphone permissions!");
}
Speaking while muted detection
Our SDK provides a mechanism that can detect whether the user started to speak while being muted. Through this mechanism, you can display a notification to the user, or apply any custom logic.
This feature is enabled by default, unless the user doesn’t have permission to send audio, or it has been explicitly disabled.
import { useCallStateHooks } from "@stream-io/video-react-sdk";
const { useMicrophoneState } = useCallStateHooks();
const { isSpeakingWhileMuted, microphone } = useMicrophoneState();
if (isSpeakingWhileMuted) {
  // your custom logic comes here
  console.log("You are speaking while muted!");
}
// to disable this feature completely:
await microphone.disableSpeakingWhileMutedNotification();
// to enable it back:
await microphone.enableSpeakingWhileMutedNotification();
Access to the Microphone’s MediaStream
Our SDK exposes the current mediaStream instance that you can use for your own purposes (for example, local recording):
import { useCallStateHooks } from "@stream-io/video-react-sdk";
const { useMicrophoneState } = useCallStateHooks();
const { mediaStream } = useMicrophoneState();
// mediaStream is undefined until the microphone is enabled
const [audioTrack] = mediaStream?.getAudioTracks() ?? [];
console.log("Audio track", audioTrack);
Noise Cancellation
Check our Noise Cancellation guide.
Camera and Microphone AI/ML Filters
Both the Camera and the Microphone allow you to apply AI/ML filters to the media stream. This can be useful for various use cases, such as:
- applying video effects such as background blurring or background replacement
- applying a custom video filter (e.g., color correction or face detection)
- applying a custom audio filter (e.g., noise reduction)
import { useCallStateHooks } from "@stream-io/video-react-sdk";
const { useCameraState, useMicrophoneState } = useCallStateHooks();
const { camera } = useCameraState();
const { microphone } = useMicrophoneState();
// apply a custom video filter
const { unregister: unregisterMyVideoFilter } = camera.registerFilter(
  function myVideoFilter(inputMediaStream: MediaStream) {
    // initialize the video filter, do some magic and
    // return the modified media stream
    return {
      output: mediaStreamWithFilterApplied,
      stop: () => {
        // optional cleanup function
      },
    };
  },
);
// apply a custom audio filter
const { unregister: unregisterMyAudioFilter } = microphone.registerFilter(
  function myAudioFilter(inputMediaStream: MediaStream) {
    // initialize the audio filter, do some magic and
    // return the modified media stream
    return {
      output: mediaStreamWithFilterApplied,
      stop: () => {
        // optional cleanup function
      },
    };
  },
);
// unregister the filters
unregisterMyVideoFilter();
unregisterMyAudioFilter();
Filters can be registered and unregistered at any time, and the SDK will take care of the rest.
Filters can be chained, and the order of registration matters: the first registered filter will be the first to modify the raw MediaStream.
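For example, if you register a blur filter and then a watermark filter, the blur receives the raw camera stream and the watermark receives the blurred output. A minimal sketch, assuming hypothetical applyBlur and applyWatermark helpers that are not part of the SDK:
import { useCallStateHooks } from "@stream-io/video-react-sdk";

const { useCameraState } = useCallStateHooks();
const { camera } = useCameraState();

// Hypothetical helpers, not part of the SDK: each takes a MediaStream
// and returns a new MediaStream with the effect applied.
declare function applyBlur(input: MediaStream): MediaStream;
declare function applyWatermark(input: MediaStream): MediaStream;

// Registered first, so it receives the raw camera stream.
const blur = camera.registerFilter((input: MediaStream) => ({
  output: applyBlur(input),
}));

// Registered second, so it receives the output of the blur filter.
const watermark = camera.registerFilter((input: MediaStream) => ({
  output: applyWatermark(input),
}));

// Unregister when no longer needed.
blur.unregister();
watermark.unregister();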
A filter may be asynchronous. In this case, the function passed to the registerFilter method should synchronously return an object where output is a promise that resolves to a MediaStream:
camera.registerFilter(function myVideoFilter(inputMediaStream: MediaStream) {
  // note that output is returned synchronously!
  return {
    output: new Promise((resolve) => resolve(mediaStreamWithFilterApplied)),
    stop: () => {
      // optional cleanup function
    },
  };
});
Registering a filter is not instantaneous. If you need to know when a filter is registered (e.g., to show a spinner in the UI), await the registered promise returned by the registerFilter method:
const { registered } = camera.registerFilter(
  (inputMediaStream: MediaStream) => {
    // your video filter; it must return { output } as in the examples above
    return { output: inputMediaStream };
  },
);
await registered;
// at this point, the filter is registered
Once one or more filters are registered, the SDK will send the last returned MediaStream to the remote participants.
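In a React component, a common pattern is to register a filter in an effect and unregister it in the effect’s cleanup, so the filter’s lifetime follows the component’s. A minimal sketch, again assuming a hypothetical applyBlur helper:
import { useEffect } from "react";
import { useCallStateHooks } from "@stream-io/video-react-sdk";

// Hypothetical helper, not part of the SDK.
declare function applyBlur(input: MediaStream): MediaStream;

export const BlurredCameraFilter = () => {
  const { useCameraState } = useCallStateHooks();
  const { camera } = useCameraState();

  useEffect(() => {
    // register the filter when the component mounts
    const { unregister } = camera.registerFilter((input: MediaStream) => ({
      output: applyBlur(input),
    }));
    // unregister it when the component unmounts
    return () => {
      unregister();
    };
  }, [camera]);

  return null;
};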
Speaker management
Browser support
Selecting an audio output device for the call isn’t supported by all browsers; this is how you can check whether it’s available:
import { useCallStateHooks } from "@stream-io/video-react-sdk";
const { useSpeakerState } = useCallStateHooks();
const { isDeviceSelectionSupported } = useSpeakerState();
console.log("is speaker selection supported:", isDeviceSelectionSupported);
List and select devices
import { useCallStateHooks } from "@stream-io/video-react-sdk";
const { useSpeakerState } = useCallStateHooks();
const { speaker, selectedDevice, devices } = useSpeakerState();
console.log("current speaker id:", selectedDevice);
console.log("available devices:", devices);
// find() may return undefined if no device matches the label
const preferredDevice = devices?.find((d) => d.label === "My Speakers");
if (preferredDevice) await speaker.select(preferredDevice.deviceId);
Set master output volume
import { useCallStateHooks } from "@stream-io/video-react-sdk";
const { useSpeakerState } = useCallStateHooks();
const { speaker } = useSpeakerState();
speaker.setVolume(0.5); // 0.5 is 50% of the maximum volume
Set participant volume
import {
  useCallStateHooks,
  StreamVideoParticipant,
} from "@stream-io/video-react-sdk";
let p: StreamVideoParticipant;
const { useSpeakerState } = useCallStateHooks();
const { speaker } = useSpeakerState();
// will set the volume of the participant to 50%
speaker.setParticipantVolume(p.sessionId, 0.5);
// will mute the participant
speaker.setParticipantVolume(p.sessionId, 0);
// will reset the volume to the default value
speaker.setParticipantVolume(p.sessionId, undefined);
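In practice, the participant usually comes from call state. A minimal sketch that lowers the playback volume of every remote participant, assuming the useRemoteParticipants hook (the 50% value and the component are illustrative):
import { useCallStateHooks } from "@stream-io/video-react-sdk";

export const LowerRemoteVolumes = () => {
  const { useSpeakerState, useRemoteParticipants } = useCallStateHooks();
  const { speaker } = useSpeakerState();
  const remoteParticipants = useRemoteParticipants();

  const lowerVolumes = () => {
    // set the playback volume of each remote participant to 50%
    for (const p of remoteParticipants) {
      speaker.setParticipantVolume(p.sessionId, 0.5);
    }
  };

  return <button onClick={lowerVolumes}>Lower volumes</button>;
};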
Persisting user’s device preferences
For the user’s convenience, you might want to persist their camera, microphone, and speaker device choices across sessions.
There are multiple ways to achieve this, and our SDK provides a utility hook that can help you with it by utilizing localStorage under the hood.
import {
  StreamVideo,
  StreamCall,
  usePersistedDevicePreferences,
} from "@stream-io/video-react-sdk";

export const MyApp = () => {
  return (
    <StreamVideo client={client}>
      <StreamCall call={call}>
        <MyCallShell />
      </StreamCall>
    </StreamVideo>
  );
};

const MyCallShell = () => {
  usePersistedDevicePreferences("my-custom-preference-key");
  return <div>...</div>;
};