client.history.get_all(...)
-
-
-
Returns metadata about all your generated audio.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.history.get_all(
    page_size=1,
    voice_id="pMsXgVXv3BLzUgSXRplE",
)
-
-
-
page_size:
typing.Optional[int]
— How many history items to return at maximum. Cannot exceed 1000; defaults to 100.
-
start_after_history_item_id:
typing.Optional[str]
— After which ID to start fetching; use this parameter to paginate across a large collection of history items. When this parameter is not provided, history items will be fetched starting from the most recently created one, ordered descending by their creation date (see the pagination sketch below).
-
voice_id:
typing.Optional[str]
— Voice ID to be filtered for, you can use GET https://api.elevenlabs.io/v1/voices to receive a list of voices and their IDs.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
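
A minimal pagination sketch using start_after_history_item_id. The attribute names page.history, page.has_more, and item.history_item_id are assumptions about the response model; check the SDK's generated types before relying on them.

import typing

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

last_id: typing.Optional[str] = None
while True:
    # Fetch one page, resuming after the last item of the previous page.
    page = client.history.get_all(
        page_size=100,
        start_after_history_item_id=last_id,
    )
    for item in page.history:
        print(item.history_item_id)
    if not page.has_more:
        break
    last_id = page.history[-1].history_item_id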
-
-
client.history.get(...)
-
-
-
Returns information about a history item by its ID.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.history.get(
    history_item_id="ja9xsmfGhxYcymxGcOGB",
)
-
-
-
history_item_id:
str
— History item ID to be used, you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.history.delete(...)
-
-
-
Delete a history item by its ID.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.history.delete(
    history_item_id="ja9xsmfGhxYcymxGcOGB",
)
-
-
-
history_item_id:
str
— History item ID to be used, you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.history.get_audio(...)
-
-
-
Returns the audio of a history item.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.history.get_audio(
    history_item_id="ja9xsmfGhxYcymxGcOGB",
)
-
-
-
history_item_id:
str
— History item ID to be used, you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration. You can pass in configuration such as chunk_size, and more to customize the request and response.
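
A sketch of saving the returned audio to disk, assuming the call yields chunks of raw audio bytes (verify the return type in the SDK):

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Assumption: get_audio yields chunks of raw audio bytes that can be written directly.
with open("history_item.mp3", "wb") as f:
    for chunk in client.history.get_audio(history_item_id="ja9xsmfGhxYcymxGcOGB"):
        f.write(chunk)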
-
-
client.history.download(...)
-
-
-
Download one or more history items. If one history item ID is provided, we will return a single audio file. If more than one history item ID is provided, the history items will be packed into a .zip file.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.history.download(
    history_item_ids=["ja9xsmfGhxYcymxGcOGB"],
)
-
-
-
history_item_ids:
typing.Sequence[str]
— A list of history items to download, you can get IDs of history items and other metadata using the GET https://api.elevenlabs.io/v1/history endpoint.
-
output_format:
typing.Optional[str]
— Output format to transcode the audio file, can be wav or default.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
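
A sketch of downloading several items at once. Per the description above, passing more than one ID produces a .zip archive; the assumption that the call streams the archive back as chunks of bytes is not confirmed here, so check the SDK's return type.

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Assumption: the call yields the .zip archive as chunks of bytes.
ids = ["ja9xsmfGhxYcymxGcOGB", "pMsXgVXv3BLzUgSXRplE"]
with open("history_items.zip", "wb") as f:
    for chunk in client.history.download(history_item_ids=ids):
        f.write(chunk)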
-
-
client.text_to_sound_effects.convert(...)
-
-
-
Converts a text of your choice into a sound effect.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.text_to_sound_effects.convert(
    text="string",
    duration_seconds=1.1,
    prompt_influence=1.1,
)
-
-
-
text:
str
— The text that will get converted into a sound effect.
-
duration_seconds:
typing.Optional[float]
— The duration of the sound which will be generated in seconds. Must be at least 0.5 and at most 22. If set to None we will guess the optimal duration using the prompt. Defaults to None.
-
prompt_influence:
typing.Optional[float]
— A higher prompt influence makes your generation follow the prompt more closely while also making generations less variable. Must be a value between 0 and 1. Defaults to 0.3.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration. You can pass in configuration such as chunk_size, and more to customize the request and response.
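
A sketch of generating a sound effect and writing it to a file, assuming the call yields the generated audio as chunks of bytes (the prompt text and output filename are illustrative):

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Assumption: convert yields the generated sound effect as chunks of bytes.
result = client.text_to_sound_effects.convert(
    text="Rain falling on a tin roof",
    duration_seconds=5.0,
    prompt_influence=0.3,
)
with open("rain.mp3", "wb") as f:
    for chunk in result:
        f.write(chunk)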
-
-
client.audio_isolation.audio_isolation(...)
-
-
-
Removes background noise from audio
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.audio_isolation.audio_isolation()
-
-
-
audio: `from __future__ import annotations
core.File` — See core.File for more documentation
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration. You can pass in configuration such as chunk_size, and more to customize the request and response.
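
A sketch of passing an input file, assuming core.File accepts a file-like object opened in binary mode and that the call yields the isolated audio as chunks of bytes (filenames are illustrative):

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Assumption: core.File accepts an open binary file handle, and the
# call yields the cleaned-up audio as chunks of bytes.
with open("noisy_recording.mp3", "rb") as audio_file:
    isolated = client.audio_isolation.audio_isolation(audio=audio_file)
    with open("clean_recording.mp3", "wb") as out:
        for chunk in isolated:
            out.write(chunk)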
-
-
client.audio_isolation.audio_isolation_stream(...)
-
-
-
Removes background noise from audio and streams the result
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.audio_isolation.audio_isolation_stream()
-
-
-
audio: `from __future__ import annotations
core.File` — See core.File for more documentation
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration. You can pass in configuration such as chunk_size, and more to customize the request and response.
-
-
client.samples.delete(...)
-
-
-
Removes a sample by its ID.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.samples.delete(
    voice_id="ja9xsmfGhxYcymxGcOGB",
    sample_id="pMsXgVXv3BLzUgSXRplE",
)
-
-
-
voice_id:
str
— Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
sample_id:
str
— Sample ID to be used, you can use GET https://api.elevenlabs.io/v1/voices/{voice_id} to list all the available samples for a voice.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.samples.get_audio(...)
-
-
-
Returns the audio corresponding to a sample attached to a voice.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.samples.get_audio(
    voice_id="ja9xsmfGhxYcymxGcOGB",
    sample_id="pMsXgVXv3BLzUgSXRplE",
)
-
-
-
voice_id:
str
— Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
sample_id:
str
— Sample ID to be used, you can use GET https://api.elevenlabs.io/v1/voices/{voice_id} to list all the available samples for a voice.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration. You can pass in configuration such as chunk_size, and more to customize the request and response.
-
-
client.text_to_speech.convert(...)
-
-
-
Converts text into speech using a voice of your choice and returns audio.
-
-
-
from elevenlabs import ElevenLabs, VoiceSettings

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.text_to_speech.convert(
    voice_id="pMsXgVXv3BLzUgSXRplE",
    optimize_streaming_latency="0",
    output_format="mp3_22050_32",
    text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”",
    voice_settings=VoiceSettings(
        stability=0.5,
        similarity_boost=0.75,
        style=0.0,
    ),
)
-
-
-
voice_id:
str
— Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
text:
str
— The text that will get converted into speech.
-
enable_logging:
typing.Optional[bool]
— When enable_logging is set to false, full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
-
optimize_streaming_latency:
typing.Optional[OptimizeStreamingLatency]
— You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
-
output_format:
typing.Optional[OutputFormat]
— The output format of the generated audio.
-
model_id:
typing.Optional[str]
— Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for text to speech, you can check this using the can_do_text_to_speech property.
-
language_code:
typing.Optional[str]
— Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 supports language enforcement. For other models, an error will be returned if language code is provided.
-
voice_settings:
typing.Optional[VoiceSettings]
— Voice settings overriding stored settings for the given voice. They are applied only on the given request.
-
pronunciation_dictionary_locators:
typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
— A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
-
seed:
typing.Optional[int]
— If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
-
previous_text:
typing.Optional[str]
— The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
-
next_text:
typing.Optional[str]
— The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
-
previous_request_ids:
typing.Optional[typing.Sequence[str]]
— A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.
-
next_request_ids:
typing.Optional[typing.Sequence[str]]
— A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.
-
use_pvc_as_ivc:
typing.Optional[bool]
— If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
-
apply_text_normalization:
typing.Optional[BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization]
— This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration. You can pass in configuration such as chunk_size, and more to customize the request and response.
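
A sketch of request stitching with previous_text and next_text, splitting a longer passage into two requests so prosody flows across the boundary. The sample sentences and output filename are illustrative, and writing both results into one file assumes convert yields mp3 byte chunks:

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

part_one = "The storm rolled in just after sunset."
part_two = "By midnight, the whole valley was under water."

# Tell each request what surrounds it so the prosody matches at the seam.
audio_one = client.text_to_speech.convert(
    voice_id="pMsXgVXv3BLzUgSXRplE",
    text=part_one,
    next_text=part_two,
)
audio_two = client.text_to_speech.convert(
    voice_id="pMsXgVXv3BLzUgSXRplE",
    text=part_two,
    previous_text=part_one,
)
with open("stitched.mp3", "wb") as f:
    for chunk in audio_one:
        f.write(chunk)
    for chunk in audio_two:
        f.write(chunk)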
-
-
client.text_to_speech.convert_with_timestamps(...)
-
-
-
Converts text into speech using a voice of your choice and returns JSON containing audio as a base64 encoded string together with information on when which character was spoken.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.text_to_speech.convert_with_timestamps(
    voice_id="21m00Tcm4TlvDq8ikWAM",
    text="text",
)
-
-
-
voice_id:
str
— Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
text:
str
— The text that will get converted into speech.
-
enable_logging:
typing.Optional[bool]
— When enable_logging is set to false, full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
-
optimize_streaming_latency:
typing.Optional[OptimizeStreamingLatency]
— You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
-
output_format:
typing.Optional[OutputFormat]
— The output format of the generated audio.
-
model_id:
typing.Optional[str]
— Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for text to speech, you can check this using the can_do_text_to_speech property.
-
language_code:
typing.Optional[str]
— Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 supports language enforcement. For other models, an error will be returned if language code is provided.
-
voice_settings:
typing.Optional[VoiceSettings]
— Voice settings overriding stored settings for the given voice. They are applied only on the given request.
-
pronunciation_dictionary_locators:
typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
— A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
-
seed:
typing.Optional[int]
— If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
-
previous_text:
typing.Optional[str]
— The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
-
next_text:
typing.Optional[str]
— The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
-
previous_request_ids:
typing.Optional[typing.Sequence[str]]
— A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.
-
next_request_ids:
typing.Optional[typing.Sequence[str]]
— A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.
-
use_pvc_as_ivc:
typing.Optional[bool]
— If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
-
apply_text_normalization:
typing.Optional[BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization]
— This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.text_to_speech.convert_as_stream(...)
-
-
-
Converts text into speech using a voice of your choice and returns audio as an audio stream.
-
-
-
from elevenlabs import ElevenLabs, VoiceSettings

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.text_to_speech.convert_as_stream(
    voice_id="pMsXgVXv3BLzUgSXRplE",
    optimize_streaming_latency="0",
    output_format="mp3_22050_32",
    text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”",
    voice_settings=VoiceSettings(
        stability=0.1,
        similarity_boost=0.3,
        style=0.2,
    ),
)
-
-
-
voice_id:
str
— Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
text:
str
— The text that will get converted into speech.
-
enable_logging:
typing.Optional[bool]
— When enable_logging is set to false, full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
-
optimize_streaming_latency:
typing.Optional[OptimizeStreamingLatency]
— You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
-
output_format:
typing.Optional[OutputFormat]
— The output format of the generated audio.
-
model_id:
typing.Optional[str]
— Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for text to speech, you can check this using the can_do_text_to_speech property.
-
language_code:
typing.Optional[str]
— Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 supports language enforcement. For other models, an error will be returned if language code is provided.
-
voice_settings:
typing.Optional[VoiceSettings]
— Voice settings overriding stored settings for the given voice. They are applied only on the given request.
-
pronunciation_dictionary_locators:
typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
— A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
-
seed:
typing.Optional[int]
— If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
-
previous_text:
typing.Optional[str]
— The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
-
next_text:
typing.Optional[str]
— The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
-
previous_request_ids:
typing.Optional[typing.Sequence[str]]
— A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.
-
next_request_ids:
typing.Optional[typing.Sequence[str]]
— A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.
-
use_pvc_as_ivc:
typing.Optional[bool]
— If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
-
apply_text_normalization:
typing.Optional[BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization]
— This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration. You can pass in configuration such as chunk_size, and more to customize the request and response.
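
A sketch of consuming the stream, assuming convert_as_stream yields audio chunks as bytes as they are generated, so they can be written or played incrementally (the text and filename are illustrative):

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Assumption: the returned object is an iterator of audio byte chunks.
audio_stream = client.text_to_speech.convert_as_stream(
    voice_id="pMsXgVXv3BLzUgSXRplE",
    output_format="mp3_22050_32",
    text="Streaming lets playback start before the full audio is ready.",
)
with open("streamed.mp3", "wb") as f:
    for chunk in audio_stream:
        f.write(chunk)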
-
-
client.text_to_speech.stream_with_timestamps(...)
-
-
-
Converts text into speech using a voice of your choice and returns a stream of JSONs containing audio as a base64 encoded string together with information on when which character was spoken.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.text_to_speech.stream_with_timestamps(
    voice_id="21m00Tcm4TlvDq8ikWAM",
    text="text",
)
-
-
-
voice_id:
str
— Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
text:
str
— The text that will get converted into speech.
-
enable_logging:
typing.Optional[bool]
— When enable_logging is set to false, full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
-
optimize_streaming_latency:
typing.Optional[OptimizeStreamingLatency]
— You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
-
output_format:
typing.Optional[OutputFormat]
— The output format of the generated audio.
-
model_id:
typing.Optional[str]
— Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for text to speech, you can check this using the can_do_text_to_speech property.
-
language_code:
typing.Optional[str]
— Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 supports language enforcement. For other models, an error will be returned if language code is provided.
-
voice_settings:
typing.Optional[VoiceSettings]
— Voice settings overriding stored settings for the given voice. They are applied only on the given request.
-
pronunciation_dictionary_locators:
typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
— A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
-
seed:
typing.Optional[int]
— If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
-
previous_text:
typing.Optional[str]
— The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
-
next_text:
typing.Optional[str]
— The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
-
previous_request_ids:
typing.Optional[typing.Sequence[str]]
— A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.
-
next_request_ids:
typing.Optional[typing.Sequence[str]]
— A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.
-
use_pvc_as_ivc:
typing.Optional[bool]
— If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
-
apply_text_normalization:
typing.Optional[BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization]
— This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
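
A sketch of consuming the timestamped stream. The description above says each item carries base64-encoded audio plus character timing; the attribute names audio_base64 and alignment used here are assumptions about the streamed response model, so verify them against the SDK types.

import base64

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Assumption: each streamed item exposes `audio_base64` (base64 mp3 chunk)
# and `alignment` (per-character timing); names may differ in the SDK.
with open("timestamped.mp3", "wb") as f:
    for item in client.text_to_speech.stream_with_timestamps(
        voice_id="21m00Tcm4TlvDq8ikWAM",
        text="Each chunk arrives with character-level timing information.",
    ):
        if item.audio_base64:
            f.write(base64.b64decode(item.audio_base64))
        if item.alignment:
            print(item.alignment)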
-
-
client.speech_to_speech.convert(...)
-
-
-
Create speech by combining the content and emotion of the uploaded audio with a voice of your choice.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.speech_to_speech.convert(
    voice_id="string",
    enable_logging=True,
    optimize_streaming_latency="0",
    output_format="mp3_22050_32",
)
-
-
-
voice_id:
str
— Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
audio: `from __future__ import annotations
core.File` — See core.File for more documentation
-
enable_logging:
typing.Optional[bool]
— When enable_logging is set to false, full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
-
optimize_streaming_latency:
typing.Optional[OptimizeStreamingLatency]
— You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
-
output_format:
typing.Optional[OutputFormat]
— The output format of the generated audio.
-
model_id:
typing.Optional[str]
— Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property.
-
voice_settings:
typing.Optional[str]
— Voice settings overriding stored settings for the given voice. They are applied only on the given request. Needs to be sent as a JSON-encoded string.
-
seed:
typing.Optional[int]
— If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
-
remove_background_noise:
typing.Optional[bool]
— If set, will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration. You can pass in configuration such as chunk_size, and more to customize the request and response.
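
A sketch of passing the source recording, assuming the audio parameter (core.File) accepts a file-like object opened in binary mode and that the call yields the converted speech as chunks of bytes (filenames are illustrative):

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Assumption: `audio` accepts an open binary file handle, and the call
# yields the converted speech as chunks of bytes.
with open("source_speech.mp3", "rb") as source:
    converted = client.speech_to_speech.convert(
        voice_id="pMsXgVXv3BLzUgSXRplE",
        audio=source,
    )
    with open("converted_speech.mp3", "wb") as out:
        for chunk in converted:
            out.write(chunk)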
-
-
client.speech_to_speech.convert_as_stream(...)
-
-
-
Create speech by combining the content and emotion of the uploaded audio with a voice of your choice and returns an audio stream.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.speech_to_speech.convert_as_stream(
    voice_id="string",
    enable_logging=True,
    optimize_streaming_latency="0",
    output_format="mp3_22050_32",
)
-
-
-
voice_id:
str
— Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
audio: `from __future__ import annotations
core.File` — See core.File for more documentation
-
enable_logging:
typing.Optional[bool]
— When enable_logging is set to false, full privacy mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Full privacy mode may only be used by enterprise customers.
-
optimize_streaming_latency:
typing.Optional[OptimizeStreamingLatency]
— You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model.
-
output_format:
typing.Optional[str]
— Output format of the generated audio. Must be one of: mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps. mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps. mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps. mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps. mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps. mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above. pcm_16000 - PCM format (S16LE) with 16kHz sample rate. pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate. pcm_24000 - PCM format (S16LE) with 24kHz sample rate. pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above. ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs.
-
model_id:
typing.Optional[str]
— Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property.
-
voice_settings:
typing.Optional[str]
— Voice settings overriding stored settings for the given voice. They are applied only on the given request. Needs to be sent as a JSON-encoded string.
-
seed:
typing.Optional[int]
— If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
-
remove_background_noise:
typing.Optional[bool]
— If set, will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration. You can pass in configuration such as chunk_size, and more to customize the request and response.
-
-
client.voice_generation.generate_parameters()
-
-
-
Get possible parameters for the /v1/voice-generation/generate-voice endpoint.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.voice_generation.generate_parameters()
-
-
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.voice_generation.generate(...)
-
-
-
Generate a random voice based on parameters. This method returns a generated_voice_id in the response header, and a sample of the voice in the body. If you like the generated voice, call /v1/voice-generation/create-voice with the generated_voice_id to create the voice.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.voice_generation.generate(
    gender="female",
    accent="american",
    age="middle_aged",
    accent_strength=2.0,
    text="It sure does, Jackie… My mama always said: “In Carolina, the air's so thick you can wear it!”",
)
-
-
-
gender:
Gender
— Category code corresponding to the gender of the generated voice. Possible values: female, male.
-
accent:
str
— Category code corresponding to the accent of the generated voice. Possible values: american, british, african, australian, indian.
-
age:
Age
— Category code corresponding to the age of the generated voice. Possible values: young, middle_aged, old.
-
accent_strength:
float
— The strength of the accent of the generated voice. Has to be between 0.3 and 2.0.
-
text:
str
— Text to generate, text length has to be between 100 and 1000.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration. You can pass in configuration such as chunk_size, and more to customize the request and response.
-
-
client.voice_generation.create_a_previously_generated_voice(...)
-
-
-
Create a previously generated voice. This endpoint should be called after you have fetched a generated_voice_id using /v1/voice-generation/generate-voice.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.voice_generation.create_a_previously_generated_voice(
    voice_name="Alex",
    voice_description="Middle-aged American woman",
    generated_voice_id="rbVJFu6SGRD1dbWpKnWl",
)
-
-
-
voice_name:
str
— Name to use for the created voice.
-
voice_description:
str
— Description to use for the created voice.
-
generated_voice_id:
str
— The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if you don't have one yet.
-
played_not_selected_voice_ids:
typing.Optional[typing.Sequence[str]]
— List of voice ids that the user has played but not selected. Used for RLHF.
-
labels:
typing.Optional[typing.Dict[str, str]]
— Optional, metadata to add to the created voice. Defaults to None.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.text_to_voice.create_previews(...)
-
-
-
Generate a custom voice based on a voice description. This method returns a list of voice previews. Each preview has a generated_voice_id and a sample of the voice as base64 encoded mp3 audio. If you like a voice preview and want to create the voice, call /v1/text-to-voice/create-voice-from-preview with the generated_voice_id to create the voice.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.text_to_voice.create_previews(
    voice_description="voice_description",
    text="text",
)
-
-
-
voice_description:
str
— Description to use for the created voice.
-
text:
str
— Text to generate, text length has to be between 100 and 1000.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
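
A sketch of the preview-then-create flow described above. The attribute names previews.previews and generated_voice_id are assumptions about the response model, and the description/text values are illustrative (the text must be 100 to 1000 characters):

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Assumption: the response exposes a `previews` list whose items carry a
# `generated_voice_id`; verify the names against the SDK's types.
previews = client.text_to_voice.create_previews(
    voice_description="A calm, middle-aged narrator with a slight British accent",
    text="The quick brown fox jumps over the lazy dog while the rain keeps falling on the quiet, empty street outside.",
)
chosen = previews.previews[0]
client.text_to_voice.create_voice_from_preview(
    voice_name="Calm Narrator",
    voice_description="A calm, middle-aged narrator with a slight British accent",
    generated_voice_id=chosen.generated_voice_id,
)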
-
-
client.text_to_voice.create_voice_from_preview(...)
-
-
-
Create a voice from a previously generated voice preview. This endpoint should be called after you have fetched a generated_voice_id using /v1/text-to-voice/create-previews.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.text_to_voice.create_voice_from_preview(
    voice_name="voice_name",
    voice_description="voice_description",
    generated_voice_id="generated_voice_id",
)
-
-
-
voice_name:
str
— Name to use for the created voice.
-
voice_description:
str
— Description to use for the created voice.
-
generated_voice_id:
str
— The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if you don't have one yet.
-
labels:
typing.Optional[typing.Dict[str, str]]
— Optional, metadata to add to the created voice. Defaults to None.
-
played_not_selected_voice_ids:
typing.Optional[typing.Sequence[str]]
— List of voice ids that the user has played but not selected. Used for RLHF.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.user.get_subscription()
-
-
-
Gets extended information about the user's subscription.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.user.get_subscription()
-
-
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.user.get()
-
-
-
Gets information about the user.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.user.get()
-
-
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.voices.get_all(...)
-
-
-
Gets a list of all available voices for a user.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.voices.get_all()
-
-
-
show_legacy:
typing.Optional[bool]
— If set to true, legacy premade voices will be included in responses from /v1/voices
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.voices.get_default_settings()
-
-
-
Gets the default settings for voices. "similarity_boost" corresponds to "Clarity + Similarity Enhancement" in the web app and "stability" corresponds to the "Stability" slider in the web app.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.voices.get_default_settings()
-
-
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.voices.get_settings(...)
-
-
-
Returns the settings for a specific voice. "similarity_boost" corresponds to "Clarity + Similarity Enhancement" in the web app and "stability" corresponds to the "Stability" slider in the web app.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.voices.get_settings(
    voice_id="2EiwWnXFnvU5JabPnv8n",
)
-
-
-
voice_id:
str
— Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.voices.get(...)
-
-
-
Returns metadata about a specific voice.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.voices.get(
    voice_id="29vD33N1CtxCmqQRPOHJ",
)
-
-
-
voice_id:
str
— Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
with_settings:
typing.Optional[bool]
— If set, will return settings information corresponding to the voice; requires authorization.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.voices.delete(...)
-
-
-
Deletes a voice by its ID.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.voices.delete(
    voice_id="29vD33N1CtxCmqQRPOHJ",
)
-
-
-
voice_id:
str
— Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.voices.edit_settings(...)
-
-
-
Edit your settings for a specific voice. "similarity_boost" corresponds to "Clarity + Similarity Enhancement" in the web app and "stability" corresponds to the "Stability" slider in the web app.
-
-
-
from elevenlabs import ElevenLabs, VoiceSettings

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.voices.edit_settings(
    voice_id="29vD33N1CtxCmqQRPOHJ",
    request=VoiceSettings(
        stability=0.1,
        similarity_boost=0.3,
        style=0.2,
    ),
)
-
-
-
voice_id:
str
— Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
request:
VoiceSettings
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.voices.add(...)
-
-
-
Add a new voice to your collection of voices in VoiceLab.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.voices.add(
    name="Alex",
)
-
-
-
name:
str
— The name that identifies this voice. This will be displayed in the dropdown of the website.
-
files: `from __future__ import annotations
typing.List[core.File]` — See core.File for more documentation
-
remove_background_noise:
typing.Optional[bool]
— If set, will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse.
-
description:
typing.Optional[str]
— How would you describe the voice?
-
labels:
typing.Optional[str]
— Serialized labels dictionary for the voice.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
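
A sketch of adding a voice from local sample files, assuming each entry in files (typing.List[core.File]) can be an open binary file handle; the filenames and description are illustrative:

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Assumption: open binary file handles are accepted as core.File values.
with open("sample_1.mp3", "rb") as s1, open("sample_2.mp3", "rb") as s2:
    voice = client.voices.add(
        name="Alex",
        files=[s1, s2],
        description="Warm, conversational voice recorded in a quiet room",
        remove_background_noise=False,
    )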
-
-
client.voices.edit(...)
-
-
-
Edit a voice created by you.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.voices.edit(
    voice_id="JBFqnCBsd6RMkjVDRZzb",
    name="George",
)
-
-
-
voice_id:
str
— Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
name:
str
— The name that identifies this voice. This will be displayed in the dropdown of the website.
-
files: `from __future__ import annotations
typing.Optional[typing.List[core.File]]` — See core.File for more documentation
-
remove_background_noise:
typing.Optional[bool]
— If set, will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse.
-
description:
typing.Optional[str]
— How would you describe the voice?
-
labels:
typing.Optional[str]
— Serialized labels dictionary for the voice.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.voices.add_sharing_voice(...)
-
-
-
Add a sharing voice to your collection of voices in VoiceLab.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.voices.add_sharing_voice(
    public_user_id="63e84100a6bf7874ba37a1bab9a31828a379ec94b891b401653b655c5110880f",
    voice_id="sB1b5zUrxQVAFl2PhZFp",
    new_name="Alita",
)
-
-
-
public_user_id:
str
— Public user ID used to publicly identify ElevenLabs users.
-
voice_id:
str
— Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
new_name:
str
— The name that identifies this voice. This will be displayed in the dropdown of the website.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.voices.get_shared(...)
-
-
-
Gets a list of shared voices.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.voices.get_shared(
    page_size=1,
    gender="female",
    language="en",
)
-
-
-
page_size:
typing.Optional[int]
— How many shared voices to return at maximum. Cannot exceed 100; defaults to 30.
-
category:
typing.Optional[str]
— voice category used for filtering
-
gender:
typing.Optional[str]
— gender used for filtering
-
age:
typing.Optional[str]
— age used for filtering
-
accent:
typing.Optional[str]
— accent used for filtering
-
language:
typing.Optional[str]
— language used for filtering
-
search:
typing.Optional[str]
— search term used for filtering
-
use_cases:
typing.Optional[typing.Union[str, typing.Sequence[str]]]
— use-case used for filtering
-
descriptives:
typing.Optional[typing.Union[str, typing.Sequence[str]]]
— search term used for filtering
-
featured:
typing.Optional[bool]
— Filter featured voices
-
reader_app_enabled:
typing.Optional[bool]
— Filter voices that are enabled for the reader app
-
owner_id:
typing.Optional[str]
— Filter voices by public owner ID
-
sort:
typing.Optional[str]
— sort criteria
-
page:
typing.Optional[int]
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.voices.get_similar_library_voices(...)
-
-
-
Returns a list of shared voices similar to the provided audio sample. If neither similarity_threshold nor top_k is provided, we will apply default values.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.voices.get_similar_library_voices()
-
-
-
audio_file: `from __future__ import annotations
typing.Optional[core.File]` — See core.File for more documentation
-
similarity_threshold:
typing.Optional[float]
— Threshold for voice similarity between provided sample and library voices. Must be in range <0, 2>. The smaller the value the more similar voices will be returned.
-
top_k:
typing.Optional[int]
— Number of most similar voices to return. If similarity_threshold is provided, less than this number of voices may be returned. Must be in range <1, 100>.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.voices.get_a_profile_page(...)
-
-
-
Gets a profile page based on a handle
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.voices.get_a_profile_page(
    handle="talexgeorge",
)
-
-
-
handle:
str
— Handle for a VA's profile page
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.projects.get_all()
-
-
-
Returns a list of your projects together with their metadata.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.projects.get_all()
-
-
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.projects.add(...)
-
-
-
Creates a new project, it can be either initialized as blank, from a document or from a URL.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.projects.add(
    name="name",
    default_title_voice_id="default_title_voice_id",
    default_paragraph_voice_id="default_paragraph_voice_id",
    default_model_id="default_model_id",
)
-
-
-
name:
str
— The name of the project, used for identification only.
-
default_title_voice_id:
str
— The voice_id that corresponds to the default voice used for new titles.
-
default_paragraph_voice_id:
str
— The voice_id that corresponds to the default voice used for new paragraphs.
-
default_model_id:
str
— The model_id of the model to be used for this project, you can query GET https://api.elevenlabs.io/v1/models to list all available models.
-
from_url:
typing.Optional[str]
— An optional URL from which we will extract content to initialize the project. If this is set, 'from_document' must be null. If neither 'from_url' nor 'from_document' are provided, we will initialize the project as blank.
-
from_document: `from __future__ import annotations
typing.Optional[core.File]` — See core.File for more documentation
-
quality_preset:
typing.Optional[str]
Output quality of the generated audio. Must be one of: standard - standard output format, 128kbps with 44.1kHz sample rate. high - high quality output format, 192kbps with 44.1kHz sample rate and major improvements on our side. Using this setting increases the credit cost by 20%. ultra - ultra quality output format, 192kbps with 44.1kHz sample rate and highest improvements on our side. Using this setting increases the credit cost by 50%. ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate and highest improvements on our side in a fully lossless format. Using this setting increases the credit cost by 100%.
-
title:
typing.Optional[str]
— An optional title of the project, this will be added as metadata to the mp3 file on project / chapter download.
-
author:
typing.Optional[str]
— An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download.
-
description:
typing.Optional[str]
— An optional description of the project.
-
genres:
typing.Optional[typing.List[str]]
— An optional list of genres associated with the project.
-
target_audience:
typing.Optional[ProjectsAddRequestTargetAudience]
— An optional target audience of the project.
-
language:
typing.Optional[str]
— An optional language of the project. Two-letter language code (ISO 639-1).
-
content_type:
typing.Optional[str]
— An optional content type of the project.
-
original_publication_date:
typing.Optional[str]
— An optional original publication date of the project, in the format YYYY-MM-DD or YYYY.
-
mature_content:
typing.Optional[bool]
— An optional flag marking the project as mature content.
-
isbn_number:
typing.Optional[str]
— An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download.
-
acx_volume_normalization:
typing.Optional[bool]
— [Deprecated] When the project is downloaded, whether the returned audio should be postprocessed to comply with audiobook volume normalization requirements.
-
volume_normalization:
typing.Optional[bool]
— When the project is downloaded, whether the returned audio should be postprocessed to comply with audiobook volume normalization requirements.
-
pronunciation_dictionary_locators:
typing.Optional[typing.List[str]]
— A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{"pronunciation_dictionary_id":"Vmd4Zor6fplcA7WrINey","version_id":"hRPaxjlTdR7wFMhV4w0b"}"' --form 'pronunciation_dictionary_locators="{"pronunciation_dictionary_id":"JzWtcGQMJ6bnlWwyMo7e","version_id":"lbmwxiLu4q6txYxgdZqn"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
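
A sketch of building the JSON-encoded pronunciation dictionary locators described above. The dictionary and version IDs are the illustrative ones from the parameter description, and the voice and model IDs are placeholder assumptions:

import json

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Each locator is passed as a JSON-encoded string, as the description requires.
locators = [
    json.dumps(
        {
            "pronunciation_dictionary_id": "Vmd4Zor6fplcA7WrINey",
            "version_id": "hRPaxjlTdR7wFMhV4w0b",
        }
    )
]
client.projects.add(
    name="My audiobook",
    default_title_voice_id="21m00Tcm4TlvDq8ikWAM",
    default_paragraph_voice_id="21m00Tcm4TlvDq8ikWAM",
    default_model_id="eleven_multilingual_v2",
    pronunciation_dictionary_locators=locators,
)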
-
-
client.projects.get(...)
-
-
-
Returns information about a specific project. This endpoint returns more detailed information about a project than GET api.elevenlabs.io/v1/projects.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.projects.get(
    project_id="21m00Tcm4TlvDq8ikWAM",
)
-
-
-
project_id:
str
— The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.projects.edit_basic_project_info(...)
-
-
-
Edits basic project info.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.projects.edit_basic_project_info(
    project_id="21m00Tcm4TlvDq8ikWAM",
    name="name",
    default_title_voice_id="default_title_voice_id",
    default_paragraph_voice_id="default_paragraph_voice_id",
)
-
-
-
project_id:
str
— The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
name:
str
— The name of the project, used for identification only.
-
default_title_voice_id:
str
— The voice_id that corresponds to the default voice used for new titles.
-
default_paragraph_voice_id:
str
— The voice_id that corresponds to the default voice used for new paragraphs.
-
title:
typing.Optional[str]
— An optional title of the project, this will be added as metadata to the mp3 file on project / chapter download.
-
author:
typing.Optional[str]
— An optional name of the author of the project, this will be added as metadata to the mp3 file on project / chapter download.
-
isbn_number:
typing.Optional[str]
— An optional ISBN number of the project you want to create, this will be added as metadata to the mp3 file on project / chapter download.
-
volume_normalization:
typing.Optional[bool]
— When the project is downloaded, whether the returned audio should be postprocessed to comply with audiobook volume normalization requirements.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.projects.delete(...)
-
-
-
Delete a project by its project_id.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.projects.delete(
    project_id="21m00Tcm4TlvDq8ikWAM",
)
-
-
-
project_id:
str
— The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.projects.convert(...)
-
-
-
Starts conversion of a project and all of its chapters.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.projects.convert(
    project_id="21m00Tcm4TlvDq8ikWAM",
)
-
-
-
project_id:
str
— The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.projects.get_snapshots(...)
-
-
-
Gets the snapshots of a project.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.projects.get_snapshots(
    project_id="21m00Tcm4TlvDq8ikWAM",
)
-
-
-
project_id:
str
— The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.projects.stream_audio(...)
-
-
-
Stream the audio from a project snapshot.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.projects.stream_audio(
    project_id="string",
    project_snapshot_id="string",
    convert_to_mpeg=True,
)
-
-
-
project_id:
str
— The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
project_snapshot_id:
str
— The project_snapshot_id of the project snapshot. You can query GET /v1/projects/{project_id}/snapshots to list all available snapshots for a project.
-
convert_to_mpeg:
typing.Optional[bool]
— Whether to convert the audio to mpeg format.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration. You can pass in configuration such as chunk_size, and more to customize the request and response.
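
A sketch of writing the streamed snapshot audio to disk, assuming the call yields chunks of bytes; the project and snapshot IDs are illustrative and would normally come from client.projects.get_snapshots(...):

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Assumption: stream_audio yields the snapshot audio as chunks of bytes.
with open("project_audio.mp3", "wb") as f:
    for chunk in client.projects.stream_audio(
        project_id="21m00Tcm4TlvDq8ikWAM",
        project_snapshot_id="21m00Tcm4TlvDq8ikWAM",
    ):
        f.write(chunk)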
-
-
client.projects.stream_archive(...)
-
-
-
Streams archive with project audio.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.projects.stream_archive(
    project_id="21m00Tcm4TlvDq8ikWAM",
    project_snapshot_id="21m00Tcm4TlvDq8ikWAM",
)
-
-
-
project_id:
str
— The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
project_snapshot_id:
str
— The project_snapshot_id of the project snapshot. You can query GET /v1/projects/{project_id}/snapshots to list all available snapshots for a project.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.projects.add_chapter_to_a_project(...)
-
-
-
Creates a new chapter either as blank or from a URL.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.projects.add_chapter_to_a_project(
    project_id="21m00Tcm4TlvDq8ikWAM",
    name="name",
)
-
-
-
project_id:
str
— The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
name:
str
— The name of the chapter, used for identification only.
-
from_url:
typing.Optional[str]
— An optional URL from which we will extract content to initialize the chapter. If this is not provided, the chapter will be initialized as blank.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.projects.update_pronunciation_dictionaries(...)
-
-
-
Updates the set of pronunciation dictionaries acting on a project. This will automatically mark text within this project as requiring reconverting where the new dictionary would apply or the old one no longer does.
-
-
-
from elevenlabs import ElevenLabs, PronunciationDictionaryVersionLocator

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.projects.update_pronunciation_dictionaries(
    project_id="21m00Tcm4TlvDq8ikWAM",
    pronunciation_dictionary_locators=[
        PronunciationDictionaryVersionLocator(
            pronunciation_dictionary_id="pronunciation_dictionary_id",
            version_id="version_id",
        )
    ],
)
-
-
-
project_id:
str
— The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
pronunciation_dictionary_locators:
typing.Sequence[PronunciationDictionaryVersionLocator]
— A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{"pronunciation_dictionary_id":"Vmd4Zor6fplcA7WrINey","version_id":"hRPaxjlTdR7wFMhV4w0b"}"' --form 'pronunciation_dictionary_locators="{"pronunciation_dictionary_id":"JzWtcGQMJ6bnlWwyMo7e","version_id":"lbmwxiLu4q6txYxgdZqn"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.chapters.get_all(...)
-
-
-
Returns a list of your chapters for a project together with their metadata.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.chapters.get_all(
    project_id="21m00Tcm4TlvDq8ikWAM",
)
-
-
-
project_id:
str
— The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.chapters.get(...)
-
-
-
Returns information about a specific chapter.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.chapters.get(
    project_id="21m00Tcm4TlvDq8ikWAM",
    chapter_id="21m00Tcm4TlvDq8ikWAM",
)
-
-
-
project_id:
str
— The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
chapter_id:
str
— The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.chapters.delete(...)
-
-
-
Delete a chapter by its chapter_id.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.chapters.delete(
    project_id="21m00Tcm4TlvDq8ikWAM",
    chapter_id="21m00Tcm4TlvDq8ikWAM",
)
-
-
-
project_id:
str
— The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
chapter_id:
str
— The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.chapters.convert(...)
-
-
-
Starts conversion of a specific chapter.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.chapters.convert(
    project_id="21m00Tcm4TlvDq8ikWAM",
    chapter_id="21m00Tcm4TlvDq8ikWAM",
)
-
-
-
project_id:
str
— The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
chapter_id:
str
— The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.chapters.get_all_snapshots(...)
-
-
-
Gets information about all the snapshots of a chapter; each snapshot can be downloaded as audio. Whenever a chapter is converted, a snapshot is automatically created.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.chapters.get_all_snapshots(
    project_id="21m00Tcm4TlvDq8ikWAM",
    chapter_id="21m00Tcm4TlvDq8ikWAM",
)
-
-
-
project_id:
str
— The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
chapter_id:
str
— The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.chapters.stream_snapshot(...)
-
-
-
Stream the audio from a chapter snapshot. Use
GET /v1/projects/{project_id}/chapters/{chapter_id}/snapshots
to list all available snapshots of a chapter.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.chapters.stream_snapshot(
    project_id="21m00Tcm4TlvDq8ikWAM",
    chapter_id="21m00Tcm4TlvDq8ikWAM",
    chapter_snapshot_id="21m00Tcm4TlvDq8ikWAM",
)
-
-
-
project_id:
str
— The project_id of the project, you can query GET https://api.elevenlabs.io/v1/projects to list all available projects.
-
chapter_id:
str
— The chapter_id of the chapter. You can query GET https://api.elevenlabs.io/v1/projects/{project_id}/chapters to list all available chapters for a project.
-
chapter_snapshot_id:
str
— The chapter_snapshot_id of the chapter snapshot. You can query GET /v1/projects/{project_id}/chapters/{chapter_id}/snapshots to list all available snapshots for a chapter.
-
convert_to_mpeg:
typing.Optional[bool]
— Whether to convert the audio to mpeg format.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
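A typical workflow is to list a chapter's snapshots and then stream the most recent one to disk. The sketch below assumes that the get_all_snapshots response exposes a snapshots list whose items carry a chapter_snapshot_id, and that stream_snapshot yields audio chunks as bytes; check the SDK's response models before relying on these names.

from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# List the snapshots of a chapter (assumed response shape: .snapshots[i].chapter_snapshot_id).
snapshots = client.chapters.get_all_snapshots(
    project_id="21m00Tcm4TlvDq8ikWAM",
    chapter_id="21m00Tcm4TlvDq8ikWAM",
)
latest = snapshots.snapshots[-1]  # assumes snapshots are listed oldest to newest

# Stream the snapshot audio and write it to a local file (assumes an iterator of byte chunks).
with open("chapter.mp3", "wb") as f:
    for chunk in client.chapters.stream_snapshot(
        project_id="21m00Tcm4TlvDq8ikWAM",
        chapter_id="21m00Tcm4TlvDq8ikWAM",
        chapter_snapshot_id=latest.chapter_snapshot_id,
        convert_to_mpeg=True,
    ):
        f.write(chunk)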
-
client.dubbing.dub_a_video_or_an_audio_file(...)
-
-
-
Dubs a provided audio or video file into the given language.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.dubbing.dub_a_video_or_an_audio_file(
    target_lang="target_lang",
)
-
-
-
target_lang:
str
— The target language to dub the content into.
-
file: `from __future__ import annotations
typing.Optional[core.File]` — See core.File for more documentation
-
name:
typing.Optional[str]
— Name of the dubbing project.
-
source_url:
typing.Optional[str]
— URL of the source video/audio file.
-
source_lang:
typing.Optional[str]
— Source language.
-
num_speakers:
typing.Optional[int]
— Number of speakers to use for the dubbing. Set to 0 to automatically detect the number of speakers.
-
watermark:
typing.Optional[bool]
— Whether to apply watermark to the output video.
-
start_time:
typing.Optional[int]
— Start time of the source video/audio file.
-
end_time:
typing.Optional[int]
— End time of the source video/audio file.
-
highest_resolution:
typing.Optional[bool]
— Whether to use the highest resolution available.
-
drop_background_audio:
typing.Optional[bool]
— An advanced setting. Whether to drop background audio from the final dub. This can improve dub quality where it's known that audio shouldn't have a background track such as for speeches or monologues.
-
use_profanity_filter:
typing.Optional[bool]
— [BETA] Whether transcripts should have profanities censored with the word '[censored]'.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
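As a concrete example, the sketch below dubs a local video file into Spanish using the documented parameters. The file path is a placeholder, and passing an open binary file handle for the file parameter is an assumption based on core.File.

from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Dub a local file (placeholder path) into Spanish, auto-detecting the number of speakers.
with open("interview.mp4", "rb") as video:
    dub = client.dubbing.dub_a_video_or_an_audio_file(
        file=video,
        target_lang="es",
        source_lang="en",
        num_speakers=0,
        watermark=False,
    )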
-
client.dubbing.get_dubbing_project_metadata(...)
-
-
-
Returns metadata about a dubbing project, including whether it is still in progress.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.dubbing.get_dubbing_project_metadata(
    dubbing_id="dubbing_id",
)
-
-
-
dubbing_id:
str
— ID of the dubbing project.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
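Since dubbing runs asynchronously, a common pattern is to poll this endpoint until the project is no longer in progress. The status field name and its in-progress value used below are assumptions; confirm them against the actual response model.

import time

from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Poll the dubbing project until it leaves the in-progress state (field name and value are assumed).
while True:
    metadata = client.dubbing.get_dubbing_project_metadata(
        dubbing_id="dubbing_id",
    )
    if metadata.status != "dubbing":  # assumed value while the dub is still being generated
        break
    time.sleep(10)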
-
client.dubbing.delete_dubbing_project(...)
-
-
-
Deletes a dubbing project.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.dubbing.delete_dubbing_project(
    dubbing_id="dubbing_id",
)
-
-
-
dubbing_id:
str
— ID of the dubbing project.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.dubbing.get_dubbed_file(...)
-
-
-
Returns the dubbed file as a streamed file. Videos will be returned in MP4 format and audio-only dubs will be returned in MP3.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.dubbing.get_dubbed_file(
    dubbing_id="string",
    language_code="string",
)
-
-
-
dubbing_id:
str
— ID of the dubbing project.
-
language_code:
str
— ID of the language.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration. You can pass in configuration such as chunk_size to customize the request and response.
-
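Because the dubbed file is streamed, you typically write the chunks straight to disk. The sketch below assumes the method returns an iterator of byte chunks and that this particular dub is audio-only (hence the .mp3 extension).

from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Write the streamed dub to a local file (assumes an iterator of byte chunks).
with open("dub_es.mp3", "wb") as f:
    for chunk in client.dubbing.get_dubbed_file(
        dubbing_id="dubbing_id",
        language_code="es",
    ):
        f.write(chunk)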
-
client.dubbing.get_transcript_for_dub(...)
-
-
-
Returns the transcript for the dub as an SRT file (or WebVTT, depending on format_type).
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.dubbing.get_transcript_for_dub(
    dubbing_id="dubbing_id",
    language_code="language_code",
)
-
-
-
dubbing_id:
str
— ID of the dubbing project.
-
language_code:
str
— ID of the language.
-
format_type:
typing.Optional[ GetTranscriptForDubV1DubbingDubbingIdTranscriptLanguageCodeGetRequestFormatType ]
— Format to use for the subtitle file, either 'srt' or 'webvtt'
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
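To keep the transcript alongside the dub, you can request it in SRT form and write it out. Passing the literal 'srt' for format_type and receiving the subtitle content as text are both assumptions; adjust the write mode if the SDK returns bytes instead.

from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Fetch the SRT transcript for the Spanish dub and save it (return type assumed to be text).
transcript = client.dubbing.get_transcript_for_dub(
    dubbing_id="dubbing_id",
    language_code="es",
    format_type="srt",
)
with open("dub_es.srt", "w", encoding="utf-8") as f:
    f.write(transcript)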
-
client.workspace.get_sso_provider_admin(...)
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.workspace.get_sso_provider_admin(
    workspace_id="workspace_id",
)
-
-
-
workspace_id:
str
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.workspace.invite_user(...)
-
-
-
Sends an email invitation to join your workspace to the provided address. If the user doesn't have an account, they will be prompted to create one. If the user accepts the invite, they will be added to your workspace and will occupy one of your subscription seats. This endpoint may only be called by workspace administrators.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.workspace.invite_user(
    email="email",
)
-
-
-
email:
str
— Email of the target user.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.workspace.delete_existing_invitation(...)
-
-
-
Invalidates an existing email invitation. The invitation will still show up in the inbox it has been delivered to, but activating it to join the workspace won't work. This endpoint may only be called by workspace administrators.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.workspace.delete_existing_invitation(
    email="email",
)
-
-
-
email:
str
— Email of the target user.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.workspace.update_member(...)
-
-
-
Updates attributes of a workspace member. Apart from the email identifier, all parameters will remain unchanged unless specified. This endpoint may only be called by workspace administrators.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.workspace.update_member(
    email="email",
)
-
-
-
email:
str
— Email of the target user.
-
is_locked:
typing.Optional[bool]
— Whether to lock or unlock the user account.
-
workspace_role:
typing.Optional[BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole]
— Role dictating permissions in the workspace.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.models.get_all()
-
-
-
Gets a list of available models.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.models.get_all()
-
-
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.audio_native.create(...)
-
-
-
Creates an Audio Native enabled project, optionally starts conversion, and returns the project ID and an embeddable HTML snippet.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.audio_native.create(
    name="name",
)
-
-
-
name:
str
— Project name.
-
image:
typing.Optional[str]
— Image URL used in the player. If not provided, default image set in the Player settings is used.
-
author:
typing.Optional[str]
— Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used.
-
title:
typing.Optional[str]
— Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used.
-
small:
typing.Optional[bool]
— Whether to use small player or not. If not provided, default value set in the Player settings is used.
-
text_color:
typing.Optional[str]
— Text color used in the player. If not provided, default text color set in the Player settings is used.
-
background_color:
typing.Optional[str]
— Background color used in the player. If not provided, default background color set in the Player settings is used.
-
sessionization:
typing.Optional[int]
— Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used.
-
voice_id:
typing.Optional[str]
— Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used.
-
model_id:
typing.Optional[str]
— TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used.
-
file: `from __future__ import annotations
typing.Optional[core.File]` — See core.File for more documentation
-
auto_convert:
typing.Optional[bool]
— Whether to auto convert the project to audio or not.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
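For example, to create a player for an uploaded article and grab the embeddable snippet, something like the following should work. The file path is a placeholder, an open binary file handle is assumed to satisfy core.File, and the project_id and html_snippet attribute names on the response are assumptions.

from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Create an Audio Native project from a local HTML article (placeholder path) and auto-convert it.
with open("article.html", "rb") as article:
    response = client.audio_native.create(
        name="My article",
        title="My article",
        author="Jane Doe",
        file=article,
        auto_convert=True,
    )

# Assumed response fields; check the SDK's response model.
print(response.project_id)
print(response.html_snippet)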
-
client.usage.get_characters_usage_metrics(...)
-
-
-
Returns the credit usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.usage.get_characters_usage_metrics(
    start_unix=1,
    end_unix=1,
)
-
-
-
start_unix:
int
— UTC Unix timestamp for the start of the usage window, in milliseconds. To include the first day of the window, the timestamp should be at 00:00:00 of that day.
-
end_unix:
int
— UTC Unix timestamp for the end of the usage window, in milliseconds. To include the last day of the window, the timestamp should be at 23:59:59 of that day.
-
include_workspace_metrics:
typing.Optional[bool]
— Whether or not to include the statistics of the entire workspace.
-
breakdown_type:
typing.Optional[BreakdownTypes]
— How to break down the information. Cannot be "user" if include_workspace_metrics is False.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
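Because start_unix and end_unix are in milliseconds, it is easy to pass second-precision timestamps by mistake. A minimal sketch that builds the window boundaries explicitly:

from datetime import datetime, timezone

from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Usage window: all of January 2024, expressed as UTC Unix timestamps in milliseconds.
start = datetime(2024, 1, 1, 0, 0, 0, tzinfo=timezone.utc)
end = datetime(2024, 1, 31, 23, 59, 59, tzinfo=timezone.utc)

client.usage.get_characters_usage_metrics(
    start_unix=int(start.timestamp() * 1000),
    end_unix=int(end.timestamp() * 1000),
    include_workspace_metrics=True,
    breakdown_type="voice",  # break down by voice; the literal value is assumed to be accepted
)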
-
client.pronunciation_dictionary.add_from_file(...)
-
-
-
Creates a new pronunciation dictionary from a lexicon .PLS file
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.pronunciation_dictionary.add_from_file(
    name="name",
)
-
-
-
name:
str
— The name of the pronunciation dictionary, used for identification only.
-
file: `from __future__ import annotations
typing.Optional[core.File]` — See core.File for more documentation
-
description:
typing.Optional[str]
— A description of the pronunciation dictionary, used for identification only.
-
workspace_access:
typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess]
— Should be one of 'editor' or 'viewer'. If not provided, defaults to no access.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
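A minimal sketch that uploads a local lexicon file; the path is a placeholder, an open binary file handle is assumed to satisfy core.File, and passing the literal 'viewer' for workspace_access is assumed to be accepted.

from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Create a pronunciation dictionary from a local .pls lexicon (placeholder path).
with open("acronyms.pls", "rb") as lexicon:
    client.pronunciation_dictionary.add_from_file(
        name="acronyms",
        description="Company acronyms and product names",
        file=lexicon,
        workspace_access="viewer",  # assumed literal value for the workspace_access enum
    )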
-
client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(...)
-
-
-
Add rules to the pronunciation dictionary
-
-
-
from elevenlabs import ElevenLabs
from elevenlabs.pronunciation_dictionary import (
    PronunciationDictionaryRule_Phoneme,
)

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(
    pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM",
    rules=[
        PronunciationDictionaryRule_Phoneme(
            string_to_replace="rules",
            phoneme="rules",
            alphabet="rules",
        )
    ],
)
-
-
-
pronunciation_dictionary_id:
str
— The id of the pronunciation dictionary
-
rules:
typing.Sequence[PronunciationDictionaryRule]
— List of pronunciation rules. A rule can be either an alias rule: {'string_to_replace': 'a', 'type': 'alias', 'alias': 'b'} or a phoneme rule: {'string_to_replace': 'a', 'type': 'phoneme', 'phoneme': 'b', 'alphabet': 'ipa'}. See the sketch after this parameter list for an example of each.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
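The two rule shapes described above map onto the SDK's rule classes. The sketch below adds one alias rule and one phoneme rule; PronunciationDictionaryRule_Phoneme is taken from the example above, while PronunciationDictionaryRule_Alias is an assumed counterpart class, so verify its name and fields before use.

from elevenlabs import ElevenLabs
from elevenlabs.pronunciation_dictionary import (
    PronunciationDictionaryRule_Alias,  # assumed counterpart to the phoneme rule class
    PronunciationDictionaryRule_Phoneme,
)

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

client.pronunciation_dictionary.add_rules_to_the_pronunciation_dictionary(
    pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM",
    rules=[
        # Alias rule: replace the matched string with another spelling.
        PronunciationDictionaryRule_Alias(
            string_to_replace="UN",
            alias="United Nations",
        ),
        # Phoneme rule: replace the matched string with an explicit IPA pronunciation.
        PronunciationDictionaryRule_Phoneme(
            string_to_replace="tomato",
            phoneme="/təˈmeɪtoʊ/",
            alphabet="ipa",
        ),
    ],
)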
-
client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(...)
-
-
-
Remove rules from the pronunciation dictionary
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.pronunciation_dictionary.remove_rules_from_the_pronunciation_dictionary(
    pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM",
    rule_strings=["rule_strings"],
)
-
-
-
pronunciation_dictionary_id:
str
— The id of the pronunciation dictionary
-
rule_strings:
typing.Sequence[str]
— List of strings to remove from the pronunciation dictionary.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.pronunciation_dictionary.download(...)
-
-
-
Get the PLS file containing the rules of a pronunciation dictionary version.
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.pronunciation_dictionary.download(
    dictionary_id="Fm6AvNgS53NXe6Kqxp3e",
    version_id="KZFyRUq3R6kaqhKI146w",
)
-
-
-
dictionary_id:
str
— The id of the pronunciation dictionary
-
version_id:
str
— The id of the version of the pronunciation dictionary
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
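The sketch below saves the returned PLS rules to a local file. Whether the call returns the content directly as bytes or as a stream of chunks is an assumption, so adapt the write accordingly.

from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Download the PLS rules for a specific dictionary version and save them locally.
pls = client.pronunciation_dictionary.download(
    dictionary_id="Fm6AvNgS53NXe6Kqxp3e",
    version_id="KZFyRUq3R6kaqhKI146w",
)
with open("dictionary.pls", "wb") as f:
    f.write(pls)  # assumes the call returns the file content as bytes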
-
client.pronunciation_dictionary.get(...)
-
-
-
Get metadata for a pronunciation dictionary
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.pronunciation_dictionary.get(
    pronunciation_dictionary_id="Fm6AvNgS53NXe6Kqxp3e",
)
-
-
-
pronunciation_dictionary_id:
str
— The id of the pronunciation dictionary
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
-
client.pronunciation_dictionary.get_all(...)
-
-
-
Get a list of the pronunciation dictionaries you have access to and their metadata
-
-
-
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.pronunciation_dictionary.get_all(
    page_size=1,
)
-
-
-
cursor:
typing.Optional[str]
— Used for fetching the next page. The cursor is returned in the response.
-
page_size:
typing.Optional[int]
— How many pronunciation dictionaries to return at maximum. Can not exceed 100, defaults to 30.
-
request_options:
typing.Optional[RequestOptions]
— Request-specific configuration.
-
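Cursor pagination can be wrapped in a simple loop. The response attribute names used below (pronunciation_dictionaries, has_more, next_cursor) are assumptions, so verify them against the SDK's response model.

from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Page through all pronunciation dictionaries (response field names are assumed).
dictionaries = []
cursor = None
while True:
    page = client.pronunciation_dictionary.get_all(
        page_size=30,
        cursor=cursor,
    )
    dictionaries.extend(page.pronunciation_dictionaries)
    if not page.has_more:
        break
    cursor = page.next_cursor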
-