code stringlengths 66 870k | docstring stringlengths 19 26.7k | func_name stringlengths 1 138 | language stringclasses 1
value | repo stringlengths 7 68 | path stringlengths 5 324 | url stringlengths 46 389 | license stringclasses 7
values |
|---|---|---|---|---|---|---|---|
async def get_settings(
    self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> GetAudioNativeProjectSettingsResponseModel:
    """
    Get player settings for the specific project.

    Parameters
    ----------
    project_id : str
        The ID of the Studio project.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    GetAudioNativeProjectSettingsResponseModel
        Successful Response

    Examples
    --------
    import asyncio

    from elevenlabs import AsyncElevenLabs

    client = AsyncElevenLabs(
        api_key="YOUR_API_KEY",
    )

    async def main() -> None:
        await client.audio_native.get_settings(
            project_id="21m00Tcm4TlvDq8ikWAM",
        )

    asyncio.run(main())
    """
    # Delegate to the raw client and unwrap the parsed response body.
    _response = await self._raw_client.get_settings(project_id, request_options=request_options)
    return _response.data |
Get player settings for the specific project.
Parameters
----------
project_id : str
The ID of the Studio project.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetAudioNativeProjectSettingsResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.audio_native.get_settings(
project_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
| get_settings | python | elevenlabs/elevenlabs-python | src/elevenlabs/audio_native/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_native/client.py | MIT |
async def update(
    self,
    project_id: str,
    *,
    file: typing.Optional[core.File] = OMIT,
    auto_convert: typing.Optional[bool] = OMIT,
    auto_publish: typing.Optional[bool] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> AudioNativeEditContentResponseModel:
    """
    Updates content for the specific AudioNative Project.

    Parameters
    ----------
    project_id : str
        The ID of the project to be used. You can use the [List projects](/docs/api-reference/studio/get-projects) endpoint to list all the available projects.

    file : typing.Optional[core.File]
        See core.File for more documentation

    auto_convert : typing.Optional[bool]
        Whether to auto convert the project to audio or not.

    auto_publish : typing.Optional[bool]
        Whether to auto publish the new project snapshot after it's converted.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AudioNativeEditContentResponseModel
        Successful Response

    Examples
    --------
    import asyncio

    from elevenlabs import AsyncElevenLabs

    client = AsyncElevenLabs(
        api_key="YOUR_API_KEY",
    )

    async def main() -> None:
        await client.audio_native.update(
            project_id="21m00Tcm4TlvDq8ikWAM",
        )

    asyncio.run(main())
    """
    # Delegate to the raw client and unwrap the parsed response body.
    _response = await self._raw_client.update(
        project_id, file=file, auto_convert=auto_convert, auto_publish=auto_publish, request_options=request_options
    )
    return _response.data |
Updates content for the specific AudioNative Project.
Parameters
----------
project_id : str
The ID of the project to be used. You can use the [List projects](/docs/api-reference/studio/get-projects) endpoint to list all the available projects.
file : typing.Optional[core.File]
See core.File for more documentation
auto_convert : typing.Optional[bool]
Whether to auto convert the project to audio or not.
auto_publish : typing.Optional[bool]
Whether to auto publish the new project snapshot after it's converted.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AudioNativeEditContentResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.audio_native.update(
project_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
| update | python | elevenlabs/elevenlabs-python | src/elevenlabs/audio_native/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_native/client.py | MIT |
def create(
    self,
    *,
    name: str,
    image: typing.Optional[str] = OMIT,
    author: typing.Optional[str] = OMIT,
    title: typing.Optional[str] = OMIT,
    small: typing.Optional[bool] = OMIT,
    text_color: typing.Optional[str] = OMIT,
    background_color: typing.Optional[str] = OMIT,
    sessionization: typing.Optional[int] = OMIT,
    voice_id: typing.Optional[str] = OMIT,
    model_id: typing.Optional[str] = OMIT,
    file: typing.Optional[core.File] = OMIT,
    auto_convert: typing.Optional[bool] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[AudioNativeCreateProjectResponseModel]:
    """
    Creates Audio Native enabled project, optionally starts conversion and returns project ID and embeddable HTML snippet.

    Parameters
    ----------
    name : str
        Project name.

    image : typing.Optional[str]
        (Deprecated) Image URL used in the player. If not provided, default image set in the Player settings is used.

    author : typing.Optional[str]
        Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used.

    title : typing.Optional[str]
        Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used.

    small : typing.Optional[bool]
        (Deprecated) Whether to use small player or not. If not provided, default value set in the Player settings is used.

    text_color : typing.Optional[str]
        Text color used in the player. If not provided, default text color set in the Player settings is used.

    background_color : typing.Optional[str]
        Background color used in the player. If not provided, default background color set in the Player settings is used.

    sessionization : typing.Optional[int]
        (Deprecated) Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used.

    voice_id : typing.Optional[str]
        Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used.

    model_id : typing.Optional[str]
        TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used.

    file : typing.Optional[core.File]
        See core.File for more documentation

    auto_convert : typing.Optional[bool]
        Whether to auto convert the project to audio or not.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[AudioNativeCreateProjectResponseModel]
        Successful Response
    """
    # Multipart/form-data POST: scalar fields travel in `data`, the optional
    # upload in `files`. `omit=OMIT` strips unset fields; `force_multipart`
    # keeps the encoding multipart even when no file is attached.
    _response = self._client_wrapper.httpx_client.request(
        "v1/audio-native",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        data={
            "name": name,
            "image": image,
            "author": author,
            "title": title,
            "small": small,
            "text_color": text_color,
            "background_color": background_color,
            "sessionization": sessionization,
            "voice_id": voice_id,
            "model_id": model_id,
            "auto_convert": auto_convert,
        },
        files={
            **({"file": file} if file is not None else {}),
        },
        request_options=request_options,
        omit=OMIT,
        force_multipart=True,
    )
    try:
        # 2xx: deserialize the JSON body into the typed response model.
        if 200 <= _response.status_code < 300:
            _data = typing.cast(
                AudioNativeCreateProjectResponseModel,
                construct_type(
                    type_=AudioNativeCreateProjectResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return HttpResponse(response=_response, data=_data)
        # 422: surface the API's validation error as a typed exception.
        if _response.status_code == 422:
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        # Unexpected status: decode the body so it can be attached below.
        _response_json = _response.json()
    except JSONDecodeError:
        # Body was not JSON (e.g. a gateway error page) — attach the raw text.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status with a JSON body.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) |
Creates Audio Native enabled project, optionally starts conversion and returns project ID and embeddable HTML snippet.
Parameters
----------
name : str
Project name.
image : typing.Optional[str]
(Deprecated) Image URL used in the player. If not provided, default image set in the Player settings is used.
author : typing.Optional[str]
Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used.
title : typing.Optional[str]
Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used.
small : typing.Optional[bool]
(Deprecated) Whether to use small player or not. If not provided, default value set in the Player settings is used.
text_color : typing.Optional[str]
Text color used in the player. If not provided, default text color set in the Player settings is used.
background_color : typing.Optional[str]
Background color used in the player. If not provided, default background color set in the Player settings is used.
sessionization : typing.Optional[int]
(Deprecated) Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used.
voice_id : typing.Optional[str]
Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used.
model_id : typing.Optional[str]
TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used.
file : typing.Optional[core.File]
See core.File for more documentation
auto_convert : typing.Optional[bool]
Whether to auto convert the project to audio or not.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[AudioNativeCreateProjectResponseModel]
Successful Response
| create | python | elevenlabs/elevenlabs-python | src/elevenlabs/audio_native/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_native/raw_client.py | MIT |
def get_settings(
    self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[GetAudioNativeProjectSettingsResponseModel]:
    """
    Get player settings for the specific project.

    Parameters
    ----------
    project_id : str
        The ID of the Studio project.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[GetAudioNativeProjectSettingsResponseModel]
        Successful Response
    """
    # `jsonable_encoder` safely serialises the id into the URL path segment.
    _response = self._client_wrapper.httpx_client.request(
        f"v1/audio-native/{jsonable_encoder(project_id)}/settings",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        request_options=request_options,
    )
    try:
        # 2xx: deserialize the JSON body into the typed response model.
        if 200 <= _response.status_code < 300:
            _data = typing.cast(
                GetAudioNativeProjectSettingsResponseModel,
                construct_type(
                    type_=GetAudioNativeProjectSettingsResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return HttpResponse(response=_response, data=_data)
        # 422: surface the API's validation error as a typed exception.
        if _response.status_code == 422:
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        # Unexpected status: decode the body so it can be attached below.
        _response_json = _response.json()
    except JSONDecodeError:
        # Body was not JSON (e.g. a gateway error page) — attach the raw text.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status with a JSON body.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) |
Get player settings for the specific project.
Parameters
----------
project_id : str
The ID of the Studio project.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetAudioNativeProjectSettingsResponseModel]
Successful Response
| get_settings | python | elevenlabs/elevenlabs-python | src/elevenlabs/audio_native/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_native/raw_client.py | MIT |
def update(
    self,
    project_id: str,
    *,
    file: typing.Optional[core.File] = OMIT,
    auto_convert: typing.Optional[bool] = OMIT,
    auto_publish: typing.Optional[bool] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[AudioNativeEditContentResponseModel]:
    """
    Updates content for the specific AudioNative Project.

    Parameters
    ----------
    project_id : str
        The ID of the project to be used. You can use the [List projects](/docs/api-reference/studio/get-projects) endpoint to list all the available projects.

    file : typing.Optional[core.File]
        See core.File for more documentation

    auto_convert : typing.Optional[bool]
        Whether to auto convert the project to audio or not.

    auto_publish : typing.Optional[bool]
        Whether to auto publish the new project snapshot after it's converted.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[AudioNativeEditContentResponseModel]
        Successful Response
    """
    # Multipart/form-data POST: flags travel in `data`, the optional upload
    # in `files`; `force_multipart` keeps the encoding stable without a file.
    _response = self._client_wrapper.httpx_client.request(
        f"v1/audio-native/{jsonable_encoder(project_id)}/content",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        data={
            "auto_convert": auto_convert,
            "auto_publish": auto_publish,
        },
        files={
            **({"file": file} if file is not None else {}),
        },
        request_options=request_options,
        omit=OMIT,
        force_multipart=True,
    )
    try:
        # 2xx: deserialize the JSON body into the typed response model.
        if 200 <= _response.status_code < 300:
            _data = typing.cast(
                AudioNativeEditContentResponseModel,
                construct_type(
                    type_=AudioNativeEditContentResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return HttpResponse(response=_response, data=_data)
        # 422: surface the API's validation error as a typed exception.
        if _response.status_code == 422:
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        # Unexpected status: decode the body so it can be attached below.
        _response_json = _response.json()
    except JSONDecodeError:
        # Body was not JSON (e.g. a gateway error page) — attach the raw text.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status with a JSON body.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) |
Updates content for the specific AudioNative Project.
Parameters
----------
project_id : str
The ID of the project to be used. You can use the [List projects](/docs/api-reference/studio/get-projects) endpoint to list all the available projects.
file : typing.Optional[core.File]
See core.File for more documentation
auto_convert : typing.Optional[bool]
Whether to auto convert the project to audio or not.
auto_publish : typing.Optional[bool]
Whether to auto publish the new project snapshot after it's converted.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[AudioNativeEditContentResponseModel]
Successful Response
| update | python | elevenlabs/elevenlabs-python | src/elevenlabs/audio_native/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_native/raw_client.py | MIT |
async def create(
    self,
    *,
    name: str,
    image: typing.Optional[str] = OMIT,
    author: typing.Optional[str] = OMIT,
    title: typing.Optional[str] = OMIT,
    small: typing.Optional[bool] = OMIT,
    text_color: typing.Optional[str] = OMIT,
    background_color: typing.Optional[str] = OMIT,
    sessionization: typing.Optional[int] = OMIT,
    voice_id: typing.Optional[str] = OMIT,
    model_id: typing.Optional[str] = OMIT,
    file: typing.Optional[core.File] = OMIT,
    auto_convert: typing.Optional[bool] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[AudioNativeCreateProjectResponseModel]:
    """
    Creates Audio Native enabled project, optionally starts conversion and returns project ID and embeddable HTML snippet.

    Parameters
    ----------
    name : str
        Project name.

    image : typing.Optional[str]
        (Deprecated) Image URL used in the player. If not provided, default image set in the Player settings is used.

    author : typing.Optional[str]
        Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used.

    title : typing.Optional[str]
        Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used.

    small : typing.Optional[bool]
        (Deprecated) Whether to use small player or not. If not provided, default value set in the Player settings is used.

    text_color : typing.Optional[str]
        Text color used in the player. If not provided, default text color set in the Player settings is used.

    background_color : typing.Optional[str]
        Background color used in the player. If not provided, default background color set in the Player settings is used.

    sessionization : typing.Optional[int]
        (Deprecated) Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used.

    voice_id : typing.Optional[str]
        Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used.

    model_id : typing.Optional[str]
        TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used.

    file : typing.Optional[core.File]
        See core.File for more documentation

    auto_convert : typing.Optional[bool]
        Whether to auto convert the project to audio or not.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[AudioNativeCreateProjectResponseModel]
        Successful Response
    """
    # Multipart/form-data POST: scalar fields travel in `data`, the optional
    # upload in `files`. `omit=OMIT` strips unset fields; `force_multipart`
    # keeps the encoding multipart even when no file is attached.
    _response = await self._client_wrapper.httpx_client.request(
        "v1/audio-native",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        data={
            "name": name,
            "image": image,
            "author": author,
            "title": title,
            "small": small,
            "text_color": text_color,
            "background_color": background_color,
            "sessionization": sessionization,
            "voice_id": voice_id,
            "model_id": model_id,
            "auto_convert": auto_convert,
        },
        files={
            **({"file": file} if file is not None else {}),
        },
        request_options=request_options,
        omit=OMIT,
        force_multipart=True,
    )
    try:
        # 2xx: deserialize the JSON body into the typed response model.
        if 200 <= _response.status_code < 300:
            _data = typing.cast(
                AudioNativeCreateProjectResponseModel,
                construct_type(
                    type_=AudioNativeCreateProjectResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return AsyncHttpResponse(response=_response, data=_data)
        # 422: surface the API's validation error as a typed exception.
        if _response.status_code == 422:
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        # Unexpected status: decode the body so it can be attached below.
        _response_json = _response.json()
    except JSONDecodeError:
        # Body was not JSON (e.g. a gateway error page) — attach the raw text.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status with a JSON body.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) |
Creates Audio Native enabled project, optionally starts conversion and returns project ID and embeddable HTML snippet.
Parameters
----------
name : str
Project name.
image : typing.Optional[str]
(Deprecated) Image URL used in the player. If not provided, default image set in the Player settings is used.
author : typing.Optional[str]
Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used.
title : typing.Optional[str]
Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used.
small : typing.Optional[bool]
(Deprecated) Whether to use small player or not. If not provided, default value set in the Player settings is used.
text_color : typing.Optional[str]
Text color used in the player. If not provided, default text color set in the Player settings is used.
background_color : typing.Optional[str]
Background color used in the player. If not provided, default background color set in the Player settings is used.
sessionization : typing.Optional[int]
(Deprecated) Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used.
voice_id : typing.Optional[str]
Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used.
model_id : typing.Optional[str]
TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used.
file : typing.Optional[core.File]
See core.File for more documentation
auto_convert : typing.Optional[bool]
Whether to auto convert the project to audio or not.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[AudioNativeCreateProjectResponseModel]
Successful Response
| create | python | elevenlabs/elevenlabs-python | src/elevenlabs/audio_native/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_native/raw_client.py | MIT |
async def get_settings(
    self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[GetAudioNativeProjectSettingsResponseModel]:
    """
    Get player settings for the specific project.

    Parameters
    ----------
    project_id : str
        The ID of the Studio project.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[GetAudioNativeProjectSettingsResponseModel]
        Successful Response
    """
    # `jsonable_encoder` safely serialises the id into the URL path segment.
    _response = await self._client_wrapper.httpx_client.request(
        f"v1/audio-native/{jsonable_encoder(project_id)}/settings",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        request_options=request_options,
    )
    try:
        # 2xx: deserialize the JSON body into the typed response model.
        if 200 <= _response.status_code < 300:
            _data = typing.cast(
                GetAudioNativeProjectSettingsResponseModel,
                construct_type(
                    type_=GetAudioNativeProjectSettingsResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return AsyncHttpResponse(response=_response, data=_data)
        # 422: surface the API's validation error as a typed exception.
        if _response.status_code == 422:
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        # Unexpected status: decode the body so it can be attached below.
        _response_json = _response.json()
    except JSONDecodeError:
        # Body was not JSON (e.g. a gateway error page) — attach the raw text.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status with a JSON body.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) |
Get player settings for the specific project.
Parameters
----------
project_id : str
The ID of the Studio project.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetAudioNativeProjectSettingsResponseModel]
Successful Response
| get_settings | python | elevenlabs/elevenlabs-python | src/elevenlabs/audio_native/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_native/raw_client.py | MIT |
async def update(
    self,
    project_id: str,
    *,
    file: typing.Optional[core.File] = OMIT,
    auto_convert: typing.Optional[bool] = OMIT,
    auto_publish: typing.Optional[bool] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[AudioNativeEditContentResponseModel]:
    """
    Updates content for the specific AudioNative Project.

    Parameters
    ----------
    project_id : str
        The ID of the project to be used. You can use the [List projects](/docs/api-reference/studio/get-projects) endpoint to list all the available projects.

    file : typing.Optional[core.File]
        See core.File for more documentation

    auto_convert : typing.Optional[bool]
        Whether to auto convert the project to audio or not.

    auto_publish : typing.Optional[bool]
        Whether to auto publish the new project snapshot after it's converted.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[AudioNativeEditContentResponseModel]
        Successful Response
    """
    # Multipart/form-data POST: flags travel in `data`, the optional upload
    # in `files`; `force_multipart` keeps the encoding stable without a file.
    _response = await self._client_wrapper.httpx_client.request(
        f"v1/audio-native/{jsonable_encoder(project_id)}/content",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        data={
            "auto_convert": auto_convert,
            "auto_publish": auto_publish,
        },
        files={
            **({"file": file} if file is not None else {}),
        },
        request_options=request_options,
        omit=OMIT,
        force_multipart=True,
    )
    try:
        # 2xx: deserialize the JSON body into the typed response model.
        if 200 <= _response.status_code < 300:
            _data = typing.cast(
                AudioNativeEditContentResponseModel,
                construct_type(
                    type_=AudioNativeEditContentResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return AsyncHttpResponse(response=_response, data=_data)
        # 422: surface the API's validation error as a typed exception.
        if _response.status_code == 422:
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        # Unexpected status: decode the body so it can be attached below.
        _response_json = _response.json()
    except JSONDecodeError:
        # Body was not JSON (e.g. a gateway error page) — attach the raw text.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status with a JSON body.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) |
Updates content for the specific AudioNative Project.
Parameters
----------
project_id : str
The ID of the project to be used. You can use the [List projects](/docs/api-reference/studio/get-projects) endpoint to list all the available projects.
file : typing.Optional[core.File]
See core.File for more documentation
auto_convert : typing.Optional[bool]
Whether to auto convert the project to audio or not.
auto_publish : typing.Optional[bool]
Whether to auto publish the new project snapshot after it's converted.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[AudioNativeEditContentResponseModel]
Successful Response
| update | python | elevenlabs/elevenlabs-python | src/elevenlabs/audio_native/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/audio_native/raw_client.py | MIT |
def add_to_knowledge_base(
    self,
    *,
    name: typing.Optional[str] = OMIT,
    url: typing.Optional[str] = OMIT,
    file: typing.Optional[core.File] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> AddKnowledgeBaseResponseModel:
    """
    Upload a file or webpage URL to create a knowledge base document. <br> <Note> After creating the document, update the agent's knowledge base by calling [Update agent](/docs/conversational-ai/api-reference/agents/update-agent). </Note>

    Parameters
    ----------
    name : typing.Optional[str]
        A custom, human-readable name for the document.

    url : typing.Optional[str]
        URL to a page of documentation that the agent will have access to in order to interact with users.

    file : typing.Optional[core.File]
        See core.File for more documentation

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AddKnowledgeBaseResponseModel
        Successful Response

    Examples
    --------
    from elevenlabs import ElevenLabs

    client = ElevenLabs(
        api_key="YOUR_API_KEY",
    )

    client.conversational_ai.add_to_knowledge_base()
    """
    # Delegate to the raw client and unwrap the parsed response body.
    _response = self._raw_client.add_to_knowledge_base(
        name=name, url=url, file=file, request_options=request_options
    )
    return _response.data |
Upload a file or webpage URL to create a knowledge base document. <br> <Note> After creating the document, update the agent's knowledge base by calling [Update agent](/docs/conversational-ai/api-reference/agents/update-agent). </Note>
Parameters
----------
name : typing.Optional[str]
A custom, human-readable name for the document.
url : typing.Optional[str]
URL to a page of documentation that the agent will have access to in order to interact with users.
file : typing.Optional[core.File]
See core.File for more documentation
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AddKnowledgeBaseResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.add_to_knowledge_base()
| add_to_knowledge_base | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/client.py | MIT |
def get_document_rag_indexes(
    self, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> RagDocumentIndexesResponseModel:
    """
    Provides information about all RAG indexes of the specified knowledgebase document.

    Parameters
    ----------
    documentation_id : str
        The id of a document from the knowledge base. This is returned on document addition.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    RagDocumentIndexesResponseModel
        Successful Response

    Examples
    --------
    from elevenlabs import ElevenLabs

    client = ElevenLabs(
        api_key="YOUR_API_KEY",
    )

    client.conversational_ai.get_document_rag_indexes(
        documentation_id="21m00Tcm4TlvDq8ikWAM",
    )
    """
    # Delegate to the raw client and unwrap the parsed response body.
    _response = self._raw_client.get_document_rag_indexes(documentation_id, request_options=request_options)
    return _response.data |
Provides information about all RAG indexes of the specified knowledgebase document.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
RagDocumentIndexesResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.get_document_rag_indexes(
documentation_id="21m00Tcm4TlvDq8ikWAM",
)
| get_document_rag_indexes | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/client.py | MIT |
def delete_document_rag_index(
    self, documentation_id: str, rag_index_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> RagDocumentIndexResponseModel:
    """
    Delete RAG index for the knowledgebase document.

    Parameters
    ----------
    documentation_id : str
        The id of a document from the knowledge base. This is returned on document addition.

    rag_index_id : str
        The id of RAG index of document from the knowledge base.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    RagDocumentIndexResponseModel
        Successful Response

    Examples
    --------
    from elevenlabs import ElevenLabs

    client = ElevenLabs(
        api_key="YOUR_API_KEY",
    )

    client.conversational_ai.delete_document_rag_index(
        documentation_id="21m00Tcm4TlvDq8ikWAM",
        rag_index_id="21m00Tcm4TlvDq8ikWAM",
    )
    """
    # Delegate to the raw client and unwrap the parsed response body.
    _response = self._raw_client.delete_document_rag_index(
        documentation_id, rag_index_id, request_options=request_options
    )
    return _response.data |
Delete RAG index for the knowledgebase document.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
rag_index_id : str
The id of RAG index of document from the knowledge base.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
RagDocumentIndexResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.delete_document_rag_index(
documentation_id="21m00Tcm4TlvDq8ikWAM",
rag_index_id="21m00Tcm4TlvDq8ikWAM",
)
| delete_document_rag_index | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/client.py | MIT |
def rag_index_overview(
    self, *, request_options: typing.Optional[RequestOptions] = None
) -> RagIndexOverviewResponseModel:
    """Report total size and related information for the RAG indexes used by knowledge base documents.

    Parameters
    ----------
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    RagIndexOverviewResponseModel
        Successful Response
    """
    # Thin pass-through to the raw client; callers get the parsed model only.
    return self._raw_client.rag_index_overview(request_options=request_options).data
Provides total size and other information of RAG indexes used by knowledgebase documents
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
RagIndexOverviewResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.rag_index_overview()
| rag_index_overview | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/client.py | MIT |
def update_secret(
    self, secret_id: str, *, name: str, value: str, request_options: typing.Optional[RequestOptions] = None
) -> PostWorkspaceSecretResponseModel:
    """Update an existing secret for the workspace.

    Parameters
    ----------
    secret_id : str
        Identifier of the secret to update.
    name : str
        New name for the secret.
    value : str
        New secret value.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    PostWorkspaceSecretResponseModel
        Successful Response
    """
    # Delegate to the raw client and return only the deserialized body.
    return self._raw_client.update_secret(
        secret_id, name=name, value=value, request_options=request_options
    ).data
Update an existing secret for the workspace
Parameters
----------
secret_id : str
name : str
value : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
PostWorkspaceSecretResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.update_secret(
secret_id="secret_id",
name="name",
value="value",
)
| update_secret | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/client.py | MIT |
async def add_to_knowledge_base(
    self,
    *,
    name: typing.Optional[str] = OMIT,
    url: typing.Optional[str] = OMIT,
    file: typing.Optional[core.File] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> AddKnowledgeBaseResponseModel:
    """Upload a file or webpage URL to create a knowledge base document.

    After creating the document, update the agent's knowledge base by calling
    [Update agent](/docs/conversational-ai/api-reference/agents/update-agent).

    Parameters
    ----------
    name : typing.Optional[str]
        A custom, human-readable name for the document.
    url : typing.Optional[str]
        URL to a page of documentation that the agent will have access to in order to interact with users.
    file : typing.Optional[core.File]
        See core.File for more documentation.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AddKnowledgeBaseResponseModel
        Successful Response
    """
    # Await the raw call, then unwrap the typed payload for the caller.
    return (
        await self._raw_client.add_to_knowledge_base(name=name, url=url, file=file, request_options=request_options)
    ).data
Upload a file or webpage URL to create a knowledge base document. <br> <Note> After creating the document, update the agent's knowledge base by calling [Update agent](/docs/conversational-ai/api-reference/agents/update-agent). </Note>
Parameters
----------
name : typing.Optional[str]
A custom, human-readable name for the document.
url : typing.Optional[str]
URL to a page of documentation that the agent will have access to in order to interact with users.
file : typing.Optional[core.File]
See core.File for more documentation
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AddKnowledgeBaseResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.add_to_knowledge_base()
asyncio.run(main())
| add_to_knowledge_base | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/client.py | MIT |
async def get_document_rag_indexes(
    self, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> RagDocumentIndexesResponseModel:
    """List all RAG indexes of the specified knowledge base document.

    Parameters
    ----------
    documentation_id : str
        The id of a document from the knowledge base. This is returned on document addition.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    RagDocumentIndexesResponseModel
        Successful Response
    """
    # Await the raw call and hand back only the parsed model.
    return (
        await self._raw_client.get_document_rag_indexes(documentation_id, request_options=request_options)
    ).data
Provides information about all RAG indexes of the specified knowledgebase document.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
RagDocumentIndexesResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.get_document_rag_indexes(
documentation_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
| get_document_rag_indexes | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/client.py | MIT |
async def delete_document_rag_index(
    self, documentation_id: str, rag_index_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> RagDocumentIndexResponseModel:
    """Delete a RAG index belonging to a knowledge base document.

    Parameters
    ----------
    documentation_id : str
        The id of a document from the knowledge base. This is returned on document addition.
    rag_index_id : str
        The id of RAG index of document from the knowledge base.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    RagDocumentIndexResponseModel
        Successful Response
    """
    # Await the raw call and return the deserialized body.
    return (
        await self._raw_client.delete_document_rag_index(
            documentation_id, rag_index_id, request_options=request_options
        )
    ).data
Delete RAG index for the knowledgebase document.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
rag_index_id : str
The id of RAG index of document from the knowledge base.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
RagDocumentIndexResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.delete_document_rag_index(
documentation_id="21m00Tcm4TlvDq8ikWAM",
rag_index_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
| delete_document_rag_index | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/client.py | MIT |
async def rag_index_overview(
    self, *, request_options: typing.Optional[RequestOptions] = None
) -> RagIndexOverviewResponseModel:
    """Report total size and related information for the RAG indexes used by knowledge base documents.

    Parameters
    ----------
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    RagIndexOverviewResponseModel
        Successful Response
    """
    # Thin async pass-through to the raw client.
    return (await self._raw_client.rag_index_overview(request_options=request_options)).data
Provides total size and other information of RAG indexes used by knowledgebase documents
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
RagIndexOverviewResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.rag_index_overview()
asyncio.run(main())
| rag_index_overview | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/client.py | MIT |
async def update_secret(
    self, secret_id: str, *, name: str, value: str, request_options: typing.Optional[RequestOptions] = None
) -> PostWorkspaceSecretResponseModel:
    """Update an existing secret for the workspace.

    Parameters
    ----------
    secret_id : str
        Identifier of the secret to update.
    name : str
        New name for the secret.
    value : str
        New secret value.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    PostWorkspaceSecretResponseModel
        Successful Response
    """
    # Await the raw call and unwrap the typed payload.
    return (
        await self._raw_client.update_secret(secret_id, name=name, value=value, request_options=request_options)
    ).data
Update an existing secret for the workspace
Parameters
----------
secret_id : str
name : str
value : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
PostWorkspaceSecretResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.update_secret(
secret_id="secret_id",
name="name",
value="value",
)
asyncio.run(main())
| update_secret | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/client.py | MIT |
def start(self):
    """Spin up a daemon thread that hosts a private asyncio event loop.

    Idempotent: returns immediately if the loop is already running.
    Blocks until the loop thread has signalled readiness.
    """
    if self._running.is_set():
        return

    def _loop_worker():
        loop = asyncio.new_event_loop()
        self._loop = loop
        asyncio.set_event_loop(loop)
        # Signal readiness only after the loop is installed.
        self._running.set()
        try:
            loop.run_forever()
        finally:
            # Mirror startup: clear the flag, close the loop, drop the reference.
            self._running.clear()
            loop.close()
            self._loop = None

    self._thread = threading.Thread(target=_loop_worker, daemon=True, name="ClientTools-EventLoop")
    self._thread.start()
    # Wait for the worker to publish the loop before returning.
    self._running.wait()
def stop(self):
    """Stop the background event loop (if running) and release the thread pool.

    Safe to call when the loop was never started; the pool is shut down
    without waiting for in-flight work.
    """
    loop_active = self._loop is not None and self._running.is_set()
    if loop_active:
        # Ask the loop thread to exit, then wait for it to finish.
        self._loop.call_soon_threadsafe(self._loop.stop)
        self._thread.join()
    self.thread_pool.shutdown(wait=False)
def register(
    self,
    tool_name: str,
    handler: Union[Callable[[dict], Any], Callable[[dict], Awaitable[Any]]],
    is_async: bool = False,
) -> None:
    """Register a tool that the AI agent may invoke by name.

    Args:
        tool_name: Unique identifier for the tool.
        handler: Callable implementing the tool's logic (sync or async).
        is_async: True when ``handler`` is a coroutine function.

    Raises:
        ValueError: If ``handler`` is not callable or ``tool_name`` is taken.
    """
    # Validate the handler up front; only the registry mutation needs the lock.
    if not callable(handler):
        raise ValueError("Handler must be callable")
    with self.lock:
        if tool_name in self.tools:
            raise ValueError(f"Tool '{tool_name}' is already registered")
        self.tools[tool_name] = (handler, is_async)
Args:
tool_name: Unique identifier for the tool
handler: Function that implements the tool's logic
is_async: Whether the handler is an async function
| register | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/conversation.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversation.py | MIT |
async def handle(self, tool_name: str, parameters: dict) -> Any:
    """Execute a registered tool with the given parameters.

    Async handlers are awaited directly; sync handlers run on the
    instance's thread pool so they never block the event loop.

    Args:
        tool_name: Name the tool was registered under.
        parameters: Argument dict passed to the handler.

    Returns:
        The result of the tool execution.

    Raises:
        ValueError: If no tool named ``tool_name`` is registered.
    """
    # Only the registry lookup needs the lock. Holding a threading.Lock
    # across the await (as before) could deadlock the loop thread when two
    # handles run concurrently on the same loop.
    with self.lock:
        if tool_name not in self.tools:
            raise ValueError(f"Tool '{tool_name}' is not registered")
        handler, is_async = self.tools[tool_name]
    if is_async:
        return await handler(parameters)
    # get_running_loop() is the supported API inside a coroutine;
    # asyncio.get_event_loop() is deprecated here since Python 3.10.
    return await asyncio.get_running_loop().run_in_executor(self.thread_pool, handler, parameters)
Returns the result of the tool execution.
| handle | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/conversation.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversation.py | MIT |
def execute_tool(self, tool_name: str, parameters: dict, callback: Callable[[dict], None]):
    """Run a tool on the background loop and deliver the result via ``callback``.

    Non-blocking: the call is scheduled onto the event-loop thread and this
    method returns immediately. Works for both sync and async tools.

    Raises:
        RuntimeError: If the background event loop is not running/available.
    """
    if not self._running.is_set():
        raise RuntimeError("ClientTools event loop is not running")
    if self._loop is None:
        raise RuntimeError("Event loop is not available")

    async def _run_and_report():
        tool_call_id = parameters.get("tool_call_id")
        try:
            outcome = await self.handle(tool_name, parameters)
            payload = {
                "type": "client_tool_result",
                "tool_call_id": tool_call_id,
                "result": outcome or f"Client tool: {tool_name} called successfully.",
                "is_error": False,
            }
        except Exception as exc:
            # Report failures to the agent rather than crashing the loop.
            payload = {
                "type": "client_tool_result",
                "tool_call_id": tool_call_id,
                "result": str(exc),
                "is_error": True,
            }
        callback(payload)

    asyncio.run_coroutine_threadsafe(_run_and_report(), self._loop)
This method is non-blocking and handles both sync and async tools.
| execute_tool | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/conversation.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversation.py | MIT |
def __init__(
    self,
    client: BaseElevenLabs,
    agent_id: str,
    *,
    requires_auth: bool,
    audio_interface: AudioInterface,
    config: Optional[ConversationInitiationData] = None,
    client_tools: Optional[ClientTools] = None,
    callback_agent_response: Optional[Callable[[str], None]] = None,
    callback_agent_response_correction: Optional[Callable[[str, str], None]] = None,
    callback_user_transcript: Optional[Callable[[str], None]] = None,
    callback_latency_measurement: Optional[Callable[[int], None]] = None,
):
    """Conversational AI session.

    BETA: This API is subject to change without regard to backwards compatibility.

    Args:
        client: The ElevenLabs client to use for the conversation.
        agent_id: The ID of the agent to converse with.
        requires_auth: Whether the agent requires authentication.
        audio_interface: The audio interface to use for input and output.
        config: Optional conversation initiation data; a default is created if omitted.
        client_tools: The client tools to use for the conversation.
        callback_agent_response: Callback for agent responses.
        callback_agent_response_correction: Callback for agent response corrections.
            First argument is the original response (previously given to
            callback_agent_response), second argument is the corrected response.
        callback_user_transcript: Callback for user transcripts.
        callback_latency_measurement: Callback for latency measurements (in milliseconds).
    """
    self.client = client
    self.agent_id = agent_id
    self.requires_auth = requires_auth
    self.audio_interface = audio_interface
    self.callback_agent_response = callback_agent_response
    # Explicit None checks (instead of `or`) so a provided-but-falsy
    # config/client_tools object is never silently replaced.
    self.config = config if config is not None else ConversationInitiationData()
    self.client_tools = client_tools if client_tools is not None else ClientTools()
    self.callback_agent_response_correction = callback_agent_response_correction
    self.callback_user_transcript = callback_user_transcript
    self.callback_latency_measurement = callback_latency_measurement

    # Start the background tool event loop immediately so tools can run
    # as soon as the session begins.
    self.client_tools.start()

    # Session state: worker thread, websocket, stop flag, identifiers.
    self._thread = None
    self._ws: Optional[Connection] = None
    self._should_stop = threading.Event()
    self._conversation_id = None
    self._last_interrupt_id = 0
BETA: This API is subject to change without regard to backwards compatibility.
Args:
client: The ElevenLabs client to use for the conversation.
agent_id: The ID of the agent to converse with.
requires_auth: Whether the agent requires authentication.
audio_interface: The audio interface to use for input and output.
client_tools: The client tools to use for the conversation.
callback_agent_response: Callback for agent responses.
callback_agent_response_correction: Callback for agent response corrections.
First argument is the original response (previously given to
callback_agent_response), second argument is the corrected response.
callback_user_transcript: Callback for user transcripts.
callback_latency_measurement: Callback for latency measurements (in milliseconds).
| __init__ | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/conversation.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversation.py | MIT |
def start_session(self):
    """Begin the conversation in a background thread.

    The session keeps running until ``end_session`` is called.
    """
    # Authenticated agents need a signed URL; public agents use the plain one.
    if self.requires_auth:
        ws_url = self._get_signed_url()
    else:
        ws_url = self._get_wss_url()
    self._thread = threading.Thread(target=self._run, args=(ws_url,))
    self._thread.start()
Will run in background thread until `end_session` is called.
| start_session | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/conversation.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversation.py | MIT |
def end_session(self):
    """Ends the conversation session and cleans up resources."""
    # Stop audio I/O first so no further frames are captured or played.
    self.audio_interface.stop()
    # Shut down the background tool event loop and its thread pool.
    self.client_tools.stop()
    # Drop the websocket reference and signal the session thread to stop.
    self._ws = None
    self._should_stop.set()
def wait_for_session_end(self) -> Optional[str]:
    """Block until the session thread has finished.

    Call ``end_session`` first, otherwise this will block indefinitely.

    Returns:
        The conversation ID, if one was assigned.

    Raises:
        RuntimeError: If the session was never started.
    """
    thread = self._thread
    if thread is None:
        raise RuntimeError("Session not started.")
    thread.join()
    return self._conversation_id
You must call `end_session` before calling this method, otherwise it will block.
Returns the conversation ID, if available.
| wait_for_session_end | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/conversation.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversation.py | MIT |
def send_user_message(self, text: str):
    """Deliver a user text message to the agent.

    Args:
        text: The text message to send to the agent.

    Raises:
        RuntimeError: If the session is not active or websocket is not connected.
    """
    if not self._ws:
        raise RuntimeError("Session not started or websocket not connected.")
    message = UserMessageClientToOrchestratorEvent(text=text)
    try:
        self._ws.send(json.dumps(message.to_dict()))
    except Exception as exc:
        # Surface the failure to the caller after logging it.
        print(f"Error sending user message: {exc}")
        raise
Args:
text: The text message to send to the agent.
Raises:
RuntimeError: If the session is not active or websocket is not connected.
| send_user_message | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/conversation.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversation.py | MIT |
def register_user_activity(self):
    """Ping the orchestrator to reset the session timeout timer.

    Raises:
        RuntimeError: If the session is not active or websocket is not connected.
    """
    if not self._ws:
        raise RuntimeError("Session not started or websocket not connected.")
    ping = UserActivityClientToOrchestratorEvent()
    try:
        self._ws.send(json.dumps(ping.to_dict()))
    except Exception as exc:
        # Surface the failure to the caller after logging it.
        print(f"Error registering user activity: {exc}")
        raise
This sends a ping to the orchestrator to reset the timeout timer.
Raises:
RuntimeError: If the session is not active or websocket is not connected.
| register_user_activity | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/conversation.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversation.py | MIT |
def send_contextual_update(self, content: str):
    """Send non-interrupting contextual information to the conversation.

    Contextual updates refresh the conversation state on the server
    without directly prompting the agent.

    Args:
        content: The contextual information to send to the conversation.

    Raises:
        RuntimeError: If the session is not active or websocket is not connected.
    """
    if not self._ws:
        raise RuntimeError("Session not started or websocket not connected.")
    update = ContextualUpdateClientToOrchestratorEvent(content=content)
    try:
        self._ws.send(json.dumps(update.to_dict()))
    except Exception as exc:
        # Surface the failure to the caller after logging it.
        print(f"Error sending contextual update: {exc}")
        raise
Contextual updates are non-interrupting content that is sent to the server
to update the conversation state without directly prompting the agent.
Args:
content: The contextual information to send to the conversation.
Raises:
RuntimeError: If the session is not active or websocket is not connected.
| send_contextual_update | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/conversation.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversation.py | MIT |
def add_to_knowledge_base(
    self,
    *,
    name: typing.Optional[str] = OMIT,
    url: typing.Optional[str] = OMIT,
    file: typing.Optional[core.File] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[AddKnowledgeBaseResponseModel]:
    """
    Upload a file or webpage URL to create a knowledge base document. <br> <Note> After creating the document, update the agent's knowledge base by calling [Update agent](/docs/conversational-ai/api-reference/agents/update-agent). </Note>

    Parameters
    ----------
    name : typing.Optional[str]
        A custom, human-readable name for the document.

    url : typing.Optional[str]
        URL to a page of documentation that the agent will have access to in order to interact with users.

    file : typing.Optional[core.File]
        See core.File for more documentation

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[AddKnowledgeBaseResponseModel]
        Successful Response
    """
    # NOTE(review): force_multipart=True presumably forces multipart/form-data
    # encoding even when no file part is present — confirm against the core client.
    _response = self._client_wrapper.httpx_client.request(
        "v1/convai/knowledge-base",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        data={
            "name": name,
            "url": url,
        },
        files={
            **({"file": file} if file is not None else {}),
        },
        request_options=request_options,
        omit=OMIT,
        force_multipart=True,
    )
    try:
        # 2xx: deserialize the body into the typed response model.
        if 200 <= _response.status_code < 300:
            _data = typing.cast(
                AddKnowledgeBaseResponseModel,
                construct_type(
                    type_=AddKnowledgeBaseResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return HttpResponse(response=_response, data=_data)
        # 422: the request failed server-side validation.
        if _response.status_code == 422:
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Body was not valid JSON; report the raw text in the error instead.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Any other status: raise a generic API error with the parsed JSON body.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
Upload a file or webpage URL to create a knowledge base document. <br> <Note> After creating the document, update the agent's knowledge base by calling [Update agent](/docs/conversational-ai/api-reference/agents/update-agent). </Note>
Parameters
----------
name : typing.Optional[str]
A custom, human-readable name for the document.
url : typing.Optional[str]
URL to a page of documentation that the agent will have access to in order to interact with users.
file : typing.Optional[core.File]
See core.File for more documentation
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[AddKnowledgeBaseResponseModel]
Successful Response
| add_to_knowledge_base | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/raw_client.py | MIT |
def get_document_rag_indexes(
    self, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[RagDocumentIndexesResponseModel]:
    """
    Provides information about all RAG indexes of the specified knowledgebase document.

    Parameters
    ----------
    documentation_id : str
        The id of a document from the knowledge base. This is returned on document addition.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[RagDocumentIndexesResponseModel]
        Successful Response
    """
    # jsonable_encoder safely serializes the path parameter before interpolation.
    _response = self._client_wrapper.httpx_client.request(
        f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}/rag-index",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        request_options=request_options,
    )
    try:
        # 2xx: deserialize the body into the typed response model.
        if 200 <= _response.status_code < 300:
            _data = typing.cast(
                RagDocumentIndexesResponseModel,
                construct_type(
                    type_=RagDocumentIndexesResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return HttpResponse(response=_response, data=_data)
        # 422: the request failed server-side validation.
        if _response.status_code == 422:
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Body was not valid JSON; report the raw text in the error instead.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Any other status: raise a generic API error with the parsed JSON body.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
Provides information about all RAG indexes of the specified knowledgebase document.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[RagDocumentIndexesResponseModel]
Successful Response
| get_document_rag_indexes | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/raw_client.py | MIT |
def delete_document_rag_index(
    self, documentation_id: str, rag_index_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[RagDocumentIndexResponseModel]:
    """
    Delete RAG index for the knowledgebase document.

    Parameters
    ----------
    documentation_id : str
        The id of a document from the knowledge base. This is returned on document addition.

    rag_index_id : str
        The id of RAG index of document from the knowledge base.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[RagDocumentIndexResponseModel]
        Successful Response
    """
    # jsonable_encoder safely serializes both path parameters before interpolation.
    _response = self._client_wrapper.httpx_client.request(
        f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}/rag-index/{jsonable_encoder(rag_index_id)}",
        base_url=self._client_wrapper.get_environment().base,
        method="DELETE",
        request_options=request_options,
    )
    try:
        # 2xx: deserialize the body into the typed response model.
        if 200 <= _response.status_code < 300:
            _data = typing.cast(
                RagDocumentIndexResponseModel,
                construct_type(
                    type_=RagDocumentIndexResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return HttpResponse(response=_response, data=_data)
        # 422: the request failed server-side validation.
        if _response.status_code == 422:
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Body was not valid JSON; report the raw text in the error instead.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Any other status: raise a generic API error with the parsed JSON body.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
Delete RAG index for the knowledgebase document.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
rag_index_id : str
The id of RAG index of document from the knowledge base.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[RagDocumentIndexResponseModel]
Successful Response
| delete_document_rag_index | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/raw_client.py | MIT |
def rag_index_overview(
    self, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[RagIndexOverviewResponseModel]:
    """
    Report the total size and related statistics of the RAG indexes backing
    knowledge base documents.

    Parameters
    ----------
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[RagIndexOverviewResponseModel]
        Successful Response
    """
    raw = self._client_wrapper.httpx_client.request(
        "v1/convai/knowledge-base/rag-index",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        request_options=request_options,
    )
    status = raw.status_code
    try:
        if 200 <= status < 300:
            parsed = typing.cast(
                RagIndexOverviewResponseModel,
                construct_type(
                    type_=RagIndexOverviewResponseModel,  # type: ignore
                    object_=raw.json(),
                ),
            )
            return HttpResponse(response=raw, data=parsed)
        if status == 422:
            raise UnprocessableEntityError(
                headers=dict(raw.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=raw.json(),
                    ),
                ),
            )
        # Unexpected status: capture the JSON body (if any) for the generic error.
        error_body = raw.json()
    except JSONDecodeError:
        # Body was not JSON; surface the raw text instead.
        raise ApiError(status_code=status, headers=dict(raw.headers), body=raw.text)
    raise ApiError(status_code=status, headers=dict(raw.headers), body=error_body)
Provides total size and other information of RAG indexes used by knowledgebase documents
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[RagIndexOverviewResponseModel]
Successful Response
| rag_index_overview | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/raw_client.py | MIT |
def update_secret(
    self, secret_id: str, *, name: str, value: str, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[PostWorkspaceSecretResponseModel]:
    """
    Update an existing secret for the workspace.

    Parameters
    ----------
    secret_id : str
        Identifier of the secret to update.
    name : str
        New name for the secret.
    value : str
        New secret value.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[PostWorkspaceSecretResponseModel]
        Successful Response
    """
    raw = self._client_wrapper.httpx_client.request(
        f"v1/convai/secrets/{jsonable_encoder(secret_id)}",
        base_url=self._client_wrapper.get_environment().base,
        method="PATCH",
        json={
            "name": name,
            "value": value,
            # Literal discriminator tag required by the API payload schema.
            "type": "update",
        },
        headers={
            "content-type": "application/json",
        },
        request_options=request_options,
        omit=OMIT,
    )
    status = raw.status_code
    try:
        if 200 <= status < 300:
            parsed = typing.cast(
                PostWorkspaceSecretResponseModel,
                construct_type(
                    type_=PostWorkspaceSecretResponseModel,  # type: ignore
                    object_=raw.json(),
                ),
            )
            return HttpResponse(response=raw, data=parsed)
        if status == 422:
            raise UnprocessableEntityError(
                headers=dict(raw.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=raw.json(),
                    ),
                ),
            )
        error_body = raw.json()
    except JSONDecodeError:
        # Non-JSON body: fall back to the raw response text.
        raise ApiError(status_code=status, headers=dict(raw.headers), body=raw.text)
    raise ApiError(status_code=status, headers=dict(raw.headers), body=error_body)
Update an existing secret for the workspace
Parameters
----------
secret_id : str
name : str
value : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[PostWorkspaceSecretResponseModel]
Successful Response
| update_secret | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/raw_client.py | MIT |
async def add_to_knowledge_base(
    self,
    *,
    name: typing.Optional[str] = OMIT,
    url: typing.Optional[str] = OMIT,
    file: typing.Optional[core.File] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[AddKnowledgeBaseResponseModel]:
    """
    Upload a file or webpage URL to create a knowledge base document. <br> <Note> After creating the document, update the agent's knowledge base by calling [Update agent](/docs/conversational-ai/api-reference/agents/update-agent). </Note>

    Parameters
    ----------
    name : typing.Optional[str]
        A custom, human-readable name for the document.
    url : typing.Optional[str]
        URL to a page of documentation that the agent will have access to in order to interact with users.
    file : typing.Optional[core.File]
        See core.File for more documentation
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[AddKnowledgeBaseResponseModel]
        Successful Response
    """
    # Attach the file part only when one was actually provided.
    file_parts = {"file": file} if file is not None else {}
    raw = await self._client_wrapper.httpx_client.request(
        "v1/convai/knowledge-base",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        data={
            "name": name,
            "url": url,
        },
        files=file_parts,
        request_options=request_options,
        omit=OMIT,
        force_multipart=True,
    )
    status = raw.status_code
    try:
        if 200 <= status < 300:
            parsed = typing.cast(
                AddKnowledgeBaseResponseModel,
                construct_type(
                    type_=AddKnowledgeBaseResponseModel,  # type: ignore
                    object_=raw.json(),
                ),
            )
            return AsyncHttpResponse(response=raw, data=parsed)
        if status == 422:
            raise UnprocessableEntityError(
                headers=dict(raw.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=raw.json(),
                    ),
                ),
            )
        error_body = raw.json()
    except JSONDecodeError:
        # Non-JSON body: fall back to the raw response text.
        raise ApiError(status_code=status, headers=dict(raw.headers), body=raw.text)
    raise ApiError(status_code=status, headers=dict(raw.headers), body=error_body)
Upload a file or webpage URL to create a knowledge base document. <br> <Note> After creating the document, update the agent's knowledge base by calling [Update agent](/docs/conversational-ai/api-reference/agents/update-agent). </Note>
Parameters
----------
name : typing.Optional[str]
A custom, human-readable name for the document.
url : typing.Optional[str]
URL to a page of documentation that the agent will have access to in order to interact with users.
file : typing.Optional[core.File]
See core.File for more documentation
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[AddKnowledgeBaseResponseModel]
Successful Response
| add_to_knowledge_base | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/raw_client.py | MIT |
async def get_document_rag_indexes(
    self, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[RagDocumentIndexesResponseModel]:
    """
    List every RAG index of the specified knowledge base document.

    Parameters
    ----------
    documentation_id : str
        The id of a document from the knowledge base. This is returned on document addition.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[RagDocumentIndexesResponseModel]
        Successful Response
    """
    raw = await self._client_wrapper.httpx_client.request(
        f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}/rag-index",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        request_options=request_options,
    )
    status = raw.status_code
    try:
        if 200 <= status < 300:
            parsed = typing.cast(
                RagDocumentIndexesResponseModel,
                construct_type(
                    type_=RagDocumentIndexesResponseModel,  # type: ignore
                    object_=raw.json(),
                ),
            )
            return AsyncHttpResponse(response=raw, data=parsed)
        if status == 422:
            raise UnprocessableEntityError(
                headers=dict(raw.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=raw.json(),
                    ),
                ),
            )
        error_body = raw.json()
    except JSONDecodeError:
        # Non-JSON body: fall back to the raw response text.
        raise ApiError(status_code=status, headers=dict(raw.headers), body=raw.text)
    raise ApiError(status_code=status, headers=dict(raw.headers), body=error_body)
Provides information about all RAG indexes of the specified knowledgebase document.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[RagDocumentIndexesResponseModel]
Successful Response
| get_document_rag_indexes | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/raw_client.py | MIT |
async def delete_document_rag_index(
    self, documentation_id: str, rag_index_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[RagDocumentIndexResponseModel]:
    """
    Delete one RAG index belonging to a knowledge base document.

    Parameters
    ----------
    documentation_id : str
        The id of a document from the knowledge base. This is returned on document addition.
    rag_index_id : str
        The id of RAG index of document from the knowledge base.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[RagDocumentIndexResponseModel]
        Successful Response
    """
    raw = await self._client_wrapper.httpx_client.request(
        f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}/rag-index/{jsonable_encoder(rag_index_id)}",
        base_url=self._client_wrapper.get_environment().base,
        method="DELETE",
        request_options=request_options,
    )
    status = raw.status_code
    try:
        if 200 <= status < 300:
            parsed = typing.cast(
                RagDocumentIndexResponseModel,
                construct_type(
                    type_=RagDocumentIndexResponseModel,  # type: ignore
                    object_=raw.json(),
                ),
            )
            return AsyncHttpResponse(response=raw, data=parsed)
        if status == 422:
            raise UnprocessableEntityError(
                headers=dict(raw.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=raw.json(),
                    ),
                ),
            )
        error_body = raw.json()
    except JSONDecodeError:
        # Non-JSON body: fall back to the raw response text.
        raise ApiError(status_code=status, headers=dict(raw.headers), body=raw.text)
    raise ApiError(status_code=status, headers=dict(raw.headers), body=error_body)
Delete RAG index for the knowledgebase document.
Parameters
----------
documentation_id : str
The id of a document from the knowledge base. This is returned on document addition.
rag_index_id : str
The id of RAG index of document from the knowledge base.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[RagDocumentIndexResponseModel]
Successful Response
| delete_document_rag_index | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/raw_client.py | MIT |
async def rag_index_overview(
    self, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[RagIndexOverviewResponseModel]:
    """
    Report the total size and related statistics of the RAG indexes backing
    knowledge base documents.

    Parameters
    ----------
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[RagIndexOverviewResponseModel]
        Successful Response
    """
    raw = await self._client_wrapper.httpx_client.request(
        "v1/convai/knowledge-base/rag-index",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        request_options=request_options,
    )
    status = raw.status_code
    try:
        if 200 <= status < 300:
            parsed = typing.cast(
                RagIndexOverviewResponseModel,
                construct_type(
                    type_=RagIndexOverviewResponseModel,  # type: ignore
                    object_=raw.json(),
                ),
            )
            return AsyncHttpResponse(response=raw, data=parsed)
        if status == 422:
            raise UnprocessableEntityError(
                headers=dict(raw.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=raw.json(),
                    ),
                ),
            )
        error_body = raw.json()
    except JSONDecodeError:
        # Non-JSON body: fall back to the raw response text.
        raise ApiError(status_code=status, headers=dict(raw.headers), body=raw.text)
    raise ApiError(status_code=status, headers=dict(raw.headers), body=error_body)
Provides the total size and other information about the RAG indexes used by knowledge base documents
Parameters
----------
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[RagIndexOverviewResponseModel]
Successful Response
| rag_index_overview | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/raw_client.py | MIT |
async def update_secret(
    self, secret_id: str, *, name: str, value: str, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[PostWorkspaceSecretResponseModel]:
    """
    Update an existing secret for the workspace.

    Parameters
    ----------
    secret_id : str
        Identifier of the secret to update.
    name : str
        New name for the secret.
    value : str
        New secret value.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[PostWorkspaceSecretResponseModel]
        Successful Response
    """
    raw = await self._client_wrapper.httpx_client.request(
        f"v1/convai/secrets/{jsonable_encoder(secret_id)}",
        base_url=self._client_wrapper.get_environment().base,
        method="PATCH",
        json={
            "name": name,
            "value": value,
            # Literal discriminator tag required by the API payload schema.
            "type": "update",
        },
        headers={
            "content-type": "application/json",
        },
        request_options=request_options,
        omit=OMIT,
    )
    status = raw.status_code
    try:
        if 200 <= status < 300:
            parsed = typing.cast(
                PostWorkspaceSecretResponseModel,
                construct_type(
                    type_=PostWorkspaceSecretResponseModel,  # type: ignore
                    object_=raw.json(),
                ),
            )
            return AsyncHttpResponse(response=raw, data=parsed)
        if status == 422:
            raise UnprocessableEntityError(
                headers=dict(raw.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=raw.json(),
                    ),
                ),
            )
        error_body = raw.json()
    except JSONDecodeError:
        # Non-JSON body: fall back to the raw response text.
        raise ApiError(status_code=status, headers=dict(raw.headers), body=raw.text)
    raise ApiError(status_code=status, headers=dict(raw.headers), body=error_body)
Update an existing secret for the workspace
Parameters
----------
secret_id : str
name : str
value : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[PostWorkspaceSecretResponseModel]
Successful Response
| update_secret | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/raw_client.py | MIT |
def create(
    self,
    *,
    conversation_config: ConversationalConfig,
    platform_settings: typing.Optional[AgentPlatformSettingsRequestModel] = OMIT,
    name: typing.Optional[str] = OMIT,
    tags: typing.Optional[typing.Sequence[str]] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> CreateAgentResponseModel:
    """
    Create an agent from a config object.

    Parameters
    ----------
    conversation_config : ConversationalConfig
        Conversation configuration for an agent
    platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
        Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
    name : typing.Optional[str]
        A name to make the agent easier to find
    tags : typing.Optional[typing.Sequence[str]]
        Tags to help classify and filter the agent
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    CreateAgentResponseModel
        Successful Response
    """
    # Delegate to the raw client and unwrap the typed payload.
    return self._raw_client.create(
        conversation_config=conversation_config,
        platform_settings=platform_settings,
        name=name,
        tags=tags,
        request_options=request_options,
    ).data
Create an agent from a config object
Parameters
----------
conversation_config : ConversationalConfig
Conversation configuration for an agent
platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
name : typing.Optional[str]
A name to make the agent easier to find
tags : typing.Optional[typing.Sequence[str]]
Tags to help classify and filter the agent
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
CreateAgentResponseModel
Successful Response
Examples
--------
from elevenlabs import ConversationalConfig, ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.agents.create(
conversation_config=ConversationalConfig(),
)
| create | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/client.py | MIT |
def update(
    self,
    agent_id: str,
    *,
    conversation_config: typing.Optional[ConversationalConfig] = OMIT,
    platform_settings: typing.Optional[AgentPlatformSettingsRequestModel] = OMIT,
    name: typing.Optional[str] = OMIT,
    tags: typing.Optional[typing.Sequence[str]] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> GetAgentResponseModel:
    """
    Patch an agent's settings; only the provided fields are changed.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.
    conversation_config : typing.Optional[ConversationalConfig]
        Conversation configuration for an agent
    platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
        Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
    name : typing.Optional[str]
        A name to make the agent easier to find
    tags : typing.Optional[typing.Sequence[str]]
        Tags to help classify and filter the agent
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    GetAgentResponseModel
        Successful Response
    """
    # Delegate to the raw client and unwrap the typed payload.
    return self._raw_client.update(
        agent_id,
        conversation_config=conversation_config,
        platform_settings=platform_settings,
        name=name,
        tags=tags,
        request_options=request_options,
    ).data
Patches an Agent settings
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
conversation_config : typing.Optional[ConversationalConfig]
Conversation configuration for an agent
platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
name : typing.Optional[str]
A name to make the agent easier to find
tags : typing.Optional[typing.Sequence[str]]
Tags to help classify and filter the agent
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetAgentResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.agents.update(
agent_id="21m00Tcm4TlvDq8ikWAM",
)
| update | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/client.py | MIT |
def list(
    self,
    *,
    cursor: typing.Optional[str] = None,
    page_size: typing.Optional[int] = None,
    search: typing.Optional[str] = None,
    request_options: typing.Optional[RequestOptions] = None,
) -> GetAgentsPageResponseModel:
    """
    Return one page of the workspace's agents and their metadata.

    Parameters
    ----------
    cursor : typing.Optional[str]
        Used for fetching next page. Cursor is returned in the response.
    page_size : typing.Optional[int]
        How many Agents to return at maximum. Can not exceed 100, defaults to 30.
    search : typing.Optional[str]
        Search by agents name.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    GetAgentsPageResponseModel
        Successful Response
    """
    # Delegate to the raw client and unwrap the typed payload.
    return self._raw_client.list(
        cursor=cursor,
        page_size=page_size,
        search=search,
        request_options=request_options,
    ).data
Returns a list of your agents and their metadata.
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
How many Agents to return at maximum. Can not exceed 100, defaults to 30.
search : typing.Optional[str]
Search by agents name.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetAgentsPageResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.agents.list()
| list | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/client.py | MIT |
def simulate_conversation(
    self,
    agent_id: str,
    *,
    simulation_specification: ConversationSimulationSpecification,
    extra_evaluation_criteria: typing.Optional[typing.Sequence[PromptEvaluationCriteria]] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> AgentSimulatedChatTestResponseModel:
    """
    Run a conversation between the agent and a simulated user.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.
    simulation_specification : ConversationSimulationSpecification
        A specification detailing how the conversation should be simulated
    extra_evaluation_criteria : typing.Optional[typing.Sequence[PromptEvaluationCriteria]]
        A list of evaluation criteria to test
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AgentSimulatedChatTestResponseModel
        Successful Response
    """
    # Delegate to the raw client and unwrap the typed payload.
    return self._raw_client.simulate_conversation(
        agent_id,
        simulation_specification=simulation_specification,
        extra_evaluation_criteria=extra_evaluation_criteria,
        request_options=request_options,
    ).data
Run a conversation between the agent and a simulated user.
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
simulation_specification : ConversationSimulationSpecification
A specification detailing how the conversation should be simulated
extra_evaluation_criteria : typing.Optional[typing.Sequence[PromptEvaluationCriteria]]
A list of evaluation criteria to test
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AgentSimulatedChatTestResponseModel
Successful Response
Examples
--------
from elevenlabs import (
AgentConfig,
ConversationSimulationSpecification,
ElevenLabs,
)
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.agents.simulate_conversation(
agent_id="21m00Tcm4TlvDq8ikWAM",
simulation_specification=ConversationSimulationSpecification(
simulated_user_config=AgentConfig(
first_message="Hello, how can I help you today?",
language="en",
),
),
)
| simulate_conversation | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/client.py | MIT |
def simulate_conversation_stream(
    self,
    agent_id: str,
    *,
    simulation_specification: ConversationSimulationSpecification,
    extra_evaluation_criteria: typing.Optional[typing.Sequence[PromptEvaluationCriteria]] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> None:
    """
    Run a conversation between the agent and a simulated user and stream back the
    response. The response is streamed as partial lists of messages that should be
    concatenated; once the conversation has completed, a single final message
    containing the conversation analysis is sent.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.
    simulation_specification : ConversationSimulationSpecification
        A specification detailing how the conversation should be simulated
    extra_evaluation_criteria : typing.Optional[typing.Sequence[PromptEvaluationCriteria]]
        A list of evaluation criteria to test
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    None
    """
    # NOTE(review): the annotation is ``None`` yet ``.data`` is returned; this is
    # the generated-code convention for endpoints with no typed payload —
    # presumably ``.data`` is None here; confirm against the raw client.
    return self._raw_client.simulate_conversation_stream(
        agent_id,
        simulation_specification=simulation_specification,
        extra_evaluation_criteria=extra_evaluation_criteria,
        request_options=request_options,
    ).data
Run a conversation between the agent and a simulated user and stream back the response. The response is streamed back as partial lists of messages that should be concatenated, and once the conversation has completed, a single final message with the conversation analysis will be sent.
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
simulation_specification : ConversationSimulationSpecification
A specification detailing how the conversation should be simulated
extra_evaluation_criteria : typing.Optional[typing.Sequence[PromptEvaluationCriteria]]
A list of evaluation criteria to test
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
None
Examples
--------
from elevenlabs import (
AgentConfig,
ConversationSimulationSpecification,
ElevenLabs,
)
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.agents.simulate_conversation_stream(
agent_id="21m00Tcm4TlvDq8ikWAM",
simulation_specification=ConversationSimulationSpecification(
simulated_user_config=AgentConfig(
first_message="Hello, how can I help you today?",
language="en",
),
),
)
| simulate_conversation_stream | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/client.py | MIT |
async def create(
    self,
    *,
    conversation_config: ConversationalConfig,
    platform_settings: typing.Optional[AgentPlatformSettingsRequestModel] = OMIT,
    name: typing.Optional[str] = OMIT,
    tags: typing.Optional[typing.Sequence[str]] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> CreateAgentResponseModel:
    """
    Create an agent from a config object.

    Parameters
    ----------
    conversation_config : ConversationalConfig
        Conversation configuration for an agent
    platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
        Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
    name : typing.Optional[str]
        A name to make the agent easier to find
    tags : typing.Optional[typing.Sequence[str]]
        Tags to help classify and filter the agent
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    CreateAgentResponseModel
        Successful Response
    """
    # Delegate to the raw client and unwrap the typed payload.
    raw_response = await self._raw_client.create(
        conversation_config=conversation_config,
        platform_settings=platform_settings,
        name=name,
        tags=tags,
        request_options=request_options,
    )
    return raw_response.data
Create an agent from a config object
Parameters
----------
conversation_config : ConversationalConfig
Conversation configuration for an agent
platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
name : typing.Optional[str]
A name to make the agent easier to find
tags : typing.Optional[typing.Sequence[str]]
Tags to help classify and filter the agent
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
CreateAgentResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs, ConversationalConfig
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.agents.create(
conversation_config=ConversationalConfig(),
)
asyncio.run(main())
| create | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/client.py | MIT |
async def get(
    self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> GetAgentResponseModel:
    """
    Retrieve the configuration of an agent.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    GetAgentResponseModel
        Successful Response
    """
    # Delegate to the raw client and unwrap the typed payload.
    return (await self._raw_client.get(agent_id, request_options=request_options)).data
Retrieve config for an agent
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetAgentResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.agents.get(
agent_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
| get | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/client.py | MIT |
async def delete(self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
    """Delete an existing agent.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    None
    """
    # Delegate to the raw client; the endpoint returns no payload on success.
    raw_response = await self._raw_client.delete(agent_id, request_options=request_options)
    return raw_response.data
Delete an agent
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
None
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.agents.delete(
agent_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
| delete | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/client.py | MIT |
async def update(
    self,
    agent_id: str,
    *,
    conversation_config: typing.Optional[ConversationalConfig] = OMIT,
    platform_settings: typing.Optional[AgentPlatformSettingsRequestModel] = OMIT,
    name: typing.Optional[str] = OMIT,
    tags: typing.Optional[typing.Sequence[str]] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> GetAgentResponseModel:
    """Patch the settings of an existing agent.

    Only the fields that are explicitly provided are sent; parameters left at
    the OMIT sentinel are excluded from the request body.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    conversation_config : typing.Optional[ConversationalConfig]
        Conversation configuration for an agent

    platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
        Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.

    name : typing.Optional[str]
        A name to make the agent easier to find

    tags : typing.Optional[typing.Sequence[str]]
        Tags to help classify and filter the agent

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    GetAgentResponseModel
        Successful Response
    """
    # Forward every field to the raw client and unwrap the parsed response model.
    raw_response = await self._raw_client.update(
        agent_id,
        conversation_config=conversation_config,
        platform_settings=platform_settings,
        name=name,
        tags=tags,
        request_options=request_options,
    )
    return raw_response.data
Patches an Agent settings
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
conversation_config : typing.Optional[ConversationalConfig]
Conversation configuration for an agent
platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
name : typing.Optional[str]
A name to make the agent easier to find
tags : typing.Optional[typing.Sequence[str]]
Tags to help classify and filter the agent
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetAgentResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.agents.update(
agent_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
| update | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/client.py | MIT |
async def list(
    self,
    *,
    cursor: typing.Optional[str] = None,
    page_size: typing.Optional[int] = None,
    search: typing.Optional[str] = None,
    request_options: typing.Optional[RequestOptions] = None,
) -> GetAgentsPageResponseModel:
    """List the agents in the workspace together with their metadata.

    Parameters
    ----------
    cursor : typing.Optional[str]
        Used for fetching next page. Cursor is returned in the response.

    page_size : typing.Optional[int]
        How many Agents to return at maximum. Can not exceed 100, defaults to 30.

    search : typing.Optional[str]
        Search by agents name.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    GetAgentsPageResponseModel
        Successful Response
    """
    # Pagination and filtering are handled server-side; just relay the query parameters.
    raw_response = await self._raw_client.list(
        cursor=cursor, page_size=page_size, search=search, request_options=request_options
    )
    return raw_response.data
Returns a list of your agents and their metadata.
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
How many Agents to return at maximum. Can not exceed 100, defaults to 30.
search : typing.Optional[str]
Search by agents name.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetAgentsPageResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.agents.list()
asyncio.run(main())
| list | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/client.py | MIT |
async def simulate_conversation(
    self,
    agent_id: str,
    *,
    simulation_specification: ConversationSimulationSpecification,
    extra_evaluation_criteria: typing.Optional[typing.Sequence[PromptEvaluationCriteria]] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> AgentSimulatedChatTestResponseModel:
    """Run a conversation between the agent and a simulated user.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    simulation_specification : ConversationSimulationSpecification
        A specification detailing how the conversation should be simulated

    extra_evaluation_criteria : typing.Optional[typing.Sequence[PromptEvaluationCriteria]]
        A list of evaluation criteria to test

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AgentSimulatedChatTestResponseModel
        Successful Response
    """
    # Hand the simulation request to the raw client and unwrap the typed result.
    raw_response = await self._raw_client.simulate_conversation(
        agent_id,
        simulation_specification=simulation_specification,
        extra_evaluation_criteria=extra_evaluation_criteria,
        request_options=request_options,
    )
    return raw_response.data
Run a conversation between the agent and a simulated user.
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
simulation_specification : ConversationSimulationSpecification
A specification detailing how the conversation should be simulated
extra_evaluation_criteria : typing.Optional[typing.Sequence[PromptEvaluationCriteria]]
A list of evaluation criteria to test
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AgentSimulatedChatTestResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import (
AgentConfig,
AsyncElevenLabs,
ConversationSimulationSpecification,
)
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.agents.simulate_conversation(
agent_id="21m00Tcm4TlvDq8ikWAM",
simulation_specification=ConversationSimulationSpecification(
simulated_user_config=AgentConfig(
first_message="Hello, how can I help you today?",
language="en",
),
),
)
asyncio.run(main())
| simulate_conversation | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/client.py | MIT |
async def simulate_conversation_stream(
    self,
    agent_id: str,
    *,
    simulation_specification: ConversationSimulationSpecification,
    extra_evaluation_criteria: typing.Optional[typing.Sequence[PromptEvaluationCriteria]] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> None:
    """Run a simulated conversation and stream back the response.

    The response is streamed as partial lists of messages that should be
    concatenated; once the conversation completes, a single final message
    containing the conversation analysis is sent.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    simulation_specification : ConversationSimulationSpecification
        A specification detailing how the conversation should be simulated

    extra_evaluation_criteria : typing.Optional[typing.Sequence[PromptEvaluationCriteria]]
        A list of evaluation criteria to test

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    None
    """
    # Delegate to the raw client; this endpoint yields no parsed return value here.
    raw_response = await self._raw_client.simulate_conversation_stream(
        agent_id,
        simulation_specification=simulation_specification,
        extra_evaluation_criteria=extra_evaluation_criteria,
        request_options=request_options,
    )
    return raw_response.data
Run a conversation between the agent and a simulated user and stream back the response. Response is streamed back as partial lists of messages that should be concatenated and once the conversation has complete a single final message with the conversation analysis will be sent.
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
simulation_specification : ConversationSimulationSpecification
A specification detailing how the conversation should be simulated
extra_evaluation_criteria : typing.Optional[typing.Sequence[PromptEvaluationCriteria]]
A list of evaluation criteria to test
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
None
Examples
--------
import asyncio
from elevenlabs import (
AgentConfig,
AsyncElevenLabs,
ConversationSimulationSpecification,
)
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.agents.simulate_conversation_stream(
agent_id="21m00Tcm4TlvDq8ikWAM",
simulation_specification=ConversationSimulationSpecification(
simulated_user_config=AgentConfig(
first_message="Hello, how can I help you today?",
language="en",
),
),
)
asyncio.run(main())
| simulate_conversation_stream | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/client.py | MIT |
def create(
    self,
    *,
    conversation_config: ConversationalConfig,
    platform_settings: typing.Optional[AgentPlatformSettingsRequestModel] = OMIT,
    name: typing.Optional[str] = OMIT,
    tags: typing.Optional[typing.Sequence[str]] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[CreateAgentResponseModel]:
    """
    Create an agent from a config object.

    Parameters
    ----------
    conversation_config : ConversationalConfig
        Conversation configuration for an agent

    platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
        Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.

    name : typing.Optional[str]
        A name to make the agent easier to find

    tags : typing.Optional[typing.Sequence[str]]
        Tags to help classify and filter the agent

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[CreateAgentResponseModel]
        Successful Response

    Raises
    ------
    UnprocessableEntityError
        On an HTTP 422 validation failure.
    ApiError
        On any other non-2xx status, or when the response body is not JSON.
    """
    # Serialize the request models so field aliases/annotation metadata are respected
    # on the wire; OMIT-valued fields are dropped from the JSON body.
    _response = self._client_wrapper.httpx_client.request(
        "v1/convai/agents/create",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        json={
            "conversation_config": convert_and_respect_annotation_metadata(
                object_=conversation_config, annotation=ConversationalConfig, direction="write"
            ),
            "platform_settings": convert_and_respect_annotation_metadata(
                object_=platform_settings, annotation=AgentPlatformSettingsRequestModel, direction="write"
            ),
            "name": name,
            "tags": tags,
        },
        headers={
            "content-type": "application/json",
        },
        request_options=request_options,
        omit=OMIT,
    )
    try:
        # Any 2xx status is treated as success; parse the body into the typed model.
        if 200 <= _response.status_code < 300:
            _data = typing.cast(
                CreateAgentResponseModel,
                construct_type(
                    type_=CreateAgentResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return HttpResponse(response=_response, data=_data)
        # 422: server-side request validation failed; surface the parsed error payload.
        if _response.status_code == 422:
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Body was not valid JSON — fall back to the raw text body in the error.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status with a JSON body: raise a generic ApiError carrying it.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
Create an agent from a config object
Parameters
----------
conversation_config : ConversationalConfig
Conversation configuration for an agent
platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
name : typing.Optional[str]
A name to make the agent easier to find
tags : typing.Optional[typing.Sequence[str]]
Tags to help classify and filter the agent
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[CreateAgentResponseModel]
Successful Response
| create | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/raw_client.py | MIT |
def get(
    self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[GetAgentResponseModel]:
    """
    Retrieve config for an agent.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[GetAgentResponseModel]
        Successful Response

    Raises
    ------
    UnprocessableEntityError
        On an HTTP 422 validation failure.
    ApiError
        On any other non-2xx status, or when the response body is not JSON.
    """
    # The agent id is URL-encoded via jsonable_encoder before being placed in the path.
    _response = self._client_wrapper.httpx_client.request(
        f"v1/convai/agents/{jsonable_encoder(agent_id)}",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        request_options=request_options,
    )
    try:
        # Any 2xx status is treated as success; parse the body into the typed model.
        if 200 <= _response.status_code < 300:
            _data = typing.cast(
                GetAgentResponseModel,
                construct_type(
                    type_=GetAgentResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return HttpResponse(response=_response, data=_data)
        # 422: server-side request validation failed; surface the parsed error payload.
        if _response.status_code == 422:
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Body was not valid JSON — fall back to the raw text body in the error.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status with a JSON body: raise a generic ApiError carrying it.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
Retrieve config for an agent
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetAgentResponseModel]
Successful Response
| get | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/raw_client.py | MIT |
def delete(self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]:
    """
    Delete an agent.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[None]

    Raises
    ------
    UnprocessableEntityError
        On an HTTP 422 validation failure.
    ApiError
        On any other non-2xx status, or when the response body is not JSON.
    """
    # The agent id is URL-encoded via jsonable_encoder before being placed in the path.
    _response = self._client_wrapper.httpx_client.request(
        f"v1/convai/agents/{jsonable_encoder(agent_id)}",
        base_url=self._client_wrapper.get_environment().base,
        method="DELETE",
        request_options=request_options,
    )
    try:
        # Any 2xx status is success; DELETE carries no payload, so data is None.
        if 200 <= _response.status_code < 300:
            return HttpResponse(response=_response, data=None)
        # 422: server-side request validation failed; surface the parsed error payload.
        if _response.status_code == 422:
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Body was not valid JSON — fall back to the raw text body in the error.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status with a JSON body: raise a generic ApiError carrying it.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
Delete an agent
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[None]
| delete | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/raw_client.py | MIT |
def update(
    self,
    agent_id: str,
    *,
    conversation_config: typing.Optional[ConversationalConfig] = OMIT,
    platform_settings: typing.Optional[AgentPlatformSettingsRequestModel] = OMIT,
    name: typing.Optional[str] = OMIT,
    tags: typing.Optional[typing.Sequence[str]] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[GetAgentResponseModel]:
    """
    Patches an Agent settings.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    conversation_config : typing.Optional[ConversationalConfig]
        Conversation configuration for an agent

    platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
        Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.

    name : typing.Optional[str]
        A name to make the agent easier to find

    tags : typing.Optional[typing.Sequence[str]]
        Tags to help classify and filter the agent

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[GetAgentResponseModel]
        Successful Response

    Raises
    ------
    UnprocessableEntityError
        On an HTTP 422 validation failure.
    ApiError
        On any other non-2xx status, or when the response body is not JSON.
    """
    # PATCH semantics: OMIT-valued fields are dropped from the JSON body, so only
    # explicitly supplied settings are modified server-side.
    _response = self._client_wrapper.httpx_client.request(
        f"v1/convai/agents/{jsonable_encoder(agent_id)}",
        base_url=self._client_wrapper.get_environment().base,
        method="PATCH",
        json={
            "conversation_config": convert_and_respect_annotation_metadata(
                object_=conversation_config, annotation=ConversationalConfig, direction="write"
            ),
            "platform_settings": convert_and_respect_annotation_metadata(
                object_=platform_settings, annotation=AgentPlatformSettingsRequestModel, direction="write"
            ),
            "name": name,
            "tags": tags,
        },
        headers={
            "content-type": "application/json",
        },
        request_options=request_options,
        omit=OMIT,
    )
    try:
        # Any 2xx status is treated as success; parse the body into the typed model.
        if 200 <= _response.status_code < 300:
            _data = typing.cast(
                GetAgentResponseModel,
                construct_type(
                    type_=GetAgentResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return HttpResponse(response=_response, data=_data)
        # 422: server-side request validation failed; surface the parsed error payload.
        if _response.status_code == 422:
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Body was not valid JSON — fall back to the raw text body in the error.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status with a JSON body: raise a generic ApiError carrying it.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
Patches an Agent settings
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
conversation_config : typing.Optional[ConversationalConfig]
Conversation configuration for an agent
platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
name : typing.Optional[str]
A name to make the agent easier to find
tags : typing.Optional[typing.Sequence[str]]
Tags to help classify and filter the agent
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetAgentResponseModel]
Successful Response
| update | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/raw_client.py | MIT |
def list(
    self,
    *,
    cursor: typing.Optional[str] = None,
    page_size: typing.Optional[int] = None,
    search: typing.Optional[str] = None,
    request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[GetAgentsPageResponseModel]:
    """
    Returns a list of your agents and their metadata.

    Parameters
    ----------
    cursor : typing.Optional[str]
        Used for fetching next page. Cursor is returned in the response.

    page_size : typing.Optional[int]
        How many Agents to return at maximum. Can not exceed 100, defaults to 30.

    search : typing.Optional[str]
        Search by agents name.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[GetAgentsPageResponseModel]
        Successful Response

    Raises
    ------
    UnprocessableEntityError
        On an HTTP 422 validation failure.
    ApiError
        On any other non-2xx status, or when the response body is not JSON.
    """
    # Pagination/filter options are passed as query parameters; None values are
    # simply left out of the query string by the underlying client.
    _response = self._client_wrapper.httpx_client.request(
        "v1/convai/agents",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        params={
            "cursor": cursor,
            "page_size": page_size,
            "search": search,
        },
        request_options=request_options,
    )
    try:
        # Any 2xx status is treated as success; parse the body into the typed model.
        if 200 <= _response.status_code < 300:
            _data = typing.cast(
                GetAgentsPageResponseModel,
                construct_type(
                    type_=GetAgentsPageResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return HttpResponse(response=_response, data=_data)
        # 422: server-side request validation failed; surface the parsed error payload.
        if _response.status_code == 422:
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Body was not valid JSON — fall back to the raw text body in the error.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status with a JSON body: raise a generic ApiError carrying it.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
Returns a list of your agents and their metadata.
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
How many Agents to return at maximum. Can not exceed 100, defaults to 30.
search : typing.Optional[str]
Search by agents name.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetAgentsPageResponseModel]
Successful Response
| list | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/raw_client.py | MIT |
def simulate_conversation(
    self,
    agent_id: str,
    *,
    simulation_specification: ConversationSimulationSpecification,
    extra_evaluation_criteria: typing.Optional[typing.Sequence[PromptEvaluationCriteria]] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[AgentSimulatedChatTestResponseModel]:
    """
    Run a conversation between the agent and a simulated user.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    simulation_specification : ConversationSimulationSpecification
        A specification detailing how the conversation should be simulated

    extra_evaluation_criteria : typing.Optional[typing.Sequence[PromptEvaluationCriteria]]
        A list of evaluation criteria to test

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[AgentSimulatedChatTestResponseModel]
        Successful Response

    Raises
    ------
    UnprocessableEntityError
        On an HTTP 422 validation failure.
    ApiError
        On any other non-2xx status, or when the response body is not JSON.
    """
    # Serialize the request models so field aliases/annotation metadata are respected
    # on the wire; OMIT-valued fields are dropped from the JSON body.
    _response = self._client_wrapper.httpx_client.request(
        f"v1/convai/agents/{jsonable_encoder(agent_id)}/simulate-conversation",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        json={
            "simulation_specification": convert_and_respect_annotation_metadata(
                object_=simulation_specification, annotation=ConversationSimulationSpecification, direction="write"
            ),
            "extra_evaluation_criteria": convert_and_respect_annotation_metadata(
                object_=extra_evaluation_criteria,
                annotation=typing.Sequence[PromptEvaluationCriteria],
                direction="write",
            ),
        },
        headers={
            "content-type": "application/json",
        },
        request_options=request_options,
        omit=OMIT,
    )
    try:
        # Any 2xx status is treated as success; parse the body into the typed model.
        if 200 <= _response.status_code < 300:
            _data = typing.cast(
                AgentSimulatedChatTestResponseModel,
                construct_type(
                    type_=AgentSimulatedChatTestResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return HttpResponse(response=_response, data=_data)
        # 422: server-side request validation failed; surface the parsed error payload.
        if _response.status_code == 422:
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Body was not valid JSON — fall back to the raw text body in the error.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status with a JSON body: raise a generic ApiError carrying it.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
Run a conversation between the agent and a simulated user.
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
simulation_specification : ConversationSimulationSpecification
A specification detailing how the conversation should be simulated
extra_evaluation_criteria : typing.Optional[typing.Sequence[PromptEvaluationCriteria]]
A list of evaluation criteria to test
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[AgentSimulatedChatTestResponseModel]
Successful Response
| simulate_conversation | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/raw_client.py | MIT |
def simulate_conversation_stream(
    self,
    agent_id: str,
    *,
    simulation_specification: ConversationSimulationSpecification,
    extra_evaluation_criteria: typing.Optional[typing.Sequence[PromptEvaluationCriteria]] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[None]:
    """
    Run a conversation between the agent and a simulated user and stream back the response. Response is streamed back as partial lists of messages that should be concatenated and once the conversation has complete a single final message with the conversation analysis will be sent.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    simulation_specification : ConversationSimulationSpecification
        A specification detailing how the conversation should be simulated

    extra_evaluation_criteria : typing.Optional[typing.Sequence[PromptEvaluationCriteria]]
        A list of evaluation criteria to test

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[None]

    Raises
    ------
    UnprocessableEntityError
        On an HTTP 422 validation failure.
    ApiError
        On any other non-2xx status, or when the response body is not JSON.
    """
    # NOTE(review): despite the "/stream" endpoint, this method returns data=None
    # rather than an iterator of chunks — confirm against the generator template.
    _response = self._client_wrapper.httpx_client.request(
        f"v1/convai/agents/{jsonable_encoder(agent_id)}/simulate-conversation/stream",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        json={
            "simulation_specification": convert_and_respect_annotation_metadata(
                object_=simulation_specification, annotation=ConversationSimulationSpecification, direction="write"
            ),
            "extra_evaluation_criteria": convert_and_respect_annotation_metadata(
                object_=extra_evaluation_criteria,
                annotation=typing.Sequence[PromptEvaluationCriteria],
                direction="write",
            ),
        },
        headers={
            "content-type": "application/json",
        },
        request_options=request_options,
        omit=OMIT,
    )
    try:
        # Any 2xx status is success; no parsed payload is attached to the response.
        if 200 <= _response.status_code < 300:
            return HttpResponse(response=_response, data=None)
        # 422: server-side request validation failed; surface the parsed error payload.
        if _response.status_code == 422:
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Body was not valid JSON — fall back to the raw text body in the error.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status with a JSON body: raise a generic ApiError carrying it.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
Run a conversation between the agent and a simulated user and stream back the response. Response is streamed back as partial lists of messages that should be concatenated and once the conversation has complete a single final message with the conversation analysis will be sent.
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
simulation_specification : ConversationSimulationSpecification
A specification detailing how the conversation should be simulated
extra_evaluation_criteria : typing.Optional[typing.Sequence[PromptEvaluationCriteria]]
A list of evaluation criteria to test
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[None]
| simulate_conversation_stream | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/raw_client.py | MIT |
async def create(
    self,
    *,
    conversation_config: ConversationalConfig,
    platform_settings: typing.Optional[AgentPlatformSettingsRequestModel] = OMIT,
    name: typing.Optional[str] = OMIT,
    tags: typing.Optional[typing.Sequence[str]] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[CreateAgentResponseModel]:
    """Create an agent from a config object.

    Parameters
    ----------
    conversation_config : ConversationalConfig
        Conversation configuration for an agent
    platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
        Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
    name : typing.Optional[str]
        A name to make the agent easier to find
    tags : typing.Optional[typing.Sequence[str]]
        Tags to help classify and filter the agent
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[CreateAgentResponseModel]
        Successful Response
    """
    # Serialize nested models up front so field aliases/annotations are respected on the wire.
    request_body = {
        "conversation_config": convert_and_respect_annotation_metadata(
            object_=conversation_config, annotation=ConversationalConfig, direction="write"
        ),
        "platform_settings": convert_and_respect_annotation_metadata(
            object_=platform_settings, annotation=AgentPlatformSettingsRequestModel, direction="write"
        ),
        "name": name,
        "tags": tags,
    }
    response = await self._client_wrapper.httpx_client.request(
        "v1/convai/agents/create",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        json=request_body,
        headers={"content-type": "application/json"},
        request_options=request_options,
        omit=OMIT,
    )
    try:
        status = response.status_code
        if 200 <= status < 300:
            parsed = typing.cast(
                CreateAgentResponseModel,
                construct_type(
                    type_=CreateAgentResponseModel,  # type: ignore
                    object_=response.json(),
                ),
            )
            return AsyncHttpResponse(response=response, data=parsed)
        if status == 422:
            # Validation failures come back as a structured error payload.
            raise UnprocessableEntityError(
                headers=dict(response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=response.json(),
                    ),
                ),
            )
        error_json = response.json()
    except JSONDecodeError:
        # Error body was not JSON: surface the raw text instead.
        raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=response.text)
    raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=error_json)
Create an agent from a config object
Parameters
----------
conversation_config : ConversationalConfig
Conversation configuration for an agent
platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
name : typing.Optional[str]
A name to make the agent easier to find
tags : typing.Optional[typing.Sequence[str]]
Tags to help classify and filter the agent
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[CreateAgentResponseModel]
Successful Response
| create | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/raw_client.py | MIT |
async def get(
    self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[GetAgentResponseModel]:
    """Retrieve config for an agent.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[GetAgentResponseModel]
        Successful Response
    """
    response = await self._client_wrapper.httpx_client.request(
        f"v1/convai/agents/{jsonable_encoder(agent_id)}",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        request_options=request_options,
    )
    try:
        status = response.status_code
        if 200 <= status < 300:
            parsed = typing.cast(
                GetAgentResponseModel,
                construct_type(
                    type_=GetAgentResponseModel,  # type: ignore
                    object_=response.json(),
                ),
            )
            return AsyncHttpResponse(response=response, data=parsed)
        if status == 422:
            # Validation failures come back as a structured error payload.
            raise UnprocessableEntityError(
                headers=dict(response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=response.json(),
                    ),
                ),
            )
        error_json = response.json()
    except JSONDecodeError:
        # Error body was not JSON: surface the raw text instead.
        raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=response.text)
    raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=error_json)
Retrieve config for an agent
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetAgentResponseModel]
Successful Response
| get | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/raw_client.py | MIT |
async def delete(
    self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[None]:
    """Delete an agent.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[None]
    """
    response = await self._client_wrapper.httpx_client.request(
        f"v1/convai/agents/{jsonable_encoder(agent_id)}",
        base_url=self._client_wrapper.get_environment().base,
        method="DELETE",
        request_options=request_options,
    )
    try:
        status = response.status_code
        if 200 <= status < 300:
            # Success carries no payload.
            return AsyncHttpResponse(response=response, data=None)
        if status == 422:
            raise UnprocessableEntityError(
                headers=dict(response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=response.json(),
                    ),
                ),
            )
        error_json = response.json()
    except JSONDecodeError:
        # Error body was not JSON: surface the raw text instead.
        raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=response.text)
    raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=error_json)
Delete an agent
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[None]
| delete | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/raw_client.py | MIT |
async def update(
    self,
    agent_id: str,
    *,
    conversation_config: typing.Optional[ConversationalConfig] = OMIT,
    platform_settings: typing.Optional[AgentPlatformSettingsRequestModel] = OMIT,
    name: typing.Optional[str] = OMIT,
    tags: typing.Optional[typing.Sequence[str]] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[GetAgentResponseModel]:
    """Patches an Agent settings.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.
    conversation_config : typing.Optional[ConversationalConfig]
        Conversation configuration for an agent
    platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
        Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
    name : typing.Optional[str]
        A name to make the agent easier to find
    tags : typing.Optional[typing.Sequence[str]]
        Tags to help classify and filter the agent
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[GetAgentResponseModel]
        Successful Response
    """
    # Serialize nested models up front so field aliases/annotations are respected on the wire.
    request_body = {
        "conversation_config": convert_and_respect_annotation_metadata(
            object_=conversation_config, annotation=ConversationalConfig, direction="write"
        ),
        "platform_settings": convert_and_respect_annotation_metadata(
            object_=platform_settings, annotation=AgentPlatformSettingsRequestModel, direction="write"
        ),
        "name": name,
        "tags": tags,
    }
    response = await self._client_wrapper.httpx_client.request(
        f"v1/convai/agents/{jsonable_encoder(agent_id)}",
        base_url=self._client_wrapper.get_environment().base,
        method="PATCH",
        json=request_body,
        headers={"content-type": "application/json"},
        request_options=request_options,
        omit=OMIT,
    )
    try:
        status = response.status_code
        if 200 <= status < 300:
            parsed = typing.cast(
                GetAgentResponseModel,
                construct_type(
                    type_=GetAgentResponseModel,  # type: ignore
                    object_=response.json(),
                ),
            )
            return AsyncHttpResponse(response=response, data=parsed)
        if status == 422:
            raise UnprocessableEntityError(
                headers=dict(response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=response.json(),
                    ),
                ),
            )
        error_json = response.json()
    except JSONDecodeError:
        # Error body was not JSON: surface the raw text instead.
        raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=response.text)
    raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=error_json)
Patches an Agent settings
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
conversation_config : typing.Optional[ConversationalConfig]
Conversation configuration for an agent
platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
name : typing.Optional[str]
A name to make the agent easier to find
tags : typing.Optional[typing.Sequence[str]]
Tags to help classify and filter the agent
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetAgentResponseModel]
Successful Response
| update | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/raw_client.py | MIT |
async def list(
    self,
    *,
    cursor: typing.Optional[str] = None,
    page_size: typing.Optional[int] = None,
    search: typing.Optional[str] = None,
    request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[GetAgentsPageResponseModel]:
    """Returns a list of your agents and their metadata.

    Parameters
    ----------
    cursor : typing.Optional[str]
        Used for fetching next page. Cursor is returned in the response.
    page_size : typing.Optional[int]
        How many Agents to return at maximum. Can not exceed 100, defaults to 30.
    search : typing.Optional[str]
        Search by agents name.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[GetAgentsPageResponseModel]
        Successful Response
    """
    query_params = {
        "cursor": cursor,
        "page_size": page_size,
        "search": search,
    }
    response = await self._client_wrapper.httpx_client.request(
        "v1/convai/agents",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        params=query_params,
        request_options=request_options,
    )
    try:
        status = response.status_code
        if 200 <= status < 300:
            parsed = typing.cast(
                GetAgentsPageResponseModel,
                construct_type(
                    type_=GetAgentsPageResponseModel,  # type: ignore
                    object_=response.json(),
                ),
            )
            return AsyncHttpResponse(response=response, data=parsed)
        if status == 422:
            raise UnprocessableEntityError(
                headers=dict(response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=response.json(),
                    ),
                ),
            )
        error_json = response.json()
    except JSONDecodeError:
        # Error body was not JSON: surface the raw text instead.
        raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=response.text)
    raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=error_json)
Returns a list of your agents and their metadata.
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
page_size : typing.Optional[int]
How many Agents to return at maximum. Can not exceed 100, defaults to 30.
search : typing.Optional[str]
Search by agents name.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetAgentsPageResponseModel]
Successful Response
| list | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/raw_client.py | MIT |
async def simulate_conversation(
    self,
    agent_id: str,
    *,
    simulation_specification: ConversationSimulationSpecification,
    extra_evaluation_criteria: typing.Optional[typing.Sequence[PromptEvaluationCriteria]] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[AgentSimulatedChatTestResponseModel]:
    """Run a conversation between the agent and a simulated user.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.
    simulation_specification : ConversationSimulationSpecification
        A specification detailing how the conversation should be simulated
    extra_evaluation_criteria : typing.Optional[typing.Sequence[PromptEvaluationCriteria]]
        A list of evaluation criteria to test
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[AgentSimulatedChatTestResponseModel]
        Successful Response
    """
    # Serialize nested models up front so field aliases/annotations are respected on the wire.
    request_body = {
        "simulation_specification": convert_and_respect_annotation_metadata(
            object_=simulation_specification, annotation=ConversationSimulationSpecification, direction="write"
        ),
        "extra_evaluation_criteria": convert_and_respect_annotation_metadata(
            object_=extra_evaluation_criteria,
            annotation=typing.Sequence[PromptEvaluationCriteria],
            direction="write",
        ),
    }
    response = await self._client_wrapper.httpx_client.request(
        f"v1/convai/agents/{jsonable_encoder(agent_id)}/simulate-conversation",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        json=request_body,
        headers={"content-type": "application/json"},
        request_options=request_options,
        omit=OMIT,
    )
    try:
        status = response.status_code
        if 200 <= status < 300:
            parsed = typing.cast(
                AgentSimulatedChatTestResponseModel,
                construct_type(
                    type_=AgentSimulatedChatTestResponseModel,  # type: ignore
                    object_=response.json(),
                ),
            )
            return AsyncHttpResponse(response=response, data=parsed)
        if status == 422:
            raise UnprocessableEntityError(
                headers=dict(response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=response.json(),
                    ),
                ),
            )
        error_json = response.json()
    except JSONDecodeError:
        # Error body was not JSON: surface the raw text instead.
        raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=response.text)
    raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=error_json)
Run a conversation between the agent and a simulated user.
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
simulation_specification : ConversationSimulationSpecification
A specification detailing how the conversation should be simulated
extra_evaluation_criteria : typing.Optional[typing.Sequence[PromptEvaluationCriteria]]
A list of evaluation criteria to test
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[AgentSimulatedChatTestResponseModel]
Successful Response
| simulate_conversation | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/raw_client.py | MIT |
async def simulate_conversation_stream(
    self,
    agent_id: str,
    *,
    simulation_specification: ConversationSimulationSpecification,
    extra_evaluation_criteria: typing.Optional[typing.Sequence[PromptEvaluationCriteria]] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[None]:
    """Run a conversation between the agent and a simulated user and stream back the response.

    Response is streamed back as partial lists of messages that should be concatenated
    and once the conversation has complete a single final message with the conversation
    analysis will be sent.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.
    simulation_specification : ConversationSimulationSpecification
        A specification detailing how the conversation should be simulated
    extra_evaluation_criteria : typing.Optional[typing.Sequence[PromptEvaluationCriteria]]
        A list of evaluation criteria to test
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[None]
    """
    # Serialize nested models up front so field aliases/annotations are respected on the wire.
    request_body = {
        "simulation_specification": convert_and_respect_annotation_metadata(
            object_=simulation_specification, annotation=ConversationSimulationSpecification, direction="write"
        ),
        "extra_evaluation_criteria": convert_and_respect_annotation_metadata(
            object_=extra_evaluation_criteria,
            annotation=typing.Sequence[PromptEvaluationCriteria],
            direction="write",
        ),
    }
    response = await self._client_wrapper.httpx_client.request(
        f"v1/convai/agents/{jsonable_encoder(agent_id)}/simulate-conversation/stream",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        json=request_body,
        headers={"content-type": "application/json"},
        request_options=request_options,
        omit=OMIT,
    )
    try:
        status = response.status_code
        if 200 <= status < 300:
            # Success carries no parsed payload here; callers consume the stream.
            return AsyncHttpResponse(response=response, data=None)
        if status == 422:
            raise UnprocessableEntityError(
                headers=dict(response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=response.json(),
                    ),
                ),
            )
        error_json = response.json()
    except JSONDecodeError:
        # Error body was not JSON: surface the raw text instead.
        raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=response.text)
    raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=error_json)
Run a conversation between the agent and a simulated user and stream back the response. Response is streamed back as partial lists of messages that should be concatenated and once the conversation has complete a single final message with the conversation analysis will be sent.
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
simulation_specification : ConversationSimulationSpecification
A specification detailing how the conversation should be simulated
extra_evaluation_criteria : typing.Optional[typing.Sequence[PromptEvaluationCriteria]]
A list of evaluation criteria to test
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[None]
| simulate_conversation_stream | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/raw_client.py | MIT |
def size(
    self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> GetAgentKnowledgebaseSizeResponseModel:
    """Returns the number of pages in the agent's knowledge base.

    Parameters
    ----------
    agent_id : str

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    GetAgentKnowledgebaseSizeResponseModel
        Successful Response

    Examples
    --------
    from elevenlabs import ElevenLabs

    client = ElevenLabs(
        api_key="YOUR_API_KEY",
    )
    client.conversational_ai.agents.knowledge_base.size(
        agent_id="agent_id",
    )
    """
    # Delegate to the raw client and unwrap the typed payload.
    return self._raw_client.size(agent_id, request_options=request_options).data
Returns the number of pages in the agent's knowledge base.
Parameters
----------
agent_id : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetAgentKnowledgebaseSizeResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.agents.knowledge_base.size(
agent_id="agent_id",
)
| size | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/knowledge_base/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/knowledge_base/client.py | MIT |
async def size(
    self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> GetAgentKnowledgebaseSizeResponseModel:
    """Returns the number of pages in the agent's knowledge base.

    Parameters
    ----------
    agent_id : str

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    GetAgentKnowledgebaseSizeResponseModel
        Successful Response

    Examples
    --------
    import asyncio

    from elevenlabs import AsyncElevenLabs

    client = AsyncElevenLabs(
        api_key="YOUR_API_KEY",
    )

    async def main() -> None:
        await client.conversational_ai.agents.knowledge_base.size(
            agent_id="agent_id",
        )

    asyncio.run(main())
    """
    # Delegate to the raw client and unwrap the typed payload.
    raw_response = await self._raw_client.size(agent_id, request_options=request_options)
    return raw_response.data
Returns the number of pages in the agent's knowledge base.
Parameters
----------
agent_id : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetAgentKnowledgebaseSizeResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.agents.knowledge_base.size(
agent_id="agent_id",
)
asyncio.run(main())
| size | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/knowledge_base/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/knowledge_base/client.py | MIT |
def size(
    self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[GetAgentKnowledgebaseSizeResponseModel]:
    """Returns the number of pages in the agent's knowledge base.

    Parameters
    ----------
    agent_id : str

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[GetAgentKnowledgebaseSizeResponseModel]
        Successful Response
    """
    response = self._client_wrapper.httpx_client.request(
        f"v1/convai/agent/{jsonable_encoder(agent_id)}/knowledge-base/size",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        request_options=request_options,
    )
    try:
        status = response.status_code
        if 200 <= status < 300:
            parsed = typing.cast(
                GetAgentKnowledgebaseSizeResponseModel,
                construct_type(
                    type_=GetAgentKnowledgebaseSizeResponseModel,  # type: ignore
                    object_=response.json(),
                ),
            )
            return HttpResponse(response=response, data=parsed)
        if status == 422:
            raise UnprocessableEntityError(
                headers=dict(response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=response.json(),
                    ),
                ),
            )
        error_json = response.json()
    except JSONDecodeError:
        # Error body was not JSON: surface the raw text instead.
        raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=response.text)
    raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=error_json)
Returns the number of pages in the agent's knowledge base.
Parameters
----------
agent_id : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetAgentKnowledgebaseSizeResponseModel]
Successful Response
| size | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/knowledge_base/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/knowledge_base/raw_client.py | MIT |
async def size(
    self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[GetAgentKnowledgebaseSizeResponseModel]:
    """Returns the number of pages in the agent's knowledge base.

    Parameters
    ----------
    agent_id : str

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[GetAgentKnowledgebaseSizeResponseModel]
        Successful Response
    """
    response = await self._client_wrapper.httpx_client.request(
        f"v1/convai/agent/{jsonable_encoder(agent_id)}/knowledge-base/size",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        request_options=request_options,
    )
    try:
        status = response.status_code
        if 200 <= status < 300:
            parsed = typing.cast(
                GetAgentKnowledgebaseSizeResponseModel,
                construct_type(
                    type_=GetAgentKnowledgebaseSizeResponseModel,  # type: ignore
                    object_=response.json(),
                ),
            )
            return AsyncHttpResponse(response=response, data=parsed)
        if status == 422:
            raise UnprocessableEntityError(
                headers=dict(response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=response.json(),
                    ),
                ),
            )
        error_json = response.json()
    except JSONDecodeError:
        # Error body was not JSON: surface the raw text instead.
        raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=response.text)
    raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=error_json)
Returns the number of pages in the agent's knowledge base.
Parameters
----------
agent_id : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetAgentKnowledgebaseSizeResponseModel]
Successful Response
| size | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/knowledge_base/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/knowledge_base/raw_client.py | MIT |
def get(
    self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> GetAgentLinkResponseModel:
    """Get the current link used to share the agent with others.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    GetAgentLinkResponseModel
        Successful Response

    Examples
    --------
    from elevenlabs import ElevenLabs

    client = ElevenLabs(
        api_key="YOUR_API_KEY",
    )
    client.conversational_ai.agents.link.get(
        agent_id="21m00Tcm4TlvDq8ikWAM",
    )
    """
    # Delegate to the raw client and unwrap the typed payload.
    return self._raw_client.get(agent_id, request_options=request_options).data
Get the current link used to share the agent with others
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetAgentLinkResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.agents.link.get(
agent_id="21m00Tcm4TlvDq8ikWAM",
)
| get | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/link/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/link/client.py | MIT |
async def get(
    self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> GetAgentLinkResponseModel:
    """Get the current link used to share the agent with others.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    GetAgentLinkResponseModel
        Successful Response

    Examples
    --------
    import asyncio

    from elevenlabs import AsyncElevenLabs

    client = AsyncElevenLabs(
        api_key="YOUR_API_KEY",
    )

    async def main() -> None:
        await client.conversational_ai.agents.link.get(
            agent_id="21m00Tcm4TlvDq8ikWAM",
        )

    asyncio.run(main())
    """
    # Delegate to the raw client and unwrap the typed payload.
    raw_response = await self._raw_client.get(agent_id, request_options=request_options)
    return raw_response.data
Get the current link used to share the agent with others
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetAgentLinkResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.agents.link.get(
agent_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
| get | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/link/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/link/client.py | MIT |
def get(
    self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[GetAgentLinkResponseModel]:
    """Get the current link used to share the agent with others.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[GetAgentLinkResponseModel]
        Successful Response
    """
    response = self._client_wrapper.httpx_client.request(
        f"v1/convai/agents/{jsonable_encoder(agent_id)}/link",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        request_options=request_options,
    )
    try:
        status = response.status_code
        if 200 <= status < 300:
            parsed = typing.cast(
                GetAgentLinkResponseModel,
                construct_type(
                    type_=GetAgentLinkResponseModel,  # type: ignore
                    object_=response.json(),
                ),
            )
            return HttpResponse(response=response, data=parsed)
        if status == 422:
            raise UnprocessableEntityError(
                headers=dict(response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=response.json(),
                    ),
                ),
            )
        error_json = response.json()
    except JSONDecodeError:
        # Error body was not JSON: surface the raw text instead.
        raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=response.text)
    raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=error_json)
Get the current link used to share the agent with others
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetAgentLinkResponseModel]
Successful Response
| get | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/link/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/link/raw_client.py | MIT |
async def get(
    self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[GetAgentLinkResponseModel]:
    """Get the current link used to share the agent with others.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[GetAgentLinkResponseModel]
        Successful Response
    """
    response = await self._client_wrapper.httpx_client.request(
        f"v1/convai/agents/{jsonable_encoder(agent_id)}/link",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        request_options=request_options,
    )
    try:
        status = response.status_code
        if 200 <= status < 300:
            parsed = typing.cast(
                GetAgentLinkResponseModel,
                construct_type(
                    type_=GetAgentLinkResponseModel,  # type: ignore
                    object_=response.json(),
                ),
            )
            return AsyncHttpResponse(response=response, data=parsed)
        if status == 422:
            raise UnprocessableEntityError(
                headers=dict(response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=response.json(),
                    ),
                ),
            )
        error_json = response.json()
    except JSONDecodeError:
        # Error body was not JSON: surface the raw text instead.
        raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=response.text)
    raise ApiError(status_code=response.status_code, headers=dict(response.headers), body=error_json)
Get the current link used to share the agent with others
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetAgentLinkResponseModel]
Successful Response
| get | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/link/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/link/raw_client.py | MIT |
def calculate(
    self,
    agent_id: str,
    *,
    prompt_length: typing.Optional[int] = OMIT,
    number_of_pages: typing.Optional[int] = OMIT,
    rag_enabled: typing.Optional[bool] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> LlmUsageCalculatorResponseModel:
    """
    Estimate the number of LLM tokens the specified agent is expected to consume.

    Delegates to the raw client and unwraps the parsed response body.

    Parameters
    ----------
    agent_id : str

    prompt_length : typing.Optional[int]
        Length of the prompt in characters.

    number_of_pages : typing.Optional[int]
        Pages of content in pdf documents OR urls in agent's Knowledge Base.

    rag_enabled : typing.Optional[bool]
        Whether RAG is enabled.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    LlmUsageCalculatorResponseModel
        Successful Response
    """
    raw_response = self._raw_client.calculate(
        agent_id,
        prompt_length=prompt_length,
        number_of_pages=number_of_pages,
        rag_enabled=rag_enabled,
        request_options=request_options,
    )
    return raw_response.data
Calculates expected number of LLM tokens needed for the specified agent.
Parameters
----------
agent_id : str
prompt_length : typing.Optional[int]
Length of the prompt in characters.
number_of_pages : typing.Optional[int]
Pages of content in pdf documents OR urls in agent's Knowledge Base.
rag_enabled : typing.Optional[bool]
Whether RAG is enabled.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
LlmUsageCalculatorResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.agents.llm_usage.calculate(
agent_id="agent_id",
)
| calculate | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/llm_usage/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/llm_usage/client.py | MIT |
async def calculate(
    self,
    agent_id: str,
    *,
    prompt_length: typing.Optional[int] = OMIT,
    number_of_pages: typing.Optional[int] = OMIT,
    rag_enabled: typing.Optional[bool] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> LlmUsageCalculatorResponseModel:
    """
    Estimate the number of LLM tokens the specified agent is expected to consume.

    Async variant; delegates to the raw client and unwraps the parsed response body.

    Parameters
    ----------
    agent_id : str

    prompt_length : typing.Optional[int]
        Length of the prompt in characters.

    number_of_pages : typing.Optional[int]
        Pages of content in pdf documents OR urls in agent's Knowledge Base.

    rag_enabled : typing.Optional[bool]
        Whether RAG is enabled.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    LlmUsageCalculatorResponseModel
        Successful Response
    """
    raw_response = await self._raw_client.calculate(
        agent_id,
        prompt_length=prompt_length,
        number_of_pages=number_of_pages,
        rag_enabled=rag_enabled,
        request_options=request_options,
    )
    return raw_response.data
Calculates expected number of LLM tokens needed for the specified agent.
Parameters
----------
agent_id : str
prompt_length : typing.Optional[int]
Length of the prompt in characters.
number_of_pages : typing.Optional[int]
Pages of content in pdf documents OR urls in agent's Knowledge Base.
rag_enabled : typing.Optional[bool]
Whether RAG is enabled.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
LlmUsageCalculatorResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.agents.llm_usage.calculate(
agent_id="agent_id",
)
asyncio.run(main())
| calculate | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/llm_usage/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/llm_usage/client.py | MIT |
def calculate(
    self,
    agent_id: str,
    *,
    prompt_length: typing.Optional[int] = OMIT,
    number_of_pages: typing.Optional[int] = OMIT,
    rag_enabled: typing.Optional[bool] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[LlmUsageCalculatorResponseModel]:
    """
    Calculates expected number of LLM tokens needed for the specified agent.

    Parameters
    ----------
    agent_id : str

    prompt_length : typing.Optional[int]
        Length of the prompt in characters.

    number_of_pages : typing.Optional[int]
        Pages of content in pdf documents OR urls in agent's Knowledge Base.

    rag_enabled : typing.Optional[bool]
        Whether RAG is enabled.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[LlmUsageCalculatorResponseModel]
        Successful Response

    Raises
    ------
    UnprocessableEntityError
        If the API responds with HTTP 422 (request validation failure).
    ApiError
        For any other non-2xx status, or when the response body is not valid JSON.
    """
    # POST the calculation request; fields left as OMIT are dropped from the JSON body.
    _response = self._client_wrapper.httpx_client.request(
        f"v1/convai/agent/{jsonable_encoder(agent_id)}/llm-usage/calculate",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        json={
            "prompt_length": prompt_length,
            "number_of_pages": number_of_pages,
            "rag_enabled": rag_enabled,
        },
        headers={
            "content-type": "application/json",
        },
        request_options=request_options,
        omit=OMIT,
    )
    try:
        if 200 <= _response.status_code < 300:
            # Deserialize the success payload into the typed response model.
            _data = typing.cast(
                LlmUsageCalculatorResponseModel,
                construct_type(
                    type_=LlmUsageCalculatorResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return HttpResponse(response=_response, data=_data)
        if _response.status_code == 422:
            # Validation failure: surface the server-provided error detail.
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Non-JSON body: report the raw response text instead.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status code with a JSON body.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
Calculates expected number of LLM tokens needed for the specified agent.
Parameters
----------
agent_id : str
prompt_length : typing.Optional[int]
Length of the prompt in characters.
number_of_pages : typing.Optional[int]
Pages of content in pdf documents OR urls in agent's Knowledge Base.
rag_enabled : typing.Optional[bool]
Whether RAG is enabled.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[LlmUsageCalculatorResponseModel]
Successful Response
| calculate | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/llm_usage/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/llm_usage/raw_client.py | MIT |
async def calculate(
    self,
    agent_id: str,
    *,
    prompt_length: typing.Optional[int] = OMIT,
    number_of_pages: typing.Optional[int] = OMIT,
    rag_enabled: typing.Optional[bool] = OMIT,
    request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[LlmUsageCalculatorResponseModel]:
    """
    Calculates expected number of LLM tokens needed for the specified agent.

    Parameters
    ----------
    agent_id : str

    prompt_length : typing.Optional[int]
        Length of the prompt in characters.

    number_of_pages : typing.Optional[int]
        Pages of content in pdf documents OR urls in agent's Knowledge Base.

    rag_enabled : typing.Optional[bool]
        Whether RAG is enabled.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[LlmUsageCalculatorResponseModel]
        Successful Response

    Raises
    ------
    UnprocessableEntityError
        If the API responds with HTTP 422 (request validation failure).
    ApiError
        For any other non-2xx status, or when the response body is not valid JSON.
    """
    # POST the calculation request; fields left as OMIT are dropped from the JSON body.
    _response = await self._client_wrapper.httpx_client.request(
        f"v1/convai/agent/{jsonable_encoder(agent_id)}/llm-usage/calculate",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        json={
            "prompt_length": prompt_length,
            "number_of_pages": number_of_pages,
            "rag_enabled": rag_enabled,
        },
        headers={
            "content-type": "application/json",
        },
        request_options=request_options,
        omit=OMIT,
    )
    try:
        if 200 <= _response.status_code < 300:
            # Deserialize the success payload into the typed response model.
            _data = typing.cast(
                LlmUsageCalculatorResponseModel,
                construct_type(
                    type_=LlmUsageCalculatorResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return AsyncHttpResponse(response=_response, data=_data)
        if _response.status_code == 422:
            # Validation failure: surface the server-provided error detail.
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Non-JSON body: report the raw response text instead.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status code with a JSON body.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
Calculates expected number of LLM tokens needed for the specified agent.
Parameters
----------
agent_id : str
prompt_length : typing.Optional[int]
Length of the prompt in characters.
number_of_pages : typing.Optional[int]
Pages of content in pdf documents OR urls in agent's Knowledge Base.
rag_enabled : typing.Optional[bool]
Whether RAG is enabled.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[LlmUsageCalculatorResponseModel]
Successful Response
| calculate | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/llm_usage/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/llm_usage/raw_client.py | MIT |
def get(
    self,
    agent_id: str,
    *,
    conversation_signature: typing.Optional[str] = None,
    request_options: typing.Optional[RequestOptions] = None,
) -> GetAgentEmbedResponseModel:
    """
    Fetch the widget configuration for an agent.

    Delegates to the raw client and unwraps the parsed response body.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    conversation_signature : typing.Optional[str]
        An expiring token that enables a websocket conversation to start. These can be generated for an agent using the /v1/convai/conversation/get-signed-url endpoint

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    GetAgentEmbedResponseModel
        Successful Response
    """
    return self._raw_client.get(
        agent_id,
        conversation_signature=conversation_signature,
        request_options=request_options,
    ).data
Retrieve the widget configuration for an agent
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
conversation_signature : typing.Optional[str]
An expiring token that enables a websocket conversation to start. These can be generated for an agent using the /v1/convai/conversation/get-signed-url endpoint
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetAgentEmbedResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.agents.widget.get(
agent_id="21m00Tcm4TlvDq8ikWAM",
)
| get | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/widget/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/widget/client.py | MIT |
async def get(
    self,
    agent_id: str,
    *,
    conversation_signature: typing.Optional[str] = None,
    request_options: typing.Optional[RequestOptions] = None,
) -> GetAgentEmbedResponseModel:
    """
    Fetch the widget configuration for an agent.

    Async variant; delegates to the raw client and unwraps the parsed response body.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    conversation_signature : typing.Optional[str]
        An expiring token that enables a websocket conversation to start. These can be generated for an agent using the /v1/convai/conversation/get-signed-url endpoint

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    GetAgentEmbedResponseModel
        Successful Response
    """
    raw_response = await self._raw_client.get(
        agent_id,
        conversation_signature=conversation_signature,
        request_options=request_options,
    )
    return raw_response.data
Retrieve the widget configuration for an agent
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
conversation_signature : typing.Optional[str]
An expiring token that enables a websocket conversation to start. These can be generated for an agent using the /v1/convai/conversation/get-signed-url endpoint
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetAgentEmbedResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.agents.widget.get(
agent_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
| get | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/widget/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/widget/client.py | MIT |
def get(
    self,
    agent_id: str,
    *,
    conversation_signature: typing.Optional[str] = None,
    request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[GetAgentEmbedResponseModel]:
    """
    Retrieve the widget configuration for an agent

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    conversation_signature : typing.Optional[str]
        An expiring token that enables a websocket conversation to start. These can be generated for an agent using the /v1/convai/conversation/get-signed-url endpoint

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[GetAgentEmbedResponseModel]
        Successful Response

    Raises
    ------
    UnprocessableEntityError
        If the API responds with HTTP 422 (request validation failure).
    ApiError
        For any other non-2xx status, or when the response body is not valid JSON.
    """
    # GET the widget config; the optional signature is sent as a query parameter.
    _response = self._client_wrapper.httpx_client.request(
        f"v1/convai/agents/{jsonable_encoder(agent_id)}/widget",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        params={
            "conversation_signature": conversation_signature,
        },
        request_options=request_options,
    )
    try:
        if 200 <= _response.status_code < 300:
            # Deserialize the success payload into the typed response model.
            _data = typing.cast(
                GetAgentEmbedResponseModel,
                construct_type(
                    type_=GetAgentEmbedResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return HttpResponse(response=_response, data=_data)
        if _response.status_code == 422:
            # Validation failure: surface the server-provided error detail.
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Non-JSON body: report the raw response text instead.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status code with a JSON body.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
Retrieve the widget configuration for an agent
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
conversation_signature : typing.Optional[str]
An expiring token that enables a websocket conversation to start. These can be generated for an agent using the /v1/convai/conversation/get-signed-url endpoint
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[GetAgentEmbedResponseModel]
Successful Response
| get | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/widget/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/widget/raw_client.py | MIT |
async def get(
    self,
    agent_id: str,
    *,
    conversation_signature: typing.Optional[str] = None,
    request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[GetAgentEmbedResponseModel]:
    """
    Retrieve the widget configuration for an agent

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    conversation_signature : typing.Optional[str]
        An expiring token that enables a websocket conversation to start. These can be generated for an agent using the /v1/convai/conversation/get-signed-url endpoint

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[GetAgentEmbedResponseModel]
        Successful Response

    Raises
    ------
    UnprocessableEntityError
        If the API responds with HTTP 422 (request validation failure).
    ApiError
        For any other non-2xx status, or when the response body is not valid JSON.
    """
    # GET the widget config; the optional signature is sent as a query parameter.
    _response = await self._client_wrapper.httpx_client.request(
        f"v1/convai/agents/{jsonable_encoder(agent_id)}/widget",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        params={
            "conversation_signature": conversation_signature,
        },
        request_options=request_options,
    )
    try:
        if 200 <= _response.status_code < 300:
            # Deserialize the success payload into the typed response model.
            _data = typing.cast(
                GetAgentEmbedResponseModel,
                construct_type(
                    type_=GetAgentEmbedResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return AsyncHttpResponse(response=_response, data=_data)
        if _response.status_code == 422:
            # Validation failure: surface the server-provided error detail.
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Non-JSON body: report the raw response text instead.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status code with a JSON body.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
Retrieve the widget configuration for an agent
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
conversation_signature : typing.Optional[str]
An expiring token that enables a websocket conversation to start. These can be generated for an agent using the /v1/convai/conversation/get-signed-url endpoint
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[GetAgentEmbedResponseModel]
Successful Response
| get | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/widget/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/widget/raw_client.py | MIT |
def create(
    self, agent_id: str, *, avatar_file: core.File, request_options: typing.Optional[RequestOptions] = None
) -> PostAgentAvatarResponseModel:
    """
    Upload the avatar image shown for this agent in the widget.

    Delegates to the raw client and unwraps the parsed response body.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    avatar_file : core.File
        See core.File for more documentation

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    PostAgentAvatarResponseModel
        Successful Response
    """
    return self._raw_client.create(
        agent_id,
        avatar_file=avatar_file,
        request_options=request_options,
    ).data
Sets the avatar for an agent displayed in the widget
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
avatar_file : core.File
See core.File for more documentation
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
PostAgentAvatarResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.agents.widget.avatar.create(
agent_id="21m00Tcm4TlvDq8ikWAM",
)
| create | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/widget/avatar/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/widget/avatar/client.py | MIT |
async def create(
    self, agent_id: str, *, avatar_file: core.File, request_options: typing.Optional[RequestOptions] = None
) -> PostAgentAvatarResponseModel:
    """
    Upload the avatar image shown for this agent in the widget.

    Async variant; delegates to the raw client and unwraps the parsed response body.

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    avatar_file : core.File
        See core.File for more documentation

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    PostAgentAvatarResponseModel
        Successful Response
    """
    raw_response = await self._raw_client.create(
        agent_id,
        avatar_file=avatar_file,
        request_options=request_options,
    )
    return raw_response.data
Sets the avatar for an agent displayed in the widget
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
avatar_file : core.File
See core.File for more documentation
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
PostAgentAvatarResponseModel
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.agents.widget.avatar.create(
agent_id="21m00Tcm4TlvDq8ikWAM",
)
asyncio.run(main())
| create | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/widget/avatar/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/widget/avatar/client.py | MIT |
def create(
    self, agent_id: str, *, avatar_file: core.File, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[PostAgentAvatarResponseModel]:
    """
    Sets the avatar for an agent displayed in the widget

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    avatar_file : core.File
        See core.File for more documentation

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[PostAgentAvatarResponseModel]
        Successful Response

    Raises
    ------
    UnprocessableEntityError
        If the API responds with HTTP 422 (request validation failure).
    ApiError
        For any other non-2xx status, or when the response body is not valid JSON.
    """
    # Upload the avatar as a multipart/form-data POST; force_multipart ensures
    # multipart encoding even though the form data section is empty.
    _response = self._client_wrapper.httpx_client.request(
        f"v1/convai/agents/{jsonable_encoder(agent_id)}/avatar",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        data={},
        files={
            "avatar_file": avatar_file,
        },
        request_options=request_options,
        omit=OMIT,
        force_multipart=True,
    )
    try:
        if 200 <= _response.status_code < 300:
            # Deserialize the success payload into the typed response model.
            _data = typing.cast(
                PostAgentAvatarResponseModel,
                construct_type(
                    type_=PostAgentAvatarResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return HttpResponse(response=_response, data=_data)
        if _response.status_code == 422:
            # Validation failure: surface the server-provided error detail.
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Non-JSON body: report the raw response text instead.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status code with a JSON body.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
Sets the avatar for an agent displayed in the widget
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
avatar_file : core.File
See core.File for more documentation
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[PostAgentAvatarResponseModel]
Successful Response
| create | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/widget/avatar/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/widget/avatar/raw_client.py | MIT |
async def create(
    self, agent_id: str, *, avatar_file: core.File, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[PostAgentAvatarResponseModel]:
    """
    Sets the avatar for an agent displayed in the widget

    Parameters
    ----------
    agent_id : str
        The id of an agent. This is returned on agent creation.

    avatar_file : core.File
        See core.File for more documentation

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[PostAgentAvatarResponseModel]
        Successful Response

    Raises
    ------
    UnprocessableEntityError
        If the API responds with HTTP 422 (request validation failure).
    ApiError
        For any other non-2xx status, or when the response body is not valid JSON.
    """
    # Upload the avatar as a multipart/form-data POST; force_multipart ensures
    # multipart encoding even though the form data section is empty.
    _response = await self._client_wrapper.httpx_client.request(
        f"v1/convai/agents/{jsonable_encoder(agent_id)}/avatar",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        data={},
        files={
            "avatar_file": avatar_file,
        },
        request_options=request_options,
        omit=OMIT,
        force_multipart=True,
    )
    try:
        if 200 <= _response.status_code < 300:
            # Deserialize the success payload into the typed response model.
            _data = typing.cast(
                PostAgentAvatarResponseModel,
                construct_type(
                    type_=PostAgentAvatarResponseModel,  # type: ignore
                    object_=_response.json(),
                ),
            )
            return AsyncHttpResponse(response=_response, data=_data)
        if _response.status_code == 422:
            # Validation failure: surface the server-provided error detail.
            raise UnprocessableEntityError(
                headers=dict(_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=_response.json(),
                    ),
                ),
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Non-JSON body: report the raw response text instead.
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
    # Unexpected status code with a JSON body.
    raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
Sets the avatar for an agent displayed in the widget
Parameters
----------
agent_id : str
The id of an agent. This is returned on agent creation.
avatar_file : core.File
See core.File for more documentation
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[PostAgentAvatarResponseModel]
Successful Response
| create | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/agents/widget/avatar/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/agents/widget/avatar/raw_client.py | MIT |
def get(
    self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> BatchCallDetailedResponse:
    """
    Fetch detailed information about a batch call, including every recipient.

    Delegates to the raw client and unwraps the parsed response body.

    Parameters
    ----------
    batch_id : str

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    BatchCallDetailedResponse
        Successful Response
    """
    return self._raw_client.get(batch_id, request_options=request_options).data
Get detailed information about a batch call including all recipients.
Parameters
----------
batch_id : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
BatchCallDetailedResponse
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.batch_calls.get(
batch_id="batch_id",
)
| get | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/batch_calls/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/batch_calls/client.py | MIT |
async def get(
    self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> BatchCallDetailedResponse:
    """
    Fetch detailed information about a batch call, including every recipient.

    Async variant; delegates to the raw client and unwraps the parsed response body.

    Parameters
    ----------
    batch_id : str

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    BatchCallDetailedResponse
        Successful Response
    """
    raw_response = await self._raw_client.get(batch_id, request_options=request_options)
    return raw_response.data
Get detailed information about a batch call including all recipients.
Parameters
----------
batch_id : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
BatchCallDetailedResponse
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.batch_calls.get(
batch_id="batch_id",
)
asyncio.run(main())
| get | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/batch_calls/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/batch_calls/client.py | MIT |
async def cancel(
    self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> BatchCallResponse:
    """
    Cancel a running batch call; all of its recipients move to cancelled status.

    Async variant; delegates to the raw client and unwraps the parsed response body.

    Parameters
    ----------
    batch_id : str

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    BatchCallResponse
        Successful Response
    """
    raw_response = await self._raw_client.cancel(batch_id, request_options=request_options)
    return raw_response.data
Cancel a running batch call and set all recipients to cancelled status.
Parameters
----------
batch_id : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
BatchCallResponse
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.batch_calls.cancel(
batch_id="batch_id",
)
asyncio.run(main())
| cancel | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/batch_calls/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/batch_calls/client.py | MIT |
async def retry(
    self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> BatchCallResponse:
    """
    Retry a batch call; completed recipients are reset to pending status.

    Async variant; delegates to the raw client and unwraps the parsed response body.

    Parameters
    ----------
    batch_id : str

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    BatchCallResponse
        Successful Response
    """
    raw_response = await self._raw_client.retry(batch_id, request_options=request_options)
    return raw_response.data
Retry a batch call by setting completed recipients back to pending status.
Parameters
----------
batch_id : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
BatchCallResponse
Successful Response
Examples
--------
import asyncio
from elevenlabs import AsyncElevenLabs
client = AsyncElevenLabs(
api_key="YOUR_API_KEY",
)
async def main() -> None:
await client.conversational_ai.batch_calls.retry(
batch_id="batch_id",
)
asyncio.run(main())
| retry | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/batch_calls/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/batch_calls/client.py | MIT |
def get(
    self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[BatchCallDetailedResponse]:
    """Get detailed information about a batch call including all recipients.

    Parameters
    ----------
    batch_id : str

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[BatchCallDetailedResponse]
        Successful Response
    """
    http_response = self._client_wrapper.httpx_client.request(
        f"v1/convai/batch-calling/{jsonable_encoder(batch_id)}",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        request_options=request_options,
    )
    try:
        status = http_response.status_code
        if 200 <= status < 300:
            # Success: deserialize the JSON body into the response model.
            parsed = typing.cast(
                BatchCallDetailedResponse,
                construct_type(
                    type_=BatchCallDetailedResponse,  # type: ignore
                    object_=http_response.json(),
                ),
            )
            return HttpResponse(response=http_response, data=parsed)
        if status == 422:
            # Validation failure: surface the server-provided error details.
            raise UnprocessableEntityError(
                headers=dict(http_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=http_response.json(),
                    ),
                ),
            )
        error_body = http_response.json()
    except JSONDecodeError:
        # Non-JSON body anywhere above: fall back to the raw response text.
        raise ApiError(
            status_code=http_response.status_code,
            headers=dict(http_response.headers),
            body=http_response.text,
        )
    raise ApiError(
        status_code=http_response.status_code,
        headers=dict(http_response.headers),
        body=error_body,
    )
Get detailed information about a batch call including all recipients.
Parameters
----------
batch_id : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[BatchCallDetailedResponse]
Successful Response
| get | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/batch_calls/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/batch_calls/raw_client.py | MIT |
def cancel(
    self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[BatchCallResponse]:
    """Cancel a running batch call and set all recipients to cancelled status.

    Parameters
    ----------
    batch_id : str

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[BatchCallResponse]
        Successful Response
    """
    http_response = self._client_wrapper.httpx_client.request(
        f"v1/convai/batch-calling/{jsonable_encoder(batch_id)}/cancel",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        request_options=request_options,
    )
    try:
        status = http_response.status_code
        if 200 <= status < 300:
            # Success: deserialize the JSON body into the response model.
            parsed = typing.cast(
                BatchCallResponse,
                construct_type(
                    type_=BatchCallResponse,  # type: ignore
                    object_=http_response.json(),
                ),
            )
            return HttpResponse(response=http_response, data=parsed)
        if status == 422:
            # Validation failure: surface the server-provided error details.
            raise UnprocessableEntityError(
                headers=dict(http_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=http_response.json(),
                    ),
                ),
            )
        error_body = http_response.json()
    except JSONDecodeError:
        # Non-JSON body anywhere above: fall back to the raw response text.
        raise ApiError(
            status_code=http_response.status_code,
            headers=dict(http_response.headers),
            body=http_response.text,
        )
    raise ApiError(
        status_code=http_response.status_code,
        headers=dict(http_response.headers),
        body=error_body,
    )
Cancel a running batch call and set all recipients to cancelled status.
Parameters
----------
batch_id : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[BatchCallResponse]
Successful Response
| cancel | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/batch_calls/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/batch_calls/raw_client.py | MIT |
def retry(
    self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> HttpResponse[BatchCallResponse]:
    """Retry a batch call by setting completed recipients back to pending status.

    Parameters
    ----------
    batch_id : str

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    HttpResponse[BatchCallResponse]
        Successful Response
    """
    http_response = self._client_wrapper.httpx_client.request(
        f"v1/convai/batch-calling/{jsonable_encoder(batch_id)}/retry",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        request_options=request_options,
    )
    try:
        status = http_response.status_code
        if 200 <= status < 300:
            # Success: deserialize the JSON body into the response model.
            parsed = typing.cast(
                BatchCallResponse,
                construct_type(
                    type_=BatchCallResponse,  # type: ignore
                    object_=http_response.json(),
                ),
            )
            return HttpResponse(response=http_response, data=parsed)
        if status == 422:
            # Validation failure: surface the server-provided error details.
            raise UnprocessableEntityError(
                headers=dict(http_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=http_response.json(),
                    ),
                ),
            )
        error_body = http_response.json()
    except JSONDecodeError:
        # Non-JSON body anywhere above: fall back to the raw response text.
        raise ApiError(
            status_code=http_response.status_code,
            headers=dict(http_response.headers),
            body=http_response.text,
        )
    raise ApiError(
        status_code=http_response.status_code,
        headers=dict(http_response.headers),
        body=error_body,
    )
Retry a batch call by setting completed recipients back to pending status.
Parameters
----------
batch_id : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
HttpResponse[BatchCallResponse]
Successful Response
| retry | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/batch_calls/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/batch_calls/raw_client.py | MIT |
async def get(
    self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[BatchCallDetailedResponse]:
    """Get detailed information about a batch call including all recipients.

    Parameters
    ----------
    batch_id : str

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[BatchCallDetailedResponse]
        Successful Response
    """
    http_response = await self._client_wrapper.httpx_client.request(
        f"v1/convai/batch-calling/{jsonable_encoder(batch_id)}",
        base_url=self._client_wrapper.get_environment().base,
        method="GET",
        request_options=request_options,
    )
    try:
        status = http_response.status_code
        if 200 <= status < 300:
            # Success: deserialize the JSON body into the response model.
            parsed = typing.cast(
                BatchCallDetailedResponse,
                construct_type(
                    type_=BatchCallDetailedResponse,  # type: ignore
                    object_=http_response.json(),
                ),
            )
            return AsyncHttpResponse(response=http_response, data=parsed)
        if status == 422:
            # Validation failure: surface the server-provided error details.
            raise UnprocessableEntityError(
                headers=dict(http_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=http_response.json(),
                    ),
                ),
            )
        error_body = http_response.json()
    except JSONDecodeError:
        # Non-JSON body anywhere above: fall back to the raw response text.
        raise ApiError(
            status_code=http_response.status_code,
            headers=dict(http_response.headers),
            body=http_response.text,
        )
    raise ApiError(
        status_code=http_response.status_code,
        headers=dict(http_response.headers),
        body=error_body,
    )
Get detailed information about a batch call including all recipients.
Parameters
----------
batch_id : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[BatchCallDetailedResponse]
Successful Response
| get | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/batch_calls/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/batch_calls/raw_client.py | MIT |
async def cancel(
    self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[BatchCallResponse]:
    """Cancel a running batch call and set all recipients to cancelled status.

    Parameters
    ----------
    batch_id : str

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[BatchCallResponse]
        Successful Response
    """
    http_response = await self._client_wrapper.httpx_client.request(
        f"v1/convai/batch-calling/{jsonable_encoder(batch_id)}/cancel",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        request_options=request_options,
    )
    try:
        status = http_response.status_code
        if 200 <= status < 300:
            # Success: deserialize the JSON body into the response model.
            parsed = typing.cast(
                BatchCallResponse,
                construct_type(
                    type_=BatchCallResponse,  # type: ignore
                    object_=http_response.json(),
                ),
            )
            return AsyncHttpResponse(response=http_response, data=parsed)
        if status == 422:
            # Validation failure: surface the server-provided error details.
            raise UnprocessableEntityError(
                headers=dict(http_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=http_response.json(),
                    ),
                ),
            )
        error_body = http_response.json()
    except JSONDecodeError:
        # Non-JSON body anywhere above: fall back to the raw response text.
        raise ApiError(
            status_code=http_response.status_code,
            headers=dict(http_response.headers),
            body=http_response.text,
        )
    raise ApiError(
        status_code=http_response.status_code,
        headers=dict(http_response.headers),
        body=error_body,
    )
Cancel a running batch call and set all recipients to cancelled status.
Parameters
----------
batch_id : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[BatchCallResponse]
Successful Response
| cancel | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/batch_calls/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/batch_calls/raw_client.py | MIT |
async def retry(
    self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> AsyncHttpResponse[BatchCallResponse]:
    """Retry a batch call by setting completed recipients back to pending status.

    Parameters
    ----------
    batch_id : str

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    AsyncHttpResponse[BatchCallResponse]
        Successful Response
    """
    http_response = await self._client_wrapper.httpx_client.request(
        f"v1/convai/batch-calling/{jsonable_encoder(batch_id)}/retry",
        base_url=self._client_wrapper.get_environment().base,
        method="POST",
        request_options=request_options,
    )
    try:
        status = http_response.status_code
        if 200 <= status < 300:
            # Success: deserialize the JSON body into the response model.
            parsed = typing.cast(
                BatchCallResponse,
                construct_type(
                    type_=BatchCallResponse,  # type: ignore
                    object_=http_response.json(),
                ),
            )
            return AsyncHttpResponse(response=http_response, data=parsed)
        if status == 422:
            # Validation failure: surface the server-provided error details.
            raise UnprocessableEntityError(
                headers=dict(http_response.headers),
                body=typing.cast(
                    HttpValidationError,
                    construct_type(
                        type_=HttpValidationError,  # type: ignore
                        object_=http_response.json(),
                    ),
                ),
            )
        error_body = http_response.json()
    except JSONDecodeError:
        # Non-JSON body anywhere above: fall back to the raw response text.
        raise ApiError(
            status_code=http_response.status_code,
            headers=dict(http_response.headers),
            body=http_response.text,
        )
    raise ApiError(
        status_code=http_response.status_code,
        headers=dict(http_response.headers),
        body=error_body,
    )
Retry a batch call by setting completed recipients back to pending status.
Parameters
----------
batch_id : str
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
AsyncHttpResponse[BatchCallResponse]
Successful Response
| retry | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/batch_calls/raw_client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/batch_calls/raw_client.py | MIT |
def get_signed_url(
    self, *, agent_id: str, request_options: typing.Optional[RequestOptions] = None
) -> ConversationSignedUrlResponseModel:
    """Get a signed url to start a conversation with an agent that requires authorization.

    Parameters
    ----------
    agent_id : str
        The id of the agent you're taking the action on.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    ConversationSignedUrlResponseModel
        Successful Response

    Examples
    --------
    from elevenlabs import ElevenLabs

    client = ElevenLabs(
        api_key="YOUR_API_KEY",
    )
    client.conversational_ai.conversations.get_signed_url(
        agent_id="21m00Tcm4TlvDq8ikWAM",
    )
    """
    # Delegate to the raw client and unwrap the parsed payload.
    return self._raw_client.get_signed_url(agent_id=agent_id, request_options=request_options).data
Get a signed url to start a conversation with an agent with an agent that requires authorization
Parameters
----------
agent_id : str
The id of the agent you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
ConversationSignedUrlResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.conversations.get_signed_url(
agent_id="21m00Tcm4TlvDq8ikWAM",
)
| get_signed_url | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/conversations/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/client.py | MIT |
def list(
    self,
    *,
    cursor: typing.Optional[str] = None,
    agent_id: typing.Optional[str] = None,
    call_successful: typing.Optional[EvaluationSuccessResult] = None,
    call_start_before_unix: typing.Optional[int] = None,
    call_start_after_unix: typing.Optional[int] = None,
    page_size: typing.Optional[int] = None,
    request_options: typing.Optional[RequestOptions] = None,
) -> GetConversationsPageResponseModel:
    """Get all conversations of agents that user owns. With option to restrict to a specific agent.

    Parameters
    ----------
    cursor : typing.Optional[str]
        Used for fetching next page. Cursor is returned in the response.

    agent_id : typing.Optional[str]
        The id of the agent you're taking the action on.

    call_successful : typing.Optional[EvaluationSuccessResult]
        The result of the success evaluation

    call_start_before_unix : typing.Optional[int]
        Unix timestamp (in seconds) to filter conversations up to this start date.

    call_start_after_unix : typing.Optional[int]
        Unix timestamp (in seconds) to filter conversations after this start date.

    page_size : typing.Optional[int]
        How many conversations to return at maximum. Can not exceed 100, defaults to 30.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    GetConversationsPageResponseModel
        Successful Response

    Examples
    --------
    from elevenlabs import ElevenLabs

    client = ElevenLabs(
        api_key="YOUR_API_KEY",
    )
    client.conversational_ai.conversations.list()
    """
    # Gather the filter arguments once, forward them to the raw client,
    # and unwrap the parsed payload.
    query_kwargs = dict(
        cursor=cursor,
        agent_id=agent_id,
        call_successful=call_successful,
        call_start_before_unix=call_start_before_unix,
        call_start_after_unix=call_start_after_unix,
        page_size=page_size,
        request_options=request_options,
    )
    return self._raw_client.list(**query_kwargs).data
Get all conversations of agents that user owns. With option to restrict to a specific agent.
Parameters
----------
cursor : typing.Optional[str]
Used for fetching next page. Cursor is returned in the response.
agent_id : typing.Optional[str]
The id of the agent you're taking the action on.
call_successful : typing.Optional[EvaluationSuccessResult]
The result of the success evaluation
call_start_before_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations up to this start date.
call_start_after_unix : typing.Optional[int]
Unix timestamp (in seconds) to filter conversations after this start date.
page_size : typing.Optional[int]
How many conversations to return at maximum. Can not exceed 100, defaults to 30.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetConversationsPageResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.conversations.list()
| list | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/conversations/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/client.py | MIT |
def get(
    self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> GetConversationResponseModel:
    """Get the details of a particular conversation.

    Parameters
    ----------
    conversation_id : str
        The id of the conversation you're taking the action on.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    GetConversationResponseModel
        Successful Response

    Examples
    --------
    from elevenlabs import ElevenLabs

    client = ElevenLabs(
        api_key="YOUR_API_KEY",
    )
    client.conversational_ai.conversations.get(
        conversation_id="123",
    )
    """
    # Delegate to the raw client and unwrap the parsed payload.
    return self._raw_client.get(conversation_id, request_options=request_options).data
Get the details of a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
GetConversationResponseModel
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.conversations.get(
conversation_id="123",
)
| get | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/conversations/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/client.py | MIT |
def delete(
    self, conversation_id: str, *, request_options: typing.Optional[RequestOptions] = None
) -> typing.Optional[typing.Any]:
    """Delete a particular conversation.

    Parameters
    ----------
    conversation_id : str
        The id of the conversation you're taking the action on.

    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    typing.Optional[typing.Any]
        Successful Response

    Examples
    --------
    from elevenlabs import ElevenLabs

    client = ElevenLabs(
        api_key="YOUR_API_KEY",
    )
    client.conversational_ai.conversations.delete(
        conversation_id="21m00Tcm4TlvDq8ikWAM",
    )
    """
    # Delegate to the raw client and unwrap the parsed payload.
    return self._raw_client.delete(conversation_id, request_options=request_options).data
Delete a particular conversation
Parameters
----------
conversation_id : str
The id of the conversation you're taking the action on.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
typing.Optional[typing.Any]
Successful Response
Examples
--------
from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.conversational_ai.conversations.delete(
conversation_id="21m00Tcm4TlvDq8ikWAM",
)
| delete | python | elevenlabs/elevenlabs-python | src/elevenlabs/conversational_ai/conversations/client.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/src/elevenlabs/conversational_ai/conversations/client.py | MIT |
Subsets and Splits
Django Code with Docstrings
Filters Python code examples from Django repository that contain Django-related code, helping identify relevant code snippets for understanding Django framework usage patterns.
SQL Console for Shuu12121/python-treesitter-filtered-datasetsV2
Retrieves specific code examples from the Flask repository but doesn't provide meaningful analysis or patterns beyond basic data retrieval.
HTTPX Repo Code and Docstrings
Retrieves specific code examples from the httpx repository, which is useful for understanding how particular libraries are used but doesn't provide broader analytical insights about the dataset.
Requests Repo Docstrings & Code
Retrieves code examples with their docstrings and file paths from the requests repository, providing basic filtering but limited analytical value beyond finding specific code samples.
Quart Repo Docstrings & Code
Retrieves code examples with their docstrings from the Quart repository, providing basic code samples but offering limited analytical value for understanding broader patterns or relationships in the dataset.