psaegert's picture
Upload 201 files
2c34d2f
_val, exc_tb=exc_tb)
async def get_version(self) -> str:
    """Return the version string reported by the AlephAlpha HTTP API."""
    # The version endpoint answers with plain text, not JSON.
    return await self._get_request_text("version")
async def _get_request_text(self, endpoint: str) -> str:
    """Issue a GET request to *endpoint* and return the response body as text.

    Delegates to ``_raise_for_status`` (which raises) when the HTTP status
    indicates failure; the failing response body is forwarded for context.
    """
    url = self.host + endpoint
    async with self.session.get(url) as response:
        if not response.ok:
            error_body = await response.text()
            _raise_for_status(response.status, error_body)
        return await response.text()
async def _get_request_json(
    self, endpoint: str
) -> Union[List[Mapping[str, Any]], Mapping[str, Any]]:
    """Issue a GET request to *endpoint* and return the decoded JSON body.

    Delegates to ``_raise_for_status`` (which raises) when the HTTP status
    indicates failure; the failing response body is forwarded for context.
    """
    url = self.host + endpoint
    async with self.session.get(url) as response:
        if not response.ok:
            error_body = await response.text()
            _raise_for_status(response.status, error_body)
        return await response.json()
async def _post_request(
    self,
    endpoint: str,
    request: AnyRequest,
    model: Optional[str] = None,
) -> Dict[str, Any]:
    """POST a serialized *request* to *endpoint* and return the JSON response.

    Parameters:
        endpoint: API path appended to ``self.host``.
        request: Request object; serialized via ``_build_json_body``.
        model: Optional model name merged into the JSON body.

    Raises via ``_raise_for_status`` when the HTTP status indicates failure.
    """
    # Query parameters and the JSON payload are built independently.
    params = self._build_query_parameters()
    payload = self._build_json_body(request, model)
    url = self.host + endpoint
    async with self.session.post(url, json=payload, params=params) as response:
        if not response.ok:
            _raise_for_status(response.status, await response.text())
        return await response.json()
def _build_query_parameters(self) -> Mapping[str, str]:
return {
# cannot use str() here because we want lowercase true/false in query string
# Also do not want to send the nice flag with every request if it is false
**({"nice": "true"} if self.nice else {}),
}
def _build_json_body(
    self, request: AnyRequest, model: Optional[str]
) -> Mapping[str, Any]:
    """Serialize *request* and attach the optional model/hosting fields.

    ``model`` and ``self.hosting`` are only included when they are not None,
    so absent values never appear as explicit nulls in the request body.
    """
    body: Dict[str, Any] = {**request.to_json()}
    if model is not None:
        body["model"] = model
    if self.hosting is not None:
        body["hosting"] = self.hosting
    return body
async def models(self) -> List[Mapping[str, Any]]:
    """
    List all models that are currently available.
    The response format is documented at https://docs.aleph-alpha.com/api/available-models/
    """
    available = await self._get_request_json("models_available")
    return available  # type: ignore
async def complete(
    self,
    request: CompletionRequest,
    model: str,
) -> CompletionResponse:
    """Request completions for a prompt from the given model.
    Parameters:
        request (CompletionRequest, required):
            Parameters for the requested completion.
        model (string, required):
            Name of model to use. A model name refers to a model architecture (number of parameters among others).
            Always the latest version of model is used.
    Examples:
        >>> # create a prompt
        >>> prompt = Prompt.from_text("An apple a day, ")
        >>>
        >>> # create a completion request
        >>> request = CompletionRequest(
                prompt=prompt,
                maximum_tokens=32,
                stop_sequences=["###","\\n"],
                temperature=0.12
            )
        >>>
        >>> # complete the prompt
        >>> result = await client.complete(request, model=model_name)
    """
    raw_response = await self._post_request("complete", request, model)
    return CompletionResponse.from_json(raw_response)
async def tokenize(
    self,
    request: TokenizationRequest,
    model: str,
) -> TokenizationResponse:
    """Tokenize the given prompt with the tokenizer of the given model.
    Parameters:
        request (TokenizationRequest, required):
            Parameters for the requested tokenization.
        model (string, required):
            Name of model to use. A model name refers to a model architecture (number of parameters among others).
            Always the latest version of model is used.
    Examples:
        >>> request = TokenizationRequest(prompt="hello", token_ids=True, tokens=True)
        >>> response = await client.tokenize(request, model=model_name)
    """
    raw_response = await self._post_request("tokenize", request, model)
    return TokenizationResponse.from_json(raw_response)
async def detokenize(
    self,
    request: DetokenizationRequest,
    model: str,
) -> DetokenizationResponse:
    """Turn token ids back into text using the tokenizer of the given model.
    Parameters:
        request (DetokenizationRequest, required):
            Parameters for the requested detokenization.
        model (string, required):
            Name of model to use. A model name refers to a model architecture (number of parameters among others).
            Always the latest version of model is used.
    Examples:
        >>> request = DetokenizationRequest(token_ids=[2, 3, 4])
        >>> response = await client.detokenize(request, model=model_name)
    """
    raw_response = await self._post_request("detokenize", request, model)
    return DetokenizationResponse.from_json(raw_response)
async def embed(
self,
request: EmbeddingRequest,
model: str,
) -> EmbeddingResponse:
"""Embeds a text and returns vectors that can be used for downstream tasks (e.g. semantic similarity) and models (e.g. classifiers).
Parameters:
request (EmbeddingRequest, required):
Parameters for the requested embedding.
model (string, required):
Name of