| temperature=0.12 | |
| ) | |
| >>> | |
| >>> # complete the prompt | |
| >>> result = client.complete(request, model=model_name) | |
| """ | |
| response = self._post_request("complete", request, model) | |
| return CompletionResponse.from_json(response) | |
def tokenize(
    self,
    request: TokenizationRequest,
    model: str,
) -> TokenizationResponse:
    """Tokenize a prompt with the tokenizer of the given model.

    Parameters:
        request (TokenizationRequest, required):
            Parameters for the requested tokenization.
        model (string, required):
            Name of model to use. A model name refers to a model architecture (number of parameters among others).
            Always the latest version of model is used.

    Examples:
        >>> request = TokenizationRequest(
                prompt="hello", token_ids=True, tokens=True
            )
        >>> response = client.tokenize(request, model=model_name)
    """
    # Forward the request to the "tokenize" endpoint and wrap the raw
    # JSON payload in the typed response object.
    raw = self._post_request("tokenize", request, model)
    return TokenizationResponse.from_json(raw)
| def detokenize( | |
| self, | |
| request: DetokenizationRequest, | |
| model: str, | |
| ) -> DetokenizationResponse: | |
| """Detokenizes the given prompt for the given model. | |
| Parameters: | |
| request (DetokenizationRequest, required): | |
| Parameters for the requested detokenization. | |
| model (string, required): | |
| Name of model to use. A model name refers to a model architecture (number of parameters among others). | |
| Always the latest version of model is used. | |
| Examples: | |
| >>> request = DetokenizationRequest(token_ids=[2, 3, 4]) | |
| >>> response = client.detokenize(request, model=model_name) | |
| """ | |
| response = sel |