method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
_get_mock_folder
return {'folder': {'title': 'runbook', 'creator_id': 'testing', 'folder_type': 'shared', 'parent_id': 'ABCD', 'inherit_mode': 'inherit', 'color': 'manila', 'id': f'{folder_id}', 'created_usec': 1668405728528904, 'updated_usec': 1697356632672453, 'link': 'https://example.quip.com/YPH9OAR2Eu5'}, 'member_...
def _get_mock_folder(self, folder_id: str) ->Dict: return {'folder': {'title': 'runbook', 'creator_id': 'testing', 'folder_type': 'shared', 'parent_id': 'ABCD', 'inherit_mode': 'inherit', 'color': 'manila', 'id': f'{folder_id}', 'created_usec': 1668405728528904, 'updated_usec': 1697356632672...
null
from_uri
"""Construct a SQLAlchemy engine from URI.""" _engine_args = engine_args or {} return cls(create_engine(database_uri, **_engine_args), **kwargs)
@classmethod def from_uri(cls, database_uri: str, engine_args: Optional[dict]=None, ** kwargs: Any) ->SQLDatabase: """Construct a SQLAlchemy engine from URI.""" _engine_args = engine_args or {} return cls(create_engine(database_uri, **_engine_args), **kwargs)
Construct a SQLAlchemy engine from URI.
_chain_type
return 'llm_personalizer_chain'
@property def _chain_type(self) ->str: return 'llm_personalizer_chain'
null
_run
try: unix_timestamp = dt.timestamp(dt.strptime(timestamp, UTC_FORMAT)) result = self.client.chat_scheduleMessage(channel=channel, text=message, post_at=unix_timestamp) output = 'Message scheduled: ' + str(result) return output except Exception as e: return 'Error scheduling message: {}'.form...
def _run(self, message: str, channel: str, timestamp: str, run_manager: Optional[CallbackManagerForToolRun]=None) ->str: try: unix_timestamp = dt.timestamp(dt.strptime(timestamp, UTC_FORMAT)) result = self.client.chat_scheduleMessage(channel=channel, text= message, post_at=unix_times...
null
on_tool_error
self.on_tool_error_common()
def on_tool_error(self, *args: Any, **kwargs: Any) ->Any: self.on_tool_error_common()
null
test_gpt_router_generate
"""Test generate method of GPTRouter.""" anthropic_claude = GPTRouterModel(name='claude-instant-1.2', provider_name= 'anthropic') chat = GPTRouter(models_priority_list=[anthropic_claude]) chat_messages: List[List[BaseMessage]] = [[HumanMessage(content= 'If (5 + x = 18), what is x?')]] messages_copy = [messages....
def test_gpt_router_generate() ->None: """Test generate method of GPTRouter.""" anthropic_claude = GPTRouterModel(name='claude-instant-1.2', provider_name='anthropic') chat = GPTRouter(models_priority_list=[anthropic_claude]) chat_messages: List[List[BaseMessage]] = [[HumanMessage(content= ...
Test generate method of GPTRouter.
test_pgvector_max_marginal_relevance_search
"""Test max marginal relevance search.""" texts = ['foo', 'bar', 'baz'] docsearch = PGVector.from_texts(texts=texts, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimension(), connection_string=CONNECTION_STRING, pre_delete_collection=True) output = docsearch.max_marginal_relevance_search('...
def test_pgvector_max_marginal_relevance_search() ->None: """Test max marginal relevance search.""" texts = ['foo', 'bar', 'baz'] docsearch = PGVector.from_texts(texts=texts, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimension(), connection_string=CONNECTION_STRING, ...
Test max marginal relevance search.
_stream_with_aggregation
final_chunk: Optional[GenerationChunk] = None for stream_resp in self._create_generate_stream(prompt, stop, **kwargs): if stream_resp: chunk = _stream_response_to_generation_chunk(stream_resp) if final_chunk is None: final_chunk = chunk else: final_chunk += chunk ...
def _stream_with_aggregation(self, prompt: str, stop: Optional[List[str]]= None, run_manager: Optional[CallbackManagerForLLMRun]=None, verbose: bool=False, **kwargs: Any) ->GenerationChunk: final_chunk: Optional[GenerationChunk] = None for stream_resp in self._create_generate_stream(prompt, stop, **kwar...
null
_get_topics_of_reflection
"""Return the 3 most salient high-level questions about recent observations.""" prompt = PromptTemplate.from_template( """{observations} Given only the information above, what are the 3 most salient high-level questions we can answer about the subjects in the statements? Provide each question on a new line.""" ...
def _get_topics_of_reflection(self, last_k: int=50) ->List[str]: """Return the 3 most salient high-level questions about recent observations.""" prompt = PromptTemplate.from_template( """{observations} Given only the information above, what are the 3 most salient high-level questions we can answer abou...
Return the 3 most salient high-level questions about recent observations.
test_huggingface_instructor_embedding_query
"""Test huggingface embeddings.""" query = 'foo bar' model_name = 'hkunlp/instructor-base' embedding = HuggingFaceInstructEmbeddings(model_name=model_name) output = embedding.embed_query(query) assert len(output) == 768
def test_huggingface_instructor_embedding_query() ->None: """Test huggingface embeddings.""" query = 'foo bar' model_name = 'hkunlp/instructor-base' embedding = HuggingFaceInstructEmbeddings(model_name=model_name) output = embedding.embed_query(query) assert len(output) == 768
Test huggingface embeddings.
load
"""Load documents.""" chunks: List[Document] = [] if self.access_token and self.docset_id: _document_details = self._document_details_for_docset_id(self.docset_id) if self.document_ids: _document_details = [d for d in _document_details if d[ID_KEY] in self.document_ids] _project_details ...
def load(self) ->List[Document]: """Load documents.""" chunks: List[Document] = [] if self.access_token and self.docset_id: _document_details = self._document_details_for_docset_id(self.docset_id ) if self.document_ids: _document_details = [d for d in _document_detail...
Load documents.
_get_tools_requests_delete
return RequestsDeleteTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_delete() ->BaseTool: return RequestsDeleteTool(requests_wrapper=TextRequestsWrapper())
null
_identifying_params
"""Get the identifying parameters.""" return {**{'model_url': self.model_url}, **self._default_params}
@property def _identifying_params(self) ->Dict[str, Any]: """Get the identifying parameters.""" return {**{'model_url': self.model_url}, **self._default_params}
Get the identifying parameters.
test_message_chunk_to_message
assert message_chunk_to_message(AIMessageChunk(content='I am', additional_kwargs={'foo': 'bar'})) == AIMessage(content='I am', additional_kwargs={'foo': 'bar'}) assert message_chunk_to_message(HumanMessageChunk(content='I am') ) == HumanMessage(content='I am') assert message_chunk_to_message(ChatMessageChun...
def test_message_chunk_to_message() ->None: assert message_chunk_to_message(AIMessageChunk(content='I am', additional_kwargs={'foo': 'bar'})) == AIMessage(content='I am', additional_kwargs={'foo': 'bar'}) assert message_chunk_to_message(HumanMessageChunk(content='I am') ) == HumanMessage...
null
validate_code
try: code_tree = ast.parse(code) except (SyntaxError, UnicodeDecodeError): raise ValueError(f'Generated code is not valid python code: {code}') except TypeError: raise ValueError( f'Generated code is expected to be a string, instead found {type(code)}' ) except OverflowError: raise Value...
@classmethod def validate_code(cls, code: str, code_validations: PALValidation) ->None: try: code_tree = ast.parse(code) except (SyntaxError, UnicodeDecodeError): raise ValueError(f'Generated code is not valid python code: {code}') except TypeError: raise ValueError( f'Ge...
null
_on_tool_end
"""Process the Tool Run.""" self._process_end_trace(run)
def _on_tool_end(self, run: 'Run') ->None: """Process the Tool Run.""" self._process_end_trace(run)
Process the Tool Run.
test_parse_partial_json
case, expected = json_strings parsed = parse_partial_json(case) assert parsed == json.loads(expected)
@pytest.mark.parametrize('json_strings', TEST_CASES_PARTIAL) def test_parse_partial_json(json_strings: Tuple[str, str]) ->None: case, expected = json_strings parsed = parse_partial_json(case) assert parsed == json.loads(expected)
null
extension
return 'parquet'
@classmethod def extension(cls) ->str: return 'parquet'
null
fill
"""Indent a piece of text, according to the current indentation level""" self.f.write('\n' + ' ' * self._indent + text)
def fill(self, text=''): """Indent a piece of text, according to the current indentation level""" self.f.write('\n' + ' ' * self._indent + text)
Indent a piece of text, according to the current indentation level
agent_executor
self._agent_executor = agent_executor self.inputs = self.inputs
@agent_executor.setter def agent_executor(self, agent_executor: AgentExecutor) ->None: self._agent_executor = agent_executor self.inputs = self.inputs
null
test_simple_text
"""Test simple question that should not need python.""" question = 'a' output = fake_llm_summarization_checker_chain.run(question) assert output == 'b'
def test_simple_text(fake_llm_summarization_checker_chain: LLMSummarizationCheckerChain) ->None: """Test simple question that should not need python.""" question = 'a' output = fake_llm_summarization_checker_chain.run(question) assert output == 'b'
Test simple question that should not need python.
embed_documents
return [self._get_embedding(seed=self._get_seed(_)) for _ in texts]
def embed_documents(self, texts: List[str]) ->List[List[float]]: return [self._get_embedding(seed=self._get_seed(_)) for _ in texts]
null
get_lists
""" Get all available lists. """ url = f'{DEFAULT_URL}/folder/{self.folder_id}/list' params = self.get_default_params() response = requests.get(url, headers=self.get_headers(), params=params) return {'response': response}
def get_lists(self) ->Dict: """ Get all available lists. """ url = f'{DEFAULT_URL}/folder/{self.folder_id}/list' params = self.get_default_params() response = requests.get(url, headers=self.get_headers(), params=params) return {'response': response}
Get all available lists.
_run
"""Use the tool.""" query_params = {'file_url': query, 'language': self.language, 'attributes_as_list': False} return self._call_eden_ai(query_params)
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Use the tool.""" query_params = {'file_url': query, 'language': self.language, 'attributes_as_list': False} return self._call_eden_ai(query_params)
Use the tool.
_import_graphql
from langchain_community.utilities.graphql import GraphQLAPIWrapper return GraphQLAPIWrapper
def _import_graphql() ->Any: from langchain_community.utilities.graphql import GraphQLAPIWrapper return GraphQLAPIWrapper
null
embed_documents
"""Return simple embeddings.""" return [([float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)]) for i in range( len(texts))]
def embed_documents(self, texts: List[str]) ->List[List[float]]: """Return simple embeddings.""" return [([float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)]) for i in range(len(texts))]
Return simple embeddings.
__getattr__
"""Get attr name.""" if name == 'create_xorbits_agent': HERE = Path(__file__).parents[3] here = as_import_path(Path(__file__).parent, relative_to=HERE) old_path = 'langchain.' + here + '.' + name new_path = 'langchain_experimental.' + here + '.' + name raise ImportError( f"""This agent has b...
def __getattr__(name: str) ->Any: """Get attr name.""" if name == 'create_xorbits_agent': HERE = Path(__file__).parents[3] here = as_import_path(Path(__file__).parent, relative_to=HERE) old_path = 'langchain.' + here + '.' + name new_path = 'langchain_experimental.' + here + '.' ...
Get attr name.
__add__
if isinstance(other, BaseMessageChunk): return self.__class__(content=merge_content(self.content, other.content ), additional_kwargs=self._merge_kwargs_dict(self.additional_kwargs, other.additional_kwargs)) else: raise TypeError( f'unsupported operand type(s) for +: "{self.__class__.__na...
def __add__(self, other: Any) ->BaseMessageChunk: if isinstance(other, BaseMessageChunk): return self.__class__(content=merge_content(self.content, other. content), additional_kwargs=self._merge_kwargs_dict(self. additional_kwargs, other.additional_kwargs)) else: raise Ty...
null
parse
raise ValueError('Can only parse messages')
def parse(self, text: str) ->Union[AgentAction, AgentFinish]: raise ValueError('Can only parse messages')
null
parse
"""Parse the output of an LLM call to a boolean. Args: text: output of a language model Returns: boolean """ cleaned_text = text.strip() if cleaned_text.upper() not in (self.true_val.upper(), self.false_val.upper()): raise ValueError( f'BooleanOutputParser ...
def parse(self, text: str) ->bool: """Parse the output of an LLM call to a boolean. Args: text: output of a language model Returns: boolean """ cleaned_text = text.strip() if cleaned_text.upper() not in (self.true_val.upper(), self.false_val. upper(...
Parse the output of an LLM call to a boolean. Args: text: output of a language model Returns: boolean
_get_docs
"""Get docs."""
@abstractmethod def _get_docs(self, question: str, inputs: Dict[str, Any], *, run_manager: CallbackManagerForChainRun) ->List[Document]: """Get docs."""
Get docs.
on_retriever_error
if parent_run_id is None: self.increment()
def on_retriever_error(self, error: BaseException, *, run_id: UUID, parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Any: if parent_run_id is None: self.increment()
null
add_texts
"""Add more texts to the vectorstore index. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters """ from psycopg2 import sql texts = list(texts) cursor = s...
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, **kwargs: Any) ->List[str]: """Add more texts to the vectorstore index. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ...
Add more texts to the vectorstore index. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters
load
"""Load HTML document into document objects.""" from bs4 import BeautifulSoup with open(self.file_path, 'r', encoding=self.open_encoding) as f: soup = BeautifulSoup(f, **self.bs_kwargs) text = soup.get_text(self.get_text_separator) if soup.title: title = str(soup.title.string) else: title = '' metadata: Dic...
def load(self) ->List[Document]: """Load HTML document into document objects.""" from bs4 import BeautifulSoup with open(self.file_path, 'r', encoding=self.open_encoding) as f: soup = BeautifulSoup(f, **self.bs_kwargs) text = soup.get_text(self.get_text_separator) if soup.title: titl...
Load HTML document into document objects.
from_gitlab_api_wrapper
operations: List[Dict] = [{'mode': 'get_issues', 'name': 'Get Issues', 'description': GET_ISSUES_PROMPT}, {'mode': 'get_issue', 'name': 'Get Issue', 'description': GET_ISSUE_PROMPT}, {'mode': 'comment_on_issue', 'name': 'Comment on Issue', 'description': COMMENT_ON_ISSUE_PROMPT}, {'mode': 'create_pull_r...
@classmethod def from_gitlab_api_wrapper(cls, gitlab_api_wrapper: GitLabAPIWrapper ) ->'GitLabToolkit': operations: List[Dict] = [{'mode': 'get_issues', 'name': 'Get Issues', 'description': GET_ISSUES_PROMPT}, {'mode': 'get_issue', 'name': 'Get Issue', 'description': GET_ISSUE_PROMPT}, {'mode': ...
null
go_to_page
self.page.goto(url=url if '://' in url else 'http://' + url) self.client = self.page.context.new_cdp_session(self.page) self.page_element_buffer = {}
def go_to_page(self, url: str) ->None: self.page.goto(url=url if '://' in url else 'http://' + url) self.client = self.page.context.new_cdp_session(self.page) self.page_element_buffer = {}
null
__del__
if self._connection: self._connection.close()
def __del__(self) ->None: if self._connection: self._connection.close()
null
_get_mock_thread
return {'thread': {'author_id': 'testing', 'thread_class': 'document', 'owning_company_id': 'ABC', 'id': f'{thread_id}', 'created_usec': 1690873126670055, 'updated_usec': 1690874891638991, 'title': f'Unit Test Doc {thread_id}', 'link': f'https://example.quip.com/{thread_id}', 'document_id': 'ABC', 'typ...
def _get_mock_thread(self, thread_id: str) ->Dict: return {'thread': {'author_id': 'testing', 'thread_class': 'document', 'owning_company_id': 'ABC', 'id': f'{thread_id}', 'created_usec': 1690873126670055, 'updated_usec': 1690874891638991, 'title': f'Unit Test Doc {thread_id}', 'link': ...
null
test_api_key_masked_when_passed_via_constructor
llm = EmbaasEmbeddings(embaas_api_key='secret-api-key') print(llm.embaas_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture ) ->None: llm = EmbaasEmbeddings(embaas_api_key='secret-api-key') print(llm.embaas_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
null
metadata_column
return ''
@property def metadata_column(self) ->str: return ''
null
_parse_tables
result = [] for table in tables: rc, cc = table.row_count, table.column_count _table = [['' for _ in range(cc)] for _ in range(rc)] for cell in table.cells: _table[cell.row_index][cell.column_index] = cell.content result.append(_table) return result
def _parse_tables(self, tables: List[Any]) ->List[Any]: result = [] for table in tables: rc, cc = table.row_count, table.column_count _table = [['' for _ in range(cc)] for _ in range(rc)] for cell in table.cells: _table[cell.row_index][cell.column_index] = cell.content ...
null
test_edenai_call
"""Test simple call to edenai's text to speech endpoint.""" text2speech = EdenAiTextToSpeechTool(providers=['amazon'], language='en', voice='MALE') output = text2speech('hello') parsed_url = urlparse(output) assert text2speech.name == 'edenai_text_to_speech' assert text2speech.feature == 'audio' assert text2speech....
def test_edenai_call() ->None: """Test simple call to edenai's text to speech endpoint.""" text2speech = EdenAiTextToSpeechTool(providers=['amazon'], language= 'en', voice='MALE') output = text2speech('hello') parsed_url = urlparse(output) assert text2speech.name == 'edenai_text_to_speech' ...
Test simple call to edenai's text to speech endpoint.
test_anonymize_with_custom_operator
"""Test anonymize a name with a custom operator""" from presidio_anonymizer.entities import OperatorConfig from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer custom_operator = {'PERSON': OperatorConfig('replace', {'new_value': 'NAME'})} anonymizer = PresidioReversibleAnonymizer(operators=cu...
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker') def test_anonymize_with_custom_operator() ->None: """Test anonymize a name with a custom operator""" from presidio_anonymizer.entities import OperatorConfig from langchain_experimental.data_anonymizer import PresidioReversibleAnonymi...
Test anonymize a name with a custom operator
test_importable_all
for path in glob.glob('../langchain/langchain/*'): relative_path = Path(path).parts[-1] if relative_path.endswith('.typed'): continue module_name = relative_path.split('.')[0] module = importlib.import_module('langchain.' + module_name) all_ = getattr(module, '__all__', []) for cls_ in a...
def test_importable_all() ->None: for path in glob.glob('../langchain/langchain/*'): relative_path = Path(path).parts[-1] if relative_path.endswith('.typed'): continue module_name = relative_path.split('.')[0] module = importlib.import_module('langchain.' + module_name) ...
null
zep_summary
"""Retrieve summary from Zep memory""" zep_memory: Optional[Memory] = self._get_memory() if not zep_memory or not zep_memory.summary: return None return zep_memory.summary.content
@property def zep_summary(self) ->Optional[str]: """Retrieve summary from Zep memory""" zep_memory: Optional[Memory] = self._get_memory() if not zep_memory or not zep_memory.summary: return None return zep_memory.summary.content
Retrieve summary from Zep memory
knn_hybrid_search
""" Perform a hybrid k-NN and text search on the Elasticsearch index. Args: query (str, optional): The query text to search for. k (int, optional): The number of nearest neighbors to return. query_vector (List[float], optional): The query vector to search for. ...
def knn_hybrid_search(self, query: Optional[str]=None, k: Optional[int]=10, query_vector: Optional[List[float]]=None, model_id: Optional[str]=None, size: Optional[int]=10, source: Optional[bool]=True, knn_boost: Optional[float]=0.9, query_boost: Optional[float]=0.1, fields: Optional [Union[List[Mapping[...
Perform a hybrid k-NN and text search on the Elasticsearch index. Args: query (str, optional): The query text to search for. k (int, optional): The number of nearest neighbors to return. query_vector (List[float], optional): The query vector to search for. model_id (str, optional): The ID of the model ...
_deduplicate_in_order
"""Deduplicate a list of hashed documents while preserving order.""" seen: Set[str] = set() for hashed_doc in hashed_documents: if hashed_doc.hash_ not in seen: seen.add(hashed_doc.hash_) yield hashed_doc
def _deduplicate_in_order(hashed_documents: Iterable[_HashedDocument] ) ->Iterator[_HashedDocument]: """Deduplicate a list of hashed documents while preserving order.""" seen: Set[str] = set() for hashed_doc in hashed_documents: if hashed_doc.hash_ not in seen: seen.add(hashed_doc.ha...
Deduplicate a list of hashed documents while preserving order.
pytest_addoption
"""Add custom command line options to pytest.""" parser.addoption('--only-extended', action='store_true', help= 'Only run extended tests. Does not allow skipping any extended tests.') parser.addoption('--only-core', action='store_true', help= 'Only run core tests. Never runs any extended tests.')
def pytest_addoption(parser: Parser) ->None: """Add custom command line options to pytest.""" parser.addoption('--only-extended', action='store_true', help= 'Only run extended tests. Does not allow skipping any extended tests.') parser.addoption('--only-core', action='store_true', help= 'Onl...
Add custom command line options to pytest.
from_texts
jagstore = cls(pod, store, vector_index, vector_type, vector_dimension, url, embedding) jagstore.login(jaguar_api_key) jagstore.clear() jagstore.add_texts(texts, metadatas, **kwargs) return jagstore
@classmethod def from_texts(cls, texts: List[str], embedding: Embeddings, url: str, pod: str, store: str, vector_index: str, vector_type: str, vector_dimension: int, metadatas: Optional[List[dict]]=None, jaguar_api_key: Optional[str ]='', **kwargs: Any) ->Jaguar: jagstore = cls(pod, store, vector_index,...
null
test_anthropic_streaming_callback
"""Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) llm = Anthropic(streaming=True, callback_manager=callback_manager, verbose=True ) llm('Write me a sentence with 100 words.') assert callback_handler....
def test_anthropic_streaming_callback() ->None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) llm = Anthropic(streaming=True, callback_manager=callback_manager, verbose=True) ...
Test that streaming correctly invokes on_llm_new_token callback.
program
return items
def program(self, *items: Any) ->tuple: return items
null
test_mhtml_loader
"""Test mhtml loader.""" file_path = EXAMPLES / 'example.mht' loader = MHTMLLoader(str(file_path)) docs = loader.load() assert len(docs) == 1 metadata = docs[0].metadata content = docs[0].page_content assert metadata['title'] == 'LangChain' assert metadata['source'] == str(file_path) assert 'LANG CHAIN 🦜️🔗Official Ho...
@pytest.mark.requires('bs4', 'lxml') def test_mhtml_loader() ->None: """Test mhtml loader.""" file_path = EXAMPLES / 'example.mht' loader = MHTMLLoader(str(file_path)) docs = loader.load() assert len(docs) == 1 metadata = docs[0].metadata content = docs[0].page_content assert metadata['t...
Test mhtml loader.
test_chat_model_caching_params
prompt: List[BaseMessage] = [HumanMessage(content='How are you?')] response = 'Test response' cached_response = 'Cached test response' cached_message = AIMessage(content=cached_response) llm = FakeListChatModel(responses=[response]) if get_llm_cache(): get_llm_cache().update(prompt=dumps(prompt), llm_string=llm. ...
def test_chat_model_caching_params() ->None: prompt: List[BaseMessage] = [HumanMessage(content='How are you?')] response = 'Test response' cached_response = 'Cached test response' cached_message = AIMessage(content=cached_response) llm = FakeListChatModel(responses=[response]) if get_llm_cache()...
null
get_next_task
"""Get the next task.""" task_names = [t['task_name'] for t in self.task_list] incomplete_tasks = ', '.join(task_names) response = self.task_creation_chain.run(result=result, task_description= task_description, incomplete_tasks=incomplete_tasks, objective= objective, **kwargs) new_tasks = response.split('\n') r...
def get_next_task(self, result: str, task_description: str, objective: str, **kwargs: Any) ->List[Dict]: """Get the next task.""" task_names = [t['task_name'] for t in self.task_list] incomplete_tasks = ', '.join(task_names) response = self.task_creation_chain.run(result=result, task_description ...
Get the next task.
similarity_search_with_score
""" Return list of documents most similar to the query text and cosine distance in float for each. Lower score represents more similarity. """ if self._embedding is None: raise ValueError( '_embedding cannot be None for similarity_search_with_score') content: Dict[str, Any] =...
def similarity_search_with_score(self, query: str, k: int=4, **kwargs: Any ) ->List[Tuple[Document, float]]: """ Return list of documents most similar to the query text and cosine distance in float for each. Lower score represents more similarity. """ if self._embedding is No...
Return list of documents most similar to the query text and cosine distance in float for each. Lower score represents more similarity.
_import_google_serper_tool_GoogleSerperRun
from langchain_community.tools.google_serper.tool import GoogleSerperRun return GoogleSerperRun
def _import_google_serper_tool_GoogleSerperRun() ->Any: from langchain_community.tools.google_serper.tool import GoogleSerperRun return GoogleSerperRun
null
_completion_with_retry
return _make_request(llm, **_kwargs)
@retry_decorator def _completion_with_retry(**_kwargs: Any) ->Any: return _make_request(llm, **_kwargs)
null
test_timeout_kwargs
"""Test that timeout kwarg works.""" chat = AzureMLChatOnlineEndpoint(content_formatter=LlamaContentFormatter()) response = chat(messages=[HumanMessage(content='FOO')], timeout=60) assert isinstance(response, BaseMessage) assert isinstance(response.content, str)
def test_timeout_kwargs() ->None: """Test that timeout kwarg works.""" chat = AzureMLChatOnlineEndpoint(content_formatter=LlamaContentFormatter()) response = chat(messages=[HumanMessage(content='FOO')], timeout=60) assert isinstance(response, BaseMessage) assert isinstance(response.content, str)
Test that timeout kwarg works.
_get_relevant_documents
from sklearn.metrics.pairwise import cosine_similarity query_vec = self.vectorizer.transform([query]) results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,)) return_docs = [self.docs[i] for i in results.argsort()[-self.k:][::-1]] return return_docs
def _get_relevant_documents(self, query: str, *, run_manager: CallbackManagerForRetrieverRun) ->List[Document]: from sklearn.metrics.pairwise import cosine_similarity query_vec = self.vectorizer.transform([query]) results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,)) return_docs = ...
null
_create_retry_decorator
import openai errors = [openai.error.Timeout, openai.error.APIError, openai.error. APIConnectionError, openai.error.RateLimitError, openai.error. ServiceUnavailableError] return create_base_retry_decorator(error_types=errors, max_retries=llm. max_retries, run_manager=run_manager)
def _create_retry_decorator(llm: ChatOpenAI, run_manager: Optional[Union[ AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]]=None) ->Callable[ [Any], Any]: import openai errors = [openai.error.Timeout, openai.error.APIError, openai.error. APIConnectionError, openai.error.RateLimitError, o...
null
delete_session
"""Delete a session""" requests.delete(f'{self.url}/sessions/{self.session_id}/memory')
def delete_session(self) ->None: """Delete a session""" requests.delete(f'{self.url}/sessions/{self.session_id}/memory')
Delete a session
_search_rows
prompt_pd5 = self.get_md5(prompt) stmt = select(self.cache_schema.response).where(self.cache_schema. prompt_md5 == prompt_pd5).where(self.cache_schema.llm == llm_string).where( self.cache_schema.prompt == prompt).order_by(self.cache_schema.idx) with Session(self.engine) as session: return session.execute(st...
def _search_rows(self, prompt: str, llm_string: str) ->List[Row]: prompt_pd5 = self.get_md5(prompt) stmt = select(self.cache_schema.response).where(self.cache_schema. prompt_md5 == prompt_pd5).where(self.cache_schema.llm == llm_string ).where(self.cache_schema.prompt == prompt).order_by(self. ...
null
get_retriever_with_metadata
start_dt = x.get('start_date', None) end_dt = x.get('end_date', None) metadata_filter = x.get('metadata_filter', None) opt = {} if start_dt is not None: opt['start_date'] = start_dt if end_dt is not None: opt['end_date'] = end_dt if metadata_filter is not None: opt['filter'] = metadata_filter v = vectorstor...
def get_retriever_with_metadata(x): start_dt = x.get('start_date', None) end_dt = x.get('end_date', None) metadata_filter = x.get('metadata_filter', None) opt = {} if start_dt is not None: opt['start_date'] = start_dt if end_dt is not None: opt['end_date'] = end_dt if metadat...
null
_to_snake_case
"""Convert a name into snake_case.""" snake_case = '' for i, char in enumerate(name): if char.isupper() and i != 0: snake_case += '_' + char.lower() else: snake_case += char.lower() return snake_case
def _to_snake_case(name: str) ->str: """Convert a name into snake_case.""" snake_case = '' for i, char in enumerate(name): if char.isupper() and i != 0: snake_case += '_' + char.lower() else: snake_case += char.lower() return snake_case
Convert a name into snake_case.
delete
"""Delete by vector IDs. Args: ids: List of ids to delete. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ if ids is None: raise ValueError('No ids provided to delete.') for document_id in ids: ...
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->Optional[bool ]: """Delete by vector IDs. Args: ids: List of ids to delete. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ i...
Delete by vector IDs. Args: ids: List of ids to delete. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented.
_collection_is_ready
"""Checks whether the collection for this message history is ready to be queried """ return self.client.Collections.get(collection=self.collection ).data.status == 'READY'
def _collection_is_ready(self) ->bool: """Checks whether the collection for this message history is ready to be queried """ return self.client.Collections.get(collection=self.collection ).data.status == 'READY'
Checks whether the collection for this message history is ready to be queried
test_base_tool_inheritance_base_schema
"""Test schema is correctly inferred when inheriting from BaseTool.""" class _MockSimpleTool(BaseTool): name: str = 'simple_tool' description: str = 'A Simple Tool' def _run(self, tool_input: str) ->str: return f'{tool_input}' async def _arun(self, tool_input: str) ->str: raise NotImpl...
def test_base_tool_inheritance_base_schema() ->None: """Test schema is correctly inferred when inheriting from BaseTool.""" class _MockSimpleTool(BaseTool): name: str = 'simple_tool' description: str = 'A Simple Tool' def _run(self, tool_input: str) ->str: return f'{tool_i...
Test schema is correctly inferred when inheriting from BaseTool.
distance_strategy
if self._distance_strategy == DistanceStrategy.EUCLIDEAN: return self.EmbeddingStore.embedding.l2_distance elif self._distance_strategy == DistanceStrategy.COSINE: return self.EmbeddingStore.embedding.cosine_distance elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: return self.EmbeddingSt...
@property def distance_strategy(self) ->Any: if self._distance_strategy == DistanceStrategy.EUCLIDEAN: return self.EmbeddingStore.embedding.l2_distance elif self._distance_strategy == DistanceStrategy.COSINE: return self.EmbeddingStore.embedding.cosine_distance elif self._distance_strategy =...
null
from_llm
"""Create a chain from an LLM.""" critique_chain = LLMChain(llm=llm, prompt=critique_prompt) revision_chain = LLMChain(llm=llm, prompt=revision_prompt) return cls(chain=chain, critique_chain=critique_chain, revision_chain= revision_chain, **kwargs)
@classmethod def from_llm(cls, llm: BaseLanguageModel, chain: LLMChain, critique_prompt: BasePromptTemplate=CRITIQUE_PROMPT, revision_prompt: BasePromptTemplate =REVISION_PROMPT, **kwargs: Any) ->'ConstitutionalChain': """Create a chain from an LLM.""" critique_chain = LLMChain(llm=llm, prompt=critique_...
Create a chain from an LLM.
_run_output_key
if len(self.output_keys) != 1: raise ValueError( f'`run` not supported when there is not exactly one output key. Got {self.output_keys}.' ) return self.output_keys[0]
@property
def _run_output_key(self) -> str:
    """Return the chain's single output key.

    Raises:
        ValueError: If the chain does not have exactly one output key.
    """
    if len(self.output_keys) == 1:
        return self.output_keys[0]
    raise ValueError(
        f'`run` not supported when there is not exactly one output key. Got {self.output_keys}.'
        )
null
_load_examples
"""Load examples if necessary.""" if isinstance(config['examples'], list): pass elif isinstance(config['examples'], str): with open(config['examples']) as f: if config['examples'].endswith('.json'): examples = json.load(f) elif config['examples'].endswith(('.yaml', '.yml')): ...
def _load_examples(config: dict) ->dict: """Load examples if necessary.""" if isinstance(config['examples'], list): pass elif isinstance(config['examples'], str): with open(config['examples']) as f: if config['examples'].endswith('.json'): examples = json.load(f) ...
Load examples if necessary.
get
res = self.redis_client.getex(f'{self.full_key_prefix}:{key}', ex=self. recall_ttl) or default or '' logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'") return res
def get(self, key: str, default: Optional[str]=None) -> Optional[str]:
    """Fetch a value under the prefixed key, refreshing its TTL on read.

    Falls back to ``default``, then to the empty string, when the key
    is missing or empty.
    """
    full_key = f'{self.full_key_prefix}:{key}'
    # getex both reads the value and resets its expiry to recall_ttl.
    fetched = self.redis_client.getex(full_key, ex=self.recall_ttl)
    result = fetched or default or ''
    logger.debug(f"REDIS MEM get '{full_key}': '{result}'")
    return result
null
merge_documents
""" Merge the results of the retrievers. Args: query: The query to search for. Returns: A list of merged documents. """ retriever_docs = [retriever.get_relevant_documents(query, callbacks= run_manager.get_child('retriever_{}'.format(i + 1))) for i, retriever...
def merge_documents(self, query: str, run_manager: CallbackManagerForRetrieverRun) ->List[Document]: """ Merge the results of the retrievers. Args: query: The query to search for. Returns: A list of merged documents. """ retriever_docs = [retriever.g...
Merge the results of the retrievers. Args: query: The query to search for. Returns: A list of merged documents.
_get_google_trends
return GoogleTrendsQueryRun(api_wrapper=GoogleTrendsAPIWrapper(**kwargs))
def _get_google_trends(**kwargs: Any) -> BaseTool:
    """Build a Google Trends query tool backed by a freshly configured
    API wrapper.
    """
    wrapper = GoogleTrendsAPIWrapper(**kwargs)
    return GoogleTrendsQueryRun(api_wrapper=wrapper)
null
flatten_run
"""Utility to flatten a nest run object into a list of runs. :param run: The base run to flatten. :return: The flattened list of runs. """ def flatten(child_runs: List[Dict[str, Any]]) ->List[Dict[str, Any]]: """Utility to recursively flatten a list of child runs in a run. :param...
def flatten_run(self, run: Dict[str, Any]) ->List[Dict[str, Any]]: """Utility to flatten a nest run object into a list of runs. :param run: The base run to flatten. :return: The flattened list of runs. """ def flatten(child_runs: List[Dict[str, Any]]) ->List[Dict[str, Any]]: """...
Utility to flatten a nest run object into a list of runs. :param run: The base run to flatten. :return: The flattened list of runs.
test_functionality_multiline
"""Test correct functionality for ChatGPT multiline commands.""" chain = PythonREPL() tool = PythonREPLTool(python_repl=chain) output = tool.run(_SAMPLE_CODE) assert output == '30\n'
def test_functionality_multiline() -> None:
    """Test correct functionality for ChatGPT multiline commands."""
    repl = PythonREPL()
    tool = PythonREPLTool(python_repl=repl)
    result = tool.run(_SAMPLE_CODE)
    assert result == '30\n'
Test correct functionality for ChatGPT multiline commands.
update_document
"""Update a document in the cluster. Args: document_id (str): ID of the document to update. document (Document): Document to update. """ text = document.page_content metadata = document.metadata self._cluster.update(ids=[document_id], documents=[text], metadatas=[metadata])
def update_document(self, document_id: str, document: Document) ->None: """Update a document in the cluster. Args: document_id (str): ID of the document to update. document (Document): Document to update. """ text = document.page_content metadata = document.metadata ...
Update a document in the cluster. Args: document_id (str): ID of the document to update. document (Document): Document to update.
_get_message_metadata
"""Create and return metadata for a given message and channel.""" timestamp = message.get('ts', '') user = message.get('user', '') source = self._get_message_source(channel_name, user, timestamp) return {'source': source, 'channel': channel_name, 'timestamp': timestamp, 'user': user}
def _get_message_metadata(self, message: dict, channel_name: str) ->dict: """Create and return metadata for a given message and channel.""" timestamp = message.get('ts', '') user = message.get('user', '') source = self._get_message_source(channel_name, user, timestamp) return {'source': source, 'cha...
Create and return metadata for a given message and channel.
validate_environment
"""Validate that api key and python package exists in environment.""" github_repository = get_from_dict_or_env(values, 'github_repository', 'GITHUB_REPOSITORY') github_app_id = get_from_dict_or_env(values, 'github_app_id', 'GITHUB_APP_ID') github_app_private_key = get_from_dict_or_env(values, 'github_app_privat...
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" github_repository = get_from_dict_or_env(values, 'github_repository', 'GITHUB_REPOSITORY') github_app_id = get_from_dict_or_env(values, 'github_app_id', ...
Validate that api key and python package exists in environment.
test_partial_text_json_output_parser_diff
def input_iter(_: Any) ->Iterator[str]: for token in STREAMED_TOKENS: yield token chain = input_iter | SimpleJsonOutputParser(diff=True) assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON_DIFF
def test_partial_text_json_output_parser_diff() -> None:
    """Streaming the tokens through a diff-mode JSON parser must yield
    the expected sequence of JSON diffs.
    """

    def input_iter(_: Any) -> Iterator[str]:
        yield from STREAMED_TOKENS

    chain = input_iter | SimpleJsonOutputParser(diff=True)
    assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON_DIFF
null
from_chain_type
"""Load chain from chain type.""" _chain_kwargs = chain_type_kwargs or {} combine_documents_chain = load_qa_with_sources_chain(llm, chain_type= chain_type, **_chain_kwargs) return cls(combine_documents_chain=combine_documents_chain, **kwargs)
@classmethod def from_chain_type(cls, llm: BaseLanguageModel, chain_type: str='stuff', chain_type_kwargs: Optional[dict]=None, **kwargs: Any ) ->BaseQAWithSourcesChain: """Load chain from chain type.""" _chain_kwargs = chain_type_kwargs or {} combine_documents_chain = load_qa_with_sources_chain(llm,...
Load chain from chain type.
_get_metric
"""Get the metric function for the given metric name. Args: metric (EmbeddingDistance): The metric name. Returns: Any: The metric function. """ metrics = {EmbeddingDistance.COSINE: self._cosine_distance, EmbeddingDistance.EUCLIDEAN: self._euclidean_distance, Emb...
def _get_metric(self, metric: EmbeddingDistance) ->Any: """Get the metric function for the given metric name. Args: metric (EmbeddingDistance): The metric name. Returns: Any: The metric function. """ metrics = {EmbeddingDistance.COSINE: self._cosine_distance, ...
Get the metric function for the given metric name. Args: metric (EmbeddingDistance): The metric name. Returns: Any: The metric function.
_import_bearly_tool
from langchain_community.tools.bearly.tool import BearlyInterpreterTool return BearlyInterpreterTool
def _import_bearly_tool() -> Any:
    """Lazily import and return the ``BearlyInterpreterTool`` class."""
    from importlib import import_module

    module = import_module('langchain_community.tools.bearly.tool')
    return module.BearlyInterpreterTool
null
__init__
self.persist_path = persist_path
def __init__(self, persist_path: str) -> None:
    """Initialize with the filesystem path used for persistence.

    Args:
        persist_path: Path where data is persisted to / loaded from.
            # NOTE(review): exact file format is decided elsewhere in
            # the class — confirm against its load/save methods.
    """
    self.persist_path = persist_path
null
_import_google_palm
from langchain_community.llms.google_palm import GooglePalm return GooglePalm
def _import_google_palm() -> Any:
    """Lazily import and return the ``GooglePalm`` LLM class."""
    from importlib import import_module

    module = import_module('langchain_community.llms.google_palm')
    return module.GooglePalm
null
as_retriever
"""Return VectorStoreRetriever initialized from this VectorStore. Args: search_type (Optional[str]): Defines the type of search that the Retriever should perform. Can be "similarity" (default), "mmr", or "similarity_score_threshold". searc...
def as_retriever(self, **kwargs: Any) ->VectorStoreRetriever: """Return VectorStoreRetriever initialized from this VectorStore. Args: search_type (Optional[str]): Defines the type of search that the Retriever should perform. Can be "similarity" (default), "mmr", ...
Return VectorStoreRetriever initialized from this VectorStore. Args: search_type (Optional[str]): Defines the type of search that the Retriever should perform. Can be "similarity" (default), "mmr", or "similarity_score_threshold". search_kwargs (Optional[Dict]): Keyword arguments to pas...
parse_to_str
"""Parse the details result.""" result = '' for key, value in details.items(): result += 'The ' + str(key) + ' is: ' + str(value) + '\n' return result
def parse_to_str(self, details: dict) -> str:
    """Parse the details result.

    Args:
        details: Mapping of field name to value.

    Returns:
        One line per entry of the form ``The <key> is: <value>``,
        each terminated by a newline; empty string for an empty mapping.
    """
    # ''.join over a generator avoids quadratic `+=` string building.
    return ''.join(f'The {key} is: {value}\n' for key, value in details.items())
Parse the details result.
_get_run
try: run = self.run_map[str(run_id)] except KeyError as exc: raise TracerException(f'No indexed run ID {run_id}.') from exc if run_type is not None and run.run_type != run_type: raise TracerException( f'Found {run.run_type} run at ID {run_id}, but expected {run_type} run.' ) return run
def _get_run(self, run_id: UUID, run_type: Optional[str]=None) ->Run: try: run = self.run_map[str(run_id)] except KeyError as exc: raise TracerException(f'No indexed run ID {run_id}.') from exc if run_type is not None and run.run_type != run_type: raise TracerException( f...
null
test_pgvector_relevance_score
"""Test to make sure the relevance score is scaled to 0-1.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = PGVector.from_texts(texts=texts, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, connection_strin...
def test_pgvector_relevance_score() ->None: """Test to make sure the relevance score is scaled to 0-1.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = PGVector.from_texts(texts=texts, collection_name= 'test_collection', embedding=FakeEmbeddi...
Test to make sure the relevance score is scaled to 0-1.
_run
try: loop = asyncio.get_event_loop() except RuntimeError: loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) if loop.is_closed(): loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) if loop.is_running(): result_container = [] def thread_target() ->None: nonlocal r...
def _run(self, *args: Any, run_manager: Optional[CallbackManagerForToolRun] =None, **kwargs: Any) ->str: try: loop = asyncio.get_event_loop() except RuntimeError: loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) if loop.is_closed(): loop = asyncio.new_event_lo...
null
create_filter
md_filter_expr = self.config.field_name_mapping[md_key] if md_filter_expr is None: return '' expr = md_filter_expr.split(',') if len(expr) != 2: logger.error( f'filter {md_filter_expr} express is not correct, must contain mapping field and operator.' ) return '' md_filter_key = expr[0].strip...
def create_filter(md_key: str, md_value: Any) ->str: md_filter_expr = self.config.field_name_mapping[md_key] if md_filter_expr is None: return '' expr = md_filter_expr.split(',') if len(expr) != 2: logger.error( f'filter {md_filter_expr} express is not correct, must contain m...
null
getERC721Tx
url = ( f'https://api.etherscan.io/api?module=account&action=tokennfttx&address={self.account_address}&startblock={self.start_block}&endblock={self.end_block}&page={self.page}&offset={self.offset}&sort={self.sort}&apikey={self.api_key}' ) try: response = requests.get(url) response.raise_for_status() exc...
def getERC721Tx(self) ->List[Document]: url = ( f'https://api.etherscan.io/api?module=account&action=tokennfttx&address={self.account_address}&startblock={self.start_block}&endblock={self.end_block}&page={self.page}&offset={self.offset}&sort={self.sort}&apikey={self.api_key}' ) try: resp...
null
__init__
self.generator = generator
def __init__(self, generator: Any):
    """Initialize with the object to wrap.

    Args:
        generator: The generator (or other iterable) this instance
            wraps; stored as-is for later use.
    """
    self.generator = generator
null
_validate
from jsonschema import ValidationError, validate try: validate(instance=prediction, schema=schema) return {'score': True} except ValidationError as e: return {'score': False, 'reasoning': repr(e)}
def _validate(self, prediction: Any, schema: Any) -> dict:
    """Validate a prediction against a JSON schema.

    Returns:
        ``{'score': True}`` on success, otherwise ``{'score': False,
        'reasoning': <repr of the validation error>}``.
    """
    from jsonschema import ValidationError, validate

    try:
        validate(instance=prediction, schema=schema)
    except ValidationError as err:
        return {'score': False, 'reasoning': repr(err)}
    return {'score': True}
null
__init__
"""Initialize the WhatsAppChatLoader. Args: path (str): Path to the exported WhatsApp chat zip directory, folder, or file. To generate the dump, open the chat, click the three dots in the top right corner, and select "More". Then select "Export chat" and cho...
def __init__(self, path: str): """Initialize the WhatsAppChatLoader. Args: path (str): Path to the exported WhatsApp chat zip directory, folder, or file. To generate the dump, open the chat, click the three dots in the top right corner, and select "More". Then s...
Initialize the WhatsAppChatLoader. Args: path (str): Path to the exported WhatsApp chat zip directory, folder, or file. To generate the dump, open the chat, click the three dots in the top right corner, and select "More". Then select "Export chat" and choose "Without media".
test_similarity_search_with_score_with_limit_distance
"""Test similarity search with score with limit score.""" docsearch = Redis.from_texts(texts, ConsistentFakeEmbeddings(), redis_url= TEST_REDIS_URL) output = docsearch.similarity_search_with_score(texts[0], k=3, distance_threshold=0.1, return_metadata=True) assert len(output) == 2 for out, score in output: ...
def test_similarity_search_with_score_with_limit_distance(texts: List[str] ) ->None: """Test similarity search with score with limit score.""" docsearch = Redis.from_texts(texts, ConsistentFakeEmbeddings(), redis_url=TEST_REDIS_URL) output = docsearch.similarity_search_with_score(texts[0], k=3, ...
Test similarity search with score with limit score.
validate_environment
"""Validate that python package exists in environment.""" try: import text_generation values['client'] = text_generation.Client(values['inference_server_url' ], timeout=values['timeout'], **values['server_kwargs']) values['async_client'] = text_generation.AsyncClient(values[ 'inference_serve...
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that python package exists in environment.""" try: import text_generation values['client'] = text_generation.Client(values[ 'inference_server_url'], timeout=values['timeout'], **values[ 'ser...
Validate that python package exists in environment.
_on_chain_end
crumbs = self.get_breadcrumbs(run) run_type = run.run_type.capitalize() self.function_callback(f"{get_colored_text('[chain/end]', color='blue')} " + get_bolded_text( f"""[{crumbs}] [{elapsed(run)}] Exiting {run_type} run with output: """ ) + f"{try_json_stringify(run.outputs, '[outputs]')}")
def _on_chain_end(self, run: Run) ->None: crumbs = self.get_breadcrumbs(run) run_type = run.run_type.capitalize() self.function_callback( f"{get_colored_text('[chain/end]', color='blue')} " + get_bolded_text( f"""[{crumbs}] [{elapsed(run)}] Exiting {run_type} run with output: """ ...
null
__init__
log_method = getattr(logger, logging.getLevelName(level=log_level).lower()) def callback(text: str) ->None: log_method(text, extra=extra) super().__init__(function=callback, **kwargs)
def __init__(self, logger: logging.Logger, log_level: int=logging.INFO, extra: Optional[dict]=None, **kwargs: Any) ->None: log_method = getattr(logger, logging.getLevelName(level=log_level).lower()) def callback(text: str) ->None: log_method(text, extra=extra) super().__init__(function=callback...
null
service_url_from_db_params
"""Return connection string from database parameters.""" return f'postgresql://{user}:{password}@{host}:{port}/{database}'
@classmethod
def service_url_from_db_params(cls, host: str, port: int, database: str,
    user: str, password: str) -> str:
    """Return connection string from database parameters."""
    credentials = f'{user}:{password}'
    location = f'{host}:{port}'
    return f'postgresql://{credentials}@{location}/{database}'
Return connection string from database parameters.