method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
_import_slack_schedule_message
from langchain_community.tools.slack.schedule_message import SlackScheduleMessage return SlackScheduleMessage
def _import_slack_schedule_message() -> Any:
    """Lazily import and return the ``SlackScheduleMessage`` tool class."""
    from langchain_community.tools.slack.schedule_message import (
        SlackScheduleMessage,
    )

    return SlackScheduleMessage
null
_generate
"""Run the LLM on the given prompt and input.""" from aphrodite import SamplingParams params = {**self._default_params, **kwargs, 'stop': stop} if 'logit_bias' in params: del params['logit_bias'] sampling_params = SamplingParams(**params) outputs = self.client.generate(prompts, sampling_params) generations = [] for output in outputs: text = output.outputs[0].text generations.append([Generation(text=text)]) return LLMResult(generations=generations)
def _generate(
    self,
    prompts: List[str],
    stop: Optional[List[str]] = None,
    run_manager: Optional[CallbackManagerForLLMRun] = None,
    **kwargs: Any,
) -> LLMResult:
    """Run the LLM on the given prompt and input."""
    from aphrodite import SamplingParams

    # Merge defaults with call-time overrides; `stop` always wins.
    params = {**self._default_params, **kwargs, 'stop': stop}
    # Aphrodite's SamplingParams does not accept `logit_bias`; drop it.
    params.pop('logit_bias', None)
    sampling_params = SamplingParams(**params)
    request_outputs = self.client.generate(prompts, sampling_params)
    generations = [
        [Generation(text=out.outputs[0].text)] for out in request_outputs
    ]
    return LLMResult(generations=generations)
Run the LLM on the given prompt and input.
test_media_search
"""Test for NASA Image and Video Library media search""" nasa = NasaAPIWrapper() query = ( '{"q": "saturn", + "year_start": "2002", "year_end": "2010", "page": 2}') output = nasa.run('search_media', query) assert output is not None assert 'collection' in output
def test_media_search() -> None:
    """Test for NASA Image and Video Library media search"""
    nasa = NasaAPIWrapper()
    # BUG FIX: the query must be valid JSON. The original string contained a
    # stray `", + "` (leftover from a broken string concatenation) which made
    # the JSON unparseable.
    query = '{"q": "saturn", "year_start": "2002", "year_end": "2010", "page": 2}'
    output = nasa.run('search_media', query)
    assert output is not None
    assert 'collection' in output
Test for NASA Image and Video Library media search
__init__
self.document = guard_import('tcvectordb.model.document') tcvectordb = guard_import('tcvectordb') self.embedding_func = embedding self.index_params = index_params self.vdb_client = tcvectordb.VectorDBClient(url=connection_params.url, username=connection_params.username, key=connection_params.key, timeout =connection_params.timeout) db_list = self.vdb_client.list_databases() db_exist: bool = False for db in db_list: if database_name == db.database_name: db_exist = True break if db_exist: self.database = self.vdb_client.database(database_name) else: self.database = self.vdb_client.create_database(database_name) try: self.collection = self.database.describe_collection(collection_name) if drop_old: self.database.drop_collection(collection_name) self._create_collection(collection_name) except tcvectordb.exceptions.VectorDBException: self._create_collection(collection_name)
def __init__(
    self,
    embedding: Embeddings,
    connection_params: ConnectionParams,
    index_params: IndexParams = IndexParams(128),
    database_name: str = 'LangChainDatabase',
    collection_name: str = 'LangChainCollection',
    drop_old: Optional[bool] = False,
):
    """Connect to Tencent VectorDB and bind the target database and
    collection, creating either one when it does not already exist."""
    self.document = guard_import('tcvectordb.model.document')
    tcvectordb = guard_import('tcvectordb')
    self.embedding_func = embedding
    self.index_params = index_params
    self.vdb_client = tcvectordb.VectorDBClient(
        url=connection_params.url,
        username=connection_params.username,
        key=connection_params.key,
        timeout=connection_params.timeout,
    )
    existing = self.vdb_client.list_databases()
    if any(db.database_name == database_name for db in existing):
        self.database = self.vdb_client.database(database_name)
    else:
        self.database = self.vdb_client.create_database(database_name)
    try:
        self.collection = self.database.describe_collection(collection_name)
        if drop_old:
            # Caller asked to discard any existing data: rebuild from scratch.
            self.database.drop_collection(collection_name)
            self._create_collection(collection_name)
    except tcvectordb.exceptions.VectorDBException:
        # Collection does not exist yet -- create it.
        self._create_collection(collection_name)
null
test_faiss_vector_sim_with_score_threshold
"""Test vector similarity.""" texts = ['foo', 'bar', 'baz'] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore({index_to_id[0]: Document(page_content ='foo'), index_to_id[1]: Document(page_content='bar'), index_to_id[2]: Document(page_content='baz')}) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ query_vec = FakeEmbeddings().embed_query(text='foo') output = docsearch.similarity_search_by_vector(query_vec, k=2, score_threshold=0.2) assert output == [Document(page_content='foo')]
@pytest.mark.requires('faiss')
def test_faiss_vector_sim_with_score_threshold() -> None:
    """Test vector similarity."""
    corpus = ['foo', 'bar', 'baz']
    docsearch = FAISS.from_texts(corpus, FakeEmbeddings())
    ids = docsearch.index_to_docstore_id
    expected = InMemoryDocstore(
        {ids[i]: Document(page_content=text) for i, text in enumerate(corpus)}
    )
    assert docsearch.docstore.__dict__ == expected.__dict__
    query_vec = FakeEmbeddings().embed_query(text='foo')
    output = docsearch.similarity_search_by_vector(
        query_vec, k=2, score_threshold=0.2
    )
    assert output == [Document(page_content='foo')]
Test vector similarity.
load
""" Get logs from Datadog. Returns: A list of Document objects. - page_content - metadata - id - service - status - tags - timestamp """ try: from datadog_api_client import ApiClient from datadog_api_client.v2.api.logs_api import LogsApi from datadog_api_client.v2.model.logs_list_request import LogsListRequest from datadog_api_client.v2.model.logs_list_request_page import LogsListRequestPage from datadog_api_client.v2.model.logs_query_filter import LogsQueryFilter from datadog_api_client.v2.model.logs_sort import LogsSort except ImportError as ex: raise ImportError( 'Could not import datadog_api_client python package. Please install it with `pip install datadog_api_client`.' ) from ex now = datetime.now() twenty_minutes_before = now - timedelta(minutes=20) now_timestamp = int(now.timestamp() * 1000) twenty_minutes_before_timestamp = int(twenty_minutes_before.timestamp() * 1000) _from = (self.from_time if self.from_time is not None else twenty_minutes_before_timestamp) body = LogsListRequest(filter=LogsQueryFilter(query=self.query, _from=_from, to=f'{self.to_time if self.to_time is not None else now_timestamp}'), sort=LogsSort.TIMESTAMP_ASCENDING, page=LogsListRequestPage(limit=self. limit)) with ApiClient(configuration=self.configuration) as api_client: api_instance = LogsApi(api_client) response = api_instance.list_logs(body=body).to_dict() docs: List[Document] = [] for row in response['data']: docs.append(self.parse_log(row)) return docs
def load(self) -> List[Document]:
    """
    Get logs from Datadog.

    Returns:
        A list of Document objects.
            - page_content
            - metadata
                - id
                - service
                - status
                - tags
                - timestamp
    """
    try:
        from datadog_api_client import ApiClient
        from datadog_api_client.v2.api.logs_api import LogsApi
        from datadog_api_client.v2.model.logs_list_request import LogsListRequest
        from datadog_api_client.v2.model.logs_list_request_page import LogsListRequestPage
        from datadog_api_client.v2.model.logs_query_filter import LogsQueryFilter
        from datadog_api_client.v2.model.logs_sort import LogsSort
    except ImportError as ex:
        raise ImportError(
            'Could not import datadog_api_client python package. Please install it with `pip install datadog_api_client`.'
        ) from ex
    # Default query window: the last 20 minutes, in epoch milliseconds.
    now = datetime.now()
    default_start = int((now - timedelta(minutes=20)).timestamp() * 1000)
    default_end = int(now.timestamp() * 1000)
    start = self.from_time if self.from_time is not None else default_start
    end = self.to_time if self.to_time is not None else default_end
    body = LogsListRequest(
        filter=LogsQueryFilter(query=self.query, _from=start, to=f'{end}'),
        sort=LogsSort.TIMESTAMP_ASCENDING,
        page=LogsListRequestPage(limit=self.limit),
    )
    with ApiClient(configuration=self.configuration) as api_client:
        response = LogsApi(api_client).list_logs(body=body).to_dict()
    return [self.parse_log(row) for row in response['data']]
Get logs from Datadog. Returns: A list of Document objects. - page_content - metadata - id - service - status - tags - timestamp
update
"""Update cache based on prompt and llm_string."""
@abstractmethod
def update(
    self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
) -> None:
    """Update cache based on prompt and llm_string.

    Abstract hook: concrete cache implementations must store ``return_val``
    for the ``(prompt, llm_string)`` pair.
    """
Update cache based on prompt and llm_string.
__init__
"""Initialize with the LLM and a docstore.""" docstore_explorer = DocstoreExplorer(docstore) tools = [Tool(name='Search', func=docstore_explorer.search, description= 'Search for a term in the docstore.'), Tool(name='Lookup', func= docstore_explorer.lookup, description='Lookup a term in the docstore.')] agent = ReActDocstoreAgent.from_llm_and_tools(llm, tools) super().__init__(agent=agent, tools=tools, **kwargs)
def __init__(self, llm: BaseLanguageModel, docstore: Docstore, **kwargs: Any):
    """Initialize with the LLM and a docstore."""
    explorer = DocstoreExplorer(docstore)
    search_tool = Tool(
        name='Search',
        func=explorer.search,
        description='Search for a term in the docstore.',
    )
    lookup_tool = Tool(
        name='Lookup',
        func=explorer.lookup,
        description='Lookup a term in the docstore.',
    )
    tools = [search_tool, lookup_tool]
    agent = ReActDocstoreAgent.from_llm_and_tools(llm, tools)
    super().__init__(agent=agent, tools=tools, **kwargs)
Initialize with the LLM and a docstore.
from_parameter
"""Instantiate from an OpenAPI Parameter.""" location = APIPropertyLocation.from_str(parameter.param_in) cls._validate_location(location, parameter.name) cls._validate_content(parameter.content) schema = cls._get_schema(parameter, spec) schema_type = cls._get_schema_type(parameter, schema) default_val = schema.default if schema is not None else None return cls(name=parameter.name, location=location, default=default_val, description=parameter.description, required=parameter.required, type= schema_type)
@classmethod
def from_parameter(cls, parameter: Parameter, spec: OpenAPISpec) -> 'APIProperty':
    """Instantiate from an OpenAPI Parameter."""
    location = APIPropertyLocation.from_str(parameter.param_in)
    cls._validate_location(location, parameter.name)
    cls._validate_content(parameter.content)
    schema = cls._get_schema(parameter, spec)
    schema_type = cls._get_schema_type(parameter, schema)
    default_val = None if schema is None else schema.default
    return cls(
        name=parameter.name,
        location=location,
        default=default_val,
        description=parameter.description,
        required=parameter.required,
        type=schema_type,
    )
Instantiate from an OpenAPI Parameter.
__init__
"""Initialize callback handler.""" wandb = import_wandb() import_pandas() import_textstat() spacy = import_spacy() super().__init__() self.job_type = job_type self.project = project self.entity = entity self.tags = tags self.group = group self.name = name self.notes = notes self.visualize = visualize self.complexity_metrics = complexity_metrics self.stream_logs = stream_logs self.temp_dir = tempfile.TemporaryDirectory() self.run: wandb.sdk.wandb_run.Run = wandb.init(job_type=self.job_type, project=self.project, entity=self.entity, tags=self.tags, group=self. group, name=self.name, notes=self.notes) warning = ( 'DEPRECATION: The `WandbCallbackHandler` will soon be deprecated in favor of the `WandbTracer`. Please update your code to use the `WandbTracer` instead.' ) wandb.termwarn(warning, repeat=False) self.callback_columns: list = [] self.action_records: list = [] self.complexity_metrics = complexity_metrics self.visualize = visualize self.nlp = spacy.load('en_core_web_sm')
def __init__(
    self,
    job_type: Optional[str] = None,
    project: Optional[str] = 'langchain_callback_demo',
    entity: Optional[str] = None,
    tags: Optional[Sequence] = None,
    group: Optional[str] = None,
    name: Optional[str] = None,
    notes: Optional[str] = None,
    visualize: bool = False,
    complexity_metrics: bool = False,
    stream_logs: bool = False,
) -> None:
    """Initialize callback handler."""
    wandb = import_wandb()
    import_pandas()
    import_textstat()
    spacy = import_spacy()
    super().__init__()
    self.job_type = job_type
    self.project = project
    self.entity = entity
    self.tags = tags
    self.group = group
    self.name = name
    self.notes = notes
    # NOTE: `visualize` and `complexity_metrics` were previously assigned
    # twice (once here, once after the run was created); the duplicate
    # assignments have been removed.
    self.visualize = visualize
    self.complexity_metrics = complexity_metrics
    self.stream_logs = stream_logs
    self.temp_dir = tempfile.TemporaryDirectory()
    self.run: wandb.sdk.wandb_run.Run = wandb.init(
        job_type=self.job_type,
        project=self.project,
        entity=self.entity,
        tags=self.tags,
        group=self.group,
        name=self.name,
        notes=self.notes,
    )
    warning = (
        'DEPRECATION: The `WandbCallbackHandler` will soon be deprecated in favor of the `WandbTracer`. Please update your code to use the `WandbTracer` instead.'
    )
    wandb.termwarn(warning, repeat=False)
    self.callback_columns: list = []
    self.action_records: list = []
    self.nlp = spacy.load('en_core_web_sm')
Initialize callback handler.
on_agent_finish
"""Run on agent end."""
def on_agent_finish(
    self,
    finish: AgentFinish,
    *,
    run_id: UUID,
    parent_run_id: Optional[UUID] = None,
    **kwargs: Any,
) -> Any:
    """Run on agent end. Default implementation is a no-op."""
Run on agent end.
run
"""Run command with own globals/locals and returns anything printed. Timeout after the specified number of seconds.""" warn_once() queue: multiprocessing.Queue = multiprocessing.Queue() if timeout is not None: p = multiprocessing.Process(target=self.worker, args=(command, self. globals, self.locals, queue)) p.start() p.join(timeout) if p.is_alive(): p.terminate() return 'Execution timed out' else: self.worker(command, self.globals, self.locals, queue) return queue.get()
def run(self, command: str, timeout: Optional[int] = None) -> str:
    """Run command with own globals/locals and returns anything printed.
    Timeout after the specified number of seconds."""
    warn_once()
    queue: multiprocessing.Queue = multiprocessing.Queue()
    if timeout is None:
        # No timeout requested: execute in the current process.
        self.worker(command, self.globals, self.locals, queue)
    else:
        # Execute in a child process so a hung command can be terminated.
        proc = multiprocessing.Process(
            target=self.worker,
            args=(command, self.globals, self.locals, queue),
        )
        proc.start()
        proc.join(timeout)
        if proc.is_alive():
            proc.terminate()
            return 'Execution timed out'
    return queue.get()
Run command with own globals/locals and returns anything printed. Timeout after the specified number of seconds.
_chain_type
return 'llm_chain'
@property
def _chain_type(self) -> str:
    """Return the chain-type identifier string for this chain."""
    return 'llm_chain'
null
from_credentials
"""Instantiate embeddings from Elasticsearch credentials. Args: model_id (str): The model_id of the model deployed in the Elasticsearch cluster. input_field (str): The name of the key for the input text field in the document. Defaults to 'text_field'. es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect to. es_user: (str, optional): Elasticsearch username. es_password: (str, optional): Elasticsearch password. Example: .. code-block:: python from langchain_community.embeddings import ElasticsearchEmbeddings # Define the model ID and input field name (if different from default) model_id = "your_model_id" # Optional, only if different from 'text_field' input_field = "your_input_field" # Credentials can be passed in two ways. Either set the env vars # ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically # pulled in, or pass them in directly as kwargs. embeddings = ElasticsearchEmbeddings.from_credentials( model_id, input_field=input_field, # es_cloud_id="foo", # es_user="bar", # es_password="baz", ) documents = [ "This is an example document.", "Another example document to generate embeddings for.", ] embeddings_generator.embed_documents(documents) """ try: from elasticsearch import Elasticsearch from elasticsearch.client import MlClient except ImportError: raise ImportError( "elasticsearch package not found, please install with 'pip install elasticsearch'" ) es_cloud_id = es_cloud_id or get_from_env('es_cloud_id', 'ES_CLOUD_ID') es_user = es_user or get_from_env('es_user', 'ES_USER') es_password = es_password or get_from_env('es_password', 'ES_PASSWORD') es_connection = Elasticsearch(cloud_id=es_cloud_id, basic_auth=(es_user, es_password)) client = MlClient(es_connection) return cls(client, model_id, input_field=input_field)
@classmethod
def from_credentials(
    cls,
    model_id: str,
    *,
    es_cloud_id: Optional[str] = None,
    es_user: Optional[str] = None,
    es_password: Optional[str] = None,
    input_field: str = 'text_field',
) -> ElasticsearchEmbeddings:
    """Instantiate embeddings from Elasticsearch credentials.

    Args:
        model_id (str): The model_id of the model deployed in the
            Elasticsearch cluster.
        input_field (str): The name of the key for the input text field in
            the document. Defaults to 'text_field'.
        es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect
            to.
        es_user: (str, optional): Elasticsearch username.
        es_password: (str, optional): Elasticsearch password.

    Credentials can be passed directly as keyword arguments, or pulled
    from the ES_CLOUD_ID / ES_USER / ES_PASSWORD environment variables.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import ElasticsearchEmbeddings

            embeddings = ElasticsearchEmbeddings.from_credentials(
                "your_model_id",
                input_field="your_input_field",
            )
            embeddings.embed_documents(["This is an example document."])
    """
    try:
        from elasticsearch import Elasticsearch
        from elasticsearch.client import MlClient
    except ImportError:
        raise ImportError(
            "elasticsearch package not found, please install with 'pip install elasticsearch'"
        )
    # Fall back to environment variables for any credential not passed in.
    cloud_id = es_cloud_id or get_from_env('es_cloud_id', 'ES_CLOUD_ID')
    user = es_user or get_from_env('es_user', 'ES_USER')
    password = es_password or get_from_env('es_password', 'ES_PASSWORD')
    connection = Elasticsearch(cloud_id=cloud_id, basic_auth=(user, password))
    return cls(MlClient(connection), model_id, input_field=input_field)
Instantiate embeddings from Elasticsearch credentials. Args: model_id (str): The model_id of the model deployed in the Elasticsearch cluster. input_field (str): The name of the key for the input text field in the document. Defaults to 'text_field'. es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect to. es_user: (str, optional): Elasticsearch username. es_password: (str, optional): Elasticsearch password. Example: .. code-block:: python from langchain_community.embeddings import ElasticsearchEmbeddings # Define the model ID and input field name (if different from default) model_id = "your_model_id" # Optional, only if different from 'text_field' input_field = "your_input_field" # Credentials can be passed in two ways. Either set the env vars # ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically # pulled in, or pass them in directly as kwargs. embeddings = ElasticsearchEmbeddings.from_credentials( model_id, input_field=input_field, # es_cloud_id="foo", # es_user="bar", # es_password="baz", ) documents = [ "This is an example document.", "Another example document to generate embeddings for.", ] embeddings_generator.embed_documents(documents)
get_debug
"""Get the value of the `debug` global setting.""" try: import langchain with warnings.catch_warnings(): warnings.filterwarnings('ignore', message= 'Importing debug from langchain root module is no longer supported' ) old_debug = langchain.debug except ImportError: old_debug = False global _debug return _debug or old_debug
def get_debug() -> bool:
    """Get the value of the `debug` global setting."""
    global _debug
    try:
        import langchain

        with warnings.catch_warnings():
            # Reading the legacy root-module attribute emits a deprecation
            # warning; silence it while we fetch the old value.
            warnings.filterwarnings(
                'ignore',
                message='Importing debug from langchain root module is no longer supported',
            )
            legacy_debug = langchain.debug
    except ImportError:
        legacy_debug = False
    return _debug or legacy_debug
Get the value of the `debug` global setting.
get_num_tokens
"""Count approximate number of tokens""" return round(len(text) / 4.6)
def get_num_tokens(self, text: str) -> int:
    """Count approximate number of tokens"""
    # Heuristic used here: one token spans ~4.6 characters on average.
    chars_per_token = 4.6
    return round(len(text) / chars_per_token)
Count approximate number of tokens
test_prompt_with_chat_model_and_parser
prompt = SystemMessagePromptTemplate.from_template('You are a nice assistant.' ) + '{question}' chat = FakeListChatModel(responses=['foo, bar']) parser = CommaSeparatedListOutputParser() chain = prompt | chat | parser assert isinstance(chain, RunnableSequence) assert chain.first == prompt assert chain.middle == [chat] assert chain.last == parser assert dumps(chain, pretty=True) == snapshot prompt_spy = mocker.spy(prompt.__class__, 'invoke') chat_spy = mocker.spy(chat.__class__, 'invoke') parser_spy = mocker.spy(parser.__class__, 'invoke') tracer = FakeTracer() assert chain.invoke({'question': 'What is your name?'}, dict(callbacks=[ tracer])) == ['foo', 'bar'] assert prompt_spy.call_args.args[1] == {'question': 'What is your name?'} assert chat_spy.call_args.args[1] == ChatPromptValue(messages=[ SystemMessage(content='You are a nice assistant.'), HumanMessage( content='What is your name?')]) assert parser_spy.call_args.args[1] == AIMessage(content='foo, bar') assert tracer.runs == snapshot
@freeze_time('2023-01-01')
def test_prompt_with_chat_model_and_parser(
    mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
    prompt = (
        SystemMessagePromptTemplate.from_template('You are a nice assistant.')
        + '{question}'
    )
    chat = FakeListChatModel(responses=['foo, bar'])
    parser = CommaSeparatedListOutputParser()
    chain = prompt | chat | parser

    # Composition keeps the pieces in pipeline order.
    assert isinstance(chain, RunnableSequence)
    assert chain.first == prompt
    assert chain.middle == [chat]
    assert chain.last == parser
    assert dumps(chain, pretty=True) == snapshot

    prompt_spy = mocker.spy(prompt.__class__, 'invoke')
    chat_spy = mocker.spy(chat.__class__, 'invoke')
    parser_spy = mocker.spy(parser.__class__, 'invoke')
    tracer = FakeTracer()
    result = chain.invoke(
        {'question': 'What is your name?'}, dict(callbacks=[tracer])
    )
    assert result == ['foo', 'bar']
    assert prompt_spy.call_args.args[1] == {'question': 'What is your name?'}
    assert chat_spy.call_args.args[1] == ChatPromptValue(
        messages=[
            SystemMessage(content='You are a nice assistant.'),
            HumanMessage(content='What is your name?'),
        ]
    )
    assert parser_spy.call_args.args[1] == AIMessage(content='foo, bar')
    assert tracer.runs == snapshot
null
search
"""Return docs most similar to query using specified search type.""" if search_type == 'similarity': return self.similarity_search(query, **kwargs) elif search_type == 'mmr': return self.max_marginal_relevance_search(query, **kwargs) else: raise ValueError( f"search_type of {search_type} not allowed. Expected search_type to be 'similarity' or 'mmr'." )
def search(self, query: str, search_type: str, **kwargs: Any) -> List[Document]:
    """Return docs most similar to query using specified search type."""
    if search_type == 'similarity':
        return self.similarity_search(query, **kwargs)
    if search_type == 'mmr':
        return self.max_marginal_relevance_search(query, **kwargs)
    raise ValueError(
        f"search_type of {search_type} not allowed. Expected search_type to be 'similarity' or 'mmr'."
    )
Return docs most similar to query using specified search type.
test_api_key_masked_when_passed_via_constructor
llm = VolcEngineMaasBase(volc_engine_maas_ak='secret-volc-ak', volc_engine_maas_sk='secret-volc-sk') print(llm.volc_engine_maas_ak, end='') captured = capsys.readouterr() assert captured.out == '**********'
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture) -> None:
    """Constructor-supplied credentials must print masked, never in clear."""
    llm = VolcEngineMaasBase(
        volc_engine_maas_ak='secret-volc-ak',
        volc_engine_maas_sk='secret-volc-sk',
    )
    print(llm.volc_engine_maas_ak, end='')
    captured = capsys.readouterr()
    assert captured.out == '**********'
null
stream
result = self.invoke(input, config) for c in result: if self.sleep is not None: time.sleep(self.sleep) yield c
def stream(
    self,
    input: LanguageModelInput,
    config: Optional[RunnableConfig] = None,
    *,
    stop: Optional[List[str]] = None,
    **kwargs: Any,
) -> Iterator[str]:
    """Yield the result of ``invoke`` element by element, pausing
    ``self.sleep`` seconds before each element when it is set."""
    for chunk in self.invoke(input, config):
        if self.sleep is not None:
            time.sleep(self.sleep)
        yield chunk
null
_import_xinference
from langchain_community.llms.xinference import Xinference return Xinference
def _import_xinference() -> Any:
    """Lazily import and return the ``Xinference`` LLM class."""
    from langchain_community.llms.xinference import Xinference

    return Xinference
null
_get_message_data
msg = service.users().messages().get(userId='me', id=message['id']).execute() message_content = _extract_email_content(msg) in_reply_to = None email_data = msg['payload']['headers'] for values in email_data: name = values['name'] if name == 'In-Reply-To': in_reply_to = values['value'] if in_reply_to is None: raise ValueError thread_id = msg['threadId'] thread = service.users().threads().get(userId='me', id=thread_id).execute() messages = thread['messages'] response_email = None for message in messages: email_data = message['payload']['headers'] for values in email_data: if values['name'] == 'Message-ID': message_id = values['value'] if message_id == in_reply_to: response_email = message if response_email is None: raise ValueError starter_content = _extract_email_content(response_email) return ChatSession(messages=[starter_content, message_content])
def _get_message_data(service: Any, message: Any) -> ChatSession:
    """Build a ChatSession pairing a reply email with the message it answers.

    Args:
        service: Authenticated Gmail API service object.
        message: Message stub (must contain an ``id``) for the reply email.

    Returns:
        ChatSession whose messages are [replied-to content, reply content].

    Raises:
        ValueError: If the message has no ``In-Reply-To`` header, or the
            referenced message cannot be found in the thread.
    """
    msg = service.users().messages().get(userId='me', id=message['id']).execute()
    message_content = _extract_email_content(msg)
    # Find which message this one replies to.
    in_reply_to = None
    for header in msg['payload']['headers']:
        if header['name'] == 'In-Reply-To':
            in_reply_to = header['value']
    if in_reply_to is None:
        raise ValueError('Message is not a reply: no In-Reply-To header found.')
    thread_id = msg['threadId']
    thread = service.users().threads().get(userId='me', id=thread_id).execute()
    # BUG FIX: the loop variable previously shadowed the `message` parameter.
    response_email = None
    for thread_message in thread['messages']:
        for header in thread_message['payload']['headers']:
            if header['name'] == 'Message-ID' and header['value'] == in_reply_to:
                response_email = thread_message
    if response_email is None:
        raise ValueError('Replied-to message not found in thread.')
    starter_content = _extract_email_content(response_email)
    return ChatSession(messages=[starter_content, message_content])
null
validate_environment
"""Validate that group id and api key exists in environment.""" minimax_group_id = get_from_dict_or_env(values, 'minimax_group_id', 'MINIMAX_GROUP_ID') minimax_api_key = convert_to_secret_str(get_from_dict_or_env(values, 'minimax_api_key', 'MINIMAX_API_KEY')) values['minimax_group_id'] = minimax_group_id values['minimax_api_key'] = minimax_api_key return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
    """Validate that group id and api key exists in environment."""
    values['minimax_group_id'] = get_from_dict_or_env(
        values, 'minimax_group_id', 'MINIMAX_GROUP_ID'
    )
    values['minimax_api_key'] = convert_to_secret_str(
        get_from_dict_or_env(values, 'minimax_api_key', 'MINIMAX_API_KEY')
    )
    return values
Validate that group id and api key exists in environment.
load
try: from google.cloud import bigquery except ImportError as ex: raise ImportError( 'Could not import google-cloud-bigquery python package. Please install it with `pip install google-cloud-bigquery`.' ) from ex bq_client = bigquery.Client(credentials=self.credentials, project=self. project, client_info=get_client_info(module='bigquery')) if not bq_client.project: error_desc = ( 'GCP project for Big Query is not set! Either provide a `project` argument during BigQueryLoader instantiation, or set a default project with `gcloud config set project` command.' ) raise ValueError(error_desc) query_result = bq_client.query(self.query).result() docs: List[Document] = [] page_content_columns = self.page_content_columns metadata_columns = self.metadata_columns if page_content_columns is None: page_content_columns = [column.name for column in query_result.schema] if metadata_columns is None: metadata_columns = [] for row in query_result: page_content = '\n'.join(f'{k}: {v}' for k, v in row.items() if k in page_content_columns) metadata = {k: v for k, v in row.items() if k in metadata_columns} doc = Document(page_content=page_content, metadata=metadata) docs.append(doc) return docs
def load(self) -> List[Document]:
    """Load documents by running ``self.query`` against BigQuery.

    Returns:
        One Document per result row. Columns in ``page_content_columns``
        become "name: value" lines of page content; columns in
        ``metadata_columns`` become metadata.

    Raises:
        ImportError: If google-cloud-bigquery is not installed.
        ValueError: If no GCP project is configured.
    """
    try:
        from google.cloud import bigquery
    except ImportError as ex:
        raise ImportError(
            'Could not import google-cloud-bigquery python package. Please install it with `pip install google-cloud-bigquery`.'
        ) from ex
    bq_client = bigquery.Client(
        credentials=self.credentials,
        project=self.project,
        client_info=get_client_info(module='bigquery'),
    )
    if not bq_client.project:
        error_desc = (
            'GCP project for Big Query is not set! Either provide a `project` argument during BigQueryLoader instantiation, or set a default project with `gcloud config set project` command.'
        )
        raise ValueError(error_desc)
    query_result = bq_client.query(self.query).result()
    content_cols = self.page_content_columns
    meta_cols = self.metadata_columns
    if content_cols is None:
        # Default: every column contributes to the page content.
        content_cols = [column.name for column in query_result.schema]
    if meta_cols is None:
        meta_cols = []
    docs: List[Document] = []
    for row in query_result:
        page_content = '\n'.join(
            f'{k}: {v}' for k, v in row.items() if k in content_cols
        )
        metadata = {k: v for k, v in row.items() if k in meta_cols}
        docs.append(Document(page_content=page_content, metadata=metadata))
    return docs
null
_run
"""Run the tool.""" try: data = _parse_input(text) return self.requests_wrapper.patch(_clean_url(data['url']), data['data']) except Exception as e: return repr(e)
def _run(self, text: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
    """Run the tool."""
    try:
        payload = _parse_input(text)
        url = _clean_url(payload['url'])
        return self.requests_wrapper.patch(url, payload['data'])
    except Exception as e:
        # Report any failure as its repr instead of raising.
        return repr(e)
Run the tool.
clear
"""Clear session memory from the local file""" self.file_path.write_text(json.dumps([]))
def clear(self) -> None:
    """Clear session memory from the local file"""
    # Overwrite the history file with an empty JSON list.
    empty_history = json.dumps([])
    self.file_path.write_text(empty_history)
Clear session memory from the local file
_identifying_params
"""Get the identifying parameters.""" return {**{'hardware': self.hardware}}
@property
def _identifying_params(self) -> Mapping[str, Any]:
    """Get the identifying parameters."""
    # Simplified: the original wrapped this in a redundant `{**{...}}`
    # dict spread that produced the identical dict.
    return {'hardware': self.hardware}
Get the identifying parameters.
on_text
"""Do nothing.""" pass
def on_text(self, text: str, **kwargs: Any) -> None:
    """Do nothing. Intentional no-op hook."""
Do nothing.
test_add_embeddings
""" Test add_embeddings, which accepts pre-built embeddings instead of using inference for the texts. This allows you to separate the embeddings text and the page_content for better proximity between user's question and embedded text. For example, your embedding text can be a question, whereas page_content is the answer. """ embeddings = ConsistentFakeEmbeddings() text_input = ['foo1', 'foo2', 'foo3'] metadatas = [{'page': i} for i in range(len(text_input))] """In real use case, embedding_input can be questions for each text""" embedding_input = ['foo2', 'foo3', 'foo1'] embedding_vectors = embeddings.embed_documents(embedding_input) docsearch = ElasticsearchStore._create_cls_from_kwargs(embeddings, ** elasticsearch_connection, index_name=index_name) docsearch.add_embeddings(list(zip(text_input, embedding_vectors)), metadatas) output = docsearch.similarity_search('foo1', k=1) assert output == [Document(page_content='foo3', metadata={'page': 2})]
def test_add_embeddings(self, elasticsearch_connection: dict, index_name: str
    ) ->None:
    """
    Test add_embeddings, which accepts pre-built embeddings instead of using
    inference for the texts. This allows you to separate the embeddings text
    and the page_content for better proximity between user's question
    and embedded text.
    For example, your embedding text can be a question, whereas page_content
    is the answer.
    """
    embeddings = ConsistentFakeEmbeddings()
    text_input = ['foo1', 'foo2', 'foo3']
    metadatas = [{'page': i} for i in range(len(text_input))]
    """In real use case, embedding_input can be questions for each text"""
    embedding_input = ['foo2', 'foo3', 'foo1']
    embedding_vectors = embeddings.embed_documents(embedding_input)
    docsearch = ElasticsearchStore._create_cls_from_kwargs(embeddings, **
        elasticsearch_connection, index_name=index_name)
    docsearch.add_embeddings(list(zip(text_input, embedding_vectors)),
        metadatas)
    output = docsearch.similarity_search('foo1', k=1)
    # 'foo3' (index 2) was stored with the embedding of 'foo1' (see
    # embedding_input order), so querying 'foo1' must return that document.
    assert output == [Document(page_content='foo3', metadata={'page': 2})]
Test add_embeddings, which accepts pre-built embeddings instead of using inference for the texts. This allows you to separate the embeddings text and the page_content for better proximity between user's question and embedded text. For example, your embedding text can be a question, whereas page_content is the answer.
get_reduce_chain
"""For backwards compatibility.""" if 'combine_document_chain' in values: if 'reduce_documents_chain' in values: raise ValueError( 'Both `reduce_documents_chain` and `combine_document_chain` cannot be provided at the same time. `combine_document_chain` is deprecated, please only provide `reduce_documents_chain`' ) combine_chain = values['combine_document_chain'] collapse_chain = values.get('collapse_document_chain') reduce_chain = ReduceDocumentsChain(combine_documents_chain= combine_chain, collapse_documents_chain=collapse_chain) values['reduce_documents_chain'] = reduce_chain del values['combine_document_chain'] if 'collapse_document_chain' in values: del values['collapse_document_chain'] return values
@root_validator(pre=True)
def get_reduce_chain(cls, values: Dict) ->Dict:
    """For backwards compatibility.

    Migrates the deprecated `combine_document_chain` (plus optional
    `collapse_document_chain`) constructor args into the newer
    `reduce_documents_chain` field, rejecting inputs that provide both.
    """
    if 'combine_document_chain' in values:
        if 'reduce_documents_chain' in values:
            raise ValueError(
                'Both `reduce_documents_chain` and `combine_document_chain` cannot be provided at the same time. `combine_document_chain` is deprecated, please only provide `reduce_documents_chain`'
                )
        combine_chain = values['combine_document_chain']
        collapse_chain = values.get('collapse_document_chain')
        reduce_chain = ReduceDocumentsChain(combine_documents_chain=
            combine_chain, collapse_documents_chain=collapse_chain)
        values['reduce_documents_chain'] = reduce_chain
        # Remove the legacy keys so later validation only sees the new API.
        del values['combine_document_chain']
        if 'collapse_document_chain' in values:
            del values['collapse_document_chain']
    return values
For backwards compatibility.
bind
""" Bind arguments to a Runnable, returning a new Runnable. """ return RunnableBinding(bound=self, kwargs=kwargs, config={})
def bind(self, **kwargs: Any) ->Runnable[Input, Output]:
    """
    Bind arguments to a Runnable, returning a new Runnable.

    The returned wrapper merges `kwargs` into every invocation of this
    Runnable; the original object is left unmodified.
    """
    return RunnableBinding(bound=self, kwargs=kwargs, config={})
Bind arguments to a Runnable, returning a new Runnable.
multi_modal_rag_chain
""" Multi-modal RAG chain, :param retriever: A function that retrieves the necessary context for the model. :return: A chain of functions representing the multi-modal RAG process. """ model = ChatOllama(model='bakllava', temperature=0) chain = {'context': retriever | RunnableLambda(get_resized_images), 'question': RunnablePassthrough()} | RunnableLambda(img_prompt_func ) | model | StrOutputParser() return chain
def multi_modal_rag_chain(retriever):
    """
    Multi-modal RAG chain,

    :param retriever: A function that retrieves the necessary
                      context for the model.
    :return: A chain of functions representing the multi-modal RAG process.
    """
    # temperature=0 keeps answers deterministic for retrieval-augmented QA.
    model = ChatOllama(model='bakllava', temperature=0)
    # Pipeline: retrieve context -> resize images -> build image+text prompt
    # -> multi-modal LLM -> plain string answer.
    chain = {'context': retriever | RunnableLambda(get_resized_images),
        'question': RunnablePassthrough()} | RunnableLambda(img_prompt_func
        ) | model | StrOutputParser()
    return chain
Multi-modal RAG chain, :param retriever: A function that retrieves the necessary context for the model. :return: A chain of functions representing the multi-modal RAG process.
parse
"""Parse the output of an LLM call.""" result = {} for output_key, expected_format in self.output_key_to_format.items(): specific_regex = self.regex_pattern.format(re.escape(expected_format)) matches = re.findall(specific_regex, text) if not matches: raise ValueError( f'No match found for output key: {output_key} with expected format {expected_format} on text {text}' ) elif len(matches) > 1: raise ValueError( f'Multiple matches found for output key: {output_key} with expected format {expected_format} on text {text}' ) elif self.no_update_value is not None and matches[0 ] == self.no_update_value: continue else: result[output_key] = matches[0] return result
def parse(self, text: str) ->Dict[str, str]:
    """Parse the output of an LLM call.

    Each configured output key must match exactly once in ``text``;
    matches equal to ``no_update_value`` are skipped.
    """
    parsed: Dict[str, str] = {}
    for key, fmt in self.output_key_to_format.items():
        pattern = self.regex_pattern.format(re.escape(fmt))
        found = re.findall(pattern, text)
        if len(found) == 0:
            raise ValueError(
                f'No match found for output key: {key} with expected format {fmt} on text {text}'
                )
        if len(found) > 1:
            raise ValueError(
                f'Multiple matches found for output key: {key} with expected format {fmt} on text {text}'
                )
        value = found[0]
        if self.no_update_value is not None and value == self.no_update_value:
            continue
        parsed[key] = value
    return parsed
Parse the output of an LLM call.
lazy_load
"""Lazy load Documents from table.""" from pyairtable import Table table = Table(self.api_token, self.base_id, self.table_id) records = table.all() for record in records: yield Document(page_content=str(record), metadata={'source': self. base_id + '_' + self.table_id, 'base_id': self.base_id, 'table_id': self.table_id})
def lazy_load(self) ->Iterator[Document]:
    """Lazy load Documents from table."""
    from pyairtable import Table
    table = Table(self.api_token, self.base_id, self.table_id)
    # The source identifier is the same for every record; compute it once.
    source = self.base_id + '_' + self.table_id
    for record in table.all():
        metadata = {'source': source, 'base_id': self.base_id, 'table_id':
            self.table_id}
        yield Document(page_content=str(record), metadata=metadata)
Lazy load Documents from table.
description
return (base_description + '\n\n' + self.file_description).strip()
@property
def description(self) ->str:
    """Combine the shared base description with this tool's file-specific
    description, trimming surrounding whitespace."""
    # NOTE(review): `base_description` is resolved from an enclosing scope
    # not visible here — confirm where it is defined.
    return (base_description + '\n\n' + self.file_description).strip()
null
_add_vectors
"""Add vectors to Supabase table.""" rows: List[Dict[str, Any]] = [{'id': ids[idx], 'content': documents[idx]. page_content, 'embedding': embedding, 'metadata': documents[idx]. metadata} for idx, embedding in enumerate(vectors)] id_list: List[str] = [] for i in range(0, len(rows), chunk_size): chunk = rows[i:i + chunk_size] result = client.from_(table_name).upsert(chunk).execute() if len(result.data) == 0: raise Exception('Error inserting: No rows added') ids = [str(i.get('id')) for i in result.data if i.get('id')] id_list.extend(ids) return id_list
@staticmethod
def _add_vectors(client: supabase.client.Client, table_name: str, vectors:
    List[List[float]], documents: List[Document], ids: List[str],
    chunk_size: int) ->List[str]:
    """Add vectors to Supabase table.

    Rows are upserted in batches of `chunk_size`; returns the ids Supabase
    reports back for every inserted row.
    """
    rows: List[Dict[str, Any]] = [{'id': ids[idx], 'content': documents[idx
        ].page_content, 'embedding': embedding, 'metadata': documents[idx].
        metadata} for idx, embedding in enumerate(vectors)]
    id_list: List[str] = []
    for i in range(0, len(rows), chunk_size):
        chunk = rows[i:i + chunk_size]
        result = client.from_(table_name).upsert(chunk).execute()
        if len(result.data) == 0:
            raise Exception('Error inserting: No rows added')
        # NOTE: rebinds the `ids` parameter with the server-assigned ids of
        # this chunk; rows without an id are silently skipped.
        ids = [str(i.get('id')) for i in result.data if i.get('id')]
        id_list.extend(ids)
    return id_list
Add vectors to Supabase table.
embed_query
"""Call out to Cohere's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ return self.embed_documents([text])[0]
def embed_query(self, text: str) ->List[float]:
    """Call out to Cohere's embedding endpoint.

    Args:
        text: The text to embed.

    Returns:
        Embeddings for the text.
    """
    # Delegate to the batch API with a single-element batch.
    embeddings = self.embed_documents([text])
    return embeddings[0]
Call out to Cohere's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text.
test_init
mock_session_pool.return_value = MagicMock() nebula_graph = NebulaGraph(self.space, self.username, self.password, self. address, self.port, self.session_pool_size) self.assertEqual(nebula_graph.space, self.space) self.assertEqual(nebula_graph.username, self.username) self.assertEqual(nebula_graph.password, self.password) self.assertEqual(nebula_graph.address, self.address) self.assertEqual(nebula_graph.port, self.port) self.assertEqual(nebula_graph.session_pool_size, self.session_pool_size)
@patch('nebula3.gclient.net.SessionPool.SessionPool')
def test_init(self, mock_session_pool: Any) ->None:
    """NebulaGraph.__init__ should store every connection parameter as-is."""
    mock_session_pool.return_value = MagicMock()
    nebula_graph = NebulaGraph(self.space, self.username, self.password,
        self.address, self.port, self.session_pool_size)
    self.assertEqual(nebula_graph.space, self.space)
    self.assertEqual(nebula_graph.username, self.username)
    self.assertEqual(nebula_graph.password, self.password)
    self.assertEqual(nebula_graph.address, self.address)
    self.assertEqual(nebula_graph.port, self.port)
    self.assertEqual(nebula_graph.session_pool_size, self.session_pool_size)
null
__init__
self.generator = stream_generate_with_retry(_llm, **_kwargs)
def __init__(self, _llm: Tongyi, **_kwargs: Any):
    """Wrap Tongyi streaming generation in a retrying generator."""
    self.generator = stream_generate_with_retry(_llm, **_kwargs)
null
wrap
async def wrapped_f(*args: Any, **kwargs: Any) ->Callable: async for _ in async_retrying: return await func(*args, **kwargs) raise AssertionError('this is unreachable') return wrapped_f
def wrap(func: Callable) ->Callable:
    """Decorator body: run `func` under the enclosing `async_retrying`
    policy (a closure variable from the outer scope)."""

    async def wrapped_f(*args: Any, **kwargs: Any) ->Callable:
        # The retry iterator re-enters the body after a failure; a
        # successful call returns from inside the loop.
        async for _ in async_retrying:
            return await func(*args, **kwargs)
        # The retry policy either returns or raises; falling through the
        # loop is impossible by construction.
        raise AssertionError('this is unreachable')
    return wrapped_f
null
max_marginal_relevance_search_with_score_by_vector
"""Return docs selected using the maximal marginal relevance with score to embedding vector. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of Documents selected by maximal marginal relevance to the query and score for each. """ results = self.__query_collection(embedding=embedding, k=fetch_k, filter=filter ) embedding_list = [result.EmbeddingStore.embedding for result in results] mmr_selected = maximal_marginal_relevance(np.array(embedding, dtype=np. float32), embedding_list, k=k, lambda_mult=lambda_mult) candidates = self._results_to_docs_and_scores(results) return [r for i, r in enumerate(candidates) if i in mmr_selected]
def max_marginal_relevance_search_with_score_by_vector(self, embedding:
    List[float], k: int=4, fetch_k: int=20, lambda_mult: float=0.5, filter:
    Optional[Dict[str, str]]=None, **kwargs: Any) ->List[Tuple[Document,
    float]]:
    """Return docs selected using the maximal marginal relevance with score
        to embedding vector.

    Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

    Args:
        embedding: Embedding to look up documents similar to.
        k (int): Number of Documents to return. Defaults to 4.
        fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
            Defaults to 20.
        lambda_mult (float): Number between 0 and 1 that determines the degree
            of diversity among the results with 0 corresponding
            to maximum diversity and 1 to minimum diversity.
            Defaults to 0.5.
        filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

    Returns:
        List[Tuple[Document, float]]: List of Documents selected by maximal
            marginal relevance to the query and score for each.
    """
    # Over-fetch fetch_k candidates, then let MMR pick k of them.
    results = self.__query_collection(embedding=embedding, k=fetch_k,
        filter=filter)
    embedding_list = [result.EmbeddingStore.embedding for result in results]
    mmr_selected = maximal_marginal_relevance(np.array(embedding, dtype=np.
        float32), embedding_list, k=k, lambda_mult=lambda_mult)
    # Preserve the original candidate order while keeping only MMR picks.
    candidates = self._results_to_docs_and_scores(results)
    return [r for i, r in enumerate(candidates) if i in mmr_selected]
Return docs selected using the maximal marginal relevance with score to embedding vector. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of Documents selected by maximal marginal relevance to the query and score for each.
extract_paths
""" Args: query: cypher query """ paths = [] idx = 0 while (matched := self.path_pattern.findall(query[idx:])): matched = matched[0] matched = [m for i, m in enumerate(matched) if i not in [1, len(matched ) - 1]] path = ''.join(matched) idx = query.find(path) + len(path) - len(matched[-1]) paths.append(path) return paths
def extract_paths(self, query: str) ->'List[str]':
    """
    Args:
        query: cypher query

    Scans the query left to right with `self.path_pattern`, collecting each
    matched path and resuming just before the final kept fragment so that a
    node shared between two relationships can start the next match.
    """
    paths = []
    idx = 0
    while (matched := self.path_pattern.findall(query[idx:])):
        matched = matched[0]
        # Drop capture groups 1 and the last from the match tuple —
        # presumably delimiter groups; confirm against path_pattern.
        matched = [m for i, m in enumerate(matched) if i not in [1, len(
            matched) - 1]]
        path = ''.join(matched)
        idx = query.find(path) + len(path) - len(matched[-1])
        paths.append(path)
    return paths
Args: query: cypher query
test_dependency_string_invalids
with pytest.raises(ValueError): parse_dependency_string( 'git+https://github.com/efriis/myrepo.git#subdirectory=src@branch', None, None, None)
def test_dependency_string_invalids() ->None:
    """A git dependency string with `@ref` after `#subdirectory=` is invalid
    and must raise ValueError."""
    with pytest.raises(ValueError):
        parse_dependency_string(
            'git+https://github.com/efriis/myrepo.git#subdirectory=src@branch',
            None, None, None)
null
test_pandas_output_parser_row_first
expected_output = {'1': pd.Series({'chicken': 2, 'veggies': 4, 'steak': 8})} actual_output = parser.parse_folder('row:1') assert actual_output['1'].equals(expected_output['1'])
def test_pandas_output_parser_row_first() ->None:
    """Parsing 'row:1' should return row 1 of the fixture frame as a Series."""
    expected_output = {'1': pd.Series({'chicken': 2, 'veggies': 4, 'steak': 8})
        }
    # NOTE(review): method is named `parse_folder` — looks like it should be
    # the parser's `parse`; confirm against the parser fixture.
    actual_output = parser.parse_folder('row:1')
    assert actual_output['1'].equals(expected_output['1'])
null
test_check_package_version_fail
with pytest.raises(ValueError): check_package_version('PyYAML', lt_version='5.4.1')
def test_check_package_version_fail() ->None:
    """check_package_version raises when the installed PyYAML violates the
    `lt_version` bound."""
    with pytest.raises(ValueError):
        check_package_version('PyYAML', lt_version='5.4.1')
null
lazy_load
""" Get pages from OneNote notebooks. Returns: A list of Documents with attributes: - page_content - metadata - title """ self._auth() try: from bs4 import BeautifulSoup except ImportError: raise ImportError( 'beautifulsoup4 package not found, please install it with `pip install bs4`' ) if self.object_ids is not None: for object_id in self.object_ids: page_content_html = self._get_page_content(object_id) soup = BeautifulSoup(page_content_html, 'html.parser') page_title = '' title_tag = soup.title if title_tag: page_title = title_tag.get_text(strip=True) page_content = soup.get_text(separator='\n', strip=True) yield Document(page_content=page_content, metadata={'title': page_title}) else: request_url = self._url while request_url != '': response = requests.get(request_url, headers=self._headers, timeout=10) response.raise_for_status() pages = response.json() for page in pages['value']: page_id = page['id'] page_content_html = self._get_page_content(page_id) soup = BeautifulSoup(page_content_html, 'html.parser') page_title = '' title_tag = soup.title if title_tag: page_content = soup.get_text(separator='\n', strip=True) yield Document(page_content=page_content, metadata={'title': page_title}) if '@odata.nextLink' in pages: request_url = pages['@odata.nextLink'] else: request_url = ''
def lazy_load(self) ->Iterator[Document]:
    """
    Get pages from OneNote notebooks.

    Returns:
        A list of Documents with attributes:
            - page_content
            - metadata
                - title
    """
    self._auth()
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        raise ImportError(
            'beautifulsoup4 package not found, please install it with `pip install bs4`'
            )

    def _to_document(page_content_html: str) ->'Document':
        # Shared HTML -> Document conversion for both loading paths.
        # (The original pagination branch never set page_title and left
        # page_content unassigned when the page had no <title>, raising
        # NameError; this helper applies the correct logic everywhere.)
        soup = BeautifulSoup(page_content_html, 'html.parser')
        page_title = ''
        title_tag = soup.title
        if title_tag:
            page_title = title_tag.get_text(strip=True)
        page_content = soup.get_text(separator='\n', strip=True)
        return Document(page_content=page_content, metadata={'title':
            page_title})
    if self.object_ids is not None:
        # Explicit page ids: fetch each page directly.
        for object_id in self.object_ids:
            yield _to_document(self._get_page_content(object_id))
    else:
        # No ids given: walk the paginated Graph API listing.
        request_url = self._url
        while request_url != '':
            response = requests.get(request_url, headers=self._headers,
                timeout=10)
            response.raise_for_status()
            pages = response.json()
            for page in pages['value']:
                yield _to_document(self._get_page_content(page['id']))
            # Follow OData pagination until the server stops linking onward.
            request_url = pages.get('@odata.nextLink', '')
Get pages from OneNote notebooks. Returns: A list of Documents with attributes: - page_content - metadata - title
scrape_page
from urllib.parse import urljoin from bs4 import BeautifulSoup soup = BeautifulSoup(html_content, 'lxml') anchors = soup.find_all('a') if absolute_urls: base_url = page.url links = [urljoin(base_url, anchor.get('href', '')) for anchor in anchors] else: links = [anchor.get('href', '') for anchor in anchors] return json.dumps(links)
@staticmethod
def scrape_page(page: Any, html_content: str, absolute_urls: bool) ->str:
    """Extract every anchor href from `html_content` and return it as a JSON
    array, resolving against `page.url` when `absolute_urls` is set."""
    from urllib.parse import urljoin
    from bs4 import BeautifulSoup
    parsed = BeautifulSoup(html_content, 'lxml')
    hrefs = [anchor.get('href', '') for anchor in parsed.find_all('a')]
    if absolute_urls:
        links = [urljoin(page.url, href) for href in hrefs]
    else:
        links = hrefs
    return json.dumps(links)
null
test_hippo
docsearch = _hippo_from_texts() output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
def test_hippo() ->None:
    """Smoke test: nearest-neighbour search returns the matching document."""
    docsearch = _hippo_from_texts()
    output = docsearch.similarity_search('foo', k=1)
    assert output == [Document(page_content='foo')]
null
validate_environment
values['baichuan_api_base'] = get_from_dict_or_env(values, 'baichuan_api_base', 'BAICHUAN_API_BASE', DEFAULT_API_BASE) values['baichuan_api_key'] = convert_to_secret_str(get_from_dict_or_env( values, 'baichuan_api_key', 'BAICHUAN_API_KEY')) values['baichuan_secret_key'] = convert_to_secret_str(get_from_dict_or_env( values, 'baichuan_secret_key', 'BAICHUAN_SECRET_KEY')) return values
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Resolve Baichuan credentials from explicit values or environment.

    Falls back to the BAICHUAN_* environment variables; the api and secret
    keys are wrapped as SecretStr so they are masked in reprs and logs.
    """
    values['baichuan_api_base'] = get_from_dict_or_env(values,
        'baichuan_api_base', 'BAICHUAN_API_BASE', DEFAULT_API_BASE)
    values['baichuan_api_key'] = convert_to_secret_str(get_from_dict_or_env
        (values, 'baichuan_api_key', 'BAICHUAN_API_KEY'))
    values['baichuan_secret_key'] = convert_to_secret_str(get_from_dict_or_env
        (values, 'baichuan_secret_key', 'BAICHUAN_SECRET_KEY'))
    return values
null
embed_query
"""Call out to LocalAI's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embedding = self._embedding_func(text, engine=self.deployment) return embedding
def embed_query(self, text: str) ->List[float]:
    """Call out to LocalAI's embedding endpoint for embedding query text.

    Args:
        text: The text to embed.

    Returns:
        Embedding for the text.
    """
    return self._embedding_func(text, engine=self.deployment)
Call out to LocalAI's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embedding for the text.
read_file
""" Read a file from this agent's branch, defined by self.active_branch, which supports PR branches. Parameters: file_path(str): the file path Returns: str: The file decoded as a string, or an error message if not found """ try: file = self.github_repo_instance.get_contents(file_path, ref=self. active_branch) return file.decoded_content.decode('utf-8') except Exception as e: return ( f'File not found `{file_path}` on branch`{self.active_branch}`. Error: {str(e)}' )
def read_file(self, file_path: str) ->str:
    """
    Read a file from this agent's branch, defined by self.active_branch,
    which supports PR branches.

    Parameters:
        file_path(str): the file path

    Returns:
        str: The file decoded as a string, or an error message if not found
    """
    try:
        # Decode inside the try so lookup *and* decode failures both fall
        # through to the error message, matching the tool's string contract.
        blob = self.github_repo_instance.get_contents(file_path, ref=self.
            active_branch)
        return blob.decoded_content.decode('utf-8')
    except Exception as e:
        return (
            f'File not found `{file_path}` on branch`{self.active_branch}`. Error: {str(e)}'
            )
Read a file from this agent's branch, defined by self.active_branch, which supports PR branches. Parameters: file_path(str): the file path Returns: str: The file decoded as a string, or an error message if not found
evaluation_name
return 'COT Contextual Accuracy'
@property
def evaluation_name(self) ->str:
    """Human-readable name identifying this evaluator in results."""
    return 'COT Contextual Accuracy'
null
_get_llm
return AzureOpenAI(deployment_name=DEPLOYMENT_NAME, openai_api_version= OPENAI_API_VERSION, openai_api_base=OPENAI_API_BASE, openai_api_key= OPENAI_API_KEY, **kwargs)
def _get_llm(**kwargs: Any) ->AzureOpenAI:
    """Build an AzureOpenAI client from module-level test credentials,
    forwarding any extra constructor kwargs."""
    return AzureOpenAI(deployment_name=DEPLOYMENT_NAME, openai_api_version=
        OPENAI_API_VERSION, openai_api_base=OPENAI_API_BASE, openai_api_key
        =OPENAI_API_KEY, **kwargs)
null
parse_result
result = super().parse_result(result) return getattr(result, self.attr_name)
def parse_result(self, result: List[Generation], *, partial: bool=False) ->Any:
    """Parse generations with the parent parser, then return only the
    attribute named by `self.attr_name` from the parsed object."""
    result = super().parse_result(result)
    return getattr(result, self.attr_name)
null
test_ids_backwards_compatibility
"""Test that ids are backwards compatible.""" db = DeepLake(dataset_path='mem://test_path', embedding_function= FakeEmbeddings(), tensor_params=[{'name': 'ids', 'htype': 'text'}, { 'name': 'text', 'htype': 'text'}, {'name': 'embedding', 'htype': 'embedding'}, {'name': 'metadata', 'htype': 'json'}]) db.vectorstore.add(ids=['1', '2', '3'], text=['foo', 'bar', 'baz'], embedding=FakeEmbeddings().embed_documents(['foo', 'bar', 'baz']), metadata=[{'page': str(i)} for i in range(3)]) output = db.similarity_search('foo', k=1) assert len(output) == 1
def test_ids_backwards_compatibility() ->None:
    """Test that ids are backwards compatible."""
    # Older datasets used an `ids` tensor; create one explicitly via
    # tensor_params and verify search still works against it.
    db = DeepLake(dataset_path='mem://test_path', embedding_function=
        FakeEmbeddings(), tensor_params=[{'name': 'ids', 'htype': 'text'},
        {'name': 'text', 'htype': 'text'}, {'name': 'embedding', 'htype':
        'embedding'}, {'name': 'metadata', 'htype': 'json'}])
    db.vectorstore.add(ids=['1', '2', '3'], text=['foo', 'bar', 'baz'],
        embedding=FakeEmbeddings().embed_documents(['foo', 'bar', 'baz']),
        metadata=[{'page': str(i)} for i in range(3)])
    output = db.similarity_search('foo', k=1)
    assert len(output) == 1
Test that ids are backwards compatible.
_get_default_llm_chain
from langchain.chains.llm import LLMChain return LLMChain(llm=OpenAI(), prompt=prompt)
def _get_default_llm_chain(prompt: BasePromptTemplate) ->Any:
    """Build the fallback LLMChain (default OpenAI LLM) for `prompt`.

    Imported lazily to avoid a hard dependency at module import time.
    """
    from langchain.chains.llm import LLMChain
    return LLMChain(llm=OpenAI(), prompt=prompt)
null
_add_vectors
"""Add vectors to the Xata database.""" rows: List[Dict[str, Any]] = [] for idx, embedding in enumerate(vectors): row = {'content': documents[idx].page_content, 'embedding': embedding} if ids: row['id'] = ids[idx] for key, val in documents[idx].metadata.items(): if key not in ['id', 'content', 'embedding']: row[key] = val rows.append(row) chunk_size = 1000 id_list: List[str] = [] for i in range(0, len(rows), chunk_size): chunk = rows[i:i + chunk_size] r = self._client.records().bulk_insert(self._table_name, {'records': chunk} ) if r.status_code != 200: raise Exception(f'Error adding vectors to Xata: {r.status_code} {r}') id_list.extend(r['recordIDs']) return id_list
def _add_vectors(self, vectors: List[List[float]], documents: List[Document
    ], ids: Optional[List[str]]=None) ->List[str]:
    """Add vectors to the Xata database.

    Builds one record per document (content, embedding, plus flattened
    metadata columns) and bulk-inserts them in batches of 1000; returns the
    record ids assigned by Xata.
    """
    rows: List[Dict[str, Any]] = []
    for idx, embedding in enumerate(vectors):
        row = {'content': documents[idx].page_content, 'embedding': embedding}
        if ids:
            row['id'] = ids[idx]
        for key, val in documents[idx].metadata.items():
            # Reserved column names must not be overwritten by metadata.
            if key not in ['id', 'content', 'embedding']:
                row[key] = val
        rows.append(row)
    chunk_size = 1000
    id_list: List[str] = []
    for i in range(0, len(rows), chunk_size):
        chunk = rows[i:i + chunk_size]
        r = self._client.records().bulk_insert(self._table_name, {'records':
            chunk})
        if r.status_code != 200:
            raise Exception(
                f'Error adding vectors to Xata: {r.status_code} {r}')
        id_list.extend(r['recordIDs'])
    return id_list
Add vectors to the Xata database.
_resolve
"""Improve upon the best idea as chosen in critique step & return it.""" llm = self.resolver_llm if self.resolver_llm else self.llm prompt = self.resolve_prompt().format_prompt(**self.history. resolve_prompt_inputs()) callbacks = run_manager.handlers if run_manager else None if llm: resolution = self._get_text_from_llm_result(llm.generate_prompt([prompt ], stop, callbacks), step='resolve') _colored_text = get_colored_text(resolution, 'green') _text = 'Resolution:\n' + _colored_text if run_manager: run_manager.on_text(_text, end='\n', verbose=self.verbose) return resolution else: raise ValueError('llm is none, which should never happen')
def _resolve(self, stop: Optional[List[str]]=None, run_manager: Optional[
    CallbackManagerForChainRun]=None) ->str:
    """Improve upon the best idea as chosen in critique step & return it."""
    # Prefer a dedicated resolver LLM when configured; fall back to the
    # chain's main LLM otherwise.
    llm = self.resolver_llm if self.resolver_llm else self.llm
    prompt = self.resolve_prompt().format_prompt(**self.history.
        resolve_prompt_inputs())
    callbacks = run_manager.handlers if run_manager else None
    if llm:
        resolution = self._get_text_from_llm_result(llm.generate_prompt([
            prompt], stop, callbacks), step='resolve')
        _colored_text = get_colored_text(resolution, 'green')
        _text = 'Resolution:\n' + _colored_text
        if run_manager:
            run_manager.on_text(_text, end='\n', verbose=self.verbose)
        return resolution
    else:
        # Defensive: self.llm is required by the chain, so llm is never None.
        raise ValueError('llm is none, which should never happen')
Improve upon the best idea as chosen in critique step & return it.
set_verbose
"""Set a new value for the `verbose` global setting.""" try: import langchain with warnings.catch_warnings(): warnings.filterwarnings('ignore', message= 'Importing verbose from langchain root module is no longer supported' ) langchain.verbose = value except ImportError: pass global _verbose _verbose = value
def set_verbose(value: bool) ->None:
    """Set a new value for the `verbose` global setting."""
    try:
        # Keep the legacy `langchain.verbose` attribute in sync without
        # surfacing its deprecation warning to the caller.
        import langchain
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message=
                'Importing verbose from langchain root module is no longer supported'
                )
            langchain.verbose = value
    except ImportError:
        # langchain proper may not be installed; the local flag still works.
        pass
    global _verbose
    _verbose = value
Set a new value for the `verbose` global setting.
parse_partial_json
"""Parse a JSON string that may be missing closing braces. Args: s: The JSON string to parse. strict: Whether to use strict parsing. Defaults to False. Returns: The parsed JSON object as a Python dictionary. """ try: return json.loads(s, strict=strict) except json.JSONDecodeError: pass new_s = '' stack = [] is_inside_string = False escaped = False for char in s: if is_inside_string: if char == '"' and not escaped: is_inside_string = False elif char == '\n' and not escaped: char = '\\n' elif char == '\\': escaped = not escaped else: escaped = False elif char == '"': is_inside_string = True escaped = False elif char == '{': stack.append('}') elif char == '[': stack.append(']') elif char == '}' or char == ']': if stack and stack[-1] == char: stack.pop() else: return None new_s += char if is_inside_string: new_s += '"' while new_s: final_s = new_s for closing_char in reversed(stack): final_s += closing_char try: return json.loads(final_s, strict=strict) except json.JSONDecodeError: new_s = new_s[:-1] return json.loads(s, strict=strict)
def parse_partial_json(s: str, *, strict: bool=False) ->Any:
    """Parse a JSON string that may be missing closing braces.

    Args:
        s: The JSON string to parse.
        strict: Whether to use strict parsing. Defaults to False.

    Returns:
        The parsed JSON object as a Python dictionary.
    """
    # Fast path: the input is already valid JSON.
    try:
        return json.loads(s, strict=strict)
    except json.JSONDecodeError:
        pass
    repaired = []
    closers = []  # stack of closing characters we still owe
    in_string = False
    escape_next = False
    for ch in s:
        if in_string:
            if ch == '"' and not escape_next:
                in_string = False
            elif ch == '\n' and not escape_next:
                ch = '\\n'  # re-escape raw newlines inside string literals
            if ch == '\\':
                escape_next = not escape_next
            else:
                escape_next = False
        elif ch == '"':
            in_string = True
            escape_next = False
        elif ch == '{':
            closers.append('}')
        elif ch == '[':
            closers.append(']')
        elif ch == '}' or ch == ']':
            if closers and closers[-1] == ch:
                closers.pop()
            else:
                # Unbalanced closer: the input is not repairable.
                return None
        repaired.append(ch)
    partial = ''.join(repaired)
    if in_string:
        partial += '"'  # close a dangling string literal
    # Append the owed closers; if that still fails, drop trailing characters
    # one at a time until some prefix parses.
    while partial:
        candidate = partial + ''.join(reversed(closers))
        try:
            return json.loads(candidate, strict=strict)
        except json.JSONDecodeError:
            partial = partial[:-1]
    # Nothing parsed; re-raise the original error from the full string.
    return json.loads(s, strict=strict)
Parse a JSON string that may be missing closing braces. Args: s: The JSON string to parse. strict: Whether to use strict parsing. Defaults to False. Returns: The parsed JSON object as a Python dictionary.
on_retriever_end
"""Run when Retriever ends running."""
def on_retriever_end(self, documents: Sequence[Document], *, run_id: UUID,
    parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Any:
    """Run when Retriever ends running.

    Default implementation is a no-op; subclasses override to observe the
    retrieved `documents`.
    """
Run when Retriever ends running.
test_search
"""Test for Searching issues on JIRA""" jql = 'project = TP' jira = JiraAPIWrapper() output = jira.run('jql', jql) assert 'issues' in output
def test_search() ->None:
    """Test for Searching issues on JIRA"""
    # Requires live JIRA credentials in the environment (integration test).
    jql = 'project = TP'
    jira = JiraAPIWrapper()
    output = jira.run('jql', jql)
    assert 'issues' in output
Test for Searching issues on JIRA
test_api_key_masked_when_passed_from_env
"""Test initialization with an API key provided via an env variable""" monkeypatch.setenv('MINIMAX_API_KEY', 'secret-api-key') monkeypatch.setenv('MINIMAX_GROUP_ID', 'group_id') llm = Minimax() print(llm.minimax_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
def test_api_key_masked_when_passed_from_env(monkeypatch: MonkeyPatch,
    capsys: CaptureFixture) ->None:
    """Test initialization with an API key provided via an env variable"""
    monkeypatch.setenv('MINIMAX_API_KEY', 'secret-api-key')
    monkeypatch.setenv('MINIMAX_GROUP_ID', 'group_id')
    llm = Minimax()
    # Printing a SecretStr must emit the mask, never the raw key.
    print(llm.minimax_api_key, end='')
    captured = capsys.readouterr()
    assert captured.out == '**********'
Test initialization with an API key provided via an env variable
on_chain_error
"""Run when chain errors.""" self.metrics['step'] += 1 self.metrics['errors'] += 1
def on_chain_error(self, error: BaseException, **kwargs: Any) ->None:
    """Run when chain errors.

    Args:
        error: The exception raised by the chain (recorded only as a count).
        **kwargs: Additional callback arguments (unused).
    """
    # An error still advances the step counter alongside the error tally.
    for counter in ('step', 'errors'):
        self.metrics[counter] += 1
Run when chain errors.
test_azure_openai_embedding_with_empty_string
"""Test openai embeddings with empty string.""" import openai document = ['', 'abc'] embedding = _get_embeddings() output = embedding.embed_documents(document) assert len(output) == 2 assert len(output[0]) == 1536 expected_output = openai.Embedding.create(input='', model= 'text-embedding-ada-002')['data'][0]['embedding'] assert np.allclose(output[0], expected_output) assert len(output[1]) == 1536
@pytest.mark.skip(reason='Unblock scheduled testing. TODO: fix.')
def test_azure_openai_embedding_with_empty_string() ->None:
    """Test openai embeddings with empty string."""
    import openai
    docs = ['', 'abc']
    vectors = _get_embeddings().embed_documents(docs)
    assert len(vectors) == 2
    assert all(len(vec) == 1536 for vec in vectors)
    # The empty-string embedding must match what the raw API returns.
    reference = openai.Embedding.create(input='', model=
        'text-embedding-ada-002')['data'][0]['embedding']
    assert np.allclose(vectors[0], reference)
Test openai embeddings with empty string.
test_clickhouse_with_metadatas
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] config = ClickhouseSettings() config.table = 'test_clickhouse_with_metadatas' docsearch = Clickhouse.from_texts(texts=texts, embedding=FakeEmbeddings(), config=config, metadatas=metadatas) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo', metadata={'page': '0'})] docsearch.drop()
def test_clickhouse_with_metadatas() ->None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    settings = ClickhouseSettings()
    settings.table = 'test_clickhouse_with_metadatas'
    store = Clickhouse.from_texts(texts=texts, embedding=FakeEmbeddings(),
        config=settings, metadatas=[{'page': str(idx)} for idx in range(
        len(texts))])
    hits = store.similarity_search('foo', k=1)
    assert hits == [Document(page_content='foo', metadata={'page': '0'})]
    store.drop()
Test end to end construction and search.
_resolve_prompt
expected_input_vars = {'input', 'output', 'criteria', 'reference'} prompt_ = prompt or PROMPT_WITH_REFERENCES if expected_input_vars != set(prompt_.input_variables): raise ValueError( f'Input variables should be {expected_input_vars}, but got {prompt_.input_variables}' ) return prompt_
@classmethod
def _resolve_prompt(cls, prompt: Optional[BasePromptTemplate]=None
    ) ->BasePromptTemplate:
    """Return the prompt to use, validating its input variables."""
    chosen = prompt or PROMPT_WITH_REFERENCES
    required = {'input', 'output', 'criteria', 'reference'}
    if set(chosen.input_variables) != required:
        raise ValueError(
            f'Input variables should be {required}, but got {chosen.input_variables}'
            )
    return chosen
null
__init__
""" Initialize the transformer. This checks if the BeautifulSoup4 package is installed. If not, it raises an ImportError. """ try: import bs4 except ImportError: raise ImportError( 'BeautifulSoup4 is required for BeautifulSoupTransformer. Please install it with `pip install beautifulsoup4`.' )
def __init__(self) ->None:
    """Initialize the transformer, verifying BeautifulSoup4 is available.

    Raises:
        ImportError: If the ``beautifulsoup4`` package is not installed.
    """
    try:
        import bs4  # noqa: F401 -- imported only to confirm availability
    except ImportError:
        raise ImportError(
            'BeautifulSoup4 is required for BeautifulSoupTransformer. Please install it with `pip install beautifulsoup4`.'
            )
Initialize the transformer. This checks if the BeautifulSoup4 package is installed. If not, it raises an ImportError.
max_tokens_for_prompt
"""Calculate the maximum number of tokens possible to generate for a prompt. Args: prompt: The prompt to pass into the model. Returns: The maximum number of tokens to generate for a prompt. Example: .. code-block:: python max_tokens = openai.max_token_for_prompt("Tell me a joke.") """ num_tokens = self.get_num_tokens(prompt) return self.max_context_size - num_tokens
def max_tokens_for_prompt(self, prompt: str) ->int:
    """Calculate the maximum number of tokens possible to generate for a prompt.

    Args:
        prompt: The prompt to pass into the model.

    Returns:
        The maximum number of tokens to generate for a prompt.

    Example:
        .. code-block:: python

            max_tokens = openai.max_token_for_prompt("Tell me a joke.")
    """
    # Whatever the prompt consumes is no longer available for generation.
    return self.max_context_size - self.get_num_tokens(prompt)
Calculate the maximum number of tokens possible to generate for a prompt. Args: prompt: The prompt to pass into the model. Returns: The maximum number of tokens to generate for a prompt. Example: .. code-block:: python max_tokens = openai.max_token_for_prompt("Tell me a joke.")
test_sqlitevss_add_extra
"""Test end to end construction and MRR search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = _sqlite_vss_from_texts(metadatas=metadatas) docsearch.add_texts(texts, metadatas) output = docsearch.similarity_search('foo', k=10) assert len(output) == 6
@pytest.mark.requires('sqlite-vss')
def test_sqlitevss_add_extra() ->None:
    """Test end to end construction and MRR search."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': idx} for idx in range(len(texts))]
    store = _sqlite_vss_from_texts(metadatas=metadatas)
    # Adding the same texts a second time doubles the stored documents.
    store.add_texts(texts, metadatas)
    assert len(store.similarity_search('foo', k=10)) == 6
Test end to end construction and MRR search.
test_elasticsearch_with_user_agent
"""Test to make sure the user-agent is set correctly.""" texts = ['foo', 'bob', 'baz'] ElasticsearchStore.from_texts(texts, FakeEmbeddings(), es_connection= es_client, index_name=index_name) user_agent = es_client.transport.requests[0]['headers']['User-Agent'] pattern = '^langchain-py-vs/\\d+\\.\\d+\\.\\d+$' match = re.match(pattern, user_agent) assert match is not None, f"The string '{user_agent}' does not match the expected pattern."
def test_elasticsearch_with_user_agent(self, es_client: Any, index_name: str
    ) ->None:
    """Test to make sure the user-agent is set correctly."""
    ElasticsearchStore.from_texts(['foo', 'bob', 'baz'], FakeEmbeddings(),
        es_connection=es_client, index_name=index_name)
    agent = es_client.transport.requests[0]['headers']['User-Agent']
    expected = '^langchain-py-vs/\\d+\\.\\d+\\.\\d+$'
    assert re.match(expected, agent
        ) is not None, f"The string '{agent}' does not match the expected pattern."
Test to make sure the user-agent is set correctly.
test_invalid_api_key_error_handling
"""Test error handling with an invalid API key.""" with pytest.raises(GoogleGenerativeAIError): GoogleGenerativeAIEmbeddings(model=_MODEL, google_api_key='invalid_key' ).embed_query('Hello world')
def test_invalid_api_key_error_handling() ->None:
    """Test error handling with an invalid API key."""
    with pytest.raises(GoogleGenerativeAIError):
        embedder = GoogleGenerativeAIEmbeddings(model=_MODEL,
            google_api_key='invalid_key')
        embedder.embed_query('Hello world')
Test error handling with an invalid API key.
embed_query
"""Embed a query using EdenAI. Args: text: The text to embed. Returns: Embeddings for the text. """ return self._generate_embeddings([text])[0]
def embed_query(self, text: str) ->List[float]:
    """Embed a query using EdenAI.

    Args:
        text: The text to embed.

    Returns:
        Embeddings for the text.
    """
    # Reuse the batch endpoint for a single text and unwrap the result.
    batch = self._generate_embeddings([text])
    return batch[0]
Embed a query using EdenAI. Args: text: The text to embed. Returns: Embeddings for the text.
main
print('Hello World!') return 0
def main() ->int:
    """Print a greeting and report success to the caller."""
    greeting = 'Hello World!'
    print(greeting)
    return 0
null
handle_data
"""Hook when handling data.""" stripped_data = data.strip() if self.depth == 0 and stripped_data not in (',', ''): self.success = False if stripped_data: self.data = stripped_data
def handle_data(self, data: str) ->None:
    """Hook when handling data.

    Marks the parse as failed when non-trivial text appears at depth 0,
    and records the last non-empty text seen.
    """
    text = data.strip()
    # Text outside any element (other than separators) means bad markup.
    if self.depth == 0 and text not in (',', ''):
        self.success = False
    if text:
        self.data = text
Hook when handling data.
test_paginated_texts
documents = ['foo bar', 'foo baz', 'bar foo', 'baz foo', 'bar bar', 'foo foo', 'baz baz', 'baz bar'] model = VertexAIEmbeddings() output = model.embed_documents(documents) assert len(output) == 8 assert len(output[0]) == 768 assert model.model_name == model.client._model_id
def test_paginated_texts() ->None:
    """Embedding several documents should return one vector per input."""
    docs = ['foo bar', 'foo baz', 'bar foo', 'baz foo', 'bar bar',
        'foo foo', 'baz baz', 'baz bar']
    embedder = VertexAIEmbeddings()
    vectors = embedder.embed_documents(docs)
    assert len(vectors) == 8
    assert len(vectors[0]) == 768
    assert embedder.model_name == embedder.client._model_id
null
_identifying_params
return self._default_params
@property def _identifying_params(self) ->Dict[str, Any]: return self._default_params
null
test_decorated_function_schema_equivalent
"""Test that a BaseTool without a schema meets expectations.""" @tool def structured_tool_input(arg1: int, arg2: bool, arg3: Optional[dict]=None ) ->str: """Return the arguments directly.""" return f'{arg1} {arg2} {arg3}' assert isinstance(structured_tool_input, BaseTool) assert structured_tool_input.args_schema is not None assert structured_tool_input.args_schema.schema()['properties' ] == _MockSchema.schema()['properties'] == structured_tool_input.args
def test_decorated_function_schema_equivalent() ->None:
    """Test that a BaseTool without a schema meets expectations."""

    @tool
    def structured_tool_input(arg1: int, arg2: bool, arg3: Optional[dict]=None
        ) ->str:
        """Return the arguments directly."""
        return f'{arg1} {arg2} {arg3}'
    assert isinstance(structured_tool_input, BaseTool)
    assert structured_tool_input.args_schema is not None
    props = structured_tool_input.args_schema.schema()['properties']
    assert props == _MockSchema.schema()['properties']
    assert props == structured_tool_input.args
Test that a BaseTool without a schema meets expectations.
on_llm_start
self._reset_llm_token_stream()
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str]) ->None:
    """Run when an LLM starts; clears any buffered token-stream state.

    Args:
        serialized: Serialized representation of the LLM (unused here).
        prompts: Prompts passed to the LLM (unused here).
    """
    self._reset_llm_token_stream()
null
test_default_no_scorer_specified
_, PROMPT = setup() chain_llm = FakeListChatModel(responses=['hey', '100']) chain = pick_best_chain.PickBest.from_llm(llm=chain_llm, prompt=PROMPT, feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed= False, model=MockEncoder())) response = chain.run(User=rl_chain.BasedOn('Context'), action=rl_chain. ToSelectFrom(['0', '1', '2'])) assert response['response'] == 'hey' selection_metadata = response['selection_metadata'] assert selection_metadata.selected.score == 100.0
@pytest.mark.requires('vowpal_wabbit_next', 'sentence_transformers')
def test_default_no_scorer_specified() ->None:
    """Without an explicit scorer the LLM's own score response is used."""
    _, PROMPT = setup()
    llm = FakeListChatModel(responses=['hey', '100'])
    chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
        feature_embedder=pick_best_chain.PickBestFeatureEmbedder(
        auto_embed=False, model=MockEncoder()))
    response = chain.run(User=rl_chain.BasedOn('Context'), action=rl_chain
        .ToSelectFrom(['0', '1', '2']))
    assert response['response'] == 'hey'
    assert response['selection_metadata'].selected.score == 100.0
null
from_llm_and_api_docs
"""Load chain from just an LLM and the api docs.""" get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt) requests_wrapper = TextRequestsWrapper(headers=headers) get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt) return cls(api_request_chain=get_request_chain, api_answer_chain= get_answer_chain, requests_wrapper=requests_wrapper, api_docs=api_docs, limit_to_domains=limit_to_domains, **kwargs)
@classmethod
def from_llm_and_api_docs(cls, llm: BaseLanguageModel, api_docs: str,
    headers: Optional[dict]=None, api_url_prompt: BasePromptTemplate=
    API_URL_PROMPT, api_response_prompt: BasePromptTemplate=
    API_RESPONSE_PROMPT, limit_to_domains: Optional[Sequence[str]]=tuple(),
    **kwargs: Any) ->APIChain:
    """Load chain from just an LLM and the api docs."""
    # One chain builds the request URL, the other interprets the response.
    request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
    answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
    return cls(api_request_chain=request_chain, api_answer_chain=
        answer_chain, requests_wrapper=TextRequestsWrapper(headers=headers),
        api_docs=api_docs, limit_to_domains=limit_to_domains, **kwargs)
Load chain from just an LLM and the api docs.
__init__
"""Initialize with a list of loaders""" self.loaders = loaders
def __init__(self, loaders: List): """Initialize with a list of loaders""" self.loaders = loaders
Initialize with a list of loaders
save_deanonymizer_mapping
"""Save the deanonymizer mapping to a JSON or YAML file. Args: file_path: Path to file to save the mapping to. Example: .. code-block:: python anonymizer.save_deanonymizer_mapping(file_path="path/mapping.json") """ save_path = Path(file_path) if save_path.suffix not in ['.json', '.yaml']: raise ValueError(f'{save_path} must have an extension of .json or .yaml') save_path.parent.mkdir(parents=True, exist_ok=True) if save_path.suffix == '.json': with open(save_path, 'w') as f: json.dump(self.deanonymizer_mapping, f, indent=2) elif save_path.suffix == '.yaml': with open(save_path, 'w') as f: yaml.dump(self.deanonymizer_mapping, f, default_flow_style=False)
def save_deanonymizer_mapping(self, file_path: Union[Path, str]) ->None:
    """Save the deanonymizer mapping to a JSON or YAML file.

    Args:
        file_path: Path to file to save the mapping to.

    Example:
    .. code-block:: python

        anonymizer.save_deanonymizer_mapping(file_path="path/mapping.json")
    """
    target = Path(file_path)
    if target.suffix not in ('.json', '.yaml'):
        raise ValueError(f'{target} must have an extension of .json or .yaml')
    # Create intermediate directories so writing never fails on a fresh path.
    target.parent.mkdir(parents=True, exist_ok=True)
    with open(target, 'w') as sink:
        if target.suffix == '.json':
            json.dump(self.deanonymizer_mapping, sink, indent=2)
        else:
            yaml.dump(self.deanonymizer_mapping, sink, default_flow_style=False)
Save the deanonymizer mapping to a JSON or YAML file. Args: file_path: Path to file to save the mapping to. Example: .. code-block:: python anonymizer.save_deanonymizer_mapping(file_path="path/mapping.json")
__init__
"""Initialize with blackboard course url. The BbRouter cookie is required for most blackboard courses. Args: blackboard_course_url: Blackboard course url. bbrouter: BbRouter cookie. load_all_recursively: If True, load all documents recursively. basic_auth: Basic auth credentials. cookies: Cookies. continue_on_failure: whether to continue loading the sitemap if an error occurs loading a url, emitting a warning instead of raising an exception. Setting this to True makes the loader more robust, but also may result in missing data. Default: False Raises: ValueError: If blackboard course url is invalid. """ super().__init__(web_paths=blackboard_course_url, continue_on_failure= continue_on_failure) try: self.base_url = blackboard_course_url.split('/webapps/blackboard')[0] except IndexError: raise IndexError( 'Invalid blackboard course url. Please provide a url that starts with https://<blackboard_url>/webapps/blackboard' ) if basic_auth is not None: self.session.auth = basic_auth if cookies is None: cookies = {} cookies.update({'BbRouter': bbrouter}) self.session.cookies.update(cookies) self.load_all_recursively = load_all_recursively self.check_bs4()
def __init__(self, blackboard_course_url: str, bbrouter: str,
    load_all_recursively: bool=True, basic_auth: Optional[Tuple[str, str]]=
    None, cookies: Optional[dict]=None, continue_on_failure: bool=False):
    """Initialize with blackboard course url.

    The BbRouter cookie is required for most blackboard courses.

    Args:
        blackboard_course_url: Blackboard course url.
        bbrouter: BbRouter cookie.
        load_all_recursively: If True, load all documents recursively.
        basic_auth: Basic auth credentials.
        cookies: Cookies.
        continue_on_failure: whether to continue loading the sitemap if an
            error occurs loading a url, emitting a warning instead of raising
            an exception. Setting this to True makes the loader more robust,
            but also may result in missing data. Default: False

    Raises:
        ValueError: If blackboard course url is invalid.
    """
    super().__init__(web_paths=blackboard_course_url, continue_on_failure=
        continue_on_failure)
    # str.split()[0] can never raise IndexError, so the original
    # try/except IndexError was dead code and invalid urls were silently
    # accepted. Validate explicitly and raise ValueError as documented.
    if '/webapps/blackboard' not in blackboard_course_url:
        raise ValueError(
            'Invalid blackboard course url. Please provide a url that starts with https://<blackboard_url>/webapps/blackboard'
            )
    self.base_url = blackboard_course_url.split('/webapps/blackboard')[0]
    if basic_auth is not None:
        self.session.auth = basic_auth
    if cookies is None:
        cookies = {}
    cookies.update({'BbRouter': bbrouter})
    self.session.cookies.update(cookies)
    self.load_all_recursively = load_all_recursively
    self.check_bs4()
Initialize with blackboard course url. The BbRouter cookie is required for most blackboard courses. Args: blackboard_course_url: Blackboard course url. bbrouter: BbRouter cookie. load_all_recursively: If True, load all documents recursively. basic_auth: Basic auth credentials. cookies: Cookies. continue_on_failure: whether to continue loading the sitemap if an error occurs loading a url, emitting a warning instead of raising an exception. Setting this to True makes the loader more robust, but also may result in missing data. Default: False Raises: ValueError: If blackboard course url is invalid.
_get_document
"""Fetch content from page and return Document.""" page_content_raw = soup.find(self.content_selector) if not page_content_raw: return None content = page_content_raw.get_text(separator='\n').strip() title_if_exists = page_content_raw.find('h1') title = title_if_exists.text if title_if_exists else '' metadata = {'source': custom_url or self.web_path, 'title': title} return Document(page_content=content, metadata=metadata)
def _get_document(self, soup: Any, custom_url: Optional[str]=None) ->Optional[
    Document]:
    """Fetch content from page and return Document."""
    main_node = soup.find(self.content_selector)
    if not main_node:
        return None
    text = main_node.get_text(separator='\n').strip()
    # The first <h1> inside the content node, if any, becomes the title.
    heading = main_node.find('h1')
    title = heading.text if heading else ''
    return Document(page_content=text, metadata={'source': custom_url or
        self.web_path, 'title': title})
Fetch content from page and return Document.
get_index_name
"""Returns the index name Returns: Returns the index name """ return self._index_name
def get_index_name(self) ->str:
    """Returns the index name

    Returns:
        Returns the index name
    """
    name = self._index_name
    return name
Returns the index name Returns: Returns the index name
_convert_one_message_to_text
content = cast(str, message.content) if isinstance(message, ChatMessage): message_text = f'\n\n{message.role.capitalize()}: {content}' elif isinstance(message, HumanMessage): message_text = f'{human_prompt} {content}' elif isinstance(message, AIMessage): message_text = f'{ai_prompt} {content}' elif isinstance(message, SystemMessage): message_text = content else: raise ValueError(f'Got unknown type {message}') return message_text
def _convert_one_message_to_text(message: BaseMessage, human_prompt: str,
    ai_prompt: str) ->str:
    """Render a single chat message as prompt text.

    The isinstance checks keep the original ordering: ChatMessage first,
    since it carries its own role label.
    """
    content = cast(str, message.content)
    if isinstance(message, ChatMessage):
        return f'\n\n{message.role.capitalize()}: {content}'
    if isinstance(message, HumanMessage):
        return f'{human_prompt} {content}'
    if isinstance(message, AIMessage):
        return f'{ai_prompt} {content}'
    if isinstance(message, SystemMessage):
        return content
    raise ValueError(f'Got unknown type {message}')
null
get_schema
"""Returns the schema of the HugeGraph database""" return self.schema
@property
def get_schema(self) ->str:
    """Returns the schema of the HugeGraph database"""
    schema_text = self.schema
    return schema_text
Returns the schema of the HugeGraph database
elapsed
"""Get the elapsed time of a run. Args: run: any object with a start_time and end_time attribute. Returns: A string with the elapsed time in seconds or milliseconds if time is less than a second. """ elapsed_time = run.end_time - run.start_time milliseconds = elapsed_time.total_seconds() * 1000 if milliseconds < 1000: return f'{milliseconds:.0f}ms' return f'{milliseconds / 1000:.2f}s'
def elapsed(run: Any) ->str:
    """Get the elapsed time of a run.

    Args:
        run: any object with a start_time and end_time attribute.

    Returns:
        A string with the elapsed time in seconds or
            milliseconds if time is less than a second.
    """
    delta = run.end_time - run.start_time
    ms = delta.total_seconds() * 1000
    # Sub-second runs read better in milliseconds.
    return f'{ms:.0f}ms' if ms < 1000 else f'{ms / 1000:.2f}s'
Get the elapsed time of a run. Args: run: any object with a start_time and end_time attribute. Returns: A string with the elapsed time in seconds or milliseconds if time is less than a second.
mock_confluence
with patch('atlassian.Confluence') as mock_confluence: yield mock_confluence
@pytest.fixture
def mock_confluence():
    """Yield a patched ``atlassian.Confluence`` for the duration of a test."""
    with patch('atlassian.Confluence') as patched:
        yield patched
null
__init__
"""Initialize with Dingo client.""" try: import dingodb except ImportError: raise ImportError( 'Could not import dingo python package. Please install it with `pip install dingodb.' ) host = host if host is not None else ['172.20.31.10:13000'] if client is not None: dingo_client = client else: try: dingo_client = dingodb.DingoDB(user, password, host) except ValueError as e: raise ValueError(f'Dingo failed to connect: {e}') self._text_key = text_key self._client = dingo_client if index_name is not None and index_name not in dingo_client.get_index( ) and index_name.upper() not in dingo_client.get_index(): if self_id is True: dingo_client.create_index(index_name, dimension=dimension, auto_id= False) else: dingo_client.create_index(index_name, dimension=dimension) self._index_name = index_name self._embedding = embedding
def __init__(self, embedding: Embeddings, text_key: str, *, client: Any=
    None, index_name: Optional[str]=None, dimension: int=1024, host:
    Optional[List[str]]=None, user: str='root', password: str='123123',
    self_id: bool=False):
    """Initialize with Dingo client.

    Args:
        embedding: Embedding function used for queries and inserts.
        text_key: Metadata key under which raw text is stored.
        client: Pre-built DingoDB client; if omitted one is created.
        index_name: Index to use; created if it does not yet exist.
        dimension: Vector dimension used when creating a new index.
        host: DingoDB endpoints; defaults to ['172.20.31.10:13000'].
        user: DingoDB user name.
        password: DingoDB password.
        self_id: If True, create the index with auto_id disabled.

    Raises:
        ImportError: If the ``dingodb`` package is not installed.
        ValueError: If the client fails to connect.
    """
    try:
        import dingodb
    except ImportError:
        # Fixed: the original message had an unterminated backtick around
        # the pip command.
        raise ImportError(
            'Could not import dingo python package. Please install it with `pip install dingodb`.'
            )
    host = host if host is not None else ['172.20.31.10:13000']
    if client is not None:
        dingo_client = client
    else:
        try:
            dingo_client = dingodb.DingoDB(user, password, host)
        except ValueError as e:
            raise ValueError(f'Dingo failed to connect: {e}')
    self._text_key = text_key
    self._client = dingo_client
    if index_name is not None:
        # Fetch the index list once instead of calling get_index() twice.
        existing = dingo_client.get_index()
        if index_name not in existing and index_name.upper() not in existing:
            if self_id is True:
                dingo_client.create_index(index_name, dimension=dimension,
                    auto_id=False)
            else:
                dingo_client.create_index(index_name, dimension=dimension)
    self._index_name = index_name
    self._embedding = embedding
Initialize with Dingo client.
_import_bibtex
from langchain_community.utilities.bibtex import BibtexparserWrapper return BibtexparserWrapper
def _import_bibtex() ->Any:
    """Lazily import and return the ``BibtexparserWrapper`` class.

    The import lives inside the function so the dependency is only
    required when this utility is actually used.
    """
    from langchain_community.utilities.bibtex import BibtexparserWrapper
    return BibtexparserWrapper
null
requires_input
return False
@property
def requires_input(self) ->bool:
    """Whether this component needs caller-provided input; always False."""
    return False
null
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'llms', 'bedrock']
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object."""
    # Serialization namespace: langchain.llms.bedrock
    return ['langchain', 'llms', 'bedrock']
Get the namespace of the langchain object.
test_google_documentai_warehoure_retriever
"""In order to run this test, you should provide a project_id and user_ldap. Example: export USER_LDAP=... export PROJECT_NUMBER=... """ project_number = os.environ['PROJECT_NUMBER'] user_ldap = os.environ['USER_LDAP'] docai_wh_retriever = GoogleDocumentAIWarehouseRetriever(project_number= project_number) documents = docai_wh_retriever.get_relevant_documents( "What are Alphabet's Other Bets?", user_ldap=user_ldap) assert len(documents) > 0 for doc in documents: assert isinstance(doc, Document)
def test_google_documentai_warehoure_retriever() ->None:
    """In order to run this test, you should provide a project_id and user_ldap.

    Example:
    export USER_LDAP=...
    export PROJECT_NUMBER=...
    """
    user_ldap = os.environ['USER_LDAP']
    retriever = GoogleDocumentAIWarehouseRetriever(project_number=os.
        environ['PROJECT_NUMBER'])
    docs = retriever.get_relevant_documents(
        "What are Alphabet's Other Bets?", user_ldap=user_ldap)
    assert docs
    assert all(isinstance(doc, Document) for doc in docs)
In order to run this test, you should provide a project_id and user_ldap. Example: export USER_LDAP=... export PROJECT_NUMBER=...
test_criteria_eval_chain
chain = CriteriaEvalChain.from_llm(llm=FakeLLM(queries={'text': """The meaning of life Y"""}, sequential_responses=True), criteria={ 'my criterion': 'my criterion description'}) with pytest.warns(UserWarning, match=chain._skip_reference_warning): result = chain.evaluate_strings(prediction='my prediction', reference= 'my reference', input='my input') assert result['reasoning'] == 'The meaning of life'
def test_criteria_eval_chain() ->None:
    """A custom criterion without a reference warns but still evaluates."""
    fake_llm = FakeLLM(queries={'text': 'The meaning of life\nY'},
        sequential_responses=True)
    chain = CriteriaEvalChain.from_llm(llm=fake_llm, criteria={
        'my criterion': 'my criterion description'})
    with pytest.warns(UserWarning, match=chain._skip_reference_warning):
        result = chain.evaluate_strings(prediction='my prediction',
            reference='my reference', input='my input')
    assert result['reasoning'] == 'The meaning of life'
null
extract_functions_classes
raise NotImplementedError()
@abstractmethod
def extract_functions_classes(self) ->List[str]:
    """Extract functions and classes from the source as a list of strings.

    Abstract: concrete language-specific subclasses must implement this.
    """
    raise NotImplementedError()
null
_get_session_pool
assert all([self.username, self.password, self.address, self.port, self.space] ), 'Please provide all of the following parameters: username, password, address, port, space' from nebula3.Config import SessionPoolConfig from nebula3.Exception import AuthFailedException, InValidHostname from nebula3.gclient.net.SessionPool import SessionPool config = SessionPoolConfig() config.max_size = self.session_pool_size try: session_pool = SessionPool(self.username, self.password, self.space, [( self.address, self.port)]) except InValidHostname: raise ValueError( 'Could not connect to NebulaGraph database. Please ensure that the address and port are correct' ) try: session_pool.init(config) except AuthFailedException: raise ValueError( 'Could not connect to NebulaGraph database. Please ensure that the username and password are correct' ) except RuntimeError as e: raise ValueError(f'Error initializing session pool. Error: {e}') return session_pool
def _get_session_pool(self) ->Any: assert all([self.username, self.password, self.address, self.port, self .space] ), 'Please provide all of the following parameters: username, password, address, port, space' from nebula3.Config import SessionPoolConfig from nebula3.Exception import AuthFailedException, InValidHostname from nebula3.gclient.net.SessionPool import SessionPool config = SessionPoolConfig() config.max_size = self.session_pool_size try: session_pool = SessionPool(self.username, self.password, self.space, [(self.address, self.port)]) except InValidHostname: raise ValueError( 'Could not connect to NebulaGraph database. Please ensure that the address and port are correct' ) try: session_pool.init(config) except AuthFailedException: raise ValueError( 'Could not connect to NebulaGraph database. Please ensure that the username and password are correct' ) except RuntimeError as e: raise ValueError(f'Error initializing session pool. Error: {e}') return session_pool
null
run
"""Run the API.""" if mode == 'get_task': output = self.get_task(query) elif mode == 'get_task_attribute': output = self.get_task_attribute(query) elif mode == 'get_teams': output = self.get_authorized_teams() elif mode == 'create_task': output = self.create_task(query) elif mode == 'create_list': output = self.create_list(query) elif mode == 'create_folder': output = self.create_folder(query) elif mode == 'get_lists': output = self.get_lists() elif mode == 'get_folders': output = self.get_folders() elif mode == 'get_spaces': output = self.get_spaces() elif mode == 'update_task': output = self.update_task(query) elif mode == 'update_task_assignees': output = self.update_task_assignees(query) else: output = {'ModeError': f'Got unexpected mode {mode}.'} try: return json.dumps(output) except Exception: return str(output)
def run(self, mode: str, query: str) ->str:
    """Run the API.

    Args:
        mode: Name of the ClickUp operation to perform.
        query: Arguments for the operation (ignored by modes without input).

    Returns:
        The result serialized as JSON, or its ``str()`` form if it is not
        JSON-serializable.
    """
    # Dispatch table instead of an if/elif chain; lambdas defer both the
    # attribute lookup and the call until a mode actually matches.
    handlers = {'get_task': lambda: self.get_task(query),
        'get_task_attribute': lambda: self.get_task_attribute(query),
        'get_teams': lambda: self.get_authorized_teams(), 'create_task':
        lambda: self.create_task(query), 'create_list': lambda: self.
        create_list(query), 'create_folder': lambda: self.create_folder(
        query), 'get_lists': lambda: self.get_lists(), 'get_folders':
        lambda: self.get_folders(), 'get_spaces': lambda: self.get_spaces(
        ), 'update_task': lambda: self.update_task(query),
        'update_task_assignees': lambda: self.update_task_assignees(query)}
    handler = handlers.get(mode)
    output = handler() if handler is not None else {'ModeError':
        f'Got unexpected mode {mode}.'}
    try:
        return json.dumps(output)
    except Exception:
        return str(output)
Run the API.
_get_root_referenced_parameter
"""Get the root reference or err.""" from openapi_pydantic import Reference parameter = self._get_referenced_parameter(ref) while isinstance(parameter, Reference): parameter = self._get_referenced_parameter(parameter) return parameter
def _get_root_referenced_parameter(self, ref: Reference) ->Parameter:
    """Get the root reference or err."""
    from openapi_pydantic import Reference
    resolved = self._get_referenced_parameter(ref)
    # Follow reference chains until a concrete Parameter is reached.
    while isinstance(resolved, Reference):
        resolved = self._get_referenced_parameter(resolved)
    return resolved
Get the root reference or err.