method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
run
for task in self.tasks: print(f'running {task}') if task.pending() and self.check_dependency(task): self.update_args(task) task.run() if self.completed(): self.status = 'completed' elif self.failed(): self.status = 'failed' else: self.status = 'pending' return self.status
def run(self) ->str:
    """Execute every pending task whose dependencies are satisfied, then
    recompute and return the overall status ('completed'/'failed'/'pending').
    """
    for task in self.tasks:
        print(f'running {task}')
        ready = task.pending() and self.check_dependency(task)
        if ready:
            self.update_args(task)
            task.run()
    # Derive the aggregate status after the sweep over tasks.
    if self.completed():
        outcome = 'completed'
    elif self.failed():
        outcome = 'failed'
    else:
        outcome = 'pending'
    self.status = outcome
    return self.status
null
_call
"""Invoke on a single list of chat messages.""" inputs = self.custom_preprocess(messages) responses = self.get_generation(inputs=inputs, stop=stop, labels=labels, ** kwargs) outputs = self.custom_postprocess(responses) return outputs
def _call(self, messages: List[BaseMessage], stop: Optional[Sequence[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, labels:
    Optional[dict]=None, **kwargs: Any) ->str:
    """Invoke on a single list of chat messages.

    Preprocesses the messages, generates responses (honoring ``stop`` and
    ``labels``), and postprocesses the raw result into a string.
    """
    preprocessed = self.custom_preprocess(messages)
    raw_responses = self.get_generation(inputs=preprocessed, stop=stop,
        labels=labels, **kwargs)
    return self.custom_postprocess(raw_responses)
Invoke on a single list of chat messages.
embeddings
return self.embedding_func
@property
def embeddings(self) ->Embeddings:
    """Expose the wrapped embedding function."""
    return self.embedding_func
null
_extract_body_params
"""Extract the request body params from the deserialized input.""" body_params = None if self.param_mapping.body_params: body_params = {} for param in self.param_mapping.body_params: if param in args: body_params[param] = args.pop(param) return body_params
def _extract_body_params(self, args: Dict[str, str]) ->Optional[Dict[str, str]]:
    """Extract the request body params from the deserialized input.

    Matching keys are *removed* from ``args`` (via ``pop``). Returns ``None``
    when the mapping declares no body params at all.
    """
    if not self.param_mapping.body_params:
        return None
    extracted: Dict[str, str] = {}
    for param_name in self.param_mapping.body_params:
        if param_name in args:
            extracted[param_name] = args.pop(param_name)
    return extracted
Extract the request body params from the deserialized input.
get_format_instructions
return FORMAT_INSTRUCTIONS
def get_format_instructions(self) ->str:
    """Return the module-level format-instructions template."""
    instructions = FORMAT_INSTRUCTIONS
    return instructions
null
parse
retries = 0 while retries <= self.max_retries: try: return self.parser.parse_folder(completion) except OutputParserException as e: if retries == self.max_retries: raise e else: retries += 1 completion = self.retry_chain.run(instructions=self.parser. get_format_instructions(), completion=completion, error=repr(e) ) raise OutputParserException('Failed to parse')
def parse(self, completion: str) ->T:
    """Parse ``completion`` with the wrapped parser, retrying on failure.

    On each OutputParserException the completion is regenerated through
    ``self.retry_chain`` (up to ``self.max_retries`` attempts) before the
    final exception is re-raised.
    """
    retries = 0
    while retries <= self.max_retries:
        try:
            # BUG FIX: the wrapped output parser's entry point is ``parse``;
            # ``parse_folder`` does not exist on LangChain output parsers.
            return self.parser.parse(completion)
        except OutputParserException as e:
            if retries == self.max_retries:
                raise e
            retries += 1
            completion = self.retry_chain.run(instructions=self.parser.
                get_format_instructions(), completion=completion, error=repr(e))
    raise OutputParserException('Failed to parse')
null
_scopes
"""Return required scopes.""" return ['sharepoint', 'basic']
@property
def _scopes(self) ->List[str]:
    """Return required scopes."""
    required = ['sharepoint', 'basic']
    return required
Return required scopes.
load
return self.client.load(self.query)
def load(self) ->List[Document]:
    """Delegate loading of documents for the stored query to the client."""
    return self.client.load(self.query)
null
test_tracer_nested_run
"""Test tracer on a nested run.""" tracer = FakeTracer() chain_uuid = uuid4() tool_uuid = uuid4() llm_uuid1 = uuid4() llm_uuid2 = uuid4() for _ in range(10): tracer.on_chain_start(serialized={'name': 'chain'}, inputs={}, run_id= chain_uuid) tracer.on_tool_start(serialized={'name': 'tool'}, input_str='test', run_id=tool_uuid, parent_run_id=chain_uuid) tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=llm_uuid1, parent_run_id=tool_uuid) tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid1) tracer.on_tool_end('test', run_id=tool_uuid) tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=llm_uuid2, parent_run_id=chain_uuid) tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid2) tracer.on_chain_end(outputs={}, run_id=chain_uuid) compare_run = Run(id=str(chain_uuid), error=None, start_time=datetime.now( timezone.utc), end_time=datetime.now(timezone.utc), events=[{'name': 'start', 'time': datetime.now(timezone.utc)}, {'name': 'end', 'time': datetime.now(timezone.utc)}], extra={}, execution_order=1, child_execution_order=4, serialized={'name': 'chain'}, inputs={}, outputs={}, run_type='chain', trace_id=chain_uuid, dotted_order= f'20230101T000000000000Z{chain_uuid}', child_runs=[Run(id=tool_uuid, parent_run_id=chain_uuid, start_time=datetime.now(timezone.utc), end_time=datetime.now(timezone.utc), events=[{'name': 'start', 'time': datetime.now(timezone.utc)}, {'name': 'end', 'time': datetime.now( timezone.utc)}], extra={}, execution_order=2, child_execution_order=3, serialized={'name': 'tool'}, inputs=dict(input='test'), outputs=dict( output='test'), error=None, run_type='tool', trace_id=chain_uuid, dotted_order= f'20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{tool_uuid}', child_runs=[Run(id=str(llm_uuid1), parent_run_id=str(tool_uuid), error= None, start_time=datetime.now(timezone.utc), end_time=datetime.now( timezone.utc), events=[{'name': 'start', 'time': datetime.now(timezone. 
utc)}, {'name': 'end', 'time': datetime.now(timezone.utc)}], extra={}, execution_order=3, child_execution_order=3, serialized=SERIALIZED, inputs=dict(prompts=[]), outputs=LLMResult(generations=[[]]), run_type= 'llm', trace_id=chain_uuid, dotted_order= f'20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{tool_uuid}.20230101T000000000000Z{llm_uuid1}' )]), Run(id=str(llm_uuid2), parent_run_id=str(chain_uuid), error=None, start_time=datetime.now(timezone.utc), end_time=datetime.now(timezone. utc), events=[{'name': 'start', 'time': datetime.now(timezone.utc)}, { 'name': 'end', 'time': datetime.now(timezone.utc)}], extra={}, execution_order=4, child_execution_order=4, serialized=SERIALIZED, inputs=dict(prompts=[]), outputs=LLMResult(generations=[[]]), run_type= 'llm', trace_id=chain_uuid, dotted_order= f'20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{llm_uuid2}')]) assert tracer.runs[0] == compare_run assert tracer.runs == [compare_run] * 10
@freeze_time('2023-01-01')
def test_tracer_nested_run() ->None:
    """Test tracer on a nested run."""
    tracer = FakeTracer()
    chain_uuid = uuid4()
    tool_uuid = uuid4()
    llm_uuid1 = uuid4()
    llm_uuid2 = uuid4()
    # Replay the identical chain -> tool -> llm lifecycle ten times so the
    # final assertion can check the tracer recorded ten equal run trees.
    for _ in range(10):
        tracer.on_chain_start(serialized={'name': 'chain'}, inputs={},
            run_id=chain_uuid)
        tracer.on_tool_start(serialized={'name': 'tool'}, input_str='test',
            run_id=tool_uuid, parent_run_id=chain_uuid)
        tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=
            llm_uuid1, parent_run_id=tool_uuid)
        tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=
            llm_uuid1)
        tracer.on_tool_end('test', run_id=tool_uuid)
        tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=
            llm_uuid2, parent_run_id=chain_uuid)
        tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=
            llm_uuid2)
        tracer.on_chain_end(outputs={}, run_id=chain_uuid)
    # Expected tree: chain (order 1) -> [tool (2) -> llm1 (3), llm2 (4)].
    # Timestamps are pinned by @freeze_time, so datetime.now() is constant.
    compare_run = Run(id=str(chain_uuid), error=None, start_time=datetime.
        now(timezone.utc), end_time=datetime.now(timezone.utc), events=[{
        'name': 'start', 'time': datetime.now(timezone.utc)}, {'name':
        'end', 'time': datetime.now(timezone.utc)}], extra={},
        execution_order=1, child_execution_order=4, serialized={'name':
        'chain'}, inputs={}, outputs={}, run_type='chain', trace_id=
        chain_uuid, dotted_order=f'20230101T000000000000Z{chain_uuid}',
        child_runs=[Run(id=tool_uuid, parent_run_id=chain_uuid, start_time=
        datetime.now(timezone.utc), end_time=datetime.now(timezone.utc),
        events=[{'name': 'start', 'time': datetime.now(timezone.utc)}, {
        'name': 'end', 'time': datetime.now(timezone.utc)}], extra={},
        execution_order=2, child_execution_order=3, serialized={'name':
        'tool'}, inputs=dict(input='test'), outputs=dict(output='test'),
        error=None, run_type='tool', trace_id=chain_uuid, dotted_order=
        f'20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{tool_uuid}'
        , child_runs=[Run(id=str(llm_uuid1), parent_run_id=str(tool_uuid),
        error=None, start_time=datetime.now(timezone.utc), end_time=
        datetime.now(timezone.utc),
        events=[{'name': 'start', 'time': datetime.now(timezone.utc)}, {'name':
        'end', 'time': datetime.now(
        timezone.utc)}], extra={}, execution_order=3, child_execution_order
        =3, serialized=SERIALIZED, inputs=dict(prompts=[]), outputs=
        LLMResult(generations=[[]]), run_type='llm', trace_id=chain_uuid,
        dotted_order=
        f'20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{tool_uuid}.20230101T000000000000Z{llm_uuid1}'
        )]), Run(id=str(llm_uuid2), parent_run_id=str(chain_uuid), error=
        None, start_time=datetime.now(timezone.utc), end_time=datetime.now(
        timezone.utc), events=[{'name': 'start', 'time': datetime.now(
        timezone.utc)}, {'name': 'end', 'time': datetime.now(timezone.utc)}
        ], extra={}, execution_order=4, child_execution_order=4, serialized
        =SERIALIZED, inputs=dict(prompts=[]), outputs=LLMResult(generations
        =[[]]), run_type='llm', trace_id=chain_uuid, dotted_order=
        f'20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{llm_uuid2}'
        )])
    assert tracer.runs[0] == compare_run
    assert tracer.runs == [compare_run] * 10
Test tracer on a nested run.
output_keys
return [self.output_key]
@property
def output_keys(self) ->List[str]:
    """The chain's single output key, wrapped in a list."""
    return [self.output_key]
null
save
with open(self.model_path, 'wb') as f: logger.info(f'storing rl_chain model in: {self.model_path}') f.write(workspace.serialize()) if self.with_history: shutil.copyfile(self.model_path, self.folder / f'model-{self.get_tag()}.vw' )
def save(self, workspace: 'vw.Workspace') ->None:
    """Serialize the VW workspace to ``self.model_path``; when history is
    enabled, also keep a tag-stamped copy in ``self.folder``.
    """
    with open(self.model_path, 'wb') as model_file:
        logger.info(f'storing rl_chain model in: {self.model_path}')
        model_file.write(workspace.serialize())
    if self.with_history:
        tagged_copy = self.folder / f'model-{self.get_tag()}.vw'
        shutil.copyfile(self.model_path, tagged_copy)
null
test_get_relevant_documents
"""Test end to end construction and MRR search.""" from weaviate import Client texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] client = Client(weaviate_url) retriever = WeaviateHybridSearchRetriever(client=client, index_name= f'LangChain_{uuid4().hex}', text_key='text', attributes=['page']) for i, text in enumerate(texts): retriever.add_documents([Document(page_content=text, metadata=metadatas [i])]) output = retriever.get_relevant_documents('foo') assert output == [Document(page_content='foo', metadata={'page': 0}), Document(page_content='baz', metadata={'page': 2}), Document( page_content='bar', metadata={'page': 1})]
@pytest.mark.vcr(ignore_localhost=True)
def test_get_relevant_documents(self, weaviate_url: str) ->None:
    """Test end to end construction and MRR search."""
    from weaviate import Client
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': i} for i in range(len(texts))]
    client = Client(weaviate_url)
    # Unique index per test run to avoid cross-test interference.
    retriever = WeaviateHybridSearchRetriever(client=client,
        index_name=f'LangChain_{uuid4().hex}', text_key='text',
        attributes=['page'])
    for idx, body in enumerate(texts):
        retriever.add_documents([Document(page_content=body,
            metadata=metadatas[idx])])
    output = retriever.get_relevant_documents('foo')
    assert output == [Document(page_content='foo', metadata={'page': 0}),
        Document(page_content='baz', metadata={'page': 2}),
        Document(page_content='bar', metadata={'page': 1})]
Test end to end construction and MRR search.
embed_documents
"""Return simple embeddings.""" return [([float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)]) for i in range( len(texts))]
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Return simple embeddings.

    Each text maps to a vector of 1.0s whose last component is the text's
    index, so vectors are distinct per position.
    """
    vectors = []
    for index in range(len(texts)):
        vectors.append([1.0] * (ADA_TOKEN_COUNT - 1) + [float(index)])
    return vectors
Return simple embeddings.
__init__
""" Initialize the RunCollectorCallbackHandler. Parameters ---------- example_id : Optional[Union[UUID, str]], default=None The ID of the example being traced. It can be either a UUID or a string. """ super().__init__(**kwargs) self.example_id = UUID(example_id) if isinstance(example_id, str ) else example_id self.traced_runs: List[Run] = []
def __init__(self, example_id: Optional[Union[UUID, str]]=None, **kwargs: Any
    ) ->None:
    """
    Initialize the RunCollectorCallbackHandler.

    Parameters
    ----------
    example_id : Optional[Union[UUID, str]], default=None
        The ID of the example being traced. It can be either a UUID or a
        string.
    """
    super().__init__(**kwargs)
    # Normalize a string ID into a UUID; pass UUIDs (or None) through.
    if isinstance(example_id, str):
        self.example_id = UUID(example_id)
    else:
        self.example_id = example_id
    self.traced_runs: List[Run] = []
Initialize the RunCollectorCallbackHandler. Parameters ---------- example_id : Optional[Union[UUID, str]], default=None The ID of the example being traced. It can be either a UUID or a string.
test_from_texts_with_metadatas_euclidean_distance
texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?', 'The fence is purple.'] metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}] vectorstore = AzureCosmosDBVectorSearch.from_texts(texts, azure_openai_embeddings, metadatas=metadatas, collection=collection, index_name=INDEX_NAME) vectorstore.create_index(num_lists, dimensions, CosmosDBSimilarityType.L2) sleep(2) output = vectorstore.similarity_search('Sandwich', k=1) assert output assert output[0].page_content == 'What is a sandwich?' assert output[0].metadata['c'] == 1 vectorstore.delete_index()
def test_from_texts_with_metadatas_euclidean_distance(self,
    azure_openai_embeddings: OpenAIEmbeddings, collection: Any) ->None:
    """Build the store, index with L2 (euclidean) distance, and search."""
    texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?',
        'The fence is purple.']
    metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}]
    vectorstore = AzureCosmosDBVectorSearch.from_texts(texts,
        azure_openai_embeddings, metadatas=metadatas, collection=collection,
        index_name=INDEX_NAME)
    vectorstore.create_index(num_lists, dimensions, CosmosDBSimilarityType.L2)
    sleep(2)
    results = vectorstore.similarity_search('Sandwich', k=1)
    assert results
    top = results[0]
    assert top.page_content == 'What is a sandwich?'
    assert top.metadata['c'] == 1
    vectorstore.delete_index()
null
_identifying_params
"""Get the identifying parameters.""" return {'model': self.model, **self._default_params, **{k: v for k, v in self.__dict__.items() if k in RWKV._rwkv_param_names()}}
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters."""
    rwkv_attrs = {k: v for k, v in self.__dict__.items() if k in RWKV.
        _rwkv_param_names()}
    return {'model': self.model, **self._default_params, **rwkv_attrs}
Get the identifying parameters.
_get_question
"""Get the human message at the end of a list of input messages to a chat model.""" if not messages: raise ValueError( 'You should provide at least one message to start the chat!') question = messages[-1] if not isinstance(question, HumanMessage): raise ValueError( f'Last message in the list should be from human, got {question.type}.') return question
def _get_question(messages: List[BaseMessage]) ->HumanMessage:
    """Get the human message at the end of a list of input messages to a chat model."""
    if not messages:
        raise ValueError(
            'You should provide at least one message to start the chat!')
    last = messages[-1]
    if not isinstance(last, HumanMessage):
        raise ValueError(
            f'Last message in the list should be from human, got {last.type}.')
    return last
Get the human message at the end of a list of input messages to a chat model.
_Assign
self.fill() for target in t.targets: self.dispatch(target) self.write(' = ') self.dispatch(t.value)
def _Assign(self, t):
    """Emit an assignment node: each target followed by ' = ', then the value."""
    self.fill()
    for tgt in t.targets:
        self.dispatch(tgt)
        self.write(' = ')
    self.dispatch(t.value)
null
test_model_garden_generate
"""In order to run this test, you should provide endpoint names. Example: export FALCON_ENDPOINT_ID=... export LLAMA_ENDPOINT_ID=... export PROJECT=... """ endpoint_id = os.environ[endpoint_os_variable_name] project = os.environ['PROJECT'] location = 'europe-west4' llm = VertexAIModelGarden(endpoint_id=endpoint_id, project=project, result_arg=result_arg, location=location) output = llm.generate(['What is the meaning of life?', 'How much is 2+2']) assert isinstance(output, LLMResult) assert len(output.generations) == 2
@pytest.mark.parametrize('endpoint_os_variable_name,result_arg', [(
    'FALCON_ENDPOINT_ID', 'generated_text'), ('LLAMA_ENDPOINT_ID', None)])
def test_model_garden_generate(endpoint_os_variable_name: str, result_arg:
    Optional[str]) ->None:
    """In order to run this test, you should provide endpoint names.

    Example:
    export FALCON_ENDPOINT_ID=...
    export LLAMA_ENDPOINT_ID=...
    export PROJECT=...
    """
    endpoint_id = os.environ[endpoint_os_variable_name]
    project = os.environ['PROJECT']
    location = 'europe-west4'
    llm = VertexAIModelGarden(endpoint_id=endpoint_id, project=project,
        result_arg=result_arg, location=location)
    prompts = ['What is the meaning of life?', 'How much is 2+2']
    output = llm.generate(prompts)
    assert isinstance(output, LLMResult)
    assert len(output.generations) == 2
In order to run this test, you should provide endpoint names. Example: export FALCON_ENDPOINT_ID=... export LLAMA_ENDPOINT_ID=... export PROJECT=...
from_texts
""" Return VectorStore initialized from texts and embeddings. Postgres connection string is required "Either pass it as a parameter or set the TIMESCALE_SERVICE_URL environment variable. """ embeddings = embedding.embed_documents(list(texts)) return cls.__from(texts, embeddings, embedding, metadatas=metadatas, ids= ids, collection_name=collection_name, distance_strategy= distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs)
@classmethod
def from_texts(cls: Type[TimescaleVector], texts: List[str], embedding:
    Embeddings, metadatas: Optional[List[dict]]=None, collection_name: str=
    _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy
    =DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]]=None,
    pre_delete_collection: bool=False, **kwargs: Any) ->TimescaleVector:
    """
    Return VectorStore initialized from texts and embeddings.

    A Postgres connection string is required: either pass it as a parameter
    or set the TIMESCALE_SERVICE_URL environment variable.
    """
    # Embed everything up front, then delegate collection setup + storage.
    embeddings = embedding.embed_documents(list(texts))
    return cls.__from(texts, embeddings, embedding, metadatas=metadatas,
        ids=ids, collection_name=collection_name, distance_strategy=
        distance_strategy, pre_delete_collection=pre_delete_collection,
        **kwargs)
Return VectorStore initialized from texts and embeddings. Postgres connection string is required "Either pass it as a parameter or set the TIMESCALE_SERVICE_URL environment variable.
_assert_messages_are_equal
assert len(actual_messages) == len(expected_messages) for actual, expected in zip(actual_messages, expected_messages): assert actual.content == expected.content assert actual.additional_kwargs['sender'] == expected.additional_kwargs[ 'sender']
def _assert_messages_are_equal(actual_messages: Sequence[BaseMessage],
    expected_messages: Sequence[BaseMessage]) ->None:
    """Compare two message sequences by content and 'sender' metadata."""
    assert len(actual_messages) == len(expected_messages)
    for got, want in zip(actual_messages, expected_messages):
        assert got.content == want.content
        assert got.additional_kwargs['sender'] == want.additional_kwargs[
            'sender']
null
_llm_type
"""Return type of llm.""" return 'openai-chat'
@property
def _llm_type(self) ->str:
    """Return type of llm."""
    llm_type = 'openai-chat'
    return llm_type
Return type of llm.
on_agent_finish
"""Do nothing""" pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) ->None:
    """No-op: this handler deliberately ignores agent-finish events."""
    return None
Do nothing
describe
return self.__str__()
def describe(self) ->str:
    """Describe this object via its string representation."""
    return str(self)
null
_call
prompt, stop = self.prep_prompts(input_list, run_manager=run_manager) self.history.question = prompt.to_string() ideas = self._ideate(stop, run_manager) self.history.ideas = ideas critique = self._critique(stop, run_manager) self.history.critique = critique resolution = self._resolve(stop, run_manager) if self.return_intermediate_steps: return {'ideas': ideas, 'critique': critique, self.output_key: resolution} return {self.output_key: resolution}
def _call(self, input_list: Dict[str, Any], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, Any]:
    """Run the ideate -> critique -> resolve pipeline, recording each stage
    in ``self.history``; optionally include the intermediate steps.
    """
    prompt, stop = self.prep_prompts(input_list, run_manager=run_manager)
    self.history.question = prompt.to_string()
    ideas = self._ideate(stop, run_manager)
    self.history.ideas = ideas
    critique = self._critique(stop, run_manager)
    self.history.critique = critique
    resolution = self._resolve(stop, run_manager)
    if not self.return_intermediate_steps:
        return {self.output_key: resolution}
    return {'ideas': ideas, 'critique': critique, self.output_key: resolution}
null
test_neo4jvector_hybrid_from_existing
"""Test hybrid search with missing keyword_index_search.""" text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) Neo4jVector.from_embeddings(text_embeddings=text_embedding_pairs, embedding =FakeEmbeddingsWithOsDimension(), url=url, username=username, password= password, pre_delete_collection=True, search_type=SearchType.HYBRID) existing = Neo4jVector.from_existing_index(embedding= FakeEmbeddingsWithOsDimension(), url=url, username=username, password= password, index_name='vector', keyword_index_name='keyword', search_type=SearchType.HYBRID) output = existing.similarity_search('foo', k=1) assert output == [Document(page_content='foo')] drop_vector_indexes(existing)
def test_neo4jvector_hybrid_from_existing() ->None:
    """Test hybrid search with missing keyword_index_search."""
    text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
    text_embedding_pairs = list(zip(texts, text_embeddings))
    # Seed the store in hybrid mode, then re-open it from the existing index.
    Neo4jVector.from_embeddings(text_embeddings=text_embedding_pairs,
        embedding=FakeEmbeddingsWithOsDimension(), url=url,
        username=username, password=password, pre_delete_collection=True,
        search_type=SearchType.HYBRID)
    existing = Neo4jVector.from_existing_index(
        embedding=FakeEmbeddingsWithOsDimension(), url=url,
        username=username, password=password, index_name='vector',
        keyword_index_name='keyword', search_type=SearchType.HYBRID)
    docs = existing.similarity_search('foo', k=1)
    assert docs == [Document(page_content='foo')]
    drop_vector_indexes(existing)
Test hybrid search with missing keyword_index_search.
_get_google_finance
return GoogleFinanceQueryRun(api_wrapper=GoogleFinanceAPIWrapper(**kwargs))
def _get_google_finance(**kwargs: Any) ->BaseTool:
    """Build a GoogleFinanceQueryRun tool from API-wrapper kwargs."""
    wrapper = GoogleFinanceAPIWrapper(**kwargs)
    return GoogleFinanceQueryRun(api_wrapper=wrapper)
null
test__signature
secret_key = SecretStr('YOUR_SECRET_KEY') url = 'https://hunyuan.cloud.tencent.com/hyllm/v1/chat/completions' result = _signature(secret_key=secret_key, url=url, payload={'app_id': 'YOUR_APP_ID', 'secret_id': 'YOUR_SECRET_ID', 'query_id': 'test_query_id_cb5d8156-0ce2-45af-86b4-d02f5c26a142', 'messages': [{ 'role': 'user', 'content': 'You are a helpful assistant that translates English to French.Translate this sentence from English to French. I love programming.' }], 'temperature': 0.0, 'top_p': 0.8, 'stream': 1, 'timestamp': 1697738378, 'expired': 1697824778}) expected_output = 'MXBvqNCXyxJWfEyBwk1pYBVnxzo=' assert result == expected_output
def test__signature() ->None:
    """_signature must reproduce the known-good HMAC for a fixed payload."""
    secret_key = SecretStr('YOUR_SECRET_KEY')
    url = 'https://hunyuan.cloud.tencent.com/hyllm/v1/chat/completions'
    payload = {'app_id': 'YOUR_APP_ID', 'secret_id': 'YOUR_SECRET_ID',
        'query_id': 'test_query_id_cb5d8156-0ce2-45af-86b4-d02f5c26a142',
        'messages': [{'role': 'user', 'content':
        'You are a helpful assistant that translates English to French.Translate this sentence from English to French. I love programming.'
        }], 'temperature': 0.0, 'top_p': 0.8, 'stream': 1, 'timestamp':
        1697738378, 'expired': 1697824778}
    signature = _signature(secret_key=secret_key, url=url, payload=payload)
    assert signature == 'MXBvqNCXyxJWfEyBwk1pYBVnxzo='
null
__init__
try: import elasticsearch except ImportError: raise ImportError( 'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.' ) warnings.warn( 'ElasticKnnSearch will be removed in a future release.Use ElasticsearchStore instead. See Elasticsearch integration docs on how to upgrade.' ) self.embedding = embedding self.index_name = index_name self.query_field = query_field self.vector_query_field = vector_query_field if es_connection is not None: self.client = es_connection elif es_cloud_id and es_user and es_password: self.client = elasticsearch.Elasticsearch(cloud_id=es_cloud_id, basic_auth=(es_user, es_password)) else: raise ValueError( 'Either provide a pre-existing Elasticsearch connection, or valid credentials for creating a new connection.' )
def __init__(self, index_name: str, embedding: Embeddings, es_connection:
    Optional['Elasticsearch']=None, es_cloud_id: Optional[str]=None,
    es_user: Optional[str]=None, es_password: Optional[str]=None,
    vector_query_field: Optional[str]='vector', query_field: Optional[str]=
    'text'):
    """Set up the (deprecated) kNN search wrapper and its Elasticsearch client.

    Either ``es_connection`` or the full ``es_cloud_id``/``es_user``/
    ``es_password`` credential triple must be supplied.
    """
    try:
        import elasticsearch
    except ImportError:
        raise ImportError(
            'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.'
            )
    warnings.warn(
        'ElasticKnnSearch will be removed in a future release.Use ElasticsearchStore instead. See Elasticsearch integration docs on how to upgrade.'
        )
    self.embedding = embedding
    self.index_name = index_name
    self.query_field = query_field
    self.vector_query_field = vector_query_field
    # Prefer an explicit pre-built connection; otherwise build one from
    # cloud credentials.
    if es_connection is not None:
        self.client = es_connection
    elif es_cloud_id and es_user and es_password:
        self.client = elasticsearch.Elasticsearch(cloud_id=es_cloud_id,
            basic_auth=(es_user, es_password))
    else:
        raise ValueError(
            'Either provide a pre-existing Elasticsearch connection, or valid credentials for creating a new connection.'
            )
null
draw_ascii
from langchain_core.runnables.base import Runnable def node_data(node: Node) ->str: if isinstance(node.data, Runnable): try: data = str(node.data) if data.startswith('<') or data[0] != data[0].upper() or len(data .splitlines()) > 1: data = node.data.__class__.__name__ elif len(data) > 42: data = data[:42] + '...' except Exception: data = node.data.__class__.__name__ else: data = node.data.__name__ return data if not data.startswith('Runnable') else data[8:] return draw({node.id: node_data(node) for node in self.nodes.values()}, [( edge.source, edge.target) for edge in self.edges])
def draw_ascii(self) ->str:
    """Render the graph as ASCII art, labelling each node with a short name."""
    from langchain_core.runnables.base import Runnable

    def node_data(node: Node) ->str:
        # Derive a short printable label for one node.
        if isinstance(node.data, Runnable):
            try:
                data = str(node.data)
                # Fall back to the class name when str() looks like a default
                # repr (starts with '<'), is not capitalized, or spans
                # multiple lines; otherwise truncate long labels to 42 chars.
                if data.startswith('<') or data[0] != data[0].upper() or len(
                    data.splitlines()) > 1:
                    data = node.data.__class__.__name__
                elif len(data) > 42:
                    data = data[:42] + '...'
            except Exception:
                data = node.data.__class__.__name__
        else:
            # Non-Runnable payloads are assumed to carry __name__
            # (e.g. plain functions/classes) — TODO confirm.
            data = node.data.__name__
        # Strip the 'Runnable' prefix (8 chars) for brevity.
        return data if not data.startswith('Runnable') else data[8:]
    return draw({node.id: node_data(node) for node in self.nodes.values()},
        [(edge.source, edge.target) for edge in self.edges])
null
get_schema
return db.get_table_info()
def get_schema(_):
    """Return the table info of the captured ``db``; the argument is ignored."""
    return db.get_table_info()
null
on_retriever_start
"""Run when Retriever starts running.""" parent_run_id_ = str(parent_run_id) if parent_run_id else None execution_order = self._get_execution_order(parent_run_id_) start_time = datetime.now(timezone.utc) if metadata: kwargs.update({'metadata': metadata}) retrieval_run = Run(id=run_id, name=name or 'Retriever', parent_run_id= parent_run_id, serialized=serialized, inputs={'query': query}, extra= kwargs, events=[{'name': 'start', 'time': start_time}], start_time= start_time, execution_order=execution_order, child_execution_order= execution_order, tags=tags, child_runs=[], run_type='retriever') self._start_trace(retrieval_run) self._on_retriever_start(retrieval_run) return retrieval_run
def on_retriever_start(self, serialized: Dict[str, Any], query: str, *,
    run_id: UUID, parent_run_id: Optional[UUID]=None, tags: Optional[List[
    str]]=None, metadata: Optional[Dict[str, Any]]=None, name: Optional[str
    ]=None, **kwargs: Any) ->Run:
    """Run when Retriever starts running.

    Builds a 'retriever' Run (inputs = {'query': query}, extra = leftover
    kwargs plus metadata), registers it with the tracer, and returns it.
    """
    parent_run_id_ = str(parent_run_id) if parent_run_id else None
    execution_order = self._get_execution_order(parent_run_id_)
    start_time = datetime.now(timezone.utc)
    # Metadata rides along inside the run's `extra` payload.
    if metadata:
        kwargs.update({'metadata': metadata})
    # A fresh run starts with child_execution_order == its own order;
    # children bump it as they are traced.
    retrieval_run = Run(id=run_id, name=name or 'Retriever', parent_run_id=
        parent_run_id, serialized=serialized, inputs={'query': query},
        extra=kwargs, events=[{'name': 'start', 'time': start_time}],
        start_time=start_time, execution_order=execution_order,
        child_execution_order=execution_order, tags=tags, child_runs=[],
        run_type='retriever')
    self._start_trace(retrieval_run)
    self._on_retriever_start(retrieval_run)
    return retrieval_run
Run when Retriever starts running.
__init__
self.query = query self.client = ArxivAPIWrapper(doc_content_chars_max=doc_content_chars_max, **kwargs)
def __init__(self, query: str, doc_content_chars_max: Optional[int]=None,
    **kwargs: Any):
    """Store the search query and build the Arxiv API wrapper client."""
    self.query = query
    self.client = ArxivAPIWrapper(
        doc_content_chars_max=doc_content_chars_max, **kwargs)
null
get_schema
"""Returns the schema of the Graph database""" return ''
@property
def get_schema(self) ->str:
    """Returns the schema of the Graph database"""
    schema = ''
    return schema
Returns the schema of the Graph database
test_no_imports_code_validation
"""Test the validator.""" PALChain.validate_code(_SAMPLE_CODE_4, _MINIMAL_VALIDATIONS)
def test_no_imports_code_validation() ->None:
    """Test the validator."""
    # Must not raise for import-free sample code under minimal validations.
    PALChain.validate_code(_SAMPLE_CODE_4, _MINIMAL_VALIDATIONS)
Test the validator.
test_create
answer = 'I know the answer!' llm = FakeListLLM(responses=[answer]) retriever = FakeParrotRetriever() question_gen_prompt = PromptTemplate.from_template('hi! {input} {chat_history}' ) chain = create_history_aware_retriever(llm, retriever, question_gen_prompt) expected_output = [Document(page_content='What is the answer?')] output = chain.invoke({'input': 'What is the answer?', 'chat_history': []}) assert output == expected_output output = chain.invoke({'input': 'What is the answer?'}) assert output == expected_output expected_output = [Document(page_content='I know the answer!')] output = chain.invoke({'input': 'What is the answer?', 'chat_history': [ 'hi', 'hi']}) assert output == expected_output
def test_create() ->None:
    """Passthrough without chat history; LLM rephrase when history is given."""
    answer = 'I know the answer!'
    llm = FakeListLLM(responses=[answer])
    retriever = FakeParrotRetriever()
    question_gen_prompt = PromptTemplate.from_template(
        'hi! {input} {chat_history}')
    chain = create_history_aware_retriever(llm, retriever, question_gen_prompt)
    passthrough = [Document(page_content='What is the answer?')]
    output = chain.invoke({'input': 'What is the answer?', 'chat_history': []})
    assert output == passthrough
    output = chain.invoke({'input': 'What is the answer?'})
    assert output == passthrough
    rephrased = [Document(page_content='I know the answer!')]
    output = chain.invoke({'input': 'What is the answer?', 'chat_history':
        ['hi', 'hi']})
    assert output == rephrased
null
lc_attributes
attributes: Dict[str, Any] = {} if self.region_name: attributes['region_name'] = self.region_name return attributes
@property
def lc_attributes(self) ->Dict[str, Any]:
    """Serializable constructor attributes: region_name when it is set."""
    if self.region_name:
        return {'region_name': self.region_name}
    return {}
null
_import_pai_eas_endpoint
from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint return PaiEasEndpoint
def _import_pai_eas_endpoint() ->Any:
    """Lazily import and return the PaiEasEndpoint LLM class."""
    from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint
    return PaiEasEndpoint
null
test_chat_openai_get_num_tokens
"""Test get_tokens.""" llm = ChatOpenAI(model=model) assert llm.get_num_tokens('表情符号是\n🦜🔗') == _EXPECTED_NUM_TOKENS[model]
@pytest.mark.parametrize('model', _CHAT_MODELS)
def test_chat_openai_get_num_tokens(model: str) ->None:
    """Token count of a multilingual/emoji string must match the fixture table."""
    expected = _EXPECTED_NUM_TOKENS[model]
    assert ChatOpenAI(model=model).get_num_tokens('表情符号是\n🦜🔗') == expected
Test get_tokens.
parse
"""Parse the output of an LLM call.""" return text.strip().split(', ')
def parse(self, text: str) ->List[str]:
    """Split an LLM completion on ', ' into a list, trimming outer whitespace."""
    stripped = text.strip()
    return stripped.split(', ')
Parse the output of an LLM call.
test_batching
long_text = 'foo ' * 500 long_texts = [long_text for _ in range(0, 250)] documents251 = ['foo bar' for _ in range(0, 251)] five_elem = VertexAIEmbeddings._prepare_batches(long_texts, 5) default250_elem = VertexAIEmbeddings._prepare_batches(long_texts, 250) batches251 = VertexAIEmbeddings._prepare_batches(documents251, 250) assert len(five_elem) == 50 assert len(five_elem[0]) == 5 assert len(default250_elem[0]) == 10 assert len(default250_elem) == 25 assert len(batches251[0]) == 250 assert len(batches251[1]) == 1
def test_batching() ->None:
    """_prepare_batches must honor both the requested size and the token cap."""
    big_doc = 'foo ' * 500
    big_docs = [big_doc] * 250
    small_docs = ['foo bar'] * 251
    small_batches = VertexAIEmbeddings._prepare_batches(big_docs, 5)
    capped_batches = VertexAIEmbeddings._prepare_batches(big_docs, 250)
    overflow_batches = VertexAIEmbeddings._prepare_batches(small_docs, 250)
    assert len(small_batches) == 50
    assert len(small_batches[0]) == 5
    # Long texts force the effective batch size below the requested 250.
    assert len(capped_batches[0]) == 10
    assert len(capped_batches) == 25
    # 251 short docs overflow a 250-item batch by exactly one element.
    assert len(overflow_batches[0]) == 250
    assert len(overflow_batches[1]) == 1
null
_make_request
"""Make a request to the Arcee API Args: method: The HTTP method to use route: The route to call body: The body of the request params: The query params of the request headers: The headers of the request """ headers = self._make_request_headers(headers=headers) url = self._make_request_url(route=route) req_type = getattr(requests, method) response = req_type(url, json=body, params=params, headers=headers) if response.status_code not in (200, 201): raise Exception(f'Failed to make request. Response: {response.text}') return response.json()
def _make_request(self, method: Literal['post', 'get'], route: Union[
    ArceeRoute, str], body: Optional[Mapping[str, Any]]=None, params:
    Optional[dict]=None, headers: Optional[dict]=None) ->dict:
    """Issue an HTTP request against the Arcee API and return the JSON body.

    Args:
        method: HTTP verb to use ('post' or 'get').
        route: The API route to call.
        body: JSON payload for the request.
        params: Query-string parameters.
        headers: Extra request headers (merged with the defaults).

    Raises:
        Exception: If the API responds with a status other than 200/201.
    """
    merged_headers = self._make_request_headers(headers=headers)
    target_url = self._make_request_url(route=route)
    requester = getattr(requests, method)
    response = requester(target_url, json=body, params=params, headers=
        merged_headers)
    if response.status_code not in (200, 201):
        raise Exception(f'Failed to make request. Response: {response.text}')
    return response.json()
Make a request to the Arcee API Args: method: The HTTP method to use route: The route to call body: The body of the request params: The query params of the request headers: The headers of the request
test_list_directory_with_root_dir
"""Test the DirectoryListing tool when a root dir is specified.""" with TemporaryDirectory() as temp_dir: tool = ListDirectoryTool(root_dir=temp_dir) file_1 = Path(temp_dir) / 'file1.txt' file_2 = Path(temp_dir) / 'file2.txt' file_1.write_text('File 1 content') file_2.write_text('File 2 content') entries = tool.run({'dir_path': '.'}).split('\n') assert set(entries) == {'file1.txt', 'file2.txt'}
def test_list_directory_with_root_dir() ->None:
    """Test the DirectoryListing tool when a root dir is specified."""
    with TemporaryDirectory() as temp_dir:
        tool = ListDirectoryTool(root_dir=temp_dir)
        for name, content in (('file1.txt', 'File 1 content'), ('file2.txt',
            'File 2 content')):
            (Path(temp_dir) / name).write_text(content)
        listing = tool.run({'dir_path': '.'})
        assert set(listing.split('\n')) == {'file1.txt', 'file2.txt'}
Test the DirectoryListing tool when a root dir is specified.
test_visit_comparison_number
comp = Comparison(comparator=Comparator.GT, attribute='foo', value=1.4) expected = {'operator': 'GreaterThan', 'path': ['foo'], 'valueNumber': 1.4} actual = DEFAULT_TRANSLATOR.visit_comparison(comp) assert expected == actual
def test_visit_comparison_number() ->None:
    """A GT comparison on a float must translate to the expected filter dict."""
    comparison = Comparison(comparator=Comparator.GT, attribute='foo', value=1.4)
    translated = DEFAULT_TRANSLATOR.visit_comparison(comparison)
    assert translated == {'operator': 'GreaterThan', 'path': ['foo'],
        'valueNumber': 1.4}
null
_llm_type
"""Return type of llm.""" return 'petals'
@property
def _llm_type(self) ->str:
    """Identifier for this LLM implementation."""
    return 'petals'
Return type of llm.
test_enforce_stop_tokens_none
"""Test removing stop tokens when they do not occur.""" text = 'foo bar baz' output = enforce_stop_tokens(text, ['moo']) assert output == 'foo bar baz'
def test_enforce_stop_tokens_none() ->None:
    """Text is returned untouched when no stop token occurs in it."""
    result = enforce_stop_tokens('foo bar baz', ['moo'])
    assert result == 'foo bar baz'
Test removing stop tokens when they do not occur.
exact_match_string_evaluator_ignore_case
"""Create an ExactMatchStringEvaluator with ignore_case set to True.""" return ExactMatchStringEvaluator(ignore_case=True)
@pytest.fixture
def exact_match_string_evaluator_ignore_case() ->ExactMatchStringEvaluator:
    """Fixture: an ExactMatchStringEvaluator that compares case-insensitively."""
    return ExactMatchStringEvaluator(ignore_case=True)
Create an ExactMatchStringEvaluator with ignore_case set to True.
_import_requests_tool_RequestsPostTool
from langchain_community.tools.requests.tool import RequestsPostTool return RequestsPostTool
def _import_requests_tool_RequestsPostTool() ->Any:
    """Lazily import and return the ``RequestsPostTool`` class."""
    from langchain_community.tools.requests.tool import RequestsPostTool
    return RequestsPostTool
null
load
"""Load documents.""" paths = list(Path(self.file_path).glob('**/*.md')) docs = [] for p in paths: with open(p, encoding=self.encoding) as f: text = f.read() metadata = {'source': str(p)} docs.append(Document(page_content=text, metadata=metadata)) return docs
def load(self) ->List[Document]:
    """Read every Markdown file under ``self.file_path`` into Documents."""
    documents = []
    for markdown_file in Path(self.file_path).glob('**/*.md'):
        with open(markdown_file, encoding=self.encoding) as handle:
            content = handle.read()
        documents.append(Document(page_content=content, metadata={'source':
            str(markdown_file)}))
    return documents
Load documents.
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'schema', 'runnable']
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Namespace locating this object within the langchain serialization tree."""
    return ['langchain', 'schema', 'runnable']
Get the namespace of the langchain object.
schema
return self.__schema
@property
def schema(self) ->Dict[str, Any]:
    """Return the stored schema mapping (read-only accessor)."""
    # NOTE: double-underscore attribute is name-mangled to the defining class.
    return self.__schema
null
test_redis_cache_ttl
import redis set_llm_cache(RedisCache(redis_=redis.Redis.from_url(REDIS_TEST_URL), ttl=1)) llm_cache = cast(RedisCache, get_llm_cache()) llm_cache.update('foo', 'bar', [Generation(text='fizz')]) key = llm_cache._key('foo', 'bar') assert llm_cache.redis.pttl(key) > 0
def test_redis_cache_ttl() ->None:
    """An entry written through a TTL-configured RedisCache must carry an expiry."""
    import redis
    set_llm_cache(RedisCache(redis_=redis.Redis.from_url(REDIS_TEST_URL),
        ttl=1))
    cache = cast(RedisCache, get_llm_cache())
    cache.update('foo', 'bar', [Generation(text='fizz')])
    # A positive pttl proves the TTL was applied to the cache key.
    assert cache.redis.pttl(cache._key('foo', 'bar')) > 0
null
test_sklearn_mmr
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] docsearch = SKLearnVectorStore.from_texts(texts, FakeEmbeddings()) output = docsearch.max_marginal_relevance_search('foo', k=1, fetch_k=3) assert len(output) == 1 assert output[0].page_content == 'foo'
@pytest.mark.requires('numpy', 'sklearn')
def test_sklearn_mmr() ->None:
    """MMR search with k=1 should return only the single closest match."""
    store = SKLearnVectorStore.from_texts(['foo', 'bar', 'baz'],
        FakeEmbeddings())
    hits = store.max_marginal_relevance_search('foo', k=1, fetch_k=3)
    assert len(hits) == 1
    assert hits[0].page_content == 'foo'
Test end to end construction and search.
test_similarity_search_with_ssl_verify
"""Test end to end construction and search with ssl verify.""" ssl_verify = {'verify_certs': True, 'basic_auth': ('ES_USER', 'ES_PASSWORD' ), 'ca_certs': 'ES_CA_CERTS_PATH'} texts = ['foo', 'bar', 'baz'] docsearch = ElasticVectorSearch.from_texts(texts, FakeEmbeddings(), elasticsearch_url='http://localhost:9200', ssl_verify=ssl_verify) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
@pytest.mark.skip(reason=
    'Docker build has no ssl certs. Enable this test when testing with ssl.')
def test_similarity_search_with_ssl_verify(self, elasticsearch_url: str
    ) ->None:
    """End-to-end similarity search against a TLS-verified Elasticsearch."""
    tls_options = {'verify_certs': True, 'basic_auth': ('ES_USER',
        'ES_PASSWORD'), 'ca_certs': 'ES_CA_CERTS_PATH'}
    store = ElasticVectorSearch.from_texts(['foo', 'bar', 'baz'],
        FakeEmbeddings(), elasticsearch_url='http://localhost:9200',
        ssl_verify=tls_options)
    assert store.similarity_search('foo', k=1) == [Document(page_content='foo')]
Test end to end construction and search with ssl verify.
test_tiledb_vector_sim_with_score_threshold
"""Test vector similarity.""" texts = ['foo', 'bar', 'baz'] docsearch = TileDB.from_texts(texts=texts, embedding= ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/flat', index_type='FLAT') query_vec = FakeEmbeddings().embed_query(text='foo') output = docsearch.similarity_search_by_vector(query_vec, k=2, score_threshold=0.2) assert output == [Document(page_content='foo')] docsearch = TileDB.from_texts(texts=texts, embedding= ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/ivf_flat', index_type='IVF_FLAT') query_vec = FakeEmbeddings().embed_query(text='foo') output = docsearch.similarity_search_by_vector(query_vec, k=2, score_threshold=0.2, nprobe=docsearch.vector_index.partitions) assert output == [Document(page_content='foo')]
@pytest.mark.requires('tiledb-vector-search')
def test_tiledb_vector_sim_with_score_threshold(tmp_path: Path) ->None:
    """Score-thresholded vector similarity for both FLAT and IVF_FLAT indexes."""
    corpus = ['foo', 'bar', 'baz']
    flat_store = TileDB.from_texts(texts=corpus, embedding=
        ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/flat',
        index_type='FLAT')
    probe = FakeEmbeddings().embed_query(text='foo')
    hits = flat_store.similarity_search_by_vector(probe, k=2,
        score_threshold=0.2)
    assert hits == [Document(page_content='foo')]
    ivf_store = TileDB.from_texts(texts=corpus, embedding=
        ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/ivf_flat',
        index_type='IVF_FLAT')
    probe = FakeEmbeddings().embed_query(text='foo')
    # IVF_FLAT additionally needs nprobe to cover all partitions.
    hits = ivf_store.similarity_search_by_vector(probe, k=2,
        score_threshold=0.2, nprobe=ivf_store.vector_index.partitions)
    assert hits == [Document(page_content='foo')]
Test vector similarity.
test_promptlayer_openai_stop_error
"""Test promptlayer openai stop logic on bad configuration.""" llm = PromptLayerOpenAI(stop='3', temperature=0) with pytest.raises(ValueError): llm('write an ordered list of five items', stop=['\n'])
def test_promptlayer_openai_stop_error() ->None:
    """Passing stop both at init and call time must raise a ValueError."""
    misconfigured = PromptLayerOpenAI(stop='3', temperature=0)
    with pytest.raises(ValueError):
        misconfigured('write an ordered list of five items', stop=['\n'])
Test promptlayer openai stop logic on bad configuration.
_get_elements
return get_elements_from_api(file_path=self.file_path, api_key=self.api_key, api_url=self.url, **self.unstructured_kwargs)
def _get_elements(self) ->List:
    """Partition the configured file remotely via the Unstructured API."""
    return get_elements_from_api(file_path=self.file_path, api_key=self.
        api_key, api_url=self.url, **self.unstructured_kwargs)
null
_extract_email_content
from_email = None for values in msg['payload']['headers']: name = values['name'] if name == 'From': from_email = values['value'] if from_email is None: raise ValueError for part in msg['payload']['parts']: if part['mimeType'] == 'text/plain': data = part['body']['data'] data = base64.urlsafe_b64decode(data).decode('utf-8') pattern = re.compile('\\r\\nOn .+(\\r\\n)*wrote:\\r\\n') newest_response = re.split(pattern, data)[0] message = HumanMessage(content=newest_response, additional_kwargs={ 'sender': from_email}) return message raise ValueError
def _extract_email_content(msg: Any) ->HumanMessage: from_email = None for values in msg['payload']['headers']: name = values['name'] if name == 'From': from_email = values['value'] if from_email is None: raise ValueError for part in msg['payload']['parts']: if part['mimeType'] == 'text/plain': data = part['body']['data'] data = base64.urlsafe_b64decode(data).decode('utf-8') pattern = re.compile('\\r\\nOn .+(\\r\\n)*wrote:\\r\\n') newest_response = re.split(pattern, data)[0] message = HumanMessage(content=newest_response, additional_kwargs={'sender': from_email}) return message raise ValueError
null
_get_next_tokenId
value_type = self._detect_value_type(tokenId) if value_type == 'hex_0x': value_int = int(tokenId, 16) elif value_type == 'hex_0xbf': value_int = int(tokenId[2:], 16) else: value_int = int(tokenId) result = value_int + 1 if value_type == 'hex_0x': return '0x' + format(result, '0' + str(len(tokenId) - 2) + 'x') elif value_type == 'hex_0xbf': return '0xbf' + format(result, '0' + str(len(tokenId) - 4) + 'x') else: return str(result)
def _get_next_tokenId(self, tokenId: str) ->str:
    """Return the successor token id, preserving the input's textual format.

    Supports plain decimal strings, '0x'-prefixed hex, and '0xbf'-prefixed
    hex; hex results keep the original zero-padded width.
    """
    fmt = self._detect_value_type(tokenId)
    if fmt == 'hex_0x':
        current = int(tokenId, 16)
    elif fmt == 'hex_0xbf':
        current = int(tokenId[2:], 16)
    else:
        current = int(tokenId)
    successor = current + 1
    if fmt == 'hex_0x':
        width = len(tokenId) - 2
        return '0x' + format(successor, f'0{width}x')
    if fmt == 'hex_0xbf':
        width = len(tokenId) - 4
        return '0xbf' + format(successor, f'0{width}x')
    return str(successor)
null
_stream
try: for stream_resp in self._create_chat_stream(messages, stop, **kwargs): if stream_resp: chunk = _chat_stream_response_to_chat_generation_chunk(stream_resp) yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text, verbose=self.verbose) except OllamaEndpointNotFoundError: yield from self._legacy_stream(messages, stop, **kwargs)
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->Iterator[ChatGenerationChunk]:
    """Stream chat generation chunks, falling back to the legacy endpoint
    when the chat endpoint is not found."""
    try:
        for raw_response in self._create_chat_stream(messages, stop, **kwargs):
            if not raw_response:
                continue
            chunk = _chat_stream_response_to_chat_generation_chunk(raw_response)
            yield chunk
            if run_manager:
                run_manager.on_llm_new_token(chunk.text, verbose=self.verbose)
    except OllamaEndpointNotFoundError:
        # Older server: retry through the legacy generate endpoint.
        yield from self._legacy_stream(messages, stop, **kwargs)
null
embed_query
"""Call out to Aleph Alpha's asymmetric, query embedding endpoint Args: text: The text to embed. Returns: Embeddings for the text. """ return self._embed(text)
def embed_query(self, text: str) ->List[float]:
    """Embed a single query string via Aleph Alpha's asymmetric endpoint.

    Args:
        text: Query text to embed.

    Returns:
        The embedding vector for ``text``.
    """
    embedding = self._embed(text)
    return embedding
Call out to Aleph Alpha's asymmetric, query embedding endpoint Args: text: The text to embed. Returns: Embeddings for the text.
InputType
"""Get the input type for this runnable.""" from langchain_core.prompt_values import ChatPromptValueConcrete, StringPromptValue return Union[str, Union[StringPromptValue, ChatPromptValueConcrete], List[ AnyMessage]]
@property
def InputType(self) ->TypeAlias:
    """Accepted input types: a raw string, a prompt value, or a message list."""
    from langchain_core.prompt_values import ChatPromptValueConcrete, StringPromptValue
    prompt_value = Union[StringPromptValue, ChatPromptValueConcrete]
    # Union flattening makes this identical to the fully nested form.
    return Union[str, prompt_value, List[AnyMessage]]
Get the input type for this runnable.
test_loads_openai_llm
llm = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello') llm_string = dumps(llm) llm2 = loads(llm_string, secrets_map={'OPENAI_API_KEY': 'hello'}) assert llm2 == llm assert dumps(llm2) == llm_string assert isinstance(llm2, OpenAI)
@pytest.mark.requires('openai')
def test_loads_openai_llm() ->None:
    """An OpenAI LLM must round-trip through dumps/loads with secrets injected."""
    original = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello')
    serialized = dumps(original)
    restored = loads(serialized, secrets_map={'OPENAI_API_KEY': 'hello'})
    assert restored == original
    assert dumps(restored) == serialized
    assert isinstance(restored, OpenAI)
null
_make_request
request = urllib.request.Request(url, headers=self.headers) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) text = stringify_dict(json_data) metadata = {'source': url} return [Document(page_content=text, metadata=metadata)]
def _make_request(self, url: str) ->List[Document]:
    """Fetch ``url`` as JSON and wrap the stringified payload in a Document."""
    req = urllib.request.Request(url, headers=self.headers)
    with urllib.request.urlopen(req) as response:
        payload = json.loads(response.read().decode())
    return [Document(page_content=stringify_dict(payload), metadata={
        'source': url})]
null
get_default_document_variable_name
"""Get default document variable name, if not provided.""" if 'document_variable_name' not in values: llm_chain_variables = values['initial_llm_chain'].prompt.input_variables if len(llm_chain_variables) == 1: values['document_variable_name'] = llm_chain_variables[0] else: raise ValueError( 'document_variable_name must be provided if there are multiple llm_chain input_variables' ) else: llm_chain_variables = values['initial_llm_chain'].prompt.input_variables if values['document_variable_name'] not in llm_chain_variables: raise ValueError( f"document_variable_name {values['document_variable_name']} was not found in llm_chain input_variables: {llm_chain_variables}" ) return values
@root_validator(pre=True)
def get_default_document_variable_name(cls, values: Dict) ->Dict:
    """Default ``document_variable_name`` to the prompt's sole input variable,
    or validate a caller-supplied one against the prompt's variables."""
    prompt_variables = values['initial_llm_chain'].prompt.input_variables
    if 'document_variable_name' not in values:
        if len(prompt_variables) != 1:
            raise ValueError(
                'document_variable_name must be provided if there are multiple llm_chain input_variables'
                )
        values['document_variable_name'] = prompt_variables[0]
    elif values['document_variable_name'] not in prompt_variables:
        raise ValueError(
            f"document_variable_name {values['document_variable_name']} was not found in llm_chain input_variables: {prompt_variables}"
            )
    return values
Get default document variable name, if not provided.
_generate
message_dicts, params = self._create_message_dicts(messages, stop) _params = {'messages': message_dicts} final_params = {**params, **kwargs, **_params} response = self.client.run(final_params).json() return self._create_chat_result(response)
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->ChatResult:
    """Send the chat messages to the backing client and parse the reply."""
    message_dicts, base_params = self._create_message_dicts(messages, stop)
    # 'messages' is merged last so it cannot be overridden by kwargs.
    request = {**base_params, **kwargs, 'messages': message_dicts}
    raw = self.client.run(request).json()
    return self._create_chat_result(raw)
null
generate
"""Run the LLM on the given prompt and input.""" if not isinstance(prompts, list): raise ValueError( f"Argument 'prompts' is expected to be of type List[str], received argument of type {type(prompts)}." ) if isinstance(callbacks, list) and callbacks and (isinstance(callbacks[0], (list, BaseCallbackManager)) or callbacks[0] is None): assert len(callbacks) == len(prompts) assert tags is None or isinstance(tags, list) and len(tags) == len(prompts) assert metadata is None or isinstance(metadata, list) and len(metadata ) == len(prompts) assert run_name is None or isinstance(run_name, list) and len(run_name ) == len(prompts) callbacks = cast(List[Callbacks], callbacks) tags_list = cast(List[Optional[List[str]]], tags or [None] * len(prompts)) metadata_list = cast(List[Optional[Dict[str, Any]]], metadata or [{}] * len(prompts)) run_name_list = run_name or cast(List[Optional[str]], [None] * len(prompts) ) callback_managers = [CallbackManager.configure(callback, self.callbacks, self.verbose, tag, self.tags, meta, self.metadata) for callback, tag, meta in zip(callbacks, tags_list, metadata_list)] else: callback_managers = [CallbackManager.configure(cast(Callbacks, callbacks), self.callbacks, self.verbose, cast(List[str], tags), self.tags, cast(Dict[str, Any], metadata), self.metadata)] * len( prompts) run_name_list = [cast(Optional[str], run_name)] * len(prompts) params = self.dict() params['stop'] = stop options = {'stop': stop} existing_prompts, llm_string, missing_prompt_idxs, missing_prompts = ( get_prompts(params, prompts)) disregard_cache = self.cache is not None and not self.cache new_arg_supported = inspect.signature(self._generate).parameters.get( 'run_manager') if get_llm_cache() is None or disregard_cache: if self.cache is not None and self.cache: raise ValueError( 'Asked to cache, but no cache found at `langchain.cache`.') run_managers = [callback_manager.on_llm_start(dumpd(self), [prompt], invocation_params=params, options=options, name=run_name, 
batch_size=len(prompts))[0] for callback_manager, prompt, run_name in zip(callback_managers, prompts, run_name_list)] output = self._generate_helper(prompts, stop, run_managers, bool( new_arg_supported), **kwargs) return output if len(missing_prompts) > 0: run_managers = [callback_managers[idx].on_llm_start(dumpd(self), [ prompts[idx]], invocation_params=params, options=options, name= run_name_list[idx], batch_size=len(missing_prompts))[0] for idx in missing_prompt_idxs] new_results = self._generate_helper(missing_prompts, stop, run_managers, bool(new_arg_supported), **kwargs) llm_output = update_cache(existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts) run_info = [RunInfo(run_id=run_manager.run_id) for run_manager in run_managers] if run_managers else None else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info)
def generate(self, prompts: List[str], stop: Optional[List[str]]=None,
    callbacks: Optional[Union[Callbacks, List[Callbacks]]]=None, *, tags:
    Optional[Union[List[str], List[List[str]]]]=None, metadata: Optional[
    Union[Dict[str, Any], List[Dict[str, Any]]]]=None, run_name: Optional[
    Union[str, List[str]]]=None, **kwargs: Any) ->LLMResult:
    """Run the LLM on the given prompt and input.

    Args:
        prompts: List of prompt strings (must be a list, not a bare string).
        stop: Optional stop sequences forwarded to the model.
        callbacks: Either one callback config shared by all prompts, or a
            list of per-prompt configs; in the latter case ``tags``,
            ``metadata`` and ``run_name`` must be aligned per-prompt lists.
        tags: Tracing tags, shared or per-prompt.
        metadata: Tracing metadata, shared or per-prompt.
        run_name: Run name(s), shared or per-prompt.
        **kwargs: Extra arguments passed through to ``self._generate``.

    Returns:
        An ``LLMResult`` with one generation list per prompt.

    Raises:
        ValueError: If ``prompts`` is not a list, or caching was requested
            while no global cache is configured.
    """
    if not isinstance(prompts, list):
        raise ValueError(
            f"Argument 'prompts' is expected to be of type List[str], received argument of type {type(prompts)}."
            )
    # Per-prompt callbacks: a list whose first entry is itself a callback
    # config (list/manager/None) rather than a single handler object.
    if isinstance(callbacks, list) and callbacks and (isinstance(callbacks[
        0], (list, BaseCallbackManager)) or callbacks[0] is None):
        # All per-prompt argument lists must align one-to-one with prompts.
        assert len(callbacks) == len(prompts)
        assert tags is None or isinstance(tags, list) and len(tags) == len(
            prompts)
        assert metadata is None or isinstance(metadata, list) and len(metadata
            ) == len(prompts)
        assert run_name is None or isinstance(run_name, list) and len(run_name
            ) == len(prompts)
        callbacks = cast(List[Callbacks], callbacks)
        tags_list = cast(List[Optional[List[str]]], tags or [None] * len(
            prompts))
        metadata_list = cast(List[Optional[Dict[str, Any]]], metadata or [{
            }] * len(prompts))
        run_name_list = run_name or cast(List[Optional[str]], [None] * len(
            prompts))
        # One configured manager per prompt, merging per-call and
        # instance-level callbacks/tags/metadata.
        callback_managers = [CallbackManager.configure(callback, self.
            callbacks, self.verbose, tag, self.tags, meta, self.metadata) for
            callback, tag, meta in zip(callbacks, tags_list, metadata_list)]
    else:
        # Shared config: reuse one configured manager for every prompt.
        callback_managers = [CallbackManager.configure(cast(Callbacks,
            callbacks), self.callbacks, self.verbose, cast(List[str], tags),
            self.tags, cast(Dict[str, Any], metadata), self.metadata)] * len(
            prompts)
        run_name_list = [cast(Optional[str], run_name)] * len(prompts)
    params = self.dict()
    params['stop'] = stop
    options = {'stop': stop}
    # Partition prompts into cache hits and misses, keyed on params + prompt.
    existing_prompts, llm_string, missing_prompt_idxs, missing_prompts = (
        get_prompts(params, prompts))
    disregard_cache = self.cache is not None and not self.cache
    # Whether self._generate accepts a run_manager keyword argument.
    new_arg_supported = inspect.signature(self._generate).parameters.get(
        'run_manager')
    if get_llm_cache() is None or disregard_cache:
        # No cache in play: generate everything directly.
        if self.cache is not None and self.cache:
            raise ValueError(
                'Asked to cache, but no cache found at `langchain.cache`.')
        run_managers = [callback_manager.on_llm_start(dumpd(self), [prompt],
            invocation_params=params, options=options, name=run_name,
            batch_size=len(prompts))[0] for callback_manager, prompt,
            run_name in zip(callback_managers, prompts, run_name_list)]
        output = self._generate_helper(prompts, stop, run_managers, bool(
            new_arg_supported), **kwargs)
        return output
    if len(missing_prompts) > 0:
        # Generate only the cache misses, then write results back to cache.
        run_managers = [callback_managers[idx].on_llm_start(dumpd(self), [
            prompts[idx]], invocation_params=params, options=options, name=
            run_name_list[idx], batch_size=len(missing_prompts))[0] for idx in
            missing_prompt_idxs]
        new_results = self._generate_helper(missing_prompts, stop,
            run_managers, bool(new_arg_supported), **kwargs)
        llm_output = update_cache(existing_prompts, llm_string,
            missing_prompt_idxs, new_results, prompts)
        run_info = [RunInfo(run_id=run_manager.run_id) for run_manager in
            run_managers] if run_managers else None
    else:
        # Full cache hit: nothing to run.
        llm_output = {}
        run_info = None
    generations = [existing_prompts[i] for i in range(len(prompts))]
    return LLMResult(generations=generations, llm_output=llm_output, run=
        run_info)
Run the LLM on the given prompt and input.
_llm_type
"""Return type of LLM.""" return '__package_name_short__-llm'
@property
def _llm_type(self) ->str:
    """Identifier used to tag this LLM implementation."""
    return '__package_name_short__-llm'
Return type of LLM.
test_pickbest_textembedder_no_label_no_emb
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) named_actions = {'action1': ['0', '1', '2']} expected = """shared |context context |action1 0 |action1 1 |action1 2 """ event = pick_best_chain.PickBestEvent(inputs={}, to_select_from= named_actions, based_on={'context': 'context'}) vw_ex_str = feature_embedder.format(event) assert vw_ex_str == expected
@pytest.mark.requires('vowpal_wabbit_next')
def test_pickbest_textembedder_no_label_no_emb() ->None:
    """Unlabelled actions without embeddings serialize to plain VW text."""
    embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False,
        model=MockEncoder())
    actions = {'action1': ['0', '1', '2']}
    event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=actions,
        based_on={'context': 'context'})
    assert embedder.format(event) == (
        'shared |context context \n|action1 0 \n|action1 1 \n|action1 2 ')
null
_get_docs_with_query
docs = self.vectorstore.search(query, self.search_type, **search_kwargs) return docs
def _get_docs_with_query(self, query: str, search_kwargs: Dict[str, Any]
    ) ->List[Document]:
    """Delegate retrieval to the vector store using the configured search type."""
    return self.vectorstore.search(query, self.search_type, **search_kwargs)
null
similarity_search_with_score
"""Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter by metadata. Defaults to None. search_params: Additional search params offset: Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues. score_threshold: Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - int - number of replicas to query, values should present in all queried replicas - 'majority' - query all replicas, but return values present in the majority of replicas - 'quorum' - query the majority of replicas, return values present in all of them - 'all' - query all replicas, and return values present in all replicas **kwargs: Any other named arguments to pass through to QdrantClient.search() Returns: List of documents most similar to the query text and distance for each. """ return self.similarity_search_with_score_by_vector(self._embed_query(query), k, filter=filter, search_params=search_params, offset=offset, score_threshold=score_threshold, consistency=consistency, **kwargs)
def similarity_search_with_score(self, query: str, k: int=4, filter:
    Optional[MetadataFilter]=None, search_params: Optional[common_types.
    SearchParams]=None, offset: int=0, score_threshold: Optional[float]=
    None, consistency: Optional[common_types.ReadConsistency]=None, **
    kwargs: Any) ->List[Tuple[Document, float]]:
    """Return documents most similar to ``query`` together with their scores.

    Embeds the query text and delegates to
    ``similarity_search_with_score_by_vector``.

    Args:
        query: Text to look up documents similar to.
        k: Number of documents to return. Defaults to 4.
        filter: Optional metadata filter.
        search_params: Additional search parameters.
        offset: Offset of the first result (for pagination; large offsets
            may hurt performance).
        score_threshold: Minimal score for returned results; whether higher
            or lower scores pass depends on the configured distance function
            (for cosine similarity, only higher scores are returned).
        consistency: Read consistency — an int replica count, or one of
            'majority', 'quorum', 'all'.
        **kwargs: Passed through to ``QdrantClient.search()``.

    Returns:
        List of (document, score) pairs most similar to the query text.
    """
    query_embedding = self._embed_query(query)
    return self.similarity_search_with_score_by_vector(query_embedding, k,
        filter=filter, search_params=search_params, offset=offset,
        score_threshold=score_threshold, consistency=consistency, **kwargs)
Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter by metadata. Defaults to None. search_params: Additional search params offset: Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues. score_threshold: Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned. consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - int - number of replicas to query, values should present in all queried replicas - 'majority' - query all replicas, but return values present in the majority of replicas - 'quorum' - query the majority of replicas, return values present in all of them - 'all' - query all replicas, and return values present in all replicas **kwargs: Any other named arguments to pass through to QdrantClient.search() Returns: List of documents most similar to the query text and distance for each.
requires_reference
"""Return whether the chain requires a reference. Returns: bool: True if the chain requires a reference, False otherwise. """ return True
@property
def requires_reference(self) ->bool:
    """Whether this chain needs a reference answer to evaluate (always True)."""
    return True
Return whether the chain requires a reference. Returns: bool: True if the chain requires a reference, False otherwise.
__getattr__
raise ImportError( "This tool has been moved to langchain experiment. This tool has access to a python REPL. For best practices make sure to sandbox this tool. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md To keep using this code as is, install langchain experimental and update relevant imports replacing 'langchain' with 'langchain_experimental'" )
def __getattr__(name: str='') ->Any: raise ImportError( "This tool has been moved to langchain experiment. This tool has access to a python REPL. For best practices make sure to sandbox this tool. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md To keep using this code as is, install langchain experimental and update relevant imports replacing 'langchain' with 'langchain_experimental'" )
null
_import_mlflow_chat
from langchain_community.chat_models.mlflow import ChatMlflow return ChatMlflow
def _import_mlflow_chat() ->Any:
    """Deferred import hook returning the ChatMlflow chat-model class."""
    from langchain_community.chat_models import mlflow

    return mlflow.ChatMlflow
null
_approximate_search_query_with_efficient_filter
"""For Approximate k-NN Search, with Efficient Filter for Lucene and Faiss Engines.""" search_query = _default_approximate_search_query(query_vector, k=k, vector_field=vector_field) search_query['query']['knn'][vector_field]['filter'] = efficient_filter return search_query
def _approximate_search_query_with_efficient_filter(query_vector: List[
    float], efficient_filter: Dict, k: int=4, vector_field: str='vector_field'
    ) ->Dict:
    """For Approximate k-NN Search, with Efficient Filter for Lucene and Faiss Engines."""
    # Start from the plain approximate-kNN query, then attach the filter
    # clause inside the knn section for the target vector field.
    query = _default_approximate_search_query(query_vector, k=k,
        vector_field=vector_field)
    knn_clause = query['query']['knn'][vector_field]
    knn_clause['filter'] = efficient_filter
    return query
For Approximate k-NN Search, with Efficient Filter for Lucene and Faiss Engines.
_default_params
"""Get the default parameters for calling Titan Takeoff Server.""" params = {'generate_max_length': self.generate_max_length, 'sampling_topk': self.sampling_topk, 'sampling_topp': self.sampling_topp, 'sampling_temperature': self.sampling_temperature, 'repetition_penalty': self.repetition_penalty, 'no_repeat_ngram_size': self.no_repeat_ngram_size} return params
@property def _default_params(self) ->Mapping[str, Any]: """Get the default parameters for calling Titan Takeoff Server.""" params = {'generate_max_length': self.generate_max_length, 'sampling_topk': self.sampling_topk, 'sampling_topp': self. sampling_topp, 'sampling_temperature': self.sampling_temperature, 'repetition_penalty': self.repetition_penalty, 'no_repeat_ngram_size': self.no_repeat_ngram_size} return params
Get the default parameters for calling Titan Takeoff Server.
test_call_tgi
"""Test valid call to oci model deployment endpoint.""" endpoint = 'https://MD_OCID/predict' responses.add(responses.POST, endpoint, json={'generated_text': 'This is a completion.'}, status=200) mocker.patch('ads.common.auth.default_signer', return_value=dict(signer=None)) llm = OCIModelDeploymentTGI(endpoint=endpoint) output = llm.invoke('This is a prompt.') assert isinstance(output, str)
@pytest.mark.requires('ads')
@responses.activate
def test_call_tgi(mocker: MockerFixture) ->None:
    """Test valid call to oci model deployment endpoint."""
    endpoint = 'https://MD_OCID/predict'
    # Stub the OCI signer so no real auth happens.
    mocker.patch('ads.common.auth.default_signer', return_value={'signer': None})
    # Fake the model-deployment HTTP response.
    responses.add(responses.POST, endpoint, json={'generated_text':
        'This is a completion.'}, status=200)
    llm = OCIModelDeploymentTGI(endpoint=endpoint)
    output = llm.invoke('This is a prompt.')
    assert isinstance(output, str)
Test valid call to oci model deployment endpoint.
_convert_website_search_response
"""Converts a sequence of search results to a list of LangChain documents.""" from google.protobuf.json_format import MessageToDict documents: List[Document] = [] for result in results: document_dict = MessageToDict(result.document._pb, preserving_proto_field_name=True) derived_struct_data = document_dict.get('derived_struct_data') if not derived_struct_data: continue doc_metadata = document_dict.get('struct_data', {}) doc_metadata['id'] = document_dict['id'] doc_metadata['source'] = derived_struct_data.get('link', '') if chunk_type not in derived_struct_data: continue text_field = 'snippet' if chunk_type == 'snippets' else 'content' for chunk in derived_struct_data[chunk_type]: documents.append(Document(page_content=chunk.get(text_field, ''), metadata=doc_metadata)) if not documents: print(f'No {chunk_type} could be found.') if chunk_type == 'extractive_answers': print( """Make sure that your data store is using Advanced Website Indexing. https://cloud.google.com/generative-ai-app-builder/docs/about-advanced-features#advanced-website-indexing""" ) return documents
def _convert_website_search_response(self, results: Sequence[SearchResult],
    chunk_type: str) ->List[Document]:
    """Converts a sequence of search results to a list of LangChain documents."""
    from google.protobuf.json_format import MessageToDict
    docs: List[Document] = []
    for result in results:
        doc_dict = MessageToDict(result.document._pb,
            preserving_proto_field_name=True)
        derived = doc_dict.get('derived_struct_data')
        if not derived:
            continue
        # Keep the original struct_data and enrich it with id/source.
        metadata = doc_dict.get('struct_data', {})
        metadata['id'] = doc_dict['id']
        metadata['source'] = derived.get('link', '')
        if chunk_type not in derived:
            continue
        # Snippets store their text under 'snippet'; other chunk types
        # store it under 'content'.
        field = 'snippet' if chunk_type == 'snippets' else 'content'
        docs.extend(Document(page_content=chunk.get(field, ''), metadata=
            metadata) for chunk in derived[chunk_type])
    if not docs:
        print(f'No {chunk_type} could be found.')
        if chunk_type == 'extractive_answers':
            print(
                """Make sure that your data store is using Advanced Website Indexing.
https://cloud.google.com/generative-ai-app-builder/docs/about-advanced-features#advanced-website-indexing"""
                )
    return docs
Converts a sequence of search results to a list of LangChain documents.
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None:
    """The module's __all__ matches the expected export list exactly."""
    assert set(EXPECTED_ALL) == set(__all__)
null
_type
return 'xml-agent'
@property def _type(self) ->str: return 'xml-agent'
null
test_pypdf_parser
"""Test PyPDF parser.""" _assert_with_parser(PyPDFParser())
def test_pypdf_parser() ->None:
    """Test PyPDF parser."""
    parser = PyPDFParser()
    _assert_with_parser(parser)
Test PyPDF parser.
_process_response
"""Process response from DataForSEO SERP API.""" toret = 'No good search result found' for task in res.get('tasks', []): for result in task.get('result', []): item_types = result.get('item_types') items = result.get('items', []) if 'answer_box' in item_types: toret = next(item for item in items if item.get('type') == 'answer_box').get('text') elif 'knowledge_graph' in item_types: toret = next(item for item in items if item.get('type') == 'knowledge_graph').get('description') elif 'featured_snippet' in item_types: toret = next(item for item in items if item.get('type') == 'featured_snippet').get('description') elif 'shopping' in item_types: toret = next(item for item in items if item.get('type') == 'shopping').get('price') elif 'organic' in item_types: toret = next(item for item in items if item.get('type') == 'organic').get('description') if toret: break return toret
def _process_response(self, res: dict) ->str: """Process response from DataForSEO SERP API.""" toret = 'No good search result found' for task in res.get('tasks', []): for result in task.get('result', []): item_types = result.get('item_types') items = result.get('items', []) if 'answer_box' in item_types: toret = next(item for item in items if item.get('type') == 'answer_box').get('text') elif 'knowledge_graph' in item_types: toret = next(item for item in items if item.get('type') == 'knowledge_graph').get('description') elif 'featured_snippet' in item_types: toret = next(item for item in items if item.get('type') == 'featured_snippet').get('description') elif 'shopping' in item_types: toret = next(item for item in items if item.get('type') == 'shopping').get('price') elif 'organic' in item_types: toret = next(item for item in items if item.get('type') == 'organic').get('description') if toret: break return toret
Process response from DataForSEO SERP API.
file_description
if len(self.files) == 0: return '' lines = ['The following files available in the evaluation environment:'] for target_path, file_info in self.files.items(): peek_content = head_file(file_info.source_path, 4) lines.append( f"""- path: `{target_path}` first four lines: {peek_content} description: `{file_info.description}`""" ) return '\n'.join(lines)
@property
def file_description(self) ->str:
    """Bullet list of staged files: path, 4-line preview, and description."""
    if not self.files:
        return ''
    entries = ['The following files available in the evaluation environment:']
    for target_path, file_info in self.files.items():
        # head_file supplies a short preview of the source file.
        peek_content = head_file(file_info.source_path, 4)
        entries.append(
            f"""- path: `{target_path}`
 first four lines:
 {peek_content}
 description: `{file_info.description}`"""
            )
    return '\n'.join(entries)
null
_get_layer_properties
"""Get the layer properties from the FeatureLayer.""" import arcgis layer_number_pattern = re.compile('/\\d+$') props = self.layer.properties if lyr_desc is None: try: if self.BEAUTIFULSOUP: lyr_desc = self.BEAUTIFULSOUP(props['description']).text else: lyr_desc = props['description'] lyr_desc = lyr_desc or _NOT_PROVIDED except KeyError: lyr_desc = _NOT_PROVIDED try: item_id = props['serviceItemId'] item = self.gis.content.get(item_id) or arcgis.features.FeatureLayer(re .sub(layer_number_pattern, '', self.url)) try: raw_desc = item.description except AttributeError: raw_desc = item.properties.description if self.BEAUTIFULSOUP: item_desc = self.BEAUTIFULSOUP(raw_desc).text else: item_desc = raw_desc item_desc = item_desc or _NOT_PROVIDED except KeyError: item_desc = _NOT_PROVIDED return {'layer_description': lyr_desc, 'item_description': item_desc, 'layer_properties': props}
def _get_layer_properties(self, lyr_desc: Optional[str]=None) ->dict:
    """Get the layer properties from the FeatureLayer.

    Args:
        lyr_desc: Optional pre-supplied layer description; when None it is
            read from the layer's ``description`` property.

    Returns:
        dict with keys ``layer_description``, ``item_description`` and
        ``layer_properties``.
    """
    import arcgis
    # Matches a trailing "/<digits>" (the layer number) so it can be
    # stripped from self.url below.
    layer_number_pattern = re.compile('/\\d+$')
    props = self.layer.properties
    if lyr_desc is None:
        try:
            # When BeautifulSoup is available, strip HTML markup from the
            # description; otherwise use the raw value.
            if self.BEAUTIFULSOUP:
                lyr_desc = self.BEAUTIFULSOUP(props['description']).text
            else:
                lyr_desc = props['description']
            lyr_desc = lyr_desc or _NOT_PROVIDED
        except KeyError:
            # Layer has no 'description' property at all.
            lyr_desc = _NOT_PROVIDED
    try:
        item_id = props['serviceItemId']
        # Fall back to building a FeatureLayer from the URL (layer number
        # removed) when the item is not found in the GIS content store.
        item = self.gis.content.get(item_id) or arcgis.features.FeatureLayer(re
            .sub(layer_number_pattern, '', self.url))
        try:
            raw_desc = item.description
        except AttributeError:
            # Some item types expose the description on .properties instead.
            raw_desc = item.properties.description
        if self.BEAUTIFULSOUP:
            item_desc = self.BEAUTIFULSOUP(raw_desc).text
        else:
            item_desc = raw_desc
        item_desc = item_desc or _NOT_PROVIDED
    except KeyError:
        # No 'serviceItemId': the layer is not backed by a portal item.
        item_desc = _NOT_PROVIDED
    return {'layer_description': lyr_desc, 'item_description': item_desc,
        'layer_properties': props}
Get the layer properties from the FeatureLayer.
_import_rocksetdb
from langchain_community.vectorstores.rocksetdb import Rockset return Rockset
def _import_rocksetdb() ->Any:
    """Deferred import hook returning the Rockset vector store class."""
    from langchain_community.vectorstores import rocksetdb

    return rocksetdb.Rockset
null
_get_parameter_names
"""Get the parameter names of the callable.""" sig = inspect.signature(callable_) return list(sig.parameters.keys())
def _get_parameter_names(callable_: GetSessionHistoryCallable) ->List[str]:
    """Get the parameter names of the callable."""
    # signature(...).parameters is an ordered mapping keyed by name.
    return list(inspect.signature(callable_).parameters)
Get the parameter names of the callable.
log_error_once
"""Log an error once.""" global _LOGGED if (method, type(exception)) in _LOGGED: return _LOGGED.add((method, type(exception))) logger.error(exception)
def log_error_once(method: str, exception: Exception) ->None:
    """Log an error once per (method, exception type) pair."""
    key = (method, type(exception))
    if key in _LOGGED:
        return
    # .add mutates the module-level set in place, so no `global` is needed.
    _LOGGED.add(key)
    logger.error(exception)
Log an error once.
remove_uploaded_file
"""Remove uploaded file from the sandbox.""" self.session.filesystem.remove(uploaded_file.remote_path) self._uploaded_files = [f for f in self._uploaded_files if f.remote_path != uploaded_file.remote_path] self.description = self.description + '\n' + self.uploaded_files_description
def remove_uploaded_file(self, uploaded_file: UploadedFile) ->None:
    """Remove uploaded file from the sandbox."""
    self.session.filesystem.remove(uploaded_file.remote_path)
    # Drop the bookkeeping entry matching the removed remote path.
    remaining = [f for f in self._uploaded_files if f.remote_path !=
        uploaded_file.remote_path]
    self._uploaded_files = remaining
    # Refresh the tool description with the new file listing.
    self.description = '\n'.join([self.description, self.
        uploaded_files_description])
Remove uploaded file from the sandbox.
test_run_success
"""Test that returns the correct answer""" search_string = ( 'Examining the Validity of ChatGPT in Identifying Relevant Nephrology Literature' ) output = api_client.run(search_string) test_string = ( 'Examining the Validity of ChatGPT in Identifying Relevant Nephrology Literature: Findings and Implications' ) assert test_string in output assert len(output) == api_client.doc_content_chars_max
def test_run_success(api_client: PubMedAPIWrapper) ->None:
    """Test that returns the correct answer"""
    query = (
        'Examining the Validity of ChatGPT in Identifying Relevant Nephrology Literature'
        )
    expected_title = (
        'Examining the Validity of ChatGPT in Identifying Relevant Nephrology Literature: Findings and Implications'
        )
    output = api_client.run(query)
    assert expected_title in output
    assert len(output) == api_client.doc_content_chars_max
Test that returns the correct answer
test_search_call
search = DataForSeoAPIWrapper() output = search.run('pi value') assert '3.14159' in output
def test_search_call() ->None:
    """Live DataForSEO search for 'pi value' mentions 3.14159."""
    wrapper = DataForSeoAPIWrapper()
    result = wrapper.run('pi value')
    assert '3.14159' in result
null
test_get_action_and_input_whitespace
"""Test getting an action from text.""" llm_output = """Thought: I need to search for NBA Action: Search Action Input: NBA""" action, action_input = get_action_and_input(llm_output) assert action == 'Search' assert action_input == 'NBA'
def test_get_action_and_input_whitespace() ->None:
    """Test getting an action from text."""
    # Note the trailing space after 'Search' — the parser must strip it.
    llm_output = (
        'Thought: I need to search for NBA\nAction: Search \nAction Input: NBA'
        )
    action, action_input = get_action_and_input(llm_output)
    assert (action, action_input) == ('Search', 'NBA')
Test getting an action from text.
embeddings
return self.embedding
@property
def embeddings(self) ->Embeddings:
    """Expose the underlying embedding object."""
    return self.embedding
null
embed_documents
"""Embed a list of documents. Args: texts: List[str] The list of texts to embed. batch_size: [int] The batch size of embeddings to send to the model. If zero, then the largest batch size will be detected dynamically at the first request, starting from 250, down to 5. Returns: List of embeddings, one for each text. """ return self.embed(texts, batch_size, 'RETRIEVAL_DOCUMENT')
def embed_documents(self, texts: List[str], batch_size: int=0) ->List[List
    [float]]:
    """Embed a list of documents.

    Args:
        texts: The texts to embed.
        batch_size: Batch size of embeddings sent to the model. When zero,
            the largest workable batch size is detected dynamically at the
            first request, starting from 250 down to 5.

    Returns:
        One embedding per input text.
    """
    return self.embed(texts, batch_size, 'RETRIEVAL_DOCUMENT')
Embed a list of documents. Args: texts: List[str] The list of texts to embed. batch_size: [int] The batch size of embeddings to send to the model. If zero, then the largest batch size will be detected dynamically at the first request, starting from 250, down to 5. Returns: List of embeddings, one for each text.
__init__
self.value = value
def __init__(self, value: Any): self.value = value
null
resize_base64_image
""" Resize an image encoded as a Base64 string. :param base64_string: A Base64 encoded string of the image to be resized. :param size: A tuple representing the new size (width, height) for the image. :return: A Base64 encoded string of the resized image. """ img_data = base64.b64decode(base64_string) img = Image.open(io.BytesIO(img_data)) resized_img = img.resize(size, Image.LANCZOS) buffered = io.BytesIO() resized_img.save(buffered, format=img.format) return base64.b64encode(buffered.getvalue()).decode('utf-8')
def resize_base64_image(base64_string, size=(128, 128)):
    """
    Resize an image encoded as a Base64 string.

    :param base64_string: A Base64 encoded string of the image to be resized.
    :param size: A tuple representing the new size (width, height) for the image.
    :return: A Base64 encoded string of the resized image.
    """
    img = Image.open(io.BytesIO(base64.b64decode(base64_string)))
    resized = img.resize(size, Image.LANCZOS)
    out = io.BytesIO()
    # Keep the original image format (e.g. PNG/JPEG) on re-encode.
    resized.save(out, format=img.format)
    return base64.b64encode(out.getvalue()).decode('utf-8')
Resize an image encoded as a Base64 string. :param base64_string: A Base64 encoded string of the image to be resized. :param size: A tuple representing the new size (width, height) for the image. :return: A Base64 encoded string of the resized image.
search_code
""" Searches code in the repository. # Todo: limit total tokens returned... Parameters: query(str): The search query Returns: str: A string containing, at most, the top 5 search results """ search_result = self.github.search_code(query=query, repo=self. github_repository) if search_result.totalCount == 0: return '0 results found.' max_results = min(5, search_result.totalCount) results = [f'Showing top {max_results} of {search_result.totalCount} results:'] count = 0 for code in search_result: if count >= max_results: break file_content = self.github_repo_instance.get_contents(code.path, ref= self.active_branch).decoded_content.decode() results.append( f'Filepath: `{code.path}`\nFile contents: {file_content}\n<END OF FILE>' ) count += 1 return '\n'.join(results)
def search_code(self, query: str) ->str:
    """
    Searches code in the repository.
    # Todo: limit total tokens returned...
    Parameters:
        query(str): The search query
    Returns:
        str: A string containing, at most, the top 5 search results
    """
    hits = self.github.search_code(query=query, repo=self.github_repository)
    if hits.totalCount == 0:
        return '0 results found.'
    shown = min(5, hits.totalCount)
    lines = [f'Showing top {shown} of {hits.totalCount} results:']
    for idx, code in enumerate(hits):
        if idx >= shown:
            break
        contents = self.github_repo_instance.get_contents(code.path, ref=
            self.active_branch).decoded_content.decode()
        lines.append(
            f'Filepath: `{code.path}`\nFile contents: {contents}\n<END OF FILE>'
            )
    return '\n'.join(lines)
Searches code in the repository. # Todo: limit total tokens returned... Parameters: query(str): The search query Returns: str: A string containing, at most, the top 5 search results
_identifying_params
"""Get the identifying parameters.""" return {**{'model_name': self.model_name}, **self._default_params}
@property def _identifying_params(self) ->Mapping[str, Any]: """Get the identifying parameters.""" return {**{'model_name': self.model_name}, **self._default_params}
Get the identifying parameters.
test_chat_model_caching
prompt: List[BaseMessage] = [HumanMessage(content='How are you?')] response = 'Test response' cached_response = 'Cached test response' cached_message = AIMessage(content=cached_response) llm = FakeListChatModel(responses=[response]) if get_llm_cache(): get_llm_cache().update(prompt=dumps(prompt), llm_string=llm. _get_llm_string(), return_val=[ChatGeneration(message=cached_message)]) result = llm(prompt) assert isinstance(result, AIMessage) assert result.content == cached_response else: raise ValueError( 'The cache not set. This should never happen, as the pytest fixture `set_cache_and_teardown` always sets the cache.' )
def test_chat_model_caching() ->None:
    """A cached chat generation is returned instead of the model's response."""
    prompt: List[BaseMessage] = [HumanMessage(content='How are you?')]
    cached_response = 'Cached test response'
    cached_message = AIMessage(content=cached_response)
    llm = FakeListChatModel(responses=['Test response'])
    cache = get_llm_cache()
    if not cache:
        raise ValueError(
            'The cache not set. This should never happen, as the pytest fixture `set_cache_and_teardown` always sets the cache.'
            )
    cache.update(prompt=dumps(prompt), llm_string=llm._get_llm_string(),
        return_val=[ChatGeneration(message=cached_message)])
    result = llm(prompt)
    assert isinstance(result, AIMessage)
    assert result.content == cached_response
null
_call
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() doc_text = inputs.pop(self.input_key) texts = self.text_splitter.split_text(doc_text) docs = [Document(page_content=text) for text in texts] _inputs: Dict[str, Any] = {**inputs, self.combine_documents_chain.input_key: docs} outputs = self.combine_documents_chain.run(_inputs, callbacks=_run_manager. get_child()) return {self.output_key: outputs}
def _call(self, inputs: Dict[str, str], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, str]:
    """Split the input text into documents and run the combine chain on them."""
    manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    doc_text = inputs.pop(self.input_key)
    split_docs = [Document(page_content=chunk) for chunk in self.
        text_splitter.split_text(doc_text)]
    combine_inputs: Dict[str, Any] = {**inputs, self.
        combine_documents_chain.input_key: split_docs}
    result = self.combine_documents_chain.run(combine_inputs, callbacks=
        manager.get_child())
    return {self.output_key: result}
null
embed_documents
"""Compute doc embeddings using a modelscope embedding model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace('\n', ' '), texts)) inputs = {'source_sentence': texts} embeddings = self.embed(input=inputs)['text_embedding'] return embeddings.tolist()
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Compute doc embeddings using a modelscope embedding model.

    Args:
        texts: The list of texts to embed.

    Returns:
        List of embeddings, one for each text.
    """
    # Newlines are flattened to spaces before embedding.
    cleaned = [text.replace('\n', ' ') for text in texts]
    result = self.embed(input={'source_sentence': cleaned})
    return result['text_embedding'].tolist()
Compute doc embeddings using a modelscope embedding model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text.