method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
get_model
|
"""Download model. f
From https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/,
convert to new ggml format and return model path."""
model_url = (
'https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/resolve/main/ggml-alpaca-7b-q4.bin'
)
tokenizer_url = (
'https://huggingface.co/decapoda-research/llama-7b-hf/resolve/main/tokenizer.model'
)
conversion_script = (
'https://github.com/ggerganov/llama.cpp/raw/master/convert-unversioned-ggml-to-ggml.py'
)
local_filename = model_url.split('/')[-1]
if not os.path.exists('convert-unversioned-ggml-to-ggml.py'):
urlretrieve(conversion_script, 'convert-unversioned-ggml-to-ggml.py')
if not os.path.exists('tokenizer.model'):
urlretrieve(tokenizer_url, 'tokenizer.model')
if not os.path.exists(local_filename):
urlretrieve(model_url, local_filename)
os.system(f'python convert-unversioned-ggml-to-ggml.py . tokenizer.model')
return local_filename
|
def get_model() ->str:
"""Download model. f
From https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/,
convert to new ggml format and return model path."""
model_url = (
'https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/resolve/main/ggml-alpaca-7b-q4.bin'
)
tokenizer_url = (
'https://huggingface.co/decapoda-research/llama-7b-hf/resolve/main/tokenizer.model'
)
conversion_script = (
'https://github.com/ggerganov/llama.cpp/raw/master/convert-unversioned-ggml-to-ggml.py'
)
local_filename = model_url.split('/')[-1]
if not os.path.exists('convert-unversioned-ggml-to-ggml.py'):
urlretrieve(conversion_script, 'convert-unversioned-ggml-to-ggml.py')
if not os.path.exists('tokenizer.model'):
urlretrieve(tokenizer_url, 'tokenizer.model')
if not os.path.exists(local_filename):
urlretrieve(model_url, local_filename)
os.system(
f'python convert-unversioned-ggml-to-ggml.py . tokenizer.model')
return local_filename
|
Download model. f
From https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/,
convert to new ggml format and return model path.
|
on_tool_end
|
"""Run when tool ends running."""
self.metrics['step'] += 1
self.metrics['tool_ends'] += 1
self.metrics['ends'] += 1
tool_ends = self.metrics['tool_ends']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_tool_end', 'output': output})
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f'tool_end_{tool_ends}')
|
def on_tool_end(self, output: str, **kwargs: Any) ->None:
"""Run when tool ends running."""
self.metrics['step'] += 1
self.metrics['tool_ends'] += 1
self.metrics['ends'] += 1
tool_ends = self.metrics['tool_ends']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_tool_end', 'output': output})
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f'tool_end_{tool_ends}')
|
Run when tool ends running.
|
_get_tmdb_api
|
tmdb_bearer_token = kwargs['tmdb_bearer_token']
chain = APIChain.from_llm_and_api_docs(llm, tmdb_docs.TMDB_DOCS, headers={
'Authorization': f'Bearer {tmdb_bearer_token}'}, limit_to_domains=[
'https://api.themoviedb.org/'])
return Tool(name='TMDB-API', description=
'Useful for when you want to get information from The Movie Database. The input should be a question in natural language that this API can answer.'
, func=chain.run)
|
def _get_tmdb_api(llm: BaseLanguageModel, **kwargs: Any) ->BaseTool:
tmdb_bearer_token = kwargs['tmdb_bearer_token']
chain = APIChain.from_llm_and_api_docs(llm, tmdb_docs.TMDB_DOCS,
headers={'Authorization': f'Bearer {tmdb_bearer_token}'},
limit_to_domains=['https://api.themoviedb.org/'])
return Tool(name='TMDB-API', description=
'Useful for when you want to get information from The Movie Database. The input should be a question in natural language that this API can answer.'
, func=chain.run)
| null |
similarity_search_with_score
|
"""Pass through to `knn_search including score`"""
return self.knn_search(query=query, k=k, **kwargs)
|
def similarity_search_with_score(self, query: str, k: int=10, **kwargs: Any
) ->List[Tuple[Document, float]]:
"""Pass through to `knn_search including score`"""
return self.knn_search(query=query, k=k, **kwargs)
|
Pass through to `knn_search including score`
|
test_chat_google_genai_invoke_multimodal
|
messages: list = [HumanMessage(content=[{'type': 'text', 'text':
"Guess what's in this picture! You have 3 guesses."}, {'type':
'image_url', 'image_url': 'data:image/png;base64,' + _B64_string}])]
llm = ChatGoogleGenerativeAI(model=_VISION_MODEL)
response = llm.invoke(messages)
assert isinstance(response.content, str)
assert len(response.content.strip()) > 0
for chunk in llm.stream(messages):
print(chunk)
assert isinstance(chunk.content, str)
assert len(chunk.content.strip()) > 0
|
def test_chat_google_genai_invoke_multimodal() ->None:
messages: list = [HumanMessage(content=[{'type': 'text', 'text':
"Guess what's in this picture! You have 3 guesses."}, {'type':
'image_url', 'image_url': 'data:image/png;base64,' + _B64_string}])]
llm = ChatGoogleGenerativeAI(model=_VISION_MODEL)
response = llm.invoke(messages)
assert isinstance(response.content, str)
assert len(response.content.strip()) > 0
for chunk in llm.stream(messages):
print(chunk)
assert isinstance(chunk.content, str)
assert len(chunk.content.strip()) > 0
| null |
similarity_search_with_score_by_vector
|
"""Return docs most similar to query.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
**kwargs: kwargs to be passed to similarity search. Can include:
nprobe: Optional, number of partitions to check if using IVF_FLAT index
score_threshold: Optional, a floating point value to filter the
resulting set of retrieved docs
Returns:
List of documents most similar to the query text and distance
in float for each. Lower score represents more similarity.
"""
if 'score_threshold' in kwargs:
score_threshold = kwargs.pop('score_threshold')
else:
score_threshold = MAX_FLOAT
d, i = self.vector_index.query(np.array([np.array(embedding).astype(np.
float32)]).astype(np.float32), k=k if filter is None else fetch_k, **kwargs
)
return self.process_index_results(ids=i[0], scores=d[0], filter=filter, k=k,
score_threshold=score_threshold)
|
def similarity_search_with_score_by_vector(self, embedding: List[float], *,
k: int=4, filter: Optional[Dict[str, Any]]=None, fetch_k: int=20, **
kwargs: Any) ->List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
**kwargs: kwargs to be passed to similarity search. Can include:
nprobe: Optional, number of partitions to check if using IVF_FLAT index
score_threshold: Optional, a floating point value to filter the
resulting set of retrieved docs
Returns:
List of documents most similar to the query text and distance
in float for each. Lower score represents more similarity.
"""
if 'score_threshold' in kwargs:
score_threshold = kwargs.pop('score_threshold')
else:
score_threshold = MAX_FLOAT
d, i = self.vector_index.query(np.array([np.array(embedding).astype(np.
float32)]).astype(np.float32), k=k if filter is None else fetch_k,
**kwargs)
return self.process_index_results(ids=i[0], scores=d[0], filter=filter,
k=k, score_threshold=score_threshold)
|
Return docs most similar to query.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
**kwargs: kwargs to be passed to similarity search. Can include:
nprobe: Optional, number of partitions to check if using IVF_FLAT index
score_threshold: Optional, a floating point value to filter the
resulting set of retrieved docs
Returns:
List of documents most similar to the query text and distance
in float for each. Lower score represents more similarity.
|
_create_chat_result
|
generations = []
for choice in response['choices']:
message = _convert_dict_to_message(choice['messages'])
generations.append(ChatGeneration(message=message))
token_usage = response['usage']
llm_output = {'token_usage': token_usage}
return ChatResult(generations=generations, llm_output=llm_output)
|
def _create_chat_result(response: Mapping[str, Any]) ->ChatResult:
generations = []
for choice in response['choices']:
message = _convert_dict_to_message(choice['messages'])
generations.append(ChatGeneration(message=message))
token_usage = response['usage']
llm_output = {'token_usage': token_usage}
return ChatResult(generations=generations, llm_output=llm_output)
| null |
_get_next_response_in_sequence
|
queries = cast(Mapping, self.queries)
response = queries[list(queries.keys())[self.response_index]]
self.response_index = self.response_index + 1
return response
|
@property
def _get_next_response_in_sequence(self) ->str:
queries = cast(Mapping, self.queries)
response = queries[list(queries.keys())[self.response_index]]
self.response_index = self.response_index + 1
return response
| null |
output_keys
|
"""Output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
|
@property
def output_keys(self) ->List[str]:
"""Output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
|
Output keys.
:meta private:
|
__init__
|
"""Initialize with necessary components."""
self.embedding = embedding
self.index = index
self.docstore = docstore
self.ids = ids
|
def __init__(self, embedding: Embeddings, index: Any, docstore: Docstore,
ids: List[str]):
"""Initialize with necessary components."""
self.embedding = embedding
self.index = index
self.docstore = docstore
self.ids = ids
|
Initialize with necessary components.
|
_create_prediction
|
try:
import replicate as replicate_python
except ImportError:
raise ImportError(
'Could not import replicate python package. Please install it with `pip install replicate`.'
)
if self.version_obj is None:
model_str, version_str = self.model.split(':')
model = replicate_python.models.get(model_str)
self.version_obj = model.versions.get(version_str)
if self.prompt_key is None:
input_properties = sorted(self.version_obj.openapi_schema['components']
['schemas']['Input']['properties'].items(), key=lambda item: item[1
].get('x-order', 0))
self.prompt_key = input_properties[0][0]
input_: Dict = {self.prompt_key: prompt, **self.model_kwargs, **kwargs}
return replicate_python.predictions.create(version=self.version_obj, input=
input_)
|
def _create_prediction(self, prompt: str, **kwargs: Any) ->Prediction:
try:
import replicate as replicate_python
except ImportError:
raise ImportError(
'Could not import replicate python package. Please install it with `pip install replicate`.'
)
if self.version_obj is None:
model_str, version_str = self.model.split(':')
model = replicate_python.models.get(model_str)
self.version_obj = model.versions.get(version_str)
if self.prompt_key is None:
input_properties = sorted(self.version_obj.openapi_schema[
'components']['schemas']['Input']['properties'].items(), key=lambda
item: item[1].get('x-order', 0))
self.prompt_key = input_properties[0][0]
input_: Dict = {self.prompt_key: prompt, **self.model_kwargs, **kwargs}
return replicate_python.predictions.create(version=self.version_obj,
input=input_)
| null |
_transform
|
prev_parsed = None
acc_gen = None
for chunk in input:
if isinstance(chunk, BaseMessageChunk):
chunk_gen: Generation = ChatGenerationChunk(message=chunk)
elif isinstance(chunk, BaseMessage):
chunk_gen = ChatGenerationChunk(message=BaseMessageChunk(**chunk.
dict()))
else:
chunk_gen = GenerationChunk(text=chunk)
if acc_gen is None:
acc_gen = chunk_gen
else:
acc_gen = acc_gen + chunk_gen
parsed = self.parse_result([acc_gen], partial=True)
if parsed is not None and parsed != prev_parsed:
if self.diff:
yield self._diff(prev_parsed, parsed)
else:
yield parsed
prev_parsed = parsed
|
def _transform(self, input: Iterator[Union[str, BaseMessage]]) ->Iterator[Any]:
prev_parsed = None
acc_gen = None
for chunk in input:
if isinstance(chunk, BaseMessageChunk):
chunk_gen: Generation = ChatGenerationChunk(message=chunk)
elif isinstance(chunk, BaseMessage):
chunk_gen = ChatGenerationChunk(message=BaseMessageChunk(**
chunk.dict()))
else:
chunk_gen = GenerationChunk(text=chunk)
if acc_gen is None:
acc_gen = chunk_gen
else:
acc_gen = acc_gen + chunk_gen
parsed = self.parse_result([acc_gen], partial=True)
if parsed is not None and parsed != prev_parsed:
if self.diff:
yield self._diff(prev_parsed, parsed)
else:
yield parsed
prev_parsed = parsed
| null |
_run
|
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f'Synchronous browser not provided to {self.name}')
page = get_current_page(self.sync_browser)
response = page.goto(url)
status = response.status if response else 'unknown'
return f'Navigating to {url} returned status code {status}'
|
def _run(self, url: str, run_manager: Optional[CallbackManagerForToolRun]=None
) ->str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f'Synchronous browser not provided to {self.name}')
page = get_current_page(self.sync_browser)
response = page.goto(url)
status = response.status if response else 'unknown'
return f'Navigating to {url} returned status code {status}'
|
Use the tool.
|
test_embeddings_clustering_filter
|
texts = ['What happened to all of my cookies?',
'A cookie is a small, baked sweet treat and you can find it in the cookie',
"monsters' jar.", 'Cookies are good.',
'I have nightmares about the cookie monster.',
'The most popular pizza styles are: Neapolitan, New York-style and',
'Chicago-style. You can find them on iconic restaurants in major cities.',
'Neapolitan pizza: This is the original pizza style,hailing from Naples,',
'Italy.',
'I wish there were better Italian Pizza restaurants in my neighborhood.',
'New York-style pizza: This is characterized by its large, thin crust, and'
, 'generous toppings.',
"The first movie to feature a robot was 'A Trip to the Moon' (1902).",
'The first movie to feature a robot that could pass for a human was',
"'Blade Runner' (1982)",
'The first movie to feature a robot that could fall in love with a human',
"was 'Her' (2013)",
'A robot is a machine capable of carrying out complex actions automatically.'
,
'There are certainly hundreds, if not thousands movies about robots like:',
"'Blade Runner', 'Her' and 'A Trip to the Moon'"]
docs = [Document(page_content=t) for t in texts]
embeddings = OpenAIEmbeddings()
redundant_filter = EmbeddingsClusteringFilter(embeddings=embeddings,
num_clusters=3, num_closest=1, sorted=True)
actual = redundant_filter.transform_documents(docs)
assert len(actual) == 3
assert texts[1] in [d.page_content for d in actual]
assert texts[4] in [d.page_content for d in actual]
assert texts[11] in [d.page_content for d in actual]
|
def test_embeddings_clustering_filter() ->None:
texts = ['What happened to all of my cookies?',
'A cookie is a small, baked sweet treat and you can find it in the cookie'
, "monsters' jar.", 'Cookies are good.',
'I have nightmares about the cookie monster.',
'The most popular pizza styles are: Neapolitan, New York-style and',
'Chicago-style. You can find them on iconic restaurants in major cities.'
,
'Neapolitan pizza: This is the original pizza style,hailing from Naples,'
, 'Italy.',
'I wish there were better Italian Pizza restaurants in my neighborhood.'
,
'New York-style pizza: This is characterized by its large, thin crust, and'
, 'generous toppings.',
"The first movie to feature a robot was 'A Trip to the Moon' (1902).",
'The first movie to feature a robot that could pass for a human was',
"'Blade Runner' (1982)",
'The first movie to feature a robot that could fall in love with a human'
, "was 'Her' (2013)",
'A robot is a machine capable of carrying out complex actions automatically.'
,
'There are certainly hundreds, if not thousands movies about robots like:'
, "'Blade Runner', 'Her' and 'A Trip to the Moon'"]
docs = [Document(page_content=t) for t in texts]
embeddings = OpenAIEmbeddings()
redundant_filter = EmbeddingsClusteringFilter(embeddings=embeddings,
num_clusters=3, num_closest=1, sorted=True)
actual = redundant_filter.transform_documents(docs)
assert len(actual) == 3
assert texts[1] in [d.page_content for d in actual]
assert texts[4] in [d.page_content for d in actual]
assert texts[11] in [d.page_content for d in actual]
| null |
_get_chat_params
|
if len(prompts) > 1:
raise ValueError(
f'OpenAIChat currently only supports single prompt, got {prompts}')
messages = self.prefix_messages + [{'role': 'user', 'content': prompts[0]}]
params: Dict[str, Any] = {**{'model': self.model_name}, **self._default_params}
if stop is not None:
if 'stop' in params:
raise ValueError('`stop` found in both the input and default params.')
params['stop'] = stop
if params.get('max_tokens') == -1:
del params['max_tokens']
return messages, params
|
def _get_chat_params(self, prompts: List[str], stop: Optional[List[str]]=None
) ->Tuple:
if len(prompts) > 1:
raise ValueError(
f'OpenAIChat currently only supports single prompt, got {prompts}')
messages = self.prefix_messages + [{'role': 'user', 'content': prompts[0]}]
params: Dict[str, Any] = {**{'model': self.model_name}, **self.
_default_params}
if stop is not None:
if 'stop' in params:
raise ValueError(
'`stop` found in both the input and default params.')
params['stop'] = stop
if params.get('max_tokens') == -1:
del params['max_tokens']
return messages, params
| null |
lazy_load
|
"""Lazy load records."""
from sodapy import Socrata
client = Socrata(self.city_id, None)
results = client.get(self.dataset_id, limit=self.limit)
for record in results:
yield Document(page_content=str(record), metadata={'source': self.
city_id + '_' + self.dataset_id})
|
def lazy_load(self) ->Iterator[Document]:
"""Lazy load records."""
from sodapy import Socrata
client = Socrata(self.city_id, None)
results = client.get(self.dataset_id, limit=self.limit)
for record in results:
yield Document(page_content=str(record), metadata={'source': self.
city_id + '_' + self.dataset_id})
|
Lazy load records.
|
_import_stackexchange
|
from langchain_community.utilities.stackexchange import StackExchangeAPIWrapper
return StackExchangeAPIWrapper
|
def _import_stackexchange() ->Any:
from langchain_community.utilities.stackexchange import StackExchangeAPIWrapper
return StackExchangeAPIWrapper
| null |
test_docai_parser_invalid_processor_name
|
with patch('google.cloud.documentai.DocumentProcessorServiceClient'):
with pytest.raises(ValueError):
_ = DocAIParser(processor_name=processor_name, location='us')
|
@pytest.mark.requires('google.cloud', 'google.cloud.documentai')
@pytest.mark.parametrize('processor_name', [
'projects/123456/locations/us-central1/processors/ab123dfg:publish',
'ab123dfg'])
def test_docai_parser_invalid_processor_name(processor_name: str) ->None:
with patch('google.cloud.documentai.DocumentProcessorServiceClient'):
with pytest.raises(ValueError):
_ = DocAIParser(processor_name=processor_name, location='us')
| null |
test_api_key_masked
|
"""Test that the API key is masked"""
azure_chat = request.getfixturevalue(fixture_name)
print(azure_chat.endpoint_api_key, end='')
captured = capsys.readouterr()
assert str(azure_chat.endpoint_api_key) == '**********' and repr(azure_chat
.endpoint_api_key
) == "SecretStr('**********')" and captured.out == '**********'
|
def test_api_key_masked(self, fixture_name: str, request: FixtureRequest,
capsys: CaptureFixture) ->None:
"""Test that the API key is masked"""
azure_chat = request.getfixturevalue(fixture_name)
print(azure_chat.endpoint_api_key, end='')
captured = capsys.readouterr()
assert str(azure_chat.endpoint_api_key) == '**********' and repr(azure_chat
.endpoint_api_key
) == "SecretStr('**********')" and captured.out == '**********'
|
Test that the API key is masked
|
test_sitemap_block_only_one
|
"""Test sitemap loader."""
loader = SitemapLoader('https://api.python.langchain.com/sitemap.xml',
blocksize=1000000, blocknum=0)
documents = loader.load()
assert len(documents) > 1
assert 'LangChain Python API' in documents[0].page_content
|
def test_sitemap_block_only_one() ->None:
"""Test sitemap loader."""
loader = SitemapLoader('https://api.python.langchain.com/sitemap.xml',
blocksize=1000000, blocknum=0)
documents = loader.load()
assert len(documents) > 1
assert 'LangChain Python API' in documents[0].page_content
|
Test sitemap loader.
|
test_flat_vector_field_defaults
|
"""Test defaults for FlatVectorField."""
flat_vector_field_data = {'name': 'example', 'dims': 100, 'algorithm': 'FLAT'}
flat_vector = FlatVectorField(**flat_vector_field_data)
assert flat_vector.datatype == 'FLOAT32'
assert flat_vector.distance_metric == 'COSINE'
assert flat_vector.initial_cap is None
assert flat_vector.block_size is None
|
def test_flat_vector_field_defaults() ->None:
"""Test defaults for FlatVectorField."""
flat_vector_field_data = {'name': 'example', 'dims': 100, 'algorithm':
'FLAT'}
flat_vector = FlatVectorField(**flat_vector_field_data)
assert flat_vector.datatype == 'FLOAT32'
assert flat_vector.distance_metric == 'COSINE'
assert flat_vector.initial_cap is None
assert flat_vector.block_size is None
|
Test defaults for FlatVectorField.
|
test_self_hosted_embedding_documents
|
"""Test self-hosted huggingface instruct embeddings."""
documents = ['foo bar'] * 2
gpu = get_remote_instance()
embedding = SelfHostedEmbeddings(model_load_fn=get_pipeline, hardware=gpu,
inference_fn=inference_fn)
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 50265
|
def test_self_hosted_embedding_documents() ->None:
"""Test self-hosted huggingface instruct embeddings."""
documents = ['foo bar'] * 2
gpu = get_remote_instance()
embedding = SelfHostedEmbeddings(model_load_fn=get_pipeline, hardware=
gpu, inference_fn=inference_fn)
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 50265
|
Test self-hosted huggingface instruct embeddings.
|
test_run_chat_model_all_formats
|
llm = FakeChatModel()
_run_llm(llm, inputs, mock.MagicMock())
|
@pytest.mark.parametrize('inputs', _VALID_MESSAGES + _VALID_PROMPTS)
def test_run_chat_model_all_formats(inputs: Dict[str, Any]) ->None:
llm = FakeChatModel()
_run_llm(llm, inputs, mock.MagicMock())
| null |
test_huggingface_pipeline_text_generation
|
"""Test valid call to HuggingFace text generation model."""
llm = HuggingFacePipeline.from_model_id(model_id='gpt2', task=
'text-generation', pipeline_kwargs={'max_new_tokens': 10})
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_huggingface_pipeline_text_generation() ->None:
"""Test valid call to HuggingFace text generation model."""
llm = HuggingFacePipeline.from_model_id(model_id='gpt2', task=
'text-generation', pipeline_kwargs={'max_new_tokens': 10})
output = llm('Say foo:')
assert isinstance(output, str)
|
Test valid call to HuggingFace text generation model.
|
__init__
|
super().__init__()
self.auto_close = auto_close
self.include_names = include_names
self.include_types = include_types
self.include_tags = include_tags
self.exclude_names = exclude_names
self.exclude_types = exclude_types
self.exclude_tags = exclude_tags
send_stream: Any
receive_stream: Any
send_stream, receive_stream = create_memory_object_stream(math.inf)
self.lock = threading.Lock()
self.send_stream = send_stream
self.receive_stream = receive_stream
self._key_map_by_run_id: Dict[UUID, str] = {}
self._counter_map_by_name: Dict[str, int] = defaultdict(int)
self.root_id: Optional[UUID] = None
|
def __init__(self, *, auto_close: bool=True, include_names: Optional[
Sequence[str]]=None, include_types: Optional[Sequence[str]]=None,
include_tags: Optional[Sequence[str]]=None, exclude_names: Optional[
Sequence[str]]=None, exclude_types: Optional[Sequence[str]]=None,
exclude_tags: Optional[Sequence[str]]=None) ->None:
super().__init__()
self.auto_close = auto_close
self.include_names = include_names
self.include_types = include_types
self.include_tags = include_tags
self.exclude_names = exclude_names
self.exclude_types = exclude_types
self.exclude_tags = exclude_tags
send_stream: Any
receive_stream: Any
send_stream, receive_stream = create_memory_object_stream(math.inf)
self.lock = threading.Lock()
self.send_stream = send_stream
self.receive_stream = receive_stream
self._key_map_by_run_id: Dict[UUID, str] = {}
self._counter_map_by_name: Dict[str, int] = defaultdict(int)
self.root_id: Optional[UUID] = None
| null |
attempt_parse_teams
|
"""Parse appropriate content from the list of teams."""
parsed_teams: Dict[str, List[dict]] = {'teams': []}
for team in input_dict['teams']:
try:
team = parse_dict_through_component(team, Team, fault_tolerant=False)
parsed_teams['teams'].append(team)
except Exception as e:
warnings.warn(f'Error parsing a team {e}')
return parsed_teams
|
def attempt_parse_teams(self, input_dict: dict) ->Dict[str, List[dict]]:
"""Parse appropriate content from the list of teams."""
parsed_teams: Dict[str, List[dict]] = {'teams': []}
for team in input_dict['teams']:
try:
team = parse_dict_through_component(team, Team, fault_tolerant=
False)
parsed_teams['teams'].append(team)
except Exception as e:
warnings.warn(f'Error parsing a team {e}')
return parsed_teams
|
Parse appropriate content from the list of teams.
|
deprecated_method
|
"""original doc"""
return 'This is a deprecated method.'
|
@deprecated(since='2.0.0', removal='3.0.0')
def deprecated_method(self) ->str:
"""original doc"""
return 'This is a deprecated method.'
|
original doc
|
log
|
pass
|
def log(self, event: TEvent) ->None:
pass
| null |
_import_vectorstore_tool_VectorStoreQATool
|
from langchain_community.tools.vectorstore.tool import VectorStoreQATool
return VectorStoreQATool
|
def _import_vectorstore_tool_VectorStoreQATool() ->Any:
from langchain_community.tools.vectorstore.tool import VectorStoreQATool
return VectorStoreQATool
| null |
get_output_schema
|
return self.runnable.get_output_schema(config)
|
def get_output_schema(self, config: Optional[RunnableConfig]=None) ->Type[
BaseModel]:
return self.runnable.get_output_schema(config)
| null |
from_llm
|
"""Create a `CriteriaEvalChain` instance from an llm and criteria.
Parameters
----------
llm : BaseLanguageModel
The language model to use for evaluation.
criteria : CRITERIA_TYPE - default=None for "helpfulness"
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
prompt : Optional[BasePromptTemplate], default=None
The prompt template to use for generating prompts. If not provided,
a default prompt template will be used.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain`
constructor.
Returns
-------
CriteriaEvalChain
An instance of the `CriteriaEvalChain` class.
Examples
--------
>>> from langchain_community.llms import OpenAI
>>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
>>> llm = OpenAI()
>>> criteria = {
"hallucination": (
"Does this submission contain information"
" not present in the input or reference?"
),
}
>>> chain = LabeledCriteriaEvalChain.from_llm(
llm=llm,
criteria=criteria,
)
"""
prompt_ = cls._resolve_prompt(prompt)
if criteria == Criteria.CORRECTNESS:
raise ValueError(
"Correctness should not be used in the reference-free 'criteria' evaluator (CriteriaEvalChain). Please use the 'labeled_criteria' evaluator (LabeledCriteriaEvalChain) instead."
)
criteria_ = cls.resolve_criteria(criteria)
criteria_str = '\n'.join(f'{k}: {v}' for k, v in criteria_.items())
prompt_ = prompt_.partial(criteria=criteria_str)
return cls(llm=llm, prompt=prompt_, criterion_name='-'.join(criteria_), **
kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, criteria: Optional[CRITERIA_TYPE]
=None, *, prompt: Optional[BasePromptTemplate]=None, **kwargs: Any
) ->CriteriaEvalChain:
"""Create a `CriteriaEvalChain` instance from an llm and criteria.
Parameters
----------
llm : BaseLanguageModel
The language model to use for evaluation.
criteria : CRITERIA_TYPE - default=None for "helpfulness"
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
prompt : Optional[BasePromptTemplate], default=None
The prompt template to use for generating prompts. If not provided,
a default prompt template will be used.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain`
constructor.
Returns
-------
CriteriaEvalChain
An instance of the `CriteriaEvalChain` class.
Examples
--------
>>> from langchain_community.llms import OpenAI
>>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
>>> llm = OpenAI()
>>> criteria = {
"hallucination": (
"Does this submission contain information"
" not present in the input or reference?"
),
}
>>> chain = LabeledCriteriaEvalChain.from_llm(
llm=llm,
criteria=criteria,
)
"""
prompt_ = cls._resolve_prompt(prompt)
if criteria == Criteria.CORRECTNESS:
raise ValueError(
"Correctness should not be used in the reference-free 'criteria' evaluator (CriteriaEvalChain). Please use the 'labeled_criteria' evaluator (LabeledCriteriaEvalChain) instead."
)
criteria_ = cls.resolve_criteria(criteria)
criteria_str = '\n'.join(f'{k}: {v}' for k, v in criteria_.items())
prompt_ = prompt_.partial(criteria=criteria_str)
return cls(llm=llm, prompt=prompt_, criterion_name='-'.join(criteria_),
**kwargs)
|
Create a `CriteriaEvalChain` instance from an llm and criteria.
Parameters
----------
llm : BaseLanguageModel
The language model to use for evaluation.
criteria : CRITERIA_TYPE - default=None for "helpfulness"
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
prompt : Optional[BasePromptTemplate], default=None
The prompt template to use for generating prompts. If not provided,
a default prompt template will be used.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain`
constructor.
Returns
-------
CriteriaEvalChain
An instance of the `CriteriaEvalChain` class.
Examples
--------
>>> from langchain_community.llms import OpenAI
>>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
>>> llm = OpenAI()
>>> criteria = {
"hallucination": (
"Does this submission contain information"
" not present in the input or reference?"
),
}
>>> chain = LabeledCriteriaEvalChain.from_llm(
llm=llm,
criteria=criteria,
)
|
lc_secrets
|
return {'credentials': 'GIGACHAT_CREDENTIALS', 'access_token':
'GIGACHAT_ACCESS_TOKEN', 'password': 'GIGACHAT_PASSWORD',
'key_file_password': 'GIGACHAT_KEY_FILE_PASSWORD'}
|
@property
def lc_secrets(self) ->Dict[str, str]:
return {'credentials': 'GIGACHAT_CREDENTIALS', 'access_token':
'GIGACHAT_ACCESS_TOKEN', 'password': 'GIGACHAT_PASSWORD',
'key_file_password': 'GIGACHAT_KEY_FILE_PASSWORD'}
| null |
__init__
|
super().__init__(*ops)
self.state = state
|
def __init__(self, *ops: Dict[str, Any], state: RunState) ->None:
super().__init__(*ops)
self.state = state
| null |
_chain_type
|
return 'sql_database_sequential_chain'
|
@property
def _chain_type(self) ->str:
return 'sql_database_sequential_chain'
| null |
chain
|
...
|
@overload
def chain(func: Callable[[Input], AsyncIterator[Output]]) ->Runnable[Input,
Output]:
...
| null |
test_json_equality_evaluator_evaluation_name
|
assert json_equality_evaluator.evaluation_name == 'json_equality'
|
def test_json_equality_evaluator_evaluation_name(json_equality_evaluator:
JsonEqualityEvaluator) ->None:
assert json_equality_evaluator.evaluation_name == 'json_equality'
| null |
from_texts
|
"""Construct USearch wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the USearch database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import USearch
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
usearch = USearch.from_texts(texts, embeddings)
"""
embeddings = embedding.embed_documents(texts)
documents: List[Document] = []
if ids is None:
ids = np.array([str(id) for id, _ in enumerate(texts)])
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
docstore = InMemoryDocstore(dict(zip(ids, documents)))
usearch = dependable_usearch_import()
index = usearch.Index(ndim=len(embeddings[0]), metric=metric)
index.add(np.array(ids), np.array(embeddings))
return cls(embedding, index, docstore, ids.tolist())
|
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
Optional[List[Dict]]=None, ids: Optional[np.ndarray]=None, metric: str=
'cos', **kwargs: Any) ->USearch:
"""Construct USearch wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the USearch database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import USearch
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
usearch = USearch.from_texts(texts, embeddings)
"""
embeddings = embedding.embed_documents(texts)
documents: List[Document] = []
if ids is None:
ids = np.array([str(id) for id, _ in enumerate(texts)])
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
docstore = InMemoryDocstore(dict(zip(ids, documents)))
usearch = dependable_usearch_import()
index = usearch.Index(ndim=len(embeddings[0]), metric=metric)
index.add(np.array(ids), np.array(embeddings))
return cls(embedding, index, docstore, ids.tolist())
|
Construct USearch wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the USearch database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import USearch
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
usearch = USearch.from_texts(texts, embeddings)
|
test_given_no_connection_or_engine_args_provided_default_engine_should_be_used
|
"""When no connection or engine arguments are provided then the default configuration must be used."""
pgvector.PGVector(connection_string=_CONNECTION_STRING, embedding_function=
_EMBEDDING_FUNCTION)
create_engine.assert_called_with(url=_CONNECTION_STRING)
|
@pytest.mark.requires('pgvector')
@mock.patch('sqlalchemy.create_engine')
def test_given_no_connection_or_engine_args_provided_default_engine_should_be_used(
create_engine: Mock) ->None:
"""When no connection or engine arguments are provided then the default configuration must be used."""
pgvector.PGVector(connection_string=_CONNECTION_STRING,
embedding_function=_EMBEDDING_FUNCTION)
create_engine.assert_called_with(url=_CONNECTION_STRING)
|
When no connection or engine arguments are provided then the default configuration must be used.
|
batch
|
return self._batch_with_config(self._batch, inputs, config,
return_exceptions=return_exceptions, **kwargs)
|
def batch(self, inputs: List[str], config: Optional[Union[RunnableConfig,
List[RunnableConfig]]]=None, *, return_exceptions: bool=False, **kwargs:
Any) ->List[str]:
return self._batch_with_config(self._batch, inputs, config,
return_exceptions=return_exceptions, **kwargs)
| null |
_diff
|
"""Convert parsed outputs into a diff format. The semantics of this are
up to the output parser."""
raise NotImplementedError()
|
def _diff(self, prev: Optional[T], next: T) ->T:
"""Convert parsed outputs into a diff format. The semantics of this are
up to the output parser."""
raise NotImplementedError()
|
Convert parsed outputs into a diff format. The semantics of this are
up to the output parser.
|
test_analyticdb_with_filter_match
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = AnalyticDB.from_texts(texts=texts, collection_name=
'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_collection=True)
output = docsearch.similarity_search_with_score('foo', k=1, filter={'page':
'0'})
assert output == [(Document(page_content='foo', metadata={'page': '0'}), 0.0)]
|
def test_analyticdb_with_filter_match() ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = AnalyticDB.from_texts(texts=texts, collection_name=
'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(
), metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_collection=True)
output = docsearch.similarity_search_with_score('foo', k=1, filter={
'page': '0'})
assert output == [(Document(page_content='foo', metadata={'page': '0'}),
0.0)]
|
Test end to end construction and search.
|
_run
|
try:
dir_path_ = self.get_relative_path(dir_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name='dir_path', value=dir_path)
try:
entries = os.listdir(dir_path_)
if entries:
return '\n'.join(entries)
else:
return f'No files found in directory {dir_path}'
except Exception as e:
return 'Error: ' + str(e)
|
def _run(self, dir_path: str='.', run_manager: Optional[
CallbackManagerForToolRun]=None) ->str:
try:
dir_path_ = self.get_relative_path(dir_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name='dir_path', value=dir_path
)
try:
entries = os.listdir(dir_path_)
if entries:
return '\n'.join(entries)
else:
return f'No files found in directory {dir_path}'
except Exception as e:
return 'Error: ' + str(e)
| null |
__getitem__
|
return getattr(self, item)
|
def __getitem__(self, item: str) ->Any:
return getattr(self, item)
| null |
_make_request
|
request = urllib.request.Request(url, headers=self.headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
text = stringify_dict(json_data)
metadata = {'source': url}
return [Document(page_content=text, metadata=metadata)]
|
def _make_request(self, url: str) ->List[Document]:
request = urllib.request.Request(url, headers=self.headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
text = stringify_dict(json_data)
metadata = {'source': url}
return [Document(page_content=text, metadata=metadata)]
| null |
test_blob_mimetype_from_str_data
|
"""Test reading blob from a file path."""
content = b'Hello, World!'
mimetype = 'text/html'
blob = Blob.from_data(content, mime_type=mimetype)
assert blob.mimetype == mimetype
|
def test_blob_mimetype_from_str_data() ->None:
"""Test reading blob from a file path."""
content = b'Hello, World!'
mimetype = 'text/html'
blob = Blob.from_data(content, mime_type=mimetype)
assert blob.mimetype == mimetype
|
Test reading blob from a file path.
|
test_initialize_watsonxllm_cloud_bad_path
|
try:
WatsonxLLM(model_id='google/flan-ul2', url=
'https://us-south.ml.cloud.ibm.com')
except ValueError as e:
assert 'WATSONX_APIKEY' in e.__str__()
|
def test_initialize_watsonxllm_cloud_bad_path() ->None:
try:
WatsonxLLM(model_id='google/flan-ul2', url=
'https://us-south.ml.cloud.ibm.com')
except ValueError as e:
assert 'WATSONX_APIKEY' in e.__str__()
| null |
similarity_search
|
"""Return pinecone documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Dictionary of argument(s) to filter on metadata
namespace: Namespace to search in. Default will search in '' namespace.
Returns:
List of Documents most similar to the query and score for each
"""
docs_and_scores = self.similarity_search_with_score(query, k=k, filter=
filter, namespace=namespace, **kwargs)
return [doc for doc, _ in docs_and_scores]
|
def similarity_search(self, query: str, k: int=4, filter: Optional[dict]=
None, namespace: Optional[str]=None, **kwargs: Any) ->List[Document]:
"""Return pinecone documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Dictionary of argument(s) to filter on metadata
namespace: Namespace to search in. Default will search in '' namespace.
Returns:
List of Documents most similar to the query and score for each
"""
docs_and_scores = self.similarity_search_with_score(query, k=k, filter=
filter, namespace=namespace, **kwargs)
return [doc for doc, _ in docs_and_scores]
|
Return pinecone documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Dictionary of argument(s) to filter on metadata
namespace: Namespace to search in. Default will search in '' namespace.
Returns:
List of Documents most similar to the query and score for each
|
is_lc_serializable
|
return True
|
@classmethod
def is_lc_serializable(self) ->bool:
return True
| null |
_get_invocation_params
|
params = self.dict()
params['stop'] = stop
return {**params, **kwargs}
|
def _get_invocation_params(self, stop: Optional[List[str]]=None, **kwargs: Any
) ->dict:
params = self.dict()
params['stop'] = stop
return {**params, **kwargs}
| null |
on_chain_error
|
"""Do nothing when LLM chain outputs an error."""
pass
|
def on_chain_error(self, error: BaseException, **kwargs: Any) ->None:
"""Do nothing when LLM chain outputs an error."""
pass
|
Do nothing when LLM chain outputs an error.
|
_convert_lc_run_to_wb_span
|
"""Utility to convert any generic LangChain Run into a W&B Trace Span.
:param run: The LangChain Run to convert.
:return: The converted W&B Trace Span.
"""
if run.run_type == 'llm':
return self._convert_llm_run_to_wb_span(run)
elif run.run_type == 'chain':
return self._convert_chain_run_to_wb_span(run)
elif run.run_type == 'tool':
return self._convert_tool_run_to_wb_span(run)
else:
return self._convert_run_to_wb_span(run)
|
def _convert_lc_run_to_wb_span(self, run: Run) ->'Span':
"""Utility to convert any generic LangChain Run into a W&B Trace Span.
:param run: The LangChain Run to convert.
:return: The converted W&B Trace Span.
"""
if run.run_type == 'llm':
return self._convert_llm_run_to_wb_span(run)
elif run.run_type == 'chain':
return self._convert_chain_run_to_wb_span(run)
elif run.run_type == 'tool':
return self._convert_tool_run_to_wb_span(run)
else:
return self._convert_run_to_wb_span(run)
|
Utility to convert any generic LangChain Run into a W&B Trace Span.
:param run: The LangChain Run to convert.
:return: The converted W&B Trace Span.
|
test_chat_model_on_kv_singleio_dataset
|
llm = ChatOpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.
CRITERIA])
run_on_dataset(dataset_name=kv_singleio_dataset_name, llm_or_chain_factory=
llm, evaluation=eval_config, client=client, project_name=
eval_project_name, tags=['shouldpass'])
_check_all_feedback_passed(eval_project_name, client)
|
def test_chat_model_on_kv_singleio_dataset(kv_singleio_dataset_name: str,
eval_project_name: str, client: Client) ->None:
llm = ChatOpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType
.CRITERIA])
run_on_dataset(dataset_name=kv_singleio_dataset_name,
llm_or_chain_factory=llm, evaluation=eval_config, client=client,
project_name=eval_project_name, tags=['shouldpass'])
_check_all_feedback_passed(eval_project_name, client)
| null |
_get_bilibili_subs_and_info
|
try:
from bilibili_api import sync, video
except ImportError:
raise ImportError(
'requests package not found, please install it with `pip install bilibili-api-python`'
)
bvid = re.search('BV\\w+', url)
if bvid is not None:
v = video.Video(bvid=bvid.group())
else:
aid = re.search('av[0-9]+', url)
if aid is not None:
try:
v = video.Video(aid=int(aid.group()[2:]))
except AttributeError:
raise ValueError(f'{url} is not bilibili url.')
else:
raise ValueError(f'{url} is not bilibili url.')
video_info = sync(v.get_info())
video_info.update({'url': url})
sub = sync(v.get_subtitle(video_info['cid']))
sub_list = sub['subtitles']
if sub_list:
sub_url = sub_list[0]['subtitle_url']
if not sub_url.startswith('http'):
sub_url = 'https:' + sub_url
result = requests.get(sub_url)
raw_sub_titles = json.loads(result.content)['body']
raw_transcript = ' '.join([c['content'] for c in raw_sub_titles])
raw_transcript_with_meta_info = f"""Video Title: {video_info['title']},description: {video_info['desc']}
Transcript: {raw_transcript}"""
return raw_transcript_with_meta_info, video_info
else:
raw_transcript = ''
warnings.warn(
f"""
No subtitles found for video: {url}.
Return Empty transcript.
"""
)
return raw_transcript, video_info
|
def _get_bilibili_subs_and_info(self, url: str) ->Tuple[str, dict]:
try:
from bilibili_api import sync, video
except ImportError:
raise ImportError(
'requests package not found, please install it with `pip install bilibili-api-python`'
)
bvid = re.search('BV\\w+', url)
if bvid is not None:
v = video.Video(bvid=bvid.group())
else:
aid = re.search('av[0-9]+', url)
if aid is not None:
try:
v = video.Video(aid=int(aid.group()[2:]))
except AttributeError:
raise ValueError(f'{url} is not bilibili url.')
else:
raise ValueError(f'{url} is not bilibili url.')
video_info = sync(v.get_info())
video_info.update({'url': url})
sub = sync(v.get_subtitle(video_info['cid']))
sub_list = sub['subtitles']
if sub_list:
sub_url = sub_list[0]['subtitle_url']
if not sub_url.startswith('http'):
sub_url = 'https:' + sub_url
result = requests.get(sub_url)
raw_sub_titles = json.loads(result.content)['body']
raw_transcript = ' '.join([c['content'] for c in raw_sub_titles])
raw_transcript_with_meta_info = f"""Video Title: {video_info['title']},description: {video_info['desc']}
Transcript: {raw_transcript}"""
return raw_transcript_with_meta_info, video_info
else:
raw_transcript = ''
warnings.warn(
f"""
No subtitles found for video: {url}.
Return Empty transcript.
"""
)
return raw_transcript, video_info
| null |
test_embaas_embed_documents
|
"""Test embaas embeddings with multiple texts."""
texts = ['foo bar', 'bar foo', 'foo']
embedding = EmbaasEmbeddings()
output = embedding.embed_documents(texts)
assert len(output) == 3
assert len(output[0]) == 1024
assert len(output[1]) == 1024
assert len(output[2]) == 1024
|
def test_embaas_embed_documents() ->None:
"""Test embaas embeddings with multiple texts."""
texts = ['foo bar', 'bar foo', 'foo']
embedding = EmbaasEmbeddings()
output = embedding.embed_documents(texts)
assert len(output) == 3
assert len(output[0]) == 1024
assert len(output[1]) == 1024
assert len(output[2]) == 1024
|
Test embaas embeddings with multiple texts.
|
load_json
|
try:
return json.loads(s)
except Exception:
return {}
|
def load_json(s):
try:
return json.loads(s)
except Exception:
return {}
| null |
delete_collection
|
self.logger.debug('Trying to delete collection')
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
self.logger.warning('Collection not found')
return
session.delete(collection)
session.commit()
|
def delete_collection(self) ->None:
self.logger.debug('Trying to delete collection')
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
self.logger.warning('Collection not found')
return
session.delete(collection)
session.commit()
| null |
_call
|
"""
Compute the string distance between the prediction and the reference.
Args:
inputs (Dict[str, Any]): The input values.
run_manager (Optional[CallbackManagerForChainRun]):
The callback manager.
Returns:
Dict[str, Any]: The evaluation results containing the score.
"""
return {'score': self.compute_metric(inputs['reference'], inputs['prediction'])
}
|
def _call(self, inputs: Dict[str, Any], run_manager: Optional[
CallbackManagerForChainRun]=None) ->Dict[str, Any]:
"""
Compute the string distance between the prediction and the reference.
Args:
inputs (Dict[str, Any]): The input values.
run_manager (Optional[CallbackManagerForChainRun]):
The callback manager.
Returns:
Dict[str, Any]: The evaluation results containing the score.
"""
return {'score': self.compute_metric(inputs['reference'], inputs[
'prediction'])}
|
Compute the string distance between the prediction and the reference.
Args:
inputs (Dict[str, Any]): The input values.
run_manager (Optional[CallbackManagerForChainRun]):
The callback manager.
Returns:
Dict[str, Any]: The evaluation results containing the score.
|
uppercase_and_check_dtype
|
if v.upper() not in REDIS_VECTOR_DTYPE_MAP:
raise ValueError(
f'datatype must be one of {REDIS_VECTOR_DTYPE_MAP.keys()}. Got {v}')
return v.upper()
|
@validator('datatype', pre=True)
def uppercase_and_check_dtype(cls, v: str) ->str:
if v.upper() not in REDIS_VECTOR_DTYPE_MAP:
raise ValueError(
f'datatype must be one of {REDIS_VECTOR_DTYPE_MAP.keys()}. Got {v}'
)
return v.upper()
| null |
similarity_search
|
"""Return docs most similar to query."""
query_vector = self._embedding.embed_query(query)
return [doc for doc, score in self.similarity_search_with_score_by_vector(
query_vector, k, distance_func, **kwargs)]
|
def similarity_search(self, query: str, k: int=4, distance_func: Literal[
'sqrt_euclid', 'neg_dot_prod', 'ned_cos']='sqrt_euclid', **kwargs: Any
) ->List[Document]:
"""Return docs most similar to query."""
query_vector = self._embedding.embed_query(query)
return [doc for doc, score in self.
similarity_search_with_score_by_vector(query_vector, k,
distance_func, **kwargs)]
|
Return docs most similar to query.
|
test_anthropic_streaming
|
"""Test streaming tokens from anthropic."""
llm = Anthropic(model='claude-instant-1')
generator = llm.stream("I'm Pickle Rick")
assert isinstance(generator, Generator)
for token in generator:
assert isinstance(token, str)
|
def test_anthropic_streaming() ->None:
"""Test streaming tokens from anthropic."""
llm = Anthropic(model='claude-instant-1')
generator = llm.stream("I'm Pickle Rick")
assert isinstance(generator, Generator)
for token in generator:
assert isinstance(token, str)
|
Test streaming tokens from anthropic.
|
test_chroma_with_metadatas
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = Chroma.from_texts(collection_name='test_collection', texts=
texts, embedding=FakeEmbeddings(), metadatas=metadatas)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'page': '0'})]
|
def test_chroma_with_metadatas() ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = Chroma.from_texts(collection_name='test_collection', texts=
texts, embedding=FakeEmbeddings(), metadatas=metadatas)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'page': '0'})]
|
Test end to end construction and search.
|
embed_general_texts
|
"""Call out to Voyage Embedding endpoint for embedding general text.
Args:
texts: The list of texts to embed.
input_type: Type of the input text. Default to None, meaning the type is
unspecified. Other options: query, document.
Returns:
Embedding for the text.
"""
return self._get_embeddings(texts, batch_size=self.batch_size, input_type=
input_type)
|
def embed_general_texts(self, texts: List[str], *, input_type: Optional[str
]=None) ->List[List[float]]:
"""Call out to Voyage Embedding endpoint for embedding general text.
Args:
texts: The list of texts to embed.
input_type: Type of the input text. Default to None, meaning the type is
unspecified. Other options: query, document.
Returns:
Embedding for the text.
"""
return self._get_embeddings(texts, batch_size=self.batch_size,
input_type=input_type)
|
Call out to Voyage Embedding endpoint for embedding general text.
Args:
texts: The list of texts to embed.
input_type: Type of the input text. Default to None, meaning the type is
unspecified. Other options: query, document.
Returns:
Embedding for the text.
|
_invocation_params
|
api_key = cast(SecretStr, self.voyage_api_key).get_secret_value()
params = {'url': self.voyage_api_base, 'headers': {'Authorization':
f'Bearer {api_key}'}, 'json': {'model': self.model, 'input': input,
'input_type': input_type}, 'timeout': self.request_timeout}
return params
|
def _invocation_params(self, input: List[str], input_type: Optional[str]=None
) ->Dict:
api_key = cast(SecretStr, self.voyage_api_key).get_secret_value()
params = {'url': self.voyage_api_base, 'headers': {'Authorization':
f'Bearer {api_key}'}, 'json': {'model': self.model, 'input': input,
'input_type': input_type}, 'timeout': self.request_timeout}
return params
| null |
_identifying_params
|
"""Get the identifying parameters."""
return {**{'model': self.model}, **self._default_params}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
"""Get the identifying parameters."""
return {**{'model': self.model}, **self._default_params}
|
Get the identifying parameters.
|
test_unnamed_decorator
|
"""Test functionality with unnamed decorator."""
@tool
def search_api(query: str) ->str:
"""Search the API for the query."""
return 'API result'
assert isinstance(search_api, BaseTool)
assert search_api.name == 'search_api'
assert not search_api.return_direct
assert search_api('test') == 'API result'
|
def test_unnamed_decorator() ->None:
"""Test functionality with unnamed decorator."""
@tool
def search_api(query: str) ->str:
"""Search the API for the query."""
return 'API result'
assert isinstance(search_api, BaseTool)
assert search_api.name == 'search_api'
assert not search_api.return_direct
assert search_api('test') == 'API result'
|
Test functionality with unnamed decorator.
|
test_agent_bad_action
|
"""Test react chain when bad action given."""
agent = _get_agent()
output = agent.run('when was langchain made')
assert output == 'curses foiled again'
|
def test_agent_bad_action() ->None:
"""Test react chain when bad action given."""
agent = _get_agent()
output = agent.run('when was langchain made')
assert output == 'curses foiled again'
|
Test react chain when bad action given.
|
_Global
|
self.fill('global ')
interleave(lambda : self.write(', '), self.write, t.names)
|
def _Global(self, t):
self.fill('global ')
interleave(lambda : self.write(', '), self.write, t.names)
| null |
_import_ddg_search_tool_DuckDuckGoSearchRun
|
from langchain_community.tools.ddg_search.tool import DuckDuckGoSearchRun
return DuckDuckGoSearchRun
|
def _import_ddg_search_tool_DuckDuckGoSearchRun() ->Any:
from langchain_community.tools.ddg_search.tool import DuckDuckGoSearchRun
return DuckDuckGoSearchRun
| null |
add_texts
|
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
ids = []
embeddings: Optional[List[List[float]]] = None
if self._embedding:
if not isinstance(texts, list):
texts = list(texts)
embeddings = self._embedding.embed_documents(texts)
with self._client.batch as batch:
for i, text in enumerate(texts):
data_properties = {self._text_key: text}
if metadatas is not None:
for key, val in metadatas[i].items():
data_properties[key] = _json_serializable(val)
_id = get_valid_uuid(uuid4())
if 'uuids' in kwargs:
_id = kwargs['uuids'][i]
elif 'ids' in kwargs:
_id = kwargs['ids'][i]
batch.add_data_object(data_object=data_properties, class_name=self.
_index_name, uuid=_id, vector=embeddings[i] if embeddings else
None, tenant=kwargs.get('tenant'))
ids.append(_id)
return ids
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
None, **kwargs: Any) ->List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
ids = []
embeddings: Optional[List[List[float]]] = None
if self._embedding:
if not isinstance(texts, list):
texts = list(texts)
embeddings = self._embedding.embed_documents(texts)
with self._client.batch as batch:
for i, text in enumerate(texts):
data_properties = {self._text_key: text}
if metadatas is not None:
for key, val in metadatas[i].items():
data_properties[key] = _json_serializable(val)
_id = get_valid_uuid(uuid4())
if 'uuids' in kwargs:
_id = kwargs['uuids'][i]
elif 'ids' in kwargs:
_id = kwargs['ids'][i]
batch.add_data_object(data_object=data_properties, class_name=
self._index_name, uuid=_id, vector=embeddings[i] if
embeddings else None, tenant=kwargs.get('tenant'))
ids.append(_id)
return ids
|
Upload texts with metadata (properties) to Weaviate.
|
astra_db_collection
|
from astrapy.db import AstraDB
astra_db = AstraDB(token=ASTRA_DB_APPLICATION_TOKEN, api_endpoint=
ASTRA_DB_API_ENDPOINT, namespace=ASTRA_DB_KEYSPACE)
collection_name = f"lc_test_loader_{str(uuid.uuid4()).split('-')[0]}"
collection = astra_db.create_collection(collection_name)
yield collection
astra_db.delete_collection(collection_name)
|
@pytest.fixture
def astra_db_collection():
from astrapy.db import AstraDB
astra_db = AstraDB(token=ASTRA_DB_APPLICATION_TOKEN, api_endpoint=
ASTRA_DB_API_ENDPOINT, namespace=ASTRA_DB_KEYSPACE)
collection_name = f"lc_test_loader_{str(uuid.uuid4()).split('-')[0]}"
collection = astra_db.create_collection(collection_name)
yield collection
astra_db.delete_collection(collection_name)
| null |
return_values
|
"""Return values of the agent."""
return ['output']
|
@property
def return_values(self) ->List[str]:
"""Return values of the agent."""
return ['output']
|
Return values of the agent.
|
read_schema
|
"""Reads in the index schema from a dict or yaml file.
Check if it is a dict and return RedisModel otherwise, check if it's a path and
read in the file assuming it's a yaml file and return a RedisModel
"""
if isinstance(index_schema, dict):
return index_schema
elif isinstance(index_schema, Path):
with open(index_schema, 'rb') as f:
return yaml.safe_load(f)
elif isinstance(index_schema, str):
if Path(index_schema).resolve().is_file():
with open(index_schema, 'rb') as f:
return yaml.safe_load(f)
else:
raise FileNotFoundError(
f'index_schema file {index_schema} does not exist')
else:
raise TypeError(
f'index_schema must be a dict, or path to a yaml file Got {type(index_schema)}'
)
|
def read_schema(index_schema: Optional[Union[Dict[str, List[Any]], str, os.
PathLike]]) ->Dict[str, Any]:
"""Reads in the index schema from a dict or yaml file.
Check if it is a dict and return RedisModel otherwise, check if it's a path and
read in the file assuming it's a yaml file and return a RedisModel
"""
if isinstance(index_schema, dict):
return index_schema
elif isinstance(index_schema, Path):
with open(index_schema, 'rb') as f:
return yaml.safe_load(f)
elif isinstance(index_schema, str):
if Path(index_schema).resolve().is_file():
with open(index_schema, 'rb') as f:
return yaml.safe_load(f)
else:
raise FileNotFoundError(
f'index_schema file {index_schema} does not exist')
else:
raise TypeError(
f'index_schema must be a dict, or path to a yaml file Got {type(index_schema)}'
)
|
Reads in the index schema from a dict or yaml file.
Check if it is a dict and return RedisModel otherwise, check if it's a path and
read in the file assuming it's a yaml file and return a RedisModel
|
build_extra
|
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get('model_kwargs', {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f'Found {field_name} supplied twice.')
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values['model_kwargs'] = extra
return values
|
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) ->Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.
values()}
extra = values.get('model_kwargs', {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f'Found {field_name} supplied twice.')
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values['model_kwargs'] = extra
return values
|
Build extra kwargs from additional params that were passed in.
|
_auth
|
"""Authenticate with Microsoft Graph API"""
if self.access_token != '':
return
if self.auth_with_token:
with self.token_path.open('r') as token_file:
self.access_token = token_file.read()
else:
try:
from msal import ConfidentialClientApplication
except ImportError as e:
raise ImportError(
'MSAL package not found, please install it with `pip install msal`'
) from e
client_instance = ConfidentialClientApplication(client_id=self.settings
.client_id, client_credential=self.settings.client_secret.
get_secret_value(), authority=self.authority_url)
authorization_request_url = client_instance.get_authorization_request_url(
self._scopes)
print('Visit the following url to give consent:')
print(authorization_request_url)
authorization_url = input('Paste the authenticated url here:\n')
authorization_code = authorization_url.split('code=')[1].split('&')[0]
access_token_json = client_instance.acquire_token_by_authorization_code(
code=authorization_code, scopes=self._scopes)
self.access_token = access_token_json['access_token']
try:
if not self.token_path.parent.exists():
self.token_path.parent.mkdir(parents=True)
except Exception as e:
raise Exception(
f'Could not create the folder {self.token_path.parent} ' +
'to store the access token.') from e
with self.token_path.open('w') as token_file:
token_file.write(self.access_token)
|
def _auth(self) ->None:
"""Authenticate with Microsoft Graph API"""
if self.access_token != '':
return
if self.auth_with_token:
with self.token_path.open('r') as token_file:
self.access_token = token_file.read()
else:
try:
from msal import ConfidentialClientApplication
except ImportError as e:
raise ImportError(
'MSAL package not found, please install it with `pip install msal`'
) from e
client_instance = ConfidentialClientApplication(client_id=self.
settings.client_id, client_credential=self.settings.
client_secret.get_secret_value(), authority=self.authority_url)
authorization_request_url = (client_instance.
get_authorization_request_url(self._scopes))
print('Visit the following url to give consent:')
print(authorization_request_url)
authorization_url = input('Paste the authenticated url here:\n')
authorization_code = authorization_url.split('code=')[1].split('&')[0]
access_token_json = (client_instance.
acquire_token_by_authorization_code(code=authorization_code,
scopes=self._scopes))
self.access_token = access_token_json['access_token']
try:
if not self.token_path.parent.exists():
self.token_path.parent.mkdir(parents=True)
except Exception as e:
raise Exception(
f'Could not create the folder {self.token_path.parent} ' +
'to store the access token.') from e
with self.token_path.open('w') as token_file:
token_file.write(self.access_token)
|
Authenticate with Microsoft Graph API
|
embed_query
|
return self._query([text])[0]
|
def embed_query(self, text: str) ->List[float]:
    """Embed a single query string and return its embedding vector."""
    embeddings = self._query([text])
    return embeddings[0]
| null |
from_texts
|
"""Return VectorStore initialized from texts and embeddings."""
if not collection_name:
raise ValueError('Collection name must be provided')
if not vector_size:
raise ValueError('Vector size must be provided')
if not api_key:
raise ValueError('API key must be provided')
semadb = cls(collection_name, vector_size, embedding, distance_strategy=
distance_strategy, api_key=api_key)
if not semadb.create_collection():
raise ValueError('Error creating collection')
semadb.add_texts(texts, metadatas=metadatas)
return semadb
|
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
    Optional[List[dict]]=None, collection_name: str='', vector_size: int=0,
    api_key: str='', distance_strategy: DistanceStrategy=DistanceStrategy.
    EUCLIDEAN_DISTANCE, **kwargs: Any) ->'SemaDB':
    """Return VectorStore initialized from texts and embeddings.

    Validates the required connection arguments, creates the remote
    collection, indexes the given texts, and returns the ready store.
    """
    # Required arguments are checked in a fixed order so the first
    # missing one is reported, matching the original behavior.
    required = ((collection_name, 'Collection name must be provided'), (
        vector_size, 'Vector size must be provided'), (api_key,
        'API key must be provided'))
    for value, message in required:
        if not value:
            raise ValueError(message)
    store = cls(collection_name, vector_size, embedding, distance_strategy
        =distance_strategy, api_key=api_key)
    if not store.create_collection():
        raise ValueError('Error creating collection')
    store.add_texts(texts, metadatas=metadatas)
    return store
|
Return VectorStore initialized from texts and embeddings.
|
test_memory_with_message_store
|
"""Test the memory with a message store."""
message_history = CosmosDBChatMessageHistory(cosmos_endpoint=endpoint,
cosmos_database='chat_history', cosmos_container='messages', credential
=credential, session_id='my-test-session', user_id='my-test-user', ttl=10)
message_history.prepare_cosmos()
memory = ConversationBufferMemory(memory_key='baz', chat_memory=
message_history, return_messages=True)
memory.chat_memory.add_ai_message('This is me, the AI')
memory.chat_memory.add_user_message('This is me, the human')
messages = memory.chat_memory.messages
messages_json = json.dumps([message_to_dict(msg) for msg in messages])
assert 'This is me, the AI' in messages_json
assert 'This is me, the human' in messages_json
memory.chat_memory.clear()
assert memory.chat_memory.messages == []
|
def test_memory_with_message_store() ->None:
    """Test the memory with a message store.

    Round-trips AI and human messages through a CosmosDB-backed chat
    history, checks they survive JSON serialization, then clears the store.
    """
    # NOTE(review): integration test — relies on module-level `endpoint` and
    # `credential` and a reachable Cosmos DB instance.
    message_history = CosmosDBChatMessageHistory(cosmos_endpoint=endpoint,
        cosmos_database='chat_history', cosmos_container='messages',
        credential=credential, session_id='my-test-session', user_id=
        'my-test-user', ttl=10)
    message_history.prepare_cosmos()
    memory = ConversationBufferMemory(memory_key='baz', chat_memory=
        message_history, return_messages=True)
    memory.chat_memory.add_ai_message('This is me, the AI')
    memory.chat_memory.add_user_message('This is me, the human')
    messages = memory.chat_memory.messages
    # Serialize to JSON to confirm the content survives a round-trip.
    messages_json = json.dumps([message_to_dict(msg) for msg in messages])
    assert 'This is me, the AI' in messages_json
    assert 'This is me, the human' in messages_json
    memory.chat_memory.clear()
    assert memory.chat_memory.messages == []
|
Test the memory with a message store.
|
input_keys
|
"""
Get the input keys.
Returns:
List[str]: The input keys.
"""
return ['reference', 'prediction']
|
@property
def input_keys(self) ->List[str]:
    """Get the input keys.

    Returns:
        List[str]: The input keys.
    """
    keys = ['reference', 'prediction']
    return keys
|
Get the input keys.
Returns:
List[str]: The input keys.
|
drop_tables
|
with self._conn.begin():
Base.metadata.drop_all(self._conn)
|
def drop_tables(self) ->None:
    """Drop every table registered on ``Base.metadata``.

    Runs inside ``self._conn.begin()`` so the drop happens within a
    transaction on the stored connection.
    """
    with self._conn.begin():
        Base.metadata.drop_all(self._conn)
| null |
from_gml
|
try:
import networkx as nx
except ImportError:
raise ImportError(
'Could not import networkx python package. Please install it with `pip install networkx`.'
)
graph = nx.read_gml(gml_path)
return cls(graph)
|
@classmethod
def from_gml(cls, gml_path: str) ->NetworkxEntityGraph:
    """Construct an entity graph by reading a GML file from ``gml_path``."""
    try:
        import networkx as nx
    except ImportError:
        raise ImportError(
            'Could not import networkx python package. Please install it with `pip install networkx`.'
            )
    return cls(nx.read_gml(gml_path))
| null |
test_color_question_1
|
"""Test simple question."""
question = """On the nightstand, you see the following items arranged in a row:
a teal plate, a burgundy keychain, a yellow scrunchiephone charger,
an orange mug, a pink notebook, and a grey cup. How many non-orange
items do you see to the left of the teal item?"""
prompt = COLORED_OBJECT_PROMPT.format(question=question)
queries = {prompt: _COLORED_OBJECT_SOLUTION_1}
fake_llm = FakeLLM(queries=queries)
fake_pal_chain = PALChain.from_colored_object_prompt(fake_llm, timeout=None)
output = fake_pal_chain.run(question)
assert output == '0'
|
def test_color_question_1() ->None:
    """Test simple question.

    The FakeLLM returns a canned PAL program for the exact prompt; the
    chain must execute it and produce the expected count.
    """
    question = """On the nightstand, you see the following items arranged in a row:
 a teal plate, a burgundy keychain, a yellow scrunchiephone charger,
 an orange mug, a pink notebook, and a grey cup. How many non-orange
 items do you see to the left of the teal item?"""
    prompt = COLORED_OBJECT_PROMPT.format(question=question)
    # Map the rendered prompt to the canned solution so FakeLLM answers it.
    queries = {prompt: _COLORED_OBJECT_SOLUTION_1}
    fake_llm = FakeLLM(queries=queries)
    fake_pal_chain = PALChain.from_colored_object_prompt(fake_llm, timeout=None
        )
    output = fake_pal_chain.run(question)
    assert output == '0'
|
Test simple question.
|
get_full_header
|
"""Return a full header of the agent's status, summary, and current time."""
now = datetime.now() if now is None else now
summary = self.get_summary(force_refresh=force_refresh, now=now)
current_time_str = now.strftime('%B %d, %Y, %I:%M %p')
return f"""{summary}
It is {current_time_str}.
{self.name}'s status: {self.status}"""
|
def get_full_header(self, force_refresh: bool=False, now: Optional[
    datetime]=None) ->str:
    """Return a full header of the agent's status, summary, and current time."""
    if now is None:
        now = datetime.now()
    summary = self.get_summary(force_refresh=force_refresh, now=now)
    timestamp = now.strftime('%B %d, %Y, %I:%M %p')
    return f"{summary}\nIt is {timestamp}.\n{self.name}'s status: {self.status}"
|
Return a full header of the agent's status, summary, and current time.
|
test_openai_multiple_prompts
|
"""Test completion with multiple prompts."""
output = llm.generate(["I'm Pickle Rick", "I'm Pickle Rick"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
assert len(output.generations) == 2
|
@pytest.mark.scheduled
def test_openai_multiple_prompts(llm: AzureOpenAI) ->None:
    """Test completion with multiple prompts."""
    output = llm.generate(["I'm Pickle Rick", "I'm Pickle Rick"])
    assert isinstance(output, LLMResult)
    assert isinstance(output.generations, list)
    # One generations list per input prompt.
    assert len(output.generations) == 2
|
Test completion with multiple prompts.
|
on_tool_error
|
"""Do nothing when tool outputs an error."""
|
def on_tool_error(self, error: BaseException, **kwargs: Any) ->None:
    """Ignore tool errors; this callback is intentionally a no-op."""
    return None
|
Do nothing when tool outputs an error.
|
test_alibabacloud_opensearch_with_text_and_meta_score_query
|
opensearch = create_alibabacloud_opensearch()
output = opensearch.similarity_search_with_relevance_scores(query='foo',
search_filter={'string_field': 'value1', 'int_field': 1, 'float_field':
1.0, 'double_field': 2.0}, k=1)
assert output == [(Document(page_content='foo', metadata={'string_field':
'value1', 'int_field': 1, 'float_field': 1.0, 'double_field': 2.0}), 0.0)]
|
def test_alibabacloud_opensearch_with_text_and_meta_score_query() ->None:
    """Search for 'foo' filtered on string/int/float/double metadata fields
    and expect the matching document back with a relevance score of 0.0."""
    opensearch = create_alibabacloud_opensearch()
    output = opensearch.similarity_search_with_relevance_scores(query='foo',
        search_filter={'string_field': 'value1', 'int_field': 1,
        'float_field': 1.0, 'double_field': 2.0}, k=1)
    assert output == [(Document(page_content='foo', metadata={
        'string_field': 'value1', 'int_field': 1, 'float_field': 1.0,
        'double_field': 2.0}), 0.0)]
| null |
test_aql_generation
|
"""Test that AQL statement is correctly generated and executed."""
db = get_arangodb_client()
populate_arangodb_database(db)
graph = ArangoGraph(db)
chain = ArangoGraphQAChain.from_llm(OpenAI(temperature=0), graph=graph)
chain.return_aql_result = True
output = chain('Is Ned Stark alive?')
assert output['aql_result'] == [True]
assert 'Yes' in output['result']
output = chain('How old is Arya Stark?')
assert output['aql_result'] == [11]
assert '11' in output['result']
output = chain('What is the relationship between Arya Stark and Ned Stark?')
assert len(output['aql_result']) == 1
assert 'child of' in output['result']
|
def test_aql_generation() ->None:
    """Test that AQL statement is correctly generated and executed."""
    # NOTE(review): integration test — needs a reachable ArangoDB instance
    # and OpenAI credentials for the LLM-backed QA chain.
    db = get_arangodb_client()
    populate_arangodb_database(db)
    graph = ArangoGraph(db)
    chain = ArangoGraphQAChain.from_llm(OpenAI(temperature=0), graph=graph)
    chain.return_aql_result = True
    # Boolean-style question.
    output = chain('Is Ned Stark alive?')
    assert output['aql_result'] == [True]
    assert 'Yes' in output['result']
    # Numeric lookup.
    output = chain('How old is Arya Stark?')
    assert output['aql_result'] == [11]
    assert '11' in output['result']
    # Relationship traversal.
    output = chain('What is the relationship between Arya Stark and Ned Stark?'
        )
    assert len(output['aql_result']) == 1
    assert 'child of' in output['result']
|
Test that AQL statement is correctly generated and executed.
|
_identifying_params
|
"""Get the identifying parameters."""
return {}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters.

    This implementation exposes no identifying parameters, so the
    mapping is empty.
    """
    return dict()
|
Get the identifying parameters.
|
func
|
return idchain
|
def func(__input: dict) ->Runnable:
    # Routing callback: ignores the input and always returns the same
    # runnable captured from the enclosing scope.
    return idchain
| null |
parse_sitemap
|
"""Parse sitemap xml and load into a list of dicts.
Args:
soup: BeautifulSoup object.
Returns:
List of dicts.
"""
els = []
for url in soup.find_all('url'):
loc = url.find('loc')
if not loc:
continue
loc_text = loc.text.strip()
if self.restrict_to_same_domain and not self.is_local:
if _extract_scheme_and_domain(loc_text) != _extract_scheme_and_domain(
self.web_path):
continue
if self.allow_url_patterns and not any(re.match(regexp_pattern,
loc_text) for regexp_pattern in self.allow_url_patterns):
continue
els.append({tag: prop.text for tag in ['loc', 'lastmod', 'changefreq',
'priority'] if (prop := url.find(tag))})
for sitemap in soup.find_all('sitemap'):
loc = sitemap.find('loc')
if not loc:
continue
soup_child = self.scrape_all([loc.text], 'xml')[0]
els.extend(self.parse_sitemap(soup_child))
return els
|
def parse_sitemap(self, soup: Any) ->List[dict]:
    """Parse sitemap xml and load into a list of dicts.

    Args:
        soup: BeautifulSoup object.

    Returns:
        List of dicts, one per ``<url>`` entry, each holding whichever of
        loc/lastmod/changefreq/priority tags the entry carries.
    """
    els = []
    for url in soup.find_all('url'):
        loc = url.find('loc')
        if not loc:
            continue
        loc_text = loc.text.strip()
        # Optionally drop URLs whose scheme/domain differ from the
        # sitemap's own (only meaningful for remote sitemaps).
        if self.restrict_to_same_domain and not self.is_local:
            if _extract_scheme_and_domain(loc_text
                ) != _extract_scheme_and_domain(self.web_path):
                continue
        # If an allow-list is configured, keep only URLs matching at
        # least one pattern.
        if self.allow_url_patterns and not any(re.match(regexp_pattern,
            loc_text) for regexp_pattern in self.allow_url_patterns):
            continue
        # Collect only the tags actually present on this entry.
        els.append({tag: prop.text for tag in ['loc', 'lastmod',
            'changefreq', 'priority'] if (prop := url.find(tag))})
    # Sitemap index files nest further <sitemap> elements: fetch and
    # parse each child recursively.
    for sitemap in soup.find_all('sitemap'):
        loc = sitemap.find('loc')
        if not loc:
            continue
        soup_child = self.scrape_all([loc.text], 'xml')[0]
        els.extend(self.parse_sitemap(soup_child))
    return els
|
Parse sitemap xml and load into a list of dicts.
Args:
soup: BeautifulSoup object.
Returns:
List of dicts.
|
clear
|
"""Clear cache."""
self._cache = {}
|
def clear(self, **kwargs: Any) ->None:
    """Clear cache by replacing it with a fresh empty dict."""
    self._cache = dict()
|
Clear cache.
|
_llm_type
|
"""Return type of llm."""
return 'titan_takeoff_pro'
|
@property
def _llm_type(self) ->str:
    """Return type of llm."""
    llm_type = 'titan_takeoff_pro'
    return llm_type
|
Return type of llm.
|
_format_output
|
return output['output']['choices'][0]['text']
|
def _format_output(self, output: dict) ->str:
    """Extract the generated text from a raw completion payload."""
    choices = output['output']['choices']
    return choices[0]['text']
| null |
_type
|
return 'json_functions'
|
@property
def _type(self) ->str:
    """Identifier for this parser type."""
    parser_type = 'json_functions'
    return parser_type
| null |
_default_params
|
params: Dict[str, Any] = {'target_uri': self.target_uri, 'endpoint': self.
endpoint, 'temperature': self.temperature, 'n': self.n, 'stop': self.
stop, 'max_tokens': self.max_tokens, 'extra_params': self.extra_params}
return params
|
@property
def _default_params(self) ->Dict[str, Any]:
    """Default request parameters assembled from instance configuration."""
    return {
        'target_uri': self.target_uri,
        'endpoint': self.endpoint,
        'temperature': self.temperature,
        'n': self.n,
        'stop': self.stop,
        'max_tokens': self.max_tokens,
        'extra_params': self.extra_params,
    }
| null |
save
|
with open(self.persist_path, 'wb') as fp:
fp.write(self.bson.dumps(data))
|
def save(self, data: Any) ->None:
    """Serialize ``data`` with BSON and write it to ``self.persist_path``.

    The file is opened in binary mode and overwritten on every call.
    """
    with open(self.persist_path, 'wb') as fp:
        fp.write(self.bson.dumps(data))
| null |
test_metadata_with_tags_and_frontmatter
|
"""Verify a doc with frontmatter and tags/dataview tags are all added to
metadata."""
doc = next(doc for doc in docs if doc.metadata['source'] ==
'tags_and_frontmatter.md')
FRONTMATTER_FIELDS = {'aBool', 'aFloat', 'anInt', 'anArray', 'aString',
'aDict', 'tags'}
DATAVIEW_FIELDS = {'dataview1', 'dataview2', 'dataview3'}
assert set(doc.metadata
) == STANDARD_METADATA_FIELDS | FRONTMATTER_FIELDS | DATAVIEW_FIELDS
|
def test_metadata_with_tags_and_frontmatter() ->None:
    """Verify a doc with frontmatter and tags/dataview tags are all added to
    metadata."""
    # Pick the fixture document that exercises every metadata source.
    doc = next(doc for doc in docs if doc.metadata['source'] ==
        'tags_and_frontmatter.md')
    FRONTMATTER_FIELDS = {'aBool', 'aFloat', 'anInt', 'anArray', 'aString',
        'aDict', 'tags'}
    DATAVIEW_FIELDS = {'dataview1', 'dataview2', 'dataview3'}
    # Metadata keys must be exactly the union of standard, frontmatter and
    # dataview fields — no extras, none missing.
    assert set(doc.metadata
        ) == STANDARD_METADATA_FIELDS | FRONTMATTER_FIELDS | DATAVIEW_FIELDS
|
Verify a doc with frontmatter and tags/dataview tags are all added to
metadata.
|
_format_reference
|
"""Format the reference text.
Args:
reference (str): The reference text.
Returns:
str: The formatted reference text.
"""
if not reference:
return ''
return f"""
The following is the expected answer. Use this to measure correctness:
[GROUND_TRUTH]
{reference}
[END_GROUND_TRUTH]
"""
|
@staticmethod
def _format_reference(reference: Optional[str]) ->str:
    """Format the reference text.

    Args:
        reference (str): The reference text.

    Returns:
        str: The formatted reference text, or '' when no reference is
        given.
    """
    if reference:
        return f"""
The following is the expected answer. Use this to measure correctness:
[GROUND_TRUTH]
{reference}
[END_GROUND_TRUTH]
"""
    return ''
|
Format the reference text.
Args:
reference (str): The reference text.
Returns:
str: The formatted reference text.
|
test_init
|
CohereRerank()
CohereRerank(top_n=5, model='rerank-english_v2.0', cohere_api_key='foo',
user_agent='bar')
|
@pytest.mark.requires('cohere')
def test_init() ->None:
    """Smoke-test CohereRerank construction with defaults and with
    explicit keyword arguments."""
    CohereRerank()
    CohereRerank(top_n=5, model='rerank-english_v2.0', cohere_api_key='foo',
        user_agent='bar')
| null |
__init__
|
"""Initializes the JsonSchemaEvaluator.
Args:
**kwargs: Additional keyword arguments.
Raises:
ImportError: If the jsonschema package is not installed.
"""
super().__init__()
try:
import jsonschema
except ImportError:
raise ImportError(
'The JsonSchemaEvaluator requires the jsonschema package. Please install it with `pip install jsonschema`.'
)
|
def __init__(self, **kwargs: Any) ->None:
    """Initializes the JsonSchemaEvaluator.

    Args:
        **kwargs: Additional keyword arguments (currently unused).

    Raises:
        ImportError: If the jsonschema package is not installed.
    """
    super().__init__()
    try:
        import jsonschema  # noqa: F401  # availability probe only
    except ImportError as e:
        # Chain the original error so the missing-dependency cause is
        # preserved in the traceback (explicit `from e` instead of the
        # implicit "during handling of the above exception" context).
        raise ImportError(
            'The JsonSchemaEvaluator requires the jsonschema package. Please install it with `pip install jsonschema`.'
            ) from e
|
Initializes the JsonSchemaEvaluator.
Args:
**kwargs: Additional keyword arguments.
Raises:
ImportError: If the jsonschema package is not installed.
|
load
|
"""Load documents."""
psychic_docs = self.psychic.get_documents(connector_id=self.connector_id,
account_id=self.account_id)
return [Document(page_content=doc['content'], metadata={'title': doc[
'title'], 'source': doc['uri']}) for doc in psychic_docs.documents]
|
def load(self) ->List[Document]:
    """Load documents.

    Fetches the connector's documents from the Psychic API and wraps
    each one in a Document with title/source metadata.
    """
    response = self.psychic.get_documents(connector_id=self.
        connector_id, account_id=self.account_id)
    documents = []
    for raw in response.documents:
        documents.append(Document(page_content=raw['content'], metadata={
            'title': raw['title'], 'source': raw['uri']}))
    return documents
|
Load documents.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.