method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
query_params
|
return [property.name for property in self.properties if property.location ==
APIPropertyLocation.QUERY]
|
@property
def query_params(self) ->List[str]:
return [property.name for property in self.properties if property.
location == APIPropertyLocation.QUERY]
| null |
_import_pipelineai
|
from langchain_community.llms.pipelineai import PipelineAI
return PipelineAI
|
def _import_pipelineai() ->Any:
from langchain_community.llms.pipelineai import PipelineAI
return PipelineAI
| null |
_type
|
return 'yaml'
|
@property
def _type(self) ->str:
return 'yaml'
| null |
process_svg
|
try:
import pytesseract
from PIL import Image
from reportlab.graphics import renderPM
from svglib.svglib import svg2rlg
except ImportError:
raise ImportError(
'`pytesseract`, `Pillow`, `reportlab` or `svglib` package not found, please run `pip install pytesseract Pillow reportlab svglib`'
)
response = self.confluence.request(path=link, absolute=True)
text = ''
if response.status_code != 200 or response.content == b'' or response.content is None:
return text
drawing = svg2rlg(BytesIO(response.content))
img_data = BytesIO()
renderPM.drawToFile(drawing, img_data, fmt='PNG')
img_data.seek(0)
image = Image.open(img_data)
return pytesseract.image_to_string(image, lang=ocr_languages)
|
def process_svg(self, link: str, ocr_languages: Optional[str]=None) ->str:
try:
import pytesseract
from PIL import Image
from reportlab.graphics import renderPM
from svglib.svglib import svg2rlg
except ImportError:
raise ImportError(
'`pytesseract`, `Pillow`, `reportlab` or `svglib` package not found, please run `pip install pytesseract Pillow reportlab svglib`'
)
response = self.confluence.request(path=link, absolute=True)
text = ''
if (response.status_code != 200 or response.content == b'' or response.
content is None):
return text
drawing = svg2rlg(BytesIO(response.content))
img_data = BytesIO()
renderPM.drawToFile(drawing, img_data, fmt='PNG')
img_data.seek(0)
image = Image.open(img_data)
return pytesseract.image_to_string(image, lang=ocr_languages)
| null |
_llm_type
|
"""Return type of llm."""
return 'ctranslate2'
|
@property
def _llm_type(self) ->str:
"""Return type of llm."""
return 'ctranslate2'
|
Return type of llm.
|
test_chat_wasm_service
|
"""This test requires the port 8080 is not occupied."""
service_url = 'https://b008-54-186-154-209.ngrok-free.app'
chat = WasmChatService(service_url=service_url)
system_message = SystemMessage(content='You are an AI assistant')
user_message = HumanMessage(content='What is the capital of France?')
messages = [system_message, user_message]
response = chat(messages)
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
assert 'Paris' in response.content
|
@pytest.mark.enable_socket
def test_chat_wasm_service() ->None:
"""This test requires the port 8080 is not occupied."""
service_url = 'https://b008-54-186-154-209.ngrok-free.app'
chat = WasmChatService(service_url=service_url)
system_message = SystemMessage(content='You are an AI assistant')
user_message = HumanMessage(content='What is the capital of France?')
messages = [system_message, user_message]
response = chat(messages)
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
assert 'Paris' in response.content
|
This test requires the port 8080 is not occupied.
|
test_anthropic_model_name_param
|
llm = ChatAnthropic(model_name='foo')
assert llm.model == 'foo'
|
@pytest.mark.requires('anthropic')
def test_anthropic_model_name_param() ->None:
llm = ChatAnthropic(model_name='foo')
assert llm.model == 'foo'
| null |
play
|
"""Play the text as speech."""
elevenlabs = _import_elevenlabs()
with open(speech_file, mode='rb') as f:
speech = f.read()
elevenlabs.play(speech)
|
def play(self, speech_file: str) ->None:
"""Play the text as speech."""
elevenlabs = _import_elevenlabs()
with open(speech_file, mode='rb') as f:
speech = f.read()
elevenlabs.play(speech)
|
Play the text as speech.
|
_type
|
return 'numbered-list'
|
@property
def _type(self) ->str:
return 'numbered-list'
| null |
_generate
|
output_str = self._call(messages, stop=stop, run_manager=run_manager, **kwargs)
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->ChatResult:
output_str = self._call(messages, stop=stop, run_manager=run_manager,
**kwargs)
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
| null |
_import_replicate
|
from langchain_community.llms.replicate import Replicate
return Replicate
|
def _import_replicate() ->Any:
from langchain_community.llms.replicate import Replicate
return Replicate
| null |
run_creation
|
"""Creates a Python file which will be deployed on beam."""
script = textwrap.dedent(
"""
import os
import transformers
from transformers import GPT2LMHeadModel, GPT2Tokenizer
model_name = "{model_name}"
def beam_langchain(**inputs):
prompt = inputs["prompt"]
length = inputs["max_length"]
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
encodedPrompt = tokenizer.encode(prompt, return_tensors='pt')
outputs = model.generate(encodedPrompt, max_length=int(length),
do_sample=True, pad_token_id=tokenizer.eos_token_id)
output = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(output)
return {{"text": output}}
"""
)
script_name = 'run.py'
with open(script_name, 'w') as file:
file.write(script.format(model_name=self.model_name))
|
def run_creation(self) ->None:
"""Creates a Python file which will be deployed on beam."""
script = textwrap.dedent(
"""
import os
import transformers
from transformers import GPT2LMHeadModel, GPT2Tokenizer
model_name = "{model_name}"
def beam_langchain(**inputs):
prompt = inputs["prompt"]
length = inputs["max_length"]
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
encodedPrompt = tokenizer.encode(prompt, return_tensors='pt')
outputs = model.generate(encodedPrompt, max_length=int(length),
do_sample=True, pad_token_id=tokenizer.eos_token_id)
output = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(output)
return {{"text": output}}
"""
)
script_name = 'run.py'
with open(script_name, 'w') as file:
file.write(script.format(model_name=self.model_name))
|
Creates a Python file which will be deployed on beam.
|
on_tool_end
|
"""Do nothing when tool ends."""
pass
|
def on_tool_end(self, output: str, observation_prefix: Optional[str]=None,
llm_prefix: Optional[str]=None, **kwargs: Any) ->None:
"""Do nothing when tool ends."""
pass
|
Do nothing when tool ends.
|
__init__
|
"""Instantiate Mastodon toots loader.
Args:
mastodon_accounts: The list of Mastodon accounts to query.
number_toots: How many toots to pull for each account. Defaults to 100.
exclude_replies: Whether to exclude reply toots from the load.
Defaults to False.
access_token: An access token if toots are loaded as a Mastodon app. Can
also be specified via the environment variables "MASTODON_ACCESS_TOKEN".
api_base_url: A Mastodon API base URL to talk to, if not using the default.
Defaults to "https://mastodon.social".
"""
mastodon = _dependable_mastodon_import()
access_token = access_token or os.environ.get('MASTODON_ACCESS_TOKEN')
self.api = mastodon.Mastodon(access_token=access_token, api_base_url=
api_base_url)
self.mastodon_accounts = mastodon_accounts
self.number_toots = number_toots
self.exclude_replies = exclude_replies
|
def __init__(self, mastodon_accounts: Sequence[str], number_toots: Optional
[int]=100, exclude_replies: bool=False, access_token: Optional[str]=
None, api_base_url: str='https://mastodon.social'):
"""Instantiate Mastodon toots loader.
Args:
mastodon_accounts: The list of Mastodon accounts to query.
number_toots: How many toots to pull for each account. Defaults to 100.
exclude_replies: Whether to exclude reply toots from the load.
Defaults to False.
access_token: An access token if toots are loaded as a Mastodon app. Can
also be specified via the environment variables "MASTODON_ACCESS_TOKEN".
api_base_url: A Mastodon API base URL to talk to, if not using the default.
Defaults to "https://mastodon.social".
"""
mastodon = _dependable_mastodon_import()
access_token = access_token or os.environ.get('MASTODON_ACCESS_TOKEN')
self.api = mastodon.Mastodon(access_token=access_token, api_base_url=
api_base_url)
self.mastodon_accounts = mastodon_accounts
self.number_toots = number_toots
self.exclude_replies = exclude_replies
|
Instantiate Mastodon toots loader.
Args:
mastodon_accounts: The list of Mastodon accounts to query.
number_toots: How many toots to pull for each account. Defaults to 100.
exclude_replies: Whether to exclude reply toots from the load.
Defaults to False.
access_token: An access token if toots are loaded as a Mastodon app. Can
also be specified via the environment variables "MASTODON_ACCESS_TOKEN".
api_base_url: A Mastodon API base URL to talk to, if not using the default.
Defaults to "https://mastodon.social".
|
_call
|
"""Call out to ForefrontAI's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = ForefrontAI("Tell me a joke.")
"""
auth_value = f'Bearer {self.forefrontai_api_key.get_secret_value()}'
response = requests.post(url=self.endpoint_url, headers={'Authorization':
auth_value, 'Content-Type': 'application/json'}, json={'text': prompt,
**self._default_params, **kwargs})
response_json = response.json()
text = response_json['result'][0]['completion']
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call out to ForefrontAI's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = ForefrontAI("Tell me a joke.")
"""
auth_value = f'Bearer {self.forefrontai_api_key.get_secret_value()}'
response = requests.post(url=self.endpoint_url, headers={
'Authorization': auth_value, 'Content-Type': 'application/json'},
json={'text': prompt, **self._default_params, **kwargs})
response_json = response.json()
text = response_json['result'][0]['completion']
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
|
Call out to ForefrontAI's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = ForefrontAI("Tell me a joke.")
|
test_run_success_multiple_arxiv_identifiers
|
"""Test a query of multiple arxiv identifiers that returns the correct answer"""
output = api_client.run('1605.08386v1 2212.00794v2 2308.07912')
assert 'Heat-bath random walks with Markov bases' in output
assert 'Scaling Language-Image Pre-training via Masking' in output
assert 'Ultra-low mass PBHs in the early universe can explain the PTA signal' in output
|
def test_run_success_multiple_arxiv_identifiers(api_client: ArxivAPIWrapper
) ->None:
"""Test a query of multiple arxiv identifiers that returns the correct answer"""
output = api_client.run('1605.08386v1 2212.00794v2 2308.07912')
assert 'Heat-bath random walks with Markov bases' in output
assert 'Scaling Language-Image Pre-training via Masking' in output
assert 'Ultra-low mass PBHs in the early universe can explain the PTA signal' in output
|
Test a query of multiple arxiv identifiers that returns the correct answer
|
test_call_vllm
|
"""Test valid call to oci model deployment endpoint."""
endpoint = 'https://MD_OCID/predict'
responses.add(responses.POST, endpoint, json={'choices': [{'index': 0,
'text': 'This is a completion.'}]}, status=200)
mocker.patch('ads.common.auth.default_signer', return_value=dict(signer=None))
llm = OCIModelDeploymentVLLM(endpoint=endpoint, model='my_model')
output = llm.invoke('This is a prompt.')
assert isinstance(output, str)
|
@pytest.mark.requires('ads')
@responses.activate
def test_call_vllm(mocker: MockerFixture) ->None:
"""Test valid call to oci model deployment endpoint."""
endpoint = 'https://MD_OCID/predict'
responses.add(responses.POST, endpoint, json={'choices': [{'index': 0,
'text': 'This is a completion.'}]}, status=200)
mocker.patch('ads.common.auth.default_signer', return_value=dict(signer
=None))
llm = OCIModelDeploymentVLLM(endpoint=endpoint, model='my_model')
output = llm.invoke('This is a prompt.')
assert isinstance(output, str)
|
Test valid call to oci model deployment endpoint.
|
test_scann_search_not_found
|
"""Test what happens when document is not found."""
texts = ['foo', 'bar', 'baz']
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
docsearch.docstore = InMemoryDocstore({})
with pytest.raises(ValueError):
docsearch.similarity_search('foo')
|
def test_scann_search_not_found() ->None:
"""Test what happens when document is not found."""
texts = ['foo', 'bar', 'baz']
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
docsearch.docstore = InMemoryDocstore({})
with pytest.raises(ValueError):
docsearch.similarity_search('foo')
|
Test what happens when document is not found.
|
format_auto_embed_on
|
chosen_action, cost, prob = self.get_label(event)
context_emb, action_embs = self.get_context_and_action_embeddings(event)
indexed_dot_product = self.get_indexed_dot_product(context_emb, action_embs)
action_lines = []
for i, action in enumerate(action_embs):
line_parts = []
dot_prods = []
if cost is not None and chosen_action == i:
line_parts.append(f'{chosen_action}:{cost}:{prob}')
for ns, action in action.items():
line_parts.append(f'|{ns}')
elements = action if isinstance(action, list) else [action]
nsa = []
for elem in elements:
line_parts.append(f'{elem}')
ns_a = f'{ns}={elem}'
nsa.append(ns_a)
for k, v in indexed_dot_product.items():
dot_prods.append(v[ns_a])
nsa_str = ' '.join(nsa)
line_parts.append(f'|# {nsa_str}')
line_parts.append(f'|dotprod {self._str(dot_prods)}')
action_lines.append(' '.join(line_parts))
shared = []
for item in context_emb:
for ns, context in item.items():
shared.append(f'|{ns}')
elements = context if isinstance(context, list) else [context]
nsc = []
for elem in elements:
shared.append(f'{elem}')
nsc.append(f'{ns}={elem}')
nsc_str = ' '.join(nsc)
shared.append(f'|@ {nsc_str}')
return 'shared ' + ' '.join(shared) + '\n' + '\n'.join(action_lines)
|
def format_auto_embed_on(self, event: PickBestEvent) ->str:
chosen_action, cost, prob = self.get_label(event)
context_emb, action_embs = self.get_context_and_action_embeddings(event)
indexed_dot_product = self.get_indexed_dot_product(context_emb, action_embs
)
action_lines = []
for i, action in enumerate(action_embs):
line_parts = []
dot_prods = []
if cost is not None and chosen_action == i:
line_parts.append(f'{chosen_action}:{cost}:{prob}')
for ns, action in action.items():
line_parts.append(f'|{ns}')
elements = action if isinstance(action, list) else [action]
nsa = []
for elem in elements:
line_parts.append(f'{elem}')
ns_a = f'{ns}={elem}'
nsa.append(ns_a)
for k, v in indexed_dot_product.items():
dot_prods.append(v[ns_a])
nsa_str = ' '.join(nsa)
line_parts.append(f'|# {nsa_str}')
line_parts.append(f'|dotprod {self._str(dot_prods)}')
action_lines.append(' '.join(line_parts))
shared = []
for item in context_emb:
for ns, context in item.items():
shared.append(f'|{ns}')
elements = context if isinstance(context, list) else [context]
nsc = []
for elem in elements:
shared.append(f'{elem}')
nsc.append(f'{ns}={elem}')
nsc_str = ' '.join(nsc)
shared.append(f'|@ {nsc_str}')
return 'shared ' + ' '.join(shared) + '\n' + '\n'.join(action_lines)
| null |
output_keys
|
return ['destination', 'next_inputs']
|
@property
def output_keys(self) ->List[str]:
return ['destination', 'next_inputs']
| null |
__init__
|
"""Override init to support instantiation by position for backward compat."""
super().__init__(return_values=return_values, log=log, **kwargs)
|
def __init__(self, return_values: dict, log: str, **kwargs: Any):
"""Override init to support instantiation by position for backward compat."""
super().__init__(return_values=return_values, log=log, **kwargs)
|
Override init to support instantiation by position for backward compat.
|
test_scann_vector_mips_l2
|
"""Test vector similarity with MIPS and L2."""
texts = ['foo', 'bar', 'baz']
euclidean_search = ScaNN.from_texts(texts, FakeEmbeddings())
output = euclidean_search.similarity_search_with_score('foo', k=1)
expected_euclidean = [(Document(page_content='foo', metadata={}), 0.0)]
assert output == expected_euclidean
mips_search = ScaNN.from_texts(texts, FakeEmbeddings(), distance_strategy=
DistanceStrategy.MAX_INNER_PRODUCT, normalize_L2=True)
output = mips_search.similarity_search_with_score('foo', k=1)
expected_mips = [(Document(page_content='foo', metadata={}), 1.0)]
assert output == expected_mips
|
def test_scann_vector_mips_l2() ->None:
"""Test vector similarity with MIPS and L2."""
texts = ['foo', 'bar', 'baz']
euclidean_search = ScaNN.from_texts(texts, FakeEmbeddings())
output = euclidean_search.similarity_search_with_score('foo', k=1)
expected_euclidean = [(Document(page_content='foo', metadata={}), 0.0)]
assert output == expected_euclidean
mips_search = ScaNN.from_texts(texts, FakeEmbeddings(),
distance_strategy=DistanceStrategy.MAX_INNER_PRODUCT, normalize_L2=True
)
output = mips_search.similarity_search_with_score('foo', k=1)
expected_mips = [(Document(page_content='foo', metadata={}), 1.0)]
assert output == expected_mips
|
Test vector similarity with MIPS and L2.
|
train_unsupervised
|
...
|
@abstractmethod
def train_unsupervised(self, inputs: Sequence[str], **kwargs: Any
) ->TrainResult:
...
| null |
selector
|
"""Get length based selector to use in tests."""
prompts = PromptTemplate(input_variables=['question'], template='{question}')
selector = LengthBasedExampleSelector(examples=EXAMPLES, example_prompt=
prompts, max_length=30)
return selector
|
@pytest.fixture
def selector() ->LengthBasedExampleSelector:
"""Get length based selector to use in tests."""
prompts = PromptTemplate(input_variables=['question'], template=
'{question}')
selector = LengthBasedExampleSelector(examples=EXAMPLES, example_prompt
=prompts, max_length=30)
return selector
|
Get length based selector to use in tests.
|
_import_zapier_tool_ZapierNLAListActions
|
from langchain_community.tools.zapier.tool import ZapierNLAListActions
return ZapierNLAListActions
|
def _import_zapier_tool_ZapierNLAListActions() ->Any:
from langchain_community.tools.zapier.tool import ZapierNLAListActions
return ZapierNLAListActions
| null |
__from
|
num_dimensions = len(embeddings[0])
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
if service_url is None:
service_url = cls.get_service_url(kwargs)
store = cls(service_url=service_url, num_dimensions=num_dimensions,
collection_name=collection_name, embedding=embedding, distance_strategy
=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs)
store.add_embeddings(texts=texts, embeddings=embeddings, metadatas=
metadatas, ids=ids, **kwargs)
return store
|
@classmethod
def __from(cls, texts: List[str], embeddings: List[List[float]], embedding:
Embeddings, metadatas: Optional[List[dict]]=None, ids: Optional[List[
str]]=None, collection_name: str=_LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy=DEFAULT_DISTANCE_STRATEGY,
service_url: Optional[str]=None, pre_delete_collection: bool=False, **
kwargs: Any) ->TimescaleVector:
num_dimensions = len(embeddings[0])
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
if service_url is None:
service_url = cls.get_service_url(kwargs)
store = cls(service_url=service_url, num_dimensions=num_dimensions,
collection_name=collection_name, embedding=embedding,
distance_strategy=distance_strategy, pre_delete_collection=
pre_delete_collection, **kwargs)
store.add_embeddings(texts=texts, embeddings=embeddings, metadatas=
metadatas, ids=ids, **kwargs)
return store
| null |
test_parse_json_with_code_blocks
|
parsed = parse_json_markdown(JSON_WITH_MARKDOWN_CODE_BLOCK)
assert parsed == {'foo': '```bar```'}
parsed = parse_json_markdown(JSON_WITH_MARKDOWN_CODE_BLOCK_AND_NEWLINES)
assert parsed == {'action': 'Final Answer', 'action_input':
"""```bar
<div id="1" class="value">
text
</div>```"""}
|
def test_parse_json_with_code_blocks() ->None:
parsed = parse_json_markdown(JSON_WITH_MARKDOWN_CODE_BLOCK)
assert parsed == {'foo': '```bar```'}
parsed = parse_json_markdown(JSON_WITH_MARKDOWN_CODE_BLOCK_AND_NEWLINES)
assert parsed == {'action': 'Final Answer', 'action_input':
"""```bar
<div id="1" class="value">
text
</div>```"""}
| null |
test_api_key_masked_when_passed_via_constructor
|
llm = Fireworks(fireworks_api_key='secret-api-key')
print(llm.fireworks_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
@pytest.mark.requires('fireworks')
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture
) ->None:
llm = Fireworks(fireworks_api_key='secret-api-key')
print(llm.fireworks_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
| null |
apply
|
"""Call the chain on all inputs in the list."""
return [self(inputs, callbacks=callbacks) for inputs in input_list]
|
def apply(self, input_list: List[Dict[str, Any]], callbacks: Callbacks=None
) ->List[Dict[str, str]]:
"""Call the chain on all inputs in the list."""
return [self(inputs, callbacks=callbacks) for inputs in input_list]
|
Call the chain on all inputs in the list.
|
get_topological_sort
|
"""Get a list of entity names in the graph sorted by causal dependence."""
import networkx as nx
return list(nx.topological_sort(self._graph))
|
def get_topological_sort(self) ->List[str]:
"""Get a list of entity names in the graph sorted by causal dependence."""
import networkx as nx
return list(nx.topological_sort(self._graph))
|
Get a list of entity names in the graph sorted by causal dependence.
|
on_agent_action
|
self._require_current_thought().on_agent_action(action, color, **kwargs)
self._prune_old_thought_containers()
|
def on_agent_action(self, action: AgentAction, color: Optional[str]=None,
**kwargs: Any) ->Any:
self._require_current_thought().on_agent_action(action, color, **kwargs)
self._prune_old_thought_containers()
| null |
recommended_games
|
try:
import steamspypi
except ImportError:
raise ImportError('steamspypi library is not installed.')
users_games = self.get_users_games(steam_id)
result = {}
most_popular_genre = ''
most_popular_genre_count = 0
for game in users_games['games']:
appid = game['appid']
data_request = {'request': 'appdetails', 'appid': appid}
genreStore = steamspypi.download(data_request)
genreList = genreStore.get('genre', '').split(', ')
for genre in genreList:
if genre in result:
result[genre] += 1
else:
result[genre] = 1
if result[genre] > most_popular_genre_count:
most_popular_genre_count = result[genre]
most_popular_genre = genre
data_request = dict()
data_request['request'] = 'genre'
data_request['genre'] = most_popular_genre
data = steamspypi.download(data_request)
sorted_data = sorted(data.values(), key=lambda x: x.get('average_forever',
0), reverse=True)
owned_games = [game['appid'] for game in users_games['games']]
remaining_games = [game for game in sorted_data if game['appid'] not in
owned_games]
top_5_popular_not_owned = [game['name'] for game in remaining_games[:5]]
return str(top_5_popular_not_owned)
|
def recommended_games(self, steam_id: str) ->str:
try:
import steamspypi
except ImportError:
raise ImportError('steamspypi library is not installed.')
users_games = self.get_users_games(steam_id)
result = {}
most_popular_genre = ''
most_popular_genre_count = 0
for game in users_games['games']:
appid = game['appid']
data_request = {'request': 'appdetails', 'appid': appid}
genreStore = steamspypi.download(data_request)
genreList = genreStore.get('genre', '').split(', ')
for genre in genreList:
if genre in result:
result[genre] += 1
else:
result[genre] = 1
if result[genre] > most_popular_genre_count:
most_popular_genre_count = result[genre]
most_popular_genre = genre
data_request = dict()
data_request['request'] = 'genre'
data_request['genre'] = most_popular_genre
data = steamspypi.download(data_request)
sorted_data = sorted(data.values(), key=lambda x: x.get(
'average_forever', 0), reverse=True)
owned_games = [game['appid'] for game in users_games['games']]
remaining_games = [game for game in sorted_data if game['appid'] not in
owned_games]
top_5_popular_not_owned = [game['name'] for game in remaining_games[:5]]
return str(top_5_popular_not_owned)
| null |
get_langserve_export
|
with open(filepath) as f:
data: Dict[str, Any] = load(f)
try:
module = data['tool']['langserve']['export_module']
attr = data['tool']['langserve']['export_attr']
package_name = data['tool']['poetry']['name']
except KeyError as e:
raise KeyError('Invalid LangServe PyProject.toml') from e
return LangServeExport(module=module, attr=attr, package_name=package_name)
|
def get_langserve_export(filepath: Path) ->LangServeExport:
with open(filepath) as f:
data: Dict[str, Any] = load(f)
try:
module = data['tool']['langserve']['export_module']
attr = data['tool']['langserve']['export_attr']
package_name = data['tool']['poetry']['name']
except KeyError as e:
raise KeyError('Invalid LangServe PyProject.toml') from e
return LangServeExport(module=module, attr=attr, package_name=package_name)
| null |
validate_environment
|
"""Validate that api key exists in environment."""
values['alphavantage_api_key'] = get_from_dict_or_env(values,
'alphavantage_api_key', 'ALPHAVANTAGE_API_KEY')
return values
|
@root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key exists in environment."""
values['alphavantage_api_key'] = get_from_dict_or_env(values,
'alphavantage_api_key', 'ALPHAVANTAGE_API_KEY')
return values
|
Validate that api key exists in environment.
|
format_response_payload
|
return json.loads(output)[0]['0']
|
def format_response_payload(self, output: bytes) ->str:
return json.loads(output)[0]['0']
| null |
test_prompt_from_jinja2_template_multiple_inputs
|
"""Test with multiple input variables."""
template = """Hello world
Your variable: {{ foo }}
{# This will not get rendered #}
{% if bar %}
You just set bar boolean variable to true
{% endif %}
{% for i in foo_list %}
{{ i }}
{% endfor %}
"""
prompt = PromptTemplate.from_template(template, template_format='jinja2')
expected_prompt = PromptTemplate(template=template, input_variables=['bar',
'foo', 'foo_list'], template_format='jinja2')
assert prompt == expected_prompt
|
@pytest.mark.requires('jinja2')
def test_prompt_from_jinja2_template_multiple_inputs() ->None:
"""Test with multiple input variables."""
template = """Hello world
Your variable: {{ foo }}
{# This will not get rendered #}
{% if bar %}
You just set bar boolean variable to true
{% endif %}
{% for i in foo_list %}
{{ i }}
{% endfor %}
"""
prompt = PromptTemplate.from_template(template, template_format='jinja2')
expected_prompt = PromptTemplate(template=template, input_variables=[
'bar', 'foo', 'foo_list'], template_format='jinja2')
assert prompt == expected_prompt
|
Test with multiple input variables.
|
test_visit_structured_query_one_attr
|
query = 'What is the capital of France?'
comp = Comparison(comparator=Comparator.IN, attribute='qty', value=[5, 15, 20])
structured_query = StructuredQuery(query=query, filter=comp)
expected = query, {'pre_filter': {'qty': {'$in': [5, 15, 20]}}}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
|
def test_visit_structured_query_one_attr() ->None:
query = 'What is the capital of France?'
comp = Comparison(comparator=Comparator.IN, attribute='qty', value=[5,
15, 20])
structured_query = StructuredQuery(query=query, filter=comp)
expected = query, {'pre_filter': {'qty': {'$in': [5, 15, 20]}}}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
| null |
load
|
"""
Loads the query result from Wikipedia into a list of Documents.
Returns:
List[Document]: A list of Document objects representing the loaded
Wikipedia pages.
"""
client = WikipediaAPIWrapper(lang=self.lang, top_k_results=self.
load_max_docs, load_all_available_meta=self.load_all_available_meta,
doc_content_chars_max=self.doc_content_chars_max)
docs = client.load(self.query)
return docs
|
def load(self) ->List[Document]:
"""
Loads the query result from Wikipedia into a list of Documents.
Returns:
List[Document]: A list of Document objects representing the loaded
Wikipedia pages.
"""
client = WikipediaAPIWrapper(lang=self.lang, top_k_results=self.
load_max_docs, load_all_available_meta=self.load_all_available_meta,
doc_content_chars_max=self.doc_content_chars_max)
docs = client.load(self.query)
return docs
|
Loads the query result from Wikipedia into a list of Documents.
Returns:
List[Document]: A list of Document objects representing the loaded
Wikipedia pages.
|
_import_openai
|
from langchain_community.llms.openai import OpenAI
return OpenAI
|
def _import_openai() ->Any:
from langchain_community.llms.openai import OpenAI
return OpenAI
| null |
on_llm_new_token
|
"""Run on new LLM token. Only available when streaming is enabled.
Args:
token (str): The new token.
chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk,
containing content and other information.
"""
|
def on_llm_new_token(self, token: str, *, chunk: Optional[Union[
GenerationChunk, ChatGenerationChunk]]=None, run_id: UUID,
parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Any:
"""Run on new LLM token. Only available when streaming is enabled.
Args:
token (str): The new token.
chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk,
containing content and other information.
"""
|
Run on new LLM token. Only available when streaming is enabled.
Args:
token (str): The new token.
chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk,
containing content and other information.
|
load
|
"""Load documents."""
data = self.nua.run({'action': 'pull', 'id': self.id, 'path': None, 'text':
None})
if not data:
return []
obj = json.loads(data)
text = obj['extracted_text'][0]['body']['text']
print(text)
metadata = {'file': obj['file_extracted_data'][0], 'metadata': obj[
'field_metadata'][0]}
return [Document(page_content=text, metadata=metadata)]
|
def load(self) ->List[Document]:
"""Load documents."""
data = self.nua.run({'action': 'pull', 'id': self.id, 'path': None,
'text': None})
if not data:
return []
obj = json.loads(data)
text = obj['extracted_text'][0]['body']['text']
print(text)
metadata = {'file': obj['file_extracted_data'][0], 'metadata': obj[
'field_metadata'][0]}
return [Document(page_content=text, metadata=metadata)]
|
Load documents.
|
visit_comparison
|
try:
from timescale_vector import client
except ImportError as e:
raise ImportError(
'Cannot import timescale-vector. Please install with `pip install timescale-vector`.'
) from e
return client.Predicates((comparison.attribute, self._format_func(
comparison.comparator), comparison.value))
|
def visit_comparison(self, comparison: Comparison) ->client.Predicates:
try:
from timescale_vector import client
except ImportError as e:
raise ImportError(
'Cannot import timescale-vector. Please install with `pip install timescale-vector`.'
) from e
return client.Predicates((comparison.attribute, self._format_func(
comparison.comparator), comparison.value))
| null |
from_texts
|
"""Return VectorStore initialized from texts and embeddings."""
raise NotImplementedError
|
@classmethod
def from_texts(cls: Type[VST], texts: List[str], embedding: Embeddings,
metadatas: Optional[List[dict]]=None, **kwargs: Any) ->VST:
"""Return VectorStore initialized from texts and embeddings."""
raise NotImplementedError
|
Return VectorStore initialized from texts and embeddings.
|
_get_path_strict
|
path_item = self._paths_strict.get(path)
if not path_item:
raise ValueError(f'No path found for {path}')
return path_item
|
def _get_path_strict(self, path: str) ->PathItem:
path_item = self._paths_strict.get(path)
if not path_item:
raise ValueError(f'No path found for {path}')
return path_item
| null |
update_document
|
"""Update a document in the collection.
Args:
document_id (str): ID of the document to update.
document (Document): Document to update.
"""
return self.update_documents([document_id], [document])
|
def update_document(self, document_id: str, document: Document) ->None:
"""Update a document in the collection.
Args:
document_id (str): ID of the document to update.
document (Document): Document to update.
"""
return self.update_documents([document_id], [document])
|
Update a document in the collection.
Args:
document_id (str): ID of the document to update.
document (Document): Document to update.
|
update_token_usage
|
"""Update token usage."""
_keys_to_use = keys.intersection(response['usage'])
for _key in _keys_to_use:
if _key not in token_usage:
token_usage[_key] = response['usage'][_key]
else:
token_usage[_key] += response['usage'][_key]
|
def update_token_usage(keys: Set[str], response: Dict[str, Any],
token_usage: Dict[str, Any]) ->None:
"""Update token usage."""
_keys_to_use = keys.intersection(response['usage'])
for _key in _keys_to_use:
if _key not in token_usage:
token_usage[_key] = response['usage'][_key]
else:
token_usage[_key] += response['usage'][_key]
|
Update token usage.
|
from_llm
|
"""Convenience method to load chain from LLM and retriever.
This provides some logic to create the `question_generator` chain
as well as the combine_docs_chain.
Args:
llm: The default language model to use at every part of this chain
(eg in both the question generation and the answering)
retriever: The retriever to use to fetch relevant documents from.
condense_question_prompt: The prompt to use to condense the chat history
and new question into a standalone question.
chain_type: The chain type to use to create the combine_docs_chain, will
be sent to `load_qa_chain`.
verbose: Verbosity flag for logging to stdout.
condense_question_llm: The language model to use for condensing the chat
history and new question into a standalone question. If none is
provided, will default to `llm`.
combine_docs_chain_kwargs: Parameters to pass as kwargs to `load_qa_chain`
when constructing the combine_docs_chain.
callbacks: Callbacks to pass to all subchains.
**kwargs: Additional parameters to pass when initializing
ConversationalRetrievalChain
"""
combine_docs_chain_kwargs = combine_docs_chain_kwargs or {}
doc_chain = load_qa_chain(llm, chain_type=chain_type, verbose=verbose,
callbacks=callbacks, **combine_docs_chain_kwargs)
_llm = condense_question_llm or llm
condense_question_chain = LLMChain(llm=_llm, prompt=
condense_question_prompt, verbose=verbose, callbacks=callbacks)
return cls(retriever=retriever, combine_docs_chain=doc_chain,
question_generator=condense_question_chain, callbacks=callbacks, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, retriever: BaseRetriever,
condense_question_prompt: BasePromptTemplate=CONDENSE_QUESTION_PROMPT,
chain_type: str='stuff', verbose: bool=False, condense_question_llm:
Optional[BaseLanguageModel]=None, combine_docs_chain_kwargs: Optional[
Dict]=None, callbacks: Callbacks=None, **kwargs: Any
) ->BaseConversationalRetrievalChain:
"""Convenience method to load chain from LLM and retriever.
This provides some logic to create the `question_generator` chain
as well as the combine_docs_chain.
Args:
llm: The default language model to use at every part of this chain
(eg in both the question generation and the answering)
retriever: The retriever to use to fetch relevant documents from.
condense_question_prompt: The prompt to use to condense the chat history
and new question into a standalone question.
chain_type: The chain type to use to create the combine_docs_chain, will
be sent to `load_qa_chain`.
verbose: Verbosity flag for logging to stdout.
condense_question_llm: The language model to use for condensing the chat
history and new question into a standalone question. If none is
provided, will default to `llm`.
combine_docs_chain_kwargs: Parameters to pass as kwargs to `load_qa_chain`
when constructing the combine_docs_chain.
callbacks: Callbacks to pass to all subchains.
**kwargs: Additional parameters to pass when initializing
ConversationalRetrievalChain
"""
combine_docs_chain_kwargs = combine_docs_chain_kwargs or {}
doc_chain = load_qa_chain(llm, chain_type=chain_type, verbose=verbose,
callbacks=callbacks, **combine_docs_chain_kwargs)
_llm = condense_question_llm or llm
condense_question_chain = LLMChain(llm=_llm, prompt=
condense_question_prompt, verbose=verbose, callbacks=callbacks)
return cls(retriever=retriever, combine_docs_chain=doc_chain,
question_generator=condense_question_chain, callbacks=callbacks, **
kwargs)
|
Convenience method to load chain from LLM and retriever.
This provides some logic to create the `question_generator` chain
as well as the combine_docs_chain.
Args:
llm: The default language model to use at every part of this chain
(eg in both the question generation and the answering)
retriever: The retriever to use to fetch relevant documents from.
condense_question_prompt: The prompt to use to condense the chat history
and new question into a standalone question.
chain_type: The chain type to use to create the combine_docs_chain, will
be sent to `load_qa_chain`.
verbose: Verbosity flag for logging to stdout.
condense_question_llm: The language model to use for condensing the chat
history and new question into a standalone question. If none is
provided, will default to `llm`.
combine_docs_chain_kwargs: Parameters to pass as kwargs to `load_qa_chain`
when constructing the combine_docs_chain.
callbacks: Callbacks to pass to all subchains.
**kwargs: Additional parameters to pass when initializing
ConversationalRetrievalChain
|
_create_message_dicts
|
params: Dict[str, Any] = {}
message_dicts = [self._convert_message_to_dict(m) for m in messages]
return message_dicts, params
|
def _create_message_dicts(self, messages: List[BaseMessage]) ->Tuple[List[
Dict[str, Any]], Dict[str, Any]]:
params: Dict[str, Any] = {}
message_dicts = [self._convert_message_to_dict(m) for m in messages]
return message_dicts, params
| null |
on_chat_model_start
|
"""Save the prompts in memory when an LLM starts."""
if self.input_type != 'Paragraphs':
raise ValueError(
f"""
Label Studio project "{self.project_name}" has an input type <{self.input_type}>. To make it work with the mode="chat", the input type should be <Paragraphs>.
Read more here https://labelstud.io/tags/paragraphs"""
)
prompts = []
for message_list in messages:
dialog = []
for message in message_list:
dialog.append({'role': self._get_message_role(message), 'content':
message.content})
prompts.append(dialog)
self.payload[str(run_id)] = {'prompts': prompts, 'tags': tags, 'metadata':
metadata, 'run_id': run_id, 'parent_run_id': parent_run_id, 'kwargs':
kwargs}
|
def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[
List[BaseMessage]], *, run_id: UUID, parent_run_id: Optional[UUID]=None,
tags: Optional[List[str]]=None, metadata: Optional[Dict[str, Any]]=None,
**kwargs: Any) ->Any:
"""Save the prompts in memory when an LLM starts."""
if self.input_type != 'Paragraphs':
raise ValueError(
f"""
Label Studio project "{self.project_name}" has an input type <{self.input_type}>. To make it work with the mode="chat", the input type should be <Paragraphs>.
Read more here https://labelstud.io/tags/paragraphs"""
)
prompts = []
for message_list in messages:
dialog = []
for message in message_list:
dialog.append({'role': self._get_message_role(message),
'content': message.content})
prompts.append(dialog)
self.payload[str(run_id)] = {'prompts': prompts, 'tags': tags,
'metadata': metadata, 'run_id': run_id, 'parent_run_id':
parent_run_id, 'kwargs': kwargs}
|
Save the prompts in memory when an LLM starts.
|
add_texts
|
"""Add texts to the Clarifai vectorstore. This will push the text
to a Clarifai application.
Application use a base workflow that create and store embedding for each text.
Make sure you are using a base workflow that is compatible with text
(such as Language Understanding).
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
"""
try:
from clarifai.client.input import Inputs
from google.protobuf.struct_pb2 import Struct
except ImportError as e:
raise ImportError(
'Could not import clarifai python package. Please install it with `pip install clarifai`.'
) from e
ltexts = list(texts)
length = len(ltexts)
assert length > 0, 'No texts provided to add to the vectorstore.'
if metadatas is not None:
assert length == len(metadatas
), 'Number of texts and metadatas should be the same.'
if ids is not None:
assert len(ltexts) == len(ids
), 'Number of text inputs and input ids should be the same.'
input_obj = Inputs(app_id=self._app_id, user_id=self._user_id)
batch_size = 32
input_job_ids = []
for idx in range(0, length, batch_size):
try:
batch_texts = ltexts[idx:idx + batch_size]
batch_metadatas = metadatas[idx:idx + batch_size
] if metadatas else None
if ids is None:
batch_ids = [uuid.uuid4().hex for _ in range(len(batch_texts))]
else:
batch_ids = ids[idx:idx + batch_size]
if batch_metadatas is not None:
meta_list = []
for meta in batch_metadatas:
meta_struct = Struct()
meta_struct.update(meta)
meta_list.append(meta_struct)
input_batch = [input_obj.get_text_input(input_id=batch_ids[i],
raw_text=text, metadata=meta_list[i] if batch_metadatas else
None) for i, text in enumerate(batch_texts)]
result_id = input_obj.upload_inputs(inputs=input_batch)
input_job_ids.extend(result_id)
logger.debug('Input posted successfully.')
except Exception as error:
logger.warning(f'Post inputs failed: {error}')
traceback.print_exc()
return input_job_ids
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
None, ids: Optional[List[str]]=None, **kwargs: Any) ->List[str]:
"""Add texts to the Clarifai vectorstore. This will push the text
to a Clarifai application.
Application use a base workflow that create and store embedding for each text.
Make sure you are using a base workflow that is compatible with text
(such as Language Understanding).
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
"""
try:
from clarifai.client.input import Inputs
from google.protobuf.struct_pb2 import Struct
except ImportError as e:
raise ImportError(
'Could not import clarifai python package. Please install it with `pip install clarifai`.'
) from e
ltexts = list(texts)
length = len(ltexts)
assert length > 0, 'No texts provided to add to the vectorstore.'
if metadatas is not None:
assert length == len(metadatas
), 'Number of texts and metadatas should be the same.'
if ids is not None:
assert len(ltexts) == len(ids
), 'Number of text inputs and input ids should be the same.'
input_obj = Inputs(app_id=self._app_id, user_id=self._user_id)
batch_size = 32
input_job_ids = []
for idx in range(0, length, batch_size):
try:
batch_texts = ltexts[idx:idx + batch_size]
batch_metadatas = metadatas[idx:idx + batch_size
] if metadatas else None
if ids is None:
batch_ids = [uuid.uuid4().hex for _ in range(len(batch_texts))]
else:
batch_ids = ids[idx:idx + batch_size]
if batch_metadatas is not None:
meta_list = []
for meta in batch_metadatas:
meta_struct = Struct()
meta_struct.update(meta)
meta_list.append(meta_struct)
input_batch = [input_obj.get_text_input(input_id=batch_ids[i],
raw_text=text, metadata=meta_list[i] if batch_metadatas else
None) for i, text in enumerate(batch_texts)]
result_id = input_obj.upload_inputs(inputs=input_batch)
input_job_ids.extend(result_id)
logger.debug('Input posted successfully.')
except Exception as error:
logger.warning(f'Post inputs failed: {error}')
traceback.print_exc()
return input_job_ids
|
Add texts to the Clarifai vectorstore. This will push the text
to a Clarifai application.
Application use a base workflow that create and store embedding for each text.
Make sure you are using a base workflow that is compatible with text
(such as Language Understanding).
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
|
test_confluence_loader_initialization_invalid
|
with pytest.raises(ValueError):
ConfluenceLoader(self.CONFLUENCE_URL, username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN, token='foo')
with pytest.raises(ValueError):
ConfluenceLoader(self.CONFLUENCE_URL, username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN, oauth2={'access_token': 'bar',
'access_token_secret': 'bar', 'consumer_key': 'bar', 'key_cert': 'bar'}
)
with pytest.raises(ValueError):
ConfluenceLoader(self.CONFLUENCE_URL, username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN, session=requests.Session())
|
def test_confluence_loader_initialization_invalid(self) ->None:
with pytest.raises(ValueError):
ConfluenceLoader(self.CONFLUENCE_URL, username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN, token='foo')
with pytest.raises(ValueError):
ConfluenceLoader(self.CONFLUENCE_URL, username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN, oauth2={'access_token': 'bar',
'access_token_secret': 'bar', 'consumer_key': 'bar', 'key_cert':
'bar'})
with pytest.raises(ValueError):
ConfluenceLoader(self.CONFLUENCE_URL, username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN, session=requests.Session())
| null |
test_parse_operation
|
op = 'and(eq("foo", "bar"), lt("baz", 1995.25))'
eq = Comparison(comparator=Comparator.EQ, attribute='foo', value='bar')
lt = Comparison(comparator=Comparator.LT, attribute='baz', value=1995.25)
expected = Operation(operator=Operator.AND, arguments=[eq, lt])
for input in (op, op.replace('"', "'"), op.replace(' ', ''), op.replace(' ',
' '), op.replace('(', ' ('), op.replace(',', ', '), op.replace('25',
'250')):
actual = DEFAULT_PARSER.parse_folder(input)
assert expected == actual
|
def test_parse_operation() ->None:
op = 'and(eq("foo", "bar"), lt("baz", 1995.25))'
eq = Comparison(comparator=Comparator.EQ, attribute='foo', value='bar')
lt = Comparison(comparator=Comparator.LT, attribute='baz', value=1995.25)
expected = Operation(operator=Operator.AND, arguments=[eq, lt])
for input in (op, op.replace('"', "'"), op.replace(' ', ''), op.replace
(' ', ' '), op.replace('(', ' ('), op.replace(',', ', '), op.
replace('25', '250')):
actual = DEFAULT_PARSER.parse_folder(input)
assert expected == actual
| null |
validate_environment
|
"""Validate that the python package exists in the environment."""
try:
from gql import Client, gql
from gql.transport.requests import RequestsHTTPTransport
except ImportError as e:
raise ImportError(
f'Could not import gql python package. Try installing it with `pip install gql`. Received error: {e}'
)
headers = values.get('custom_headers')
transport = RequestsHTTPTransport(url=values['graphql_endpoint'], headers=
headers)
client = Client(transport=transport, fetch_schema_from_transport=True)
values['gql_client'] = client
values['gql_function'] = gql
return values
|
@root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that the python package exists in the environment."""
try:
from gql import Client, gql
from gql.transport.requests import RequestsHTTPTransport
except ImportError as e:
raise ImportError(
f'Could not import gql python package. Try installing it with `pip install gql`. Received error: {e}'
)
headers = values.get('custom_headers')
transport = RequestsHTTPTransport(url=values['graphql_endpoint'],
headers=headers)
client = Client(transport=transport, fetch_schema_from_transport=True)
values['gql_client'] = client
values['gql_function'] = gql
return values
|
Validate that the python package exists in the environment.
|
visit_comparison
|
value = comparison.value
if isinstance(value, str):
if comparison.comparator == Comparator.LIKE:
value = f"'%{value}%'"
else:
value = f"'{value}'"
return f'{comparison.attribute}{self._format_func(comparison.comparator)}{value}'
|
def visit_comparison(self, comparison: Comparison) ->str:
value = comparison.value
if isinstance(value, str):
if comparison.comparator == Comparator.LIKE:
value = f"'%{value}%'"
else:
value = f"'{value}'"
return (
f'{comparison.attribute}{self._format_func(comparison.comparator)}{value}'
)
| null |
test_similarity_search_with_doc_builder
|
texts = ['foo', 'foo', 'foo']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(),
metadatas=metadatas, **elasticsearch_connection, index_name=index_name)
def custom_document_builder(_: Dict) ->Document:
return Document(page_content='Mock content!', metadata={'page_number':
-1, 'original_filename': 'Mock filename!'})
output = docsearch.similarity_search(query='foo', k=1, doc_builder=
custom_document_builder)
assert output[0].page_content == 'Mock content!'
assert output[0].metadata['page_number'] == -1
assert output[0].metadata['original_filename'] == 'Mock filename!'
|
def test_similarity_search_with_doc_builder(self, elasticsearch_connection:
dict, index_name: str) ->None:
texts = ['foo', 'foo', 'foo']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(),
metadatas=metadatas, **elasticsearch_connection, index_name=index_name)
def custom_document_builder(_: Dict) ->Document:
return Document(page_content='Mock content!', metadata={
'page_number': -1, 'original_filename': 'Mock filename!'})
output = docsearch.similarity_search(query='foo', k=1, doc_builder=
custom_document_builder)
assert output[0].page_content == 'Mock content!'
assert output[0].metadata['page_number'] == -1
assert output[0].metadata['original_filename'] == 'Mock filename!'
| null |
_convert_unstructured_search_response
|
"""Converts a sequence of search results to a list of LangChain documents."""
from google.protobuf.json_format import MessageToDict
documents: List[Document] = []
for result in results:
document_dict = MessageToDict(result.document._pb,
preserving_proto_field_name=True)
derived_struct_data = document_dict.get('derived_struct_data')
if not derived_struct_data:
continue
doc_metadata = document_dict.get('struct_data', {})
doc_metadata['id'] = document_dict['id']
if chunk_type not in derived_struct_data:
continue
for chunk in derived_struct_data[chunk_type]:
doc_metadata['source'] = derived_struct_data.get('link', '')
if chunk_type == 'extractive_answers':
doc_metadata['source'] += f":{chunk.get('pageNumber', '')}"
documents.append(Document(page_content=chunk.get('content', ''),
metadata=doc_metadata))
return documents
|
def _convert_unstructured_search_response(self, results: Sequence[
SearchResult], chunk_type: str) ->List[Document]:
"""Converts a sequence of search results to a list of LangChain documents."""
from google.protobuf.json_format import MessageToDict
documents: List[Document] = []
for result in results:
document_dict = MessageToDict(result.document._pb,
preserving_proto_field_name=True)
derived_struct_data = document_dict.get('derived_struct_data')
if not derived_struct_data:
continue
doc_metadata = document_dict.get('struct_data', {})
doc_metadata['id'] = document_dict['id']
if chunk_type not in derived_struct_data:
continue
for chunk in derived_struct_data[chunk_type]:
doc_metadata['source'] = derived_struct_data.get('link', '')
if chunk_type == 'extractive_answers':
doc_metadata['source'] += f":{chunk.get('pageNumber', '')}"
documents.append(Document(page_content=chunk.get('content', ''),
metadata=doc_metadata))
return documents
|
Converts a sequence of search results to a list of LangChain documents.
|
fakeget
|
def fn(url: str, **kwargs: Any) ->Any:
if url.endswith('/processing/pull'):
return FakePullResponse()
else:
raise Exception('Invalid GET URL')
return fn
|
def fakeget(**kwargs: Any) ->Any:
def fn(url: str, **kwargs: Any) ->Any:
if url.endswith('/processing/pull'):
return FakePullResponse()
else:
raise Exception('Invalid GET URL')
return fn
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL)
| null |
test_predict_method
|
"""Test predict method works."""
output = fake_llm_chain.predict(bar='baz')
assert output == 'foo'
|
def test_predict_method(fake_llm_chain: LLMChain) ->None:
"""Test predict method works."""
output = fake_llm_chain.predict(bar='baz')
assert output == 'foo'
|
Test predict method works.
|
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'prompts', 'chat']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'prompts', 'chat']
|
Get the namespace of the langchain object.
|
project
|
projects = self.jira.projects()
parsed_projects = self.parse_projects(projects)
parsed_projects_str = 'Found ' + str(len(parsed_projects)
) + ' projects:\n' + str(parsed_projects)
return parsed_projects_str
|
def project(self) ->str:
projects = self.jira.projects()
parsed_projects = self.parse_projects(projects)
parsed_projects_str = 'Found ' + str(len(parsed_projects)
) + ' projects:\n' + str(parsed_projects)
return parsed_projects_str
| null |
_build_qstr
|
q_emb_str = ','.join(map(str, q_emb))
if where_str:
where_str = f'PREWHERE {where_str}'
else:
where_str = ''
q_str = f"""
SELECT {self.config.column_map['text']}, dist,
{','.join(self.must_have_cols)}
FROM {self.config.database}.{self.config.table}
{where_str}
ORDER BY distance({self.config.column_map['vector']}, [{q_emb_str}])
AS dist {self.dist_order}
LIMIT {topk}
"""
return q_str
|
def _build_qstr(self, q_emb: List[float], topk: int, where_str: Optional[
str]=None) ->str:
q_emb_str = ','.join(map(str, q_emb))
if where_str:
where_str = f'PREWHERE {where_str}'
else:
where_str = ''
q_str = f"""
SELECT {self.config.column_map['text']}, dist,
{','.join(self.must_have_cols)}
FROM {self.config.database}.{self.config.table}
{where_str}
ORDER BY distance({self.config.column_map['vector']}, [{q_emb_str}])
AS dist {self.dist_order}
LIMIT {topk}
"""
return q_str
| null |
create_index
|
"""
Create retriever that indexes docs and their propositions
:param docs: Documents to index
:param indexer: Runnable creates additional propositions per doc
:param docstore_id_key: Key to use to store the docstore id
:return: Retriever
"""
logger.info('Creating multi-vector retriever')
retriever = get_multi_vector_retriever(docstore_id_key)
propositions = indexer.batch([{'input': doc.page_content} for doc in docs],
{'max_concurrency': 10})
add_documents(retriever, propositions, docs, id_key=docstore_id_key)
return retriever
|
def create_index(docs: Sequence[Document], indexer: Runnable,
docstore_id_key: str=DOCSTORE_ID_KEY):
"""
Create retriever that indexes docs and their propositions
:param docs: Documents to index
:param indexer: Runnable creates additional propositions per doc
:param docstore_id_key: Key to use to store the docstore id
:return: Retriever
"""
logger.info('Creating multi-vector retriever')
retriever = get_multi_vector_retriever(docstore_id_key)
propositions = indexer.batch([{'input': doc.page_content} for doc in
docs], {'max_concurrency': 10})
add_documents(retriever, propositions, docs, id_key=docstore_id_key)
return retriever
|
Create retriever that indexes docs and their propositions
:param docs: Documents to index
:param indexer: Runnable creates additional propositions per doc
:param docstore_id_key: Key to use to store the docstore id
:return: Retriever
|
_initialize_comet_modules
|
comet_llm_api = import_comet_llm_api()
self._chain: ModuleType = comet_llm_api.chain
self._span: ModuleType = comet_llm_api.span
self._chain_api: ModuleType = comet_llm_api.chain_api
self._experiment_info: ModuleType = comet_llm_api.experiment_info
self._flush: Callable[[], None] = comet_llm_api.flush
|
def _initialize_comet_modules(self) ->None:
comet_llm_api = import_comet_llm_api()
self._chain: ModuleType = comet_llm_api.chain
self._span: ModuleType = comet_llm_api.span
self._chain_api: ModuleType = comet_llm_api.chain_api
self._experiment_info: ModuleType = comet_llm_api.experiment_info
self._flush: Callable[[], None] = comet_llm_api.flush
| null |
completed
|
return all(task.completed() for task in self.tasks)
|
def completed(self) ->bool:
return all(task.completed() for task in self.tasks)
| null |
requires_reference
|
"""Whether the evaluation requires a reference text."""
return True
|
@property
def requires_reference(self) ->bool:
"""Whether the evaluation requires a reference text."""
return True
|
Whether the evaluation requires a reference text.
|
test_openai_streaming_callback
|
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = _get_llm(max_tokens=10, streaming=True, temperature=0,
callback_manager=callback_manager, verbose=True)
llm('Write me a sentence with 100 words.')
assert callback_handler.llm_streams == 11
|
def test_openai_streaming_callback() ->None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = _get_llm(max_tokens=10, streaming=True, temperature=0,
callback_manager=callback_manager, verbose=True)
llm('Write me a sentence with 100 words.')
assert callback_handler.llm_streams == 11
|
Test that streaming correctly invokes on_llm_new_token callback.
|
_parts_to_content
|
"""Converts a list of Gemini API Part objects into a list of LangChain messages."""
if len(parts) == 1 and parts[0].text is not None and not parts[0].inline_data:
return parts[0].text
elif not parts:
logger.warning('Gemini produced an empty response.')
return ''
messages = []
for part in parts:
if part.text is not None:
messages.append({'type': 'text', 'text': part.text})
else:
raise ChatGoogleGenerativeAIError(f'Unexpected part type. {part}')
return messages
|
def _parts_to_content(parts: List[genai.types.PartType]) ->Union[List[dict],
str]:
"""Converts a list of Gemini API Part objects into a list of LangChain messages."""
if len(parts) == 1 and parts[0].text is not None and not parts[0
].inline_data:
return parts[0].text
elif not parts:
logger.warning('Gemini produced an empty response.')
return ''
messages = []
for part in parts:
if part.text is not None:
messages.append({'type': 'text', 'text': part.text})
else:
raise ChatGoogleGenerativeAIError(f'Unexpected part type. {part}')
return messages
|
Converts a list of Gemini API Part objects into a list of LangChain messages.
|
model
|
return Llama2Chat(llm=FakeLLM())
|
@pytest.fixture
def model() ->Llama2Chat:
return Llama2Chat(llm=FakeLLM())
| null |
on_retriever_error
|
"""Run when retriever errors."""
handle_event(self.handlers, 'on_retriever_error', 'ignore_retriever', error,
run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags,
**kwargs)
|
def on_retriever_error(self, error: BaseException, **kwargs: Any) ->None:
"""Run when retriever errors."""
handle_event(self.handlers, 'on_retriever_error', 'ignore_retriever',
error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=
self.tags, **kwargs)
|
Run when retriever errors.
|
__init__
|
"""Initialize RunnableWithMessageHistory.
Args:
runnable: The base Runnable to be wrapped. Must take as input one of:
1. A sequence of BaseMessages
2. A dict with one key for all messages
3. A dict with one key for the current input string/message(s) and
a separate key for historical messages. If the input key points
to a string, it will be treated as a HumanMessage in history.
Must return as output one of:
1. A string which can be treated as an AIMessage
2. A BaseMessage or sequence of BaseMessages
3. A dict with a key for a BaseMessage or sequence of BaseMessages
get_session_history: Function that returns a new BaseChatMessageHistory.
This function should either take a single positional argument
`session_id` of type string and return a corresponding
chat message history instance.
.. code-block:: python
def get_session_history(
session_id: str,
*,
user_id: Optional[str]=None
) -> BaseChatMessageHistory:
...
Or it should take keyword arguments that match the keys of
`session_history_config_specs` and return a corresponding
chat message history instance.
.. code-block:: python
def get_session_history(
*,
user_id: str,
thread_id: str,
) -> BaseChatMessageHistory:
...
input_messages_key: Must be specified if the base runnable accepts a dict
as input.
output_messages_key: Must be specified if the base runnable returns a dict
as output.
history_messages_key: Must be specified if the base runnable accepts a dict
as input and expects a separate key for historical messages.
history_factory_config: Configure fields that should be passed to the
chat history factory. See ``ConfigurableFieldSpec`` for more details.
Specifying these allows you to pass multiple config keys
into the get_session_history factory.
**kwargs: Arbitrary additional kwargs to pass to parent class
``RunnableBindingBase`` init.
"""
history_chain: Runnable = RunnableLambda(self._enter_history, self.
_aenter_history).with_config(run_name='load_history')
messages_key = history_messages_key or input_messages_key
if messages_key:
history_chain = RunnablePassthrough.assign(**{messages_key: history_chain}
).with_config(run_name='insert_history')
bound = (history_chain | runnable.with_listeners(on_end=self._exit_history)
).with_config(run_name='RunnableWithMessageHistory')
if history_factory_config:
_config_specs = history_factory_config
else:
_config_specs = [ConfigurableFieldSpec(id='session_id', annotation=str,
name='Session ID', description='Unique identifier for a session.',
default='', is_shared=True)]
super().__init__(get_session_history=get_session_history,
input_messages_key=input_messages_key, output_messages_key=
output_messages_key, bound=bound, history_messages_key=
history_messages_key, history_factory_config=_config_specs, **kwargs)
|
def __init__(self, runnable: Runnable[MessagesOrDictWithMessages, Union[str,
    BaseMessage, MessagesOrDictWithMessages]], get_session_history:
    GetSessionHistoryCallable, *, input_messages_key: Optional[str]=None,
    output_messages_key: Optional[str]=None, history_messages_key: Optional
    [str]=None, history_factory_config: Optional[Sequence[
    ConfigurableFieldSpec]]=None, **kwargs: Any) ->None:
    """Initialize RunnableWithMessageHistory.
    Args:
        runnable: The base Runnable to be wrapped. Must take as input one of:
            1. A sequence of BaseMessages
            2. A dict with one key for all messages
            3. A dict with one key for the current input string/message(s) and
                a separate key for historical messages. If the input key points
                to a string, it will be treated as a HumanMessage in history.
            Must return as output one of:
            1. A string which can be treated as an AIMessage
            2. A BaseMessage or sequence of BaseMessages
            3. A dict with a key for a BaseMessage or sequence of BaseMessages
        get_session_history: Function that returns a new BaseChatMessageHistory.
            This function should either take a single positional argument
            `session_id` of type string and return a corresponding
            chat message history instance.
            .. code-block:: python
                def get_session_history(
                    session_id: str,
                    *,
                    user_id: Optional[str]=None
                ) -> BaseChatMessageHistory:
                    ...
            Or it should take keyword arguments that match the keys of
            `session_history_config_specs` and return a corresponding
            chat message history instance.
            .. code-block:: python
                def get_session_history(
                    *,
                    user_id: str,
                    thread_id: str,
                ) -> BaseChatMessageHistory:
                    ...
        input_messages_key: Must be specified if the base runnable accepts a dict
            as input.
        output_messages_key: Must be specified if the base runnable returns a dict
            as output.
        history_messages_key: Must be specified if the base runnable accepts a dict
            as input and expects a separate key for historical messages.
        history_factory_config: Configure fields that should be passed to the
            chat history factory. See ``ConfigurableFieldSpec`` for more details.
            Specifying these allows you to pass multiple config keys
            into the get_session_history factory.
        **kwargs: Arbitrary additional kwargs to pass to parent class
            ``RunnableBindingBase`` init.
    """
    # Wrap history loading in a runnable so it participates in tracing.
    history_chain: Runnable = RunnableLambda(self._enter_history, self.
        _aenter_history).with_config(run_name='load_history')
    # History is injected under the dedicated history key when given,
    # otherwise merged into the input-messages key.
    messages_key = history_messages_key or input_messages_key
    if messages_key:
        # Assign loaded history under messages_key, passing other input through.
        history_chain = RunnablePassthrough.assign(**{messages_key:
            history_chain}).with_config(run_name='insert_history')
    # After the wrapped runnable finishes, _exit_history persists new messages.
    bound = (history_chain | runnable.with_listeners(on_end=self._exit_history)
        ).with_config(run_name='RunnableWithMessageHistory')
    if history_factory_config:
        _config_specs = history_factory_config
    else:
        # Default factory config: a single shared `session_id` string field.
        _config_specs = [ConfigurableFieldSpec(id='session_id', annotation=
            str, name='Session ID', description=
            'Unique identifier for a session.', default='', is_shared=True)]
    super().__init__(get_session_history=get_session_history,
        input_messages_key=input_messages_key, output_messages_key=
        output_messages_key, bound=bound, history_messages_key=
        history_messages_key, history_factory_config=_config_specs, **kwargs)
|
Initialize RunnableWithMessageHistory.
Args:
runnable: The base Runnable to be wrapped. Must take as input one of:
1. A sequence of BaseMessages
2. A dict with one key for all messages
3. A dict with one key for the current input string/message(s) and
a separate key for historical messages. If the input key points
to a string, it will be treated as a HumanMessage in history.
Must return as output one of:
1. A string which can be treated as an AIMessage
2. A BaseMessage or sequence of BaseMessages
3. A dict with a key for a BaseMessage or sequence of BaseMessages
get_session_history: Function that returns a new BaseChatMessageHistory.
This function should either take a single positional argument
`session_id` of type string and return a corresponding
chat message history instance.
.. code-block:: python
def get_session_history(
session_id: str,
*,
user_id: Optional[str]=None
) -> BaseChatMessageHistory:
...
Or it should take keyword arguments that match the keys of
`session_history_config_specs` and return a corresponding
chat message history instance.
.. code-block:: python
def get_session_history(
*,
user_id: str,
thread_id: str,
) -> BaseChatMessageHistory:
...
input_messages_key: Must be specified if the base runnable accepts a dict
as input.
output_messages_key: Must be specified if the base runnable returns a dict
as output.
history_messages_key: Must be specified if the base runnable accepts a dict
as input and expects a separate key for historical messages.
history_factory_config: Configure fields that should be passed to the
chat history factory. See ``ConfigurableFieldSpec`` for more details.
Specifying these allows you to pass multiple config keys
into the get_session_history factory.
**kwargs: Arbitrary additional kwargs to pass to parent class
``RunnableBindingBase`` init.
|
__init__
|
super().__init__(**kwargs)
self.auth_header = {'Authorization': 'Basic {}'.format(self.api_token)}
|
def __init__(self, **kwargs: Any):
    """Initialize and pre-compute the Basic-auth request header."""
    super().__init__(**kwargs)
    # Build the header once so every subsequent request can reuse it.
    self.auth_header = {'Authorization': f'Basic {self.api_token}'}
| null |
load
|
"""
Search PubMed for documents matching the query.
Return a list of dictionaries containing the document metadata.
"""
return list(self.lazy_load(query))
|
def load(self, query: str) ->List[dict]:
    """
    Search PubMed for documents matching the query.

    Materializes the lazy iterator from :meth:`lazy_load` into a list of
    document-metadata dictionaries.
    """
    return [record for record in self.lazy_load(query)]
|
Search PubMed for documents matching the query.
Return a list of dictionaries containing the document metadata.
|
_load_image_from_gcs
|
try:
from google.cloud import storage
except ImportError:
raise ImportError(
'google-cloud-storage is required to load images from GCS. Install it with `pip install google-cloud-storage`'
)
if PIL is None:
raise ImportError(
'PIL is required to load images. Please install it with `pip install pillow`'
)
gcs_client = storage.Client(project=project)
pieces = path.split('/')
blobs = list(gcs_client.list_blobs(pieces[2], prefix='/'.join(pieces[3:])))
if len(blobs) > 1:
raise ValueError(f'Found more than one candidate for {path}!')
img_bytes = blobs[0].download_as_bytes()
return PIL.Image.open(BytesIO(img_bytes))
|
def _load_image_from_gcs(path: str, project: Optional[str]=None) ->Image:
    """Download and decode an image stored on Google Cloud Storage.

    Args:
        path: A ``gs://bucket/path`` style URI identifying the image blob.
        project: Optional GCP project used to construct the storage client.

    Returns:
        The decoded ``PIL.Image``.

    Raises:
        ImportError: If ``google-cloud-storage`` or ``Pillow`` is not installed.
        ValueError: If the path matches zero or more than one blob.
    """
    try:
        from google.cloud import storage
    except ImportError:
        raise ImportError(
            'google-cloud-storage is required to load images from GCS. Install it with `pip install google-cloud-storage`'
        )
    if PIL is None:
        raise ImportError(
            'PIL is required to load images. Please install it with `pip install pillow`'
        )
    gcs_client = storage.Client(project=project)
    # gs://bucket/blob -> pieces = ['gs:', '', bucket, *blob_path_parts]
    pieces = path.split('/')
    blobs = list(gcs_client.list_blobs(pieces[2], prefix='/'.join(pieces[3:])))
    if len(blobs) > 1:
        raise ValueError(f'Found more than one candidate for {path}!')
    if not blobs:
        # Previously blobs[0] raised a bare IndexError here; fail clearly.
        raise ValueError(f'Found no candidate for {path}!')
    img_bytes = blobs[0].download_as_bytes()
    return PIL.Image.open(BytesIO(img_bytes))
| null |
llm_prefix
|
"""Prefix to append the llm call with."""
return 'Thought:'
|
@property
def llm_prefix(self) ->str:
    """Prefix text that cues the model to emit its next reasoning step."""
    prefix = 'Thought:'
    return prefix
|
Prefix to append the llm call with.
|
__is_headers_available_for_html
|
_unstructured_version = self.__version.split('-')[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split('.')]
)
return unstructured_version >= (0, 5, 7)
|
def __is_headers_available_for_html(self) ->bool:
    """Whether the installed unstructured version supports HTML headers."""
    # Drop any pre-release suffix (e.g. "0.5.7-dev0" -> "0.5.7") before parsing.
    base_version = self.__version.split('-')[0]
    parsed = tuple(map(int, base_version.split('.')))
    # Header support for HTML landed in unstructured 0.5.7.
    return parsed >= (0, 5, 7)
| null |
test_similarity_search_with_score_by_vector
|
"""Test vector similarity with score by vector."""
texts = ['foo', 'bar', 'baz']
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore({index_to_id[0]: Document(page_content
='foo'), index_to_id[1]: Document(page_content='bar'), index_to_id[2]:
Document(page_content='baz')})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text='foo')
output = docsearch.similarity_search_with_score_by_vector(query_vec, k=1)
assert len(output) == 1
assert output[0][0] == Document(page_content='foo')
|
@pytest.mark.requires('faiss')
def test_similarity_search_with_score_by_vector() ->None:
    """Test vector similarity with score by vector."""
    corpus = ['foo', 'bar', 'baz']
    docsearch = FAISS.from_texts(corpus, FakeEmbeddings())
    index_to_id = docsearch.index_to_docstore_id
    # The docstore should map each index id to the original text.
    expected_docstore = InMemoryDocstore({index_to_id[i]: Document(
        page_content=text) for i, text in enumerate(corpus)})
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    query_vec = FakeEmbeddings().embed_query(text='foo')
    output = docsearch.similarity_search_with_score_by_vector(query_vec, k=1)
    assert len(output) == 1
    assert output[0][0] == Document(page_content='foo')
|
Test vector similarity with score by vector.
|
__init__
|
"""Initialize with parameters."""
super().__init__()
self.client = client
self.places = places
|
def __init__(self, client: OpenWeatherMapAPIWrapper, places: Sequence[str]
    ) ->None:
    """Store the OpenWeatherMap client and the places to report on."""
    super().__init__()
    self.places = places
    self.client = client
|
Initialize with parameters.
|
test_tensorflowhub_embedding_documents
|
"""Test tensorflowhub embeddings."""
documents = ['foo bar']
embedding = TensorflowHubEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 512
|
def test_tensorflowhub_embedding_documents() ->None:
    """Test tensorflowhub embeddings."""
    embedding = TensorflowHubEmbeddings()
    # A single input document should yield one 512-dimensional vector.
    output = embedding.embed_documents(['foo bar'])
    assert len(output) == 1
    assert len(output[0]) == 512
|
Test tensorflowhub embeddings.
|
test_log_lock
|
"""Test that example assigned at callback start/end is honored."""
client = unittest.mock.MagicMock(spec=Client)
tracer = LangChainTracer(client=client)
with unittest.mock.patch.object(tracer, '_persist_run_single', new=lambda _: _
):
run_id_1 = UUID('9d878ab3-e5ca-4218-aef6-44cbdc90160a')
lock = threading.Lock()
tracer.on_chain_start({'name': 'example_1'}, {'input': lock}, run_id=
run_id_1)
tracer.on_chain_end({}, run_id=run_id_1)
tracer.wait_for_futures()
|
def test_log_lock() ->None:
    """Test that example assigned at callback start/end is honored."""
    client = unittest.mock.MagicMock(spec=Client)
    tracer = LangChainTracer(client=client)
    # Patch persistence so no network call is attempted.
    with unittest.mock.patch.object(tracer, '_persist_run_single', new=
        lambda _: _):
        run_id = UUID('9d878ab3-e5ca-4218-aef6-44cbdc90160a')
        lock = threading.Lock()
        # A non-serializable input (a lock) must not break tracing.
        tracer.on_chain_start({'name': 'example_1'}, {'input': lock},
            run_id=run_id)
        tracer.on_chain_end({}, run_id=run_id)
        tracer.wait_for_futures()
|
Test that example assigned at callback start/end is honored.
|
_construct_scratchpad
|
agent_scratchpad = super()._construct_scratchpad(intermediate_steps)
if not isinstance(agent_scratchpad, str):
raise ValueError('agent_scratchpad should be of type string.')
if agent_scratchpad:
return f"""This was your previous work (but I haven't seen any of it! I only see what you return as final answer):
{agent_scratchpad}"""
else:
return agent_scratchpad
|
def _construct_scratchpad(self, intermediate_steps: List[Tuple[AgentAction,
    str]]) ->str:
    """Wrap the base scratchpad in a reminder that prior work is invisible."""
    scratchpad = super()._construct_scratchpad(intermediate_steps)
    if not isinstance(scratchpad, str):
        raise ValueError('agent_scratchpad should be of type string.')
    # Empty scratchpad: nothing to remind the model about.
    if not scratchpad:
        return scratchpad
    return f"""This was your previous work (but I haven't seen any of it! I only see what you return as final answer):
{scratchpad}"""
| null |
_stream
|
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
A dictionary like objects containing a string token and metadata.
See llama-cpp-python docs and below for more.
Example:
.. code-block:: python
from langchain_community.llms import LlamaCpp
llm = LlamaCpp(
model_path="/path/to/local/model.bin",
temperature = 0.5
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","
"]):
result = chunk["choices"][0]
print(result["text"], end='', flush=True)
"""
params = {**self._get_parameters(stop), **kwargs}
result = self.client(prompt=prompt, stream=True, **params)
for part in result:
logprobs = part['choices'][0].get('logprobs', None)
chunk = GenerationChunk(text=part['choices'][0]['text'],
generation_info={'logprobs': logprobs})
yield chunk
if run_manager:
run_manager.on_llm_new_token(token=chunk.text, verbose=self.verbose,
log_probs=logprobs)
|
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[
    GenerationChunk]:
    """Yields results objects as they are generated in real time.
    It also calls the callback manager's on_llm_new_token event with
    similar parameters to the OpenAI LLM class method of the same name.
    Args:
        prompt: The prompt to pass into the model.
        stop: Optional list of stop words to use when generating.
        run_manager: Optional callback manager notified for each new token.
        **kwargs: Extra sampling parameters forwarded to the llama.cpp client.
    Returns:
        A generator representing the stream of tokens being generated.
    Yields:
        GenerationChunk objects containing a string token and, when available,
        log-probability metadata. See llama-cpp-python docs for details.
    Example:
        .. code-block:: python
            from langchain_community.llms import LlamaCpp
            llm = LlamaCpp(
                model_path="/path/to/local/model.bin",
                temperature=0.5,
            )
            for chunk in llm.stream(
                "Ask 'Hi, how are you?' like a pirate:'",
                stop=["'", "\\n"],
            ):
                result = chunk["choices"][0]
                print(result["text"], end='', flush=True)
    """
    # Merge instance-level sampling parameters with per-call overrides.
    params = {**self._get_parameters(stop), **kwargs}
    result = self.client(prompt=prompt, stream=True, **params)
    for part in result:
        logprobs = part['choices'][0].get('logprobs', None)
        chunk = GenerationChunk(text=part['choices'][0]['text'],
            generation_info={'logprobs': logprobs})
        yield chunk
        # The callback fires after the chunk is yielded, so consumers see
        # the token first; per-token logprobs are forwarded when present.
        if run_manager:
            run_manager.on_llm_new_token(token=chunk.text, verbose=self.
                verbose, log_probs=logprobs)
|
Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
A dictionary like objects containing a string token and metadata.
See llama-cpp-python docs and below for more.
Example:
.. code-block:: python
from langchain_community.llms import LlamaCpp
llm = LlamaCpp(
model_path="/path/to/local/model.bin",
temperature = 0.5
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","
"]):
result = chunk["choices"][0]
print(result["text"], end='', flush=True)
|
on_llm_end
|
"""Log records to Argilla when an LLM ends."""
if kwargs['parent_run_id']:
return
prompts = self.prompts[str(kwargs['run_id'])]
for prompt, generations in zip(prompts, response.generations):
self.dataset.add_records(records=[{'fields': {'prompt': prompt,
'response': generation.text.strip()}} for generation in generations])
self.prompts.pop(str(kwargs['run_id']))
if parse(self.ARGILLA_VERSION) < parse('1.14.0'):
self.dataset.push_to_argilla()
|
def on_llm_end(self, response: LLMResult, **kwargs: Any) ->None:
    """Log records to Argilla when an LLM ends."""
    # Only log for top-level runs; nested runs are ignored.
    if kwargs['parent_run_id']:
        return
    run_id = str(kwargs['run_id'])
    prompts = self.prompts[run_id]
    for prompt, generations in zip(prompts, response.generations):
        records = [{'fields': {'prompt': prompt, 'response': generation.
            text.strip()}} for generation in generations]
        self.dataset.add_records(records=records)
    self.prompts.pop(run_id)
    # Older Argilla clients require an explicit push after adding records.
    if parse(self.ARGILLA_VERSION) < parse('1.14.0'):
        self.dataset.push_to_argilla()
|
Log records to Argilla when an LLM ends.
|
record_manager
|
"""Timestamped set fixture."""
record_manager = SQLRecordManager('kittens', db_url='sqlite:///:memory:')
record_manager.create_schema()
return record_manager
|
@pytest.fixture
def record_manager() ->SQLRecordManager:
    """Timestamped set fixture backed by an in-memory SQLite database."""
    manager = SQLRecordManager('kittens', db_url='sqlite:///:memory:')
    manager.create_schema()
    return manager
|
Timestamped set fixture.
|
_default_script_query
|
"""For Script Scoring Search, this is the default query."""
if not pre_filter:
pre_filter = MATCH_ALL_QUERY
return {'size': k, 'query': {'script_score': {'query': pre_filter, 'script':
{'source': 'knn_score', 'lang': 'knn', 'params': {'field': vector_field,
'query_value': query_vector, 'space_type': space_type}}}}}
|
def _default_script_query(query_vector: List[float], k: int=4, space_type:
    str='l2', pre_filter: Optional[Dict]=None, vector_field: str='vector_field'
    ) ->Dict:
    """For Script Scoring Search, this is the default query."""
    # Fall back to a match-all filter when no pre-filter is supplied.
    filter_query = pre_filter if pre_filter else MATCH_ALL_QUERY
    script = {'source': 'knn_score', 'lang': 'knn', 'params': {'field':
        vector_field, 'query_value': query_vector, 'space_type': space_type}}
    return {'size': k, 'query': {'script_score': {'query': filter_query,
        'script': script}}}
|
For Script Scoring Search, this is the default query.
|
validate_environment
|
"""Validate api key, python package exists."""
google_api_key = get_from_dict_or_env(values, 'google_api_key',
'GOOGLE_API_KEY')
try:
import google.generativeai as genai
genai.configure(api_key=google_api_key)
except ImportError:
raise ImportError('Could not import google.generativeai python package.')
values['client'] = genai
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate api key, python package exists."""
    api_key = get_from_dict_or_env(values, 'google_api_key', 'GOOGLE_API_KEY')
    try:
        import google.generativeai as genai
        genai.configure(api_key=api_key)
    except ImportError:
        raise ImportError(
            'Could not import google.generativeai python package.')
    values['client'] = genai
    return values
|
Validate api key, python package exists.
|
from_llm
|
"""Create a chain from an LLM."""
fallacy_critique_chain = LLMChain(llm=llm, prompt=fallacy_critique_prompt)
fallacy_revision_chain = LLMChain(llm=llm, prompt=fallacy_revision_prompt)
return cls(chain=chain, fallacy_critique_chain=fallacy_critique_chain,
fallacy_revision_chain=fallacy_revision_chain, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, chain: LLMChain,
    fallacy_critique_prompt: BasePromptTemplate=FALLACY_CRITIQUE_PROMPT,
    fallacy_revision_prompt: BasePromptTemplate=FALLACY_REVISION_PROMPT, **
    kwargs: Any) ->'FallacyChain':
    """Create a chain from an LLM."""
    # Build the critique and revision sub-chains from the same LLM.
    critique = LLMChain(llm=llm, prompt=fallacy_critique_prompt)
    revision = LLMChain(llm=llm, prompt=fallacy_revision_prompt)
    return cls(chain=chain, fallacy_critique_chain=critique,
        fallacy_revision_chain=revision, **kwargs)
|
Create a chain from an LLM.
|
_llm_type
|
return 'azure-openai-chat'
|
@property
def _llm_type(self) ->str:
    """Identifier used to tag this chat model type in LangChain."""
    model_type = 'azure-openai-chat'
    return model_type
| null |
embeddings
|
return self.embedding
|
@property
def embeddings(self) ->Optional[Embeddings]:
    """Expose the embedding function backing this store, if any."""
    embedding_fn = self.embedding
    return embedding_fn
| null |
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'prompts', 'base']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object."""
    namespace = ['langchain', 'prompts', 'base']
    return namespace
|
Get the namespace of the langchain object.
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """The module's public __all__ must match the expected export list."""
    assert set(EXPECTED_ALL) == set(__all__)
| null |
get_tools
|
"""Get the tools in the toolkit."""
return self.tools
|
def get_tools(self) ->List[BaseTool]:
    """Get the tools in the toolkit."""
    toolkit_tools = self.tools
    return toolkit_tools
|
Get the tools in the toolkit.
|
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'messages']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object."""
    namespace = ['langchain', 'schema', 'messages']
    return namespace
|
Get the namespace of the langchain object.
|
add_documents
|
"""Run more documents through the embeddings and add to the vectorstore.
Args:
documents (List[Document]: Documents to add to the vectorstore.
Returns:
List[str]: List of IDs of the added texts.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return self.add_texts(texts, metadatas, **kwargs)
|
def add_documents(self, documents: List[Document], **kwargs: Any) ->List[str]:
    """Run more documents through the embeddings and add to the vectorstore.

    Args:
        documents: Documents to add to the vectorstore.

    Returns:
        List[str]: List of IDs of the added texts.
    """
    # Split each document into its text and metadata in a single pass.
    texts = []
    metadatas = []
    for document in documents:
        texts.append(document.page_content)
        metadatas.append(document.metadata)
    return self.add_texts(texts, metadatas, **kwargs)
|
Run more documents through the embeddings and add to the vectorstore.
Args:
documents (List[Document]: Documents to add to the vectorstore.
Returns:
List[str]: List of IDs of the added texts.
|
test_dependency_string
|
_assert_dependency_equals(parse_dependency_string(
'git+ssh://git@github.com/efriis/myrepo.git', None, None, None), git=
'ssh://git@github.com/efriis/myrepo.git', ref=None, subdirectory=None)
_assert_dependency_equals(parse_dependency_string(
'git+https://github.com/efriis/myrepo.git#subdirectory=src', None, None,
None), git='https://github.com/efriis/myrepo.git', subdirectory='src',
ref=None)
_assert_dependency_equals(parse_dependency_string(
'git+ssh://git@github.com:efriis/myrepo.git#develop', None, None, None),
git='ssh://git@github.com:efriis/myrepo.git', ref='develop',
subdirectory=None)
_assert_dependency_equals(parse_dependency_string(
'git+ssh://git@github.com/efriis/myrepo.git#develop', None, None, None),
git='ssh://git@github.com/efriis/myrepo.git', ref='develop',
subdirectory=None)
_assert_dependency_equals(parse_dependency_string(
'git+ssh://git@github.com:efriis/myrepo.git@develop', None, None, None),
git='ssh://git@github.com:efriis/myrepo.git', ref='develop',
subdirectory=None)
_assert_dependency_equals(parse_dependency_string('simple-pirate', None,
None, None), git=DEFAULT_GIT_REPO, subdirectory=
f'{DEFAULT_GIT_SUBDIRECTORY}/simple-pirate', ref=DEFAULT_GIT_REF)
|
def test_dependency_string() ->None:
    """Exercise parse_dependency_string over representative dependency specs."""
    # (dependency string, expected git URL, expected ref, expected subdirectory)
    cases = [
        ('git+ssh://git@github.com/efriis/myrepo.git',
            'ssh://git@github.com/efriis/myrepo.git', None, None),
        ('git+https://github.com/efriis/myrepo.git#subdirectory=src',
            'https://github.com/efriis/myrepo.git', None, 'src'),
        ('git+ssh://git@github.com:efriis/myrepo.git#develop',
            'ssh://git@github.com:efriis/myrepo.git', 'develop', None),
        ('git+ssh://git@github.com/efriis/myrepo.git#develop',
            'ssh://git@github.com/efriis/myrepo.git', 'develop', None),
        ('git+ssh://git@github.com:efriis/myrepo.git@develop',
            'ssh://git@github.com:efriis/myrepo.git', 'develop', None),
        ('simple-pirate', DEFAULT_GIT_REPO, DEFAULT_GIT_REF,
            f'{DEFAULT_GIT_SUBDIRECTORY}/simple-pirate'),
    ]
    for dep_string, git, ref, subdirectory in cases:
        _assert_dependency_equals(parse_dependency_string(dep_string, None,
            None, None), git=git, ref=ref, subdirectory=subdirectory)
| null |
embeddings
|
return self._embedding
|
@property
def embeddings(self) ->Optional[Embeddings]:
    """Expose the embedding function backing this store, if any."""
    embedding_fn = self._embedding
    return embedding_fn
| null |
similarity_search_by_vector_with_score
|
"""
Performs similarity search from a embeddings vector.
Args:
query_embedding: Embeddings vector to search for.
k: Number of results to return.
custom_query: Use this custom query instead default query (kwargs)
kwargs: other vector store specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
if 'custom_query' in kwargs:
query = kwargs['custom_query']
else:
query = self._create_query(query_embedding, k, **kwargs)
try:
response = self._vespa_app.query(body=query)
except Exception as e:
raise RuntimeError(
f"Could not retrieve data from Vespa: {e.args[0][0]['summary']}. Error: {e.args[0][0]['message']}"
)
if not str(response.status_code).startswith('2'):
raise RuntimeError(
f"Could not retrieve data from Vespa. Error code: {response.status_code}. Message: {response.json['message']}"
)
root = response.json['root']
if 'errors' in root:
import json
raise RuntimeError(json.dumps(root['errors']))
if response is None or response.hits is None:
return []
docs = []
for child in response.hits:
page_content = child['fields'][self._page_content_field]
score = child['relevance']
metadata = {'id': child['id']}
if self._metadata_fields is not None:
for field in self._metadata_fields:
metadata[field] = child['fields'].get(field)
doc = Document(page_content=page_content, metadata=metadata)
docs.append((doc, score))
return docs
|
def similarity_search_by_vector_with_score(self, query_embedding: List[
    float], k: int=4, **kwargs: Any) ->List[Tuple[Document, float]]:
    """
    Performs similarity search from a embeddings vector.
    Args:
        query_embedding: Embeddings vector to search for.
        k: Number of results to return.
        custom_query: Use this custom query instead default query (kwargs)
        kwargs: other vector store specific parameters
    Returns:
        List of (Document, relevance score) tuples for the top matches.
    """
    # A caller-supplied query overrides the default nearest-neighbor query.
    if 'custom_query' in kwargs:
        query = kwargs['custom_query']
    else:
        query = self._create_query(query_embedding, k, **kwargs)
    try:
        response = self._vespa_app.query(body=query)
    except Exception as e:
        # assumes pyvespa packs a summary/message dict into e.args[0][0]
        # — TODO confirm against the installed pyvespa version.
        raise RuntimeError(
            f"Could not retrieve data from Vespa: {e.args[0][0]['summary']}. Error: {e.args[0][0]['message']}"
            )
    # Any non-2xx HTTP status is treated as a hard failure.
    if not str(response.status_code).startswith('2'):
        raise RuntimeError(
            f"Could not retrieve data from Vespa. Error code: {response.status_code}. Message: {response.json['message']}"
            )
    root = response.json['root']
    if 'errors' in root:
        import json
        raise RuntimeError(json.dumps(root['errors']))
    if response is None or response.hits is None:
        return []
    docs = []
    for child in response.hits:
        page_content = child['fields'][self._page_content_field]
        score = child['relevance']
        metadata = {'id': child['id']}
        # Copy over any configured metadata fields present in the hit.
        if self._metadata_fields is not None:
            for field in self._metadata_fields:
                metadata[field] = child['fields'].get(field)
        doc = Document(page_content=page_content, metadata=metadata)
        docs.append((doc, score))
    return docs
|
Performs similarity search from a embeddings vector.
Args:
query_embedding: Embeddings vector to search for.
k: Number of results to return.
custom_query: Use this custom query instead default query (kwargs)
kwargs: other vector store specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
|
pop
|
"""Pop the top n elements of the stack and return the last one."""
if len(self.stack) < n:
return None
for _ in range(n):
node = self.stack.pop()
return node
|
def pop(self, n: int=1) ->Optional['Thought']:
    """Pop the top n elements of the stack and return the last one popped.

    Args:
        n: Number of elements to remove from the top of the stack.

    Returns:
        The last element popped (the n-th from the top), or None when n is
        not positive or the stack holds fewer than n elements.
    """
    # Guard n <= 0 explicitly: with the original code the loop body never
    # ran, leaving `node` unbound and raising UnboundLocalError on return.
    if n <= 0 or len(self.stack) < n:
        return None
    node = None
    for _ in range(n):
        node = self.stack.pop()
    return node
|
Pop the top n elements of the stack and return the last one.
|
test_embed_documents
|
texts = ['1', '22', 'a', '333']
vectors = cache_embeddings.embed_documents(texts)
expected_vectors: List[List[float]] = [[1, 2.0], [2.0, 3.0], [1.0, 2.0], [
3.0, 4.0]]
assert vectors == expected_vectors
keys = list(cache_embeddings.document_embedding_store.yield_keys())
assert len(keys) == 4
assert keys[0] == 'test_namespace812b86c1-8ebf-5483-95c6-c95cf2b52d12'
|
def test_embed_documents(cache_embeddings: CacheBackedEmbeddings) ->None:
    """Embedding documents returns the expected vectors and populates the cache."""
    texts = ['1', '22', 'a', '333']
    vectors = cache_embeddings.embed_documents(texts)
    assert vectors == [[1, 2.0], [2.0, 3.0], [1.0, 2.0], [3.0, 4.0]]
    # One cache entry per input text, keyed by namespace + content hash.
    keys = list(cache_embeddings.document_embedding_store.yield_keys())
    assert len(keys) == 4
    assert keys[0] == 'test_namespace812b86c1-8ebf-5483-95c6-c95cf2b52d12'
| null |
setUp
|
self.conn_string = '<enter-valid-couchbase-connection-string>'
self.database_user = '<enter-valid-couchbase-user>'
self.database_password = '<enter-valid-couchbase-password>'
self.valid_query = 'select h.* from `travel-sample`.inventory.hotel h limit 10'
self.valid_page_content_fields = ['country', 'name', 'description']
self.valid_metadata_fields = ['id']
|
def setUp(self) ->None:
    """Configure Couchbase connection details and query fixtures."""
    self.conn_string = '<enter-valid-couchbase-connection-string>'
    self.database_user = '<enter-valid-couchbase-user>'
    self.database_password = '<enter-valid-couchbase-password>'
    self.valid_query = (
        'select h.* from `travel-sample`.inventory.hotel h limit 10')
    self.valid_metadata_fields = ['id']
    self.valid_page_content_fields = ['country', 'name', 'description']
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.