Source code for langchain.prompts.few_shot
example_prompt=example_prompt,
)
final_prompt = ChatPromptTemplate.from_messages(
[
('system', 'You are a helpful AI Assistant'),
few_shot_prompt,
('human', '{input}'),
]
)
final_prompt.format(input="What is 4+4?")
Prompt template with dynamically selected examples:
.. code-block:: python
from langchain.prompts import SemanticSimilarityExampleSelector
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
{"input": "2+4", "output": "6"},
# ...
]
to_vectorize = [
" ".join(example.values())
for example in examples
]
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_texts(
to_vectorize, embeddings, metadatas=examples
)
example_selector = SemanticSimilarityExampleSelector(
vectorstore=vectorstore
)
from langchain.schema import SystemMessage
from langchain.prompts import HumanMessagePromptTemplate
from langchain.prompts.few_shot import FewShotChatMessagePromptTemplate
few_shot_prompt = FewShotChatMessagePromptTemplate(
# Which variable(s) will be passed to the example selector.
input_variables=["input"],
example_selector=example_selector,
# Define how each example will be formatted.
# In this case, each example will become 2 messages:
# 1 human, and 1 AI
example_prompt=(
HumanMessagePromptTemplate.from_template("{input}")
+ AIMessagePromptTemplate.from_template("{output}")
),
)
# Define the overall prompt.
final_prompt = (
SystemMessagePromptTemplate.from_template(
"You are a helpful AI Assistant"
)
+ few_shot_prompt
+ HumanMessagePromptTemplate.from_template("{input}")
)
# Show the prompt
print(final_prompt.format_messages(input="What's 3+3?"))
# Use within an LLM
from langchain.chat_models import ChatAnthropic
chain = final_prompt | ChatAnthropic()
chain.invoke({"input": "What's 3+3?"})
"""
@property
def lc_serializable(self) -> bool:
"""Return whether the prompt template is lc_serializable.
Returns:
Boolean indicating whether the prompt template is lc_serializable.
"""
return False
input_variables: List[str] = Field(default_factory=list)
"""A list of the names of the variables the prompt template will use
to pass to the example_selector, if provided."""
example_prompt: Union[BaseMessagePromptTemplate, BaseChatPromptTemplate]
"""The class to format each example."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
[docs] def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
"""Format kwargs into a list of messages.
Args:
**kwargs: keyword arguments to use for filling in templates in messages.
Returns:
A list of formatted messages with all template variables filled in.
"""
# Get the examples to use.
examples = self._get_examples(**kwargs)
examples = [
{k: e[k] for k in self.example_prompt.input_variables} for e in examples
]
# Format the examples.
messages = [
message
for example in examples
for message in self.example_prompt.format_messages(**example)
]
return messages
[docs] def format(self, **kwargs: Any) -> str:
"""Format the prompt with inputs generating a string.
Use this method to generate a string representation of a prompt consisting
of chat messages.
Useful for feeding into a string based completion language model or debugging.
Args:
**kwargs: keyword arguments to use for formatting.
Returns:
A string representation of the prompt
"""
messages = self.format_messages(**kwargs)
return get_buffer_string(messages)
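As a sketch of the two entry points above: ``format_messages`` returns the formatted examples as chat messages, while ``format`` flattens them into one string via ``get_buffer_string``. The names below are carried over from the docstring example; the exact rendering depends on the message types used.

.. code-block:: python

    # Assumes `few_shot_prompt` was built as in the docstring example,
    # with an example selector keyed on "input".
    msgs = few_shot_prompt.format_messages(input="What's 3+3?")
    # -> e.g. [HumanMessage(content="2+3"), AIMessage(content="5"), ...]
    text = few_shot_prompt.format(input="What's 3+3?")
    # -> "Human: 2+3\nAI: 5\n..." (get_buffer_string rendering)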
Source code for langchain.prompts.example_selector.base
"""Interface for selecting examples to include in prompts."""
from abc import ABC, abstractmethod
from typing import Any, Dict, List
[docs]class BaseExampleSelector(ABC):
"""Interface for selecting examples to include in prompts."""
[docs] @abstractmethod
def add_example(self, example: Dict[str, str]) -> Any:
"""Add new example to store for a key."""
[docs] @abstractmethod
def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Select which examples to use based on the inputs."""
Source code for langchain.prompts.example_selector.semantic_similarity
"""Example selector that selects examples based on SemanticSimilarity."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Type
from pydantic import BaseModel, Extra
from langchain.embeddings.base import Embeddings
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.vectorstores.base import VectorStore
[docs]def sorted_values(values: Dict[str, str]) -> List[Any]:
"""Return a list of values in dict sorted by key."""
return [values[val] for val in sorted(values)]
[docs]class SemanticSimilarityExampleSelector(BaseExampleSelector, BaseModel):
"""Example selector that selects examples based on SemanticSimilarity."""
vectorstore: VectorStore
"""VectorStore than contains information about examples."""
k: int = 4
"""Number of examples to select."""
example_keys: Optional[List[str]] = None
"""Optional keys to filter examples to."""
input_keys: Optional[List[str]] = None
"""Optional keys to filter input to. If provided, the search is based on
the input variables instead of all variables."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
[docs] def add_example(self, example: Dict[str, str]) -> str:
"""Add new example to vectorstore."""
if self.input_keys:
string_example = " ".join(
sorted_values({key: example[key] for key in self.input_keys})
)
else:
string_example = " ".join(sorted_values(example))
ids = self.vectorstore.add_texts([string_example], metadatas=[example])
return ids[0]
[docs] def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Select which examples to use based on semantic similarity."""
# Get the docs with the highest similarity.
if self.input_keys:
input_variables = {key: input_variables[key] for key in self.input_keys}
query = " ".join(sorted_values(input_variables))
example_docs = self.vectorstore.similarity_search(query, k=self.k)
# Get the examples from the metadata.
# This assumes that examples are stored in metadata.
examples = [dict(e.metadata) for e in example_docs]
# If example keys are provided, filter examples to those keys.
if self.example_keys:
examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
return examples
[docs] @classmethod
def from_examples(
cls,
examples: List[dict],
embeddings: Embeddings,
vectorstore_cls: Type[VectorStore],
k: int = 4,
input_keys: Optional[List[str]] = None,
**vectorstore_cls_kwargs: Any,
) -> SemanticSimilarityExampleSelector:
"""Create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
Args:
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select
input_keys: If provided, the search is based on the input variables
instead of all variables.
vectorstore_cls_kwargs: Optional kwargs passed through to the vector store class.
Returns:
The ExampleSelector instantiated, backed by a vector store.
"""
if input_keys:
string_examples = [
" ".join(sorted_values({k: eg[k] for k in input_keys}))
for eg in examples
]
else:
string_examples = [" ".join(sorted_values(eg)) for eg in examples]
vectorstore = vectorstore_cls.from_texts(
string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs
)
return cls(vectorstore=vectorstore, k=k, input_keys=input_keys)
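A usage sketch of ``from_examples`` (assuming the ``faiss`` package is installed and an OpenAI API key is available in the environment; the example data is illustrative):

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.prompts import SemanticSimilarityExampleSelector
    from langchain.vectorstores import FAISS

    examples = [
        {"input": "happy", "output": "sad"},
        {"input": "tall", "output": "short"},
    ]
    selector = SemanticSimilarityExampleSelector.from_examples(
        examples, OpenAIEmbeddings(), FAISS, k=1
    )
    # Returns the stored example closest in embedding space to the input.
    selector.select_examples({"input": "joyful"})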
[docs]class MaxMarginalRelevanceExampleSelector(SemanticSimilarityExampleSelector):
"""ExampleSelector that selects examples based on Max Marginal Relevance.
This was shown to improve performance in this paper:
https://arxiv.org/pdf/2211.13892.pdf
"""
fetch_k: int = 20
"""Number of examples to fetch to rerank."""
[docs] def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Select which examples to use based on semantic similarity."""
# Get the docs with the highest similarity.
if self.input_keys:
input_variables = {key: input_variables[key] for key in self.input_keys}
query = " ".join(sorted_values(input_variables))
example_docs = self.vectorstore.max_marginal_relevance_search(
query, k=self.k, fetch_k=self.fetch_k
)
# Get the examples from the metadata.
# This assumes that examples are stored in metadata.
examples = [dict(e.metadata) for e in example_docs]
# If example keys are provided, filter examples to those keys.
if self.example_keys:
examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
return examples
[docs] @classmethod
def from_examples(
cls,
examples: List[dict],
embeddings: Embeddings,
vectorstore_cls: Type[VectorStore],
k: int = 4,
input_keys: Optional[List[str]] = None,
fetch_k: int = 20,
**vectorstore_cls_kwargs: Any,
) -> MaxMarginalRelevanceExampleSelector:
"""Create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
Args:
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select
input_keys: If provided, the search is based on the input variables
instead of all variables.
vectorstore_cls_kwargs: Optional kwargs passed through to the vector store class.
Returns:
The ExampleSelector instantiated, backed by a vector store.
"""
if input_keys:
string_examples = [
" ".join(sorted_values({k: eg[k] for k in input_keys}))
for eg in examples
]
else:
string_examples = [" ".join(sorted_values(eg)) for eg in examples]
vectorstore = vectorstore_cls.from_texts(
string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs
)
return cls(vectorstore=vectorstore, k=k, fetch_k=fetch_k, input_keys=input_keys)
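Construction mirrors the parent class; ``fetch_k`` candidates are retrieved first, then reranked for relevance and diversity. A sketch reusing the names from the previous example:

.. code-block:: python

    mmr_selector = MaxMarginalRelevanceExampleSelector.from_examples(
        examples, OpenAIEmbeddings(), FAISS, k=2, fetch_k=10
    )
    mmr_selector.select_examples({"input": "joyful"})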
Source code for langchain.prompts.example_selector.ngram_overlap
"""Select and order examples based on ngram overlap score (sentence_bleu score).
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
from typing import Dict, List
import numpy as np
from pydantic import BaseModel, root_validator
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate
[docs]def ngram_overlap_score(source: List[str], example: List[str]) -> float:
"""Compute ngram overlap score of source and example as sentence_bleu score.
Use sentence_bleu with method1 smoothing function and auto reweighting.
Return float value between 0.0 and 1.0 inclusive.
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
from nltk.translate.bleu_score import (
SmoothingFunction, # type: ignore
sentence_bleu,
)
hypotheses = source[0].split()
references = [s.split() for s in example]
return float(
sentence_bleu(
references,
hypotheses,
smoothing_function=SmoothingFunction().method1,
auto_reweigh=True,
)
)
[docs]class NGramOverlapExampleSelector(BaseExampleSelector, BaseModel):
"""Select and order examples based on ngram overlap score (sentence_bleu score).
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
examples: List[dict]
"""A list of the examples that the prompt template expects."""
example_prompt: PromptTemplate
"""Prompt template used to format the examples."""
threshold: float = -1.0
"""Threshold at which algorithm stops. Set to -1.0 by default.
For negative threshold:
select_examples sorts examples by ngram_overlap_score, but excludes none.
For threshold greater than 1.0:
select_examples excludes all examples, and returns an empty list.
For threshold equal to 0.0:
select_examples sorts examples by ngram_overlap_score,
and excludes examples with no ngram overlap with input.
"""
@root_validator(pre=True)
def check_dependencies(cls, values: Dict) -> Dict:
"""Check that valid dependencies exist."""
try:
from nltk.translate.bleu_score import ( # noqa: disable=F401
SmoothingFunction,
sentence_bleu,
)
except ImportError as e:
raise ImportError(
"Not all the correct dependencies for this ExampleSelect exist."
"Please install nltk with `pip install nltk`."
) from e
return values
[docs] def add_example(self, example: Dict[str, str]) -> None:
"""Add new example to list."""
self.examples.append(example)
[docs] def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Return list of examples sorted by ngram_overlap_score with input.
Descending order.
Excludes any examples with ngram_overlap_score less than or equal to threshold.
"""
inputs = list(input_variables.values())
examples = []
k = len(self.examples)
score = [0.0] * k
first_prompt_template_key = self.example_prompt.input_variables[0]
for i in range(k):
score[i] = ngram_overlap_score(
inputs, [self.examples[i][first_prompt_template_key]]
)
while True:
arg_max = np.argmax(score)
if (score[arg_max] < self.threshold) or abs(
score[arg_max] - self.threshold
) < 1e-9:
break
examples.append(self.examples[arg_max])
score[arg_max] = self.threshold - 1.0
return examples
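A sketch of the threshold behavior described above (requires ``nltk``; the data is illustrative). With ``threshold=0.0``, examples that share no ngrams with the input are dropped; with the default ``-1.0``, all examples are returned in sorted order.

.. code-block:: python

    example_prompt = PromptTemplate(
        input_variables=["input", "output"],
        template="Input: {input}\nOutput: {output}",
    )
    selector = NGramOverlapExampleSelector(
        examples=[
            {"input": "See Spot run.", "output": "Ver correr a Spot."},
            {"input": "My dog barks.", "output": "Mi perro ladra."},
        ],
        example_prompt=example_prompt,
        threshold=0.0,
    )
    # Only "See Spot run." overlaps with the input, so it is returned
    # and the non-overlapping example is excluded.
    selector.select_examples({"sentence": "Spot can run fast."})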
Source code for langchain.prompts.example_selector.length_based
"""Select examples based on length."""
import re
from typing import Callable, Dict, List
from pydantic import BaseModel, validator
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate
def _get_length_based(text: str) -> int:
return len(re.split("\n| ", text))
[docs]class LengthBasedExampleSelector(BaseExampleSelector, BaseModel):
"""Select examples based on length."""
examples: List[dict]
"""A list of the examples that the prompt template expects."""
example_prompt: PromptTemplate
"""Prompt template used to format the examples."""
get_text_length: Callable[[str], int] = _get_length_based
"""Function to measure prompt length. Defaults to word count."""
max_length: int = 2048
"""Max length for the prompt, beyond which examples are cut."""
example_text_lengths: List[int] = [] #: :meta private:
[docs] def add_example(self, example: Dict[str, str]) -> None:
"""Add new example to list."""
self.examples.append(example)
string_example = self.example_prompt.format(**example)
self.example_text_lengths.append(self.get_text_length(string_example))
@validator("example_text_lengths", always=True)
def calculate_example_text_lengths(cls, v: List[int], values: Dict) -> List[int]:
"""Calculate text lengths if they don't exist."""
# Check if text lengths were passed in
if v:
return v
# If they were not, calculate them
example_prompt = values["example_prompt"]
get_text_length = values["get_text_length"]
string_examples = [example_prompt.format(**eg) for eg in values["examples"]]
return [get_text_length(eg) for eg in string_examples]
[docs] def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Select which examples to use based on the input lengths."""
inputs = " ".join(input_variables.values())
remaining_length = self.max_length - self.get_text_length(inputs)
i = 0
examples = []
while remaining_length > 0 and i < len(self.examples):
new_length = remaining_length - self.example_text_lengths[i]
if new_length < 0:
break
else:
examples.append(self.examples[i])
remaining_length = new_length
i += 1
return examples
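A sketch of the length cutoff: with a small ``max_length`` (word count by default), examples are included in order until the budget is exhausted.

.. code-block:: python

    selector = LengthBasedExampleSelector(
        examples=[
            {"input": "happy", "output": "sad"},
            {"input": "tall", "output": "short"},
        ],
        example_prompt=PromptTemplate(
            input_variables=["input", "output"],
            template="Input: {input}\nOutput: {output}",
        ),
        max_length=10,
    )
    # Longer inputs leave less room, so fewer examples are selected.
    selector.select_examples({"input": "big"})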
Source code for langchain.embeddings.jina
import os
from typing import Any, Dict, List, Optional
import requests
from pydantic import BaseModel, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
[docs]class JinaEmbeddings(BaseModel, Embeddings):
"""Jina embedding models."""
client: Any #: :meta private:
model_name: str = "ViT-B-32::openai"
"""Model name to use."""
jina_auth_token: Optional[str] = None
jina_api_url: str = "https://api.clip.jina.ai/api/v1/models/"
request_headers: Optional[dict] = None
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that auth token exists in environment."""
# Set Auth
jina_auth_token = get_from_dict_or_env(
values, "jina_auth_token", "JINA_AUTH_TOKEN"
)
values["jina_auth_token"] = jina_auth_token
values["request_headers"] = (("authorization", jina_auth_token),)
# Test that package is installed
try:
import jina
except ImportError:
raise ImportError(
"Could not import `jina` python package. "
"Please install it with `pip install jina`."
)
# Setup client
jina_api_url = os.environ.get("JINA_API_URL", values["jina_api_url"])
model_name = values["model_name"]
try:
resp = requests.get(
jina_api_url + f"?model_name={model_name}",
headers={"Authorization": jina_auth_token},
)
if resp.status_code == 401:
raise ValueError(
"The given Jina auth token is invalid. "
"Please check your Jina auth token."
)
elif resp.status_code == 404:
raise ValueError(
f"The given model name `{model_name}` is not valid. "
f"Please go to https://cloud.jina.ai/user/inference "
f"and create a model with the given model name."
)
resp.raise_for_status()
endpoint = resp.json()["endpoints"]["grpc"]
values["client"] = jina.Client(host=endpoint)
except requests.exceptions.HTTPError as err:
raise ValueError(f"Error: {err!r}")
return values
def _post(self, docs: List[Any], **kwargs: Any) -> Any:
payload = dict(inputs=docs, metadata=self.request_headers, **kwargs)
return self.client.post(on="/encode", **payload)
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Jina's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
from docarray import Document, DocumentArray
embeddings = self._post(
docs=DocumentArray([Document(text=t) for t in texts])
).embeddings
return [list(map(float, e)) for e in embeddings]
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to Jina's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
from docarray import Document, DocumentArray
embedding = self._post(docs=DocumentArray([Document(text=text)])).embeddings[0]
return list(map(float, embedding))
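A usage sketch (assumes ``JINA_AUTH_TOKEN`` is set in the environment and the ``jina`` and ``docarray`` packages are installed):

.. code-block:: python

    from langchain.embeddings import JinaEmbeddings

    embeddings = JinaEmbeddings(model_name="ViT-B-32::openai")
    doc_vectors = embeddings.embed_documents(["hello", "world"])
    query_vector = embeddings.embed_query("hello")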
Source code for langchain.embeddings.elasticsearch
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from langchain.utils import get_from_env
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
from elasticsearch.client import MlClient
from langchain.embeddings.base import Embeddings
[docs]class ElasticsearchEmbeddings(Embeddings):
"""Elasticsearch embedding models.
This class provides an interface to generate embeddings using a model deployed
in an Elasticsearch cluster. It requires an Elasticsearch connection object
and the model_id of the model deployed in the cluster.
In Elasticsearch you need to have an embedding model loaded and deployed.
- https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html
- https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html
""" # noqa: E501
[docs] def __init__(
self,
client: MlClient,
model_id: str,
*,
input_field: str = "text_field",
):
"""
Initialize the ElasticsearchEmbeddings instance.
Args:
client (MlClient): An Elasticsearch ML client object.
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
"""
self.client = client
self.model_id = model_id
self.input_field = input_field
[docs] @classmethod
def from_credentials(
cls,
model_id: str,
*,
es_cloud_id: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
input_field: str = "text_field",
) -> ElasticsearchEmbeddings:
"""Instantiate embeddings from Elasticsearch credentials.
Args:
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect to.
es_user: (str, optional): Elasticsearch username.
es_password: (str, optional): Elasticsearch password.
Example:
.. code-block:: python
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Credentials can be passed in two ways. Either set the env vars
# ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically
# pulled in, or pass them in directly as kwargs.
embeddings = ElasticsearchEmbeddings.from_credentials(
model_id,
input_field=input_field,
# es_cloud_id="foo",
# es_user="bar",
# es_password="baz",
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
embeddings.embed_documents(documents)
"""
try:
from elasticsearch import Elasticsearch
from elasticsearch.client import MlClient
except ImportError:
raise ImportError(
"elasticsearch package not found, please install with 'pip install "
"elasticsearch'"
)
es_cloud_id = es_cloud_id or get_from_env("es_cloud_id", "ES_CLOUD_ID")
es_user = es_user or get_from_env("es_user", "ES_USER")
es_password = es_password or get_from_env("es_password", "ES_PASSWORD")
# Connect to Elasticsearch
es_connection = Elasticsearch(
cloud_id=es_cloud_id, basic_auth=(es_user, es_password)
)
client = MlClient(es_connection)
return cls(client, model_id, input_field=input_field)
[docs] @classmethod
def from_es_connection(
cls,
model_id: str,
es_connection: Elasticsearch,
input_field: str = "text_field",
) -> ElasticsearchEmbeddings:
"""
Instantiate embeddings from an existing Elasticsearch connection.
This method provides a way to create an instance of the ElasticsearchEmbeddings
class using an existing Elasticsearch connection. The connection object is used
to create an MlClient, which is then used to initialize the
ElasticsearchEmbeddings instance.
Args:
model_id (str): The model_id of the model deployed in the Elasticsearch cluster.
es_connection (elasticsearch.Elasticsearch): An existing Elasticsearch
connection object. input_field (str, optional): The name of the key for the
input text field in the document. Defaults to 'text_field'.
Returns:
ElasticsearchEmbeddings: An instance of the ElasticsearchEmbeddings class.
Example:
.. code-block:: python
from elasticsearch import Elasticsearch
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Create Elasticsearch connection
es_connection = Elasticsearch(
hosts=["localhost:9200"], http_auth=("user", "password")
)
# Instantiate ElasticsearchEmbeddings using the existing connection
embeddings = ElasticsearchEmbeddings.from_es_connection(
model_id,
es_connection,
input_field=input_field,
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
embeddings.embed_documents(documents)
"""
# Importing MlClient from elasticsearch.client within the method to
# avoid unnecessary import if the method is not used
from elasticsearch.client import MlClient
# Create an MlClient from the given Elasticsearch connection
client = MlClient(es_connection)
# Return a new instance of the ElasticsearchEmbeddings class with
# the MlClient, model_id, and input_field
return cls(client, model_id, input_field=input_field)
def _embedding_func(self, texts: List[str]) -> List[List[float]]:
"""
Generate embeddings for the given texts using the Elasticsearch model.
Args:
texts (List[str]): A list of text strings to generate embeddings for.
Returns:
List[List[float]]: A list of embeddings, one for each text in the input
list.
"""
response = self.client.infer_trained_model(
model_id=self.model_id, docs=[{self.input_field: text} for text in texts]
)
embeddings = [doc["predicted_value"] for doc in response["inference_results"]]
return embeddings
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Generate embeddings for a list of documents.
Args:
texts (List[str]): A list of document text strings to generate embeddings
for.
Returns:
List[List[float]]: A list of embeddings, one for each document in the input
list.
"""
return self._embedding_func(texts)
[docs] def embed_query(self, text: str) -> List[float]:
"""
Generate an embedding for a single query text.
Args:
text (str): The query text to generate an embedding for.
Returns:
List[float]: The embedding for the input query text.
"""
return self._embedding_func([text])[0]
Source code for langchain.embeddings.edenai
from typing import Dict, List, Optional
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.embeddings.base import Embeddings
from langchain.requests import Requests
from langchain.utils import get_from_dict_or_env
[docs]class EdenAiEmbeddings(BaseModel, Embeddings):
"""EdenAI embedding.
environment variable ``EDENAI_API_KEY`` set with your API key, or pass
it as a named parameter.
"""
edenai_api_key: Optional[str] = Field(None, description="EdenAI API Token")
provider: Optional[str] = "openai"
"""embedding provider to use (eg: openai,google etc.)"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
values["edenai_api_key"] = get_from_dict_or_env(
values, "edenai_api_key", "EDENAI_API_KEY"
)
return values
def _generate_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Compute embeddings using EdenAi api."""
url = "https://api.edenai.run/v2/text/embeddings"
headers = {
"accept": "application/json",
"content-type": "application/json",
"authorization": f"Bearer {self.edenai_api_key}",
}
payload = {"texts": texts, "providers": self.provider}
request = Requests(headers=headers)
response = request.post(url=url, data=payload)
if response.status_code >= 500:
raise Exception(f"EdenAI Server: Error {response.status_code}")
elif response.status_code >= 400:
raise ValueError(f"EdenAI received an invalid payload: {response.text}")
elif response.status_code != 200:
raise Exception(
f"EdenAI returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
temp = response.json()
embeddings = []
for embed_item in temp[self.provider]["items"]:
embedding = embed_item["embedding"]
embeddings.append(embedding)
return embeddings
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using EdenAI.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self._generate_embeddings(texts)
[docs] def embed_query(self, text: str) -> List[float]:
"""Embed a query using EdenAI.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._generate_embeddings([text])[0]
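The class docstring carries no example, so here is a minimal usage sketch (assumes ``EDENAI_API_KEY`` is set in the environment; the import path is inferred from the module location):

.. code-block:: python

    from langchain.embeddings import EdenAiEmbeddings

    embeddings = EdenAiEmbeddings(provider="openai")
    query_vector = embeddings.embed_query("hello")
    doc_vectors = embeddings.embed_documents(["hello", "world"])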
Source code for langchain.embeddings.huggingface_hub
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
DEFAULT_REPO_ID = "sentence-transformers/all-mpnet-base-v2"
VALID_TASKS = ("feature-extraction",)
[docs]class HuggingFaceHubEmbeddings(BaseModel, Embeddings):
"""HuggingFaceHub embedding models.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import HuggingFaceHubEmbeddings
repo_id = "sentence-transformers/all-mpnet-base-v2"
hf = HuggingFaceHubEmbeddings(
repo_id=repo_id,
task="feature-extraction",
huggingfacehub_api_token="my-api-key",
)
"""
client: Any #: :meta private:
repo_id: str = DEFAULT_REPO_ID
"""Model name to use."""
task: Optional[str] = "feature-extraction"
"""Task to call the model with."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.inference_api import InferenceApi
repo_id = values["repo_id"]
if not repo_id.startswith("sentence-transformers"):
raise ValueError(
"Currently only 'sentence-transformers' embedding models "
f"are supported. Got invalid 'repo_id' {repo_id}."
)
client = InferenceApi(
repo_id=repo_id,
token=huggingfacehub_api_token,
task=values.get("task"),
)
if client.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {client.task}, "
f"currently only {VALID_TASKS} are supported"
)
values["client"] = client
except ImportError:
raise ImportError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to HuggingFaceHub's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
# replace newlines, which can negatively affect performance.
texts = [text.replace("\n", " ") for text in texts]
_model_kwargs = self.model_kwargs or {}
responses = self.client(inputs=texts, params=_model_kwargs)
return responses
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to HuggingFaceHub's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
response = self.embed_documents([text])[0]
return response
Source code for langchain.embeddings.octoai_embeddings
from typing import Any, Dict, List, Mapping, Optional
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
DEFAULT_EMBED_INSTRUCTION = "Represent this input: "
DEFAULT_QUERY_INSTRUCTION = "Represent the question for retrieving similar documents: "
[docs]class OctoAIEmbeddings(BaseModel, Embeddings):
"""OctoAI Compute Service embedding models.
The environment variable ``OCTOAI_API_TOKEN`` should be set
with your API token, or it can be passed
as a named parameter to the constructor.
"""
endpoint_url: Optional[str] = Field(None, description="Endpoint URL to use.")
model_kwargs: Optional[dict] = Field(
None, description="Keyword arguments to pass to the model."
)
octoai_api_token: Optional[str] = Field(None, description="OCTOAI API Token")
embed_instruction: str = Field(
DEFAULT_EMBED_INSTRUCTION,
description="Instruction to use for embedding documents.",
)
query_instruction: str = Field(
DEFAULT_QUERY_INSTRUCTION, description="Instruction to use for embedding query."
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(allow_reuse=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Ensure that the API key and python package exist in environment."""
values["octoai_api_token"] = get_from_dict_or_env(
values, "octoai_api_token", "OCTOAI_API_TOKEN"
)
values["endpoint_url"] = get_from_dict_or_env(
values, "endpoint_url", "ENDPOINT_URL"
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Return the identifying parameters."""
return {
"endpoint_url": self.endpoint_url,
"model_kwargs": self.model_kwargs or {},
}
def _compute_embeddings(
self, texts: List[str], instruction: str
) -> List[List[float]]:
"""Compute embeddings using an OctoAI instruct model."""
from octoai import client
embeddings = []
octoai_client = client.Client(token=self.octoai_api_token)
for text in texts:
parameter_payload = {
"sentence": str([text]), # for item in text]),
"instruction": str([instruction]), # for item in text]),
"parameters": self.model_kwargs or {},
}
try:
resp_json = octoai_client.infer(self.endpoint_url, parameter_payload)
embedding = resp_json["embeddings"]
except Exception as e:
raise ValueError(f"Error raised by the inference endpoint: {e}") from e
embeddings.append(embedding)
return embeddings
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute document embeddings using an OctoAI instruct model."""
texts = list(map(lambda x: x.replace("\n", " "), texts))
return self._compute_embeddings(texts, self.embed_instruction)
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embedding using an OctoAI instruct model."""
text = text.replace("\n", " ")
return self._compute_embeddings([text], self.query_instruction)[0]
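A minimal usage sketch (the endpoint URL is a placeholder; assumes the ``octoai`` package is installed and the token is valid):

.. code-block:: python

    from langchain.embeddings.octoai_embeddings import OctoAIEmbeddings

    embeddings = OctoAIEmbeddings(
        endpoint_url="https://instructor.example.octoai.cloud/predict",  # placeholder
        octoai_api_token="my-api-token",
    )
    query_vector = embeddings.embed_query("hello")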
Source code for langchain.embeddings.base
from abc import ABC, abstractmethod
from typing import List
[docs]class Embeddings(ABC):
"""Interface for embedding models."""
[docs] @abstractmethod
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed search docs."""
[docs] @abstractmethod
def embed_query(self, text: str) -> List[float]:
"""Embed query text."""
[docs] async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
"""Asynchronous Embed search docs."""
raise NotImplementedError
[docs] async def aembed_query(self, text: str) -> List[float]:
"""Asynchronous Embed query text."""
raise NotImplementedError
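For illustration, a minimal (hypothetical) implementation of this interface; a real model would call an encoder instead of counting characters:

.. code-block:: python

    class CharCountEmbeddings(Embeddings):
        """Toy model: a one-dimensional 'embedding' equal to the text length."""

        def embed_documents(self, texts: List[str]) -> List[List[float]]:
            return [[float(len(t))] for t in texts]

        def embed_query(self, text: str) -> List[float]:
            return [float(len(text))]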
Source code for langchain.embeddings.xinference
"""Wrapper around Xinference embedding models."""
from typing import Any, List, Optional
from langchain.embeddings.base import Embeddings
[docs]class XinferenceEmbeddings(Embeddings):
"""Wrapper around xinference embedding models.
To use, you should have the xinference library installed:
.. code-block:: bash
pip install xinference
Check out: https://github.com/xorbitsai/inference
To run, you need to start a Xinference supervisor on one server and Xinference workers on the other servers
Example:
To start a local instance of Xinference, run
.. code-block:: bash
$ xinference
You can also deploy Xinference in a distributed cluster. Here are the steps:
Starting the supervisor:
.. code-block:: bash
$ xinference-supervisor
Starting the worker:
.. code-block:: bash
$ xinference-worker
Then, launch a model using command line interface (CLI).
Example:
.. code-block:: bash
$ xinference launch -n orca -s 3 -q q4_0
It will return a model UID. Then you can use Xinference Embedding with LangChain.
Example:
.. code-block:: python
from langchain.embeddings import XinferenceEmbeddings
xinference = XinferenceEmbeddings(
server_url="http://0.0.0.0:9997",
model_uid = {model_uid} # replace model_uid with the model UID returned from launching the model
)
""" # noqa: E501
client: Any
server_url: Optional[str]
"""URL of the xinference server"""
model_uid: Optional[str]
"""UID of the launched model"""
[docs] def __init__(
self, server_url: Optional[str] = None, model_uid: Optional[str] = None
):
try:
from xinference.client import RESTfulClient
except ImportError as e:
raise ImportError(
"Could not import RESTfulClient from xinference. Please install it"
" with `pip install xinference`."
) from e
super().__init__()
if server_url is None:
raise ValueError("Please provide server URL")
if model_uid is None:
raise ValueError("Please provide the model UID")
self.server_url = server_url
self.model_uid = model_uid
self.client = RESTfulClient(server_url)
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using Xinference.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
model = self.client.get_model(self.model_uid)
embeddings = [
model.create_embedding(text)["data"][0]["embedding"] for text in texts
]
return [list(map(float, e)) for e in embeddings]
[docs] def embed_query(self, text: str) -> List[float]:
"""Embed a query of documents using Xinference.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
model = self.client.get_model(self.model_uid)
embedding_res = model.create_embedding(text)
embedding = embedding_res["data"][0]["embedding"]
return list(map(float, embedding))
Source code for langchain.embeddings.awa
from typing import Any, Dict, List
from pydantic import BaseModel, root_validator
from langchain.embeddings.base import Embeddings
[docs]class AwaEmbeddings(BaseModel, Embeddings):
client: Any #: :meta private:
model: str = "all-mpnet-base-v2"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that awadb library is installed."""
try:
from awadb import AwaEmbedding
except ImportError as exc:
raise ImportError(
"Could not import awadb library. "
"Please install it with `pip install awadb`"
) from exc
values["client"] = AwaEmbedding()
return values
[docs] def set_model(self, model_name: str) -> None:
"""Set the model used for embedding.
The default model used is all-mpnet-base-v2
Args:
model_name: A string representing the name of the model.
"""
self.model = model_name
self.client.model_name = model_name
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using AwaEmbedding.
Args:
texts: The list of texts to be embedded.
Returns:
List of embeddings, one for each text.
"""
return self.client.EmbeddingBatch(texts)
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using AwaEmbedding.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.client.Embedding(text)
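A minimal usage sketch (assumes the ``awadb`` package is installed; the import path follows the module location):

.. code-block:: python

    from langchain.embeddings.awa import AwaEmbeddings

    embeddings = AwaEmbeddings()
    embeddings.set_model("all-mpnet-base-v2")
    query_vector = embeddings.embed_query("hello")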
Source code for langchain.embeddings.modelscope_hub
from typing import Any, List, Optional
from pydantic import BaseModel, Extra
from langchain.embeddings.base import Embeddings
[docs]class ModelScopeEmbeddings(BaseModel, Embeddings):
"""ModelScopeHub embedding models.
To use, you should have the ``modelscope`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import ModelScopeEmbeddings
model_id = "damo/nlp_corom_sentence-embedding_english-base"
embed = ModelScopeEmbeddings(model_id=model_id, model_revision="v1.0.0")
"""
embed: Any
model_id: str = "damo/nlp_corom_sentence-embedding_english-base"
"""Model name to use."""
model_revision: Optional[str] = None
def __init__(self, **kwargs: Any):
"""Initialize the modelscope"""
super().__init__(**kwargs)
try:
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
except ImportError as e:
raise ImportError(
"Could not import some python packages."
"Please install it with `pip install modelscope`."
) from e
self.embed = pipeline(
Tasks.sentence_embedding,
model=self.model_id,
model_revision=self.model_revision,
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a modelscope embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
inputs = {"source_sentence": texts}
embeddings = self.embed(input=inputs)["text_embedding"]
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a modelscope embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
inputs = {"source_sentence": [text]}
embedding = self.embed(input=inputs)["text_embedding"][0]
return embedding.tolist()
Source code for langchain.embeddings.fake
import hashlib
from typing import List
import numpy as np
from pydantic import BaseModel
from langchain.embeddings.base import Embeddings
[docs]class FakeEmbeddings(Embeddings, BaseModel):
"""Fake embedding model."""
size: int
"""The size of the embedding vector."""
def _get_embedding(self) -> List[float]:
return list(np.random.normal(size=self.size))
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
return [self._get_embedding() for _ in texts]
[docs] def embed_query(self, text: str) -> List[float]:
return self._get_embedding()
[docs]class DeterministicFakeEmbedding(Embeddings, BaseModel):
"""
Fake embedding model that always returns
the same embedding vector for the same text.
"""
size: int
"""The size of the embedding vector."""
def _get_embedding(self, seed: int) -> List[float]:
# set the seed for the random generator
np.random.seed(seed)
return list(np.random.normal(size=self.size))
def _get_seed(self, text: str) -> int:
"""
Get a seed for the random generator, using the hash of the text.
"""
return int(hashlib.sha256(text.encode("utf-8")).hexdigest(), 16) % 10**8
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
return [self._get_embedding(seed=self._get_seed(text)) for text in texts]
[docs] def embed_query(self, text: str) -> List[float]:
return self._get_embedding(seed=self._get_seed(text))
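A quick sketch of the difference between the two fakes: ``FakeEmbeddings`` draws fresh random vectors on every call, while ``DeterministicFakeEmbedding`` seeds the generator from a hash of the text, so identical texts map to identical vectors.

.. code-block:: python

    embedder = DeterministicFakeEmbedding(size=8)
    a = embedder.embed_query("hello")
    b = embedder.embed_query("hello")
    assert a == b  # same text -> same vector
    assert a != embedder.embed_query("world")  # different text -> (almost surely) different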
Source code for langchain.embeddings.google_palm
from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
from pydantic import BaseModel, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator() -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions"""
import google.api_core.exceptions
multiplier = 2
min_seconds = 1
max_seconds = 60
max_retries = 10
return retry(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(google.api_core.exceptions.ResourceExhausted)
| retry_if_exception_type(google.api_core.exceptions.ServiceUnavailable)
| retry_if_exception_type(google.api_core.exceptions.GoogleAPIError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
[docs]def embed_with_retry(
embeddings: GooglePalmEmbeddings, *args: Any, **kwargs: Any
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
def _embed_with_retry(*args: Any, **kwargs: Any) -> Any:
return embeddings.client.generate_embeddings(*args, **kwargs)
return _embed_with_retry(*args, **kwargs)
[docs]class GooglePalmEmbeddings(BaseModel, Embeddings):
"""Google's PaLM Embeddings APIs."""
client: Any
google_api_key: Optional[str]
model_name: str = "models/embedding-gecko-001"
"""Model name to use."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate api key, python package exists."""
google_api_key = get_from_dict_or_env(
values, "google_api_key", "GOOGLE_API_KEY"
)
try:
import google.generativeai as genai
genai.configure(api_key=google_api_key)
except ImportError:
raise ImportError("Could not import google.generativeai python package.")
values["client"] = genai
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
return [self.embed_query(text) for text in texts]
[docs] def embed_query(self, text: str) -> List[float]:
"""Embed query text."""
embedding = embed_with_retry(self, self.model_name, text)
return embedding["embedding"]
Source code for langchain.embeddings.clarifai
import logging
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class ClarifaiEmbeddings(BaseModel, Embeddings):
"""Clarifai embedding models.
To use, you should have the ``clarifai`` python package installed, and the
environment variable ``CLARIFAI_PAT`` set with your personal access token or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import ClarifaiEmbeddings
clarifai = ClarifaiEmbeddings(
model="embed-english-light-v2.0", clarifai_api_key="my-api-key"
)
"""
stub: Any #: :meta private:
"""Clarifai stub."""
userDataObject: Any
"""Clarifai user data object."""
model_id: Optional[str] = None
"""Model id to use."""
model_version_id: Optional[str] = None
"""Model version id to use."""
app_id: Optional[str] = None
"""Clarifai application id to use."""
user_id: Optional[str] = None
"""Clarifai user id to use."""
pat: Optional[str] = None
"""Clarifai personal access token to use."""
api_base: str = "https://api.clarifai.com"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["pat"] = get_from_dict_or_env(values, "pat", "CLARIFAI_PAT")
user_id = values.get("user_id")
app_id = values.get("app_id")
model_id = values.get("model_id")
if values["pat"] is None:
raise ValueError("Please provide a pat.")
if user_id is None:
raise ValueError("Please provide a user_id.")
if app_id is None:
raise ValueError("Please provide a app_id.")
if model_id is None:
raise ValueError("Please provide a model_id.")
try:
from clarifai.auth.helper import ClarifaiAuthHelper
from clarifai.client import create_stub
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
auth = ClarifaiAuthHelper(
user_id=user_id,
app_id=app_id,
pat=values["pat"],
base=values["api_base"],
)
values["userDataObject"] = auth.get_user_app_id_proto()
values["stub"] = create_stub(auth)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Clarifai's embedding models.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
try:
from clarifai_grpc.grpc.api import (
resources_pb2,
service_pb2,
)
from clarifai_grpc.grpc.api.status import status_code_pb2
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
post_model_outputs_request = service_pb2.PostModelOutputsRequest(
user_app_id=self.userDataObject,
model_id=self.model_id,
version_id=self.model_version_id,
inputs=[
resources_pb2.Input(
data=resources_pb2.Data(text=resources_pb2.Text(raw=t))
)
for t in texts
],
)
post_model_outputs_response = self.stub.PostModelOutputs(
post_model_outputs_request
)
if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
logger.error(post_model_outputs_response.status)
first_output_failure = (
post_model_outputs_response.outputs[0].status
if len(post_model_outputs_response.outputs)
else None
)
raise Exception(
f"Post model outputs failed, status: "
f"{post_model_outputs_response.status}, first output failure: "
f"{first_output_failure}"
)
embeddings = [
list(o.data.embeddings[0].vector)
for o in post_model_outputs_response.outputs
]
return embeddings
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to Clarifai's embedding models.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
try:
from clarifai_grpc.grpc.api import (
resources_pb2,
service_pb2,
)
from clarifai_grpc.grpc.api.status import status_code_pb2
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
post_model_outputs_request = service_pb2.PostModelOutputsRequest(
user_app_id=self.userDataObject,
model_id=self.model_id,
version_id=self.model_version_id,
inputs=[
resources_pb2.Input(
data=resources_pb2.Data(text=resources_pb2.Text(raw=text))
)
],
)
post_model_outputs_response = self.stub.PostModelOutputs(
post_model_outputs_request
)
if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
logger.error(post_model_outputs_response.status)
first_output_failure = (
post_model_outputs_response.outputs[0].status
if len(post_model_outputs_response.outputs)
else None
)
raise Exception(
f"Post model outputs failed, status: "
f"{post_model_outputs_response.status}, first output failure: "
f"{first_output_failure}"
)
embeddings = [
list(o.data.embeddings[0].vector)
for o in post_model_outputs_response.outputs
]
return embeddings[0]
Source code for langchain.embeddings.localai
from __future__ import annotations
import logging
import warnings
from typing import (
Any,
Callable,
Dict,
List,
Literal,
Optional,
Sequence,
Set,
Tuple,
Union,
)
from pydantic import BaseModel, Extra, Field, root_validator
from tenacity import (
AsyncRetrying,
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env, get_pydantic_field_names
logger = logging.getLogger(__name__)
def _create_retry_decorator(embeddings: LocalAIEmbeddings) -> Callable[[Any], Any]:
import openai
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def _async_retry_decorator(embeddings: LocalAIEmbeddings) -> Any:
import openai
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
async_retrying = AsyncRetrying(
reraise=True,
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def wrap(func: Callable) -> Callable:
async def wrapped_f(*args: Any, **kwargs: Any) -> Any:
async for _ in async_retrying:
return await func(*args, **kwargs)
raise AssertionError("this is unreachable")
return wrapped_f
return wrap
# https://stackoverflow.com/questions/76469415/getting-embeddings-of-length-1-from-langchain-openaiembeddings
def _check_response(response: dict) -> dict:
if any(len(d["embedding"]) == 1 for d in response["data"]):
import openai
raise openai.error.APIError("LocalAI API returned an empty embedding")
return response
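For reference, a small sketch of the response shape ``_check_response`` guards against; the payloads below are hypothetical examples of the OpenAI-compatible embeddings format:
.. code-block:: python

    # Well-formed response: one entry per input, each with a full-length vector.
    ok = {"data": [{"embedding": [0.1, 0.2, 0.3]}]}
    assert _check_response(ok) is ok

    # Degenerate LocalAI response with a length-1 embedding: _check_response
    # raises openai.error.APIError instead of passing it through.
    bad = {"data": [{"embedding": [0.0]}]}
    try:
        _check_response(bad)
    except Exception as err:
        print(type(err).__name__)  # APIError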
[docs]def embed_with_retry(embeddings: LocalAIEmbeddings, **kwargs: Any) -> Any:
"""Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/localai.html
|
333bd69d1b98-2
|
retry_decorator = _create_retry_decorator(embeddings)
@retry_decorator
def _embed_with_retry(**kwargs: Any) -> Any:
response = embeddings.client.create(**kwargs)
return _check_response(response)
return _embed_with_retry(**kwargs)
[docs]async def async_embed_with_retry(embeddings: LocalAIEmbeddings, **kwargs: Any) -> Any:
"""Use tenacity to retry the embedding call."""
@_async_retry_decorator(embeddings)
async def _async_embed_with_retry(**kwargs: Any) -> Any:
response = await embeddings.client.acreate(**kwargs)
return _check_response(response)
return await _async_embed_with_retry(**kwargs)
[docs]class LocalAIEmbeddings(BaseModel, Embeddings):
"""LocalAI embedding models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set to a random string. You need to
specify ``OPENAI_API_BASE`` to point to your LocalAI service endpoint.
Example:
.. code-block:: python
from langchain.embeddings import LocalAIEmbeddings
openai = LocalAIEmbeddings(
openai_api_key="random-key",
openai_api_base="http://localhost:8080"
)
"""
client: Any #: :meta private:
model: str = "text-embedding-ada-002"
deployment: str = model
openai_api_version: Optional[str] = None
openai_api_base: Optional[str] = None
# to support explicit proxy for LocalAI
openai_proxy: Optional[str] = None
embedding_ctx_length: int = 8191
"""The maximum number of tokens to embed at once."""
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/localai.html
|
333bd69d1b98-3
|
"""The maximum number of tokens to embed at once."""
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
allowed_special: Union[Literal["all"], Set[str]] = set()
disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all"
chunk_size: int = 1000
"""Maximum number of texts to embed in each batch"""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout in seconds for the LocalAI request."""
headers: Any = None
show_progress_bar: bool = False
"""Whether to show a progress bar when embedding."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
warnings.warn(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/localai.html
|
333bd69d1b98-4
|
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["openai_api_key"] = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
values["openai_api_base"] = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
default_api_version = ""
values["openai_api_version"] = get_from_dict_or_env(
values,
"openai_api_version",
"OPENAI_API_VERSION",
default=default_api_version,
)
values["openai_organization"] = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
values["client"] = openai.Embedding
except ImportError:
raise ImportError(
"Could not import openai python package. "
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/localai.html
|
333bd69d1b98-5
|
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def _invocation_params(self) -> Dict:
openai_args = {
"model": self.model,
"request_timeout": self.request_timeout,
"headers": self.headers,
"api_key": self.openai_api_key,
"organization": self.openai_organization,
"api_base": self.openai_api_base,
"api_version": self.openai_api_version,
**self.model_kwargs,
}
if self.openai_proxy:
import openai
openai.proxy = {
"http": self.openai_proxy,
"https": self.openai_proxy,
} # type: ignore[assignment] # noqa: E501
return openai_args
def _embedding_func(self, text: str, *, engine: str) -> List[float]:
"""Call out to LocalAI's embedding endpoint."""
# handle large input text
if self.model.endswith("001"):
# See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
return embed_with_retry(
self,
input=[text],
**self._invocation_params,
)["data"][
0
]["embedding"]
async def _aembedding_func(self, text: str, *, engine: str) -> List[float]:
"""Call out to LocalAI's embedding endpoint."""
# handle large input text
if self.model.endswith("001"):
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/localai.html
|
333bd69d1b98-6
|
# handle large input text
if self.model.endswith("001"):
# See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
return (
await async_embed_with_retry(
self,
input=[text],
**self._invocation_params,
)
)["data"][0]["embedding"]
[docs] def embed_documents(
self, texts: List[str], chunk_size: Optional[int] = 0
) -> List[List[float]]:
"""Call out to LocalAI's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns:
List of embeddings, one for each text.
"""
# call _embedding_func for each text
return [self._embedding_func(text, engine=self.deployment) for text in texts]
[docs] async def aembed_documents(
self, texts: List[str], chunk_size: Optional[int] = 0
) -> List[List[float]]:
"""Call out to LocalAI's embedding endpoint async for embedding search docs.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns:
List of embeddings, one for each text.
"""
embeddings = []
for text in texts:
response = await self._aembedding_func(text, engine=self.deployment)
embeddings.append(response)
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/localai.html
|
333bd69d1b98-7
|
embeddings.append(response)
return embeddings
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to LocalAI's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
embedding = self._embedding_func(text, engine=self.deployment)
return embedding
[docs] async def aembed_query(self, text: str) -> List[float]:
"""Call out to LocalAI's embedding endpoint async for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
embedding = await self._aembedding_func(text, engine=self.deployment)
return embedding
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/localai.html
|
df08c3d633f1-0
|
Source code for langchain.embeddings.dashscope
from __future__ import annotations
import logging
from typing import (
Any,
Callable,
Dict,
List,
Optional,
)
from pydantic import BaseModel, Extra, root_validator
from requests.exceptions import HTTPError
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator(embeddings: DashScopeEmbeddings) -> Callable[[Any], Any]:
multiplier = 1
min_seconds = 1
max_seconds = 4
# Wait 2^x * 1 second between each retry starting with
# 1 seconds, then up to 4 seconds, then 4 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type(HTTPError)),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
[docs]def embed_with_retry(embeddings: DashScopeEmbeddings, **kwargs: Any) -> Any:
"""Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
@retry_decorator
def _embed_with_retry(**kwargs: Any) -> Any:
resp = embeddings.client.call(**kwargs)
if resp.status_code == 200:
return resp.output["embeddings"]
elif resp.status_code in [400, 401]:
raise ValueError(
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/dashscope.html
|
df08c3d633f1-1
|
elif resp.status_code in [400, 401]:
raise ValueError(
f"status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}"
)
else:
raise HTTPError(
f"HTTP error occurred: status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}"
)
return _embed_with_retry(**kwargs)
[docs]class DashScopeEmbeddings(BaseModel, Embeddings):
"""DashScope embedding models.
To use, you should have the ``dashscope`` python package installed, and the
environment variable ``DASHSCOPE_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import DashScopeEmbeddings
embeddings = DashScopeEmbeddings(dashscope_api_key="my-api-key")
Example:
.. code-block:: python
import os
os.environ["DASHSCOPE_API_KEY"] = "your DashScope API KEY"
from langchain.embeddings.dashscope import DashScopeEmbeddings
embeddings = DashScopeEmbeddings(
model="text-embedding-v1",
)
text = "This is a test query."
query_result = embeddings.embed_query(text)
"""
client: Any #: :meta private:
"""The DashScope client."""
model: str = "text-embedding-v1"
dashscope_api_key: Optional[str] = None
max_retries: int = 5
"""Maximum number of retries to make when generating."""
class Config:
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/dashscope.html
|
df08c3d633f1-2
|
"""Maximum number of retries to make when generating."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exist in environment."""
values["dashscope_api_key"] = get_from_dict_or_env(
values, "dashscope_api_key", "DASHSCOPE_API_KEY"
)
try:
import dashscope
dashscope.api_key = values["dashscope_api_key"]
values["client"] = dashscope.TextEmbedding
except ImportError:
raise ImportError(
"Could not import dashscope python package. "
"Please install it with `pip install dashscope`."
)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to DashScope's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = embed_with_retry(
self, input=texts, text_type="document", model=self.model
)
embedding_list = [item["embedding"] for item in embeddings]
return embedding_list
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to DashScope's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/dashscope.html
|
df08c3d633f1-3
|
Returns:
Embedding for the text.
"""
embedding = embed_with_retry(
self, input=text, text_type="query", model=self.model
)[0]["embedding"]
return embedding
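A short usage sketch; note that ``embed_documents`` and ``embed_query`` differ only in the ``text_type`` they send to the API (``document`` vs ``query``):
.. code-block:: python

    from langchain.embeddings.dashscope import DashScopeEmbeddings

    # DASHSCOPE_API_KEY is read from the environment if not passed explicitly.
    embeddings = DashScopeEmbeddings(model="text-embedding-v1")
    doc_vectors = embeddings.embed_documents(["doc one", "doc two"])  # text_type="document"
    query_vector = embeddings.embed_query("a question")               # text_type="query"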
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/dashscope.html
|
8f0aa746719d-0
|
Source code for langchain.embeddings.huggingface
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, Field
from langchain.embeddings.base import Embeddings
DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_BGE_MODEL = "BAAI/bge-large-en"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
"Represent the question for retrieving supporting documents: "
)
DEFAULT_QUERY_BGE_INSTRUCTION_EN = (
"Represent this question for searching relevant passages: "
)
DEFAULT_QUERY_BGE_INSTRUCTION_ZH = "为这个句子生成表示以用于检索相关文章:"
[docs]class HuggingFaceEmbeddings(BaseModel, Embeddings):
"""HuggingFace sentence_transformers embedding models.
To use, you should have the ``sentence_transformers`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import HuggingFaceEmbeddings
model_name = "sentence-transformers/all-mpnet-base-v2"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': False}
hf = HuggingFaceEmbeddings(
model_name=model_name,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs
)
"""
client: Any #: :meta private:
model_name: str = DEFAULT_MODEL_NAME
"""Model name to use."""
cache_folder: Optional[str] = None
"""Path to store models.
Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html
|
8f0aa746719d-1
|
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Key word arguments to pass to the model."""
encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Key word arguments to pass when calling the `encode` method of the model."""
def __init__(self, **kwargs: Any):
"""Initialize the sentence_transformer."""
super().__init__(**kwargs)
try:
import sentence_transformers
except ImportError as exc:
raise ImportError(
"Could not import sentence_transformers python package. "
"Please install it with `pip install sentence_transformers`."
) from exc
self.client = sentence_transformers.SentenceTransformer(
self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace transformer model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
embeddings = self.client.encode(texts, **self.encode_kwargs)
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
embedding = self.client.encode(text, **self.encode_kwargs)
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html
|
8f0aa746719d-2
|
embedding = self.client.encode(text, **self.encode_kwargs)
return embedding.tolist()
[docs]class HuggingFaceInstructEmbeddings(BaseModel, Embeddings):
"""Wrapper around sentence_transformers embedding models.
To use, you should have the ``sentence_transformers``
and ``InstructorEmbedding`` python packages installed.
Example:
.. code-block:: python
from langchain.embeddings import HuggingFaceInstructEmbeddings
model_name = "hkunlp/instructor-large"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': True}
hf = HuggingFaceInstructEmbeddings(
model_name=model_name,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs
)
"""
client: Any #: :meta private:
model_name: str = DEFAULT_INSTRUCT_MODEL
"""Model name to use."""
cache_folder: Optional[str] = None
"""Path to store models.
Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Key word arguments to pass to the model."""
encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Key word arguments to pass when calling the `encode` method of the model."""
embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
"""Instruction to use for embedding documents."""
query_instruction: str = DEFAULT_QUERY_INSTRUCTION
"""Instruction to use for embedding query."""
def __init__(self, **kwargs: Any):
"""Initialize the sentence_transformer."""
super().__init__(**kwargs)
try:
from InstructorEmbedding import INSTRUCTOR
self.client = INSTRUCTOR(
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html
|
8f0aa746719d-3
|
from InstructorEmbedding import INSTRUCTOR
self.client = INSTRUCTOR(
self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
)
except ImportError as e:
raise ImportError(
"Could not import InstructorEmbedding python package. "
"Please install it with `pip install InstructorEmbedding`."
) from e
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace instruct model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = [[self.embed_instruction, text] for text in texts]
embeddings = self.client.encode(instruction_pairs, **self.encode_kwargs)
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace instruct model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = [self.query_instruction, text]
embedding = self.client.encode([instruction_pair], **self.encode_kwargs)[0]
return embedding.tolist()
[docs]class HuggingFaceBgeEmbeddings(BaseModel, Embeddings):
"""HuggingFace BGE sentence_transformers embedding models.
To use, you should have the ``sentence_transformers`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import HuggingFaceBgeEmbeddings
model_name = "BAAI/bge-large-en"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': True}
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html
|
8f0aa746719d-4
|
encode_kwargs = {'normalize_embeddings': True}
hf = HuggingFaceBgeEmbeddings(
model_name=model_name,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs
)
"""
client: Any #: :meta private:
model_name: str = DEFAULT_BGE_MODEL
"""Model name to use."""
cache_folder: Optional[str] = None
"""Path to store models.
Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Key word arguments to pass to the model."""
encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Key word arguments to pass when calling the `encode` method of the model."""
query_instruction: str = DEFAULT_QUERY_BGE_INSTRUCTION_EN
"""Instruction to use for embedding query."""
def __init__(self, **kwargs: Any):
"""Initialize the sentence_transformer."""
super().__init__(**kwargs)
try:
import sentence_transformers
except ImportError as exc:
raise ImportError(
"Could not import sentence_transformers python package. "
"Please install it with `pip install sentence_transformers`."
) from exc
self.client = sentence_transformers.SentenceTransformer(
self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
)
if "-zh" in self.model_name:
self.query_instruction = DEFAULT_QUERY_BGE_INSTRUCTION_ZH
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace transformer model.
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html
|
8f0aa746719d-5
|
"""Compute doc embeddings using a HuggingFace transformer model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = [t.replace("\n", " ") for t in texts]
embeddings = self.client.encode(texts, **self.encode_kwargs)
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
embedding = self.client.encode(
self.query_instruction + text, **self.encode_kwargs
)
return embedding.tolist()
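A usage sketch for the BGE class above; the query instruction is prepended automatically by ``embed_query`` but not by ``embed_documents``:
.. code-block:: python

    from langchain.embeddings import HuggingFaceBgeEmbeddings

    hf = HuggingFaceBgeEmbeddings(
        model_name="BAAI/bge-large-en",
        model_kwargs={"device": "cpu"},
        encode_kwargs={"normalize_embeddings": True},
    )
    query_vec = hf.embed_query("what is a panda?")  # instruction + text
    doc_vecs = hf.embed_documents(["The giant panda is a bear native to China."])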
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html
|
e53ce664eea6-0
|
Source code for langchain.embeddings.self_hosted
from typing import Any, Callable, List
from pydantic import Extra
from langchain.embeddings.base import Embeddings
from langchain.llms import SelfHostedPipeline
def _embed_documents(pipeline: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
"""Inference function to send to the remote hardware.
Accepts a sentence_transformer model_id and
returns a list of embeddings for each document in the batch.
"""
return pipeline(*args, **kwargs)
[docs]class SelfHostedEmbeddings(SelfHostedPipeline, Embeddings):
"""Custom embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Example using a model load function:
.. code-block:: python
from langchain.embeddings import SelfHostedEmbeddings
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
def get_pipeline():
model_id = "facebook/bart-large"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
return pipeline("feature-extraction", model=model, tokenizer=tokenizer)
embeddings = SelfHostedEmbeddings(
model_load_fn=get_pipeline,
hardware=gpu
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html
|
e53ce664eea6-1
|
model_load_fn=get_pipeline,
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
Example passing in a pipeline path:
.. code-block:: python
from langchain.embeddings import SelfHostedHFEmbeddings
import runhouse as rh
import pickle
from transformers import pipeline
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
pipeline = pipeline(model="bert-base-uncased", task="feature-extraction")
rh.blob(pickle.dumps(pipeline),
path="models/pipeline.pkl").save().to(gpu, path="models")
embeddings = SelfHostedHFEmbeddings.from_pipeline(
pipeline="models/pipeline.pkl",
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
"""
inference_fn: Callable = _embed_documents
"""Inference function to extract the embeddings on the remote hardware."""
inference_kwargs: Any = None
"""Any kwargs to pass to the model's inference function."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace transformer model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
embeddings = self.client(self.pipeline_ref, texts)
if not isinstance(embeddings, list):
return embeddings.tolist()
return embeddings
[docs] def embed_query(self, text: str) -> List[float]:
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html
|
e53ce664eea6-2
|
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
embeddings = self.client(self.pipeline_ref, text)
if not isinstance(embeddings, list):
return embeddings.tolist()
return embeddings
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html
|
f4468a3ca135-0
|
Source code for langchain.embeddings.mosaicml
from typing import Any, Dict, List, Mapping, Optional, Tuple
import requests
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
[docs]class MosaicMLInstructorEmbeddings(BaseModel, Embeddings):
"""MosaicML embedding service.
To use, you should have the
environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import MosaicMLInstructorEmbeddings
endpoint_url = (
"https://models.hosted-on.mosaicml.hosting/instructor-large/v1/predict"
)
mosaic_llm = MosaicMLInstructorEmbeddings(
endpoint_url=endpoint_url,
mosaicml_api_token="my-api-key"
)
"""
endpoint_url: str = (
"https://models.hosted-on.mosaicml.hosting/instructor-xl/v1/predict"
)
"""Endpoint URL to use."""
embed_instruction: str = "Represent the document for retrieval: "
"""Instruction used to embed documents."""
query_instruction: str = (
"Represent the question for retrieving supporting documents: "
)
"""Instruction used to embed the query."""
retry_sleep: float = 1.0
"""How long to try sleeping for if a rate limit is encountered"""
mosaicml_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/mosaicml.html
|
f4468a3ca135-1
|
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
mosaicml_api_token = get_from_dict_or_env(
values, "mosaicml_api_token", "MOSAICML_API_TOKEN"
)
values["mosaicml_api_token"] = mosaicml_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"endpoint_url": self.endpoint_url}
def _embed(
self, input: List[Tuple[str, str]], is_retry: bool = False
) -> List[List[float]]:
payload = {"input_strings": input}
# HTTP headers for authorization
headers = {
"Authorization": f"{self.mosaicml_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(self.endpoint_url, headers=headers, json=payload)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
try:
parsed_response = response.json()
if "error" in parsed_response:
# if we get rate limited, try sleeping for 1 second
if (
not is_retry
and "rate limit exceeded" in parsed_response["error"].lower()
):
import time
time.sleep(self.retry_sleep)
return self._embed(input, is_retry=True)
raise ValueError(
f"Error raised by inference API: {parsed_response['error']}"
)
# The inference API has changed a couple of times, so we add some handling
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/mosaicml.html
|
f4468a3ca135-2
|
# The inference API has changed a couple of times, so we add some handling
# to be robust to multiple response formats.
if isinstance(parsed_response, dict):
if "data" in parsed_response:
output_item = parsed_response["data"]
elif "output" in parsed_response:
output_item = parsed_response["output"]
else:
raise ValueError(
f"No key data or output in response: {parsed_response}"
)
if isinstance(output_item, list) and isinstance(output_item[0], list):
embeddings = output_item
else:
embeddings = [output_item]
elif isinstance(parsed_response, list):
first_item = parsed_response[0]
if isinstance(first_item, list):
embeddings = parsed_response
elif isinstance(first_item, dict):
if "output" in first_item:
embeddings = [item["output"] for item in parsed_response]
else:
raise ValueError(
f"No key data or output in response: {parsed_response}"
)
else:
raise ValueError(f"Unexpected response format: {parsed_response}")
else:
raise ValueError(f"Unexpected response type: {parsed_response}")
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {response.text}"
)
return embeddings
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed documents using a MosaicML deployed instructor embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/mosaicml.html
|
f4468a3ca135-3
|
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = [(self.embed_instruction, text) for text in texts]
embeddings = self._embed(instruction_pairs)
return embeddings
[docs] def embed_query(self, text: str) -> List[float]:
"""Embed a query using a MosaicML deployed instructor embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = (self.query_instruction, text)
embedding = self._embed([instruction_pair])[0]
return embedding
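For reference, hypothetical payloads illustrating each response shape the ``_embed`` parser above tolerates:
.. code-block:: python

    # dict with "data": a list of vectors
    {"data": [[0.1, 0.2], [0.3, 0.4]]}
    # dict with "output": a single vector, wrapped into a one-element list
    {"output": [0.1, 0.2]}
    # bare list of vectors
    [[0.1, 0.2], [0.3, 0.4]]
    # list of dicts, each holding an "output" vector
    [{"output": [0.1, 0.2]}, {"output": [0.3, 0.4]}]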
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/mosaicml.html
|
6ccab17b937a-0
|
Source code for langchain.embeddings.cohere
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
[docs]class CohereEmbeddings(BaseModel, Embeddings):
"""Cohere embedding models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import CohereEmbeddings
cohere = CohereEmbeddings(
model="embed-english-light-v2.0", cohere_api_key="my-api-key"
)
"""
client: Any #: :meta private:
"""Cohere client."""
async_client: Any #: :meta private:
"""Cohere async client."""
model: str = "embed-english-v2.0"
"""Model name to use."""
truncate: Optional[str] = None
"""Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")"""
cohere_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
try:
import cohere
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/cohere.html
|
6ccab17b937a-1
|
)
try:
import cohere
values["client"] = cohere.Client(cohere_api_key)
values["async_client"] = cohere.AsyncClient(cohere_api_key)
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Cohere's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = self.client.embed(
model=self.model, texts=texts, truncate=self.truncate
).embeddings
return [list(map(float, e)) for e in embeddings]
[docs] async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
"""Async call out to Cohere's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = await self.async_client.embed(
model=self.model, texts=texts, truncate=self.truncate
)
return [list(map(float, e)) for e in embeddings.embeddings]
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to Cohere's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
[docs] async def aembed_query(self, text: str) -> List[float]:
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/cohere.html
|
6ccab17b937a-2
|
[docs] async def aembed_query(self, text: str) -> List[float]:
"""Async call out to Cohere's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embeddings = await self.aembed_documents([text])
return embeddings[0]
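A sketch of the sync and async paths side by side; the API key is a placeholder:
.. code-block:: python

    import asyncio

    from langchain.embeddings import CohereEmbeddings

    cohere = CohereEmbeddings(
        model="embed-english-v2.0", cohere_api_key="my-api-key"
    )

    query_vec = cohere.embed_query("hello")                  # sync client
    aquery_vec = asyncio.run(cohere.aembed_query("hello"))   # async client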
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/cohere.html
|
d4cb9b380e73-0
|
Source code for langchain.embeddings.vertexai
from typing import Dict, List
from pydantic import root_validator
from langchain.embeddings.base import Embeddings
from langchain.llms.vertexai import _VertexAICommon
from langchain.utilities.vertexai import raise_vertex_import_error
[docs]class VertexAIEmbeddings(_VertexAICommon, Embeddings):
"""Google Cloud VertexAI embedding models."""
model_name: str = "textembedding-gecko"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validates that the python package exists in environment."""
cls._try_init_vertexai(values)
try:
from vertexai.preview.language_models import TextEmbeddingModel
except ImportError:
raise_vertex_import_error()
values["client"] = TextEmbeddingModel.from_pretrained(values["model_name"])
return values
[docs] def embed_documents(
self, texts: List[str], batch_size: int = 5
) -> List[List[float]]:
"""Embed a list of strings. Vertex AI currently
sets a max batch size of 5 strings.
Args:
texts: List[str] The list of strings to embed.
batch_size: [int] The batch size of embeddings to send to the model
Returns:
List of embeddings, one for each text.
"""
embeddings = []
for batch in range(0, len(texts), batch_size):
text_batch = texts[batch : batch + batch_size]
embeddings_batch = self.client.get_embeddings(text_batch)
embeddings.extend([el.values for el in embeddings_batch])
return embeddings
[docs] def embed_query(self, text: str) -> List[float]:
"""Embed a text.
Args:
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/vertexai.html
|
d4cb9b380e73-1
|
"""Embed a text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
embeddings = self.client.get_embeddings([text])
return embeddings[0].values
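A sketch showing the batching behaviour of ``embed_documents``: Vertex AI caps each request at 5 strings, so 12 texts become 3 calls:
.. code-block:: python

    from langchain.embeddings import VertexAIEmbeddings

    embeddings = VertexAIEmbeddings()  # "textembedding-gecko" by default
    texts = [f"document {i}" for i in range(12)]
    vectors = embeddings.embed_documents(texts, batch_size=5)
    assert len(vectors) == len(texts)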
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/vertexai.html
|
326180954b71-0
|
Source code for langchain.embeddings.mlflow_gateway
from __future__ import annotations
from typing import Any, Iterator, List, Optional
from pydantic import BaseModel
from langchain.embeddings.base import Embeddings
def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:
for i in range(0, len(texts), size):
yield texts[i : i + size]
[docs]class MlflowAIGatewayEmbeddings(Embeddings, BaseModel):
"""
Wrapper around embedding models served by the MLflow AI Gateway.
To use, you should have the ``mlflow[gateway]`` python package installed.
For more information, see https://mlflow.org/docs/latest/gateway/index.html.
Example:
.. code-block:: python
from langchain.embeddings import MlflowAIGatewayEmbeddings
embeddings = MlflowAIGatewayEmbeddings(
gateway_uri="<your-mlflow-ai-gateway-uri>",
route="<your-mlflow-ai-gateway-embeddings-route>"
)
"""
route: str
"""The route to use for the MLflow AI Gateway API."""
gateway_uri: Optional[str] = None
"""The URI for the MLflow AI Gateway API."""
def __init__(self, **kwargs: Any):
try:
import mlflow.gateway
except ImportError as e:
raise ImportError(
"Could not import `mlflow.gateway` module. "
"Please install it with `pip install mlflow[gateway]`."
) from e
super().__init__(**kwargs)
if self.gateway_uri:
mlflow.gateway.set_gateway_uri(self.gateway_uri)
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/mlflow_gateway.html
|
326180954b71-1
|
if self.gateway_uri:
mlflow.gateway.set_gateway_uri(self.gateway_uri)
def _query(self, texts: List[str]) -> List[List[float]]:
try:
import mlflow.gateway
except ImportError as e:
raise ImportError(
"Could not import `mlflow.gateway` module. "
"Please install it with `pip install mlflow[gateway]`."
) from e
embeddings = []
for txt in _chunk(texts, 20):
resp = mlflow.gateway.query(self.route, data={"text": txt})
embeddings.append(resp["embeddings"])
return embeddings
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
return self._query(texts)
[docs] def embed_query(self, text: str) -> List[float]:
return self._query([text])[0]
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/mlflow_gateway.html
|
d78ef15adf74-0
|
Source code for langchain.embeddings.minimax
from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
import requests
from pydantic import BaseModel, Extra, root_validator
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator() -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator."""
multiplier = 1
min_seconds = 1
max_seconds = 4
max_retries = 6
return retry(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
[docs]def embed_with_retry(embeddings: MiniMaxEmbeddings, *args: Any, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
def _embed_with_retry(*args: Any, **kwargs: Any) -> Any:
return embeddings.embed(*args, **kwargs)
return _embed_with_retry(*args, **kwargs)
[docs]class MiniMaxEmbeddings(BaseModel, Embeddings):
"""MiniMax's embedding service.
To use, you should have the environment variables ``MINIMAX_GROUP_ID`` and
``MINIMAX_API_KEY`` set with your credentials, or pass them as named parameters to
the constructor.
Example:
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/minimax.html
|
d78ef15adf74-1
|
the constructor.
Example:
.. code-block:: python
from langchain.embeddings import MiniMaxEmbeddings
embeddings = MiniMaxEmbeddings()
query_text = "This is a test query."
query_result = embeddings.embed_query(query_text)
document_text = "This is a test document."
document_result = embeddings.embed_documents([document_text])
"""
endpoint_url: str = "https://api.minimax.chat/v1/embeddings"
"""Endpoint URL to use."""
model: str = "embo-01"
"""Embeddings model name to use."""
embed_type_db: str = "db"
"""For embed_documents"""
embed_type_query: str = "query"
"""For embed_query"""
minimax_group_id: Optional[str] = None
"""Group ID for MiniMax API."""
minimax_api_key: Optional[str] = None
"""API Key for MiniMax API."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that group id and api key exists in environment."""
minimax_group_id = get_from_dict_or_env(
values, "minimax_group_id", "MINIMAX_GROUP_ID"
)
minimax_api_key = get_from_dict_or_env(
values, "minimax_api_key", "MINIMAX_API_KEY"
)
values["minimax_group_id"] = minimax_group_id
values["minimax_api_key"] = minimax_api_key
return values
[docs] def embed(
self,
texts: List[str],
embed_type: str,
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/minimax.html
|
d78ef15adf74-2
|
self,
texts: List[str],
embed_type: str,
) -> List[List[float]]:
payload = {
"model": self.model,
"type": embed_type,
"texts": texts,
}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.minimax_api_key}",
"Content-Type": "application/json",
}
params = {
"GroupId": self.minimax_group_id,
}
# send request
response = requests.post(
self.endpoint_url, params=params, headers=headers, json=payload
)
parsed_response = response.json()
# check for errors
if parsed_response["base_resp"]["status_code"] != 0:
raise ValueError(
f"MiniMax API returned an error: {parsed_response['base_resp']}"
)
embeddings = parsed_response["vectors"]
return embeddings
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed documents using a MiniMax embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = embed_with_retry(self, texts=texts, embed_type=self.embed_type_db)
return embeddings
[docs] def embed_query(self, text: str) -> List[float]:
"""Embed a query using a MiniMax embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embeddings = embed_with_retry(
self, texts=[text], embed_type=self.embed_type_query
)
return embeddings[0]
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/minimax.html
|
1984fde0fcea-0
|
Source code for langchain.embeddings.self_hosted_hugging_face
import importlib
import logging
from typing import Any, Callable, List, Optional
from langchain.embeddings.self_hosted import SelfHostedEmbeddings
DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
"Represent the question for retrieving supporting documents: "
)
logger = logging.getLogger(__name__)
def _embed_documents(client: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
"""Inference function to send to the remote hardware.
Accepts a sentence_transformer model_id and
returns a list of embeddings for each document in the batch.
"""
return client.encode(*args, **kwargs)
[docs]def load_embedding_model(model_id: str, instruct: bool = False, device: int = 0) -> Any:
"""Load the embedding model."""
if not instruct:
import sentence_transformers
client = sentence_transformers.SentenceTransformer(model_id)
else:
from InstructorEmbedding import INSTRUCTOR
client = INSTRUCTOR(model_id)
if importlib.util.find_spec("torch") is not None:
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or (device >= cuda_device_count):
raise ValueError(
f"Got device=={device}, "
f"device is required to be within [-1, {cuda_device_count})"
)
if device < 0 and cuda_device_count > 0:
logger.warning(
"Device has %d GPUs available. "
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html
|
1984fde0fcea-1
|
logger.warning(
"Device has %d GPUs available. "
"Provide device={deviceId} to `from_model_id` to use available"
"GPUs for execution. deviceId is -1 for CPU and "
"can be a positive integer associated with CUDA device id.",
cuda_device_count,
)
client = client.to(device)
return client
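For illustration, a direct call to the loader above; the model id and device are example values:
.. code-block:: python

    model = load_embedding_model(
        "sentence-transformers/all-mpnet-base-v2",
        instruct=False,  # True would load an InstructorEmbedding model instead
        device=0,        # CUDA device id, validated against torch.cuda.device_count()
    )
    vectors = model.encode(["hello", "world"])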
[docs]class SelfHostedHuggingFaceEmbeddings(SelfHostedEmbeddings):
"""HuggingFace embedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another cloud
like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import SelfHostedHuggingFaceEmbeddings
import runhouse as rh
model_name = "sentence-transformers/all-mpnet-base-v2"
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
hf = SelfHostedHuggingFaceEmbeddings(model_name=model_name, hardware=gpu)
"""
client: Any #: :meta private:
model_id: str = DEFAULT_MODEL_NAME
"""Model name to use."""
model_reqs: List[str] = ["./", "sentence_transformers", "torch"]
"""Requirements to install on hardware to inference the model."""
hardware: Any
"""Remote hardware to send the inference function to."""
model_load_fn: Callable = load_embedding_model
"""Function to load the model remotely on the server."""
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html
|
1984fde0fcea-2
|
"""Function to load the model remotely on the server."""
load_fn_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model load function."""
inference_fn: Callable = _embed_documents
"""Inference function to extract the embeddings."""
def __init__(self, **kwargs: Any):
"""Initialize the remote inference function."""
load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
load_fn_kwargs["model_id"] = load_fn_kwargs.get("model_id", DEFAULT_MODEL_NAME)
load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", False)
load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0)
super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
[docs]class SelfHostedHuggingFaceInstructEmbeddings(SelfHostedHuggingFaceEmbeddings):
"""HuggingFace InstructEmbedding models on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import SelfHostedHuggingFaceInstructEmbeddings
import runhouse as rh
model_name = "hkunlp/instructor-large"
gpu = rh.cluster(name='rh-a10x', instance_type='A100:1')
hf = SelfHostedHuggingFaceInstructEmbeddings(
model_name=model_name, hardware=gpu)
"""
model_id: str = DEFAULT_INSTRUCT_MODEL
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html
|
1984fde0fcea-3
|
"""
model_id: str = DEFAULT_INSTRUCT_MODEL
"""Model name to use."""
embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
"""Instruction to use for embedding documents."""
query_instruction: str = DEFAULT_QUERY_INSTRUCTION
"""Instruction to use for embedding query."""
model_reqs: List[str] = ["./", "InstructorEmbedding", "torch"]
"""Requirements to install on hardware to inference the model."""
def __init__(self, **kwargs: Any):
"""Initialize the remote inference function."""
load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
load_fn_kwargs["model_id"] = load_fn_kwargs.get(
"model_id", DEFAULT_INSTRUCT_MODEL
)
load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", True)
load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0)
super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace instruct model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = []
for text in texts:
instruction_pairs.append([self.embed_instruction, text])
embeddings = self.client(self.pipeline_ref, instruction_pairs)
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace instruct model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html
|
1984fde0fcea-4
|
Returns:
Embeddings for the text.
"""
instruction_pair = [self.query_instruction, text]
embedding = self.client(self.pipeline_ref, [instruction_pair])[0]
return embedding.tolist()
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html
|
c141d7590258-0
|
Source code for langchain.embeddings.embaas
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import BaseModel, Extra, root_validator
from typing_extensions import NotRequired, TypedDict
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
# Currently supported maximum batch size for embedding requests
MAX_BATCH_SIZE = 256
EMBAAS_API_URL = "https://api.embaas.io/v1/embeddings/"
[docs]class EmbaasEmbeddingsPayload(TypedDict):
"""Payload for the embaas embeddings API."""
model: str
texts: List[str]
instruction: NotRequired[str]
[docs]class EmbaasEmbeddings(BaseModel, Embeddings):
"""Embaas's embedding service.
To use, you should have the
environment variable ``EMBAAS_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
# Initialise with default model and instruction
from langchain.embeddings import EmbaasEmbeddings
emb = EmbaasEmbeddings()
# Initialise with custom model and instruction
from langchain.embeddings import EmbaasEmbeddings
emb_model = "instructor-large"
emb_inst = "Represent the Wikipedia document for retrieval"
emb = EmbaasEmbeddings(
model=emb_model,
instruction=emb_inst
)
"""
model: str = "e5-large-v2"
"""The model used for embeddings."""
instruction: Optional[str] = None
"""Instruction used for domain-specific embeddings."""
api_url: str = EMBAAS_API_URL
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/embaas.html
|
c141d7590258-1
|
api_url: str = EMBAAS_API_URL
"""The URL for the embaas embeddings API."""
embaas_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
embaas_api_key = get_from_dict_or_env(
values, "embaas_api_key", "EMBAAS_API_KEY"
)
values["embaas_api_key"] = embaas_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying params."""
return {"model": self.model, "instruction": self.instruction}
def _generate_payload(self, texts: List[str]) -> EmbaasEmbeddingsPayload:
"""Generates payload for the API request."""
payload = EmbaasEmbeddingsPayload(texts=texts, model=self.model)
if self.instruction:
payload["instruction"] = self.instruction
return payload
def _handle_request(self, payload: EmbaasEmbeddingsPayload) -> List[List[float]]:
"""Sends a request to the Embaas API and handles the response."""
headers = {
"Authorization": f"Bearer {self.embaas_api_key}",
"Content-Type": "application/json",
}
response = requests.post(self.api_url, headers=headers, json=payload)
response.raise_for_status()
parsed_response = response.json()
embeddings = [item["embedding"] for item in parsed_response["data"]]
return embeddings
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/embaas.html
|
c141d7590258-2
|
return embeddings
def _generate_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Generate embeddings using the Embaas API."""
payload = self._generate_payload(texts)
try:
return self._handle_request(payload)
except requests.exceptions.RequestException as e:
if e.response is None or not e.response.text:
raise ValueError(f"Error raised by embaas embeddings API: {e}")
parsed_response = e.response.json()
if "message" in parsed_response:
raise ValueError(
"Validation Error raised by embaas embeddings API:"
f"{parsed_response['message']}"
)
raise
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Get embeddings for a list of texts.
Args:
texts: The list of texts to get embeddings for.
Returns:
List of embeddings, one for each text.
"""
batches = [
texts[i : i + MAX_BATCH_SIZE] for i in range(0, len(texts), MAX_BATCH_SIZE)
]
embeddings = [self._generate_embeddings(batch) for batch in batches]
# flatten the list of lists into a single list
return [embedding for batch in embeddings for embedding in batch]
[docs] def embed_query(self, text: str) -> List[float]:
"""Get embeddings for a single text.
Args:
text: The text to get embeddings for.
Returns:
List of embeddings.
"""
return self.embed_documents([text])[0]
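A sketch of the batching behaviour above: 300 texts exceed ``MAX_BATCH_SIZE`` (256), so ``embed_documents`` issues two requests and flattens the results:
.. code-block:: python

    from langchain.embeddings import EmbaasEmbeddings

    emb = EmbaasEmbeddings()  # EMBAAS_API_KEY read from the environment
    texts = [f"text {i}" for i in range(300)]
    vectors = emb.embed_documents(texts)
    assert len(vectors) == 300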
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/embaas.html
|
6a76f28151bb-0
|
Source code for langchain.embeddings.bedrock
import json
import os
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
[docs]class BedrockEmbeddings(BaseModel, Embeddings):
"""Bedrock embedding models.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Bedrock service.
"""
"""
Example:
.. code-block:: python
from langchain.bedrock_embeddings import BedrockEmbeddings
region_name ="us-east-1"
credentials_profile_name = "default"
model_id = "amazon.titan-e1t-medium"
be = BedrockEmbeddings(
credentials_profile_name=credentials_profile_name,
region_name=region_name,
model_id=model_id
)
"""
client: Any #: :meta private:
"""Bedrock client."""
region_name: Optional[str] = None
"""The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config in case it is not provided here.
"""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/bedrock.html
|
6a76f28151bb-1
|
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
model_id: str = "amazon.titan-e1t-medium"
"""Id of the model to call, e.g., amazon.titan-e1t-medium, this is
equivalent to the modelId property in the list-foundation-models api"""
model_kwargs: Optional[Dict] = None
"""Key word arguments to pass to the model."""
endpoint_url: Optional[str] = None
"""Needed if you don't want to default to us-east-1 endpoint"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
if values["client"] is not None:
return values
try:
import boto3
if values["credentials_profile_name"] is not None:
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
client_params = {}
if values["region_name"]:
client_params["region_name"] = values["region_name"]
if values["endpoint_url"]:
client_params["endpoint_url"] = values["endpoint_url"]
values["client"] = session.client("bedrock", **client_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
|
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/bedrock.html
|
6a76f28151bb-2
|
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
return values
def _embedding_func(self, text: str) -> List[float]:
"""Call out to Bedrock embedding endpoint."""
# replace newlines, which can negatively affect performance.
text = text.replace(os.linesep, " ")
_model_kwargs = self.model_kwargs or {}
input_body = {**_model_kwargs, "inputText": text}
body = json.dumps(input_body)
try:
response = self.client.invoke_model(
body=body,
modelId=self.model_id,
accept="application/json",
contentType="application/json",
)
response_body = json.loads(response.get("body").read())
return response_body.get("embedding")
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
[docs] def embed_documents(
self, texts: List[str], chunk_size: int = 1
) -> List[List[float]]:
"""Compute doc embeddings using a Bedrock model.
Args:
texts: The list of texts to embed.
chunk_size: Bedrock currently only allows single string
inputs, so chunk size is always 1. This input is here
only for compatibility with the embeddings interface.
Returns:
List of embeddings, one for each text.
"""
results = []
for text in texts:
response = self._embedding_func(text)
results.append(response)
return results
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a Bedrock model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embedding_func(text)
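A minimal end-to-end sketch of the two public methods, assuming valid AWS credentials with Bedrock access in the default profile:
.. code-block:: python

    from langchain.embeddings import BedrockEmbeddings

    embeddings = BedrockEmbeddings(region_name="us-east-1")

    # One invoke_model call per text; chunk_size is fixed at 1.
    doc_vectors = embeddings.embed_documents(
        ["First document.", "Second document."]
    )
    query_vector = embeddings.embed_query("What is in the first document?")
    assert len(doc_vectors) == 2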
Source code for langchain.embeddings.llamacpp
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.embeddings.base import Embeddings
[docs]class LlamaCppEmbeddings(BaseModel, Embeddings):
"""llama.cpp embedding models.
To use, you should have the llama-cpp-python library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out: https://github.com/abetlen/llama-cpp-python
Example:
.. code-block:: python
from langchain.embeddings import LlamaCppEmbeddings
llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
"""
client: Any #: :meta private:
model_path: str
n_ctx: int = Field(512, alias="n_ctx")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(-1, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
n_threads: Optional[int] = Field(None, alias="n_threads")
"""Number of threads to use. If None, the number
of threads is automatically determined."""
n_batch: Optional[int] = Field(8, alias="n_batch")
"""Number of tokens to process in parallel.
Should be a number between 1 and n_ctx."""
n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers")
"""Number of layers to be loaded into gpu memory. Default None."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that llama-cpp-python library is installed."""
model_path = values["model_path"]
model_param_names = [
"n_ctx",
"n_parts",
"seed",
"f16_kv",
"logits_all",
"vocab_only",
"use_mlock",
"n_threads",
"n_batch",
]
model_params = {k: values[k] for k in model_param_names}
# For backwards compatibility, only include if non-null.
if values["n_gpu_layers"] is not None:
model_params["n_gpu_layers"] = values["n_gpu_layers"]
try:
from llama_cpp import Llama
values["client"] = Llama(model_path, embedding=True, **model_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import llama-cpp-python library. "
"Please install the llama-cpp-python library to "
"use this embedding model: pip install llama-cpp-python"
)
except Exception as e:
raise ValueError(
f"Could not load Llama model from path: {model_path}. "
f"Received error {e}"
)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using the Llama model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = [self.client.embed(text) for text in texts]
return [list(map(float, e)) for e in embeddings]
[docs] def embed_query(self, text: str) -> List[float]:
"""Embed a query using the Llama model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embedding = self.client.embed(text)
return list(map(float, embedding))
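A minimal usage sketch; the model path is a placeholder for a local model file, and ``n_gpu_layers`` only takes effect with a GPU-enabled build of llama-cpp-python:
.. code-block:: python

    from langchain.embeddings import LlamaCppEmbeddings

    # "/path/to/model.bin" is a placeholder, not a real model file.
    llama = LlamaCppEmbeddings(
        model_path="/path/to/model.bin",
        n_ctx=1024,       # token context window
        n_batch=8,        # tokens processed in parallel, between 1 and n_ctx
        n_gpu_layers=32,  # ignored unless built with GPU support
    )
    doc_vectors = llama.embed_documents(["hello", "world"])
    query_vector = llama.embed_query("greeting")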
Source code for langchain.embeddings.gpt4all
from typing import Any, Dict, List
from pydantic import BaseModel, root_validator
from langchain.embeddings.base import Embeddings
[docs]class GPT4AllEmbeddings(BaseModel, Embeddings):
"""GPT4All embedding models.
To use, you should have the gpt4all python package installed.
Example:
.. code-block:: python
from langchain.embeddings import GPT4AllEmbeddings
embeddings = GPT4AllEmbeddings()
"""
client: Any #: :meta private:
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that GPT4All library is installed."""
try:
from gpt4all import Embed4All
values["client"] = Embed4All()
except ImportError:
raise ImportError(
"Could not import gpt4all library. "
"Please install the gpt4all library to "
"use this embedding model: pip install gpt4all"
)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using GPT4All.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = [self.client.embed(text) for text in texts]
return [list(map(float, e)) for e in embeddings]
[docs] def embed_query(self, text: str) -> List[float]:
"""Embed a query using GPT4All.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
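A minimal sketch, assuming the gpt4all package is installed; ``Embed4All`` may download its default embedding model on first use:
.. code-block:: python

    from langchain.embeddings import GPT4AllEmbeddings

    embeddings = GPT4AllEmbeddings()
    doc_vectors = embeddings.embed_documents(["alpha", "beta"])
    # embed_query reuses embed_documents on a single-element list.
    query_vector = embeddings.embed_query("alpha")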
Source code for langchain.embeddings.tensorflow_hub
from typing import Any, List
from pydantic import BaseModel, Extra
from langchain.embeddings.base import Embeddings
DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
[docs]class TensorflowHubEmbeddings(BaseModel, Embeddings):
"""TensorflowHub embedding models.
To use, you should have the ``tensorflow_text`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import TensorflowHubEmbeddings
url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
tf = TensorflowHubEmbeddings(model_url=url)
"""
embed: Any #: :meta private:
model_url: str = DEFAULT_MODEL_URL
"""Model name to use."""
def __init__(self, **kwargs: Any):
"""Initialize the tensorflow_hub and tensorflow_text."""
super().__init__(**kwargs)
try:
import tensorflow_hub
except ImportError:
raise ImportError(
"Could not import tensorflow-hub python package. "
"Please install it with `pip install tensorflow-hub``."
)
try:
import tensorflow_text # noqa
except ImportError:
raise ImportError(
"Could not import tensorflow_text python package. "
"Please install it with `pip install tensorflow_text``."
)
self.embed = tensorflow_hub.load(self.model_url)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a TensorflowHub embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
embeddings = self.embed(texts).numpy()
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a TensorflowHub embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
embedding = self.embed([text]).numpy()[0]
return embedding.tolist()
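A minimal sketch using the default multilingual model; tensorflow, tensorflow-hub, and tensorflow-text are all assumed to be installed:
.. code-block:: python

    from langchain.embeddings import TensorflowHubEmbeddings

    # Uses DEFAULT_MODEL_URL (universal-sentence-encoder-multilingual/3).
    tf_embed = TensorflowHubEmbeddings()
    doc_vectors = tf_embed.embed_documents(["first text", "second text"])
    query_vector = tf_embed.embed_query("a query")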
Source code for langchain.embeddings.deepinfra
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
DEFAULT_MODEL_ID = "sentence-transformers/clip-ViT-B-32"
[docs]class DeepInfraEmbeddings(BaseModel, Embeddings):
"""Deep Infra's embedding inference service.
To use, you should have the
environment variable ``DEEPINFRA_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
There are multiple embeddings models available,
see https://deepinfra.com/models?type=embeddings.
Example:
.. code-block:: python
from langchain.embeddings import DeepInfraEmbeddings
deepinfra_emb = DeepInfraEmbeddings(
model_id="sentence-transformers/clip-ViT-B-32",
deepinfra_api_token="my-api-key"
)
r1 = deepinfra_emb.embed_documents(
[
"Alpha is the first letter of Greek alphabet",
"Beta is the second letter of Greek alphabet",
]
)
r2 = deepinfra_emb.embed_query(
"What is the second letter of Greek alphabet"
)
"""
model_id: str = DEFAULT_MODEL_ID
"""Embeddings model to use."""
normalize: bool = False
"""whether to normalize the computed embeddings"""
embed_instruction: str = "passage: "
"""Instruction used to embed documents."""
query_instruction: str = "query: "
"""Instruction used to embed the query."""
model_kwargs: Optional[dict] = None
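The instruction fields prepend a prefix to each input in the e5-style passage/query convention; the rest of the class is truncated above, but a sketch of overriding the declared fields (the instruction strings here are hypothetical):
.. code-block:: python

    from langchain.embeddings import DeepInfraEmbeddings

    emb = DeepInfraEmbeddings(
        deepinfra_api_token="my-api-key",  # or set DEEPINFRA_API_TOKEN
        embed_instruction="document: ",    # hypothetical override
        query_instruction="question: ",    # hypothetical override
        normalize=True,
    )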