id stringlengths 14 16 | text stringlengths 44 2.73k | source stringlengths 49 114 |
|---|---|---|
43a4bce2b6b7-4 | )
self.callback_manager.on_text(
str(table_names_to_use), color="yellow", verbose=self.verbose
)
new_inputs = {
self.sql_chain.input_key: inputs[self.input_key],
"table_names_to_use": table_names_to_use,
}
return self.sql_chain(new_inputs, retu... | https://python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html |
ea9765481171-0 | Source code for langchain.chains.conversation.base
"""Chain that carries on a conversation and calls an LLM."""
from typing import Dict, List
from pydantic import Extra, Field, root_validator
from langchain.chains.conversation.prompt import PROMPT
from langchain.chains.llm import LLMChain
from langchain.memory.buffer i... | https://python.langchain.com/en/latest/_modules/langchain/chains/conversation/base.html |
ea9765481171-1 | f"The input key {input_key} was also found in the memory keys "
f"({memory_keys}) - please provide keys that don't overlap."
)
prompt_variables = values["prompt"].input_variables
expected_keys = memory_keys + [input_key]
if set(expected_keys) != set(prompt_variables):... | https://python.langchain.com/en/latest/_modules/langchain/chains/conversation/base.html |
248a46d5109f-0 | Source code for langchain.chains.qa_with_sources.vector_db
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any, Dict, List
from pydantic import Field, root_validator
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_so... | https://python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/vector_db.html |
248a46d5109f-1 | num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]:
question = inputs[self.question_key]
docs = self.vectorstore.similarity_search(
question, k=self.k, **self.search_kwargs
)
... | https://python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/vector_db.html |
6699ad3c863d-0 | Source code for langchain.chains.qa_with_sources.retrieval
"""Question-answering with sources over an index."""
from typing import Any, Dict, List
from pydantic import Field
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
... | https://python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/retrieval.html |
6699ad3c863d-1 | docs = self.retriever.get_relevant_documents(question)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]:
question = inputs[self.question_key]
docs = await self.retriever.aget_relevant_documents(question)
return self._re... | https://python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/retrieval.html |
8ca251376aa5-0 | Source code for langchain.chains.qa_with_sources.base
"""Question answering with sources over documents."""
from __future__ import annotations
import re
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
from pydantic import Extra, root_validator
from langchain.chains.base import Chain
fro... | https://python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/base.html |
8ca251376aa5-1 | combine_prompt: BasePromptTemplate = COMBINE_PROMPT,
**kwargs: Any,
) -> BaseQAWithSourcesChain:
"""Construct the chain from an LLM."""
llm_question_chain = LLMChain(llm=llm, prompt=question_prompt)
llm_combine_chain = LLMChain(llm=llm, prompt=combine_prompt)
combine_results_... | https://python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/base.html |
8ca251376aa5-2 | :meta private:
"""
return [self.question_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
_output_keys = [self.answer_key, self.sources_answer_key]
if self.return_source_documents:
_output_keys = _outp... | https://python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/base.html |
8ca251376aa5-3 | docs = await self._aget_docs(inputs)
answer = await self.combine_documents_chain.arun(input_documents=docs, **inputs)
if re.search(r"SOURCES:\s", answer):
answer, sources = re.split(r"SOURCES:\s", answer)
else:
sources = ""
result: Dict[str, Any] = {
s... | https://python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/base.html |
4c3594fef42b-0 | Source code for langchain.chains.pal.base
"""Implements Program-Aided Language Models.
As in https://arxiv.org/pdf/2211.10435.pdf.
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from pydantic import Extra
from langchain.chains.base import Chain
from langchain.chains.llm import LLMCh... | https://python.langchain.com/en/latest/_modules/langchain/chains/pal/base.html |
4c3594fef42b-1 | else:
return [self.output_key, "intermediate_steps"]
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
llm_chain = LLMChain(llm=self.llm, prompt=self.prompt)
code = llm_chain.predict(stop=[self.stop], **inputs)
self.callback_manager.on_text(
code, color="gree... | https://python.langchain.com/en/latest/_modules/langchain/chains/pal/base.html |
4c3594fef42b-2 | By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Apr 25, 2023. | https://python.langchain.com/en/latest/_modules/langchain/chains/pal/base.html |
a48b7b062e7a-0 | Source code for langchain.output_parsers.retry
from __future__ import annotations
from typing import TypeVar
from langchain.chains.llm import LLMChain
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import (
BaseLanguageModel,
BaseO... | https://python.langchain.com/en/latest/_modules/langchain/output_parsers/retry.html |
a48b7b062e7a-1 | chain = LLMChain(llm=llm, prompt=prompt)
return cls(parser=parser, retry_chain=chain)
[docs] def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
try:
parsed_completion = self.parser.parse(completion)
except OutputParserException:
new_completio... | https://python.langchain.com/en/latest/_modules/langchain/output_parsers/retry.html |
a48b7b062e7a-2 | ) -> RetryWithErrorOutputParser[T]:
chain = LLMChain(llm=llm, prompt=prompt)
return cls(parser=parser, retry_chain=chain)
[docs] def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
try:
parsed_completion = self.parser.parse(completion)
except Outp... | https://python.langchain.com/en/latest/_modules/langchain/output_parsers/retry.html |
0c62f3c8450e-0 | Source code for langchain.output_parsers.pydantic
import json
import re
from typing import Type, TypeVar
from pydantic import BaseModel, ValidationError
from langchain.output_parsers.format_instructions import PYDANTIC_FORMAT_INSTRUCTIONS
from langchain.schema import BaseOutputParser, OutputParserException
T = TypeVar(... | https://python.langchain.com/en/latest/_modules/langchain/output_parsers/pydantic.html |
0c62f3c8450e-1 | @property
def _type(self) -> str:
return "pydantic"
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Apr 25, 2023. | https://python.langchain.com/en/latest/_modules/langchain/output_parsers/pydantic.html |
1e1de6f3100a-0 | Source code for langchain.output_parsers.list
from __future__ import annotations
from abc import abstractmethod
from typing import List
from langchain.schema import BaseOutputParser
[docs]class ListOutputParser(BaseOutputParser):
"""Class to parse the output of an LLM call to a list."""
@property
def _type(... | https://python.langchain.com/en/latest/_modules/langchain/output_parsers/list.html |
4e108b5847a2-0 | Source code for langchain.output_parsers.structured
from __future__ import annotations
import json
from typing import Any, List
from pydantic import BaseModel
from langchain.output_parsers.format_instructions import STRUCTURED_FORMAT_INSTRUCTIONS
from langchain.schema import BaseOutputParser, OutputParserException
line... | https://python.langchain.com/en/latest/_modules/langchain/output_parsers/structured.html |
4e108b5847a2-1 | raise OutputParserException(f"Got invalid JSON object. Error: {e}")
for schema in self.response_schemas:
if schema.name not in json_obj:
raise OutputParserException(
f"Got invalid return object. Expected key `{schema.name}` "
f"to be present, b... | https://python.langchain.com/en/latest/_modules/langchain/output_parsers/structured.html |
d4b284e2a2e4-0 | Source code for langchain.output_parsers.fix
from __future__ import annotations
from typing import TypeVar
from langchain.chains.llm import LLMChain
from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel, BaseOut... | https://python.langchain.com/en/latest/_modules/langchain/output_parsers/fix.html |
131121348fe4-0 | Source code for langchain.output_parsers.regex
from __future__ import annotations
import re
from typing import Dict, List, Optional
from langchain.schema import BaseOutputParser
[docs]class RegexParser(BaseOutputParser):
"""Class to parse the output into a dictionary."""
regex: str
output_keys: List[str]
... | https://python.langchain.com/en/latest/_modules/langchain/output_parsers/regex.html |
f1ab3c408d3f-0 | Source code for langchain.output_parsers.rail_parser
from __future__ import annotations
from typing import Any, Dict
from langchain.schema import BaseOutputParser
[docs]class GuardrailsOutputParser(BaseOutputParser):
guard: Any
@property
def _type(self) -> str:
return "guardrails"
[docs] @classme... | https://python.langchain.com/en/latest/_modules/langchain/output_parsers/rail_parser.html |
592b2ed001ff-0 | Source code for langchain.output_parsers.regex_dict
from __future__ import annotations
import re
from typing import Dict, Optional
from langchain.schema import BaseOutputParser
[docs]class RegexDictParser(BaseOutputParser):
"""Class to parse the output into a dictionary."""
regex_pattern: str = r"{}:\s?([^.'\n'... | https://python.langchain.com/en/latest/_modules/langchain/output_parsers/regex_dict.html |
1f98b1262685-0 | Source code for langchain.embeddings.llamacpp
"""Wrapper around llama.cpp embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.embeddings.base import Embeddings
[docs]class LlamaCppEmbeddings(BaseModel, Embeddings):
"""Wrapper ... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/llamacpp.html |
1f98b1262685-1 | use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
n_threads: Optional[int] = Field(None, alias="n_threads")
"""Number of threads to use. If None, the number
of threads is automatically determined."""
n_batch: Optional[int] = Field(8, alias="n_batch")
"""... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/llamacpp.html |
1f98b1262685-2 | raise ModuleNotFoundError(
"Could not import llama-cpp-python library. "
"Please install the llama-cpp-python library to "
"use this embedding model: pip install llama-cpp-python"
)
except Exception:
raise NameError(f"Could not load Llama m... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/llamacpp.html |
50828bd5a74f-0 | Source code for langchain.embeddings.huggingface
"""Wrapper around HuggingFace embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, Field
from langchain.embeddings.base import Embeddings
DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_M... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html |
50828bd5a74f-1 | raise ValueError(
"Could not import sentence_transformers python package. "
"Please install it with `pip install sentence_transformers`."
) from exc
self.client = sentence_transformers.SentenceTransformer(
self.model_name, cache_folder=self.cache_folder, *... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html |
50828bd5a74f-2 | hf = HuggingFaceInstructEmbeddings(
model_name=model_name, model_kwargs=model_kwargs
)
"""
client: Any #: :meta private:
model_name: str = DEFAULT_INSTRUCT_MODEL
"""Model name to use."""
cache_folder: Optional[str] = None
"""Path to store models.
Can be also set... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html |
50828bd5a74f-3 | [docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace instruct model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = [self.query_instruction, text]
embedd... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html |
d0f38ddda42f-0 | Source code for langchain.embeddings.aleph_alpha
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
[docs]class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings):
"""... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html |
d0f38ddda42f-1 | """Attention control parameters only apply to those tokens that have
explicitly been set in the request."""
control_log_additive: Optional[bool] = True
"""Apply controls on prompt items by adding the log(control_factor)
to attention scores."""
@root_validator()
def validate_environment(cls, va... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html |
d0f38ddda42f-2 | "representation": SemanticRepresentation.Document,
"compress_to_size": self.compress_to_size,
"normalize": self.normalize,
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
}
... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html |
d0f38ddda42f-3 | """The symmetric version of the Aleph Alpha's semantic embeddings.
The main difference is that here, both the documents and
queries are embedded with a SemanticRepresentation.Symmetric
Example:
.. code-block:: python
from aleph_alpha import AlephAlphaSymmetricSemanticEmbedding
... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html |
d0f38ddda42f-4 | """
document_embeddings = []
for text in texts:
document_embeddings.append(self._embed(text))
return document_embeddings
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to Aleph Alpha's asymmetric, query embedding endpoint
Args:
text... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html |
c81dba63ba12-0 | Source code for langchain.embeddings.tensorflow_hub
"""Wrapper around TensorflowHub embedding models."""
from typing import Any, List
from pydantic import BaseModel, Extra
from langchain.embeddings.base import Embeddings
DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
[docs]clas... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/tensorflow_hub.html |
c81dba63ba12-1 | Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
embeddings = self.embed(texts).numpy()
return embeddings.tolist()
[docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using ... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/tensorflow_hub.html |
8ff7d5d6338f-0 | Source code for langchain.embeddings.sagemaker_endpoint
"""Wrapper around Sagemaker InvokeEndpoint API."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.llms.sagemaker_endpoint import ContentHandlerBase
... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html |
8ff7d5d6338f-1 | credentials_profile_name=credentials_profile_name
)
"""
client: Any #: :meta private:
endpoint_name: str = ""
"""The name of the endpoint from the deployed Sagemaker model.
Must be unique within an AWS Region."""
region_name: str = ""
"""The aws region where the Sagemaker model ... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html |
8ff7d5d6338f-2 | """ # noqa: E501
model_kwargs: Optional[Dict] = None
"""Key word arguments to pass to the model."""
endpoint_kwargs: Optional[Dict] = None
"""Optional attributes passed to the invoke_endpoint
function. See `boto3`_. docs for more info.
.. _boto3: <https://boto3.amazonaws.com/v1/documentation/ap... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html |
8ff7d5d6338f-3 | # replace newlines, which can negatively affect performance.
texts = list(map(lambda x: x.replace("\n", " "), texts))
_model_kwargs = self.model_kwargs or {}
_endpoint_kwargs = self.endpoint_kwargs or {}
body = self.content_handler.transform_input(texts, _model_kwargs)
content_ty... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html |
8ff7d5d6338f-4 | """Compute query embeddings using a SageMaker inference endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embedding_func([text])[0]
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Ap... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html |
d88e06bce2db-0 | Source code for langchain.embeddings.huggingface_hub
"""Wrapper around HuggingFace Hub embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
DEFAULT_REPO_ID... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface_hub.html |
d88e06bce2db-1 | @root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface_hub.html |
d88e06bce2db-2 | texts = [text.replace("\n", " ") for text in texts]
_model_kwargs = self.model_kwargs or {}
responses = self.client(inputs=texts, params=_model_kwargs)
return responses
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to HuggingFaceHub's embedding endpoint for embed... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface_hub.html |
43fc1820e818-0 | Source code for langchain.embeddings.self_hosted
"""Running custom embedding models on self-hosted remote hardware."""
from typing import Any, Callable, List
from pydantic import Extra
from langchain.embeddings.base import Embeddings
from langchain.llms import SelfHostedPipeline
def _embed_documents(pipeline: Any, *arg... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html |
43fc1820e818-1 | model_load_fn=get_pipeline,
hardware=gpu
model_reqs=["./", "torch", "transformers"],
)
Example passing in a pipeline path:
.. code-block:: python
from langchain.embeddings import SelfHostedHFEmbeddings
import runhouse as rh
from... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html |
43fc1820e818-2 | [docs] def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
embeddings = self.clie... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html |
eeebc8cd435a-0 | Source code for langchain.embeddings.cohere
"""Wrapper around Cohere embedding models."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
[docs]class CohereEmbeddings(Base... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/cohere.html |
eeebc8cd435a-1 | raise ValueError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Cohere's embedding endpoint.
Args:
... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/cohere.html |
5783b44702b3-0 | Source code for langchain.embeddings.openai
"""Wrapper around OpenAI embedding models."""
from __future__ import annotations
import logging
from typing import (
Any,
Callable,
Dict,
List,
Literal,
Optional,
Set,
Tuple,
Union,
)
import numpy as np
from pydantic import BaseModel, Extra... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html |
5783b44702b3-1 | """Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
@retry_decorator
def _embed_with_retry(**kwargs: Any) -> Any:
return embeddings.client.create(**kwargs)
return _embed_with_retry(**kwargs)
[docs]class OpenAIEmbeddings(BaseModel, Embeddings):
... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html |
5783b44702b3-2 | text = "This is a test query."
query_result = embeddings.embed_query(text)
"""
client: Any #: :meta private:
model: str = "text-embedding-ada-002"
deployment: str = model # to support Azure OpenAI Service custom deployment names
embedding_ctx_length: int = 8191
openai_api_key: Opti... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html |
5783b44702b3-3 | "Please install it with `pip install openai`."
)
return values
# please refer to
# https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
def _get_len_safe_embeddings(
self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
)... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html |
5783b44702b3-4 | results[indices[i]].append(batched_embeddings[i])
lens[indices[i]].append(len(batched_embeddings[i]))
for i in range(len(texts)):
_result = results[i]
if len(_result) == 0:
average = embed_with_retry(self, input="", engine=self.deployment)[... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html |
5783b44702b3-5 | specified by the class.
Returns:
List of embeddings, one for each text.
"""
# handle batches of large input text
if self.embedding_ctx_length > 0:
return self._get_len_safe_embeddings(texts, engine=self.deployment)
else:
results = []
... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html |
b8c9f941b7f6-0 | Source code for langchain.embeddings.self_hosted_hugging_face
"""Wrapper around HuggingFace embedding models for self-hosted remote hardware."""
import importlib
import logging
from typing import Any, Callable, List, Optional
from langchain.embeddings.self_hosted import SelfHostedEmbeddings
DEFAULT_MODEL_NAME = "senten... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html |
b8c9f941b7f6-1 | if device < 0 and cuda_device_count > 0:
logger.warning(
"Device has %d GPUs available. "
"Provide device={deviceId} to `from_model_id` to use available"
"GPUs for execution. deviceId is -1 for CPU and "
"can be a positive integer associated wi... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html |
b8c9f941b7f6-2 | model_load_fn: Callable = load_embedding_model
"""Function to load the model remotely on the server."""
load_fn_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model load function."""
inference_fn: Callable = _embed_documents
"""Inference function to extract the embeddings."""
... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html |
b8c9f941b7f6-3 | model_name=model_name, hardware=gpu)
"""
model_id: str = DEFAULT_INSTRUCT_MODEL
"""Model name to use."""
embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
"""Instruction to use for embedding documents."""
query_instruction: str = DEFAULT_QUERY_INSTRUCTION
"""Instruction to use for embedding... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html |
b8c9f941b7f6-4 | text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = [self.query_instruction, text]
embedding = self.client(self.pipeline_ref, [instruction_pair])[0]
return embedding.tolist()
By Harrison Chase
© Copyright 2023, Harrison Chase.
... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html |
ea414bd6704f-0 | Source code for langchain.embeddings.fake
from typing import List
import numpy as np
from pydantic import BaseModel
from langchain.embeddings.base import Embeddings
[docs]class FakeEmbeddings(Embeddings, BaseModel):
size: int
def _get_embedding(self) -> List[float]:
return list(np.random.normal(size=sel... | https://python.langchain.com/en/latest/_modules/langchain/embeddings/fake.html |
fa629123646a-0 | Source code for langchain.chat_models.azure_openai
"""Azure OpenAI chat wrapper."""
from __future__ import annotations
import logging
from typing import Any, Dict
from pydantic import root_validator
from langchain.chat_models.openai import ChatOpenAI
from langchain.utils import get_from_dict_or_env
logger = logging.get... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
fa629123646a-1 | openai_api_version: str = ""
openai_api_key: str = ""
openai_organization: str = ""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values,
... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
fa629123646a-2 | "`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
035e172adecb-0 | Source code for langchain.chat_models.openai
"""OpenAI chat wrapper."""
from __future__ import annotations
import logging
import sys
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple
from pydantic import Extra, Field, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
035e172adecb-1 | ),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
async def acompletion_with_retry(llm: ChatOpenAI, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwar... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
035e172adecb-2 | message_dict["name"] = message.additional_kwargs["name"]
return message_dict
[docs]class ChatOpenAI(BaseChatModel):
"""Wrapper around OpenAI Chat large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
035e172adecb-3 | """Configuration for this pydantic object."""
extra = Extra.ignore
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fie... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
035e172adecb-4 | "due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when strea... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
035e172adecb-5 | ),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
[docs] def completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = self._create_retry_decorator()
@retry_decorator
def _completion_with_retry(... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
035e172adecb-6 | token,
verbose=self.verbose,
)
message = _convert_dict_to_message(
{"content": inner_completion, "role": role}
)
return ChatResult(generations=[ChatGeneration(message=message)])
response = self.completion_with_retry(messages... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
035e172adecb-7 | inner_completion = ""
role = "assistant"
params["stream"] = True
async for stream_resp in await acompletion_with_retry(
self, messages=message_dicts, **params
):
role = stream_resp["choices"][0]["delta"].get("role", role)
to... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
035e172adecb-8 | "This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
)
# create a GPT-3.5-Turbo encoder instance
enc = tiktoken.encoding_for_model(self.model_name)
# encode the text using the GPT-3.5-Turbo encoder
tokenized_... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
035e172adecb-9 | model = "gpt-4-0314"
# Returns the number of tokens used by a list of messages.
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
1f1c066d9691-0 | Source code for langchain.chat_models.promptlayer_openai
"""PromptLayer wrapper."""
import datetime
from typing import List, Optional
from langchain.chat_models import ChatOpenAI
from langchain.schema import BaseMessage, ChatResult
[docs]class PromptLayerChatOpenAI(ChatOpenAI):
"""Wrapper around OpenAI Chat large l... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html |
1f1c066d9691-1 | request_end_time = datetime.datetime.now().timestamp()
message_dicts, params = super()._create_message_dicts(messages, stop)
for i, generation in enumerate(generated_responses.generations):
response_dict, params = super()._create_message_dicts(
[generation.message], stop
... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html |
1f1c066d9691-2 | "langchain",
message_dicts,
params,
self.pl_tags,
response_dict,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_p... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html |
c7d1e569488e-0 | Source code for langchain.chat_models.anthropic
from typing import Any, Dict, List, Optional
from pydantic import Extra
from langchain.chat_models.base import BaseChatModel
from langchain.llms.anthropic import _AnthropicCommon
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMe... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
c7d1e569488e-1 | elif isinstance(message, SystemMessage):
message_text = f"{self.HUMAN_PROMPT} <admin>{message.content}</admin>"
else:
raise ValueError(f"Got unknown type {message}")
return message_text
def _convert_messages_to_text(self, messages: List[BaseMessage]) -> str:
"""Format... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
c7d1e569488e-2 | if stop:
params["stop_sequences"] = stop
if self.streaming:
completion = ""
stream_resp = self.client.completion_stream(**params)
for data in stream_resp:
delta = data["completion"][len(completion) :]
completion = data["completion"]... | https://python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
c7d1e569488e-3 | By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Apr 25, 2023. | https://python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
1098e202edb9-0 | .md
.pdf
Cloud Hosted Setup
Contents
Installation
Environment Setup
Cloud Hosted Setup#
We offer a hosted version of tracing at langchainplus.vercel.app. You can use this to view traces from your run without having to run the server locally.
Note: we are currently only offering this to a limited number of users. The ... | https://python.langchain.com/en/latest/tracing/hosted_installation.html |
1098e202edb9-1 | os.environ["LANGCHAIN_API_KEY"] = "my_api_key" # Don't commit this to your repo! Better to set it in your terminal.
Contents
Installation
Environment Setup
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Apr 25, 2023. | https://python.langchain.com/en/latest/tracing/hosted_installation.html |
841575633edc-0 | .ipynb
.pdf
Tracing Walkthrough
Tracing Walkthrough#
import os
os.environ["LANGCHAIN_HANDLER"] = "langchain"
## Uncomment this if using hosted setup.
# os.environ["LANGCHAIN_ENDPOINT"] = "https://langchain-api-gateway-57eoxz8z.uc.gateway.dev"
## Uncomment this if you want traces to be recorded to "my_session" instead ... | https://python.langchain.com/en/latest/tracing/agent_with_tracing.html |
841575633edc-1 | # Agent run with tracing using a chat model
agent = initialize_agent(
tools, ChatOpenAI(temperature=0), agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is 2 raised to .123243 power?")
> Entering new AgentExecutor chain...
Question: What is 2 raised to .123243 power?
Thought: I need a cal... | https://python.langchain.com/en/latest/tracing/agent_with_tracing.html |
841575633edc-2 | '1.0891804557407723'
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Apr 25, 2023. | https://python.langchain.com/en/latest/tracing/agent_with_tracing.html |
c64c84e608de-0 | .md
.pdf
Locally Hosted Setup
Contents
Installation
Environment Setup
Locally Hosted Setup#
This page contains instructions for installing and then setting up the environment to use the locally hosted version of tracing.
Installation#
Ensure you have Docker installed (see Get Docker) and that it’s running.
Install th... | https://python.langchain.com/en/latest/tracing/local_installation.html |
c64c84e608de-1 | By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Apr 25, 2023. | https://python.langchain.com/en/latest/tracing/local_installation.html |
fcdfc99e17b1-0 | .md
.pdf
Question Answering over Docs
Contents
Document Question Answering
Adding in sources
Additional Related Resources
End-to-end examples
Question Answering over Docs#
Conceptual Guide
Question answering in this context refers to question answering over your document data.
For question answering over other types ... | https://python.langchain.com/en/latest/use_cases/question_answering.html |
fcdfc99e17b1-1 | The LLM response will contain the answer to your question, based on the content of the documents.
The recommended way to get started using a question answering chain is:
from langchain.chains.question_answering import load_qa_chain
chain = load_qa_chain(llm, chain_type="stuff")
chain.run(input_documents=docs, question=... | https://python.langchain.com/en/latest/use_cases/question_answering.html |
fcdfc99e17b1-2 | Additional Related Resources#
Additional related resources include:
Utilities for working with Documents: Guides on how to use several of the utilities which will prove helpful for this task, including Text Splitters (for splitting up long documents) and Embeddings & Vectorstores (useful for the above Vector DB example... | https://python.langchain.com/en/latest/use_cases/question_answering.html |
bb39de463c5b-0 | .md
.pdf
Agent Simulations
Contents
Simulations with Two Agents
Generative Agents
Agent Simulations#
Agent simulations involve interacting one or more agents with each other.
Agent simulations generally involve two main components:
Long Term Memory
Simulation Environment
Specific implementations of agent simulations (... | https://python.langchain.com/en/latest/use_cases/agent_simulations.html |
b9cdd37eca32-0 | .md
.pdf
Summarization
Summarization#
Conceptual Guide
Summarization involves creating a smaller summary of multiple longer documents.
This can be useful for distilling long documents into the core pieces of information.
The recommended way to get started using a summarization chain is:
from langchain.chains.summarize ... | https://python.langchain.com/en/latest/use_cases/summarization.html |
4fbfef61b211-0 | .md
.pdf
Chatbots
Chatbots#
Conceptual Guide
Because language models are good at producing text, they are ideal for creating chatbots.
Aside from the base prompts/LLMs, an important concept to know for Chatbots is memory.
Most chat based applications rely on remembering what happened in previous interactions, whic... | https://python.langchain.com/en/latest/use_cases/chatbots.html |
339e48c1ecf3-0 | .md
.pdf
Extraction
Extraction#
Conceptual Guide
Most APIs and databases still deal with structured information.
Therefore, in order to better work with those, it can be useful to extract structured information from text.
Examples of this include:
Extracting a structured row to insert into a database from a sentence
Ex... | https://python.langchain.com/en/latest/use_cases/extraction.html |
122bf7f8508b-0 | .md
.pdf
Code Understanding
Contents
Conversational Retriever Chain
Code Understanding#
Overview
LangChain is a useful tool designed to parse GitHub code repositories. By leveraging VectorStores, Conversational RetrieverChain, and GPT-4, it can answer questions in the context of an entire GitHub repository or generat... | https://python.langchain.com/en/latest/use_cases/code.html |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.