# src/utils/process.py
import deeplake
import openai
import os
import subprocess
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import DeepLake
from langchain.text_splitter import RecursiveCharacterTextSplitter
from src.utils.load_and_split import load_docs, split_docs # Updated import
def clone_repository(repo_url, local_path):
    """Clone the git repository at *repo_url* into *local_path*.

    Uses the git CLI; raises subprocess.CalledProcessError if git
    exits with a non-zero status (check=True).
    """
    command = ["git", "clone", repo_url, local_path]
    # capture_output keeps git's progress chatter off the console.
    subprocess.run(command, check=True, capture_output=True)
def create_deeplake_dataset(activeloop_dataset_path, activeloop_token):
    """Create an empty DeepLake dataset at the given path.

    Overwrites any dataset already at that path, then registers the
    four tensors the LangChain DeepLake vector store expects.
    """
    dataset = deeplake.empty(
        activeloop_dataset_path,
        token=activeloop_token,
        overwrite=True,
    )
    # Tensor layout required by the vector-store integration.
    for tensor_name in ("ids", "metadata", "embedding", "text"):
        dataset.create_tensor(tensor_name)
def process(
    repo_url, include_file_extensions, activeloop_dataset_path, repo_destination
):
    """
    Process a git repository by cloning it, filtering files, splitting documents,
    creating embeddings, and storing everything in a DeepLake dataset.

    Args:
        repo_url: URL of the git repository to clone.
        include_file_extensions: file-extension filter forwarded to load_docs.
        activeloop_dataset_path: DeepLake dataset path (hub:// or local).
        repo_destination: local directory the repository is cloned into.

    Raises:
        subprocess.CalledProcessError: if the git clone fails.
    """
    # NOTE(review): if ACTIVELOOP_TOKEN is unset this passes token=None to
    # deeplake.empty — presumably only valid for local dataset paths; confirm.
    activeloop_token = os.getenv("ACTIVELOOP_TOKEN")
    create_deeplake_dataset(activeloop_dataset_path, activeloop_token)
    clone_repository(repo_url, repo_destination)
    docs = load_docs(repo_destination, include_file_extensions)
    texts = split_docs(docs)
    embeddings = OpenAIEmbeddings(model="text-embedding-ada-002")
    db = DeepLake(dataset_path=activeloop_dataset_path, embedding_function=embeddings)
    db.add_documents(texts)