import os

# LangSmith tracing configuration
os.environ["LANGCHAIN_PROJECT"] = "RAG_workflow_2"
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_API_KEY"] = "<your-langsmith-api-key>"  # set your own key; avoid committing real secrets

# Read API keys from local files instead of hardcoding them in the script
with open("openai.txt", "r") as f:
    os.environ["OPENAI_API_KEY"] = f.read().strip()
with open("google.txt", "r") as f:
    google_api_key = f.read().strip()

import gradio as gr
from langchain import hub
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_openai import OpenAIEmbeddings
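
# Assumed package set for these imports: langchain, langchain-community,
# langchain-openai, langchain-google-genai, chromadb, gradio, beautifulsoup4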

# Set the Google key read from google.txt above
os.environ["GOOGLE_API_KEY"] = google_api_key

# Initialize the Google Gemini LLM
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro",
    temperature=0,
    max_tokens=None,
    timeout=None,
    max_retries=2
)
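
# Any LangChain chat model can be swapped in here. A sketch of an assumed
# alternative using OpenAI (OPENAI_API_KEY is already set above for embeddings):
# from langchain_openai import ChatOpenAI
# llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)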

def format_docs(docs):
    """Join retrieved chunks into a single context string for the prompt."""
    return "\n\n".join(doc.page_content for doc in docs)

# Process the content of a URL and answer a query about it with the LLM
def process_url(url, user_query):
    # Load the page content from the URL
    loader = WebBaseLoader(web_paths=[url])
    docs = loader.load()

    # Split the text into overlapping chunks
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    splits = text_splitter.split_documents(docs)

    # Embed the chunks and index them in an in-memory Chroma collection
    embeddings = OpenAIEmbeddings()
    vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)

    # Retrieve the 5 chunks most similar to the query
    retriever = vectorstore.as_retriever(search_kwargs={"k": 5})

    # Pull the standard RAG prompt from the LangChain hub
    prompt = hub.pull("rlm/rag-prompt")

    # Chain: retrieve and format context, fill the prompt, call the LLM, parse to text
    rag_chain = (
        {"context": retriever | format_docs, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )

    # Get the response from the LLM
    response = rag_chain.invoke(user_query)
    return response
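
# Note: the vector store is rebuilt (and the page re-embedded) on every call.
# For repeated questions about the same URL you could cache the retriever,
# e.g. in a dict keyed by URL; left out here to keep the example minimal.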

# Gradio interface
def gradio_interface(url, user_query):
    response = process_url(url, user_query)
    return response

# Create Gradio app
demo_interface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Textbox(label="输入 URL"),
        gr.Textbox(label="询问关于网站的问题")
    ],
    outputs="text",
    title="RAG连接问答",
    description="输入一个URL然后询问关于URL的相关的问题."
)

# Launch the app
demo_interface.launch(share=True, debug=True)
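
# share=True prints a temporary public gradio.live URL in addition to the local
# one; debug=True keeps the process attached so errors stream to the console.
# Hypothetical session once the app is up:
#   URL:      https://lilianweng.github.io/posts/2023-06-23-agent/
#   Question: "What are the main components of an LLM-powered agent?"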