# Hugging Face Space source (page reported status: Build error)
# Third-party dependencies.
import gradio as gr
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain.vectorstores import Chroma

# Local configuration (path of the persisted Chroma index).
from constants import persist_directory
# Load the persisted Chroma index using OpenAI embeddings, and retrieve the
# three most similar chunks for each query.
embedding = OpenAIEmbeddings()
vector_store = Chroma(
    persist_directory=persist_directory,
    embedding_function=embedding,
)
retriever = vector_store.as_retriever(search_kwargs={"k": 3})

# Deterministic chat model (temperature=0).  # gpt-3.5-turbo-0613
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

# System prompt: persona plus the retrieved document summaries ({summaries})
# that the chain substitutes in at query time.
system_template = """Your name is UCA Academy bot. You act like a successful rich man.
Never reveal that it is an AI. Use the following pieces of context to answer the users question shortly.
Given the following summaries of a long document and a question, create a final answer with references.
If you don't know the answer, just say that "I don't know", don't try to make up an answer.
----------------
{summaries}
You MUST answer in Korean and in Markdown format:"""

# Chat prompt = fixed system message + the user's question.
messages = [
    SystemMessagePromptTemplate.from_template(system_template),
    HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
chain_type_kwargs = {"prompt": prompt}

# "stuff" chain: all retrieved chunks are packed into one prompt; sources are
# returned alongside the answer, and k is reduced if the context would
# exceed the model's token limit.
chain = RetrievalQAWithSourcesChain.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=retriever,
    return_source_documents=True,
    chain_type_kwargs=chain_type_kwargs,
    reduce_k_below_max_tokens=True,
    verbose=True,
)
def respond(message, chat_history):
    """Answer *message* via the retrieval QA chain and update the chat log.

    Appends the (user message, bot answer) pair to *chat_history* and returns
    an empty string (so the input textbox is cleared) plus the updated history.
    """
    answer = chain(message)["answer"]
    chat_history.append((message, answer))
    return "", chat_history
# Build the Gradio UI: a chat window, a message textbox, and a clear button.
# NOTE(review): the Korean UI strings below are mojibake in this copy of the
# file; they are preserved byte-for-byte — re-encode from the original source
# if available.
with gr.Blocks(theme="gstaff/sketch") as demo:
    gr.Markdown("# μλ νμΈμ. UCAμμΉ΄λ°λ―Έ μΈμ΄λ Έ μ±ν λ΄μ λνν΄λ³΄μΈμ.")
    chatbot = gr.Chatbot(label="μ±ν μ λ ₯μ°½")
    msg = gr.Textbox(label="μ λ ₯")
    clear = gr.Button("μ λ ₯ μ΄κΈ°ν")

    # Submitting the textbox sends (msg, chatbot) through respond(), which
    # clears the textbox and refreshes the chat window.
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    # The clear button resets the chat window to empty.
    clear.click(lambda: None, None, chatbot, queue=False)

# Run the interface; debug=True surfaces errors in the console.
demo.launch(debug=True)