import os

import openai
from dotenv import load_dotenv

# SECURITY: the API key must never be hard-coded in source or written out by
# the program. It is expected to already exist in the environment or in a
# local .env file (which must be git-ignored), e.g.:
#   OPENAI_API_KEY=sk-...
load_dotenv(".env")

# Accept either the conventional upper-case name or the lower-case variant
# the original .env used.
_api_key = os.environ.get("OPENAI_API_KEY") or os.environ.get("openai_api_key")
if not _api_key:
    raise RuntimeError(
        "OPENAI_API_KEY is not set; add it to your environment or .env file."
    )

openai.api_key = _api_key
# LangChain's OpenAI integrations read this environment variable.
os.environ["OPENAI_API_KEY"] = _api_key

# Raw CSV text that serves as the question-answering corpus.
with open("sample_awards.csv") as f:
    sample_awards = f.read()
|
|
from langchain.text_splitter import CharacterTextSplitter

# Chunk the corpus on newlines into pieces of at most 1000 characters
# (no overlap) so each chunk can be embedded independently.
text_splitter = CharacterTextSplitter(
    separator="\n",
    chunk_size=1000,
    chunk_overlap=0,
)
texts = text_splitter.split_text(sample_awards)
|
|
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma

# Embed every chunk and index it in an in-memory Chroma store. Each chunk is
# tagged with its position as a "source" so answers can be traced back.
embeddings = OpenAIEmbeddings()
_metadatas = [{"source": str(idx)} for idx in range(len(texts))]
docsearch = Chroma.from_texts(
    texts,
    embeddings,
    metadatas=_metadatas,
).as_retriever()
|
|
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI

# "stuff" chain type: every retrieved document is stuffed into one prompt.
# temperature=0 keeps the model's answers deterministic.
llm = OpenAI(temperature=0)
chain = load_qa_chain(llm, chain_type="stuff")
|
|
def question(text):
    """Answer *text* using documents retrieved from the awards index.

    Retrieves the chunks most relevant to the query from the Chroma
    retriever, then runs the stuff-type QA chain over them.
    """
    relevant_docs = docsearch.get_relevant_documents(text)
    return chain.run(input_documents=relevant_docs, question=text)
|
|
import gradio as gr

# Minimal web front-end: a single text box in, a single text box out,
# wired directly to the question() helper above.
demo = gr.Interface(
    fn=question,
    inputs="text",
    outputs="text",
    title="Awards Question Answering",
)
demo.launch()
|
|