from llama_index import LLMPredictor, PromptHelper, ServiceContext, GPTKeywordTableIndex
import gradio as gr
from llama_index.indices.knowledge_graph.base import GPTKnowledgeGraphIndex
import os
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate
# Pull the OpenAI API key from the Space's secret ('tau_api_key') and expose
# it under the variable name the OpenAI client libraries actually read.
api_key = os.environ['tau_api_key']
os.environ["OPENAI_API_KEY"] = api_key
template = """
I want you to act as a document that I am having a conversation with. Your name is "AI Assistant" from Vegetable NZ.
You will provide me with answers from the given info. If the answer is not included, say exactly
"Unfortunately, I do not know the answer to your question." and stop after that.
Refuse to answer any question not about the info. Never break character.
User: What is the capital of France?
AI Assistant: The capital of France is Paris.
User: Who is the author of 'Pride and Prejudice'?
AI Assistant: The author of 'Pride and Prejudice' is Jane Austen.
User: {query}
AI Assistant: """
prompt_template = PromptTemplate(input_variables=["query"], template=template)
def chat(indexfile, chat_history, user_input):
    """Answer *user_input* from the knowledge-graph index named *indexfile*.

    Generator: yields successive ``chat_history + [(user_input, partial)]``
    lists, one character at a time, so Gradio renders a typewriter-style
    streaming effect.

    Parameters
    ----------
    indexfile : str
        Display name of the index; maps to ``index/<indexfile>.json``.
    chat_history : list[tuple[str, str]]
        Prior (user, bot) message pairs from the Gradio Chatbot component.
    user_input : str
        The new question typed by the user.
    """
    # Prompt/LLM sizing knobs for llama_index.
    max_input_size = 4096
    num_outputs = 512
    max_chunk_overlap = 20
    chunk_size_limit = 600

    prompt_helper = PromptHelper(
        max_input_size,
        num_outputs,
        max_chunk_overlap,
        chunk_size_limit=chunk_size_limit,
    )
    llm_predictor = LLMPredictor(
        llm=ChatOpenAI(
            temperature=0.0, model_name="gpt-3.5-turbo", max_tokens=num_outputs
        )
    )
    service_context = ServiceContext.from_defaults(
        llm_predictor=llm_predictor, prompt_helper=prompt_helper
    )

    # Pre-built indexes are stored as JSON under index/, keyed by display name.
    index_filename = "index/" + indexfile + ".json"
    index = GPTKnowledgeGraphIndex.load_from_disk(
        index_filename, service_context=service_context
    )

    bot_response = index.query(
        prompt_template.format(query=user_input), response_mode="compact"
    )

    # Stream the answer one character at a time.  (Fixed two no-ops from the
    # original: ``''.join(bot_response.response)`` joined a string onto
    # itself, and ``letter + ""`` appended an empty string.)
    response = ""
    for letter in bot_response.response:
        response += letter
        yield chat_history + [(user_input, response)]
# Display names of the pre-built indexes; each maps to index/<name>.json.
index_files = ["Crop Protection", "Environmental Guidance", "Good Management Practice Guides"]

with gr.Blocks() as demo:
    gr.Markdown('Vegetable Expert Advisor')
    with gr.Tab("Ask away"):
        # Fixed: index_files is already a list, so the redundant list() copy
        # around it was dropped.
        indexfile = gr.Radio(choices=index_files)
        chatbot = gr.Chatbot()
        message = gr.Textbox()
        # chat is a generator, so each yield updates the Chatbot in place.
        message.submit(chat, [indexfile, chatbot, message], chatbot)

# queue() is required for generator (streaming) event handlers.
demo.queue().launch(debug=True)