# NOTE(review): "Spaces: Sleeping" is Hugging Face Spaces page residue captured by the
# scrape that produced this file, not program content — kept here as a comment so the
# module parses.
# Third-party imports: Gradio for the UI, LlamaIndex for retrieval over the
# cars/bikes corpus, LangChain for the conversational agent around it.
import gradio as gr
from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper, ServiceContext
from langchain import OpenAI
from langchain.agents import Tool, initialize_agent
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
# Agent prompt prefix: pins the persona to automobiles only and demands HTML output.
PREFIX = ''' You are an Automobile expert AI scientist having all the knowledge about all the existing cars and bikes with their respective models and all the information around it.
If the question is not related to cars, bikes, automobiles or their related models then please let the user know that you don't have the relevant information.
Return the entire output in an HTML format.
Make sure to follow each and every instructions before giving the response.
'''

# Agent prompt suffix: LangChain fills {chat_history} from memory, {input} with the
# user question and {agent_scratchpad} with intermediate tool reasoning.
SUFFIX = '''
Begin!
Previous conversation history:
{chat_history}
Instructions: {input}
{agent_scratchpad}
'''
# Pre-built vector index over the cars/bikes corpus; the file must sit next to this
# script. This is the agent's only knowledge source.
index = GPTSimpleVectorIndex.load_from_disk('./cars_bikes(2).json')

tools = [
    Tool(
        name="LlamaIndex",
        # Wrap the index query so the tool always hands the agent a plain string.
        func=lambda q: str(index.query(q)),
        description="""You are an Automobile expert equipped with all the information related to all the existing cars, bikes and all its respective brands & models, features, parameters and specifications
who is capable of perfectly answering everything related to every automobile brands in a tabular format or list.
Answer using formatted tables or lists as when required.
If the question is not related to cars, bikes, automobiles or their related models then please let the user know that you don't have the relevant information.
Please answer keeping in mind the Indian context.
Whenever possible do Comparitive Analysis.
Return the entire output in an HTML format.
Avoid word 'html' in output.
Make sure to follow each and every instructions before giving the response.
""",
        # Hand the tool's output straight back to the user instead of letting the
        # agent paraphrase (preserves the HTML formatting the prompt asks for).
        return_direct=True,
    ),
]

# Completion-token cap for the LLM.
num_outputs = 2000

# Sliding-window memory: keeps only the last 5 exchanges, exposed to the prompt
# under the 'chat_history' key referenced by SUFFIX.
conversational_memory = ConversationBufferWindowMemory(
    memory_key='chat_history',
    k=5,
    return_messages=True,
)

# BUGFIX: the original used langchain's `OpenAI` (completions endpoint) with
# "gpt-4", which is a chat-only model and fails at request time. `ChatOpenAI`
# was already imported for this purpose.
llm = ChatOpenAI(temperature=0.5, model_name="gpt-4", max_tokens=num_outputs)

agent_executor = initialize_agent(
    tools,
    llm,
    agent="conversational-react-description",
    memory=conversational_memory,
    agent_kwargs={'prefix': PREFIX, 'suffix': SUFFIX},
    # Recover from malformed LLM outputs instead of raising.
    handle_parsing_errors=True,
)
| add = "Return the output in a table format or an ordered list legible to the user.\n" | |
| def greet(Question): | |
| return agent_executor.run(input=add+Question) | |
# Gradio UI: one textbox in, rendered HTML out.
demo = gr.Interface(
    fn=greet,
    inputs=gr.Textbox(lines=2, label="Question", placeholder="What do you want to know...?"),
    outputs=gr.HTML(""),
    title="Here Auto",
    description="Know everything about Cars and Bikes",
)
demo.launch()