abhivsh commited on
Commit
8055a93
·
verified ·
1 Parent(s): 284f004

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +78 -0
app.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Dependencies (install before running):
#   pip install langchain langchain_community langchain_text_splitters
#   pip install langchain-google-genai gradio openai pypdf chromadb tiktoken python-dotenv

from langchain_openai import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain

import gradio as gr
import os
import requests

import sys

# Make modules two directories up importable (project-local helpers).
sys.path.append('../..')

# --- Credential loading for Google Colab / local desktop (disabled) ---
# from google.colab import userdata
# OPENAI_API_KEY = userdata.get('OPENAI_API_KEY')
# hf_token = userdata.get('hf_token')
# GEMINI_API_KEY = userdata.get('GEMINI_API_KEY')
# from dotenv import load_dotenv, find_dotenv
# _ = load_dotenv(find_dotenv())  # Read local .env file
# OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
# hf_token = os.environ['hf_token']
# GEMINI_API_KEY = os.environ['GEMINI_API_KEY']

# --- Credential loading for Hugging Face Spaces ---
# os.getenv is equivalent to os.environ.get: yields None when the key is unset.
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
hf_token = os.getenv('hf_token')
GEMINI_API_KEY = os.getenv('GEMINI_API_KEY')
fs_token = os.getenv('fs_token')

# Chat model name handed to ChatOpenAI below.
llm_name = "gpt-3.5-turbo"
45
+ def chat_query(question):
46
+
47
+ llm = ChatOpenAI(model=llm_name, temperature=0.1, api_key = OPENAI_API_KEY)
48
+
49
+ # Memory
50
+ memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
51
+
52
+ # Conversation Retrival Chain
53
+ retriever=vectordb.as_retriever()
54
+ qa = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, memory=memory)
55
+
56
+ # Replace input() with question variable for Gradio
57
+ result = qa({"question": question})
58
+ return result['answer']
59
+
60
+
61
+ # logo_path = os.path.join(os.getcwd(), "Logo.png")
62
+
63
+ iface = gr.Interface(
64
+ fn=chat_query,
65
+ inputs= gr.Textbox(lines = 6, placeholder="Enter your Query here....",label="Query :"),
66
+ outputs=gr.Textbox(label="Chatbot Reply : "),
67
+ title = " -----: ChatBot :----- ",
68
+ description="""-- Welcome to the Language Model trained on Model-TS (Engineering-SS).\n\n
69
+ -- The Model tries to answer the Query based on Model-Technical Specifications. \n\n
70
+ -- For precise reply, please input `Specific Keywords` in your Query. \n\n """,
71
+ concurrency_limit = None,
72
+
73
+ )
74
+
75
+
76
+ iface.launch(share=True, debug=True)
77
+
78
+ # What should be the GIB height outside the GIS hall ?