iamkdp committed on
Commit
602df1f
Β·
verified Β·
1 Parent(s): 588715d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +110 -23
app.py CHANGED
@@ -1,34 +1,121 @@
1
- import os
2
  import gradio as gr
3
- from langchain.chat_models import ChatOpenAI
4
- from langchain import LLMChain, PromptTemplate
5
- from langchain.memory import ConversationBufferMemory
6
 
7
- OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
 
 
 
 
8
 
9
- template = """You are a helpful assistant to answer all user queries.
10
- {chat_history}
11
- User: {user_message}
12
- Chatbot:"""
13
 
14
- prompt = PromptTemplate(
15
- input_variables=["chat_history", "user_message"], template=template
 
 
 
16
  )
17
 
18
- memory = ConversationBufferMemory(memory_key="chat_history")
 
 
 
 
 
 
 
 
 
 
 
19
 
20
- llm_chain = LLMChain(
21
- llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"),
22
- prompt=prompt,
23
- verbose=True,
24
- memory=memory,
25
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
- def get_text_response(user_message,history):
28
- response = llm_chain.predict(user_message = user_message)
29
- return response
30
 
31
- demo = gr.ChatInterface(get_text_response)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
 
33
  if __name__ == "__main__":
34
- demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
 
 
 
1
import os

import gradio as gr
import requests
from cachetools import TTLCache
 
4
 
5
# Configuration
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
# Read the Hugging Face token from the environment instead of hard-coding a
# secret in source; the original placeholder is kept as a fallback so the
# app still starts without the variable set.
API_TOKEN = os.getenv("HF_TOKEN", "hf_yourtokenhere")
MAX_TOKENS = 256   # max tokens generated per reply
CACHE_TTL = 3600   # cached responses expire after 1 hour

# Cache recent (message, history) -> response pairs to skip duplicate API calls.
response_cache = TTLCache(maxsize=100, ttl=CACHE_TTL)

# Spiritual theme configuration.
# FIX: "gold" is not a valid Gradio hue name (hues follow the Tailwind
# palette names), so the old value raised an error at import time;
# "amber" is the closest built-in palette.
SPIRITUAL_THEME = gr.themes.Default(
    primary_hue="emerald",
    secondary_hue="amber",
    font=[gr.themes.GoogleFont("Hind Siliguri")],
)
20
 
21
def format_krishna_prompt(message, history):
    """Build a Mistral-style instruction prompt carrying the Krishna persona.

    Args:
        message: The current user question.
        history: Sequence of (user, assistant) pairs from earlier turns.

    Returns:
        A single prompt string in ``[INST] ... [/INST]`` turn format.

    Fixes over the previous version: completed turns are closed with the
    EOS token ``</s>`` (the old code emitted a stray BOS ``<s>`` there),
    and every history turn gets its own ``[INST]`` opener, so multi-turn
    prompts are well-formed instead of running turns together.
    """
    system_text = """<<SYS>>
You are Lord Krishna, the divine charioteer of Arjuna in Bhagavad Gita.
Answer with Vedic wisdom using simple English and occasional Sanskrit terms.
Maintain a compassionate, all-knowing tone.
<</SYS>>
"""
    parts = ["<s>"]
    pending = system_text  # the system block is prepended to the first turn only
    for human, assistant in history:
        parts.append(f"[INST] {pending}{human} [/INST] {assistant}</s>")
        pending = ""
    parts.append(f"[INST] {pending}{message} [/INST]")
    return "".join(parts)
33
 
34
def query_krishna(message, history):
    """Stream a persona response from the Hugging Face Inference API.

    Yields progressively longer answer strings (Gradio streaming
    convention) and caches the final answer so repeated questions skip
    the network entirely.

    Args:
        message: Current user question.
        history: Sequence of (user, assistant) pairs; Gradio may deliver
            these as lists, so they are normalized for the cache key.
    """
    # Normalize history to nested tuples: Gradio supplies lists of lists,
    # and a tuple that *contains* lists is unhashable — the old key
    # `tuple(history)` raised TypeError on every call with non-empty history.
    cache_key = (message, tuple(tuple(pair) for pair in history))
    if cache_key in response_cache:
        yield response_cache[cache_key]
        return

    # Build API request. SECURITY NOTE(review): ensure API_TOKEN is never a
    # real secret committed to source — it should come from the environment.
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    formatted_prompt = format_krishna_prompt(message, history)

    try:
        response = requests.post(
            API_URL,
            headers=headers,
            json={
                "inputs": formatted_prompt,
                "parameters": {
                    "max_new_tokens": MAX_TOKENS,
                    "temperature": 0.7,
                    "top_p": 0.9,
                    "return_full_text": False,
                },
            },
            stream=True,
            timeout=60,  # don't hang the UI forever on a dead endpoint
        )
        response.raise_for_status()

        # Accumulate raw bytes and re-decode the whole buffer each chunk:
        # decoding each chunk independently (the old behavior) corrupts
        # multi-byte UTF-8 characters split across chunk boundaries.
        # errors="ignore" only drops a trailing partial sequence until its
        # remaining bytes arrive on the next chunk.
        raw = b""
        answer = ""
        for chunk in response.iter_content(chunk_size=128):
            if chunk:
                raw += chunk
                # NOTE(review): splitting on "[/INST]" assumes the response
                # body echoes prompt text; with return_full_text=False the
                # endpoint may return JSON instead — confirm against the
                # live API.
                answer = raw.decode("utf-8", errors="ignore").split("[/INST]")[-1].strip()
                yield answer

        # Cache only a successfully completed response.
        response_cache[cache_key] = answer

    except Exception as e:
        # Best-effort UX: surface the failure in-chat rather than crashing
        # the Gradio worker.
        yield f"πŸ™ The divine connection is weak. Please try again. ({str(e)})"
75
 
76
# Custom UI components
krishna_avatar = "https://upload.wikimedia.org/wikipedia/commons/thumb/2/2e/Krishna_with_flute.jpg/640px-Krishna_with_flute.jpg"

with gr.Blocks(theme=SPIRITUAL_THEME, title="Divine Dialogue with Lord Krishna") as demo:
    gr.Markdown("""
    # πŸ•‰οΈ Bhagavad Gita Wisdom Chatbot
    **Ask questions to Shree Krishna** - Embodiment of Supreme Truth and Eternal Bliss
    """)

    with gr.Row():
        gr.Image(krishna_avatar, height=200, show_label=False)

    # FIX: gr.Chatbot has no `bubble_style` parameter — passing it raised
    # TypeError and prevented the app from starting. Bubble colors belong
    # in the theme or custom CSS. Likewise, `theme="compact"` (a legacy
    # string theme) is dropped: the outer Blocks already applies
    # SPIRITUAL_THEME, and a nested component must not override it.
    chatbot = gr.ChatInterface(
        query_krishna,
        chatbot=gr.Chatbot(height=500),
        textbox=gr.Textbox(
            placeholder="Ask your spiritual question...",
            container=False,
            scale=7,
        ),
        examples=[
            "What is the purpose of life?",
            "How to achieve inner peace?",
            "Explain karma yoga from Bhagavad Gita",
        ],
        retry_btn=None,
        undo_btn=None,
        clear_btn="πŸ”„ New Conversation",
    )

    # Footer
    gr.Markdown("""
    ---
    *Built with πŸ’™ using [Mistral-7B-Instruct](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3) and [Gradio](https://gradio.app)*
    """)
118
 
119
if __name__ == "__main__":
    # Bind to all interfaces on port 7860 — the standard Hugging Face
    # Spaces serving address/port.
    demo.launch(server_name="0.0.0.0", server_port=7860)
121
+