omar721 committed on
Commit
431b26d
·
verified ·
1 Parent(s): 4764dc6

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +157 -0
app.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import requests
3
+ import os
4
+ import time
5
+
6
+ # Configuration
7
+ API_TOKEN = os.environ.get("HF_TOKEN", "")
8
+ MODEL_NAME = "deepseek-ai/Janus-Pro-7B"
9
+ API_URL = f"https://api-inference.huggingface.co/models/{MODEL_NAME}"
10
+ headers = {"Authorization": f"Bearer {API_TOKEN}"}
11
+
12
+ def query_janus_model(payload):
13
+ """Send request to Hugging Face Inference API"""
14
+ try:
15
+ response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
16
+
17
+ if response.status_code == 503:
18
+ return {"error": "Model is loading, please try again in 30-60 seconds..."}
19
+ elif response.status_code != 200:
20
+ return {"error": f"API Error: {response.status_code} - {response.text}"}
21
+
22
+ return response.json()
23
+ except requests.exceptions.Timeout:
24
+ return {"error": "Request timeout - model might be loading"}
25
+ except Exception as e:
26
+ return {"error": f"Connection error: {str(e)}"}
27
+
28
+ def chat_with_janus(message, history):
29
+ """Chat function for Janus-Pro model"""
30
+ if not API_TOKEN:
31
+ return "⚠️ Please add your Hugging Face token in Space Settings β†’ Repository secrets β†’ HF_TOKEN"
32
+
33
+ # Prepare the payload
34
+ payload = {
35
+ "inputs": message,
36
+ "parameters": {
37
+ "max_new_tokens": 300,
38
+ "temperature": 0.7,
39
+ "top_p": 0.9,
40
+ "do_sample": True,
41
+ "return_full_text": False
42
+ },
43
+ "options": {
44
+ "wait_for_model": True
45
+ }
46
+ }
47
+
48
+ # Show loading message
49
+ yield "πŸ”„ Processing your request... (Model might take 30-60 seconds to load)"
50
+
51
+ # Query the model
52
+ result = query_janus_model(payload)
53
+
54
+ # Process the response
55
+ if "error" in result:
56
+ yield f"❌ {result['error']}"
57
+ elif isinstance(result, list) and len(result) > 0:
58
+ if 'generated_text' in result[0]:
59
+ yield result[0]['generated_text']
60
+ else:
61
+ yield str(result[0])
62
+ elif isinstance(result, dict) and 'generated_text' in result:
63
+ yield result['generated_text']
64
+ else:
65
+ yield f"πŸ“„ Response: {str(result)}"
66
+
67
+ def clear_chat():
68
+ """Clear chat history"""
69
+ return [], []
70
+
71
+ # Custom CSS for better appearance
72
+ css = """
73
+ .gradio-container {
74
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
75
+ }
76
+ .chatbot {
77
+ background: white;
78
+ border-radius: 10px;
79
+ }
80
+ """
81
+
82
+ # Create the chat interface
83
+ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
84
+ gr.Markdown(
85
+ """
86
+ # πŸš€ Janus-Pro-7B Chat API
87
+ **Multimodal AI Model** - Understanding & Generation Capabilities
88
+
89
+ *Note: First request may take 30-60 seconds while the model loads*
90
+ """
91
+ )
92
+
93
+ with gr.Row():
94
+ with gr.Column(scale=1):
95
+ gr.Markdown("### ℹ️ About Janus-Pro")
96
+ gr.Markdown("""
97
+ - **Model**: Janus-Pro-7B by DeepSeek
98
+ - **Capabilities**: Text understanding & generation
99
+ - **License**: MIT
100
+ - **Framework**: Unified multimodal transformer
101
+ """)
102
+
103
+ with gr.Column(scale=2):
104
+ chatbot = gr.Chatbot(
105
+ label="Chat with Janus-Pro",
106
+ height=400,
107
+ show_copy_button=True
108
+ )
109
+
110
+ with gr.Row():
111
+ msg = gr.Textbox(
112
+ label="Your message",
113
+ placeholder="Type your message here... (Press Enter to send)",
114
+ scale=4,
115
+ container=False
116
+ )
117
+ clear_btn = gr.Button("πŸ—‘οΈ Clear", scale=1)
118
+
119
+ with gr.Row():
120
+ gr.Examples(
121
+ examples=[
122
+ "Explain quantum computing in simple terms",
123
+ "Write a short poem about artificial intelligence",
124
+ "What are the benefits of renewable energy?",
125
+ "How does machine learning work?"
126
+ ],
127
+ inputs=msg,
128
+ label="Try these examples:"
129
+ )
130
+
131
+ # Event handlers
132
+ msg.submit(
133
+ fn=chat_with_janus,
134
+ inputs=[msg, chatbot],
135
+ outputs=chatbot
136
+ ).then(
137
+ lambda: "", # Clear input
138
+ outputs=msg
139
+ )
140
+
141
+ clear_btn.click(
142
+ fn=clear_chat,
143
+ outputs=[chatbot, msg]
144
+ )
145
+
146
+ gr.Markdown(
147
+ """
148
+ ---
149
+ **Tips**:
150
+ - Be specific in your questions for better responses
151
+ - The model excels at technical explanations and creative writing
152
+ - First message may be slow as the model loads
153
+ """
154
+ )
155
+
156
+ if __name__ == "__main__":
157
+ demo.launch(debug=True)