ainovatronsec committed on
Commit
c7eab17
·
verified ·
1 Parent(s): 0778744

Upload 2 files

Browse files
Files changed (2) hide show
  1. space_app_py.py +241 -0
  2. space_config.json +21 -0
space_app_py.py ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ from unsloth import FastLanguageModel
4
+ import time
5
+
6
+ # Global variables for model
7
+ model = None
8
+ tokenizer = None
9
+
10
def load_model():
    """Lazily initialize the global Ideon Audio support model and tokenizer.

    Idempotent: a second call returns immediately once the model is loaded.

    Returns:
        bool: True when the model is ready for inference, False if loading failed.
    """
    global model, tokenizer

    # Already initialized on a previous call — nothing to do.
    if model is not None:
        return True

    print("🔄 Loading Ideon Audio Support Model...")
    try:
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name="ainovatronsec/ideoaudio",
            max_seq_length=2048,
            dtype=None,            # let Unsloth pick a dtype for the hardware
            load_in_4bit=True,     # 4-bit quantization keeps memory usage low
            trust_remote_code=False,
        )
        # Switch Unsloth into its optimized inference mode.
        FastLanguageModel.for_inference(model)
        print("✅ Model loaded successfully!")
    except Exception as exc:
        print(f"❌ Error loading model: {exc}")
        return False

    return True
35
+
36
def get_ideon_response(message, history, temperature=0.7, max_tokens=256):
    """Generate a support answer from the fine-tuned Ideon Audio model.

    Args:
        message: The user's current question.
        history: Prior chat turns as (user, assistant) pairs, as supplied by
            gr.ChatInterface. May be None or empty on the first turn.
        temperature: Sampling temperature; higher values are more creative.
        max_tokens: Maximum number of new tokens to generate.

    Returns:
        str: The model's reply, or a user-facing error message on failure.
    """

    # Load model if not already loaded
    if not load_model():
        return "❌ Sorry, the model failed to load. Please try again later."

    try:
        # FIX: include prior turns so the model keeps conversational context.
        # The original implementation accepted `history` but ignored it, so
        # every question was answered with no memory of the conversation.
        messages = []
        for user_turn, bot_turn in (history or []):
            if user_turn:
                messages.append({"role": "user", "content": user_turn})
            if bot_turn:
                messages.append({"role": "assistant", "content": bot_turn})
        messages.append({"role": "user", "content": message})

        # Apply chat template
        inputs = tokenizer.apply_chat_template(
            messages,
            tokenize=True,
            add_generation_prompt=True,
            return_tensors="pt",
        )

        # Move to GPU if available
        if torch.cuda.is_available():
            inputs = inputs.to("cuda")

        # Generate response
        with torch.no_grad():
            outputs = model.generate(
                input_ids=inputs,
                max_new_tokens=max_tokens,
                temperature=temperature,
                top_p=0.9,
                do_sample=True,
                use_cache=True,
                pad_token_id=tokenizer.eos_token_id,
            )

        # Decode only the newly generated tokens (slice off the prompt).
        response = tokenizer.decode(
            outputs[0][inputs.shape[-1]:], skip_special_tokens=True
        ).strip()

        if not response:
            response = "I apologize, but I couldn't generate a proper response. Could you please rephrase your question about Ideon Audio products?"

        return response

    except Exception as e:
        print(f"Error generating response: {e}")
        return f"❌ I encountered an error while processing your question. Please try again. Error details: {str(e)}"
84
+
85
def create_interface():
    """Build and return the Gradio Blocks UI for the support assistant.

    Returns:
        gr.Blocks: The assembled demo, ready for `.launch()`.
    """

    # Custom CSS for better styling
    custom_css = """
    .gradio-container {
        max-width: 1000px !important;
    }
    .header {
        text-align: center;
        background: linear-gradient(90deg, #1e3a8a, #3b82f6);
        color: white;
        padding: 20px;
        border-radius: 10px;
        margin-bottom: 20px;
    }
    .examples-row {
        margin-top: 20px;
    }
    """

    with gr.Blocks(css=custom_css, title="Ideon Audio Support Assistant") as demo:

        # Header banner
        gr.HTML("""
        <div class="header">
            <h1>🎵 Ideon Audio Technical Support Assistant</h1>
            <p>Expert knowledge for high-end audio equipment</p>
        </div>
        """)

        # Description
        gr.Markdown("""
        Welcome to the **Ideon Audio Technical Support Assistant**! I'm here to help you with:

        🔹 **Product Information** - Specifications, features, and capabilities
        🔹 **Setup & Installation** - Connection procedures and configuration
        🔹 **Troubleshooting** - Diagnosing and resolving technical issues
        🔹 **Warranty Support** - Coverage details and service procedures
        🔹 **Technical Questions** - Performance metrics and compatibility

        **Supported Products**: Absolute ε DAC, ΙΩΝ DAC, eos DAC, Absolute Stream, USB Re-clockers, and more!
        """)

        # Chat interface with advanced settings.
        #
        # FIX: the settings sliders are rendered first (in the side column) and
        # then passed to gr.ChatInterface via `additional_inputs`, which is the
        # supported way to feed extra arguments to the chat function. The
        # original code assigned `chatbot.fn = ...` AFTER construction, which
        # does not rewire the already-registered event handlers, so the sliders
        # had no effect. Components already rendered in a parent Blocks are not
        # re-rendered inside the ChatInterface accordion.
        with gr.Row():
            chat_col = gr.Column(scale=3)

            with gr.Column(scale=1):
                gr.Markdown("### ⚙️ Settings")

                temperature = gr.Slider(
                    minimum=0.1,
                    maximum=1.0,
                    value=0.7,
                    step=0.1,
                    label="Response Creativity",
                    info="Higher = more creative, Lower = more focused",
                )

                max_tokens = gr.Slider(
                    minimum=50,
                    maximum=500,
                    value=256,
                    step=50,
                    label="Response Length",
                    info="Maximum tokens in response",
                )

        # Re-enter the left column to place the chat UI there.
        with chat_col:
            chatbot = gr.ChatInterface(
                fn=get_ideon_response,
                additional_inputs=[temperature, max_tokens],
                chatbot=gr.Chatbot(
                    height=500,
                    placeholder="Ask me anything about Ideon Audio products...",
                    avatar_images=(None, "🎵"),
                ),
                textbox=gr.Textbox(
                    placeholder="Type your question about Ideon Audio products here...",
                    container=False,
                    scale=7,
                ),
                submit_btn="Ask Question",
                retry_btn="🔄 Retry",
                undo_btn="↩️ Undo",  # FIX: repaired mojibake in the original label
                clear_btn="🗑️ Clear Chat",
            )

        # Example questions
        gr.Markdown("### 💡 Example Questions")

        example_questions = [
            "What is the recommended burn-in period for the Absolute ε DAC?",
            "How do I connect the Absolute DAC to my audio system?",
            "My DAC won't lock onto the digital signal. What should I do?",
            "What are the main technical specifications of the Absolute ε DAC?",
            "What's covered under the Ideon Audio warranty?",
            "How do I navigate to the General Settings screen?",
            "What digital filters are available on the DAC?",
            "What should I do if my DAC malfunctions?",
            "Can I connect the DAC directly to a power amplifier?",
            "What are the dimensions and weight of the Absolute E?",
        ]

        # Lay examples out two per column; slicing makes the redundant
        # in-range bounds checks of the original loop unnecessary.
        with gr.Row():
            for pair_start in range(0, len(example_questions), 2):
                with gr.Column():
                    for question in example_questions[pair_start:pair_start + 2]:
                        gr.Examples(
                            examples=[[question]],
                            inputs=chatbot.textbox,
                            label=None,
                        )

        # Footer
        gr.Markdown("""
        ---

        **About this Assistant**: This AI model has been fine-tuned specifically on Ideon Audio product documentation
        to provide expert technical support. It covers the complete product line including DACs, streamers,
        USB re-clockers, and network optimizers.

        **Note**: For warranty claims or complex technical issues, please contact your authorized Ideon Audio dealer
        or email info@ideonaudio.com directly.

        *Powered by Meta Llama 3.1 8B + Unsloth fine-tuning*
        """)

    return demo
225
+
226
+ # Create and launch the interface
227
+ if __name__ == "__main__":
228
+ print("🚀 Starting Ideon Audio Support Assistant...")
229
+
230
+ # Pre-load the model (optional, for faster first response)
231
+ print("🔄 Pre-loading model for faster responses...")
232
+ load_model()
233
+
234
+ # Create and launch the interface
235
+ demo = create_interface()
236
+ demo.launch(
237
+ share=False,
238
+ server_name="0.0.0.0",
239
+ server_port=7860,
240
+ show_error=True
241
+ )
space_config.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "title": "Ideon Audio Support Assistant",
3
+ "emoji": "🎵",
4
+ "colorFrom": "blue",
5
+ "colorTo": "purple",
6
+ "sdk": "gradio",
7
+ "sdk_version": "4.44.0",
8
+ "app_file": "app.py",
9
+ "pinned": false,
10
+ "license": "apache-2.0",
11
+ "models": ["ainovatronsec/ideoaudio"],
12
+ "tags": [
13
+ "technical-support",
14
+ "audio-equipment",
15
+ "customer-service",
16
+ "ideon-audio",
17
+ "llama"
18
+ ],
19
+ "short_description": "AI-powered technical support for Ideon Audio high-end audio equipment",
20
+ "hardware": "cpu-basic"
21
+ }