spanofzero committed on
Commit
2ef87cc
·
verified ·
1 Parent(s): 571c4c2

update cutoff correct qwen

Browse files
Files changed (1) hide show
  1. app.py +37 -1
app.py CHANGED
@@ -55,4 +55,40 @@ def generate_response(message, history):
55
 
56
  # Correct format for conversational task
57
  messages = [{"role": "system", "content": system_instruction}]
58
- for
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
 
56
  # Correct format for conversational task
57
  messages = [{"role": "system", "content": system_instruction}]
58
+ for user_msg, assistant_msg in history:
59
+ messages.append({"role": "user", "content": user_msg})
60
+ messages.append({"role": "assistant", "content": assistant_msg})
61
+ messages.append({"role": "user", "content": message})
62
+
63
+ try:
64
+ # Switching to chat_completion for model compatibility
65
+ response = client.chat_completion(
66
+ messages,
67
+ max_tokens=1024,
68
+ stream=False
69
+ )
70
+ return response.choices[0].message.content
71
+ except Exception as error:
72
+ return f"System Error: {str(error)}. Verify your token permissions."
73
+
74
# --- UI layer -------------------------------------------------------------
# Dark "terminal" look: override Gradio's default palette and hide the footer.
custom_css = """
body, .gradio-container { background-color: #0b0f19 !important; }
footer {display: none !important}
.message.user { background-color: #1e293b !important; border: 1px solid #3b82f6 !important; }
.message.bot { background-color: #0f172a !important; color: #60a5fa !important; }
"""

# Soft theme with a blue primary hue, matching the accent colors in the CSS.
blue_theme = gr.themes.Soft(primary_hue="blue")

# Canned prompts rendered beneath the chat input box.
example_prompts = [
    "Run grid diagnostic",
    "Calculate the integer distribution for 120 units across 3 nodes.",
    "Explain network latency using technical terminology.",
]

# Assemble the chat UI around the `generate_response` callback defined above.
with gr.Blocks(theme=blue_theme, css=custom_css) as demo:
    gr.Markdown("# Advanced Logic Interface")
    gr.ChatInterface(
        fn=generate_response,
        description="Inference layer utilizing state-hold logic.",
        examples=example_prompts,
    )

if __name__ == "__main__":
    demo.launch()