|
|
import gradio as gr |
|
|
import requests |
|
|
import json |
|
|
import os |
|
|
from typing import List, Dict, Optional |
|
|
|
|
|
class BinericAPI:
    """Thin client for the Bineric REST API: balance lookup and chat completions."""

    # Network timeout (seconds) for every request so a dead endpoint cannot
    # hang the UI thread indefinitely.
    REQUEST_TIMEOUT = 30

    def __init__(self):
        """Read the API key from the ``Key`` environment variable.

        Raises:
            ValueError: if the key is missing or still the placeholder value.
        """
        self.api_key = os.environ.get("Key")
        if not self.api_key or self.api_key == "YOUR_API_KEY":
            raise ValueError("API Key not found. Please add your API key in the 'Secrets' tab (Key = 'YOUR_API_KEY')")

        self.base_url = "https://api.bineric.com/api/v1"
        self.headers = {'api-key': self.api_key}

    def get_balance(self) -> Dict:
        """Fetch balance from Bineric API.

        Returns:
            The decoded JSON payload on success, or ``{"error": "..."}``
            describing the failure (network error or non-2xx status).
        """
        try:
            response = requests.get(
                f'{self.base_url}/monitoring/balance',
                headers=self.headers,
                timeout=self.REQUEST_TIMEOUT,  # fail fast instead of hanging forever
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            return {"error": f"API request failed: {str(e)}"}

    def chat_completion(self, model: str, messages: List[Dict],
                        stream: bool = False, temperature: float = 0.7,
                        max_response_length: Optional[int] = None,
                        top_p: float = 0.95) -> Dict:
        """Get chat completion from various AI models.

        Args:
            model: model identifier understood by the Bineric API.
            messages: OpenAI-style list of {"role": ..., "content": ...} dicts.
            stream: forwarded in the ``options`` payload (this client still
                reads the response as one JSON body).
            temperature: sampling temperature.
            max_response_length: optional response-length cap; omitted from
                the payload when falsy.
            top_p: nucleus-sampling parameter.

        Returns:
            The decoded JSON response on success, or ``{"error": "..."}``.
        """
        try:
            payload = {
                "model": model,
                "messages": messages,
                "options": {
                    "stream": stream,
                    "temperature": temperature,
                    "top_p": top_p
                }
            }

            # Only send the cap when the caller asked for one.
            if max_response_length:
                payload["options"]["max_response_length"] = max_response_length

            response = requests.post(
                f'{self.base_url}/ai/chat/completions',
                headers=self.headers,
                json=payload,
                timeout=self.REQUEST_TIMEOUT,  # fail fast instead of hanging forever
            )
            response.raise_for_status()

            return response.json()
        except requests.exceptions.RequestException as e:
            return {"error": f"Chat completion failed: {str(e)}"}
|
|
|
|
|
|
|
|
# Build the shared API client once at import time. A missing/placeholder key
# deliberately leaves ``api_client`` as None so the UI can show a helpful
# message instead of crashing on startup.
try:
    api_client = BinericAPI()
except ValueError:
    api_client = None
|
|
|
|
|
|
|
|
# Selectable models grouped by provider. The chat tab's dropdown flattens
# every group into one list; the provider filter narrows it to one group.
MODELS = {
    "OpenAI": [
        "gpt-5", "gpt-5-nano", "gpt-4", "o1",
        "o3-mini", "gpt-oss-120b", "gpt-oss-20b",
    ],
    "Google": [
        "gemini-2.0-flash", "gemini-2.5-flash",
        "gemini-2.5-pro", "gemini-3-pro-preview",
    ],
    "Anthropic": [
        "claude-3.7-sonnet", "claude-sonnet-4", "claude-haiku-4-5",
    ],
    "DeepSeek": [
        "deepseek-v3.1-terminus", "deepseek-r1-0528",
    ],
    "Meta": [
        "meta-llama-3.1-8b", "meta-llama-3.1-405b", "meta-llama-3.3-70b",
    ],
    "Other": [
        "norskgpt", "qwen-3-coder-480b", "qwen-3-235b",
        "mistral_large_24_02", "llama4-maverick", "llama4-scout",
    ],
}
|
|
|
|
|
def get_balance():
    """Fetch the account balance via the shared client and format it for display."""
    if not api_client:
        return "❌ API Key not found. Please add your API key in the 'Secrets' tab (Key = 'YOUR_API_KEY')"

    data = api_client.get_balance()

    # The client wraps transport failures in an {"error": ...} dict.
    if "error" in data:
        return f"❌ Error: {data['error']}"

    pretty = json.dumps(data, indent=2)
    return f"✅ Balance retrieved successfully:\n\n{pretty}"
|
|
|
|
|
def chat_complete(model, system_prompt, user_message, temperature, max_tokens, top_p, history=None):
    """Generate chat completion.

    Args:
        model: model identifier passed straight through to the API.
        system_prompt: optional system instruction; ignored when blank/None.
        user_message: the new user turn.
        temperature: sampling temperature.
        max_tokens: response-length cap; a value <= 0 means "no explicit limit".
        top_p: nucleus-sampling parameter.
        history: prior turns as [{"user": ..., "assistant": ...}, ...] or None.

    Returns:
        (cleared_input, updated_history, display_text) — the first element is
        always "" so the UI wiring empties the input box.
    """
    if not api_client:
        return "", history, "❌ API Key not found. Please add your API key in the 'Secrets' tab"

    # Normalize up front so every return path hands back a list, never None,
    # and so we never mutate the caller's list in place.
    history = list(history) if history else []

    # Don't spend an API call on a blank message.
    if not user_message or not user_message.strip():
        return "", history, "⚠️ Please enter a message first."

    messages = _build_chat_messages(system_prompt, history, user_message)

    result = api_client.chat_completion(
        model=model,
        messages=messages,
        temperature=temperature,
        max_response_length=max_tokens if max_tokens > 0 else None,
        top_p=top_p
    )

    if "error" in result:
        return "", history, f"❌ Error: {result['error']}"

    choices = result.get("choices") or []
    if not choices:
        return "", history, "❌ No response generated from the model"

    response_text = choices[0].get("message", {}).get("content", "No response generated")
    history.append({
        "user": user_message,
        "assistant": response_text
    })

    # Surface token accounting when the API reports it.
    usage_info = ""
    if "usage" in result:
        usage = result["usage"]
        usage_info = f"\n\n**Usage:**\n- Prompt tokens: {usage.get('prompt_tokens', 'N/A')}\n- Completion tokens: {usage.get('completion_tokens', 'N/A')}\n- Total tokens: {usage.get('total_tokens', 'N/A')}"

    return "", history, response_text + usage_info


def _build_chat_messages(system_prompt, history, user_message):
    """Assemble the OpenAI-style messages array: system prompt + history + new turn."""
    messages = []
    if system_prompt and system_prompt.strip():
        messages.append({"role": "system", "content": system_prompt})
    for entry in history:
        messages.append({"role": "user", "content": entry["user"]})
        # Skip assistant turns that never got a response.
        if entry["assistant"]:
            messages.append({"role": "assistant", "content": entry["assistant"]})
    messages.append({"role": "user", "content": user_message})
    return messages
|
|
|
|
|
def clear_chat():
    """Reset the chat tab: empty history, empty input box, empty response area."""
    fresh_history = []
    return fresh_history, "", ""
|
|
|
|
|
|
|
|
def _history_to_chatbot(history):
    """Convert internal history entries into gr.Chatbot pairs.

    Chat state stores [{"user": ..., "assistant": ...}, ...]; the Chatbot
    component (default tuple format — assumes Gradio's "tuples" mode, TODO
    confirm against the installed Gradio version) wants
    [[user_text, assistant_text], ...].
    """
    return [[entry["user"], entry["assistant"]] for entry in (history or [])]


# Theme belongs on the Blocks constructor, not on launch().
with gr.Blocks(title="Bineric AI Dashboard", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 Bineric AI Dashboard")
    gr.Markdown("Balance checking and AI chat completion using Bineric API")

    with gr.Tabs():

        with gr.Tab("💰 Balance"):
            gr.Markdown("### Check your API balance")
            with gr.Accordion("ℹ️ Setup Instructions", open=False):
                gr.Markdown("""
                1. **Add your API key in Gradio Secrets:**
                   - Go to the "Secrets" tab
                   - Add a new secret with:
                     - Key: `Key`
                     - Value: `YOUR_API_KEY_HERE`

                2. **Click the button below to fetch balance**
                """)

            balance_output = gr.Textbox(
                label="Balance Information",
                placeholder="Click 'Get Balance' to see your balance...",
                lines=15
            )

            with gr.Row():
                get_balance_btn = gr.Button("🔄 Get Balance", variant="primary")
                clear_balance_btn = gr.Button("🗑️ Clear")

            get_balance_btn.click(get_balance, outputs=balance_output)
            clear_balance_btn.click(lambda: "", outputs=balance_output)

        with gr.Tab("💬 AI Chat"):
            gr.Markdown("### Chat with various AI models")

            with gr.Row():

                with gr.Column(scale=1):

                    model_selector = gr.Dropdown(
                        label="Select AI Model",
                        choices=[model for models in MODELS.values() for model in models],
                        value="gpt-5",
                        interactive=True
                    )

                    provider_filter = gr.Dropdown(
                        label="Filter by Provider",
                        choices=list(MODELS.keys()),
                        value="OpenAI",
                        interactive=True
                    )

                    system_prompt = gr.Textbox(
                        label="System Prompt (optional)",
                        placeholder="You are a helpful assistant...",
                        lines=3
                    )

                    with gr.Accordion("⚙️ Advanced Parameters", open=False):
                        temperature = gr.Slider(
                            label="Temperature",
                            minimum=0.0,
                            maximum=2.0,
                            value=0.7,
                            step=0.1
                        )

                        max_tokens = gr.Slider(
                            label="Max Tokens (0 = unlimited)",
                            minimum=0,
                            maximum=10000,
                            value=0,
                            step=100
                        )

                        top_p = gr.Slider(
                            label="Top-p",
                            minimum=0.0,
                            maximum=1.0,
                            value=0.95,
                            step=0.05
                        )

                with gr.Column(scale=2):

                    chatbot = gr.Chatbot(
                        label="Conversation",
                        height=400
                    )

                    chat_state = gr.State([])

                    user_input = gr.Textbox(
                        label="Your Message",
                        placeholder="Type your message here...",
                        lines=3
                    )

                    response_info = gr.Markdown("")

                    with gr.Row():
                        send_btn = gr.Button("📤 Send", variant="primary")
                        clear_chat_btn = gr.Button("🗑️ Clear Chat")

                    with gr.Row():
                        gr.Markdown("**Tip:** Press Shift+Enter for new line, Enter to send")

            def update_model_list(provider):
                # Swap the dropdown's choices to the chosen provider's models.
                return gr.Dropdown(choices=MODELS[provider], value=MODELS[provider][0])

            provider_filter.change(
                update_model_list,
                inputs=provider_filter,
                outputs=model_selector
            )

            def send_message(model, system, message, temp, tokens, top, history):
                # Run the completion, then mirror the updated history into the
                # Chatbot component (which was previously created but never
                # updated, so the conversation never displayed).
                cleared_input, new_history, info = chat_complete(
                    model, system, message, temp, tokens, top, history
                )
                return cleared_input, new_history, _history_to_chatbot(new_history), info

            send_btn.click(
                send_message,
                inputs=[model_selector, system_prompt, user_input, temperature, max_tokens, top_p, chat_state],
                outputs=[user_input, chat_state, chatbot, response_info]
            )

            user_input.submit(
                send_message,
                inputs=[model_selector, system_prompt, user_input, temperature, max_tokens, top_p, chat_state],
                outputs=[user_input, chat_state, chatbot, response_info]
            )

            # Clear state, the Chatbot display, the input box, and the info area.
            clear_chat_btn.click(
                lambda: ([], [], "", ""),
                outputs=[chat_state, chatbot, user_input, response_info]
            )

        with gr.Tab("📚 Model Info"):
            gr.Markdown("### Available AI Models")

            for provider, models in MODELS.items():
                with gr.Accordion(f"🔹 {provider}", open=False):
                    model_table = []
                    for model in models:
                        model_table.append(f"- **{model}**")

                    gr.Markdown("\n".join(model_table))

            gr.Markdown("""
            ---
            **API Documentation:**
            - All models use the same chat completion endpoint
            - Request format: JSON with messages array
            - Support for system prompts and conversation history
            - Token usage tracking available in responses
            """)

    gr.Markdown("""
    ---
    **Note:** This dashboard uses the Bineric API. Make sure your API key has access to the models you want to use.
    Balance information is fetched from the monitoring endpoint.
    """)
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # ``Blocks.launch()`` does not accept a ``theme`` argument — themes belong
    # on the ``gr.Blocks(...)`` constructor — so passing one here raises a
    # TypeError at startup. Only launch options go here.
    demo.launch(
        share=True
    )