# cryptodashboard/app.py — CryptoCreeper (commit 07cb30f)
import gradio as gr
import requests
import torch
import re
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Shared grounding string injected into the chat system prompt; overwritten by
# fetch_crypto_data() on every successful market refresh.
MARKET_CONTEXT = "Market data is loading..."
MODEL_ID = "Qwen/Qwen2.5-1.5B-Instruct"
# NOTE(review): `device` is computed but never used — placement is actually
# handled by device_map="auto" below; confirm before removing.
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
# Downloads/loads model weights at import time (slow, network side effect).
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype="auto",
    device_map="auto"
)
def fetch_crypto_data():
    """Fetch live market data for four coins from the CoinGecko public API.

    Side effect: on success, rewrites the module-level ``MARKET_CONTEXT``
    string used to ground the LLM system prompt. On any failure the previous
    context is kept (best-effort refresh).

    Returns:
        list[dict] | None: one dict per coin with keys ``name``, ``symbol``,
        ``price``, ``chg_24``, ``mcap``, ``history``; ``None`` on any
        network, HTTP, or payload-shape failure.
    """
    url = "https://api.coingecko.com/api/v3/coins/markets"
    params = {
        "vs_currency": "usd",
        "ids": "bitcoin,ethereum,solana,binancecoin",
        "order": "market_cap_desc",
        "per_page": 4,
        "page": 1,
        "sparkline": "true",
        "price_change_percentage": "24h"
    }
    global MARKET_CONTEXT
    try:
        response = requests.get(url, params=params, timeout=10)
        if response.status_code != 200:
            return None
        data = response.json()
        context_parts = []
        processed_data = []
        for coin in data:
            symbol = coin['symbol'].upper()
            price = coin['current_price']
            # The API may return null for these fields; coerce to 0 so the
            # numeric formatting below cannot fail.
            chg_24 = coin.get('price_change_percentage_24h', 0) or 0
            mcap = coin['market_cap'] or 0
            history = coin.get('sparkline_in_7d', {}).get('price', [])
            context_parts.append(f"[{symbol}: ${price}, 24h:{chg_24:.1f}%]")
            processed_data.append({
                "name": coin['name'], "symbol": symbol, "price": price,
                "chg_24": chg_24, "mcap": mcap, "history": history
            })
        MARKET_CONTEXT = " | ".join(context_parts)
        return processed_data
    except (requests.RequestException, ValueError, KeyError, TypeError):
        # Narrowed from a bare `except Exception`: covers network errors,
        # bad JSON, and unexpected payload shapes, without hiding real bugs.
        return None
def chat_logic(user_input, history):
    """Produce one assistant reply grounded in the latest market snapshot.

    Refreshes MARKET_CONTEXT first, rebuilds the full chat transcript from
    Gradio's (user, assistant) history pairs, and generates with the local
    Qwen model. Any ``<think>...</think>`` blocks are stripped from the
    decoded output before it is returned.
    """
    fetch_crypto_data()  # best-effort refresh of MARKET_CONTEXT
    messages = [{
        "role": "system",
        "content": f"You are a professional Crypto Assistant. LIVE DATA: {MARKET_CONTEXT}. Answer concisely.",
    }]
    for human, assistant in history:
        messages.extend([
            {"role": "user", "content": human},
            {"role": "assistant", "content": assistant},
        ])
    messages.append({"role": "user", "content": user_input})
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer([prompt], return_tensors="pt").to(model.device)
    generated = model.generate(
        **inputs, max_new_tokens=512, do_sample=True, temperature=0.7
    )
    # Drop the echoed prompt tokens, keeping only the newly generated tail.
    continuations = [
        out[len(src):] for src, out in zip(inputs.input_ids, generated)
    ]
    reply = tokenizer.batch_decode(continuations, skip_special_tokens=True)[0]
    return re.sub(r'<think>.*?</think>\s*\n?', '', reply, flags=re.DOTALL).strip()
import plotly.graph_objects as go
def create_sparkline(history, chg_24):
    """Build a tiny transparent 7-day sparkline, green on gains, red on losses."""
    trend_color = "#10B981" if chg_24 >= 0 else "#EF4444"
    fig = go.Figure()
    fig.add_trace(
        go.Scatter(
            y=history,
            mode='lines',
            fill='tozeroy',
            line=dict(color=trend_color, width=2),
        )
    )
    # Strip all chrome: no axes, legend, margins, or background — the line
    # itself is the whole widget.
    fig.update_layout(
        template="plotly_dark",
        paper_bgcolor='rgba(0,0,0,0)',
        plot_bgcolor='rgba(0,0,0,0)',
        margin=dict(l=0, r=0, t=0, b=0),
        xaxis=dict(visible=False),
        yaxis=dict(visible=False),
        showlegend=False,
        height=60,
    )
    return fig
def create_card_html(coin):
    """Render one coin dict as a small dark-themed HTML stat card."""
    gaining = coin['chg_24'] >= 0
    accent = "#10B981" if gaining else "#EF4444"
    return f"""
    <div style="background-color: #1F2937; padding: 15px; border-radius: 10px; border: 1px solid #374151; color: white;">
        <div style="display: flex; justify-content: space-between;">
            <b>{coin['name']} ({coin['symbol']})</b>
            <span>${coin['price']:,.2f}</span>
        </div>
        <div style="color: {accent}; font-size: 0.8em;">{coin['chg_24']:.2f}% (24h)</div>
    </div>
    """
def refresh_dashboard():
    """Return the interleaved (card HTML, sparkline plot) outputs for all 4 coins.

    On a failed fetch, emits 8 no-op updates so every component keeps its
    current content.
    """
    coins = fetch_crypto_data()
    if not coins:
        return [gr.update()] * 8
    return [
        part
        for coin in coins
        for part in (
            create_card_html(coin),
            create_sparkline(coin['history'], coin['chg_24']),
        )
    ]
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.HTML("<h1 style='text-align: center;'>⚡ Local Qwen CryptoDash</h1>")
    # One (HTML card, sparkline plot) pair per tracked coin, created in
    # column order to match refresh_dashboard()'s output order.
    card_components = []
    with gr.Row():
        for _ in range(4):
            with gr.Column():
                card_components.append(gr.HTML())
                card_components.append(gr.Plot(container=False))
    btn = gr.Button("Update Market")
    gr.ChatInterface(fn=chat_logic)
    # Populate the cards once on page load and again on demand.
    demo.load(refresh_dashboard, outputs=card_components)
    btn.click(refresh_dashboard, outputs=card_components)
if __name__ == "__main__":
    # Start the Gradio server only when run as a script (not on import).
    demo.launch()