Update app.py
app.py CHANGED

@@ -1,145 +1,97 @@
-import
-import
-import
…
-"
-"
-"
-"
…
-            if 'result' not in data or not isinstance(data['result'], list):
-                continue
-            trade = data['result'][0]
-            p = float(trade['price'])
-            v = float(trade['amount'])
-            t = int(trade['create_time_ms'])
-            tick_times_b.append(t / 1000)
-            price_buffer_b.append(p)
-            volume_buffer_b.append(v)
-
-async def stream_binance(handler_queue):
-    uri = f"wss://stream.binance.com:9443/ws/{PAIR.lower()}@bookTicker"
-    async with websockets.connect(uri) as ws:
-        async for msg in ws:
-            data = json.loads(msg)
-            tick = {
-                'T': data['u'],
-                'a': float(data['a']),
-                'b': float(data['b']),
-                'bv': random.uniform(100, 200),  # mock depth
-                'av': random.uniform(100, 200)   # mock depth
-            }
-            price = (tick['a'] + tick['b']) / 2
-            volume = random.uniform(1, 10)
-            tick_times_a.append(time.time())
-            price_buffer_a.append(price)
-            volume_buffer_a.append(volume)
-            await handler_queue.put(tick)
-
-# --- Coordination Loop ---
-async def monitor(handler_queue):
-    while True:
-        msg_a = await handler_queue.get()
-        msg_b = {
-            'T': int(tick_times_b[-1] * 1000),
-            'a': price_buffer_b[-1] + 0.5,
-            'b': price_buffer_b[-1] - 0.5,
-            'bv': random.uniform(50, 150),
-            'av': random.uniform(50, 150)
-        }
-        kpi_vector = compute_kpis(msg_a, msg_b)
-        score = evaluate_entry(kpi_vector)
-        print(f"E_t = {score:.3f} | KPIs = {np.round(kpi_vector, 4)}")
-        if score > ENTRY_THRESHOLD:
-            print(">>> Entry Signal Triggered")
-
-# --- Main Entrypoint ---
-async def main():
-    handler_queue = asyncio.Queue()
-    await asyncio.gather(
-        stream_binance(handler_queue),
-        stream_gate(),
-        monitor(handler_queue)
-    )
-
-asyncio.run(main())
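For reference, the deleted stream_binance handler reads the u, a, and b fields of Binance's public bookTicker stream, whose messages have this shape (values illustrative):

# One bookTicker message, as parsed by json.loads(msg) above:
bookticker_msg = {
    "u": 400900217,      # order book update id -> tick['T']
    "s": "BTCUSDT",      # symbol
    "b": "25.35190000",  # best bid price       -> tick['b']
    "B": "31.21000000",  # best bid quantity
    "a": "25.36520000",  # best ask price       -> tick['a']
    "A": "40.66000000",  # best ask quantity
}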
+import os, json, requests, torch
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
+import gradio as gr
+
+def fetch_file(space_id, filename):
+    url = f"https://huggingface.co/spaces/{space_id}/raw/main/{filename}"
+    try:
+        r = requests.get(url, timeout=10)
+        return r.text if r.status_code == 200 else ""
+    except requests.RequestException:
+        return ""
+
+def build_prompt(readme, code, reqs):
+    return f"""<s>[INST] You are a protocol intelligence model. Determine if this Hugging Face Space is monetized on-chain.
+
+Return strictly in this JSON format:
+{{
+  "is_revenue_ready": true|false,
+  "confidence": float,
+  "blockers": [ "reason 1", "reason 2" ],
+  "summary": "short summary"
+}}
+
+README:
+{readme}
+
+Code:
+{code}
+
+Dependencies:
+{reqs}
+[/INST]
+"""
+
+def run_audit(space_id, model_id):
+    readme = fetch_file(space_id, "README.md")
+    code = fetch_file(space_id, "app.py")
+    reqs = fetch_file(space_id, "requirements.txt")
+    prompt = build_prompt(readme, code, reqs)
+
+    try:
+        # Load the checkpoint locally (no API key); float32 keeps it CPU-compatible.
+        tokenizer = AutoTokenizer.from_pretrained(model_id)
+        model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32)
+        pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512)
+        result = pipe(prompt)[0]["generated_text"]
+    except Exception as e:
+        return {"error": f"Model load failed: {str(e)}"}
+
+    try:
+        # generated_text echoes the prompt, which itself contains a JSON template,
+        # so parse braces only in the completion that follows the prompt.
+        completion = result[len(prompt):]
+        blob = completion.split("{", 1)[1].rsplit("}", 1)[0]
+        js = json.loads("{" + blob + "}")
+        js["space_id"] = space_id
+        return js
+    except Exception as e:
+        return {"error": f"Output parse failed: {str(e)}", "raw": result}
+
+def batch_audit():
+    spaces = open("space_list.txt").read().splitlines()
+    os.makedirs("out/unified_audit", exist_ok=True)
+    model_id = "mistralai/Mistral-7B-Instruct-v0.1"
+    for sid in spaces:
+        result = run_audit(sid, model_id)
+        with open(f"out/unified_audit/{sid.replace('/', '__')}.json", "w") as f:
+            json.dump(result, f, indent=2)
+        print(f"✅ {sid}: {result.get('summary', result)}")
+
+# Gradio UI
+with gr.Blocks() as demo:
+    gr.Markdown("# 🔍 HF Space Revenue Readiness Auditor (33x LLMs, No API Keys)")
+    sid = gr.Textbox(label="Space ID (e.g. username/space-name)")
+    model = gr.Dropdown(
+        label="Select LLM Model",
+        choices=[
+            "mistralai/Mistral-7B-Instruct-v0.1",
+            "mistralai/Mixtral-8x7B-Instruct-v0.1",
+            "google/gemma-2b-it",
+            "microsoft/phi-2",
+            "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+            "NousResearch/Nous-Capybara-7B-V1",
+            "HuggingFaceH4/zephyr-7b-alpha",
+            "Intel/neural-chat-7b-v3",
+            "tiiuae/falcon-rw-1b",
+            "EleutherAI/pythia-1.4b",
+            "EleutherAI/pythia-2.8b",
+            "Open-Orca/Mistral-7B-OpenOrca"
+            # Extend to full 33 here
+        ],
+        value="mistralai/Mistral-7B-Instruct-v0.1"
+    )
+    run = gr.Button("Run Audit")
+    output = gr.JSON(label="Audit Result")
+    run.click(fn=run_audit, inputs=[sid, model], outputs=output)
+
+# Uncomment to run CLI batch:
+# batch_audit()
+
+demo.launch()
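For reference, run_audit can also be exercised without the UI. A minimal sketch, assuming the functions above are already in scope (importing app.py as a module would also launch the Gradio app) and using a placeholder Space id:

# Placeholder Space id; any public username/space-name works.
report = run_audit("username/space-name", "TinyLlama/TinyLlama-1.1B-Chat-v1.0")

# On success the dict holds the keys requested in the prompt
# (is_revenue_ready, confidence, blockers, summary) plus space_id;
# on failure it holds "error" and possibly the raw model output.
print(json.dumps(report, indent=2))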
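The brace-splitting parse in run_audit assumes the model emits exactly one JSON object. Instruction-tuned models often wrap JSON in extra prose, so a more defensive variant could look like the sketch below; extract_json is an illustrative helper, not part of this commit:

import json
import re

def extract_json(completion):
    # Take the span from the first '{' to the last '}' in the
    # completion and attempt to parse it as JSON; None on failure.
    match = re.search(r"\{.*\}", completion, re.DOTALL)
    if match is None:
        return None
    try:
        return json.loads(match.group(0))
    except json.JSONDecodeError:
        return None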