luguog committed on
Commit
bb0633d
·
verified ·
1 Parent(s): 3926afa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +95 -143
app.py CHANGED
@@ -1,145 +1,97 @@
1
- import asyncio
2
- import websockets
3
- import json
4
- import time
5
- import numpy as np
6
- from collections import deque
7
- from scipy.stats import entropy
8
- from statistics import mean, stdev
9
-
10
# --- Config ---
PAIR = 'BTCUSDT'          # trading-pair symbol used by both exchange streams
VWAP_WINDOW = 20          # ticks kept in the rolling VWAP window
CVC_WINDOW = 30           # ticks used for the cross-venue price correlation
ENTRY_THRESHOLD = 0.85    # sigmoid score above which an entry signal prints
MAX_LATENCY_MS = 30       # NOTE(review): defined but never read in this file

# Internal buffers: module-level shared state. The stream handlers append,
# compute_kpis/monitor read. *_a holds Binance data, *_b holds Gate.io data.
# NOTE(review): the two price buffers fill at independent tick rates, so
# their lengths are not guaranteed to match at any instant.
price_buffer_a, price_buffer_b = deque(maxlen=CVC_WINDOW), deque(maxlen=CVC_WINDOW)
volume_buffer_a, volume_buffer_b = deque(maxlen=VWAP_WINDOW), deque(maxlen=VWAP_WINDOW)
vwap_buffer_a, vwap_buffer_b = deque(maxlen=VWAP_WINDOW), deque(maxlen=VWAP_WINDOW)  # NOTE(review): never written or read
tick_times_a, tick_times_b = deque(maxlen=2), deque(maxlen=2)  # last two tick timestamps per venue
22
-
23
- # --- Utility Functions ---
24
def compute_vwap(prices, volumes):
    """Return the volume-weighted average price of paired buffers.

    Args:
        prices: iterable of trade prices.
        volumes: iterable of trade volumes aligned with *prices*.

    Returns:
        The VWAP, or 0 when total volume is zero. FIX: the original
        guarded on the truthiness of *volumes* but divided by
        sum(volumes), so a non-empty buffer of zero volumes raised
        ZeroDivisionError.
    """
    volumes = list(volumes)  # materialize so we can sum and zip safely
    total_volume = sum(volumes)
    if not total_volume:
        return 0
    return sum(p * v for p, v in zip(prices, volumes)) / total_volume
26
-
27
def compute_entropy(distribution):
    """Shannon entropy (nats) of a distribution given as raw counts.

    Returns 0 when the total mass is zero — FIX: the original divided
    by sum(distribution) unguarded, producing nan probabilities that
    slipped past the `all(probs)` check (nan is truthy) and returned a
    nan entropy. Also returns 0 when any bin is empty, preserving the
    original guard's treatment of a degenerate book as zero-entropy.
    """
    total = sum(distribution)
    if total == 0:
        return 0
    probs = np.array(distribution) / total
    return entropy(probs) if all(probs) else 0
30
-
31
def sigmoid(x):
    """Logistic squashing function: maps any real x into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-x))
32
-
33
- # --- KPI Computation ---
34
def compute_kpis(msg_a, msg_b):
    """Build the 9-element KPI vector from the two venue snapshots.

    msg_a / msg_b are dicts with keys:
        'T'  -- event timestamp in ms,
        'a'  -- best ask, 'b' -- best bid,
        'bv' -- bid-side depth, 'av' -- ask-side depth.

    Reads the module-level price/volume/tick-time buffers as side input.

    Returns:
        np.ndarray of [spread_eff, latency, micro_skew, vwap_div,
        ob_entropy, tick_velocity, vdm, funding_skew (placeholder 0), cvc].
    """
    t_a, t_b = msg_a['T'], msg_b['T']
    latency = abs(t_a - t_b) / 1000  # ms -> seconds

    ask_a, bid_a = msg_a['a'], msg_a['b']
    ask_b, bid_b = msg_b['a'], msg_b['b']  # NOTE(review): venue-B quotes unused below

    # Effective spread relative to the venue-A mid price.
    spread_eff = (ask_a - bid_a) / ((ask_a + bid_a) / 2)
    # Order-book imbalance; the epsilon guards a zero-depth book.
    micro_skew = (msg_a['bv'] - msg_a['av']) / max(msg_a['bv'] + msg_a['av'], 1e-9)

    vwap_a = compute_vwap(price_buffer_a, volume_buffer_a)
    vwap_b = compute_vwap(price_buffer_b, volume_buffer_b)
    vwap_div = (vwap_a - vwap_b) / vwap_b if vwap_b else 0

    ob_entropy = compute_entropy([msg_a['bv'], msg_a['av']])

    if len(price_buffer_a) >= 2:
        tick_velocity = (price_buffer_a[-1] - price_buffer_a[-2]) / (tick_times_a[-1] - tick_times_a[-2] + 1e-6)
    else:
        tick_velocity = 0

    if len(volume_buffer_a) >= 2:
        vdm = (volume_buffer_a[-1] - volume_buffer_a[-2]) / (tick_times_a[-1] - tick_times_a[-2] + 1e-6)
    else:
        vdm = 0

    # Cross-venue price correlation. FIX: np.corrcoef requires equal-length
    # series, but the two buffers fill at independent tick rates; the
    # original only checked len(price_buffer_a) and crashed on mismatch.
    # Align to the common tail, and map a nan result (constant series) to 0.
    n = min(len(price_buffer_a), len(price_buffer_b))
    if n >= 2:
        cvc = np.corrcoef(list(price_buffer_a)[-n:], list(price_buffer_b)[-n:])[0][1]
        if np.isnan(cvc):
            cvc = 0
    else:
        cvc = 0

    # Combine into vector
    return np.array([
        spread_eff, latency, micro_skew,
        vwap_div, ob_entropy, tick_velocity,
        vdm, 0,  # placeholder for funding skew
        cvc
    ])
69
-
70
def evaluate_entry(kpi_vector):
    """Collapse the 9-element KPI vector into an entry score in (0, 1).

    A fixed weight vector scores each KPI (negative weights penalize
    latency and order-book entropy); the weighted sum is squashed with
    the logistic function (inlined here).
    """
    weights = np.array([1.0, -0.5, 1.0, 0.8, -0.6, 0.9, 0.8, 0.5, 1.0])
    raw = np.dot(weights, kpi_vector)
    return 1 / (1 + np.exp(-raw))
74
-
75
- # --- WebSocket Handlers ---
76
async def stream_gate():
    """Subscribe to Gate.io spot trades and feed the venue-B buffers.

    Runs forever; each trade appends price/volume/time to the
    module-level *_b deques consumed by compute_kpis and monitor.
    """
    uri = "wss://api.gateio.ws/ws/v4/"  # FIX: was an f-string with no placeholders
    async with websockets.connect(uri) as ws:
        payload = {
            "time": int(time.time()),
            "channel": "spot.trades",
            "event": "subscribe",
            # NOTE(review): Gate.io pairs are typically underscore-delimited
            # (e.g. "BTC_USDT"); PAIR.lower() yields "btcusdt" -- confirm
            # this subscription actually matches a listed pair.
            "payload": [f"{PAIR.lower()}"]
        }
        await ws.send(json.dumps(payload))

        async for msg in ws:
            data = json.loads(msg)
            result = data.get('result')
            # Skip acks/errors and (FIX) empty trade batches: the original
            # indexed result[0] blindly and raised IndexError on [].
            if not isinstance(result, list) or not result:
                continue
            trade = result[0]
            p = float(trade['price'])
            v = float(trade['amount'])
            t = int(trade['create_time_ms'])
            tick_times_b.append(t / 1000)  # ms -> seconds
            price_buffer_b.append(p)
            volume_buffer_b.append(v)
98
-
99
async def stream_binance(handler_queue):
    """Stream Binance best bid/ask, update the venue-A buffers, and push
    each tick dict onto *handler_queue* for the monitor loop.
    """
    import random  # FIX: `random` was used but never imported anywhere in the file

    uri = f"wss://stream.binance.com:9443/ws/{PAIR.lower()}@bookTicker"
    async with websockets.connect(uri) as ws:
        async for msg in ws:
            data = json.loads(msg)
            tick = {
                # NOTE(review): 'u' is the bookTicker updateId, not an event
                # time in ms, yet compute_kpis subtracts it from a real ms
                # timestamp to derive latency -- confirm intent.
                'T': data['u'],
                'a': float(data['a']),
                'b': float(data['b']),
                'bv': random.uniform(100, 200),  # mock depth
                'av': random.uniform(100, 200)   # mock depth
            }
            price = (tick['a'] + tick['b']) / 2  # mid price
            volume = random.uniform(1, 10)       # mock traded volume
            tick_times_a.append(time.time())
            price_buffer_a.append(price)
            volume_buffer_a.append(volume)
            await handler_queue.put(tick)
117
-
118
- # --- Coordination Loop ---
119
async def monitor(handler_queue):
    """Consume Binance ticks, synthesize a Gate.io snapshot from the
    venue-B buffers, and print the entry score; announces a signal when
    the score exceeds ENTRY_THRESHOLD.
    """
    import random  # FIX: `random` was used but never imported anywhere in the file

    while True:
        msg_a = await handler_queue.get()
        # FIX: until the Gate.io stream has produced at least one trade the
        # *_b buffers are empty, and indexing [-1] raised IndexError.
        if not tick_times_b or not price_buffer_b:
            continue
        msg_b = {
            'T': int(tick_times_b[-1] * 1000),   # seconds -> ms
            'a': price_buffer_b[-1] + 0.5,       # synthetic half-spread ask
            'b': price_buffer_b[-1] - 0.5,       # synthetic half-spread bid
            'bv': random.uniform(50, 150),       # mock depth
            'av': random.uniform(50, 150)        # mock depth
        }
        kpi_vector = compute_kpis(msg_a, msg_b)
        score = evaluate_entry(kpi_vector)
        print(f"E_t = {score:.3f} | KPIs = {np.round(kpi_vector, 4)}")
        if score > ENTRY_THRESHOLD:
            print(">>> Entry Signal Triggered")
134
-
135
- # --- Main Entrypoint ---
136
async def main():
    """Run both exchange streams and the monitor loop concurrently."""
    tick_queue = asyncio.Queue()
    tasks = (
        stream_binance(tick_queue),
        stream_gate(),
        monitor(tick_queue),
    )
    await asyncio.gather(*tasks)


if __name__ == "__main__":
    asyncio.run(main())
 
1
+ import os, json, requests, torch
2
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
3
+ import gradio as gr
4
+
5
def fetch_file(space_id, filename):
    """Fetch one raw file from a Hugging Face Space repository.

    Args:
        space_id: "user/space-name" identifier of the Space.
        filename: path of the file inside the repo, e.g. "app.py".

    Returns:
        The file's text on HTTP 200, or "" on any other status or on a
        request error (best-effort: the audit proceeds with missing files).
    """
    # FIX: the filename argument was ignored -- a literal placeholder was
    # baked into the URL, so every fetch hit a non-existent path.
    url = f"https://huggingface.co/spaces/{space_id}/raw/main/{filename}"
    try:
        r = requests.get(url, timeout=10)
        return r.text if r.status_code == 200 else ""
    except requests.RequestException:
        # FIX: narrowed from a bare `except:` that also swallowed
        # KeyboardInterrupt / SystemExit.
        return ""
12
+
13
def build_prompt(readme, code, reqs):
    """Assemble the single-turn [INST] audit prompt for an instruct LLM.

    The doubled braces ({{ }}) render as literal braces in the f-string,
    so the model is shown the exact JSON schema it must return; the
    caller later extracts the first {...} span from the completion.
    """
    return f"""<s>[INST] You are a protocol intelligence model. Determine if this Hugging Face Space is monetized on-chain.

Return strictly in this JSON format:
{{
"is_revenue_ready": true|false,
"confidence": float,
"blockers": [ "reason 1", "reason 2" ],
"summary": "short summary"
}}

README:
{readme}

Code:
{code}

Dependencies:
{reqs}
[/INST]
"""
34
+
35
def run_audit(space_id, model_id):
    """Audit one Space: fetch its files, run the chosen LLM, parse JSON.

    Args:
        space_id: "user/space-name" identifier of the Space to audit.
        model_id: Hugging Face model id to load for text generation.

    Returns:
        The parsed JSON verdict dict (with "space_id" added), or an
        {"error": ...} dict when model loading or output parsing fails.
    """
    readme = fetch_file(space_id, "README.md")
    code = fetch_file(space_id, "app.py")
    reqs = fetch_file(space_id, "requirements.txt")
    prompt = build_prompt(readme, code, reqs)

    try:
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32)
        pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512)
        result = pipe(prompt)[0]["generated_text"]
    except Exception as e:
        return {"error": f"Model load failed: {str(e)}"}

    # FIX: text-generation pipelines echo the prompt by default, and the
    # prompt itself contains a JSON schema with braces -- so splitting the
    # full text on the first "{" always parsed the schema, never the
    # model's answer. Strip the echoed prompt before locating the JSON.
    completion = result[len(prompt):] if result.startswith(prompt) else result
    try:
        blob = completion.split("{", 1)[1].rsplit("}", 1)[0]
        js = json.loads("{" + blob + "}")
        js["space_id"] = space_id
        return js
    except Exception as e:
        return {"error": f"Output parse failed: {str(e)}", "raw": result}
56
+
57
def batch_audit():
    """CLI helper: audit every Space listed in space_list.txt, writing one
    JSON report per Space under out/unified_audit/.
    """
    # FIX: the original `open(...).read()` never closed the handle; also
    # skip blank lines so a trailing newline doesn't audit an empty id.
    with open("space_list.txt") as f:
        spaces = [line.strip() for line in f if line.strip()]
    os.makedirs("out/unified_audit", exist_ok=True)
    model_id = "mistralai/Mistral-7B-Instruct-v0.1"
    for sid in spaces:
        result = run_audit(sid, model_id)
        # "/" is illegal in a filename, so encode it as "__".
        out_path = f"out/unified_audit/{sid.replace('/', '__')}.json"
        with open(out_path, "w") as f:
            json.dump(result, f, indent=2)
        print(f"✅ {sid}: {result.get('summary', result)}")
66
+
67
# Gradio UI -- single-Space audit form; batch mode stays CLI-only.
with gr.Blocks() as demo:
    gr.Markdown("# 🔍 HF Space Revenue Readiness Auditor (33x LLMs, No API Keys)")
    # Which Space to audit.
    sid = gr.Textbox(label="Space ID (e.g. username/space-name)")
    # Which open-weight model performs the audit, loaded locally by
    # run_audit (no hosted-API keys involved).
    # NOTE(review): the header advertises 33 models but only 12 are listed;
    # the trailing comment marks where the rest should be added.
    model = gr.Dropdown(
        label="Select LLM Model",
        choices=[
            "mistralai/Mistral-7B-Instruct-v0.1",
            "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "google/gemma-2b-it",
            "microsoft/phi-2",
            "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
            "NousResearch/Nous-Capybara-7B-V1",
            "HuggingFaceH4/zephyr-7b-alpha",
            "intel/neural-chat-7b-v3",  # NOTE(review): hub org is "Intel" -- confirm this id resolves
            "tiiuae/falcon-rw-1b",
            "EleutherAI/pythia-1.4b",
            "EleutherAI/pythia-2.8b",
            "Open-Orca/Mistral-7B-OpenOrca"
            # Extend to full 33 here
        ],
        value="mistralai/Mistral-7B-Instruct-v0.1"
    )
    run = gr.Button("Run Audit")
    output = gr.JSON(label="Audit Result")
    # run_audit returns a dict, which the JSON component renders directly.
    run.click(fn=run_audit, inputs=[sid, model], outputs=output)

# Uncomment to run CLI batch:
# batch_audit()

demo.launch()