File size: 5,242 Bytes
4dfde07
 
 
 
 
 
37e7757
4dfde07
 
 
 
37e7757
4dfde07
37e7757
4dfde07
 
 
9f65c03
4dfde07
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9f65c03
4dfde07
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9f65c03
4dfde07
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9f65c03
4dfde07
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
import asyncio
import contextlib
import os
import time

import orjson
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from fastapi.responses import JSONResponse

# ----------------------------
# App
# ----------------------------
# Single FastAPI application instance; all routes below attach to it.
app = FastAPI(title="Dummy Python AI", version="1.0.0")

# Process start time (epoch seconds); used by /health to report uptime.
START_TS = time.time()

def j(obj) -> str:
    """Serialize *obj* to a JSON string via orjson (fast C-level dumps)."""
    return orjson.dumps(obj).decode()

@app.get("/health")
def health():
    """Liveness probe: report service identity and uptime in seconds."""
    uptime = round(time.time() - START_TS, 2)
    payload = {
        "ok": True,
        "service": "dummy-ai",
        "uptime_sec": uptime,
    }
    return JSONResponse(payload)

# ----------------------------
# WebSocket Protocol
# Client -> Server:
#   {"type":"task", "text":"..."}              # start a task
#   {"type":"telemetry", "cpu":.., "mem":..}   # periodic telemetry
#   {"type":"cancel"}                          # cancel current stream
#
# Server -> Client:
#   {"type":"ready"}                           # once on connect
#   {"type":"log","msg":"..."}                 # log line
#   {"type":"token","text":"..." }             # streaming token
#   {"type":"say","text":"..."}                # client should speak this ASAP
#   {"type":"done","result":"..."}             # task completed
#   {"type":"error","msg":"..."}               # error
# ----------------------------

@app.websocket("/ws/ai")
async def ws_ai(ws: WebSocket):
    """Main AI websocket endpoint.

    Accepts the connection, announces readiness, then loops over client
    messages:

      * ``task``      -- (re)start a background streaming-answer task
      * ``telemetry`` -- echo a best-effort log line back to the client
      * ``cancel``    -- signal the currently running stream to stop

    The streaming coroutine runs as a background asyncio task so this
    receive loop stays responsive while tokens are being emitted.
    """
    await ws.accept()
    await ws.send_text(j({"type": "ready", "msg": "Dummy AI online"}))

    current_task = None
    current_cancel = asyncio.Event()

    async def stream_dummy_answer(prompt: str, cancel: asyncio.Event):
        """Stream a staged, convincing dummy answer with tokens and say-cues.

        ``cancel`` is passed explicitly rather than read from the enclosing
        scope: the handler rebinds ``current_cancel`` for each new task, so a
        free-variable read would make an older, still-running task poll the
        NEW event instead of its own (late-binding closure race).
        """
        try:
            # 1) acknowledge
            await ws.send_text(j({"type":"log","msg":f"Received task: {prompt[:120]}"}))
            await asyncio.sleep(0.2)

            # 2) "thinking…" (simulate tool use / chain-of-thought without revealing it)
            phases = [
                "Analyzing your request",
                "Planning steps",
                "Executing subtask 1",
                "Executing subtask 2",
                "Compiling results"
            ]
            for ph in phases:
                if cancel.is_set():
                    return
                await ws.send_text(j({"type":"log","msg":ph}))
                await asyncio.sleep(0.35)

            # 3) start streaming an answer token-by-token
            answer = (
                "Sure — here’s a dummy streamed response to verify your end-to-end pipeline. "
                "I’m emitting short tokens so your client UI can show them live, "
                "and your TTS can speak them as they arrive."
            )
            # also ask client to speak a "lead in" immediately
            await ws.send_text(j({"type":"say","text":"Starting response."}))

            for token in answer.split(" "):
                if cancel.is_set():
                    return
                await ws.send_text(j({"type":"token","text":token + " "}))
                await asyncio.sleep(0.06)  # controls stream cadence

            if cancel.is_set():
                return

            await asyncio.sleep(0.15)
            await ws.send_text(j({"type":"say","text":"Response complete."}))
            await ws.send_text(j({"type":"done","result":"OK"}))

        except asyncio.CancelledError:
            # Hard-cancelled by a newer task: propagate instead of letting the
            # broad handler below report a bogus "CancelledError" error frame.
            raise
        except Exception as e:
            await ws.send_text(j({"type":"error","msg":str(e)}))

    try:
        while True:
            raw = await ws.receive_text()
            try:
                msg = orjson.loads(raw)
            except Exception:
                await ws.send_text(j({"type":"error","msg":"Invalid JSON"}))
                continue

            mtype = msg.get("type")
            if mtype == "telemetry":
                # best-effort log
                await ws.send_text(j({
                    "type":"log",
                    "msg": f"Telemetry cpu={msg.get('cpu')} mem={msg.get('mem')} active={msg.get('active_window')}"
                }))
                continue

            if mtype == "cancel":
                current_cancel.set()
                await ws.send_text(j({"type":"log","msg":"Cancel requested"}))
                continue

            if mtype == "task":
                # cancel any ongoing stream before starting a new one.
                if current_task and not current_task.done():
                    current_cancel.set()
                    # BUG FIX: the original wrapped cancel() in
                    # contextlib.suppress(asyncio.CancelledError), which is dead
                    # code (cancel() only *schedules* the exception inside the
                    # task, it never raises here) — and `contextlib` was not
                    # imported at module scope, so the line raised NameError.
                    current_task.cancel()
                    current_task = None
                    await asyncio.sleep(0.05)

                # fresh cancel flag, bound explicitly to the new task
                current_cancel = asyncio.Event()
                prompt = str(msg.get("text", "")).strip() or "(empty)"
                current_task = asyncio.create_task(stream_dummy_answer(prompt, current_cancel))
                continue

            await ws.send_text(j({"type":"error","msg":f"Unknown message type '{mtype}'"}))

    except WebSocketDisconnect:
        # client left
        return
    except Exception as e:
        try:
            await ws.send_text(j({"type":"error","msg":str(e)}))
        finally:
            return

# ------------- Local run -------------
# BUG FIX: the original guard read `if _name_ == "_main_":` (single
# underscores), which compares two undefined names — the guard never fired,
# so the server could not be launched directly.
if __name__ == "__main__":
    import uvicorn
    # PORT env var overrides the default 7860 (Hugging Face Spaces convention).
    uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("PORT", "7860")))