Emmanuel Acheampong commited on
Commit
7d49763
·
0 Parent(s):

adding one shot agent

Browse files
README.md ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: One-Shot Deploy
3
+ emoji: 🚀
4
+ colorFrom: indigo
5
+ colorTo: purple
6
+ sdk: streamlit
7
+ sdk_version: "1.40.0"
8
+ app_file: app.py
9
+ pinned: true
10
+ ---
11
+
12
+ # One-Shot Deploy
13
+
14
+ Describe a demo in plain English → Crusoe's multi-model agent writes the code and ships it to Hugging Face Spaces in minutes.
15
+
16
+ ## How it works
17
+
18
+ | Stage | Model | What happens |
19
+ |-------|-------|-------------|
20
+ | 1 · Intent | **DeepSeek R1** | Reasons about your prompt, selects a template, writes the AI system prompt |
21
+ | 2 · Codegen | **Kimi-K2** | Writes a complete single-file Streamlit app wired to Crusoe inference |
22
+ | 3 · Validate | *(ast parser)* | Checks syntax and structural patterns |
23
+ | 4 · Deploy | **Qwen3** (healer) | Pushes to HF Spaces, injects secrets, auto-fixes on build failure (up to 3×) |
24
+
25
+ ## Required secrets
26
+
27
+ Set these in **Settings → Variables and secrets** in your Space:
28
+
29
+ | Name | Description |
30
+ |------|-------------|
31
+ | `CRUSOE_API_KEY` | Crusoe Managed Inference API key |
32
+ | `HF_TOKEN` | Hugging Face token with write scope |
33
+ | `HF_USERNAME` | Your Hugging Face username |
34
+
35
+ Optionally override the default models:
36
+
37
+ | Name | Default |
38
+ |------|---------|
39
+ | `CRUSOE_BASE_URL` | `https://api.inference.crusoe.ai/v1` |
40
+ | `INTENT_MODEL` | `deepseek-ai/DeepSeek-R1` |
41
+ | `CODEGEN_MODEL` | `moonshotai/Kimi-K2-Instruct` |
42
+ | `HEALER_MODEL` | `Qwen/Qwen3-235B-A22B` |
43
+
44
+ ## Built by
45
+
46
+ Crusoe DevRel · Powered by [Crusoe Managed Inference](https://crusoe.ai)
agent/__init__.py ADDED
File without changes
agent/code_generator.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Stage 2 — Code Generator
3
+ Model: Kimi-K2 (long-context, excels at code generation)
4
+ Input: structured intent dict
5
+ Output: complete single-file Streamlit app as a string
6
+ """
7
+
8
+ import re
9
+ from openai import OpenAI
10
+ from config import CRUSOE_API_KEY, CRUSOE_BASE_URL, CODEGEN_MODEL
11
+ from templates import get_template
12
+
13
+ client = OpenAI(api_key=CRUSOE_API_KEY, base_url=CRUSOE_BASE_URL)
14
+
15
+ SYSTEM_PROMPT = """You are an expert Streamlit developer creating polished AI demos.
16
+
17
+ You will be given a base template and a structured intent. Your job is to:
18
+ 1. Fill in the template placeholders accurately
19
+ 2. Add any extra UI polish or features mentioned in the intent
20
+ 3. Keep the code as a single self-contained Python file
21
+ 4. Ensure the openai client uses env vars for CRUSOE_API_KEY and CRUSOE_BASE_URL
22
+
23
+ Rules:
24
+ - Output ONLY valid Python code
25
+ - No markdown fences, no explanations
26
+ - All string placeholders ({title}, {description}, etc.) must be replaced
27
+ - Do not change the streaming pattern or client setup
28
+ - Available Crusoe models: deepseek-ai/DeepSeek-R1, moonshotai/Kimi-K2-Instruct, Qwen/Qwen3-235B-A22B"""
29
+
30
+
31
def generate_code(intent: dict) -> str:
    """Generate a complete single-file Streamlit app from a structured intent.

    Args:
        intent: Structured intent dict produced by the intent parser
            (template_type, title, description, model, system_prompt, ...).

    Returns:
        The generated app source as a string, with any markdown fences the
        model may have wrapped around it already stripped.
    """
    template = get_template(intent.get("template_type", "chatbot"))

    # Best-effort pre-fill. str.format raises KeyError for unknown
    # placeholders, but it can just as easily raise IndexError ("{0}") or
    # ValueError (malformed format spec) if the template contains stray
    # braces — fall back to the raw template in every such case and let
    # the LLM do the substitution instead.
    try:
        prefilled = _fill_template(template, intent)
    except (KeyError, IndexError, ValueError):
        prefilled = template  # fall back to raw template

    user_message = f"""Fill in this Streamlit app template using the intent below.

INTENT:
- Title: {intent.get('title')}
- Description: {intent.get('description')}
- Template type: {intent.get('template_type')}
- Model: {intent.get('model')}
- System prompt: {intent.get('system_prompt')}
- Features: {', '.join(intent.get('features', []))}
- Chat placeholder: {intent.get('chat_placeholder', '')}
- Input label: {intent.get('input_label', '')}
- Input placeholder: {intent.get('input_placeholder', '')}
- Model A: {intent.get('model_a', '')} ({intent.get('model_a_label', '')})
- Model B: {intent.get('model_b', '')} ({intent.get('model_b_label', '')})
- Steps: {intent.get('steps', [])}

TEMPLATE (replace all {{placeholders}} and improve where noted):
{prefilled}"""

    response = client.chat.completions.create(
        model=CODEGEN_MODEL,
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": user_message},
        ],
        temperature=0.2,  # low temperature: deterministic, faithful code
        max_tokens=4096,
    )

    raw = response.choices[0].message.content or ""
    return _extract_code(raw)
71
+
72
+
73
def _fill_template(template: str, intent: dict) -> str:
    """Best-effort template fill — remaining {placeholders} are sent to the LLM."""
    # Gather every placeholder value up front; _esc neutralizes triple
    # double-quotes so values can sit inside triple-quoted template literals.
    substitutions = {
        "title": _esc(intent.get("title", "AI Demo")),
        "description": _esc(intent.get("description", "Powered by Crusoe")),
        "system_prompt": _esc(intent.get("system_prompt", "You are a helpful assistant.")),
        "model": intent.get("model", "Qwen/Qwen3-235B-A22B"),
        "chat_placeholder": _esc(intent.get("chat_placeholder", "Ask me anything...")),
        "input_label": _esc(intent.get("input_label", "Enter input:")),
        "input_placeholder": _esc(intent.get("input_placeholder", "Type here...")),
        "model_a": intent.get("model_a", "deepseek-ai/DeepSeek-R1"),
        "model_a_label": _esc(intent.get("model_a_label", "Model A")),
        "model_b": intent.get("model_b", "moonshotai/Kimi-K2-Instruct"),
        "model_b_label": _esc(intent.get("model_b_label", "Model B")),
        "steps": repr(intent.get("steps", [])),
    }
    return template.format(**substitutions)
90
+
91
+
92
+ def _esc(s: str) -> str:
93
+ """Escape triple-quotes inside template string values."""
94
+ return str(s).replace('"""', "'''")
95
+
96
+
97
+ def _extract_code(raw: str) -> str:
98
+ """Strip markdown fences if the model wrapped the output."""
99
+ fenced = re.search(r"```(?:python)?\n(.*?)```", raw, re.DOTALL)
100
+ if fenced:
101
+ return fenced.group(1).strip()
102
+ return raw.strip()
agent/deployer.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Stage 4 — Deployer + Self-Healer
3
+ Deploys generated code to Hugging Face Spaces.
4
+ If the build fails, Qwen3 reads the error logs and patches the code.
5
+ Up to MAX_RETRIES self-heal cycles before giving up.
6
+ """
7
+
8
+ import re
9
+ import time
10
+ import requests
11
+ from openai import OpenAI
12
+ from huggingface_hub import HfApi
13
+ from huggingface_hub.utils import HfHubHTTPError
14
+ from config import (
15
+ CRUSOE_API_KEY,
16
+ CRUSOE_BASE_URL,
17
+ HEALER_MODEL,
18
+ HF_TOKEN,
19
+ HF_USERNAME,
20
+ )
21
+
22
+ MAX_RETRIES = 3
23
+ POLL_INTERVAL = 15 # seconds between status checks
24
+ BUILD_TIMEOUT = 300 # seconds before we give up waiting
25
+
26
+ healer_client = OpenAI(api_key=CRUSOE_API_KEY, base_url=CRUSOE_BASE_URL)
27
+
28
+ HEALER_SYSTEM_PROMPT = """You are an expert Python/Streamlit debugging assistant.
29
+ You will receive broken Streamlit app code and a build/runtime error message.
30
+ Fix the code so it runs cleanly on Hugging Face Spaces with:
31
+ - Python 3.11
32
+ - streamlit and openai installed
33
+ - CRUSOE_API_KEY and CRUSOE_BASE_URL set as environment secrets
34
+
35
+ Output ONLY the corrected Python code. No markdown fences, no explanations."""
36
+
37
+ APP_REQUIREMENTS = "streamlit\nopenai\n"
38
+
39
+ README_TEMPLATE = """---
40
+ title: {title}
41
+ emoji: 🚀
42
+ colorFrom: blue
43
+ colorTo: purple
44
+ sdk: streamlit
45
+ sdk_version: "1.40.0"
46
+ app_file: app.py
47
+ pinned: false
48
+ ---
49
+
50
+ {description}
51
+
52
+ *Powered by [Crusoe Managed Inference](https://crusoe.ai)*
53
+ """
54
+
55
+
56
def deploy(code: str, intent: dict, on_progress=None) -> str:
    """
    Deploy code to HF Spaces and return the live URL.
    Calls on_progress(message) at each significant step.

    Raises:
        RuntimeError: when all MAX_RETRIES upload/build cycles fail.
    """
    api = HfApi(token=HF_TOKEN)
    repo_id = f"{HF_USERNAME}/{_slugify(intent.get('title', 'crusoe-demo'))}"

    _log(on_progress, f"Creating Space `{repo_id}`...")
    _create_space(api, repo_id)

    readme = README_TEMPLATE.format(
        title=intent.get("title", "AI Demo"),
        description=intent.get("description", ""),
    )

    attempt = 0
    while attempt < MAX_RETRIES:
        attempt += 1
        _log(on_progress, f"Uploading files (attempt {attempt}/{MAX_RETRIES})...")
        _upload_files(api, repo_id, code, readme)

        _log(on_progress, "Waiting for Hugging Face build...")
        status, error_log = _wait_for_build(api, repo_id, on_progress)

        if status == "RUNNING":
            # Success — report the public URL and stop retrying.
            app_url = _space_url(repo_id)
            _log(on_progress, f"Space is live at {app_url}")
            return app_url

        if attempt >= MAX_RETRIES:
            raise RuntimeError(
                f"Deployment failed after {MAX_RETRIES} attempts.\nLast error:\n{error_log}"
            )

        # Self-heal: let the healer model patch the code using the build log.
        _log(on_progress, f"Build failed. Asking {HEALER_MODEL} to fix the code...")
        code = _heal(code, error_log)

    raise RuntimeError("Unexpected exit from deploy loop")
94
+
95
+
96
+ # ---------------------------------------------------------------------------
97
+ # Internal helpers
98
+ # ---------------------------------------------------------------------------
99
+
100
def _create_space(api: HfApi, repo_id: str) -> None:
    """Create (or reuse) the target Space and inject Crusoe credentials.

    Raises:
        RuntimeError: if repo creation or secret injection is rejected
            by the Hugging Face API.
    """
    try:
        api.create_repo(
            repo_id=repo_id,
            repo_type="space",
            space_sdk="streamlit",
            exist_ok=True,  # idempotent: re-deploys reuse the same Space
            private=False,
        )
        # Inject Crusoe credentials as Space secrets so the generated app
        # can reach Crusoe inference at runtime. Uses the module-level
        # config values directly — the previous function-local
        # `from config import ...` re-import was redundant.
        api.add_space_secret(repo_id=repo_id, key="CRUSOE_API_KEY", value=CRUSOE_API_KEY)
        api.add_space_secret(repo_id=repo_id, key="CRUSOE_BASE_URL", value=CRUSOE_BASE_URL)
    except HfHubHTTPError as exc:
        raise RuntimeError(f"Failed to create HF Space: {exc}") from exc
115
+
116
+
117
def _upload_files(api: HfApi, repo_id: str, code: str, readme: str) -> None:
    """Push app.py, requirements.txt and README.md to the Space repo."""
    artifacts = (
        ("app.py", code),
        ("requirements.txt", APP_REQUIREMENTS),
        ("README.md", readme),
    )
    for path, content in artifacts:
        api.upload_file(
            path_or_fileobj=content.encode(),
            path_in_repo=path,
            repo_id=repo_id,
            repo_type="space",
        )
136
+
137
+
138
def _wait_for_build(
    api: HfApi, repo_id: str, on_progress=None
) -> tuple[str, str]:
    """
    Poll the Space runtime until it reaches a terminal state.

    Returns:
        (stage_string, error_log) — "RUNNING" with an empty log on success,
        a failure stage with the fetched build logs, or "TIMEOUT" when no
        terminal state is reached within BUILD_TIMEOUT seconds.
    """
    deadline = time.time() + BUILD_TIMEOUT
    terminal_stages = {"RUNNING", "RUNTIME_ERROR", "BUILD_ERROR", "APP_CRASHED"}

    while time.time() < deadline:
        try:
            runtime = api.get_space_runtime(repo_id=repo_id)
            stage = str(runtime.stage)
            _log(on_progress, f"Build status: {stage}")

            if stage in terminal_stages:
                if stage == "RUNNING":
                    return "RUNNING", ""
                error_log = _fetch_build_logs(repo_id)
                return stage, error_log

        except Exception:
            pass  # transient API error — keep polling

        time.sleep(POLL_INTERVAL)

    # Derive the message from BUILD_TIMEOUT instead of hard-coding
    # "5 minutes", so the constant and the message can't drift apart.
    return "TIMEOUT", f"Build timed out after {BUILD_TIMEOUT // 60} minutes."
166
+
167
+
168
def _fetch_build_logs(repo_id: str) -> str:
    """Fetch build logs from the HF REST API (best-effort)."""
    try:
        owner, name = repo_id.split("/", 1)
        resp = requests.get(
            f"https://huggingface.co/api/spaces/{owner}/{name}/logs",
            headers={"Authorization": f"Bearer {HF_TOKEN}"},
            timeout=15,
        )
        if resp.ok:
            # Response body is a list of {"type": ..., "data": ...} entries;
            # keep the last 100 non-empty data lines.
            entries = resp.json()
            tail = [entry.get("data", "") for entry in entries if entry.get("data")]
            return "\n".join(tail[-100:])
    except Exception:
        pass  # network/parse failure — fall through to placeholder message
    return "Unable to retrieve build logs."
183
+
184
+
185
def _heal(code: str, error_log: str) -> str:
    """Ask HEALER_MODEL to fix the code given the error log."""
    user_content = (
        f"Error log:\n```\n{error_log}\n```\n\n"
        f"Code to fix:\n```python\n{code}\n```"
    )
    response = healer_client.chat.completions.create(
        model=HEALER_MODEL,
        messages=[
            {"role": "system", "content": HEALER_SYSTEM_PROMPT},
            {"role": "user", "content": user_content},
        ],
        temperature=0.1,  # near-deterministic patching
        max_tokens=4096,
    )
    raw = response.choices[0].message.content or ""
    # Strip a markdown fence if the model ignored the "no fences" rule.
    fenced = re.search(r"```(?:python)?\n(.*?)```", raw, re.DOTALL)
    if fenced:
        return fenced.group(1).strip()
    return raw.strip()
205
+
206
+
207
+ def _space_url(repo_id: str) -> str:
208
+ owner, name = repo_id.split("/", 1)
209
+ slug = f"{owner}-{name}".replace(".", "-")
210
+ return f"https://{slug}.hf.space"
211
+
212
+
213
+ def _slugify(text: str) -> str:
214
+ text = text.lower().strip()
215
+ text = re.sub(r"[^a-z0-9]+", "-", text)
216
+ text = text.strip("-")[:40]
217
+ return f"crusoe-{text}" if text else "crusoe-demo"
218
+
219
+
220
+ def _log(callback, message: str) -> None:
221
+ if callback:
222
+ callback(message)
agent/intent_parser.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Stage 1 — Intent Parser
3
+ Model: DeepSeek R1 (reasoning model, ideal for structured analysis)
4
+ Input: raw user prompt
5
+ Output: structured intent dict used by the code generator
6
+ """
7
+
8
+ import json
9
+ import re
10
+ from openai import OpenAI
11
+ from config import CRUSOE_API_KEY, CRUSOE_BASE_URL, INTENT_MODEL
12
+
13
+ client = OpenAI(api_key=CRUSOE_API_KEY, base_url=CRUSOE_BASE_URL)
14
+
15
+ SYSTEM_PROMPT = """You are an expert at analysing AI demo requests and mapping them to the right app template.
16
+
17
+ Given a user's demo idea, output a single JSON object with these fields:
18
+
19
+ REQUIRED (all templates):
20
+ - template_type: one of "chatbot" | "comparison" | "dashboard" | "form_wizard"
21
+ - title: short, catchy demo title (max 8 words)
22
+ - description: one-line description shown below the title
23
+ - system_prompt: detailed system prompt for the AI assistant inside the demo
24
+ - model: best Crusoe model for this demo:
25
+ • "deepseek-ai/DeepSeek-R1" — reasoning, analysis, step-by-step thinking
26
+ • "moonshotai/Kimi-K2-Instruct" — coding, long documents, structured output
27
+ • "Qwen/Qwen3-235B-A22B" — general purpose, fast responses, multilingual
28
+ - features: list of 2-4 key features to highlight
29
+
30
+ CONDITIONAL (chatbot only):
31
+ - chat_placeholder: placeholder text shown in the chat input box
32
+
33
+ CONDITIONAL (comparison only):
34
+ - model_a: first model ID (from the list above)
35
+ - model_a_label: friendly display name for model A
36
+ - model_b: second model ID
37
+ - model_b_label: friendly display name for model B
38
+
39
+ CONDITIONAL (dashboard only):
40
+ - input_label: label for the main input text area
41
+ - input_placeholder: placeholder text for that input
42
+
43
+ CONDITIONAL (form_wizard only):
44
+ - steps: list of 3-5 objects, each {"key": "snake_case_name", "question": "Question text?"}
45
+
46
+ Template selection guide:
47
+ - chatbot: conversational Q&A, support bots, advisors
48
+ - comparison: show two models side-by-side on the same prompt
49
+ - dashboard: analyze / summarize pasted text, data, or documents
50
+ - form_wizard: multi-step intake flows that end with AI recommendations
51
+
52
+ Output ONLY the JSON object. No markdown, no explanations."""
53
+
54
+
55
def parse_intent(prompt: str) -> dict:
    """Run the intent model on the user prompt and return a structured intent.

    Raises:
        json.JSONDecodeError: when no parseable JSON object can be found
            in the model's reply.
    """
    response = client.chat.completions.create(
        model=INTENT_MODEL,
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": f"Create a demo for: {prompt}"},
        ],
        temperature=0.3,
        max_tokens=2000,
    )

    content = response.choices[0].message.content or ""

    # Strip DeepSeek R1 chain-of-thought tags
    content = re.sub(r"<think>.*?</think>", "", content, flags=re.DOTALL).strip()

    # Prefer a ```json fenced block; otherwise fall back to the outermost
    # brace-delimited span of the reply.
    fenced = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", content, re.DOTALL)
    if fenced:
        payload = fenced.group(1)
    else:
        brace = re.search(r"\{.*\}", content, re.DOTALL)
        payload = brace.group() if brace else content

    return _apply_defaults(json.loads(payload))
83
+
84
+
85
+ def _apply_defaults(intent: dict) -> dict:
86
+ intent.setdefault("template_type", "chatbot")
87
+ intent.setdefault("title", "AI Demo")
88
+ intent.setdefault("description", "Powered by Crusoe Managed Inference")
89
+ intent.setdefault("system_prompt", "You are a helpful AI assistant.")
90
+ intent.setdefault("model", "Qwen/Qwen3-235B-A22B")
91
+ intent.setdefault("features", [])
92
+ # Chatbot defaults
93
+ intent.setdefault("chat_placeholder", "Ask me anything...")
94
+ # Comparison defaults
95
+ intent.setdefault("model_a", "deepseek-ai/DeepSeek-R1")
96
+ intent.setdefault("model_a_label", "DeepSeek R1")
97
+ intent.setdefault("model_b", "moonshotai/Kimi-K2-Instruct")
98
+ intent.setdefault("model_b_label", "Kimi K2")
99
+ # Dashboard defaults
100
+ intent.setdefault("input_label", "Paste your content here:")
101
+ intent.setdefault("input_placeholder", "Enter text to analyze...")
102
+ # Form wizard defaults
103
+ intent.setdefault("steps", [
104
+ {"key": "requirement", "question": "What is your main requirement?"},
105
+ {"key": "context", "question": "Can you describe your use case in more detail?"},
106
+ {"key": "constraints", "question": "Are there any constraints or preferences to keep in mind?"},
107
+ ])
108
+ return intent
agent/pipeline.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Main orchestration pipeline.
3
+ Runs the four stages sequentially and surfaces progress via callbacks.
4
+ """
5
+
6
+ from dataclasses import dataclass, field
7
+ from typing import Callable
8
+
9
+ from agent.intent_parser import parse_intent
10
+ from agent.code_generator import generate_code
11
+ from agent.validator import validate_code
12
+ from agent.deployer import deploy
13
+
14
+
15
@dataclass
class PipelineResult:
    """Outcome of one pipeline run.

    Fields are populated progressively as stages complete; `success` is
    only True once a live URL has been obtained.
    """

    intent: dict = field(default_factory=dict)  # parsed intent (stage 1)
    code: str = ""  # generated app source (stage 2)
    url: str = ""  # live Space URL (stage 4)
    error: str = ""  # first fatal error message, if any
    success: bool = False  # True only when deployment produced a URL
22
+
23
+
24
def run(
    prompt: str,
    on_stage: Callable[[str, str], None] | None = None,
) -> PipelineResult:
    """
    Execute the full pipeline.

    on_stage(stage_key, message) is called at each significant step:
        stage_key in {"parsing", "parsed", "generating", "generated",
                      "validating", "validated", "deploying", "deploy_progress",
                      "done", "error"}
    """
    result = PipelineResult()

    def _notify(stage: str, msg: str = "") -> None:
        if on_stage:
            on_stage(stage, msg)

    # Stage 1: DeepSeek R1 turns the prompt into a structured intent.
    _notify("parsing", "DeepSeek R1 is analysing your prompt...")
    try:
        result.intent = parse_intent(prompt)
        _notify(
            "parsed",
            f"Template: **{result.intent['template_type']}** · "
            f"Title: **{result.intent['title']}** · "
            f"Model: `{result.intent['model']}`",
        )
    except Exception as exc:
        result.error = f"Intent parsing failed: {exc}"
        _notify("error", result.error)
        return result

    # Stage 2: Kimi-K2 writes the single-file Streamlit app.
    _notify("generating", "Kimi-K2 is writing your Streamlit app...")
    try:
        result.code = generate_code(result.intent)
        _notify("generated", f"Generated {len(result.code.splitlines())} lines of code.")
    except Exception as exc:
        result.error = f"Code generation failed: {exc}"
        _notify("error", result.error)
        return result

    # Stage 3: static validation — warnings are non-fatal because the
    # deploy-time healer may still repair the code.
    _notify("validating", "Validating generated code...")
    valid, validation_error = validate_code(result.code)
    if valid:
        _notify("validated", "Code looks good.")
    else:
        _notify("validated", f"Validation warning: {validation_error}. Proceeding anyway.")

    # Stage 4: push to HF Spaces with self-healing retries.
    _notify("deploying", "Deploying to Hugging Face Spaces...")
    try:
        result.url = deploy(
            result.code,
            result.intent,
            on_progress=lambda msg: _notify("deploy_progress", msg),
        )
    except Exception as exc:
        result.error = f"Deployment failed: {exc}"
        _notify("error", result.error)
        return result

    result.success = True
    _notify("done", result.url)
    return result
agent/validator.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Stage 3 — Validator
3
+ No LLM needed — uses Python's ast module for a fast syntax check.
4
+ Returns (is_valid: bool, error_message: str).
5
+ """
6
+
7
+ import ast
8
+
9
+
10
def validate_code(code: str) -> tuple[bool, str]:
    """Syntax-check *code* with ast, then run lightweight structural lints.

    Returns:
        (is_valid, error_message) — error_message is "" when valid.
    """
    try:
        ast.parse(code)
    except SyntaxError as exc:
        return False, f"SyntaxError at line {exc.lineno}: {exc.msg}"

    problems = _lint(code)
    if not problems:
        return True, ""
    return False, "; ".join(problems)
21
+
22
+
23
+ def _lint(code: str) -> list[str]:
24
+ """Lightweight structural checks."""
25
+ issues = []
26
+
27
+ if "import streamlit" not in code:
28
+ issues.append("missing 'import streamlit'")
29
+ if "OpenAI(" not in code:
30
+ issues.append("missing OpenAI client instantiation")
31
+ if "CRUSOE_API_KEY" not in code:
32
+ issues.append("missing CRUSOE_API_KEY env var reference")
33
+ if "stream=True" not in code:
34
+ issues.append("missing streaming (stream=True)")
35
+
36
+ return issues
app.py ADDED
@@ -0,0 +1,378 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ One-Shot Deploy — Main Streamlit UI
3
+ Describe a demo → watch the agent build and deploy it live.
4
+ Every request calls Crusoe Managed Inference in real time.
5
+ """
6
+
7
+ import re
8
+ import streamlit as st
9
+ from config import CRUSOE_API_KEY, CRUSOE_BASE_URL, HF_TOKEN, HF_USERNAME
10
+
11
+ # ── Page config ───────────────────────────────────────────────────────────────
12
+ st.set_page_config(
13
+ page_title="One-Shot Deploy | Crusoe",
14
+ page_icon="🚀",
15
+ layout="wide",
16
+ initial_sidebar_state="expanded",
17
+ )
18
+
19
+ # ── CSS ───────────────────────────────────────────────────────────────────────
20
+ st.markdown(
21
+ """
22
+ <style>
23
+ /* Pipeline log rows */
24
+ .stage-row { padding: 5px 0 5px 4px; font-size: 0.92rem; border-left: 3px solid transparent; margin-bottom: 2px; }
25
+ .stage-done { color: #22c55e; border-left-color: #22c55e; }
26
+ .stage-run { color: #f59e0b; border-left-color: #f59e0b; }
27
+ .stage-err { color: #ef4444; border-left-color: #ef4444; }
28
+ .stage-info { color: #94a3b8; border-left-color: #475569; }
29
+
30
+ /* Integration badge */
31
+ .int-badge {
32
+ display: flex; align-items: center; gap: 10px;
33
+ background: #1e293b; border: 1px solid #334155;
34
+ border-radius: 10px; padding: 10px 14px; margin-bottom: 10px;
35
+ }
36
+ .int-dot-green { width:10px; height:10px; border-radius:50%; background:#22c55e; flex-shrink:0; }
37
+ .int-dot-grey { width:10px; height:10px; border-radius:50%; background:#64748b; flex-shrink:0; }
38
+ .int-label { font-weight: 600; font-size: 0.88rem; }
39
+ .int-sub { font-size: 0.78rem; color: #94a3b8; }
40
+
41
+ /* Pipeline flow diagram */
42
+ .flow-wrap {
43
+ display: flex; align-items: center; gap: 0;
44
+ background: #0f172a; border: 1px solid #1e293b;
45
+ border-radius: 12px; padding: 16px 20px;
46
+ margin-bottom: 4px; overflow-x: auto;
47
+ }
48
+ .flow-node {
49
+ background: #1e293b; border: 1px solid #334155;
50
+ border-radius: 8px; padding: 8px 14px;
51
+ text-align: center; min-width: 110px; flex-shrink: 0;
52
+ }
53
+ .flow-node .fn-label { font-size: 0.72rem; color: #94a3b8; margin-bottom: 2px; }
54
+ .flow-node .fn-value { font-size: 0.85rem; font-weight: 600; }
55
+ .flow-arrow { color: #475569; font-size: 1.3rem; padding: 0 10px; flex-shrink: 0; }
56
+ .flow-node-hf { border-color: #FFD21E44; background: #1a1800; }
57
+ .flow-node-hf .fn-value { color: #FFD21E; }
58
+ .flow-node-crusoe { border-color: #6366f144; background: #0f0f1a; }
59
+ .flow-node-crusoe .fn-value { color: #818cf8; }
60
+
61
+ /* HF Space result card */
62
+ .hf-card {
63
+ background: #1a1800; border: 1px solid #FFD21E55;
64
+ border-radius: 12px; padding: 20px 24px;
65
+ }
66
+ .hf-card-header { display: flex; align-items: center; gap: 10px; margin-bottom: 16px; }
67
+ .hf-title { font-size: 1.1rem; font-weight: 700; }
68
+ .hf-row { display: flex; align-items: center; gap: 8px; margin-bottom: 8px; font-size: 0.88rem; color: #cbd5e1; }
69
+ .hf-pill {
70
+ display: inline-block; background: #FFD21E22; color: #FFD21E;
71
+ border: 1px solid #FFD21E44; border-radius: 999px;
72
+ padding: 1px 10px; font-size: 0.75rem; font-weight: 600;
73
+ }
74
+ </style>
75
+ """,
76
+ unsafe_allow_html=True,
77
+ )
78
+
79
+
80
+ # ── Helpers ───────────────────────────────────────────────────────────────────
81
+ def _slugify(text: str) -> str:
82
+ text = text.lower().strip()
83
+ text = re.sub(r"[^a-z0-9]+", "-", text)
84
+ return text.strip("-")[:40] or "demo"
85
+
86
+
87
def _space_page_url(space_name: str) -> str:
    """Return the huggingface.co page URL for a Space owned by HF_USERNAME."""
    return "/".join(["https://huggingface.co/spaces", HF_USERNAME, space_name])
89
+
90
+
91
def _config_ok() -> bool:
    """Check required credentials; render a Streamlit error and return False if any are absent."""
    required = (
        (CRUSOE_API_KEY, "`CRUSOE_API_KEY`"),
        (HF_TOKEN, "`HF_TOKEN`"),
        (HF_USERNAME, "`HF_USERNAME`"),
    )
    missing = [label for value, label in required if not value]
    if not missing:
        return True
    st.error(
        f"Missing required environment variables: {', '.join(missing)}. "
        "Copy `env.example` to `.env` and fill in your credentials."
    )
    return False
106
+
107
+
108
+ # ── Sidebar — Integrations ────────────────────────────────────────────────────
109
+ with st.sidebar:
110
+ st.markdown("### Connected integrations")
111
+
112
+ crusoe_dot = "int-dot-green" if CRUSOE_API_KEY else "int-dot-grey"
113
+ crusoe_status = "Connected" if CRUSOE_API_KEY else "Missing API key"
114
+ st.markdown(
115
+ f"""
116
+ <div class="int-badge">
117
+ <div class="{crusoe_dot}"></div>
118
+ <div>
119
+ <div class="int-label">Crusoe Managed Inference</div>
120
+ <div class="int-sub">{crusoe_status} · {CRUSOE_BASE_URL.replace("https://","")}</div>
121
+ </div>
122
+ </div>
123
+ """,
124
+ unsafe_allow_html=True,
125
+ )
126
+
127
+ hf_dot = "int-dot-green" if (HF_TOKEN and HF_USERNAME) else "int-dot-grey"
128
+ hf_status = f"@{HF_USERNAME}" if HF_USERNAME else "Missing token / username"
129
+ st.markdown(
130
+ f"""
131
+ <div class="int-badge">
132
+ <div class="{hf_dot}"></div>
133
+ <div>
134
+ <div class="int-label">🤗 Hugging Face Spaces</div>
135
+ <div class="int-sub">{hf_status} · streamlit · public</div>
136
+ </div>
137
+ </div>
138
+ """,
139
+ unsafe_allow_html=True,
140
+ )
141
+
142
+ st.divider()
143
+ st.markdown("**Models in this pipeline**")
144
+ st.markdown(
145
+ """
146
+ | Stage | Model |
147
+ |-------|-------|
148
+ | Intent | DeepSeek R1 |
149
+ | Codegen | Kimi-K2 |
150
+ | Healer | Qwen3 |
151
+ """
152
+ )
153
+
154
+ st.divider()
155
+ st.caption("Built by Crusoe DevRel")
156
+
157
+
158
+ # ── Header ────────────────────────────────────────────────────────────────────
159
+ st.markdown("# 🚀 One-Shot Deploy")
160
+ st.caption(
161
+ "Describe a demo in plain English. "
162
+ "Crusoe's multi-model agent writes the code and ships it to **Hugging Face Spaces** — live in minutes."
163
+ )
164
+
165
+ # Pipeline flow diagram
166
+ st.markdown(
167
+ """
168
+ <div class="flow-wrap">
169
+ <div class="flow-node">
170
+ <div class="fn-label">You provide</div>
171
+ <div class="fn-value">💬 Prompt</div>
172
+ </div>
173
+ <div class="flow-arrow">→</div>
174
+ <div class="flow-node flow-node-crusoe">
175
+ <div class="fn-label">DeepSeek R1</div>
176
+ <div class="fn-value">Intent</div>
177
+ </div>
178
+ <div class="flow-arrow">→</div>
179
+ <div class="flow-node flow-node-crusoe">
180
+ <div class="fn-label">Kimi-K2</div>
181
+ <div class="fn-value">Codegen</div>
182
+ </div>
183
+ <div class="flow-arrow">→</div>
184
+ <div class="flow-node flow-node-crusoe">
185
+ <div class="fn-label">Qwen3</div>
186
+ <div class="fn-value">Self-heal</div>
187
+ </div>
188
+ <div class="flow-arrow">→</div>
189
+ <div class="flow-node flow-node-hf">
190
+ <div class="fn-label">🤗 HF Spaces</div>
191
+ <div class="fn-value">Live URL</div>
192
+ </div>
193
+ </div>
194
+ """,
195
+ unsafe_allow_html=True,
196
+ )
197
+
198
st.divider()

# ── Prompt input ──────────────────────────────────────────────────────────────
# Left column: free-form prompt plus the deploy trigger. Right column:
# one-click example prompts the visitor can copy into the box.
examples = [
    "A chatbot that helps users pick the right GPU for their ML workload",
    "Side-by-side comparison of DeepSeek R1 vs Kimi K2 on coding problems",
    "A dashboard that analyses a job description and surfaces key requirements",
    "A wizard that collects ML requirements and recommends the optimal Crusoe instance type",
]

col_input, col_examples = st.columns([3, 2])

with col_input:
    prompt = st.text_area(
        "What demo do you want to build?",
        placeholder="e.g. A chatbot that helps startups write investor updates...",
        height=120,
    )
    # Deploy stays disabled until the visitor has typed something.
    deploy_btn = st.button(
        "🚀 Deploy to Hugging Face Spaces",
        use_container_width=True,
        type="primary",
        disabled=not prompt,
    )

with col_examples:
    st.markdown("**Try an example**")
    for example in examples:
        # Widget keys only need to be unique per example; the first 20
        # characters are distinct across the list above.
        clicked = st.button(example, use_container_width=True, key=f"ex_{example[:20]}")
        if clicked:
            st.session_state["example_tip"] = example

# Streamlit text_area values can't be set from a button callback here, so
# the example is surfaced as a hint for the visitor to paste manually.
tip = st.session_state.get("example_tip")
if tip is not None:
    st.info(f"Paste into the prompt box: *{tip}*")

st.divider()
234
# ── Pipeline runner ────────────────────────────────────────────────────────────
# Runs only when the user clicked Deploy, typed a prompt, and the required
# secrets are present (_config_ok is defined earlier in this file).
if deploy_btn and prompt and _config_ok():
    # Imported lazily so the landing page still renders if the agent
    # package has an import-time problem.
    from agent.pipeline import run as run_pipeline

    stage_log_el = st.empty()  # live-updating stage log (replaced on each event)
    code_expander = st.expander("Generated code", expanded=False)
    result_el = st.empty()  # final success card or error banner

    log_lines: list[str] = []

    # Per-stage presentation tables; keys are the stage names emitted by
    # agent.pipeline through the on_stage callback.
    ICONS = {
        "parsing": "⏳",
        "parsed": "✅",
        "generating": "⏳",
        "generated": "✅",
        "validating": "⏳",
        "validated": "✅",
        "deploying": "⏳",
        "deploy_progress": " ",
        "done": "🎉",
        "error": "❌",
    }
    LABELS = {
        "parsing": "Stage 1 · DeepSeek R1 — Parsing intent",
        "parsed": "Stage 1 · Intent parsed",
        "generating": "Stage 2 · Kimi-K2 — Generating code",
        "generated": "Stage 2 · Code generated",
        "validating": "Stage 3 · Validating code",
        "validated": "Stage 3 · Validation complete",
        "deploying": "Stage 4 · Pushing to 🤗 Hugging Face Spaces",
        "deploy_progress": "Stage 4 ·",
        "done": "Done",
        "error": "Error",
    }
    CSS = {
        "parsed": "stage-done", "generated": "stage-done",
        "validated": "stage-done", "done": "stage-done",
        "error": "stage-err",
        "deploy_progress": "stage-info",
    }

    def on_stage(stage: str, msg: str) -> None:
        """Pipeline progress callback: append one styled HTML row for this
        event and re-render the whole log (st.empty() swaps its content)."""
        icon = ICONS.get(stage, " ")
        label = LABELS.get(stage, stage)
        css = CSS.get(stage, "stage-run")
        # NOTE(review): msg is interpolated into raw HTML unescaped — assumed
        # to be trusted, pipeline-generated text; confirm nothing user-supplied
        # flows into it.
        line = (
            f'<div class="stage-row {css}">'
            f'{icon}&nbsp; <b>{label}</b>'
            f'{"&nbsp;&nbsp;" + msg if msg else ""}'
            f'</div>'
        )
        log_lines.append(line)
        stage_log_el.markdown("\n".join(log_lines), unsafe_allow_html=True)

    # Synchronous, blocking call; progress arrives via on_stage above.
    result = run_pipeline(prompt, on_stage=on_stage)

    # Generated code preview (shown even on failure, when code exists).
    if result.code:
        with code_expander:
            st.code(result.code, language="python")

    # Outcome
    if result.success:
        title = result.intent.get("title", "demo")
        # NOTE(review): assumes the deploy stage names the Space
        # "crusoe-<slug>" — keep in sync with agent.pipeline's naming.
        space_name = f"crusoe-{_slugify(title)}"
        page_url = _space_page_url(space_name)
        app_url = result.url

        # HF Space result card
        result_el.markdown(
            f"""
            <div class="hf-card">
              <div class="hf-card-header">
                <span style="font-size:1.6rem">🤗</span>
                <div>
                  <div class="hf-title">Your Space is live on Hugging Face</div>
                  <div style="font-size:0.82rem;color:#94a3b8;">
                    Deployed to <b>huggingface.co/spaces/{HF_USERNAME}/{space_name}</b>
                  </div>
                </div>
              </div>
              <div class="hf-row">
                <span>👤</span>
                <span><b>{HF_USERNAME}</b> / <b>{space_name}</b></span>
                <span class="hf-pill">streamlit</span>
                <span class="hf-pill">public</span>
              </div>
              <div class="hf-row">
                <span>🌐</span>
                <a href="{app_url}" target="_blank" style="color:#FFD21E;">{app_url}</a>
              </div>
              <div class="hf-row">
                <span>📄</span>
                <a href="{page_url}" target="_blank" style="color:#94a3b8;">{page_url}</a>
              </div>
              <div class="hf-row" style="margin-top:6px;">
                <span>⚡</span>
                <span style="color:#94a3b8;">Calls <b>Crusoe Managed Inference</b> on every request</span>
              </div>
            </div>
            """,
            unsafe_allow_html=True,
        )

        col_a, col_b = st.columns(2)
        with col_a:
            st.link_button(
                "🚀 Open live demo",
                app_url,
                use_container_width=True,
                type="primary",
            )
        with col_b:
            st.link_button(
                "🤗 View Space on Hugging Face",
                page_url,
                use_container_width=True,
            )

        st.subheader("Live preview")
        st.caption(f"Embedded from `{app_url}`")
        # NOTE(review): relies on st.components.v1 being reachable as an
        # attribute of st (works on recent Streamlit) — confirm version.
        st.components.v1.iframe(app_url, height=600, scrolling=True)

    else:
        result_el.error(f"**Deployment failed.** {result.error}")
359
+
360
+
361
# ── How it works ──────────────────────────────────────────────────────────────
# Static explainer; mirrors the stage table in the README.
with st.expander("How it works", expanded=False):
    st.markdown(
        """
        | Stage | Model | What happens |
        |-------|-------|-------------|
        | 1 · Intent | **DeepSeek R1** | Reasons about your prompt, selects a template (chatbot / comparison / dashboard / wizard), writes the AI system prompt |
        | 2 · Codegen | **Kimi-K2** | Writes a complete single-file Streamlit app wired to Crusoe inference |
        | 3 · Validate | *(ast parser)* | Checks syntax and structural patterns — no LLM cost |
        | 4 · Deploy | **Qwen3** (healer) | Pushes `app.py` + `requirements.txt` to HF Spaces; injects `CRUSOE_API_KEY` as a Space secret; reads build logs and auto-fixes on failure (up to 3×) |

        Every deployed app calls **Crusoe Managed Inference** live, so every visitor experiences Crusoe's speed firsthand.
        """
    )

# ── Footer ────────────────────────────────────────────────────────────────────
st.divider()
st.caption("Built with ❤️ by Crusoe DevRel · Powered by Crusoe Managed Inference · Deployed via 🤗 Hugging Face Spaces")
config.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from dotenv import load_dotenv

# Load a local .env for development; on HF Spaces the secrets arrive as real
# environment variables, so this is simply a no-op when no .env file exists.
load_dotenv()

# Crusoe Managed Inference credentials and endpoint.
CRUSOE_API_KEY = os.environ.get("CRUSOE_API_KEY", "")
CRUSOE_BASE_URL = os.environ.get("CRUSOE_BASE_URL", "https://api.inference.crusoe.ai/v1")

# Hugging Face deployment credentials (token needs write scope) and account.
HF_TOKEN = os.environ.get("HF_TOKEN", "")
HF_USERNAME = os.environ.get("HF_USERNAME", "")

# Model assignments per pipeline stage (overridable via environment).
INTENT_MODEL = os.environ.get("INTENT_MODEL", "deepseek-ai/DeepSeek-R1")
CODEGEN_MODEL = os.environ.get("CODEGEN_MODEL", "moonshotai/Kimi-K2-Instruct")
HEALER_MODEL = os.environ.get("HEALER_MODEL", "Qwen/Qwen3-235B-A22B")
env.example ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Crusoe Managed Inference
2
+ CRUSOE_API_KEY=your_crusoe_api_key_here
3
+ CRUSOE_BASE_URL=https://api.inference.crusoe.ai/v1
4
+
5
+ # Hugging Face (for deployment)
6
+ HF_TOKEN=your_hf_token_here
7
+ HF_USERNAME=your_hf_username_here
8
+
9
+ # Model overrides (optional — defaults shown)
10
+ INTENT_MODEL=deepseek-ai/DeepSeek-R1
11
+ CODEGEN_MODEL=moonshotai/Kimi-K2-Instruct
12
+ HEALER_MODEL=Qwen/Qwen3-235B-A22B
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ streamlit>=1.40.0
2
+ openai>=1.50.0
3
+ huggingface_hub>=0.25.0
4
+ python-dotenv>=1.0.0
5
+ requests>=2.31.0
templates/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Registry of Streamlit app templates, keyed by the template type chosen by
# the intent stage of the pipeline.
from .chatbot import CHATBOT_TEMPLATE
from .comparison import COMPARISON_TEMPLATE
from .dashboard import DASHBOARD_TEMPLATE
from .form_wizard import FORM_WIZARD_TEMPLATE

TEMPLATES = {
    "chatbot": CHATBOT_TEMPLATE,
    "comparison": COMPARISON_TEMPLATE,
    "dashboard": DASHBOARD_TEMPLATE,
    "form_wizard": FORM_WIZARD_TEMPLATE,
}


def get_template(template_type: str) -> str:
    """Return the template source for *template_type*.

    Falls back to the chatbot template for unknown types, so a stray value
    from the intent model never crashes the codegen stage.
    """
    return TEMPLATES.get(template_type, CHATBOT_TEMPLATE)
templates/chatbot.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Streamlit chatbot app template.
#
# This is a str.format() template: the single-brace fields {title},
# {description}, {system_prompt}, {model} and {chat_placeholder} are filled
# in by the codegen stage, while literal braces that must survive into the
# generated app (the dict literals below) are escaped as {{ }}.
# NOTE(review): {system_prompt} is spliced between triple quotes — a prompt
# containing `"""` or a trailing backslash would break the generated file;
# confirm the codegen stage sanitizes it.
CHATBOT_TEMPLATE = '''import streamlit as st
from openai import OpenAI
import os

TITLE = "{title}"
DESCRIPTION = "{description}"
SYSTEM_PROMPT = """{system_prompt}"""
MODEL = "{model}"
CHAT_PLACEHOLDER = "{chat_placeholder}"

client = OpenAI(
    api_key=os.environ.get("CRUSOE_API_KEY"),
    base_url=os.environ.get("CRUSOE_BASE_URL", "https://api.inference.crusoe.ai/v1"),
)

st.set_page_config(page_title=TITLE, page_icon="💬", layout="centered")
st.title(TITLE)
st.caption(DESCRIPTION)
st.divider()

if "messages" not in st.session_state:
    st.session_state.messages = []

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input(CHAT_PLACEHOLDER):
    st.session_state.messages.append({{"role": "user", "content": prompt}})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        stream = client.chat.completions.create(
            model=MODEL,
            messages=[
                {{"role": "system", "content": SYSTEM_PROMPT}},
                *st.session_state.messages,
            ],
            stream=True,
        )

        def get_stream():
            for chunk in stream:
                content = chunk.choices[0].delta.content
                if content:
                    yield content

        response = st.write_stream(get_stream())
        st.session_state.messages.append({{"role": "assistant", "content": response}})

with st.sidebar:
    st.caption("Powered by [Crusoe](https://crusoe.ai)")
    if st.button("Clear chat"):
        st.session_state.messages = []
        st.rerun()
'''
templates/comparison.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Streamlit side-by-side model comparison app template.
#
# This is a str.format() template filled with: title, description,
# system_prompt, model_a, model_a_label, model_b, model_b_label.
# Every literal brace that must reach the generated app — dict literals AND
# f-string placeholders such as {{MODEL_A_LABEL}} — is escaped as {{ }}.
# (A bare single-brace name like {MODEL_A} would be treated as a format
# field and raise KeyError when the template is filled.)
COMPARISON_TEMPLATE = '''import streamlit as st
from openai import OpenAI
import os

TITLE = "{title}"
DESCRIPTION = "{description}"
SYSTEM_PROMPT = """{system_prompt}"""
MODEL_A = "{model_a}"
MODEL_A_LABEL = "{model_a_label}"
MODEL_B = "{model_b}"
MODEL_B_LABEL = "{model_b_label}"

client = OpenAI(
    api_key=os.environ.get("CRUSOE_API_KEY"),
    base_url=os.environ.get("CRUSOE_BASE_URL", "https://api.inference.crusoe.ai/v1"),
)

st.set_page_config(page_title=TITLE, page_icon="⚖️", layout="wide")
st.title(TITLE)
st.caption(DESCRIPTION)
st.divider()

prompt = st.text_area("Enter your prompt:", height=120, placeholder="Ask anything to compare both models...")

if st.button("⚡ Compare Models", type="primary", disabled=not prompt):
    col1, col2 = st.columns(2)

    with col1:
        st.subheader(f"🤖 {{MODEL_A_LABEL}}")
        stream_a = client.chat.completions.create(
            model=MODEL_A,
            messages=[
                {{"role": "system", "content": SYSTEM_PROMPT}},
                {{"role": "user", "content": prompt}},
            ],
            stream=True,
        )

        def stream_a_content():
            for chunk in stream_a:
                content = chunk.choices[0].delta.content
                if content:
                    yield content

        st.write_stream(stream_a_content())

    with col2:
        st.subheader(f"🤖 {{MODEL_B_LABEL}}")
        stream_b = client.chat.completions.create(
            model=MODEL_B,
            messages=[
                {{"role": "system", "content": SYSTEM_PROMPT}},
                {{"role": "user", "content": prompt}},
            ],
            stream=True,
        )

        def stream_b_content():
            for chunk in stream_b:
                content = chunk.choices[0].delta.content
                if content:
                    yield content

        st.write_stream(stream_b_content())

with st.sidebar:
    st.caption("Powered by [Crusoe](https://crusoe.ai)")
    st.markdown(f"**Model A:** `{{MODEL_A}}`")
    st.markdown(f"**Model B:** `{{MODEL_B}}`")
'''
templates/dashboard.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Streamlit analysis-dashboard app template.
#
# This is a str.format() template filled with: title, description,
# system_prompt, model, input_label, input_placeholder.
# Every literal brace that must reach the generated app — dict literals AND
# f-string placeholders such as {{MODEL}} — is escaped as {{ }}.
# (A bare single-brace {MODEL} would be consumed as a format field and
# raise KeyError when the template is filled.)
DASHBOARD_TEMPLATE = '''import streamlit as st
from openai import OpenAI
import os

TITLE = "{title}"
DESCRIPTION = "{description}"
SYSTEM_PROMPT = """{system_prompt}"""
MODEL = "{model}"
INPUT_LABEL = "{input_label}"
INPUT_PLACEHOLDER = "{input_placeholder}"

client = OpenAI(
    api_key=os.environ.get("CRUSOE_API_KEY"),
    base_url=os.environ.get("CRUSOE_BASE_URL", "https://api.inference.crusoe.ai/v1"),
)

st.set_page_config(page_title=TITLE, page_icon="📊", layout="wide")
st.title(TITLE)
st.caption(DESCRIPTION)
st.divider()

col_input, col_output = st.columns([1, 2])

with col_input:
    st.subheader("Input")
    user_input = st.text_area(INPUT_LABEL, placeholder=INPUT_PLACEHOLDER, height=200)
    analyze = st.button("🔍 Analyze", type="primary", disabled=not user_input)

with col_output:
    st.subheader("Analysis")
    if analyze and user_input:
        stream = client.chat.completions.create(
            model=MODEL,
            messages=[
                {{"role": "system", "content": SYSTEM_PROMPT}},
                {{"role": "user", "content": user_input}},
            ],
            stream=True,
        )

        def get_stream():
            for chunk in stream:
                content = chunk.choices[0].delta.content
                if content:
                    yield content

        st.write_stream(get_stream())
    else:
        st.info("Enter your input and click Analyze to get started.")

with st.sidebar:
    st.caption("Powered by [Crusoe](https://crusoe.ai)")
    st.markdown(f"**Model:** `{{MODEL}}`")
'''
templates/form_wizard.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Streamlit multi-step wizard app template.
#
# This is a str.format() template filled with: title, description,
# system_prompt, model, and steps. {steps} receives a Python-literal list of
# step dicts (each with "question" and "key" entries) inserted verbatim into
# the STEPS assignment — TODO confirm the codegen emits a valid Python repr.
# Inside the template, {{ }} escapes braces that must reach the generated
# app (dict literals and f-strings), and \\n produces a literal \n escape
# sequence in the generated source.
FORM_WIZARD_TEMPLATE = '''import streamlit as st
from openai import OpenAI
import os

TITLE = "{title}"
DESCRIPTION = "{description}"
SYSTEM_PROMPT = """{system_prompt}"""
MODEL = "{model}"
STEPS = {steps}

client = OpenAI(
    api_key=os.environ.get("CRUSOE_API_KEY"),
    base_url=os.environ.get("CRUSOE_BASE_URL", "https://api.inference.crusoe.ai/v1"),
)

st.set_page_config(page_title=TITLE, page_icon="🧙", layout="centered")
st.title(TITLE)
st.caption(DESCRIPTION)
st.divider()

if "step" not in st.session_state:
    st.session_state.step = 0
if "answers" not in st.session_state:
    st.session_state.answers = {{}}
if "result" not in st.session_state:
    st.session_state.result = None

if st.session_state.step < len(STEPS):
    current_step = STEPS[st.session_state.step]
    progress = st.session_state.step / len(STEPS)
    st.progress(progress, text=f"Step {{st.session_state.step + 1}} of {{len(STEPS)}}")
    st.subheader(current_step["question"])

    answer = st.text_input(
        "Your answer:",
        key=f"answer_{{st.session_state.step}}",
        placeholder="Type your answer here...",
    )

    col1, col2 = st.columns([3, 1])
    with col2:
        if st.button("Next →", type="primary", disabled=not answer):
            st.session_state.answers[current_step["key"]] = answer
            st.session_state.step += 1
            st.rerun()

else:
    st.success("Great! Generating your personalized recommendations...")
    summary = "\\n".join([f"- {{k.replace('_', ' ').title()}}: {{v}}" for k, v in st.session_state.answers.items()])

    if st.session_state.result is None:
        stream = client.chat.completions.create(
            model=MODEL,
            messages=[
                {{"role": "system", "content": SYSTEM_PROMPT}},
                {{"role": "user", "content": f"Based on the following information, provide detailed recommendations:\\n{{summary}}"}},
            ],
            stream=True,
        )

        def get_stream():
            for chunk in stream:
                content = chunk.choices[0].delta.content
                if content:
                    yield content

        st.subheader("Your Recommendations")
        result = st.write_stream(get_stream())
        st.session_state.result = result
    else:
        st.subheader("Your Recommendations")
        st.markdown(st.session_state.result)

    st.divider()
    if st.button("Start Over"):
        st.session_state.step = 0
        st.session_state.answers = {{}}
        st.session_state.result = None
        st.rerun()

with st.sidebar:
    st.caption("Powered by [Crusoe](https://crusoe.ai)")
    if st.session_state.answers:
        st.subheader("Your answers so far")
        for k, v in st.session_state.answers.items():
            st.markdown(f"**{{k.replace('_', ' ').title()}}:** {{v}}")
'''
+ '''