Mayank Keoliya committed on
Commit
4a4614d
Β·
1 Parent(s): b6742e6

Bundle camel library, demo data, and update app.py for HF Space

Browse files
Files changed (34) hide show
  1. app.py +70 -262
  2. {camel_inference/src/camel β†’ camel}/__init__.py +0 -0
  3. {camel_inference/src/camel β†’ camel}/assertions.py +0 -0
  4. {camel_inference/src/camel β†’ camel}/camel_model.py +0 -0
  5. {camel_inference/src/camel β†’ camel}/checkpoint_utils.py +0 -0
  6. {camel_inference/src/camel β†’ camel}/ecg_attention_masks.py +0 -0
  7. {camel_inference/src/camel β†’ camel}/ecg_gemma_model.py +0 -0
  8. {camel_inference/src/camel β†’ camel}/ecg_model_wrapper.py +0 -0
  9. {camel_inference/src/camel β†’ camel}/ecg_text_packing.py +0 -0
  10. {camel_inference/src/camel β†’ camel}/inference.py +0 -0
  11. {camel_inference/src/camel β†’ camel}/model_init.py +0 -0
  12. {camel_inference/src/camel β†’ camel}/model_introspect.py +0 -0
  13. {camel_inference/src/camel β†’ camel}/model_registry.py +0 -0
  14. {camel_inference/src/camel β†’ camel}/model_registry.yaml +0 -0
  15. {camel_inference/src/camel β†’ camel}/process_ecg.py +0 -0
  16. {camel_inference/src/camel β†’ camel}/projectors.py +0 -0
  17. {camel_inference/src/camel β†’ camel}/prompt_renderers.py +0 -0
  18. {camel_inference/src/camel β†’ camel}/training_setup.py +0 -0
  19. camel_inference/.gitignore +0 -12
  20. camel_inference/README.md +0 -82
  21. camel_inference/pyproject.toml +0 -36
  22. camel_inference/run_camel.py +0 -49
  23. camel_inference/scripts/download_checkpoints.sh +0 -37
  24. {camel_inference/demo β†’ demo}/08704_hr.dat +0 -0
  25. {camel_inference/demo β†’ demo}/08704_hr.hea +0 -0
  26. {camel_inference/demo β†’ demo}/12585_hr.dat +0 -0
  27. {camel_inference/demo β†’ demo}/12585_hr.hea +0 -0
  28. {camel_inference/demo β†’ demo}/12646_hr.dat +0 -0
  29. {camel_inference/demo β†’ demo}/12646_hr.hea +0 -0
  30. {camel_inference/demo β†’ demo}/example_prompt.json +0 -0
  31. camel_inference/src/read_ecg.py β†’ read_ecg.py +0 -0
  32. requirements.txt +1 -0
  33. run_local.sh +0 -6
  34. test_setup.py +0 -54
app.py CHANGED
@@ -1,7 +1,6 @@
1
  import gradio as gr
2
  import os
3
  import sys
4
- import subprocess
5
  from pathlib import Path
6
  import wfdb
7
  import numpy as np
@@ -9,308 +8,117 @@ import matplotlib.pyplot as plt
9
  import torch
10
  import glob
11
  from types import SimpleNamespace
 
12
 
13
- # --- Configuration ---
14
- REPO_URL = "https://github.com/CAMEL-ECG/CAMEL-inference.git"
15
- REPO_DIR = Path("./camel_inference")
16
- DEMO_DIR = REPO_DIR / "demo"
17
 
18
- # --- Setup: Clone Inference Repo & Install ---
19
- def setup_inference_env():
20
- """Clones the repo and ensures it's in sys.path."""
21
- if not REPO_DIR.exists():
22
- print(f"Cloning {REPO_URL}...")
23
- subprocess.run(["git", "clone", REPO_URL, str(REPO_DIR)], check=True)
24
- else:
25
- if (REPO_DIR / ".git").exists():
26
- print("Repo directory exists. Pulling latest...")
27
- try:
28
- subprocess.run(["git", "-C", str(REPO_DIR), "pull"], check=True)
29
- except Exception as e:
30
- print(f"Git pull failed (non-fatal): {e}")
31
- else:
32
- print("Found bundled inference code. Skipping git pull.")
33
 
34
- # Ensure the src is importable
35
- if str(REPO_DIR.resolve()) not in sys.path:
36
- sys.path.append(str(REPO_DIR.resolve()))
37
-
38
- # Install the package in editable mode if not already (for dependencies)
39
- # Note: In a persistent space, we might check if it's already installed.
40
- # For now, we rely on requirements.txt satisfying most, but we run this to be safe.
41
- try:
42
- subprocess.run([sys.executable, "-m", "pip", "install", "-e", str(REPO_DIR)], check=True)
43
- except Exception as e:
44
- print(f"Pip install failed: {e}")
45
 
46
- def download_checkpoints():
47
- """Downloads CAMEL checkpoints if missing."""
48
- from huggingface_hub import hf_hub_download
49
- import shutil
50
-
51
- checkpoint_dir = Path("checkpoints")
52
- checkpoint_dir.mkdir(exist_ok=True)
53
-
54
- repo_id = "CAMEL-ECG/CAMEL"
55
- files = ["camel_base.pt", "camel_ecginstruct.pt", "camel_forecast.pt"]
56
-
57
- for f in files:
58
- dest = checkpoint_dir / f
59
- if not dest.exists():
60
- print(f"Downloading {f}...")
61
- try:
62
- src = hf_hub_download(repo_id=repo_id, filename=f)
63
- shutil.copy(src, dest)
64
- print(f"Downloaded {f}")
65
- except Exception as e:
66
- print(f"Error downloading {f}: {e}")
67
- else:
68
- print(f"Checkpoint {f} exists.")
69
-
70
- setup_inference_env()
71
- try:
72
- download_checkpoints()
73
- except Exception as e:
74
- print(f"Checkpoint download warning: {e}")
75
-
76
- # --- Model Loading (Global Cache) ---
77
- # Import after setup to ensure paths are correct
78
- try:
79
- from camel.camel_model import CAMEL
80
- except ImportError:
81
- # Fallback if pip install -e didn't work immediately in this process
82
- sys.path.append(str(REPO_DIR / "src"))
83
- from camel.camel_model import CAMEL
84
 
85
  MODEL_CACHE = None
86
 
87
  def get_model():
88
  global MODEL_CACHE
89
  if MODEL_CACHE is None:
90
- print("Loading CAMEL model...")
91
-
92
- # Verify CUDA as requested
93
- print(f"Is CUDA available: {torch.cuda.is_available()}")
94
- if torch.cuda.is_available():
95
- print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
96
-
97
  device = 'cuda' if torch.cuda.is_available() else 'cpu'
98
- print(f"Using device: {device}")
99
-
100
- # Initialize model - checkpoints will be downloaded automatically by transformers/huggingface_hub
101
  MODEL_CACHE = CAMEL(mode='base', device=device)
102
- print("Model loaded.")
103
  return MODEL_CACHE
104
 
105
  # --- Helper Functions ---
 
106
 
107
  def load_examples():
108
- """Returns a list of example basenames from the demo directory."""
109
- if not DEMO_DIR.exists():
110
- return []
111
- # Search for .hea files
112
- hea_files = glob.glob(str(DEMO_DIR / "*.hea"))
113
- # Extract basenames (filename without extension)
114
- basenames = [Path(f).stem for f in hea_files]
115
- return sorted(basenames)
116
 
117
- def read_ecg_file(file_path):
118
- """Reads an ECG record using wfdb."""
119
- # wfdb.rdrecord expects the path WITHOUT extension for the 'record_name' argument
120
- # but we usually pass a path like "demo/12345".
121
- record_path = str(Path(file_path).with_suffix(""))
122
  try:
123
- record = wfdb.rdrecord(record_path)
124
- return record
125
  except Exception as e:
126
- print(f"Error reading record {record_path}: {e}")
127
  return None
128
 
129
  def plot_ecg(record):
130
- """Plots the ECG signal."""
131
- if record is None:
132
- return None
133
-
134
- signals = record.p_signal
135
- # Plot only the first lead for clarity, or a few leads
136
  fig, ax = plt.subplots(figsize=(10, 4))
137
-
138
- # If multiple channels, plot the first one (usually I or II)
139
- # Determine number of samples to plot (e.g., first 10 seconds aka 10*fs)
140
- fs = record.fs
141
- duration_secs = 10
142
- samples = int(min(signals.shape[0], fs * duration_secs))
143
-
144
- t = np.arange(samples) / fs
145
- ax.plot(t, signals[:samples, 0], label=record.sig_name[0])
146
-
147
- ax.set_title(f"ECG Signal: {record.record_name} (Lead {record.sig_name[0]})")
148
- ax.set_xlabel("Time (s)")
149
- ax.set_ylabel(record.units[0] if record.units else "mV")
150
- ax.legend()
151
- ax.grid(True, alpha=0.3)
152
-
153
  plt.tight_layout()
154
  return fig
155
 
156
- def get_header_info(record):
157
- """Extracts ground truth/comments from the header."""
158
- if record is None:
159
- return "Error reading record."
160
-
161
- info = []
162
- if record.comments:
163
- info.append("Comments/Labels:")
164
- for c in record.comments:
165
- info.append(f"- {c}")
166
-
167
- info.append(f"Fs: {record.fs} Hz")
168
- info.append(f"Length: {record.sig_len} samples")
169
-
170
- return "\n".join(info)
171
 
172
  def process_upload(files):
173
- """Handles uploaded .dat/.hea files."""
174
- if not files:
175
- return None, "No files uploaded.", None
176
-
177
- # We need to find the pair (.dat and .hea)
178
- # Gradio uploads to a temp dir. We need to identify the common basename.
179
- # Group by basename
180
  paths = [Path(f.name) for f in files]
181
- # Simple heuristic: take the first .hea found and look for its .dat sibling
182
- hea_path = next((p for p in paths if p.suffix == '.hea'), None)
183
-
184
- if not hea_path:
185
- return None, "Upload must include a .hea file.", None
186
 
187
- # wfdb needs the path without extension
188
- record = read_ecg_file(str(hea_path))
189
-
190
- if record is None:
191
- return None, "Failed to read uploaded ECG.", None
192
-
193
- fig = plot_ecg(record)
194
- info = get_header_info(record)
195
-
196
- return fig, info, str(hea_path.with_suffix("")) # Return path for inference
197
 
198
- def process_example(example_name):
199
- """Handles selected example."""
200
- if not example_name:
201
- return None, "No example selected.", None
202
-
203
- path = DEMO_DIR / example_name
204
- record = read_ecg_file(str(path))
205
-
206
- fig = plot_ecg(record)
207
- info = get_header_info(record)
208
-
209
- return fig, info, str(path) # Return path for inference
210
 
211
- def run_inference_logic(ecg_path_str, prompt="Describe the ECG."):
212
- """Runs the CAMEL inference."""
213
- if not ecg_path_str:
214
- return "Please select an ECG first."
215
-
216
  model = get_model()
217
-
218
- # Construct args expected by model.run()
219
- # Looking at run_camel.py:
220
- # args.mode, args.text, args.ecgs (list), args.device, args.ecg_configs
221
- # Defaults in run_camel.py: mode='base', device='cuda:0', top_k=64, top_p=0.95...
222
-
223
  args = SimpleNamespace(
224
- mode='base',
225
- text=prompt,
226
- ecgs=[ecg_path_str],
227
- device=model.device, # Use model's device
228
- ecg_configs=None, # Default
229
- json=None,
230
- temperature=0.0,
231
- top_k=64,
232
- top_p=0.95,
233
- min_p=0.0,
234
- max_new_tokens=512
235
  )
236
-
237
- try:
238
- output, used_prompt = model.run(args)
239
- return output
240
- except Exception as e:
241
- return f"Inference failed: {e}"
242
 
243
-
244
- # --- UI Construction ---
245
- with gr.Blocks(title="CAMEL ECG Analysis") as demo:
246
- gr.Markdown("# πŸͺ CAMEL ECG Foundation Model")
247
- gr.Markdown("Upload an ECG (.dat and .hea) or select an example to view the signal and run inference.")
248
-
249
- current_ecg_path = gr.State()
250
 
251
  with gr.Row():
252
- with gr.Column(scale=1):
253
- gr.Markdown("### Input")
 
 
 
 
 
 
254
 
255
- tab_example, tab_upload = gr.Tabs(), gr.Tabs()
 
256
 
257
- with gr.Tab("Examples"):
258
- example_dropdown = gr.Dropdown(
259
- label="Choose an Example",
260
- choices=load_examples(),
261
- value=None,
262
- interactive=True
263
- )
264
- load_example_btn = gr.Button("Load Example", size="sm")
265
-
266
- with gr.Tab("Upload"):
267
- file_upload = gr.File(
268
- label="Upload .dat AND .hea",
269
- file_count="multiple",
270
- file_types=[".dat", ".hea"]
271
- )
272
- load_upload_btn = gr.Button("Load Uploaded Files", size="sm")
273
-
274
- gr.Markdown("---")
275
- gr.Markdown("### Inference Config")
276
- prompt_input = gr.Textbox(
277
- label="Prompt",
278
- value="Describe the ECG.",
279
- lines=2
280
- )
281
- run_btn = gr.Button("Run Inference", variant="primary")
282
-
283
- with gr.Column(scale=2):
284
- gr.Markdown("### Visualization & Ground Truth")
285
- plot_output = gr.Plot(label="ECG Trace")
286
- label_output = gr.Code(label="Header Info / Ground Truth", language="markdown")
287
-
288
- gr.Markdown("### Model Output")
289
- inference_output = gr.Textbox(label="CAMEL Response", interactive=False, lines=5)
290
-
291
- # --- Interaction Logic ---
292
-
293
- def on_load_example(evt):
294
- fig, info, path = process_example(evt)
295
- return fig, info, path
296
 
297
- load_example_btn.click(
298
- on_load_example,
299
- inputs=[example_dropdown],
300
- outputs=[plot_output, label_output, current_ecg_path]
301
- )
302
-
303
- load_upload_btn.click(
304
- process_upload,
305
- inputs=[file_upload],
306
- outputs=[plot_output, label_output, current_ecg_path]
307
- )
308
-
309
- run_btn.click(
310
- run_inference_logic,
311
- inputs=[current_ecg_path, prompt_input],
312
- outputs=[inference_output]
313
- )
314
 
315
  if __name__ == "__main__":
316
- demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
 
1
  import gradio as gr
2
  import os
3
  import sys
 
4
  from pathlib import Path
5
  import wfdb
6
  import numpy as np
 
8
  import torch
9
  import glob
10
  from types import SimpleNamespace
11
+ from huggingface_hub import hf_hub_download
12
 
13
+ # --- Setup: Checkpoints & Model ---
14
+ CHECKPOINT_DIR = Path("checkpoints")
15
+ CHECKPOINT_DIR.mkdir(exist_ok=True)
 
16
 
17
+ # Ensure checkpoints are symlinked from cache to local dir for CAMEL
18
+ files = ["camel_base.pt", "camel_ecginstruct.pt", "camel_forecast.pt"]
19
+ repo_id = "CAMEL-ECG/CAMEL"
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
+ for f in files:
22
+ cached_path = hf_hub_download(repo_id=repo_id, filename=f)
23
+ target_link = CHECKPOINT_DIR / f
24
+ if not target_link.exists():
25
+ target_link.symlink_to(cached_path)
26
+ print(f"Symlinked {f} -> {cached_path}")
 
 
 
 
 
27
 
28
+ # Import local CAMEL library
29
+ from camel.camel_model import CAMEL
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
31
  MODEL_CACHE = None
32
 
33
  def get_model():
34
  global MODEL_CACHE
35
  if MODEL_CACHE is None:
 
 
 
 
 
 
 
36
  device = 'cuda' if torch.cuda.is_available() else 'cpu'
37
+ print(f"Loading model on {device}...")
 
 
38
  MODEL_CACHE = CAMEL(mode='base', device=device)
 
39
  return MODEL_CACHE
40
 
41
  # --- Helper Functions ---
42
+ DEMO_DIR = Path("demo")
43
 
44
  def load_examples():
45
+ return sorted([p.stem for p in DEMO_DIR.glob("*.hea")]) if DEMO_DIR.exists() else []
 
 
 
 
 
 
 
46
 
47
+ def read_ecg(path_str):
 
 
 
 
48
  try:
49
+ record_path = str(Path(path_str).with_suffix(""))
50
+ return wfdb.rdrecord(record_path)
51
  except Exception as e:
52
+ print(f"Error reading {path_str}: {e}")
53
  return None
54
 
55
  def plot_ecg(record):
56
+ if not record: return None
 
 
 
 
 
57
  fig, ax = plt.subplots(figsize=(10, 4))
58
+ signals = record.p_signal
59
+ samples = int(min(signals.shape[0], record.fs * 10))
60
+ t = np.arange(samples) / record.fs
61
+ ax.plot(t, signals[:samples, 0])
62
+ ax.set_title(f"Lead {record.sig_name[0]}")
 
 
 
 
 
 
 
 
 
 
 
63
  plt.tight_layout()
64
  return fig
65
 
66
+ def get_info(record):
67
+ if not record: return ""
68
+ return f"Fs: {record.fs} Hz\nLength: {record.sig_len}\nComments: {record.comments}"
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
  def process_upload(files):
71
+ if not files: return None, "", None
 
 
 
 
 
 
72
  paths = [Path(f.name) for f in files]
73
+ hea = next((p for p in paths if p.suffix == '.hea'), None)
74
+ if not hea: return None, "Missing .hea file", None
 
 
 
75
 
76
+ record = read_ecg(str(hea))
77
+ return plot_ecg(record), get_info(record), str(hea.with_suffix(""))
 
 
 
 
 
 
 
 
78
 
79
+ def process_example(name):
80
+ if not name: return None, "", None
81
+ path = DEMO_DIR / name
82
+ record = read_ecg(str(path))
83
+ return plot_ecg(record), get_info(record), str(path)
 
 
 
 
 
 
 
84
 
85
+ def run_inference(ecg_path, prompt):
86
+ if not ecg_path: return "Select ECG first."
 
 
 
87
  model = get_model()
 
 
 
 
 
 
88
  args = SimpleNamespace(
89
+ mode='base', text=prompt, ecgs=[ecg_path], device=model.device,
90
+ ecg_configs=None, json=None, temperature=0.0, top_k=64,
91
+ top_p=0.95, min_p=0.0, max_new_tokens=512
 
 
 
 
 
 
 
 
92
  )
93
+ output, _ = model.run(args)
94
+ return output
 
 
 
 
95
 
96
+ # --- UI ---
97
+ with gr.Blocks(title="CAMEL ECG") as demo:
98
+ gr.Markdown("# πŸͺ CAMEL ECG Model")
99
+ cur_path = gr.State()
 
 
 
100
 
101
  with gr.Row():
102
+ with gr.Column():
103
+ with gr.Tabs():
104
+ with gr.Tab("Example"):
105
+ ex_dd = gr.Dropdown(load_examples(), label="Example")
106
+ ex_btn = gr.Button("Load")
107
+ with gr.Tab("Upload"):
108
+ up_file = gr.File(file_count="multiple")
109
+ up_btn = gr.Button("Load Files")
110
 
111
+ prompt = gr.Textbox("Describe the ECG.", label="Prompt")
112
+ run_btn = gr.Button("Run", variant="primary")
113
 
114
+ with gr.Column():
115
+ plot = gr.Plot()
116
+ info = gr.Code(language="markdown")
117
+ out = gr.Textbox(label="Output", lines=5)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
118
 
119
+ ex_btn.click(process_example, ex_dd, [plot, info, cur_path])
120
+ up_btn.click(process_upload, up_file, [plot, info, cur_path])
121
+ run_btn.click(run_inference, [cur_path, prompt], out)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
 
123
  if __name__ == "__main__":
124
+ demo.launch(server_name="0.0.0.0", server_port=7860)
{camel_inference/src/camel β†’ camel}/__init__.py RENAMED
File without changes
{camel_inference/src/camel β†’ camel}/assertions.py RENAMED
File without changes
{camel_inference/src/camel β†’ camel}/camel_model.py RENAMED
File without changes
{camel_inference/src/camel β†’ camel}/checkpoint_utils.py RENAMED
File without changes
{camel_inference/src/camel β†’ camel}/ecg_attention_masks.py RENAMED
File without changes
{camel_inference/src/camel β†’ camel}/ecg_gemma_model.py RENAMED
File without changes
{camel_inference/src/camel β†’ camel}/ecg_model_wrapper.py RENAMED
File without changes
{camel_inference/src/camel β†’ camel}/ecg_text_packing.py RENAMED
File without changes
{camel_inference/src/camel β†’ camel}/inference.py RENAMED
File without changes
{camel_inference/src/camel β†’ camel}/model_init.py RENAMED
File without changes
{camel_inference/src/camel β†’ camel}/model_introspect.py RENAMED
File without changes
{camel_inference/src/camel β†’ camel}/model_registry.py RENAMED
File without changes
{camel_inference/src/camel β†’ camel}/model_registry.yaml RENAMED
File without changes
{camel_inference/src/camel β†’ camel}/process_ecg.py RENAMED
File without changes
{camel_inference/src/camel β†’ camel}/projectors.py RENAMED
File without changes
{camel_inference/src/camel β†’ camel}/prompt_renderers.py RENAMED
File without changes
{camel_inference/src/camel β†’ camel}/training_setup.py RENAMED
File without changes
camel_inference/.gitignore DELETED
@@ -1,12 +0,0 @@
1
- # MacOS
2
- .DS_Store
3
-
4
- # Python
5
- /.env
6
- __pycache__
7
-
8
- # Ignore model checkpoints
9
- checkpoints/
10
- *.pt
11
-
12
- *.egg-info
 
 
 
 
 
 
 
 
 
 
 
 
 
camel_inference/README.md DELETED
@@ -1,82 +0,0 @@
1
- # CAMEL Inference
2
-
3
- Inference-only repository for running CAMEL ECG-language checkpoints.
4
-
5
- Only `run_camel.py` is intended as a public entrypoint. Modules under `src/camel/` are internal implementation details and may change.
6
-
7
- ## Repository Layout
8
-
9
- - `run_camel.py`: public inference CLI
10
- - `src/camel/`: internal model, tokenizer, ECG packing, and loading utilities
11
- - `checkpoints/`: local adapter/checkpoint files
12
-
13
- ## Requirements
14
-
15
- - Python 3.10+
16
- - CUDA-enabled PyTorch recommended for practical inference latency
17
-
18
- ## Install
19
-
20
- ```bash
21
- conda create -n camel python=3.10 -y
22
- conda activate camel
23
- pip install -e .
24
- ```
25
-
26
- ## Checkpoints
27
-
28
- Checkpoints must be downloaded from huggingface `CAMEL-ECG/CAMEL` or with the repository script:
29
-
30
- ```bash
31
- bash scripts/download_checkpoints.sh
32
- ```
33
-
34
- ## Usage
35
-
36
- * CAMEL is available in three modes:
37
- - `base`
38
- - `ecgbench`
39
- - `forecast`
40
-
41
- ```bash
42
- python run_camel.py \
43
- --mode forecast \
44
- --text "Forecast cardiac rhythm for the next 5 minutes." \
45
- --ecgs demo/08704_hr \
46
- --device cuda:0
47
- ```
48
-
49
- ```bash
50
- python run_camel.py \
51
- --mode base \
52
- --text "Compare the two ECG waveforms." \
53
- --ecgs demo/12585_hr demo/12646_hr \
54
- --device cuda:0
55
- ```
56
-
57
- * Optionally, you can set start, end, and leads with `--ecgs-config`.
58
-
59
- ```bash
60
- python run_camel.py \
61
- --mode forecast \
62
- --text "Forecast cardiac rhythm for the next 5 minutes." \
63
- --ecgs demo/08704_hr \
64
- --ecg-configs "start:0;end:5;use_leads:I,II" \
65
- --device cuda:0
66
- ```
67
-
68
- * Using `--text` and `--ecgs` defaults to text followed by the ecg in order.
69
- For arbitrary text/ECG interleaving use `--json`.
70
- ```bash
71
- python run_camel.py --mode base --json demo/example_prompt.json --device cuda:0
72
- ```
73
-
74
- * Sampling flags:
75
- - `--temperature`
76
- - `--top-k`
77
- - `--top-p`
78
- - `--min-p`
79
- - `--max-new-tokens`
80
-
81
- Implementation notes:
82
- - ECG loading is currently implemented for WFDB-format inputs. To support additional formats, extend `src/read_ecg.py`.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
camel_inference/pyproject.toml DELETED
@@ -1,36 +0,0 @@
1
- [build-system]
2
- requires = ["setuptools>=61.0", "wheel"]
3
- build-backend = "setuptools.build_meta"
4
-
5
- [tool.setuptools]
6
- py-modules = ["run_camel"]
7
-
8
- [tool.setuptools.packages.find]
9
- where = ["src"]
10
-
11
- [tool.setuptools.package-data]
12
- camel = ["model_registry.yaml"]
13
-
14
- [project]
15
- name = "camel-inference"
16
- version = "0.1.0"
17
- description = "Inference-only CLI for CAMEL ECG-language checkpoints"
18
- readme = "README.md"
19
- requires-python = ">=3.9"
20
- authors = [
21
- { name = "CAMEL contributors" }
22
- ]
23
- dependencies = [
24
- "numpy",
25
- "scipy",
26
- "pyyaml",
27
- "torch",
28
- "transformers",
29
- "peft",
30
- "accelerate",
31
- "sentencepiece",
32
- "protobuf"
33
- ]
34
-
35
- [project.scripts]
36
- camel-infer = "run_camel:main"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
camel_inference/run_camel.py DELETED
@@ -1,49 +0,0 @@
1
- import argparse
2
- from camel.camel_model import CAMEL
3
-
4
- def main():
5
- parser = argparse.ArgumentParser(description="CAMEL")
6
- parser.add_argument("--mode", type=str, choices=['forecast', 'base', 'ecgbench'], default='base')
7
- parser.add_argument("--device", type=str, default='cuda:0')
8
- parser.add_argument("--json", type=str, default=None)
9
- parser.add_argument("--text", type=str, default=None)
10
- parser.add_argument("--ecgs", type=str, default=None, nargs='+')
11
- parser.add_argument("--ecg-configs", type=str, default=None, nargs='+')
12
- parser.add_argument("--temperature", type=float, default=0.0)
13
- parser.add_argument(
14
- "--top-k",
15
- dest="top_k",
16
- type=int,
17
- default=64,
18
- help="Top-k sampling cutoff (set <=0 to disable).",
19
- )
20
- parser.add_argument(
21
- "--top-p",
22
- dest="top_p",
23
- type=float,
24
- default=0.95,
25
- help="Nucleus sampling cumulative probability cutoff.",
26
- )
27
- parser.add_argument(
28
- "--min-p",
29
- dest="min_p",
30
- type=float,
31
- default=0.0,
32
- help="Minimum per-token probability threshold applied after temperature scaling.",
33
- )
34
- parser.add_argument(
35
- "--max-new-tokens",
36
- type=int,
37
- default=512,
38
- help="Maximum number of tokens to generate per sample.",
39
- )
40
- args = parser.parse_args()
41
-
42
- model = CAMEL(mode=args.mode, device=args.device)
43
- output, prompt = model.run(args)
44
-
45
- print(f'Prompt: {prompt}')
46
- print(f'Prediction: {output}')
47
-
48
- if __name__ == "__main__":
49
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
camel_inference/scripts/download_checkpoints.sh DELETED
@@ -1,37 +0,0 @@
1
- #!/usr/bin/env bash
2
- set -euo pipefail
3
-
4
- echo "Installing huggingface_hub if needed..."
5
- python3 -m pip install -q --user huggingface_hub
6
-
7
- echo "Downloading CAMEL checkpoints from Hugging Face..."
8
- mkdir -p checkpoints
9
-
10
- python3 - <<'PY'
11
- import os, shutil
12
- from huggingface_hub import hf_hub_download
13
-
14
- repo = "CAMEL-ECG/CAMEL"
15
- files = [
16
- "camel_base.pt",
17
- "camel_ecginstruct.pt",
18
- "camel_forecast.pt"
19
- ]
20
-
21
- os.makedirs("checkpoints", exist_ok=True)
22
-
23
- for f in files:
24
- print(f"Downloading {f}...")
25
- src = hf_hub_download(
26
- repo_id=repo,
27
- filename=f,
28
- repo_type="model"
29
- )
30
- dst = os.path.join("checkpoints", f)
31
- shutil.copy2(src, dst)
32
- print(f"Saved to {dst}")
33
-
34
- print("All checkpoints downloaded.")
35
- PY
36
-
37
- echo "Done."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
{camel_inference/demo β†’ demo}/08704_hr.dat RENAMED
File without changes
{camel_inference/demo β†’ demo}/08704_hr.hea RENAMED
File without changes
{camel_inference/demo β†’ demo}/12585_hr.dat RENAMED
File without changes
{camel_inference/demo β†’ demo}/12585_hr.hea RENAMED
File without changes
{camel_inference/demo β†’ demo}/12646_hr.dat RENAMED
File without changes
{camel_inference/demo β†’ demo}/12646_hr.hea RENAMED
File without changes
{camel_inference/demo β†’ demo}/example_prompt.json RENAMED
File without changes
camel_inference/src/read_ecg.py β†’ read_ecg.py RENAMED
File without changes
requirements.txt CHANGED
@@ -12,3 +12,4 @@ sentencepiece
12
  torch
13
  transformers
14
  wfdb
 
 
12
  torch
13
  transformers
14
  wfdb
15
+ accelerate
run_local.sh DELETED
@@ -1,6 +0,0 @@
1
- #!/bin/bash
2
- echo "Installing dependencies..."
3
- pip install -r requirements.txt
4
-
5
- echo "Running CAMEL App..."
6
- python app.py
 
 
 
 
 
 
 
test_setup.py DELETED
@@ -1,54 +0,0 @@
1
- import sys
2
- import os
3
- from pathlib import Path
4
- import wfdb
5
-
6
- # Add camel_inference to path just like app.py does
7
- repo_dir = Path("./camel_inference").resolve()
8
- if str(repo_dir) not in sys.path:
9
- sys.path.append(str(repo_dir))
10
-
11
- print("Testing imports...")
12
- try:
13
- import gradio
14
- import numpy
15
- import matplotlib
16
- import torch
17
- print("Standard deps: OK")
18
- except ImportError as e:
19
- print(f"Standard deps failed: {e}")
20
- sys.exit(1)
21
-
22
- print("Testing CAMEL import...")
23
- try:
24
- from camel.camel_model import CAMEL
25
- print("CAMEL import: OK")
26
- except ImportError as e:
27
- print(f"CAMEL import failed: {e}")
28
- # Try looking in src explicitly if package install failed to link correctly
29
- sys.path.append(str(repo_dir / "src"))
30
- try:
31
- from camel.camel_model import CAMEL
32
- print("CAMEL import (via src fallback): OK")
33
- except ImportError as e2:
34
- print(f"CAMEL import totally failed: {e2}")
35
- sys.exit(1)
36
-
37
- print("Testing WFDB Data Loading...")
38
- demo_dir = repo_dir / "demo"
39
- hea_files = list(demo_dir.glob("*.hea"))
40
- if not hea_files:
41
- print("No .hea files found in demo dir!")
42
- sys.exit(1)
43
-
44
- example_path = str(hea_files[0].with_suffix(""))
45
- try:
46
- record = wfdb.rdrecord(example_path)
47
- print(f"Loaded {record.record_name}: Fs={record.fs}, Shape={record.p_signal.shape}")
48
- except Exception as e:
49
- print(f"WFDB load failed: {e}")
50
- sys.exit(1)
51
-
52
- print("\n------------------------------")
53
- print("VERIFICATION SUCCESSFUL")
54
- print("------------------------------")