gkemp181 committed on
Commit 8edfe19 · 1 Parent(s): ca09bf5

Moved files into new repository

Files changed (5)
  1. app.py +14 -31
  2. app_test_2.py +89 -0
  3. app_test_3.py +64 -0
  4. app_test_4.py +46 -0
  5. app_test_5.py +72 -0
app.py CHANGED
@@ -4,28 +4,26 @@ import numpy as np
  import torch
  import imageio
  from stable_baselines3 import SAC
- from custom_env import create_env
+ from create_env import create_env
 
  # Define the function that runs the model and outputs a video
- def run_model_episode(x_start, y_start, x_targ, y_targ, z_targ):
-     # Create environment with user inputs
-     env = create_env(render_mode="rgb_array",
-                      block_xy=(x_start, y_start),
-                      goal_xyz=(x_targ, y_targ, z_targ))
+ def run_model_episode():
+     # 1. Create environment with render_mode="rgb_array" (needed to capture frames)
+     env = create_env(render_mode="rgb_array")
 
-     # Load your trained model
-     checkpoint_path = os.path.join("App", "model", "model.zip")
+     # 2. Load your trained model
+     checkpoint_path = os.path.join("models", "test", "model.zip")
      model = SAC.load(checkpoint_path, env=env, verbose=1)
 
-     # Rollout the episode
+     # 3. Rollout the episode
      frames = []
      obs, info = env.reset()
 
-     for _ in range(200):  # Shorter rollout
+     for _ in range(200):  # Shorter rollout to avoid giant videos
          action, _ = model.predict(obs, deterministic=True)
          obs, reward, done, trunc, info = env.step(action)
 
-         frame = env.render()
+         frame = env.render()  # Get current frame as image (rgb_array)
          frames.append(frame)
 
          if done or trunc:
@@ -33,10 +31,11 @@ def run_model_episode(x_start, y_start, x_targ, y_targ, z_targ):
 
      env.close()
 
-     # Save frames into a video
+     # 4. Save the frames into a video
      video_path = "run_video.mp4"
      imageio.mimsave(video_path, frames, fps=30)
 
+     # 5. Return path to Gradio to display
      return video_path
 
  # --------------------------------------
@@ -44,29 +43,13 @@ def run_model_episode(x_start, y_start, x_targ, y_targ, z_targ):
  # --------------------------------------
 
  with gr.Blocks() as demo:
-     gr.Markdown("## Fetch Robot: Model Demo App")
-     gr.Markdown("Enter start and target coordinates, then click 'Run Model' to watch the robot!")
-     gr.Markdown("Coordinates are relative to the center of the table.")
-     gr.Markdown("X and Y coordinates are in meters, Z coordinate is height in meters.")
-     gr.Markdown("0,0,0 is the center of the table.")
-
-     with gr.Row():
-         x_start = gr.Number(label="Start X", value=0.0)
-         y_start = gr.Number(label="Start Y", value=0.0)
-
-     with gr.Row():
-         x_targ = gr.Number(label="Target X", value=0.1)
-         y_targ = gr.Number(label="Target Y", value=0.1)
-         z_targ = gr.Number(label="Target Z", value=0.1)
+     gr.Markdown("# 🤖 Fetch Robot: Model Demo App")
+     gr.Markdown("Click 'Run Model' to watch the SAC agent interact with the FetchPickAndPlace environment.")
 
      run_button = gr.Button("Run Model")
      output_video = gr.Video()
 
-     run_button.click(
-         fn=run_model_episode,
-         inputs=[x_start, y_start, x_targ, y_targ, z_targ],
-         outputs=output_video
-     )
+     run_button.click(fn=run_model_episode, inputs=[], outputs=output_video)
 
  demo.launch(share=True)
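Note: the `create_env` helper that app.py now imports is not included in this commit. For orientation only, here is a minimal sketch of what such a helper might wrap, given that the UI text names the FetchPickAndPlace environment — the environment id, the registration call, and the module layout are all assumptions, not taken from this repository:

import gymnasium as gym
import gymnasium_robotics

# Assumed registration step for recent Gymnasium versions; older
# gymnasium_robotics releases register the Fetch environments on import.
gym.register_envs(gymnasium_robotics)

def create_env(render_mode="rgb_array"):
    # FetchPickAndPlace returns dict observations, which SB3's SAC supports
    # when the checkpoint was trained with a MultiInputPolicy.
    return gym.make("FetchPickAndPlace-v2", render_mode=render_mode)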
app_test_2.py ADDED
@@ -0,0 +1,89 @@
+ import gradio as gr
+ import wandb
+ import requests
+ from PIL import Image
+ from io import BytesIO
+
+ # Connect to W&B
+ api = wandb.Api()
+
+ # Replace this with your correct run path
+ ENTITY = "jarrett-defreitas-university-of-rhode-island"  # your wandb username or team
+ PROJECT = "pickup-and-place"
+ RUN_ID = "trr5oagz"  # NOT the display name; the ID, like "3xi2sld8"
+
+ run = api.run(f"{ENTITY}/{PROJECT}/{RUN_ID}")
+
+ # Collect all images and metrics
+ logged_images = []
+ logged_scalars = []
+
+ # Scan all rows of logged history
+ for row in run.scan_history():
+     for key, val in row.items():
+         # Handle images correctly
+         if isinstance(val, list):
+             for item in val:
+                 if isinstance(item, wandb.data_types.Image):
+                     logged_images.append((key, item.url))
+         elif isinstance(val, wandb.data_types.Image):
+             logged_images.append((key, val.url))
+
+         # Handle scalars (numbers like loss, accuracy)
+         if isinstance(val, (int, float)):
+             logged_scalars.append((key, val))
+
+ # Debug: show what was found
+ print("Logged Images:", logged_images)
+ print("Logged Scalars:", logged_scalars)
+
+ # --------------------------------------
+ # 3. Prepare Dropdown Choices
+ # --------------------------------------
+
+ image_keys = [key for key, _ in logged_images]
+ scalar_keys = [key for key, _ in logged_scalars]
+
+ # --------------------------------------
+ # 4. Define viewer functions
+ # --------------------------------------
+
+ # View image by selected key
+ def view_image(selected_key):
+     for key, url in logged_images:
+         if key == selected_key:
+             response = requests.get(url)
+             if response.status_code == 200:
+                 return Image.open(BytesIO(response.content))
+             else:
+                 return None
+     return None
+
+ # View scalar (number) by selected key
+ def view_scalar(selected_key):
+     for key, value in logged_scalars:
+         if key == selected_key:
+             return f"{key}: {value}"
+     return "Not found"
+
+ # --------------------------------------
+ # 5. Build the Gradio App
+ # --------------------------------------
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# 📈 WandB Run Viewer")
+     gr.Markdown("View images and metrics logged to a specific W&B run.")
+
+     with gr.Tab("Logged Images"):
+         img_selector = gr.Dropdown(choices=image_keys, label="Select an Image Key")
+         img_display = gr.Image()
+
+         img_selector.change(fn=view_image, inputs=img_selector, outputs=img_display)
+
+     with gr.Tab("Logged Scalars"):
+         scalar_selector = gr.Dropdown(choices=scalar_keys, label="Select a Scalar Metric")
+         scalar_display = gr.Textbox()
+
+         scalar_selector.change(fn=view_scalar, inputs=scalar_selector, outputs=scalar_display)
+
+ demo.launch(share=True)
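Note: depending on the wandb client version, `run.scan_history()` may return logged images as plain dicts rather than `wandb.data_types.Image` objects, in which case the `isinstance` checks above collect nothing. A hedged fallback sketch — the `"_type"`/`"path"` dict keys are an assumption about the serialized format, while `run.file(...).download(...)` is the public W&B API for fetching run files:

def maybe_image_path(val):
    # History rows sometimes serialize images as {"_type": "image-file", "path": ...}
    if isinstance(val, dict) and val.get("_type") == "image-file":
        return val.get("path")
    return None

# Usage sketch inside the scan_history loop:
# path = maybe_image_path(val)
# if path is not None:
#     local = run.file(path).download(replace=True)  # returns an open file handle
#     logged_images.append((key, local.name))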
app_test_3.py ADDED
@@ -0,0 +1,64 @@
+ import gradio as gr
+ import os
+ import numpy as np
+ import torch
+ import imageio
+ from stable_baselines3 import SAC
+ from custom_env import create_env
+
+ # Define the function that runs the model and outputs a video
+ def run_model_episode():
+     # 1. Create environment with render_mode="rgb_array" (needed to capture frames)
+     # e.g. user inputs:
+     # Relative to the center of the table
+     x_start, y_start = 0.0, 0.0
+     x_targ, y_targ, z_targ = 0.1, 0.1, 0.1
+
+     env = create_env(render_mode="rgb_array",
+                      block_xy=(x_start, y_start),
+                      goal_xyz=(x_targ, y_targ, z_targ))
+
+     # 2. Load your trained model
+     checkpoint_path = os.path.join("model", "model.zip")
+     model = SAC.load(checkpoint_path, env=env, verbose=1)
+
+     # 3. Rollout the episode
+     frames = []
+     obs, info = env.reset()
+
+     for _ in range(200):  # Shorter rollout to avoid giant videos
+         action, _ = model.predict(obs, deterministic=True)
+         obs, reward, done, trunc, info = env.step(action)
+
+         frame = env.render()  # Get current frame as image (rgb_array)
+         frames.append(frame)
+
+         if done or trunc:
+             obs, info = env.reset()
+
+     env.close()
+
+     # TODO: This will probably need to save into a unique directory
+     # so it doesn't get overwritten when multiple people are running the app
+
+     # 4. Save the frames into a video
+     video_path = "run_video_2.mp4"
+     imageio.mimsave(video_path, frames, fps=30)
+
+     # 5. Return path to Gradio to display
+     return video_path
+
+ # --------------------------------------
+ # Build the Gradio App
+ # --------------------------------------
+
+ with gr.Blocks() as demo:
+     gr.Markdown("Fetch Robot: Model Demo App")
+     gr.Markdown("Click 'Run Model' to watch the SAC agent interact with the FetchPickAndPlace environment.")
+
+     run_button = gr.Button("Run Model")
+     output_video = gr.Video()
+
+     run_button.click(fn=run_model_episode, inputs=[], outputs=output_video)
+
+ demo.launch(share=True)
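Note: the TODO in app_test_3.py about overwritten videos can be handled by giving each rollout its own output file. A minimal sketch using only the standard library; the helper name `unique_video_path` is hypothetical:

import os
import tempfile
import uuid

def unique_video_path(prefix="run_video"):
    # e.g. /tmp/run_video_9f1c2b...mp4 — a fresh name per call, so concurrent
    # users of the app don't clobber each other's videos
    return os.path.join(tempfile.gettempdir(), f"{prefix}_{uuid.uuid4().hex}.mp4")

# Usage sketch inside run_model_episode:
# video_path = unique_video_path()
# imageio.mimsave(video_path, frames, fps=30)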
app_test_4.py ADDED
@@ -0,0 +1,46 @@
+ import gradio as gr
+ import os
+ import numpy as np
+ import torch
+ import imageio
+ import time
+ from stable_baselines3 import SAC
+ from custom_env import create_env
+
+ def stream_frames():
+     x_start, y_start = 0.0, 0.0
+     x_targ, y_targ, z_targ = 0.1, 0.1, 0.1
+
+     env = create_env(render_mode="rgb_array",
+                      block_xy=(x_start, y_start),
+                      goal_xyz=(x_targ, y_targ, z_targ))
+
+     checkpoint_path = os.path.join("App", "model", "model.zip")
+     model = SAC.load(checkpoint_path, env=env, verbose=1)
+
+     obs, info = env.reset()
+
+     while True:
+         action, _ = model.predict(obs, deterministic=True)
+         obs, reward, done, trunc, info = env.step(action)
+
+         frame = env.render()  # Grab RGB frame
+         yield frame  # Yield this frame to Gradio
+
+         if done or trunc:
+             obs, info = env.reset()
+
+         time.sleep(0.033)  # ~30 FPS (1/30 seconds)
+
+     env.close()
+
+ # Build Gradio app
+ with gr.Blocks() as demo:
+     gr.Markdown("Fetch Robot: Live Model Demo App")
+     frame_output = gr.Image()
+     start_button = gr.Button("Start Streaming")
+
+     start_button.click(fn=stream_frames, inputs=[], outputs=frame_output)
+
+ demo.queue()
+ demo.launch(share=True)
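Note: because `stream_frames` loops with `while True:`, the trailing `env.close()` is unreachable and the stream only ends when the client disconnects. A bounded variant, sketched with the same names this file already defines (`max_steps` is an illustrative parameter, not from the commit):

def stream_frames_bounded(max_steps=300):
    # Identical setup to stream_frames, but the rollout ends after max_steps,
    # so env.close() actually runs and the Gradio stream terminates cleanly.
    env = create_env(render_mode="rgb_array",
                     block_xy=(0.0, 0.0),
                     goal_xyz=(0.1, 0.1, 0.1))
    model = SAC.load(os.path.join("App", "model", "model.zip"), env=env)
    obs, info = env.reset()
    for _ in range(max_steps):
        action, _ = model.predict(obs, deterministic=True)
        obs, reward, done, trunc, info = env.step(action)
        yield env.render()  # one RGB frame per step to the gr.Image output
        if done or trunc:
            obs, info = env.reset()
        time.sleep(1 / 30)  # ~30 FPS
    env.close()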
app_test_5.py ADDED
@@ -0,0 +1,72 @@
+ import gradio as gr
+ import os
+ import numpy as np
+ import torch
+ import imageio
+ from stable_baselines3 import SAC
+ from custom_env import create_env
+
+ # Define the function that runs the model and outputs a video
+ def run_model_episode(x_start, y_start, x_targ, y_targ, z_targ):
+     # Create environment with user inputs
+     env = create_env(render_mode="rgb_array",
+                      block_xy=(x_start, y_start),
+                      goal_xyz=(x_targ, y_targ, z_targ))
+
+     # Load your trained model
+     checkpoint_path = os.path.join("App", "model", "model.zip")
+     model = SAC.load(checkpoint_path, env=env, verbose=1)
+
+     # Rollout the episode
+     frames = []
+     obs, info = env.reset()
+
+     for _ in range(200):  # Shorter rollout
+         action, _ = model.predict(obs, deterministic=True)
+         obs, reward, done, trunc, info = env.step(action)
+
+         frame = env.render()
+         frames.append(frame)
+
+         if done or trunc:
+             obs, info = env.reset()
+
+     env.close()
+
+     # Save frames into a video
+     video_path = "run_video.mp4"
+     imageio.mimsave(video_path, frames, fps=30)
+
+     return video_path
+
+ # --------------------------------------
+ # Build the Gradio App
+ # --------------------------------------
+
+ with gr.Blocks() as demo:
+     gr.Markdown("## Fetch Robot: Model Demo App")
+     gr.Markdown("Enter start and target coordinates, then click 'Run Model' to watch the robot!")
+     gr.Markdown("Coordinates are relative to the center of the table.")
+     gr.Markdown("X and Y coordinates are in meters; the Z coordinate is height in meters.")
+     gr.Markdown("0,0,0 is the center of the table.")
+
+     with gr.Row():
+         x_start = gr.Number(label="Start X", value=0.0)
+         y_start = gr.Number(label="Start Y", value=0.0)
+
+     with gr.Row():
+         x_targ = gr.Number(label="Target X", value=0.1)
+         y_targ = gr.Number(label="Target Y", value=0.1)
+         z_targ = gr.Number(label="Target Z", value=0.1)
+
+     run_button = gr.Button("Run Model")
+     output_video = gr.Video()
+
+     run_button.click(
+         fn=run_model_episode,
+         inputs=[x_start, y_start, x_targ, y_targ, z_targ],
+         outputs=output_video
+     )
+
+ demo.launch(share=True)
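Note: app_test_5.py passes raw user numbers straight into `create_env`, so out-of-range targets can put the goal outside the robot's reachable workspace. A small guard, sketched with illustrative bounds — the ±0.15 m and 0–0.45 m limits are assumptions, not values taken from custom_env:

def clamp_coords(x, y, z, xy_limit=0.15, z_max=0.45):
    # Clamp user-supplied coordinates (meters, relative to the table center)
    # to an assumed usable workspace before building the environment.
    x = max(-xy_limit, min(xy_limit, x))
    y = max(-xy_limit, min(xy_limit, y))
    z = max(0.0, min(z_max, z))
    return x, y, z

# Usage sketch at the top of run_model_episode:
# x_targ, y_targ, z_targ = clamp_coords(x_targ, y_targ, z_targ)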