minium committed on
Commit
e217881
·
verified ·
1 Parent(s): ca6d175

Upload example_usage.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. example_usage.py +56 -0
example_usage.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Mobile VLA ์‚ฌ์šฉ ์˜ˆ์ œ
4
+ """
5
+
6
+ import torch
7
+ from transformers import AutoTokenizer, AutoProcessor
8
+ from PIL import Image
9
+ import numpy as np
10
+
11
def load_mobile_vla_model(model_name="minuum/mobile-vla"):
    """Load the Mobile VLA model.

    Placeholder implementation: announces which checkpoint would be
    loaded and returns None.  The real version would build the trainer:

        from robovlms.train.mobile_vla_trainer import MobileVLATrainer
        model = MobileVLATrainer.from_pretrained(model_name)

    Args:
        model_name: Hub id of the checkpoint to load.

    Returns:
        None (placeholder until real loading logic is implemented).
    """
    print(f"Loading Mobile VLA model: {model_name}")
    return None  # placeholder — no model object is constructed yet
22
+
23
def predict_action(model, image_path, task_description):
    """Predict a mobile-base action for one image/task pair.

    Placeholder implementation: the image is opened and converted to
    RGB (mirroring the real pipeline's input step — the full version
    would apply mobile_vla_collate_fn-style preprocessing and run the
    model), but a fixed dummy action is returned.

    Args:
        model: loaded model object (unused in this placeholder).
        image_path: path to the input RGB image.
        task_description: natural-language task (unused here).

    Returns:
        list[float]: [linear_x, linear_y, angular_z] velocity command.
    """
    frame = Image.open(image_path).convert("RGB")
    # Real implementation: preprocess `frame`, then run inference with `model`.
    # processed = preprocess_image(frame)
    return [0.5, 0.2, 0.1]  # dummy [linear_x, linear_y, angular_z]
36
+
37
def main():
    """Run the Mobile VLA example end to end.

    Loads the (placeholder) model, requests an action prediction for a
    sample image/task pair, and prints the resulting 3-DOF action
    (linear_x, linear_y, angular_z).
    """
    # NOTE(review): the original banner string was mojibake-corrupted
    # (UTF-8 text decoded with the wrong codec); rewritten in readable form.
    print("🚀 Running Mobile VLA example")

    # Load the model (placeholder returns None for now).
    model = load_mobile_vla_model()

    # Example prediction on a sample image.
    task = "Navigate around obstacles to track the target cup"
    action = predict_action(model, "example_image.jpg", task)

    print(f"Task: {task}")
    print(f"Predicted Action: {action}")
    print(f" - Linear X (forward/backward): {action[0]:.3f}")
    print(f" - Linear Y (left/right): {action[1]:.3f}")
    print(f" - Angular Z (rotation): {action[2]:.3f}")


if __name__ == "__main__":
    main()