Improve language tag

#1
by lbourdois - opened
Files changed (1) hide show
  1. README.md +61 -49
README.md CHANGED
@@ -1,50 +1,62 @@
1
- ---
2
- license: cc
3
- language:
4
- - en
5
- base_model:
6
- - Qwen/Qwen2.5-1.5B-Instruct
7
- ---
8
-
9
- todos:
10
- * check numerical output same as original VILA impl
11
- * check training stablitiy
12
- * save_pretrained()
13
-
14
- already finished
15
- * AutoModel.from_pretrained() / device_map auto to shard
16
- * loading
17
- * fix recursive imports
18
- * text conv
19
- * image + text conv:
20
- * .generate() / .generate_content()
21
- * llava/cli/infer.py
22
- * tests/bash/test_inference.sh
23
-
24
- ## NVILA HF Comptatible Mode
25
- Remote model loading example
26
-
27
- ```python
28
- from transformers import AutoConfig, AutoModel
29
- from termcolor import colored
30
-
31
- model_path = "Efficient-Large-Model/nvila_lite_3b_dev"
32
- print("main_dev.py, loading from ", model_path)
33
-
34
- # config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
35
- # model = AutoModel.from_config(config, trust_remote_code=True)
36
- model = AutoModel.from_pretrained(model_path, trust_remote_code=True, device_map="auto")
37
- res = model.generate_content([
38
- "how are you today?"
39
- ])
40
- print(colored(res, "cyan", attrs=["bold"]))
41
-
42
- print("---" * 40)
43
-
44
- import PIL.Image
45
- response = model.generate_content([
46
- PIL.Image.open("inference_test/test_data/caption_meat.jpeg"),
47
- "describe the image?"
48
- ])
49
- print(colored(response, "cyan", attrs=["bold"]))
 
 
 
 
 
 
 
 
 
 
 
 
50
  ```
 
1
+ ---
2
+ license: cc
3
+ language:
4
+ - zho
5
+ - eng
6
+ - fra
7
+ - spa
8
+ - por
9
+ - deu
10
+ - ita
11
+ - rus
12
+ - jpn
13
+ - kor
14
+ - vie
15
+ - tha
16
+ - ara
17
+ base_model:
18
+ - Qwen/Qwen2.5-1.5B-Instruct
19
+ ---
20
+
21
+ todos:
22
+ * check numerical output same as original VILA impl
23
+ * check training stability
24
+ * save_pretrained()
25
+
26
+ already finished
27
+ * AutoModel.from_pretrained() / device_map auto to shard
28
+ * loading
29
+ * fix recursive imports
30
+ * text conv
31
+ * image + text conv:
32
+ * .generate() / .generate_content()
33
+ * llava/cli/infer.py
34
+ * tests/bash/test_inference.sh
35
+
36
+ ## NVILA HF Compatible Mode
37
+ Remote model loading example
38
+
39
+ ```python
40
+ from transformers import AutoConfig, AutoModel
41
+ from termcolor import colored
42
+
43
+ model_path = "Efficient-Large-Model/nvila_lite_3b_dev"
44
+ print("main_dev.py, loading from ", model_path)
45
+
46
+ # config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
47
+ # model = AutoModel.from_config(config, trust_remote_code=True)
48
+ model = AutoModel.from_pretrained(model_path, trust_remote_code=True, device_map="auto")
49
+ res = model.generate_content([
50
+ "how are you today?"
51
+ ])
52
+ print(colored(res, "cyan", attrs=["bold"]))
53
+
54
+ print("---" * 40)
55
+
56
+ import PIL.Image
57
+ response = model.generate_content([
58
+ PIL.Image.open("inference_test/test_data/caption_meat.jpeg"),
59
+ "describe the image?"
60
+ ])
61
+ print(colored(response, "cyan", attrs=["bold"]))
62
  ```