jaeikkim commited on
Commit
afc1fa2
·
1 Parent(s): 81b76c0

Change MagViT model directory from "showlab/magvitv2" to "jaeikkim/magvitv2"

Browse files
MMaDA/configs/mmada_demo.yaml CHANGED
@@ -11,7 +11,7 @@ experiment:
11
  model:
12
  vq_model:
13
  type: "magvitv2"
14
- vq_model_name: "showlab/magvitv2"
15
 
16
  mmada:
17
  pretrained_model_path: "Gen-Verse/MMaDA-8B-Base"
 
11
  model:
12
  vq_model:
13
  type: "magvitv2"
14
+ vq_model_name: "jaeikkim/magvitv2"
15
 
16
  mmada:
17
  pretrained_model_path: "Gen-Verse/MMaDA-8B-Base"
MMaDA/configs/mmada_demo_s2t.yaml CHANGED
@@ -19,7 +19,7 @@ experiment:
19
  model:
20
  vq_model_image:
21
  type: "magvitv2"
22
- vq_model_name: "showlab/magvitv2"
23
  ### Omada ###############################################################
24
  vq_model_audio:
25
  type: "emova"
 
19
  model:
20
  vq_model_image:
21
  type: "magvitv2"
22
+ vq_model_name: "jaeikkim/magvitv2"
23
  ### Omada ###############################################################
24
  vq_model_audio:
25
  type: "emova"
MMaDA/configs/mmada_demo_speech.yaml CHANGED
@@ -11,7 +11,7 @@ experiment:
11
  model:
12
  vq_model:
13
  type: "magvitv2"
14
- vq_model_name: "showlab/magvitv2"
15
  speech_model:
16
  type: "emova"
17
  speech_model_name: "Emova-ollm/emova_speech_tokenizer_hf"
 
11
  model:
12
  vq_model:
13
  type: "magvitv2"
14
+ vq_model_name: "jaeikkim/magvitv2"
15
  speech_model:
16
  type: "emova"
17
  speech_model_name: "Emova-ollm/emova_speech_tokenizer_hf"
MMaDA/configs/mmada_demo_video.yaml CHANGED
@@ -11,7 +11,7 @@ experiment:
11
  model:
12
  vq_model:
13
  type: "magvitv2"
14
- vq_model_name: "showlab/magvitv2"
15
 
16
  mmada:
17
  pretrained_model_path: "Gen-Verse/MMaDA-8B-Base"
 
11
  model:
12
  vq_model:
13
  type: "magvitv2"
14
+ vq_model_name: "jaeikkim/magvitv2"
15
 
16
  mmada:
17
  pretrained_model_path: "Gen-Verse/MMaDA-8B-Base"
MMaDA/configs/mmada_demo_video_temp.yaml CHANGED
@@ -11,7 +11,7 @@ experiment:
11
  model:
12
  vq_model:
13
  type: "magvitv2"
14
- vq_model_name: "showlab/magvitv2"
15
 
16
  mmada:
17
  pretrained_model_path: "Gen-Verse/MMaDA-8B-Base"
 
11
  model:
12
  vq_model:
13
  type: "magvitv2"
14
+ vq_model_name: "jaeikkim/magvitv2"
15
 
16
  mmada:
17
  pretrained_model_path: "Gen-Verse/MMaDA-8B-Base"
MMaDA/configs/mmada_pretraining_i2i.yaml CHANGED
@@ -20,7 +20,7 @@ experiment:
20
  model:
21
  vq_model:
22
  type: "magvitv2"
23
- vq_model_name: "showlab/magvitv2"
24
 
25
  mmada:
26
  tokenizer_path: "GSAI-ML/LLaDA-8B-Instruct"
 
20
  model:
21
  vq_model:
22
  type: "magvitv2"
23
+ vq_model_name: "jaeikkim/magvitv2"
24
 
25
  mmada:
26
  tokenizer_path: "GSAI-ML/LLaDA-8B-Instruct"
MMaDA/configs/mmada_pretraining_stage1_llada_instruct.yaml CHANGED
@@ -19,7 +19,7 @@ experiment:
19
  model:
20
  vq_model:
21
  type: "magvitv2"
22
- vq_model_name: "showlab/magvitv2"
23
  mmada:
24
  pretrained_model_path: "GSAI-ML/LLaDA-8B-Instruct"
25
  w_clip_vit: False
 
19
  model:
20
  vq_model:
21
  type: "magvitv2"
22
+ vq_model_name: "jaeikkim/magvitv2"
23
  mmada:
24
  pretrained_model_path: "GSAI-ML/LLaDA-8B-Instruct"
25
  w_clip_vit: False
MMaDA/configs/mmada_pretraining_stage2_llada_instruct.yaml CHANGED
@@ -21,7 +21,7 @@ experiment:
21
  model:
22
  vq_model:
23
  type: "magvitv2"
24
- vq_model_name: "showlab/magvitv2"
25
 
26
  mmada:
27
  tokenizer_path: "GSAI-ML/LLaDA-8B-Instruct"
 
21
  model:
22
  vq_model:
23
  type: "magvitv2"
24
+ vq_model_name: "jaeikkim/magvitv2"
25
 
26
  mmada:
27
  tokenizer_path: "GSAI-ML/LLaDA-8B-Instruct"
MMaDA/configs/mmada_pretraining_stage3_llada_instruct.yaml CHANGED
@@ -21,7 +21,7 @@ experiment:
21
  model:
22
  vq_model:
23
  type: "magvitv2"
24
- vq_model_name: "showlab/magvitv2"
25
 
26
  mmada:
27
  tokenizer_path: "GSAI-ML/LLaDA-8B-Instruct"
 
21
  model:
22
  vq_model:
23
  type: "magvitv2"
24
+ vq_model_name: "jaeikkim/magvitv2"
25
 
26
  mmada:
27
  tokenizer_path: "GSAI-ML/LLaDA-8B-Instruct"
MMaDA/configs/mmada_pretraining_stage3_llada_instruct_512_cot.yaml CHANGED
@@ -22,7 +22,7 @@ experiment:
22
  model:
23
  vq_model:
24
  type: "magvitv2"
25
- vq_model_name: "showlab/magvitv2"
26
 
27
  mmada:
28
  tokenizer_path: "GSAI-ML/LLaDA-8B-Instruct"
 
22
  model:
23
  vq_model:
24
  type: "magvitv2"
25
+ vq_model_name: "jaeikkim/magvitv2"
26
 
27
  mmada:
28
  tokenizer_path: "GSAI-ML/LLaDA-8B-Instruct"
MMaDA/configs/mmada_pretraining_stage4_llada_instruct.yaml CHANGED
@@ -21,7 +21,7 @@ experiment:
21
  model:
22
  vq_model:
23
  type: "magvitv2"
24
- vq_model_name: "showlab/magvitv2"
25
 
26
  mmada:
27
  tokenizer_path: "GSAI-ML/LLaDA-8B-Instruct"
 
21
  model:
22
  vq_model:
23
  type: "magvitv2"
24
+ vq_model_name: "jaeikkim/magvitv2"
25
 
26
  mmada:
27
  tokenizer_path: "GSAI-ML/LLaDA-8B-Instruct"
MMaDA/configs/mmada_pretraining_v2s.yaml CHANGED
@@ -19,7 +19,7 @@ experiment:
19
  model:
20
  vq_model_image:
21
  type: "magvitv2"
22
- vq_model_name: "showlab/magvitv2"
23
  ### Omada ###############################################################
24
  vq_model_audio:
25
  type: "emova"
 
19
  model:
20
  vq_model_image:
21
  type: "magvitv2"
22
+ vq_model_name: "jaeikkim/magvitv2"
23
  ### Omada ###############################################################
24
  vq_model_audio:
25
  type: "emova"
MMaDA/configs/mmada_pretraining_v2t.yaml CHANGED
@@ -21,7 +21,7 @@ experiment:
21
  model:
22
  vq_model:
23
  type: "magvitv2"
24
- vq_model_name: "showlab/magvitv2"
25
 
26
  mmada:
27
  tokenizer_path: "GSAI-ML/LLaDA-8B-Instruct"
 
21
  model:
22
  vq_model:
23
  type: "magvitv2"
24
+ vq_model_name: "jaeikkim/magvitv2"
25
 
26
  mmada:
27
  tokenizer_path: "GSAI-ML/LLaDA-8B-Instruct"
MMaDA/configs/omada_instruction_tuning.yaml CHANGED
@@ -19,7 +19,7 @@ experiment:
19
  model:
20
  vq_model_image:
21
  type: "magvitv2"
22
- vq_model_name: "showlab/magvitv2"
23
  ### Omada ###############################################################
24
  vq_model_audio:
25
  type: "emova"
 
19
  model:
20
  vq_model_image:
21
  type: "magvitv2"
22
+ vq_model_name: "jaeikkim/magvitv2"
23
  ### Omada ###############################################################
24
  vq_model_audio:
25
  type: "emova"
MMaDA/configs/omada_pretraining_stage1-2.yaml CHANGED
@@ -19,7 +19,7 @@ experiment:
19
  model:
20
  vq_model_image:
21
  type: "magvitv2"
22
- vq_model_name: "showlab/magvitv2"
23
  ### Omada ###############################################################
24
  vq_model_audio:
25
  type: "emova"
 
19
  model:
20
  vq_model_image:
21
  type: "magvitv2"
22
+ vq_model_name: "jaeikkim/magvitv2"
23
  ### Omada ###############################################################
24
  vq_model_audio:
25
  type: "emova"
MMaDA/configs/omada_pretraining_stage1-3.yaml CHANGED
@@ -19,7 +19,7 @@ experiment:
19
  model:
20
  vq_model_image:
21
  type: "magvitv2"
22
- vq_model_name: "showlab/magvitv2"
23
  ### Omada ###############################################################
24
  vq_model_audio:
25
  type: "emova"
 
19
  model:
20
  vq_model_image:
21
  type: "magvitv2"
22
+ vq_model_name: "jaeikkim/magvitv2"
23
  ### Omada ###############################################################
24
  vq_model_audio:
25
  type: "emova"
MMaDA/configs/omada_pretraining_stage1-4.yaml CHANGED
@@ -19,7 +19,7 @@ experiment:
19
  model:
20
  vq_model_image:
21
  type: "magvitv2"
22
- vq_model_name: "showlab/magvitv2"
23
  ### Omada ###############################################################
24
  vq_model_audio:
25
  type: "emova"
 
19
  model:
20
  vq_model_image:
21
  type: "magvitv2"
22
+ vq_model_name: "jaeikkim/magvitv2"
23
  ### Omada ###############################################################
24
  vq_model_audio:
25
  type: "emova"
MMaDA/configs/omada_pretraining_stage1.yaml CHANGED
@@ -19,7 +19,7 @@ experiment:
19
  model:
20
  vq_model_image:
21
  type: "magvitv2"
22
- vq_model_name: "showlab/magvitv2"
23
  ### Omada ###############################################################
24
  vq_model_audio:
25
  type: "emova"
 
19
  model:
20
  vq_model_image:
21
  type: "magvitv2"
22
+ vq_model_name: "jaeikkim/magvitv2"
23
  ### Omada ###############################################################
24
  vq_model_audio:
25
  type: "emova"
MMaDA/configs/omada_pretraining_v2t_inst.yaml CHANGED
@@ -19,7 +19,7 @@ experiment:
19
  model:
20
  vq_model_image:
21
  type: "magvitv2"
22
- vq_model_name: "showlab/magvitv2"
23
  ### Omada ###############################################################
24
  vq_model_audio:
25
  type: "emova"
 
19
  model:
20
  vq_model_image:
21
  type: "magvitv2"
22
+ vq_model_name: "jaeikkim/magvitv2"
23
  ### Omada ###############################################################
24
  vq_model_audio:
25
  type: "emova"
MMaDA/inference/demo/demo.yaml CHANGED
@@ -10,7 +10,7 @@ experiment:
10
  model:
11
  vq_model_image:
12
  type: "magvitv2"
13
- vq_model_name: "showlab/magvitv2"
14
  vq_model_audio:
15
  type: "emova"
16
  # Loaded via EMOVASpeechTokenizer.from_pretrained
 
10
  model:
11
  vq_model_image:
12
  type: "magvitv2"
13
+ vq_model_name: "jaeikkim/magvitv2"
14
  vq_model_audio:
15
  type: "emova"
16
  # Loaded via EMOVASpeechTokenizer.from_pretrained
MMaDA/mmada-training-stage3-llada-instruct-v2t-special-token-1e-5/config.yaml CHANGED
@@ -20,7 +20,7 @@ experiment:
20
  model:
21
  vq_model:
22
  type: magvitv2
23
- vq_model_name: showlab/magvitv2
24
  mmada:
25
  tokenizer_path: GSAI-ML/LLaDA-8B-Instruct
26
  pretrained_model_path: Gen-Verse/MMaDA-8B-Base
 
20
  model:
21
  vq_model:
22
  type: magvitv2
23
+ vq_model_name: jaeikkim/magvitv2
24
  mmada:
25
  tokenizer_path: GSAI-ML/LLaDA-8B-Instruct
26
  pretrained_model_path: Gen-Verse/MMaDA-8B-Base