Abigail45 committed on
Commit
90b5eaa
·
verified ·
1 Parent(s): f7c928f

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +28 -31
README.md CHANGED
@@ -1,43 +1,41 @@
1
  ---
2
  license: apache-2.0
3
  model_name: Aurora-Fusion
4
- new_version: "1.0" # <-- must be a string
5
  pipeline_tag: text-generation
6
  library_name: transformers
7
-
8
  tags:
9
- - text-generation
10
- - causal-lm
11
- - merge
12
- - multilingual
13
- - instruction-following
14
- - reasoning
15
- - chat
16
-
17
  base_models:
18
- - google/gemma-2-27b-it
19
- - meta-llama/Llama-3-70B-Instruct
20
- - mistralai/Mixtral-8x22B-Instruct-v0.1
21
- - Qwen/Qwen2-72B-Instruct
22
- - eleutherai/gpt-neox-20b
23
-
24
  datasets:
25
- - teknium/OpenHermes-2.5
26
- - PhilipMay/UltraChat-200k-ShareGPT-clean
27
- - garage-bAInd/Open-Platypus
28
- - meta-math/MetaMathQA
29
- - wikimedia/wikipedia
30
-
31
  language:
32
- - en
33
- - es
34
- - fr
35
-
36
  metrics:
37
- - perplexity
38
- - MT-Bench
39
- - GLUE
40
- - SuperGLUE
 
 
 
 
 
41
  ---
42
  # Aurora-Fusion Model Card
43
 
@@ -48,7 +46,6 @@ Aurora-Fusion is a high-performance multilingual causal language model created b
48
  - google/gemma-2-27b-it
49
  - meta-llama/Llama-3-70B-Instruct
50
  - mistralai/Mixtral-8x22B-Instruct-v0.1
51
- - Qwen/Qwen2-72B-Instruct
52
  - eleutherai/gpt-neox-20b
53
 
54
  ## Training Datasets
 
1
  ---
2
  license: apache-2.0
3
  model_name: Aurora-Fusion
 
4
  pipeline_tag: text-generation
5
  library_name: transformers
 
6
  tags:
7
+ - text-generation
8
+ - causal-lm
9
+ - merge
10
+ - multilingual
11
+ - instruction-following
12
+ - reasoning
13
+ - chat
 
14
  base_models:
15
+ - google/gemma-2-27b-it
16
+ - meta-llama/Llama-3-70B-Instruct
17
+ - mistralai/Mixtral-8x22B-Instruct-v0.1
18
+ - eleutherai/gpt-neox-20b
 
 
19
  datasets:
20
+ - teknium/OpenHermes-2.5
21
+ - PhilipMay/UltraChat-200k-ShareGPT-clean
22
+ - garage-bAInd/Open-Platypus
23
+ - meta-math/MetaMathQA
24
+ - wikimedia/wikipedia
 
25
  language:
26
+ - en
27
+ - es
28
+ - fr
 
29
  metrics:
30
+ - perplexity
31
+ - MT-Bench
32
+ - GLUE
33
+ - SuperGLUE
34
+ base_model:
35
+ - google/gemma-2-27b-it
36
+ - meta-llama/Meta-Llama-3-70B-Instruct
37
+ - mistralai/Mixtral-8x22B-Instruct-v0.1
38
+ - EleutherAI/gpt-neox-20b
39
  ---
40
  # Aurora-Fusion Model Card
41
 
 
46
  - google/gemma-2-27b-it
47
  - meta-llama/Llama-3-70B-Instruct
48
  - mistralai/Mixtral-8x22B-Instruct-v0.1
 
49
  - eleutherai/gpt-neox-20b
50
 
51
  ## Training Datasets