Translation
Safetensors
llama

Add library name to model card

#1
by nielsr HF Staff - opened
Files changed (1) hide show
  1. README.md +8 -5
README.md CHANGED
@@ -1,5 +1,6 @@
1
  ---
2
- license: mit
 
3
  datasets:
4
  - NiuTrans/ComMT
5
  language:
@@ -7,12 +8,12 @@ language:
7
  - zh
8
  - de
9
  - cs
 
10
  metrics:
11
  - bleu
12
  - comet
13
- base_model:
14
- - meta-llama/Meta-Llama-3-8B
15
  pipeline_tag: translation
 
16
  ---
17
 
18
  # LaMaTE
@@ -48,7 +49,9 @@ tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
48
  config = AutoConfig.from_pretrained(model_name_or_path, trust_remote_code=True)
49
  model = LlamaCrossAttentionEncDec.from_pretrained(model_name_or_path, config=config)
50
 
51
- prompt = "Translate the following text from English into Chinese.\nEnglish: The harder you work at it, the more progress you will make.\nChinese: ",
 
 
52
  input_ids = tokenizer(prompt, return_tensors="pt")
53
  outputs_tokenized = model.generate(
54
  **input_ids,
@@ -71,4 +74,4 @@ print(outputs)
71
  archivePrefix={arXiv},
72
  primaryClass={cs.CL}
73
  }
74
- ```
 
1
  ---
2
+ base_model:
3
+ - meta-llama/Meta-Llama-3-8B
4
  datasets:
5
  - NiuTrans/ComMT
6
  language:
 
8
  - zh
9
  - de
10
  - cs
11
+ license: mit
12
  metrics:
13
  - bleu
14
  - comet
 
 
15
  pipeline_tag: translation
16
+ library_name: transformers
17
  ---
18
 
19
  # LaMaTE
 
49
  config = AutoConfig.from_pretrained(model_name_or_path, trust_remote_code=True)
50
  model = LlamaCrossAttentionEncDec.from_pretrained(model_name_or_path, config=config)
51
 
52
+ prompt = "Translate the following text from English into Chinese.\nEnglish: The harder you work at it, the more progress you will make.\nChinese: ",
55
  input_ids = tokenizer(prompt, return_tensors="pt")
56
  outputs_tokenized = model.generate(
57
  **input_ids,
 
74
  archivePrefix={arXiv},
75
  primaryClass={cs.CL}
76
  }
77
+ ```