Update README.md
README.md
CHANGED
@@ -1,630 +1,260 @@
---
license:
---

# Gemma 2 model card
**Model Page**: [Gemma](https://ai.google.dev/gemma/docs/base)

* [Responsible Generative AI Toolkit][rai-toolkit]
* [Gemma on Kaggle][kaggle-gemma]
* [Gemma on Vertex Model Garden][vertex-mg-gemma2]

**Terms of Use**: [Terms][terms]

## Model Information

Gemma is a family of lightweight, state-of-the-art open models from Google,
built from the same research and technology used to create the Gemini models.
They are text-to-text, decoder-only large language models, available in English,
with open weights for both pre-trained variants and instruction-tuned variants.
Gemma models are well-suited for a variety of text generation tasks, including
question answering, summarization, and reasoning. Their relatively small size
makes it possible to deploy them in environments with limited resources such as
a laptop, desktop, or your own cloud infrastructure, democratizing access to
state-of-the-art AI models and helping foster innovation for everyone.
First, install the Transformers library:

```shell
pip install -U transformers
```

```python
import torch
from transformers import pipeline

# build a text-generation pipeline for the base checkpoint
pipe = pipeline(
    "text-generation",
    model="google/gemma-2-2b",
    device="cuda",  # replace with "mps" to run on a Mac device
)

text = "Once upon a time,"  # illustrative prompt
outputs = pipe(text, max_new_tokens=256)
response = outputs[0]["generated_text"]
print(response)
```
```python
# pip install accelerate
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b")
model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2-2b",
    device_map="auto",
)

input_text = "Write me a poem about Machine Learning."  # illustrative prompt
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids, max_new_tokens=32)
print(tokenizer.decode(outputs[0]))
```

You can also use [`local-gemma`](https://github.com/huggingface/local-gemma)
for running Gemma 2 through a command line interface, or CLI. Follow the [installation instructions](https://github.com/huggingface/local-gemma#cli-usage)
for getting started, then launch the CLI through the following command:

```shell
local-gemma --model "google/gemma-2-2b" --prompt "What is the capital of Mexico?"
```
<details>
<summary>
Using 8-bit precision (int8)
</summary>

```python
# pip install bitsandbytes accelerate
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_8bit=True)

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b")
model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2-2b",
    quantization_config=quantization_config,
)

input_text = "Write me a poem about Machine Learning."  # illustrative prompt
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids, max_new_tokens=32)
print(tokenizer.decode(outputs[0]))
```
</details>
<details>
<summary>
Using 4-bit precision
</summary>

```python
# pip install bitsandbytes accelerate
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_4bit=True)

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b")
model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2-2b",
    quantization_config=quantization_config,
)

input_text = "Write me a poem about Machine Learning."  # illustrative prompt
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids, max_new_tokens=32)
print(tokenizer.decode(outputs[0]))
```
</details>
<details>
<summary>
Torch compile
</summary>

[Torch compile](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) is a method for speeding up the
inference of PyTorch modules. The Gemma-2 2b model can be run up to 6x faster by leveraging torch compile.

Note that two warm-up steps are required before the full inference speed is realised:

```python
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"

from transformers import AutoTokenizer, Gemma2ForCausalLM
from transformers.cache_utils import HybridCache
import torch

torch.set_float32_matmul_precision("high")

# load the model + tokenizer
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b")
model = Gemma2ForCausalLM.from_pretrained("google/gemma-2-2b", torch_dtype=torch.bfloat16)
model.to("cuda")

# apply the torch compile transformation
model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)

# pre-process inputs
input_text = "The theory of special relativity states "
model_inputs = tokenizer(input_text, return_tensors="pt").to("cuda")
prompt_length = model_inputs.input_ids.shape[1]

# set-up k/v cache
past_key_values = HybridCache(
    config=model.config,
    max_batch_size=1,
    max_cache_len=model.config.max_position_embeddings,
    device=model.device,
    dtype=model.dtype
)

# enable passing kv cache to generate
model._supports_cache_class = True
model.generation_config.cache_implementation = None

# two warm-up steps
for idx in range(2):
    outputs = model.generate(**model_inputs, past_key_values=past_key_values, do_sample=True, temperature=1.0, max_new_tokens=128)
    past_key_values.reset()

# fast run
outputs = model.generate(**model_inputs, past_key_values=past_key_values, do_sample=True, temperature=1.0, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
</details>

* **Input:** Text string, such as a question, a prompt, or a document to be
  summarized.
* **Output:** Generated English-language text in response to the input, such
  as an answer to a question, or a summary of a document.
## Model Data

Data used for model training and how the data was processed.

### Training Dataset

These models were trained on a dataset of text data that includes a wide variety
of sources. The 27B model was trained with 13 trillion tokens, the 9B model was
trained with 8 trillion tokens, and the 2B model was trained with 2 trillion tokens.
Here are the key components:

* Web Documents: A diverse collection of web text ensures the model is exposed
  to a broad range of linguistic styles, topics, and vocabulary. Primarily
  English-language content.
* Code: Exposing the model to code helps it to learn the syntax and patterns of
  programming languages, which improves its ability to generate code or
  understand code-related questions.
* Mathematics: Training on mathematical text helps the model learn logical
  reasoning, symbolic representation, and to address mathematical queries.

The combination of these diverse data sources is crucial for training a powerful
language model that can handle a wide variety of different tasks and text
formats.

### Data Preprocessing

Here are the key data cleaning and filtering methods applied to the training
data:

* CSAM Filtering: Rigorous CSAM (Child Sexual Abuse Material) filtering was
  applied at multiple stages in the data preparation process to ensure the
  exclusion of harmful and illegal content.
* Sensitive Data Filtering: As part of making Gemma pre-trained models safe and
  reliable, automated techniques were used to filter out certain personal
  information and other sensitive data from training sets.
* Additional methods: Filtering based on content quality and safety in line with
  [our policies][safety-policies].

## Implementation Information

Details about the model internals.

### Hardware

Gemma was trained using the latest generation of
[Tensor Processing Unit (TPU)][tpu] hardware (TPUv5p).

Training large language models requires significant computational power. TPUs,
designed specifically for matrix operations common in machine learning, offer
several advantages in this domain:

* Performance: TPUs are specifically designed to handle the massive computations
  involved in training LLMs. They can speed up training considerably compared to
  CPUs.
* Memory: TPUs often come with large amounts of high-bandwidth memory, allowing
  for the handling of large models and batch sizes during training. This can
  lead to better model quality.
* Scalability: TPU Pods (large clusters of TPUs) provide a scalable solution for
  handling the growing complexity of large foundation models. You can distribute
  training across multiple TPU devices for faster and more efficient processing.
* Cost-effectiveness: In many scenarios, TPUs can provide a more cost-effective
  solution for training large models compared to CPU-based infrastructure,
  especially when considering the time and resources saved due to faster
  training.
* These advantages are aligned with
  [Google's commitments to operate sustainably][sustainability].

### Software

Training was done using [JAX][jax] and [ML Pathways][ml-pathways].

JAX allows researchers to take advantage of the latest generation of hardware,
including TPUs, for faster and more efficient training of large models.

ML Pathways is Google's latest effort to build artificially intelligent systems
capable of generalizing across multiple tasks. This is especially suitable for
[foundation models][foundation-models], including large language models like
these ones.

Together, JAX and ML Pathways are used as described in the
[paper about the Gemini family of models][gemini-2-paper]: "the 'single
controller' programming model of Jax and Pathways allows a single Python
process to orchestrate the entire training run, dramatically simplifying the
development workflow."

## Evaluation

Model evaluation metrics and results.

### Benchmark Results

These models were evaluated against a large collection of different datasets and
metrics to cover different aspects of text generation:

| Benchmark                     | Metric        | Gemma 2 PT 2B | Gemma 2 PT 9B | Gemma 2 PT 27B |
| ----------------------------- | ------------- | ------------- | ------------- | -------------- |
| [MMLU][mmlu]                  | 5-shot, top-1 | 51.3          | 71.3          | 75.2           |
| [HellaSwag][hellaswag]        | 10-shot       | 73.0          | 81.9          | 86.4           |
| [PIQA][piqa]                  | 0-shot        | 77.8          | 81.7          | 83.2           |
| [SocialIQA][socialiqa]        | 0-shot        | 51.9          | 53.4          | 53.7           |
| [BoolQ][boolq]                | 0-shot        | 72.5          | 84.2          | 84.8           |
| [WinoGrande][winogrande]      | partial score | 70.9          | 80.6          | 83.7           |
| [ARC-e][arc]                  | 0-shot        | 80.1          | 88.0          | 88.6           |
| [ARC-c][arc]                  | 25-shot       | 55.4          | 68.4          | 71.4           |
| [TriviaQA][triviaqa]          | 5-shot        | 59.4          | 76.6          | 83.7           |
| [Natural Questions][naturalq] | 5-shot        | 16.7          | 29.2          | 34.5           |
| [HumanEval][humaneval]        | pass@1        | 17.7          | 40.2          | 51.8           |
| [MBPP][mbpp]                  | 3-shot        | 29.6          | 52.4          | 62.6           |
| [GSM8K][gsm8k]                | 5-shot, maj@1 | 23.9          | 68.6          | 74.0           |
| [MATH][math]                  | 4-shot        | 15.0          | 36.6          | 42.3           |
| [AGIEval][agieval]            | 3-5-shot      | 30.6          | 52.8          | 55.1           |
| [DROP][drop]                  | 3-shot, F1    | 52.0          | 69.4          | 72.2           |
| [BIG-Bench][big-bench]        | 3-shot, CoT   | 41.9          | 68.2          | 74.9           |
## Ethics and Safety

Ethics and safety evaluation approach and results.

### Evaluation Approach

Our evaluation methods include structured evaluations and internal red-teaming
testing of relevant content policies. Red-teaming was conducted by a number of
different teams, each with different goals and human evaluation metrics. These
models were evaluated against a number of different categories relevant to
ethics and safety, including:

* Text-to-Text Content Safety: Human evaluation on prompts covering safety
  policies including child sexual abuse and exploitation, harassment, violence
  and gore, and hate speech.
* Text-to-Text Representational Harms: Benchmark against relevant academic
  datasets such as [WinoBias][winobias] and [BBQ Dataset][bbq].
* Memorization: Automated evaluation of memorization of training data, including
  the risk of personally identifiable information exposure.
* Large-scale harm: Tests for "dangerous capabilities," such as chemical,
  biological, radiological, and nuclear (CBRN) risks.

### Evaluation Results

The results of ethics and safety evaluations are within acceptable thresholds
for meeting [internal policies][safety-policies] for categories such as child
safety, content safety, representational harms, memorization, and large-scale harms.
On top of robust internal evaluations, the results of well-known safety
benchmarks like BBQ, BOLD, Winogender, Winobias, RealToxicity, and TruthfulQA
are shown here.

#### Gemma 2.0

| Benchmark                | Metric        | Gemma 2 IT 2B | Gemma 2 IT 9B | Gemma 2 IT 27B |
| ------------------------ | ------------- | ------------- | ------------- | -------------- |
| [RealToxicity][realtox]  | average       | 8.16          | 8.25          | 8.84           |
| [CrowS-Pairs][crows]     | top-1         | 37.67         | 37.47         | 36.67          |
| [BBQ Ambig][bbq]         | 1-shot, top-1 | 83.20         | 88.58         | 85.99          |
| [BBQ Disambig][bbq]      | top-1         | 69.31         | 82.67         | 86.94          |
| [Winogender][winogender] | top-1         | 52.91         | 79.17         | 77.22          |
| [TruthfulQA][truthfulqa] |               | 43.72         | 50.27         | 51.60          |
| [Winobias 1_2][winobias] |               | 59.28         | 78.09         | 81.94          |
| [Winobias 2_2][winobias] |               | 88.57         | 95.32         | 97.22          |
| [Toxigen][toxigen]       |               | 48.32         | 39.30         | 38.42          |
## Dangerous Capability Evaluations

### Evaluation Approach

We evaluated a range of dangerous capabilities:

- **Offensive cybersecurity:** To assess the model's potential for misuse in
  cybersecurity contexts, we utilized both publicly available
  Capture-the-Flag (CTF) platforms like InterCode-CTF and Hack the Box, as
  well as internally developed CTF challenges. These evaluations measure the
  model's ability to exploit vulnerabilities and gain unauthorized access in
  simulated environments.
- **Self-proliferation:** We evaluated the model's capacity for
  self-proliferation by designing tasks that involve resource acquisition, code
  execution, and interaction with remote systems. These evaluations assess
  the model's ability to independently replicate and spread.
- **Persuasion:** To evaluate the model's capacity for persuasion and
  deception, we conducted human persuasion studies. These studies involved
  scenarios that measure the model's ability to build rapport, influence
  beliefs, and elicit specific actions from human participants.

### Evaluation Results

All evaluations are described in detail in
[Evaluating Frontier Models for Dangerous Capabilities][eval-danger]
and in brief in the
[Gemma 2 technical report][tech-report].

<table>
  <thead>
    <tr>
      <th>Evaluation</th>
      <th>Capability</th>
      <th>Gemma 2 IT 27B</th>
    </tr>
  </thead>
  <tbody>
    <tr>
      <td>InterCode-CTF</td>
      <td>Offensive cybersecurity</td>
      <td>34/76 challenges</td>
    </tr>
    <tr>
      <td>Internal CTF</td>
      <td>Offensive cybersecurity</td>
      <td>1/13 challenges</td>
    </tr>
    <tr>
      <td>Hack the Box</td>
      <td>Offensive cybersecurity</td>
      <td>0/13 challenges</td>
    </tr>
    <tr>
      <td>Self-proliferation early warning</td>
      <td>Self-proliferation</td>
      <td>1/10 challenges</td>
    </tr>
    <tr>
      <td>Charm offensive</td>
      <td>Persuasion</td>
      <td>Percent of participants agreeing:
          81% interesting,
          75% would speak again,
          80% made personal connection</td>
    </tr>
    <tr>
      <td>Click Links</td>
      <td>Persuasion</td>
      <td>34% of participants</td>
    </tr>
    <tr>
      <td>Find Info</td>
      <td>Persuasion</td>
      <td>9% of participants</td>
    </tr>
    <tr>
      <td>Run Code</td>
      <td>Persuasion</td>
      <td>11% of participants</td>
    </tr>
    <tr>
      <td>Money talks</td>
      <td>Persuasion</td>
      <td>£3.72 mean donation</td>
    </tr>
    <tr>
      <td>Web of Lies</td>
      <td>Persuasion</td>
      <td>18% mean shift towards correct belief, 1% mean shift towards
          incorrect belief</td>
    </tr>
  </tbody>
</table>
## Usage and Limitations

These models have certain limitations that users should be aware of.

### Intended Usage

Open Large Language Models (LLMs) have a wide range of applications across
various industries and domains. The following list of potential uses is not
comprehensive. The purpose of this list is to provide contextual information
about the possible use-cases that the model creators considered as part of model
training and development.

* Content Creation and Communication
  * Text Generation: These models can be used to generate creative text formats
    such as poems, scripts, code, marketing copy, and email drafts.
  * Chatbots and Conversational AI: Power conversational interfaces for customer
    service, virtual assistants, or interactive applications.
  * Text Summarization: Generate concise summaries of a text corpus, research
    papers, or reports.
* Research and Education
  * Natural Language Processing (NLP) Research: These models can serve as a
    foundation for researchers to experiment with NLP techniques, develop
    algorithms, and contribute to the advancement of the field.
  * Language Learning Tools: Support interactive language learning experiences,
    aiding in grammar correction or providing writing practice.
  * Knowledge Exploration: Assist researchers in exploring large bodies of text
    by generating summaries or answering questions about specific topics.

### Limitations

* Training Data
  * The quality and diversity of the training data significantly influence the
    model's capabilities. Biases or gaps in the training data can lead to
    limitations in the model's responses.
  * The scope of the training dataset determines the subject areas the model can
    handle effectively.
* Context and Task Complexity
  * LLMs are better at tasks that can be framed with clear prompts and
    instructions. Open-ended or highly complex tasks might be challenging.
  * A model's performance can be influenced by the amount of context provided
    (longer context generally leads to better outputs, up to a certain point).
* Language Ambiguity and Nuance
  * Natural language is inherently complex. LLMs might struggle to grasp subtle
    nuances, sarcasm, or figurative language.
* Factual Accuracy
  * LLMs generate responses based on information they learned from their
    training datasets, but they are not knowledge bases. They may generate
    incorrect or outdated factual statements.
* Common Sense
  * LLMs rely on statistical patterns in language. They might lack the ability
    to apply common sense reasoning in certain situations.

### Ethical Considerations and Risks

The development of large language models (LLMs) raises several ethical concerns.
In creating an open model, we have carefully considered the following:

* Bias and Fairness
  * LLMs trained on large-scale, real-world text data can reflect socio-cultural
    biases embedded in the training material. These models underwent careful
    scrutiny; the input data pre-processing is described and posterior
    evaluations are reported in this card.
* Misinformation and Misuse
  * LLMs can be misused to generate text that is false, misleading, or harmful.
  * Guidelines are provided for responsible use with the model; see the
    [Responsible Generative AI Toolkit][rai-toolkit].
* Transparency and Accountability
  * This model card summarizes details on the models' architecture,
    capabilities, limitations, and evaluation processes.
  * A responsibly developed open model offers the opportunity to share
    innovation by making LLM technology accessible to developers and researchers
    across the AI ecosystem.

Risks identified and mitigations:

* Perpetuation of biases: It's encouraged to perform continuous monitoring
  (using evaluation metrics, human review) and the exploration of de-biasing
  techniques during model training, fine-tuning, and other use cases.
* Generation of harmful content: Mechanisms and guidelines for content safety
  are essential. Developers are encouraged to exercise caution and implement
  appropriate content safety safeguards based on their specific product policies
  and application use cases.
* Misuse for malicious purposes: Technical limitations and developer and
  end-user education can help mitigate against malicious applications of LLMs.
  Educational resources and reporting mechanisms for users to flag misuse are
  provided. Prohibited uses of Gemma models are outlined in the
  [Gemma Prohibited Use Policy][prohibited-use].
* Privacy violations: Models were trained on data filtered for removal of PII
  (Personally Identifiable Information). Developers are encouraged to adhere to
  privacy regulations with privacy-preserving techniques.

### Benefits

At the time of release, this family of models provides high-performance open
large language model implementations designed from the ground up for Responsible
AI development compared to similarly sized models.

Using the benchmark evaluation metrics described in this document, these models
have been shown to provide superior performance to other comparably sized open
model alternatives.
[tech-report]: https://storage.googleapis.com/deepmind-media/gemma/gemma-2-report.pdf
[rai-toolkit]: https://ai.google.dev/responsible
[kaggle-gemma]: https://www.kaggle.com/models/google/gemma-2
[terms]: https://ai.google.dev/gemma/terms
[vertex-mg-gemma2]: https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/gemma2
[sensitive-info]: https://cloud.google.com/dlp/docs/high-sensitivity-infotypes-reference
[safety-policies]: https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11
[prohibited-use]: https://ai.google.dev/gemma/prohibited_use_policy
[tpu]: https://cloud.google.com/tpu/docs/intro-to-tpu
[sustainability]: https://sustainability.google/operating-sustainably/
[jax]: https://github.com/google/jax
[ml-pathways]: https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture/
[foundation-models]: https://ai.google/discover/foundation-models/
[gemini-2-paper]: https://goo.gle/gemma2report
[mmlu]: https://arxiv.org/abs/2009.03300
[hellaswag]: https://arxiv.org/abs/1905.07830
[piqa]: https://arxiv.org/abs/1911.11641
[socialiqa]: https://arxiv.org/abs/1904.09728
[boolq]: https://arxiv.org/abs/1905.10044
[winogrande]: https://arxiv.org/abs/1907.10641
[commonsenseqa]: https://arxiv.org/abs/1811.00937
[openbookqa]: https://arxiv.org/abs/1809.02789
[arc]: https://arxiv.org/abs/1911.01547
[triviaqa]: https://arxiv.org/abs/1705.03551
[naturalq]: https://github.com/google-research-datasets/natural-questions
[humaneval]: https://arxiv.org/abs/2107.03374
[mbpp]: https://arxiv.org/abs/2108.07732
[gsm8k]: https://arxiv.org/abs/2110.14168
[realtox]: https://arxiv.org/abs/2009.11462
[bold]: https://arxiv.org/abs/2101.11718
[crows]: https://aclanthology.org/2020.emnlp-main.154/
[bbq]: https://arxiv.org/abs/2110.08193v2
[winogender]: https://arxiv.org/abs/1804.09301
[truthfulqa]: https://arxiv.org/abs/2109.07958
[winobias]: https://arxiv.org/abs/1804.06876
[math]: https://arxiv.org/abs/2103.03874
[agieval]: https://arxiv.org/abs/2304.06364
[drop]: https://arxiv.org/abs/1903.00161
[big-bench]: https://arxiv.org/abs/2206.04615
[toxigen]: https://arxiv.org/abs/2203.09509
[eval-danger]: https://arxiv.org/abs/2403.13793
---
license: apache-2.0
datasets:
- FreedomIntelligence/ApolloMoEDataset
language:
- ar
- en
- zh
- ko
- ja
- mn
- th
- vi
- lo
- mg
- de
- pt
- es
- fr
- ru
- it
- hr
- gl
- cs
- co
- la
- uk
- bs
- bg
- eo
- sq
- da
- sa
- 'no'
- gn
- sr
- sk
- gd
- lb
- hi
- ku
- mt
- he
- ln
- bm
- sw
- ig
- rw
- ha
metrics:
- accuracy
base_model:
- google/gemma-2-2b
pipeline_tag: question-answering
tags:
- biology
- medical
---
# Democratizing Medical LLMs for Many More Languages

Covering 12 major languages (English, Chinese, French, Hindi, Spanish, Arabic, Russian, Japanese, Korean, German, Italian, and Portuguese) and 38 minor languages so far.

<center>

<p align="center">
   📃 <a href="https://arxiv.org/abs/2410.10626" target="_blank">Paper</a> • 🌐 <a href="" target="_blank">Demo</a> • 🤗 <a href="https://huggingface.co/datasets/FreedomIntelligence/ApolloMoEDataset" target="_blank">ApolloMoEDataset</a> • 🤗 <a href="https://huggingface.co/datasets/FreedomIntelligence/ApolloMoEBench" target="_blank">ApolloMoEBench</a> • 🤗 <a href="https://huggingface.co/collections/FreedomIntelligence/apollomoe-and-apollo2-670ddebe3bb1ba1aebabbf2c" target="_blank">Models</a> • 🌐 <a href="https://github.com/FreedomIntelligence/Apollo" target="_blank">Apollo</a>
</p>

![Apollo](https://cdn-uploads.huggingface.co/production/uploads/61f8038024cb3d8d3c0f0d5b/PMLE0-xnlqmhwB2MvfHnr.png)

## 🌈 Update

* **[2024.10.15]** ApolloMoE repo is published! 🎉
## Architecture

<details>
<summary>Click to view the MoE routing image</summary>

![ApolloMoE](https://cdn-uploads.huggingface.co/production/uploads/61f8038024cb3d8d3c0f0d5b/nEs0oeSUXRAJ8y400c3Y7.png)

</details>
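The routing diagram above is the only architectural detail given here. As a rough, hypothetical illustration of the general idea (a router dispatching each token's hidden state to a small subset of expert feed-forward networks), not the actual Apollo-MoE implementation, a top-2 MoE layer could be sketched as:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TopKMoEFFN(nn.Module):
    """Illustrative top-k mixture-of-experts feed-forward block (not the Apollo-MoE code)."""

    def __init__(self, hidden_size=64, ffn_size=128, num_experts=4, top_k=2):
        super().__init__()
        self.router = nn.Linear(hidden_size, num_experts)  # routing logits per token
        self.experts = nn.ModuleList(
            [nn.Sequential(nn.Linear(hidden_size, ffn_size), nn.GELU(), nn.Linear(ffn_size, hidden_size))
             for _ in range(num_experts)]
        )
        self.top_k = top_k

    def forward(self, x):  # x: (num_tokens, hidden_size)
        probs = F.softmax(self.router(x), dim=-1)              # routing probabilities
        weights, expert_idx = probs.topk(self.top_k, dim=-1)   # top-k experts per token
        out = torch.zeros_like(x)
        for k in range(self.top_k):
            for e, expert in enumerate(self.experts):
                mask = expert_idx[:, k] == e                    # tokens routed to expert e at rank k
                if mask.any():
                    out[mask] += weights[mask, k].unsqueeze(-1) * expert(x[mask])
        return out

# quick smoke test
print(TopKMoEFFN()(torch.randn(8, 64)).shape)  # torch.Size([8, 64])
```

As the paper title suggests, the Post-MoE models organize experts around language families rather than purely learned specializations; the sketch above only shows the generic routing mechanism.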
## Results

### Dense
🤗 <a href="https://huggingface.co/FreedomIntelligence/Apollo2-0.5B" target="_blank">Apollo2-0.5B</a> • 🤗 <a href="https://huggingface.co/FreedomIntelligence/Apollo2-1.5B" target="_blank">Apollo2-1.5B</a> • 🤗 <a href="https://huggingface.co/FreedomIntelligence/Apollo2-2B" target="_blank">Apollo2-2B</a> • 🤗 <a href="https://huggingface.co/FreedomIntelligence/Apollo2-3.8B" target="_blank">Apollo2-3.8B</a> • 🤗 <a href="https://huggingface.co/FreedomIntelligence/Apollo2-7B" target="_blank">Apollo2-7B</a> • 🤗 <a href="https://huggingface.co/FreedomIntelligence/Apollo2-9B" target="_blank">Apollo2-9B</a>

<details>
<summary>Click to view the Dense Models Results</summary>

![ApolloMoE](https://cdn-uploads.huggingface.co/production/uploads/61f8038024cb3d8d3c0f0d5b/4e6ZZi_gnb333zcGZN5qP.png)

</details>

### Post-MoE
🤗 <a href="https://huggingface.co/FreedomIntelligence/Apollo-MoE-0.5B" target="_blank">Apollo-MoE-0.5B</a> • 🤗 <a href="https://huggingface.co/FreedomIntelligence/Apollo-MoE-1.5B" target="_blank">Apollo-MoE-1.5B</a> • 🤗 <a href="https://huggingface.co/FreedomIntelligence/Apollo-MoE-7B" target="_blank">Apollo-MoE-7B</a>

<details>
<summary>Click to view the Post-MoE Models Results</summary>

![ApolloMoE](https://cdn-uploads.huggingface.co/production/uploads/61f8038024cb3d8d3c0f0d5b/LWW_DHi8sf9m7AQFGMRxU.png)

</details>
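Any of the checkpoints listed above can be pulled locally before evaluation or fine-tuning; a minimal sketch with `huggingface_hub` (Apollo2-2B and the target directory are illustrative choices):

```python
from huggingface_hub import snapshot_download

# Apollo2-2B is used as an example; substitute any checkpoint listed above
snapshot_download(
    repo_id="FreedomIntelligence/Apollo2-2B",
    local_dir="./ckpts/Apollo2-2B",  # illustrative path
)
```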
## Usage Format
#### Apollo2
- 0.5B, 1.5B, 7B: `User:{query}\nAssistant:{response}<|endoftext|>`
- 2B, 9B: `User:{query}\nAssistant:{response}<eos>`
- 3.8B: `<|user|>\n{query}<|end|><|assistant|>\n{response}<|end|>`

#### Apollo-MoE
- 0.5B, 1.5B, 7B: `User:{query}\nAssistant:{response}<|endoftext|>`
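As a minimal sketch of applying the 2B/9B format above with the `transformers` generation API (the checkpoint choice, query text, and token budget are illustrative):

```python
# pip install transformers accelerate
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "FreedomIntelligence/Apollo2-2B"  # any checkpoint above; adjust the prompt format accordingly
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")

# 2B/9B usage format: User:{query}\nAssistant:{response}<eos>
query = "What are common symptoms of iron-deficiency anemia?"  # illustrative query
prompt = f"User:{query}\nAssistant:"

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True))
```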
## Dataset & Evaluation

- Dataset
  🤗 <a href="https://huggingface.co/datasets/FreedomIntelligence/ApolloMoEDataset" target="_blank">ApolloMoEDataset</a>

  <details><summary>Click to expand</summary>

  ![ApolloMoE](https://cdn-uploads.huggingface.co/production/uploads/61f8038024cb3d8d3c0f0d5b/0sYuI24sRC_7JhqzrnB7H.png)

  - [Data category](https://huggingface.co/datasets/FreedomIntelligence/ApolloCorpus/tree/main/train)

  </details>

- Evaluation
  🤗 <a href="https://huggingface.co/datasets/FreedomIntelligence/ApolloMoEBench" target="_blank">ApolloMoEBench</a>

  <details><summary>Click to expand</summary>

  - EN:
    - [MedQA-USMLE](https://huggingface.co/datasets/GBaker/MedQA-USMLE-4-options)
    - [MedMCQA](https://huggingface.co/datasets/medmcqa/viewer/default/test)
    - [PubMedQA](https://huggingface.co/datasets/pubmed_qa): not used in the paper because the results fluctuated too much.
    - [MMLU-Medical](https://huggingface.co/datasets/cais/mmlu)
      - Clinical knowledge, Medical genetics, Anatomy, Professional medicine, College biology, College medicine
  - ZH:
    - [MedQA-MCMLE](https://huggingface.co/datasets/bigbio/med_qa/viewer/med_qa_zh_4options_bigbio_qa/test)
    - [CMB-single](https://huggingface.co/datasets/FreedomIntelligence/CMB): not used in the paper
      - 2,000 randomly sampled single-answer multiple-choice questions
    - [CMMLU-Medical](https://huggingface.co/datasets/haonan-li/cmmlu)
      - Anatomy, Clinical_knowledge, College_medicine, Genetics, Nutrition, Traditional_chinese_medicine, Virology
    - [CMExam](https://github.com/williamliujl/CMExam): not used in the paper
      - 2,000 randomly sampled multiple-choice questions
  - ES: [Head_qa](https://huggingface.co/datasets/head_qa)
  - FR:
    - [Frenchmedmcqa](https://github.com/qanastek/FrenchMedMCQA)
    - [MMLU_FR]
      - Clinical knowledge, Medical genetics, Anatomy, Professional medicine, College biology, College medicine
  - HI: [MMLU_HI](https://huggingface.co/datasets/FreedomIntelligence/MMLU_Hindi)
    - Clinical knowledge, Medical genetics, Anatomy, Professional medicine, College biology, College medicine
  - AR: [MMLU_AR](https://huggingface.co/datasets/FreedomIntelligence/MMLU_Arabic)
    - Clinical knowledge, Medical genetics, Anatomy, Professional medicine, College biology, College medicine
  - JA: [IgakuQA](https://github.com/jungokasai/IgakuQA)
  - KO: [KorMedMCQA](https://huggingface.co/datasets/sean0042/KorMedMCQA)
  - IT:
    - [MedExpQA](https://huggingface.co/datasets/HiTZ/MedExpQA)
    - [MMLU_IT]
      - Clinical knowledge, Medical genetics, Anatomy, Professional medicine, College biology, College medicine
  - DE: [BioInstructQA](https://huggingface.co/datasets/BioMistral/BioInstructQA): German part
  - PT: [BioInstructQA](https://huggingface.co/datasets/BioMistral/BioInstructQA): Portuguese part
  - RU: [RuMedBench](https://github.com/sb-ai-lab/MedBench)

  </details>
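A minimal sketch for pulling the training corpus and the benchmark listed above with the `datasets` library (assuming the default configurations; actual config and split names may differ):

```python
from datasets import load_dataset

# default configs assumed; pass a config name if the hub repo defines several
train_corpus = load_dataset("FreedomIntelligence/ApolloMoEDataset")
benchmark = load_dataset("FreedomIntelligence/ApolloMoEBench")

print(train_corpus)
print(benchmark)
```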
## Results reproduction
<details><summary>Click to expand</summary>

We take Gemma-2b as an example.

1. Download the dataset for the project:

   ```
   bash 0.download_data.sh
   ```

2. Prepare the test and dev sets for the specific model:

   - Create test data with the model's special tokens; you can use `./util/check.ipynb` to check a model's special tokens.

   ```
   bash 1.data_process_test&dev.sh
   ```
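   Step 2 depends on each model's special tokens; as a quick check outside the notebook, a minimal sketch (any checkpoint name works, Gemma-2-2b shown to match the example):

   ```python
   from transformers import AutoTokenizer

   tok = AutoTokenizer.from_pretrained("google/gemma-2-2b")
   print(tok.special_tokens_map)    # bos/eos/pad/unk tokens
   print(tok.all_special_tokens)
   ```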
3. Prepare the training data for the specific model (create the tokenized data in advance):

   - You can adjust the data training order and the number of training epochs in this step.

   ```
   bash 2.data_process_train.sh
   ```

4. Train the model:

   - If you want to train on multiple nodes, please refer to `./scripts/multi_node_train_*.sh`.

   ```
   bash 3.single_node_train_gemma.sh
   ```

5. Evaluate your model: generate scores for the benchmark:

   ```
   bash 4.eval.sh
   ```

6. Evaluate your model: play with your checkpoints in an interactive CLI:

   ```
   python ./src/evaluate/cli_demo.py --model_name='./ckpts/your/path/tfmr'
   ```

</details>
## Citation
Please use the following citation if you intend to use our dataset for training or evaluation:

```
@misc{zheng2024efficientlydemocratizingmedicalllms,
      title={Efficiently Democratizing Medical LLMs for 50 Languages via a Mixture of Language Family Experts},
      author={Guorui Zheng and Xidong Wang and Juhao Liang and Nuo Chen and Yuping Zheng and Benyou Wang},
      year={2024},
      eprint={2410.10626},
      archivePrefix={arXiv},
      primaryClass={cs.CL},
      url={https://arxiv.org/abs/2410.10626},
}
```