import torch
from typing import Tuple
from transformers import AutoTokenizer, AutoModelForCausalLM

# Module-level cache: keep a single model/tokenizer instance per process.
tokenizer = None
model = None


def get_model_and_tokenizer() -> Tuple[AutoModelForCausalLM, AutoTokenizer]:
    """
    Return the preloaded model and tokenizer, loading them on first use.

    Returns:
        tuple: A tuple containing the preloaded model and tokenizer.
    """
    global model, tokenizer
    if model is None or tokenizer is None:
        # Prefer the GPU when one is available.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        tokenizer = AutoTokenizer.from_pretrained("juancopi81/lmd_8bars_tokenizer")
        model = AutoModelForCausalLM.from_pretrained(
            "juancopi81/lmd-8bars-2048-epochs40_v4"
        )

        model = model.to(device)

    return model, tokenizer
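
# --- Usage sketch (not part of the original module) ---
# A minimal example of how the helper might be called. The "PIECE_START"
# prompt is an assumption about this tokenizer's vocabulary, and the
# generation parameters are illustrative only; adjust both as needed.
if __name__ == "__main__":
    model, tokenizer = get_model_and_tokenizer()

    # Encode a prompt and move the tensors to the model's device.
    inputs = tokenizer("PIECE_START", return_tensors="pt").to(model.device)

    # Sample a continuation from the cached model.
    output_ids = model.generate(
        **inputs,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.9,
    )
    print(tokenizer.decode(output_ids[0]))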