Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -29,10 +29,9 @@ login(token=HF_TOKEN)
|
|
| 29 |
# Available models
|
| 30 |
AVAILABLE_MODELS = {
|
| 31 |
"bloomz-560m": "bigscience/bloomz-560m",
|
| 32 |
- "
|
| 33 |
- "
|
| 34 |
- "
|
| 35 |
-    "gpt2-large": "gpt2-large",
|
| 36 |
}
|
| 37 |
|
| 38 |
# Initialize model and tokenizer
|
|
@@ -50,7 +49,7 @@ def load_model(model_name):
|
|
| 50 |
current_model_name = model_name
|
| 51 |
|
| 52 |
# Load the default model at startup
|
| 53 |
- load_model("
|
| 54 |
|
| 55 |
@spaces.GPU()
|
| 56 |
def get_next_token_predictions(text, model_name, top_k=10):
|
|
|
|
| 29 |
# Available models
|
| 30 |
AVAILABLE_MODELS = {
|
| 31 |
"bloomz-560m": "bigscience/bloomz-560m",
|
| 32 |
+    "bloomz-7B1": "bigscience/bloomz-7b1",
|
| 33 |
+    "GPT-J-6B": "EleutherAI/gpt-j-6b",
|
| 34 |
+    "mT5-XL": "google/mt5-xl",
|
|
|
|
| 35 |
}
|
| 36 |
|
| 37 |
# Initialize model and tokenizer
|
|
|
|
| 49 |
current_model_name = model_name
|
| 50 |
|
| 51 |
# Load the default model at startup
|
| 52 |
+ load_model("bloomz-560m")
|
| 53 |
|
| 54 |
@spaces.GPU()
|
| 55 |
def get_next_token_predictions(text, model_name, top_k=10):
|