Hugging Face Space (status: Sleeping) — commit "Update app.py"
File changed: app.py
@@ -9,7 +9,9 @@ from synthid_text import detector_bayesian, synthid_mixin, logits_processing
|
|
| 9 |
MODEL_NAME = "google/gemma-7b-it" # Choose the model (Gemma models used in SynthID)
|
| 10 |
DEVICE = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
|
| 11 |
|
| 12 |
-
|
|
|
|
|
|
|
| 13 |
tokenizer = transformers.AutoTokenizer.from_pretrained(MODEL_NAME)
|
| 14 |
tokenizer.pad_token = tokenizer.eos_token
|
| 15 |
tokenizer.padding_side = "left"
|
|
|
|
| 9 |
MODEL_NAME = "google/gemma-7b-it" # Choose the model (Gemma models used in SynthID)
|
| 10 |
DEVICE = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
|
| 11 |
|
| 12 |
+
|
| 13 |
+
# Initialize model and tokenizer
|
| 14 |
+
model = transformers.AutoModelForCausalLM.from_pretrained(MODEL_NAME).to(DEVICE)
|
| 15 |
tokenizer = transformers.AutoTokenizer.from_pretrained(MODEL_NAME)
|
| 16 |
tokenizer.pad_token = tokenizer.eos_token
|
| 17 |
tokenizer.padding_side = "left"
|