Update app.py
Browse files
app.py
CHANGED
|
@@ -29,7 +29,7 @@ — old side

 29   from transformers import BitsAndBytesConfig
 30   import torch
 31
 32  -model_variant = "   [old value truncated in this capture]
 33   model_id = f"google/medgemma-{model_variant}"
 34
 35   use_quantization = True  # @param {type: "boolean"}
|
@@ -66,15 +66,6 @@ — old side

 66   pipe.model.generation_config.do_sample = False
 67
 68
 69  -if "text" in model_variant:
 70  -    from transformers import AutoModelForCausalLM, AutoTokenizer
 71  -    model = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs)
 72  -    tokenizer = AutoTokenizer.from_pretrained(model_id)
 73  -else:
 74  -    from transformers import AutoModelForImageTextToText, AutoProcessor
 75  -    model = AutoModelForImageTextToText.from_pretrained(model_id, **model_kwargs)
 76  -    processor = AutoProcessor.from_pretrained(model_id)
 77  -
 78   role_instruction = "You are an expert radiologist."
 79   if "27b" in model_variant and is_thinking:
|
|
|
New side

 29   from transformers import BitsAndBytesConfig
 30   import torch
 31
 32  +model_variant = "4b-it"  # @param ["4b-it", "27b-it", "27b-text-it"]
 33   model_id = f"google/medgemma-{model_variant}"
 34
 35   use_quantization = True  # @param {type: "boolean"}
|
|
New side

 66   pipe.model.generation_config.do_sample = False
 67
 68
 69
 70   role_instruction = "You are an expert radiologist."
 71   if "27b" in model_variant and is_thinking: