import gradio as gr

"""
This demo loads MedGemma locally with a `transformers` pipeline. For more
information on `huggingface_hub` Inference API support (not used here), check
the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""

import os
import sys

# A plain Google Colab runtime provides the google.colab module; Vertex AI
# runtimes (e.g. Colab Enterprise) set the VERTEX_PRODUCT environment variable.
google_colab = "google.colab" in sys.modules and not os.environ.get("VERTEX_PRODUCT")
|
if google_colab:
    # Read the Hugging Face token from Colab's secret storage.
    from google.colab import userdata
    os.environ["HF_TOKEN"] = userdata.get("HF_TOKEN")
else:
    if os.environ.get("VERTEX_PRODUCT") == "COLAB_ENTERPRISE":
        # Keep the model cache on the runtime's /content disk.
        os.environ["HF_HOME"] = "/content/hf"
    from huggingface_hub import get_token
    if get_token() is None:
        # No cached token found; fall back to an interactive login prompt.
        from huggingface_hub import notebook_login
        notebook_login()
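
# Outside notebooks, a minimal alternative (a sketch, not used here) is to set
# HF_TOKEN in the environment, or to call huggingface_hub.login(token="hf_...")
# before the first model download.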
|
import torch
from transformers import BitsAndBytesConfig

# "4b-it" is the multimodal instruction-tuned variant; 27B multimodal and
# text-only ("27b-text-it") variants follow the same naming scheme.
model_variant = "4b-it"
model_id = f"google/medgemma-{model_variant}"

# Load the weights in 4-bit to reduce GPU memory requirements.
use_quantization = True

# Thinking mode only takes effect for the 27B variants (see below).
is_thinking = False
|
# Guard: on Colab, a 27B variant only fits on an A100 GPU with 4-bit
# quantization enabled, so fail fast otherwise.
if "27b" in model_variant and google_colab:
    if not ("A100" in torch.cuda.get_device_name(0) and use_quantization):
        raise ValueError(
            "Runtime has insufficient memory to run a 27B variant. "
            "Please select an A100 GPU and use 4-bit quantization."
        )
|
model_kwargs = dict(
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

if use_quantization:
    # 4-bit weight quantization via bitsandbytes.
    model_kwargs["quantization_config"] = BitsAndBytesConfig(load_in_4bit=True)
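
# A more explicit 4-bit setup is also possible; a sketch with commonly used
# options (illustrative values, not tuned for MedGemma):
#
#   BitsAndBytesConfig(
#       load_in_4bit=True,
#       bnb_4bit_quant_type="nf4",
#       bnb_4bit_compute_dtype=torch.bfloat16,
#   )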
|
from transformers import pipeline

if "text" in model_variant:
    # Text-only variants use the plain text-generation task.
    pipe = pipeline("text-generation", model=model_id, model_kwargs=model_kwargs)
else:
    # Multimodal variants accept interleaved images and text.
    pipe = pipeline("image-text-to-text", model=model_id, model_kwargs=model_kwargs)

# Greedy decoding for deterministic, reproducible outputs.
pipe.model.generation_config.do_sample = False
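
# The image-text-to-text pipeline also accepts images inside a user turn; a
# sketch (the URL is a placeholder):
#
#   messages = [{
#       "role": "user",
#       "content": [
#           {"type": "image", "url": "https://example.com/chest_xray.png"},
#           {"type": "text", "text": "Describe this X-ray."},
#       ],
#   }]
#   output = pipe(text=messages, max_new_tokens=300)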
|
role_instruction = "You are an expert radiologist."
if "27b" in model_variant and is_thinking:
    # Thinking mode needs a larger token budget to cover the hidden reasoning.
    system_instruction = f"SYSTEM INSTRUCTION: think silently if needed. {role_instruction}"
    max_new_tokens = 1300
else:
    system_instruction = role_instruction
    max_new_tokens = 300
|
|
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # The fixed MedGemma system instruction and token budget defined above take
    # precedence over the UI controls, and decoding is greedy, so the last four
    # parameters exist only to match the ChatInterface signature.
    messages = [
        {
            "role": "system",
            "content": [{"type": "text", "text": system_instruction}],
        }
    ]

    # Replay earlier turns in the chat-template format the pipeline expects.
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append(
                {"role": "user", "content": [{"type": "text", "text": user_turn}]}
            )
        if assistant_turn:
            messages.append(
                {"role": "assistant", "content": [{"type": "text", "text": assistant_turn}]}
            )

    # Append the current user message.
    messages.append({"role": "user", "content": [{"type": "text", "text": message}]})

    # NOTE: `text=` matches the image-text-to-text pipeline; the text-generation
    # pipeline takes the messages as its first positional argument instead.
    output = pipe(text=messages, max_new_tokens=max_new_tokens)

    # The pipeline returns the whole conversation; the last entry is the reply.
    return output[0]["generated_text"][-1]["content"]
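
# Quick smoke test outside the UI (a sketch; the extra arguments mirror the
# ChatInterface additional_inputs and are ignored by this handler):
#
#   reply = respond("How does pleural effusion appear on a chest X-ray?",
#                   history=[], system_message=None, max_tokens=None,
#                   temperature=None, top_p=None)
#   print(reply)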


"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        # NOTE: respond() currently ignores these controls in favor of the
        # fixed MedGemma system instruction and greedy decoding configured above.
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
|
|
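# `demo.launch()` starts a local server; `demo.launch(share=True)` would also
# create a temporary public share link (standard Gradio behavior).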
if __name__ == "__main__":
    demo.launch()