import os

import gradio as gr
import spaces
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

text_generator = None


def init():
    global text_generator

    huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
    if not huggingface_token:
        # Gated models such as Gemma require a token; warn instead of failing hard.
        print("HUGGINGFACE_TOKEN is not set; if the model requires authentication, set it as a secret")
        # raise ValueError("HUGGINGFACE_TOKEN environment variable is not set")

    model_id = "google/gemma-2-9b-it"
    device = "auto"  # let device_map place the model (GPU if available, else CPU)
    dtype = torch.bfloat16

    tokenizer = AutoTokenizer.from_pretrained(model_id, token=huggingface_token)
    print(model_id, device, dtype)

    model = AutoModelForCausalLM.from_pretrained(
        model_id, token=huggingface_token, torch_dtype=dtype, device_map=device
    )
    # The pipeline has no .to(device); placement is handled by device_map.
    text_generator = pipeline(
        "text-generation", model=model, tokenizer=tokenizer, torch_dtype=dtype, device_map=device
    )

    if next(model.parameters()).is_cuda:
        print("The model is on a GPU")
    else:
        print("The model is on a CPU")

    if text_generator.device.type == "cuda":
        print("The pipeline is using a GPU")
    else:
        print("The pipeline is using a CPU")

    print("initialized")


@spaces.GPU(duration=120)
def generate_text(messages):
    result = text_generator(messages, max_new_tokens=256, do_sample=True, temperature=0.7)

    generated_output = result[0]["generated_text"]
    if isinstance(generated_output, list):
        # The pipeline returns the full conversation; take the last assistant turn.
        for message in reversed(generated_output):
            if message.get("role") == "assistant":
                return message.get("content", "No content found.")
        return "No assistant response found."
    else:
        return "Unexpected output format."


def call_generate_text(message, history):
    print(message)
    print(history)

    messages = history + [{"role": "user", "content": message}]
    try:
        return generate_text(messages)
    except RuntimeError as e:
        print(f"An unexpected error occurred: {e}")
        return ""


demo = gr.ChatInterface(call_generate_text, type="messages")

if __name__ == "__main__":
    init()
    demo.launch(share=True)