# duplicated from https://huggingface.co/spaces/chheplo/DeepSeek-R1-Distill-Llama-8B
import gradio as gr
import os
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread

# Set an environment variable
HF_TOKEN = os.environ.get("HF_TOKEN", None)

DESCRIPTION = '''

deepseek-ai/DeepSeek-R1-Distill-Llama-8B

''' LICENSE = """

--- """ PLACEHOLDER = """

DeepSeek-R1-Distill-Llama-8B

Ask me anything...

""" css = """ h1 { text-align: center; display: block; } #duplicate-button { margin: auto; color: white; background: #1565c0; border-radius: 100vh; } """ model_id = "AXCXEPT/phi-4-deepseek-R1K-RL-EZO" model_id = "AXCXEPT/phi-4-open-R1-Distill-EZOv1" # Load the tokenizer and model tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) # to("cuda:0") terminators = [ tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>") ] @spaces.GPU(duration=120) def chat_llama3_8b(message: str, history: list, temperature: float, max_new_tokens: int ) -> str: """ Generate a streaming response using the llama3-8b model. Args: message (str): The input message. history (list): The conversation history used by ChatInterface. temperature (float): The temperature for generating the response. max_new_tokens (int): The maximum number of new tokens to generate. Returns: str: The generated response. """ conversation = [] for user, assistant in history: conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}]) conversation.append({"role": "user", "content": message}) input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device) streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True) generate_kwargs = dict( input_ids= input_ids, streamer=streamer, max_new_tokens=max_new_tokens, do_sample=True, temperature=temperature, eos_token_id=terminators, ) # This will enforce greedy generation (do_sample=False) when the temperature is passed 0, avoiding the crash. if temperature == 0: generate_kwargs['do_sample'] = False t = Thread(target=model.generate, kwargs=generate_kwargs) t.start() outputs = [] for text in streamer: outputs.append(text) #print(outputs) yield "".join(outputs) # Gradio block chatbot=gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface') with gr.Blocks(fill_height=True, css=css) as demo: gr.Markdown(DESCRIPTION) gr.ChatInterface( fn=chat_llama3_8b, chatbot=chatbot, fill_height=True, additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False), additional_inputs=[ gr.Slider(minimum=0, maximum=1, step=0.1, value=0.5, label="Temperature", render=False), gr.Slider(minimum=128, maximum=4096, step=1, value=1024, label="Max new tokens", render=False ), ], examples=[ ['How to setup a human base on Mars? 

# Gradio block
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')

with gr.Blocks(fill_height=True, css=css) as demo:
    gr.Markdown(DESCRIPTION)
    gr.ChatInterface(
        fn=chat_llama3_8b,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(minimum=0, maximum=1, step=0.1, value=0.5, label="Temperature", render=False),
            gr.Slider(minimum=128, maximum=4096, step=1, value=1024, label="Max new tokens", render=False),
        ],
        examples=[
            ['How to setup a human base on Mars? Give short answer.'],
            ['Explain theory of relativity to me like I’m 8 years old.'],
            ['What is 9,000 * 9,000?'],
            ['Write a pun-filled happy birthday message to my friend Alex.'],
            ['Justify why a penguin might make a good king of the jungle.'],
        ],
        cache_examples=False,
    )
    gr.Markdown(LICENSE)

if __name__ == "__main__":
    demo.launch()


# ---------------------------------------------------------------------------
# Second, standalone variant of the app (ZeroGPU-oriented): it reloads the
# model on every request and streams through a plain ChatInterface. It
# redefines `demo` and launches unconditionally, so only one of the two
# scripts should be active in app.py at a time.
# ---------------------------------------------------------------------------
import spaces
import os
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from transformers import TextIteratorStreamer
from threading import Thread
import gradio as gr

text_generator = None
is_hugging_face = True

# model_id = "AXCXEPT/phi-4-deepseek-R1K-RL-EZO"  # earlier checkpoint, superseded below
model_id = "AXCXEPT/phi-4-open-R1-Distill-EZOv1"

huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
huggingface_token = None  # deliberately overrides the env lookup above

# device = "auto"  # torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = "cuda"
# dtype = torch.bfloat16
dtype = torch.float16

if not huggingface_token:
    print("no HUGGINGFACE_TOKEN; set the Space secret if the model requires one")
    # raise ValueError("HUGGINGFACE_TOKEN environment variable is not set")

tokenizer = AutoTokenizer.from_pretrained(model_id, token=huggingface_token)

print(model_id, device, dtype)
histories = []
# model = None

if not is_hugging_face:
    model = AutoModelForCausalLM.from_pretrained(
        model_id, token=huggingface_token, torch_dtype=dtype, device_map=device
    )
    # A pipeline has no .to(device); placement is handled by device_map.
    text_generator = pipeline(
        "text-generation", model=model, tokenizer=tokenizer,
        torch_dtype=dtype, device_map=device,
    )

    if next(model.parameters()).is_cuda:
        print("The model is on a GPU")
    else:
        print("The model is on a CPU")

    # print(f"text_generator.device='{text_generator.device}'")
    if str(text_generator.device).strip() == "cuda":
        print("The pipeline is using a GPU")
    else:
        print("The pipeline is using a CPU")

print("initialized")


def generate_text(messages):
    if is_hugging_face:  # ZeroGPU: re-initialize the model on every call
        model = AutoModelForCausalLM.from_pretrained(
            model_id, token=huggingface_token, torch_dtype=dtype, device_map=device
        )
        # device_map above already places the weights on the target device.

    question = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    question = tokenizer(question, return_tensors="pt").to(device)

    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    # question is a BatchEncoding, so dict(question, ...) unpacks its
    # input_ids / attention_mask alongside the extra generate kwargs.
    generation_kwargs = dict(question, streamer=streamer, max_new_tokens=200)
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    generated_output = ""
    thread.start()
    for new_text in streamer:
        generated_output += new_text
        yield generated_output


generate_text.zerogpu = True


@spaces.GPU(duration=60)
def call_generate_text(message, history):
    # history.append({"role": "user", "content": message})
    messages = history + [{"role": "user", "content": message}]
    try:
        for text in generate_text(messages):
            yield text
    except RuntimeError as e:
        print(f"An unexpected error occurred: {e}")
        yield ""


demo = gr.ChatInterface(call_generate_text, type="messages")

# if __name__ == "__main__":
demo.queue()
demo.launch()
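
# A minimal requirements.txt sketch for this Space, inferred from the imports
# above (package names only; any version pins would be assumptions):
#
#   gradio
#   spaces
#   transformers
#   torch
#   accelerate   # assumed: needed for device_map-based loading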