Spaces: Build error
import os
from threading import Thread
from typing import Iterator

import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
ACCESS_TOKEN = os.getenv("HF_TOKEN", "")
model_id = "Qwen/Qwen2.5-0.5B-Instruct"
# filename = "Mistral-Nemo-Instruct-2407-Q6_K_L.gguf"

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    # gguf_file=filename,
    torch_dtype=torch.float16,
    device_map="auto",  # requires the `accelerate` package to be installed
    trust_remote_code=False,
    token=ACCESS_TOKEN,
)
tokenizer = AutoTokenizer.from_pretrained(
    model_id,
    # gguf_file=filename,
    trust_remote_code=False,
    token=ACCESS_TOKEN,
)
tokenizer.use_default_system_prompt = False
# Gradient checkpointing is a training-time memory optimization; setting it on
# the config has no effect at inference and could be dropped.
model.config.gradient_checkpointing = True
@spaces.GPU  # requests GPU time on ZeroGPU hardware; without this, the `spaces` import is unused
def generate(
    message: str,
    system_prompt: str,
    max_new_tokens: int = 1024,
    temperature: float = 0.01,
    top_p: float = 0.7,
) -> Iterator[str]:
    conversation = []
    if system_prompt:
        conversation.append({"role": "system", "content": system_prompt})
    conversation.append({"role": "user", "content": message})

    # add_generation_prompt=True appends the assistant turn header, so the
    # model writes a reply instead of continuing the user's message.
    input_ids = tokenizer.apply_chat_template(
        conversation, add_generation_prompt=True, return_tensors="pt"
    )
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)
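
    # For reference, Qwen2.5's chat template is ChatML-style, so the rendered
    # prompt looks roughly like this (abridged sketch, not the exact tokens):
    #
    #   <|im_start|>system
    #   ...system prompt...<|im_end|>
    #   <|im_start|>user
    #   ...message...<|im_end|>
    #   <|im_start|>assistant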
    # Leftover from a Llama-3-style setup; Qwen has its own EOS token, so this
    # stays disabled.
    '''
    terminators = [
        tokenizer.eos_token_id,
        tokenizer.convert_tokens_to_ids("<|eot_id|>")
    ]
    '''
    streamer = TextIteratorStreamer(tokenizer, timeout=600.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        # eos_token_id=terminators,
        do_sample=True,
        top_p=top_p,
        temperature=temperature,
        num_beams=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    # model.generate blocks, so it runs in a background thread while the
    # streamer yields decoded text chunks to the UI as they arrive.
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
chat_interface = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(lines=2, placeholder="Prompt", label="Prompt"),
    ],
    outputs="text",
    additional_inputs=[
        gr.Textbox(label="System prompt", lines=6),
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.01,  # was 0.1, which is above the slider's default value of 0.01
            maximum=4.0,
            step=0.01,
            value=0.01,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.01,
            value=0.7,
        ),
    ],
    title="Model testing - Qwen/Qwen2.5-0.5B-Instruct",
    description="Provide system settings and a prompt to interact with the model.",
)

chat_interface.queue(max_size=20).launch()
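
A "Build error" status on a Space points at the image build step, which is most often a broken or incomplete requirements.txt rather than a bug in app.py; separately, even a successful build will fail at runtime if `device_map="auto"` is used without `accelerate` installed. A minimal requirements.txt sketch for this app, assuming the Gradio SDK is set in the Space's README metadata (unpinned versions are illustrative, not confirmed from the original Space):

    accelerate
    spaces
    torch
    transformers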