#!/usr/bin/env python

import os
from threading import Thread
from typing import Iterator

import spaces
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

MAX_MAX_NEW_TOKENS = 1024
DEFAULT_MAX_NEW_TOKENS = 512
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "8192"))
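# The maximum prompt length can be overridden via the MAX_INPUT_TOKEN_LENGTH environment
# variable; generate() below truncates longer conversations from the left to fit it.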
if not torch.cuda.is_available():
    print("❌ CUDA GPU not available. This demo requires a GPU to function properly.")
    print("Please run this application on a system with a CUDA-compatible GPU.")
    exit(1)
# Load model and tokenizer
model_id = "utter-project/EuroLLM-9B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
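# generate() is the streaming callback for gr.ChatInterface: it receives the new user
# message plus the prior (user, assistant) history and yields the partial response string
# each time new tokens arrive, so the UI updates incrementally.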
def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    max_new_tokens: int = 512,
    temperature: float = 0.06,
    top_p: float = 0.95,
    top_k: int = 40,
    repetition_penalty: float = 1.2,
) -> Iterator[str]:
    historical_text = ""
    # Prepend the entire chat history to the message, with new lines between each message
    for user, assistant in chat_history:
        historical_text += f"\n{user}\n{assistant}"
    if len(historical_text) > 0:
        message = historical_text + f"\n{message}"

    input_ids = tokenizer([message], return_tensors="pt").input_ids
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)

    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
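    # The streamer is passed to model.generate() below: skip_prompt drops the echoed input
    # tokens and skip_special_tokens strips EOS/padding markers from the streamed text.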
    generate_kwargs = dict(
        {"input_ids": input_ids},
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        num_beams=1,
        pad_token_id=tokenizer.eos_token_id,
        repetition_penalty=repetition_penalty,
        no_repeat_ngram_size=5,
        early_stopping=False,
    )
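    # Run generation in a background thread so this generator can consume the streamer
    # and yield partial output to Gradio while tokens are still being produced.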
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.1,
            maximum=1.2,
            step=0.1,
            value=0.2,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.9,
        ),
        gr.Slider(
            label="Top-k",
            minimum=1,
            maximum=1000,
            step=1,
            value=50,
        ),
        gr.Slider(
            label="Repetition penalty",
            minimum=1.0,
            maximum=2.0,
            step=0.05,
            value=1.2,
        ),
    ],
    stop_btn=None,
    examples=[
        ["Describe the significance of the Eiffel Tower in French culture and history."],
        ["Что такое 'загадочная русская душа' и как это понятие отражается в русской литературе?"],  # Russian: What is the "mysterious Russian soul" and how is this concept reflected in Russian literature?
        ["Jakie są najbardziej znane polskie tradycje bożonarodzeniowe?"],  # Polish: What are the most well-known Polish Christmas traditions?
        ["Welche Rolle spielte die Hanse im mittelalterlichen Europa?"],  # German: What role did the Hanseatic League play in medieval Europe?
        ["日本の茶道の精神と作法について説明してください。"],  # Japanese: Please explain the spirit and etiquette of the Japanese tea ceremony.
    ],
)
with gr.Blocks(css="style.css") as demo:
    chat_interface.render()

if __name__ == "__main__":
    demo.queue(max_size=20).launch()
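# queue(max_size=20) caps the number of pending requests waiting to be processed,
# and launch() starts the Gradio server that serves the Space.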