import textwrap

import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
# Helper to wrap long lines of generated text for readable display
def wrap_text(text, width=90):
    lines = text.split('\n')
    wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
    wrapped_text = '\n'.join(wrapped_lines)
    return wrapped_text
def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:", max_length=512):
    """
    Generates text with the model, given a user input and an optional system prompt.

    Args:
        user_input: The user's input text to generate a response for.
        system_prompt: Optional system prompt.
        max_length: Maximum length of the generated sequence.

    Returns:
        A string containing the generated text.
    """
    # Combine the system prompt and user input in Mistral instruction format
    formatted_input = f"<s>[INST]{system_prompt} {user_input}[/INST]"

    # Encode the input text and move it to the model's device
    encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
    model_inputs = encodeds.to(device)

    # Generate a response using the model
    output = model.generate(
        **model_inputs,
        max_length=max_length,
        use_cache=True,
        early_stopping=True,
        bos_token_id=model.config.bos_token_id,
        eos_token_id=model.config.eos_token_id,
        pad_token_id=model.config.eos_token_id,
        temperature=0.1,
        do_sample=True,
    )

    # Decode the response
    response_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return response_text
# Define the device
device = "cuda" if torch.cuda.is_available() else "cpu"

# Use the base model's ID
base_model_id = "OpenLLM-France/Claire-Mistral-7B-0.1"

# Instantiate the tokenizer (left padding so prompts line up for generation)
tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True, padding_side="left")
tokenizer.pad_token = tokenizer.eos_token

# Load the base model and move it to the selected device
model = AutoModelForCausalLM.from_pretrained(base_model_id)
model.to(device)
class ChatBot:
    def __init__(self):
        self.history = []

    def predict(self, user_input, system_prompt="You are an expert medical analyst:"):
        # Combine the system prompt and user input in Mistral instruction format
        formatted_input = f"<s>[INST]{system_prompt} {user_input}[/INST]"

        # Encode the user input and move it to the model's device
        user_input_ids = tokenizer.encode(formatted_input, return_tensors="pt").to(device)

        # Concatenate the new input with the chat history, if any
        if len(self.history) > 0:
            chat_history_ids = torch.cat([self.history, user_input_ids], dim=-1)
        else:
            chat_history_ids = user_input_ids

        # Generate a response with the base model
        response = model.generate(input_ids=chat_history_ids, max_length=512, pad_token_id=tokenizer.eos_token_id)

        # Update the chat history
        self.history = chat_history_ids

        # Decode and return the response
        response_text = tokenizer.decode(response[0], skip_special_tokens=True)
        return response_text
bot = ChatBot()

title = "👋🏻Welcome to Tonic's Claire Chat🚀"
description = "You can use this Space to test the current model (ClaireLLM), or duplicate this Space and use it with any other model on 🤗HuggingFace. Join me on Discord to build together."
examples = [["Oueche Normal, Claire, ça va ou quoi?", "bonjour je m'appelle Claire et je suis une assistante francophone-first conçue par OpenLLM"]]

iface = gr.Interface(
    fn=bot.predict,
    title=title,
    description=description,
    examples=examples,
    inputs=["text", "text"],  # User input and system prompt as separate fields
    outputs="text",
    theme="ParityError/Anime"
)

iface.launch()