| """ | |
| from peft import PeftModel, PeftConfig | |
| from transformers import AutoModelForCausalLM | |
| import os | |
| access_token = os.environ['HF_TOKEN'] | |
| config = PeftConfig.from_pretrained("HiTZ/Mistral-7B-MedExpQA-EN") | |
| model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", token=access_token) | |
| model = PeftModel.from_pretrained(model, "HiTZ/Mistral-7B-MedExpQA-EN", token=access_token) | |
| tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", token=access_token) | |
| input_text = "Write me a poem about Machine Learning." | |
| input_ids = tokenizer(input_text, return_tensors="pt") | |
| outputs = model.generate(**input_ids) | |
| print(tokenizer.decode(outputs[0])) | |
| """ | |

from huggingface_hub import InferenceClient
import gradio as gr
import os
import requests

access_token = os.environ['HF_TOKEN']

# Serverless Inference API endpoint for the fine-tuned MedExpQA model.
API_URL = "https://api-inference.huggingface.co/models/HiTZ/Mistral-7B-MedExpQA-EN"
headers = {"Authorization": "Bearer " + access_token}

def query(payload):
    """Send a JSON payload to the Inference API and return the parsed response."""
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()

# One-off smoke test of the Inference API endpoint at startup.
output = query({
    "inputs": "Can you please let us know more details about your ",
})

# Stream completions from the base Mistral model through the Hugging Face
# Inference API.
client = InferenceClient("mistralai/Mistral-7B-v0.1", token=access_token)

def format_prompt(message, history):
    """Build a Mistral-style instruction prompt from the chat history."""
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
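
# Example (derived from format_prompt): for history [("Hi", "Hello!")] and the
# new message "How are you?", the prompt sent to the model is
#   "<s>[INST] Hi [/INST] Hello!</s> [INST] How are you? [/INST]"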

def generate(
    prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
):
    """Stream a completion for the chat prompt, yielding partial outputs for Gradio."""
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""
    # Accumulate tokens as they arrive so the chat UI renders the reply incrementally.
    for response in stream:
        output += response.token.text
        yield output
    return output
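
# Aside (a minimal sketch, not used by the app): with the default stream=False
# and details=False, client.text_generation() returns the full completion as a
# single string instead of a token stream, e.g.
#   text = client.text_generation(format_prompt("Hello", []), max_new_tokens=64)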

# Generation controls exposed in the chat UI.
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=256,
        minimum=0,
        maximum=1048,
        step=64,
        interactive=True,
        info="The maximum number of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]

# Build and launch the chat interface.
gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    additional_inputs=additional_inputs,
    title="Mistral 7B fine-tuned on MedExpQA with max RAG 32",
).launch(show_api=False)