import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from peft import PeftModel
# load base model
base_model = "mistralai/Mistral-7B-Instruct-v0.1"

# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_compute_dtype=torch.bfloat16,
#     bnb_4bit_use_double_quant=False,
# )
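# Note: re-enabling the 4-bit config above also requires importing
# BitsAndBytesConfig from transformers and installing bitsandbytes.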
model = AutoModelForCausalLM.from_pretrained(
    base_model,
    # quantization_config=bnb_config,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
)
model.config.use_cache = True  # keep the KV cache enabled for inference
model.config.pretraining_tp = 1
# gradient checkpointing is a training-time setting and is not needed in an
# inference-only Space, so it is not enabled here
# load fine-tuned model (PEFT adapter applied on top of the base model)
ft_model = PeftModel.from_pretrained(model, "gildead/mistral_7b_AES_v2_epoch")
ft_model.eval()
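# Optional sketch (an assumption, not part of the original Space): merging the
# adapter weights into the base model removes the PEFT indirection at
# inference time. peft exposes this as merge_and_unload():
# ft_model = ft_model.merge_and_unload()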
# load tokenizer (from the base model id string, not from the PeftModel object)
tokenizer = AutoTokenizer.from_pretrained(base_model, trust_remote_code=True)
# quick smoke test of the fine-tuned model
prompt = "How do I find true love?"
pipe = pipeline(task="text-generation", model=ft_model, tokenizer=tokenizer)
result = pipe(f"<s>[INST] {prompt} [/INST]", max_new_tokens=7)
print(result[0]['generated_text'])
def format_prompt(message, history):
    # wrap the chat history in Mistral's [INST] ... [/INST] instruction format
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
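# Example: format_prompt("How are you?", [("Hi", "Hello!")]) returns
# "<s>[INST] Hi [/INST] Hello!</s> [INST] How are you? [/INST]"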
def generate(
    prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    # transformers' generate() has no `seed` argument; call torch.manual_seed()
    # before generating if reproducible sampling is needed
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
    )

    formatted_prompt = format_prompt(prompt, history)
    # a transformers pipeline returns a list of dicts, not a token stream, so
    # pass the formatted prompt and sampling kwargs through and return the text
    result = pipe(formatted_prompt, return_full_text=False, **generate_kwargs)
    return result[0]["generated_text"]
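# A streaming variant (sketch under assumptions, not the original code):
# transformers' TextIteratorStreamer yields decoded chunks as they are
# generated, so gr.ChatInterface could render the reply progressively.
def generate_stream(prompt, history, max_new_tokens=256):
    from threading import Thread
    from transformers import TextIteratorStreamer
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    inputs = tokenizer(format_prompt(prompt, history), return_tensors="pt").to(ft_model.device)
    # run generation in a background thread while the streamer is consumed here
    Thread(target=ft_model.generate,
           kwargs=dict(**inputs, streamer=streamer, max_new_tokens=max_new_tokens)).start()
    output = ""
    for chunk in streamer:
        output += chunk
        yield output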
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=256,
        minimum=0,
        maximum=1024,
        step=64,
        interactive=True,
        info="The maximum number of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]
css = """
#mkd {
    height: 500px;
    overflow: auto;
    border: 1px solid #ccc;
}
"""
with gr.Blocks(css=css) as demo:
    gr.HTML("<h1><center>Mistral 7B Instruct</center></h1>")
    gr.HTML("<h3><center>In this demo, you can chat with the <a href='https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1'>Mistral-7B-Instruct</a> model. 💬</center></h3>")
    gr.HTML("<h3><center>Learn more about the model <a href='https://huggingface.co/docs/transformers/main/model_doc/mistral'>here</a>. 📚</center></h3>")
    gr.ChatInterface(
        generate,
        additional_inputs=additional_inputs,
        examples=[["What is the secret to life?"], ["Write me a recipe for pancakes."]],
    )
demo.queue(max_size=100).launch(max_threads=75, debug=True)