import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import ast
from huggingface_hub import login

# Model state is loaded lazily on the first request.
model, tokenizer, device = None, None, None
def load_model(token):
    """Authenticate and lazily load the model and tokenizer on first call."""
    global model, tokenizer, device
    if model is None:
        login(token=token)
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model_kwargs = {}
        if torch.cuda.is_available():
            # 8-bit loading requires the bitsandbytes package.
            model_kwargs = {
                "load_in_8bit": True,
                "device_map": "auto",
                "low_cpu_mem_usage": True,
            }
        tokenizer = AutoTokenizer.from_pretrained("salmapm/llama2_salma")
        model = AutoModelForCausalLM.from_pretrained(
            "salmapm/llama2_salma",
            **model_kwargs,
        )
        # With device_map="auto" the weights are already placed on the GPU,
        # and an 8-bit model cannot be moved with .to(); only move the model
        # manually on the CPU path.
        if not torch.cuda.is_available():
            model.to(device)
    return model, tokenizer, device
def respond(message, history, system_message, max_tokens, temperature, top_p, token):
    if not token:
        return "Please provide a Hugging Face token."
    try:
        model, tokenizer, device = load_model(token)
    except Exception as e:
        return f"An error occurred: {e}"

    # The history arrives as raw text from a Textbox; parse it into a list of
    # (user_message, assistant_response) pairs, falling back to an empty history.
    try:
        history = ast.literal_eval(history) if history else []
    except (ValueError, SyntaxError):
        history = []

    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # The system message is already the first entry in `messages`,
    # so it is not prepended a second time.
    prompt = "\n".join(f"{msg['role']}: {msg['content']}" for msg in messages)

    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(
        inputs["input_ids"],
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
    )
    # Decode only the newly generated tokens, not the echoed prompt.
    response = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True
    )
    return response
# Create the Gradio interface
demo = gr.Interface(
    fn=respond,
    inputs=[
        gr.Textbox(label="Message"),
        gr.Textbox(
            label='History (format: [("user_message", "assistant_response"), ...])',
            lines=2,
        ),
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
        gr.Textbox(label="Hugging Face Token", type="password"),  # Token input field
    ],
    outputs="text",
)
if __name__ == "__main__":
    demo.launch()
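
# A minimal client-side sketch (an assumption, not part of the original app):
# query the running interface with gradio_client. The URL and the token value
# are placeholders; gr.Interface exposes its endpoint as "/predict" by default.
#
#     from gradio_client import Client
#
#     client = Client("http://127.0.0.1:7860")  # or the public Space URL
#     reply = client.predict(
#         "Hello!",                               # Message
#         '[("Hi!", "Hello, how can I help?")]',  # History as a Python literal
#         "You are a friendly Chatbot.",          # System message
#         512, 0.7, 0.95,                         # Max new tokens, temperature, top-p
#         "hf_xxx",                               # Hugging Face token (placeholder)
#         api_name="/predict",
#     )
#     print(reply)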