Spaces:
Build error
```python
# import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModel

# device = "cuda" if torch.cuda.is_available() else "cpu"
# tokenizer = AutoTokenizer.from_pretrained("stabilityai/StableBeluga2", use_fast=False)
# model = AutoModelForCausalLM.from_pretrained("stabilityai/StableBeluga2", torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto")
# system_prompt = "### System:\nYou are Stable Beluga, an AI that follows instructions extremely well. Help as much as you can. Remember, be safe, and don't do anything illegal.\n\n"
# pipeline = pipeline(task="text-generation", model="meta-llama/Llama-2-7b")

# ChatGLM2 ships its own modeling/tokenization code, so trust_remote_code is required.
tokenizer = AutoTokenizer.from_pretrained(
    "THUDM/chatglm2-6b-int4", trust_remote_code=True
)
# .float() casts the weights to fp32 so the model can run on a CPU-only Space.
chat_model = AutoModel.from_pretrained(
    "THUDM/chatglm2-6b-int4", trust_remote_code=True
).float()


def chat(message, history):
    # prompt = f"{system_prompt}### User: {message}\n\n### Assistant:\n"
    # inputs = tokenizer(prompt, return_tensors="pt").to(device=device)
    # output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=256)
    # return tokenizer.decode(output[0], skip_special_tokens=True)

    # stream_chat yields progressively longer partial responses; yielding each
    # one lets gr.ChatInterface stream the reply into the UI.
    for response, history in chat_model.stream_chat(
        tokenizer, message, history, max_length=2048, top_p=0.7, temperature=0.95
    ):
        yield response


gr.ChatInterface(
    chat,
    title="gradio-chatinterface-tryout",
    # description="fooling around",
    examples=[
        ["test me"],
    ],
    theme=gr.themes.Soft(),
).queue(max_size=2).launch()
```
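On Spaces, a "Build error" status means the dependency install failed before app.py ever ran, so the requirements file is the first place to look. A minimal requirements.txt sketch for this app (the exact package list is an assumption on my part; ChatGLM2's remote code typically also pulls in sentencepiece and cpm_kernels on top of transformers and torch):

```text
# requirements.txt (sketch; pin versions that match your Space's Python if needed)
gradio
transformers
torch
sentencepiece
cpm_kernels
```

If the build succeeds but the Space then crashes at startup, the status changes to "Runtime error" instead, which points at the app code rather than the dependencies.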