import gradio as gr  # type: ignore
from huggingface_hub import InferenceClient  # type: ignore

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Build the chat transcript: system prompt, then alternating user/assistant turns.
    messages = [{"role": "system", "content": system_message}]

    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    messages.append({"role": "user", "content": message})

    # Stream the completion, yielding the accumulated reply after each chunk.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content or ""  # delta content can be None on the final chunk
        response += token
        yield response


"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

if __name__ == "__main__":
    demo.launch()
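"""
A quick way to sanity-check `respond` without launching the UI is to drive the
generator directly. A minimal sketch, assuming the hosted
HuggingFaceH4/zephyr-7b-beta endpoint is reachable; it is left commented out so
running the module still just launches the app:
"""
# final = ""
# for final in respond(
#     "Hello!",                       # message
#     [],                             # empty history
#     "You are a friendly Chatbot.",  # system message
#     64,                             # max_tokens
#     0.7,                            # temperature
#     0.95,                           # top_p
# ):
#     pass
# print(final)  # the last yielded value is the full reply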
| ########################### | |
# app.py
import gradio as gr  # type: ignore
import os

# --- Alternative backend 1: OpenAI Responses API (kept commented for reference) ---
# import openai  # type: ignore
#
# # openai.api_key = os.getenv("OPENAI_API_KEY")
# client = openai.OpenAI()
#
# def respond(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
#     image_uploaded,
#     file_uploaded,
# ):
#     # Read the system message.
#     messages = [{"role": "system", "content": system_message}]
#     # Replay the history.
#     for val in history:
#         if val[0]:
#             messages.append({"role": "user", "content": val[0]})
#         if val[1]:
#             messages.append({"role": "assistant", "content": val[1]})
#     # Append the new user message.
#     messages.append({"role": "user", "content": message})
#     print("## Messages: \n", messages)  # debug output
#     # Create the completion.
#     response = client.responses.create(
#         model="gpt-4.1-nano",
#         input=messages,
#         temperature=temperature,
#         top_p=top_p,
#         max_output_tokens=max_tokens,
#     )
#     # Read the output text.
#     response = response.output_text
#     print("## Response: ", response)  # debug output
#     print("\n")
#     yield response  # chat reply
# --- Alternative backend 2: local DeepSeek-Math via transformers (kept commented for reference) ---
# import torch
# from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
#
# model_name = "deepseek-ai/deepseek-math-7b-base"
# tokenizer = AutoTokenizer.from_pretrained(model_name)
# model = AutoModelForCausalLM.from_pretrained(model_name)
# # model.generation_config = GenerationConfig.from_pretrained(model_name)
# # model.generation_config.pad_token_id = model.generation_config.eos_token_id
#
# def deepseek(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
# ):
#     # messages = [
#     #     {"role": "user", "content": "what is the integral of x^2 from 0 to 2?\nPlease reason step by step, and put your final answer within \\boxed{}."}
#     # ]
#     messages = [
#         {"role": "user", "content": message}
#     ]
#     input_tensor = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
#     outputs = model.generate(input_tensor.to(model.device), max_new_tokens=100)
#     print(outputs)
#     print("\n")
#     result = tokenizer.decode(outputs[0][input_tensor.shape[1]:], skip_special_tokens=True)
#     print(result)
#     return result
# --- Alternative backend 3: DeepSeek-Math on Replicate (kept commented for reference) ---
# import replicate
#
# def deepseek_api_replicate(
#     user_message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_new_tokens,
#     temperature,
#     top_p,
# ):
#     """
#     Call DeepSeek Math on Replicate and return the result immediately.
#     Returns:
#         str or [bytes]: the output the model generates
#     """
#     # 1. Initialize the client and authenticate.
#     # token = os.getenv("REPLICATE_API_TOKEN")
#     # if not token:
#     #     raise RuntimeError("Missing REPLICATE_API_TOKEN")  # keep the secret in an environment variable
#     client = replicate.Client(api_token=os.getenv("REPLICATE_API_TOKEN"))  # read the token from the environment, not a string literal
#     # 2. Call the model.
#     output = client.run(
#         "deepseek-ai/deepseek-math-7b-base:61f572dae0985541cdaeb4a114fd5d2d16cb40dac3894da10558992fc60547c7",
#         input={
#             "system_prompt": system_message,
#             "user_prompt": user_message,
#             "max_new_tokens": max_new_tokens,
#             "temperature": temperature,
#             "top_p": top_p,
#         },
#     )
#     # 3. Return the result.
#     return output
import call_api

chat = gr.ChatInterface(
    call_api.respond,  # chat handler
    title="Trợ lý Học Tập AI",  # "AI Study Assistant"
    description="Nhập câu hỏi của bạn về Toán, Lý, Hóa, Văn… và nhận giải đáp chi tiết ngay lập tức!",  # "Enter your question on Math, Physics, Chemistry, Literature… and get a detailed answer right away!"
    additional_inputs=[
        gr.Textbox("Bạn là một chatbot tiếng Việt thân thiện.", label="System message"),  # "You are a friendly Vietnamese chatbot."
        gr.Slider(1, 2048, value=200, step=1, label="Max new tokens"),
        gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
        # gr.Image(type="pil", label="Attach an image (optional)"),
        # gr.File(label="Upload a file (optional)"),
    ],
    examples=[
        # Each item: [message, system_message, max_tokens, temperature, top_p]
        # ("what is the integral of x^2 from 0 to 2? please reason step by step, and put the final answer in \boxed{}" / "you are a mathematician")
        ["tích phân của x^2 từ 0 đến 2 là gì? vui lòng lập luận từng bước, và đặt kết quả cuối cùng trong \\boxed{}", "bạn là nhà toán học", 100, 0.7, 0.95],
    ],
)

if __name__ == "__main__":
    chat.launch()
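###########################
"""
`call_api.respond` lives in a separate module that this dump does not include.
ChatInterface will invoke it with (message, history, system_message, max_tokens,
temperature, top_p), so the module must expose a callable with that signature.
A minimal sketch of what such a module could look like, assuming it streams from
the same HuggingFaceH4/zephyr-7b-beta endpoint as the first app above; the real
call_api.py may use a different backend entirely:
"""
# call_api.py (hypothetical sketch)
# from huggingface_hub import InferenceClient
#
# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
#
# def respond(message, history, system_message, max_tokens, temperature, top_p):
#     messages = [{"role": "system", "content": system_message}]
#     for user_turn, bot_turn in history:
#         if user_turn:
#             messages.append({"role": "user", "content": user_turn})
#         if bot_turn:
#             messages.append({"role": "assistant", "content": bot_turn})
#     messages.append({"role": "user", "content": message})
#     response = ""
#     for chunk in client.chat_completion(
#         messages, max_tokens=max_tokens, stream=True,
#         temperature=temperature, top_p=top_p,
#     ):
#         response += chunk.choices[0].delta.content or ""
#         yield response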