import gradio as gr
from huggingface_hub import InferenceClient
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import os

# Read the Hugging Face access token from the environment variable / Space secret named "token"
access_token = os.getenv("token")

# Initialize the tokenizer and model with the Hugging Face access token
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it", token=access_token)
model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2b-it",
    torch_dtype=torch.bfloat16,
    token=access_token,
)
model.eval()  # Set the model to evaluation mode

# Initialize the inference client (used by the streaming chat handler below)
client = InferenceClient(provider="together", token=access_token)


def conversation_predict(input_text):
    """Generate a response for single-turn input using the local model."""
    # Tokenize the input text
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids

    # Generate a response with the model
    outputs = model.generate(input_ids, max_new_tokens=2048)

    # Decode and return the generated response
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
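
# Note (a hedged sketch, not part of the original app): gemma-2b-it is an
# instruction-tuned model, so wrapping the prompt with the tokenizer's chat
# template usually yields cleaner replies than feeding raw text. The helper
# below assumes the tokenizer ships a chat_template (the Gemma repos do).
def conversation_predict_templated(input_text):
    """Single-turn generation using the Gemma chat template."""
    chat = [{"role": "user", "content": input_text}]
    input_ids = tokenizer.apply_chat_template(
        chat, add_generation_prompt=True, return_tensors="pt"
    )
    outputs = model.generate(input_ids, max_new_tokens=2048)
    # Strip the prompt tokens so only the newly generated reply is returned
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)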

def respond(
    message: str,
    history: list[tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    """Generate a streaming response for a multi-turn chat conversation."""
    # Prepare the messages in the format expected by the chat completion API
    messages = [{"role": "system", "content": system_message}]

    for user_input, assistant_reply in history:
        if user_input:
            messages.append({"role": "user", "content": user_input})
        if assistant_reply:
            messages.append({"role": "assistant", "content": assistant_reply})

    messages.append({"role": "user", "content": message})

    response = ""

    # Stream response tokens from the chat completion API
    for message_chunk in client.chat_completion(
        model="google/gemma-2b-it",
        messages=messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = message_chunk.choices[0].delta.content or ""
        response += token
        yield response

# Create a Gradio ChatInterface demo
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

if __name__ == "__main__":
    demo.launch(share=True)
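
# Usage (assumed setup, not stated in the original): store a Hugging Face
# access token in a Space secret / environment variable named "token", then
# run the app with `python app.py`. Gradio serves the chat UI locally and,
# because share=True, also exposes a temporary public link.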