import gradio as gr
from huggingface_hub import InferenceClient

# Replace with your model ID
client = InferenceClient("Saibalaji25/autotrain-0u37b-accmn")


def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Format the prompt (you can prepend system_message and history if needed;
    # a streaming variant that does this is sketched after this block)
    prompt = message

    # Get the output from the model
    output = client.text_generation(
        prompt=prompt,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=False,  # True is only useful if you handle streamed responses
    )
    return output


demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Textbox(value="You are a helpful code assistant. Complete the following code.", label="System message"),
        gr.Slider(minimum=1, maximum=1024, value=256, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-p"),
    ],
)

if __name__ == "__main__":
    demo.launch()
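
The respond function above ignores system_message and history and returns the whole completion at once. A minimal sketch of a streaming variant is shown below; it reuses the client defined above, assumes the default ChatInterface history format of (user, assistant) pairs, and uses a hypothetical plain-text prompt layout (the "User:"/"Assistant:" labels are an illustration, not a format this model is known to expect).

def respond_streaming(message, history, system_message, max_tokens, temperature, top_p):
    # Build a plain-text prompt from the system message and the prior turns
    # (history arrives as (user, assistant) pairs in the default ChatInterface setup).
    parts = [system_message]
    for user_turn, assistant_turn in history:
        parts.append(f"User: {user_turn}")
        parts.append(f"Assistant: {assistant_turn}")
    parts.append(f"User: {message}")
    parts.append("Assistant:")
    prompt = "\n".join(parts)

    # With stream=True (and the default details=False), text_generation yields
    # token strings; yielding the accumulated text lets ChatInterface update
    # the reply live in the UI.
    response = ""
    for token in client.text_generation(
        prompt,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    ):
        response += token
        yield response

To try it, pass fn=respond_streaming to gr.ChatInterface instead of respond; Gradio treats a generator function as a streaming handler automatically.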