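"""Gradio app for amkyaw-coder: sends prompts to a hosted Llama model
through the Hugging Face Inference API and displays the completion."""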
import gradio as gr
from huggingface_hub import InferenceClient
# Serverless Inference API client for the instruct model (chat endpoint).
client = InferenceClient("meta-llama/Llama-3.2-1B-Instruct")
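# Note: meta-llama checkpoints are gated on the Hub, so inference may require
# accepting the model license and supplying a token (InferenceClient picks up
# HF_TOKEN from the environment or a cached `huggingface-cli login`).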
def generate(prompt, temperature=0.8, max_tokens=128):
    """Send the prompt to the model and return its reply (or an error string)."""
    try:
        # This model is served through the conversational (chat) endpoint,
        # so wrap the raw prompt as a single user message.
        messages = [{"role": "user", "content": prompt}]
        response = client.chat_completion(
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {e}"
with gr.Blocks(title="amkyaw-coder") as demo:
    gr.Markdown("# amkyaw-coder\n🤖 Code Generation Model (via HF Inference)")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt", lines=4, placeholder="Enter your prompt here...")
            temperature = gr.Slider(0.1, 2.0, value=0.8, step=0.1, label="Temperature")
            max_tokens = gr.Slider(32, 512, value=128, step=32, label="Max Tokens")
            submit = gr.Button("Generate", variant="primary")
        with gr.Column():
            output = gr.Textbox(label="Output", lines=15)
    submit.click(generate, inputs=[prompt, temperature, max_tokens], outputs=output)
if __name__ == "__main__":
    # 0.0.0.0:7860 is the host/port a Hugging Face Space expects the app to bind.
    demo.launch(server_name="0.0.0.0", server_port=7860)
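# To run locally (assuming this file is saved as app.py): `python app.py`,
# then open http://localhost:7860 in a browser.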