import gradio as gr
import os
import requests
# Configuration for the Hugging Face router's OpenAI-compatible chat-completions API.
MODEL_ID = "meta-llama/Llama-3.2-3B-Instruct"  # instruction-tuned LLM used for rewriting
token = os.getenv("HUGGING_FACE_HUB_TOKEN")  # read once at import time; None when unset
ROUTER_URL = "https://router.huggingface.co/v1/chat/completions"
def improve_description(user_input: str) -> str:
    """Rewrite a rough project description into polished, professional prose.

    Sends the text to an instruction-tuned LLM via the Hugging Face router's
    OpenAI-compatible chat-completions endpoint and returns the rewritten
    description (capped at 3 sentences by the prompt).

    Args:
        user_input: Free-form project description typed by the user.

    Returns:
        The improved description on success, or a human-readable
        "Error: ..." / guidance string on any failure. Never raises:
        this function is a UI callback, so all errors are reported as text.
    """
    # Guard clauses: empty or whitespace-only input, then too-short input.
    if not user_input or not user_input.strip():
        return "Please provide a description to improve."
    user_text = user_input.strip()
    if len(user_text) < 10:
        return "The description is too short. Please add more details about your project."
    # Read the token at call time (not import time) so a token exported after
    # the process started is still picked up.
    hf_token = os.getenv("HUGGING_FACE_HUB_TOKEN")
    if not hf_token:
        return "Error: HUGGING_FACE_HUB_TOKEN environment variable is not set. Please configure your Hugging Face token."
    headers = {
        "Authorization": f"Bearer {hf_token}",
        "Content-Type": "application/json"
    }
    payload = {
        "model": MODEL_ID,
        "messages": [
            {"role": "system", "content": "You are a technical writing assistant. Rewrite project descriptions to be clear, professional, and concise while preserving all technical details. Limit your output to a maximum of 3 sentences."},
            {"role": "user", "content": f"Rewrite this project description professionally, concisely, and in no more than 3 sentences:\n\n{user_text}"}
        ],
        "max_tokens": 300,  # ample for a 3-sentence rewrite
        "temperature": 0.3  # low temperature: favor faithful rewording over creativity
    }
    try:
        resp = requests.post(ROUTER_URL, headers=headers, json=payload, timeout=60)
        if resp.status_code != 200:
            return f"Error: API request failed - {resp.status_code} {resp.reason}: {resp.text}"
        data = resp.json()
        improved_text = data["choices"][0]["message"]["content"].strip()
        return improved_text if improved_text else "Error: No response generated."
    except Exception as e:
        # Boundary catch-all: network failures, malformed JSON, or an
        # unexpected response shape are all surfaced to the UI as text.
        return f"Error: An unexpected error occurred - {str(e)}"
# --- Gradio interface: two text boxes, a submit button, and clickable examples ---
with gr.Blocks(title="Technical Description Assistant", theme=gr.themes.Soft()) as demo:
    # Page header / usage blurb.
    gr.Markdown(
        """
        # 📝 Technical Description Assistant
        Instantly transform your rough project notes into a polished, professional
        technical description using a zero-shot AI model.
        """
    )
    # Input and output side by side with equal width (scale=1 each).
    with gr.Row():
        input_text = gr.Textbox(
            label="Your Rough Description",
            placeholder="e.g., 'my app uses python and ml stuff to make pics look better'",
            lines=10,
            scale=1,
        )
        output_text = gr.Textbox(
            label="Polished Technical Description",
            lines=10,
            scale=1,
        )
    submit_btn = gr.Button("✨ Generate Polished Description", variant="primary")
    gr.Markdown("### A few examples to get you started:")
    # Pre-canned rough descriptions; clicking one feeds it through
    # improve_description into the output box.
    gr.Examples(
        examples=[
            "This is a web app that lets users upload pics and it uses AI to make them look better. Built with Python and some ML stuff.",
            "Made a tool that checks code for bugs. It's really fast and works with multiple languages. Uses AST parsing.",
            "API for weather data. Gets info from multiple sources and combines them. Has caching so it's faster.",
            "This project uses docker and cloud stuff to run machines.",
        ],
        inputs=input_text,
        outputs=output_text,
        fn=improve_description,
    )
    # Button click runs the same handler; api_name also exposes it as an
    # /improve_description API endpoint for programmatic clients.
    submit_btn.click(
        fn=improve_description,
        inputs=input_text,
        outputs=output_text,
        api_name="improve_description"
    )

# Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()