File size: 3,029 Bytes
745400d
 
 
3b739f0
 
90d0abe
284a793
745400d
90d0abe
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
745400d
90d0abe
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
745400d
90d0abe
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import gradio as gr
from huggingface_hub import InferenceClient



# Initialize the Hugging Face model client
# NOTE(review): no auth token is passed, so this relies on anonymous/ambient
# HF credentials — confirm the model is publicly served and rate limits fit.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")  # Change to your preferred model

# Map descriptive length to token count
# Fallback token budget read by generate_text() via length_map.get(length, 100)
# when no explicit max_tokens value is supplied.
length_map = {
    "Short": 100,
    "Medium": 200,
    "Long": 300
}

# Function to generate creative writing content
def generate_text(category, theme, tone, length, max_tokens, temperature, top_p):
    """Generate a creative writing piece via the HF streaming chat API.

    Args:
        category: "Story" or "Poem" (falls back to "story" if empty/None).
        theme: Free-text theme/title (falls back to "an interesting idea").
        tone: Desired tone, e.g. "Happy" (falls back to "neutral").
        length: "Short" | "Medium" | "Long" (falls back to "Short").
        max_tokens: Explicit token cap; when falsy, the cap derived from
            `length` via `length_map` is used instead.
        temperature: Sampling temperature forwarded to the API.
        top_p: Nucleus-sampling threshold forwarded to the API.

    Returns:
        The generated text, or an error string prefixed with "❌ Exception:"
        if the API call fails.
    """
    # Set defaults if needed — Gradio may hand us None or empty strings,
    # so guard theme before calling .strip() on it.
    category = category or "story"
    theme = (theme or "").strip() or "an interesting idea"
    tone = tone or "neutral"
    length = length or "Short"

    # NOTE(review): the max_tokens slider always supplies a truthy value in
    # the UI, so this fallback only fires when the function is called with
    # max_tokens=0/None directly.
    token_length = length_map.get(length, 100)

    # Construct the prompt
    prompt = (
        f"Write a {length.lower()} and {tone.lower()} {category.lower()} titled '{theme}'. "
        "It should have a beginning, middle, and end. Use vivid, imaginative language. "
        f"'The {theme.title()}'. Write it in a creative and expressive style.\n\n"
    )

    if category.lower() == "poem":
        prompt += "Make sure it's in poetic form, with vivid imagery and emotion."
    elif category.lower() == "story":
        prompt += "Start with an engaging opening, include a conflict, and resolve it clearly."

    prompt += "\n\n"

    # Prepare the message for the chat API
    messages = [
        {"role": "system", "content": "You are a creative writing assistant."},
        {"role": "user", "content": prompt}
    ]

    # Collect the streamed response
    response = ""
    try:
        for message in client.chat_completion(
            messages=messages,
            max_tokens=max_tokens or token_length,
            temperature=temperature,
            top_p=top_p,
            stream=True,
        ):
            # Streamed chunks (notably the final one) may carry
            # delta.content = None; skip those to avoid a TypeError
            # when concatenating.
            token = message.choices[0].delta.content
            if token:
                response += token
    except Exception as e:
        return f"❌ Exception: {str(e)}"

    return response


# Build and launch the Gradio interface.
with gr.Blocks() as demo:
    gr.Markdown("## ✨ Creative Writing Generator (Hugging Face API)")

    # Main controls: what to write and how it should feel.
    with gr.Row():
        category_dd = gr.Dropdown(choices=["Story", "Poem"], label="Category")
        theme_box = gr.Textbox(
            label="Theme",
            placeholder="e.g. friendship, time travel, lost love",
        )
        tone_dd = gr.Dropdown(
            choices=["Happy", "Sad", "Funny", "Dark", "Inspiring"], label="Tone"
        )
        length_dd = gr.Dropdown(choices=["Short", "Medium", "Long"], label="Length")

    # Sampling knobs, collapsed by default.
    with gr.Accordion("🛠 Advanced Settings", open=False):
        max_tok_slider = gr.Slider(
            minimum=50, maximum=2048, value=300, step=10, label="Max Tokens"
        )
        temp_slider = gr.Slider(
            minimum=0.1, maximum=2.0, value=0.8, step=0.1, label="Temperature"
        )
        top_p_slider = gr.Slider(
            minimum=0.1, maximum=1.0, value=0.95, step=0.05,
            label="Top-p (Nucleus Sampling)",
        )

    run_btn = gr.Button("🪄 Generate")
    result_box = gr.Textbox(label="Generated Content", lines=15)

    # Wire the button to the generator; input order must match the
    # generate_text signature.
    run_btn.click(
        fn=generate_text,
        inputs=[
            category_dd,
            theme_box,
            tone_dd,
            length_dd,
            max_tok_slider,
            temp_slider,
            top_p_slider,
        ],
        outputs=result_box,
    )

demo.launch()