Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -4,14 +4,21 @@ from huggingface_hub import InferenceClient
|
|
| 4 |
# Initialize the hugging face inference api client
|
| 5 |
client = InferenceClient(
|
| 6 |
provider="cerebras",
|
| 7 |
-
api_key=""
|
| 8 |
)
|
| 9 |
|
| 10 |
# Define the function to generate stories or poems
|
| 11 |
def generate_text(category, theme, tone, length):
|
| 12 |
prompt = f"Write a {length} {tone} {category} about {theme}."
|
| 13 |
-
|
| 14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
|
| 16 |
# Create Gradio UI
|
| 17 |
with gr.Blocks() as demo:
|
|
|
|
| 4 |
# Initialize the hugging face inference api client
|
| 5 |
client = InferenceClient(
|
| 6 |
provider="cerebras",
|
| 7 |
+
api_key="[REDACTED — this commit exposed a live API key; it must be revoked immediately and supplied via an environment variable / HF Spaces secret (e.g. os.environ["HF_TOKEN"]) instead of being hardcoded]"
|
| 8 |
)
|
| 9 |
|
| 10 |
# Define the function to generate stories or poems
|
| 11 |
def generate_text(category, theme, tone, length):
|
| 12 |
prompt = f"Write a {length} {tone} {category} about {theme}."
|
| 13 |
+
|
| 14 |
+
try:
|
| 15 |
+
completion = client.chat.completions.create(
|
| 16 |
+
model="Qwen/Qwen3-32B",
|
| 17 |
+
messages=[{"role":"user", "content":prompt}],
|
| 18 |
+
)
|
| 19 |
+
return completion.choices[0].message.content
|
| 20 |
+
except Exception as e:
|
| 21 |
+
return f"Error: {e}"
|
| 22 |
|
| 23 |
# Create Gradio UI
|
| 24 |
with gr.Blocks() as demo:
|