Invescoz committed on
Commit
bffc8fa
·
verified ·
1 Parent(s): 7c71b5a

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +66 -0
app.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
+ import os
4
+ from typing import Generator
5
+
6
+ # Initialize Inference API client
7
+ # Try AstroSage-Llama-3.1-8B; fallback to Llama-3.2-3B-Instruct if unsupported
8
+ model = "AstroMLab/AstroSage-Llama-3.1-8B" # Replace with "meta-llama/Llama-3.2-3B-Instruct" if needed
9
+ client = InferenceClient(model=model, token=os.getenv("HF_TOKEN"))
10
+
11
+ def generate_astronomy_explanation(prompt: str) -> Generator[str, None, None]:
12
+ """
13
+ Generates astronomy-related explanations using the model via Inference API with streaming.
14
+ """
15
+ system_prompt = (
16
+ "You are AstroSage, an expert astronomy assistant like Grok. Given a user prompt about "
17
+ "astronomical phenomena, space exploration, stars, galaxies, cosmology, planetary science, "
18
+ "or astrophysics, provide a clear, educational explanation. Stream the output line by line. "
19
+ "Use bullet points for key points and keep responses concise yet informative."
20
+ )
21
+ messages = [
22
+ {"role": "system", "content": system_prompt},
23
+ {"role": "user", "content": prompt}
24
+ ]
25
+
26
+ # Stream output from Inference API using chat_completion
27
+ for chunk in client.chat_completion(
28
+ messages=messages,
29
+ max_tokens=1000, # Balanced for complete answers and speed
30
+ temperature=0.7,
31
+ top_p=0.9,
32
+ stream=True
33
+ ):
34
+ # Extract content from the chunk
35
+ content = chunk.choices[0].delta.content
36
+ if content: # Only yield non-empty content
37
+ yield content
38
+
39
+ # Gradio interface with streaming
40
+ def live_astronomy_generator(prompt: str):
41
+ """
42
+ Handles streaming output for the Gradio interface.
43
+ """
44
+ output = ""
45
+ for chunk in generate_astronomy_explanation(prompt):
46
+ output += chunk
47
+ yield output
48
+
49
+ # Gradio app
50
+ with gr.Blocks() as demo:
51
+ gr.Markdown("# Invescoz AI Studio: Astronomy Explainer")
52
+ prompt_input = gr.Textbox(
53
+ label="Enter your astronomy question",
54
+ placeholder="e.g., Explain how black holes form or Describe the lifecycle of a star"
55
+ )
56
+ output_display = gr.Textbox(label="Astronomy Explanation", interactive=False, lines=10)
57
+ submit_button = gr.Button("Generate")
58
+
59
+ submit_button.click(
60
+ fn=live_astronomy_generator,
61
+ inputs=prompt_input,
62
+ outputs=output_display
63
+ )
64
+
65
+ # Launch the app (handled by Hugging Face Spaces)
66
+ demo.launch()