Invescoz committed on
Commit
5b0fc48
·
verified ·
1 Parent(s): 0f4565b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -4
app.py CHANGED
@@ -4,16 +4,15 @@ import os
4
  from typing import Generator
5
 
6
  # Initialize Inference API client
7
- # Try AstroSage-Llama-3.1-8B; fallback to Llama-3.2-3B-Instruct if unsupported
8
- model = "AstroMLab/AstroSage-Llama-3.1-8B" # Replace with "meta-llama/Llama-3.2-3B-Instruct" if needed
9
  client = InferenceClient(model=model, token=os.getenv("HF_TOKEN"))
10
 
11
  def generate_astronomy_explanation(prompt: str) -> Generator[str, None, None]:
12
  """
13
- Generates astronomy-related explanations using the model via Inference API with streaming.
14
  """
15
  system_prompt = (
16
- "You are AstroSage, an expert astronomy assistant like Grok. Given a user prompt about "
17
  "astronomical phenomena, space exploration, stars, galaxies, cosmology, planetary science, "
18
  "or astrophysics, provide a clear, educational explanation. Stream the output line by line. "
19
  "Use bullet points for key points and keep responses concise yet informative."
 
4
  from typing import Generator
5
 
6
  # Initialize Inference API client
7
+ model = "xAI/grok-3.2-mini" # Conversational model for astronomy queries
 
8
  client = InferenceClient(model=model, token=os.getenv("HF_TOKEN"))
9
 
10
  def generate_astronomy_explanation(prompt: str) -> Generator[str, None, None]:
11
  """
12
+ Generates astronomy-related explanations using xAI/grok-3.2-mini via Inference API with streaming.
13
  """
14
  system_prompt = (
15
+ "You are an expert astronomy assistant like Grok. Given a user prompt about "
16
  "astronomical phenomena, space exploration, stars, galaxies, cosmology, planetary science, "
17
  "or astrophysics, provide a clear, educational explanation. Stream the output line by line. "
18
  "Use bullet points for key points and keep responses concise yet informative."