Invescoz committed on
Commit
066cc56
·
verified ·
1 Parent(s): 20d2f5a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -34
app.py CHANGED
@@ -1,62 +1,53 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
- import os
4
  from typing import Generator
 
5
 
6
- # Initialize Inference API client
7
- model = "xai-org/grok-2" # Conversational model for astronomy queries
8
-
9
 
10
def generate_astronomy_explanation(prompt: str) -> Generator[str, None, None]:
    """Stream an astronomy explanation for *prompt* from the Inference API.

    Yields each non-empty text delta as it arrives from the chat-completion
    stream, so callers can render output incrementally.
    """
    system_prompt = (
        "You are an expert astronomy assistant like Grok. Given a user prompt about "
        "astronomical phenomena, space exploration, stars, galaxies, cosmology, planetary science, "
        "or astrophysics, provide a clear, educational explanation. Stream the output line by line. "
        "Use bullet points for key points and keep responses concise yet informative."
    )
    conversation = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt},
    ]

    # Stream chat-completion chunks; each chunk carries at most one text delta.
    stream = client.chat_completion(
        messages=conversation,
        max_tokens=1000,  # balanced for complete answers and speed
        temperature=0.7,
        top_p=0.9,
        stream=True,
    )
    for part in stream:
        delta = part.choices[0].delta.content
        # Skip empty deltas (e.g. role-only or final chunks).
        if delta:
            yield delta
37
 
38
  # Gradio interface with streaming
39
def live_astronomy_generator(prompt: str):
    """Adapt the token stream for Gradio: yield the full text accumulated so far."""
    pieces = []
    for piece in generate_astronomy_explanation(prompt):
        pieces.append(piece)
        # Gradio replaces the textbox contents on each yield, so emit the
        # whole transcript accumulated up to this point.
        yield "".join(pieces)
47
 
48
  # Gradio app
49
with gr.Blocks() as demo:
    # Page heading for the astronomy explainer UI.
    gr.Markdown("# Invescoz AI Studio: Astronomy Explainer")
    question_box = gr.Textbox(
        label="Enter your astronomy question",
        placeholder="e.g., Explain how black holes form or Describe the lifecycle of a star",
    )
    answer_box = gr.Textbox(label="Astronomy Explanation", interactive=False, lines=10)
    generate_btn = gr.Button("Generate")

    # Wire the button to the streaming generator; Gradio consumes its yields.
    generate_btn.click(
        fn=live_astronomy_generator,
        inputs=question_box,
        outputs=answer_box,
    )
 
1
import gradio as gr
from llama_cpp import Llama
from typing import Generator
import os

# BUG FIX: Llama(model_path=...) requires a *local* .gguf file path; passing the
# bare Hub repo id "AstroMLab/AstroSage-8B-GGUF" fails because no such file
# exists on disk. Llama.from_pretrained downloads the quantized file from the
# Hugging Face Hub (requires huggingface_hub) and then loads it.
repo_id = "AstroMLab/AstroSage-8B-GGUF"
llm = Llama.from_pretrained(
    repo_id=repo_id,
    filename="*Q8_0.gguf",  # Q8_0 quantization per upstream repo — TODO confirm exact filename
    n_ctx=2048,             # context window
    n_threads=2,            # fits in 16GB RAM with 2 CPU cores
)
9
 
10
def generate_astrology_prediction(prompt: str) -> Generator[str, None, None]:
    """Stream an astrology "fortune" reply for *prompt* from the local llama.cpp model.

    Yields each non-empty text fragment as the model produces it.
    """
    system_prompt = (
        "You are an expert astrologer, specializing in fortune-telling. Given a user prompt "
        "containing details like zodiac sign, birth date, or specific questions, provide predictions "
        "about their future, career, love life, and success. Stream the output line by line. "
        "Use bullet points for key predictions and keep responses engaging and concise. "
        "Despite being trained on astronomy, adapt your knowledge to provide astrology-like insights."
    )
    # Assemble the chat-style prompt the model was set up with.
    # NOTE(review): the <|SYSTEM|>/<|USER|>/<|ASSISTANT|> template is assumed to
    # match this model's expected chat format — confirm against the model card.
    full_prompt = "<|SYSTEM|> {} <|USER|> {} <|ASSISTANT|>".format(system_prompt, prompt)

    # llama.cpp yields completion dicts when stream=True; extract the text of
    # each one and drop empties.
    for piece in llm(full_prompt, max_tokens=1000, temperature=0.7, top_p=0.9, stream=True):
        fragment = piece["choices"][0]["text"]
        if fragment:
            yield fragment
28
 
29
  # Gradio interface with streaming
30
def live_astrology_generator(prompt: str):
    """Adapt the token stream for Gradio: yield the full text accumulated so far."""
    collected = []
    for fragment in generate_astrology_prediction(prompt):
        collected.append(fragment)
        # Gradio replaces the textbox contents on each yield, so emit the
        # whole transcript accumulated up to this point.
        yield "".join(collected)
38
 
39
  # Gradio app
40
with gr.Blocks() as demo:
    # Page heading for the astrology fortune-teller UI.
    gr.Markdown("# Invescoz AI Studio: Astrology Fortune-Teller")
    query_box = gr.Textbox(
        label="Enter your astrology query",
        placeholder="e.g., I'm a Scorpio, born November 5, 1995. What's my future in career and love?",
    )
    fortune_box = gr.Textbox(label="Your Fortune", interactive=False, lines=10)
    predict_btn = gr.Button("Predict")

    # Wire the button to the streaming generator; Gradio consumes its yields.
    predict_btn.click(
        fn=live_astrology_generator,
        inputs=query_box,
        outputs=fortune_box,
    )