Invescoz committed on
Commit
5ba2072
·
verified ·
1 Parent(s): 468090f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -13,9 +13,9 @@ except ImportError:
13
  from llama_cpp import Llama
14
 
15
  # Initialize model
16
- model_path = "AstroSage-8B-BF16.gguf" # Downloaded from AstroMLab/AstroSage-8B-GGUF
17
  llm = Llama.from_pretrained(
18
- repo_id="AstroMLab/AstroSage-8B-GGUF",
19
  filename=model_path,
20
  n_ctx=2048, # Context length for prompts
21
  n_threads=2 # Use 2 CPU cores
@@ -23,14 +23,14 @@ llm = Llama.from_pretrained(
23
 
24
  def generate_astrology_prediction(prompt: str) -> Generator[str, None, None]:
25
  """
26
- Generates astrology-based fortune-telling predictions using AstroSage-8B-BF16.gguf with streaming.
27
  """
28
  system_prompt = (
29
  "You are an expert astrologer, specializing in fortune-telling. Given a user prompt "
30
  "containing details like zodiac sign, birth date, or specific questions, provide predictions "
31
  "about their future, career, love life, and success. Stream the output line by line. "
32
  "Use bullet points for key predictions and keep responses engaging and concise. "
33
- "Despite being trained on astronomy, adapt your knowledge to provide astrology-like insights."
34
  )
35
  full_prompt = f"<|SYSTEM|> {system_prompt}\n<|USER|> {prompt}\n<|ASSISTANT|>"
36
 
 
13
  from llama_cpp import Llama
14
 
15
  # Initialize model
16
+ model_path = "tinyllama-1.1b-chat-v1.0.Q4_0.gguf" # Downloaded from TinyLlama/TinyLlama-1.1B-Chat-v1.0-GGUF
17
  llm = Llama.from_pretrained(
18
+ repo_id="TinyLlama/TinyLlama-1.1B-Chat-v1.0-GGUF",
19
  filename=model_path,
20
  n_ctx=2048, # Context length for prompts
21
  n_threads=2 # Use 2 CPU cores
 
23
 
24
  def generate_astrology_prediction(prompt: str) -> Generator[str, None, None]:
25
  """
26
+ Generates astrology-based fortune-telling predictions using TinyLlama-1.1B-Chat-v1.0 with streaming.
27
  """
28
  system_prompt = (
29
  "You are an expert astrologer, specializing in fortune-telling. Given a user prompt "
30
  "containing details like zodiac sign, birth date, or specific questions, provide predictions "
31
  "about their future, career, love life, and success. Stream the output line by line. "
32
  "Use bullet points for key predictions and keep responses engaging and concise. "
33
+ "If the prompt is vague (e.g., 'Hi'), ask for more details like zodiac sign or birth date."
34
  )
35
  full_prompt = f"<|SYSTEM|> {system_prompt}\n<|USER|> {prompt}\n<|ASSISTANT|>"
36