Kezovic committed on
Commit
598abe4
·
verified ·
1 Parent(s): 8f34be1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -19
app.py CHANGED
@@ -11,9 +11,8 @@ MAX_NEW_TOKENS = 400
11
  TEMPERATURE = 0.4 # Note: min_p = 0.1 is kept as requested in the configuration from the previous prompt
12
 
13
  # --- Model Loading ---
14
- llm = None
15
  def load_llm():
16
- global llm
17
  print("Downloading model...")
18
  try:
19
  model_path = hf_hub_download(repo_id=MODEL_REPO, filename=MODEL_FILE)
@@ -27,7 +26,9 @@ def load_llm():
27
  print("Model loaded successfully!")
28
  except Exception as e:
29
  print(f"Error loading model: {e}")
30
- load_llm()
 
 
31
 
32
  # --- Persona and Format Maps (Hardened Constraints) ---
33
  persona_map = {
@@ -63,10 +64,6 @@ def generate_poem(format_type, persona, topic, progress=gr.Progress()):
63
  # 1. VISUAL FEEDBACK & Error Checks
64
  progress(0, desc="Consulting the Muse...")
65
  time.sleep(0.2)
66
- if not llm:
67
- return "Error: Model not loaded."
68
- if not topic:
69
- return "Please enter a topic!"
70
 
71
  # --- POEM GENERATION ---
72
  progress(0.2, desc=f"Summoning {persona} and drafting poem...")
@@ -74,7 +71,7 @@ def generate_poem(format_type, persona, topic, progress=gr.Progress()):
74
  selected_constraint = format_map.get(format_type, "Write a poem.")
75
 
76
  full_poem_prompt = build_poem_prompt(selected_voice, selected_constraint, format_type, topic)
77
- output = llm(
78
  prompt=full_poem_prompt,
79
  max_tokens=MAX_NEW_TOKENS,
80
  temperature=TEMPERATURE,
@@ -84,7 +81,8 @@ def generate_poem(format_type, persona, topic, progress=gr.Progress()):
84
  )
85
 
86
  # Clean up output
87
- poem_text = output['choices'][0]['text'].strip()
 
88
 
89
  # Simple post-processing to clean up any residual instruction text if the model echoes it
90
  # We are trusting the instruction "Your output must contain ONLY the poem, nothing else."
@@ -95,10 +93,6 @@ def generate_poem(format_type, persona, topic, progress=gr.Progress()):
95
 
96
  # --- LLM Function: Step 2 (Analyze Poem - SIMPLIFIED) ---
97
  def analyze_poem(poem_text, progress=gr.Progress()):
98
- if not llm:
99
- return "Error: Model not loaded."
100
- if not poem_text or poem_text.startswith("Error:") or poem_text.startswith("Please enter"):
101
- return "Please generate a valid poem first!"
102
 
103
  progress(0, desc="Analyzing style for literary match...")
104
  time.sleep(0.2)
@@ -110,7 +104,7 @@ def analyze_poem(poem_text, progress=gr.Progress()):
110
  f"POEM:\n{poem_text}\n"
111
  )
112
 
113
- output_poet = llm(
114
  prompt=poet_prompt,
115
  max_tokens=200, # Keep this response short
116
  temperature=0.4,
@@ -119,11 +113,8 @@ def analyze_poem(poem_text, progress=gr.Progress()):
119
  )
120
 
121
  # Clean up the output using simple string manipulation
122
- poet_suggestion = output_poet['choices'][0]['text'].strip()
123
-
124
- # Clean up any potential instruction echo or partial output
125
- if poet_suggestion.startswith("STRICT OUTPUT FORMAT:"):
126
- poet_suggestion = poet_suggestion[len("STRICT OUTPUT FORMAT:"):].strip()
127
 
128
  progress(1.0, desc="Analysis complete!")
129
 
 
11
  TEMPERATURE = 0.4 # Note: min_p = 0.1 is kept as requested in the configuration from the previous prompt
12
 
13
  # --- Model Loading ---
 
14
  def load_llm():
15
+ llm = None
16
  print("Downloading model...")
17
  try:
18
  model_path = hf_hub_download(repo_id=MODEL_REPO, filename=MODEL_FILE)
 
26
  print("Model loaded successfully!")
27
  except Exception as e:
28
  print(f"Error loading model: {e}")
29
+ return llm
30
+ llm_generator = load_llm()
31
+ llm_analyst = load_llm()
32
 
33
  # --- Persona and Format Maps (Hardened Constraints) ---
34
  persona_map = {
 
64
  # 1. VISUAL FEEDBACK & Error Checks
65
  progress(0, desc="Consulting the Muse...")
66
  time.sleep(0.2)
 
 
 
 
67
 
68
  # --- POEM GENERATION ---
69
  progress(0.2, desc=f"Summoning {persona} and drafting poem...")
 
71
  selected_constraint = format_map.get(format_type, "Write a poem.")
72
 
73
  full_poem_prompt = build_poem_prompt(selected_voice, selected_constraint, format_type, topic)
74
+ output = llm_generator(
75
  prompt=full_poem_prompt,
76
  max_tokens=MAX_NEW_TOKENS,
77
  temperature=TEMPERATURE,
 
81
  )
82
 
83
  # Clean up output
84
+ #poem_text = output['choices'][0]['text'].strip()
85
+ poem_text = output
86
 
87
  # Simple post-processing to clean up any residual instruction text if the model echoes it
88
  # We are trusting the instruction "Your output must contain ONLY the poem, nothing else."
 
93
 
94
  # --- LLM Function: Step 2 (Analyze Poem - SIMPLIFIED) ---
95
  def analyze_poem(poem_text, progress=gr.Progress()):
 
 
 
 
96
 
97
  progress(0, desc="Analyzing style for literary match...")
98
  time.sleep(0.2)
 
104
  f"POEM:\n{poem_text}\n"
105
  )
106
 
107
+ output_poet = llm_analyst(
108
  prompt=poet_prompt,
109
  max_tokens=200, # Keep this response short
110
  temperature=0.4,
 
113
  )
114
 
115
  # Clean up the output using simple string manipulation
116
+ #poet_suggestion = output_poet['choices'][0]['text'].strip()
117
+ poet_suggestion = output_poet
 
 
 
118
 
119
  progress(1.0, desc="Analysis complete!")
120