sravan837 committed on
Commit
1b94782
·
verified ·
1 Parent(s): 5bccccf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -12
app.py CHANGED
@@ -9,7 +9,7 @@ from huggingface_hub import InferenceClient
9
  EXTRACTOR_MODEL = "meta-llama/Meta-Llama-3-8B-Instruct"
10
  PERSONALITY_MODEL = "HuggingFaceH4/zephyr-7b-beta"
11
 
12
- # Default Data: 30 Personality-focused messages
13
  DEFAULT_LOGS = """
14
  1. User: I feel completely drained after large social gatherings.
15
  2. User: I enjoy abstract theories more than practical details.
@@ -51,22 +51,33 @@ def extract_memory(chat_logs, hf_token):
51
  client = InferenceClient(token=hf_token)
52
 
53
  system_prompt = """
54
- Analyze the chat logs and extract a psychological profile.
55
- JSON KEYS: "traits" (Big Five/MBTI), "values" (Logic, Harmony, etc), "struggles" (Anxiety, Procrastination).
56
- OUTPUT: Valid JSON only.
 
57
  """
58
 
59
- prompt = f"<|system|>\n{system_prompt}</s>\n<|user|>\n{chat_logs}</s>\n<|assistant|>"
 
 
 
 
60
 
61
  try:
62
- response = client.text_generation(
63
  model=EXTRACTOR_MODEL,
64
- prompt=prompt,
65
- max_new_tokens=600,
66
  temperature=0.1
67
  )
 
68
  # Parse JSON
69
- text = response.strip()
 
 
 
 
 
70
  start = text.find("{")
71
  end = text.rfind("}") + 1
72
  return json.dumps(json.loads(text[start:end]), indent=2)
@@ -127,16 +138,16 @@ async def generate_response_and_audio(message, memory_json, persona, hf_token):
127
 
128
  return text_reply, output_file
129
 
130
- # Wrapper for Gradio (handling async)
131
  def process_interaction(message, memory_json, persona, hf_token):
132
  return asyncio.run(generate_response_and_audio(message, memory_json, persona, hf_token))
133
 
134
  # --- GRADIO UI ---
135
  with gr.Blocks(title="Personality Engine") as demo:
136
- gr.Markdown("# Personality Engine: Tones")
137
 
138
  with gr.Row():
139
- token_input = gr.Textbox(label="Hugging Face Token", type="password", placeholder="hf_...")
140
 
141
  with gr.Row():
142
  # Left Column: Memory
 
9
  EXTRACTOR_MODEL = "meta-llama/Meta-Llama-3-8B-Instruct"
10
  PERSONALITY_MODEL = "HuggingFaceH4/zephyr-7b-beta"
11
 
12
+ # Default Data
13
  DEFAULT_LOGS = """
14
  1. User: I feel completely drained after large social gatherings.
15
  2. User: I enjoy abstract theories more than practical details.
 
51
  client = InferenceClient(token=hf_token)
52
 
53
  system_prompt = """
54
+ You are a Psychological Analyst. Analyze the chat logs.
55
+ Extract a profile with these JSON KEYS:
56
+ "traits" (Big Five/MBTI), "values" (Logic, Harmony, etc), "struggles" (Anxiety, Procrastination).
57
+ OUTPUT: Valid JSON only. Do not add markdown blocks like ```json.
58
  """
59
 
60
+ # FIXED: Switched from text_generation to chat_completion
61
+ messages = [
62
+ {"role": "system", "content": system_prompt},
63
+ {"role": "user", "content": chat_logs}
64
+ ]
65
 
66
  try:
67
+ response = client.chat_completion(
68
  model=EXTRACTOR_MODEL,
69
+ messages=messages,
70
+ max_tokens=600,
71
  temperature=0.1
72
  )
73
+
74
  # Parse JSON
75
+ text = response.choices[0].message.content.strip()
76
+
77
+ # Cleaning potential markdown formatting from LLM response
78
+ if "```json" in text:
79
+ text = text.replace("```json", "").replace("```", "")
80
+
81
  start = text.find("{")
82
  end = text.rfind("}") + 1
83
  return json.dumps(json.loads(text[start:end]), indent=2)
 
138
 
139
  return text_reply, output_file
140
 
141
+ # Wrapper for Gradio
142
  def process_interaction(message, memory_json, persona, hf_token):
143
  return asyncio.run(generate_response_and_audio(message, memory_json, persona, hf_token))
144
 
145
  # --- GRADIO UI ---
146
  with gr.Blocks(title="Personality Engine") as demo:
147
+ gr.Markdown("# 🧠 Personality Engine: Memory & Tones")
148
 
149
  with gr.Row():
150
+ token_input = gr.Textbox(label="Hugging Face Token", type="password", placeholder="Paste HF Token here...")
151
 
152
  with gr.Row():
153
  # Left Column: Memory