rahul7star commited on
Commit
161ef48
·
verified ·
1 Parent(s): 4ed1f6e

Update app_low.py

Browse files
Files changed (1) hide show
  1. app_low.py +18 -4
app_low.py CHANGED
@@ -20,6 +20,7 @@ pipe = pipeline(
20
  device=device, # 0 for GPU, -1 for CPU
21
  )
22
 
 
23
  # ============================================================
24
  # 2️⃣ Define the generation function (chat-template style)
25
  # ============================================================
@@ -51,6 +52,18 @@ def enhance_prompt(user_prompt, temperature, max_tokens, chat_history):
51
 
52
  return chat_history
53
 
 
 
 
 
 
 
 
 
 
 
 
 
54
 
55
  # ===================== Prompt Enhancer Function =====================
56
  def enhance_prompt1(user_prompt, temperature, max_tokens, chat_history):
@@ -65,11 +78,12 @@ def enhance_prompt1(user_prompt, temperature, max_tokens, chat_history):
65
  output = pipe(prompt, max_new_tokens=256)
66
  enhanced_text = output[0]['generated_text']
67
  print(enhanced_text)
68
-
69
- # Wrap in a chat message format
70
- chat_history = chat_history or []
71
  chat_history.append({"role": "user", "content": user_prompt})
72
- chat_history.append({"role": "assistant", "content": enhanced_text})
 
 
73
 
74
  return chat_history
75
 
 
20
  device=device, # 0 for GPU, -1 for CPU
21
  )
22
 
23
+
24
  # ============================================================
25
  # 2️⃣ Define the generation function (chat-template style)
26
  # ============================================================
 
52
 
53
  return chat_history
54
 
55
def extract_later_part(user_prompt, generated_text):
    """Return the portion of *generated_text* beyond the echoed *user_prompt*.

    The text-generation pipeline echoes the prompt at the start of its
    output; this strips that echo (case-insensitively) so only the newly
    added details remain.

    Args:
        user_prompt: The original prompt the user typed.
        generated_text: Full text produced by the model.

    Returns:
        The generated text with the leading prompt echo (and any comma/space
        separator immediately after it) removed; the whole stripped text
        when it does not start with the prompt.
    """
    gen_clean = generated_text.strip()
    prompt_clean = user_prompt.strip()

    # An empty prompt would trivially "match" every output; return as-is.
    if not prompt_clean:
        return gen_clean

    # Case-insensitive prefix match. Compare lowered copies but slice by the
    # original prompt length, since .lower() can change string length for
    # some Unicode characters.
    if gen_clean[:len(prompt_clean)].lower() == prompt_clean.lower():
        # lstrip (not strip) so trailing punctuation of the new details
        # is preserved; only the separator after the echo is dropped.
        return gen_clean[len(prompt_clean):].lstrip(", ").strip()
    return gen_clean
67
 
68
  # ===================== Prompt Enhancer Function =====================
69
  def enhance_prompt1(user_prompt, temperature, max_tokens, chat_history):
 
78
  output = pipe(prompt, max_new_tokens=256)
79
  enhanced_text = output[0]['generated_text']
80
  print(enhanced_text)
81
+ enhanced_text = output[0]['generated_text']
82
+ later_part = extract_later_part(user_prompt, enhanced_text)
 
83
  chat_history.append({"role": "user", "content": user_prompt})
84
+ chat_history.append({"role": "assistant", "content": later_part})
85
+
86
+
87
 
88
  return chat_history
89