anktechsol committed on
Commit
526c5bd
·
verified ·
1 Parent(s): 3698ce5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -14
app.py CHANGED
@@ -16,31 +16,31 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
16
  Market Intelligence Assistant for Indian Businesses powered by Anki-2.5
17
  Provides insights on Indian market trends, business strategies, and more
18
  """
19
- # Construct conversation with system message
20
- conversation = [{"role": "system", "content": system_message}]
21
 
22
  # Add history
23
  for msg in history:
24
  if isinstance(msg, dict):
25
- conversation.append(msg)
 
 
 
 
 
26
  elif len(msg) == 2:
27
- conversation.append({"role": "user", "content": msg[0]})
28
- conversation.append({"role": "assistant", "content": msg[1]})
29
 
30
  # Add current message
31
- conversation.append({"role": "user", "content": message})
32
 
33
  # Generate response
34
- inputs = tokenizer.apply_chat_template(
35
- conversation,
36
- tokenize=True,
37
- add_generation_prompt=True,
38
- return_tensors="pt"
39
- ).to(model.device)
40
 
41
  with torch.no_grad():
42
  outputs = model.generate(
43
- inputs,
44
  max_new_tokens=max_tokens,
45
  temperature=temperature,
46
  top_p=top_p,
@@ -48,7 +48,7 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
48
  pad_token_id=tokenizer.eos_token_id
49
  )
50
 
51
- response = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
52
  return response
53
 
54
  # Create ChatInterface focused on business intelligence
 
16
  Market Intelligence Assistant for Indian Businesses powered by Anki-2.5
17
  Provides insights on Indian market trends, business strategies, and more
18
  """
19
+ # Build prompt manually
20
+ prompt = system_message + "\n\n"
21
 
22
  # Add history
23
  for msg in history:
24
  if isinstance(msg, dict):
25
+ role = msg.get("role", "")
26
+ content = msg.get("content", "")
27
+ if role == "user":
28
+ prompt += f"User: {content}\n"
29
+ elif role == "assistant":
30
+ prompt += f"Assistant: {content}\n"
31
  elif len(msg) == 2:
32
+ prompt += f"User: {msg[0]}\n"
33
+ prompt += f"Assistant: {msg[1]}\n"
34
 
35
  # Add current message
36
+ prompt += f"User: {message}\nAssistant:"
37
 
38
  # Generate response
39
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
 
 
 
 
 
40
 
41
  with torch.no_grad():
42
  outputs = model.generate(
43
+ inputs["input_ids"],
44
  max_new_tokens=max_tokens,
45
  temperature=temperature,
46
  top_p=top_p,
 
48
  pad_token_id=tokenizer.eos_token_id
49
  )
50
 
51
+ response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
52
  return response
53
 
54
  # Create ChatInterface focused on business intelligence