jithenderchoudary committed on
Commit
1d8eb5a
·
verified ·
1 Parent(s): db6b113

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -10
app.py CHANGED
@@ -2,6 +2,7 @@ import os
2
  from flask import Flask, render_template, request, jsonify
3
  from huggingface_hub import InferenceClient # Import InferenceClient correctly
4
  from dotenv import load_dotenv
 
5
 
6
  # Load environment variables from .env file
7
  load_dotenv()
@@ -18,6 +19,9 @@ client = InferenceClient(HUGGINGFACE_API_KEY)
18
  def respond(message, history, system_message, max_tokens, temperature, top_p):
19
  messages = [{"role": "system", "content": system_message}]
20
 
 
 
 
21
  # Include message history (FAQs)
22
  for val in history:
23
  if val[0]:
@@ -26,18 +30,21 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
26
  messages.append({"role": "assistant", "content": val[1]})
27
 
28
  messages.append({"role": "user", "content": message})
29
-
30
  # Get response from Hugging Face API (for FAQ)
31
  response = ""
32
- for message in client.chat_completion(
33
- messages,
34
- max_tokens=max_tokens,
35
- stream=True,
36
- temperature=temperature,
37
- top_p=top_p,
38
- ):
39
- token = message.choices[0].delta.content
40
- response += token
 
 
 
41
 
42
  return response
43
 
 
2
  from flask import Flask, render_template, request, jsonify
3
  from huggingface_hub import InferenceClient # Import InferenceClient correctly
4
  from dotenv import load_dotenv
5
+ import json
6
 
7
  # Load environment variables from .env file
8
  load_dotenv()
 
19
def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Build a chat prompt from the conversation and query the Hugging Face API.

    Parameters:
        message: the user's newest message (str).
        history: prior (user, assistant) turns; may arrive JSON-encoded (str),
            in which case it is decoded first.
        system_message: system prompt placed first in the message list.
        max_tokens, temperature, top_p: sampling parameters forwarded to the API.

    Returns:
        str: the assistant's reply text, or the stringified error on failure.
    """
    messages = [{"role": "system", "content": system_message}]

    # Ensure history is a list of tuples (some callers send it JSON-encoded)
    history = json.loads(history) if isinstance(history, str) else history

    # Include message history (FAQs)
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    messages.append({"role": "user", "content": message})

    # Get response from Hugging Face API (for FAQ)
    response = ""
    try:
        # BUG FIX: InferenceClient has no `completion` method, and an
        # InferenceClient built from just an API key cannot serve OpenAI's
        # "gpt-3.5-turbo". Use `chat_completion`, which accepts the chat-format
        # `messages` list directly and targets the client's configured model.
        result = client.chat_completion(
            messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )
        # Chat completions return message objects, not a raw `text` field.
        response = result.choices[0].message.content
    except Exception as e:
        # Best-effort: surface the error text to the caller instead of
        # crashing the request handler. NOTE(review): consider logging the
        # exception server-side rather than echoing it to users.
        response = str(e)

    return response
50