coziSoul committed on
Commit
5823317
·
verified ·
1 Parent(s): 47285f7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -49
app.py CHANGED
@@ -1,49 +1,44 @@
1
- # Import render_template to serve HTML files
2
- from flask import Flask, request, render_template
3
- from flask_cors import CORS
4
- from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
5
- import os
6
-
7
- # --- 1. SETUP ---
8
- app = Flask(__name__)
9
- CORS(app)
10
-
11
- MODEL_NAME = "facebook/blenderbot-400M-distill"
12
- model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
13
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
14
- print(f"Successfully loaded model: {MODEL_NAME}")
15
-
16
- # --- 2. API ENDPOINTS ---
17
-
18
- # MODIFIED: This now serves your HTML front-end
19
- @app.route('/')
20
- def home():
21
- """Serves the main HTML page for the chatbot."""
22
- return render_template('index.html')
23
-
24
- @app.route('/chatbot', methods=['POST'])
25
- def chatbot_endpoint():
26
- """The main endpoint to handle chatbot conversations."""
27
- if not request.is_json:
28
- return {"error": "Request must be a JSON"}, 400
29
-
30
- data = request.get_json()
31
- user_input = data.get("prompt")
32
-
33
- if not user_input:
34
- return {"error": "Missing 'prompt' in request body"}, 400
35
-
36
- try:
37
- inputs = tokenizer(user_input, return_tensors="pt")
38
- outputs = model.generate(**inputs, max_length=60)
39
- response_text = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
40
- return {"response": response_text}
41
- except Exception as e:
42
- print(f"Error during model inference: {e}")
43
- return {"error": "Failed to generate a response"}, 500
44
-
45
- # --- 3. RUN THE APP ---
46
- if __name__ == '__main__':
47
- # Check if the app is running in debug mode
48
- is_debug = os.environ.get('FLASK_DEBUG', 'False').lower() == 'true'
49
- app.run(host='0.0.0.0', port=5000, debug=is_debug)
 
1
# Flask serves both the JSON API and the HTML front-end (render_template).
from flask import Flask, request, render_template
from flask_cors import CORS
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# --- 1. SETUP ---
app = Flask(__name__)
CORS(app)  # permit cross-origin browser calls to the API

# Load the conversational model and its tokenizer once, at startup.
MODEL_NAME = "facebook/blenderbot-400M-distill"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
print(f"Successfully loaded model: {MODEL_NAME}")
13
+
14
# --- 2. API ENDPOINTS ---

@app.route('/')
def home():
    """Serve the chatbot's HTML front-end page."""
    page = render_template('index.html')
    return page
19
+
20
@app.route('/chatbot', methods=['POST'])
def chatbot_endpoint():
    """Handle one chatbot turn.

    Expects a JSON body of the form {"prompt": "<user text>"} and returns
    {"response": "<model reply>"} on success. Responds 400 for a malformed
    request and 500 if model inference fails.
    """
    if not request.is_json:
        # FIX: message previously read "Request must be a JSON" (ungrammatical).
        return {"error": "Request must be JSON"}, 400

    data = request.get_json()
    user_input = data.get("prompt")

    # FIX: reject non-string or whitespace-only prompts with a 400 as well.
    # Previously a non-string value (e.g. a number) passed the truthiness
    # check and surfaced as a 500 from the tokenizer.
    if not isinstance(user_input, str) or not user_input.strip():
        return {"error": "Missing 'prompt' in request body"}, 400

    try:
        inputs = tokenizer(user_input, return_tensors="pt")
        # max_length=60 keeps replies short for a snappy chat experience.
        outputs = model.generate(**inputs, max_length=60)
        response_text = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
        return {"response": response_text}
    except Exception as e:
        # Boundary handler: log server-side, return a generic 500 so model
        # internals are not leaked to the client.
        print(f"Error during model inference: {e}")
        return {"error": "Failed to generate a response"}, 500
40
+
41
# --- 3. RUN THE APP ---
if __name__ == '__main__':
    # Port 7860 is the convention for Hugging Face Spaces web apps.
    bind_host, bind_port = '0.0.0.0', 7860
    app.run(host=bind_host, port=bind_port)