Aobangaming committed on
Commit
b86532d
·
verified ·
1 Parent(s): 4e8d78a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -50
app.py CHANGED
@@ -1,50 +1,53 @@
import contextlib
from flask import Flask, request, jsonify
from flask_cors import CORS
from scipy import io
import torch
# NOTE(review): contextlib, scipy.io and torch are not referenced anywhere in
# this file — presumably needed by ai_thingy's import side effects; confirm
# before removing.
# Import the specific functions from your ai_thingy script
# Make sure your AI script is named ai_thingy.py
from ai_thingy import (
    initialize_or_retrain,
    generate_text,
    current_model,
    current_tokenizer,
    device,
    MAX_SEQ_LENGTH
)
# The module itself is also imported: the request handler reads attributes
# via ai_thingy.<name> so that rebinding (e.g. after retraining) is visible,
# which the names imported above would not be.
import ai_thingy

app = Flask(__name__)
CORS(app)  # This allows your JS to talk to this Python server

# Initialize the model once when the server starts (blocking: the server
# does not accept requests until training/loading finishes).
print("[Bridge] Waking up the Aoban Brain...")
initialize_or_retrain(initial_train=True, epochs=10)
@app.route('/ask', methods=['POST'])
def ask():
    """Handle POST /ask: generate text for the JSON body's "input" field.

    Expects a JSON body like {"input": "<prompt>"}. Returns HTTP 200 with
    {"response": <generated text>, "logs": "Generation successful."}, or
    HTTP 500 with the error message if generation fails.
    """
    try:
        # request.json raises a 400 (or yields None) when the body is
        # missing or not application/json, which would bypass our error
        # handling / crash on .get(). get_json(silent=True) returns None
        # instead; fall back to an empty dict so .get() is always safe.
        data = request.get_json(silent=True) or {}
        user_prompt = data.get("input", "")

        # Look the model/tokenizer up through the module (ai_thingy.X)
        # rather than the names imported at startup, so a retrain that
        # rebinds ai_thingy.current_model is picked up here.
        # Explicit sampling settings: top_k=3, penalty=3.0, temperature=1.0.
        response = ai_thingy.generate_text(
            ai_thingy.current_model,
            ai_thingy.current_tokenizer,
            user_prompt,
            ai_thingy.MAX_SEQ_LENGTH,
            ai_thingy.device,
            top_k=3,
            penalty=3.0,
            temperature=1.0
        )

        return jsonify({"response": response, "logs": "Generation successful."})

    except Exception as e:
        # Broad catch is deliberate: this is the top-level request boundary,
        # and we want a JSON error payload instead of an HTML 500 page.
        print(f"[CRASH] Aoban Brain Error: {e}")
        return jsonify({"response": f"Brain Error: {str(e)}", "logs": str(e)}), 500
49
- if __name__ == '__main__':
50
- app.run(port=5000, debug=False)
 
 
 
 
1
+ import contextlib
2
+ from flask import Flask, request, jsonify
3
+ from flask_cors import CORS
4
+ from scipy import io
5
+ import torch
6
+ # Import the specific functions from your ai_thingy script
7
+ # Make sure your AI script is named ai_thingy.py
8
+ from ai_thingy import (
9
+ initialize_or_retrain,
10
+ generate_text,
11
+ current_model,
12
+ current_tokenizer,
13
+ device,
14
+ MAX_SEQ_LENGTH
15
+ )
16
+ import ai_thingy
17
+
18
+ app = Flask(__name__)
19
+ CORS(app) # This allows your JS to talk to this Python server
20
+
21
+ # Initialize the model once when the server starts
22
+ print("[Bridge] Waking up the Aoban Brain...")
23
+ initialize_or_retrain(initial_train=True, epochs=10)
24
+
25
+ @app.route('/ask', methods=['POST'])
26
+ def ask():
27
+ try:
28
+ data = request.json
29
+ user_prompt = data.get("input", "")
30
+
31
+ # Use the imported generate_text
32
+ # We pass None for penalty/temp to let it use the script's defaults
33
+ response = ai_thingy.generate_text(
34
+ ai_thingy.current_model,
35
+ ai_thingy.current_tokenizer,
36
+ user_prompt,
37
+ ai_thingy.MAX_SEQ_LENGTH,
38
+ ai_thingy.device,
39
+ top_k=3,
40
+ penalty=3.0,
41
+ temperature=1.0
42
+ )
43
+
44
+ return jsonify({"response": response, "logs": "Generation successful."})
45
+
46
+ except Exception as e:
47
+ print(f"[CRASH] Aoban Brain Error: {e}")
48
+ return jsonify({"response": f"Brain Error: {str(e)}", "logs": str(e)}), 500
49
+ if __name__ == "__main__":
50
+ # The cloud will tell us which port to use; default to 7860 for Hugging Face
51
+ import os
52
+ port = int(os.environ.get("PORT", 7860))
53
+ app.run(host="0.0.0.0", port=port)