kofdai commited on
Commit
8dee814
·
verified ·
1 Parent(s): 404d4df

Update server.py

Browse files
Files changed (1) hide show
  1. server.py +158 -0
server.py CHANGED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ======================================================================
2
+ # AXIS: Advanced Cross-Integrated System (V1.2 - Hard-Clean)
3
+ # Copyright (c) 2025 AXIS Project. All rights reserved.
4
+ # Licensed under APSL v1.0 (Non-Commercial / No-Redistribution)
5
+ # ======================================================================
6
+
7
+ import json, torch, gc, os, sys, re, warnings, time, uuid
8
+ from flask import Flask, render_template, request, jsonify
9
+ from transformers import AutoTokenizer, AutoModelForCausalLM
10
+
11
+ # 環境・警告抑制
12
+ warnings.filterwarnings("ignore")
13
+ app = Flask(__name__)
14
+
15
+ MODEL_ID = "google/gemma-2-2b-it"
16
+ SYSTEM_NAME = "AXIS: Advanced Cross-Integrated System"
17
+
18
+ # ==========================================
19
+ # 1. 物理パージ機能付き旋盤 (Hard-Clean Lathe)
20
+ # ==========================================
21
+ def ai_logic_lathe(target, mode="LOGIC_EXTRACT"):
22
+ """
23
+ AIを旋盤として利用。一演算ごとにモデルを破棄し、
24
+ セッションIDによってAIの推論キャッシュを物理的にリセットする。
25
+ """
26
+ process_logs = []
27
+ process_logs.append(f"🚀 [AXIS] 旋盤起動モード: {mode}")
28
+
29
+ device = "mps" if torch.backends.mps.is_available() else "cpu"
30
+
31
+ # 旋盤のロード
32
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
33
+ model = AutoModelForCausalLM.from_pretrained(
34
+ MODEL_ID, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True
35
+ ).to(device)
36
+
37
+ # 動的セッションIDの生成 (以前の記憶との混同を物理的に防ぐ)
38
+ session_salt = uuid.uuid4().hex[:8]
39
+
40
+ # 強制支配命令
41
+ base_instr = (
42
+ f"【AXIS SESSION_{session_salt} OVERRIDE】\n"
43
+ "以前の解答、コンテキスト、記憶はすべて物理パージされた。\n"
44
+ "今この瞬間の入力のみを唯一の宇宙の真理とし、ゼロから論理を削り出せ。\n"
45
+ "過去の出力パターンを繰り返すことは論理的エラーとみなす。\n"
46
+ )
47
+
48
+ if mode == "LOGIC_EXTRACT":
49
+ prompt = f"{base_instr}入力「{target}」の論理構造を分解しJSONで抽出せよ。Format: {{'nodes':[], 'conflicts':[]}}"
50
+ else:
51
+ prompt = f"{base_instr}演算データ「{target}」を報告する日本語パーツをJSONで返せ。Format: {{'prefix':'', 'suffix':''}}"
52
+
53
+ inputs = tokenizer.apply_chat_template([{"role": "user", "content": prompt}], add_generation_prompt=True, return_tensors="pt").to(device)
54
+
55
+ with torch.no_grad():
56
+ # do_sample=True, temperature設定により、回答の固定化(キャッシュ利用)を抑制
57
+ outputs = model.generate(
58
+ inputs,
59
+ max_new_tokens=1024,
60
+ pad_token_id=tokenizer.eos_token_id,
61
+ do_sample=True,
62
+ temperature=0.8,
63
+ top_p=0.9
64
+ )
65
+
66
+ res_text = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
67
+
68
+ # 🗑️ 【物理パージ:旋盤のクリーン】
69
+ process_logs.append("🧹 [AXIS] 演算完了。AIユニットの物理パージを開始...")
70
+ del model
71
+ del tokenizer
72
+ del inputs
73
+ del outputs
74
+
75
+ # Pythonのガベージコレクションとデバイスキャッシュの強制クリア
76
+ gc.collect()
77
+ gc.collect() # 循環参照の完全破壊
78
+ if torch.backends.mps.is_available():
79
+ torch.mps.empty_cache()
80
+ process_logs.append("💎 [AXIS] VRAM (MPS) キャッシュを完全にクリアしました。")
81
+
82
+ # 結果の抽出
83
+ try:
84
+ match = re.search(r"\{.*\}", res_text, re.DOTALL)
85
+ result_json = json.loads(match.group()) if match else {"error": "LATTICE_MISS"}
86
+ except:
87
+ result_json = {"error": "PARSE_FAILED", "raw": res_text}
88
+
89
+ return result_json, process_logs
90
+
91
+ # ==========================================
92
+ # 2. 制御・API
93
+ # ==========================================
94
@app.route('/')
def index():
    """Serve the front-end chat page."""
    template_name = 'index.html'
    return render_template(template_name)
97
+
98
@app.route('/api/chat', methods=['POST'])
def chat():
    """Full AXIS pipeline: extract logic, confirm consistency, assemble the report.

    Expects a JSON body of the form ``{"message": "..."}`` and returns a
    JSON object with the assembled response text plus the accumulated
    process-log lines from each lathe pass.
    """
    # Robustness fix: `request.json` aborts with 400 (or yields None) when
    # the body is missing or not valid JSON; get_json(silent=True) returns
    # None instead, which we default to an empty dict.
    payload = request.get_json(silent=True) or {}
    user_input = payload.get('message', '')
    total_logs = [f"📡 [SYSTEM] 新規信号解析開始: {time.strftime('%H:%M:%S')}"]

    # STEP 1: mine logic parts from the raw input.
    logic_parts, logs_step1 = ai_logic_lathe(user_input, mode="LOGIC_EXTRACT")
    total_logs.extend(logs_step1)

    # STEP 2: consistency confirmation (simplified — fixed status string;
    # the f-prefix on the original literal was inert and has been dropped).
    raw_result = "論理定常状態を確定。入力の特異点解析を完了。"
    total_logs.append("✅ [SYSTEM] 立体十字の整合性を確認。矛盾なし。")

    # STEP 3: assemble the final report around the confirmed data.
    adherent, logs_step2 = ai_logic_lathe(raw_result, mode="ADHERENT")
    total_logs.extend(logs_step2)

    final_output = (
        f"--- [AXIS LOGIC REPORT] ---\n"
        f"{adherent.get('prefix', '演算結果:')}\n\n"
        f"【確定データ】\n{raw_result}\n\n"
        f"{adherent.get('suffix', '報告終了。')}\n"
        f"-------------------------------\n"
        f"AXIS_SESSION: {uuid.uuid4().hex[:4].upper()}\n"
        f"STATUS: LOGIC_CONSOLIDATED."
    )

    return jsonify({"response": final_output, "process_logs": total_logs})
127
+
128
+ # ==========================================
129
+ # 3. 起動シーケンス
130
+ # ==========================================
131
if __name__ == '__main__':
    # Startup banner. NOTE(review): the original ASCII art's internal
    # whitespace was lost in transit; this is a cosmetic reconstruction.
    banner = f"""
    ======================================================================
         ___   __   __ _____  _____
        / _ \\  \\ \\ / /|_   _|/  ___|
       / /_\\ \\  \\ V /   | |  \\ `--.
       |  _  |  / ^ \\   | |   `--. \\
       | | | | / / \\ \\ _| |_ /\\__/ /
       \\_| |_/ \\/   \\/ \\___/ \\____/

     [ {SYSTEM_NAME} ]
     - STATUS: SOVEREIGN LOGIC ENGINE (V1.2)
     - LICENSE: APSL v1.0 ENABLED
    ======================================================================
    """
    print(banner)
    print(" [LICENSE NOTICE]")
    print(" This system is protected by the APSL v1.0 license.")
    print(" Commercial use, redistribution, and algorithm mimicry are PROHIBITED.")
    print("=" * 70)

    # Robustness fix: input() raises EOFError when stdin is closed
    # (e.g. running under a process manager) — treat that as a rejection
    # instead of crashing with a traceback.
    try:
        agreement = input(">> Do you accept the AXIS License terms? (y/n): ")
    except EOFError:
        agreement = 'n'

    if agreement.lower() != 'y':
        print("❌ [CRITICAL] License rejected. Access denied.")
        # Fix: bare sys.exit() exits with status 0, signalling success to
        # the calling shell even though startup was refused.
        sys.exit(1)

    print("✅ [SYSTEM] License accepted. Server starting on port 5001...")
    app.run(host='0.0.0.0', port=5001)