kofdai committed on
Commit
e1a8749
·
verified ·
1 Parent(s): d4d4f7b

Update server.py

Browse files
Files changed (1) hide show
  1. server.py +67 -97
server.py CHANGED
@@ -1,5 +1,5 @@
1
  # ======================================================================
2
- # AXIS: Advanced Cross-Integrated System (V1.2 - Space Optimized)
3
  # Copyright (c) 2025 AXIS Project. All rights reserved.
4
  # Licensed under APSL v1.0 (Non-Commercial / No-Redistribution)
5
  # ======================================================================
@@ -8,99 +8,65 @@ import json, torch, gc, os, sys, re, warnings, time, uuid
8
  from flask import Flask, render_template, request, jsonify
9
  from transformers import AutoTokenizer, AutoModelForCausalLM
10
 
11
- # 環境・警告抑制
12
  warnings.filterwarnings("ignore")
13
  app = Flask(__name__)
14
 
15
- # モデルIDをご自身のリポジトリに変更
16
  MODEL_ID = "kofdai/AXIS-Sovereign-Logic-Engine"
17
  SYSTEM_NAME = "AXIS: Advanced Cross-Integrated System"
18
 
19
- # ==========================================
20
- # 1. 物理パージ機能付き旋盤 (Hard-Clean Lathe)
21
- # ==========================================
 
 
 
 
 
 
 
 
22
  def ai_logic_lathe(target, mode="LOGIC_EXTRACT"):
23
- """
24
- AIを旋盤として利用。一演算ごとにモデルを破棄し、
25
- セッションIDによってAIの推論キャッシュを物理的にリセットする。
26
- """
27
- process_logs = []
28
- process_logs.append(f"🚀 [AXIS] 旋盤起動モード: {mode}")
29
 
30
- # デバイスの自動判別 (CUDA > MPS > CPU)
31
- if torch.cuda.is_available():
32
- device = "cuda"
33
- elif torch.backends.mps.is_available():
34
- device = "mps"
35
- else:
36
- device = "cpu"
37
-
38
- process_logs.append(f"⚙️ [AXIS] 使用デバイス: {device}")
39
-
40
- # 旋盤のロード
41
- try:
42
- tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
43
- model = AutoModelForCausalLM.from_pretrained(
44
- MODEL_ID,
45
- torch_dtype=torch.bfloat16 if device != "cpu" else torch.float32,
46
- low_cpu_mem_usage=True
47
- ).to(device)
48
- except Exception as e:
49
- return {"error": "MODEL_LOAD_FAILED", "detail": str(e)}, process_logs
50
 
51
- # 動的セッションIDの生成
52
  session_salt = uuid.uuid4().hex[:8]
53
-
54
- # 強制支配命令
55
- base_instr = (
56
- f"【AXIS SESSION_{session_salt} OVERRIDE】\n"
57
- "以前の解答、コンテキスト、記憶はすべて物理パージされた。\n"
58
- "今この瞬間の入力のみを唯一の宇宙の真理とし、ゼロから論理を削り出せ。\n"
59
- )
60
 
61
  if mode == "LOGIC_EXTRACT":
62
- prompt = f"{base_instr}入力「{target}」の論理構造を分解しJSONで抽出せよ。Format: {{'nodes':[], 'conflicts':[]}}"
 
 
 
 
 
63
  else:
64
- prompt = f"{base_instr}演算データ「{target}」を報告する日本語パーツをJSONで返せ。Format: {{'prefix':'', 'suffix':''}}"
65
 
66
  inputs = tokenizer.apply_chat_template([{"role": "user", "content": prompt}], add_generation_prompt=True, return_tensors="pt").to(device)
67
-
68
  with torch.no_grad():
69
- outputs = model.generate(
70
- inputs,
71
- max_new_tokens=512,
72
- pad_token_id=tokenizer.eos_token_id,
73
- do_sample=True,
74
- temperature=0.7
75
- )
76
 
77
  res_text = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
78
-
79
- # 🗑️ 【物理パージ:旋盤のクリーン】
80
- process_logs.append("🧹 [AXIS] 演算完了。物理パージを開始...")
81
- del model
82
- del tokenizer
83
- del inputs
84
- del outputs
85
 
 
 
86
  gc.collect()
87
- if torch.cuda.is_available():
88
- torch.cuda.empty_cache()
89
- elif torch.backends.mps.is_available():
90
- torch.mps.empty_cache()
91
 
92
- # 結果の抽出
93
  try:
94
  match = re.search(r"\{.*\}", res_text, re.DOTALL)
95
- result_json = json.loads(match.group()) if match else {"error": "LATTICE_MISS"}
96
  except:
97
- result_json = {"error": "PARSE_FAILED", "raw": res_text}
98
-
99
- return result_json, process_logs
100
 
101
- # ==========================================
102
- # 2. 制御・API
103
- # ==========================================
104
  @app.route('/')
105
  def index():
106
  return render_template('index.html')
@@ -108,18 +74,27 @@ def index():
108
  @app.route('/api/chat', methods=['POST'])
109
  def chat():
110
  user_input = request.json.get('message', '')
111
- total_logs = [f"📡 [SYSTEM] 新規信号解析開始: {time.strftime('%H:%M:%S')}"]
112
 
113
- # STEP 1: 論理パーツ採掘
114
- logic_parts, logs_step1 = ai_logic_lathe(user_input, mode="LOGIC_EXTRACT")
115
- total_logs.extend(logs_step1)
116
 
117
- raw_result = f"論理定常状態を確定。入力の特異点解析を完了。"
118
- total_logs.append("[SYSTEM] 立体十字の整合性を確認。矛盾なし。")
 
 
 
 
 
 
 
 
 
 
 
119
 
120
- # STEP 2: 最終報告アセンブル
121
- adherent, logs_step2 = ai_logic_lathe(raw_result, mode="ADHERENT")
122
- total_logs.extend(logs_step2)
123
 
124
  final_output = (
125
  f"--- [AXIS LOGIC REPORT] ---\n"
@@ -127,31 +102,26 @@ def chat():
127
  f"【確定データ】\n{raw_result}\n\n"
128
  f"{adherent.get('suffix', '報告終了。')}\n"
129
  f"-------------------------------\n"
 
130
  f"STATUS: LOGIC_CONSOLIDATED."
131
  )
132
-
133
  return jsonify({"response": final_output, "process_logs": total_logs})
134
-
135
  # ==========================================
136
- # 3. 起動シーケンス (Space Optimized)
137
  # ==========================================
138
  if __name__ == '__main__':
139
- banner = f"""
140
- ======================================================================
141
- ___ _ __ _____ _____
142
- / _ \ \ \ / / |_ _| / ___|
143
- / /_\ \ \ V / | | \ `--.
144
- | _ | / \ | | `--. \\
145
- | | | | / /^\\ _| |_ /\\__/ /
146
- \_| |_/ \\/ \\ \\___/ \\____/
147
-
148
- [ {SYSTEM_NAME} ]
149
- - STATUS: SOVEREIGN LOGIC ENGINE (V1.2)
150
- - LICENSE: APSL v1.0 ENABLED (AUTO-ACCEPTED)
151
- ======================================================================
152
  """
153
  print(banner)
154
- print("🚀 [SYSTEM] Starting server on port 7860...")
 
 
155
 
156
- # Space環境ではホスト 0.0.0.0、ポート 7860 が必須
157
- app.run(host='0.0.0.0', port=7860)
 
1
  # ======================================================================
2
+ # AXIS: Advanced Cross-Integrated System (V1.3 - Deep Lattice)
3
  # Copyright (c) 2025 AXIS Project. All rights reserved.
4
  # Licensed under APSL v1.0 (Non-Commercial / No-Redistribution)
5
  # ======================================================================
 
8
  from flask import Flask, render_template, request, jsonify
9
  from transformers import AutoTokenizer, AutoModelForCausalLM
10
 
 
11
  warnings.filterwarnings("ignore")
12
  app = Flask(__name__)
13
 
 
14
# Model repository on the Hugging Face Hub and the public system name.
MODEL_ID = "kofdai/AXIS-Sovereign-Logic-Engine"
SYSTEM_NAME = "AXIS: Advanced Cross-Integrated System"

# License text displayed verbatim; treat as an opaque runtime string.
LICENSE_TERMS = """
======================================================================
[AXIS PROPRIETARY SOURCE-AVAILABLE LICENSE (APSL) v1.0]
----------------------------------------------------------------------
1. 商用利用の禁止:本システムを直接的・間接的な収益化に用いることを禁じます。
2. 再配布の禁止:ソースコード、モデル重み、論理構造の無断配布を禁じます。
3. 統治ロジックの保護:AIを旋盤とし物理パージを行う設計の模倣を禁じます。
4. 演算の不可逆性:本システムの報告は、一過性推論の結果としての定数である。
======================================================================
"""
28
def ai_logic_lathe(target, mode="LOGIC_EXTRACT"):
    """Run one isolated inference pass (a "lathe" cycle) over *target*.

    The tokenizer and model are loaded fresh for every call and explicitly
    deleted afterwards, so no weights or inference cache survive between
    requests ("hard purge" design).

    Parameters
    ----------
    target : str
        User text (``LOGIC_EXTRACT`` mode) or an intermediate result string
        (any other mode) fed to the model.
    mode : str
        ``"LOGIC_EXTRACT"`` decomposes the input into a 3-layer logic
        lattice; any other value asks for Japanese report prefix/suffix
        parts.

    Returns
    -------
    tuple[dict, list[str]]
        The parsed JSON payload (or an ``{"error": ...}`` dict) and the
        per-call process log lines.
    """
    process_logs = [f"🚀 [AXIS] 旋盤起動: {mode}"]

    # Device priority: CUDA > Apple MPS > CPU.
    device = "cuda" if torch.cuda.is_available() else ("mps" if torch.backends.mps.is_available() else "cpu")

    # Load fresh per call; surface load failures (gated repo, OOM on small
    # Spaces hardware) as a structured error instead of a 500 crash.
    try:
        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_ID,
            torch_dtype=torch.bfloat16 if device != "cpu" else torch.float32,
            low_cpu_mem_usage=True,
        ).to(device)
    except Exception as e:
        return {"error": "MODEL_LOAD_FAILED", "detail": str(e)}, process_logs

    # Per-call salt so the override prompt differs between sessions.
    session_salt = uuid.uuid4().hex[:8]
    base_instr = f"【AXIS SESSION_{session_salt} OVERRIDE】\n記憶パージ完了。対象の論理的解像度を最大化せよ。\n"

    if mode == "LOGIC_EXTRACT":
        prompt = (
            f"{base_instr}入力「{target}」を多角的に分解せよ。\n"
            "物理的特性、機能的意味、抽象的概念の3層から要素を抽出すること。\n"
            "必ず以下のJSON形式のみを出力せよ。解説は一切不要。\n"
            "Format: {\"subject\":\"対象名称\", \"lattice\":{\"物理\":\"\", \"意味\":\"\", \"背景\":\"\"}, \"conflicts\":[]}"
        )
    else:
        prompt = f"{base_instr}演算データ「{target}」を報告する日本語パーツをJSONで返せ。Format: {{\"prefix\":\"\", \"suffix\":\"\"}}"

    inputs = tokenizer.apply_chat_template(
        [{"role": "user", "content": prompt}], add_generation_prompt=True, return_tensors="pt"
    ).to(device)
    with torch.no_grad():
        outputs = model.generate(inputs, max_new_tokens=512, pad_token_id=tokenizer.eos_token_id, do_sample=True, temperature=0.7)

    # Decode only the newly generated tokens (skip the prompt prefix).
    res_text = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)

    # 🗑️ Hard purge: drop every reference before the next request.
    del model, tokenizer, inputs, outputs
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    elif torch.backends.mps.is_available():
        torch.mps.empty_cache()

    # Extract the first {...} span from the reply. Narrow except: a missing
    # match raises AttributeError on .group(), bad JSON raises JSONDecodeError;
    # anything else is a real bug and should propagate.
    try:
        match = re.search(r"\{.*\}", res_text, re.DOTALL)
        return json.loads(match.group()), process_logs
    except (AttributeError, json.JSONDecodeError):
        return {"error": "LATTICE_MISS", "raw": res_text}, process_logs
 
 
69
 
 
 
 
70
@app.route('/')
def index():
    """Serve the single-page chat UI."""
    page = render_template('index.html')
    return page
 
74
@app.route('/api/chat', methods=['POST'])
def chat():
    """Two-pass chat endpoint.

    Pass 1 (LOGIC_EXTRACT) decomposes the user message into a logic
    lattice; pass 2 (ADHERENT) wraps the formatted result in report
    prefix/suffix parts. Returns JSON: ``{"response", "process_logs"}``.
    """
    # silent=True avoids Flask raising 400 (or returning None) on a
    # missing or non-JSON request body.
    payload = request.get_json(silent=True) or {}
    user_input = payload.get('message', '')
    total_logs = [f"📡 [SYSTEM] 信号解析開始: {time.strftime('%H:%M:%S')}"]

    # STEP 1: lattice extraction.
    logic_parts, logs1 = ai_logic_lathe(user_input, mode="LOGIC_EXTRACT")
    total_logs.extend(logs1)

    if "error" in logic_parts:
        raw_result = f"論理特異点を検知。入力「{user_input[:10]}」を非定常信号として処理。解析不能。"
    else:
        subj = logic_parts.get('subject', user_input)
        lat = logic_parts.get('lattice', {})
        lattice_str = " | ".join([f"{k}:{v}" for k, v in lat.items()])
        raw_result = (
            f"対象「{subj}」を論理格子(Lattice)へ展開完了。\n"
            f"【解析格子】{lattice_str}\n"
            f"矛盾検知: {len(logic_parts.get('conflicts', []))}。"
        )
        # Only log the consistency check as confirmed on the success path;
        # previously this success message was appended even after an error.
        total_logs.append("✅ [SYSTEM] 立体十字の整合性を確認。")

    # STEP 2: report framing.
    adherent, logs2 = ai_logic_lathe(raw_result, mode="ADHERENT")
    total_logs.extend(logs2)

    final_output = (
        f"--- [AXIS LOGIC REPORT] ---\n"
        # NOTE(review): this prefix line is elided by the diff hunk header
        # in the source view; default text assumed — confirm against repo.
        f"{adherent.get('prefix', '解析報告。')}\n\n"
        f"【確定データ】\n{raw_result}\n\n"
        f"{adherent.get('suffix', '報告終了。')}\n"
        f"-------------------------------\n"
        f"AXIS_SESSION: {uuid.uuid4().hex[:4].upper()}\n"
        f"STATUS: LOGIC_CONSOLIDATED."
    )
    return jsonify({"response": final_output, "process_logs": total_logs})
 
109
# ==========================================
# 3. Boot sequence (Spaces Optimized)
# ==========================================
if __name__ == '__main__':
    # NOTE(review): the ASCII art is whitespace-mangled in the diff view and
    # has been re-rendered here — confirm against the repo if exact art matters.
    banner = r"""
    ___   __   __ _____  _____
   / _ \  \ \ / /|_   _|/  ___|
  / /_\ \  \ V /   | |  \ `--.
  |  _  |  /   \   | |   `--. \
  | | | | / /^\ \ _| |_ /\__/ /
  \_| |_/ \/   \/ \___/ \____/
    """
    print(banner)
    # Restore the system name line the earlier (V1.2) banner displayed.
    print(f"[ {SYSTEM_NAME} ]")
    print("[SYSTEM] AXIS Live Deployment starting...")
    print("[LICENSE] APSL v1.0 auto-accepted for demonstration purposes.")
    # LICENSE_TERMS was defined at module level but never used; show the
    # full terms the auto-accept message refers to.
    print(LICENSE_TERMS)
    print("-" * 70)

    # Hugging Face Spaces requires host 0.0.0.0 and port 7860.
    app.run(host='0.0.0.0', port=7860)