import json

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Hugging Face model ID of the AXIS logic engine.
MODEL_ID = "kofdai/AXIS-Sovereign-Logic-Engine"

# Load the tokenizer and model once at module level so repeated calls to
# extract_to_lattice() do not re-initialize them for every input.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16, device_map="auto")


def extract_to_lattice(text):
    """Ask the model to convert free text into the AXIS lattice (JSON) format."""
    print(f"🧐 [AXIS] Extracting knowledge: {text[:30]}...")

    # Prompt (kept in Japanese, since the model is addressed in Japanese):
    # "Convert the following text into the AXIS solid-cross format (JSON).
    #  If there are logical contradictions, record them in 'conflicts'."
    prompt = f"以下のテキストをAXIS立体十字形式(JSON)に変換せよ。論理矛盾があればconflictsに記載せよ。\n入力: {text}\nFormat: {{'nodes':[], 'edges':[], 'conflicts':[]}}"

    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=512)

    # Decode only the newly generated tokens, not the echoed prompt.
    generated = outputs[0][inputs["input_ids"].shape[-1]:]
    result = tokenizer.decode(generated, skip_special_tokens=True)
    return result
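
# NOTE: parse_lattice_json below is an illustrative sketch, not part of the
# original pipeline. It assumes the model really returns a JSON object in the
# {'nodes': [], 'edges': [], 'conflicts': []} shape requested by the prompt;
# since the prompt shows single-quoted keys, strict JSON parsing may fail and
# callers should be prepared for a None result.
def parse_lattice_json(raw_output):
    """Best-effort extraction of the first JSON object found in the model output."""
    start = raw_output.find("{")
    end = raw_output.rfind("}")
    if start == -1 or end <= start:
        return None
    try:
        return json.loads(raw_output[start:end + 1])
    except json.JSONDecodeError:
        return None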


if __name__ == "__main__":
    # Source statements (in Japanese), roughly:
    #  1. "Meaning is not the result of computation; it is the initial structure
    #     on which computation runs."
    #  2. "The AXIS physical purge is a ritual for preserving the purity of
    #     intelligence."
    raw_knowledge = [
        "意味は計算の結果ではなく、計算が走るための初期構造である。",
        "AXISの物理パージは、知能の純粋性を保つための儀式である。",
    ]

    # Convert each statement into a lattice piece and collect the results.
    knowledge_base = []
    for k in raw_knowledge:
        lattice_piece = extract_to_lattice(k)
        knowledge_base.append(lattice_piece)

    # Persist the collected pieces for later use.
    with open("local_massive_data.json", "w", encoding="utf-8") as f:
        json.dump(knowledge_base, f, ensure_ascii=False, indent=4)

    print("✅ [SUCCESS] Knowledge expansion complete. Wrote local_massive_data.json.")