Update irpr/deps.py
irpr/deps.py  +134 -126  CHANGED

@@ -1,157 +1,165 @@
-# irpr/deps.py ---
from __future__ import annotations
-import os
-from typing import List, Dict, Optional
import numpy as np
from irpr.config import settings

-# ====
-
-
-

-#
-os.
-os.
-os.environ.setdefault("SENTENCE_TRANSFORMERS_HOME", DEFAULT_CACHE)
-os.environ.setdefault("HUGGINGFACE_HUB_CACHE", DEFAULT_CACHE)

-
-
-BASE, settings.CHROMA_PATH]:
    try:
-
-    except Exception:
-

-#
-
-
-
-
-

-def
-
-
-
-
-
-
-
-
-    _EMB_DIM = _EMB.get_sentence_embedding_dimension()
-    return _EMB

def embed_texts(texts: List[str]) -> np.ndarray:
-
-
-
-
-
-
-
-
-
-
-
-
-
-    if _CHROMA_COLLECTION is None:
-        import chromadb
-        from chromadb.config import Settings as CS
-        client = chromadb.PersistentClient(
-            path=settings.CHROMA_PATH,
-            settings=CS(allow_reset=True)
-        )
-        _CHROMA_COLLECTION = client.get_or_create_collection(name="irpr_docs")
-    return _CHROMA_COLLECTION

-
-
    if not records:
-        return
-    col = _get_chroma()
    texts = [r["text"] for r in records]
-

-    ids, metas = [], []
    for r in records:
-        doc_id = r.get("doc_id") or
        chunk_id = r.get("chunk_id") or ""
-        rid = f"{doc_id}:{chunk_id}" if chunk_id else doc_id
-        ids.append(rid)
        metas.append({
            "source_url": r.get("source_url"),
            "title": r.get("title"),
            "doc_id": doc_id,
            "chunk_id": chunk_id,
        })
-

def search(query: str, top_k=8) -> List[Dict]:
-
-
-
-
-
-
-    )
-    docs = res.get("documents", [[]])[0]
-    metas = res.get("metadatas", [[]])[0]
-    dists = res.get("distances", [[]])[0]
    out: List[Dict] = []
-    for
-
        out.append({
-            "text":
-            "source_url":
-            "title":
-            "doc_id":
-            "chunk_id":
-            "score":
        })
    return out

-# ==== Generation
-def _get_gen_pipeline():
-    """
-    If GEN_MODEL is empty, raise an exception as the signal that the LLM is disabled.
-    Keep dtype/device_map conservative so this also runs on CPU.
-    """
-    if not settings.GEN_MODEL:
-        raise RuntimeError("GEN_MODEL is empty (LLM disabled).")
-    global _GEN, _TOK
-    if _GEN is None:
-        from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
-        # torch is optional (falls back to CPU defaults without it)
-        try:
-            import torch  # noqa
-            torch_dtype = getattr(torch, "bfloat16", None)
-        except Exception:
-            torch = None
-            torch_dtype = None
-
-        name = settings.GEN_MODEL
-        cache_dir = os.environ.get("HF_HOME", DEFAULT_CACHE)
-        _TOK = AutoTokenizer.from_pretrained(name, cache_dir=cache_dir)
-        # keep dtype/device_map conservative so loading also works on CPU
-        model_kwargs = dict(cache_dir=cache_dir, low_cpu_mem_usage=True)
-        if torch and hasattr(torch, "cuda") and torch.cuda.is_available():
-            model_kwargs["torch_dtype"] = getattr(torch, "bfloat16", None) or getattr(torch, "float16", None)
-            model_kwargs["device_map"] = "auto"
-
-        _MODEL = AutoModelForCausalLM.from_pretrained(name, **model_kwargs)
-        _GEN = pipeline("text-generation", model=_MODEL, tokenizer=_TOK)
-    return _GEN, _TOK
-
def generate_chat(messages: List[Dict], max_new_tokens=600, temperature=0.2) -> str:
-
-
-
-
-
-        temperature=temperature,
-
-    )
-    return
+# irpr/deps.py --- OpenAI embeddings + simple numpy vector store / LLM generation
from __future__ import annotations
+import os, json, uuid
+from typing import List, Dict, Optional, Tuple
import numpy as np
from irpr.config import settings

+# ==== Pick a writable directory ====
+def _pick_writable_dir() -> str:
+    candidates = [settings.DATA_DIR, "/data", "./var", "/tmp/irpr", "."]
+    for base in candidates:
+        try:
+            if not base: continue
+            os.makedirs(base, exist_ok=True)
+            p = os.path.join(base, ".write_test")
+            with open(p, "w") as w: w.write("ok")
+            os.remove(p)
+            return base
+        except Exception:
+            continue
+    return "."
+
+BASE_DIR = _pick_writable_dir()
+INDEX_DIR = settings.INDEX_DIR or os.path.join(BASE_DIR, "simple_index")
+os.makedirs(INDEX_DIR, exist_ok=True)

+VECS_PATH = os.path.join(INDEX_DIR, "vectors.npy")  # np.float32 [N, D] (normalized)
+META_PATH = os.path.join(INDEX_DIR, "meta.jsonl")   # one metadata record per line
+TEXT_PATH = os.path.join(INDEX_DIR, "texts.jsonl")  # one text per line

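For reference, this module reads four fields off `irpr.config.settings`: DATA_DIR, INDEX_DIR, OPENAI_EMBED_MODEL, and OPENAI_CHAT_MODEL. A minimal sketch of a config that would satisfy it; the field names match what deps.py reads, but the default model names here are illustrative assumptions, not part of this change:

# hypothetical irpr/config.py sketch; defaults are assumptions
import os
from dataclasses import dataclass

@dataclass
class Settings:
    DATA_DIR: str = os.environ.get("DATA_DIR", "")
    INDEX_DIR: str = os.environ.get("INDEX_DIR", "")
    OPENAI_EMBED_MODEL: str = os.environ.get("OPENAI_EMBED_MODEL", "text-embedding-3-small")
    OPENAI_CHAT_MODEL: str = os.environ.get("OPENAI_CHAT_MODEL", "gpt-4o-mini")

settings = Settings()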
+# ==== OpenAI client ====
+def _openai_client():
    try:
+        from openai import OpenAI
+    except Exception as e:
+        raise RuntimeError("The `openai` package was not found. Add openai to requirements.txt.") from e
+    key = os.environ.get("OPENAI_API_KEY", "").strip()
+    if not key:
+        raise RuntimeError("OPENAI_API_KEY is not set. Set it as an environment variable.")
+    return OpenAI(api_key=key)

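Both failure modes surface as RuntimeError, so a caller that wants to degrade gracefully (e.g. retrieval-only mode when no key is configured) only needs a plain try/except. A sketch, not part of this diff; `llm_available` is an illustrative name:

# hypothetical caller-side guard built on _openai_client's RuntimeError contract
def llm_available() -> bool:
    try:
        _openai_client()
        return True
    except RuntimeError:
        return False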
+# ==== Save / load ====
+def _load_index() -> Tuple[np.ndarray, List[dict], List[str]]:
+    if os.path.exists(VECS_PATH):
+        vecs = np.load(VECS_PATH).astype(np.float32, copy=False)
+    else:
+        vecs = np.zeros((0, 0), dtype=np.float32)
+    metas: List[dict] = []
+    texts: List[str] = []
+    if os.path.exists(META_PATH):
+        with open(META_PATH, "r", encoding="utf-8") as f:
+            for line in f:
+                line = line.strip()
+                if line:
+                    metas.append(json.loads(line))
+    if os.path.exists(TEXT_PATH):
+        with open(TEXT_PATH, "r", encoding="utf-8") as f:
+            for line in f:
+                texts.append(line.rstrip("\n"))
+    # consistency check
+    if vecs.size == 0:
+        return np.zeros((0, 0), dtype=np.float32), [], []
+    n = vecs.shape[0]
+    if len(metas) != n or len(texts) != n:
+        # if the files are out of sync, treat the index as corrupt and reset
+        return np.zeros((0, 0), dtype=np.float32), [], []
+    return vecs, metas, texts

+def _save_index(vecs: np.ndarray, metas: List[dict], texts: List[str]) -> None:
+    os.makedirs(INDEX_DIR, exist_ok=True)
+    np.save(VECS_PATH, vecs.astype(np.float32, copy=False))
+    with open(META_PATH, "w", encoding="utf-8") as f:
+        for m in metas:
+            f.write(json.dumps(m, ensure_ascii=False) + "\n")
+    with open(TEXT_PATH, "w", encoding="utf-8") as f:
+        for t in texts:
+            f.write((t or "").replace("\n", "\\n") + "\n")  # normalize to one text per line

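The invariant across the three files is positional: row i of vectors.npy pairs with line i of meta.jsonl and line i of texts.jsonl, which is exactly what the consistency check in _load_index enforces. A quick inspection sketch, assuming an index has already been built:

# index inspection sketch using the module's own path constants
import json
import numpy as np
from irpr.deps import VECS_PATH, META_PATH

vecs = np.load(VECS_PATH)
with open(META_PATH, encoding="utf-8") as f:
    metas = [json.loads(line) for line in f if line.strip()]
print(vecs.shape, len(metas))  # row count must equal metadata line count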
+# ==== Embedding ====
def embed_texts(texts: List[str]) -> np.ndarray:
+    client = _openai_client()
+    model = settings.OPENAI_EMBED_MODEL
+    # call the API in batches
+    B = 128
+    out = []
+    for i in range(0, len(texts), B):
+        batch = texts[i:i+B]
+        resp = client.embeddings.create(model=model, input=batch)
+        out.extend([d.embedding for d in resp.data])
+    arr = np.array(out, dtype=np.float32)
+    # L2-normalize (for cosine similarity)
+    norms = np.linalg.norm(arr, axis=1, keepdims=True) + 1e-12
+    return arr / norms

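Because every stored vector and every query vector comes out of embed_texts unit-length, cosine similarity later reduces to a single matrix-vector product in search(). A tiny numeric check of that identity on synthetic data (no API call involved):

# cosine-via-dot-product sanity check on synthetic vectors
import numpy as np

a = np.random.rand(4, 8).astype(np.float32)
a /= np.linalg.norm(a, axis=1, keepdims=True) + 1e-12  # same normalization as embed_texts
q = a[0]
scores = a @ q                                         # same operation as search()
assert abs(scores[0] - 1.0) < 1e-5                     # self-similarity is ~1 after normalization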
+# ==== Add ====
+def add_to_index(records: List[Dict]) -> int:
+    """
+    records: [{text, title, source_url, doc_id, chunk_id}]
+    """
    if not records:
+        return 0
    texts = [r["text"] for r in records]
+    vecs_new = embed_texts(texts)
+
+    vecs, metas, old_texts = _load_index()
+    if vecs.size == 0:
+        vecs = vecs_new
+        metas = []
+        old_texts = []
+    else:
+        if vecs.shape[1] != vecs_new.shape[1]:
+            # embedding dimension changed (e.g. a different model) -> rebuild from scratch
+            vecs = vecs_new
+            metas = []
+            old_texts = []
+        else:
+            vecs = np.vstack([vecs, vecs_new])

    for r in records:
+        doc_id = r.get("doc_id") or str(uuid.uuid4())
        chunk_id = r.get("chunk_id") or ""
        metas.append({
            "source_url": r.get("source_url"),
            "title": r.get("title"),
            "doc_id": doc_id,
            "chunk_id": chunk_id,
+            "id": f"{doc_id}:{chunk_id}" if chunk_id else doc_id
        })
+        old_texts.append(r.get("text", ""))
+
+    _save_index(vecs, metas, old_texts)
+    return len(records)

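Ingestion then looks like the following; the record values here are made-up examples:

# illustrative ingestion call; text, URL, and ids are invented
n = add_to_index([
    {
        "text": "Tokyo is the capital of Japan.",
        "title": "Japan facts",
        "source_url": "https://example.com/japan",
        "doc_id": "japan-001",
        "chunk_id": "0",
    },
])
print(f"indexed {n} chunk(s)")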
+
# ==== 検索 ====
|
| 135 |
def search(query: str, top_k=8) -> List[Dict]:
|
| 136 |
+
vecs, metas, texts = _load_index()
|
| 137 |
+
if vecs.size == 0:
|
| 138 |
+
return []
|
| 139 |
+
q = embed_texts([query])[0] # (D,)
|
| 140 |
+
scores = vecs @ q # cosine (正規化済み)
|
| 141 |
+
idx = np.argsort(-scores)[:max(1, top_k)]
|
|
|
|
|
|
|
|
|
|
|
|
|
| 142 |
out: List[Dict] = []
|
| 143 |
+
for i in idx.tolist():
|
| 144 |
+
m = metas[i]
|
| 145 |
out.append({
|
| 146 |
+
"text": (texts[i] or "").replace("\\n", "\n"),
|
| 147 |
+
"source_url": m.get("source_url"),
|
| 148 |
+
"title": m.get("title"),
|
| 149 |
+
"doc_id": m.get("doc_id"),
|
| 150 |
+
"chunk_id": m.get("chunk_id"),
|
| 151 |
+
"score": float(scores[i]),
|
| 152 |
})
|
| 153 |
return out
|
| 154 |
|
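Retrieval returns plain dicts sorted by score, so consumers need no vector-store types at all. For example (the query text is illustrative):

# illustrative retrieval call
hits = search("What is the capital of Japan?", top_k=3)
for h in hits:
    print(f"{h['score']:.3f}  {h['title']}  {h['source_url']}")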
+# ==== Generation ====
def generate_chat(messages: List[Dict], max_new_tokens=600, temperature=0.2) -> str:
+    client = _openai_client()
+    model = settings.OPENAI_CHAT_MODEL
+    resp = client.chat.completions.create(
+        model=model,
+        messages=messages,
+        temperature=float(temperature),
+        max_tokens=int(max_new_tokens),
+    )
+    return (resp.choices[0].message.content or "").strip()
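Wiring the pieces together gives retrieval-augmented generation in a few lines. The prompt template below is an assumption about how callers elsewhere in irpr might assemble context, not something this diff defines:

# hypothetical end-to-end RAG sketch using this module's public functions
query = "What is the capital of Japan?"
hits = search(query, top_k=4)
context = "\n\n".join(f"[{h['title']}] {h['text']}" for h in hits)
messages = [
    {"role": "system", "content": "Answer using only the provided context."},
    {"role": "user", "content": f"Context:\n{context}\n\nQuestion: {query}"},
]
print(generate_chat(messages, max_new_tokens=300, temperature=0.2))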
|