File size: 13,432 Bytes
c59d808 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 |
from dataclasses import asdict
from datetime import datetime
import io
import json
import os
import time
from typing import Any, Callable, Dict, Iterable, List, Optional, Union
from pathlib import Path
from click import Tuple
from pymongo import MongoClient, UpdateOne, errors
from .dto.stream_opts import StreamOptions
from .dto.recipe_doc import RecipeDoc
from .soup_client import SoupClient
from backend.utils.sanitization import clean
from bs4 import BeautifulSoup
from backend.config.database import db_settings
class JsonArraySink:
    """
    Append-safe JSON array writer.

    - Creates the file as an empty array (`[\\n]`) if it does not exist.
    - On open, locates the trailing `]`, truncates it, and appends items.
    - After every batch the array is re-closed with `]`, so the file is
      valid JSON at all times; the write position is rewound over that
      bracket so the NEXT batch overwrites it instead of appending after
      it (the original implementation corrupted the file on the second
      `write_many` call on the same instance).
    - Operates in binary mode: the bracket-scan arithmetic works on byte
      offsets, which text mode does not guarantee for multibyte UTF-8.
    """

    def __init__(self, path: str):
        self.path = path
        self._opened = False   # lazily opened on first non-empty write
        self._first = True     # True until the array holds at least one item
        self.f = None

    def _prepare(self):
        """Open the file, find/remove the closing ']', detect emptiness."""
        if self._opened:
            return
        # Ensure the file exists with an empty array.
        if not os.path.exists(self.path):
            with open(self.path, "wb") as f:
                f.write(b"[\n]")
        self.f = open(self.path, "r+b")
        # Scan backwards in chunks for the final ']' (always last, since we
        # re-close the array after every batch).
        self.f.seek(0, io.SEEK_END)
        end = self.f.tell()
        step = min(4096, end)
        pos = end
        last_bracket = -1
        while pos > 0:
            pos = max(0, pos - step)
            self.f.seek(pos)
            chunk = self.f.read(step)
            j = chunk.rfind(b"]")
            if j != -1:
                last_bracket = pos + j
                break
        if last_bracket == -1:
            # Corrupt or empty file: reset to an empty array.
            self.f.seek(0)
            self.f.truncate(0)
            self.f.write(b"[\n]")
            self.f.flush()
            last_bracket = 2  # index of ']' in b"[\n]"
        # "Is first item?" = only '[' (plus whitespace) precedes the ']'.
        self.f.seek(0)
        prefix = self.f.read(last_bracket).strip()
        self._first = (prefix == b"[")
        # Remove the closing ']' so items can be appended.
        self.f.seek(last_bracket)
        self.f.truncate()
        self._opened = True

    def write_many(self, docs: List[Dict[str, Any]]):
        """Append `docs` to the array and re-close it, keeping the file valid JSON.

        `default=str` stringifies non-JSON types (e.g. datetimes) rather
        than raising.
        """
        if not docs:
            return
        self._prepare()
        for d in docs:
            if self._first:
                self._first = False  # first item: no leading comma
            else:
                self.f.write(b",\n")
            payload = json.dumps(d, ensure_ascii=False, indent=2, default=str)
            self.f.write(payload.encode("utf-8"))
        # Re-close the array, then rewind so the next batch overwrites
        # the ",\n" position instead of appending after the bracket.
        tail = self.f.tell()
        self.f.write(b"\n]")
        self.f.flush()
        self.f.seek(tail)

    def close(self):
        if self.f:
            self.f.close()
        self._opened = False
class MongoSink:
    """Upserting MongoDB sink for scraped recipe documents.

    Connection settings come from ``db_settings.get_vector_store_config()``,
    which must provide ``uri``, ``database`` and ``collection_name``.
    Documents are keyed by ``url``; writing the same URL twice updates the
    existing document instead of duplicating it.
    """
    def __init__(self, ):
        db_config = db_settings.get_vector_store_config()
        # 10s server-selection timeout keeps a down DB from hanging the scraper.
        self.client = MongoClient(db_config["uri"], retryWrites=True, serverSelectionTimeoutMS=10000)
        self.col = self.client[db_config["database"]][db_config["collection_name"]]
        self._ensure_indexes()
    def _ensure_indexes(self):
        # `url` is unique so re-scrapes upsert rather than duplicate;
        # the rest support lookup/sort by downstream consumers.
        self.col.create_index("url", unique=True)
        self.col.create_index("title")
        self.col.create_index("category")
        self.col.create_index("scraped_at")
    def write_many(self, docs: List[Dict[str, Any]]) -> None:
        """Bulk-upsert `docs` by URL, stamping scraped_at / created_at.

        Each doc MUST contain a "url" key (KeyError otherwise). Input dicts
        are shallow-copied so callers' objects are not mutated.
        """
        if not docs: return
        ops = []
        # NOTE(review): utcnow() is naive and deprecated since Python 3.12;
        # consider datetime.now(timezone.utc).
        now = datetime.utcnow()
        for d in docs:
            d = d.copy()
            d.setdefault("scraped_at", now)
            ops.append(UpdateOne({"url": d["url"]}, {"$set": d, "$setOnInsert": {"created_at": now}}, upsert=True))
        try:
            self.col.bulk_write(ops, ordered=False)
        except errors.BulkWriteError as e:
            # duplicates or minor issues won't halt unordered bulk
            # NOTE(review): errors are swallowed silently here — consider at
            # least logging e.details before discarding.
            pass
    def close(self):
        # Releases the connection pool; the sink is unusable afterwards.
        self.client.close()
# TODO: add a search method that retrieves recipes by title or ingredients via
# vector similarity over the embedding fields in the configured database/collection.
class DualSink:
    """Fans each batch out to an optional JSON-file sink and an optional Mongo sink.

    Either sink may be None; the other still receives every batch. Both sinks
    are expected to expose the duck-typed `write_many(docs)` / `close()` pair.
    """

    def __init__(self, json_sink: Optional["JsonArraySink"], mongo_sink: Optional["MongoSink"]):
        self.json = json_sink
        self.mongo = mongo_sink

    def write_many(self, docs: List[Dict[str, Any]]) -> None:
        """Forward `docs` to every configured sink."""
        if self.json:
            self.json.write_many(docs)
        if self.mongo:
            # BUGFIX: MongoSink exposes write_many, not upsert_batch — the
            # original call raised AttributeError on every Mongo-enabled run.
            self.mongo.write_many(docs)

    def close(self) -> None:
        """Close all configured sinks."""
        if self.json:
            self.json.close()
        if self.mongo:
            self.mongo.close()
class BaseRecipeScraper(SoupClient):
    """Shared scraping pipeline for recipe sites.

    Subclasses implement `discover_urls` and `extract_recipe`; this base
    provides JSON-LD Recipe extraction, text-normalization helpers, optional
    embedding enrichment, and a resumable streaming loop that feeds a sink.
    """

    HEADING_TAGS = ("h1", "h2", "h3", "h4", "h5", "h6")

    def __init__(
        self,
        *args,
        embedder=None,
        embedding_fields=None,
        **kwargs
    ):
        """
        Args:
            embedder: optional object exposing `encode(list[str]) -> vectors`
                (e.g. HFEmbedder()).
            embedding_fields: list of (source_spec, target_field) pairs, e.g.
                [("title", "title_emb"), ("instructions_text", "instr_emb")].
                A source_spec may also be a tuple of field names whose texts
                are concatenated as labeled sections.
        """
        super().__init__(*args, **kwargs)
        self.embedder = embedder
        self.embedding_fields = embedding_fields or []
        self.logger = self.log

    def extract_jsonld(self, soup: BeautifulSoup) -> Optional[Dict[str, Any]]:
        """Return the first schema.org Recipe found in the page's JSON-LD, or None."""
        def to_list(x):
            return x if isinstance(x, list) else [x]

        for tag in soup.find_all("script", type="application/ld+json"):
            try:
                data = json.loads(tag.string or "{}")
            except Exception:
                continue  # malformed JSON-LD block: skip it
            # A script may hold a single node, an @graph, or a bare list.
            if isinstance(data, dict):
                nodes = data.get("@graph", [data])
            elif isinstance(data, list):
                nodes = data
            else:
                nodes = []
            for n in nodes:
                if not isinstance(n, dict):
                    continue
                t = n.get("@type")
                if t == "Recipe" or (isinstance(t, list) and "Recipe" in t):
                    return self._jsonld_to_doc(n, to_list)
        return None

    @staticmethod
    def _jsonld_to_doc(n: Dict[str, Any], to_list: Callable) -> Dict[str, Any]:
        """Map one JSON-LD Recipe node onto a RecipeDoc dict."""
        doc = RecipeDoc()
        doc.title = clean(n.get("name"))
        # Ingredients: entries may be plain strings or {"name"/"text": ...} dicts.
        ings = []
        for ing in to_list(n.get("recipeIngredient") or []):
            if isinstance(ing, dict):
                ings.append(clean(ing.get("name") or ing.get("text")))
            else:
                ings.append(clean(str(ing)))
        doc.ingredients = [x for x in ings if x]
        # Instructions: HowToStep dicts or plain strings.
        steps = []
        for st in to_list(n.get("recipeInstructions") or []):
            if isinstance(st, dict):
                steps.append(clean(st.get("text") or st.get("name")))
            else:
                steps.append(clean(str(st)))
        doc.instructions = [x for x in steps if x]
        doc.servings = n.get("recipeYield")
        # `image` may be an ImageObject dict, a list of URLs, or a plain URL.
        image = n.get("image")
        if isinstance(image, dict):
            doc.image_url = clean(image.get("url"))
        elif isinstance(image, list):
            # Guard the empty-list case (the original indexed [0] unconditionally).
            doc.image_url = clean(image[0]) if image else None
        else:
            doc.image_url = clean(image)
        doc.course = clean(n.get("recipeCategory")) if isinstance(n.get("recipeCategory"), str) else None
        doc.cuisine = clean(n.get("recipeCuisine")) if isinstance(n.get("recipeCuisine"), str) else None
        return asdict(doc)

    @staticmethod
    def _dedupe_preserve_order(items: List[str]) -> List[str]:
        """Clean each item, dropping empties and duplicates, keeping first-seen order."""
        seen = set()
        out = []
        for x in items:
            x = clean(x)
            if not x or x in seen:
                continue
            seen.add(x)
            out.append(x)
        return out

    @staticmethod
    def _to_ingredients_text(items: List[str]) -> str:
        """
        Turn ingredient bullets into a single text block.
        One-per-line reads well for humans and embeds well.
        """
        items = [clean(x) for x in items if x]
        items = BaseRecipeScraper._dedupe_preserve_order(items)
        return "\n".join(f"- {x}" for x in items)

    @staticmethod
    def _to_instructions_text(steps: List[str]) -> str:
        """
        Turn ordered steps into a single text block.
        Numbered paragraphs help embeddings keep sequence context.
        """
        steps = [clean(x) for x in steps if x]
        steps = BaseRecipeScraper._dedupe_preserve_order(steps)
        return "\n\n".join(f"{i}. {s}" for i, s in enumerate(steps, 1))

    # --- site-specific scrapers override these two ---

    def discover_urls(self) -> Iterable[str]:
        """Yield candidate recipe URLs for this site."""
        raise NotImplementedError

    def extract_recipe(self, soup: BeautifulSoup, url: str, category: Optional[str] = None) -> RecipeDoc:
        """Build a RecipeDoc from a fetched page."""
        raise NotImplementedError

    # --- shared streaming loop ---

    def stream(self, sink: DualSink, options: Optional[StreamOptions] = None) -> int:
        """Scrape discovered URLs into `sink` in batches; return the count saved.

        StreamOptions control the run: `limit` caps the URLs considered,
        `batch_size` sets flush granularity, `delay` throttles requests,
        `resume_file` (relative to ./data) lists URLs to skip on restart, and
        `progress_callback(saved)` fires after each flush. The sink is always
        closed, even when discovery or extraction raises.
        """
        opts = options or StreamOptions()
        self.log.info(
            f"Starting stream: limit={opts.limit} batch_size={opts.batch_size} "
            f"resume_file={opts.resume_file} sink={type(sink).__name__}"
        )
        resume_path = (Path("data") / opts.resume_file) if opts.resume_file else None
        processed = self._load_resume(resume_path)
        if resume_path is not None:
            self.log.info(f"[resume] {len(processed)} URLs already done")
        batch, saved = [], 0
        try:
            for i, url in enumerate(self.discover_urls(), 1):
                if opts.limit and i > opts.limit:
                    break
                if not self.same_domain(url):
                    continue
                if url in processed:
                    continue
                try:
                    soup = self.fetch_soup(url)
                    doc = self.extract_recipe(soup, url)
                    doc.finalize()
                    batch.append(asdict(doc))
                except Exception as e:
                    self.log.warning(f"[skip] {url} -> {e}")
                    # NOTE(review): only FAILED URLs are recorded for resume, so
                    # successful URLs are re-scraped after a restart (the Mongo
                    # upsert makes that idempotent). Confirm this is intended.
                    self._append_resume(resume_path, url)
                if len(batch) >= opts.batch_size:
                    saved = self._flush(sink, batch, saved, opts)
                    batch = []
                if i % 25 == 0:
                    self.log.info(f"…processed {i}, saved {saved}")
                time.sleep(opts.delay)
            if batch:
                saved = self._flush(sink, batch, saved, opts)
        finally:
            sink.close()
        self.log.info(f"[done] saved {saved}")
        return saved

    def _flush(self, sink: DualSink, batch: List[Dict[str, Any]], saved: int, opts: StreamOptions) -> int:
        """Embed and write one batch, report progress; return the new saved total."""
        self._apply_embeddings(batch)
        sink.write_many(batch)
        saved += len(batch)
        if opts.progress_callback:
            opts.progress_callback(saved)
        self.log.info(f"[flush] {saved} docs saved so far")
        return saved

    @staticmethod
    def _load_resume(resume_path: Optional[Path]) -> set:
        """Read previously-recorded URLs from the resume file, if it exists."""
        if resume_path is None or not resume_path.exists():
            return set()
        with resume_path.open("r", encoding="utf-8") as f:
            return {line.strip() for line in f if line.strip()}

    @staticmethod
    def _append_resume(resume_path: Optional[Path], url: str) -> None:
        """Append one URL to the resume file (no-op when resume is disabled)."""
        if resume_path is None:
            return
        with open(resume_path, "a", encoding="utf-8") as rf:
            rf.write(url + "\n")

    @staticmethod
    def _field_to_text(val: Any) -> str:
        """Stringify a field value; lists become newline-joined lines, None -> ''."""
        if isinstance(val, list):
            return "\n".join(str(x) for x in val)
        if val is None:
            return ""
        return str(val)

    def _gather_text(self, doc: Dict[str, Any], src: Any) -> str:
        """Collect embedding input from one field, or from a tuple of fields
        joined as labeled sections."""
        if not isinstance(src, tuple):
            return self._field_to_text(doc.get(src))
        parts: List[str] = []
        for f in src:
            t = self._field_to_text(doc.get(f))
            if t:
                # Label sections so the embedder keeps field context.
                label = "Ingredients" if f == "ingredients" else ("Instructions" if f == "instructions" else f)
                parts.append(f"{label}:\n{t}")
        return "\n\n".join(parts)

    def _apply_embeddings(self, batch: List[Dict[str, Any]]) -> None:
        """Attach embedding vectors to each document in `batch`, in place.

        For each (source_spec, destination_field) pair in
        `self.embedding_fields`, gathers text via `_gather_text`, encodes the
        whole batch with `self.embedder.encode`, and stores each vector under
        the destination field. Returns immediately when the embedder, the
        field list, or the batch is empty. Encoding failures are logged and
        leave the batch unembedded rather than aborting the stream.
        """
        if not self.embedder or not self.embedding_fields or not batch:
            return
        try:
            for src_spec, dst_field in self.embedding_fields:
                texts = [self._gather_text(doc, src_spec) for doc in batch]
                vectors = self.embedder.encode(texts)
                for document, vec in zip(batch, vectors):
                    document[dst_field] = vec
        except Exception as e:
            self.logger.warning(f"[embedding error]: {e}")
|