# Canna-Corpus-Source / scripts / prepare_canna_vision.py
# (Hugging Face page chrome preserved as comments so the file stays runnable:)
# TheMindExpansionNetwork's picture
# Upload folder using huggingface_hub
# b739644 verified
#!/usr/bin/env python3
"""Build a vision-language cannabis strain dataset with local images."""
from __future__ import annotations
import csv
import json
import sys
from collections import Counter
from pathlib import Path
from typing import Dict, Iterable, List, Sequence, Tuple
from urllib.parse import urlparse
from urllib.request import Request, urlopen
# Repository layout: this script lives in <repo>/scripts/, so parents[1] is the repo root.
ROOT = Path(__file__).resolve().parents[1]
DATA_DIR = ROOT / "data"
# Input CSV of Leafly strain rows; must already exist on disk.
LEAFLY_PATH = DATA_DIR / "leafly_strain_data.csv"
# Outputs: downloaded images plus one JSONL metadata record per strain.
OUTPUT_DIR = DATA_DIR / "canna-corpus-vision"
IMAGE_DIR = OUTPUT_DIR / "images"
METADATA_PATH = OUTPUT_DIR / "metadata.jsonl"
def clean_text(value: str | None) -> str:
"""Normalize whitespace."""
if not value:
return ""
return " ".join(value.replace("\xa0", " ").split())
def slugify(value: str) -> str:
    """Convert strain name into a filesystem-friendly slug.

    Lowercases, drops every character that is not alphanumeric or a
    hyphen, and joins the surviving words with hyphens. Falls back to
    "item" when nothing survives.
    """
    pieces: List[str] = []
    for word in value.lower().split():
        kept = "".join(ch for ch in word if ch.isalnum() or ch == "-")
        if kept:
            pieces.append(kept)
    return "-".join(pieces) or "item"
def collect_ranked_items(
    row: Dict[str, str],
    keys: Iterable[str],
    limit: int = 5,
) -> List[str]:
    """Return labels for the highest-percentage fields in the source row.

    Each key's cell is parsed as a percentage (a single trailing "%" is
    tolerated); non-numeric, missing, and non-positive values are ignored.
    The surviving keys are returned highest-score first, underscores
    replaced by spaces, capped at *limit* entries.
    """
    candidates: List[Tuple[float, str]] = []
    for key in keys:
        cell = clean_text(row.get(key, ""))
        if not cell:
            continue
        if cell.endswith("%"):
            cell = cell[:-1]
        try:
            score = float(cell)
        except ValueError:
            continue  # non-numeric cells (e.g. free text) are skipped
        if score > 0:
            candidates.append((score, key.replace("_", " ")))
    # Sort on the score only so ties keep their original relative order.
    candidates.sort(key=lambda pair: pair[0], reverse=True)
    return [label for _, label in candidates[:limit]]
def build_text(segments: Sequence[str]) -> str:
    """Join non-empty segments into a multi-line description."""
    non_empty = (segment for segment in segments if segment)
    return "\n".join(non_empty)
def infer_extension(url: str) -> str:
    """Guess a file extension from the URL path.

    ".png" and ".webp" are kept as-is; ".jpg"/".jpeg" and anything
    unrecognized fall back to ".jpg".
    """
    suffix = Path(urlparse(url).path).suffix.lower()
    if suffix in {".png", ".webp"}:
        return suffix
    return ".jpg"
def download_image(url: str, destination: Path) -> bool:
    """Download an image to the given destination path.

    Returns True on success. An already-existing destination is treated
    as a cached download and reused without touching the network. On any
    failure a warning is printed to stderr, any partially written file is
    removed, and False is returned so the caller can skip the record.
    """
    if destination.exists():
        return True
    try:
        request = Request(url, headers={"User-Agent": "Mozilla/5.0"})
        with urlopen(request, timeout=30) as response:
            payload = response.read()
        # Write after the response is fully read and closed.
        destination.write_bytes(payload)
        return True
    except Exception as exc:  # noqa: BLE001 — best effort: any failure just skips this image
        # Bug fix: remove a partially written file; otherwise the
        # exists() fast path above would treat it as a valid cached
        # image on the next run.
        destination.unlink(missing_ok=True)
        print(f"[warn] Failed to download {url}: {exc}", file=sys.stderr)
        return False
def rows_with_images() -> List[Dict[str, str]]:
    """Load Leafly CSV rows that include an image URL.

    Raises FileNotFoundError when the CSV is missing. The "img_url"
    field of each returned row is whitespace-normalized in place.
    """
    if not LEAFLY_PATH.exists():
        raise FileNotFoundError(f"Leafly CSV not found at {LEAFLY_PATH}")
    selected: List[Dict[str, str]] = []
    with LEAFLY_PATH.open("r", encoding="utf-8", newline="") as handle:
        for row in csv.DictReader(handle):
            url = clean_text(row.get("img_url"))
            if not url:
                continue  # rows without an image cannot join the vision dataset
            row["img_url"] = url
            selected.append(row)
    return selected
def positive_keys() -> set[str]:
    """CSV column names that describe desirable (positive) effects."""
    names = (
        "relaxed happy euphoric uplifted sleepy hungry talkative "
        "creative energetic focused giggly tingly aroused"
    )
    return set(names.split())
def negative_keys() -> set[str]:
    """CSV column names that describe undesirable side effects."""
    return set("dry_mouth dry_eyes dizzy paranoid anxious headache".split())
def build_metadata(row: Dict[str, str], image_path: Path) -> Dict[str, object]:
    """Convert a CSV row into a structured metadata record.

    Builds a human-readable multi-line "text" field alongside the
    structured fields, and stores *image_path* in POSIX form.
    """
    strain = clean_text(row.get("name"))
    strain_type = clean_text(row.get("type"))
    thc_level = clean_text(row.get("thc_level"))
    terpene = clean_text(row.get("most_common_terpene"))
    description = clean_text(row.get("description"))

    effect_keys = positive_keys()
    side_effect_keys = negative_keys()
    effects = collect_ranked_items(row, effect_keys)
    side_effects = collect_ranked_items(row, side_effect_keys)

    # Any remaining percentage-valued column is treated as a relief/use score.
    non_relief = {
        "name",
        "img_url",
        "type",
        "thc_level",
        "most_common_terpene",
        "description",
    }
    relief_keys = set(row.keys()) - effect_keys - side_effect_keys - non_relief
    reliefs = collect_ranked_items(row, relief_keys)

    segments = [
        f"Strain: {strain}" if strain else "",
        f"Type: {strain_type}" if strain_type else "",
        f"THC level: {thc_level}" if thc_level else "",
        f"Dominant terpene: {terpene}" if terpene else "",
        f"Reported effects: {', '.join(effects)}" if effects else "",
        f"Common uses: {', '.join(reliefs)}" if reliefs else "",
        f"Possible side effects: {', '.join(side_effects)}" if side_effects else "",
        f"Description: {description}" if description else "",
    ]
    return {
        "id": f"leafly:{slugify(strain)}",
        "source": "leafly_strain_data_with_images",
        "strain": strain,
        "strain_type": strain_type or None,
        "thc_level": thc_level or None,
        "dominant_terpene": terpene or None,
        "effects": effects,
        "reliefs": reliefs,
        "side_effects": side_effects,
        "text": build_text(segments),
        "image": str(image_path.as_posix()),
        "image_url": row.get("img_url"),
    }
def main() -> None:
    """Entry point: download strain images and emit metadata + stats files.

    For each CSV row with a usable name and image URL, downloads the
    image under a de-duplicated slug, then writes one JSON record per
    line to metadata.jsonl and a small stats.json summary.
    """
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
    IMAGE_DIR.mkdir(parents=True, exist_ok=True)

    source_rows = rows_with_images()
    if not source_rows:
        print("[info] No rows with image URLs were found.", file=sys.stderr)
        return

    seen_slugs: Counter[str] = Counter()
    records: List[Dict[str, object]] = []
    for row in source_rows:
        name = clean_text(row.get("name"))
        if not name:
            continue
        base = slugify(name)
        seen_slugs[base] += 1
        # Repeated strain names get a numeric suffix to avoid clobbering files.
        slug = base if seen_slugs[base] == 1 else f"{base}-{seen_slugs[base]}"

        url = row["img_url"]
        target = IMAGE_DIR / f"{slug}{infer_extension(url)}"
        if not download_image(url, target):
            continue
        records.append(build_metadata(row, target.relative_to(OUTPUT_DIR)))

    if not records:
        print("[warn] No records were produced; aborting.", file=sys.stderr)
        return

    with METADATA_PATH.open("w", encoding="utf-8") as handle:
        for record in records:
            json.dump(record, handle, ensure_ascii=False)
            handle.write("\n")

    summary = {
        "count": len(records),
        "image_dir": str(IMAGE_DIR.relative_to(OUTPUT_DIR)),
    }
    with (OUTPUT_DIR / "stats.json").open("w", encoding="utf-8") as handle:
        json.dump(summary, handle, indent=2)
    print(f"[info] Wrote {len(records)} records to {METADATA_PATH}")
# Run the pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()