| |
| """Build a text-focused cannabis strain corpus from the raw CSV sources.""" |
| from __future__ import annotations |
|
|
| import csv |
| import json |
| import re |
| from pathlib import Path |
| from typing import Dict, Iterable, List, Optional, Sequence, Tuple |
|
|
# Repository root: this script is expected to live one directory below it.
ROOT = Path(__file__).resolve().parents[1]
# Raw CSV inputs are read from data/.
DATA_DIR = ROOT / "data"
# All generated corpus artifacts are written under data/canna-corpus/.
OUTPUT_DIR = DATA_DIR / "canna-corpus"
# One JSON record per line (streaming-friendly training input).
TRAIN_JSONL_PATH = OUTPUT_DIR / "train.jsonl"
# Alpaca-style instruction-tuning examples (single JSON array).
ALPACA_PATH = OUTPUT_DIR / "alpaca.json"
|
|
|
|
def clean_text(value: Optional[str]) -> str:
    """Collapse whitespace runs to single spaces and trim the result."""
    if value is None or value == "":
        return ""
    # Treat non-breaking spaces as ordinary spaces before collapsing.
    collapsed = re.sub(r"\s+", " ", value.replace("\xa0", " "))
    return collapsed.strip()
|
|
|
|
def parse_list(value: Optional[str]) -> List[str]:
    """Split a comma-delimited string into a list of trimmed, non-empty tokens."""
    if not value:
        return []
    tokens: List[str] = []
    for chunk in value.split(","):
        token = chunk.strip()
        if token:
            tokens.append(token)
    return tokens
|
|
|
|
def slugify(value: str) -> str:
    """Build a lowercase, hyphen-separated ID slug from *value*."""
    # Runs of non-word characters become single hyphens; underscores survive.
    slug = re.sub(r"[^\w]+", "-", value.lower()).strip("-")
    if slug:
        return slug
    # Fall back to a generic token when nothing usable remains.
    return "item"
|
|
|
|
def build_text(lines: Sequence[str]) -> str:
    """Join the truthy segments of *lines* with newlines, skipping empties."""
    return "\n".join(filter(None, lines))
|
|
|
|
def collect_ranked_items(
    row: Dict[str, str],
    keys: Iterable[str],
    limit: int = 5,
) -> List[str]:
    """Return up to *limit* labels for the keys with the highest values.

    Each key in *keys* is looked up in *row*; values such as ``"42"`` or
    ``"42%"`` are parsed as floats, and keys with missing, non-numeric, or
    non-positive values are skipped.  Underscores in winning keys are
    replaced with spaces to form human-readable labels.

    Ties are broken alphabetically by label so the result is deterministic
    even when *keys* is an unordered collection such as a set (callers in
    this module pass sets, whose iteration order varies under hash
    randomization).
    """
    scored: List[Tuple[float, str]] = []
    for key in keys:
        raw = row.get(key, "")
        if not raw:
            continue
        raw = raw.strip()
        # Accept a single trailing percent sign ("42%" -> "42").
        if raw.endswith("%"):
            raw = raw[:-1]
        try:
            score = float(raw)
        except ValueError:
            continue
        if score <= 0:
            continue
        scored.append((score, key.replace("_", " ")))
    # Descending score, then ascending label: stable, reproducible ranking.
    scored.sort(key=lambda item: (-item[0], item[1]))
    return [label for _, label in scored[:limit]]
|
|
|
|
def load_cannabis_file(path: Path, source: str) -> List[Dict[str, object]]:
    """Load a cannabis strains CSV and convert each row to the unified schema.

    Args:
        path: CSV file with ``Strain``/``Type``/``Rating``/``Effects``/
            ``Flavor``/``Description`` columns.
        source: Dataset label stored on each record and used as the ID prefix.

    Returns:
        One record dict per row that has a non-empty strain name; rows
        without a strain are skipped, and malformed ratings become ``None``
        instead of aborting the import.
    """
    records: List[Dict[str, object]] = []
    with path.open("r", encoding="utf-8", newline="") as handle:
        reader = csv.DictReader(handle)
        for row in reader:
            strain = clean_text(row.get("Strain"))
            if not strain:
                # A record without a strain name cannot be identified.
                continue
            strain_type = clean_text(row.get("Type"))
            rating_value = clean_text(row.get("Rating"))
            try:
                # Non-numeric rating cells (e.g. "N/A") are treated as
                # missing rather than crashing the whole build.
                rating = float(rating_value) if rating_value else None
            except ValueError:
                rating = None
            effects = parse_list(row.get("Effects"))
            flavors = parse_list(row.get("Flavor"))
            description = clean_text(row.get("Description"))
            item_id = f"{source}:{slugify(strain)}"
            # Only non-empty fields contribute a line to the rendered text.
            text_segments = [
                f"Strain: {strain}",
                f"Type: {strain_type}" if strain_type else "",
                f"Rating: {rating_value}" if rating_value else "",
                f"Effects: {', '.join(effects)}" if effects else "",
                f"Flavors: {', '.join(flavors)}" if flavors else "",
                f"Description: {description}" if description else "",
            ]
            record = {
                "id": item_id,
                "source": source,
                "strain": strain,
                "strain_type": strain_type or None,
                "rating": rating,
                # These CSVs carry no THC/terpene data; keep the schema
                # aligned with the Leafly records.
                "thc_level": None,
                "dominant_terpene": None,
                "effects": effects,
                "reliefs": [],
                "side_effects": [],
                "flavors": flavors,
                "text": build_text(text_segments),
            }
            records.append(record)
    return records
|
|
|
|
def load_leafly_file(path: Path) -> List[Dict[str, object]]:
    """Load the Leafly dataset and convert each row to the unified schema.

    The Leafly CSV stores effect, side-effect, and relief prevalence as
    percentage columns; the top-ranked entries of each group are summarized
    into the record text.

    Args:
        path: Path to the Leafly strain-data CSV.

    Returns:
        One record dict per row that has a non-empty strain name.
    """
    positive_keys = {
        "relaxed",
        "happy",
        "euphoric",
        "uplifted",
        "sleepy",
        "hungry",
        "talkative",
        "creative",
        "energetic",
        "focused",
        "giggly",
        "tingly",
        "aroused",
    }
    negative_keys = {
        "dry_mouth",
        "dry_eyes",
        "dizzy",
        "paranoid",
        "anxious",
        "headache",
        "lack_of_appetite",
        "nausea",
    }
    # Non-percentage columns that must never be ranked as relief scores.
    ignore_keys = {
        "name",
        "img_url",
        "type",
        "thc_level",
        "most_common_terpene",
        "description",
    }
    records: List[Dict[str, object]] = []
    with path.open("r", encoding="utf-8", newline="") as handle:
        reader = csv.DictReader(handle)
        for row in reader:
            strain = clean_text(row.get("name"))
            if not strain:
                continue
            strain_type = clean_text(row.get("type"))
            thc_level = clean_text(row.get("thc_level"))
            terpene = clean_text(row.get("most_common_terpene"))
            description = clean_text(row.get("description"))
            # Pass sorted key lists so tie-breaking inside
            # collect_ranked_items is deterministic across runs (set
            # iteration order is not stable under hash randomization).
            effects = collect_ranked_items(row, sorted(positive_keys))
            side_effects = collect_ranked_items(row, sorted(negative_keys))
            # DictReader stores overflow cells under the None restkey;
            # drop any non-string keys before deriving the relief pool,
            # since collect_ranked_items expects string column names.
            string_keys = {key for key in row if isinstance(key, str)}
            relief_pool = string_keys - positive_keys - negative_keys - ignore_keys
            reliefs = collect_ranked_items(row, sorted(relief_pool))
            item_id = f"leafly:{slugify(strain)}"
            # Only non-empty fields contribute a line to the rendered text.
            text_segments = [
                f"Strain: {strain}",
                f"Type: {strain_type}" if strain_type else "",
                f"THC level: {thc_level}" if thc_level else "",
                f"Dominant terpene: {terpene}" if terpene else "",
                f"Reported effects: {', '.join(effects)}" if effects else "",
                f"Common uses: {', '.join(reliefs)}" if reliefs else "",
                f"Possible side effects: {', '.join(side_effects)}" if side_effects else "",
                f"Description: {description}" if description else "",
            ]
            record = {
                "id": item_id,
                "source": "leafly_strain_data",
                "strain": strain,
                "strain_type": strain_type or None,
                "rating": None,
                "thc_level": thc_level or None,
                "dominant_terpene": terpene or None,
                "effects": effects,
                "reliefs": reliefs,
                "side_effects": side_effects,
                "flavors": [],
                "text": build_text(text_segments),
            }
            records.append(record)
    return records
|
|
|
|
def consolidate_records() -> List[Dict[str, object]]:
    """Combine all source records while deduplicating identical entries."""
    combined: List[Dict[str, object]] = []
    seen: set[Tuple[str, str]] = set()

    def absorb(batch: Iterable[Dict[str, object]]) -> None:
        # Records are considered duplicates when both the lowercased strain
        # name and the rendered text match exactly.
        for item in batch:
            fingerprint = (item["strain"].lower(), item["text"])
            if fingerprint not in seen:
                seen.add(fingerprint)
                combined.append(item)

    for source_name, csv_path in (
        ("cannabis_csv", DATA_DIR / "cannabis.csv"),
        ("cannabis_features_csv", DATA_DIR / "Cannabis StrainsFeatures - cannabis.csv"),
    ):
        if csv_path.exists():
            absorb(load_cannabis_file(csv_path, source_name))

    leafly_path = DATA_DIR / "leafly_strain_data.csv"
    if leafly_path.exists():
        absorb(load_leafly_file(leafly_path))

    # Stable ordering keeps the output files reproducible between runs.
    combined.sort(key=lambda entry: entry["id"])
    return combined
|
|
|
|
def build_alpaca_dataset(records: Sequence[Dict[str, object]]) -> List[Dict[str, object]]:
    """Convert records into Alpaca-style instruction-tuning examples."""
    examples: List[Dict[str, object]] = []
    for entry in records:
        name = entry.get("strain", "")
        examples.append(
            {
                "instruction": f"Provide a detailed profile for the cannabis strain \"{name}\".",
                "input": "",
                "output": entry.get("text", ""),
            }
        )
    return examples
|
|
|
|
def main() -> None:
    """Build and write the JSONL corpus, Alpaca dataset, and summary stats."""
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
    records = consolidate_records()

    # One JSON object per line: streaming-friendly training input.
    with TRAIN_JSONL_PATH.open("w", encoding="utf-8") as sink:
        for entry in records:
            sink.write(json.dumps(entry, ensure_ascii=False))
            sink.write("\n")

    with ALPACA_PATH.open("w", encoding="utf-8") as sink:
        json.dump(build_alpaca_dataset(records), sink, ensure_ascii=False, indent=2)

    # Tally how many records each source contributed.
    per_source: Dict[str, int] = {}
    for entry in records:
        per_source[entry["source"]] = per_source.get(entry["source"], 0) + 1
    summary = {"count": len(records), "sources": per_source}
    with (OUTPUT_DIR / "stats.json").open("w", encoding="utf-8") as sink:
        json.dump(summary, sink, indent=2, sort_keys=True)
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|