"""dispute_module.py — EcoVerify AI disclaimer and Right-to-Reply / dispute handling."""
# Disclaimer + Right to Reply / Dispute module
# Purpose: ensure fairness and transparency of the system
# Standard Markdown disclaimer appended to every AI-generated report.
DISCLAIMER = """
---
> ⚠️ **Disclaimer:** Results are generated by AI based on publicly available data
> (visitor reviews, inspection reports, site metadata). EcoVerify AI does not
> guarantee the absolute accuracy of third-party source data. Risk scores are
> indicative only and should be used alongside official inspection processes.
> This tool supports — but does not replace — formal certification audits.
---
"""


def get_disclaimer() -> str:
    """Return the standard Markdown disclaimer block verbatim."""
    return DISCLAIMER
def _review_text_evidence(evidence_text: str, current_nlp_risk: float) -> tuple[float, list[str]]:
    """Score free-text evidence; return (possibly lowered NLP risk, review messages).

    The risk reduction is additive: +0.08 for clearly positive sentiment
    (VADER compound > 0.3), +0.10 for >= 3 positive keywords, +0.05 for 1-2.
    The resulting risk is clamped to >= 0.0.
    """
    # Lazy import: vaderSentiment is only needed when text evidence is submitted.
    from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

    POSITIVE_EVIDENCE = [
        "installed", "improved", "upgraded", "certified", "trained",
        "composting", "recycling", "reduced", "clean", "sorted",
        "hired", "contracted", "fixed", "resolved", "invested",
        "new bins", "waste system", "collection", "audit", "inspection passed",
    ]

    analyzer = SentimentIntensityAnalyzer()
    evidence_lower = evidence_text.lower()
    sentiment = analyzer.polarity_scores(evidence_text)['compound']
    hits = [w for w in POSITIVE_EVIDENCE if w in evidence_lower]

    # Compute the adjustment amount.
    text_adjustment = 0.0
    if sentiment > 0.3:
        text_adjustment += 0.08
    if len(hits) >= 3:
        text_adjustment += 0.10
    elif len(hits) >= 1:
        text_adjustment += 0.05

    messages: list[str] = []
    if text_adjustment > 0:
        new_risk = round(max(0.0, current_nlp_risk - text_adjustment), 3)
        messages.append(
            f"✅ Evidence text accepted — NLP risk reduced by {text_adjustment:.2f} "
            f"(keywords found: {', '.join(hits[:5])})"
        )
    else:
        new_risk = current_nlp_risk
        messages.append("⚠️ Evidence text did not contain sufficient positive indicators.")
    return new_risk, messages


def _review_image_evidence(evidence_image, current_img_risk: float) -> tuple[float, list[str]]:
    """Re-score the evidence image; return (possibly lowered image risk, review messages).

    The image risk is only lowered if the evidence image scores strictly
    better than the original. Analysis failures are reported, not raised.
    """
    messages: list[str] = []
    new_img_risk = current_img_risk
    try:
        # Lazy project-local import; only needed when an image is submitted.
        from image_module import analyze_image

        _img_output, new_evidence_risk = analyze_image(evidence_image)
        if new_evidence_risk < current_img_risk:
            reduction = round(current_img_risk - new_evidence_risk, 3)
            new_img_risk = new_evidence_risk
            messages.append(
                f"✅ Evidence image accepted — Image risk reduced by {reduction:.3f} "
                f"(new image risk: {new_evidence_risk:.3f})"
            )
        else:
            messages.append(
                f"⚠️ Evidence image did not show improvement over original "
                f"(image risk: {new_evidence_risk:.3f} vs original: {current_img_risk:.3f})"
            )
    except Exception as e:
        # Best-effort: a broken image or analyzer must not crash the dispute flow.
        messages.append(f"⚠️ Could not analyze evidence image: {e}")
    return new_img_risk, messages


def _format_drop(before: float, after: float) -> str:
    """Render a score decrease as '↓ N%' or '—' when the score did not drop.

    NOTE(review): N is the raw 0–1 delta * 100, i.e. percentage points of
    the risk scale, not a relative percent change — kept as originally coded.
    """
    if after < before:
        return "↓ " + str(round((before - after) * 100)) + "%"
    return "—"


def process_dispute(
    site_name: str,
    dispute_reason: str,
    evidence_text: str,
    evidence_image,
    current_nlp_risk: float,
    current_img_risk: float,
) -> tuple[str, float, float]:
    """Process an operator's dispute (right to reply) against an AI risk score.

    - Analyzes ``evidence_text`` with VADER sentiment + keyword matching.
    - Re-analyzes ``evidence_image`` (if any) and accepts it only if it
      scores better than the original.
    - Adjusts the risk scores downward when the evidence is valid.

    Args:
        site_name: Display name of the disputed site.
        dispute_reason: Operator's stated reason (may be None/empty).
        evidence_text: Free-text supporting evidence (may be None/empty).
        evidence_image: Optional image object accepted by image_module.analyze_image.
        current_nlp_risk: Current NLP risk score in [0, 1].
        current_img_risk: Current image risk score in [0, 1].

    Returns:
        (markdown_report, new_nlp_risk, new_img_risk). Scores are unchanged
        when the evidence is insufficient.
    """
    # UI frameworks (e.g. Gradio) may pass None for empty textboxes; the
    # original code crashed on None.strip().
    dispute_reason = dispute_reason or ""
    evidence_text = evidence_text or ""

    if not dispute_reason.strip() and not evidence_text.strip():
        return (
            "⚠️ Please provide a reason and/or evidence to submit a dispute.",
            current_nlp_risk,
            current_img_risk,
        )

    adjustments: list[str] = []
    new_nlp_risk = current_nlp_risk
    new_img_risk = current_img_risk

    # ── Review evidence text ──────────────────────────────────────────
    if evidence_text.strip():
        new_nlp_risk, text_msgs = _review_text_evidence(evidence_text, current_nlp_risk)
        adjustments.extend(text_msgs)

    # ── Review evidence image ─────────────────────────────────────────
    if evidence_image is not None:
        new_img_risk, img_msgs = _review_image_evidence(evidence_image, current_img_risk)
        adjustments.extend(img_msgs)

    # ── Recompute combined risk (plain average of the two components) ─
    new_combined = round((new_nlp_risk + new_img_risk) / 2, 3)
    old_combined = round((current_nlp_risk + current_img_risk) / 2, 3)
    delta = round(old_combined - new_combined, 3)

    if delta > 0:
        verdict = "✅ Dispute accepted — risk score updated"
    else:
        verdict = "ℹ️ Dispute noted — insufficient evidence to update score"

    adj_str = "\n".join(adjustments)
    # Collapse internal whitespace/newlines so a multi-line reason cannot
    # break the Markdown table row; cap at 100 chars as before.
    reason_cell = " ".join(dispute_reason.split())[:100] if dispute_reason else "Not provided"

    output = f"""## 🏳️ Dispute / Right to Reply — {site_name}
### {verdict}
---
### 📋 Dispute Summary
| Field | Value |
|-------|-------|
| Reason submitted | {reason_cell} |
| Evidence text | {"Provided ✅" if evidence_text.strip() else "Not provided"} |
| Evidence image | {"Provided ✅" if evidence_image is not None else "Not provided"} |
---
### 🔍 AI Review of Evidence
{adj_str}
---
### 📊 Score Update
| Metric | Before | After | Change |
|--------|--------|-------|--------|
| NLP Risk | {current_nlp_risk:.3f} | {new_nlp_risk:.3f} | {_format_drop(current_nlp_risk, new_nlp_risk)} |
| Image Risk | {current_img_risk:.3f} | {new_img_risk:.3f} | {_format_drop(current_img_risk, new_img_risk)} |
| **Combined Risk** | **{old_combined:.3f}** | **{new_combined:.3f}** | **{"↓ " + str(delta) if delta > 0 else "No change"}** |
---
{DISCLAIMER}
> 💡 **Note:** All disputes are logged for transparency. Updated scores reflect
> newly submitted evidence and do not erase original analysis. Both original
> and updated scores are retained in the final report.
"""
    return output, new_nlp_risk, new_img_risk