|
|
import re |
|
|
import json |
|
|
import traceback |
|
|
from typing import List, Dict, Any |
|
|
from llama_cpp import Llama |
|
|
|
|
|
class CommunityBrain:
    """AI helpers for community features, backed by a local llama.cpp model.

    Provides keyword-based content moderation, LLM-driven hashtag
    extraction, and discussion-thread summarization.
    """

    def __init__(self, llm_instance: Llama):
        """Store the shared model handle.

        Args:
            llm_instance: A loaded ``llama_cpp.Llama`` model used by all
                generation helpers.
        """
        self.llm = llm_instance
        print("--- Community Brain initialized. ---")

    def moderate_content(self, text: str) -> Dict[str, Any]:
        """Score ``text`` for toxicity against a local keyword list.

        Keywords are matched on word boundaries, so words that merely
        contain a keyword (e.g. "skill" containing "kill") no longer
        trigger false positives. Each hit adds 0.4 to the score; the
        score is capped at 1.0.

        NOTE(review): the previous docstring promised a follow-up AI
        semantic check, but none is implemented — only the keyword
        pass runs.

        Args:
            text: Raw user-submitted content.

        Returns:
            Dict with ``toxicity_score`` (0.0-1.0), ``is_safe`` (True
            when the score is below 0.5) and ``flags`` (one
            human-readable entry per keyword hit).
        """
        TOXIC_KEYWORDS = [
            "abuse", "kill", "suicide", "hate", "idiot", "scam",
            "hack", "betting", "casino", "stupid", "fuck"
        ]

        text_lower = text.lower()
        score = 0.0
        flags = []

        for word in TOXIC_KEYWORDS:
            # \b anchors prevent substring hits ("scampi" for "scam",
            # "skill" for "kill"); re.escape is defensive for future keywords.
            if re.search(rf"\b{re.escape(word)}\b", text_lower):
                score += 0.4
                flags.append(f"Potential inappropriate word: {word}")

        score = min(score, 1.0)  # cap so multiple hits cannot exceed 1.0
        return {
            "toxicity_score": score,
            "is_safe": score < 0.5,
            "flags": flags
        }

    def generate_smart_tags(self, content: str) -> List[str]:
        """Extract up to 5 relevant hashtags for ``content`` via the LLM.

        Degenerate tags (ones that collapse to a bare ``#`` after
        punctuation stripping) and duplicates are dropped. Falls back to
        ``["#Community"]`` when the model errors out or yields no usable
        tags.

        Args:
            content: Post text; only the first 300 characters are sent
                to the model.

        Returns:
            A list of 1-5 hashtag strings, each starting with ``#``.
        """
        try:
            # One-shot example keeps the model's output format on rails.
            prompt = f"""[INST] Extract 4 relevant hashtags for this post. Return ONLY hashtags separated by spaces. No explanation.

Post: "I just bought this amazing lipstick shade, the red is so vibrant and it lasts all day! Perfect for summer looks."
Hashtags: #Beauty #Lipstick #Makeup #SummerVibes

Post: "{content[:300]}"
Hashtags: [/INST]"""

            response = self.llm(
                prompt,
                max_tokens=30,
                temperature=0.3,
                stop=["[INST]", "\n"],
                echo=False
            )

            raw_tags = response['choices'][0]['text'].strip()

            clean_tags = []
            for tag in raw_tags.split():
                if not tag.startswith('#'):
                    tag = f"#{tag}"
                # Keep only alphanumerics (plus the leading '#').
                tag = re.sub(r'[^a-zA-Z0-9#]', '', tag)
                # Skip tags that collapsed to a bare '#' and duplicates.
                if len(tag) > 1 and tag not in clean_tags:
                    clean_tags.append(tag)

            return clean_tags[:5] if clean_tags else ["#Community"]

        except Exception as e:
            print(f"Smart Tag Error: {e}")
            return ["#Community"]

    def summarize_thread(self, comments: List[str]) -> str:
        """Summarize a comment thread into a single sentence.

        Args:
            comments: Comment bodies; only the first 15 (capped at 1500
                characters total) are included in the prompt.

        Returns:
            A one-sentence summary, or a generic placeholder when the
            thread is empty, the model fails, or it returns no text.
        """
        if not comments:
            return "No activity yet."

        # Bound the prompt size: at most 15 comments / 1500 characters.
        context_text = " | ".join(comments[:15])[:1500]

        prompt = f"""[INST] Summarize the main sentiment and topic of this discussion in one sentence.

Discussion: {context_text}

Summary: [/INST]"""

        try:
            response = self.llm(
                prompt,
                max_tokens=60,
                temperature=0.5,
                stop=["[INST]", "\n"],
                echo=False
            )
            summary = response['choices'][0]['text'].strip()
            # Guard against an empty generation.
            return summary or "Discussion is active."
        except Exception:
            # Best-effort: never let a model failure break the page.
            return "Discussion is active."