import pandas as pd import streamlit as st import re import logging import nltk from docx import Document import io from langdetect import detect from transformers import pipeline from dotenv import load_dotenv from langchain_groq import ChatGroq from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate # Load environment variables load_dotenv() # Initialize logging logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") # Initialize LLM (Groq API) llm = ChatGroq(temperature=0.5, groq_api_key="GROQ_API_KEY", model_name="llama3-8b-8192") # Download required NLTK resources nltk.download("punkt") # Tone categories for fallback method tone_categories = { "Emotional": ["urgent", "violence", "disappearances", "forced", "killing", "crisis", "concern"], "Harsh": ["corrupt", "oppression", "failure", "repression", "exploit", "unjust", "authoritarian"], "Somber": ["tragedy", "loss", "pain", "sorrow", "mourning", "grief", "devastation"], "Motivational": ["rise", "resist", "mobilize", "inspire", "courage", "change", "determination"], "Informative": ["announcement", "event", "scheduled", "update", "details", "protest", "statement"], "Positive": ["progress", "unity", "hope", "victory", "together", "solidarity", "uplifting"], "Angry": ["rage", "injustice", "fury", "resentment", "outrage", "betrayal"], "Fearful": ["threat", "danger", "terror", "panic", "risk", "warning"], "Sarcastic": ["brilliant", "great job", "amazing", "what a surprise", "well done", "as expected"], "Hopeful": ["optimism", "better future", "faith", "confidence", "looking forward"] } # Frame categories for fallback method frame_categories = { "Human Rights & Justice": ["rights", "law", "justice", "legal", "humanitarian"], "Political & State Accountability": ["government", "policy", "state", "corruption", "accountability"], "Gender & Patriarchy": ["gender", "women", "violence", "patriarchy", "equality"], "Religious Freedom & Persecution": ["religion", "persecution", "minorities", "intolerance", "faith"], "Grassroots Mobilization": ["activism", "community", "movement", "local", "mobilization"], "Environmental Crisis & Activism": ["climate", "deforestation", "water", "pollution", "sustainability"], "Anti-Extremism & Anti-Violence": ["extremism", "violence", "hate speech", "radicalism", "mob attack"], "Social Inequality & Economic Disparities": ["class privilege", "labor rights", "economic", "discrimination"], "Activism & Advocacy": ["justice", "rights", "demand", "protest", "march", "campaign", "freedom of speech"], "Systemic Oppression": ["discrimination", "oppression", "minorities", "marginalized", "exclusion"], "Intersectionality": ["intersecting", "women", "minorities", "struggles", "multiple oppression"], "Call to Action": ["join us", "sign petition", "take action", "mobilize", "support movement"], "Empowerment & Resistance": ["empower", "resist", "challenge", "fight for", "stand up"], "Climate Justice": ["environment", "climate change", "sustainability", "biodiversity", "pollution"], "Human Rights Advocacy": ["human rights", "violations", "honor killing", "workplace discrimination", "law reform"] } # Detect language def detect_language(text): try: return detect(text) except Exception as e: logging.error(f"Error detecting language: {e}") return "unknown" # Extract tone using Groq API (or fallback method) def extract_tone(text): try: response = llm.chat([ {"role": "system", "content": "Analyze the tone of the following text and provide descriptive tone labels."}, {"role": "user", "content": text} ]) return response["choices"][0]["message"]["content"].split(", ") except Exception as e: logging.error(f"Groq API error: {e}") return extract_tone_fallback(text) # Fallback method for tone extraction def extract_tone_fallback(text): detected_tones = set() text_lower = text.lower() for category, keywords in tone_categories.items(): if any(word in text_lower for word in keywords): detected_tones.add(category) return list(detected_tones) if detected_tones else ["Neutral"] # Extract hashtags def extract_hashtags(text): return re.findall(r"#\w+", text) # Extract frames using Groq API (or fallback) def extract_frames(text): try: response = llm.chat([ {"role": "system", "content": "Classify the following text into relevant activism frames and assign Major, Significant, or Minor focus."}, {"role": "user", "content": text} ]) return response["choices"][0]["message"]["content"] except Exception as e: logging.error(f"Groq API error: {e}") return extract_frames_fallback(text) # Fallback method for frame extraction def extract_frames_fallback(text): detected_frames = set() text_lower = text.lower() for category, keywords in frame_categories.items(): if any(word in text_lower for word in keywords): detected_frames.add(category) return list(detected_frames) # Extract captions from DOCX def extract_captions_from_docx(docx_file): doc = Document(docx_file) captions = {} current_post = None for para in doc.paragraphs: text = para.text.strip() if re.match(r"Post \d+", text, re.IGNORECASE): current_post = text captions[current_post] = [] elif current_post: captions[current_post].append(text) return {post: " ".join(lines) for post, lines in captions.items() if lines} # Extract metadata from Excel file def extract_metadata_from_excel(excel_file): try: df = pd.read_excel(excel_file) # Assuming the Excel sheet has columns: 'Post Number', 'Likes', 'Comments', 'Media Type' metadata = df.set_index("Post Number").to_dict(orient="index") return metadata except Exception as e: logging.error(f"Error reading Excel file: {e}") return {} # Merge metadata from Excel with the generated data def merge_metadata_with_generated_data(generated_data, excel_metadata): for post, metadata in excel_metadata.items(): if post in generated_data: generated_data[post].update(metadata) return generated_data # Streamlit app st.title("AI-Powered Activism Message Analyzer") st.write("Enter text or upload a DOCX/Excel file for analysis:") # Text input input_text = st.text_area("Input Text", height=200) # File upload (DOCX) uploaded_docx = st.file_uploader("Upload a DOCX file", type=["docx"]) # File upload (Excel) uploaded_excel = st.file_uploader("Upload an Excel file", type=["xlsx"]) # Initialize output dictionary output_data = {} # Process Text Input if input_text: output_data["Manual Input"] = { "Full Caption": input_text, "Language": detect_language(input_text), "Tone": extract_tone(input_text), "Hashtags": extract_hashtags(input_text), "Frames": extract_frames(input_text), } st.success("Analysis completed for text input.") # Process DOCX file if uploaded_docx: captions = extract_captions_from_docx(uploaded_docx) for caption, text in captions.items(): output_data[caption] = { "Full Caption": text, "Language": detect_language(text), "Tone": extract_tone(text), "Hashtags": extract_hashtags(text), "Frames": extract_frames(text), } st.success(f"Analysis completed for {len(captions)} posts from DOCX.") # Process Excel file if uploaded_excel: excel_metadata = extract_metadata_from_excel(uploaded_excel) st.success(f"Excel metadata extracted with {len(excel_metadata)} posts.") # Merge and display final data if uploaded_excel: output_data = merge_metadata_with_generated_data(output_data, excel_metadata) # Display results if output_data: st.write(output_data)