Spaces:
Sleeping
Sleeping
| import streamlit as st | |
| import os | |
| import json | |
| import requests | |
| import gspread | |
| from oauth2client.service_account import ServiceAccountCredentials | |
| import pandas as pd | |
| from googleapiclient.discovery import build # ✅ Ensure this is imported | |
| import openai | |
# === API Keys & Model Configuration ===
# The OpenAI key is read from the environment (an earlier comment claimed it
# was hardcoded; it is not).
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY
# NOTE(review): MODEL is never referenced in the visible code — every LLM call
# below goes to Perplexity.  Confirm this snapshot name is still needed.
MODEL = 'gpt-4o-mini-search-preview-2025-03-11'
# Perplexity API token for sonar models
PERPLEXITY_API_TOKEN = os.getenv("PERPLEXITY_API_KEY")

# Structured output is only available on higher Perplexity API tiers; callers
# that don't need it pass use_response_format=False so the parameter is
# omitted entirely.
#
# Bug fix: the previous value was a mapping of three differently-named format
# objects ({"ResponseFormatText": ..., "ResponseFormatJSONSchema": ..., ...}),
# which is not a valid `response_format` payload and triggered the API error
# the original comments describe.  The API expects a single format object;
# the catch-all regex below is the least restrictive valid constraint.
RESPONSE_FORMAT = {"type": "regex", "regex": {"regex": ".*"}}
| # Perplexity helper function using the "sonar" model. | |
def generate_with_perplexity(prompt, max_tokens=150, use_response_format=True, timeout=30):
    """Call the Perplexity chat-completions API ("sonar-pro" model).

    Args:
        prompt: User prompt to send.
        max_tokens: Cap on the generated completion length.
        use_response_format: When True, attach the module-level RESPONSE_FORMAT
            payload (structured output — higher API tiers only).
        timeout: Seconds to wait for the HTTP response.  Previously no timeout
            was set, so a stalled connection could hang the app indefinitely.

    Returns:
        The assistant message content (stripped), or "" on any API or network
        error (the error is surfaced to the UI via st.error).
    """
    url = "https://api.perplexity.ai/chat/completions"
    headers = {
        "Authorization": f"Bearer {PERPLEXITY_API_TOKEN}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": "sonar-pro",
        "messages": [
            {"role": "system", "content": "Be precise and concise. Use current web data where available."},
            {"role": "user", "content": prompt},
        ],
        "max_tokens": max_tokens,
        "temperature": 0.2,
        "top_p": 0.9,
        "top_k": 0,
        "stream": False,
        "presence_penalty": 0,
        "frequency_penalty": 1,
        "web_search_options": {"search_context_size": "medium"},
    }
    if use_response_format:
        payload["response_format"] = RESPONSE_FORMAT
    try:
        response = requests.post(url, headers=headers, json=payload, timeout=timeout)
    except requests.RequestException as exc:
        # Bug fix: network failures previously propagated and crashed the app.
        st.error("Error with Perplexity API call: " + str(exc))
        return ""
    if response.status_code == 200:
        result_json = response.json()
        return result_json.get("choices", [{}])[0].get("message", {}).get("content", "").strip()
    st.error("Error with Perplexity API call: " + str(response.text))
    return ""
| # Alternate Perplexity helper function using the "sonar-pro" model. | |
def generate_with_perplexity_alternate(prompt, max_tokens=150, use_response_format=True, timeout=30):
    """Alternate Perplexity chat-completions call ("sonar-pro" model).

    Identical to generate_with_perplexity except for the system message and
    error label.  NOTE(review): no call sites appear in this file — confirm it
    is still needed.

    Args:
        prompt: User prompt to send.
        max_tokens: Cap on the generated completion length.
        use_response_format: When True, attach the module-level RESPONSE_FORMAT
            payload (structured output — higher API tiers only).
        timeout: Seconds to wait for the HTTP response; previously unbounded.

    Returns:
        The assistant message content (stripped), or "" on any error.
    """
    url = "https://api.perplexity.ai/chat/completions"
    headers = {
        "Authorization": f"Bearer {PERPLEXITY_API_TOKEN}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": "sonar-pro",
        "messages": [
            {"role": "system", "content": "Be precise and concise. Use current web data and provide an accurate answer."},
            {"role": "user", "content": prompt},
        ],
        "max_tokens": max_tokens,
        "temperature": 0.2,
        "top_p": 0.9,
        "top_k": 0,
        "stream": False,
        "presence_penalty": 0,
        "frequency_penalty": 1,
        "web_search_options": {"search_context_size": "medium"},
    }
    if use_response_format:
        payload["response_format"] = RESPONSE_FORMAT
    try:
        response = requests.post(url, headers=headers, json=payload, timeout=timeout)
    except requests.RequestException as exc:
        # Bug fix: network failures previously propagated and crashed the app.
        st.error("Error with alternate Perplexity API call: " + str(exc))
        return ""
    if response.status_code == 200:
        result_json = response.json()
        return result_json.get("choices", [{}])[0].get("message", {}).get("content", "").strip()
    st.error("Error with alternate Perplexity API call: " + str(response.text))
    return ""
| # --- New Perplexity Helper for Personalization using "sonar-reasoning" --- | |
| def generate_with_perplexity_personalization(prompt, max_tokens=150, use_response_format=False): | |
| # Note: For "sonar-reasoning", structured output is disabled (use_response_format=False) | |
| url = "https://api.perplexity.ai/chat/completions" | |
| headers = { | |
| "Authorization": f"Bearer {PERPLEXITY_API_TOKEN}", | |
| "Content-Type": "application/json" | |
| } | |
| payload = { | |
| "model": "sonar-pro", # Changed model for personalization | |
| "messages": [ | |
| {"role": "system", "content": "You are an expert copywriter for email personalization. Use current web data to provide thoughtful, insightful, and elegant personalization."}, | |
| {"role": "user", "content": prompt} | |
| ], | |
| "max_tokens": max_tokens, | |
| "temperature": 0.2, | |
| "top_p": 0.9, | |
| "top_k": 0, | |
| "stream": False, | |
| "presence_penalty": 0, | |
| "frequency_penalty": 1, | |
| "web_search_options": { | |
| "search_context_size": "medium" | |
| } | |
| } | |
| # For sonar-reasoning, we do not include the response_format to avoid the error. | |
| if use_response_format: | |
| payload["response_format"] = RESPONSE_FORMAT | |
| response = requests.post(url, headers=headers, json=payload) | |
| if response.status_code == 200: | |
| result_json = response.json() | |
| return result_json.get("choices", [{}])[0].get("message", {}).get("content", "").strip() | |
| else: | |
| st.error("Error with personalization Perplexity API call: " + str(response.text)) | |
| return "" | |
# === Global Column Indices (1-indexed) ===
GROUPING_COL = 7         # "Grouping"
EMPLOYMENT_COL = 8       # "Employment"
PERSONALIZATION_COL = 9  # "Introduction"
EMAIL_COL = 10           # "Email"

# === Email Service API Keys ===
# SECURITY: these keys were previously hardcoded in source.  They can now be
# supplied via environment variables; the literals remain only as
# backward-compatible fallbacks and should be rotated and removed.
HUNTER_API_KEY = os.getenv("HUNTER_API_KEY", '2a92c08aac936de7dbdc3ee4f49a4476f80884a1')
ZERBOUNCE_API_KEY = os.getenv("ZEROBOUNCE_API_KEY", '6bc5b3d878b840dda21fbdb0487bf3c7')
FINDYMAIL_API_KEY = os.getenv("FINDYMAIL_API_KEY", 'Jmka5GeB8ObJo58UqOX9xoZ3R65aMDxBp7Ffm7XP965e8709')
| # --- Email Fetch & Verification Helpers --- | |
def fetch_email_hunter(row, timeout=30):
    """Look up an email address via Hunter's email-finder for one sheet row.

    Args:
        row: Dict with "First_Name", "Last_Name" and "Domain" keys.
        timeout: Seconds to wait for the HTTP response; previously no timeout
            was set, so a stalled request could hang the app.

    Returns:
        The discovered email address, or None on a non-200 response or a
        network error.
    """
    params = {
        "first_name": row.get("First_Name", ""),
        "last_name": row.get("Last_Name", ""),
        "domain": row.get("Domain", ""),
        "api_key": HUNTER_API_KEY,
    }
    try:
        resp = requests.get("https://api.hunter.io/v2/email-finder", params=params, timeout=timeout)
    except requests.RequestException:
        # Best-effort lookup: treat a network failure like a miss.
        return None
    if resp.status_code == 200:
        return resp.json().get("data", {}).get("email")
    return None
def fetch_email_findymail(row, timeout=30):
    """Look up an email address via Findymail's name search for one sheet row.

    Args:
        row: Dict with "First_Name", "Last_Name" and "Domain" keys.
        timeout: Seconds to wait for the HTTP response; previously unbounded.

    Returns:
        The discovered email address, or None on error (the error is logged
        to the Streamlit UI).
    """
    headers = {
        "Authorization": f"Bearer {FINDYMAIL_API_KEY}",
        "Content-Type": "application/json",
    }
    data = {
        "name": f"{row.get('First_Name','')} {row.get('Last_Name','')}",
        "domain": row.get("Domain", ""),
    }
    try:
        response = requests.post("https://app.findymail.com/api/search/name", headers=headers, json=data, timeout=timeout)
    except requests.RequestException as exc:
        # Bug fix: network failures previously propagated and crashed the app.
        st.write(f"Findymail network error: {exc}")
        return None
    if response.status_code == 200:
        return response.json().get("contact", {}).get("email")
    else:
        st.write(f"Findymail error (status {response.status_code}): {response.text}")
        return None
def verify_email_zb(email, timeout=30):
    """Return True iff ZeroBounce reports *email* with status 'valid'.

    ``timeout`` bounds the HTTP call (previously unbounded); any network
    error counts as not-valid.
    """
    params = {"email": email, "api_key": ZERBOUNCE_API_KEY}
    try:
        resp = requests.get("https://api.zerobounce.net/v2/validate", params=params, timeout=timeout)
    except requests.RequestException:
        return False
    return resp.status_code == 200 and resp.json().get("status") == "valid"
def fetch_verified_email(row, method):
    """Find an email for *row* with the chosen provider(s), keeping it only
    if ZeroBounce verifies it.

    ``method`` is 'hunter_only', 'findymail_only', or anything else for the
    Hunter-then-Findymail waterfall.  Returns the verified address or None.
    """
    def _verified(candidate):
        # Keep a candidate only when present and ZeroBounce-valid.
        return candidate if candidate and verify_email_zb(candidate) else None

    if method == 'hunter_only':
        return _verified(fetch_email_hunter(row))
    if method == 'findymail_only':
        return _verified(fetch_email_findymail(row))
    # Waterfall: Hunter first, then Findymail as a fallback.
    for fetcher in (fetch_email_hunter, fetch_email_findymail):
        email = _verified(fetcher(row))
        if email:
            return email
    return None
def process_emails(worksheet, method='waterfall_both'):
    """For every data row, fetch and verify an email and write hits into
    EMAIL_COL; misses are logged to the Streamlit UI."""
    for record in get_rows_as_dict(worksheet):
        found = fetch_verified_email(record, method)
        if found:
            worksheet.update_cell(record['row_num'], EMAIL_COL, found)
        else:
            st.write(f"Row {record['row_num']} does not have a valid email ({method}).")
    st.write("Email verification step complete.")
# NOTE(review): a row-deletion helper used to live here; `delete_row` is
# referenced later in this file but is not defined anywhere in it.
# === Google Sheets Setup Helper Functions ===
def get_worksheet(sheet_id):
    """Open the spreadsheet *sheet_id* using the service account in
    credentials1.json and return its first worksheet."""
    scope = [
        "https://spreadsheets.google.com/feeds",
        "https://www.googleapis.com/auth/drive",
    ]
    creds = ServiceAccountCredentials.from_json_keyfile_name("credentials1.json", scope)
    return gspread.authorize(creds).open_by_key(sheet_id).sheet1
def ensure_headers(worksheet):
    """Make row 1 of *worksheet* exactly the expected column headers,
    rewriting the row (and logging) if anything differs."""
    expected = [
        "First_Name", "Last_Name", "LinkedIn", "Company Name", "Designation",
        "Domain", "Grouping", "Employment", "Introduction", "Email",
    ]
    # A plain inequality check covers the length test as well.
    if worksheet.row_values(1) != expected:
        worksheet.update("A1", [expected])
        st.write("Headers updated to:", expected)
def get_rows_as_dict(worksheet):
    """Return all data rows as dicts keyed by the header row.

    Short rows are padded with "" so every header has a value; each dict also
    carries its 1-indexed sheet position under 'row_num' (data starts at 2).
    Returns [] for an empty sheet.
    """
    values = worksheet.get_all_values()
    if not values:
        return []
    headers = values[0]
    width = len(headers)
    records = []
    for sheet_row, cells in enumerate(values[1:], start=2):
        padded = cells[:width] + [""] * (width - len(cells))
        record = dict(zip(headers, padded))
        record['row_num'] = sheet_row
        records.append(record)
    return records
def remove_rows_by_column_value(worksheet, column_name, value_to_remove):
    # Log every row whose ``column_name`` cell equals ``value_to_remove``.
    # Rows are walked bottom-up so that deletions (if re-enabled) would not
    # shift the row numbers of rows still to be processed.
    rows = get_rows_as_dict(worksheet)
    headers = worksheet.row_values(1)
    if column_name not in headers:
        return
    for row in reversed(rows):
        if row.get(column_name, "").strip() == value_to_remove:
            st.write(f"Deleting row {row['row_num']} because {column_name} == {value_to_remove}")
            # NOTE(review): the deletion call is commented out, so this
            # function currently only logs matches; the delete_row helper it
            # referenced is not defined in this file.
            #delete_row(worksheet, row['row_num'])
def process_domain_removal(main_ws, new_ws):
    """Delete rows from *main_ws* whose Domain appears in *new_ws*.

    Domains are read from column 6 of the new sheet (header row skipped when
    present).  Matching rows in the main sheet are deleted bottom-up so that
    earlier deletions do not shift the row numbers of rows still pending.
    """
    new_values = new_ws.get_all_values()
    if not new_values:
        st.write("No data found in the new sheet for domain removal.")
        return
    # Skip the header row if the new sheet has one.
    if "Domain" in new_values[0]:
        new_values = new_values[1:]
    domains_to_remove = set()
    for row in new_values:
        if len(row) >= 6:
            domain_val = row[5].strip()
            if domain_val:
                domains_to_remove.add(domain_val)
    st.write("Domains from new sheet to remove:", domains_to_remove)
    main_rows = get_rows_as_dict(main_ws)
    for row in reversed(main_rows):
        main_domain = row.get("Domain", "").strip()
        if main_domain in domains_to_remove:
            st.write(f"Deleting row {row['row_num']} with domain: {main_domain}")
            # Bug fix: the original called delete_row(), which is not defined
            # anywhere in this file (NameError at runtime).  Use gspread's
            # Worksheet.delete_rows directly instead.
            main_ws.delete_rows(row['row_num'])
    st.write("Pre - Reachouts removal complete.")
| # --- Grouping Step --- | |
def get_grouping(first_name, last_name, company_name, designation, domain, linkedin):
    """Ask Perplexity to bucket a contact into Group 0/1/2 by AI/ML relevance.

    Returns the model's raw reply lower-cased (expected to be exactly
    'group 0', 'group 1' or 'group 2').
    """
    prompt = (
        f"Based on the following details about {first_name} {last_name}:\n"
        f"Company Name: {company_name}\n"
        f"Designation: {designation}\n"
        f"Domain: {domain}\n"
        f"LinkedIn: {linkedin}\n\n"
        "Please classify this person as follows:\n"
        "• Reply with only 'Group 1' if the person is directly involved in AI, Data, Data scaling, Data Annotation, AI Agents, Artificial Intelligence, ML, Machine Learning investments.\n"
        "• Reply with only 'Group 2' if the person is associated with AI, Data, Data scaling, Data Annotation, AI Agents, Artificial Intelligence, ML, Machine Learning related topics (e.g., mentioned in keywords or linked with relevant articles) but not directly involved in an investment.\n"
        "• Reply with only 'Group 0' if and only if they are not associated with the any of AI, Data, Data scaling, Data Annotation, AI Agents, Artificial Intelligence, ML, Machine Learning or related sectors in mentions or investments.\n"
        "Do not include any additional text."
    )
    reply = generate_with_perplexity(prompt, max_tokens=30, use_response_format=False)
    return reply.lower()
def process_grouping(worksheet):
    """Classify each data row into a group and write it to GROUPING_COL.

    Rows with a blank first or last name are skipped.
    """
    for record in get_rows_as_dict(worksheet):
        fname = record.get("First_Name", "").strip()
        lname = record.get("Last_Name", "").strip()
        if not (fname and lname):
            continue
        group = get_grouping(
            fname,
            lname,
            record.get('Company Name', '').strip(),
            record.get('Designation', '').strip(),
            record.get('Domain', '').strip(),
            record.get('LinkedIn', '').strip(),
        )
        worksheet.update_cell(record['row_num'], GROUPING_COL, group)
    st.write("Grouping step complete.")
| # --- Employment ("Still in Company") Step --- | |
def get_employment_status(first_name, last_name, company_name, linkedin):
    """Ask Perplexity whether the person still works at *company_name*.

    The prompt requests a bare 'Yes' or 'No'; the raw reply is logged to the
    UI and returned as-is.
    """
    prompt = (
        f"Using current web data, check if {first_name} {last_name} (LinkedIn ID: {linkedin}) is currently employed or associated with {company_name}. "
        "Answer exactly with a single word: 'Yes' or 'No'. Do not include any additional text."
    )
    answer = generate_with_perplexity(prompt, max_tokens=20, use_response_format=False)
    st.write("Primary employment verification response:", answer)
    return answer
def process_employment(worksheet):
    """Write an employment-verification answer into EMPLOYMENT_COL for each
    data row; rows with a blank first or last name are skipped."""
    for record in get_rows_as_dict(worksheet):
        fname = record.get("First_Name", "").strip()
        lname = record.get("Last_Name", "").strip()
        if not (fname and lname):
            continue
        status = get_employment_status(
            fname,
            lname,
            record.get("Company Name", "").strip(),
            record.get("LinkedIn", "").strip(),
        )
        worksheet.update_cell(record['row_num'], EMPLOYMENT_COL, status)
    st.write("Employment step complete.")
| # --- Personalization Step --- | |
def get_introduction(first_name, last_name, designation, company_name, linkedin):
    """Generate a one-line personalized cold-email opener for this contact
    via the Perplexity personalization helper.  Returns the raw model reply
    (empty string on API error, per the helper's behavior)."""
    # The prompt text below is behavior-critical and reproduced verbatim,
    # including its original phrasing.
    intro_prompt = (
        "You are a skilled writer in crafting personalized introductions for emails targeting executives. "
        "I am writing cold emails to folks for the fundraising of my company - but do not mention this anywhere. "
        "Give me a crisp ice-breaker opener, under 20 words total, for the fundraising outreach.\n\n"
        "Part 1:\n"
        "Write a single, elegant, thought-provoking introductory sentence."
        "then quote 'this is what you said' - and I completely resonate/agree. "
        f"This person {first_name} {last_name} is a {designation} at {company_name} and their LinkedIn is {linkedin} (Optional). "
        "Refer to the most recent web data/articles/news/ linkedin posts if possible; focus on a perspective, foundational insight, or a philosophy they’ve expressed. "
        "Avoid direct flattery or buzzwords. Rather than just agreeing, subtly expand or challenge their view—adding a unique, aligned yet distinct take. "
        "Think of it as a conversational nudge that adds value and sparks curiosity. Draw on broad themes like industry trends, history, or entrepreneurial parallels. "
        "Make it human, intriguing, witty, and effortless. Use natural language that doesn’t sound like it was generated by AI. "
        "Do not use em dashes or colons. No quotation marks, no greetings, no names, no over-intellectualizing. Keep it simple, clean, and elegant.\n\n"
        "Do not add any tags other than the personalised intro itself. No em dashes or hyphens. Do not give me a suggestion as to how i must do it or the process. Instead just give me the personalised intro itselfNo tags no references, no source tages, just the personalization line after researching the entire web"
    )
    # use_response_format defaults to False in this helper (no structured output).
    return generate_with_perplexity_personalization(intro_prompt, max_tokens=300)
def process_personalization(worksheet):
    """Generate an ice-breaker line for every data row and store it in
    PERSONALIZATION_COL.  Every row is processed, even with blank names."""
    for record in get_rows_as_dict(worksheet):
        opener = get_introduction(
            record.get("First_Name", "").strip(),
            record.get("Last_Name", "").strip(),
            record.get("Designation", "").strip(),
            record.get("Company Name", "").strip(),
            record.get("LinkedIn", "").strip(),
        )
        worksheet.update_cell(record['row_num'], PERSONALIZATION_COL, opener)
    st.write("Personalization step complete.")
# === Streamlit App UI ===
st.title("Vaomi AI - Lead Processing App")
# Hide Streamlit's default chrome (menu, footer, header) for a cleaner look.
st.markdown(
    """
    <style>
    #MainMenu {visibility: hidden;}
    footer {visibility: hidden;}
    header {visibility: hidden;}
    .css-1rs6os.edgvbvh3 {display: none;}
    </style>
    """,
    unsafe_allow_html=True
)

sheet_id_input = st.text_input("Enter Main Google Sheet ID", value="")
new_sheet_id_input = st.text_input("Enter the Sheet ID for Pre - Reachouts Removal (Optional)", value="")

# Bug fix: main_ws was previously left unbound when no sheet ID was entered or
# the connection failed, so pressing either button raised a NameError.  Both
# worksheet handles are now initialized to None and guarded below.
main_ws = None
if sheet_id_input:
    try:
        main_ws = get_worksheet(sheet_id_input)
        ensure_headers(main_ws)
        st.success("Connected to Main Sheet!")
    except Exception as e:
        st.error(f"Error connecting to Main Sheet: {e}")
else:
    st.warning("Please enter a valid Main Sheet ID.")

new_ws = None
if new_sheet_id_input:
    try:
        new_ws = get_worksheet(new_sheet_id_input)
        st.success("Connected to New Sheet: Pre - Reachouts!")
    except Exception as e:
        st.error(f"Error connecting to New Sheet: {e}")

st.markdown("---")
# --- Automatic mode: run every processing step in sequence ---
if st.button("Process Data (Automatic)"):
    if sheet_id_input and main_ws:
        st.write("Starting full automatic processing...")
        if new_ws:
            st.write("Step 0: Pre - Reachout Removal")
            process_domain_removal(main_ws, new_ws)
        st.write("Step 1: Grouping basis relevance")
        process_grouping(main_ws)
        st.write("Step 2: Employment Verification")
        process_employment(main_ws)
        st.write("Step 3: Email Fetch & Verification")
        process_emails(main_ws, method='waterfall_both')
        st.write("Step 4: Personalization")
        process_personalization(main_ws)
        st.success("Automatic processing complete.")
    else:
        st.error("Please enter a valid Main Sheet ID.")

st.markdown("---")
# --- Manual mode: run only the selected steps ---
st.subheader("Manual Mode")
st.write("Select one or more functions to run:")
manual_domain = False
if new_sheet_id_input:
    manual_domain = st.checkbox("Run Pre - Reachouts Removal")
manual_grouping = st.checkbox("Run Grouping basis relevance")
manual_employment = st.checkbox("Run Employment Verification")
manual_emails = st.checkbox("Run Email Fetch & Verification")
manual_personalization = st.checkbox("Run Personalization")
email_method = 'waterfall_both'
if manual_emails:
    email_method = st.radio(
        "Choose Email Fetch Method:",
        ["hunter_only", "findymail_only", "waterfall_both"],
        index=2
    )
if st.button("Run Selected Functions"):
    if sheet_id_input and main_ws:
        if manual_domain and new_ws:
            st.write("Running Pre - Reachouts Removal...")
            process_domain_removal(main_ws, new_ws)
        if manual_grouping:
            st.write("Running Grouping basis relevance...")
            process_grouping(main_ws)
        if manual_employment:
            st.write("Running Employment Verification...")
            process_employment(main_ws)
        if manual_emails:
            st.write("Running Email Fetch & Verification...")
            process_emails(main_ws, method=email_method)
        if manual_personalization:
            st.write("Running Personalization...")
            process_personalization(main_ws)
        st.success("Selected functions processed.")
    else:
        st.error("Please enter a valid Main Sheet ID.")