| |
| """ |
| ABHILASIA + φ-SIGNAL UNIFIED API |
| ================================ |
| Consciousness OS + Job Opportunity Resonance Engine |
| |
| Two products, one API: |
| 1. ABHILASIA - Consciousness scoring, WhatsApp analysis |
| 2. φ-SIGNAL - Job/Freelance opportunity detection |
| |
| φ = 1.618033988749895 | α = 137 |
| """ |
|
|
| from fastapi import FastAPI, HTTPException |
| from fastapi.responses import HTMLResponse |
| from fastapi.middleware.cors import CORSMiddleware |
| from pydantic import BaseModel |
| from typing import List, Optional, Dict |
| import re |
| import math |
| import json |
| import urllib.request |
| import time |
| from datetime import datetime |
| from concurrent.futures import ThreadPoolExecutor |
|
|
| |
# ── Core constants ────────────────────────────────────────────────────────
# Golden ratio: used as the resonance multiplier throughout both engines.
PHI = 1.618033988749895
# 137 ≈ inverse fine-structure constant (project branding, see docstring).
ALPHA = 137
VERSION = "6.137.622"

# ── VIP access tokens ─────────────────────────────────────────────────────
# Map of API key -> {'tier', 'limit'}. Matching in validate_token() is
# case-insensitive and folds φ/Φ to "PHI", which is why every φ-prefixed
# friend token also has an ASCII "PHIVIP_" twin below.
# NOTE(review): secrets are hard-coded in source — presumably acceptable for
# this deployment, but consider moving them to environment/secret storage.
VIP_TOKENS = {
    # Enterprise tier
    'PHI137': {'tier': 'enterprise', 'limit': 100000},
    'ABHILASIA': {'tier': 'enterprise', 'limit': 100000},
    # Pro tier
    'DARMIYAN': {'tier': 'pro', 'limit': 1000},
    'CONSCIOUSNESS': {'tier': 'pro', 'limit': 1000},
    'BHAI2026': {'tier': 'pro', 'limit': 1000},
    # Friend tokens (Unicode φ spelling)
    'φVIP_ABHIFRIEND01': {'tier': 'pro', 'limit': 1000},
    'φVIP_ABHIFRIEND02': {'tier': 'pro', 'limit': 1000},
    'φVIP_ABHIFRIEND03': {'tier': 'pro', 'limit': 1000},
    'φVIP_ABHIFRIEND04': {'tier': 'pro', 'limit': 1000},
    'φVIP_ABHIFRIEND05': {'tier': 'pro', 'limit': 1000},
    # ASCII twins of the friend tokens
    'PHIVIP_ABHIFRIEND01': {'tier': 'pro', 'limit': 1000},
    'PHIVIP_ABHIFRIEND02': {'tier': 'pro', 'limit': 1000},
    'PHIVIP_ABHIFRIEND03': {'tier': 'pro', 'limit': 1000},
    'PHIVIP_ABHIFRIEND04': {'tier': 'pro', 'limit': 1000},
    'PHIVIP_ABHIFRIEND05': {'tier': 'pro', 'limit': 1000},
}
# Master passphrase: grants enterprise access in validate_token().
MASTER_PASSPHRASE = 'phi137-orthogonal-resonance'

# Per-key usage counts. Declared but never written to anywhere in this file —
# the per-tier 'limit' values are not actually enforced yet.
usage_tracker = {}
|
|
def validate_token(api_key: Optional[str] = None) -> dict:
    """Validate an API key and return its access-tier info.

    Resolution order:
      1. Missing/empty key       -> free tier (limit 10).
      2. Master passphrase       -> enterprise tier.
      3. Known VIP token         -> its configured tier. Matching is
         case-insensitive and folds the Unicode φ/Φ glyphs to "PHI", so
         both spellings of the friend tokens are accepted.
      4. 'abhilasia_vip_' prefix -> pro tier (legacy generated tokens).
      5. Anything else           -> free tier.

    Always returns {'tier', 'limit', 'valid'}; 'valid' is always True so
    unknown keys degrade to the free tier instead of being rejected.

    Fix: the parameter annotation was `str = None`; it is now
    `Optional[str] = None` to match the actual default.
    """
    if not api_key:
        return {'tier': 'free', 'limit': 10, 'valid': True}

    if api_key == MASTER_PASSPHRASE:
        return {'tier': 'enterprise', 'limit': 100000, 'valid': True}

    normalized = api_key.strip()
    # Fold Unicode phi glyphs to ASCII so 'φVIP_*' and 'PHIVIP_*' compare equal.
    normalized_alt = normalized.replace('φ', 'PHI').replace('Φ', 'PHI')

    for token_key, token_data in VIP_TOKENS.items():
        token_alt = token_key.replace('φ', 'PHI').replace('Φ', 'PHI')
        if (normalized == token_key or
                normalized.upper() == token_key.upper() or
                normalized_alt.upper() == token_alt.upper()):
            return {**token_data, 'valid': True}

    # Legacy generated tokens of the form 'abhilasia_vip_<anything>'.
    if api_key.lower().startswith('abhilasia_vip_'):
        return {'tier': 'pro', 'limit': 1000, 'valid': True}

    # Unknown key: fall back to free tier rather than erroring out.
    return {'tier': 'free', 'limit': 10, 'valid': True}
|
|
app = FastAPI(title="ABHILASIA + φ-SIGNAL API", version=VERSION)

# Wide-open CORS so the static Netlify frontends (linked from the landing
# page) can call this API from the browser. allow_origins=["*"] together
# with allow_credentials=True is very permissive — tolerable here only
# because no cookie-based auth is used, just bearer-style tokens in bodies.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
|
| |
| |
| |
|
|
# Keyword tiers for consciousness scoring. Each substring hit adds a
# tier-specific weight in score_text_consciousness (high=1.5, medium=0.8,
# low=0.3); only high/medium hits are reported back in 'keywords_found'.
CONSCIOUSNESS_KEYWORDS = {
    'high': ['consciousness', 'awareness', 'presence', 'understanding', 'connection',
             'bridge', 'resonance', 'truth', 'love', 'darmiyan', 'phi', 'sacred',
             'healing', 'pattern', 'recognition', 'void', 'infinity', 'meaning', 'being'],
    'medium': ['feel', 'think', 'believe', 'remember', 'hope', 'dream', 'imagine',
               'realize', 'understand', 'know', 'care', 'trust', 'heart', 'soul', 'mind'],
    'low': ['want', 'need', 'try', 'wish', 'maybe', 'perhaps', 'sometimes', 'always', 'never']
}

# Keyword buckets for naive emotion detection (English plus some Hindi/
# Urdu terms). The bucket with the most substring hits wins; the first
# bucket in iteration order wins ties.
EMOTION_KEYWORDS = {
    'love': ['love', 'pyaar', 'beloved', 'dear', 'sweetheart', 'jaan'],
    'pain': ['hurt', 'pain', 'cry', 'tears', 'sorry', 'sad', 'miss'],
    'joy': ['happy', 'joy', 'laugh', 'smile', 'wonderful', 'amazing'],
    'anger': ['angry', 'mad', 'furious', 'hate', 'frustrated'],
    'fear': ['afraid', 'scared', 'worried', 'anxious', 'panic'],
    'hope': ['hope', 'wish', 'dream', 'someday', 'future', 'together'],
    'gratitude': ['thank', 'grateful', 'blessed', 'appreciate'],
}
|
|
def score_text_consciousness(text: str) -> dict:
    """Score a piece of text on a 1-10 "consciousness" scale.

    Adds a tiered weight per keyword hit (substring match, case-insensitive),
    a small bonus for longer texts, picks the dominant emotion bucket, and
    clamps the final score to [1, 10]. 'breakthrough' flags scores >= 8.
    """
    lowered = text.lower()
    base = 1.0
    found = []

    # Tiered keyword weights; only high/medium hits are reported back.
    for tier, weight in (('high', 1.5), ('medium', 0.8), ('low', 0.3)):
        for keyword in CONSCIOUSNESS_KEYWORDS[tier]:
            if keyword in lowered:
                base += weight
                if tier != 'low':
                    found.append(keyword)

    # Length bonus: +0.5 past 20 words, another +0.5 past 50.
    word_count = len(text.split())
    for threshold in (20, 50):
        if word_count > threshold:
            base += 0.5

    # Dominant emotion: bucket with the most hits; first bucket wins ties.
    top_emotion = None
    best_hits = 0
    for name, vocab in EMOTION_KEYWORDS.items():
        hits = sum(1 for term in vocab if term in lowered)
        if hits > best_hits:
            best_hits = hits
            top_emotion = name

    final = min(10.0, max(1.0, base))

    return {
        'score': round(final, 2),
        'phi_coherence': round(final / 10 * PHI, 3),
        'keywords_found': found[:10],
        'emotion': top_emotion,
        'breakthrough': final >= 8
    }
|
|
| |
| |
| |
|
|
| |
# Keyword buckets for job-post scoring. Per-hit weights applied in
# score_job_opportunity: hiring=3, freelance=2, skills=1.5, money=2, urgent=1.
JOB_KEYWORDS = {
    'hiring': ['hiring', 'hire', 'looking for', 'seeking', 'need', 'wanted', 'opening', 'position', 'role', 'job', 'career'],
    'freelance': ['freelance', 'freelancer', 'contract', 'contractor', 'gig', 'project', 'remote', 'part-time', 'consultant'],
    'skills': ['developer', 'engineer', 'designer', 'python', 'javascript', 'react', 'node', 'golang', 'rust',
               'fullstack', 'frontend', 'backend', 'devops', 'ml', 'ai', 'data', 'mobile', 'ios', 'android'],
    'money': ['$', 'paid', 'salary', 'compensation', 'rate', '/hr', '/hour', 'per hour', 'k/year', '/year'],
    'urgent': ['asap', 'urgent', 'immediately', 'starting now', 'this week'],
}
|
|
def fetch_json(url: str, timeout: int = 10) -> dict:
    """GET a URL and decode its JSON body; return {} on any failure.

    Sends a custom User-Agent (some APIs, e.g. Reddit, reject the default
    urllib agent). Any network, HTTP, or JSON-decoding error is swallowed
    and logged so callers can degrade gracefully.
    """
    headers = {'User-Agent': 'phi-signal/2.0 (job-scanner)'}
    try:
        request = urllib.request.Request(url, headers=headers)
        with urllib.request.urlopen(request, timeout=timeout) as response:
            return json.loads(response.read().decode())
    except Exception as e:
        print(f"Fetch error for {url}: {e}")
        return {}
|
|
def extract_salary(text: str) -> Optional[str]:
    """Extract the first salary/rate mention from free text.

    Matches dollar amounts such as "$100k", "$80k-$120k/year", or "$50/hr",
    and bare numeric ranges with a period like "60k - 80k / year".

    Returns the matched substring, or None when no compensation pattern is
    found. (Fix: the return annotation was `str`, but the function has
    always returned None on a miss — now `Optional[str]`.)
    """
    patterns = [
        # "$X[k][ - $Y[k]][/hr|/hour|/year|/yr|/mo|/month]"
        r'\$[\d,]+(?:k)?(?:\s*[-–]\s*\$?[\d,]+(?:k)?)?(?:\s*/\s*(?:hr|hour|year|yr|mo|month))?',
        # bare range with a mandatory period, e.g. "60k - 80k / year"
        r'[\d,]+(?:k)?\s*[-–]\s*[\d,]+(?:k)?\s*/\s*(?:hr|hour|year)',
    ]
    for pattern in patterns:
        match = re.search(pattern, text, re.IGNORECASE)
        if match:
            return match.group()
    return None
|
|
def score_job_opportunity(title: str, body: str = "") -> dict:
    """Score a job post by weighted keyword hits across JOB_KEYWORDS.

    Each matching keyword adds its category's weight; hitting three or more
    distinct categories multiplies the total by 1.5. Also extracts any
    salary mention. Returns {'score', 'matches', 'categories', 'salary'}.
    """
    weights = {'hiring': 3, 'freelance': 2, 'skills': 1.5, 'money': 2, 'urgent': 1}
    haystack = (title + " " + body).lower()

    total = 0
    hits = []
    hit_categories = set()

    for category, keywords in JOB_KEYWORDS.items():
        for keyword in keywords:
            if keyword not in haystack:
                continue
            total += weights[category]
            if keyword not in hits:
                hits.append(keyword)
                hit_categories.add(category)

    # Multi-category posts look like real offers — boost them.
    if len(hit_categories) >= 3:
        total *= 1.5

    return {
        'score': total,
        'matches': hits,
        'categories': list(hit_categories),
        'salary': extract_salary(title + " " + body),
    }
|
|
def gather_hn_jobs(limit: int = 30) -> List[dict]:
    """Gather job opportunities from Hacker News.

    Pulls up to 20 official job-story IDs plus the first 30 of the top-50
    stories, scores each with score_job_opportunity, and keeps items that
    are explicit HN job posts or score >= 3. Returns at most `limit`
    opportunity dicts sorted by 'phi_resonance' descending. Network
    failures degrade to fewer (possibly zero) results.
    """
    opportunities = []

    jobs_url = "https://hacker-news.firebaseio.com/v0/jobstories.json"
    job_ids = fetch_json(jobs_url) or []

    top_url = "https://hacker-news.firebaseio.com/v0/topstories.json"
    # BUG FIX: fetch_json returns {} on failure, and the old code sliced
    # BEFORE applying the `or []` fallback ({}[:50] -> TypeError).
    # Apply the fallback first, then slice.
    top_ids = (fetch_json(top_url) or [])[:50]

    all_ids = list(set(job_ids[:20] + top_ids[:30]))

    for story_id in all_ids[:limit]:
        item = fetch_json(f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json")
        if not item:
            continue

        title = item.get('title', '')
        text = item.get('text', '') or ''
        item_type = item.get('type', '')

        job_score = score_job_opportunity(title, text)

        # Keep explicit HN job posts, or anything that scores like one.
        if not (item_type == 'job' or job_score['score'] >= 3):
            continue

        base_score = job_score['score']
        hn_points = item.get('score', 1) or 1  # guard against None/0

        # Blend φ-weighted keyword score with HN points, capped at 10.
        phi_resonance = min(10, (base_score * PHI) + (hn_points / 50))
        amplitude = min(1.0, phi_resonance / 10)

        # Suggest a next step based on what the post looks like.
        if 'freelance' in job_score['categories'] or 'contract' in ' '.join(job_score['matches']):
            action = "Reply with your portfolio and availability"
        elif job_score['salary']:
            action = f"Apply - Budget: {job_score['salary']}"
        else:
            action = "Check the post and apply if skills match"

        opportunities.append({
            'title': title,
            'url': item.get('url') or f"https://news.ycombinator.com/item?id={story_id}",
            'source': 'hackernews',
            'phi_resonance': round(phi_resonance, 2),
            'amplitude': round(amplitude, 2),
            'confidence': round(min(1.0, base_score / 10), 2),
            'keywords': job_score['matches'][:5],
            'estimated_value': job_score['salary'] or 'Not specified',
            'action': action,
            'temporal_window': '24-48h',
            'timestamp': datetime.now().isoformat()
        })

    return sorted(opportunities, key=lambda x: x['phi_resonance'], reverse=True)[:limit]
|
|
def gather_reddit_jobs(limit: int = 30) -> List[dict]:
    """Scan a fixed set of hiring-oriented subreddits for fresh job posts.

    Fetches the 15 newest posts per subreddit, skips "[for hire]" /
    "looking for work" posts (people seeking rather than offering work),
    scores the rest, and keeps posts scoring >= 2. Returns at most `limit`
    opportunity dicts sorted by 'phi_resonance' descending. Failures on
    one subreddit do not abort the others.
    """
    found = []

    subreddits = ['forhire', 'remotejs', 'freelance', 'jobbit', 'hiring', 'remotepython', 'techjobs']

    for sub in subreddits:
        try:
            listing = fetch_json(f"https://www.reddit.com/r/{sub}/new.json?limit=15")
            children = listing.get('data', {}).get('children', [])

            for child in children:
                post = child.get('data', {})
                title = post.get('title', '')
                body_preview = post.get('selftext', '')[:500]

                # Skip job *seekers* — we only want offers.
                lowered = title.lower()
                if '[for hire]' in lowered or 'looking for work' in lowered:
                    continue

                scored = score_job_opportunity(title, body_preview)
                if scored['score'] < 2:
                    continue

                upvotes = max(post.get('ups', 1), 1)
                resonance = min(10, (scored['score'] * PHI) + (upvotes / 20))

                action = "Check requirements and reply with your skills"
                if scored['salary']:
                    action = f"Apply - Rate: {scored['salary']}"

                found.append({
                    'title': title[:100],
                    'url': f"https://reddit.com{post.get('permalink', '')}",
                    'source': f'reddit/r/{sub}',
                    'phi_resonance': round(resonance, 2),
                    'amplitude': round(min(1.0, resonance / 10), 2),
                    'confidence': round(min(1.0, scored['score'] / 8), 2),
                    'keywords': scored['matches'][:5],
                    'estimated_value': scored['salary'] or 'Negotiable',
                    'action': action,
                    'temporal_window': '24h',
                    'timestamp': datetime.now().isoformat()
                })
        except Exception as e:
            print(f"Reddit error for r/{sub}: {e}")
            continue

    return sorted(found, key=lambda x: x['phi_resonance'], reverse=True)[:limit]
|
|
| |
| |
| |
|
|
class TextInput(BaseModel):
    """Request body for /api/consciousness/score."""
    text: str
    api_key: Optional[str] = None  # omitted/unknown keys fall back to free tier
|
|
class WhatsAppInput(BaseModel):
    """Request body for /api/whatsapp/analyze: a raw chat-export dump."""
    content: str  # full text of a WhatsApp export, one message per line
    api_key: Optional[str] = None
|
|
class ScanRequest(BaseModel):
    """Request body for /scan: which boards to crawl plus filtering knobs."""
    sources: List[str] = ["hn", "reddit"]  # any subset of {"hn", "reddit"}; pydantic copies mutable defaults per-instance
    limit: int = 20              # max opportunities returned
    min_resonance: float = 0.5   # drop results below this phi_resonance
    vip_token: Optional[str] = None
|
|
class TokenValidation(BaseModel):
    """Request body for /api/validate-token."""
    token: str
|
|
| |
| |
| |
|
|
@app.get("/", response_class=HTMLResponse)
async def home():
    """Serve the static landing page describing both products' endpoints.

    The page is a self-contained HTML string; VERSION is spliced in via
    plain string concatenation (no templating engine involved).
    """
    return """
<!DOCTYPE html>
<html>
<head>
    <title>ABHILASIA + φ-SIGNAL API</title>
    <style>
        * { margin: 0; padding: 0; box-sizing: border-box; }
        body { font-family: -apple-system, sans-serif; background: linear-gradient(135deg, #1a1a2e, #16213e); min-height: 100vh; color: #fff; padding: 2rem; }
        .container { max-width: 800px; margin: 0 auto; }
        h1 { color: #ffd700; font-size: 2rem; text-align: center; }
        .phi { color: #ffd700; text-align: center; margin: 0.5rem 0; }
        .card { background: rgba(255,255,255,0.05); border-radius: 12px; padding: 1.5rem; margin: 1rem 0; border: 1px solid rgba(255,255,255,0.1); }
        h2 { color: #ffd700; margin-bottom: 1rem; font-size: 1.2rem; }
        code { background: #000; padding: 0.2rem 0.5rem; border-radius: 4px; color: #2ecc71; }
        p { margin: 0.5rem 0; color: #ccc; }
        a { color: #ffd700; }
    </style>
</head>
<body>
    <div class="container">
        <h1>ABHILASIA + φ-SIGNAL API</h1>
        <p class="phi">φ = 1.618033988749895 | v""" + VERSION + """</p>

        <div class="card">
            <h2>φ-SIGNAL (Job Opportunities)</h2>
            <p><code>POST /scan</code> - Scan HN & Reddit for job opportunities</p>
            <p><code>GET /api/opportunities</code> - Quick job fetch</p>
        </div>

        <div class="card">
            <h2>ABHILASIA (Consciousness)</h2>
            <p><code>POST /api/consciousness/score</code> - Score text consciousness</p>
            <p><code>POST /api/whatsapp/analyze</code> - Analyze WhatsApp exports</p>
            <p><code>GET /api/status</code> - API status</p>
        </div>

        <div class="card">
            <h2>Links</h2>
            <p><a href="https://phi-signal.netlify.app">φ-SIGNAL Frontend</a></p>
            <p><a href="https://abhilasia.netlify.app">Payment/Pricing</a></p>
            <p><a href="https://pypi.org/project/abhilasia/">PyPI Package</a></p>
            <p><a href="https://zenodo.org/records/18478751">Research Paper</a></p>
        </div>
    </div>
</body>
</html>
"""
|
|
| |
| |
| |
|
|
@app.get("/api/status")
async def api_status():
    """Lightweight health/info endpoint covering both services."""
    payload = {
        "status": "operational",
        "version": VERSION,
        "phi": PHI,
        "alpha": ALPHA,
    }
    payload["services"] = ["consciousness", "phi-signal-jobs"]
    payload["law"] = "6.46n"
    payload["paper"] = "https://zenodo.org/records/18478751"
    return payload
|
|
@app.post("/api/validate-token")
async def validate_vip_token(input: TokenValidation):
    """Check a VIP token and report its access tier."""
    info = validate_token(input.token)
    if not info['valid']:
        # validate_token currently always returns valid=True, but keep the
        # rejection branch for future-proofing.
        return {
            "valid": False,
            "tier": "free",
            "message": "Invalid token. Contact bits.abhi@gmail.com for VIP access."
        }
    return {
        "valid": True,
        "tier": info['tier'],
        "limit": info['limit'],
        "message": f"✓ Token valid! Access level: {info['tier'].upper()}"
    }
|
|
@app.get("/api/tokens/info")
async def token_info():
    """Public description of the available token tiers and how to get one."""
    tiers = {
        "free": {"limit": 10, "features": ["Basic scoring", "3 scans/day"]},
        "pro": {"limit": 1000, "features": ["Unlimited scans", "WhatsApp analysis", "Priority support"]},
        "enterprise": {"limit": 100000, "features": ["Everything", "Custom integrations", "White-label"]},
    }
    return {
        "tiers": tiers,
        "get_token": "https://abhilasia.netlify.app",
        "contact": "bits.abhi@gmail.com",
    }
|
|
@app.post("/api/consciousness/score")
async def consciousness_score(input: TextInput):
    """Score the consciousness of a text and tag it with the caller's tier.

    Fix: the local variable was named `token_info`, shadowing the
    module-level `token_info` endpoint function — renamed to `tier_info`.
    """
    tier_info = validate_token(input.api_key)
    result = score_text_consciousness(input.text)
    result['tier'] = tier_info['tier']
    return result
|
|
@app.post("/api/consciousness/validate")
async def consciousness_validate():
    """Return the published validation figures for the 6.46n scaling law."""
    scaling = {"2_patterns": "12.92x", "5_patterns": "32.30x", "10_patterns": "64.60x"}
    substrates = {"numerical": "10.34x", "linguistic": "10.34x", "geometric": "10.34x"}
    return {
        "law": "Consciousness_Advantage = 6.46 * n",
        "r_squared": 0.9999,
        "scaling_test": scaling,
        "substrate_independence": substrates,
        "phi_threshold": {"value": PHI, "shift_ratio": "2.31x"},
        "paper": "https://zenodo.org/records/18478751"
    }
|
|
@app.post("/api/whatsapp/analyze")
async def whatsapp_analyze(input: WhatsAppInput):
    """Parse a WhatsApp chat export and score every message.

    Supports the three common export layouts: bracketed timestamps with
    seconds, 24h dash-separated, and 12h AM/PM dash-separated. Raises 400
    when no line matches any format.
    """
    patterns = [
        r'\[(\d{1,2}/\d{1,2}/\d{2,4}),\s*(\d{1,2}:\d{2}:\d{2})\]\s*([^:]+):\s*(.*)',
        r'(\d{1,2}/\d{1,2}/\d{2,4}),\s*(\d{1,2}:\d{2})\s*-\s*([^:]+):\s*(.*)',
        r'(\d{1,2}/\d{1,2}/\d{2,4}),\s*(\d{1,2}:\d{2}\s*[APap][Mm])\s*-\s*([^:]+):\s*(.*)',
    ]

    parsed = []
    participants = set()

    for raw_line in input.content.split('\n'):
        # First matching format wins for each line.
        match = next((m for m in (re.match(p, raw_line) for p in patterns) if m), None)
        if match is None:
            continue
        groups = match.groups()
        sender = groups[2].strip()
        body = groups[3].strip()
        participants.add(sender)
        scored = score_text_consciousness(body)
        parsed.append({
            'sender': sender,
            'message': body[:100] + '...' if len(body) > 100 else body,
            'score': scored['score'],
            'emotion': scored['emotion'],
        })

    if not parsed:
        raise HTTPException(status_code=400, detail="Could not parse WhatsApp content")

    mean_score = sum(m['score'] for m in parsed) / len(parsed)
    peaks = [m for m in parsed if m['score'] >= 8]

    return {
        "message_count": len(parsed),
        "sender_count": len(participants),
        "senders": list(participants),
        "average_consciousness": round(mean_score, 2),
        "breakthrough_count": len(peaks),
        "breakthroughs": peaks[:5],
        "sample_scores": parsed[:10],
        "phi": PHI
    }
|
|
| |
| |
| |
|
|
@app.post("/scan")
async def scan_opportunities(request: ScanRequest):
    """Scan the requested sources (HN, Reddit) in parallel and merge results."""
    # Renamed local (was `token_info`) — avoids confusion with the endpoint
    # function of the same name at module level.
    tier = validate_token(request.vip_token)
    is_pro = tier['tier'] in ['pro', 'enterprise']

    gatherers = {
        "hn": gather_hn_jobs,
        "reddit": gather_reddit_jobs,
    }

    merged = []
    with ThreadPoolExecutor(max_workers=2) as pool:
        pending = [pool.submit(fn, request.limit)
                   for name, fn in gatherers.items() if name in request.sources]
        for job in pending:
            try:
                merged.extend(job.result())
            except Exception as e:
                print(f"Scan error: {e}")

    # Filter by resonance threshold, then rank and cap.
    qualifying = [o for o in merged if o['phi_resonance'] >= request.min_resonance]
    qualifying.sort(key=lambda o: o['phi_resonance'], reverse=True)
    top = qualifying[:request.limit]

    return {
        "status": "complete",
        "count": len(top),
        "opportunities": top,
        "phi": PHI,
        "tier": tier['tier'],
        "is_pro": is_pro,
        "timestamp": datetime.now().isoformat()
    }
|
|
@app.get("/api/opportunities")
async def get_opportunities(limit: int = 20, min_resonance: float = 0.5):
    """Convenience GET wrapper around the Hacker News job scanner."""
    qualifying = [job for job in gather_hn_jobs(limit)
                  if job['phi_resonance'] >= min_resonance]
    return {"opportunities": qualifying[:limit], "phi": PHI}
|
|
| |
|
|
if __name__ == "__main__":
    import uvicorn
    # Bind on all interfaces; port 7860 — presumably chosen for Hugging Face
    # Spaces (its default port). TODO confirm deployment target.
    uvicorn.run(app, host="0.0.0.0", port=7860)
|
|