Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
from bs4 import BeautifulSoup
|
| 3 |
+
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
|
| 4 |
+
import gradio as gr
|
| 5 |
+
|
| 6 |
+
# Function to scrape LinkedIn profile
|
| 7 |
+
def scrape_linkedin_profile(url):
    """Fetch a LinkedIn profile page and extract a few fields.

    Args:
        url: Public LinkedIn profile URL.

    Returns:
        dict with 'name', 'headline', 'about' keys on success,
        or a str error message on failure (callers branch on isinstance).
    """
    # NOTE(review): LinkedIn aggressively blocks anonymous scraping; a plain
    # requests.get usually gets a login redirect or status 999. A browser-like
    # User-Agent helps slightly, but an official API/authed session is the
    # reliable path — confirm this is acceptable for the use case.
    headers = {"User-Agent": "Mozilla/5.0 (compatible; ProfileRoaster/1.0)"}
    try:
        # Timeout prevents the Gradio worker from hanging forever on a dead host.
        response = requests.get(url, headers=headers, timeout=15)
    except requests.RequestException as exc:
        return f"Failed to fetch LinkedIn page: {exc}"

    if response.status_code != 200:
        return f"Failed to fetch LinkedIn page, status code: {response.status_code}"

    soup = BeautifulSoup(response.content, 'html.parser')
    profile = {}

    # Example scraping logic (adjust based on actual LinkedIn page structure).
    # Every find() can return None on an unexpected page layout, so guard each
    # lookup instead of letting .text raise AttributeError.
    title_tag = soup.find('title')
    profile['name'] = title_tag.text.strip() if title_tag else "Unknown"

    headline_tag = soup.find('div', {'class': 'ph5'})
    profile['headline'] = headline_tag.text.strip() if headline_tag else "No Headline"

    about_tag = soup.find('section', {'id': 'about'})
    profile['about'] = about_tag.text.strip() if about_tag else "No About Section"

    return profile
|
| 21 |
+
|
| 22 |
+
# Function to generate roast using a Gen AI model
|
| 23 |
+
def _get_roast_generator():
    """Lazily build and cache the text-generation pipeline.

    Loading a 1.3B-parameter model takes tens of seconds and significant RAM;
    doing it once per process instead of once per request is essential.
    """
    if not hasattr(_get_roast_generator, "_cached"):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
        model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
        _get_roast_generator._cached = pipeline(
            'text-generation', model=model, tokenizer=tokenizer
        )
    return _get_roast_generator._cached


def generate_roast(profile_data):
    """Generate a humorous roast of a LinkedIn profile with GPT-Neo.

    Args:
        profile_data: dict with 'name', 'headline', 'about' keys
            (missing keys are tolerated and rendered as 'N/A').

    Returns:
        str: the full generated text (prompt included, matching the
        pipeline's default output).
    """
    generator = _get_roast_generator()

    # Format profile into a prompt; .get() keeps a partial scrape from
    # raising KeyError here.
    prompt = (
        "Roast this LinkedIn profile:\n"
        f"Name: {profile_data.get('name', 'N/A')}\n"
        f"Headline: {profile_data.get('headline', 'N/A')}\n"
        f"About: {profile_data.get('about', 'N/A')}\n\nRoast:"
    )

    # Generate roast (max_length bounds prompt + completion tokens).
    roast = generator(prompt, max_length=150, num_return_sequences=1)

    return roast[0]['generated_text']
|
| 37 |
+
|
| 38 |
+
# Gradio interface function
|
| 39 |
+
def roast_linkedin(url):
    """Gradio handler: scrape the given profile URL and return a roast.

    If scraping fails, the scraper's error string is passed straight
    through to the UI instead of a roast.
    """
    scraped = scrape_linkedin_profile(url)

    # Scraper returns a str error message on failure — surface it as-is.
    if not isinstance(scraped, dict):
        return scraped

    return generate_roast(scraped)
|
| 47 |
+
|
| 48 |
+
# Create Gradio interface
|
| 49 |
+
# Create Gradio interface wiring the roast handler to a simple text-in/text-out UI.
interface = gr.Interface(
    fn=roast_linkedin,
    inputs="text",
    outputs="text",
    title="LinkedIn Profile Roaster",
    description="Enter the LinkedIn profile URL and get a humorous roast generated by AI!",
)

# Launch only when run as a script, so importing this module (e.g. for tests
# or embedding) does not start a server as a side effect.
if __name__ == "__main__":
    interface.launch()
|