Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -24,49 +24,59 @@ def generate_roast(resume_text):
|
|
| 24 |
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
|
| 25 |
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
|
| 26 |
|
| 27 |
-
# Define the prompt
|
| 28 |
prompt_text = "Roast this resume:\n\n"
|
| 29 |
|
| 30 |
# Tokenize the prompt
|
| 31 |
prompt_tokenized = tokenizer(prompt_text, return_tensors="pt")
|
| 32 |
prompt_tokens = prompt_tokenized['input_ids'].shape[1]
|
| 33 |
|
| 34 |
-
# Calculate remaining tokens for resume text
|
| 35 |
max_resume_tokens = 2048 - prompt_tokens
|
| 36 |
|
| 37 |
-
# Tokenize and
|
| 38 |
resume_tokenized = tokenizer(resume_text, truncation=True, max_length=max_resume_tokens, return_tensors="pt")
|
| 39 |
|
| 40 |
-
# Decode the truncated resume back into a string
|
| 41 |
truncated_resume_text = tokenizer.decode(resume_tokenized['input_ids'][0], skip_special_tokens=True)
|
| 42 |
|
| 43 |
-
# Combine
|
| 44 |
final_prompt = f"{prompt_text}{truncated_resume_text}\n\nRoast:"
|
| 45 |
|
| 46 |
-
# Generate
|
| 47 |
generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
|
| 48 |
-
|
| 49 |
-
# Ensure generated roast doesn't exceed token limit
|
| 50 |
roast = generator(final_prompt, max_new_tokens=50, num_return_sequences=1)
|
| 51 |
|
| 52 |
return roast[0]['generated_text']
|
| 53 |
|
| 54 |
-
#
|
| 55 |
-
def roast_resume(file):
|
| 56 |
-
if file
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 60 |
else:
|
| 61 |
-
return "
|
| 62 |
|
|
|
|
| 63 |
roast = generate_roast(resume_text)
|
| 64 |
return roast
|
| 65 |
|
| 66 |
-
#
|
| 67 |
-
interface = gr.Interface(
|
| 68 |
-
|
| 69 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 70 |
|
| 71 |
-
# Launch
|
| 72 |
interface.launch()
|
|
|
|
def generate_roast(resume_text):
    """Generate a short AI "roast" of *resume_text* with GPT-Neo 1.3B.

    The resume is truncated so that prompt + resume + suffix + the generated
    tokens all fit within the model's 2048-token context window.

    Args:
        resume_text: Plain-text resume content to be roasted.

    Returns:
        The full generated text (prompt included) as a string.
    """
    # NOTE(review): the tokenizer/model are loaded on every call, which is
    # slow; consider caching them at module level if latency matters.
    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
    model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")

    # Define the prompt
    prompt_text = "Roast this resume:\n\n"

    # Tokenize the prompt
    prompt_tokenized = tokenizer(prompt_text, return_tensors="pt")
    prompt_tokens = prompt_tokenized['input_ids'].shape[1]

    # Budget the context window: reserve room for the "\n\nRoast:" suffix and
    # the up-to-50 generated tokens as well, so the total sequence never
    # exceeds GPT-Neo's 2048-token limit (the old "2048 - prompt_tokens"
    # budget could overflow the context during generation).
    suffix_tokens = tokenizer("\n\nRoast:", return_tensors="pt")['input_ids'].shape[1]
    max_new = 50
    max_resume_tokens = 2048 - prompt_tokens - suffix_tokens - max_new

    # Tokenize and truncate resume text to the remaining budget
    resume_tokenized = tokenizer(resume_text, truncation=True, max_length=max_resume_tokens, return_tensors="pt")

    # Decode the truncated resume text back into a string
    truncated_resume_text = tokenizer.decode(resume_tokenized['input_ids'][0], skip_special_tokens=True)

    # Combine prompt and truncated resume text
    final_prompt = f"{prompt_text}{truncated_resume_text}\n\nRoast:"

    # Generate roast
    generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
    roast = generator(final_prompt, max_new_tokens=max_new, num_return_sequences=1)

    return roast[0]['generated_text']
| 51 |
|
| 52 |
# Function to handle file uploads and extract text from resume files
def roast_resume(file=None, resume_text=None):
    """Roast a resume supplied either as an uploaded file or as pasted text.

    Args:
        file: Uploaded file object (PDF or DOCX); takes precedence over
            pasted text when both are given.
        resume_text: Raw resume text pasted by the user.

    Returns:
        The generated roast string, or a human-readable error message when
        no usable resume was supplied.
    """
    if file:
        # Dispatch on the file extension; compare case-insensitively so
        # uploads like "Resume.PDF" are accepted too.
        suffix = file.name.lower()
        if suffix.endswith('.pdf'):
            resume_text = extract_text_from_pdf(file)
        elif suffix.endswith('.docx'):
            resume_text = extract_text_from_docx(file)
        else:
            return "Unsupported file format. Please upload a PDF or DOCX file."
    elif not resume_text:
        # Neither a file nor pasted text was given.  (The old code had a
        # dead "elif resume_text: pass" branch here; removed.)
        return "No resume provided."

    # Generate the roast based on extracted or pasted resume text
    return generate_roast(resume_text)
|
| 71 |
|
| 72 |
# Gradio interface: accept either an uploaded resume file or pasted text.
interface = gr.Interface(
    fn=roast_resume,
    inputs=[
        gr.File(label="Upload Resume (PDF/DOCX)"),
        gr.Textbox(label="Or Paste Your Resume"),
    ],
    outputs="text",
    title="Resume Roaster",
    description="Upload your resume in PDF/DOCX format or paste your resume text, and let the AI roast it!",
)

# Launch Gradio app
interface.launch()
|