import streamlit as st
import pickle
import re
import nltk

# Fetch the NLTK data this app's preprocessing relies on.
# BUG FIX: original called `nltk.stopwords('stopwords')`, which raises
# AttributeError — the correct call is nltk.download('stopwords').
nltk.download('punkt')
nltk.download('stopwords')

# Load the trained classifier and its fitted TF-IDF vectorizer.
# Context managers close the file handles (the original
# `pickle.load(open(...))` form leaked them).
# NOTE(review): pickle.load on an untrusted .pkl executes arbitrary code —
# only ship model files you produced yourself.
with open('model.pkl', 'rb') as model_file:
    model = pickle.load(model_file)
with open('tfidf.pkl', 'rb') as tfidf_file:
    tfidf = pickle.load(tfidf_file)
|
|
def clean_resume(resume_text):
    """Strip noise from raw resume text before vectorization.

    Removes URLs, retweet/cc markers, hashtags, @-mentions, most ASCII
    punctuation, and non-ASCII characters, replacing each with a space.

    BUG FIX: the original body read from an undefined name ``txt`` instead
    of the ``resume_text`` parameter, so every call raised NameError.
    Patterns are now raw strings (``r'...'``) so ``\\S`` is not an invalid
    escape sequence.

    Args:
        resume_text: Raw resume text as a single string.

    Returns:
        The cleaned text; may contain runs of multiple spaces.
    """
    clean_text = re.sub(r'http\S+\s', ' ', resume_text)   # URLs (followed by whitespace)
    clean_text = re.sub(r'RT|cc', ' ', clean_text)        # retweet / cc markers
    clean_text = re.sub(r'#\S+\s', ' ', clean_text)       # hashtags
    clean_text = re.sub(r'@\S+\s', ' ', clean_text)       # @-mentions
    # Remove common ASCII punctuation (kept identical to the original set).
    clean_text = re.sub('[%s]' % re.escape("""!#$%&'()*+-,":/\;<=>?_[]^{}~`"""), ' ', clean_text)
    clean_text = re.sub(r'[^\x00-\x7f]', ' ', clean_text)  # non-ASCII characters
    return clean_text
|
|
def main():
    """Streamlit entry point: upload a resume, predict its job category.

    Reads an uploaded .txt/.pdf file as bytes, decodes it (UTF-8 with a
    latin-1 fallback, which never fails), cleans the text, vectorizes it
    with the module-level ``tfidf``, classifies it with ``model``, and
    displays the mapped category name.
    """
    st.title("Resume Screening Application")
    uploaded_file = st.file_uploader("Upload Resume Here", type=['txt', 'pdf'])
    if uploaded_file is None:
        return  # nothing uploaded yet

    # BUG FIX: read *before* the try block — in the original, a failure in
    # read() itself would leave `resume_bytes` undefined and the bare
    # `except:` fallback would then raise NameError. The except is also
    # narrowed to UnicodeDecodeError instead of swallowing everything.
    resume_bytes = uploaded_file.read()
    try:
        resume_text = resume_bytes.decode('utf-8')
    except UnicodeDecodeError:
        # latin-1 maps every byte value, so this fallback cannot fail.
        resume_text = resume_bytes.decode('latin-1')

    cleaned_resume = clean_resume(resume_text)
    input_features = tfidf.transform([cleaned_resume])
    prediction_id = model.predict(input_features)[0]
    # (Removed the stray debug `st.write(prediction_id)` — the category
    # name below is the intended user-facing output.)

    # Maps the classifier's integer labels to human-readable categories.
    # NOTE(review): these ids are presumed to match the label encoding used
    # at training time — verify against the training notebook.
    category_mapping = {
        15: "Java Developer",
        23: "Testing",
        8: "DevOps Engineer",
        20: "Python Developer",
        24: "Web Designing",
        12: "HR",
        13: "Hadoop",
        3: "Blockchain",
        10: "ETL Developer",
        18: "Operations Manager",
        6: "Data Science",
        22: "Sales",
        16: "Mechanical Engineer",
        1: "Arts",
        7: "Database",
        11: "Electrical Engineering",
        14: "Health and fitness",
        19: "PMO",
        4: "Business Analyst",
        9: "DotNet Developer",
        2: "Automation Testing",
        17: "Network Security Engineer",
        21: "SAP Developer",
        5: "Civil Engineer",
        0: "Advocate",
    }

    category_name = category_mapping.get(prediction_id, 'Unknown')
    st.write("THE PREDICTED CATEGORY IS: ", category_name)
|
|
# Run the app only when executed directly (not when imported).
if __name__ == '__main__':
    main()