Update pages/model.py
Browse files- pages/model.py +72 -78
pages/model.py
CHANGED
|
@@ -1,92 +1,86 @@
|
|
| 1 |
-
import streamlit as st
|
| 2 |
import os
|
| 3 |
-
|
| 4 |
-
from langchain_huggingface import HuggingFaceEndpoint
|
| 5 |
from langchain_community.document_loaders import UnstructuredPDFLoader
|
| 6 |
-
|
|
|
|
| 7 |
|
| 8 |
os.environ["HF_TOKEN"]=os.getenv('HF_Token')
|
| 9 |
os.environ["HUGGINGFACEHUB_API_KEY"]=os.getenv('HF_Token')
|
| 10 |
|
| 11 |
-
|
| 12 |
-
repo_id="
|
| 13 |
provider="nebius",
|
| 14 |
-
temperature=0.
|
| 15 |
-
max_new_tokens=
|
| 16 |
task="conversational"
|
| 17 |
)
|
|
|
|
| 18 |
llm = ChatHuggingFace(
|
| 19 |
-
llm=
|
| 20 |
-
repo_id="
|
| 21 |
-
provider="nebius"
|
|
|
|
|
|
|
|
|
|
| 22 |
)
|
| 23 |
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 45 |
else:
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
{resume_text}
|
| 60 |
-
"""
|
| 61 |
-
with st.spinner("Extracting from Resume..."):
|
| 62 |
-
resume_output = llm.invoke(resume_prompt)
|
| 63 |
-
st.success("Resume data extracted!")
|
| 64 |
-
st.subheader("π§Ύ Resume Summary")
|
| 65 |
-
st.write(resume_output)
|
| 66 |
-
|
| 67 |
-
# Process JD (file or text)
|
| 68 |
-
if jd_file or jd_text_input:
|
| 69 |
if jd_file:
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
jd_output = llm.invoke(jd_prompt)
|
| 88 |
-
st.success("Job description data extracted!")
|
| 89 |
-
st.subheader("π Job Description Summary")
|
| 90 |
-
st.write(jd_output)
|
| 91 |
-
else:
|
| 92 |
-
st.warning("Please upload or paste a job description.")
|
|
|
|
|
|
|
| 1 |
import os
import tempfile

import streamlit as st
from langchain_community.document_loaders import UnstructuredPDFLoader
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace

# Mirror the deployment secret 'HF_Token' into the env vars the Hugging Face /
# LangChain clients actually read. Guarded: os.environ values must be str, so
# assigning os.getenv(...) directly (as before) raises TypeError at import
# time whenever the secret is missing. Failing softly here lets the page load
# and surface a readable auth error instead of crashing on startup.
_hf_token = os.getenv("HF_Token")
if _hf_token:
    os.environ["HF_TOKEN"] = _hf_token
    os.environ["HUGGINGFACEHUB_API_KEY"] = _hf_token
|
| 9 |
|
| 10 |
+
# Low-level text-generation endpoint. All model/sampling configuration
# (repo, provider, temperature, token budget, task) belongs here.
llm_skeleton = HuggingFaceEndpoint(
    repo_id="deepseek-ai/DeepSeek-R1",
    provider="nebius",
    temperature=0.7,
    max_new_tokens=150,
    task="conversational",
)

# Chat-message wrapper around the endpoint. ChatHuggingFace only takes the
# wrapped `llm` (plus an optional model_id); the previous code also passed
# repo_id/provider/temperature/max_new_tokens/task, which are not fields of
# ChatHuggingFace and are rejected by its pydantic model.
llm = ChatHuggingFace(llm=llm_skeleton)
|
| 26 |
|
| 27 |
+
def extract_text_from_pdf(uploaded_file):
    """Extract the text content of an uploaded PDF.

    The Streamlit UploadedFile is spooled to a real temporary file because
    UnstructuredPDFLoader only accepts a filesystem path.

    Returns the extracted text of the first loaded document, or "" on any
    failure (the error is shown in the UI rather than raised).
    """
    tmp_path = None
    try:
        # delete=False so the loader can reopen the path after the `with`
        # closes it (required on Windows); we remove it ourselves below —
        # the previous version leaked one temp file per upload.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp_file:
            tmp_file.write(uploaded_file.read())
            tmp_path = tmp_file.name
        loader = UnstructuredPDFLoader(tmp_path)
        return loader.load()[0].page_content
    except Exception as e:
        st.error(f"Error reading PDF file: {e}")
        return ""
    finally:
        # Always clean up the spooled copy, success or failure.
        if tmp_path and os.path.exists(tmp_path):
            os.unlink(tmp_path)
|
| 37 |
+
|
| 38 |
+
def extract_text_from_txt(uploaded_file):
    """Return the UTF-8 decoded contents of an uploaded text file.

    On any read/decode failure the error is surfaced in the Streamlit UI
    and "" is returned, so callers can simply truth-test the result.
    """
    try:
        raw_bytes = uploaded_file.read()
        decoded = raw_bytes.decode("utf-8")
    except Exception as e:
        st.error(f"Error reading text file: {e}")
        return ""
    return decoded
|
| 44 |
+
|
| 45 |
+
# --- Page layout -----------------------------------------------------------
# NOTE(review): the stray 'π' characters in the UI strings below look like
# mojibake of an emoji from a bad encoding round-trip — confirm the intended
# glyphs. They are runtime strings, so they are left untouched here.
st.set_page_config(page_title="Resume & JD Extractor", layout="centered")
st.title("π Resume & Job Description Extractor")

# Inputs: resume is PDF-only; the job description may be a PDF, a TXT file,
# or pasted free text.
resume_file = st.file_uploader("Upload Resume (PDF)", type=["pdf"])
jd_file = st.file_uploader("Upload Job Description (PDF or TXT)", type=["pdf", "txt"])
jd_text = st.text_area("Or paste Job Description text here")

if st.button("π Extract Data"):
    # Require at least one of: resume upload, JD upload, pasted JD text.
    if not resume_file and not (jd_file or jd_text):
        st.warning("Please upload at least one file (Resume or JD) or paste JD text.")
    else:
        # --- Resume branch -------------------------------------------------
        if resume_file:
            resume_text = extract_text_from_pdf(resume_file)
            # extract_text_from_pdf returns "" on failure, so a falsy result
            # means extraction failed (the helper already showed the error).
            if resume_text:
                resume_prompt = (
                    "Extract the following from the resume:\n"
                    "1. Name\n2. Education\n3. Experience\n4. Skills\n5. Project Names and Results\n\n"
                    f"Resume:\n{resume_text}"
                )
                resume_data = llm.invoke(resume_prompt)
                st.subheader("π Extracted Resume Data")
                # NOTE(review): llm.invoke returns a message object, not a
                # plain string — rendered via the f-string it shows the
                # object's repr; confirm whether .content was intended.
                st.markdown(f"<div style='background-color:#f9f9f9;padding:10px;border-radius:8px;'>{resume_data}</div>", unsafe_allow_html=True)

        # --- Job-description branch ----------------------------------------
        # An uploaded JD file takes precedence over pasted text; the file's
        # MIME type decides which extraction helper is used.
        jd_text_extracted = ""
        if jd_file:
            if jd_file.type == "application/pdf":
                jd_text_extracted = extract_text_from_pdf(jd_file)
            elif jd_file.type == "text/plain":
                jd_text_extracted = extract_text_from_txt(jd_file)
        elif jd_text:
            jd_text_extracted = jd_text

        # Empty string here means no JD was provided or extraction failed;
        # in that case this section is silently skipped.
        if jd_text_extracted:
            jd_prompt = (
                "Extract the following from the job description:\n"
                "1. Job ID\n2. Company Name\n3. Role\n4. Experience Required\n5. Skills Required\n"
                "6. Education Required\n7. Location\n\n"
                f"Job Description:\n{jd_text_extracted}"
            )
            jd_data = llm.invoke(jd_prompt)
            st.subheader("π Extracted Job Description Data")
            st.markdown(f"<div style='background-color:#f9f9f9;padding:10px;border-radius:8px;'>{jd_data}</div>", unsafe_allow_html=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|