# Hugging Face Space: "Job Fit Assessment" — a Streamlit app.
import os

import streamlit as st
from dotenv import load_dotenv
from huggingface_hub import InferenceClient

# Load environment variables from a local .env file, if one exists.
load_dotenv()
api_key = os.getenv("HF_API_KEY")

# Fail loudly and early: without a key the inference call below would fail
# later with an opaque authentication error.
if not api_key:
    st.warning("HF_API_KEY is not set; configure it before assessing a job fit.")

# Client for the Hugging Face Inference API.
client = InferenceClient(api_key=api_key)
# ---- Streamlit UI: page header and the form fields the assessment uses ----
st.title("Job Fit Assessment")
st.write("Find out if your resume is a strong fit for the job you're applying to.")

# Free-text details about the job and the candidate, collected from the user.
job_title = st.text_input("Job Title")
job_description = st.text_area("Job Description")
resume_description = st.text_area("Resume Description")
educational_qualifications = st.text_area("Educational Qualifications")
if st.button("Assess Job Fit"):
    # Combine all form fields into a single prompt for the model.
    input_text = (
        f"Based on the following:\n"
        f"- Job Title: {job_title}\n"
        f"- Job Description: {job_description}\n"
        f"- Resume Description: {resume_description}\n"
        f"- Educational Qualifications: {educational_qualifications}\n\n"
        "Evaluate if this candidate's profile is a strong fit for the job."
    )
    messages = [{"role": "user", "content": input_text}]

    result = ""
    try:
        stream = client.chat.completions.create(
            model="meta-llama/Llama-3.2-1B-Instruct",
            messages=messages,
            max_tokens=500,
            stream=True,
        )
        # Accumulate the streamed response. In OpenAI-style streams the
        # final chunk(s) can carry delta.content == None, which would raise
        # TypeError on concatenation — skip those chunks.
        for chunk in stream:
            content = chunk.choices[0].delta.content
            if content:
                result += content
        # Display the full result at once.
        st.write("Job Fit Assessment Result:")
        st.write(result)
    except Exception as e:
        # Surface API/network failures in the UI instead of crashing the app.
        st.error(f"Error: {e}")