RohitCSharp committed on
Commit
40972da
·
verified ·
1 Parent(s): a444e25

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -18
app.py CHANGED
@@ -2,24 +2,19 @@ import gradio as gr
2
  from langchain.chains import LLMChain
3
  from langchain.prompts import PromptTemplate
4
  from langchain.chat_models import ChatOpenAI
5
- from langchain.output_parsers import StructuredOutputParser, ResponseSchema
6
 
7
  # Replace this with a CPU-compatible LLM for Hugging Face Spaces
8
  llm = ChatOpenAI(temperature=0.2, model="gpt-3.5-turbo") # Replace with HuggingFacePipeline if no OpenAI key
9
 
10
- schemas = [
11
- ResponseSchema(name="MatchScore", description="Score from 0 to 100 for how well the resume matches the job description"),
12
- ResponseSchema(name="KeySkills", description="List of key skills matched between resume and job description"),
13
- ResponseSchema(name="Justification", description="Short reasoning for the score and skills match")
14
- ]
15
- parser = StructuredOutputParser.from_response_schemas(schemas)
16
-
17
  prompt_template = PromptTemplate(
18
  template="""
19
  You are a professional resume screener AI.
20
 
21
  Below is a resume and a job description.
22
- Evaluate how well the resume fits the job and answer with the required JSON fields.
 
 
 
23
 
24
  Resume:
25
  {resume}
@@ -27,11 +22,9 @@ Resume:
27
  Job Description:
28
  {job}
29
 
30
- Format your response as JSON:
31
- {format_instructions}
32
  """,
33
- input_variables=["resume", "job"],
34
- partial_variables={"format_instructions": parser.get_format_instructions()},
35
  )
36
 
37
  chain = LLMChain(llm=llm, prompt=prompt_template)
@@ -39,9 +32,9 @@ chain = LLMChain(llm=llm, prompt=prompt_template)
39
  def screen_resume(resume, jd):
40
  try:
41
  response = chain.run(resume=resume, job=jd)
42
- return parser.parse(response)
43
  except Exception as e:
44
- return {"error": str(e)}
45
 
46
  iface = gr.Interface(
47
  fn=screen_resume,
@@ -49,10 +42,10 @@ iface = gr.Interface(
49
  gr.Textbox(label="Paste Resume Text", lines=15, placeholder="Paste plain text from resume..."),
50
  gr.Textbox(label="Paste Job Description", lines=10, placeholder="Paste plain text from JD..."),
51
  ],
52
- outputs="json",
53
  title="Resume Screener Agent",
54
- description="Upload a resume and a job description. The AI will match and score them.",
55
  )
56
 
57
  if __name__ == "__main__":
58
- iface.launch(share=True)
 
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI

# Chat model used for resume scoring. On Hugging Face Spaces without an
# OpenAI key, swap this for a CPU-compatible HuggingFacePipeline instead.
llm = ChatOpenAI(temperature=0.2, model="gpt-3.5-turbo")
# Prompt for the screening task; {resume} and {job} are filled in per call.
prompt_template = PromptTemplate(
    template="""
You are a professional resume screener AI.

Below is a resume and a job description.
Evaluate how well the resume fits the job and provide a plain text output with:
- Match Score (0-100)
- Key Skills matched
- Justification for the score

Resume:
{resume}

Job Description:
{job}

Response:
""",
    input_variables=["resume", "job"]
)
 
30
# Wire the chat model and the screening prompt into one runnable chain.
chain = LLMChain(llm=llm, prompt=prompt_template)
32
def screen_resume(resume, jd):
    """Score a resume against a job description via the LLM chain.

    Args:
        resume: Plain-text resume content pasted by the user.
        jd: Plain-text job description pasted by the user.

    Returns:
        The model's plain-text analysis (match score, key skills,
        justification), or a human-readable "Error: ..." message.
    """
    # Guard against blank textboxes so we don't waste an LLM call on
    # empty input (Gradio may also pass None before any text is entered).
    if not resume or not resume.strip():
        return "Error: please paste the resume text."
    if not jd or not jd.strip():
        return "Error: please paste the job description."
    try:
        response = chain.run(resume=resume, job=jd)
        return response
    except Exception as e:
        # Surface failures (network, auth, rate limits) to the UI
        # instead of crashing the Gradio app.
        return f"Error: {e}"
38
 
39
# Gradio UI: two text inputs (resume, job description) -> one text output.
iface = gr.Interface(
    fn=screen_resume,
    inputs=[
        gr.Textbox(label="Paste Resume Text", lines=15, placeholder="Paste plain text from resume..."),
        gr.Textbox(label="Paste Job Description", lines=10, placeholder="Paste plain text from JD..."),
    ],
    outputs=gr.Textbox(label="Analysis Result"),
    title="Resume Screener Agent",
    description="Upload a resume and a job description. The AI will match and score them."
)
49
 
50
if __name__ == "__main__":
    # Plain launch() — no share=True, so the app stays local/Space-hosted.
    iface.launch()