shekkari21 Claude Opus 4.6 committed on
Commit
f3adfbf
·
1 Parent(s): 1a1a2a1

Add HF Spaces deployment and fix broken app

Browse files

- Fix LangChain imports (langchain_classic for ConversationChain/Memory)
- Fix variable scoping bugs and duplicate methods
- Make JD optional, support text input for JD
- Add app.py + requirements.txt + README metadata for HF Spaces
- Add .streamlit config at project root

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

Files changed (4) hide show
  1. Code+Folder/src/resume_suggestions.py +80 -71
  2. README.md +28 -0
  3. app.py +254 -0
  4. requirements.txt +15 -0
Code+Folder/src/resume_suggestions.py CHANGED
@@ -29,71 +29,77 @@ st.set_page_config(page_title="Resume Reviewer")
29
  llm = None
30
  resume_chain = None
31
 
32
- # Add API key input in the sidebar
 
 
 
 
33
  with st.sidebar:
34
  st.title('Resume Reviewer')
35
- st.write("Upload your resume and JD for my recommendations.")
36
-
37
- # API Key handling - cleaner UI
38
- st.write("---") # Add a separator
39
  st.write("### OpenAI API Key")
40
  st.write("Enter your API key to use the application. Get one at [OpenAI](https://platform.openai.com/api-keys)")
41
  api_key = st.text_input("API Key", type="password", help="Your API key will not be stored")
42
-
43
  if api_key:
44
  os.environ['OPENAI_API_KEY'] = api_key
45
  llm = ChatOpenAI(temperature=0.0, model=OPENAI_MODEL_NAME)
46
  else:
47
- st.info("👆 Please enter your OpenAI API key to start")
48
  llm = None
49
-
50
- # Add upload method selection
51
- upload_method = st.radio("Choose input method:", ["File Upload", "Text Input"])
52
-
53
- # Initialize variables to avoid NameError
54
- resume_file = None
55
- jd_file = None
56
- resume_content = None
57
- job_description_content = None
58
 
59
- with st.sidebar:
60
- if upload_method == "File Upload":
61
- st.write("Note: File size should be less than 5MB")
62
- resume_file = st.file_uploader("Upload your resume (pdf file only)", type=["pdf"], accept_multiple_files=False)
63
- jd_file = st.file_uploader("Upload your JD (txt file only)", type=["txt"], accept_multiple_files=False)
64
- else:
65
- resume_text = st.text_area("Paste your resume content here:", height=200)
66
- jd_text = st.text_area("Paste your job description here:", height=200)
67
 
68
- if resume_text and jd_text:
69
- resume_content = resume_text
70
- job_description_content = jd_text
71
- st.sidebar.success("Text content received successfully!")
 
72
 
73
- if upload_method == "File Upload" and resume_file is not None and jd_file is not None and api_key:
 
74
  try:
75
  with st.spinner("Processing resume file..."):
76
  directory_reader = DirectoryReader("", "")
77
  resume_content = directory_reader.extract_text_from_pdf(resume_file)
78
  st.sidebar.success("Resume processed successfully!")
79
-
80
- with st.spinner("Processing job description file..."):
81
- if jd_file.type == 'text/plain':
82
- try:
83
- from io import StringIO
84
- stringio = StringIO(jd_file.getvalue().decode('utf-8'))
85
- read_data = stringio.read()
86
- job_description_content = read_data
87
- st.sidebar.success("JD processed successfully!")
88
- except Exception as e:
89
- st.sidebar.error(f"Error processing JD file: {str(e)}")
90
- job_description_content = None
91
  except Exception as e:
92
  st.sidebar.error(f"Error processing resume file: {str(e)}")
93
  resume_content = None
94
- job_description_content = None
95
 
96
- SYSTEM_PROMPT = "\n\n" + TEMPLATE_CONTENT + "<RESUME STARTS HERE> {}. <RESUME ENDS HERE> with the job description: <JOB DESCRIPTION STARTS HERE> {}.<JOB DESCRIPTION ENDS HERE>\n\nBe crisp and clear in response.DO NOT provide the resume and job description in the response\n\n".format(resume_content, job_description_content)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
 
98
 
99
  # Store LLM generated responses
@@ -133,18 +139,28 @@ def clear_chat_history():
133
  def generate_report():
134
  user_message = {"role": "user", "content": "Generate a Report!"}
135
  st.session_state.messages.append(user_message)
136
- if resume_content is not None and job_description_content is not None:
137
- with st.chat_message("assistant"):
138
- with st.spinner("Just a moment..."):
 
 
 
 
 
 
139
 
140
- comparison_analysis = generate_response(comparison_prompt.format(resume_content, job_description_content))
141
- resume_analysis = generate_response(resume_analysis_prompt.format(resume_content))
 
 
 
 
142
  job_description_analysis = generate_response(
143
  job_description_analysis_prompt.format(job_description_content))
144
- gap_analysis = generate_response(gap_analysis_prompt.format(resume_content,
145
- job_description_content))
146
- actionable_steps_analysis = generate_response(actionable_steps_prompt.format(
147
- resume_content, job_description_content))
148
  experience_enhancement_analysis = generate_response(
149
  experience_enhancement_prompt.format(resume_content, job_description_content))
150
  additional_qualifications_analysis = generate_response(
@@ -153,28 +169,21 @@ def generate_report():
153
  resume_tailoring_prompt.format(resume_content, job_description_content))
154
  relevant_skills_highlight_analysis = generate_response(
155
  relevant_skills_highlight_prompt.format(resume_content, job_description_content))
156
- resume_formatting_analysis = generate_response(
157
- resume_formatting_prompt.format(resume_content, job_description_content))
158
  resume_length_analysis = generate_response(
159
  resume_length_prompt.format(resume_content, job_description_content))
160
 
161
- # Compile the report
162
- report = f"Comparison Analysis:\n{comparison_analysis}\n\n" \
163
- f"Resume Analysis:\n{resume_analysis}\n\n" \
164
- f"Job Description Analysis:\n{job_description_analysis}\n" \
165
- f"\nGap Analysis:\n{gap_analysis} \n\n" \
166
- f"Actionable Steps:\n{actionable_steps_analysis}\n\n" \
167
- f"Experience Enhancement:\n{experience_enhancement_analysis}\n\n" \
168
- f"Additional Qualifications:\n{additional_qualifications_analysis}\n\n" \
169
- f"Resume Tailoring:\n{resume_tailoring_analysis}\n\n" \
170
- f"Relevant Skills Highlight:\n{relevant_skills_highlight_analysis}\n\n" \
171
- f"Resume Formatting:\n{resume_formatting_analysis}\n\n" \
172
- f"Resume Length:\n{resume_length_analysis} "
173
-
174
- report_message = {"role": "assistant", "content": report}
175
- st.session_state.messages.append(report_message)
176
- else:
177
- st.error("Please upload a resume and enter a job description!")
178
 
179
 
180
  # Setup the system message and prompt template
 
29
  llm = None
30
  resume_chain = None
31
 
32
+ # Initialize variables
33
+ resume_content = None
34
+ job_description_content = None
35
+
36
+ # Sidebar
37
  with st.sidebar:
38
  st.title('Resume Reviewer')
39
+ st.write("Upload your resume for my recommendations. Job description is optional.")
40
+
41
+ # API Key
42
+ st.write("---")
43
  st.write("### OpenAI API Key")
44
  st.write("Enter your API key to use the application. Get one at [OpenAI](https://platform.openai.com/api-keys)")
45
  api_key = st.text_input("API Key", type="password", help="Your API key will not be stored")
46
+
47
  if api_key:
48
  os.environ['OPENAI_API_KEY'] = api_key
49
  llm = ChatOpenAI(temperature=0.0, model=OPENAI_MODEL_NAME)
50
  else:
51
+ st.info("Please enter your OpenAI API key to start")
52
  llm = None
 
 
 
 
 
 
 
 
 
53
 
54
+ # Resume upload (file only)
55
+ st.write("---")
56
+ st.write("### Resume")
57
+ st.write("Note: File size should be less than 5MB")
58
+ resume_file = st.file_uploader("Upload your resume (PDF)", type=["pdf"], accept_multiple_files=False)
 
 
 
59
 
60
+ # JD input (file or text, optional)
61
+ st.write("---")
62
+ st.write("### Job Description (optional)")
63
+ jd_file = st.file_uploader("Upload a JD (txt file)", type=["txt"], accept_multiple_files=False)
64
+ jd_text = st.text_area("Or paste the job description here:", height=150)
65
 
66
+ # Process resume
67
+ if resume_file is not None and api_key:
68
  try:
69
  with st.spinner("Processing resume file..."):
70
  directory_reader = DirectoryReader("", "")
71
  resume_content = directory_reader.extract_text_from_pdf(resume_file)
72
  st.sidebar.success("Resume processed successfully!")
 
 
 
 
 
 
 
 
 
 
 
 
73
  except Exception as e:
74
  st.sidebar.error(f"Error processing resume file: {str(e)}")
75
  resume_content = None
 
76
 
77
+ # Process JD - prefer file upload, fall back to text input
78
+ if jd_file is not None:
79
+ try:
80
+ from io import StringIO
81
+ stringio = StringIO(jd_file.getvalue().decode('utf-8'))
82
+ job_description_content = stringio.read()
83
+ st.sidebar.success("JD processed successfully!")
84
+ except Exception as e:
85
+ st.sidebar.error(f"Error processing JD file: {str(e)}")
86
+ elif jd_text:
87
+ job_description_content = jd_text
88
+
89
+ # Build system prompt based on what's provided
90
+ if resume_content and job_description_content:
91
+ SYSTEM_PROMPT = "\n\n" + TEMPLATE_CONTENT + \
92
+ "<RESUME STARTS HERE> {}. <RESUME ENDS HERE> with the job description: " \
93
+ "<JOB DESCRIPTION STARTS HERE> {}.<JOB DESCRIPTION ENDS HERE>\n\n" \
94
+ "Be crisp and clear in response. DO NOT provide the resume and job description in the response.\n\n".format(
95
+ resume_content, job_description_content)
96
+ elif resume_content:
97
+ SYSTEM_PROMPT = "\n\n" + TEMPLATE_CONTENT + \
98
+ "<RESUME STARTS HERE> {}. <RESUME ENDS HERE>\n\n" \
99
+ "No job description was provided. Focus on general resume feedback, strengths, and areas for improvement. " \
100
+ "Be crisp and clear in response. DO NOT provide the resume in the response.\n\n".format(resume_content)
101
+ else:
102
+ SYSTEM_PROMPT = "\n\n" + TEMPLATE_CONTENT
103
 
104
 
105
  # Store LLM generated responses
 
139
  def generate_report():
140
  user_message = {"role": "user", "content": "Generate a Report!"}
141
  st.session_state.messages.append(user_message)
142
+ if resume_content is None:
143
+ st.error("Please upload a resume first!")
144
+ return
145
+
146
+ with st.chat_message("assistant"):
147
+ with st.spinner("Just a moment..."):
148
+ resume_analysis = generate_response(resume_analysis_prompt.format(resume_content))
149
+ resume_formatting_analysis = generate_response(
150
+ resume_formatting_prompt.format(resume_content, "N/A"))
151
 
152
+ report = f"**Resume Analysis:**\n{resume_analysis}\n\n" \
153
+ f"**Resume Formatting:**\n{resume_formatting_analysis}"
154
+
155
+ if job_description_content is not None:
156
+ comparison_analysis = generate_response(
157
+ comparison_prompt.format(resume_content, job_description_content))
158
  job_description_analysis = generate_response(
159
  job_description_analysis_prompt.format(job_description_content))
160
+ gap_analysis = generate_response(
161
+ gap_analysis_prompt.format(resume_content, job_description_content))
162
+ actionable_steps_analysis = generate_response(
163
+ actionable_steps_prompt.format(resume_content, job_description_content))
164
  experience_enhancement_analysis = generate_response(
165
  experience_enhancement_prompt.format(resume_content, job_description_content))
166
  additional_qualifications_analysis = generate_response(
 
169
  resume_tailoring_prompt.format(resume_content, job_description_content))
170
  relevant_skills_highlight_analysis = generate_response(
171
  relevant_skills_highlight_prompt.format(resume_content, job_description_content))
 
 
172
  resume_length_analysis = generate_response(
173
  resume_length_prompt.format(resume_content, job_description_content))
174
 
175
+ report += f"\n\n**Comparison Analysis:**\n{comparison_analysis}\n\n" \
176
+ f"**Job Description Analysis:**\n{job_description_analysis}\n\n" \
177
+ f"**Gap Analysis:**\n{gap_analysis}\n\n" \
178
+ f"**Actionable Steps:**\n{actionable_steps_analysis}\n\n" \
179
+ f"**Experience Enhancement:**\n{experience_enhancement_analysis}\n\n" \
180
+ f"**Additional Qualifications:**\n{additional_qualifications_analysis}\n\n" \
181
+ f"**Resume Tailoring:**\n{resume_tailoring_analysis}\n\n" \
182
+ f"**Relevant Skills Highlight:**\n{relevant_skills_highlight_analysis}\n\n" \
183
+ f"**Resume Length:**\n{resume_length_analysis}"
184
+
185
+ report_message = {"role": "assistant", "content": report}
186
+ st.session_state.messages.append(report_message)
 
 
 
 
 
187
 
188
 
189
  # Setup the system message and prompt template
README.md CHANGED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Resume Reviewer
3
+ emoji: 📄
4
+ colorFrom: blue
5
+ colorTo: indigo
6
+ sdk: streamlit
7
+ sdk_version: "1.54.0"
8
+ app_file: app.py
9
+ pinned: false
10
+ ---
11
+
12
+ # Resume Reviewer
13
+
14
+ AI-powered resume analyzer that reviews your resume and compares it against job descriptions using OpenAI.
15
+
16
+ ## Features
17
+
18
+ - Upload a PDF resume for instant analysis
19
+ - Optionally provide a job description (file or text) for targeted feedback
20
+ - Get a full report: gap analysis, tailoring suggestions, formatting tips, and more
21
+ - Chat with the AI for follow-up questions about your resume
22
+
23
+ ## Usage
24
+
25
+ 1. Enter your OpenAI API key in the sidebar
26
+ 2. Upload your resume (PDF)
27
+ 3. Optionally add a job description (upload .txt or paste text)
28
+ 4. Click **Generate Report** or ask questions in the chat
app.py ADDED
@@ -0,0 +1,254 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+
4
+ # Add source directory to path so sibling imports work
5
+ sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "Code+Folder", "src"))
6
+
7
+ import streamlit as st
8
+
9
+ try:
10
+ from streamlit_feedback import streamlit_feedback
11
+ FEEDBACK_AVAILABLE = True
12
+ except ImportError:
13
+ FEEDBACK_AVAILABLE = False
14
+
15
+ from langchain_classic.chains import ConversationChain
16
+ from langchain_classic.memory import ConversationBufferWindowMemory
17
+ from langchain_core.messages import SystemMessage
18
+ from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
19
+ from langchain_openai import ChatOpenAI
20
+ from constants import OPENAI_MODEL_NAME, TEMPLATE_CONTENT, comparison_prompt, resume_analysis_prompt, \
21
+ job_description_analysis_prompt, gap_analysis_prompt, actionable_steps_prompt, experience_enhancement_prompt, \
22
+ additional_qualifications_prompt, resume_tailoring_prompt, relevant_skills_highlight_prompt, \
23
+ resume_formatting_prompt, resume_length_prompt
24
+ from directory_reader import DirectoryReader
25
+
26
+ st.set_page_config(page_title="Resume Reviewer")
27
+
28
+ # Initialize llm as None at the top level
29
+ llm = None
30
+ resume_chain = None
31
+
32
+ # Initialize variables
33
+ resume_content = None
34
+ job_description_content = None
35
+
36
+ # Sidebar
37
+ with st.sidebar:
38
+ st.title('Resume Reviewer')
39
+ st.write("Upload your resume for my recommendations. Job description is optional.")
40
+
41
+ # API Key
42
+ st.write("---")
43
+ st.write("### OpenAI API Key")
44
+ st.write("Enter your API key to use the application. Get one at [OpenAI](https://platform.openai.com/api-keys)")
45
+ api_key = st.text_input("API Key", type="password", help="Your API key will not be stored")
46
+
47
+ if api_key:
48
+ os.environ['OPENAI_API_KEY'] = api_key
49
+ llm = ChatOpenAI(temperature=0.0, model=OPENAI_MODEL_NAME)
50
+ else:
51
+ st.info("Please enter your OpenAI API key to start")
52
+ llm = None
53
+
54
+ # Resume upload (file only)
55
+ st.write("---")
56
+ st.write("### Resume")
57
+ st.write("Note: File size should be less than 5MB")
58
+ resume_file = st.file_uploader("Upload your resume (PDF)", type=["pdf"], accept_multiple_files=False)
59
+
60
+ # JD input (file or text, optional)
61
+ st.write("---")
62
+ st.write("### Job Description (optional)")
63
+ jd_file = st.file_uploader("Upload a JD (txt file)", type=["txt"], accept_multiple_files=False)
64
+ jd_text = st.text_area("Or paste the job description here:", height=150)
65
+
66
+ # Process resume
67
+ if resume_file is not None and api_key:
68
+ try:
69
+ with st.spinner("Processing resume file..."):
70
+ directory_reader = DirectoryReader("", "")
71
+ resume_content = directory_reader.extract_text_from_pdf(resume_file)
72
+ st.sidebar.success("Resume processed successfully!")
73
+ except Exception as e:
74
+ st.sidebar.error(f"Error processing resume file: {str(e)}")
75
+ resume_content = None
76
+
77
+ # Process JD - prefer file upload, fall back to text input
78
+ if jd_file is not None:
79
+ try:
80
+ from io import StringIO
81
+ stringio = StringIO(jd_file.getvalue().decode('utf-8'))
82
+ job_description_content = stringio.read()
83
+ st.sidebar.success("JD processed successfully!")
84
+ except Exception as e:
85
+ st.sidebar.error(f"Error processing JD file: {str(e)}")
86
+ elif jd_text:
87
+ job_description_content = jd_text
88
+
89
+ # Build system prompt based on what's provided
90
+ if resume_content and job_description_content:
91
+ SYSTEM_PROMPT = "\n\n" + TEMPLATE_CONTENT + \
92
+ "<RESUME STARTS HERE> {}. <RESUME ENDS HERE> with the job description: " \
93
+ "<JOB DESCRIPTION STARTS HERE> {}.<JOB DESCRIPTION ENDS HERE>\n\n" \
94
+ "Be crisp and clear in response. DO NOT provide the resume and job description in the response.\n\n".format(
95
+ resume_content, job_description_content)
96
+ elif resume_content:
97
+ SYSTEM_PROMPT = "\n\n" + TEMPLATE_CONTENT + \
98
+ "<RESUME STARTS HERE> {}. <RESUME ENDS HERE>\n\n" \
99
+ "No job description was provided. Focus on general resume feedback, strengths, and areas for improvement. " \
100
+ "Be crisp and clear in response. DO NOT provide the resume in the response.\n\n".format(resume_content)
101
+ else:
102
+ SYSTEM_PROMPT = "\n\n" + TEMPLATE_CONTENT
103
+
104
+
105
+ # Store LLM generated responses
106
+ if "messages" not in st.session_state.keys():
107
+ st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
108
+
109
+ # Display or clear chat messages
110
+ for message in st.session_state.messages:
111
+ if message["role"] != "feedback":
112
+ with st.chat_message(message["role"]):
113
+ st.write(message["content"])
114
+
115
+
116
+ def clear_chat_history():
117
+ global resume_chain
118
+ st.session_state.messages = [{"role": "assistant", "content": "How may I help you today?"}]
119
+ if llm is not None:
120
+ resume_chain = ConversationChain(
121
+ llm=llm,
122
+ prompt=prompt_template,
123
+ memory=memory,
124
+ verbose=False
125
+ )
126
+
127
+
128
+ def generate_report():
129
+ user_message = {"role": "user", "content": "Generate a Report!"}
130
+ st.session_state.messages.append(user_message)
131
+ if resume_content is None:
132
+ st.error("Please upload a resume first!")
133
+ return
134
+
135
+ with st.chat_message("assistant"):
136
+ with st.spinner("Just a moment..."):
137
+ resume_analysis = generate_response(resume_analysis_prompt.format(resume_content))
138
+ resume_formatting_analysis = generate_response(
139
+ resume_formatting_prompt.format(resume_content, "N/A"))
140
+
141
+ report = f"**Resume Analysis:**\n{resume_analysis}\n\n" \
142
+ f"**Resume Formatting:**\n{resume_formatting_analysis}"
143
+
144
+ if job_description_content is not None:
145
+ comparison_analysis = generate_response(
146
+ comparison_prompt.format(resume_content, job_description_content))
147
+ job_description_analysis = generate_response(
148
+ job_description_analysis_prompt.format(job_description_content))
149
+ gap_analysis = generate_response(
150
+ gap_analysis_prompt.format(resume_content, job_description_content))
151
+ actionable_steps_analysis = generate_response(
152
+ actionable_steps_prompt.format(resume_content, job_description_content))
153
+ experience_enhancement_analysis = generate_response(
154
+ experience_enhancement_prompt.format(resume_content, job_description_content))
155
+ additional_qualifications_analysis = generate_response(
156
+ additional_qualifications_prompt.format(resume_content, job_description_content))
157
+ resume_tailoring_analysis = generate_response(
158
+ resume_tailoring_prompt.format(resume_content, job_description_content))
159
+ relevant_skills_highlight_analysis = generate_response(
160
+ relevant_skills_highlight_prompt.format(resume_content, job_description_content))
161
+ resume_length_analysis = generate_response(
162
+ resume_length_prompt.format(resume_content, job_description_content))
163
+
164
+ report += f"\n\n**Comparison Analysis:**\n{comparison_analysis}\n\n" \
165
+ f"**Job Description Analysis:**\n{job_description_analysis}\n\n" \
166
+ f"**Gap Analysis:**\n{gap_analysis}\n\n" \
167
+ f"**Actionable Steps:**\n{actionable_steps_analysis}\n\n" \
168
+ f"**Experience Enhancement:**\n{experience_enhancement_analysis}\n\n" \
169
+ f"**Additional Qualifications:**\n{additional_qualifications_analysis}\n\n" \
170
+ f"**Resume Tailoring:**\n{resume_tailoring_analysis}\n\n" \
171
+ f"**Relevant Skills Highlight:**\n{relevant_skills_highlight_analysis}\n\n" \
172
+ f"**Resume Length:**\n{resume_length_analysis}"
173
+
174
+ report_message = {"role": "assistant", "content": report}
175
+ st.session_state.messages.append(report_message)
176
+
177
+
178
+ # Setup the system message and prompt template
179
+ system_message = SystemMessage(content=TEMPLATE_CONTENT)
180
+ human_message = HumanMessagePromptTemplate.from_template("{history} User:{input} Assistant:")
181
+ prompt_template = ChatPromptTemplate(messages=[system_message, human_message])
182
+ memory = ConversationBufferWindowMemory(k=2)
183
+
184
+ # Initialize the chain if llm is available
185
+ if llm is not None:
186
+ resume_chain = ConversationChain(
187
+ llm=llm,
188
+ prompt=prompt_template,
189
+ memory=memory,
190
+ verbose=False
191
+ )
192
+
193
+ def generate_response(prompt_input):
194
+ if resume_chain is None:
195
+ return "Please enter your OpenAI API key to use this application"
196
+ output = resume_chain.predict(input=prompt_input)
197
+ return output
198
+
199
+
200
+ st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
201
+ st.sidebar.button('Generate Report', on_click=generate_report)
202
+
203
+
204
+ def get_feedback():
205
+ st.session_state.messages.append({"role": "feedback", "content": st.session_state.fbk})
206
+
207
+
208
+ # At the beginning of your script, initialize the prompt in session state
209
+ if "current_prompt" not in st.session_state:
210
+ st.session_state.current_prompt = ""
211
+
212
+ # When user enters a prompt
213
+ if prompt := st.chat_input():
214
+ st.session_state.current_prompt = prompt
215
+ st.session_state.messages.append({"role": "user", "content": prompt})
216
+ with st.chat_message("user"):
217
+ st.write(prompt)
218
+
219
+
220
+ def get_llm_response():
221
+ with st.chat_message("assistant"):
222
+ with st.spinner("Thinking..."):
223
+ response = generate_response(st.session_state.current_prompt + SYSTEM_PROMPT)
224
+ placeholder = st.empty()
225
+ placeholder.markdown(response)
226
+ full_response = response
227
+ message = {"role": "assistant", "content": full_response}
228
+ st.session_state.messages.append(message)
229
+
230
+ # Only show feedback form if the feature is available
231
+ if FEEDBACK_AVAILABLE:
232
+ with st.form("form"):
233
+ streamlit_feedback(feedback_type="thumbs", optional_text_label="[Optional] Please provide an explanation", key="fbk")
234
+ st.form_submit_button('Save feedback', on_click=get_feedback)
235
+
236
+
237
+ # Generate a new response if last message is not from assistant
238
+ if st.session_state.messages[-1]["role"] not in ["assistant", "feedback"]:
239
+ get_llm_response()
240
+
241
+ if st.session_state.messages[-1]["role"] in ["feedback"]:
242
+ try:
243
+ feedback_response = st.session_state.messages[-1]["content"]
244
+ score_mappings = {
245
+ "thumbs": {"thumbs_up": 1, "thumbs_down": 0},
246
+ }
247
+ score = score_mappings[feedback_response["type"]][feedback_response["score"]]
248
+ if score == 0:
249
+ feedback = st.session_state.messages[-1]["content"]['text']
250
+ prompt = "Please respond according to feedback '{0}' on the previous response on \n".format(feedback) \
251
+ + st.session_state.messages[-3]["content"]
252
+ get_llm_response()
253
+ except:
254
+ pass
requirements.txt ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ streamlit>=1.31.0
2
+ langchain>=0.3.0
3
+ langchain-classic>=1.0.0
4
+ langchain-community>=0.3.0
5
+ langchain-core>=0.3.0
6
+ langchain-openai>=0.2.0
7
+ openai>=1.10.0
8
+ streamlit-feedback>=0.1.3
9
+ PyPDF2>=3.0.1
10
+ pypdf>=3.17.1
11
+ numpy>=1.26.3
12
+ pandas>=2.1.4
13
+ scikit-learn>=1.3.2
14
+ tqdm>=4.66.1
15
+ python-dotenv>=1.0.0