YAMITEK committed on
Commit
cf8045f
·
verified ·
1 Parent(s): c72e182

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +196 -27
app.py CHANGED
@@ -1,27 +1,196 @@
1
- import streamlit as st
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
- import torch
4
-
5
- def load_model():
6
- tokenizer = AutoTokenizer.from_pretrained("quantized_model")
7
- model = AutoModelForCausalLM.from_pretrained(
8
- "quantized_model",
9
- device_map="auto",
10
- torch_dtype=torch.bfloat16,
11
- )
12
- return tokenizer, model
13
-
14
- tokenizer, model = load_model()
15
-
16
- st.title("Quantized Model Inference")
17
-
18
- user_input = st.text_input("Enter your prompt:")
19
-
20
- if st.button("Generate"):
21
- if user_input:
22
- inputs = tokenizer(user_input, return_tensors="pt").to("cuda")
23
- outputs = model.generate(**inputs)
24
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
25
- st.write(f"Response: {response}")
26
- else:
27
- st.write("Please enter a prompt.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from crewai import Agent, Task, Crew, LLM
3
+ from crewai_tools import SerperDevTool
4
+ from gtts import gTTS
5
+ import speech_recognition as sr
6
+ import os
7
+
8
# API keys are read from the environment; value is None if the variable is unset.
gemini_key=os.getenv("gemini_api")


# NOTE(review): serper_key is read but never passed anywhere below —
# presumably SerperDevTool picks up its key from the environment itself; verify.
serper_key=os.getenv("serper_api")

# Web-search tool shared by every agent and task defined below.
search_tool = SerperDevTool()

# Single module-level recognizer instance, reused by recognize_speech().
recognizer = sr.Recognizer()
16
+
17
def recognize_speech():
    """Capture one utterance from the microphone and return its transcription.

    Returns the recognized text on success, otherwise a human-readable error
    message (the caller displays the returned string either way).
    """
    with sr.Microphone() as source:
        st.info("Listening... Speak now!")
        recognizer.adjust_for_ambient_noise(source)
        try:
            # BUG FIX: removed `stream=False` — it is not a parameter of
            # Recognizer.listen() in the documented speech_recognition API
            # and raised TypeError at call time.
            audio = recognizer.listen(source, timeout=5)
            text = recognizer.recognize_google(audio)
            return text
        except sr.WaitTimeoutError:
            # listen(timeout=5) raises WaitTimeoutError when no speech starts
            # within 5 s; previously this exception crashed the app.
            return "Sorry, no speech was detected before the timeout."
        except sr.UnknownValueError:
            return "Sorry, could not understand the audio."
        except sr.RequestError:
            return "Could not request results, check your internet."
29
def text_input():
    """Render a Streamlit text box and return whatever the user typed."""
    return st.text_input("Enter the answer")
32
+
33
+
34
# Initialize LLM
# Gemini 1.5 Flash via crewAI's LLM wrapper; temperature 0.5 for moderately
# varied generations. NOTE(review): gemini_key may be None if the environment
# variable is missing — the first kickoff would then fail at request time.
llm = LLM(model="gemini/gemini-1.5-flash",
          verbose=True,
          temperature=0.5,
          api_key=gemini_key)
39
+
40
+
41
# Define Agents
# Generates the interview questions; {number} and {job_description} are
# interpolated by crewAI from the kickoff inputs.
question_agent = Agent(
    role="interviewer",
    goal="Frame {number} questions based on the {job_description}",
    verbose=False,
    memory=True,
    backstory="You need to frame interview questions based on the job description.",
    llm=llm,
    tools=[search_tool],
    allow_delegation=True
)
52
+
53
# Produces/judges an answer for each question; {question} and
# {job_description} come from crew2.kickoff inputs.
generator_agent = Agent(
    role="answer generator",
    goal="frame answer to {question} based on the {job_description}.",
    verbose=False,
    memory=True,
    backstory="you are expert in answering the question based on {job_description}.",
    llm=llm,
    tools=[search_tool],
    allow_delegation=True
)
63
+
64
# Evaluates the user's answer to each question.
# BUG FIX: role was the typo "evulation" and the goal/backstory were
# copy-pasted from generator_agent (describing answering, not evaluating).
evaluation_agent = Agent(
    role="evaluator",
    goal="evaluate the given {answer} to the {question} based on the {job_description}.",
    verbose=False,
    memory=True,
    backstory="you are expert in evaluating interview answers for the {job_description}.",
    llm=llm,
    tools=[search_tool],
    allow_delegation=True
)
74
+
75
+
76
# Define Tasks
# Task for crew1: produce the question list from the kickoff inputs.
question_task = Task(
    description="Generate {number} interview questions based on the {job_description} .",
    expected_output="A list of {number} questions.",
    tools=[search_tool],
    agent=question_agent
)
83
+
84
# Task for crew2: judge the user's answer to the current question.
generator_task = Task(
    # BUG FIX: crew2.kickoff passes an "answer" input, but the original
    # description never interpolated {answer} — the model was asked to judge
    # correctness without ever seeing the user's answer.
    description="Evaluate the answer '{answer}' to the {question} based on the {job_description}.",
    expected_output="Correct if the answer is right; otherwise, return the correct answer.",
    tools=[search_tool],
    agent=generator_agent
)
90
+
91
+
92
# Task for crew3 (currently never kicked off in this file).
evaluation_task = Task(
    # BUG FIX: description was copy-pasted from generator_task and never
    # interpolated {answer}; it now states the evaluation job explicitly.
    description="Evaluate the answer '{answer}' to the {question} based on the {job_description}.",
    expected_output="Correct if the answer is right; otherwise, return the correct answer.",
    tools=[search_tool],
    # BUG FIX: was agent=generator_agent, contradicting
    # crew3 = Crew(agents=[evaluation_agent], ...) below.
    agent=evaluation_agent
)
98
+
99
# Define Crews — one single-agent crew per stage so each stage can be
# kicked off independently from the Streamlit callbacks below.
crew1 = Crew(agents=[question_agent], tasks=[question_task])      # question generation
crew2 = Crew(agents=[generator_agent], tasks=[generator_task])    # answer evaluation
crew3 = Crew(agents=[evaluation_agent],tasks=[evaluation_task])   # NOTE(review): never kicked off below

# Initialize Streamlit App
st.title("Interview Preparation Bot")
106
+
107
# *Reset function to clear all session variables*
def reset_session():
    """Restore every piece of interview state to its initial value."""
    initial_state = {
        "job_description": "",
        "number": 1,
        "questions": [],
        "current_question_index": 0,
        "answers": [],
        "evaluations": [],
        "completed": False,
    }
    for key, value in initial_state.items():
        setattr(st.session_state, key, value)
116
+
117
# Initialize session state variables
# First run of the script: seed every state key via reset_session().
if "questions" not in st.session_state:
    reset_session()

# Defensive re-seed in case "number" was dropped independently of "questions".
if "number" not in st.session_state:
    st.session_state.number = 1

# User Input — both widgets echo back into session state so values survive reruns.
st.session_state.job_description = st.text_input("Enter the Topic you need Practice", value=st.session_state.job_description)
st.session_state.number = st.number_input("Enter the Number of Question you need",min_value=1,value=st.session_state.number)
127
+
128
# Button to Generate Questions
if st.button("Start") and st.session_state.job_description and st.session_state.number:
    result = crew1.kickoff(inputs={"job_description": st.session_state.job_description,'number':st.session_state.number})
    # Split the raw LLM output on newlines, stripping spaces and markdown
    # backtick fences. NOTE(review): every non-blank line becomes a
    # "question", so the list may not contain exactly `number` items —
    # depends entirely on the LLM's output formatting.
    st.session_state.questions = [q.strip(' ```') for q in result.raw.split("\n") if q.strip()]

    # Start a fresh interview round.
    st.session_state.current_question_index = 0
    st.session_state.answers = []
    st.session_state.evaluations = []
    st.session_state.completed = False
137
+
138
# Display Questions One by One
if st.session_state.questions and st.session_state.current_question_index < len(st.session_state.questions):
    question = st.session_state.questions[st.session_state.current_question_index]
    st.write(f"*Question {st.session_state.current_question_index + 1}:* {question}")

    # Read the question aloud via Google TTS.
    tts = gTTS(question, lang="en")
    tts.save("response.mp3")
    # FIX: use a context manager — the original left the file handle open.
    with open("response.mp3", "rb") as audio_file:
        audio_bytes = audio_file.read()
    st.audio(audio_bytes, format="audio/mp3")

    # User enters answer — by voice or by typing.
    # Renamed from `type`, which shadowed the builtin.
    answer_mode = st.radio("select the type", ["Answer with voice ", "Answer by type"])

    output = ""
    if answer_mode == "Answer with voice ":
        if st.button("Start Recording"):
            output = recognize_speech()
            st.write("Transcription: ", output)
    elif answer_mode == "Answer by type":
        output = st.text_input(f"Enter your answer for Question ")

    # Prefix the answer with the 1-based question number.
    # FIX: was the duplicated assignment `answer=answer = ...`.
    # NOTE(review): the number is glued directly onto the text with no
    # separator, exactly as before — confirm this is intended.
    answer = f"{st.session_state.current_question_index + 1}{output}"

    if st.button("Submit Answer") and answer:
        # Evaluate Answer
        result2 = crew2.kickoff(inputs={"job_description": st.session_state.job_description, "question": question, "answer": answer})
        evaluation_result = result2.raw

        # Store answer and evaluation
        st.session_state.answers.append(answer)
        st.session_state.evaluations.append(evaluation_result)

        # Move to the next question
        st.session_state.current_question_index += 1

        # BUG FIX (off-by-one): the original compared against
        # len(questions) - 1, which marked the interview complete one
        # question early and never at all for a single-question run.
        if st.session_state.current_question_index >= len(st.session_state.questions):
            st.session_state.completed = True

col1, col2 = st.columns(2)

# Show Evaluations After All Answers
if st.session_state.completed:
    with col2:
        if st.button("Review The Answers"):
            st.write("### Final Evaluation:")
            # BUG FIX: range(len(questions) - 1) skipped the final
            # question's review; iterate over every answered question.
            for i in range(len(st.session_state.questions)):
                st.write(f"*Q{i+1}:* {st.session_state.questions[i]}")
                st.write(f"*Your Answer:* {st.session_state.answers[i]}")
                st.write(f"*Evaluation:* {st.session_state.evaluations[i]}")
                st.write("---")