Spaces:
Sleeping
Sleeping
Update function.py
Browse files — function.py (+19 −16)
function.py
CHANGED
|
@@ -26,24 +26,27 @@ def get_answers(questions,model):
|
|
| 26 |
|
| 27 |
|
| 28 |
|
| 29 |
-
def GetLLMResponse(selected_topic_level, selected_topic,
|
| 30 |
-
question_prompt = (f'You are an AI interview assistant that helps generate customized interview questions for various technical and non-technical roles. Your task is to create a set of interview questions based on the {selected_topic_level} and topic : {selected_topic}.Ensure the questions match the indicated level of understanding:{selected_level} and difficulty:{selected_Question_Difficulty}. Generate only {num_quizzes} questions and give it in a python list with variable name question_list')
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
if model == "Open AI":
|
| 34 |
-
llm = OpenAI(temperature=0.7, openai_api_key=st.secrets["OPENAI_API_KEY"])
|
| 35 |
-
questions = llm(question_prompt)
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
elif model == "Gemini":
|
| 39 |
-
llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"])
|
| 40 |
-
questions = llm.invoke(question_prompt)
|
| 41 |
-
questions = questions.content
|
| 42 |
-
# return questions.content
|
| 43 |
|
| 44 |
-
# answers = "testing"
|
| 45 |
|
| 46 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
|
| 48 |
|
| 49 |
return(questions,answers)
|
|
|
|
| 26 |
|
| 27 |
|
| 28 |
|
| 29 |
+
def GetLLMResponse(selected_topic_level, selected_topic, selected_Question_Difficulty, selected_level, model):
    """Generate interview questions and matching answers via the selected LLM backend.

    One LLM round-trip is made per question (the prompt asks for exactly one
    question each time), repeated ``num_quizzes`` times.

    Parameters
    ----------
    selected_topic_level : str
        Broad topic area embedded in the prompt.
    selected_topic : str
        Concrete topic the questions must cover.
    selected_Question_Difficulty : str
        Difficulty label embedded in the prompt.
    selected_level : str
        Level of understanding the questions should match.
    model : str
        Backend selector: ``"Open AI"`` or ``"Gemini"``.

    Returns
    -------
    tuple[list, list]
        ``(questions, answers)`` — one entry per generated question.
        NOTE(review): the previous code overwrote ``questions``/``answers``
        on every loop pass and so returned only the final pair; accumulating
        into lists is the presumed intent of the ``num_quizzes`` loop.

    Raises
    ------
    ValueError
        If ``model`` names an unsupported backend (previously this path
        crashed later with ``NameError`` on the unbound ``questions``).

    Notes
    -----
    Relies on module-level names: ``num_quizzes`` (question count — assumed
    set by the UI before this call; TODO confirm), ``st`` (Streamlit, for
    API-key secrets), ``OpenAI``, ``ChatGoogleGenerativeAI``, ``get_answers``.
    """
    questions = []
    answers = []
    for _ in range(num_quizzes):
        question_prompt = (f'You are an AI interview assistant that helps generate customized interview questions for various technical and non-technical roles. Your task is to create a set of interview questions based on the {selected_topic_level} and topic : {selected_topic}.Ensure the questions match the indicated level of understanding:{selected_level} and difficulty:{selected_Question_Difficulty}. Generate only 1 question.')

        if model == "Open AI":
            llm = OpenAI(temperature=0.7, openai_api_key=st.secrets["OPENAI_API_KEY"])
            question = llm(question_prompt)
        elif model == "Gemini":
            llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"])
            # Chat models return a message object; the text lives on .content.
            question = llm.invoke(question_prompt).content
        else:
            raise ValueError(f"Unsupported model: {model!r}")

        questions.append(question)
        # Fetch the answer for this single question while it is still in hand.
        answers.append(get_answers(question, model))

    return (questions, answers)
|