AdamyaG committed on
Commit
8b389ab
·
verified ·
1 Parent(s): ef78ff1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -4
app.py CHANGED
@@ -20,18 +20,23 @@ import os
20
 
21
 
22
def generate_question(role, topic, difficulty_level):
    """Generate an interview question using a HuggingFace-hosted LLM.

    Parameters:
        role: job role the question should target.
        topic: subject area of the question.
        difficulty_level: desired difficulty of the question.

    Returns:
        The generated question text returned by the model.
    """
    # Instruction the model will receive (fully formatted up front).
    question = f"Generate an interview question for the role of {role} on the topic of {topic} with difficulty level {difficulty_level}."

    # llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash-lite", google_api_key=st.secrets["GOOGLE_API_KEY"])
    repo_id = "Qwen/Qwen3-8B"
    llm = HuggingFaceEndpoint(
        repo_id=repo_id,
        max_length=512,
        temperature=0.5,
    )

    # BUG FIX: the original reused the name `prompt` for both the raw string
    # and the PromptTemplate, then called `llm_chain.invoke(prompt)` with the
    # PromptTemplate object itself. An LCEL chain built as `prompt | llm` must
    # be invoked with the template's input variables (a dict). The question
    # text here is already fully formatted, so the template has no variables
    # and the chain takes an empty dict.
    prompt = PromptTemplate.from_template(question)
    llm_chain = prompt | llm
    response = llm_chain.invoke({})

    # BUG FIX: HuggingFaceEndpoint returns a plain string, not a chat
    # message, so there is no `.content` attribute to read — return it as-is.
    return response
 
20
 
21
 
22
def generate_question(role, topic, difficulty_level):
    """Generate an interview question using a HuggingFace-hosted LLM.

    Parameters:
        role: job role the question should target.
        topic: subject area of the question.
        difficulty_level: desired difficulty of the question.

    Returns:
        The generated question text returned by the model.
    """
    # Value substituted into the prompt template below.
    question = f"Generate an interview question for the role of {role} on the topic of {topic} with difficulty level {difficulty_level}."
    template = """Question: {question}
Answer:
"""
    # llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash-lite", google_api_key=st.secrets["GOOGLE_API_KEY"])
    repo_id = "mistralai/Mistral-7B-Instruct-v0.2" #"Qwen/Qwen3-8B"
    llm = HuggingFaceEndpoint(
        repo_id=repo_id,
        max_length=512,
        temperature=0.5,
    )
    prompt = PromptTemplate.from_template(template)
    llm_chain = prompt | llm

    # BUG FIX: the invoke() result was discarded, so the next statement
    # (`response = response.content`) raised NameError on an undefined name.
    # Capture the chain's output explicitly.
    response = llm_chain.invoke({"question": question})

    # BUG FIX: HuggingFaceEndpoint returns a plain string, not a chat
    # message object, so `.content` does not exist — return the string.
    return response