AdamyaG committed on
Commit
d3c48ae
·
verified ·
1 Parent(s): 8a265f3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -23
app.py CHANGED
@@ -6,36 +6,18 @@ from langchain.chains import LLMChain
6
  from langchain_core.prompts import PromptTemplate
7
  import os
8
 
9
- # os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
10
- # HUGGINGFACEHUB_API_TOKEN = os.getenv["HUGGINGFACEHUB_API_TOKEN"]
11
-
12
- # repo_id = "Qwen/Qwen3-8B"
13
- # llm = HuggingFaceEndpoint(
14
- # repo_id=repo_id,
15
- # max_length=512,
16
- # temperature=0.5,
17
- # )
18
- # llm_chain = prompt | llm
19
- # print(llm_chain.invoke({"question": question}))
20
-
21
-
22
def generate_question(role, topic, difficulty_level):
    """Generate an interview question using a HuggingFace-hosted LLM endpoint.

    Args:
        role: Target job role (e.g. "Data Scientist").
        topic: Subject area the question should cover.
        difficulty_level: Desired difficulty (e.g. "easy", "hard").

    Returns:
        The generated question text (the ``.content`` of the model response).
    """
    question = f"Generate an interview question for the role of {role} on the topic of {topic} with difficulty level {difficulty_level}."
    template = """Question: {question}
    Answer:
    """
    repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
    # NOTE(review): HuggingFaceEndpoint requires HUGGINGFACEHUB_API_TOKEN in the
    # environment — confirm it is set before deployment.
    llm = HuggingFaceEndpoint(
        repo_id=repo_id,
        temperature=0.5,
    )
    prompt = PromptTemplate.from_template(template)
    llm_chain = prompt | llm
    # Bug fix: capture the invoke() result. The original discarded it and then
    # executed `response = response.content` on an undefined name, raising
    # NameError at runtime; it also never returned anything to the caller.
    response = llm_chain.invoke({"question": question})
    return response.content
41
 
 
6
  from langchain_core.prompts import PromptTemplate
7
  import os
8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
def generate_question(role, topic, difficulty_level):
    """Generate an interview question via the Gemini chat model.

    Args:
        role: Target job role (e.g. "Data Scientist").
        topic: Subject area the question should cover.
        difficulty_level: Desired difficulty (e.g. "easy", "hard").

    Returns:
        The generated question text (the ``.content`` of the model response).
    """
    question = f"Generate an interview question for the role of {role} on the topic of {topic} with difficulty level {difficulty_level}."
    template = """Question: {question}
    Answer:
    """
    # NOTE(review): ChatGoogleGenerativeAI and st (Streamlit) must be imported
    # at module level — verify against the full app.py.
    llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash-lite", google_api_key=st.secrets["GOOGLE_API_KEY"])
    # Bug fix: build the prompt BEFORE composing the chain — the original ran
    # `llm_chain = prompt | llm` with `prompt` still unassigned (NameError).
    prompt = PromptTemplate.from_template(template)
    llm_chain = prompt | llm
    # Bug fix: invoke with the input-variable dict (not the prompt object) and
    # capture the result; the original discarded the invocation result and then
    # read an undefined `response`.
    response = llm_chain.invoke({"question": question})
    return response.content
23