AdamyaG commited on
Commit
aa9b8ef
·
verified ·
1 Parent(s): 373d1ea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -1
app.py CHANGED
@@ -1,10 +1,35 @@
1
  import streamlit as st
2
  from langchain_google_genai import ChatGoogleGenerativeAI
3
  import re
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
  def generate_question(role, topic, difficulty_level):
6
  prompt = f"Generate an interview question for the role of {role} on the topic of {topic} with difficulty level {difficulty_level}."
7
- llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash-lite", google_api_key=st.secrets["GOOGLE_API_KEY"])
 
 
 
 
 
 
 
 
8
  response = llm.invoke(prompt)
9
  response = response.content
10
 
 
1
  import streamlit as st
2
  from langchain_google_genai import ChatGoogleGenerativeAI
3
  import re
4
+ from langchain_huggingface import HuggingFaceEndpoint
5
+ from langchain.chains import LLMChain
6
+ from langchain_core.prompts import PromptTemplate
7
+ import os
8
+
9
+ # os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
10
+ # HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
11
+
12
+ # repo_id = "Qwen/Qwen3-8B"
13
+ # llm = HuggingFaceEndpoint(
14
+ # repo_id=repo_id,
15
+ # max_length=512,
16
+ # temperature=0.5,
17
+ # )
18
+ # llm_chain = prompt | llm
19
+ # print(llm_chain.invoke({"question": question}))
20
+
21
 
22
  def generate_question(role, topic, difficulty_level):
23
  prompt = f"Generate an interview question for the role of {role} on the topic of {topic} with difficulty level {difficulty_level}."
24
+ # llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash-lite", google_api_key=st.secrets["GOOGLE_API_KEY"])
25
+ repo_id = "Qwen/Qwen3-8B"
26
+ llm = HuggingFaceEndpoint(
27
+ repo_id=repo_id,
28
+ max_length=512,
29
+ temperature=0.5,
30
+ )
31
+ # llm_chain = prompt | llm
32
+ prompt = PromptTemplate.from_template(prompt)
33
  response = llm.invoke(prompt)
34
  response = response.content
35