# GreenerGlass question manager — Hugging Face Space app (commit 81a49f7).
# Import required libraries: Gradio UI, PyTorch, and Hugging Face Hub/Transformers.
import gradio as gr
import torch
import os
from huggingface_hub import login
from transformers import AutoTokenizer, AutoModelForCausalLM

# Authenticate against the Hugging Face Hub. Gemma is a gated model, so a
# valid token (stored as the Space secret "GreenerGlass") is required.
token = os.getenv("GreenerGlass")  # May be None outside the Space environment
if token:
    login(token=token)

# Load the instruction-tuned Gemma 2 2B model and its tokenizer.
model_name = "google/gemma-2-2b-it"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    token=token,
    device_map="auto",          # Place weights automatically on GPU/CPU
    torch_dtype=torch.float16,  # Half precision to fit modest GPUs
)
def generate_text(job_title, num_questions, temperature):
    """Generate professional interview questions for a given job title.

    Args:
        job_title: Position to generate questions for (free text, English works best).
        num_questions: How many questions to ask the model for.
        temperature: Sampling temperature (higher = more creative output).

    Returns:
        The generated questions as plain text with markdown asterisks removed.
    """
    # Build the instruction prompt for the model.
    prompt = f"Generate {num_questions} professional interview questions for a {job_title} position. Provide clear, insightful questions that assess the candidate's skills and experience:"
    # Tokenize with the full tokenizer call so the attention mask is included
    # (tokenizer.encode alone would drop it and trigger a generate() warning).
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Generate; max_new_tokens budgets the ANSWER only — the old max_length=300
    # counted prompt tokens too, shrinking the answer as the prompt grew.
    outputs = model.generate(
        **inputs,
        max_new_tokens=300,
        temperature=temperature,
        num_return_sequences=1,
        do_sample=True,
        top_p=0.9,
        top_k=50,
    )
    # Decode only the newly generated tokens so the prompt is not echoed
    # back into the UI output.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    generated_text = tokenizer.decode(new_tokens, skip_special_tokens=True)
    # Strip markdown bold/italic asterisks for clean plain-text display.
    return generated_text.replace('*', '')
# Custom Gradio theme: a green primary palette over a dark-green page background.
_GREEN_SHADES = {
    "c50": "#e8f5e9", "c100": "#c8e6c9", "c200": "#a5d6a7",
    "c300": "#81c784", "c400": "#66bb6a", "c500": "#4caf50",
    "c600": "#43a047", "c700": "#388e3c", "c800": "#2e7d32",
    "c900": "#1b5e20", "c950": "#0d3b0d",  # darkest shade doubles as the background
}
_GRAY_SHADES = {
    "c50": "#f9fafb", "c100": "#f3f4f6", "c200": "#e5e7eb",
    "c300": "#d1d5db", "c400": "#9ca3af", "c500": "#6b7280",
    "c600": "#4b5563", "c700": "#374151", "c800": "#1f2937",
    "c900": "#111827", "c950": "#030712",
}
custom_theme = gr.themes.Base(
    primary_hue=gr.themes.Color(name="green", **_GREEN_SHADES),
    neutral_hue=gr.themes.Color(name="gray", **_GRAY_SHADES),
).set(
    # Use the darkest primary shade as the dark-mode body background.
    body_background_fill_dark="--primary-950",
)
# Gradio UI: job-title input, tunable options, and the generated-questions output.
with gr.Blocks(theme=custom_theme) as interface:
    gr.Markdown("🍀 **GreenerGlass question manager** 🍀")
    gr.Markdown("""Powered by Google's Gemma 2 model to generate professional interview questions.
⚠️ Note: Works better in English. ⚠️""")
    job_title_input = gr.Textbox(
        label="Job Title ",
        placeholder="Job title in English, e.g. Software Developer",
        lines=2,
    )
    # Less common knobs live in a collapsible accordion.
    with gr.Accordion("More options"):
        num_questions_slider = gr.Slider(3, 8, value=5, step=1, label="Number of Questions")
        temperature_slider = gr.Slider(0.6, 1.2, value=0.8, step=0.1, label="Temperature (higher = more creative)")
    generate_button = gr.Button("Generate Questions", variant='primary')
    output_text = gr.Textbox(label="Interview Questions", lines=15)
    # Wire the button click to the generator function.
    generate_button.click(
        fn=generate_text,
        inputs=[job_title_input, num_questions_slider, temperature_slider],
        outputs=output_text,
    )

if __name__ == "__main__":
    interface.launch(share=True)  # share=True exposes a temporary public link