# GreenerGlass — Hugging Face Space: interview-question generator (Gemma 2 + Gradio)
# Load the required libraries.
import os

import gradio as gr
import torch
from huggingface_hub import login
from transformers import AutoTokenizer, AutoModelForCausalLM

# Read the access token from the Space secret and authenticate when present;
# the token is required to download the gated Gemma model.
token = os.getenv("GreenerGlass")
if token:
    login(token=token)
# Load the model and tokenizer with the token (the repo is gated).
model_name = "google/gemma-2-2b-it"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    token=token,
    device_map="auto",           # let accelerate place layers on GPU/CPU
    torch_dtype=torch.float16,   # half precision to halve the memory footprint
)
# NOTE(review): `device` is assigned but never used — generation relies on
# model.device (set by device_map="auto"). Kept so any external reader of
# this module-level name keeps working.
device = "cuda" if torch.cuda.is_available() else "cpu"
def generate_text(job_title, num_questions, temperature):
    """Generate professional interview questions for a given job title.

    Args:
        job_title: Position to generate questions for (English works best).
        num_questions: Number of questions requested in the prompt.
        temperature: Sampling temperature; higher values are more creative.

    Returns:
        The model's generated questions as plain text, with markdown
        asterisks stripped and without the prompt echoed back.
    """
    # Build the prompt for interview-question generation.
    prompt = (
        f"Generate {num_questions} professional interview questions for a "
        f"{job_title} position. Provide clear, insightful questions that "
        f"assess the candidate's skills and experience:"
    )
    # Tokenize the prompt and move it to wherever the model lives.
    inputs = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        inputs,
        # max_new_tokens counts only generated tokens, so a long prompt can
        # no longer eat into the question budget (the original max_length=300
        # included the prompt tokens in the limit).
        max_new_tokens=300,
        temperature=temperature,
        num_return_sequences=1,
        do_sample=True,
        top_p=0.9,
        top_k=50,
    )
    # Decode only the newly generated tokens so the prompt itself is not
    # shown to the user (the original decoded the full sequence).
    generated_text = tokenizer.decode(
        outputs[0][inputs.shape[-1]:], skip_special_tokens=True
    )
    # Strip the markdown bold/bullet asterisks the model tends to emit.
    return generated_text.replace('*', '')
# Custom Gradio theme: green primary palette over a neutral gray scale,
# with a dark-green page background in dark mode.
custom_theme = gr.themes.Base(
    primary_hue=gr.themes.Color(
        name="green",
        c50="#e8f5e9",
        c100="#c8e6c9",
        c200="#a5d6a7",
        c300="#81c784",
        c400="#66bb6a",
        c500="#4caf50",
        c600="#43a047",
        c700="#388e3c",
        c800="#2e7d32",
        c900="#1b5e20",
        c950="#0d3b0d",  # darkest green, used for the dark-mode background
    ),
    neutral_hue=gr.themes.Color(
        name="gray",
        c50="#f9fafb",
        c100="#f3f4f6",
        c200="#e5e7eb",
        c300="#d1d5db",
        c400="#9ca3af",
        c500="#6b7280",
        c600="#4b5563",
        c700="#374151",
        c800="#1f2937",
        c900="#111827",
        c950="#030712",
    ),
).set(
    # Gradio theme-variable references use "*name" syntax; the original
    # "--primary-950" is raw CSS-custom-property spelling and does not
    # resolve through the theme engine.
    body_background_fill_dark="*primary_950",
)
# Gradio user interface: job-title input, tunable options, and the
# generated-questions output, wired to generate_text.
with gr.Blocks(theme=custom_theme) as interface:
    gr.Markdown("🍀 **GreenerGlass question manager** 🍀")
    gr.Markdown("""Powered by Google's Gemma 2 model to generate professional interview questions.
⚠️ Note: Works better in English. ⚠️""")
    job_title_input = gr.Textbox(
        label="Job Title ",
        placeholder="Job title in English, e.g. Software Developer",
        lines=2,
    )
    with gr.Accordion("More options"):
        num_questions_slider = gr.Slider(
            3, 8, value=5, step=1, label="Number of Questions"
        )
        temperature_slider = gr.Slider(
            0.6, 1.2, value=0.8, step=0.1,
            label="Temperature (higher = more creative)",
        )
    generate_button = gr.Button("Generate Questions", variant='primary')
    output_text = gr.Textbox(label="Interview Questions", lines=15)
    generate_button.click(
        fn=generate_text,
        inputs=[job_title_input, num_questions_slider, temperature_slider],
        outputs=output_text,
    )
if __name__ == "__main__":
    # share=True creates a public link when running outside Hugging Face Spaces.
    interface.launch(share=True)