# Hugging Face Spaces page residue (app status "Sleeping") — kept as a comment
# so the file remains valid Python.
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from joblib import Memory
import datetime

# Disk-backed memoization for expensive model calls (wiped daily below).
cache_dir = "./cache"
memory = Memory(cache_dir, verbose=0)

# Small pre-trained causal LM that drafts the coaching text.
model_name = "distilgpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# GPT-2 ships without a pad token; reuse EOS on both tokenizer and model
# config so padding during tokenization/generation emits no warnings.
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.eos_token_id
# Instruction template sent to the model; the four placeholders are
# filled per request in generate_outputs().  The labelled sample layout
# ("Checklist:" / "Suggestions:" / "Quote:") is what the parser keys on.
PROMPT_TEMPLATE = """You are an AI coach for construction supervisors. Based on the following inputs, generate a daily checklist, focus suggestions, and a motivational quote. Format your response with clear labels as follows:
Checklist:
- Item 1
- Item 2
Suggestions:
- Suggestion 1
- Suggestion 2
Quote:
- Your motivational quote here
Inputs:
Role: {role}
Project: {project_id}
Milestones: {milestones}
Reflection: {reflection}
"""
# Day-stamp of the most recent cache wipe; checked on every request so
# cached generations never outlive the calendar day they were made on.
last_reset = datetime.date.today()


def reset_cache_if_new_day():
    """Wipe the on-disk joblib cache the first time we run on a new day."""
    global last_reset
    current_day = datetime.date.today()
    if current_day <= last_reset:
        return
    memory.clear()
    last_reset = current_day
def _generate_text(prompt):
    """Tokenize *prompt*, sample a continuation from the model, and return
    the decoded text (HF generate echoes the prompt at the front)."""
    inputs = tokenizer(
        prompt,
        return_tensors="pt",
        max_length=512,
        truncation=True,
        padding=True,
        return_attention_mask=True,
    )
    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_length=1000,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,
            top_p=0.9,
            temperature=0.8,
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


def _strip_prompt(text, prompt):
    """Return only the model's continuation of *prompt*.

    BUG FIX: the original parsed the full decoded text for section labels,
    but that text starts with the prompt -- which itself contains
    "Checklist:" / "Suggestions:" / "Quote:" sample lines -- so parsing
    always matched the template instead of the model output.
    """
    if text.startswith(prompt):
        return text[len(prompt):]
    # Truncation/decoding can perturb whitespace; fall back to cutting
    # after the last "Reflection:" line, which terminates the template.
    marker = text.rfind("Reflection:")
    if marker == -1:
        return text
    newline = text.find("\n", marker)
    return text[newline + 1:] if newline != -1 else ""


def _extract_section(text, label, stop_labels):
    """Return the stripped text between *label* and the nearest of
    *stop_labels* (or end of text); "" when *label* is absent."""
    start = text.find(label)
    if start == -1:
        return ""
    start += len(label)
    end = len(text)
    for stop in stop_labels:
        pos = text.find(stop, start)
        if pos != -1:
            end = min(end, pos)
    return text[start:end].strip()


def _fallback_checklist(milestones, reflection):
    """Deterministic checklist built from the comma-separated milestones,
    used when the model produced no usable Checklist: section."""
    items = []
    for i, milestone in enumerate(milestones.split(","), 1):
        # Sample timing: 8 AM, 10 AM, 12 PM, then 4 PM for the rest.
        if i <= 3:
            hour = 8 + (i - 1) * 2
            # BUG FIX: the old code labelled hour 12 "AM"; noon is 12 PM.
            period = "AM" if hour < 12 else "PM"
        else:
            hour, period = 4, "PM"
        items.append(f"- {milestone.strip()} by {hour} {period}.")
    # Context-specific closing task.
    if "safety" in reflection.lower():
        items.append("- Review safety compliance by 4 PM.")
    else:
        items.append("- Check equipment status by 4 PM.")
    return "\n".join(items)


def _fallback_suggestions(reflection):
    """Keyword-driven suggestions used when the model produced no usable
    Suggestions: section."""
    text = reflection.lower()
    keyword_tips = [
        (("safety",), "- Address safety concerns with a team briefing on proper procedures."),
        (("delay", "late"), "- Follow up with suppliers to prevent future delays."),
        (("weather",), "- Monitor weather updates and plan contingencies."),
        (("equipment",), "- Schedule equipment maintenance to avoid future issues."),
        (("suppliers",), "- Set up a morning call with suppliers to confirm timelines."),
    ]
    items = [tip for keys, tip in keyword_tips if any(k in text for k in keys)]
    # Generic suggestion always appended (so the old "empty list" fallback
    # branch was dead code and is dropped).
    items.append("- Brief the team on tomorrow’s goals during the daily huddle.")
    return "\n".join(items)


def _fallback_quote(reflection):
    """Keyword-matched motivational quote used when the model produced no
    usable Quote: section."""
    text = reflection.lower()
    if "safety" in text:
        return "- Build with care—safety today ensures success tomorrow!"
    if "delay" in text or "late" in text:
        return "- Keep moving forward—every challenge is a step toward success!"
    if "weather" in text:
        return "- Steady progress leads to great achievements, no matter the weather!"
    return "- Success is built one solid step at a time!"


def generate_outputs(role, project_id, milestones, reflection):
    """Produce the (checklist, suggestions, quote) strings for the UI.

    Returns an error tuple when any field is blank.  The model output is
    parsed for labelled sections; deterministic keyword-based fallbacks
    fill any section the model failed to produce.
    """
    # Validate before touching the cache or the model.
    if not all([role, project_id, milestones, reflection]):
        return "Error: All fields are required.", "", ""
    reset_cache_if_new_day()
    prompt = PROMPT_TEMPLATE.format(
        role=role,
        project_id=project_id,
        milestones=milestones,
        reflection=reflection,
    )
    # BUG FIX: the joblib Memory was configured but never used, so every
    # request re-ran generation.  memory.cache() memoizes per prompt and
    # is cleared daily by reset_cache_if_new_day().
    generated_text = memory.cache(_generate_text)(prompt)
    continuation = _strip_prompt(generated_text, prompt)
    checklist = _extract_section(continuation, "Checklist:", ("Suggestions:", "Quote:"))
    suggestions = _extract_section(continuation, "Suggestions:", ("Quote:",))
    quote = _extract_section(continuation, "Quote:", ())
    return (
        checklist or _fallback_checklist(milestones, reflection),
        suggestions or _fallback_suggestions(reflection),
        quote or _fallback_quote(reflection),
    )
def create_interface():
    """Build and return the Gradio Blocks UI.

    BUG FIX: the Clear button previously reset only the four input
    components, leaving stale results on screen, and reset the Dropdown
    with "" (not a valid choice).  It now clears inputs AND outputs, and
    uses None to return the Dropdown to its unselected state.

    NOTE(review): the source paste lost indentation, so the exact original
    row layout is reconstructed here — confirm against the live Space.
    """
    with gr.Blocks() as demo:
        gr.Markdown("# Construction Supervisor AI Coach")
        gr.Markdown("Enter details to generate a daily checklist, focus suggestions, and a motivational quote.")
        with gr.Row():
            role = gr.Dropdown(choices=["Supervisor", "Foreman", "Project Manager"], label="Role")
            project_id = gr.Textbox(label="Project ID")
        milestones = gr.Textbox(label="Milestones (comma-separated KPIs)")
        reflection = gr.Textbox(label="Reflection Log", lines=5)
        with gr.Row():
            submit = gr.Button("Generate")
            clear = gr.Button("Clear")
        checklist_output = gr.Textbox(label="Daily Checklist")
        suggestions_output = gr.Textbox(label="Focus Suggestions")
        quote_output = gr.Textbox(label="Motivational Quote")
        submit.click(
            fn=generate_outputs,
            inputs=[role, project_id, milestones, reflection],
            outputs=[checklist_output, suggestions_output, quote_output],
        )
        clear.click(
            fn=lambda: (None, "", "", "", "", "", ""),
            inputs=None,
            outputs=[
                role, project_id, milestones, reflection,
                checklist_output, suggestions_output, quote_output,
            ],
        )
    return demo
# Script entry point: build the UI and serve it.
if __name__ == "__main__":
    create_interface().launch()