# Quoota RAG24 Alpha — Hugging Face Space entry point (app.py)
import os
import traceback
from datetime import datetime

import gradio as gr

import dataset_manager
from agent import analyze_case
from rag7 import generate_practical_guide
from rag24 import generate_academic_analysis
print("[DEBUG] Importing modules...")

# The Space cannot run without a Hugging Face API token supplied through
# the repository secrets; fail fast at import time instead of on the
# first request.
HF_TOKEN = os.getenv("HF_TOKEN")
if HF_TOKEN is None or HF_TOKEN == "":
    raise RuntimeError("Missing HF_TOKEN in Secrets")
print("[DEBUG] HF_TOKEN found")

# Module-level state describing the most recent interaction; written by
# generate_responses() and read back by save_rating_to_dataset() when the
# coach submits a rating.
current_timestamp = current_input = None
current_acad_response = current_pract_response = None
current_metadata = None
current_temp_acad = current_topp_acad = None
current_context = None  # citations string shown in the context panel
print("[DEBUG] Global variables initialized")
def generate_responses(
    message,
    temp_acad, topp_acad, max_tok_acad,
    temp_pract, topp_pract, max_tok_pract
):
    """
    Generate both responses (academic + practical) and enable the rating UI.

    Pipeline:
      1. agent.analyze_case()          -> retrieval + conflict detection
      2. generate_academic_analysis()  -> Llama-3.3-70B academic answer
      3. generate_practical_guide()    -> GPT-OSS-20B practical guide

    Args:
        message: free-text description of the workplace conflict.
        temp_acad, topp_acad, max_tok_acad: sampling parameters for the
            academic model.
        temp_pract, topp_pract, max_tok_pract: sampling parameters for the
            practical model.

    Returns:
        A 7-tuple matching the Gradio outputs wiring:
        (academic_text, practical_text, citations_text,
         stars update, coach_notes update, submit_btn update,
         feedback_out update).
        On failure every text slot carries the error message and the
        rating widgets stay disabled.
    """
    # Record the interaction in module globals so save_rating_to_dataset()
    # can persist it later alongside the coach's rating.
    global current_timestamp, current_input, current_acad_response, current_pract_response
    global current_metadata, current_temp_acad, current_topp_acad, current_context

    print(f"\n{'='*80}")
    print(f"π NEW QUERY: {message[:100]}...")
    print(f"{'='*80}")

    current_timestamp = datetime.now().isoformat()
    current_input = message
    current_temp_acad = temp_acad
    current_topp_acad = topp_acad

    try:
        # 1. ANALYZE CASE with RAG24 (retrieval + conflict detection)
        print("\nπ Step 1: Analyzing case with agent.analyze_case()...")
        result = analyze_case(message, k=5)
        metadata = result["metadata"]
        citations = result["citations"]
        current_metadata = metadata
        current_context = citations  # stored so the rating can archive it
        print("β Analysis completed:")
        print(f" - Conflict type: {metadata['conflict_type']} ({metadata['confidence']}%)")
        print(f" - Sources retrieved: {metadata['num_sources']}")
        print(f" - Unique tags: {metadata['unique_tags']}")

        # 2. GENERATE ACADEMIC ANALYSIS with Llama-3.3-70B-Instruct
        print("\nπ§ Step 2: Generating academic analysis with Llama-3.3-70B...")
        acad_response, acad_metadata = generate_academic_analysis(
            user_message=message,
            context=citations,  # citations is the formatted string
            metadata=metadata,
            temperature=temp_acad,
            top_p=topp_acad,
            max_tokens=max_tok_acad
        )
        current_acad_response = acad_response
        print(f"β Academic analysis generated ({len(acad_response)} chars)")
        print(f" - Works cited: {acad_metadata.get('logical_works_used', [])}")

        # 3. GENERATE PRACTICAL GUIDE with GPT-OSS-20B
        print("\nπ Step 3: Generating practical guide with GPT-OSS-20B...")
        pract = generate_practical_guide(
            message,
            temperature=temp_pract,
            top_p=topp_pract,
            max_tokens=max_tok_pract
        )
        current_pract_response = pract
        print(f"β Practical guide generated ({len(pract)} chars)")
        print(f"\n{'='*80}\n")

        # Success: show the three texts and enable the rating system.
        return (
            acad_response,
            pract,
            citations,  # show academic context
            gr.update(interactive=True),   # stars
            gr.update(interactive=True),   # coach_notes
            gr.update(interactive=True),   # submit_btn
            gr.update(value="")            # feedback_out
        )
    except Exception as e:
        # Broad catch is deliberate: this is the UI boundary, so any failure
        # must surface inside the widgets rather than crash the app.
        error_msg = f"β Error in generate_responses: {str(e)}"
        print(error_msg)
        traceback.print_exc()
        return (
            f"Error generating academic analysis:\n{str(e)}",
            f"Error generating practical guide:\n{str(e)}",
            f"Error in retrieval:\n{str(e)}",  # context_out
            gr.update(interactive=False),
            gr.update(interactive=False),
            gr.update(interactive=False),
            gr.update(value=error_msg)
        )
def save_rating_to_dataset(stars, coach_notes):
    """
    Save the coach rating plus the full interaction to a HuggingFace Dataset.

    Args:
        stars: radio value such as "4 stars"; None/empty when not selected.
        coach_notes: optional free-text notes from the coach.

    Returns:
        Status string for the feedback textbox (success or error message).
    """
    # NOTE: this function only READS the module-level state written by
    # generate_responses(), so no `global` declaration is required.
    if not current_timestamp:
        return "β οΈ No active interaction to rate"
    if not stars:
        return "β οΈ Please select a rating (1-5 stars)"
    try:
        # "N stars" -> N
        star_value = int(stars.split()[0])
        print(f"\nβ SAVING RATING: {star_value} stars")
        print(f"π Coach notes: {coach_notes[:100] if coach_notes else 'No notes'}...")

        # Categories actually weighted by the retriever for this query.
        categories_used = list(current_metadata.get("faiss_weights_applied", {}).keys())

        # Build the dataset row; missing metadata fields fall back to
        # neutral defaults so a partial analysis can still be saved.
        data = dataset_manager.create_dataset_entry(
            timestamp=current_timestamp,
            user_input=current_input,
            stars=star_value,
            coach_notes=coach_notes or "",
            conflict_type=current_metadata.get("conflict_type", "unknown"),
            confidence=current_metadata.get("confidence", 0),
            num_sources=current_metadata.get("num_sources", 0),
            unique_tags=current_metadata.get("unique_tags", []),
            sources_raw=current_metadata.get("sources_raw", []),
            logical_works_used=current_metadata.get("logical_works_used", []),
            categories_used=categories_used,
            faiss_weights_applied=current_metadata.get("faiss_weights_applied", {}),
            temperature_used=current_temp_acad,
            top_p_used=current_topp_acad,
            academic_response=current_acad_response,
            practical_response=current_pract_response,
            citations_context=current_context or ""  # full academic context
        )

        # Push to HuggingFace
        dataset_manager.push_interaction_to_dataset(data)
        success_msg = f"β Rating saved successfully to HuggingFace ({star_value} stars)"
        print(success_msg)
        print(f"π Context saved: {len(current_context) if current_context else 0} chars")
        return success_msg
    except Exception as e:
        # UI boundary: report the failure in the status box, never crash.
        error_msg = f"β Error saving to dataset: {str(e)}"
        print(error_msg)
        traceback.print_exc()
        return error_msg
# ============================================================================
# GRADIO INTERFACE
# ============================================================================
print("[DEBUG] Creating Gradio interface...")

# NOTE(review): original indentation was lost in extraction; the nesting
# below is reconstructed from the layout comments — confirm against the app.
with gr.Blocks(title="Quoota RAG24 Alpha") as demo:
    # Header banner.
    gr.Markdown("""
# π§ Quoota RAG24 Alpha
### Corporate Conflict Analysis System for Fine-Tuning
**RAG24:** Academic analysis based on 24 works (16,997 vectorized chunks)
**RAG7:** Step-by-step practical guide
""")
    with gr.Row():
        # LEFT COLUMN: Input and configuration
        with gr.Column(scale=2):
            # Free-text conflict description typed by the user.
            inp = gr.Textbox(
                label="Describe the workplace conflict",
                placeholder="E.g.: My boss asks for updates every hour...",
                lines=4
            )
            # Triggers generate_responses() (wired in the EVENTS section).
            btn = gr.Button("π Analyze Conflict", variant="primary")
        # RIGHT COLUMN: LLM Parameters
        with gr.Column(scale=1):
            gr.Markdown("### βοΈ RAG24 Parameters")
            gr.Markdown("*Llama-3.3-70B-Instruct (Academic)*")
            # Sampling parameters for the academic model.
            temp_acad = gr.Slider(0.1, 1.0, value=0.3, step=0.1, label="Temperature")
            topp_acad = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p")
            max_tok_acad = gr.Slider(256, 1024, value=768, step=128, label="Max tokens")
            gr.Markdown("### βοΈ RAG7 Parameters")
            gr.Markdown("*GPT-OSS-20B (Practical)*")
            # Sampling parameters for the practical model.
            temp_pract = gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="Temperature")
            topp_pract = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
            max_tok_pract = gr.Slider(256, 2048, value=2048, step=256, label="Max tokens")
            gr.Markdown("---")
            gr.Markdown("### β Rating System")
            # Rating widgets start disabled; generate_responses() enables
            # them once there is a pair of responses to rate.
            stars = gr.Radio(
                choices=["1 star", "2 stars", "3 stars", "4 stars", "5 stars"],
                label="Response Quality",
                value=None,
                interactive=False
            )
            coach_notes = gr.Textbox(
                label="Coach Notes (optional)",
                placeholder="E.g.: Excellent EGO conflict detection, cited Brown & Levinson correctly...",
                lines=3,
                interactive=False
            )
            submit_btn = gr.Button(
                "πΎ Save to HuggingFace",
                interactive=False,
                variant="secondary"
            )
            # Status line showing save_rating_to_dataset() results.
            feedback_out = gr.Textbox(
                label="Status",
                interactive=False,
                show_label=False
            )
    # ROW 1: Main outputs (analysis + guide)
    with gr.Row():
        with gr.Column(scale=2):
            gr.Markdown("### π Academic Analysis (RAG24)")
            gr.Markdown("*Based on 24 academic works with multilingual system*")
            acad_out = gr.Textbox(
                lines=25,
                max_lines=40,
                interactive=False
            )
        with gr.Column(scale=2):
            gr.Markdown("### π Practical Guide (RAG7)")
            gr.Markdown("*Concrete steps to resolve the conflict*")
            pract_out = gr.Textbox(
                lines=25,
                max_lines=40,
                interactive=False
            )
    # ROW 2: Academic context (collapsible)
    with gr.Row():
        with gr.Column():
            with gr.Accordion("π¬ Academic Context (for coach validation)", open=True):
                gr.Markdown("""
**This panel shows the context the LLM received to generate the academic analysis.**
Use it to validate if:
- β Retrieved sources are relevant to the conflict
- β Methodological weights are correct (ego/data/mixed)
- β Hybrid scores make sense (similarity + weight + tags)
- β The LLM cited sources correctly in its academic response
**Included information:**
- Applied retrieval system (conflict type, weights by category)
- Top 5 sources with broken-down hybrid scoring
- Full content of each chunk
- Authors identified in each source
""")
                # Raw citations string that was passed to the academic LLM.
                context_out = gr.Textbox(
                    label="Full Academic Citations (sent to LLM)",
                    lines=30,
                    max_lines=50,
                    interactive=False
                )
    # EXAMPLES: one-click sample conflict cases for the input textbox.
    gr.Examples(
        examples=[
            ["My department colleague always takes credit for my ideas in meetings. She says 'we thought' but the ideas are mine. It's happened three times this month and I'm fed up."],
            ["My boss is never in the office and keeps calling to demand work without providing the means. He asks for reports that require access to databases only he can authorize. I'm exhausted."],
            ["In team meetings, my boss interrupts me every time I speak. He doesn't let me finish a sentence. Others don't even look at me when I try to intervene. I feel invisible."],
            ["A colleague made a mistake in a report I reviewed. Now everyone thinks the mistake was mine because I signed it. They avoid me in the hallways. No one talks to me."],
            ["My boss assigns me twice as many tasks as my colleague. When I ask why, he says 'because you're faster'. But that means I work twice as hard for the same salary. It's unfair."],
            ["My boss asks for updates every hour on what I'm doing. She messages to ask 'what are you working on' constantly. I can't focus. I feel like she doesn't trust me."],
            ["In yesterday's meeting, my boss said in front of everyone 'even an intern could do this'. He was talking about MY presentation. I was paralyzed. I didn't know what to say."],
            ["My boss moved me to a different project without telling me. I found out because a colleague asked why I wasn't in the meeting anymore. I'd been on that project for 6 months. I feel disregarded."],
            ["Someone has been saying I leaked confidential information. It's a lie. Now my boss looks at me strangely and I was excluded from the last important meeting. I don't know who said it or how to prove it."],
            ["I've been working until 11pm every day for 3 months. I asked my boss for help a month ago. He said 'hang in there a bit longer'. I can't take it anymore. I'm on the verge of collapse. I don't sleep. I don't eat well. I feel like I'm going to explode."]
        ],
        inputs=inp,
        label="π‘ Example Cases"
    )
    # EVENTS
    # Output order must match the 7-tuple returned by generate_responses().
    btn.click(
        fn=generate_responses,
        inputs=[
            inp,
            temp_acad, topp_acad, max_tok_acad,
            temp_pract, topp_pract, max_tok_pract
        ],
        outputs=[
            acad_out,
            pract_out,
            context_out,  # NEW: show academic context
            stars,
            coach_notes,
            submit_btn,
            feedback_out
        ]
    )
    # Persist the coach rating; result string lands in feedback_out.
    submit_btn.click(
        fn=save_rating_to_dataset,
        inputs=[stars, coach_notes],
        outputs=[feedback_out]
    )
print("[DEBUG] Gradio interface created successfully")
# ============================================================================
# LAUNCH
# ============================================================================
if __name__ == "__main__":
    # Startup banner on stdout.
    separator = "=" * 80
    print("\n" + separator)
    print("π LAUNCHING QUOOTA RAG24 ALPHA")
    print(separator + "\n")
    # Blocks here serving requests until the Gradio server is stopped.
    demo.launch()
    print("\nβ Demo launched successfully\n")