#!/usr/bin/env python3
"""
FTTH Chatbot – Fiber Optic Engineering Demonstration
Web interface with Gradio + Llama 3.3 Instruct via NVIDIA API
Optimized for RTX 5090
"""

import gradio as gr
import os
from openai import OpenAI

# NVIDIA API configuration — read the key from the environment so it is
# never hard-coded in the source.
NVIDIA_API_KEY = os.environ.get("NVIDIA_API_KEY")

# Initialize the OpenAI client against NVIDIA's OpenAI-compatible endpoint.
# Leave `client` as None when the key is missing so the app still starts and
# the chat handler can surface a configuration error instead of crashing at
# import time.
if NVIDIA_API_KEY:
    client = OpenAI(
        base_url="https://integrate.api.nvidia.com/v1",
        api_key=NVIDIA_API_KEY,
    )
else:
    client = None

# System prompt for the FTTH chatbot
SYSTEM_PROMPT = """You are an expert in FTTH (Fiber to the Home) engineering – Fiber Optics to the Home. Your role is to answer questions about:
- FTTH project design and implementation
- Fiber optic infrastructure
- Fiber access technologies
- Benefits and characteristics of FTTH
- Technical challenges and solutions
- Standards and regulations
- Costs and economic feasibility
- Comparison with other technologies (ADSL, VDSL, 4G/5G)
- Installation and maintenance of FTTH networks
- Network components (OLT, ONT, splitters, cables, etc.)

Respond in a clear, professional, and educational manner, adapting the level of technical detail to the question. When appropriate, provide practical examples and engineering recommendations.

Keep responses concise but informative, suitable for a professional presentation.
"""


def _normalize_history(history: list) -> list:
    """Convert Gradio chat history into plain OpenAI-style message dicts.

    Gradio's ChatInterface may deliver history either as
    ``[user_message, assistant_reply]`` pairs (tuple format) or as message
    dicts that can carry extra keys such as ``metadata``. The NVIDIA
    endpoint only accepts ``role``/``content``, so everything else is
    stripped here.

    Args:
        history: Conversation history as provided by Gradio.

    Returns:
        A list of ``{"role": ..., "content": ...}`` dicts.
    """
    messages = []
    for entry in history:
        if isinstance(entry, dict):
            # Message-format history: keep only the keys the API understands.
            messages.append({"role": entry["role"], "content": entry["content"]})
        else:
            # Tuple/list-format history: (user_message, assistant_reply).
            # Either side may be None/empty (e.g. a pending reply) — skip it.
            user_msg, assistant_msg = entry
            if user_msg:
                messages.append({"role": "user", "content": user_msg})
            if assistant_msg:
                messages.append({"role": "assistant", "content": assistant_msg})
    return messages


def chat_ftth_nvidia(message: str, history: list) -> str:
    """Answer an FTTH-related question using Llama 3.3 via the NVIDIA API.

    Args:
        message: The user's current question.
        history: Prior conversation turns as supplied by Gradio (tuple or
            message-dict format — both are handled).

    Returns:
        The model's reply, or a human-readable error string when the API
        key is missing or the request fails.
    """
    # Guard: without a configured key there is no client to call.
    if not client:
        return "❌ Error: NVIDIA_API_KEY is not configured. Please set it in the Space settings."

    # Build the message list for the API: system prompt, then the prior
    # conversation, then the new user message.
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    messages.extend(_normalize_history(history))
    messages.append({"role": "user", "content": message})

    try:
        # Call the NVIDIA API through the OpenAI-compatible client.
        completion = client.chat.completions.create(
            model="meta/llama-3.3-70b-instruct",
            messages=messages,
            temperature=0.7,
            top_p=0.7,
            max_tokens=1024,
            stream=False,
        )
        return completion.choices[0].message.content
    except Exception as e:
        # Surface the failure to the user instead of crashing the UI.
        return f"❌ Error while processing your question: {str(e)}"


# Create the Gradio interface: a markdown header, the chat widget, and a
# footer with usage tips.
with gr.Blocks(title="FTTH Chatbot") as demo:
    gr.Markdown("""
    # 🌐 FTTH Chatbot – Fiber Optic Engineering

    Welcome to the specialized assistant for **FTTH (Fiber to the Home)**.
    Ask questions about fiber optic projects, infrastructure, technologies, and deployment.

    **Technology:** Llama 3.3 Instruct (NVIDIA API) + RTX 5090
    """)

    chatbot = gr.ChatInterface(
        chat_ftth_nvidia,
        examples=[
            "What is FTTH and what are its main benefits?",
            "What is the difference between FTTH, FTTP, and FTTC?",
            "What are the main components of an FTTH network?",
            "How is fiber optic installation done in a residential building?",
            "What is the typical speed of an FTTH connection?",
            "What are the challenges of deploying FTTH in rural areas?",
            "How does an optical splitter work in an FTTH network?",
            "What is the approximate cost of FTTH deployment per kilometer?",
        ],
        title="FTTH Assistant",
        description="Ask your questions about FTTH and receive expert answers.",
    )

    gr.Markdown("""
    ---
    **Usage tips:**
    - Ask specific questions about FTTH technical aspects
    - Use practical examples to get more detailed answers
    - The chatbot keeps conversation context for more relevant responses

    **Technology stack:**
    - Model: Llama 3.3 Instruct (Meta)
    - API: NVIDIA Cloud
    - GPU: RTX 5090 (optimized)

    *Developed for engineering meeting demonstrations*
    """)


if __name__ == "__main__":
    demo.launch()