File size: 3,526 Bytes
74950d2
3ae75b1
74950d2
 
 
3ae75b1
74950d2
 
8517cb1
74950d2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8517cb1
74950d2
 
3ae75b1
74950d2
 
 
 
3ae75b1
74950d2
3ae75b1
74950d2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
# app.py
import gradio as gr
from agents.nsfw_agent import NSFWSemanticChatAgent
from generators.llm_backend import OpenAIBackend, HuggingFaceBackend
from prompts.nsfw_templates import NSFWPromptTemplate
from datasets import load_dataset
import os
from typing import List, Tuple

class NSFWSemanticChatbot:
    """Retrieval-augmented chatbot wiring together three collaborators.

    - ``agent``: semantic retriever (builds/queries an embedding index)
    - ``prompt_template``: assembles the final LLM prompt from context
    - ``generator``: text-generation backend (OpenAI or HuggingFace)
    """

    def __init__(self, backend_type: str = "openai"):
        """Construct the pipeline and eagerly index the sample corpus.

        Args:
            backend_type: "openai" (requires OPENAI_API_KEY in the
                environment) — any other value selects the HuggingFace
                backend.

        Raises:
            ValueError: if "openai" is selected but no API key is set.
        """
        self.agent = NSFWSemanticChatAgent()
        self.prompt_template = NSFWPromptTemplate()

        if backend_type != "openai":
            self.generator = HuggingFaceBackend()
        else:
            key = os.getenv("OPENAI_API_KEY")
            if not key:
                raise ValueError("OpenAI API key required")
            self.generator = OpenAIBackend(key)

        self._load_dataset()

    def _load_dataset(self) -> None:
        """Seed the retrieval index with dialogue examples.

        Uses placeholder strings for demonstration; swap in a real corpus
        via ``load_dataset(...)``. Indexing failures are logged and
        swallowed (deliberate best-effort — the bot still runs).
        """
        seed_texts = [
            "That's such an interesting perspective...",
            "I love how you think about these things...",
            "Tell me more about what you're feeling...",
            # Add your actual NSFW dialogue samples
        ]
        try:
            self.agent.build_index(seed_texts)
        except Exception as e:
            print(f"Dataset loading error: {e}")

    def generate_response(
        self,
        chat_history: List[Tuple[str, str]],
        user_input: str,
    ) -> List[Tuple[str, str]]:
        """Run one chat turn: retrieve → prompt → generate → append.

        Returns the history with one new (user, bot) pair appended; on
        blank input or any pipeline failure the "bot" side carries a
        warning/error message instead of a generated reply.
        """
        # Guard clause: never hit the model for whitespace-only input.
        if not user_input.strip():
            return chat_history + [(user_input, "⚠️ Please provide input")]

        try:
            examples = self.agent.retrieve_context(user_input, k=3)
            prompt = self.prompt_template.build_context_prompt(
                user_input, chat_history, examples
            )
            reply = self.generator.generate_response(prompt, max_tokens=150)
        except Exception as e:
            reply = f"System error: {str(e)}"
        return chat_history + [(user_input, reply)]

# Initialize chatbot instance at import time.
# NOTE(review): this runs as a module-level side effect — importing app.py
# raises ValueError when OPENAI_API_KEY is unset; pass
# backend_type="huggingface" for a keyless local backend.
chatbot = NSFWSemanticChatbot(backend_type="openai")  # or "huggingface"

# Create Gradio interface
def chat_interface(message, history):
    """Gradio-compatible chat function.

    ``gr.ChatInterface`` expects its ``fn`` to return only the bot's reply
    (a string), not the full conversation — returning the whole updated
    history list (as the original did) breaks chat rendering.
    """
    updated = chatbot.generate_response(history, message)
    # generate_response always appends exactly one (user, bot) pair — on
    # the success, blank-input, and error paths alike — so the newest bot
    # reply is the second element of the last pair.
    return updated[-1][1]

# Launch application (script entry point; not executed on import).
if __name__ == "__main__":
    demo = gr.ChatInterface(
        fn=chat_interface,
        title="🔞 NSFW Semantic Chatbot",
        description="Advanced conversational AI using semantic embeddings and retrieval-augmented generation",
        theme="soft",
        # NOTE(review): retry_btn/undo_btn/clear_btn were deprecated in
        # Gradio 4.x and removed in 5.x — these kwargs raise TypeError on
        # current Gradio; confirm the pinned version or drop them.
        retry_btn="Regenerate Response",
        undo_btn="Undo Last",
        clear_btn="Clear Conversation"
    )
    
    demo.launch(
        server_name="0.0.0.0",  # bind all interfaces (LAN-reachable)
        server_port=7860,
        # NOTE(review): share=True opens a public Gradio tunnel to this
        # app — confirm that exposure is intended.
        share=True
    )