File size: 5,596 Bytes
cd73647
 
 
 
 
 
 
 
 
 
 
 
4195703
 
 
 
 
 
 
cd73647
4195703
 
cd73647
 
 
 
4195703
 
 
 
 
 
 
cd73647
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4195703
cd73647
 
 
 
4195703
cd73647
 
 
 
4195703
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cd73647
 
 
 
4195703
cd73647
 
 
 
4195703
cd73647
4195703
cd73647
 
4195703
cd73647
 
4195703
cd73647
 
4195703
cd73647
4195703
 
 
 
 
 
 
 
 
 
 
 
cd73647
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
import os
import json
import time
import uuid
import datetime
import torch
import gradio as gr
import numpy as np
import nltk
from nltk.sentiment import SentimentIntensityAnalyzer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import sys

# Debugging info: log interpreter/library versions at import time so the
# runtime environment is visible in the hosting platform's build log.
print("Python version:", sys.version)
print("PyTorch version:", torch.__version__)
print("CUDA available:", torch.cuda.is_available())

# transformers is optional: on ImportError, `pipeline` is bound to None so
# later code (MLModels.__init__) can detect the absence and fall back to NLTK.
try:
    from transformers import pipeline, __version__ as transformers_version
    print("Transformers version:", transformers_version)
except ImportError as e:
    print(f"Failed to import transformers: {e}")
    pipeline = None

# Download NLTK resources with error handling.
# A single try wraps all downloads on purpose: if one fails (e.g. no network),
# the remaining ones are skipped and exactly one error line is printed.
try:
    for _nltk_resource in ("vader_lexicon", "punkt", "stopwords"):
        nltk.download(_nltk_resource, quiet=True)
except Exception as e:
    print(f"Error downloading NLTK data: {e}")

# VADER sentiment analyzer — serves as the fallback when the transformers
# pipeline is unavailable (see MLModels).
sia = SentimentIntensityAnalyzer()

# Data storage
class DataStore:
    """In-memory store for users, blogs, comments, chats, and interactions.

    All collections are plain dicts keyed by IDs produced by the add_* helper
    methods (defined elsewhere in this class). Sample content is seeded at
    construction time.
    """

    def __init__(self):
        self.users = {}
        self.blogs = {}
        self.comments = {}
        self.chats = {}
        self.user_interactions = {}
        self.current_user_id = None
        self.create_sample_data()

    def create_sample_data(self):
        """Seed the store: 1 admin, 3 bloggers, 5 readers, 7 demo blogs.

        Each blog gets 3 comments and one interaction per reader with a
        random engagement score in [1, 10).
        """
        # user_ids layout: [0] admin, [1:4] bloggers, [4:] readers.
        user_ids = [self.add_user("admin", "admin123", "admin@example.com", "Admin User", "admin")]
        for i in range(1, 4):
            user_ids.append(self.add_user(f"blogger{i}", f"password{i}", f"blogger{i}@example.com", f"Blogger {i}", "blogger"))
        for i in range(1, 6):
            user_ids.append(self.add_user(f"reader{i}", f"password{i}", f"reader{i}@example.com", f"Reader {i}", "reader"))

        blog_topics = [
            "Machine Learning Fundamentals",
            "Web Development Best Practices",
            "Data Science in Practice",
            "Python Programming Tips",
            "Neural Networks Explained",
            "Modern JavaScript Frameworks",
            "Cloud Computing Services"
        ]

        blog_contents = [
            "Machine learning is a branch of artificial intelligence that enables computers to learn from data without being explicitly programmed...",
            "Web development involves creating websites and web applications using technologies like HTML, CSS, and JavaScript...",
            "Data science combines domain expertise, programming skills, and knowledge of math and statistics to extract meaningful insights from data...",
            "Python is a versatile programming language widely used in various domains including web development, data analysis, and artificial intelligence...",
            "Neural networks are computing systems inspired by the biological neural networks that constitute animal brains...",
            "Modern JavaScript frameworks like React, Vue, and Angular have revolutionized front-end web development...",
            "Cloud computing services provide businesses with on-demand access to computing resources without direct active management by the user..."
        ]

        blog_ids = []
        for idx, (topic, content) in enumerate(zip(blog_topics, blog_contents)):
            # Authors rotate through the three bloggers (indices 1..3).
            author_id = user_ids[1 + (idx % 3)]
            blog_id = self.add_blog(topic, content, author_id, ["tech", "programming"])
            blog_ids.append(blog_id)
            # Three comments per blog from rotating readers.
            for j in range(3):
                self.add_comment(blog_id, f"Great article on {topic}!", user_ids[4 + (j % 5)])
            # Every reader records one interaction per blog.
            for reader_id in user_ids[4:]:
                self.record_interaction(reader_id, blog_id, np.random.randint(1, 10))

    # [Rest of your DataStore methods remain unchanged...]

# Initialize data store — module-level singleton; sample data is created on import.
data_store = DataStore()

# Machine Learning Models with improved error handling
class MLModels:
    """Container for the app's ML components.

    Holds an optional transformers sentiment pipeline (None when transformers
    is unavailable or initialization fails — callers then fall back to NLTK)
    and a TF-IDF vectorizer for blog similarity.
    """

    def __init__(self):
        self.sentiment_model_name = "distilbert-base-uncased-finetuned-sst-2-english"
        self.sentiment_analyzer = None

        if pipeline is None:
            # transformers failed to import at module load (see top of file);
            # skip pipeline creation instead of letting `None(...)` raise a
            # misleading TypeError.
            print("Transformers unavailable; falling back to NLTK sentiment analysis")
        else:
            try:
                device = -1  # Hugging Face convention: -1 selects CPU
                if torch.cuda.is_available():
                    device = 0
                    print(f"Using GPU (device {device})")
                else:
                    print("Using CPU")

                self.sentiment_analyzer = pipeline(
                    "sentiment-analysis",
                    model=self.sentiment_model_name,
                    device=device
                )
            except Exception as e:
                # Model download/initialization can fail (network, disk, CUDA);
                # degrade gracefully to the NLTK analyzer.
                print(f"Failed to initialize sentiment analyzer: {e}")
                print("Falling back to NLTK sentiment analysis")
                self.sentiment_analyzer = None

        # TF-IDF features over blog texts; vectors built by update_blog_vectors
        # (defined elsewhere in this class).
        self.tfidf_vectorizer = TfidfVectorizer(max_features=1000)
        self.blog_vectors = None
        self.update_blog_vectors()

    # [Rest of your MLModels methods remain unchanged...]

# Initialize ML models — module-level singleton; may download model weights on import.
ml_models = MLModels()

# Custom CSS injected into the Gradio Blocks interface.
# NOTE(review): placeholder only — the real rules were elided in this revision.
custom_css = """
/* Your existing CSS here */
"""

# Gradio Interface (unchanged except for launch settings)
def create_interface():
    """Build and return the Gradio Blocks UI.

    NOTE(review): the original `with` suite contained only a comment, which
    is a syntax error (empty block) — the module could not even be imported.
    A `pass` placeholder keeps the file parseable until the real interface
    code is restored.
    """
    with gr.Blocks(css=custom_css) as interface:
        # [Your existing interface code...]
        pass
    return interface

# Updated launch function
def launch_app():
    """Build the Gradio interface and serve it on 0.0.0.0:7860.

    Launch options are tuned for Hugging Face Spaces (no public share link,
    errors surfaced in the UI). Any failure is logged and re-raised so the
    process exits visibly.
    """
    launch_options = {
        "server_name": "0.0.0.0",
        "server_port": 7860,
        "share": False,
        "show_error": True,
    }
    try:
        # Both interface construction and serving stay inside the try so
        # either failure is reported the same way.
        create_interface().launch(**launch_options)
    except Exception as e:
        print(f"Failed to launch app: {e}")
        raise

# Script entry point: start the server only when executed directly, not on import.
if __name__ == "__main__":
    launch_app()