File size: 15,044 Bytes
cf68fcd
e6de8ec
eebbd16
 
 
 
edcf5f0
eebbd16
383eb85
 
4d191fb
4843a5e
edcf5f0
 
 
4843a5e
 
 
eebbd16
4d191fb
 
4843a5e
e6de8ec
 
7e97218
eebbd16
4843a5e
eebbd16
4d191fb
4843a5e
eebbd16
 
 
 
383eb85
 
 
232da3d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
383eb85
 
 
 
 
 
 
 
 
 
 
 
 
 
7e97218
4843a5e
edcf5f0
eebbd16
 
7e97218
4843a5e
eebbd16
4843a5e
 
eebbd16
 
7e97218
4843a5e
eebbd16
4843a5e
 
eebbd16
 
e6de8ec
d475145
 
edcf5f0
d475145
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53b0b4f
d475145
eebbd16
d475145
 
 
edcf5f0
d475145
edcf5f0
 
 
 
 
 
d475145
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
edcf5f0
d475145
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
edcf5f0
d475145
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
edcf5f0
 
 
eebbd16
 
 
 
e6de8ec
4d191fb
 
 
 
4843a5e
4d191fb
e6de8ec
f2f5f4c
eebbd16
4d191fb
edcf5f0
 
 
 
eebbd16
edcf5f0
 
 
 
 
eebbd16
383eb85
 
 
edcf5f0
eebbd16
383eb85
 
1aa493a
 
edcf5f0
383eb85
 
eebbd16
232da3d
eebbd16
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
import spaces
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
from diffusers import StableDiffusionPipeline
import torch
import numpy as np
import random
from datasets import Dataset
from huggingface_hub import HfApi
from datetime import datetime, time
from accelerate import Accelerator
from accelerate.utils import set_seed
from PIL import Image
import io
import base64

# Seed the RNGs (Python, NumPy, torch) via accelerate so generations are reproducible.
set_seed(42)

# Initialize Accelerator
accelerator = Accelerator()  # NOTE(review): created but never referenced below — confirm it is needed
device = 0 if torch.cuda.is_available() else -1  # Use GPU 0 if available, else CPU

# Initialize models
# GPT-2 medium via the transformers pipeline API (used by generate_text below).
text_generator = pipeline("text-generation", model="gpt2-medium", device=device)
# Stable Diffusion v1.5 in float16.
# NOTE(review): fp16 weights on a CPU-only host may fail or be extremely slow — confirm a GPU is guaranteed.
image_generator = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
image_generator = image_generator.to(f"cuda:{device}" if device >= 0 else "cpu")
# SST-2 fine-tuned DistilBERT pair (tokenizer + classifier) for sentiment scoring.
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
sentiment_model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
sentiment_model = sentiment_model.to(f"cuda:{device}" if device >= 0 else "cpu")

# Global variables for feedback collection.
# In-memory only: all collected feedback is lost when the process restarts.
feedback_data = []

# Initialize HfApi (used by set_sleep_time to configure the Space).
api = HfApi()

# Define the topics list in the global scope.
# generate_website_content() picks one of these at random to theme the whole page
# (title, paragraphs, tabs, and generated image).
topics = [
    "space exploration", "artificial intelligence", "environmental conservation", 
    "quantum computing", "renewable energy", "climate change", "biotechnology",
    "nanotechnology", "cybersecurity", "robotics", "virtual reality and augmented reality",
    "autonomous vehicles", "genetic engineering", "3D printing and additive manufacturing",
    "internet of things (IoT)", "blockchain technology", "sustainable agriculture",
    "smart cities", "digital health and telemedicine", "renewable energy storage solutions",
    "advanced materials science", "machine learning and data science",
    "oceanography and marine conservation", "AI ethics and governance",
    "futuristic urban planning", "human-computer interaction", "sustainable architecture",
    "nuclear fusion research", "environmental impact of technology", "bioinformatics",
    "synthetic biology", "renewable energy grid integration",
    "environmental policy and legislation", "human augmentation and enhancement",
    "climate engineering (geoengineering)", "high-performance computing (HPC)",
    "sustainable transportation", "energy harvesting technologies", "cognitive computing",
    "deep learning and neural networks", "zero-waste living",
    "environmental education and awareness", "sustainable water management",
    "green technology innovation", "impact of AI on employment", "ethical hacking",
    "personalized medicine", "advanced prosthetics and bionics", "circular economy",
    "environmental justice", "human-robot collaboration",
    "artificial intelligence in art and creativity", "AI in climate modeling",
    "renewable energy microgrids", "sustainable fashion", "quantum cryptography",
    "energy-efficient computing", "wildlife conservation and habitat preservation",
    "genomic editing and CRISPR", "big data analytics", "ethics in genetic engineering",
    "sustainable fisheries and aquaculture", "urban resilience to climate change",
    "AI in healthcare diagnostics", "eco-friendly packaging solutions",
    "AI in financial markets", "conservation technology",
    "green building certifications (e.g., LEED)", "AI-powered drug discovery",
    "sustainable mining practices", "remote sensing for environmental monitoring",
    "conservation of endangered species", "AI in supply chain optimization",
    "sustainable product design", "regenerative agriculture", "quantum teleportation",
    "sustainable tourism", "AI-driven autonomous systems",
    "carbon capture and storage (CCS)", "resilient infrastructure development",
    "sustainable energy policies", "AI in language translation",
    "sustainable waste management", "advanced robotics in manufacturing",
    "precision agriculture", "smart grid technology", "biomimicry in engineering",
    "ethical AI development", "neurotechnology", "urban vertical farming",
    "quantum sensors", "blockchain in supply chain transparency",
    "AI in education and personalized learning", "sustainable fashion and textile innovation",
    "green chemistry", "smart home technology", "AI in cybersecurity",
    "sustainable packaging solutions", "edge computing",
    "autonomous drones in various industries", "AI in music composition",
    "sustainable urban mobility", "quantum machine learning",
    "bioplastics and biodegradable materials", "AI in weather forecasting",
    "sustainable construction materials", "brain-computer interfaces",
    "AI in legal tech and justice systems", "sustainable food systems",
    "quantum computing in finance", "smart wearable technology",
    "AI in agriculture (precision farming)", "sustainable aviation fuels",
    "advanced recycling technologies", "AI in content creation and journalism",
    "sustainable concrete alternatives", "quantum sensing in healthcare",
    "AI in customer service and chatbots", "sustainable urban water systems",
    "bioengineered organs", "AI in predictive maintenance", "sustainable packaging design",
    "quantum computing in drug discovery", "AI-powered personal assistants",
    "sustainable forestry practices", "next-generation batteries",
    "AI in sports analytics and training", "sustainable textile production",
    "quantum metrology", "AI in disaster prediction and management",
    "sustainable refrigeration technologies", "neuromorphic computing",
    "AI in wildlife conservation", "sustainable desalination techniques",
    "quantum-resistant cryptography", "AI in urban planning and design",
    "sustainable coffee production", "advanced materials for energy storage",
    "AI in mental health support", "sustainable chocolate production",
    "topological quantum computing", "AI in archaeological discoveries",
    "sustainable livestock management", "perovskite solar cells",
    "AI in air quality monitoring and improvement", "sustainable paper and pulp production"
]

def set_sleep_time():
    """Configure the hosting Space to sleep between 02:00 and 06:00 UTC.

    Returns a human-readable status string; any failure is caught and
    reported as text rather than raised.
    """
    window = {
        "sleep_start_time": time(hour=2, minute=0),
        "sleep_end_time": time(hour=6, minute=0),
    }
    try:
        # NOTE(review): set_space_sleep_time may not exist on every HfApi
        # version — failures are swallowed into the returned message below.
        api.set_space_sleep_time(
            repo_id="Oranblock/Websitem",  # Replace with your actual Space name
            timezone="UTC",
            **window,
        )
    except Exception as e:
        return f"Error setting sleep time: {str(e)}"
    return "Sleep time set successfully"

@spaces.GPU
@torch.no_grad()
def generate_text(prompt):
    """Continue *prompt* with GPT-2 medium (up to 100 tokens) and return the text."""
    outputs = text_generator(prompt, max_length=100, num_return_sequences=1)
    return outputs[0]['generated_text']

@spaces.GPU
@torch.no_grad()
def generate_image(prompt):
    """Render one Stable Diffusion image for *prompt* (guidance scale 7.5)."""
    autocast_device = "cuda" if device >= 0 else "cpu"
    # Mixed-precision context keeps fp16 inference consistent with the model dtype.
    with torch.autocast(autocast_device):
        result = image_generator(prompt, guidance_scale=7.5)
    return result.images[0]

@spaces.GPU
@torch.no_grad()
def analyze_sentiment(text):
    """Return the softmax class probabilities for *text* as a 1-D numpy array.

    Callers treat index 1 as "positive" and index 0 as "negative"
    (see generate_website_content).
    """
    target = f"cuda:{device}" if device >= 0 else "cpu"
    encoded = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    logits = sentiment_model(**encoded.to(target)).logits
    probabilities = torch.nn.functional.softmax(logits, dim=-1)
    return probabilities.cpu().numpy()[0]

def generate_random_css():
    """Produce a dict of randomized CSS property values used to theme the page.

    The RNG is consumed in a fixed order, so a seeded `random` module yields
    reproducible themes.
    """
    fonts = ['Arial', 'Helvetica', 'Verdana', 'Georgia', 'Palatino', 'Garamond', 'Bookman', 'Comic Sans MS', 'Trebuchet MS', 'Arial Black']

    def rand_hex():
        # Random 24-bit color as "#rrggbb".
        return "#{:06x}".format(random.randint(0, 0xFFFFFF))

    styles = {}
    styles['font_family'] = random.choice(fonts)
    styles['font_size'] = f'{random.randint(14, 24)}px'
    styles['background_color1'] = rand_hex()
    styles['background_color2'] = rand_hex()
    styles['text_color'] = rand_hex()
    styles['border_radius'] = f'{random.randint(0, 20)}px'
    styles['padding'] = f'{random.randint(10, 30)}px'
    styles['margin'] = f'{random.randint(10, 30)}px'
    x_off = random.randint(0, 10)
    y_off = random.randint(0, 10)
    blur = random.randint(0, 20)
    styles['box_shadow'] = f'{x_off}px {y_off}px {blur}px rgba(0,0,0,{random.uniform(0.1, 0.5):.1f})'
    styles['transform'] = f'rotate({random.uniform(-5, 5):.2f}deg)'
    styles['animation_duration'] = f'{random.uniform(0.5, 2):.1f}s'
    return styles

def generate_website_content():
    """Pick a random topic and produce every content piece for one page render.

    Returns (title, main_content, tab1, tab2, tab3, image, sentiment_label, css).
    """
    topic = random.choice(topics)

    def tail(prompt):
        # Keep only the model's continuation after the final colon of the prompt.
        return generate_text(prompt).split(':')[-1].strip()

    title = tail(f"A unique website title about {topic}:")
    main_content = tail(f"A short paragraph about {topic}:")
    tab1_content = tail(f"Interesting facts about {topic}:")
    tab2_content = tail(f"Future prospects of {topic}:")
    tab3_content = tail(f"How {topic} impacts our daily lives:")
    image = generate_image(f"An artistic representation of {topic}")
    scores = analyze_sentiment(main_content)
    sentiment_label = "Positive" if scores[1] > scores[0] else "Negative"
    css = generate_random_css()
    return title, main_content, tab1_content, tab2_content, tab3_content, image, sentiment_label, css

def update_website():
    """Generate fresh content and render it as a self-contained HTML page string.

    Returns the HTML (style + markup + tab-switching script) that the Gradio
    HTML component displays.
    """
    title, main_content, tab1, tab2, tab3, image, sentiment, css = generate_website_content()
    
    # Inline the PIL image as a base64 PNG data URI so no file hosting is needed.
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    img_str = base64.b64encode(buffered.getvalue()).decode()
    
    # Doubled braces ({{ }}) below are literal CSS/JS braces inside the f-string.
    html_content = f"""
    <style>
        body {{
            font-family: {css['font_family']}, sans-serif;
            margin: 0;
            padding: 0;
            background: linear-gradient(135deg, {css['background_color1']}, {css['background_color2']});
            min-height: 100vh;
            color: {css['text_color']};
        }}
        .container {{
            max-width: 800px;
            margin: {css['margin']} auto;
            padding: {css['padding']};
            transform: {css['transform']};
        }}
        h1 {{
            text-shadow: 2px 2px 4px rgba(0,0,0,0.5);
            animation: pulse {css['animation_duration']} infinite alternate;
        }}
        @keyframes pulse {{
            from {{ transform: scale(1); }}
            to {{ transform: scale(1.05); }}
        }}
        .content {{
            background-color: rgba(255,255,255,0.8);
            padding: {css['padding']};
            border-radius: {css['border_radius']};
            box-shadow: {css['box_shadow']};
        }}
        p {{
            font-size: {css['font_size']};
            line-height: 1.6;
        }}
        img {{
            max-width: 100%;
            height: auto;
            border-radius: {css['border_radius']};
            margin-top: 20px;
            transition: transform 0.3s ease-in-out;
        }}
        img:hover {{
            transform: scale(1.05);
        }}
        .tab {{
            overflow: hidden;
            border: 1px solid #ccc;
            background-color: #f1f1f1;
            border-radius: 5px 5px 0 0;
        }}
        .tab button {{
            background-color: inherit;
            float: left;
            border: none;
            outline: none;
            cursor: pointer;
            padding: 14px 16px;
            transition: 0.3s;
            font-family: inherit;
        }}
        .tab button:hover {{
            background-color: #ddd;
        }}
        .tab button.active {{
            background-color: #ccc;
        }}
        .tabcontent {{
            display: none;
            padding: 6px 12px;
            border: 1px solid #ccc;
            border-top: none;
            border-radius: 0 0 5px 5px;
            animation: fadeIn 0.5s;
        }}
        @keyframes fadeIn {{
            from {{ opacity: 0; }}
            to {{ opacity: 1; }}
        }}
    </style>
    <div class="container">
        <h1>{title}</h1>
        <div class="content">
            <p>{main_content}</p>
            <div class="tab">
                <button class="tablinks" onclick="openTab(event, 'Tab1')">Interesting Facts</button>
                <button class="tablinks" onclick="openTab(event, 'Tab2')">Future Prospects</button>
                <button class="tablinks" onclick="openTab(event, 'Tab3')">Daily Impact</button>
            </div>
            <div id="Tab1" class="tabcontent">
                <p>{tab1}</p>
            </div>
            <div id="Tab2" class="tabcontent">
                <p>{tab2}</p>
            </div>
            <div id="Tab3" class="tabcontent">
                <p>{tab3}</p>
            </div>
            <img src="data:image/png;base64,{img_str}" alt="Generated Image">
            <p>Content Sentiment: {sentiment}</p>
        </div>
        <button onclick="document.getElementById('refresh_button').click()" style="margin-top: 20px;">Regenerate Website</button>
    </div>
    <script>
    function openTab(evt, tabName) {{
        var i, tabcontent, tablinks;
        tabcontent = document.getElementsByClassName("tabcontent");
        for (i = 0; i < tabcontent.length; i++) {{
            tabcontent[i].style.display = "none";
        }}
        tablinks = document.getElementsByClassName("tablinks");
        for (i = 0; i < tablinks.length; i++) {{
            tablinks[i].className = tablinks[i].className.replace(" active", "");
        }}
        document.getElementById(tabName).style.display = "block";
        evt.currentTarget.className += " active";
    }}
    // Open the first tab by default
    document.getElementsByClassName("tablinks")[0].click();
    </script>
    """
    
    # NOTE(review): the in-page "Regenerate Website" button clicks the hidden
    # Gradio element with id "refresh_button" (see the Blocks wiring below).
    return html_content

def save_feedback(feedback, rating):
    """Append one feedback entry to the in-memory store and report the total.

    Entries live only in the module-level `feedback_data` list, so they are
    lost on process restart.
    """
    entry = {"text": feedback, "rating": rating}
    feedback_data.append(entry)
    return f"Feedback saved. Total feedback collected: {len(feedback_data)}"

def get_gpu_info():
    """Describe which compute device the app is running on (GPU 0 or CPU)."""
    if not torch.cuda.is_available():
        return "GPU not available, using CPU"
    return f"Using GPU: {torch.cuda.get_device_name(0)}"

# Create Gradio interface.
with gr.Blocks() as demo:
    gr.Markdown("# AI-Driven Dynamic Website")
    gr.Markdown(get_gpu_info())
    
    html_output = gr.HTML()
    # elem_id must stay "refresh_button": the generated page's inline JS
    # clicks this element by id to trigger a regeneration.
    refresh_button = gr.Button("Regenerate Website", elem_id="refresh_button")
    
    with gr.Row():
        feedback_input = gr.Textbox(label="Provide Feedback")
        feedback_rating = gr.Radio(["Positive", "Negative"], label="Rate the content")
        feedback_button = gr.Button("Submit Feedback")
    
    feedback_output = gr.Textbox(label="Feedback Status")
    
    sleep_button = gr.Button("Set Sleep Time")
    sleep_output = gr.Textbox(label="Sleep Time Status")
    
    # Wire events: each handler returns the value for its output component.
    refresh_button.click(update_website, outputs=html_output)
    feedback_button.click(save_feedback, inputs=[feedback_input, feedback_rating], outputs=feedback_output)
    sleep_button.click(set_sleep_time, outputs=sleep_output)

    # Initialize the website on startup (every=None means run once, not periodically).
    demo.load(fn=update_website, outputs=html_output, every=None, show_progress=False)

# Set sleep time when the app starts (best-effort; failures return an error string).
set_sleep_time()

# Launch the demo.
demo.launch()