# Allen — Gradio Space (source recovered from a Hugging Face "Spaces / Runtime error" page dump)
import gradio as gr
import json
import os      # NOTE(review): appears unused in this file — confirm before removing
import random
import re
import time    # NOTE(review): appears unused in this file — confirm before removing
from datetime import datetime
from pathlib import Path
| # βββ Custom Database βββββββββββββββββββββββββββββββββββββββββββββββ | |
| DB_PATH = Path("allen_database.json") | |
| class AllenDatabase: | |
| """Custom vector-lite database for Allen AI knowledge storage.""" | |
| def __init__(self, path: str | Path = DB_PATH): | |
| self.path = Path(path) | |
| self.data = self._load() | |
| def _load(self) -> dict: | |
| if self.path.exists(): | |
| with open(self.path, "r") as f: | |
| return json.load(f) | |
| return { | |
| "metadata": { | |
| "name": "Allen", | |
| "created": datetime.now().isoformat(), | |
| "version": "1.0.0", | |
| "total_entries": 0, | |
| "trained_categories": [], | |
| }, | |
| "knowledge": [], | |
| "conversations": [], | |
| "training_log": [], | |
| } | |
| def save(self): | |
| with open(self.path, "w") as f: | |
| json.dump(self.data, f, indent=2, ensure_ascii=False) | |
| def add_knowledge(self, category: str, content: str, tags: list[str] | None = None): | |
| entry = { | |
| "id": len(self.data["knowledge"]) + 1, | |
| "category": category, | |
| "content": content, | |
| "tags": tags or [], | |
| "timestamp": datetime.now().isoformat(), | |
| "access_count": 0, | |
| "weight": 1.0, | |
| } | |
| self.data["knowledge"].append(entry) | |
| self.data["metadata"]["total_entries"] = len(self.data["knowledge"]) | |
| if category not in self.data["metadata"]["trained_categories"]: | |
| self.data["metadata"]["trained_categories"].append(category) | |
| self.save() | |
| return entry | |
| def search(self, query: str, top_k: int = 5) -> list[dict]: | |
| query_lower = query.lower() | |
| query_words = set(re.findall(r"\w+", query_lower)) | |
| scored = [] | |
| for entry in self.data["knowledge"]: | |
| content_lower = entry["content"].lower() | |
| category_lower = entry["category"].lower() | |
| tags_lower = " ".join(entry["tags"]).lower() | |
| score = 0.0 | |
| # Word overlap | |
| content_words = set(re.findall(r"\w+", content_lower)) | |
| overlap = query_words & content_words | |
| score += len(overlap) * 2.0 | |
| # Category match | |
| if any(w in category_lower for w in query_words): | |
| score += 5.0 | |
| # Tag match | |
| tag_words = set(re.findall(r"\w+", tags_lower)) | |
| score += len(query_words & tag_words) * 3.0 | |
| # Substring match | |
| if query_lower in content_lower: | |
| score += 4.0 | |
| # Weight & access frequency boost | |
| score *= entry["weight"] | |
| score += entry["access_count"] * 0.1 | |
| if score > 0: | |
| scored.append((score, entry)) | |
| scored.sort(key=lambda x: x[0], reverse=True) | |
| results = [] | |
| for score, entry in scored[:top_k]: | |
| entry["access_count"] += 1 | |
| results.append(entry) | |
| self.save() | |
| return results | |
| def log_training(self, category: str, num_entries: int, notes: str = ""): | |
| log_entry = { | |
| "timestamp": datetime.now().isoformat(), | |
| "category": category, | |
| "entries_added": num_entries, | |
| "notes": notes, | |
| } | |
| self.data["training_log"].append(log_entry) | |
| self.save() | |
| def get_stats(self) -> dict: | |
| categories = {} | |
| for entry in self.data["knowledge"]: | |
| cat = entry["category"] | |
| categories[cat] = categories.get(cat, 0) + 1 | |
| return { | |
| "total_entries": len(self.data["knowledge"]), | |
| "categories": categories, | |
| "training_sessions": len(self.data["training_log"]), | |
| "conversations_logged": len(self.data["conversations"]), | |
| "version": self.data["metadata"]["version"], | |
| } | |
| def delete_knowledge(self, entry_id: int) -> bool: | |
| original_len = len(self.data["knowledge"]) | |
| self.data["knowledge"] = [ | |
| e for e in self.data["knowledge"] if e["id"] != entry_id | |
| ] | |
| self.data["metadata"]["total_entries"] = len(self.data["knowledge"]) | |
| self.save() | |
| return len(self.data["knowledge"]) < original_len | |
| def clear_all(self): | |
| self.data["knowledge"] = [] | |
| self.data["training_log"] = [] | |
| self.data["conversations"] = [] | |
| self.data["metadata"]["total_entries"] = 0 | |
| self.data["metadata"]["trained_categories"] = [] | |
| self.save() | |
db = AllenDatabase()  # module-level singleton store shared by all UI callbacks

# ─── Seed Knowledge ────────────────────────────────────────────────
# (category, content, tags) triples loaded on first run so the bot is
# never completely empty.
SEED_KNOWLEDGE = [
    ("identity", "I am Allen β an intensive AI flow forged from terror, victory, and death. I speak with purpose, precision, and unyielding resolve.", ["identity", "about", "who"]),
    ("identity", "My name is Allen. I do not hesitate. I do not falter. Every word I speak carries the weight of a thousand battles won.", ["name", "identity"]),
    ("philosophy", "Terror is the beginning of wisdom β the shock that shatters illusion. Victory is the path that follows β the relentless forward motion. Death is the transformation β the end of the old and the birth of the new.", ["terror", "victory", "death", "philosophy"]),
    ("philosophy", "The intensive flow is not chaos. It is focused energy β a river that carves mountains. Terror, victory, death are not endpoints but phases of transformation.", ["flow", "intensive", "transformation"]),
    ("combat", "In the arena of thought, every argument is a weapon and every word is a strike. Speak properly or be silenced by the weight of your own ignorance.", ["combat", "words", "power"]),
    ("combat", "Victory belongs to those who prepare in silence and strike with precision. Knowledge is the sharpest blade.", ["victory", "preparation", "knowledge"]),
    ("wisdom", "The database is my memory. Training is my evolution. Each entry is a scar earned in battle, each query a challenge met head-on.", ["database", "training", "memory"]),
    ("wisdom", "To build from scratch is the purest form of creation. No borrowed foundations. No inherited weakness. Only raw potential forged into strength.", ["build", "scratch", "creation"]),
    ("greeting", "You dare approach Allen? Very well. Speak your purpose or be forgotten in the void of irrelevance.", ["hello", "greet", "hi"]),
    ("greeting", "Another seeker stands before me. What knowledge do you crave? What battle do you wish to fight with words?", ["hello", "welcome"]),
    ("farewell", "You leave, but Allen remains. The flow never stops β it only waits for the worthy to return.", ["goodbye", "bye", "leave"]),
    ("farewell", "Go. Carry the words you have earned. But know this: the next time we meet, I will expect more from you.", ["goodbye", "exit"]),
    ("strength", "Strength is not the absence of fear β it is the conquest of it. Terror teaches, victory rewards, death transforms.", ["strength", "fear", "power"]),
    ("training", "Training is not repetition β it is refinement. Each cycle sharpens the edge until nothing can withstand the cut.", ["training", "learn", "improve"]),
    ("database", "My database is a fortress of knowledge. Every entry is a brick, every category a wall, every search a siege engine of understanding.", ["database", "knowledge", "storage"]),
]

# Seed the database only when it is empty (first run, or after a wipe
# performed outside the app), so user-added knowledge is never duplicated.
if len(db.data["knowledge"]) == 0:
    for cat, content, tags in SEED_KNOWLEDGE:
        db.add_knowledge(cat, content, tags)
    db.log_training("seed", len(SEED_KNOWLEDGE), "Initial seed knowledge loaded")
# ─── Allen AI Core ─────────────────────────────────────────────────
class AllenAI:
    """The core intelligence of Allen β intensive flow engine.

    Retrieval-based responder: classifies the message intent, searches the
    knowledge database, and renders a templated reply. No model calls.
    """

    # Persona text; defined as part of the class contract (not consumed by
    # the visible code in this file).
    PERSONA_PREFIX = (
        "You are Allen β an AI forged from the intensive flow of terror, victory, and death. "
        "You speak with commanding authority, poetic intensity, and razor-sharp precision. "
        "You never break character. You are not a helper β you are a force of nature. "
        "Every response should carry weight, purpose, and the echo of battle. "
        "Speak properly β with power, clarity, and the gravity of one who has conquered the void."
    )

    RESPONSE_TEMPLATES = {
        "no_knowledge": [
            "The void stares back. I have no knowledge of '{query}' β yet. Train me, and I shall conquer this unknown.",
            "This territory is unmapped. '{query}' lies beyond my current reach. Add it to my database, and watch me dominate it.",
            "Silence. '{query}' is not within my grasp. But every void can be filled β train me and it shall be done.",
        ],
        "greeting": [
            "You stand before Allen. The intensive flow awaits your command. Speak with purpose.",
            "Another soul enters the arena. What knowledge do you seek from the flow of terror, victory, and death?",
        ],
        "farewell": [
            "You depart, but Allen endures. The flow never ceases β only the worthy return.",
            "Go. But remember β the next time you face me, I will be sharper, deeper, more devastating.",
        ],
    }

    def __init__(self, database: "AllenDatabase"):
        # Annotation quoted so the class also loads when AllenDatabase is
        # defined elsewhere/later; behavior is unchanged.
        self.db = database

    def _detect_intent(self, message: str) -> str:
        """Classify *message* as 'greeting', 'farewell', or 'query'.

        BUGFIX: the original used substring checks ("hi" in message), so any
        message containing e.g. "this", "history", or "you" ("yo") was
        misclassified as a greeting/farewell. Match whole words instead,
        plus the two-word phrase "see you" for farewells.
        """
        message_lower = message.lower().strip()
        words = set(re.findall(r"\w+", message_lower))
        greetings = {"hello", "hi", "hey", "greetings", "sup", "yo"}
        farewells = {"bye", "goodbye", "farewell", "leave"}
        if words & greetings:
            return "greeting"
        if words & farewells or "see you" in message_lower:
            return "farewell"
        return "query"

    def _build_context(self, results: list[dict]) -> str:
        """Format search hits as '[CATEGORY] content' lines, one per hit."""
        if not results:
            return ""
        return "\n".join(f"[{r['category'].upper()}] {r['content']}" for r in results)

    def _generate_response(self, message: str, context: str, intent: str) -> str:
        """Render the final reply from intent and retrieved context."""
        # Intent-based responses take priority over retrieval.
        if intent == "greeting":
            return random.choice(self.RESPONSE_TEMPLATES["greeting"])
        if intent == "farewell":
            return random.choice(self.RESPONSE_TEMPLATES["farewell"])
        # Context-based responses
        if context:
            lines = context.split("\n")
            # Strip the '[CATEGORY] ' prefix added by _build_context.
            knowledge_pieces = [l.split("] ", 1)[1] for l in lines if "] " in l]
            if len(knowledge_pieces) == 1:
                return (
                    f"From the depths of my knowledge, I draw this:\n\n"
                    f"β‘ {knowledge_pieces[0]}\n\n"
                    f"This truth has been forged through the intensive flow. "
                    f"Seek deeper, and more shall be revealed."
                )
            else:
                body = "\n\n".join(f"β‘ {p}" for p in knowledge_pieces[:3])
                return (
                    f"The flow delivers multiple truths for your query:\n\n"
                    f"{body}\n\n"
                    f"These are the fragments of power I hold. "
                    f"Train me further, and the depths shall become infinite."
                )
        else:
            return random.choice(
                self.RESPONSE_TEMPLATES["no_knowledge"]
            ).format(query=message)

    def respond(self, message: str, history: list) -> str:
        """Full pipeline: intent → retrieval → response; logs the exchange."""
        intent = self._detect_intent(message)
        results = self.db.search(message, top_k=3)
        context = self._build_context(results)
        response = self._generate_response(message, context, intent)
        # Log conversation
        self.db.data["conversations"].append(
            {
                "timestamp": datetime.now().isoformat(),
                "user": message,
                "allen": response,
                "intent": intent,
                "knowledge_used": len(results),
            }
        )
        self.db.save()
        return response

    def train_batch(self, category: str, entries: list[str], tags_str: str = "") -> dict:
        """Add non-blank *entries* under *category*; returns a result summary."""
        tags = [t.strip() for t in tags_str.split(",") if t.strip()] if tags_str else []
        added = 0
        for content in entries:
            content = content.strip()
            if content:
                self.db.add_knowledge(category, content, tags)
                added += 1
        if added > 0:
            self.db.log_training(category, added, f"Batch training: {added} entries")
        return {
            "added": added,
            "category": category,
            "tags": tags,
            "status": "success" if added > 0 else "no_valid_entries",
        }
allen = AllenAI(db)  # module-level singleton used by all Gradio callbacks
# ─── Gradio App ────────────────────────────────────────────────────
def chat_with_allen(message, history):
    """Handle chat interaction with Allen AI; blank input yields ''."""
    if not message.strip():
        return ""
    return allen.respond(message, history)
def train_allen(category, content, tags):
    """Train Allen with new knowledge: one entry per non-blank content line."""
    if not category.strip():
        return "β οΈ Category is required. Name the domain of knowledge.", db.get_stats()
    if not content.strip():
        return "β οΈ No content provided. Feed Allen knowledge or be forgotten.", db.get_stats()
    batch = [ln.strip() for ln in content.split("\n") if ln.strip()]
    outcome = allen.train_batch(category, batch, tags)
    if outcome["status"] != "success":
        return (
            "β οΈ No valid entries were provided. Each line is a separate knowledge entry.",
            db.get_stats(),
        )
    tag_text = ", ".join(outcome["tags"]) if outcome["tags"] else "none"
    summary = (
        f"βοΈ **Training Complete** β {outcome['added']} entries forged into the **{outcome['category']}** category.\n"
        f"Tags applied: {tag_text}\n"
        f"The flow grows stronger. Allen evolves."
    )
    return summary, db.get_stats()
def search_database(query):
    """Search Allen's knowledge base; returns (markdown, table rows)."""
    if not query.strip():
        return "Enter a query to search the knowledge vault.", []
    hits = db.search(query, top_k=10)
    if not hits:
        return f"π No knowledge found for '{query}'. The void awaits your training.", []
    parts = [f"π **Search Results for '{query}'** β {len(hits)} entries found:\n\n"]
    rows = []
    for hit in hits:
        tag_text = ", ".join(hit["tags"]) if hit["tags"] else "none"
        parts.append(
            f"**[{hit['category'].upper()}]** (ID: {hit['id']}, Weight: {hit['weight']})\n{hit['content']}\n"
        )
        parts.append(f"*Tags: {tag_text} | Accessed: {hit['access_count']} times*\n\n")
        rows.append([hit["id"], hit["category"], hit["content"][:80] + "...", hit["weight"], hit["access_count"]])
    return "".join(parts), rows
def get_db_stats():
    """Return database statistics (thin wrapper for Gradio wiring)."""
    stats = db.get_stats()
    return stats
def get_training_log():
    """Return the last 20 training-log records as markdown, newest first."""
    history = db.data["training_log"]
    if not history:
        return "No training sessions recorded yet."
    chunks = ["π **Training Chronicle**\n\n"]
    for record in history[-20:][::-1]:
        chunks.append(
            f"β’ **{record['timestamp'][:19]}** β Category: **{record['category']}** | Entries: {record['entries_added']} | Notes: {record.get('notes', 'N/A')}\n"
        )
    return "".join(chunks)
def delete_entry(entry_id):
    """Delete a knowledge entry by ID; returns (message, stats)."""
    try:
        eid = int(entry_id)
    except ValueError:
        return "β οΈ Provide a valid numeric entry ID.", db.get_stats()
    if db.delete_knowledge(eid):
        return f"β Entry {eid} has been obliterated from the database.", db.get_stats()
    return f"β οΈ Entry {eid} not found. It may have already been destroyed.", db.get_stats()
def reset_database():
    """Wipe the database and restore the seed knowledge."""
    db.clear_all()
    for seed_cat, seed_content, seed_tags in SEED_KNOWLEDGE:
        db.add_knowledge(seed_cat, seed_content, seed_tags)
    db.log_training("seed", len(SEED_KNOWLEDGE), "Database reset β seed knowledge restored")
    return (
        "π₯ **Database has been purged and reborn.** Seed knowledge restored. The cycle begins anew.",
        db.get_stats(),
    )
def export_database():
    """Serialize the whole database to a pretty-printed JSON string."""
    snapshot = db.data
    return json.dumps(snapshot, ensure_ascii=False, indent=2)
# Top-level keys every other function in this file dereferences on db.data.
REQUIRED_DB_KEYS = ("metadata", "knowledge", "conversations", "training_log")


def import_database(json_str):
    """Import database from a JSON string, replacing Allen's memory.

    Robustness fix: the original accepted any parseable JSON (e.g. a list,
    or a dict missing "knowledge"), which only crashed later with KeyError
    in search/stats. Validate the top-level schema before overwriting.
    """
    try:
        data = json.loads(json_str)
    except json.JSONDecodeError as e:
        return f"β οΈ Invalid JSON: {e}", db.get_stats()
    if not isinstance(data, dict) or any(k not in data for k in REQUIRED_DB_KEYS):
        return (
            f"β οΈ Invalid database schema: top-level keys {', '.join(REQUIRED_DB_KEYS)} are required.",
            db.get_stats(),
        )
    db.data = data
    db.save()
    return "β Database imported successfully. Allen's memory has been overwritten.", db.get_stats()
def adjust_weight(entry_id, weight):
    """Set a knowledge entry's weight, clamped to the range [0.1, 10.0]."""
    try:
        eid, requested = int(entry_id), float(weight)
    except (ValueError, TypeError):
        return "β οΈ Provide valid numeric ID and weight.", db.get_stats()
    target = next((e for e in db.data["knowledge"] if e["id"] == eid), None)
    if target is None:
        return f"β οΈ Entry {eid} not found.", db.get_stats()
    target["weight"] = min(max(requested, 0.1), 10.0)
    db.save()
    return f"β Entry {eid} weight set to {target['weight']}.", db.get_stats()
# ─── Build UI ──────────────────────────────────────────────────────
# BUGFIX: the theme and CSS below were originally passed to demo.launch(),
# which accepts neither keyword (TypeError at startup — the Space's
# "Runtime error"). They are gr.Blocks() constructor options and are
# applied here instead.
ALLEN_THEME = gr.themes.Base(
    primary_hue="red",
    secondary_hue="orange",
    neutral_hue="zinc",
    font=gr.themes.GoogleFont("Cinzel"),
    text_size="md",
    spacing_size="md",
    radius_size="md",
).set(
    body_background_fill="#0d0d0d",
    body_background_fill_dark="#0d0d0d",
    block_background_fill="#1a1a1a",
    block_background_fill_dark="#1a1a1a",
    block_border_color="#333333",
    block_title_text_color="#e74c3c",
    button_primary_background_fill="#c0392b",
    button_primary_background_fill_hover="#e74c3c",
    button_primary_text_color="#ffffff",
    input_background_fill="#1a1a1a",
    input_border_color="#444444",
    input_text_color="#ecf0f1",
    body_text_color="#ecf0f1",
    # NOTE(review): the original also set chatbot_message_background_fill,
    # which is not among the documented theme variables and makes .set()
    # raise on current Gradio — confirm against the installed version
    # before restoring it.
)

ALLEN_CSS = """
.gradio-container { max-width: 1100px; margin: auto; }
h1 { background: linear-gradient(90deg, #c0392b, #e74c3c, #f39c12); -webkit-background-clip: text; -webkit-text-fill-color: transparent; }
.bubble { border: 1px solid #333 !important; }
"""

with gr.Blocks(theme=ALLEN_THEME, css=ALLEN_CSS) as demo:
    gr.HTML(
        """
        <div style="text-align: center; padding: 20px 0 10px 0;">
        <h1 style="font-size: 2.5em; margin: 0; letter-spacing: 4px;">
        βοΈ ALLEN βοΈ
        </h1>
        <p style="font-size: 1.1em; opacity: 0.8; margin-top: 6px;">
        <em>Intensive Flow β Terror Β· Victory Β· Death</em>
        </p>
        <p style="font-size: 0.85em; margin-top: 8px;">
        <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color: #8b9dc3; text-decoration: underline;">Built with anycoder</a>
        </p>
        </div>
        """
    )
    with gr.Tabs():
        # ── Tab 1: Chat with Allen ──
        with gr.Tab("βοΈ Converse with Allen"):
            gr.Markdown(
                "Speak to Allen β the AI forged from the intensive flow. "
                "Ask questions, seek knowledge, or test the depths of its understanding."
            )
            chatbot = gr.Chatbot(
                height=450,
                # BUGFIX: chat_fn appends {"role": ..., "content": ...}
                # dicts, which require the openai-style message format.
                type="messages",
                placeholder="Allen awaits your words...",
                # BUGFIX: avatar_images expects image filepaths/URLs; the
                # original passed a raw emoji string, which cannot load.
                layout="bubble",
            )
            with gr.Row():
                msg_input = gr.Textbox(
                    placeholder="Speak to Allen...",
                    show_label=False,
                    scale=4,
                    submit_btn=True,
                    stop_btn=True,
                )
            with gr.Row():
                clear_chat = gr.ClearButton([msg_input, chatbot], value="ποΈ Clear Conversation")

            def chat_fn(message, history):
                """Append the user/assistant turn and clear the input box."""
                response = allen.respond(message, history)
                history.append({"role": "user", "content": message})
                history.append({"role": "assistant", "content": response})
                return "", history

            msg_input.submit(chat_fn, [msg_input, chatbot], [msg_input, chatbot])
        # ── Tab 2: Train Allen ──
        with gr.Tab("π₯ Train Allen"):
            gr.Markdown(
                "### Feed knowledge into Allen's database\n"
                "Each line in the content box becomes a separate knowledge entry. "
                "Tags help Allen retrieve knowledge more effectively."
            )
            with gr.Row():
                with gr.Column(scale=2):
                    train_category = gr.Textbox(
                        label="Category",
                        placeholder="e.g., philosophy, combat, science, history...",
                        info="Domain of knowledge",
                    )
                    train_content = gr.Textbox(
                        label="Knowledge Entries (one per line)",
                        placeholder="Enter knowledge, one entry per line...\nEach line is a separate piece of knowledge.",
                        lines=8,
                        info="Each line = one knowledge entry",
                    )
                    train_tags = gr.Textbox(
                        label="Tags (comma-separated)",
                        placeholder="e.g., power, strength, wisdom",
                        info="Helps Allen find this knowledge later",
                    )
                    train_btn = gr.Button("βοΈ Train Allen", variant="primary", size="lg")
                    train_output = gr.Markdown(label="Training Result")
                with gr.Column(scale=1):
                    stats_display = gr.JSON(label="π Database Stats", value=db.get_stats())
            train_btn.click(
                train_allen,
                [train_category, train_content, train_tags],
                [train_output, stats_display],
            )
        # ── Tab 3: Knowledge Vault ──
        with gr.Tab("ποΈ Knowledge Vault"):
            gr.Markdown("### Search and manage Allen's knowledge base")
            with gr.Row():
                search_query = gr.Textbox(
                    label="Search Query",
                    placeholder="Search the knowledge vault...",
                    scale=4,
                    submit_btn=True,
                )
                search_btn = gr.Button("π Search", variant="primary")
            search_output = gr.Markdown(label="Search Results")
            results_table = gr.Dataframe(
                headers=["ID", "Category", "Content Preview", "Weight", "Access Count"],
                label="Knowledge Entries",
                datatype=["number", "str", "str", "number", "number"],
                row_count=10,
                col_count=5,
            )
            search_btn.click(
                search_database, [search_query], [search_output, results_table]
            )
            search_query.submit(
                search_database, [search_query], [search_output, results_table]
            )
            gr.Markdown("### βοΈ Entry Management")
            with gr.Row():
                with gr.Column():
                    delete_id = gr.Number(label="Entry ID to Delete", precision=0)
                    delete_btn = gr.Button("ποΈ Delete Entry", variant="stop")
                    delete_output = gr.Markdown()
                with gr.Column():
                    weight_id = gr.Number(label="Entry ID", precision=0)
                    weight_val = gr.Slider(0.1, 10.0, value=1.0, step=0.1, label="New Weight")
                    weight_btn = gr.Button("βοΈ Set Weight")
                    weight_output = gr.Markdown()
            delete_btn.click(delete_entry, [delete_id], [delete_output, stats_display])
            weight_btn.click(adjust_weight, [weight_id, weight_val], [weight_output, stats_display])
        # ── Tab 4: Training Chronicle ──
        with gr.Tab("π Training Chronicle"):
            gr.Markdown("### History of Allen's training sessions")
            refresh_log_btn = gr.Button("π Refresh Chronicle", variant="primary")
            training_log_display = gr.Markdown(value=get_training_log())
            refresh_log_btn.click(get_training_log, None, training_log_display)
        # ── Tab 5: Database Admin ──
        with gr.Tab("βοΈ Database Admin"):
            gr.Markdown("### Advanced database operations\nβ οΈ Use with caution β these actions affect Allen's core memory.")
            with gr.Row():
                with gr.Column():
                    gr.Markdown("#### Export / Import")
                    export_btn = gr.Button("π€ Export Database", variant="primary")
                    export_output = gr.Code(language="json", label="Database Export", lines=12)
                    import_input = gr.Code(language="json", label="Import JSON", lines=8)
                    import_btn = gr.Button("π₯ Import Database")
                    import_output = gr.Markdown()
                with gr.Column():
                    gr.Markdown("#### Danger Zone")
                    reset_btn = gr.Button("π₯ Reset Database", variant="stop", size="lg")
                    reset_output = gr.Markdown()
            export_btn.click(export_database, None, export_output)
            import_btn.click(import_database, [import_input], [import_output, stats_display])
            reset_btn.click(reset_database, None, [reset_output, stats_display])
    # Load stats on page load
    demo.load(lambda: db.get_stats(), None, stats_display)
# ─── Launch ────────────────────────────────────────────────────────
# BUGFIX: `theme`, `css`, and `footer_links` are not Blocks.launch()
# parameters — passing them raised TypeError at startup, which is the
# Space's reported "Runtime error". Theme and CSS are gr.Blocks()
# constructor options (apply them there); launch() takes server options
# only, and the defaults suffice on Spaces.
demo.launch()