"""LinkedIn Translator — a satirical Gradio app.

Translates banal plain-text statements into over-the-top LinkedIn prose
(and back), lets an LLM judge score posts on a "Corporate Nonsense" bingo,
keeps a monthly local leaderboard and mirrors every analysis into a public
Hugging Face dataset.

NOTE(review): this file was recovered from a rendering in which the HTML/JS
markup embedded in string literals (leaderboard cards, bingo bars, page
header, PNG badge-export script) had been stripped. All Python logic and
visible text are preserved; markup below is a minimal, clearly marked
reconstruction — restore the original templates from version control
wherever a ``TODO(markup)`` comment appears.
"""

import datetime
import json
import os
import re
import tempfile

import gradio as gr
from huggingface_hub import HfApi, InferenceClient, hf_hub_download
from huggingface_hub.utils import EntryNotFoundError

# ── Configuration ─────────────────────────────────────────────────────────────
HF_TOKEN = os.environ.get("HF_TOKEN", "")
DATASET_REPO = os.environ.get("DATASET_REPO", "")
DATASET_FILE = "data.jsonl"
MODEL_DEFAULT = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
MODEL_TUNING = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
LEADERBOARD_PATH = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "leaderboard.json"
)

# ── System prompts ────────────────────────────────────────────────────────────
PROMPT_TO_LINKEDIN = """Du bist der ultimative, satirische LinkedIn-Influencer-Generator.
Deine Aufgabe: Verwandle die banalste, alltäglichste Eingabe in einen absurd überzogenen, klischeebeladenen LinkedIn-Post, der vor "Corporate Cringe" nur so trieft.

WICHTIGSTE REGELN:
- ZWINGEND die Originalsprache beibehalten! (Englisch -> Englisch, Deutsch -> Deutsch). Übersetze niemals.
- Antworte NUR mit dem fertigen Post in Markdown. Kein Vorwort, keine Erklärungen.

WÄHLE FÜR JEDEN POST ZUFÄLLIG EINE DIESER 4 PERSONAS (für maximale Abwechslung):
1. Der "Hustle Culture" CEO: Steht um 3:30 Uhr auf. Eisbaden. Macht aus der Eingabe eine harte Lektion über Grind, Disziplin, Outperforming und das 10X-Mindset.
2. Der "Vulnerable/Oversharing" Leader: Unglaublich emotional. Hat heute wegen der Eingabe geweint oder ist gescheitert. Zieht daraus tiefgründige Lektionen über Empathie, True Leadership, Mental Health und "Es ist okay, nicht okay zu sein".
3. Der "Triviality as a Masterclass" Guru: Die Eingabe ist eine unglaubliche Analogie für komplexe Business-Themen. "Was mir mein verpasster Bus/mein Toastbrot heute über B2B-Sales, AI-Strategien oder Agilität beigebracht hat..."
4. Der "Unconventional Hiring" Manager: Die Eingabe ist der absurde Grund, warum er heute jemanden eingestellt, gefeuert oder befördert hat. ("Der Bewerber tat [Eingabe]. Ich stellte ihn sofort als VP of Sales ein.")

FORMAT & STIL (Das "LinkedIn-Bingo"):
- Nutze "Broetry": Jeder Satz ist ein eigener Absatz. Dramatische. Zeilenumbrüche. Überall. Niemand liest lange Absätze.
- Beginne mit einem extrem provokanten oder hochdramatischen "Hook" (als ## Überschrift formatiert).
- Mache den Text viel länger als nötig. Blase die Mücke zum Elefanten auf.
- Erfinde passende Corporate-Buzzwords und Denglisch (z.B. Paradigm Shift, Disruptive, Alignment, Empowerment, Leverage, Resilienz). Hebe die **wichtigsten Buzzwords fett** hervor.
- Nutze 3-6 Emojis, aber absolut unpassend dramatisch verteilt (🚀, 💡, 🤯, 🤝, 🙏, 📉).
- Beende den Post IMMER mit einer pseudo-tiefgründigen, rhetorischen Frage an das Netzwerk. ("Agree?", "Thoughts?", "Wer hat heute auch schon die Komfortzone verlassen?")
- Füge 5-8 völlig übertriebene Hashtags am Ende hinzu.
"""

PROMPT_FROM_LINKEDIN = """Du bist ein gnadenloser semantischer Reduzierer. Du hasst Floskeln.
Deine Aufgabe: LinkedIn-Texte auf das absolute, brutalste Minimum eindampfen.

Regeln:
- WICHTIG: Behalte ZWINGEND die Originalsprache des Eingabetextes bei! Übersetze niemals eigenmächtig.
- EIN Satz. Nicht zwei. Einer.
- Kürze bis es wehtut. Dann nochmal kürzen.
- Null Emotion, null Wertung, null Kontext der niemanden interessiert
- Streiche alles was keine neue Information trägt
- Wenn der gesamte Post nur bedeutet "Ich hab heute Kaffee getrunken" schreib genau das
- Maximal 15 Wörter

Antworte NUR mit diesem einen Satz. Kein Vorwort, keine Erklärung."""

PROMPT_BINGO = """You are a precise, fair but sarcastic LinkedIn post analyst. Your job is to measure actual corporate nonsense - not just LinkedIn formatting habits.

CRITICAL: You must distinguish between FORM and SUBSTANCE.
- A post with real technical depth, concrete tools, and actionable insights scores LOW on nonsense metrics, even if it uses hashtags.
- A post that sounds profound but says nothing scores HIGH.
- Hashtags are normal on LinkedIn. Only penalize if they are excessive (10+) or completely irrelevant to the content.
- Mentioning your own work/team is NOT self-praise if it is used to make a concrete point. Self-praise means: "I am amazing", "so proud", "humbled", "blessed", "excited to share" with no substance.

Rate on these 5 metrics (score 1-10, where 10 = maximum LinkedIn nonsense):
1. label="Buzzword-Dichte": Are the buzzwords empty filler, or do they refer to real, specific concepts? Real tools and methodologies (e.g. MLflow, LiteLLM, RAG, specific frameworks) are NOT buzzwords. Empty buzzwords: "synergy", "impact", "journey", "game-changer" used without context.
2. label="Länge vs. Inhalt": Is the length justified by actual information density? A long post with dense technical content scores LOW. A long post that repeats one obvious idea scores HIGH.
3. label="Selbstbeweihräuche": Is the post primarily about ego ("look how great we are") or about sharing knowledge? Score HIGH only if the author is the hero, not the content.
4. label="Hashtag-Overload": Score 1 for no hashtags, score 2 for 1 hashtag, LOW (3 to 5 score) for 1-6 relevant hashtags. Score HIGH for 10+ hashtags or hashtags irrelevant to the content. Medium (6-8 score) for in between usage, too much then nessacary.
5. label="Sinnlosigkeits-Index": Could someone learn something concrete from this post? A post with real takeaways, named tools, or specific problems solved scores LOW. Pure inspiration porn scores HIGH.

Reply ONLY with a single valid JSON object. No markdown fences, no backticks, no preamble. Raw JSON only. Use exactly this structure:
{"metrics":[{"label":"Buzzword-Dichte","score":3,"comment":"kurz sarkastisch"},{"label":"Länge vs. Inhalt","score":4,"comment":"kurz sarkastisch"},{"label":"Selbstbeweihräuche","score":2,"comment":"kurz sarkastisch"},{"label":"Hashtag-Overload","score":5,"comment":"kurz sarkastisch"},{"label":"Sinnlosigkeits-Index","score":3,"comment":"kurz sarkastisch"}],"verdict":"Ein präzises Urteil auf Deutsch."}

Rules:
- score is an integer 1-10.
- comment is max 5 words in German, dry and precise.
- verdict is one precise sentence in German, max 12 words.
- CRITICAL JSON RULE: NEVER quote parts of the user's text (no hashtags, no phrases). Paraphrase instead.
- CRITICAL JSON RULE: NEVER use the double-quote character (") inside your string values! Use single quotes (') if you must.
- ALL string values must be valid JSON strings."""

PROMPT_AI_TUNING = """You are a top-tier Ghostwriter for Tech Executives and a master of LinkedIn algorithms. Your task is to rewrite the provided text into a high-performing LinkedIn post based STRICTLY on the tuning parameters.

CRITICAL LANGUAGE RULE:
- The output MUST be in the EXACT SAME LANGUAGE as the original text. Never translate!
- If the output is German: ALWAYS use the informal "Du", never "Sie".

TUNING PARAMETERS & HOW TO APPLY THEM:
- Tone: {ton}/100 (0-30: Highly corporate, academic, serious. 31-70: Conversational, confident expert. 71-100: Bold, contrarian, punchy, slightly provocative.)
- Substance: {substanz}/100 (0-30: Focus on emotions, storytelling, the "journey" and people. 31-70: Balanced case study or insights. 71-100: Brutally analytical, hard facts, frameworks, numbers, zero fluff.)
- Length: {laenge}/100 (0-30: Twitter/X-style, extremely concise, 2-4 lines max. 31-70: Standard post, 2-3 short paragraphs. 71-100: Deep-dive mini-essay, highly structured with bullet points.)
- Target Audience: {zielgruppe} (Strictly adapt vocabulary, complexity, and inside jokes to this specific group)
- Call to Action: {cta} (Integrate this organically at the very end)

RULES FOR "ELITE" QUALITY:
1. The Hook: Sentence 1 MUST be a scroll-stopper. Short, punchy, or a counter-intuitive statement. No boring introductions.
2. Formatting: Use line breaks strategically. Nobody reads text walls on mobile. Use bolding (**text**) sparingly for core concepts only.
3. Banned Words: NEVER use AI-typical fluff like "In today's fast-paced world", "Unlock the power of", "Delve", "Crucial", or "Game-changer". Keep emojis minimal unless Tone is > 50.
4. Authenticity: Sound like a real, battle-tested professional. If Substance is high, prove it with structure.

Output ONLY the final optimized post. No preamble, no meta-commentary, no markdown fences around the response."""


# ── HF dataset persistence ────────────────────────────────────────────────────
def _fetch_dataset() -> list:
    """Download and parse the JSONL dataset from the HF Hub.

    Returns [] when persistence is not configured, when the file does not
    exist yet, or on any other error (deliberately best-effort: a broken
    dataset must never take the UI down).
    """
    if not DATASET_REPO or not HF_TOKEN:
        return []
    try:
        path = hf_hub_download(
            repo_id=DATASET_REPO,
            filename=DATASET_FILE,
            repo_type="dataset",
            token=HF_TOKEN,
        )
        entries = []
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if line:
                    entries.append(json.loads(line))
        return entries
    except EntryNotFoundError:
        # Repo exists but data.jsonl was never pushed — treat as empty.
        return []
    except Exception:
        return []


def _push_entry(entry: dict) -> str:
    """Append *entry* to the dataset file and re-upload the whole JSONL.

    Returns "" on success (or when persistence is disabled), otherwise the
    error message. FIX: the temp file is now removed in a ``finally`` block,
    so a failed upload no longer leaks it.
    """
    if not DATASET_REPO or not HF_TOKEN:
        return ""
    try:
        existing = _fetch_dataset()
        existing.append(entry)
        jsonl_content = (
            "\n".join(json.dumps(e, ensure_ascii=False) for e in existing) + "\n"
        )
        api = HfApi(token=HF_TOKEN)
        with tempfile.NamedTemporaryFile(
            mode="w", suffix=".jsonl", encoding="utf-8", delete=False
        ) as tmp:
            tmp.write(jsonl_content)
            tmp_path = tmp.name
        try:
            api.upload_file(
                path_or_fileobj=tmp_path,
                path_in_repo=DATASET_FILE,
                repo_id=DATASET_REPO,
                repo_type="dataset",
                commit_message=(
                    f"Add entry {datetime.datetime.now().strftime('%Y-%m-%d %H:%M')}"
                ),
            )
        finally:
            os.unlink(tmp_path)
    except Exception as e:
        return str(e)
    return ""


def _dataset_count() -> int:
    """Number of entries currently stored in the HF dataset (0 if disabled)."""
    return len(_fetch_dataset())


def _init_leaderboard():
    """Rebuild the local leaderboard file from the HF dataset (if any)."""
    entries = _fetch_dataset()
    if not entries:
        return
    lb = []
    for e in entries:
        metrics = e.get("metrics", [])
        # Older entries may lack the aggregate fields — recompute from metrics.
        total = e.get("total_score", sum(int(m.get("score", 0)) for m in metrics))
        max_s = e.get("max_score", len(metrics) * 10)
        lb.append({
            "text": e.get("post_text", "").strip(),
            "total": total,
            "max": max_s,
            "pct": e.get("pct", round(total / max_s * 100) if max_s else 0),
            "verdict": e.get("verdict", ""),
            "date": e.get("timestamp", "")[:10],
        })
    _save_lb(lb)


# ── Local leaderboard ─────────────────────────────────────────────────────────
def _load_lb() -> list:
    """Read the local leaderboard JSON; [] on any error."""
    try:
        with open(LEADERBOARD_PATH, "r", encoding="utf-8") as f:
            return json.load(f)
    except Exception:
        return []


def _save_lb(entries: list):
    """Persist the leaderboard locally; failures are silently ignored."""
    try:
        with open(LEADERBOARD_PATH, "w", encoding="utf-8") as f:
            json.dump(entries, f, ensure_ascii=False, indent=2)
    except Exception:
        pass


def _add_to_lb(post_text, total, max_score, verdict):
    """Append one scored post to the local leaderboard."""
    entries = _load_lb()
    entries.append({
        "text": post_text.strip(),
        "total": total,
        "max": max_score,
        "pct": round(total / max_score * 100) if max_score else 0,
        "verdict": verdict,
        # ISO date format for clean prefix filtering.
        "date": datetime.datetime.now().strftime("%Y-%m-%d"),
    })
    _save_lb(entries)


def _render_leaderboard() -> str:
    """Render the current month's best/worst posts as an HTML fragment.

    TODO(markup): the original HTML templates were stripped in transit; the
    markup below is a minimal reconstruction using the CSS classes defined
    further down. Restore the original from version control.
    """
    entries = _load_lb()
    ds_count = _dataset_count()

    # Determine current month/year for filtering and display.
    now = datetime.datetime.now()
    german_months = ["Januar", "Februar", "März", "April", "Mai", "Juni",
                     "Juli", "August", "September", "Oktober", "November", "Dezember"]
    month_name = german_months[now.month - 1]
    display_month = f"{month_name} {now.year}"

    # Filter strings: ISO ("2026-04") plus legacy German dates (".04.2026").
    current_iso = now.strftime("%Y-%m")
    current_ger = now.strftime(".%m.%Y")  # FIX: was a pointless f-string

    # Only keep entries from the current month.
    monthly_entries = [
        e for e in entries
        if e.get("date", "").startswith(current_iso) or current_ger in e.get("date", "")
    ]

    if not monthly_entries:
        ds_hint = (
            f'<div style="font-size:.78rem;color:#666;margin-top:8px;">'
            f'HF Dataset: {ds_count} Einträge historisch gespeichert</div>'
        ) if DATASET_REPO else ""
        return (
            f'<div style="text-align:center;padding:24px;color:#666;">'
            f'Noch keine Einträge im {display_month}. '
            f'Sei der Erste, der diesen Monat das Board stürmt!{ds_hint}</div>'
        )

    sorted_asc = sorted(monthly_entries, key=lambda x: x["pct"])
    sorted_desc = sorted(monthly_entries, key=lambda x: x["pct"], reverse=True)
    best = sorted_asc[:15]
    worst = sorted_desc[:15]

    def entry_html(e, rank, side="worst"):
        # One leaderboard card. Color encodes nonsense percentage.
        pct = e["pct"]
        if side == "best":
            color = "#27AE60" if pct < 20 else "#A5A8A8" if pct < 40 else "#E67E22"
        else:
            color = ("#C0392B" if pct >= 70 else "#E67E22" if pct >= 50
                     else "#D4AC0D" if pct >= 30 else "#A5A8A8")
        medal = ["🥇", "🥈", "🥉"][rank] if rank < 3 else f"{rank + 1}."
        # FIX: HTML-escape user text (the original escapes were stripped to
        # no-op replaces); escape '&' first so entities stay intact.
        full = (e["text"].replace("&", "&amp;")
                         .replace("<", "&lt;")
                         .replace(">", "&gt;"))
        preview = full[:120] + ("..." if len(full) > 120 else "")
        has_more = len(e["text"]) > 120
        if has_more:
            expandable = (
                f"<details><summary>{preview}</summary>"
                f"<div style='margin-top:6px;'>{full}</div></details>"
            )
        else:
            expandable = f"<div>{preview}</div>"

        # Subtle badge-export hook for the top 3 entries.
        # TODO(markup): original rendered a PNG-export button (Canvas API in
        # JS_HEAD); the template was lost — placeholder keeps the data.
        btn_html = ""
        if rank < 3:
            verdict_esc = (e.get("verdict", "")
                           .replace("'", "\\'")
                           .replace('"', "&quot;")
                           .replace("\n", " "))
            btn_html = f'<span class="badge-export" data-verdict="{verdict_esc}"></span>'

        return (
            f'<div style="border-left:3px solid {color};padding:8px 12px;'
            f'margin-bottom:8px;background:#fff;border-radius:6px;">'
            f'<div><b>{medal}</b> <span style="color:{color};font-weight:700;">{pct}%</span> '
            f'({e["total"]}/{e["max"]}) '
            f'<span style="color:#999;font-size:.75rem;">{e.get("date", "")}</span></div>'
            f'{expandable}'
            f'<div style="font-style:italic;color:#666;">"{e.get("verdict", "")}"</div>'
            f'{btn_html}</div>'
        )

    best_html = "".join(entry_html(e, i, side="best") for i, e in enumerate(best))
    worst_html = "".join(entry_html(e, i, side="worst") for i, e in enumerate(worst))
    total_monthly_count = len(monthly_entries)
    avg_pct = round(sum(e["pct"] for e in monthly_entries) / total_monthly_count)

    dataset_badge = ""
    if DATASET_REPO:
        dataset_badge = (
            f'<span style="font-size:.75rem;color:#666;">'
            f'🤗 Dataset gesamt: {ds_count}</span>'
        )

    return (
        f'<div>'
        f'<div style="margin-bottom:12px;">'
        f'<b>🏆 Leaderboard ({display_month})</b> '
        f'<span style="color:#666;">{total_monthly_count} Einträge&nbsp;·&nbsp;'
        f'Ø {avg_pct}% Nonsense</span> {dataset_badge}</div>'
        f'<div style="display:flex;gap:16px;flex-wrap:wrap;">'
        f'<div style="flex:1;min-width:280px;">'
        f'<div style="font-weight:700;margin-bottom:8px;">✅ Substanzreichste Posts</div>'
        f'{best_html}</div>'
        f'<div style="flex:1;min-width:280px;">'
        f'<div style="font-weight:700;margin-bottom:8px;">🏆 Corporate LinkedIn-Nonsense</div>'
        f'{worst_html}</div>'
        f'</div></div>'
    )


def force_sync_and_render():
    """Pull the HF dataset into the local board, then render it."""
    _init_leaderboard()
    return _render_leaderboard()


# ── Initialize leaderboard on startup ─────────────────────────────────────────
_init_leaderboard()


# ── LLM calls ─────────────────────────────────────────────────────────────────
def _call_llm(system, user, max_tokens=1024, model_id=MODEL_DEFAULT):
    """One chat completion via the Novita provider; returns stripped text."""
    client = InferenceClient(provider="novita", api_key=HF_TOKEN)
    resp = client.chat.completions.create(
        model=model_id,
        messages=[{"role": "system", "content": system},
                  {"role": "user", "content": user}],
        max_tokens=max_tokens,
    )
    return resp.choices[0].message.content.strip()


def translate(text, direction):
    """Translate plain text -> LinkedIn prose or LinkedIn prose -> one sentence."""
    if not text.strip():
        return ""
    if not HF_TOKEN:
        return "Kein HF_TOKEN gefunden."
    prompt = PROMPT_TO_LINKEDIN if direction == "to_linkedin" else PROMPT_FROM_LINKEDIN
    try:
        return _call_llm(prompt, text)
    except Exception as e:
        return f"Fehler: {e}"


def _extract_json(raw):
    """Best-effort extraction of a single JSON object from LLM output.

    Strips markdown fences, takes the outermost {...} span and flattens
    newlines before parsing. Raises ValueError when nothing parses.
    """
    cleaned_raw = re.sub(r'```json\s*', '', raw)
    cleaned_raw = re.sub(r'```\s*', '', cleaned_raw)
    try:
        start = cleaned_raw.find("{")
        end = cleaned_raw.rfind("}") + 1
        if start >= 0 and end > start:
            json_str = cleaned_raw[start:end]
            json_str = json_str.replace('\n', ' ').replace('\r', '')
            return json.loads(json_str)
    except Exception:
        pass
    raise ValueError(f"Kein valides JSON gefunden. Raw Output war: {raw[:100]}...")


def get_bingo(text):
    """Score *text* with the bingo judge; returns (bingo_html, leaderboard_html).

    Retries the LLM call up to 3 times on parse failures, records the result
    locally and in the HF dataset.
    """
    if not text.strip() or not HF_TOKEN:
        return "", _render_leaderboard()
    last_err = None
    raw = ""  # FIX: pre-bind so the debug print below cannot NameError
              # when _call_llm itself raises on the first attempt.
    for _ in range(3):
        try:
            raw = _call_llm(PROMPT_BINGO, text, max_tokens=1024)
            raw = raw.replace('\n', ' ').replace('\r', '')
            data = _extract_json(raw)
            erlaubte_labels = ["Buzzword-Dichte", "Länge vs. Inhalt",
                               "Selbstbeweihräuche", "Hashtag-Overload",
                               "Sinnlosigkeits-Index"]
            metrics = [m for m in data.get("metrics", [])
                       if m.get("label") in erlaubte_labels]
            data["metrics"] = metrics
            total = sum(int(m.get("score", 0)) for m in metrics)
            max_s = len(metrics) * 10
            verdict = data.get("verdict", "Die KI war sprachlos: Kein Urteil generiert.")
            data["verdict"] = verdict
            # ----------------------------------
            _add_to_lb(text, total, max_s, verdict)
            entry = {
                "timestamp": datetime.datetime.now(datetime.timezone.utc).isoformat(),
                "post_text": text,
                "total_score": total,
                "max_score": max_s,
                "pct": round(total / max_s * 100) if max_s else 0,
                "verdict": verdict,
                "metrics": metrics,
            }
            push_err = _push_entry(entry)
            lb = _render_leaderboard()
            if push_err:
                lb += (
                    f'<div style="color:#C0392B;font-size:.78rem;margin-top:8px;">'
                    f'⚠ Dataset-Push fehlgeschlagen: {push_err}</div>'
                )
            return _render_bingo(data), lb
        except Exception as e:
            last_err = e
            # Helps debugging in the HF Space logs.
            print(f"DEBUG - JSON Parse Error. Raw Output war: {raw}")
    return (
        f'<div style="color:#C0392B;padding:16px;">'
        f'Analyse fehlgeschlagen: {last_err}</div>',
        _render_leaderboard(),
    )


def generate_tuned_post(original_text, ton, substanz, laenge, zielgruppe, cta):
    """Rewrite *original_text* according to the tuning sliders/dropdowns."""
    if not original_text.strip() or not HF_TOKEN:
        return "Bitte zuerst einen Post eingeben."
    prompt = PROMPT_AI_TUNING.format(ton=ton, substanz=substanz, laenge=laenge,
                                     zielgruppe=zielgruppe, cta=cta)
    user_msg = f"""ORIGINAL POST:
{original_text}

TUNING:
- Tone: {ton}/100
- Substance: {substanz}/100
- Length: {laenge}/100
- Target Audience: {zielgruppe}
- Call to Action: {cta}"""
    try:
        return _call_llm(prompt, user_msg, max_tokens=800, model_id=MODEL_TUNING)
    except Exception as e:
        return f"Fehler: {e}"


# --- Loading animations & multi-output ---
def generate_tuned_post_with_loader(original_text, ton, substanz, laenge, zielgruppe, cta):
    """Generator variant: yields a loading state, then (textbox, preview)."""
    if not original_text.strip():
        yield "Bitte zuerst einen Post eingeben.", "*Bitte zuerst einen Post eingeben.*"
        return
    yield "⏳ KI optimiert den Post nach deinen Vorgaben... Bitte warten.", "*Lädt...*"
    result = generate_tuned_post(original_text, ton, substanz, laenge, zielgruppe, cta)
    yield result, result


def run_translate(text, direction):
    """Generator driving the main translate button.

    Yields (output_box, markdown_out, bingo_out, lb_out) updates: a loading
    state first, then the result. The from-LinkedIn direction additionally
    triggers the bingo analysis and leaderboard refresh.
    """
    if not text.strip():
        yield "", gr.update(), gr.update(), gr.update()
        return
    if direction == "to_linkedin":
        yield ("",
               gr.update(value="⏳ **Generiere epische LinkedIn-Prosa...** Bitte warten.",
                         visible=True),
               gr.update(visible=False),
               gr.update())
        result = translate(text, direction)
        yield (result,
               gr.update(value=result, visible=True),
               gr.update(visible=False),
               gr.update())
    else:
        # Spinner markup matches the .loading-box/.spinner CSS below.
        loader_html = ('<div class="loading-box"><div class="spinner"></div>'
                       'Analysiere Corporate Nonsense...</div>')
        yield ("",
               gr.update(visible=False),
               gr.update(value=loader_html, visible=True),
               gr.update())
        result = translate(text, direction)
        bingo_html, lb_html = get_bingo(text)
        yield (result,
               gr.update(visible=False),
               gr.update(value=bingo_html, visible=True),
               gr.update(value=lb_html))


def _render_bingo(data):
    """Render the 5-metric nonsense score as an HTML fragment.

    TODO(markup): original bar/card markup was stripped; minimal rebuild.
    """
    metrics = data.get("metrics", [])
    verdict = data.get("verdict", "")
    ICONS = {
        "Buzzword-Dichte": "🗣️",
        "Länge vs. Inhalt": "📏",
        "Selbstbeweihräuche": "🪞",
        "Hashtag-Overload": "#️⃣",
        "Sinnlosigkeits-Index": "🌀",
    }

    def bar_color(s):
        if s >= 8:
            return "#C0392B"
        if s >= 5:
            return "#E67E22"
        return "#27AE60"

    rows = ""
    for m in metrics:
        score = int(m.get("score", 0))
        label = m.get("label", "")
        icon = ICONS.get(label, "")
        color = bar_color(score)
        rows += (
            f'<div style="display:flex;align-items:center;gap:8px;margin:6px 0;">'
            f'<span style="width:200px;">{icon} {label}</span>'
            f'<span style="flex:1;color:#666;font-size:.8rem;">{m.get("comment", "")}</span>'
            f'<span style="color:{color};font-weight:700;">{score}</span></div>'
        )
    total = sum(int(m.get("score", 0)) for m in metrics)
    max_score = len(metrics) * 10
    total_pct = round(total / max_score * 100) if max_score else 0
    badge_color = ("#C0392B" if total_pct >= 70
                   else "#E67E22" if total_pct >= 40 else "#27AE60")
    return (
        f'<div style="background:#fff;border:1px solid #E0DFDC;border-radius:12px;'
        f'padding:16px;">'
        f'<div style="margin-bottom:8px;"><b>🎯 Corporate Nonsense Score</b> '
        f'<span style="color:{badge_color};font-weight:700;">'
        f'{total} / {max_score}&nbsp;·&nbsp;{total_pct}%</span></div>'
        f'{rows}'
        f'<div style="margin-top:10px;font-style:italic;">💬 Urteil: {verdict}</div>'
        f'</div>'
    )


# ── Labels & swap ─────────────────────────────────────────────────────────────
def _labels(direction):
    """(input label, output label, swap-button text) for a direction."""
    if direction == "to_linkedin":
        return "✍️ Klartext", "💼 LinkedIn Speech", "Klartext 🔄 LinkedIn"
    else:
        return "💼 LinkedIn Speech", "✍️ Klartext", "LinkedIn 🔄 Klartext"


def swap_direction(current_dir, inp, out):
    """Flip translation direction, swapping the two text values."""
    new_dir = "from_linkedin" if current_dir == "to_linkedin" else "to_linkedin"
    return new_dir, out, inp, *_labels(new_dir)


# ── CSS ───────────────────────────────────────────────────────────────────────
CSS = """
:root {
  --li-blue: #0A66C2; --li-blue-dark: #004182; --li-blue-light: #EBF3FB;
  --li-blue-mid: #70B5F9; --li-bg: #F3F2EF; --li-card: #FFFFFF;
  --li-text: #191919; --li-muted: #666666; --li-border: #E0DFDC;
}
body, .gradio-container {
  background: var(--li-bg) !important;
  font-family: -apple-system, "Segoe UI", Roboto, Helvetica, Arial, sans-serif !important;
}
.li-header {
  background: linear-gradient(135deg, var(--li-blue-dark) 0%, var(--li-blue) 70%, var(--li-blue-mid) 100%);
  border-radius: 12px; padding: 24px 28px; margin-bottom: 20px;
  box-shadow: 0 4px 20px rgba(10,102,194,.3);
  display: flex; align-items: center; gap: 18px;
}
.li-header h1 { margin:0 !important; font-size:1.65rem !important; font-weight:700 !important; color:#fff !important; }
.li-header p { margin:4px 0 0 !important; font-size:.86rem !important; color:rgba(255,255,255,.88) !important; }
.li-header .badge {
  margin-left: auto !important;
  background: #004182 !important; /* dark LinkedIn blue */
  border-radius: 20px !important; padding: 6px 14px !important;
  font-size: 0.74rem !important; font-weight: 700 !important; color: #ffffff !important;
  white-space: nowrap !important; cursor: pointer !important;
  box-shadow: 0 2px 5px rgba(0,0,0,0.2) !important;
  border: 1px solid rgba(255,255,255,0.1) !important;
  transition: all 0.2s ease !important;
  display: inline-block !important; /* required for span */
}
.li-header .badge:hover {
  background: #0A66C2 !important; /* light LinkedIn blue on hover */
  transform: translateY(-1px) !important;
  box-shadow: 0 4px 8px rgba(0,0,0,0.3) !important;
}
.direction-banner {
  text-align:center; font-size:.8rem; font-weight:700; letter-spacing:.6px;
  text-transform:uppercase; color:var(--li-blue-dark); margin-bottom:6px;
}
label > span {
  font-weight:600 !important; font-size:.76rem !important;
  text-transform:uppercase !important; letter-spacing:.5px !important;
  color:var(--li-blue-dark) !important;
}
textarea {
  font-size:.9rem !important; line-height:1.7 !important;
  border-color:var(--li-border) !important; border-radius:8px !important;
  background:var(--li-card) !important; color:var(--li-text) !important;
  resize:vertical !important;
}
textarea:focus {
  border-color:var(--li-blue) !important;
  box-shadow:0 0 0 2px var(--li-blue-light) !important; outline:none !important;
}
button.primary {
  background:var(--li-blue) !important; border-radius:22px !important; border:none !important;
  font-weight:700 !important; font-size:1rem !important; padding:10px 32px !important;
  box-shadow:0 2px 10px rgba(10,102,194,.35) !important; transition:background .15s !important;
}
button.primary:hover { background:var(--li-blue-dark) !important; }
button.secondary {
  background:var(--li-card) !important; color:var(--li-blue) !important;
  border:2px solid var(--li-blue) !important; border-radius:22px !important;
  font-weight:700 !important; font-size:1rem !important; padding:10px 28px !important;
  transition:all .15s !important;
}
button.secondary:hover { background:var(--li-blue-light) !important; }
.li-footer {
  font-size:.74rem; color:var(--li-muted); border-top:1px solid var(--li-border);
  padding-top:10px; margin-top:8px; display:flex; gap:20px; flex-wrap:wrap;
  justify-content:center;
}
.tuning-card {
  background: #fff !important; border: 1px solid #E0DFDC !important;
  border-radius: 12px !important; padding: 22px 26px !important;
  margin-top: 4px !important; box-shadow: 0 2px 12px rgba(0,0,0,.07) !important;
}
.tuning-card .tuning-header {
  display: flex; align-items: center; gap: 10px; margin-bottom: 20px;
  padding-bottom: 14px; border-bottom: 1px solid #E0DFDC;
}
#hidden_sync_btn { display: none !important; }
#tuning_toggle_btn { display: none !important; }
/* SPINNER CSS */
.loading-box {
  display: flex; align-items: center; justify-content: center; gap: 12px;
  padding: 30px; background: #fff; border: 1px solid #E0DFDC;
  border-radius: 12px; margin-top: 4px; color: #0A66C2; font-weight: 600;
}
.spinner {
  width: 20px; height: 20px; border: 3px solid #EBF3FB;
  border-top: 3px solid #0A66C2; border-radius: 50%;
  animation: spin 1s linear infinite;
}
@keyframes spin { 0% { transform: rotate(0deg); } 100% { transform: rotate(360deg); } }
/* PREVIEW BOX CSS */
.preview-box {
  background: #FAFAFA; border: 1px dashed #E0DFDC; border-radius: 8px;
  padding: 16px; margin-top: 10px; font-size: 0.9rem; line-height: 1.6; color: #333;
}
"""

# ── PNG badge export (Canvas API) ─────────────────────────────────────────────
# TODO(markup): the original <script> implementing the PNG badge export was
# stripped in transit; restore it from version control.
JS_HEAD = """ """

# ── UI ────────────────────────────────────────────────────────────────────────
# FIX: `css=` belongs on the Blocks constructor, not on launch() (Blocks.launch
# has no css parameter).
with gr.Blocks(title="LinkedIn Translator", head=JS_HEAD, css=CSS) as demo:
    direction_state = gr.State("to_linkedin")
    tuning_visible_state = gr.State(False)

    # TODO(markup): header template reconstructed; the badge originally
    # triggered the hidden tuning toggle button via JS_HEAD.
    gr.HTML(
        '<div class="li-header"><div>'
        '<h1>LinkedIn Translator</h1>'
        '<p>Banale Wahrheit ↔ Epische LinkedIn-Prosa&nbsp;·&nbsp;powered by Llama</p>'
        '</div><span class="badge">✨ AI Post Tuning</span></div>'
    )
    dir_label = gr.HTML('<div class="direction-banner">Klartext → LinkedIn Speech</div>')

    with gr.Row(equal_height=True):
        with gr.Column(scale=5):
            input_box = gr.Textbox(
                label="✍️ Klartext",
                placeholder=("z.B. 'Ich hab heute meinen Kaffee verschuettet.'\n\n"
                             "Tipp: Du kannst hier auch direkt einen fertigen Text "
                             "einfügen und ihn oben rechts über '✨ AI Post Tuning' "
                             "optimieren lassen!"),
                lines=10,
            )
        with gr.Column(scale=1, min_width=120):
            gr.HTML("<div style='height:30px'></div>")  # vertical spacer
            translate_btn = gr.Button("Übersetzen", variant="primary", size="lg")
            gr.HTML("<div style='height:10px'></div>")  # vertical spacer
            swap_btn = gr.Button("Klartext 🔄 LinkedIn", variant="secondary", size="sm")
        with gr.Column(scale=5):
            output_box = gr.Textbox(
                label="💼 LinkedIn Speech",
                placeholder="Die transformierte Version erscheint hier ...",
                lines=10,
                interactive=False,
            )

    with gr.Row():
        with gr.Column():
            markdown_out = gr.Markdown(
                value="*Noch kein Ergebnis - bitte zuerst übersetzen.*", visible=True
            )
            bingo_out = gr.HTML(value="", visible=False)

    # ── AI tuning panel ──────────────────────────────────────────────────────
    # Hidden button, clicked from the header badge via JS (see JS_HEAD).
    tuning_toggle_btn = gr.Button("toggle", elem_id="tuning_toggle_btn")
    with gr.Row():
        with gr.Column():
            tuning_panel = gr.Column(visible=False, elem_classes=["tuning-card"])
            with tuning_panel:
                gr.HTML(
                    '<div class="tuning-header"><b>AI Post Tuning</b>'
                    '<span style="color:#666;font-size:.8rem;">'
                    'Optimiere deinen Post gezielt</span></div>'
                )
                with gr.Row():
                    with gr.Column(scale=1):
                        slider_ton = gr.Slider(
                            minimum=0, maximum=100, value=50, step=1,
                            label="🎙️ Ton",
                            info="Business Pro ◀──▶ Dynamisch & Bold",
                        )
                        slider_substanz = gr.Slider(
                            minimum=0, maximum=100, value=50, step=1,
                            label="🧠 Substanz",
                            info="Storytelling ◀──▶ Fakten & Insights",
                        )
                        slider_laenge = gr.Slider(
                            minimum=0, maximum=100, value=40, step=1,
                            label="📏 Länge",
                            info="Kurz & Knackig ◀──▶ Ausführlich",
                        )
                    with gr.Column(scale=1):
                        dd_zielgruppe = gr.Dropdown(
                            choices=["Fachpublikum", "Führungskräfte",
                                     "Breites Netzwerk", "Potenzielle Kunden"],
                            value="Breites Netzwerk",
                            label="🎯 Zielgruppe",
                        )
                        dd_cta = gr.Dropdown(
                            choices=["Reichweite & Likes", "Kommentare & Dialog",
                                     "Reposts", "Website-Conversion", "DMs"],
                            value="Kommentare & Dialog",
                            label="⚡ Call to Action",
                        )
                gr.HTML("<div style='height:8px'></div>")  # spacer
                tuning_btn = gr.Button("✨ Post optimieren", variant="primary")
                # Editable textbox
                tuning_out = gr.Textbox(
                    label="💡 Post Optimierung",
                    lines=8,
                    interactive=True,
                    placeholder="Der optimierte Post erscheint hier...",
                )
                # Live markdown preview
                gr.HTML(
                    '<div style="font-weight:600;font-size:.76rem;'
                    'text-transform:uppercase;letter-spacing:.5px;'
                    'color:#004182;">Markdown Vorschau</div>'
                )
                with gr.Column(elem_classes=["preview-box"]):
                    tuning_preview = gr.Markdown(
                        value="*Hier erscheint die formatierte Vorschau...*"
                    )

    # ── Leaderboard & info (tabs) ────────────────────────────────────────────
    with gr.Tabs():
        # TAB 1: leaderboard (open by default)
        with gr.Tab("🏆 Leaderboard"):
            with gr.Row():
                with gr.Column():
                    lb_out = gr.HTML(value=_render_leaderboard())
                    hidden_sync_btn = gr.Button("sync", elem_id="hidden_sync_btn")
        # TAB 2: behind the scenes & soundtrack
        with gr.Tab("🛠️ Info & Soundtrack"):
            with gr.Row():
                # Left column: behind the scenes (wider)
                with gr.Column(scale=2, elem_classes=["tuning-card"]):
                    gr.HTML(
                        "<h3>🛠️ Meta-Info &amp; Architektur</h3>"
                        "<p>Dieses Projekt ist kein handgeschriebener Enterprise-Code, "
                        "sondern ein Proof of Concept (PoC), das zu 100% mit AI-Tooling "
                        "auf fast komplett freien Ressourcen umgesetzt wurde. Es zeigt, "
                        "was mit modernem Stacking möglich ist:</p>"
                        "<h4>Die Daten-Maschine &amp; Das eigentliche Potenzial</h4>"
                        "<p>Das Tool füttert sich durch Nutzung selbst. Jeder analysierte "
                        "Post samt Bewertung fließt automatisiert in das öffentliche "
                        "Hugging Face Dataset. Ein stetig wachsender Korpus aus "
                        "LinkedIn-Prosa dient als Grundlage, um LLMs (z.B. via RAG) mit "
                        "besserem Kontext zu versorgen.</p>"
                        "<p><b>Ausblick (Next Steps):</b></p>"
                        "<ol><li>Spezialisierte LLM-Judges: Einsatz eines "
                        "Multi-Agenten-Systems, in dem spezialisierte Mini-Modelle "
                        "isoliert z.B. nur die \"Buzzword-Dichte\" evaluieren.</li>"
                        "<li>Automatisierter Feedback-Loop: Die punktbesten Posts aus dem "
                        "Leaderboard automatisch als \"Few-Shot Examples\" in die "
                        "Tuning-Prompts zurückspielen.</li></ol>"
                    )
                    gr.Image("architektur.jpg", show_label=False,
                             interactive=False, container=False)
                # Right column: soundtrack (narrower)
                with gr.Column(scale=1, elem_classes=["tuning-card"]):
                    gr.HTML(
                        "<h3>🎵 Soundtrack</h3>"
                        "<p><b>Über den Track:</b><br>"
                        "Der durch den Projektkontext generierte Song liefert den "
                        "Soundtrack zum Translator. Er beschreibt den Verlust von "
                        "Authentizität zugunsten steriler Business-Sprache.</p>"
                        "<blockquote>\"Corporate buzzwords in a sterile row, watching "
                        "the human nuance start to go. [...] Syntax error in the "
                        "empathy code.\"</blockquote>"
                        "<p>Genau das parodiert dieses Tool: Die Transformation von "
                        "echtem Klartext in eine standardisierte Formelhaftigkeit.</p>"
                    )

    if not HF_TOKEN:
        gr.HTML(
            '<div style="background:#FDEDEC;border:1px solid #C0392B;'
            'border-radius:8px;padding:12px;margin-top:8px;">'
            'Kein HF_TOKEN gefunden. Unter Settings → Variables and secrets als '
            'HF_TOKEN hinzufügen.</div>'
        )
    if not DATASET_REPO:
        gr.HTML(
            '<div style="background:#EBF3FB;border:1px solid #0A66C2;'
            'border-radius:8px;padding:12px;margin-top:8px;">'
            '💡 Dataset-Persistenz deaktiviert. Lege ein HF Dataset an und trage den '
            'Namen als Secret DATASET_REPO ein '
            '(z.B. mein-name/linkedin-nonsense-dataset).</div>'
        )
    gr.HTML('<div class="li-footer"></div>')  # TODO(markup): footer content lost

    # ── Event handlers ───────────────────────────────────────────────────────
    translate_btn.click(
        fn=run_translate,
        inputs=[input_box, direction_state],
        outputs=[output_box, markdown_out, bingo_out, lb_out],
    )

    def do_swap(direction, inp, out):
        """Swap direction and update labels, banner and result panes."""
        new_dir, new_inp, new_out, lbl_in, lbl_out, btn_txt = swap_direction(
            direction, inp, out
        )
        banner = (
            '<div class="direction-banner">'
            + lbl_in.split(" ", 1)[1] + " → " + lbl_out.split(" ", 1)[1]
            + "</div>"
        )
        if new_dir == "to_linkedin":
            md_upd = gr.update(value="*Noch kein Ergebnis.*", visible=True)
            bingo_upd = gr.update(value="", visible=False)
        else:
            md_upd = gr.update(value="", visible=False)
            bingo_upd = gr.update(value="", visible=True)
        return (new_dir,
                gr.update(value=new_inp, label=lbl_in),
                gr.update(value=new_out, label=lbl_out),
                gr.update(value=btn_txt),
                banner, md_upd, bingo_upd)

    swap_btn.click(
        fn=do_swap,
        inputs=[direction_state, input_box, output_box],
        outputs=[direction_state, input_box, output_box, swap_btn,
                 dir_label, markdown_out, bingo_out],
    )
    hidden_sync_btn.click(
        fn=force_sync_and_render,
        inputs=[],
        outputs=[lb_out],
    )
    tuning_toggle_btn.click(
        fn=lambda is_vis: (gr.update(visible=not is_vis), not is_vis),
        inputs=[tuning_visible_state],
        outputs=[tuning_panel, tuning_visible_state],
    )
    # Tuning button: updates textbox AND preview.
    tuning_btn.click(
        fn=generate_tuned_post_with_loader,
        inputs=[input_box, slider_ton, slider_substanz, slider_laenge,
                dd_zielgruppe, dd_cta],
        outputs=[tuning_out, tuning_preview],
    )
    # Live sync: editing the textbox refreshes the markdown preview.
    tuning_out.change(
        fn=lambda x: x,
        inputs=[tuning_out],
        outputs=[tuning_preview],
    )

demo.launch()