# Hugging Face Spaces app: German quote generator (Gradio + ctransformers).
| import gradio as gr | |
| from ctransformers import AutoModelForCausalLM | |
| import random | |
# Prompt scaffolding and sampling presets for the quote generator.

# System message (German): tells the model to answer in the form of quotes.
system_prompt = (
    "Dies ist eine Unterhaltung zwischen "
    "einem intelligenten, hilfsbereitem "
    "KI-Assistenten und einem Nutzer.\n"
    "Der Assistent gibt Antworten in Form von Zitaten."
)

# ChatML-style template; filled with `system_prompt` and the author name.
prompt_format = (
    "<|im_start|>system\n{system_prompt}"
    "<|im_end|>\n<|im_start|>user\nZitiere {prompt}"
    "<|im_end|>\n<|im_start|>assistant\n"
)

# Per-UI-mode sampling parameters: low temperature stays close to real
# quotes; higher temperature gets increasingly creative/chaotic.
modes = {
    "Authentisch": {"temperature": 0.2, "top_k": 10},
    "Ausgeglichen": {"temperature": 1, "top_p": 0.9},
    "Chaotisch": {"temperature": 2},
}
# Authors a quote can be attributed to; one is drawn with random.choice
# when the user leaves the text box empty.  Order is preserved from the
# original so index-based selection behaves identically.
authors = [
    "Johann Wolfgang von Goethe", "Friedrich Schiller", "Immanuel Kant",
    "Oscar Wilde", "Lü Bu We", "Wilhelm Busch",
    "Friedrich Nietzsche", "Karl Marx", "William Shakespeare",
    "Kurt Tucholsky", "Georg Christoph Lichtenberg", "Arthur Schopenhauer",
    "Seneca der Jüngere", "Martin Luther", "Mark Twain",
    "Cicero", "Marie von Ebner-Eschenbach", "Novalis",
    "Franz Kafka", "Jean-Jacques Rousseau", "Heinrich Heine",
    "Honoré de Balzac", "Georg Büchner", "Gotthold Ephraim Lessing",
    "Markus M. Ronner", "Gerhard Uhlenbruck", "Theodor Fontane",
    "Jean Paul", "Leo Tolstoi", "Friedrich Hebbel",
    "Horaz", "Albert Einstein", "Jesus von Nazareth",
    "Angela Merkel", "Ambrose Bierce", "Christian Morgenstern",
    "Friedrich Hölderlin", "Joseph Joubert", "François de La Rochefoucauld",
    "Otto von Bismarck", "Fjodor Dostojewski", "Ovid",
    "Rudolf Steiner", "Ludwig Börne", "Hugo von Hofmannsthal",
    "Laotse", "Thomas von Aquin", "Ludwig Wittgenstein",
    "Friedrich Engels", "Charles de Montesquieu",
]
# Load the quantized quote-tuned LeoLM model (GGUF) via ctransformers.
# NOTE(review): `from_pretrained` with a Hub repo id presumably downloads
# the weights on first run — confirm; this is why startup can be slow.
model = AutoModelForCausalLM.from_pretrained(
    "caretech-owl/leo-hessionai-7B-quotes-gguf", model_type="Llama"
)
def quote(author: str = "", mode: str = "") -> str:
    """Generate a quote attributed to *author* in the given sampling mode.

    Args:
        author: Name to attribute the quote to.  When empty (or only
            whitespace), a random entry from ``authors`` is used.
        mode: Key into ``modes`` ("Authentisch", "Ausgeglichen",
            "Chaotisch").  Empty or unknown values fall back to
            "Authentisch".

    Returns:
        The raw text generated by the model.
    """
    # Whitespace-only input should also trigger the random-author fallback.
    author = author.strip() or random.choice(authors)
    # modes.get avoids a KeyError on an unexpected mode value; fall back to
    # the most conservative preset, matching the original default.
    params = modes.get(mode) or modes["Authentisch"]
    query = prompt_format.format(
        system_prompt=system_prompt,
        prompt=author,
    )
    # Debug output: show the full prompt and the raw completion.
    print("=" * 20)
    print(query)
    # Pass the stop sequence as a list: a bare string risks being treated
    # as a sequence of single-character stop tokens by the backend.
    output = model(query, stop=["<|im_end|>"], max_new_tokens=300, **params)
    print("-" * 20)
    print(output)
    return output
# --- Gradio UI -----------------------------------------------------------
# Layout: heading, a row with the author text box and mode dropdown, then
# the output box and the generate button wired to quote().
with gr.Blocks() as demo:
    gr.Markdown(
        "# Zitatgenerator\n\n*Hinweis: Generierung kann ein paar Minuten dauern.*"
    )
    with gr.Row():
        author_box = gr.Textbox(
            label="Zitat generieren für", lines=1, placeholder="Aristoteles"
        )
        mode_dropdown = gr.Dropdown(
            choices=["Authentisch", "Ausgeglichen", "Chaotisch"],
            label="Modus",
            value="Ausgeglichen",
        )
    quote_box = gr.Textbox(label="Zitat")
    generate_btn = gr.Button("Generiere Zitat")
    generate_btn.click(fn=quote, inputs=[author_box, mode_dropdown], outputs=quote_box)

demo.launch()