|
|
import gradio as gr |
|
|
from transformers import pipeline |
|
|
import torch |
|
|
|
|
|
|
|
|
# UI strings shown in the Gradio interface header (user-facing text is German).
title = "CocoAi-1PT Demo"


description = "Dies ist eine Demo der KI CocoAi-1PT von CocoEntertainment."
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Load the text-generation pipeline once at import time.
# float16 halves the memory footprint; device_map="auto" lets Accelerate
# place the model on whatever device(s) are available.
try:
    generator = pipeline(
        "text-generation",
        model="CocoEntertainment/CocoAi-1PT",
        torch_dtype=torch.float16,
        device_map="auto"
    )
except Exception as e:
    # Keep the app importable even if loading fails (e.g. out of memory on
    # the free tier); generate_text() reports the failure to the user instead.
    generator = None
    print(f"Fehler beim Laden des Modells: {e}")
|
|
|
|
|
|
|
|
def generate_text(prompt, *, max_new_tokens=100, temperature=0.7):
    """Generate a continuation of *prompt* with the CocoAi-1PT model.

    Args:
        prompt: User-supplied input text to continue.
        max_new_tokens: Maximum number of tokens to generate (keyword-only;
            default 100, matching the original hard-coded value).
        temperature: Sampling temperature (keyword-only; default 0.7).

    Returns:
        The generated text (which includes the prompt, as returned by the
        pipeline), or a German error message if the model is unavailable
        or generation fails.
    """
    # The model may have failed to load at startup (see the module-level try).
    if generator is None:
        return "Fehler: Das Modell konnte nicht geladen werden (evtl. zu groß für Free Tier)."

    try:
        results = generator(
            prompt,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=temperature,
            truncation=True,
        )
        # pipeline() returns a list of candidate dicts; use the first one.
        return results[0]['generated_text']
    except Exception as e:
        # Broad catch is deliberate here: surface any generation error as
        # text in the UI instead of crashing the Gradio worker.
        return f"Ein Fehler ist aufgetreten: {str(e)}"
|
|
|
|
|
|
|
|
# Wire the generator into a minimal Gradio UI: one text input, one text output.
interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Gib hier deinen Text ein...", label="Eingabe"),
    outputs=gr.Textbox(label="Antwort"),
    title=title,
    description=description
)


# Start the web server only when run as a script, not when imported.
if __name__ == "__main__":
    interface.launch()
|
|
|