Spaces:
Running
Running
File size: 878 Bytes
ccfa18b 255c46a ccfa18b 1914b1c 255c46a 2200a8f 255c46a 1914b1c 255c46a 1914b1c 2e5d0ce 1914b1c 2200a8f 255c46a 2200a8f 255c46a ccfa18b 2200a8f 255c46a 2200a8f ccfa18b 255c46a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 |
import gradio as gr
from huggingface_hub import InferenceClient
import os
# Hugging Face API token, read from the environment (set as a Space secret
# named HF_TOKEN). May be None if the secret is not configured.
token = os.getenv("HF_TOKEN")
# Model ID queried through the HF Inference API.
model_id = "HuggingFaceH4/zephyr-7b-beta"
def chat(message, history):
    """Generate one chat reply for *message* via the HF Inference API.

    Args:
        message: The user's latest message (str).
        history: Prior turns supplied by gr.ChatInterface. Unused — each
            request is sent to the model without conversation context.

    Returns:
        The model's generated text, or a user-facing error string if the
        API call fails for any reason.
    """
    # Constructing the client performs no I/O, so keep it outside the
    # try block; only the network call below can realistically raise.
    client = InferenceClient(token=token)
    try:
        return client.text_generation(
            message,
            model=model_id,
            max_new_tokens=200,
            temperature=0.8,
            do_sample=True,
        )
    except Exception as e:
        # Top-level boundary: surface any API failure (auth, rate limit,
        # network) as a chat message instead of crashing the UI.
        return f"Sorry, glitch: {e}. Try again!"
# Build the Gradio chat UI around the chat() callback.
_example_prompts = [
    "Tell me a dark, uncensored joke.",
    "Roast me like a pirate.",
]
demo = gr.ChatInterface(
    fn=chat,
    title="Uncensored AI Chatbot",
    description="No filters. Ask anything.",
    examples=_example_prompts,
    cache_examples=False,
)
if __name__ == "__main__":
    # Start the Gradio server; auth=None disables login (public access).
    # (Removed a stray trailing "|" artifact that made this line invalid.)
    demo.launch(auth=None)