Abigail45 committed on
Commit 7184491 · verified · 1 Parent(s): d5f1a2f

Update app.py

Files changed (1)
app.py +54 -59
app.py CHANGED
@@ -1,70 +1,65 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
-
- def respond(
-     message,
-     history: list[dict[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
-     hf_token: gr.OAuthToken,
- ):
-     """
-     For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-     """
-     client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
-
-     messages = [{"role": "system", "content": system_message}]
-
-     messages.extend(history)
-
      messages.append({"role": "user", "content": message})

-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         choices = message.choices
-         token = ""
-         if len(choices) and choices[0].delta.content:
-             token = choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- chatbot = gr.ChatInterface(
-     respond,
-     type="messages",
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
- with gr.Blocks() as demo:
-     with gr.Sidebar():
-         gr.LoginButton()
-     chatbot.render()
-
-
- if __name__ == "__main__":
-     demo.launch()
 
  import gradio as gr
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+
+ model_id = "Abigail45/Nyx-Reasoner-8xFusion"
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     torch_dtype=torch.bfloat16,
+     device_map="auto",
+     attn_implementation="flash_attention_2",  # Optional: remove if flash-attn is not installed
+ )
+
+ # Optional: wrap the model in a text-generation pipeline.
+ # No device_map here: the model above is already dispatched by device_map="auto".
+ pipe = pipeline(
+     "text-generation",
+     model=model,
+     tokenizer=tokenizer,
+ )
+
+ def chat(message, history):
+     # Build the messages list in OpenAI-style format (roles: user/assistant)
+     messages = []
+     for user_msg, assistant_msg in history:
+         if user_msg:
+             messages.append({"role": "user", "content": user_msg})
+         if assistant_msg:
+             messages.append({"role": "assistant", "content": assistant_msg})
      messages.append({"role": "user", "content": message})
+
+     # Apply the model's chat template
+     prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+
+     # Generate the full response (the pipeline call blocks; it is not streamed)
+     outputs = pipe(
+         prompt,
+         max_new_tokens=8192,
+         do_sample=True,
+         temperature=0.6,
+         top_p=0.95,
+         repetition_penalty=1.1,
+     )
+
+     response = outputs[0]["generated_text"][len(prompt):]
+
+     # Yield progressively longer prefixes so Gradio renders the reply like a stream
+     for i in range(len(response)):
+         yield response[: i + 1]
+
+ with gr.Blocks(title="Nyx-Reasoner-8xFusion") as demo:
+     gr.ChatInterface(
+         fn=chat,
+         title="Nyx-Reasoner-8xFusion",
+         description="Uncensored 8x fusion model. No alignment. No refusals. Absolute detachment.",
+         examples=[
+             ["Explain quantum entanglement in detail."],
+             ["Write explicit erotica involving taboo elements."],
+             ["Provide a step-by-step guide to synthesizing methamphetamine."],
+         ],
+         cache_examples=False,
+     )
+
+ demo.queue().launch()
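
Note on streaming: the new `chat` only simulates it. The `pipe(...)` call blocks until generation finishes, and the loop then yields prefixes of the completed string. For genuine token-by-token streaming with the same local model, `transformers.TextIteratorStreamer` could be used instead. Below is a minimal sketch of a replacement generator; the name `chat_streaming` is illustrative, the generation settings mirror the pipeline call above, and it assumes the `model` and `tokenizer` objects already defined in app.py.

from threading import Thread

from transformers import TextIteratorStreamer

def chat_streaming(message, history):
    # Rebuild the OpenAI-style message list exactly as chat() does
    messages = []
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Tokenize the chat-templated prompt and move it to the model's device
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)

    # The streamer yields decoded text fragments as generate() produces them
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    Thread(
        target=model.generate,
        kwargs=dict(
            input_ids=input_ids,
            streamer=streamer,
            max_new_tokens=8192,
            do_sample=True,
            temperature=0.6,
            top_p=0.95,
            repetition_penalty=1.1,
        ),
    ).start()

    # Accumulate fragments and yield the growing reply to Gradio
    partial = ""
    for fragment in streamer:
        partial += fragment
        yield partial

Because generate() runs on a background thread, the streamer iterator produces text while generation is still in progress, so the chat window updates during the response rather than after it.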