import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Hugging Face Hub repo for the RLLMv3 fine-tune (a modified GPT2-XL).
model_name = "migueldeguzmandev/RLLMv3.2-4"

# Load tokenizer and model once at import time (downloads on first run).
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# GPT-2 family models define no pad token; reuse EOS so generate() can pad.
model.config.pad_token_id = model.config.eos_token_id
| | |
def generate_response(input_text, temperature):
    """Generate a sampled model continuation for *input_text*.

    Args:
        input_text: Prompt string from the user.
        temperature: Sampling temperature; must be > 0 since do_sample=True.

    Returns:
        The decoded text of the single generated sequence. Note this
        includes the prompt itself, since the full output ids are decoded.
    """
    # Tokenize the user input into PyTorch tensors.
    inputs = tokenizer(input_text, return_tensors="pt")
    input_ids = inputs["input_ids"]
    attention_mask = inputs["attention_mask"]

    # Sampled decoding. max_length caps prompt + generated tokens at 300;
    # no_repeat_ngram_size=2 blocks verbatim bigram repetition.
    output = model.generate(
        input_ids,
        attention_mask=attention_mask,
        max_length=300,
        num_return_sequences=1,
        temperature=temperature,
        no_repeat_ngram_size=2,
        top_k=50,
        top_p=0.95,
        do_sample=True,
    )

    # Decode the single returned sequence, dropping special tokens (e.g. EOS).
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response
| |
|
| | |
# Gradio UI: one text input plus a temperature slider, one text output.
interface = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(label="User Input"),
        # Minimum is a tiny positive value (not 0) because generate() is
        # called with do_sample=True, which requires temperature > 0.
        gr.Slider(minimum=0.000000000000000000000000001, maximum=1.0, value=0.7, step=0.1, label="Temperature"),
    ],
    outputs=gr.Textbox(label="Model Response"),
    title="TestOnlyRLLMv3Layer4",
    description=(
        """
        RLLMv3 is a modified <a href='https://huggingface.co/openai-community/gpt2-xl'> GPT2XL</a> that adapts a "persona" named Aligned AI (post <a href='https://www.lesswrong.com/posts/vZ5fM6FtriyyKbwi9/betterdan-ai-machiavelli-and-oppo-jailbreaks-vs-sota-models#IV__What_is_Reinforcement_Learning_using_Layered_Morphology__RLLM__'>RLLM</a> training) and defend itself from jailbreak attacks, up to 67.8%.
        For more information, check out my blogpost: <a href='https://www.lesswrong.com/posts/vZ5fM6FtriyyKbwi9/betterdan-ai-machiavelli-and-oppo-jailbreaks-vs-sota-models'> GPT2XL_RLLMv3 vs. BetterDAN, AI Machiavelli & Oppo Jailbreaks</a>.
        """
    ),
)

# Start the Gradio server (blocks until the app is stopped).
interface.launch()