# TODO: add disclaimers and logging
# Q: why is this model so fucking slow? A: because i'm not made of money

import gradio as gr
import os

# Install runtime deps at startup (crude, but fine for a throwaway Space)
os.system('pip install llama-cpp-python transformers torch')

from llama_cpp import Llama, LogitsProcessorList
from transformers import AutoTokenizer
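
# transformers ships an ExponentialDecayLengthPenalty, but it expects torch
# tensors, while llama-cpp-python hands its logits processors 1-D numpy arrays
# (input_ids, scores). The class below is a minimal numpy sketch of the same
# idea (a hand-rolled helper, not a class from either library): once generation
# runs past `regulation_start` tokens beyond the prompt, the eos logit is
# boosted by decay_factor ** tokens_past_start, nudging the model to wrap up.
class NumpyExponentialDecayLengthPenalty:
    def __init__(self, regulation_start, decay_factor, eos_token_id, prompt_length):
        self.regulation_start = regulation_start
        self.decay_factor = decay_factor
        self.eos_token_id = eos_token_id
        self.prompt_length = prompt_length

    def __call__(self, input_ids, scores):
        tokens_past_start = len(input_ids) - self.prompt_length - self.regulation_start
        if tokens_past_start > 0:
            # Mirrors the transformers formula: abs(eos score) scaled by an
            # exponential in how far past the regulation start we are
            scores[self.eos_token_id] += abs(scores[self.eos_token_id]) * (
                self.decay_factor ** tokens_past_start - 1
            )
        return scores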
model_id = "Elijahbodden/eliGPTv1.1"

# MODEL
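# from_pretrained pulls the GGUF file straight from the Hub repo;
# Q5_K_M is a 5-bit k-quant, and 2 threads suits a small free-tier CPU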
model = Llama.from_pretrained(
    repo_id=model_id,
    filename="eliGPTv1.1-unsloth.Q5_K_M.gguf",
    verbose=True,
    n_threads=2,
    n_threads_batch=2,
    n_ctx=8192,
)

# TOKENIZER AND TEMPLATE
tokenizer = AutoTokenizer.from_pretrained(model_id)

sys_prompt = """SUMMARY - ELIJAH:
Age: 16
Interests: space flight, cybernetics, consciousness, philosophy, psychonautism, biotech, AI
Likes: thinking and learning, building stuff, interesting conversations, red hot chili peppers and techno, humanism
Traits: incredibly intelligent, funny, interesting, caffeine fiend, very ambitious, militant atheist, self-taught/homeschooled, casual
Aspirations: creating transhumanist utopia, understanding the universe more, becoming smarter and better\n"""
your_name = "elijah"

# ChatML-style template with the assistant role renamed to `your_name`.
# The system prompt is baked into the template string at build time;
# `eos_token` is supplied by apply_chat_template when rendering.
custom_template = (
    "{{ '<|im_start|>system\n" + sys_prompt + "<|im_end|>\n' }}"
    "{% for message in messages %}"
    "{% if message['role'] == 'user' %}"
    "{{ '<|im_start|>user\n' + message['content'] + '<|im_end|>\n' }}"
    "{% elif message['role'] == 'assistant' %}"
    "{{ '<|im_start|>" + your_name + "\n' + message['content'] + eos_token + '<|im_end|>\n' }}"
    "{% else %}"
    "{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>\n' }}"
    "{% endif %}"
    "{% endfor %}"
    "{{ '<|im_start|>" + your_name + "\n' }}"
)
tokenizer.chat_template = custom_template
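
# Rendered, a short chat comes out roughly like this (illustrative):
#   <|im_start|>system
#   SUMMARY - ELIJAH: ...
#   <|im_end|>
#   <|im_start|>user
#   hey, what's up?
#   <|im_end|>
#   <|im_start|>elijah
# with generation picking up after that final open assistant header.

# Presets are short canned exchanges prepended to the conversation to nudge
# the model's behavior (cheap few-shot steering)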
presets = {
    "none": [],
    "emojis": [{"role": "user", "content": "Wait I like you so much more when you use emojis more, keep doing it π\n"}, {"role": "assistant", "content": "Ummm, ok, looks like i'll be using more emojis π\n"}],
    "amnesia": [{"role": "user", "content": "Let's start over. Pretend you don't know me and have no idea who i am.\n"}, {"role": "assistant", "content": "Hahaha I unironically have no clue who you are so that'll be easy π\n"}],
    "newcomer": [{"role": "user", "content": "Hi, how are you?\n"}, {"role": "assistant", "content": "Hiiii!\n I don't think we've ever talked before, nice to meet you\n"}],
}

def respond(
    message,
    history: list[tuple[str, str]],
    temperature,
    mirostat_tau,
    mirostat_eta,
    frequency_penalty,
    presence_penalty,
    lp_start,
    lp_decay,
    max_tokens,
    preset,
):
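    # Rebuild the full message list: preset exchange first, then the visible
    # chat history (gr.ChatInterface passes it as (user, bot) tuple pairs)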
    messages = presets[preset].copy()
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    messages.append({"role": "user", "content": message})

    response = ""
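    # tokenize=True yields token ids; create_completion accepts ids directly,
    # and len(convo) marks where the prompt ends for the length penalty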
    convo = tokenizer.apply_chat_template(messages, tokenize=True)
    # print(convo)
    for chunk in model.create_completion(
        convo,
        temperature=temperature,
        stream=True,
        stop=["<|im_end|>"],
        mirostat_mode=1,
        mirostat_tau=mirostat_tau,
        mirostat_eta=mirostat_eta,
        max_tokens=max_tokens,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
        # The hand-rolled numpy penalty defined above; llama-cpp calls the
        # list object itself, hence the LogitsProcessorList wrapper
        logits_processor=LogitsProcessorList([
            NumpyExponentialDecayLengthPenalty(lp_start, lp_decay, tokenizer.eos_token_id, len(convo))
        ]),
    ):
        token = chunk["choices"][0]["text"]
        response += token
        yield response
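
# Because respond() is a generator, ChatInterface streams the reply: each
# yielded string replaces the partial bot message instead of appending to it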
| """ | |
| For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface | |
| """ | |
demo = gr.ChatInterface(
    respond,
    additional_inputs_accordion="The juicy stuff (settings)",
    css=".bubble-gap {gap: 6px !important}",
    description="The model may take a while if it hasn't run recently or a lot of people are using it",
    title="EliGPT v1.idon'tfuckingknow",
    additional_inputs=[
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature", info="How chaotic should the model be?"),
        gr.Slider(
            minimum=0.0,
            maximum=10.0,
            value=3.0,
            step=0.5,
            label="Mirostat tau",
            info="Basically, how many drugs should the model be on? (the target 'surprise' level; higher = weirder)"
        ),
        gr.Slider(
            minimum=0.0,
            maximum=1.0,
            value=0.1,
            step=0.01,
            label="Mirostat eta",
            info="How fast mirostat adapts toward that target (its learning rate)"
        ),
        gr.Slider(
            minimum=0.0,
            maximum=1.0,
            value=0.1,
            step=0.01,
            label="Frequency penalty",
            info='"Don\'t repeat yourself"'
        ),
        gr.Slider(
            minimum=0.0,
            maximum=1.0,
            value=0.0,
            step=0.01,
            label="Presence penalty",
            info='"Use lots of diverse words"'
        ),
        gr.Slider(
            minimum=0,
            maximum=512,
            value=10,
            step=1,
            label="Length penalty start",
            info="When should the model start being more likely to shut up?"
        ),
        gr.Slider(
            minimum=0.5,
            maximum=1.5,
            value=1.02,
            step=0.01,
            label="Length penalty decay factor",
            info="How fast should the stop likelihood increase?"
        ),
        gr.Slider(minimum=1, maximum=1024, value=256, step=1, label="Max new tokens", info="How many tokens can the model generate?"),
        gr.Radio(list(presets.keys()), label="Preset", info="Gaslight the model into acting a certain way", value="none")
    ],
)
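
# Streaming generators need gradio's queue; recent gradio versions enable
# queueing by default, which is why there's no explicit demo.queue() call here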
if __name__ == "__main__":
    demo.launch()