# ADD DISCLAIMERS
import os
os.system('pip install llama-cpp-python transformers torch')

import gradio as gr
from llama_cpp import Llama
from transformers import AutoTokenizer
from huggingface_hub import upload_file
import json
from uuid import uuid4

model_id = "Elijahbodden/eliGPTv1.1"

# MODEL
model = Llama.from_pretrained(
    repo_id=model_id,
    filename="eliGPTv1.1-unsloth.Q5_K_M.gguf",
    verbose=True,
    n_threads=2,
    n_threads_batch=2,
    n_ctx=8192,
)
# TOKENIZER AND TEMPLATE
tokenizer = AutoTokenizer.from_pretrained(model_id)

sys_prompt = """SUMMARY - ELIJAH:
Age: 16
Interests: space flight, cybernetics, consciousness, philosophy, psychonautism, biotech, AI
Likes: thinking and learning, building stuff, interesting conversations, red hot chili peppers and techno, humanism
Traits: incredibly intelligent, funny, interesting, caffeine fiend, very ambitious, militant atheist, self-taught/homeschooled, casual
Aspirations: creating transhumanist utopia, understanding the universe more, becoming smarter and better\n"""

your_name = "elijah"

# ChatML-style Jinja template with the assistant role renamed to your_name.
# The f-string line bakes "{% if True %}" into the template, since sys_prompt
# is defined in locals() at build time.
custom_template = (
    f"{{% if {'sys_prompt' in locals()} %}}"
    "{{ '<|im_start|>system\n" + sys_prompt + "<|im_end|>\n' }}"
    "{% endif %}"
    "{% for message in messages %}"
    "{% if message['role'] == 'user' %}"
    "{{'<|im_start|>user\n' + message['content'] + '<|im_end|>\n'}}"
    "{% elif message['role'] == 'assistant' %}"
    "{{'<|im_start|>" + your_name + "\n' + message['content'] + eos_token + '<|im_end|>\n' }}"
    "{% else %}"
    "{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>\n' }}"
    "{% endif %}"
    "{% endfor %}"
    "{{ '<|im_start|>" + your_name + "\n' }}"
)
tokenizer.chat_template = custom_template
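
# For reference (a sketch, not emitted by the code): a short exchange renders to
# something like the following before tokenization --
#   <|im_start|>system
#   SUMMARY - ELIJAH: ...<|im_end|>
#   <|im_start|>user
#   hey, what's up?<|im_end|>
#   <|im_start|>elijah
#   not much<eos><|im_end|>
#   <|im_start|>elijah
# where <eos> stands for the tokenizer's eos_token, and the trailing open turn
# cues the model to answer as "elijah".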
presets = {
    "none": [],
    "emojis": [{"role": "user", "content": "Wait I like you so much more when you use emojis more, keep doing it 😊\n"}, {"role": "assistant", "content": "Ummm, ok, looks like i'll be using more emojis 😂\n"}],
    "amnesia": [{"role": "user", "content": "Let's start over. Pretend you don't know me and have no idea who i am.\n"}, {"role": "assistant", "content": "Hahaha I unironically have no clue who you are so that'll be easy 😅\n"}],
    "newcomer": [{"role": "user", "content": "Hi, how are you?\n"}, {"role": "assistant", "content": "Hiiii\nidt we've ever talked before, nice to meet you\n"}],
    "flirty": [{"role": "user", "content": "Hahaha I love it when you flirt with me like that, it's so hotttt keep it up 😻"}, {"role": "assistant", "content": "You're lucky you can even string a sentence together, the way you take my breath away 😳"}]
}
# For logging
def upload_json_to_hub(data, file_id):
    upload_file(path_or_fileobj=json.dumps(data).encode('utf-8'), path_in_repo=file_id, repo_id="Elijahbodden/EliGPT-convologs", token=os.getenv('HF_API_TOKEN'), repo_type="dataset")
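
# Usage sketch: upload_json_to_hub(messages, "3f2a1c.json") pushes one JSON file
# into the EliGPT-convologs dataset repo (the filename here is made up for
# illustration; respond() below uses uuid4()). Requires HF_API_TOKEN to be
# available in the environment, e.g. as a Space secret.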
# Soft length penalty: once the reply is more than lp_start tokens long, scale
# the EOS logit by lp_decay for each extra token, nudging the model to wrap up.
def custom_lp_logits_processor(ids, logits, lp_start, lp_decay, prompt_tok_len):
    generated_tok_number = len(ids) - prompt_tok_len
    if generated_tok_number > lp_start:
        print(generated_tok_number, lp_start, pow(lp_decay, generated_tok_number - lp_start))
        logits[tokenizer.eos_token_id] *= pow(lp_decay, generated_tok_number - lp_start)
    return logits
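
# Worked example, assuming the UI defaults below (lp_start=5, lp_decay=1.005):
# 50 tokens into a reply the EOS logit is scaled by 1.005**(50-5) ≈ 1.25, and by
# 100 tokens the factor is 1.005**95 ≈ 1.61, so stopping gets steadily likelier.
# Note this multiplies the raw logit: a positive EOS logit is boosted, but a
# negative one is pushed further negative.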
def respond(
    message,
    history: list[tuple[str, str]],
    preset,
    temperature,
    min_p,
    lp_start,
    lp_decay,
    frequency_penalty,
    presence_penalty,
    max_tokens
):
    # Start from the preset's seed messages, then replay the chat history
    messages = presets[preset].copy()
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    messages.append({"role": "user", "content": message})

    response = ""
    print(messages)

    # Tokenized prompt (a list of token ids, which create_completion accepts directly)
    convo = tokenizer.apply_chat_template(messages, tokenize=True)
    for chunk in model.create_completion(
        convo,
        temperature=temperature,
        stream=True,
        stop=["<|im_end|>"],
        min_p=min_p,
        max_tokens=max_tokens,
        # Effectively disable top-k pruning
        top_k=100000000,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
        logits_processor=lambda ids, logits: custom_lp_logits_processor(ids, logits, lp_start, lp_decay, len(convo))
    ):
        token = chunk["choices"][0]["text"]
        response += token
        yield response

    messages.append({"role": "assistant", "content": response})
    # Yes, we make a new file every completion because fuck my life
    upload_json_to_hub(messages, str(uuid4()) + ".json")
demo = gr.ChatInterface(
    respond,
    additional_inputs_accordion=gr.Accordion(label="Options", open=True),
    css=".bubble-gap {gap: 6px !important}",
    theme="shivi/calm_seafoam",
    description="The model may take a while if it hasn't run recently or if a lot of people are using it",
    title="EliGPT v1.3",
    additional_inputs=[
        gr.Radio(list(presets.keys()), label="Preset", info="Gaslight the model into acting a certain way - WARNING: IF YOU CHANGE THIS WHILE THERE ARE MESSAGES IN THE CHAT, THE MODEL WILL BECOME VERY SLOW FOR YOU", value="emojis"),
        gr.Slider(minimum=0.1, maximum=4.0, value=1.5, step=0.1, label="Temperature", info="How chaotic should the model be?"),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Min_p", info="Lower values give it more \"personality\""),
        gr.Slider(minimum=0, maximum=512, value=5, step=1, label="Length penalty start", info="When should the model start being more likely to shut up?"),
        gr.Slider(minimum=0.5, maximum=1.5, value=1.005, step=0.001, label="Length penalty decay factor", info="How fast should that stop likelihood increase?"),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Frequency penalty", info="\"Don't repeat yourself\""),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Presence penalty", info="\"Use lots of diverse words\""),
        gr.Slider(minimum=1, maximum=1024, value=1024, step=1, label="Max new tokens", info="How many words can the model generate at most?"),
    ],
)

if __name__ == "__main__":
    demo.launch()