import os

os.system('pip install llama-cpp-python transformers torch')

import gradio as gr
from llama_cpp import Llama
from transformers import AutoTokenizer
from huggingface_hub import upload_file

model_id = "EternalRecursion/llm_clone_llama"

# MODEL
model = Llama.from_pretrained(
    repo_id=model_id,
    filename="model.gguf",
    verbose=True,
    n_threads=2,
    n_threads_batch=2,
    n_ctx=8192,
)

# TOKENIZER AND TEMPLATE
tokenizer = AutoTokenizer.from_pretrained(model_id)

presets = {
    # Gaslight the model by adding sentence fragments to the start
    # It's weird but it works
    # If you're curious, Default makes sure it doesn't hallucinate by showing that the next message is the start of a new convo
    # I also include "oh" and "shit" bc the model overuses them and this lets repetition penalties do their thing
    "Default": [{"from": "human", "value": "shit good convo, bye"}, {"from": "gpt", "value": "Haha oh ok cool ttyl"}],
    # I swear this is for science
    "Rizz": [{"from": "human", "value": "omg it's so hot when you flirt with me"}, {"from": "gpt", "value": "haha well you're lucky I can even string a sentence together, the way you take my breath away"}, {"from": "human", "value": "alright love you, gn!"}, {"from": "gpt", "value": "ttyl babe"}],
    "Thinky": [{"from": "human", "value": "Woah you just totally blew my mind\ngehh now the fermi paradox is going to be bugging me 24/7\nok ttyl"}, {"from": "gpt", "value": "nah our deep convos are always the best, we should talk again soon\nttyl"}],
}
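# Sanity check (illustrative only, commented out so it doesn't run on Space startup):
# render the "Default" preset through the model's chat template to confirm the
# ShareGPT-style "from"/"value" messages are handled as expected. respond() below
# prints the full rendered prompt the same way on every turn.
# print(tokenizer.apply_chat_template(presets["Default"], tokenize=False))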
def custom_lp_logits_processor(ids, logits, lp_start, lp_decay, prompt_tok_len):
    # Length penalty: once lp_start tokens have been generated, scale the EOS logit
    # by lp_decay for every additional token, nudging the model to wrap up
    generated_tok_number = len(ids) - prompt_tok_len
    if generated_tok_number > lp_start:
        print(len(ids), lp_start, pow(lp_decay, generated_tok_number - lp_start))
        logits[tokenizer.eos_token_id] *= pow(lp_decay, generated_tok_number - lp_start)
    return logits
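# Rough feel for the defaults (a sketch, not used by the app): with lp_start=10 and
# lp_decay=1.015 the EOS logit gets multiplied by 1.015**(n - 10) after n generated
# tokens, assuming that logit is positive at that point:
#   n = 40  ->  1.015**30 ≈ 1.56x
#   n = 100 ->  1.015**90 ≈ 3.82x
# so the longer a reply runs, the more likely the model is to stop.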
def respond(
    message,
    history: list[tuple[str, str]],
    preset,
    min_p,
    temperature,
    lp_start,
    lp_decay,
    frequency_penalty,
    presence_penalty,
    max_tokens,
):
    print(preset, temperature, min_p, lp_start, lp_decay, frequency_penalty, presence_penalty, max_tokens)
    # Start from the preset's fake prior conversation, then replay the chat history
    messages = presets[preset].copy()
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"from": "human", "value": user_msg})
        if bot_msg:
            messages.append({"from": "gpt", "value": bot_msg})
    messages.append({"from": "human", "value": message})
    response = ""
    print(tokenizer.apply_chat_template(messages, tokenize=False))
    convo = tokenizer.apply_chat_template(messages, tokenize=True)
    for chunk in model.create_completion(
        convo,
        temperature=temperature,
        stream=True,
        stop=["<|im_end|>"],
        min_p=min_p,
        max_tokens=max_tokens,
        # Disable top-k pruning
        top_k=100000000,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
        logits_processor=lambda ids, logits: custom_lp_logits_processor(ids, logits, lp_start, lp_decay, len(convo)),
    ):
        token = chunk["choices"][0]["text"]
        response += token
        yield response
    print(response)
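# Quick smoke test outside the UI (a sketch, commented out so it doesn't run on Space
# startup): call respond() directly with the same defaults the sliders below use.
# for partial in respond("hey, what's up?", [], "Default", 0.1, 1.5, 10, 1.015, 0.1, 0.1, 1024):
#     final = partial
# print(final)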
ci = gr.ChatInterface(
    respond,
    additional_inputs_accordion=gr.Accordion(label="Options", open=True),
    additional_inputs=[
        gr.Radio(list(presets.keys()), label="Personality preset", info="Slightly influence the model's personality [WARNING, IF YOU CHANGE THIS WHILE THERE ARE MESSAGES IN THE CHAT THE MODEL WILL BECOME VERY SLOW]", value="Default"),
        # ("The model will become slow" is bc this uncaches the prompt, and prompt processing is a big part of the generation time)
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Min_p", info="Lower values give it more \"personality\""),
        gr.Slider(minimum=0.1, maximum=4.0, value=1.5, step=0.1, label="Temperature", info="How chaotic should the model be?"),
        gr.Slider(minimum=0, maximum=512, value=10, step=1, label="Length penalty start", info="When should the model start being more likely to shut up?"),
        gr.Slider(minimum=0.5, maximum=1.5, value=1.015, step=0.001, label="Length penalty decay factor", info="How fast should that stop likelihood increase?"),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Frequency penalty", info="\"Don't repeat yourself\""),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Presence penalty", info="\"Use lots of diverse words\""),
        gr.Slider(minimum=1, maximum=1024, value=1024, step=1, label="Max new tokens", info="How many words can the model generate at most?"),
    ],
)
with gr.Blocks(css=".bubble-gap {gap: 6px !important}", theme="SebastianBravo/simci_css") as demo:
    gr.Markdown("# SamuelGPT v1.0May")
    gr.Markdown("Llama 3 8b finetuned on my conversation data, with a generous amount of code stolen from Elijah Bodden. [Train your own clone!](https://gist.github.com/Elijah-Bodden/1964bd02fcd19efef65f6e0cd92881c4)")
    gr.Markdown("IMPORTANT: THIS MODEL DOES NOT REPRESENT ME AND I AM NOT RESPONSIBLE FOR ANYTHING IT SAYS. ALSO MIGHT BE KINDA SLOW, SORRY.")
    ci.render()

if __name__ == "__main__":
    demo.launch()