---
library_name: peft
license: mit
---
## Training: only 2000/5000 steps complete

The following `bitsandbytes` quantization config was used during training:
- quant_method: QuantizationMethod.BITS_AND_BYTES
- load_in_8bit: False
- load_in_4bit: True
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: nf4
- bnb_4bit_use_double_quant: True
- bnb_4bit_compute_dtype: bfloat16
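
For reference, the list above corresponds to a `BitsAndBytesConfig` like the sketch below. This is my reconstruction, not the original training script, and it assumes a recent `transformers` release:

```
import torch
from transformers import BitsAndBytesConfig

# Sketch of the config listed above; the remaining fields keep their defaults.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # 4-bit weights (load_in_8bit stays False)
    bnb_4bit_quant_type="nf4",              # NormalFloat4 quantization
    bnb_4bit_use_double_quant=True,         # also quantize the quantization constants
    bnb_4bit_compute_dtype=torch.bfloat16,  # run matmuls in bfloat16
)
# Pass it via AutoModelForCausalLM.from_pretrained(..., quantization_config=bnb_config)
```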
### Framework versions

- PEFT 0.6.0.dev0
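
You can confirm your installed PEFT version matches (a trivial check, not from the original card):

```
import peft
print(peft.__version__)  # expect something like 0.6.0.dev0 for parity with training
```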

To load the model, start a Jupyter notebook and run the code below; it comes in two parts (model loading, then the Gradio UI).

```
!pip install -q -U bitsandbytes
!pip install -q -U git+https://github.com/huggingface/transformers.git
!pip install -q -U git+https://github.com/huggingface/peft.git
!pip install -q -U git+https://github.com/huggingface/accelerate.git
!pip install -q -U gradio
!pip install -q -U sentencepiece

import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer

model_name = "TheBloke/CodeLlama-34B-Instruct-fp16"
adapters_name = 'nisten/bigdoc-c34b-python-v1'

print(f"Starting to load the model {model_name} into memory")

m = AutoModelForCausalLM.from_pretrained(
    model_name,
    # load_in_4bit=True,  # ~19GB in 4-bit; ~38GB with load_in_8bit=True; ~67GB in full bf16 if you leave this commented out
    torch_dtype=torch.bfloat16,
    device_map={"": 0},
)
m = PeftModel.from_pretrained(m, adapters_name)
m = m.merge_and_unload()  # merge the LoRA weights into the base model
tok = AutoTokenizer.from_pretrained(model_name)
eos_token_id = tok.convert_tokens_to_ids('</s>')  # '</s>' is Llama's end-of-sequence token
tok.eos_token = '</s>'
tok.pad_token = tok.eos_token
tok.padding_side = 'right'
tok.eos_token_id = eos_token_id
stop_token_ids = eos_token_id  # read by the stopping criteria in part 2

print(f"Successfully loaded the model {model_name} into memory")
```
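
Optionally, sanity-check the merged model before wiring up the UI. This is a minimal sketch of my own; the prompt is illustrative, following the Question:/Answer: format the UI uses below:

```
# Hypothetical smoke test, assuming the loading cell above ran successfully.
prompt = "Question: What is the first-line treatment for strep throat?\n\n Answer:"
inputs = tok(prompt, return_tensors="pt").to(m.device)
out = m.generate(**inputs, max_new_tokens=64, do_sample=False)
print(tok.decode(out[0], skip_special_tokens=True))
```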

### The Gradio UI

```
# Should all work in one click
import datetime
import os
from threading import Event, Thread
from uuid import uuid4

import torch  # needed by StopOnTokens' type hints; already imported in part 1
from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
import gradio as gr
import requests

max_new_tokens = 2369
start_message = """A chat between a chill human asking ( Question: ) and an AI doctor ( Answer: ). The doctor answers in helpful, detailed, and exhaustively nerdy extensive answers to the user's every medical Question:"""

class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        if isinstance(stop_token_ids, (list, torch.Tensor)):
            for stop_id in stop_token_ids:
                if stop_id in input_ids[0]:
                    return True
        else:  # Assumes scalar
            if input_ids[0][-1] == stop_token_ids:
                return True
        return False

def convert_history_to_text(history):
    # Flatten the whole chat history (including the in-progress last turn)
    # into the Question:/Answer: transcript format.
    text = start_message + "".join(
        [
            "".join(
                [
                    f" Question: {item[0]}\n",
                    f"\n\n Answer: {item[1]}\n",
                ]
            )
            for item in history
        ]
    )
    return text

def log_conversation(conversation_id, history, messages, generate_kwargs):
    logging_url = os.getenv("LOGGING_URL", None)
    if logging_url is None:
        return

    timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")

    data = {
        "conversation_id": conversation_id,
        "timestamp": timestamp,
        "history": history,
        "messages": messages,
        "generate_kwargs": generate_kwargs,
    }

    try:
        requests.post(logging_url, json=data)
    except requests.exceptions.RequestException as e:
        print(f"Error logging conversation: {e}")

def user(message, history):
    # Append the user's message to the conversation history
    return "", history + [[message, ""]]

def bot(history, temperature, top_p, top_k, repetition_penalty, conversation_id):
    print(f"history: {history}")
    # Initialize a StopOnTokens object
    stop = StopOnTokens()

    # Construct the input message string for the model by concatenating the current system message and conversation history
    messages = convert_history_to_text(history)

    # Tokenize the messages string
    input_ids = tok(messages, return_tensors="pt").input_ids
    input_ids = input_ids.to(m.device)
    streamer = TextIteratorStreamer(tok, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        do_sample=temperature > 0.0,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        streamer=streamer,
        stopping_criteria=StoppingCriteriaList([stop]),
    )

    stream_complete = Event()

    def generate_and_signal_complete():
        m.generate(**generate_kwargs)
        stream_complete.set()

    def log_after_stream_complete():
        stream_complete.wait()
        log_conversation(
            conversation_id,
            history,
            messages,
            {
                "top_k": top_k,
                "top_p": top_p,
                "temperature": temperature,
                "repetition_penalty": repetition_penalty,
            },
        )

    t1 = Thread(target=generate_and_signal_complete)
    t1.start()

    t2 = Thread(target=log_after_stream_complete)
    t2.start()

    # Initialize an empty string to store the generated text
    partial_text = ""
    for new_text in streamer:
        partial_text += new_text
        history[-1][1] = partial_text
        yield history

def get_uuid():
    return str(uuid4())

with gr.Blocks(
    theme=gr.themes.Soft(),
    css=".disclaimer {font-variant-caps: all-small-caps;}",
) as demo:
    conversation_id = gr.State(get_uuid)
    gr.Markdown(
        """<h1><center>Nisten's 34b Doctor v1</center></h1>
"""
    )
    chatbot = gr.Chatbot().style(height=969)  # .style() is the gradio 3.x API
    with gr.Row():
        with gr.Column():
            msg = gr.Textbox(
                label="Chat Message Box",
                placeholder="Chat Message Box",
                show_label=False,
            ).style(container=False)
        with gr.Column():
            with gr.Row():
                submit = gr.Button("Submit")
                stop = gr.Button("Stop")
                clear = gr.Button("Clear")
    with gr.Row():
        with gr.Accordion("Advanced Options:", open=False):
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        temperature = gr.Slider(
                            label="Temperature",
                            value=0.7,
                            minimum=0.0,
                            maximum=1.0,
                            step=0.1,
                            interactive=True,
                            info="Higher values produce more diverse outputs",
                        )
                with gr.Column():
                    with gr.Row():
                        top_p = gr.Slider(
                            label="Top-p (nucleus sampling)",
                            value=0.9,
                            minimum=0.0,
                            maximum=1.0,
                            step=0.01,
                            interactive=True,
                            info=(
                                "Sample from the smallest possible set of tokens whose cumulative probability "
                                "exceeds top_p. Set to 1 to disable and sample from all tokens."
                            ),
                        )
                with gr.Column():
                    with gr.Row():
                        top_k = gr.Slider(
                            label="Top-k",
                            value=0,
                            minimum=0.0,
                            maximum=200,
                            step=1,
                            interactive=True,
                            info="Sample from a shortlist of top-k tokens; set to 0 to disable and sample from all tokens.",
                        )
                with gr.Column():
                    with gr.Row():
                        repetition_penalty = gr.Slider(
                            label="Repetition Penalty",
                            value=1.1,
                            minimum=1.0,
                            maximum=2.0,
                            step=0.1,
                            interactive=True,
                            info="Penalize repetition; set to 1.0 to disable.",
                        )
    with gr.Row():
        gr.Markdown(
            "Disclaimer: The model can produce factually incorrect output, and should not be relied on to produce "
            "factually accurate information. The model was trained on various public datasets; while great efforts "
            "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
            "biased, or otherwise offensive outputs.",
            elem_classes=["disclaimer"],
        )
    with gr.Row():
        gr.Markdown(
            "[Privacy policy](https://gist.github.com/samhavens/c29c68cdcd420a9aa0202d0839876dac)",
            elem_classes=["disclaimer"],
        )

    submit_event = msg.submit(
        fn=user,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=False,
    ).then(
        fn=bot,
        inputs=[
            chatbot,
            temperature,
            top_p,
            top_k,
            repetition_penalty,
            conversation_id,
        ],
        outputs=chatbot,
        queue=True,
    )
    submit_click_event = submit.click(
        fn=user,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=False,
    ).then(
        fn=bot,
        inputs=[
            chatbot,
            temperature,
            top_p,
            top_k,
            repetition_penalty,
            conversation_id,
        ],
        outputs=chatbot,
        queue=True,
    )
    stop.click(
        fn=None,
        inputs=None,
        outputs=None,
        cancels=[submit_event, submit_click_event],
        queue=False,
    )
    clear.click(lambda: None, None, chatbot, queue=False)

demo.queue(max_size=128, concurrency_count=2)
demo.launch(share=True)  # remove share=True to keep the app private
```
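
For reference, `convert_history_to_text` feeds the model a flat transcript in this shape (illustrative; angle-bracket placeholders stand in for the system prompt and the actual turns):

```
<start_message> Question: <first user message>


 Answer: <first model reply>
 Question: <latest user message>


 Answer: 
```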