# import gradio as gr
# from huggingface_hub import InferenceClient
# """
# For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
# """
# client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
# ## NoneType
# def respond(
#     message: str,
#     history: list[tuple[str, str]],  # This will not be used
#     system_message: str,
#     max_tokens: int,
#     temperature: float,
#     top_p: float,
# ):
#     messages = [{"role": "system", "content": system_message}]
#     # Append only the latest user message
#     messages.append({"role": "user", "content": message})
#     response = ""
#     try:
#         # Generate response from the model
#         # NOTE: the loop variable shadows the `message` parameter here;
#         # the "Running smoothly" version below renames it to `msg`.
#         for message in client.chat_completion(
#             messages,
#             max_tokens=max_tokens,
#             stream=True,
#             temperature=temperature,
#             top_p=top_p,
#         ):
#             if message.choices[0].delta.content is not None:
#                 token = message.choices[0].delta.content
#                 response += token
#                 yield response
#     except Exception as e:
#         yield f"An error occurred: {e}"
# demo = gr.ChatInterface(
#     respond,
#     additional_inputs=[
#         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
#         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
#         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
#         gr.Slider(
#             minimum=0.1,
#             maximum=1.0,
#             value=0.95,
#             step=0.05,
#             label="Top-p (nucleus sampling)",
#         ),
#     ],
# )
# if __name__ == "__main__":
#     demo.launch()
## Running smoothly CHATBOT
# import gradio as gr
# from huggingface_hub import InferenceClient
# """
# For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
# """
# client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
# def respond(
#     message: str,
#     history: list[tuple[str, str]],  # This will not be used
#     system_message: str,
#     max_tokens: int,
#     temperature: float,
#     top_p: float,
# ):
#     # Build the messages list
#     messages = [{"role": "system", "content": system_message}]
#     messages.append({"role": "user", "content": message})
#     response = ""
#     try:
#         # Generate response from the model
#         for msg in client.chat_completion(
#             messages=messages,
#             max_tokens=max_tokens,
#             stream=True,
#             temperature=temperature,
#             top_p=top_p,
#         ):
#             if msg.choices[0].delta.content is not None:
#                 token = msg.choices[0].delta.content
#                 response += token
#                 yield response
#     except Exception as e:
#         yield f"An error occurred: {e}"
# """
# For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
# """
# demo = gr.ChatInterface(
#     respond,
#     additional_inputs=[
#         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
#         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
#         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
#         gr.Slider(
#             minimum=0.1,
#             maximum=1.0,
#             value=0.95,
#             step=0.05,
#             label="Top-p (nucleus sampling)",
#         ),
#     ],
# )
# if __name__ == "__main__":
#     demo.launch()
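# The commented-out version above streams delta tokens from `chat_completion`.
# For reference, the same call without streaming (a minimal sketch, assuming the
# same `client` and a served chat model) returns the full message in one object:
#
#     result = client.chat_completion(
#         messages=[{"role": "user", "content": "Hello!"}],
#         max_tokens=64,
#     )
#     print(result.choices[0].message.content)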
### 26: Use a pipeline as the high-level logic
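# The heading refers to the `transformers` pipeline helper; a minimal sketch of that
# approach (assumptions: `transformers` and `torch` installed, enough memory for the
# full-precision weights, and access to the gated meta-llama repo) would be:
#
#     from transformers import pipeline
#     pipe = pipeline("text-generation", model="meta-llama/Meta-Llama-3.1-8B-Instruct")
#     print(pipe("Hello!", max_new_tokens=32)[0]["generated_text"])
#
# The code below takes a different route: it serves a quantized GGUF build through
# llama-cpp-python and llama-cpp-agent.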
import spaces
import os
import subprocess
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles
import gradio as gr
from huggingface_hub import hf_hub_download
huggingface_token = os.getenv("HF_TOKEN")
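# HF_TOKEN is read from the environment (on Spaces it would be set as a repository
# secret). The bartowski GGUF repo below is public, so the token may be None here;
# passing one mainly helps with authenticated rate limits or if a gated repo is
# swapped in.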
# Download the Meta-Llama-3.1-8B-Instruct model
hf_hub_download(
    repo_id="bartowski/Meta-Llama-3.1-8B-Instruct-GGUF",
    filename="Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf",
    local_dir="./models",
    token=huggingface_token
)
llm = None
llm_model = None
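# If this runs on ZeroGPU hardware (implied by `import spaces`), the GPU-bound
# `respond` function would typically carry the @spaces.GPU decorator; on regular
# GPU or CPU hardware it is not needed.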
def respond(
    message,
    history: list[tuple[str, str]],
    model,
    system_message,
    max_tokens,
    temperature,
    top_p,
    top_k,
    repeat_penalty,
):
    # Llama 3.1 uses the Llama 3 prompt format, so pick the matching formatter
    # (the original GEMMA_2 setting does not match this model's chat template).
    chat_template = MessagesFormatterType.LLAMA_3
    global llm
    global llm_model
    # Load model only if it's not already loaded or if a new model is selected
    if llm is None or llm_model != model:
        try:
            llm = Llama(
                model_path=f"models/{model}",
                flash_attn=True,
                n_gpu_layers=81,  # Adjust based on available GPU resources
                n_batch=1024,
                n_ctx=8192,
            )
            llm_model = model
        except Exception as e:
            return f"Error loading model: {str(e)}"
    provider = LlamaCppPythonProvider(llm)
    agent = LlamaCppAgent(
        provider,
        system_prompt=f"{system_message}",
        predefined_messages_formatter_type=chat_template,
        debug_output=True
    )
    settings = provider.get_provider_default_settings()
    settings.temperature = temperature
    settings.top_k = top_k
    settings.top_p = top_p
    settings.max_tokens = max_tokens
    settings.repeat_penalty = repeat_penalty
    settings.stream = True
    messages = BasicChatHistory()
    # Add user and assistant messages to the history
    for msn in history:
        user = {'role': Roles.user, 'content': msn[0]}
        assistant = {'role': Roles.assistant, 'content': msn[1]}
        messages.add_message(user)
        messages.add_message(assistant)
    # Stream the response
    try:
        stream = agent.get_chat_response(
            message,
            llm_sampling_settings=settings,
            chat_history=messages,
            returns_streaming_generator=True,
            print_output=False
        )
        outputs = ""
        for output in stream:
            outputs += output
            yield outputs
    except Exception as e:
        yield f"Error during response generation: {str(e)}"
description = """<p align="center">Using the Meta-Llama-3.1-8B-Instruct Model</p>"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Dropdown(
            ['Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf'],
            value="Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf",
            label="Model"
        ),
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p",
        ),
        gr.Slider(
            minimum=0,
            maximum=100,
            value=40,
            step=1,
            label="Top-k",
        ),
        gr.Slider(
            minimum=0.0,
            maximum=2.0,
            value=1.1,
            step=0.1,
            label="Repetition penalty",
        ),
    ],
    retry_btn="Retry",
    undo_btn="Undo",
    clear_btn="Clear",
    submit_btn="Send",
    title="Chat with Meta-Llama-3.1-8B-Instruct using llama.cpp",
    description=description,
    chatbot=gr.Chatbot(
        scale=1,
        likeable=False,
        show_copy_button=True
    )
)
if __name__ == "__main__":
    demo.launch()
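# Running outside of Spaces: the imports above imply roughly these packages
# (the Space's own requirements.txt, not shown here, is authoritative):
#
#     pip install gradio huggingface_hub llama-cpp-python llama-cpp-agent spaces
#     python app.py   # assuming this file is saved as app.py
#
# Note that the retry_btn/undo_btn/clear_btn and `likeable` arguments above were
# removed in Gradio 5, so a Gradio 4.x release is assumed.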