import os

import gradio as gr
from llama_cpp import Llama
from qdrant_client import QdrantClient
from sentence_transformers import SentenceTransformer
# Load a 4-bit quantized Mistral-7B-Instruct-v0.3 GGUF from the Hugging Face Hub.
llm = Llama.from_pretrained(
    repo_id="Suku0/mistral-7b-instruct-v0.3-bnb-4bit-GGUF",
    filename="mistral-7b-instruct-v0.3-bnb-4bit.Q4_K_M.gguf",
    n_ctx=16384,
)
# nomic-embed-text-v1.5 requires trust_remote_code for its custom modeling code.
embedding_model = SentenceTransformer("nomic-ai/nomic-embed-text-v1.5", trust_remote_code=True)

# Read the Qdrant API key from the environment instead of hard-coding the secret.
qdrant_client = QdrantClient(
    url="https://9a5cbf91-7dac-4dd0-80f6-13e512da1060.europe-west3-0.gcp.cloud.qdrant.io:6333",
    api_key=os.environ["QDRANT_API_KEY"],
)
def retrieve_context(query):
    """Embed the query and fetch the top-matching chunks from Qdrant."""
    # Note: nomic-embed-text-v1.5 is trained with task prefixes ("search_query: ",
    # "search_document: "); add them here only if the collection was indexed with
    # the matching document prefix.
    query_vector = embedding_model.encode(query).tolist()
    search_result = qdrant_client.search(
        collection_name="ctx_collection",
        query_vector=query_vector,
        limit=10,
        with_payload=True,
    )
    # Join the retrieved chunks into a single context string.
    return "\n\n".join(hit.payload["text"] for hit in search_result)
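# The collection queried above is assumed to already exist. The helper below is
# a minimal, hypothetical indexing sketch (never called by this app) showing how
# "ctx_collection" could be populated; the 768-dim cosine configuration matches
# nomic-embed-text-v1.5's default output size.
def build_ctx_collection(chunks):
    from qdrant_client.models import Distance, PointStruct, VectorParams

    qdrant_client.create_collection(
        collection_name="ctx_collection",
        vectors_config=VectorParams(size=768, distance=Distance.COSINE),
    )
    qdrant_client.upsert(
        collection_name="ctx_collection",
        points=[
            PointStruct(
                id=i,
                vector=embedding_model.encode(chunk).tolist(),
                payload={"text": chunk},
            )
            for i, chunk in enumerate(chunks)
        ],
    )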
def respond(message, history, system_message, max_tokens, temperature, top_p):
    # `history` is supplied by gr.ChatInterface but is not used here: each turn
    # is answered independently from the retrieved context.
    context = retrieve_context(message)
    prompt = f"""{system_message} Please answer the user's question based on the given context. If the context does not contain the answer, say that the context does not provide one.

### Context:
{context}

### Question:
{message}

### Answer:
"""
    # Wire the UI controls through to llama.cpp instead of hard-coding them.
    response = llm(
        prompt,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stop=["###"],  # stop before the model starts a new section header
    )
    return response["choices"][0]["text"]
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)
if __name__ == "__main__":
    demo.launch()
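# To run locally (assumes this file is saved as app.py and that "ctx_collection"
# already exists in the Qdrant cluster):
#
#   export QDRANT_API_KEY=...   # the cluster key, no longer hard-coded above
#   python app.py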