"""
Contain Classes for LLM inference for RAG pipeline.
"""
### ** Make input output tokens as class properties **
import json

from openai import OpenAI

from .utils import count_tokens


class rLLM:
def __init__(self, llm_name: str, api_key: str) -> None:
self.llm_name = llm_name
        # Together AI exposes an OpenAI-compatible API, so the OpenAI client
        # is simply pointed at it via base_url.
        self.llm_client = OpenAI(
            api_key=api_key, base_url="https://api.together.xyz/v1"
        )
        # Load the RAG system prompt from components/prompts.json.
        with open("components/prompts.json", "r") as file:
            self.sys_prompt = json.load(file)["SysPrompt"]
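        # Assumed shape of components/prompts.json, inferred from the lookup
        # above (illustrative, not copied from the repo):
        #
        #     {"SysPrompt": "You are a helpful legal compliance assistant ..."}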

    def generate_rag_response(self, context: str, prompt: str, message_history):
        """
        Generates a natural language response to the user query (prompt) based on
        the provided context and message history, in Q&A style.
        """
        system_prompt = self.sys_prompt
messages = [
{"role": "system", "content": system_prompt},
]
        # Forward up to the last five prior turns (excluding the current user
        # message) for conversational context; assistant entries may carry
        # extra keys, so only role and content are kept.
        for message in message_history[-6:-1]:
            if message["role"] == "assistant":
                messages.append({"role": "assistant", "content": message["content"]})
            else:
                messages.append(message)
        messages.append(
            {"role": "user", "content": f"CONTEXT:\n{context}\n\nQUERY:\n{prompt}"},
        )
stream = self.llm_client.chat.completions.create(
model=self.llm_name,
messages=messages,
stream=True,
)
output = ""
for chunk in stream:
if chunk.choices[0].delta.content is not None:
content = chunk.choices[0].delta.content
output += content
yield 0, content
input_token_count = count_tokens(
string="\n".join([i["content"] for i in messages])
)
output_token_count = count_tokens(string=output)
yield 1, (output, input_token_count, output_token_count)

    def HyDE(self, query: str, message_history):
        """
        HyDE (Hypothetical Document Embeddings): rephrases/rewrites the user query
        into a hypothetical answer with richer semantics, improving semantic-search
        based retrieval.
        """
system_prompt = """You are an AI assistant specifically designed to generate hypothetical answers for semantic search. Your primary function is to create concise Maximum 100-150 words, informative, and relevant responses to user queries. Make sure to capture the original intent of the user query (by including keywords present in user query) as these responses will be used to generate embeddings for improved semantic search results.
"""
messages = [
{"role": "system", "content": system_prompt},
]
for message in message_history[-6:-1]:
if message["role"] == "assistant":
messages.append({"role": "assistant", "content": message["content"]})
else:
messages.append(message)
messages.append(
{"role": "user", "content": f"\n\nQUERY:\n{query}"},
)
        # Query expansion uses a fixed, smaller model rather than self.llm_name.
        response = self.llm_client.chat.completions.create(
            model="meta-llama/Llama-3-8b-chat-hf",
messages=messages,
max_tokens=500,
)
        return response.choices[0].message.content
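    # HyDE usage sketch (assumed surrounding pipeline, not code from this repo):
    # the hypothetical answer is embedded in place of the raw query, and that
    # vector drives retrieval, e.g.
    #
    #     hypothetical = llm.HyDE(query, message_history)
    #     query_vector = embedder.encode(hypothetical)   # `embedder` is hypothetical
    #     hits = vector_store.search(query_vector, k=5)  # as is `vector_store`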

    ### NOT IN USE
    def generate_rag_chat_response(self, context: str, prompt: str, message_history):
        """
        NOT IN USE CURRENTLY.
        Generates a natural language response to the user query (prompt) based on
        the provided context and message history, in chat style.
        """
system_prompt = """You are a helpful legal compliance CHAT assistant designed to answer and resolve user query in chat format hence quick and *small* responses.
Instructions:
1. Use the provided CONTEXT to inform your responses, citing specific parts when relevant.
2. If unable to answer the QUERY, politely inform the user and suggest what additional information might help.
3. Give a small/chatty format response.
4. Try to give decisive responses that can help user to make informed decision.
5. Format responses for readability, include bold words to give weightage.
Don't add phrases like "According to provided context.." etcectra. """
messages = [
{"role": "system", "content": system_prompt},
]
for message in message_history[-6:-1]:
if message["role"] == "assistant":
messages.append({"role": "assistant", "content": message["content"]})
else:
messages.append(message)
messages.append(
{"role": "user", "content": f"CONTEXT:\n{context}\n\nQUERY:\n{prompt}"},
)
stream = self.llm_client.chat.completions.create(
model=self.llm_name,
messages=messages,
stream=True,
)
output = ""
for chunk in stream:
if chunk.choices[0].delta.content is not None:
content = chunk.choices[0].delta.content
output += content
yield 0, content
input_token_count = count_tokens(
string="\n".join([i["content"] for i in messages])
)
output_token_count = count_tokens(string=output)
yield 1, (output, input_token_count, output_token_count)

    def rephrase_query(self, query: str, message_history):
        """
        NOT IN USE CURRENTLY.
        Rephrases/rewrites the user query to include more semantics, improving
        semantic-search based retrieval.
        """
system_prompt = """You are an AI assistant specifically designed to rewrite the user QUERY for semantic search. Your primary function is to create more comprehensive, semantically rich query *while maintaining the original intent of the user*. These responses will be used to generate embeddings for improved semantic search results.
Do not include any other comments or text, other than the repharased/rewritten query.
"""
messages = [
{"role": "system", "content": system_prompt},
]
for message in message_history[-6:-1]:
if message["role"] == "assistant":
messages.append({"role": "assistant", "content": message["content"]})
else:
messages.append(message)
messages.append(
{"role": "user", "content": f"\n\nQUERY:\n{query}"},
)
response = self.llm_client.chat.completions.create(
model="meta-llama/Llama-3-8b-chat-hf",
messages=messages,
)
        return response.choices[0].message.content
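

# Minimal usage sketch (illustrative addition, not part of the original module).
# The model id and environment variable below are assumptions; run this as a
# module (python -m ...) so the relative .utils import resolves.
if __name__ == "__main__":
    import os

    llm = rLLM(
        llm_name="meta-llama/Llama-3-70b-chat-hf",  # assumed Together AI model id
        api_key=os.environ["TOGETHER_API_KEY"],  # assumed env var name
    )
    history = [{"role": "user", "content": "What is GDPR?"}]
    for kind, payload in llm.generate_rag_response(
        context="GDPR is the EU regulation on data protection and privacy.",
        prompt="Summarise GDPR in one line.",
        message_history=history,
    ):
        if kind == 0:
            print(payload, end="", flush=True)  # streamed text fragment
        else:
            # final yield: (full_output, input_token_count, output_token_count)
            text, in_tokens, out_tokens = payload
            print(f"\n[tokens in={in_tokens}, out={out_tokens}]")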