Kotta committed on
Commit ·
a8b45b8
1
Parent(s): d2afac0
feature(#8): implemented falcon model interface and http response error codes as well.
Browse files- src/common/assembler.py +2 -1
- src/common/brain_exception.py +14 -0
- src/common/http_response_codes.py +52 -0
- src/common/utils.py +2 -0
- src/model/requests/request_model.py +1 -0
- src/rising_plugin/guardrails-config/actions/actions.py +19 -5
- src/rising_plugin/llm/__init__.py +0 -0
- src/rising_plugin/llm/falcon_llm.py +29 -0
- src/rising_plugin/llm/gpt_llm.py +26 -0
- src/rising_plugin/llm/llms.py +57 -0
- src/rising_plugin/risingplugin.py +3 -2
- src/router/api.py +10 -5
src/common/assembler.py
CHANGED
|
@@ -1,6 +1,7 @@
|
|
| 1 |
# assembler to mapping data into another data type.
|
| 2 |
from typing import Any, List
|
| 3 |
|
|
|
|
| 4 |
from src.model.basic_model import BasicModel
|
| 5 |
from src.model.contact_model import ContactModel
|
| 6 |
from src.model.message_model import MessageModel
|
|
@@ -18,7 +19,7 @@ class Assembler:
|
|
| 18 |
"""mapping to http response"""
|
| 19 |
|
| 20 |
def to_response(self, code, message, result) -> Any:
|
| 21 |
-
response = {"message":
|
| 22 |
return response
|
| 23 |
|
| 24 |
"""mapping data to a collection of MessageModel"""
|
|
|
|
| 1 |
# assembler to mapping data into another data type.
|
| 2 |
from typing import Any, List
|
| 3 |
|
| 4 |
+
from src.common.http_response_codes import responses
|
| 5 |
from src.model.basic_model import BasicModel
|
| 6 |
from src.model.contact_model import ContactModel
|
| 7 |
from src.model.message_model import MessageModel
|
|
|
|
| 19 |
"""mapping to http response"""
|
| 20 |
|
| 21 |
def to_response(self, code, message, result) -> Any:
    """Assemble the standard HTTP response payload for a status code.

    The human-readable message is looked up from the shared `responses`
    table keyed by `code`.

    NOTE(review): the `message` parameter is accepted but never used --
    confirm callers expect the table lookup to win over their argument.
    """
    return {"message": responses[code], "result": result, "status_code": code}
|
| 24 |
|
| 25 |
"""mapping data to a collection of MessageModel"""
|
src/common/brain_exception.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Basic Exception in Brain"""
|
| 2 |
+
from typing import Any
|
| 3 |
+
|
| 4 |
+
from src.common.http_response_codes import responses
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class BrainException(Exception):
|
| 8 |
+
def __init__(self, message: str = "Exception occurred in brain"):
|
| 9 |
+
self.message = message
|
| 10 |
+
super().__init__(self.message)
|
| 11 |
+
|
| 12 |
+
def get_response_exp(self) -> Any:
|
| 13 |
+
responses[506] = ("Brain Exception", self.message)
|
| 14 |
+
return {"message": responses[506], "result": "", "status_code": 506}
|
src/common/http_response_codes.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# HTTP status codes mapped to (title, explanation) pairs; consumed by
# Assembler.to_response to build uniform API payloads. Mirrors the stdlib
# http.server responses table, extended with the project-specific 506 entry.
responses = {
    100: ("Continue", "Request received, please continue"),
    101: ("Switching Protocols", "Switching to new protocol; obey Upgrade header"),
    200: ("OK", "Request fulfilled, document follows"),
    201: ("Created", "Document created, URL follows"),
    202: ("Accepted", "Request accepted, processing continues off-line"),
    203: ("Non-Authoritative Information", "Request fulfilled from cache"),
    204: ("No Content", "Request fulfilled, nothing follows"),
    205: ("Reset Content", "Clear input form for further input."),
    206: ("Partial Content", "Partial content follows."),
    300: ("Multiple Choices", "Object has several resources -- see URI list"),
    301: ("Moved Permanently", "Object moved permanently -- see URI list"),
    302: ("Found", "Object moved temporarily -- see URI list"),
    303: ("See Other", "Object moved -- see Method and URL list"),
    304: ("Not Modified", "Document has not changed since given time"),
    305: (
        "Use Proxy",
        "You must use proxy specified in Location to access this " "resource.",
    ),
    307: ("Temporary Redirect", "Object moved temporarily -- see URI list"),
    400: ("Bad Request", "Bad request syntax or unsupported method"),
    401: ("Unauthorized", "No permission -- see authorization schemes"),
    402: ("Payment Required", "No payment -- see charging schemes"),
    403: ("Forbidden", "Request forbidden -- authorization will not help"),
    404: ("Not Found", "Nothing matches the given URI"),
    405: ("Method Not Allowed", "Specified method is invalid for this server."),
    406: ("Not Acceptable", "URI not available in preferred format."),
    407: (
        "Proxy Authentication Required",
        "You must authenticate with " "this proxy before proceeding.",
    ),
    408: ("Request Timeout", "Request timed out; try again later."),
    409: ("Conflict", "Request conflict."),
    410: ("Gone", "URI no longer exists and has been permanently removed."),
    411: ("Length Required", "Client must specify Content-Length."),
    412: ("Precondition Failed", "Precondition in headers is false."),
    413: ("Request Entity Too Large", "Entity is too large."),
    414: ("Request-URI Too Long", "URI is too long."),
    415: ("Unsupported Media Type", "Entity body in unsupported format."),
    416: ("Requested Range Not Satisfiable", "Cannot satisfy request range."),
    417: ("Expectation Failed", "Expect condition could not be satisfied."),
    500: ("Internal Server Error", "Server got itself in trouble"),
    501: ("Not Implemented", "Server does not support this operation"),
    502: ("Bad Gateway", "Invalid responses from another server/proxy."),
    503: (
        "Service Unavailable",
        "The server cannot process the request due to a high load",
    ),
    504: ("Gateway Timeout", "The gateway server did not receive a timely response"),
    505: ("HTTP Version Not Supported", "Cannot fulfill request."),
    # BUG FIX: ("Brain Exception") is just a parenthesized string, not a
    # tuple, so responses[506] had a different shape from every other entry.
    # Give it a (title, detail) pair like the rest of the table.
    506: ("Brain Exception", "Exception occurred in brain"),
}
|
src/common/utils.py
CHANGED
|
@@ -35,6 +35,8 @@ COMMAND_BROWSER_OPEN = [10]
|
|
| 35 |
# Twilio
|
| 36 |
ACCOUNT_SID = os.getenv("TWILIO_ACCOUNT_SID")
|
| 37 |
AUTH_TOKEN = os.getenv("TWILIO_AUTH_TOKEN")
|
|
|
|
|
|
|
| 38 |
|
| 39 |
|
| 40 |
def get_firebase_cred():
|
|
|
|
| 35 |
# Twilio
|
| 36 |
ACCOUNT_SID = os.getenv("TWILIO_ACCOUNT_SID")
|
| 37 |
AUTH_TOKEN = os.getenv("TWILIO_AUTH_TOKEN")
|
| 38 |
+
# HuggingFace
|
| 39 |
+
HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
|
| 40 |
|
| 41 |
|
| 42 |
def get_firebase_cred():
|
src/model/requests/request_model.py
CHANGED
|
@@ -51,6 +51,7 @@ def get_client_info(request: Request):
|
|
| 51 |
class BasicReq(BaseModel):
|
| 52 |
token: str
|
| 53 |
uuid: str
|
|
|
|
| 54 |
|
| 55 |
|
| 56 |
"""endpoint: /sendNotification"""
|
|
|
|
| 51 |
class BasicReq(BaseModel):
    """Base request payload shared by the API endpoints."""

    # Client credential -- presumably validated upstream before handlers run;
    # TODO(review): confirm against the token-verification path.
    token: str
    # Caller/device identifier -- assumed to scope per-user data; verify against callers.
    uuid: str
    # LLM to use for this request; defaults to gpt-3.5-turbo. Accepted values
    # per this commit are listed in src/rising_plugin/llm/llms.py (LLM_MODELS).
    model: str = "gpt-3.5-turbo"
|
| 55 |
|
| 56 |
|
| 57 |
"""endpoint: /sendNotification"""
|
src/rising_plugin/guardrails-config/actions/actions.py
CHANGED
|
@@ -17,11 +17,9 @@ import os
|
|
| 17 |
import json
|
| 18 |
import numpy as np
|
| 19 |
|
| 20 |
-
from langchain.chat_models import ChatOpenAI
|
| 21 |
from langchain.embeddings.openai import OpenAIEmbeddings
|
| 22 |
from langchain.vectorstores import utils
|
| 23 |
from langchain.document_loaders.csv_loader import CSVLoader
|
| 24 |
-
from langchain.chains.question_answering import load_qa_chain
|
| 25 |
from langchain.docstore.document import Document
|
| 26 |
|
| 27 |
from src.common.utils import (
|
|
@@ -35,11 +33,19 @@ from src.rising_plugin.image_embedding import (
|
|
| 35 |
|
| 36 |
from nemoguardrails.actions import action
|
| 37 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
|
| 39 |
@action()
|
| 40 |
async def general_question(query, model, uuid, image_search):
|
| 41 |
-
llm = ChatOpenAI(model_name=model, temperature=0, openai_api_key=OPENAI_API_KEY)
|
| 42 |
-
chain = load_qa_chain(llm, chain_type="stuff")
|
| 43 |
file_path = os.path.dirname(os.path.abspath(__file__))
|
| 44 |
|
| 45 |
with open(f"{file_path}/phone.json", "r") as infile:
|
|
@@ -60,7 +66,15 @@ async def general_question(query, model, uuid, image_search):
|
|
| 60 |
)
|
| 61 |
)
|
| 62 |
|
| 63 |
-
chain_data =
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 64 |
try:
|
| 65 |
result = json.loads(chain_data)
|
| 66 |
# check image query with only its text
|
|
|
|
| 17 |
import json
|
| 18 |
import numpy as np
|
| 19 |
|
|
|
|
| 20 |
from langchain.embeddings.openai import OpenAIEmbeddings
|
| 21 |
from langchain.vectorstores import utils
|
| 22 |
from langchain.document_loaders.csv_loader import CSVLoader
|
|
|
|
| 23 |
from langchain.docstore.document import Document
|
| 24 |
|
| 25 |
from src.common.utils import (
|
|
|
|
| 33 |
|
| 34 |
from nemoguardrails.actions import action
|
| 35 |
|
| 36 |
+
from src.rising_plugin.llm.falcon_llm import FalconLLM
|
| 37 |
+
from src.rising_plugin.llm.gpt_llm import GptLLM
|
| 38 |
+
from src.rising_plugin.llm.llms import (
|
| 39 |
+
get_llm_chain,
|
| 40 |
+
GPT_3_5_TURBO,
|
| 41 |
+
GPT_4_32K,
|
| 42 |
+
GPT_4,
|
| 43 |
+
FALCON_7B,
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
|
| 47 |
@action()
|
| 48 |
async def general_question(query, model, uuid, image_search):
|
|
|
|
|
|
|
| 49 |
file_path = os.path.dirname(os.path.abspath(__file__))
|
| 50 |
|
| 51 |
with open(f"{file_path}/phone.json", "r") as infile:
|
|
|
|
| 66 |
)
|
| 67 |
)
|
| 68 |
|
| 69 |
+
chain_data = get_llm_chain(model=model).run(input_documents=docs, question=query)
|
| 70 |
+
# test
|
| 71 |
+
if model == GPT_3_5_TURBO or model == GPT_4 or model == GPT_4_32K:
|
| 72 |
+
gpt_llm = GptLLM(model=model)
|
| 73 |
+
chain_data = gpt_llm.get_chain().run(input_documents=docs, question=query)
|
| 74 |
+
elif model == FALCON_7B:
|
| 75 |
+
falcon_llm = FalconLLM()
|
| 76 |
+
chain_data = falcon_llm.get_chain().run(input_documents=docs, question=query)
|
| 77 |
+
|
| 78 |
try:
|
| 79 |
result = json.loads(chain_data)
|
| 80 |
# check image query with only its text
|
src/rising_plugin/llm/__init__.py
ADDED
|
File without changes
|
src/rising_plugin/llm/falcon_llm.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""falcon llm"""
|
| 2 |
+
from langchain import HuggingFaceHub, PromptTemplate, LLMChain
|
| 3 |
+
|
| 4 |
+
from src.common.utils import HUGGINGFACEHUB_API_TOKEN
|
| 5 |
+
|
| 6 |
+
# HuggingFace Hub model id and the prompt skeleton used for every question.
repo_id = "tiiuae/falcon-7b-instruct"
template = """
You are an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.

{question}

"""


class FalconLLM:
    """Thin wrapper around a Hub-hosted Falcon-7B-Instruct model."""

    def __init__(self, temperature: float = 0.6, max_new_tokens: int = 2000):
        # Generation settings are forwarded to the Hub endpoint via model_kwargs.
        generation_kwargs = {
            "temperature": temperature,
            "max_new_tokens": max_new_tokens,
        }
        self.llm = HuggingFaceHub(
            huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
            repo_id=repo_id,
            model_kwargs=generation_kwargs,
        )

    def get_llm(self):
        """Return the underlying HuggingFaceHub client."""
        return self.llm

    def get_chain(self):
        """Build a verbose LLMChain feeding questions through the module template."""
        question_prompt = PromptTemplate(template=template, input_variables=["question"])
        return LLMChain(prompt=question_prompt, llm=self.llm, verbose=True)
|
src/rising_plugin/llm/gpt_llm.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""gpt-open ai llm"""
|
| 2 |
+
from typing import Any
|
| 3 |
+
|
| 4 |
+
from langchain.chat_models import ChatOpenAI
|
| 5 |
+
from langchain.chains.question_answering import load_qa_chain
|
| 6 |
+
from src.common.utils import (
|
| 7 |
+
OPENAI_API_KEY,
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class GptLLM:
    """Wrapper around an OpenAI chat model plus a QA-chain factory."""

    def __init__(self, model: str = "gpt-3.5-turbo", temperature: float = 0.6):
        self.llm = self.init_llm(model=model, temperature=temperature)

    def init_llm(self, model: str = "gpt-3.5-turbo", temperature: float = 0.6) -> Any:
        """(Re)create the ChatOpenAI client, store it on self, and return it."""
        chat_model = ChatOpenAI(
            model_name=model, temperature=temperature, openai_api_key=OPENAI_API_KEY
        )
        self.llm = chat_model
        return chat_model

    def get_llm(self):
        """Return the current ChatOpenAI instance."""
        return self.llm

    def get_chain(self):
        """Return a 'stuff'-type question-answering chain over self.llm."""
        return load_qa_chain(self.llm, chain_type="stuff")
|
src/rising_plugin/llm/llms.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""lLMs which we offer"""
|
| 2 |
+
from typing import Any
|
| 3 |
+
|
| 4 |
+
from src.common.brain_exception import BrainException
|
| 5 |
+
from src.rising_plugin.llm.falcon_llm import FalconLLM
|
| 6 |
+
from src.rising_plugin.llm.gpt_llm import GptLLM
|
| 7 |
+
|
| 8 |
+
GPT_3_5_TURBO = "gpt-3.5-turbo"
GPT_4 = "gpt-4"
GPT_4_32K = "gpt-4-32k"
FALCON_7B = "falcon-7b"

# The list of models this plugin offers.
LLM_MODELS = [GPT_3_5_TURBO, GPT_4, GPT_4_32K, FALCON_7B]

# Error text raised when a caller requests a model outside LLM_MODELS.
EXCEPTION_MSG = f"The model is not correct. It should be in {LLM_MODELS}"


def validate_model(model: str) -> bool:
    """Return True if `model` is one of the models this plugin offers."""
    # Idiom fix: return the membership test directly instead of the
    # if-in / return True / return False ladder.
    return model in LLM_MODELS
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
"""
|
| 30 |
+
Args
|
| 31 |
+
model: model name of LLM such as 'gpt-3.5-turbo' | 'falcon-7b'
|
| 32 |
+
Returns
|
| 33 |
+
datatype: LLmChain
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def get_llm_chain(
|
| 38 |
+
model: str, temperature: float = 0.6, max_new_tokens: int = 2000
|
| 39 |
+
) -> Any:
|
| 40 |
+
if not validate_model(model):
|
| 41 |
+
raise BrainException(EXCEPTION_MSG)
|
| 42 |
+
"""check model"""
|
| 43 |
+
llm = get_llm(model=model, temperature=temperature, max_new_tokens=max_new_tokens)
|
| 44 |
+
|
| 45 |
+
return llm.get_chain()
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def get_llm(model: str, temperature: float = 0.6, max_new_tokens: int = 2000) -> Any:
    """Return the LLM wrapper (GptLLM or FalconLLM) for `model`.

    Args:
        model: model name of LLM such as 'gpt-3.5-turbo' | 'falcon-7b'
        temperature: sampling temperature forwarded to the wrapper.
        max_new_tokens: generation cap forwarded to the Falcon wrapper.

    Raises:
        BrainException: if `model` is not in LLM_MODELS.
    """
    # Check the model before dispatching.
    if not validate_model(model):
        raise BrainException(EXCEPTION_MSG)
    # FIX: the original always built a throwaway default GptLLM() (which
    # constructs a real ChatOpenAI client) before dispatching, then usually
    # replaced it. Construct exactly one wrapper instead; after validation
    # the model is guaranteed to be either Falcon or a GPT variant.
    if model == FALCON_7B:
        return FalconLLM(temperature=temperature, max_new_tokens=max_new_tokens)
    return GptLLM(model=model)
|
src/rising_plugin/risingplugin.py
CHANGED
|
@@ -14,6 +14,7 @@ from langchain.chat_models import ChatOpenAI
|
|
| 14 |
|
| 15 |
from firebase_admin import storage
|
| 16 |
|
|
|
|
| 17 |
from ..common.utils import (
|
| 18 |
OPENAI_API_KEY,
|
| 19 |
FIREBASE_STORAGE_ROOT,
|
|
@@ -112,12 +113,12 @@ def getCompletion(
|
|
| 112 |
uuid="",
|
| 113 |
image_search=True,
|
| 114 |
):
|
| 115 |
-
llm =
|
| 116 |
-
|
| 117 |
# Break input text into chunks
|
| 118 |
chunks = getChunks(query)
|
| 119 |
|
| 120 |
app = LLMRails(config, llm)
|
|
|
|
| 121 |
return processLargeText(app, chunks)
|
| 122 |
|
| 123 |
|
|
|
|
| 14 |
|
| 15 |
from firebase_admin import storage
|
| 16 |
|
| 17 |
+
from .llm.llms import get_llm, GPT_4, FALCON_7B
|
| 18 |
from ..common.utils import (
|
| 19 |
OPENAI_API_KEY,
|
| 20 |
FIREBASE_STORAGE_ROOT,
|
|
|
|
| 113 |
uuid="",
|
| 114 |
image_search=True,
|
| 115 |
):
|
| 116 |
+
llm = get_llm(model=model).get_llm()
|
|
|
|
| 117 |
# Break input text into chunks
|
| 118 |
chunks = getChunks(query)
|
| 119 |
|
| 120 |
app = LLMRails(config, llm)
|
| 121 |
+
|
| 122 |
return processLargeText(app, chunks)
|
| 123 |
|
| 124 |
|
src/router/api.py
CHANGED
|
@@ -2,6 +2,7 @@ import json
|
|
| 2 |
import os
|
| 3 |
|
| 4 |
from src.common.assembler import Assembler
|
|
|
|
| 5 |
from src.common.utils import ProgramType
|
| 6 |
from src.model.image_model import ImageModel
|
| 7 |
from src.model.requests.request_model import (
|
|
@@ -77,13 +78,17 @@ def construct_blueprint_api() -> APIRouter:
|
|
| 77 |
uuid=uuid, search=result["content"]
|
| 78 |
)
|
| 79 |
result["content"] = str(contacts_results)
|
| 80 |
-
except Exception as e:
|
| 81 |
-
logger.error(title="sendNotification", message=json.dumps(result))
|
| 82 |
|
| 83 |
-
|
| 84 |
|
| 85 |
-
|
| 86 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 87 |
|
| 88 |
"""@generator.response(
|
| 89 |
status_code=200, schema={"message": "message", "result": "test_result"}
|
|
|
|
| 2 |
import os
|
| 3 |
|
| 4 |
from src.common.assembler import Assembler
|
| 5 |
+
from src.common.brain_exception import BrainException
|
| 6 |
from src.common.utils import ProgramType
|
| 7 |
from src.model.image_model import ImageModel
|
| 8 |
from src.model.requests.request_model import (
|
|
|
|
| 78 |
uuid=uuid, search=result["content"]
|
| 79 |
)
|
| 80 |
result["content"] = str(contacts_results)
|
|
|
|
|
|
|
| 81 |
|
| 82 |
+
notification = {"title": "alert", "content": json.dumps(result)}
|
| 83 |
|
| 84 |
+
state, value = send_message(notification, [token])
|
| 85 |
+
return assembler.to_response(200, value, result)
|
| 86 |
+
except Exception as e:
|
| 87 |
+
logger.error(
|
| 88 |
+
title="sendNotification", message="json parsing or get completion error"
|
| 89 |
+
)
|
| 90 |
+
if isinstance(e, BrainException):
|
| 91 |
+
return e.get_response_exp()
|
| 92 |
|
| 93 |
"""@generator.response(
|
| 94 |
status_code=200, schema={"message": "message", "result": "test_result"}
|