| id | prompt | docstring |
|---|---|---|
22,219 |
The provided code snippet includes necessary dependencies for implementing the `run_async` function. Write a Python function `def run_async(cor)` to solve the following problem:
Run asynchronous code in a synchronous environment.
Here is the function:
import asyncio
def run_async(cor):
'''
Run asynchronous code in a synchronous environment.
'''
try:
loop = asyncio.get_event_loop()
except:
loop = asyncio.new_event_loop()
return loop.run_until_complete(cor) | Run asynchronous code in a synchronous environment. |
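A brief usage sketch of `run_async`, assuming the function above is in scope; the coroutine `sample_coroutine` is purely illustrative:

```python
import asyncio

async def sample_coroutine() -> str:
    # Stand-in for real asynchronous work.
    await asyncio.sleep(0.1)
    return "done"

# Drive the coroutine to completion from ordinary synchronous code.
result = run_async(sample_coroutine())
print(result)  # -> "done"
```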
22,220 |
The provided code snippet includes necessary dependencies for implementing the `iter_over_async` function. Write a Python function `def iter_over_async(ait, loop=None)` to solve the following problem:
Wrap an asynchronous generator into a synchronous generator.
Here is the function:
import asyncio
def iter_over_async(ait, loop=None):
'''
Wrap an asynchronous generator into a synchronous generator.
'''
ait = ait.__aiter__()
async def get_next():
try:
obj = await ait.__anext__()
return False, obj
except StopAsyncIteration:
return True, None
if loop is None:
try:
loop = asyncio.get_event_loop()
except:
loop = asyncio.new_event_loop()
while True:
done, obj = loop.run_until_complete(get_next())
if done:
break
yield obj | Wrap an asynchronous generator into a synchronous generator. |
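A usage sketch of `iter_over_async`, assuming the function above is in scope; the async generator `countdown` is illustrative:

```python
import asyncio

async def countdown(n: int):
    # Illustrative async generator yielding n values.
    for i in range(n, 0, -1):
        await asyncio.sleep(0.05)
        yield i

# Consume the async generator from ordinary synchronous code.
for value in iter_over_async(countdown(3)):
    print(value)  # -> 3, 2, 1
```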
22,221 |
def get_all_model_worker_configs() -> dict:
result = {}
model_names = set(FSCHAT_MODEL_WORKERS.keys())
for name in model_names:
if name != "default":
result[name] = get_model_worker_config(name)
return result | null |
22,222 | import os
from configs import (
KB_ROOT_PATH,
CHUNK_SIZE,
OVERLAP_SIZE,
ZH_TITLE_ENHANCE,
logger,
log_verbose,
text_splitter_dict,
LLM_MODELS,
TEXT_SPLITTER_NAME,
)
import importlib
from text_splitter import zh_title_enhance as func_zh_title_enhance
import langchain.document_loaders
from langchain.docstore.document import Document
from langchain.text_splitter import TextSplitter
from pathlib import Path
from server.utils import run_in_thread_pool, get_model_worker_config
import json
from typing import List, Union,Dict, Tuple, Generator
import chardet
def get_kb_path(knowledge_base_name: str):
return os.path.join(KB_ROOT_PATH, knowledge_base_name)
def get_vs_path(knowledge_base_name: str, vector_name: str):
return os.path.join(get_kb_path(knowledge_base_name), "vector_store", vector_name) | null |
22,223 | import os
from configs import (
KB_ROOT_PATH,
CHUNK_SIZE,
OVERLAP_SIZE,
ZH_TITLE_ENHANCE,
logger,
log_verbose,
text_splitter_dict,
LLM_MODELS,
TEXT_SPLITTER_NAME,
)
import importlib
from text_splitter import zh_title_enhance as func_zh_title_enhance
import langchain.document_loaders
from langchain.docstore.document import Document
from langchain.text_splitter import TextSplitter
from pathlib import Path
from server.utils import run_in_thread_pool, get_model_worker_config
import json
from typing import List, Union,Dict, Tuple, Generator
import chardet
def _new_json_dumps(obj, **kwargs):
kwargs["ensure_ascii"] = False
return _origin_json_dumps(obj, **kwargs) | null |
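This helper is typically used to monkey-patch `json.dumps` so non-ASCII text is not escaped. A self-contained sketch of that pattern, with `_origin_json_dumps` defined here as an assumption (it holds a reference to the original function):

```python
import json

_origin_json_dumps = json.dumps  # keep a reference to the original function

def _new_json_dumps(obj, **kwargs):
    # Force ensure_ascii=False so CJK characters stay readable in serialized output.
    kwargs["ensure_ascii"] = False
    return _origin_json_dumps(obj, **kwargs)

json.dumps = _new_json_dumps

print(json.dumps({"msg": "知识库"}))  # -> {"msg": "知识库"} instead of \u escape sequences
```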
22,224 | import os
from configs import (
KB_ROOT_PATH,
CHUNK_SIZE,
OVERLAP_SIZE,
ZH_TITLE_ENHANCE,
logger,
log_verbose,
text_splitter_dict,
LLM_MODELS,
TEXT_SPLITTER_NAME,
)
import importlib
from text_splitter import zh_title_enhance as func_zh_title_enhance
import langchain.document_loaders
from langchain.docstore.document import Document
from langchain.text_splitter import TextSplitter
from pathlib import Path
from server.utils import run_in_thread_pool, get_model_worker_config
import json
from typing import List, Union,Dict, Tuple, Generator
import chardet
LOADER_DICT = {"UnstructuredHTMLLoader": ['.html', '.htm'],
"MHTMLLoader": ['.mhtml'],
"UnstructuredMarkdownLoader": ['.md'],
"JSONLoader": [".json"],
"JSONLinesLoader": [".jsonl"],
"CSVLoader": [".csv"],
# "FilteredCSVLoader": [".csv"], 如果使用自定义分割csv
"RapidOCRPDFLoader": [".pdf"],
"RapidOCRDocLoader": ['.docx', '.doc'],
"RapidOCRPPTLoader": ['.ppt', '.pptx', ],
"RapidOCRLoader": ['.png', '.jpg', '.jpeg', '.bmp'],
"UnstructuredFileLoader": ['.eml', '.msg', '.rst',
'.rtf', '.txt', '.xml',
'.epub', '.odt','.tsv'],
"UnstructuredEmailLoader": ['.eml', '.msg'],
"UnstructuredEPubLoader": ['.epub'],
"UnstructuredExcelLoader": ['.xlsx', '.xls', '.xlsd'],
"NotebookLoader": ['.ipynb'],
"UnstructuredODTLoader": ['.odt'],
"PythonLoader": ['.py'],
"UnstructuredRSTLoader": ['.rst'],
"UnstructuredRTFLoader": ['.rtf'],
"SRTLoader": ['.srt'],
"TomlLoader": ['.toml'],
"UnstructuredTSVLoader": ['.tsv'],
"UnstructuredWordDocumentLoader": ['.docx', '.doc'],
"UnstructuredXMLLoader": ['.xml'],
"UnstructuredPowerPointLoader": ['.ppt', '.pptx'],
"EverNoteLoader": ['.enex'],
}
def get_LoaderClass(file_extension):
for LoaderClass, extensions in LOADER_DICT.items():
if file_extension in extensions:
return LoaderClass | null |
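A quick usage sketch, assuming `LOADER_DICT` and `get_LoaderClass` above are in scope:

```python
# Map a file extension to its loader class name; unknown extensions fall through to None.
print(get_LoaderClass(".pdf"))  # -> "RapidOCRPDFLoader"
print(get_LoaderClass(".md"))   # -> "UnstructuredMarkdownLoader"
print(get_LoaderClass(".xyz"))  # -> None (no loader registered)
```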
22,225 | import os
from configs import (
KB_ROOT_PATH,
CHUNK_SIZE,
OVERLAP_SIZE,
ZH_TITLE_ENHANCE,
logger,
log_verbose,
text_splitter_dict,
LLM_MODELS,
TEXT_SPLITTER_NAME,
)
import importlib
from text_splitter import zh_title_enhance as func_zh_title_enhance
import langchain.document_loaders
from langchain.docstore.document import Document
from langchain.text_splitter import TextSplitter
from pathlib import Path
from server.utils import run_in_thread_pool, get_model_worker_config
import json
from typing import List, Union,Dict, Tuple, Generator
import chardet
The provided code snippet includes necessary dependencies for implementing the `get_loader` function. Write a Python function `def get_loader(loader_name: str, file_path: str, loader_kwargs: Dict = None)` to solve the following problem:
Return a document loader based on loader_name and the file path or content.
Here is the function:
def get_loader(loader_name: str, file_path: str, loader_kwargs: Dict = None):
'''
Return a document loader based on loader_name and the file path or content.
'''
loader_kwargs = loader_kwargs or {}
try:
if loader_name in ["RapidOCRPDFLoader", "RapidOCRLoader", "FilteredCSVLoader",
"RapidOCRDocLoader", "RapidOCRPPTLoader"]:
document_loaders_module = importlib.import_module('document_loaders')
else:
document_loaders_module = importlib.import_module('langchain.document_loaders')
DocumentLoader = getattr(document_loaders_module, loader_name)
except Exception as e:
msg = f"为文件{file_path}查找加载器{loader_name}时出错:{e}"
logger.error(f'{e.__class__.__name__}: {msg}',
exc_info=e if log_verbose else None)
document_loaders_module = importlib.import_module('langchain.document_loaders')
DocumentLoader = getattr(document_loaders_module, "UnstructuredFileLoader")
if loader_name == "UnstructuredFileLoader":
loader_kwargs.setdefault("autodetect_encoding", True)
elif loader_name == "CSVLoader":
if not loader_kwargs.get("encoding"):
# If encoding is not specified, auto-detect the file encoding to avoid encoding errors when the langchain loader reads the file
with open(file_path, 'rb') as struct_file:
encode_detect = chardet.detect(struct_file.read())
if encode_detect is None:
encode_detect = {"encoding": "utf-8"}
loader_kwargs["encoding"] = encode_detect["encoding"]
elif loader_name == "JSONLoader":
loader_kwargs.setdefault("jq_schema", ".")
loader_kwargs.setdefault("text_content", False)
elif loader_name == "JSONLinesLoader":
loader_kwargs.setdefault("jq_schema", ".")
loader_kwargs.setdefault("text_content", False)
loader = DocumentLoader(file_path, **loader_kwargs)
return loader | Return a document loader based on loader_name and the file path or content. |
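The CSVLoader branch above relies on `chardet` to guess a file's encoding before passing it to the loader. A self-contained sketch of just that detection step (the sample bytes are illustrative):

```python
import chardet

# Bytes of a small GBK-encoded CSV snippet (illustrative); get_loader reads the real file in binary mode.
raw = "名称,数量\n苹果,3\n".encode("gbk")

encode_detect = chardet.detect(raw) or {"encoding": "utf-8"}
print(encode_detect["encoding"], encode_detect["confidence"])
```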
22,226 | import os
from configs import (
KB_ROOT_PATH,
CHUNK_SIZE,
OVERLAP_SIZE,
ZH_TITLE_ENHANCE,
logger,
log_verbose,
text_splitter_dict,
LLM_MODELS,
TEXT_SPLITTER_NAME,
)
import importlib
from text_splitter import zh_title_enhance as func_zh_title_enhance
import langchain.document_loaders
from langchain.docstore.document import Document
from langchain.text_splitter import TextSplitter
from pathlib import Path
from server.utils import run_in_thread_pool, get_model_worker_config
import json
from typing import List, Union,Dict, Tuple, Generator
import chardet
langchain.document_loaders.JSONLinesLoader = JSONLinesLoader
The provided code snippet includes necessary dependencies for implementing the `make_text_splitter` function. Write a Python function `def make_text_splitter( splitter_name: str = TEXT_SPLITTER_NAME, chunk_size: int = CHUNK_SIZE, chunk_overlap: int = OVERLAP_SIZE, llm_model: str = LLM_MODELS[0], )` to solve the following problem:
Get a specific text splitter based on the parameters.
Here is the function:
def make_text_splitter(
splitter_name: str = TEXT_SPLITTER_NAME,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
llm_model: str = LLM_MODELS[0],
):
"""
Get a specific text splitter based on the parameters.
"""
splitter_name = splitter_name or "SpacyTextSplitter"
try:
if splitter_name == "MarkdownHeaderTextSplitter": # MarkdownHeaderTextSplitter特殊判定
headers_to_split_on = text_splitter_dict[splitter_name]['headers_to_split_on']
text_splitter = langchain.text_splitter.MarkdownHeaderTextSplitter(
headers_to_split_on=headers_to_split_on)
else:
try: ## prefer a user-defined text_splitter
text_splitter_module = importlib.import_module('text_splitter')
TextSplitter = getattr(text_splitter_module, splitter_name)
except: ## otherwise fall back to langchain's text_splitter
text_splitter_module = importlib.import_module('langchain.text_splitter')
TextSplitter = getattr(text_splitter_module, splitter_name)
if text_splitter_dict[splitter_name]["source"] == "tiktoken": ## 从tiktoken加载
try:
text_splitter = TextSplitter.from_tiktoken_encoder(
encoding_name=text_splitter_dict[splitter_name]["tokenizer_name_or_path"],
pipeline="zh_core_web_sm",
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
except:
text_splitter = TextSplitter.from_tiktoken_encoder(
encoding_name=text_splitter_dict[splitter_name]["tokenizer_name_or_path"],
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
elif text_splitter_dict[splitter_name]["source"] == "huggingface": ## 从huggingface加载
if text_splitter_dict[splitter_name]["tokenizer_name_or_path"] == "":
config = get_model_worker_config(llm_model)
text_splitter_dict[splitter_name]["tokenizer_name_or_path"] = \
config.get("model_path")
if text_splitter_dict[splitter_name]["tokenizer_name_or_path"] == "gpt2":
from transformers import GPT2TokenizerFast
from langchain.text_splitter import CharacterTextSplitter
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
else: ## load by character length
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
text_splitter_dict[splitter_name]["tokenizer_name_or_path"],
trust_remote_code=True)
text_splitter = TextSplitter.from_huggingface_tokenizer(
tokenizer=tokenizer,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
else:
try:
text_splitter = TextSplitter(
pipeline="zh_core_web_sm",
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
except:
text_splitter = TextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
except Exception as e:
print(e)
text_splitter_module = importlib.import_module('langchain.text_splitter')
TextSplitter = getattr(text_splitter_module, "RecursiveCharacterTextSplitter")
text_splitter = TextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
# If you use SpacyTextSplitter you can use GPU to do split likes Issue #1287
# text_splitter._tokenizer.max_length = 37016792
# text_splitter._tokenizer.prefer_gpu()
return text_splitter | Get a specific text splitter based on the parameters. |
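When every lookup above fails, the function falls back to langchain's `RecursiveCharacterTextSplitter`. A minimal sketch of that fallback path, assuming only that `langchain` is installed (chunk sizes are illustrative):

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Same kind of fallback splitter the except-branch builds.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=250, chunk_overlap=50)

sample = "Langchain-Chatchat splits documents into overlapping chunks. " * 20
chunks = text_splitter.split_text(sample)
print(len(chunks), len(chunks[0]))  # several chunks, each at most ~250 characters
```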
22,227 | from configs import (
EMBEDDING_MODEL, DEFAULT_VS_TYPE, ZH_TITLE_ENHANCE,
CHUNK_SIZE, OVERLAP_SIZE,
logger, log_verbose
)
from server.knowledge_base.utils import (
get_file_path, list_kbs_from_folder,
list_files_from_folder, files2docs_in_thread,
KnowledgeFile
)
from server.knowledge_base.kb_service.base import KBServiceFactory
from server.db.models.conversation_model import ConversationModel
from server.db.models.message_model import MessageModel
from server.db.repository.knowledge_file_repository import add_file_to_db
from server.db.repository.knowledge_metadata_repository import add_summary_to_db
from server.db.base import Base, engine
from server.db.session import session_scope
import os
from dateutil.parser import parse
from typing import Literal, List
def create_tables():
Base.metadata.create_all(bind=engine)
engine = create_engine(
SQLALCHEMY_DATABASE_URI,
json_serializer=lambda obj: json.dumps(obj, ensure_ascii=False),
)
Base: DeclarativeMeta = declarative_base()
def reset_tables():
Base.metadata.drop_all(bind=engine)
create_tables() | null |
22,228 | from configs import (
EMBEDDING_MODEL, DEFAULT_VS_TYPE, ZH_TITLE_ENHANCE,
CHUNK_SIZE, OVERLAP_SIZE,
logger, log_verbose
)
from server.knowledge_base.utils import (
get_file_path, list_kbs_from_folder,
list_files_from_folder, files2docs_in_thread,
KnowledgeFile
)
from server.knowledge_base.kb_service.base import KBServiceFactory
from server.db.models.conversation_model import ConversationModel
from server.db.models.message_model import MessageModel
from server.db.repository.knowledge_file_repository import add_file_to_db
from server.db.repository.knowledge_metadata_repository import add_summary_to_db
from server.db.base import Base, engine
from server.db.session import session_scope
import os
from dateutil.parser import parse
from typing import Literal, List
Base: DeclarativeMeta = declarative_base()
def session_scope() -> Session:
"""上下文管理器用于自动获取 Session, 避免错误"""
session = SessionLocal()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
The provided code snippet includes necessary dependencies for implementing the `import_from_db` function. Write a Python function `def import_from_db( sqlite_path: str = None, # csv_path: str = None, ) -> bool` to solve the following problem:
Import data from a backup database into info.db when the knowledge base and vector store are unchanged. Intended for version upgrades where the structure of info.db changes but re-vectorization is not needed. Make sure the table names and the field names to be imported match on both sides. Currently only sqlite is supported.
Here is the function:
def import_from_db(
sqlite_path: str = None,
# csv_path: str = None,
) -> bool:
"""
Import data from a backup database into info.db when the knowledge base and vector store are unchanged.
Intended for version upgrades where the structure of info.db changes but re-vectorization is not needed.
Make sure the table names and the field names to be imported match on both sides.
Currently only sqlite is supported.
"""
import sqlite3 as sql
from pprint import pprint
models = list(Base.registry.mappers)
try:
con = sql.connect(sqlite_path)
con.row_factory = sql.Row
cur = con.cursor()
tables = [x["name"] for x in cur.execute("select name from sqlite_master where type='table'").fetchall()]
for model in models:
table = model.local_table.fullname
if table not in tables:
continue
print(f"processing table: {table}")
with session_scope() as session:
for row in cur.execute(f"select * from {table}").fetchall():
data = {k: row[k] for k in row.keys() if k in model.columns}
if "create_time" in data:
data["create_time"] = parse(data["create_time"])
pprint(data)
session.add(model.class_(**data))
con.close()
return True
except Exception as e:
print(f"无法读取备份数据库:{sqlite_path}。错误信息:{e}")
return False | 在知识库与向量库无变化的情况下,从备份数据库中导入数据到 info.db。 适用于版本升级时,info.db 结构变化,但无需重新向量化的情况。 请确保两边数据库表名一致,需要导入的字段名一致 当前仅支持 sqlite |
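The import loop above depends on `sqlite3.Row` so columns can be read by name. A self-contained sketch of that access pattern using an in-memory database (table and values are illustrative):

```python
import sqlite3 as sql

con = sql.connect(":memory:")
con.row_factory = sql.Row  # rows become addressable by column name
cur = con.cursor()
cur.execute("create table knowledge_base (id integer, kb_name text)")
cur.execute("insert into knowledge_base values (1, 'samples')")

for row in cur.execute("select * from knowledge_base").fetchall():
    data = {k: row[k] for k in row.keys()}  # same dict-building idiom as import_from_db
    print(data)  # -> {'id': 1, 'kb_name': 'samples'}

con.close()
```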
22,229 | from configs import (
EMBEDDING_MODEL, DEFAULT_VS_TYPE, ZH_TITLE_ENHANCE,
CHUNK_SIZE, OVERLAP_SIZE,
logger, log_verbose
)
from server.knowledge_base.utils import (
get_file_path, list_kbs_from_folder,
list_files_from_folder, files2docs_in_thread,
KnowledgeFile
)
from server.knowledge_base.kb_service.base import KBServiceFactory
from server.db.models.conversation_model import ConversationModel
from server.db.models.message_model import MessageModel
from server.db.repository.knowledge_file_repository import add_file_to_db
from server.db.repository.knowledge_metadata_repository import add_summary_to_db
from server.db.base import Base, engine
from server.db.session import session_scope
import os
from dateutil.parser import parse
from typing import Literal, List
def file_to_kbfile(kb_name: str, files: List[str]) -> List[KnowledgeFile]:
kb_files = []
for file in files:
try:
kb_file = KnowledgeFile(filename=file, knowledge_base_name=kb_name)
kb_files.append(kb_file)
except Exception as e:
msg = f"{e},已跳过"
logger.error(f'{e.__class__.__name__}: {msg}',
exc_info=e if log_verbose else None)
return kb_files
def list_kbs_from_folder():
return [f for f in os.listdir(KB_ROOT_PATH)
if os.path.isdir(os.path.join(KB_ROOT_PATH, f))]
def list_files_from_folder(kb_name: str):
doc_path = get_doc_path(kb_name)
result = []
def is_skiped_path(path: str):
tail = os.path.basename(path).lower()
for x in ["temp", "tmp", ".", "~$"]:
if tail.startswith(x):
return True
return False
def process_entry(entry):
if is_skiped_path(entry.path):
return
if entry.is_symlink():
target_path = os.path.realpath(entry.path)
with os.scandir(target_path) as target_it:
for target_entry in target_it:
process_entry(target_entry)
elif entry.is_file():
file_path = (Path(os.path.relpath(entry.path, doc_path)).as_posix()) # normalize the path to posix format
result.append(file_path)
elif entry.is_dir():
with os.scandir(entry.path) as it:
for sub_entry in it:
process_entry(sub_entry)
with os.scandir(doc_path) as it:
for entry in it:
process_entry(entry)
return result
class KnowledgeFile:
def __init__(
self,
filename: str,
knowledge_base_name: str,
loader_kwargs: Dict = {},
):
'''
Corresponds to a file in the knowledge base directory; the file must exist on disk before vectorization and similar operations can be performed.
'''
self.kb_name = knowledge_base_name
self.filename = str(Path(filename).as_posix())
self.ext = os.path.splitext(filename)[-1].lower()
if self.ext not in SUPPORTED_EXTS:
raise ValueError(f"暂未支持的文件格式 {self.filename}")
self.loader_kwargs = loader_kwargs
self.filepath = get_file_path(knowledge_base_name, filename)
self.docs = None
self.splited_docs = None
self.document_loader_name = get_LoaderClass(self.ext)
self.text_splitter_name = TEXT_SPLITTER_NAME
def file2docs(self, refresh: bool = False):
if self.docs is None or refresh:
logger.info(f"{self.document_loader_name} used for {self.filepath}")
loader = get_loader(loader_name=self.document_loader_name,
file_path=self.filepath,
loader_kwargs=self.loader_kwargs)
self.docs = loader.load()
return self.docs
def docs2texts(
self,
docs: List[Document] = None,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
refresh: bool = False,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
text_splitter: TextSplitter = None,
):
docs = docs or self.file2docs(refresh=refresh)
if not docs:
return []
if self.ext not in [".csv"]:
if text_splitter is None:
text_splitter = make_text_splitter(splitter_name=self.text_splitter_name, chunk_size=chunk_size,
chunk_overlap=chunk_overlap)
if self.text_splitter_name == "MarkdownHeaderTextSplitter":
docs = text_splitter.split_text(docs[0].page_content)
else:
docs = text_splitter.split_documents(docs)
if not docs:
return []
print(f"文档切分示例:{docs[0]}")
if zh_title_enhance:
docs = func_zh_title_enhance(docs)
self.splited_docs = docs
return self.splited_docs
def file2text(
self,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
refresh: bool = False,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
text_splitter: TextSplitter = None,
):
if self.splited_docs is None or refresh:
docs = self.file2docs()
self.splited_docs = self.docs2texts(docs=docs,
zh_title_enhance=zh_title_enhance,
refresh=refresh,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
text_splitter=text_splitter)
return self.splited_docs
def file_exist(self):
return os.path.isfile(self.filepath)
def get_mtime(self):
return os.path.getmtime(self.filepath)
def get_size(self):
return os.path.getsize(self.filepath)
def files2docs_in_thread(
files: List[Union[KnowledgeFile, Tuple[str, str], Dict]],
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
) -> Generator:
'''
Convert files on disk into langchain Documents in batch using multiple threads.
If an item is passed as a tuple, its form is (filename, kb_name).
The generator yields status, (kb_name, file_name, docs | error).
'''
def file2docs(*, file: KnowledgeFile, **kwargs) -> Tuple[bool, Tuple[str, str, List[Document]]]:
try:
return True, (file.kb_name, file.filename, file.file2text(**kwargs))
except Exception as e:
msg = f"从文件 {file.kb_name}/{file.filename} 加载文档时出错:{e}"
logger.error(f'{e.__class__.__name__}: {msg}',
exc_info=e if log_verbose else None)
return False, (file.kb_name, file.filename, msg)
kwargs_list = []
for i, file in enumerate(files):
kwargs = {}
try:
if isinstance(file, tuple) and len(file) >= 2:
filename = file[0]
kb_name = file[1]
file = KnowledgeFile(filename=filename, knowledge_base_name=kb_name)
elif isinstance(file, dict):
filename = file.pop("filename")
kb_name = file.pop("kb_name")
kwargs.update(file)
file = KnowledgeFile(filename=filename, knowledge_base_name=kb_name)
kwargs["file"] = file
kwargs["chunk_size"] = chunk_size
kwargs["chunk_overlap"] = chunk_overlap
kwargs["zh_title_enhance"] = zh_title_enhance
kwargs_list.append(kwargs)
except Exception as e:
yield False, (kb_name, filename, str(e))
for result in run_in_thread_pool(func=file2docs, params=kwargs_list):
yield result
class KBServiceFactory:
def get_service(kb_name: str,
vector_store_type: Union[str, SupportedVSType],
embed_model: str = EMBEDDING_MODEL,
) -> KBService:
if isinstance(vector_store_type, str):
vector_store_type = getattr(SupportedVSType, vector_store_type.upper())
if SupportedVSType.FAISS == vector_store_type:
from server.knowledge_base.kb_service.faiss_kb_service import FaissKBService
return FaissKBService(kb_name, embed_model=embed_model)
elif SupportedVSType.PG == vector_store_type:
from server.knowledge_base.kb_service.pg_kb_service import PGKBService
return PGKBService(kb_name, embed_model=embed_model)
elif SupportedVSType.MILVUS == vector_store_type:
from server.knowledge_base.kb_service.milvus_kb_service import MilvusKBService
return MilvusKBService(kb_name,embed_model=embed_model)
elif SupportedVSType.ZILLIZ == vector_store_type:
from server.knowledge_base.kb_service.zilliz_kb_service import ZillizKBService
return ZillizKBService(kb_name, embed_model=embed_model)
elif SupportedVSType.DEFAULT == vector_store_type:
from server.knowledge_base.kb_service.milvus_kb_service import MilvusKBService
return MilvusKBService(kb_name,
embed_model=embed_model) # other milvus parameters are set in model_config.kbs_config
elif SupportedVSType.ES == vector_store_type:
from server.knowledge_base.kb_service.es_kb_service import ESKBService
return ESKBService(kb_name, embed_model=embed_model)
elif SupportedVSType.CHROMADB == vector_store_type:
from server.knowledge_base.kb_service.chromadb_kb_service import ChromaKBService
return ChromaKBService(kb_name, embed_model=embed_model)
elif SupportedVSType.DEFAULT == vector_store_type: # kb_exists of default kbservice is False, to make validation easier.
from server.knowledge_base.kb_service.default_kb_service import DefaultKBService
return DefaultKBService(kb_name)
def get_service_by_name(kb_name: str) -> KBService:
_, vs_type, embed_model = load_kb_from_db(kb_name)
if _ is None: # kb not in db, just return None
return None
return KBServiceFactory.get_service(kb_name, vs_type, embed_model)
def get_default():
return KBServiceFactory.get_service("default", SupportedVSType.DEFAULT)
The provided code snippet includes necessary dependencies for implementing the `folder2db` function. Write a Python function `def folder2db( kb_names: List[str], mode: Literal["recreate_vs", "update_in_db", "increment"], vs_type: Literal["faiss", "milvus", "pg", "chromadb"] = DEFAULT_VS_TYPE, embed_model: str = EMBEDDING_MODEL, chunk_size: int = CHUNK_SIZE, chunk_overlap: int = OVERLAP_SIZE, zh_title_enhance: bool = ZH_TITLE_ENHANCE, )` to solve the following problem:
use existing files in the local folder to populate the database and/or vector store. set parameter `mode` to: recreate_vs: recreate all vector stores and fill info into the database using existing files in the local folder fill_info_only(disabled): do not create a vector store, fill the database with info from existing files only update_in_db: update the vector store and database info using only local files that already exist in the database increment: create vector store and database info only for local files that do not yet exist in the database
Here is the function:
def folder2db(
kb_names: List[str],
mode: Literal["recreate_vs", "update_in_db", "increment"],
vs_type: Literal["faiss", "milvus", "pg", "chromadb"] = DEFAULT_VS_TYPE,
embed_model: str = EMBEDDING_MODEL,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
):
"""
use existing files in the local folder to populate the database and/or vector store.
set parameter `mode` to:
recreate_vs: recreate all vector stores and fill info into the database using existing files in the local folder
fill_info_only(disabled): do not create a vector store, fill the database with info from existing files only
update_in_db: update the vector store and database info using only local files that already exist in the database
increment: create vector store and database info only for local files that do not yet exist in the database
"""
def files2vs(kb_name: str, kb_files: List[KnowledgeFile]):
for success, result in files2docs_in_thread(kb_files,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
zh_title_enhance=zh_title_enhance):
if success:
_, filename, docs = result
print(f"正在将 {kb_name}/{filename} 添加到向量库,共包含{len(docs)}条文档")
kb_file = KnowledgeFile(filename=filename, knowledge_base_name=kb_name)
kb_file.splited_docs = docs
kb.add_doc(kb_file=kb_file, not_refresh_vs_cache=True)
else:
print(result)
kb_names = kb_names or list_kbs_from_folder()
for kb_name in kb_names:
kb = KBServiceFactory.get_service(kb_name, vs_type, embed_model)
if not kb.exists():
kb.create_kb()
# clear the vector store and rebuild it from local files
if mode == "recreate_vs":
kb.clear_vs()
kb.create_kb()
kb_files = file_to_kbfile(kb_name, list_files_from_folder(kb_name))
files2vs(kb_name, kb_files)
kb.save_vector_store()
# # Do not vectorize file contents; only store file metadata in the database.
# # Since the database now stores much text-splitting related information, storing file metadata alone is of little value, so this feature was removed.
# elif mode == "fill_info_only":
# files = list_files_from_folder(kb_name)
# kb_files = file_to_kbfile(kb_name, files)
# for kb_file in kb_files:
# add_file_to_db(kb_file)
# print(f"Added {kb_name}/{kb_file.filename} to the database")
# using the file list in the database as the baseline, update the vector store from local files
elif mode == "update_in_db":
files = kb.list_files()
kb_files = file_to_kbfile(kb_name, files)
files2vs(kb_name, kb_files)
kb.save_vector_store()
# compare the local folder with the file list in the database and perform incremental vectorization
elif mode == "increment":
db_files = kb.list_files()
folder_files = list_files_from_folder(kb_name)
files = list(set(folder_files) - set(db_files))
kb_files = file_to_kbfile(kb_name, files)
files2vs(kb_name, kb_files)
kb.save_vector_store()
else:
print(f"unsupported migrate mode: {mode}") | use existed files in local folder to populate database and/or vector store. set parameter `mode` to: recreate_vs: recreate all vector store and fill info to database using existed files in local folder fill_info_only(disabled): do not create vector store, fill info to db using existed files only update_in_db: update vector store and database info using local files that existed in database only increment: create vector store and database info for local files that not existed in database only |
22,230 | from configs import (
EMBEDDING_MODEL, DEFAULT_VS_TYPE, ZH_TITLE_ENHANCE,
CHUNK_SIZE, OVERLAP_SIZE,
logger, log_verbose
)
from server.knowledge_base.utils import (
get_file_path, list_kbs_from_folder,
list_files_from_folder, files2docs_in_thread,
KnowledgeFile
)
from server.knowledge_base.kb_service.base import KBServiceFactory
from server.db.models.conversation_model import ConversationModel
from server.db.models.message_model import MessageModel
from server.db.repository.knowledge_file_repository import add_file_to_db
from server.db.repository.knowledge_metadata_repository import add_summary_to_db
from server.db.base import Base, engine
from server.db.session import session_scope
import os
from dateutil.parser import parse
from typing import Literal, List
def file_to_kbfile(kb_name: str, files: List[str]) -> List[KnowledgeFile]:
kb_files = []
for file in files:
try:
kb_file = KnowledgeFile(filename=file, knowledge_base_name=kb_name)
kb_files.append(kb_file)
except Exception as e:
msg = f"{e},已跳过"
logger.error(f'{e.__class__.__name__}: {msg}',
exc_info=e if log_verbose else None)
return kb_files
def list_files_from_folder(kb_name: str):
doc_path = get_doc_path(kb_name)
result = []
def is_skiped_path(path: str):
tail = os.path.basename(path).lower()
for x in ["temp", "tmp", ".", "~$"]:
if tail.startswith(x):
return True
return False
def process_entry(entry):
if is_skiped_path(entry.path):
return
if entry.is_symlink():
target_path = os.path.realpath(entry.path)
with os.scandir(target_path) as target_it:
for target_entry in target_it:
process_entry(target_entry)
elif entry.is_file():
file_path = (Path(os.path.relpath(entry.path, doc_path)).as_posix()) # normalize the path to posix format
result.append(file_path)
elif entry.is_dir():
with os.scandir(entry.path) as it:
for sub_entry in it:
process_entry(sub_entry)
with os.scandir(doc_path) as it:
for entry in it:
process_entry(entry)
return result
class KBServiceFactory:
def get_service(kb_name: str,
vector_store_type: Union[str, SupportedVSType],
embed_model: str = EMBEDDING_MODEL,
) -> KBService:
if isinstance(vector_store_type, str):
vector_store_type = getattr(SupportedVSType, vector_store_type.upper())
if SupportedVSType.FAISS == vector_store_type:
from server.knowledge_base.kb_service.faiss_kb_service import FaissKBService
return FaissKBService(kb_name, embed_model=embed_model)
elif SupportedVSType.PG == vector_store_type:
from server.knowledge_base.kb_service.pg_kb_service import PGKBService
return PGKBService(kb_name, embed_model=embed_model)
elif SupportedVSType.MILVUS == vector_store_type:
from server.knowledge_base.kb_service.milvus_kb_service import MilvusKBService
return MilvusKBService(kb_name,embed_model=embed_model)
elif SupportedVSType.ZILLIZ == vector_store_type:
from server.knowledge_base.kb_service.zilliz_kb_service import ZillizKBService
return ZillizKBService(kb_name, embed_model=embed_model)
elif SupportedVSType.DEFAULT == vector_store_type:
from server.knowledge_base.kb_service.milvus_kb_service import MilvusKBService
return MilvusKBService(kb_name,
embed_model=embed_model) # other milvus parameters are set in model_config.kbs_config
elif SupportedVSType.ES == vector_store_type:
from server.knowledge_base.kb_service.es_kb_service import ESKBService
return ESKBService(kb_name, embed_model=embed_model)
elif SupportedVSType.CHROMADB == vector_store_type:
from server.knowledge_base.kb_service.chromadb_kb_service import ChromaKBService
return ChromaKBService(kb_name, embed_model=embed_model)
elif SupportedVSType.DEFAULT == vector_store_type: # kb_exists of default kbservice is False, to make validation easier.
from server.knowledge_base.kb_service.default_kb_service import DefaultKBService
return DefaultKBService(kb_name)
def get_service_by_name(kb_name: str) -> KBService:
_, vs_type, embed_model = load_kb_from_db(kb_name)
if _ is None: # kb not in db, just return None
return None
return KBServiceFactory.get_service(kb_name, vs_type, embed_model)
def get_default():
return KBServiceFactory.get_service("default", SupportedVSType.DEFAULT)
The provided code snippet includes necessary dependencies for implementing the `prune_db_docs` function. Write a Python function `def prune_db_docs(kb_names: List[str])` to solve the following problem:
delete docs from the database that no longer exist in the local folder. it is used to remove database docs after the user deleted some doc files in the file browser
Here is the function:
def prune_db_docs(kb_names: List[str]):
"""
delete docs from the database that no longer exist in the local folder.
it is used to remove database docs after the user deleted some doc files in the file browser
"""
for kb_name in kb_names:
kb = KBServiceFactory.get_service_by_name(kb_name)
if kb is not None:
files_in_db = kb.list_files()
files_in_folder = list_files_from_folder(kb_name)
files = list(set(files_in_db) - set(files_in_folder))
kb_files = file_to_kbfile(kb_name, files)
for kb_file in kb_files:
kb.delete_doc(kb_file, not_refresh_vs_cache=True)
print(f"success to delete docs for file: {kb_name}/{kb_file.filename}")
kb.save_vector_store() | delete docs in database that not existed in local folder. it is used to delete database docs after user deleted some doc files in file browser |
22,231 | from configs import (
EMBEDDING_MODEL, DEFAULT_VS_TYPE, ZH_TITLE_ENHANCE,
CHUNK_SIZE, OVERLAP_SIZE,
logger, log_verbose
)
from server.knowledge_base.utils import (
get_file_path, list_kbs_from_folder,
list_files_from_folder, files2docs_in_thread,
KnowledgeFile
)
from server.knowledge_base.kb_service.base import KBServiceFactory
from server.db.models.conversation_model import ConversationModel
from server.db.models.message_model import MessageModel
from server.db.repository.knowledge_file_repository import add_file_to_db
from server.db.repository.knowledge_metadata_repository import add_summary_to_db
from server.db.base import Base, engine
from server.db.session import session_scope
import os
from dateutil.parser import parse
from typing import Literal, List
def get_file_path(knowledge_base_name: str, doc_name: str):
return os.path.join(get_doc_path(knowledge_base_name), doc_name)
def list_files_from_folder(kb_name: str):
doc_path = get_doc_path(kb_name)
result = []
def is_skiped_path(path: str):
tail = os.path.basename(path).lower()
for x in ["temp", "tmp", ".", "~$"]:
if tail.startswith(x):
return True
return False
def process_entry(entry):
if is_skiped_path(entry.path):
return
if entry.is_symlink():
target_path = os.path.realpath(entry.path)
with os.scandir(target_path) as target_it:
for target_entry in target_it:
process_entry(target_entry)
elif entry.is_file():
file_path = (Path(os.path.relpath(entry.path, doc_path)).as_posix()) # normalize the path to posix format
result.append(file_path)
elif entry.is_dir():
with os.scandir(entry.path) as it:
for sub_entry in it:
process_entry(sub_entry)
with os.scandir(doc_path) as it:
for entry in it:
process_entry(entry)
return result
class KBServiceFactory:
def get_service(kb_name: str,
vector_store_type: Union[str, SupportedVSType],
embed_model: str = EMBEDDING_MODEL,
) -> KBService:
if isinstance(vector_store_type, str):
vector_store_type = getattr(SupportedVSType, vector_store_type.upper())
if SupportedVSType.FAISS == vector_store_type:
from server.knowledge_base.kb_service.faiss_kb_service import FaissKBService
return FaissKBService(kb_name, embed_model=embed_model)
elif SupportedVSType.PG == vector_store_type:
from server.knowledge_base.kb_service.pg_kb_service import PGKBService
return PGKBService(kb_name, embed_model=embed_model)
elif SupportedVSType.MILVUS == vector_store_type:
from server.knowledge_base.kb_service.milvus_kb_service import MilvusKBService
return MilvusKBService(kb_name,embed_model=embed_model)
elif SupportedVSType.ZILLIZ == vector_store_type:
from server.knowledge_base.kb_service.zilliz_kb_service import ZillizKBService
return ZillizKBService(kb_name, embed_model=embed_model)
elif SupportedVSType.DEFAULT == vector_store_type:
from server.knowledge_base.kb_service.milvus_kb_service import MilvusKBService
return MilvusKBService(kb_name,
embed_model=embed_model) # other milvus parameters are set in model_config.kbs_config
elif SupportedVSType.ES == vector_store_type:
from server.knowledge_base.kb_service.es_kb_service import ESKBService
return ESKBService(kb_name, embed_model=embed_model)
elif SupportedVSType.CHROMADB == vector_store_type:
from server.knowledge_base.kb_service.chromadb_kb_service import ChromaKBService
return ChromaKBService(kb_name, embed_model=embed_model)
elif SupportedVSType.DEFAULT == vector_store_type: # kb_exists of default kbservice is False, to make validation easier.
from server.knowledge_base.kb_service.default_kb_service import DefaultKBService
return DefaultKBService(kb_name)
def get_service_by_name(kb_name: str) -> KBService:
_, vs_type, embed_model = load_kb_from_db(kb_name)
if _ is None: # kb not in db, just return None
return None
return KBServiceFactory.get_service(kb_name, vs_type, embed_model)
def get_default():
return KBServiceFactory.get_service("default", SupportedVSType.DEFAULT)
The provided code snippet includes necessary dependencies for implementing the `prune_folder_files` function. Write a Python function `def prune_folder_files(kb_names: List[str])` to solve the following problem:
delete doc files from the local folder that do not exist in the database. it is used to free local disk space by deleting unused doc files.
Here is the function:
def prune_folder_files(kb_names: List[str]):
"""
delete doc files from the local folder that do not exist in the database.
it is used to free local disk space by deleting unused doc files.
"""
for kb_name in kb_names:
kb = KBServiceFactory.get_service_by_name(kb_name)
if kb is not None:
files_in_db = kb.list_files()
files_in_folder = list_files_from_folder(kb_name)
files = list(set(files_in_folder) - set(files_in_db))
for file in files:
os.remove(get_file_path(kb_name, file))
print(f"success to delete file: {kb_name}/{file}") | delete doc files in local folder that not existed in database. it is used to free local disk space by delete unused doc files. |
22,232 | from configs import CACHED_VS_NUM, CACHED_MEMO_VS_NUM
from server.knowledge_base.kb_cache.base import *
from server.knowledge_base.kb_service.base import EmbeddingsFunAdapter
from server.utils import load_local_embeddings
from server.knowledge_base.utils import get_vs_path
from langchain.vectorstores.faiss import FAISS
from langchain.docstore.in_memory import InMemoryDocstore
from langchain.schema import Document
import os
from langchain.schema import Document
def _new_ds_search(self, search: str) -> Union[str, Document]:
if search not in self._dict:
return f"ID {search} not found."
else:
doc = self._dict[search]
if isinstance(doc, Document):
doc.metadata["id"] = search
return doc | null |
22,233 | from configs import CACHED_VS_NUM, CACHED_MEMO_VS_NUM
from server.knowledge_base.kb_cache.base import *
from server.knowledge_base.kb_service.base import EmbeddingsFunAdapter
from server.utils import load_local_embeddings
from server.knowledge_base.utils import get_vs_path
from langchain.vectorstores.faiss import FAISS
from langchain.docstore.in_memory import InMemoryDocstore
from langchain.schema import Document
import os
from langchain.schema import Document
kb_faiss_pool = KBFaissPool(cache_num=CACHED_VS_NUM)
def worker(vs_name: str, name: str):
vs_name = "samples"
time.sleep(random.randint(1, 5))
embeddings = load_local_embeddings()
r = random.randint(1, 3)
with kb_faiss_pool.load_vector_store(vs_name).acquire(name) as vs:
if r == 1: # add docs
ids = vs.add_texts([f"text added by {name}"], embeddings=embeddings)
pprint(ids)
elif r == 2: # search docs
docs = vs.similarity_search_with_score(f"{name}", k=3, score_threshold=1.0)
pprint(docs)
if r == 3: # delete docs
logger.warning(f"清除 {vs_name} by {name}")
kb_faiss_pool.get(vs_name).clear() | null |
22,234 | import uuid
from typing import Any, Dict, List, Tuple
import chromadb
from chromadb.api.types import (GetResult, QueryResult)
from langchain.docstore.document import Document
from configs import SCORE_THRESHOLD
from server.knowledge_base.kb_service.base import (EmbeddingsFunAdapter,
KBService, SupportedVSType)
from server.knowledge_base.utils import KnowledgeFile, get_kb_path, get_vs_path
def _get_result_to_documents(get_result: GetResult) -> List[Document]:
if not get_result['documents']:
return []
_metadatas = get_result['metadatas'] if get_result['metadatas'] else [{}] * len(get_result['documents'])
document_list = []
for page_content, metadata in zip(get_result['documents'], _metadatas):
document_list.append(Document(**{'page_content': page_content, 'metadata': metadata}))
return document_list | null |
22,235 | import uuid
from typing import Any, Dict, List, Tuple
import chromadb
from chromadb.api.types import (GetResult, QueryResult)
from langchain.docstore.document import Document
from configs import SCORE_THRESHOLD
from server.knowledge_base.kb_service.base import (EmbeddingsFunAdapter,
KBService, SupportedVSType)
from server.knowledge_base.utils import KnowledgeFile, get_kb_path, get_vs_path
The provided code snippet includes necessary dependencies for implementing the `_results_to_docs_and_scores` function. Write a Python function `def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]` to solve the following problem:
from langchain_community.vectorstores.chroma import Chroma
Here is the function:
def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]:
"""
from langchain_community.vectorstores.chroma import Chroma
"""
return [
# TODO: Chroma can do batch querying,
(Document(page_content=result[0], metadata=result[1] or {}), result[2])
for result in zip(
results["documents"][0],
results["metadatas"][0],
results["distances"][0],
)
] | from langchain_community.vectorstores.chroma import Chroma |
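A self-contained sketch of the conversion above using a hand-built, Chroma-shaped `results` dict (values are illustrative), assuming `_results_to_docs_and_scores` is in scope:

```python
# Shape mirrors a chromadb query result: one nested list per queried embedding.
results = {
    "documents": [["first chunk", "second chunk"]],
    "metadatas": [[{"source": "a.md"}, None]],
    "distances": [[0.12, 0.34]],
}

for doc, score in _results_to_docs_and_scores(results):
    print(doc.page_content, doc.metadata, score)
# -> first chunk {'source': 'a.md'} 0.12
# -> second chunk {} 0.34
```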
22,236 | import operator
from abc import ABC, abstractmethod
import os
from pathlib import Path
import numpy as np
from langchain.embeddings.base import Embeddings
from langchain.docstore.document import Document
from server.db.repository.knowledge_base_repository import (
add_kb_to_db, delete_kb_from_db, list_kbs_from_db, kb_exists,
load_kb_from_db, get_kb_detail,
)
from server.db.repository.knowledge_file_repository import (
add_file_to_db, delete_file_from_db, delete_files_from_db, file_exists_in_db,
count_files_from_db, list_files_from_db, get_file_detail, delete_file_from_db,
list_docs_from_db,
)
from configs import (kbs_config, VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD,
EMBEDDING_MODEL, KB_INFO)
from server.knowledge_base.utils import (
get_kb_path, get_doc_path, KnowledgeFile,
list_kbs_from_folder, list_files_from_folder,
)
from typing import List, Union, Dict, Optional, Tuple
from server.embeddings_api import embed_texts, aembed_texts, embed_documents
from server.knowledge_base.model.kb_document_model import DocumentWithVSId
The provided code snippet includes necessary dependencies for implementing the `normalize` function. Write a Python function `def normalize(embeddings: List[List[float]]) -> np.ndarray` to solve the following problem:
A replacement for sklearn.preprocessing.normalize (using L2), avoiding the need to install scipy and scikit-learn
Here is the function:
def normalize(embeddings: List[List[float]]) -> np.ndarray:
'''
A replacement for sklearn.preprocessing.normalize (using L2), avoiding the need to install scipy and scikit-learn.
'''
norm = np.linalg.norm(embeddings, axis=1)
norm = np.reshape(norm, (norm.shape[0], 1))
norm = np.tile(norm, (1, len(embeddings[0])))
return np.divide(embeddings, norm) | A replacement for sklearn.preprocessing.normalize (using L2), avoiding the need to install scipy and scikit-learn. |
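A self-contained check of the L2 normalization above, assuming `normalize` is in scope and numpy is installed:

```python
import numpy as np

embeddings = [[3.0, 4.0], [1.0, 0.0]]
normalized = normalize(embeddings)

print(normalized)                          # -> [[0.6, 0.8], [1.0, 0.0]]
print(np.linalg.norm(normalized, axis=1))  # each row now has unit L2 norm
```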
22,237 | import operator
from abc import ABC, abstractmethod
import os
from pathlib import Path
import numpy as np
from langchain.embeddings.base import Embeddings
from langchain.docstore.document import Document
from server.db.repository.knowledge_base_repository import (
add_kb_to_db, delete_kb_from_db, list_kbs_from_db, kb_exists,
load_kb_from_db, get_kb_detail,
)
from server.db.repository.knowledge_file_repository import (
add_file_to_db, delete_file_from_db, delete_files_from_db, file_exists_in_db,
count_files_from_db, list_files_from_db, get_file_detail, delete_file_from_db,
list_docs_from_db,
)
from configs import (kbs_config, VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD,
EMBEDDING_MODEL, KB_INFO)
from server.knowledge_base.utils import (
get_kb_path, get_doc_path, KnowledgeFile,
list_kbs_from_folder, list_files_from_folder,
)
from typing import List, Union, Dict, Optional, Tuple
from server.embeddings_api import embed_texts, aembed_texts, embed_documents
from server.knowledge_base.model.kb_document_model import DocumentWithVSId
def score_threshold_process(score_threshold, k, docs):
if score_threshold is not None:
cmp = (
operator.le
)
docs = [
(doc, similarity)
for doc, similarity in docs
if cmp(similarity, score_threshold)
]
return docs[:k] | null |
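A usage sketch of the score filter above, assuming `score_threshold_process` is in scope; the scores follow a distance convention (lower is more similar), which is why `operator.le` is used:

```python
# (document, distance) pairs from a similarity search (illustrative values).
docs = [("doc-a", 0.21), ("doc-b", 0.55), ("doc-c", 0.80)]

# Keep hits with distance <= 0.6, then truncate to the top 2.
print(score_threshold_process(score_threshold=0.6, k=2, docs=docs))
# -> [('doc-a', 0.21), ('doc-b', 0.55)]
```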
22,238 | from fastchat.conversation import Conversation
from server.model_workers.base import *
from fastchat import conversation as conv
import sys
import json
from server.model_workers import SparkApi
import websockets
from server.utils import iter_over_async, asyncio
from typing import List, Dict
async def request(appid, api_key, api_secret, Spark_url, domain, question, temperature, max_token):
wsParam = SparkApi.Ws_Param(appid, api_key, api_secret, Spark_url)
wsUrl = wsParam.create_url()
data = SparkApi.gen_params(appid, domain, question, temperature, max_token)
async with websockets.connect(wsUrl) as ws:
await ws.send(json.dumps(data, ensure_ascii=False))
finish = False
while not finish:
chunk = await ws.recv()
response = json.loads(chunk)
if response.get("header", {}).get("status") == 2:
finish = True
if text := response.get("payload", {}).get("choices", {}).get("text"):
yield text[0]["content"] | null |
22,239 | from contextlib import contextmanager
import httpx
from fastchat.conversation import Conversation
from httpx_sse import EventSource
from server.model_workers.base import *
from fastchat import conversation as conv
import sys
from typing import List, Dict, Iterator, Literal, Any
import jwt
import time
@contextmanager
def connect_sse(client: httpx.Client, method: str, url: str, **kwargs: Any):
with client.stream(method, url, **kwargs) as response:
yield EventSource(response)
22,240 | from contextlib import contextmanager
import httpx
from fastchat.conversation import Conversation
from httpx_sse import EventSource
from server.model_workers.base import *
from fastchat import conversation as conv
import sys
from typing import List, Dict, Iterator, Literal, Any
import jwt
import time
def generate_token(apikey: str, exp_seconds: int):
try:
id, secret = apikey.split(".")
except Exception as e:
raise Exception("invalid apikey", e)
payload = {
"api_key": id,
"exp": int(round(time.time() * 1000)) + exp_seconds * 1000,
"timestamp": int(round(time.time() * 1000)),
}
return jwt.encode(
payload,
secret,
algorithm="HS256",
headers={"alg": "HS256", "sign_type": "SIGN"},
) | null |
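A usage sketch of the token helper above, assuming `generate_token` is in scope and PyJWT is installed; the API key below is a made-up placeholder in the `<id>.<secret>` format the function splits on:

```python
# Placeholder credential only; real keys follow the same "<id>.<secret>" layout the function expects.
fake_apikey = "my-id.my-secret"

token = generate_token(fake_apikey, exp_seconds=60)
print(token[:20], "...")  # a signed JWT string with HS256 / SIGN headers
```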
22,241 | import sys
from fastchat.conversation import Conversation
from server.model_workers.base import *
from server.utils import get_httpx_client
from cachetools import cached, TTLCache
import json
from fastchat import conversation as conv
import sys
from server.model_workers.base import ApiEmbeddingsParams
from typing import List, Literal, Dict
from configs import logger, log_verbose
The provided code snippet includes necessary dependencies for implementing the `get_baidu_access_token` function. Write a Python function `def get_baidu_access_token(api_key: str, secret_key: str) -> str` to solve the following problem:
Generate an authentication signature (Access Token) using the AK and SK. :return: access_token, or None on error
Here is the function:
def get_baidu_access_token(api_key: str, secret_key: str) -> str:
"""
Generate an authentication signature (Access Token) using the AK and SK.
:return: access_token, or None on error
"""
url = "https://aip.baidubce.com/oauth/2.0/token"
params = {"grant_type": "client_credentials", "client_id": api_key, "client_secret": secret_key}
try:
with get_httpx_client() as client:
return client.get(url, params=params).json().get("access_token")
except Exception as e:
print(f"failed to get token from baidu: {e}") | 使用 AK,SK 生成鉴权签名(Access Token) :return: access_token,或是None(如果错误) |
22,242 | import json
import time
import hashlib
from fastchat.conversation import Conversation
from server.model_workers.base import *
from server.utils import get_httpx_client
from fastchat import conversation as conv
import sys
import json
from typing import List, Literal, Dict
from configs import logger, log_verbose
def calculate_md5(input_string):
md5 = hashlib.md5()
md5.update(input_string.encode('utf-8'))
encrypted = md5.hexdigest()
return encrypted | null |
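A quick check of the helper above, assuming `calculate_md5` is in scope (hashlib is standard library):

```python
# MD5 of the UTF-8 encoded string, returned as a 32-character hex digest.
print(calculate_md5("hello"))       # -> 5d41402abc4b2a76b9719d911017c592
print(len(calculate_md5("hello")))  # -> 32
```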
22,243 | import sys
import os
from llm_api_stale import launch_all, parser, controller_args, worker_args, server_args
from api import create_app
import uvicorn
def create_app(run_mode: str = None):
app = FastAPI(
title="Langchain-Chatchat API Server",
version=VERSION
)
MakeFastAPIOffline(app)
# Add CORS middleware to allow all origins
# set OPEN_DOMAIN=True in config.py to allow cross-domain requests
if OPEN_CROSS_DOMAIN:
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
mount_app_routes(app, run_mode=run_mode)
return app
def run_api(host, port, **kwargs):
app = create_app()
if kwargs.get("ssl_keyfile") and kwargs.get("ssl_certfile"):
uvicorn.run(app,
host=host,
port=port,
ssl_keyfile=kwargs.get("ssl_keyfile"),
ssl_certfile=kwargs.get("ssl_certfile"),
)
else:
uvicorn.run(app, host=host, port=port) | null |
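A minimal, self-contained sketch of the same pattern (a FastAPI app with permissive CORS served by uvicorn); the app name `demo_app` and the `/ping` route are illustrative, and the project-specific helpers (`MakeFastAPIOffline`, `mount_app_routes`) are omitted:

```python
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import uvicorn

demo_app = FastAPI(title="Demo API Server", version="0.1.0")

# Permissive CORS, mirroring the OPEN_CROSS_DOMAIN branch above.
demo_app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

@demo_app.get("/ping")
def ping():
    return {"status": "ok"}

if __name__ == "__main__":
    uvicorn.run(demo_app, host="127.0.0.1", port=7861)
```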
22,244 | from langchain.utilities.bing_search import BingSearchAPIWrapper
from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
from configs import (BING_SEARCH_URL, BING_SUBSCRIPTION_KEY, METAPHOR_API_KEY,
LLM_MODELS, SEARCH_ENGINE_TOP_K, TEMPERATURE, OVERLAP_SIZE)
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.prompts.chat import ChatPromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.docstore.document import Document
from fastapi import Body
from fastapi.concurrency import run_in_threadpool
from sse_starlette import EventSourceResponse
from server.utils import wrap_done, get_ChatOpenAI
from server.utils import BaseResponse, get_prompt_template
from server.chat.utils import History
from typing import AsyncIterable
import asyncio
import json
from typing import List, Optional, Dict
from strsimpy.normalized_levenshtein import NormalizedLevenshtein
from markdownify import markdownify
def bing_search(text, result_len=SEARCH_ENGINE_TOP_K, **kwargs):
if not (BING_SEARCH_URL and BING_SUBSCRIPTION_KEY):
return [{"snippet": "please set BING_SUBSCRIPTION_KEY and BING_SEARCH_URL in os ENV",
"title": "env info is not found",
"link": "https://python.langchain.com/en/latest/modules/agents/tools/examples/bing_search.html"}]
search = BingSearchAPIWrapper(bing_subscription_key=BING_SUBSCRIPTION_KEY,
bing_search_url=BING_SEARCH_URL)
return search.results(text, result_len) | null |
22,245 | from langchain.utilities.bing_search import BingSearchAPIWrapper
from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
from configs import (BING_SEARCH_URL, BING_SUBSCRIPTION_KEY, METAPHOR_API_KEY,
LLM_MODELS, SEARCH_ENGINE_TOP_K, TEMPERATURE, OVERLAP_SIZE)
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.prompts.chat import ChatPromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.docstore.document import Document
from fastapi import Body
from fastapi.concurrency import run_in_threadpool
from sse_starlette import EventSourceResponse
from server.utils import wrap_done, get_ChatOpenAI
from server.utils import BaseResponse, get_prompt_template
from server.chat.utils import History
from typing import AsyncIterable
import asyncio
import json
from typing import List, Optional, Dict
from strsimpy.normalized_levenshtein import NormalizedLevenshtein
from markdownify import markdownify
def duckduckgo_search(text, result_len=SEARCH_ENGINE_TOP_K, **kwargs):
search = DuckDuckGoSearchAPIWrapper()
return search.results(text, result_len) | null |
22,246 | from langchain.utilities.bing_search import BingSearchAPIWrapper
from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
from configs import (BING_SEARCH_URL, BING_SUBSCRIPTION_KEY, METAPHOR_API_KEY,
LLM_MODELS, SEARCH_ENGINE_TOP_K, TEMPERATURE, OVERLAP_SIZE)
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.prompts.chat import ChatPromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.docstore.document import Document
from fastapi import Body
from fastapi.concurrency import run_in_threadpool
from sse_starlette import EventSourceResponse
from server.utils import wrap_done, get_ChatOpenAI
from server.utils import BaseResponse, get_prompt_template
from server.chat.utils import History
from typing import AsyncIterable
import asyncio
import json
from typing import List, Optional, Dict
from strsimpy.normalized_levenshtein import NormalizedLevenshtein
from markdownify import markdownify
def metaphor_search(
text: str,
result_len: int = SEARCH_ENGINE_TOP_K,
split_result: bool = False,
chunk_size: int = 500,
chunk_overlap: int = OVERLAP_SIZE,
) -> List[Dict]:
from metaphor_python import Metaphor
if not METAPHOR_API_KEY:
return []
client = Metaphor(METAPHOR_API_KEY)
search = client.search(text, num_results=result_len, use_autoprompt=True)
contents = search.get_contents().contents
for x in contents:
x.extract = markdownify(x.extract)
# metaphor returns long texts, so split them before re-ranking
if split_result:
docs = [Document(page_content=x.extract,
metadata={"link": x.url, "title": x.title})
for x in contents]
text_splitter = RecursiveCharacterTextSplitter(["\n\n", "\n", ".", " "],
chunk_size=chunk_size,
chunk_overlap=chunk_overlap)
splitted_docs = text_splitter.split_documents(docs)
# put the split documents into a temporary vector store and re-select the TOP_K documents
if len(splitted_docs) > result_len:
normal = NormalizedLevenshtein()
for x in splitted_docs:
x.metadata["score"] = normal.similarity(text, x.page_content)
splitted_docs.sort(key=lambda x: x.metadata["score"], reverse=True)
splitted_docs = splitted_docs[:result_len]
docs = [{"snippet": x.page_content,
"link": x.metadata["link"],
"title": x.metadata["title"]}
for x in splitted_docs]
else:
docs = [{"snippet": x.extract,
"link": x.url,
"title": x.title}
for x in contents]
return docs | null |
22,247 | import streamlit as st
from webui_pages.utils import *
from streamlit_option_menu import option_menu
from webui_pages import *
import os
from server.llm_api_stale import string_args,launch_all,controller_args,worker_args,server_args,LOG_PATH
from server.api_allinone_stale import parser, api_args
import subprocess
LOG_PATH = "./logs/"
def string_args(args, args_list):
api_args = ["api-host", "api-port", "ssl_keyfile", "ssl_certfile"]
def launch_api(args,args_list=api_args,log_name=None):
print("Launching api ...")
print("启动API服务...")
if not log_name:
log_name = f"{LOG_PATH}api_{args.api_host}_{args.api_port}"
print(f"logs on api are written in {log_name}")
print(f"API日志位于{log_name}下,如启动异常请查看日志")
args_str = string_args(args,args_list)
api_sh = "python server/{script} {args_str} >{log_name}.log 2>&1 &".format(
script="api.py",args_str=args_str,log_name=log_name)
subprocess.run(api_sh, shell=True, check=True)
print("launch api done!")
print("启动API服务完毕.") | null |
22,248 | import streamlit as st
from webui_pages.utils import *
from streamlit_option_menu import option_menu
from webui_pages import *
import os
from server.llm_api_stale import string_args,launch_all,controller_args,worker_args,server_args,LOG_PATH
from server.api_allinone_stale import parser, api_args
import subprocess
web_args = ["server.port","theme.base","theme.primaryColor","theme.secondaryBackgroundColor","theme.textColor"]
LOG_PATH = "./logs/"
def string_args(args, args_list):
"""将args中的key转化为字符串"""
args_str = ""
for key, value in args._get_kwargs():
# keys from args._get_kwargs use _ as the separator; convert first, then check whether the key is in the given args list
key = key.replace("_", "-")
if key not in args_list:
continue
# in fastchat, port and host have no prefix, so strip the prefix
key = key.split("-")[-1] if re.search("port|host", key) else key
if not value:
pass
# 1==True -> True
elif isinstance(value, bool) and value == True:
args_str += f" --{key} "
elif isinstance(value, list) or isinstance(value, tuple) or isinstance(value, set):
value = " ".join(value)
args_str += f" --{key} {value} "
else:
args_str += f" --{key} {value} "
return args_str
def launch_webui(args,args_list=web_args,log_name=None):
print("Launching webui...")
print("启动webui服务...")
if not log_name:
log_name = f"{LOG_PATH}webui"
args_str = string_args(args,args_list)
if args.nohup:
print(f"logs on api are written in {log_name}")
print(f"webui服务日志位于{log_name}下,如启动异常请查看日志")
webui_sh = "streamlit run webui.py {args_str} >{log_name}.log 2>&1 &".format(
args_str=args_str,log_name=log_name)
else:
webui_sh = "streamlit run webui.py {args_str}".format(
args_str=args_str)
subprocess.run(webui_sh, shell=True, check=True)
print("launch webui done!")
print("启动webui服务完毕.") | null |
22,249 | import json
from server.chat.search_engine_chat import search_engine_chat
from configs import VECTOR_SEARCH_TOP_K, MAX_TOKENS
import asyncio
from server.agent import model_container
from pydantic import BaseModel, Field
async def search_engine_iter(query: str):
response = await search_engine_chat(query=query,
search_engine_name="bing", # 这里切换搜索引擎
model_name=model_container.MODEL.model_name,
temperature=0.01, # when the Agent searches the internet, temperature is set to 0.01
history=[],
top_k = VECTOR_SEARCH_TOP_K,
max_tokens= MAX_TOKENS,
prompt_name = "default",
stream=False)
contents = ""
async for data in response.body_iterator: # data here is a JSON string
data = json.loads(data)
contents = data["answer"]
docs = data["docs"]
return contents
def search_internet(query: str):
return asyncio.run(search_engine_iter(query)) | null |
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
from pydantic import BaseModel, Field
wolfram_alpha_appid = "your key"
def wolfram(query: str):
wolfram = WolframAlphaAPIWrapper(wolfram_alpha_appid=wolfram_alpha_appid)
ans = wolfram.run(query)
return ans | null |
from pydantic import BaseModel, Field
from langchain.tools import ShellTool
def shell(query: str):
tool = ShellTool()
return tool.run(tool_input=query) | null |
from langchain.tools import YouTubeSearchTool
from pydantic import BaseModel, Field
def search_youtube(query: str):
tool = YouTubeSearchTool()
return tool.run(tool_input=query) | null |
22,253 | from langchain.prompts import PromptTemplate
from langchain.chains import LLMMathChain
from server.agent import model_container
from pydantic import BaseModel, Field
PROMPT = PromptTemplate(
input_variables=["question"],
template=_PROMPT_TEMPLATE,
)
def calculate(query: str):
model = model_container.MODEL
llm_math = LLMMathChain.from_llm(model, verbose=True, prompt=PROMPT)
ans = llm_math.run(query)
return ans | null |
22,254 | from server.chat.knowledge_base_chat import knowledge_base_chat
from configs import VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD, MAX_TOKENS
import json
import asyncio
from server.agent import model_container
async def search_knowledge_base_iter(database: str, query: str) -> str:
response = await knowledge_base_chat(query=query,
knowledge_base_name=database,
model_name=model_container.MODEL.model_name,
temperature=0.01,
history=[],
top_k=VECTOR_SEARCH_TOP_K,
max_tokens=MAX_TOKENS,
prompt_name="knowledge_base_chat",
score_threshold=SCORE_THRESHOLD,
stream=False)
contents = ""
async for data in response.body_iterator: # 这里的data是一个json字符串
data = json.loads(data)
contents = data["answer"]
docs = data["docs"]
return contents
def search_knowledgebase_simple(query: str):
return asyncio.run(search_knowledge_base_iter(query)) | null |
22,255 | from pydantic import BaseModel, Field
import requests
from configs.kb_config import SENIVERSE_API_KEY
def weather(location: str, api_key: str):
url = f"https://api.seniverse.com/v3/weather/now.json?key={api_key}&location={location}&language=zh-Hans&unit=c"
response = requests.get(url)
if response.status_code == 200:
data = response.json()
weather = {
"temperature": data["results"][0]["now"]["temperature"],
"description": data["results"][0]["now"]["text"],
}
return weather
else:
raise Exception(
f"Failed to retrieve weather: {response.status_code}")
def weathercheck(location: str):
return weather(location, SENIVERSE_API_KEY) | null |
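A usage sketch for the Seniverse helpers above, assuming a valid SENIVERSE_API_KEY is configured; the returned values are illustrative:

result = weathercheck("北京")
# weather() keeps only two fields from the API response, e.g.
# {"temperature": "20", "description": "晴"}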
22,256 | from pydantic import BaseModel, Field
from langchain.tools.arxiv.tool import ArxivQueryRun
def arxiv(query: str):
tool = ArxivQueryRun()
return tool.run(tool_input=query) | null |
22,257 | from __future__ import annotations
import json
import re
import warnings
from typing import Dict
from langchain.callbacks.manager import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun
from langchain.chains.llm import LLMChain
from langchain.pydantic_v1 import Extra, root_validator
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from typing import List, Any, Optional
from langchain.prompts import PromptTemplate
from server.chat.knowledge_base_chat import knowledge_base_chat
from configs import VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD, MAX_TOKENS
import asyncio
from server.agent import model_container
from pydantic import BaseModel, Field
async def search_knowledge_multiple(queries) -> List[str]:
# queries 应该是一个包含多个 (database, query) 元组的列表
tasks = [search_knowledge_base_iter(database, query) for database, query in queries]
results = await asyncio.gather(*tasks)
# 结合每个查询结果,并在每个查询结果前添加一个自定义的消息
combined_results = []
for (database, _), result in zip(queries, results):
message = f"\n查询到 {database} 知识库的相关信息:\n{result}"
combined_results.append(message)
return combined_results
def search_knowledge(queries) -> str:
responses = asyncio.run(search_knowledge_multiple(queries))
# 输出每个整合的查询结果
contents = ""
for response in responses:
contents += response + "\n\n"
return contents | null |
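A usage sketch showing the input shape search_knowledge expects: a list of (knowledge_base_name, question) tuples. The knowledge base names below are hypothetical and assume the corresponding services are running:

queries = [
    ("samples", "什么是向量库?"),
    ("wiki", "FAISS 支持哪些索引类型?"),
]
print(search_knowledge(queries))
# Each answer is prefixed with "查询到 <database> 知识库的相关信息:" and the results are
# joined with blank lines.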
22,258 | from __future__ import annotations
import json
import re
import warnings
from typing import Dict
from langchain.callbacks.manager import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun
from langchain.chains.llm import LLMChain
from langchain.pydantic_v1 import Extra, root_validator
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from typing import List, Any, Optional
from langchain.prompts import PromptTemplate
from server.chat.knowledge_base_chat import knowledge_base_chat
from configs import VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD, MAX_TOKENS
import asyncio
from server.agent import model_container
from pydantic import BaseModel, Field
PROMPT = PromptTemplate(
input_variables=["question", "database_names"],
template=_PROMPT_TEMPLATE,
)
class LLMKnowledgeChain(LLMChain):
llm_chain: LLMChain
llm: Optional[BaseLanguageModel] = None
"""[Deprecated] LLM wrapper to use."""
prompt: BasePromptTemplate = PROMPT
"""[Deprecated] Prompt to use to translate to python if necessary."""
database_names: Dict[str, str] = None
input_key: str = "question" #: :meta private:
output_key: str = "answer" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def raise_deprecation(cls, values: Dict) -> Dict:
if "llm" in values:
warnings.warn(
"Directly instantiating an LLMKnowledgeChain with an llm is deprecated. "
"Please instantiate with llm_chain argument or using the from_llm "
"class method."
)
if "llm_chain" not in values and values["llm"] is not None:
prompt = values.get("prompt", PROMPT)
values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt)
return values
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
def output_keys(self) -> List[str]:
"""Expect output key.
:meta private:
"""
return [self.output_key]
def _evaluate_expression(self, queries) -> str:
try:
output = search_knowledge(queries)
except Exception as e:
output = "输入的信息有误或不存在知识库,错误信息如下:\n"
return output + str(e)
return output
def _process_llm_result(
self,
llm_output: str,
run_manager: CallbackManagerForChainRun
) -> Dict[str, str]:
run_manager.on_text(llm_output, color="green", verbose=self.verbose)
llm_output = llm_output.strip()
# text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
text_match = re.search(r"```text(.*)", llm_output, re.DOTALL)
if text_match:
expression = text_match.group(1).strip()
cleaned_input_str = (expression.replace("\"", "").replace("“", "").
replace("”", "").replace("```", "").strip())
lines = cleaned_input_str.split("\n")
# 使用逗号分割每一行,然后形成一个(数据库,查询)元组的列表
            try:
                queries = [(line.split(",")[0].strip(), line.split(",")[1].strip()) for line in lines]
            except:
                # 兜底:尝试用全角逗号切分
                queries = [(line.split("，")[0].strip(), line.split("，")[1].strip()) for line in lines]
run_manager.on_text("知识库查询询内容:\n\n" + str(queries) + " \n\n", color="blue", verbose=self.verbose)
output = self._evaluate_expression(queries)
run_manager.on_text("\nAnswer: ", verbose=self.verbose)
run_manager.on_text(output, color="yellow", verbose=self.verbose)
answer = "Answer: " + output
elif llm_output.startswith("Answer:"):
answer = llm_output
elif "Answer:" in llm_output:
answer = llm_output.split("Answer:")[-1]
else:
return {self.output_key: f"输入的格式不对:\n {llm_output}"}
return {self.output_key: answer}
async def _aprocess_llm_result(
self,
llm_output: str,
run_manager: AsyncCallbackManagerForChainRun,
) -> Dict[str, str]:
await run_manager.on_text(llm_output, color="green", verbose=self.verbose)
llm_output = llm_output.strip()
text_match = re.search(r"```text(.*)", llm_output, re.DOTALL)
if text_match:
expression = text_match.group(1).strip()
cleaned_input_str = (
expression.replace("\"", "").replace("“", "").replace("”", "").replace("```", "").strip())
lines = cleaned_input_str.split("\n")
            try:
                queries = [(line.split(",")[0].strip(), line.split(",")[1].strip()) for line in lines]
            except:
                # 兜底:尝试用全角逗号切分
                queries = [(line.split("，")[0].strip(), line.split("，")[1].strip()) for line in lines]
await run_manager.on_text("知识库查询询内容:\n\n" + str(queries) + " \n\n", color="blue",
verbose=self.verbose)
output = self._evaluate_expression(queries)
await run_manager.on_text("\nAnswer: ", verbose=self.verbose)
await run_manager.on_text(output, color="yellow", verbose=self.verbose)
answer = "Answer: " + output
elif llm_output.startswith("Answer:"):
answer = llm_output
elif "Answer:" in llm_output:
answer = "Answer: " + llm_output.split("Answer:")[-1]
else:
raise ValueError(f"unknown format from LLM: {llm_output}")
return {self.output_key: answer}
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
_run_manager.on_text(inputs[self.input_key])
self.database_names = model_container.DATABASE
data_formatted_str = ',\n'.join([f' "{k}":"{v}"' for k, v in self.database_names.items()])
llm_output = self.llm_chain.predict(
database_names=data_formatted_str,
question=inputs[self.input_key],
stop=["```output"],
callbacks=_run_manager.get_child(),
)
return self._process_llm_result(llm_output, _run_manager)
async def _acall(
self,
inputs: Dict[str, str],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
await _run_manager.on_text(inputs[self.input_key])
self.database_names = model_container.DATABASE
data_formatted_str = ',\n'.join([f' "{k}":"{v}"' for k, v in self.database_names.items()])
llm_output = await self.llm_chain.apredict(
database_names=data_formatted_str,
question=inputs[self.input_key],
stop=["```output"],
callbacks=_run_manager.get_child(),
)
        return await self._aprocess_llm_result(llm_output, _run_manager)
def _chain_type(self) -> str:
return "llm_knowledge_chain"
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: BasePromptTemplate = PROMPT,
**kwargs: Any,
) -> LLMKnowledgeChain:
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, **kwargs)
def search_knowledgebase_complex(query: str):
model = model_container.MODEL
llm_knowledge = LLMKnowledgeChain.from_llm(model, verbose=True, prompt=PROMPT)
ans = llm_knowledge.run(query)
return ans | null |
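A minimal sketch of the LLM output format that _process_llm_result parses: a ```text block whose lines are "知识库名,查询内容" pairs. The database names here are hypothetical:

import re

llm_output = '```text\nsamples,什么是召回率\nwiki,什么是困惑度\n```'
m = re.search(r"```text(.*)", llm_output, re.DOTALL)
lines = m.group(1).replace("```", "").strip().split("\n")
queries = [(l.split(",")[0].strip(), l.split(",")[1].strip()) for l in lines]
# queries == [("samples", "什么是召回率"), ("wiki", "什么是困惑度")]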
22,259 | from __future__ import annotations
import re
import warnings
from typing import Dict
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.llm import LLMChain
from langchain.pydantic_v1 import Extra, root_validator
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from typing import List, Any, Optional
from langchain.prompts import PromptTemplate
import sys
import os
import json
from server.chat.knowledge_base_chat import knowledge_base_chat
from configs import VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD, MAX_TOKENS
import asyncio
from server.agent import model_container
from pydantic import BaseModel, Field
async def knowledge_base_chat(query: str = Body(..., description="用户输入", examples=["你好"]),
knowledge_base_name: str = Body(..., description="知识库名称", examples=["samples"]),
top_k: int = Body(VECTOR_SEARCH_TOP_K, description="匹配向量数"),
score_threshold: float = Body(
SCORE_THRESHOLD,
description="知识库匹配相关度阈值,取值范围在0-1之间,SCORE越小,相关度越高,取到1相当于不筛选,建议设置在0.5左右",
ge=0,
le=2
),
history: List[History] = Body(
[],
description="历史对话",
examples=[[
{"role": "user",
"content": "我们来玩成语接龙,我先来,生龙活虎"},
{"role": "assistant",
"content": "虎头虎脑"}]]
),
stream: bool = Body(False, description="流式输出"),
model_name: str = Body(LLM_MODELS[0], description="LLM 模型名称。"),
temperature: float = Body(TEMPERATURE, description="LLM 采样温度", ge=0.0, le=1.0),
max_tokens: Optional[int] = Body(
None,
description="限制LLM生成Token数量,默认None代表模型最大值"
),
prompt_name: str = Body(
"default",
description="使用的prompt模板名称(在configs/prompt_config.py中配置)"
),
request: Request = None,
):
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
return BaseResponse(code=404, msg=f"未找到知识库 {knowledge_base_name}")
history = [History.from_data(h) for h in history]
async def knowledge_base_chat_iterator(
query: str,
top_k: int,
history: Optional[List[History]],
model_name: str = model_name,
prompt_name: str = prompt_name,
) -> AsyncIterable[str]:
nonlocal max_tokens
callback = AsyncIteratorCallbackHandler()
if isinstance(max_tokens, int) and max_tokens <= 0:
max_tokens = None
model = get_ChatOpenAI(
model_name=model_name,
temperature=temperature,
max_tokens=max_tokens,
callbacks=[callback],
)
docs = await run_in_threadpool(search_docs,
query=query,
knowledge_base_name=knowledge_base_name,
top_k=top_k,
score_threshold=score_threshold)
# 加入reranker
if USE_RERANKER:
reranker_model_path = MODEL_PATH["reranker"].get(RERANKER_MODEL,"BAAI/bge-reranker-large")
print("-----------------model path------------------")
print(reranker_model_path)
reranker_model = LangchainReranker(top_n=top_k,
device=embedding_device(),
max_length=RERANKER_MAX_LENGTH,
model_name_or_path=reranker_model_path
)
print(docs)
docs = reranker_model.compress_documents(documents=docs,
query=query)
print("---------after rerank------------------")
print(docs)
context = "\n".join([doc.page_content for doc in docs])
if len(docs) == 0: # 如果没有找到相关文档,使用empty模板
prompt_template = get_prompt_template("knowledge_base_chat", "empty")
else:
prompt_template = get_prompt_template("knowledge_base_chat", prompt_name)
input_msg = History(role="user", content=prompt_template).to_msg_template(False)
chat_prompt = ChatPromptTemplate.from_messages(
[i.to_msg_template() for i in history] + [input_msg])
chain = LLMChain(prompt=chat_prompt, llm=model)
# Begin a task that runs in the background.
task = asyncio.create_task(wrap_done(
chain.acall({"context": context, "question": query}),
callback.done),
)
source_documents = []
for inum, doc in enumerate(docs):
filename = doc.metadata.get("source")
parameters = urlencode({"knowledge_base_name": knowledge_base_name, "file_name": filename})
base_url = request.base_url
url = f"{base_url}knowledge_base/download_doc?" + parameters
text = f"""出处 [{inum + 1}] [{filename}]({url}) \n\n{doc.page_content}\n\n"""
source_documents.append(text)
if len(source_documents) == 0: # 没有找到相关文档
source_documents.append(f"<span style='color:red'>未找到相关文档,该回答为大模型自身能力解答!</span>")
if stream:
async for token in callback.aiter():
# Use server-sent-events to stream the response
yield json.dumps({"answer": token}, ensure_ascii=False)
yield json.dumps({"docs": source_documents}, ensure_ascii=False)
else:
answer = ""
async for token in callback.aiter():
answer += token
yield json.dumps({"answer": answer,
"docs": source_documents},
ensure_ascii=False)
await task
return EventSourceResponse(knowledge_base_chat_iterator(query, top_k, history,model_name,prompt_name))
async def search_knowledge_base_iter(database: str, query: str):
response = await knowledge_base_chat(query=query,
knowledge_base_name=database,
model_name=model_container.MODEL.model_name,
temperature=0.01,
history=[],
top_k=VECTOR_SEARCH_TOP_K,
max_tokens=MAX_TOKENS,
prompt_name="knowledge_base_chat",
score_threshold=SCORE_THRESHOLD,
stream=False)
contents = ""
async for data in response.body_iterator: # 这里的data是一个json字符串
data = json.loads(data)
contents += data["answer"]
docs = data["docs"]
return contents | null |
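A sketch of calling this endpoint over HTTP, assuming it is mounted at /chat/knowledge_base_chat on the local API server; host, port, and knowledge base name are illustrative:

import requests

resp = requests.post(
    "http://127.0.0.1:7861/chat/knowledge_base_chat",
    json={
        "query": "如何新建知识库?",
        "knowledge_base_name": "samples",
        "top_k": 3,
        "score_threshold": 1.0,
        "stream": False,
    },
)
print(resp.text)  # server-sent-events payload whose JSON carries "answer" and "docs"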
22,260 | from __future__ import annotations
import re
import warnings
from typing import Dict
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.llm import LLMChain
from langchain.pydantic_v1 import Extra, root_validator
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from typing import List, Any, Optional
from langchain.prompts import PromptTemplate
import sys
import os
import json
from server.chat.knowledge_base_chat import knowledge_base_chat
from configs import VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD, MAX_TOKENS
import asyncio
from server.agent import model_container
from pydantic import BaseModel, Field
PROMPT = PromptTemplate(
input_variables=["question", "database_names"],
template=_PROMPT_TEMPLATE,
)
class LLMKnowledgeChain(LLMChain):
def raise_deprecation(cls, values: Dict) -> Dict:
def input_keys(self) -> List[str]:
def output_keys(self) -> List[str]:
def _evaluate_expression(self, dataset, query) -> str:
def _process_llm_result(
self,
llm_output: str,
llm_input: str,
run_manager: CallbackManagerForChainRun
) -> Dict[str, str]:
async def _aprocess_llm_result(
self,
llm_output: str,
run_manager: AsyncCallbackManagerForChainRun,
) -> Dict[str, str]:
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
async def _acall(
self,
inputs: Dict[str, str],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
def _chain_type(self) -> str:
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: BasePromptTemplate = PROMPT,
**kwargs: Any,
) -> LLMKnowledgeChain:
def search_knowledgebase_once(query: str):
model = model_container.MODEL
llm_knowledge = LLMKnowledgeChain.from_llm(model, verbose=True, prompt=PROMPT)
ans = llm_knowledge.run(query)
return ans | null |
22,261 | from server.db.models.knowledge_metadata_model import SummaryChunkModel
from server.db.session import with_session
from typing import List, Dict
def list_summary_from_db(session,
kb_name: str,
metadata: Dict = {},
) -> List[Dict]:
'''
列出某知识库chunk summary。
返回形式:[{"id": str, "summary_context": str, "doc_ids": str}, ...]
'''
docs = session.query(SummaryChunkModel).filter(SummaryChunkModel.kb_name.ilike(kb_name))
for k, v in metadata.items():
docs = docs.filter(SummaryChunkModel.meta_data[k].as_string() == str(v))
return [{"id": x.id,
"summary_context": x.summary_context,
"summary_id": x.summary_id,
"doc_ids": x.doc_ids,
"metadata": x.metadata} for x in docs.all()]
class SummaryChunkModel(Base):
"""
chunk summary模型,用于存储file_doc中每个doc_id的chunk 片段,
数据来源:
用户输入: 用户上传文件,可填写文件的描述,生成的file_doc中的doc_id,存入summary_chunk中
程序自动切分 对file_doc表meta_data字段信息中存储的页码信息,按每页的页码切分,自定义prompt生成总结文本,将对应页码关联的doc_id存入summary_chunk中
后续任务:
矢量库构建: 对数据库表summary_chunk中summary_context创建索引,构建矢量库,meta_data为矢量库的元数据(doc_ids)
语义关联: 通过用户输入的描述,自动切分的总结文本,计算
语义相似度
"""
__tablename__ = 'summary_chunk'
id = Column(Integer, primary_key=True, autoincrement=True, comment='ID')
kb_name = Column(String(50), comment='知识库名称')
summary_context = Column(String(255), comment='总结文本')
summary_id = Column(String(255), comment='总结矢量id')
doc_ids = Column(String(1024), comment="向量库id关联列表")
meta_data = Column(JSON, default={})
def __repr__(self):
return (f"<SummaryChunk(id='{self.id}', kb_name='{self.kb_name}', summary_context='{self.summary_context}',"
f" doc_ids='{self.doc_ids}', metadata='{self.metadata}')>")
The provided code snippet includes necessary dependencies for implementing the `delete_summary_from_db` function. Write a Python function `def delete_summary_from_db(session, kb_name: str ) -> List[Dict]` to solve the following problem:
删除知识库chunk summary,并返回被删除的chunk summary。 返回形式:[{"id": str, "summary_context": str, "doc_ids": str}, ...]
Here is the function:
def delete_summary_from_db(session,
kb_name: str
) -> List[Dict]:
'''
    删除知识库chunk summary,并返回被删除的chunk summary。
返回形式:[{"id": str, "summary_context": str, "doc_ids": str}, ...]
'''
docs = list_summary_from_db(kb_name=kb_name)
query = session.query(SummaryChunkModel).filter(SummaryChunkModel.kb_name.ilike(kb_name))
query.delete(synchronize_session=False)
session.commit()
return docs | 删除知识库chunk summary,并返回被删除的Dchunk summary。 返回形式:[{"id": str, "summary_context": str, "doc_ids": str}, ...] |
22,262 | from server.db.models.knowledge_metadata_model import SummaryChunkModel
from server.db.session import with_session
from typing import List, Dict
class SummaryChunkModel(Base):
"""
chunk summary模型,用于存储file_doc中每个doc_id的chunk 片段,
数据来源:
用户输入: 用户上传文件,可填写文件的描述,生成的file_doc中的doc_id,存入summary_chunk中
程序自动切分 对file_doc表meta_data字段信息中存储的页码信息,按每页的页码切分,自定义prompt生成总结文本,将对应页码关联的doc_id存入summary_chunk中
后续任务:
矢量库构建: 对数据库表summary_chunk中summary_context创建索引,构建矢量库,meta_data为矢量库的元数据(doc_ids)
语义关联: 通过用户输入的描述,自动切分的总结文本,计算
语义相似度
"""
__tablename__ = 'summary_chunk'
id = Column(Integer, primary_key=True, autoincrement=True, comment='ID')
kb_name = Column(String(50), comment='知识库名称')
summary_context = Column(String(255), comment='总结文本')
summary_id = Column(String(255), comment='总结矢量id')
doc_ids = Column(String(1024), comment="向量库id关联列表")
meta_data = Column(JSON, default={})
def __repr__(self):
return (f"<SummaryChunk(id='{self.id}', kb_name='{self.kb_name}', summary_context='{self.summary_context}',"
f" doc_ids='{self.doc_ids}', metadata='{self.metadata}')>")
The provided code snippet includes necessary dependencies for implementing the `add_summary_to_db` function. Write a Python function `def add_summary_to_db(session, kb_name: str, summary_infos: List[Dict])` to solve the following problem:
将总结信息添加到数据库。 summary_infos形式:[{"summary_context": str, "doc_ids": str}, ...]
Here is the function:
def add_summary_to_db(session,
kb_name: str,
summary_infos: List[Dict]):
'''
将总结信息添加到数据库。
summary_infos形式:[{"summary_context": str, "doc_ids": str}, ...]
'''
for summary in summary_infos:
obj = SummaryChunkModel(
kb_name=kb_name,
summary_context=summary["summary_context"],
summary_id=summary["summary_id"],
doc_ids=summary["doc_ids"],
meta_data=summary["metadata"],
)
session.add(obj)
session.commit()
return True | 将总结信息添加到数据库。 summary_infos形式:[{"summary_context": str, "doc_ids": str}, ...] |
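An illustrative payload for add_summary_to_db: besides "summary_context" and "doc_ids" mentioned in the docstring, the loop also reads "summary_id" and "metadata" from every dict. The call below assumes the with_session decorator from the imports is applied, so no session is passed explicitly:

summary_infos = [
    {
        "summary_context": "第1页主要介绍了项目的整体架构。",
        "summary_id": "",              # 可先留空,构建总结向量库后回填
        "doc_ids": "1,2,3",
        "metadata": {"pages": "1"},
    },
]
add_summary_to_db(kb_name="samples", summary_infos=summary_infos)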
22,263 | from server.db.models.knowledge_metadata_model import SummaryChunkModel
from server.db.session import with_session
from typing import List, Dict
class SummaryChunkModel(Base):
"""
chunk summary模型,用于存储file_doc中每个doc_id的chunk 片段,
数据来源:
用户输入: 用户上传文件,可填写文件的描述,生成的file_doc中的doc_id,存入summary_chunk中
程序自动切分 对file_doc表meta_data字段信息中存储的页码信息,按每页的页码切分,自定义prompt生成总结文本,将对应页码关联的doc_id存入summary_chunk中
后续任务:
矢量库构建: 对数据库表summary_chunk中summary_context创建索引,构建矢量库,meta_data为矢量库的元数据(doc_ids)
语义关联: 通过用户输入的描述,自动切分的总结文本,计算
语义相似度
"""
__tablename__ = 'summary_chunk'
id = Column(Integer, primary_key=True, autoincrement=True, comment='ID')
kb_name = Column(String(50), comment='知识库名称')
summary_context = Column(String(255), comment='总结文本')
summary_id = Column(String(255), comment='总结矢量id')
doc_ids = Column(String(1024), comment="向量库id关联列表")
meta_data = Column(JSON, default={})
def __repr__(self):
return (f"<SummaryChunk(id='{self.id}', kb_name='{self.kb_name}', summary_context='{self.summary_context}',"
f" doc_ids='{self.doc_ids}', metadata='{self.metadata}')>")
def count_summary_from_db(session, kb_name: str) -> int:
return session.query(SummaryChunkModel).filter(SummaryChunkModel.kb_name.ilike(kb_name)).count() | null |
22,264 | from server.db.models.knowledge_base_model import KnowledgeBaseModel
from server.db.session import with_session
class KnowledgeBaseModel(Base):
"""
知识库模型
"""
__tablename__ = 'knowledge_base'
id = Column(Integer, primary_key=True, autoincrement=True, comment='知识库ID')
kb_name = Column(String(50), comment='知识库名称')
kb_info = Column(String(200), comment='知识库简介(用于Agent)')
vs_type = Column(String(50), comment='向量库类型')
embed_model = Column(String(50), comment='嵌入模型名称')
file_count = Column(Integer, default=0, comment='文件数量')
create_time = Column(DateTime, default=func.now(), comment='创建时间')
def __repr__(self):
return f"<KnowledgeBase(id='{self.id}', kb_name='{self.kb_name}',kb_intro='{self.kb_info} vs_type='{self.vs_type}', embed_model='{self.embed_model}', file_count='{self.file_count}', create_time='{self.create_time}')>"
def add_kb_to_db(session, kb_name, kb_info, vs_type, embed_model):
# 创建知识库实例
kb = session.query(KnowledgeBaseModel).filter(KnowledgeBaseModel.kb_name.ilike(kb_name)).first()
if not kb:
kb = KnowledgeBaseModel(kb_name=kb_name, kb_info=kb_info, vs_type=vs_type, embed_model=embed_model)
session.add(kb)
else: # update kb with new vs_type and embed_model
kb.kb_info = kb_info
kb.vs_type = vs_type
kb.embed_model = embed_model
return True | null |
22,265 | from server.db.models.knowledge_base_model import KnowledgeBaseModel
from server.db.session import with_session
class KnowledgeBaseModel(Base):
"""
知识库模型
"""
__tablename__ = 'knowledge_base'
id = Column(Integer, primary_key=True, autoincrement=True, comment='知识库ID')
kb_name = Column(String(50), comment='知识库名称')
kb_info = Column(String(200), comment='知识库简介(用于Agent)')
vs_type = Column(String(50), comment='向量库类型')
embed_model = Column(String(50), comment='嵌入模型名称')
file_count = Column(Integer, default=0, comment='文件数量')
create_time = Column(DateTime, default=func.now(), comment='创建时间')
def __repr__(self):
return f"<KnowledgeBase(id='{self.id}', kb_name='{self.kb_name}',kb_intro='{self.kb_info} vs_type='{self.vs_type}', embed_model='{self.embed_model}', file_count='{self.file_count}', create_time='{self.create_time}')>"
def kb_exists(session, kb_name):
kb = session.query(KnowledgeBaseModel).filter(KnowledgeBaseModel.kb_name.ilike(kb_name)).first()
status = True if kb else False
return status | null |
22,266 | from server.db.models.knowledge_base_model import KnowledgeBaseModel
from server.db.session import with_session
class KnowledgeBaseModel(Base):
"""
知识库模型
"""
__tablename__ = 'knowledge_base'
id = Column(Integer, primary_key=True, autoincrement=True, comment='知识库ID')
kb_name = Column(String(50), comment='知识库名称')
kb_info = Column(String(200), comment='知识库简介(用于Agent)')
vs_type = Column(String(50), comment='向量库类型')
embed_model = Column(String(50), comment='嵌入模型名称')
file_count = Column(Integer, default=0, comment='文件数量')
create_time = Column(DateTime, default=func.now(), comment='创建时间')
def __repr__(self):
return f"<KnowledgeBase(id='{self.id}', kb_name='{self.kb_name}',kb_intro='{self.kb_info} vs_type='{self.vs_type}', embed_model='{self.embed_model}', file_count='{self.file_count}', create_time='{self.create_time}')>"
def load_kb_from_db(session, kb_name):
kb = session.query(KnowledgeBaseModel).filter(KnowledgeBaseModel.kb_name.ilike(kb_name)).first()
if kb:
kb_name, vs_type, embed_model = kb.kb_name, kb.vs_type, kb.embed_model
else:
kb_name, vs_type, embed_model = None, None, None
return kb_name, vs_type, embed_model | null |
22,267 | from server.db.models.knowledge_base_model import KnowledgeBaseModel
from server.db.session import with_session
class KnowledgeBaseModel(Base):
"""
知识库模型
"""
__tablename__ = 'knowledge_base'
id = Column(Integer, primary_key=True, autoincrement=True, comment='知识库ID')
kb_name = Column(String(50), comment='知识库名称')
kb_info = Column(String(200), comment='知识库简介(用于Agent)')
vs_type = Column(String(50), comment='向量库类型')
embed_model = Column(String(50), comment='嵌入模型名称')
file_count = Column(Integer, default=0, comment='文件数量')
create_time = Column(DateTime, default=func.now(), comment='创建时间')
def __repr__(self):
return f"<KnowledgeBase(id='{self.id}', kb_name='{self.kb_name}',kb_intro='{self.kb_info} vs_type='{self.vs_type}', embed_model='{self.embed_model}', file_count='{self.file_count}', create_time='{self.create_time}')>"
def delete_kb_from_db(session, kb_name):
kb = session.query(KnowledgeBaseModel).filter(KnowledgeBaseModel.kb_name.ilike(kb_name)).first()
if kb:
session.delete(kb)
return True | null |
22,268 | from server.db.session import with_session
from typing import Dict, List
import uuid
from server.db.models.message_model import MessageModel
class MessageModel(Base):
"""
聊天记录模型
"""
__tablename__ = 'message'
id = Column(String(32), primary_key=True, comment='聊天记录ID')
conversation_id = Column(String(32), default=None, index=True, comment='对话框ID')
# chat/agent_chat等
chat_type = Column(String(50), comment='聊天类型')
query = Column(String(4096), comment='用户问题')
response = Column(String(4096), comment='模型回答')
# 记录知识库id等,以便后续扩展
meta_data = Column(JSON, default={})
# 满分100 越高表示评价越好
feedback_score = Column(Integer, default=-1, comment='用户评分')
feedback_reason = Column(String(255), default="", comment='用户评分理由')
create_time = Column(DateTime, default=func.now(), comment='创建时间')
def __repr__(self):
return f"<message(id='{self.id}', conversation_id='{self.conversation_id}', chat_type='{self.chat_type}', query='{self.query}', response='{self.response}',meta_data='{self.meta_data}',feedback_score='{self.feedback_score}',feedback_reason='{self.feedback_reason}', create_time='{self.create_time}')>"
The provided code snippet includes necessary dependencies for implementing the `add_message_to_db` function. Write a Python function `def add_message_to_db(session, conversation_id: str, chat_type, query, response="", message_id=None, metadata: Dict = {})` to solve the following problem:
新增聊天记录
Here is the function:
def add_message_to_db(session, conversation_id: str, chat_type, query, response="", message_id=None,
metadata: Dict = {}):
"""
新增聊天记录
"""
if not message_id:
message_id = uuid.uuid4().hex
m = MessageModel(id=message_id, chat_type=chat_type, query=query, response=response,
conversation_id=conversation_id,
meta_data=metadata)
session.add(m)
session.commit()
return m.id | 新增聊天记录 |
22,269 | from server.db.session import with_session
from typing import Dict, List
import uuid
from server.db.models.message_model import MessageModel
def get_message_by_id(session, message_id) -> MessageModel:
"""
查询聊天记录
"""
m = session.query(MessageModel).filter_by(id=message_id).first()
return m
The provided code snippet includes necessary dependencies for implementing the `update_message` function. Write a Python function `def update_message(session, message_id, response: str = None, metadata: Dict = None)` to solve the following problem:
更新已有的聊天记录
Here is the function:
def update_message(session, message_id, response: str = None, metadata: Dict = None):
"""
更新已有的聊天记录
"""
m = get_message_by_id(message_id)
if m is not None:
if response is not None:
m.response = response
if isinstance(metadata, dict):
m.meta_data = metadata
session.add(m)
session.commit()
return m.id | 更新已有的聊天记录 |
22,270 | from server.db.session import with_session
from typing import Dict, List
import uuid
from server.db.models.message_model import MessageModel
class MessageModel(Base):
"""
聊天记录模型
"""
__tablename__ = 'message'
id = Column(String(32), primary_key=True, comment='聊天记录ID')
conversation_id = Column(String(32), default=None, index=True, comment='对话框ID')
# chat/agent_chat等
chat_type = Column(String(50), comment='聊天类型')
query = Column(String(4096), comment='用户问题')
response = Column(String(4096), comment='模型回答')
# 记录知识库id等,以便后续扩展
meta_data = Column(JSON, default={})
# 满分100 越高表示评价越好
feedback_score = Column(Integer, default=-1, comment='用户评分')
feedback_reason = Column(String(255), default="", comment='用户评分理由')
create_time = Column(DateTime, default=func.now(), comment='创建时间')
def __repr__(self):
return f"<message(id='{self.id}', conversation_id='{self.conversation_id}', chat_type='{self.chat_type}', query='{self.query}', response='{self.response}',meta_data='{self.meta_data}',feedback_score='{self.feedback_score}',feedback_reason='{self.feedback_reason}', create_time='{self.create_time}')>"
The provided code snippet includes necessary dependencies for implementing the `feedback_message_to_db` function. Write a Python function `def feedback_message_to_db(session, message_id, feedback_score, feedback_reason)` to solve the following problem:
反馈聊天记录
Here is the function:
def feedback_message_to_db(session, message_id, feedback_score, feedback_reason):
"""
反馈聊天记录
"""
m = session.query(MessageModel).filter_by(id=message_id).first()
if m:
m.feedback_score = feedback_score
m.feedback_reason = feedback_reason
session.commit()
return m.id | 反馈聊天记录 |
22,271 | from server.db.session import with_session
from typing import Dict, List
import uuid
from server.db.models.message_model import MessageModel
class MessageModel(Base):
"""
聊天记录模型
"""
__tablename__ = 'message'
id = Column(String(32), primary_key=True, comment='聊天记录ID')
conversation_id = Column(String(32), default=None, index=True, comment='对话框ID')
# chat/agent_chat等
chat_type = Column(String(50), comment='聊天类型')
query = Column(String(4096), comment='用户问题')
response = Column(String(4096), comment='模型回答')
# 记录知识库id等,以便后续扩展
meta_data = Column(JSON, default={})
# 满分100 越高表示评价越好
feedback_score = Column(Integer, default=-1, comment='用户评分')
feedback_reason = Column(String(255), default="", comment='用户评分理由')
create_time = Column(DateTime, default=func.now(), comment='创建时间')
def __repr__(self):
return f"<message(id='{self.id}', conversation_id='{self.conversation_id}', chat_type='{self.chat_type}', query='{self.query}', response='{self.response}',meta_data='{self.meta_data}',feedback_score='{self.feedback_score}',feedback_reason='{self.feedback_reason}', create_time='{self.create_time}')>"
def filter_message(session, conversation_id: str, limit: int = 10):
messages = (session.query(MessageModel).filter_by(conversation_id=conversation_id).
# 用户最新的query 也会插入到db,忽略这个message record
filter(MessageModel.response != '').
# 返回最近的limit 条记录
order_by(MessageModel.create_time.desc()).limit(limit).all())
# 直接返回 List[MessageModel] 报错
data = []
for m in messages:
data.append({"query": m.query, "response": m.response})
return data | null |
22,272 | from server.db.models.knowledge_base_model import KnowledgeBaseModel
from server.db.models.knowledge_file_model import KnowledgeFileModel, FileDocModel
from server.db.session import with_session
from server.knowledge_base.utils import KnowledgeFile
from typing import List, Dict
class FileDocModel(Base):
"""
文件-向量库文档模型
"""
__tablename__ = 'file_doc'
id = Column(Integer, primary_key=True, autoincrement=True, comment='ID')
kb_name = Column(String(50), comment='知识库名称')
file_name = Column(String(255), comment='文件名称')
doc_id = Column(String(50), comment="向量库文档ID")
meta_data = Column(JSON, default={})
def __repr__(self):
return f"<FileDoc(id='{self.id}', kb_name='{self.kb_name}', file_name='{self.file_name}', doc_id='{self.doc_id}', metadata='{self.meta_data}')>"
The provided code snippet includes necessary dependencies for implementing the `list_file_num_docs_id_by_kb_name_and_file_name` function. Write a Python function `def list_file_num_docs_id_by_kb_name_and_file_name(session, kb_name: str, file_name: str, ) -> List[int]` to solve the following problem:
列出某知识库某文件对应的所有Document的id。 返回形式:[int, ...]
Here is the function:
def list_file_num_docs_id_by_kb_name_and_file_name(session,
kb_name: str,
file_name: str,
) -> List[int]:
'''
列出某知识库某文件对应的所有Document的id。
    返回形式:[int, ...]
'''
doc_ids = session.query(FileDocModel.doc_id).filter_by(kb_name=kb_name, file_name=file_name).all()
return [int(_id[0]) for _id in doc_ids] | 列出某知识库某文件对应的所有Document的id。 返回形式:[str, ...] |
22,273 | from server.db.models.knowledge_base_model import KnowledgeBaseModel
from server.db.models.knowledge_file_model import KnowledgeFileModel, FileDocModel
from server.db.session import with_session
from server.knowledge_base.utils import KnowledgeFile
from typing import List, Dict
class KnowledgeFileModel(Base):
"""
知识文件模型
"""
__tablename__ = 'knowledge_file'
id = Column(Integer, primary_key=True, autoincrement=True, comment='知识文件ID')
file_name = Column(String(255), comment='文件名')
file_ext = Column(String(10), comment='文件扩展名')
kb_name = Column(String(50), comment='所属知识库名称')
document_loader_name = Column(String(50), comment='文档加载器名称')
text_splitter_name = Column(String(50), comment='文本分割器名称')
file_version = Column(Integer, default=1, comment='文件版本')
file_mtime = Column(Float, default=0.0, comment="文件修改时间")
file_size = Column(Integer, default=0, comment="文件大小")
custom_docs = Column(Boolean, default=False, comment="是否自定义docs")
docs_count = Column(Integer, default=0, comment="切分文档数量")
create_time = Column(DateTime, default=func.now(), comment='创建时间')
def __repr__(self):
return f"<KnowledgeFile(id='{self.id}', file_name='{self.file_name}', file_ext='{self.file_ext}', kb_name='{self.kb_name}', document_loader_name='{self.document_loader_name}', text_splitter_name='{self.text_splitter_name}', file_version='{self.file_version}', create_time='{self.create_time}')>"
def count_files_from_db(session, kb_name: str) -> int:
return session.query(KnowledgeFileModel).filter(KnowledgeFileModel.kb_name.ilike(kb_name)).count() | null |
22,274 | from server.db.models.knowledge_base_model import KnowledgeBaseModel
from server.db.models.knowledge_file_model import KnowledgeFileModel, FileDocModel
from server.db.session import with_session
from server.knowledge_base.utils import KnowledgeFile
from typing import List, Dict
class KnowledgeFileModel(Base):
def __repr__(self):
def list_files_from_db(session, kb_name):
files = session.query(KnowledgeFileModel).filter(KnowledgeFileModel.kb_name.ilike(kb_name)).all()
docs = [f.file_name for f in files]
return docs | null |
22,275 | from server.db.models.knowledge_base_model import KnowledgeBaseModel
from server.db.models.knowledge_file_model import KnowledgeFileModel, FileDocModel
from server.db.session import with_session
from server.knowledge_base.utils import KnowledgeFile
from typing import List, Dict
def add_docs_to_db(session,
kb_name: str,
file_name: str,
doc_infos: List[Dict]):
'''
将某知识库某文件对应的所有Document信息添加到数据库。
doc_infos形式:[{"id": str, "metadata": dict}, ...]
'''
# ! 这里会出现doc_infos为None的情况,需要进一步排查
if doc_infos is None:
print("输入的server.db.repository.knowledge_file_repository.add_docs_to_db的doc_infos参数为None")
return False
for d in doc_infos:
obj = FileDocModel(
kb_name=kb_name,
file_name=file_name,
doc_id=d["id"],
meta_data=d["metadata"],
)
session.add(obj)
return True
class KnowledgeBaseModel(Base):
"""
知识库模型
"""
__tablename__ = 'knowledge_base'
id = Column(Integer, primary_key=True, autoincrement=True, comment='知识库ID')
kb_name = Column(String(50), comment='知识库名称')
kb_info = Column(String(200), comment='知识库简介(用于Agent)')
vs_type = Column(String(50), comment='向量库类型')
embed_model = Column(String(50), comment='嵌入模型名称')
file_count = Column(Integer, default=0, comment='文件数量')
create_time = Column(DateTime, default=func.now(), comment='创建时间')
def __repr__(self):
return f"<KnowledgeBase(id='{self.id}', kb_name='{self.kb_name}',kb_intro='{self.kb_info} vs_type='{self.vs_type}', embed_model='{self.embed_model}', file_count='{self.file_count}', create_time='{self.create_time}')>"
class KnowledgeFileModel(Base):
"""
知识文件模型
"""
__tablename__ = 'knowledge_file'
id = Column(Integer, primary_key=True, autoincrement=True, comment='知识文件ID')
file_name = Column(String(255), comment='文件名')
file_ext = Column(String(10), comment='文件扩展名')
kb_name = Column(String(50), comment='所属知识库名称')
document_loader_name = Column(String(50), comment='文档加载器名称')
text_splitter_name = Column(String(50), comment='文本分割器名称')
file_version = Column(Integer, default=1, comment='文件版本')
file_mtime = Column(Float, default=0.0, comment="文件修改时间")
file_size = Column(Integer, default=0, comment="文件大小")
custom_docs = Column(Boolean, default=False, comment="是否自定义docs")
docs_count = Column(Integer, default=0, comment="切分文档数量")
create_time = Column(DateTime, default=func.now(), comment='创建时间')
def __repr__(self):
return f"<KnowledgeFile(id='{self.id}', file_name='{self.file_name}', file_ext='{self.file_ext}', kb_name='{self.kb_name}', document_loader_name='{self.document_loader_name}', text_splitter_name='{self.text_splitter_name}', file_version='{self.file_version}', create_time='{self.create_time}')>"
class KnowledgeFile:
def __init__(
self,
filename: str,
knowledge_base_name: str,
loader_kwargs: Dict = {},
):
'''
对应知识库目录中的文件,必须是磁盘上存在的才能进行向量化等操作。
'''
self.kb_name = knowledge_base_name
self.filename = str(Path(filename).as_posix())
self.ext = os.path.splitext(filename)[-1].lower()
if self.ext not in SUPPORTED_EXTS:
raise ValueError(f"暂未支持的文件格式 {self.filename}")
self.loader_kwargs = loader_kwargs
self.filepath = get_file_path(knowledge_base_name, filename)
self.docs = None
self.splited_docs = None
self.document_loader_name = get_LoaderClass(self.ext)
self.text_splitter_name = TEXT_SPLITTER_NAME
def file2docs(self, refresh: bool = False):
if self.docs is None or refresh:
logger.info(f"{self.document_loader_name} used for {self.filepath}")
loader = get_loader(loader_name=self.document_loader_name,
file_path=self.filepath,
loader_kwargs=self.loader_kwargs)
self.docs = loader.load()
return self.docs
def docs2texts(
self,
docs: List[Document] = None,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
refresh: bool = False,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
text_splitter: TextSplitter = None,
):
docs = docs or self.file2docs(refresh=refresh)
if not docs:
return []
if self.ext not in [".csv"]:
if text_splitter is None:
text_splitter = make_text_splitter(splitter_name=self.text_splitter_name, chunk_size=chunk_size,
chunk_overlap=chunk_overlap)
if self.text_splitter_name == "MarkdownHeaderTextSplitter":
docs = text_splitter.split_text(docs[0].page_content)
else:
docs = text_splitter.split_documents(docs)
if not docs:
return []
print(f"文档切分示例:{docs[0]}")
if zh_title_enhance:
docs = func_zh_title_enhance(docs)
self.splited_docs = docs
return self.splited_docs
def file2text(
self,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
refresh: bool = False,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
text_splitter: TextSplitter = None,
):
if self.splited_docs is None or refresh:
docs = self.file2docs()
self.splited_docs = self.docs2texts(docs=docs,
zh_title_enhance=zh_title_enhance,
refresh=refresh,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
text_splitter=text_splitter)
return self.splited_docs
def file_exist(self):
return os.path.isfile(self.filepath)
def get_mtime(self):
return os.path.getmtime(self.filepath)
def get_size(self):
return os.path.getsize(self.filepath)
def add_file_to_db(session,
kb_file: KnowledgeFile,
docs_count: int = 0,
custom_docs: bool = False,
doc_infos: List[Dict] = [], # 形式:[{"id": str, "metadata": dict}, ...]
):
kb = session.query(KnowledgeBaseModel).filter_by(kb_name=kb_file.kb_name).first()
if kb:
# 如果已经存在该文件,则更新文件信息与版本号
existing_file: KnowledgeFileModel = (session.query(KnowledgeFileModel)
.filter(KnowledgeFileModel.kb_name.ilike(kb_file.kb_name),
KnowledgeFileModel.file_name.ilike(kb_file.filename))
.first())
mtime = kb_file.get_mtime()
size = kb_file.get_size()
if existing_file:
existing_file.file_mtime = mtime
existing_file.file_size = size
existing_file.docs_count = docs_count
existing_file.custom_docs = custom_docs
existing_file.file_version += 1
# 否则,添加新文件
else:
new_file = KnowledgeFileModel(
file_name=kb_file.filename,
file_ext=kb_file.ext,
kb_name=kb_file.kb_name,
document_loader_name=kb_file.document_loader_name,
text_splitter_name=kb_file.text_splitter_name or "SpacyTextSplitter",
file_mtime=mtime,
file_size=size,
docs_count=docs_count,
custom_docs=custom_docs,
)
kb.file_count += 1
session.add(new_file)
add_docs_to_db(kb_name=kb_file.kb_name, file_name=kb_file.filename, doc_infos=doc_infos)
return True | null |
22,276 | from server.db.models.knowledge_base_model import KnowledgeBaseModel
from server.db.models.knowledge_file_model import KnowledgeFileModel, FileDocModel
from server.db.session import with_session
from server.knowledge_base.utils import KnowledgeFile
from typing import List, Dict
def delete_docs_from_db(session,
kb_name: str,
file_name: str = None,
) -> List[Dict]:
class KnowledgeBaseModel(Base):
def __repr__(self):
class KnowledgeFileModel(Base):
def __repr__(self):
class KnowledgeFile:
def __init__(
self,
filename: str,
knowledge_base_name: str,
loader_kwargs: Dict = {},
):
def file2docs(self, refresh: bool = False):
def docs2texts(
self,
docs: List[Document] = None,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
refresh: bool = False,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
text_splitter: TextSplitter = None,
):
def file2text(
self,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
refresh: bool = False,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
text_splitter: TextSplitter = None,
):
def file_exist(self):
def get_mtime(self):
def get_size(self):
def delete_file_from_db(session, kb_file: KnowledgeFile):
existing_file = (session.query(KnowledgeFileModel)
.filter(KnowledgeFileModel.file_name.ilike(kb_file.filename),
KnowledgeFileModel.kb_name.ilike(kb_file.kb_name))
.first())
if existing_file:
session.delete(existing_file)
delete_docs_from_db(kb_name=kb_file.kb_name, file_name=kb_file.filename)
session.commit()
kb = session.query(KnowledgeBaseModel).filter(KnowledgeBaseModel.kb_name.ilike(kb_file.kb_name)).first()
if kb:
kb.file_count -= 1
session.commit()
return True | null |
22,277 | from server.db.models.knowledge_base_model import KnowledgeBaseModel
from server.db.models.knowledge_file_model import KnowledgeFileModel, FileDocModel
from server.db.session import with_session
from server.knowledge_base.utils import KnowledgeFile
from typing import List, Dict
class KnowledgeBaseModel(Base):
"""
知识库模型
"""
__tablename__ = 'knowledge_base'
id = Column(Integer, primary_key=True, autoincrement=True, comment='知识库ID')
kb_name = Column(String(50), comment='知识库名称')
kb_info = Column(String(200), comment='知识库简介(用于Agent)')
vs_type = Column(String(50), comment='向量库类型')
embed_model = Column(String(50), comment='嵌入模型名称')
file_count = Column(Integer, default=0, comment='文件数量')
create_time = Column(DateTime, default=func.now(), comment='创建时间')
def __repr__(self):
return f"<KnowledgeBase(id='{self.id}', kb_name='{self.kb_name}',kb_intro='{self.kb_info} vs_type='{self.vs_type}', embed_model='{self.embed_model}', file_count='{self.file_count}', create_time='{self.create_time}')>"
class KnowledgeFileModel(Base):
"""
知识文件模型
"""
__tablename__ = 'knowledge_file'
id = Column(Integer, primary_key=True, autoincrement=True, comment='知识文件ID')
file_name = Column(String(255), comment='文件名')
file_ext = Column(String(10), comment='文件扩展名')
kb_name = Column(String(50), comment='所属知识库名称')
document_loader_name = Column(String(50), comment='文档加载器名称')
text_splitter_name = Column(String(50), comment='文本分割器名称')
file_version = Column(Integer, default=1, comment='文件版本')
file_mtime = Column(Float, default=0.0, comment="文件修改时间")
file_size = Column(Integer, default=0, comment="文件大小")
custom_docs = Column(Boolean, default=False, comment="是否自定义docs")
docs_count = Column(Integer, default=0, comment="切分文档数量")
create_time = Column(DateTime, default=func.now(), comment='创建时间')
def __repr__(self):
return f"<KnowledgeFile(id='{self.id}', file_name='{self.file_name}', file_ext='{self.file_ext}', kb_name='{self.kb_name}', document_loader_name='{self.document_loader_name}', text_splitter_name='{self.text_splitter_name}', file_version='{self.file_version}', create_time='{self.create_time}')>"
class FileDocModel(Base):
"""
文件-向量库文档模型
"""
__tablename__ = 'file_doc'
id = Column(Integer, primary_key=True, autoincrement=True, comment='ID')
kb_name = Column(String(50), comment='知识库名称')
file_name = Column(String(255), comment='文件名称')
doc_id = Column(String(50), comment="向量库文档ID")
meta_data = Column(JSON, default={})
def __repr__(self):
return f"<FileDoc(id='{self.id}', kb_name='{self.kb_name}', file_name='{self.file_name}', doc_id='{self.doc_id}', metadata='{self.meta_data}')>"
def delete_files_from_db(session, knowledge_base_name: str):
session.query(KnowledgeFileModel).filter(KnowledgeFileModel.kb_name.ilike(knowledge_base_name)).delete(
synchronize_session=False)
session.query(FileDocModel).filter(FileDocModel.kb_name.ilike(knowledge_base_name)).delete(
synchronize_session=False)
kb = session.query(KnowledgeBaseModel).filter(KnowledgeBaseModel.kb_name.ilike(knowledge_base_name)).first()
if kb:
kb.file_count = 0
session.commit()
return True | null |
22,278 | from server.db.models.knowledge_base_model import KnowledgeBaseModel
from server.db.models.knowledge_file_model import KnowledgeFileModel, FileDocModel
from server.db.session import with_session
from server.knowledge_base.utils import KnowledgeFile
from typing import List, Dict
class KnowledgeFileModel(Base):
"""
知识文件模型
"""
__tablename__ = 'knowledge_file'
id = Column(Integer, primary_key=True, autoincrement=True, comment='知识文件ID')
file_name = Column(String(255), comment='文件名')
file_ext = Column(String(10), comment='文件扩展名')
kb_name = Column(String(50), comment='所属知识库名称')
document_loader_name = Column(String(50), comment='文档加载器名称')
text_splitter_name = Column(String(50), comment='文本分割器名称')
file_version = Column(Integer, default=1, comment='文件版本')
file_mtime = Column(Float, default=0.0, comment="文件修改时间")
file_size = Column(Integer, default=0, comment="文件大小")
custom_docs = Column(Boolean, default=False, comment="是否自定义docs")
docs_count = Column(Integer, default=0, comment="切分文档数量")
create_time = Column(DateTime, default=func.now(), comment='创建时间')
def __repr__(self):
return f"<KnowledgeFile(id='{self.id}', file_name='{self.file_name}', file_ext='{self.file_ext}', kb_name='{self.kb_name}', document_loader_name='{self.document_loader_name}', text_splitter_name='{self.text_splitter_name}', file_version='{self.file_version}', create_time='{self.create_time}')>"
class KnowledgeFile:
def __init__(
self,
filename: str,
knowledge_base_name: str,
loader_kwargs: Dict = {},
):
'''
对应知识库目录中的文件,必须是磁盘上存在的才能进行向量化等操作。
'''
self.kb_name = knowledge_base_name
self.filename = str(Path(filename).as_posix())
self.ext = os.path.splitext(filename)[-1].lower()
if self.ext not in SUPPORTED_EXTS:
raise ValueError(f"暂未支持的文件格式 {self.filename}")
self.loader_kwargs = loader_kwargs
self.filepath = get_file_path(knowledge_base_name, filename)
self.docs = None
self.splited_docs = None
self.document_loader_name = get_LoaderClass(self.ext)
self.text_splitter_name = TEXT_SPLITTER_NAME
def file2docs(self, refresh: bool = False):
if self.docs is None or refresh:
logger.info(f"{self.document_loader_name} used for {self.filepath}")
loader = get_loader(loader_name=self.document_loader_name,
file_path=self.filepath,
loader_kwargs=self.loader_kwargs)
self.docs = loader.load()
return self.docs
def docs2texts(
self,
docs: List[Document] = None,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
refresh: bool = False,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
text_splitter: TextSplitter = None,
):
docs = docs or self.file2docs(refresh=refresh)
if not docs:
return []
if self.ext not in [".csv"]:
if text_splitter is None:
text_splitter = make_text_splitter(splitter_name=self.text_splitter_name, chunk_size=chunk_size,
chunk_overlap=chunk_overlap)
if self.text_splitter_name == "MarkdownHeaderTextSplitter":
docs = text_splitter.split_text(docs[0].page_content)
else:
docs = text_splitter.split_documents(docs)
if not docs:
return []
print(f"文档切分示例:{docs[0]}")
if zh_title_enhance:
docs = func_zh_title_enhance(docs)
self.splited_docs = docs
return self.splited_docs
def file2text(
self,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
refresh: bool = False,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
text_splitter: TextSplitter = None,
):
if self.splited_docs is None or refresh:
docs = self.file2docs()
self.splited_docs = self.docs2texts(docs=docs,
zh_title_enhance=zh_title_enhance,
refresh=refresh,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
text_splitter=text_splitter)
return self.splited_docs
def file_exist(self):
return os.path.isfile(self.filepath)
def get_mtime(self):
return os.path.getmtime(self.filepath)
def get_size(self):
return os.path.getsize(self.filepath)
def file_exists_in_db(session, kb_file: KnowledgeFile):
existing_file = (session.query(KnowledgeFileModel)
.filter(KnowledgeFileModel.file_name.ilike(kb_file.filename),
KnowledgeFileModel.kb_name.ilike(kb_file.kb_name))
.first())
return True if existing_file else False | null |
22,279 | from server.db.session import with_session
import uuid
from server.db.models.conversation_model import ConversationModel
class ConversationModel(Base):
"""
聊天记录模型
"""
__tablename__ = 'conversation'
id = Column(String(32), primary_key=True, comment='对话框ID')
name = Column(String(50), comment='对话框名称')
# chat/agent_chat等
chat_type = Column(String(50), comment='聊天类型')
create_time = Column(DateTime, default=func.now(), comment='创建时间')
def __repr__(self):
return f"<Conversation(id='{self.id}', name='{self.name}', chat_type='{self.chat_type}', create_time='{self.create_time}')>"
The provided code snippet includes necessary dependencies for implementing the `add_conversation_to_db` function. Write a Python function `def add_conversation_to_db(session, chat_type, name="", conversation_id=None)` to solve the following problem:
新增聊天记录
Here is the function:
def add_conversation_to_db(session, chat_type, name="", conversation_id=None):
"""
新增聊天记录
"""
if not conversation_id:
conversation_id = uuid.uuid4().hex
c = ConversationModel(id=conversation_id, chat_type=chat_type, name=name)
session.add(c)
return c.id | 新增聊天记录 |
22,280 | from functools import wraps
from contextlib import contextmanager
from server.db.base import SessionLocal
from sqlalchemy.orm import Session
def session_scope() -> Session:
"""上下文管理器用于自动获取 Session, 避免错误"""
session = SessionLocal()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
def with_session(f):
@wraps(f)
def wrapper(*args, **kwargs):
with session_scope() as session:
try:
result = f(session, *args, **kwargs)
session.commit()
return result
except:
session.rollback()
raise
return wrapper | null |
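A usage sketch of with_session: repository functions declare session as their first parameter and are wrapped with the decorator, so callers never pass a session themselves. The model below is hypothetical:

@with_session
def count_rows(session, table_model):
    # a fresh session is opened, committed on success, rolled back on error, then closed
    return session.query(table_model).count()

# count_rows(SomeModel)  # called without a session argument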
22,281 | from functools import wraps
from contextlib import contextmanager
from server.db.base import SessionLocal
from sqlalchemy.orm import Session
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
def get_db() -> SessionLocal:
db = SessionLocal()
try:
yield db
finally:
db.close() | null |
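A sketch of using get_db as a FastAPI dependency; the route and query are hypothetical:

from fastapi import Depends, FastAPI
from sqlalchemy import text
from sqlalchemy.orm import Session

app = FastAPI()

@app.get("/db_ping")
def db_ping(db: Session = Depends(get_db)):
    db.execute(text("SELECT 1"))  # the generator closes the session after the response
    return {"ok": True}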
22,282 | from functools import wraps
from contextlib import contextmanager
from server.db.base import SessionLocal
from sqlalchemy.orm import Session
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
def get_db0() -> SessionLocal:
db = SessionLocal()
return db | null |
22,283 | from langchain.docstore.document import Document
from configs import EMBEDDING_MODEL, logger
from server.model_workers.base import ApiEmbeddingsParams
from server.utils import BaseResponse, get_model_worker_config, list_embed_models, list_online_embed_models
from fastapi import Body
from fastapi.concurrency import run_in_threadpool
from typing import Dict, List
def embed_texts(
texts: List[str],
embed_model: str = EMBEDDING_MODEL,
to_query: bool = False,
) -> BaseResponse:
'''
对文本进行向量化。返回数据格式:BaseResponse(data=List[List[float]])
'''
try:
if embed_model in list_embed_models(): # 使用本地Embeddings模型
from server.utils import load_local_embeddings
embeddings = load_local_embeddings(model=embed_model)
return BaseResponse(data=embeddings.embed_documents(texts))
if embed_model in list_online_embed_models(): # 使用在线API
config = get_model_worker_config(embed_model)
worker_class = config.get("worker_class")
embed_model = config.get("embed_model")
worker = worker_class()
if worker_class.can_embedding():
params = ApiEmbeddingsParams(texts=texts, to_query=to_query, embed_model=embed_model)
resp = worker.do_embeddings(params)
return BaseResponse(**resp)
return BaseResponse(code=500, msg=f"指定的模型 {embed_model} 不支持 Embeddings 功能。")
except Exception as e:
logger.error(e)
return BaseResponse(code=500, msg=f"文本向量化过程中出现错误:{e}")
The provided code snippet includes necessary dependencies for implementing the `aembed_texts` function. Write a Python function `async def aembed_texts( texts: List[str], embed_model: str = EMBEDDING_MODEL, to_query: bool = False, ) -> BaseResponse` to solve the following problem:
对文本进行向量化。返回数据格式:BaseResponse(data=List[List[float]])
Here is the function:
async def aembed_texts(
texts: List[str],
embed_model: str = EMBEDDING_MODEL,
to_query: bool = False,
) -> BaseResponse:
'''
对文本进行向量化。返回数据格式:BaseResponse(data=List[List[float]])
'''
try:
if embed_model in list_embed_models(): # 使用本地Embeddings模型
from server.utils import load_local_embeddings
embeddings = load_local_embeddings(model=embed_model)
return BaseResponse(data=await embeddings.aembed_documents(texts))
if embed_model in list_online_embed_models(): # 使用在线API
return await run_in_threadpool(embed_texts,
texts=texts,
embed_model=embed_model,
to_query=to_query)
except Exception as e:
logger.error(e)
return BaseResponse(code=500, msg=f"文本向量化过程中出现错误:{e}") | 对文本进行向量化。返回数据格式:BaseResponse(data=List[List[float]]) |
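A usage sketch for embed_texts, assuming a local embedding model is configured as EMBEDDING_MODEL; the vectors come back in BaseResponse.data:

resp = embed_texts(["你好", "知识库"], embed_model=EMBEDDING_MODEL)
if resp.code == 200:
    vectors = resp.data          # List[List[float]], one vector per input text
    print(len(vectors), len(vectors[0]))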
22,284 | import plistlib
import requests
import logging
logger = logging.getLogger("bags")
OLD_APNS_BAG = None
def apns_init_bag_old():
global OLD_APNS_BAG
if OLD_APNS_BAG is not None:
return OLD_APNS_BAG
r = requests.get("https://init.push.apple.com/bag", verify=False)
if r.status_code != 200:
raise Exception("Failed to get APNs init bag")
# Parse the config as a plist
bag = plistlib.loads(r.content)
logger.debug("Received APNs old-style init bag")
OLD_APNS_BAG = bag
return bag | null |
22,285 | import plistlib
import requests
import logging
logger = logging.getLogger("bags")
APNS_BAG = None
def apns_init_bag():
global APNS_BAG
if APNS_BAG is not None:
return APNS_BAG
r = requests.get("http://init-p01st.push.apple.com/bag", verify=False)
if r.status_code != 200:
raise Exception("Failed to get APNs init bag 2")
content = plistlib.loads(r.content)
bag = plistlib.loads(content["bag"])
logger.debug("Received APNs new init bag")
APNS_BAG = bag
return bag | null |
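A small sketch of how this bag is consumed (the two APNSCourier* keys are the ones read by the APNs code later in this dump; the hostname value shown is only illustrative):
bag = apns_init_bag()
host = f"1-{bag['APNSCourierHostname']}"     # e.g. "1-courier.push.apple.com"
print(host, bag['APNSCourierHostcount'])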
22,286 | import plistlib
import requests
import logging
logger = logging.getLogger("bags")
IDS_BAG = None
def ids_bag():
global IDS_BAG
if IDS_BAG is not None:
return IDS_BAG
r = requests.get(
"https://init.ess.apple.com/WebObjects/VCInit.woa/wa/getBag?ix=3", verify=False
)
if r.status_code != 200:
raise Exception("Failed to get IDS bag")
# Parse the config as a plist
content = plistlib.loads(r.content)
# Load the inner bag
bag = plistlib.loads(content["bag"])
logger.debug("Recieved IDS bag")
IDS_BAG = bag
return bag | null |
22,287 | from __future__ import annotations
import random
import socket
import threading
import time
from hashlib import sha1
from base64 import b64encode, b64decode
import logging
logger = logging.getLogger("apns")
import tlslite
if tlslite.__version__ != "0.8.0-alpha43":
logger.warning("tlslite-ng is not the correct version!")
logger.warning("Please install tlslite-ng==0.8.0a43 or you will experience issues!")
import albert
import bags
COURIER_HOST = f"{random.randint(1, bags.apns_init_bag()['APNSCourierHostcount'])}-{bags.apns_init_bag()['APNSCourierHostname']}"
COURIER_PORT = 5223
ALPN = [b"apns-security-v2"]
def _connect(private_key: str, cert: str) -> tlslite.TLSConnection:
# Connect to the courier server
sock = socket.create_connection((COURIER_HOST, COURIER_PORT))
# Wrap the socket in TLS
sock = tlslite.TLSConnection(sock)
# Parse the certificate and private key
cert = tlslite.X509CertChain([tlslite.X509().parse(cert)])
private_key = tlslite.parsePEMKey(private_key, private=True)
# Handshake with the server
sock.handshakeClientCert(cert, private_key, alpn=ALPN)
logger.info(f"Connected to APNs ({COURIER_HOST})")
return sock | null |
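A hedged sketch of how a connection is usually opened (assumes albert.generate_push_cert(), defined later in this dump, returns the PEM private key and certificate that _connect expects):
push_key, push_cert = albert.generate_push_cert()
sock = _connect(push_key, push_cert)
# raw APNs payloads are then written to / read from this TLS socket, e.g. sock.write(payload)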
22,288 | from __future__ import annotations
import random
import socket
import threading
import time
from hashlib import sha1
from base64 import b64encode, b64decode
import logging
import tlslite
import albert
import bags
def _serialize_field(id: int, value: bytes) -> bytes:
    # field framing: 1-byte id, 2-byte big-endian length, then the raw value
    return id.to_bytes(1, "big") + len(value).to_bytes(2, "big") + value
def _serialize_payload(id: int, fields: list[(int, bytes)]) -> bytes:
payload = b""
for fid, value in fields:
if fid is not None:
payload += _serialize_field(fid, value)
return id.to_bytes(1, "big") + len(payload).to_bytes(4, "big") + payload | null |
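Worked example of the framing, using the field layout reconstructed above: a 1-byte payload id, a 4-byte big-endian length, then (1-byte id, 2-byte length, value) fields; fields whose id is None are skipped.
payload = _serialize_payload(0x07, [(1, b"\xAA\xBB"), (None, b"skipped"), (2, b"\x01")])
assert payload == b"\x07\x00\x00\x00\x09" + b"\x01\x00\x02\xAA\xBB" + b"\x02\x00\x01\x01"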
22,289 | from __future__ import annotations
import random
import socket
import threading
import time
from hashlib import sha1
from base64 import b64encode, b64decode
import logging
import tlslite
import albert
import bags
def _deserialize_field(stream: bytes) -> tuple[int, bytes]:
id = int.from_bytes(stream[:1], "big")
length = int.from_bytes(stream[1:3], "big")
value = stream[3 : 3 + length]
return id, value
def _deserialize_payload(stream) -> tuple[int, list[tuple[int, bytes]]] | None:
id = int.from_bytes(stream.read(1), "big")
if id == 0x0:
return None
length = int.from_bytes(stream.read(4), "big")
buffer = stream.read(length)
fields = []
while len(buffer) > 0:
fid, value = _deserialize_field(buffer)
fields.append((fid, value))
buffer = buffer[3 + len(value) :]
return id, fields | null |
22,290 | from __future__ import annotations
import random
import socket
import threading
import time
from hashlib import sha1
from base64 import b64encode, b64decode
import logging
import tlslite
import albert
import bags
def _deserialize_field(stream: bytes) -> tuple[int, bytes]:
id = int.from_bytes(stream[:1], "big")
length = int.from_bytes(stream[1:3], "big")
value = stream[3 : 3 + length]
return id, value
def _deserialize_payload_from_buffer(
buffer: bytes,
) -> tuple[int, list[tuple[int, bytes]]] | None:
id = int.from_bytes(buffer[:1], "big")
if id == 0x0:
return None
length = int.from_bytes(buffer[1:5], "big")
buffer = buffer[5:]
if len(buffer) < length:
raise Exception("Buffer is too short")
fields = []
while len(buffer) > 0:
fid, value = _deserialize_field(buffer)
fields.append((fid, value))
buffer = buffer[3 + len(value) :]
return id, fields | null |
22,291 | from __future__ import annotations
import random
import socket
import threading
import time
from hashlib import sha1
from base64 import b64encode, b64decode
import logging
import tlslite
import albert
import bags
def _get_field(fields: list[tuple[int, bytes]], id: int) -> bytes:
for field_id, value in fields:
if field_id == id:
return value
return None | null |
22,292 | import plistlib
from base64 import b64decode
from typing import Union
import requests
from ._helpers import PROTOCOL_VERSION, USER_AGENT, KeyPair, parse_key, serialize_key
from .signing import add_auth_signature, armour_cert
from io import BytesIO
from cryptography.hazmat.primitives.asymmetric import ec, rsa
import logging
logger = logging.getLogger("ids")
class IDSIdentity:
def __init__(
self,
signing_key: Union[str, None] = None,
encryption_key: Union[str, None] = None,
signing_public_key: Union[str, None] = None,
encryption_public_key: Union[str, None] = None):
if signing_key is not None:
self.signing_key = signing_key
self.signing_public_key = serialize_key(parse_key(signing_key).public_key())
elif signing_public_key is not None:
self.signing_key = None
self.signing_public_key = signing_public_key
else:
# Generate a new key
self.signing_key = serialize_key(ec.generate_private_key(ec.SECP256R1()))
self.signing_public_key = serialize_key(parse_key(self.signing_key).public_key())
if encryption_key is not None:
self.encryption_key = encryption_key
self.encryption_public_key = serialize_key(parse_key(encryption_key).public_key())
elif encryption_public_key is not None:
self.encryption_key = None
self.encryption_public_key = encryption_public_key
else:
self.encryption_key = serialize_key(rsa.generate_private_key(65537, 1280))
self.encryption_public_key = serialize_key(parse_key(self.encryption_key).public_key())
def decode(input: bytes) -> 'IDSIdentity':
input = BytesIO(input)
assert input.read(5) == b'\x30\x81\xF6\x81\x43' # DER header
raw_ecdsa = input.read(67)
assert input.read(3) == b'\x82\x81\xAE' # DER header
raw_rsa = input.read(174)
# Parse the RSA key
raw_rsa = BytesIO(raw_rsa)
assert raw_rsa.read(2) == b'\x00\xAC' # Not sure what this is
assert raw_rsa.read(3) == b'\x30\x81\xA9' # Inner DER header
assert raw_rsa.read(3) == b'\x02\x81\xA1'
rsa_modulus = raw_rsa.read(161)
rsa_modulus = int.from_bytes(rsa_modulus, "big")
assert raw_rsa.read(5) == b'\x02\x03\x01\x00\x01' # Exponent, should always be 65537
# Parse the EC key
assert raw_ecdsa[:3] == b'\x00\x41\x04'
raw_ecdsa = raw_ecdsa[3:]
ec_x = int.from_bytes(raw_ecdsa[:32], "big")
ec_y = int.from_bytes(raw_ecdsa[32:], "big")
ec_key = ec.EllipticCurvePublicNumbers(ec_x, ec_y, ec.SECP256R1())
ec_key = ec_key.public_key()
rsa_key = rsa.RSAPublicNumbers(e=65537, n=rsa_modulus)
rsa_key = rsa_key.public_key()
return IDSIdentity(signing_public_key=serialize_key(ec_key), encryption_public_key=serialize_key(rsa_key))
def encode(self) -> bytes:
output = BytesIO()
raw_rsa = BytesIO()
raw_rsa.write(b'\x00\xAC')
raw_rsa.write(b'\x30\x81\xA9')
raw_rsa.write(b'\x02\x81\xA1')
raw_rsa.write(parse_key(self.encryption_public_key).public_numbers().n.to_bytes(161, "big"))
raw_rsa.write(b'\x02\x03\x01\x00\x01') # Hardcode the exponent
output.write(b'\x30\x81\xF6\x81\x43')
output.write(b'\x00\x41\x04')
output.write(parse_key(self.signing_public_key).public_numbers().x.to_bytes(32, "big"))
output.write(parse_key(self.signing_public_key).public_numbers().y.to_bytes(32, "big"))
output.write(b'\x82\x81\xAE')
output.write(raw_rsa.getvalue())
return output.getvalue()
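A round-trip sketch (assumes the fixed key sizes hard-coded above: a P-256 signing key and a 1280-bit RSA encryption key):
ident = IDSIdentity()
blob = ident.encode()
decoded = IDSIdentity.decode(blob)
assert decoded.signing_public_key == ident.signing_public_key
assert decoded.encryption_public_key == ident.encryption_public_key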
PROTOCOL_VERSION = "1640"
from collections import namedtuple
KeyPair = namedtuple("KeyPair", ["key", "cert"])
def armour_cert(cert: bytes) -> str:
cert = x509.load_der_x509_certificate(cert)
return cert.public_bytes(serialization.Encoding.PEM).decode("utf-8").strip()
def add_auth_signature(
headers: dict,
body: bytes,
bag_key: str,
auth_key: KeyPair,
push_key: KeyPair,
push_token: str,
auth_number=None,
):
push_sig, push_nonce = _sign_payload(push_key.key, bag_key, "", push_token, body)
headers["x-push-sig"] = push_sig
headers["x-push-nonce"] = b64encode(push_nonce)
headers["x-push-cert"] = dearmour(push_key.cert)
headers["x-push-token"] = push_token
auth_sig, auth_nonce = _sign_payload(auth_key.key, bag_key, "", push_token, body)
auth_postfix = "-" + str(auth_number) if auth_number is not None else ""
headers["x-auth-sig" + auth_postfix] = auth_sig
headers["x-auth-nonce" + auth_postfix] = b64encode(auth_nonce)
headers["x-auth-cert" + auth_postfix] = dearmour(auth_key.cert)
def register(
push_token, handles, user_id, auth_key: KeyPair, push_key: KeyPair, identity: IDSIdentity, validation_data
):
logger.debug(f"Registering IDS identity for {handles}")
uris = [{"uri": handle} for handle in handles]
body = {
"hardware-version": "MacBookPro18,3",
"language": "en-US",
"os-version": "macOS,13.2.1,22D68",
"software-version": "22D68",
"services": [
{
"capabilities": [{"flags": 17, "name": "Messenger", "version": 1}],
"service": "com.apple.madrid",
"users": [
{
"client-data": {
'is-c2k-equipment': True,
'optionally-receive-typing-indicators': True,
'public-message-identity-key': identity.encode(),
'public-message-identity-version':2,
'show-peer-errors': True,
'supports-ack-v1': True,
'supports-activity-sharing-v1': True,
'supports-audio-messaging-v2': True,
"supports-autoloopvideo-v1": True,
'supports-be-v1': True,
'supports-ca-v1': True,
'supports-fsm-v1': True,
'supports-fsm-v2': True,
'supports-fsm-v3': True,
'supports-ii-v1': True,
'supports-impact-v1': True,
'supports-inline-attachments': True,
'supports-keep-receipts': True,
"supports-location-sharing": True,
'supports-media-v2': True,
'supports-photos-extension-v1': True,
'supports-st-v1': True,
'supports-update-attachments-v1': True,
},
"uris": uris,
"user-id": user_id,
}
],
}
],
"validation-data": b64decode(validation_data),
}
body = plistlib.dumps(body)
headers = {
"x-protocol-version": PROTOCOL_VERSION,
"x-auth-user-id-0": user_id,
}
add_auth_signature(headers, body, "id-register", auth_key, push_key, push_token, 0)
r = requests.post(
"https://identity.ess.apple.com/WebObjects/TDIdentityService.woa/wa/register",
headers=headers,
data=body,
verify=False,
)
r = plistlib.loads(r.content)
#print(f'Response code: {r["status"]}')
logger.debug(f"Recieved response to IDS registration: {r}")
if "status" in r and r["status"] == 6004:
raise Exception("Validation data expired!")
# TODO: Do validation of nested statuses
if "status" in r and r["status"] != 0:
raise Exception(f"Failed to register: {r}")
if not "services" in r:
raise Exception(f"No services in response: {r}")
if not "users" in r["services"][0]:
raise Exception(f"No users in response: {r}")
if not "cert" in r["services"][0]["users"][0]:
raise Exception(f"No cert in response: {r}")
return armour_cert(r["services"][0]["users"][0]["cert"]) | null |
22,293 | import plistlib
import random
import uuid
from base64 import b64decode
import requests
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.x509.oid import NameOID
import bags
from . import signing
from ._helpers import PROTOCOL_VERSION, USER_AGENT, KeyPair
import logging
logger = logging.getLogger("ids")
def _auth_token_request(username: str, password: str) -> any:
# Turn the PET into an auth token
data = {
"username": username,
#"client-id": str(uuid.uuid4()),
#"delegates": {"com.apple.private.ids": {"protocol-version": "4"}},
"password": password,
}
data = plistlib.dumps(data)
r = requests.post(
# TODO: Figure out which URL bag we can get this from
"https://profile.ess.apple.com/WebObjects/VCProfileService.woa/wa/authenticateUser",
#"https://setup.icloud.com/setup/prefpane/loginDelegates",
#auth=(username, password),
data=data,
verify=False,
)
r = plistlib.loads(r.content)
return r
def get_auth_token(
username: str, password: str, factor_gen: callable = None
) -> tuple[str, str]:
from sys import platform
result = _auth_token_request(username, password)
if result["status"] != 0:
if result["status"] == 5000:
if factor_gen is None:
password = password + input("Enter 2FA code: ")
else:
password = password + factor_gen()
result = _auth_token_request(username, password)
if result["status"] != 0:
raise Exception(f"Error: {result}")
auth_token = result["auth-token"]
realm_user_id = result["profile-id"]
# else:
# logger.debug("Using old-style authentication")
# # Make the request without the 2FA code to make the prompt appear
# _auth_token_request(username, password)
# # TODO: Make sure we actually need the second request, some rare accounts don't have 2FA
# # Now make the request with the 2FA code
# if factor_gen is None:
# pet = password + input("Enter 2FA code: ")
# else:
# pet = password + factor_gen()
# r = _auth_token_request(username, pet)
# # print(r)
# if "description" in r:
# raise Exception(f"Error: {r['description']}")
# service_data = r["delegates"]["com.apple.private.ids"]["service-data"]
# realm_user_id = service_data["realm-user-id"]
# auth_token = service_data["auth-token"]
# print(f"Auth token for {realm_user_id}: {auth_token}")
logger.debug(f"Got auth token for IDS: {auth_token}")
return realm_user_id, auth_token | null |
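A hedged interactive sketch (prompts are illustrative; the 2FA code is simply appended to the password on the retry path above):
from getpass import getpass
user_id, token = get_auth_token(input("Apple ID: "), getpass("Password: "))
print("realm user id:", user_id)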
22,294 | import plistlib
import random
import uuid
from base64 import b64decode
import requests
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.x509.oid import NameOID
import bags
from . import signing
from ._helpers import PROTOCOL_VERSION, USER_AGENT, KeyPair
import logging
logger = logging.getLogger("ids")
def _generate_csr(private_key: rsa.RSAPrivateKey) -> str:
csr = (
x509.CertificateSigningRequestBuilder()
.subject_name(
x509.Name(
[
x509.NameAttribute(NameOID.COMMON_NAME, random.randbytes(20).hex()),
]
)
)
.sign(private_key, hashes.SHA256())
)
csr = csr.public_bytes(serialization.Encoding.PEM).decode("utf-8")
return (
csr.replace("-----BEGIN CERTIFICATE REQUEST-----", "")
.replace("-----END CERTIFICATE REQUEST-----", "")
.replace("\n", "")
)
from collections import namedtuple
KeyPair = namedtuple("KeyPair", ["key", "cert"])
def get_auth_cert(user_id, token) -> KeyPair:
BAG_KEY = "id-authenticate-ds-id"
private_key = rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=default_backend()
)
body = {
"authentication-data": {"auth-token": token},
"csr": b64decode(_generate_csr(private_key)),
"realm-user-id": user_id,
}
body = plistlib.dumps(body)
r = requests.post(
bags.ids_bag()[BAG_KEY],
#"https://profile.ess.apple.com/WebObjects/VCProfileService.woa/wa/authenticateDS",
data=body,
headers={"x-protocol-version": "1630"},
verify=False,
)
r = plistlib.loads(r.content)
if r["status"] != 0:
raise (Exception(f"Failed to get auth cert: {r}"))
cert = x509.load_der_x509_certificate(r["cert"])
logger.debug("Got auth cert from token")
return KeyPair(
private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
.decode("utf-8")
.strip(),
cert.public_bytes(serialization.Encoding.PEM).decode("utf-8").strip(),
) | null |
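A usage sketch (user_id and token are assumed to come from get_auth_token in the previous entry; the result is a PEM KeyPair used to sign IDS requests):
auth_keypair = get_auth_cert(user_id, token)
print(auth_keypair.cert.splitlines()[0])   # typically "-----BEGIN CERTIFICATE-----"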
22,295 | import plistlib
import random
import uuid
from base64 import b64decode
import requests
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.x509.oid import NameOID
import bags
from . import signing
from ._helpers import PROTOCOL_VERSION, USER_AGENT, KeyPair
import logging
logger = logging.getLogger("ids")
PROTOCOL_VERSION = "1640"
from collections import namedtuple
KeyPair = namedtuple("KeyPair", ["key", "cert"])
def get_handles(push_token, user_id: str, auth_key: KeyPair, push_key: KeyPair):
BAG_KEY = "id-get-handles"
headers = {
"x-protocol-version": PROTOCOL_VERSION,
"x-auth-user-id": user_id,
}
signing.add_auth_signature(
headers, None, BAG_KEY, auth_key, push_key, push_token
)
r = requests.get(
bags.ids_bag()[BAG_KEY],
headers=headers,
verify=False,
)
r = plistlib.loads(r.content)
if not "handles" in r:
raise Exception("No handles in response: " + str(r))
logger.debug(f"User {user_id} has handles {r['handles']}")
return [handle["uri"] for handle in r["handles"]] | null |
22,296 | from collections import namedtuple
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec, rsa
def parse_key(key: str):
# Check if it is a public or private key
if "PUBLIC" in key:
return serialization.load_pem_public_key(key.encode())
else:
return serialization.load_pem_private_key(key.encode(), None) | null |
22,297 | from collections import namedtuple
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec, rsa
def serialize_key(key):
if isinstance(key, ec.EllipticCurvePrivateKey) or isinstance(key, rsa.RSAPrivateKey):
return key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
).decode("utf-8").strip()
else:
return key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
).decode("utf-8").strip() | null |
22,298 | import gzip
import plistlib
import random
from base64 import b64encode
import apns
import bags
from ._helpers import KeyPair, PROTOCOL_VERSION
from . import signing
PROTOCOL_VERSION = "1640"
from collections import namedtuple
KeyPair = namedtuple("KeyPair", ["key", "cert"])
def lookup(
conn: apns.APNSConnection,
self_uri: str,
id_keypair: KeyPair,
query: list[str],
topic,
) -> bytes:
BAG_KEY = "id-query"
conn.filter([topic])
body = plistlib.dumps({"uris": query})
body = gzip.compress(body, mtime=0)
push_token = b64encode(conn.token).decode()
headers = {
"x-id-self-uri": self_uri,
"x-protocol-version": PROTOCOL_VERSION,
}
signing.add_id_signature(headers, body, BAG_KEY, id_keypair, push_token)
msg_id = random.randbytes(16)
req = {
"cT": "application/x-apple-plist",
"U": msg_id,
"c": 96,
"u": bags.ids_bag()[BAG_KEY],
"h": headers,
"v": 2,
"b": body,
}
conn.send_message(topic, plistlib.dumps(req, fmt=plistlib.FMT_BINARY))
def check_response(x):
if x[0] != 0x0A:
return False
resp_body = apns._get_field(x[1], 3)
if resp_body is None:
return False
resp_body = plistlib.loads(resp_body)
return resp_body.get('U') == msg_id
# Lambda to check if the response is the one we want
payload = conn.incoming_queue.wait_pop_find(check_response)
resp = apns._get_field(payload[1], 3)
resp = plistlib.loads(resp)
resp = gzip.decompress(resp["b"])
resp = plistlib.loads(resp)
# Acknowledge the message
#conn._send_ack(apns._get_field(payload[1], 4))
if resp['status'] != 0:
raise Exception(f'Query failed: {resp}')
if not 'results' in resp:
raise Exception(f'No results in response: {resp}')
return resp['results'] | null |
22,299 | import plistlib
import zlib
from base64 import b64decode, b64encode
from hashlib import sha1
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKCYAN = "\033[96m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def _lookup_topic(hash: bytes):
for topic_lookup in topics_lookup:
if topic_lookup[1] == hash:
return topic_lookup[0]
return None
def _get_field(fields: list[tuple[int, bytes]], id: int) -> bytes:
for field_id, value in fields:
if field_id == id:
return value
return None
def _p_filter(prefix, fields: list[tuple[int, bytes]]):
enabled = []
ignored = []
oppertunistic = []
paused = []
token = ""
for field in fields:
if field[0] == 1:
token = b64encode(field[1])
# print(f"Push Token: {b64encode(field[1])}")
elif field[0] == 2:
enabled.append(_lookup_topic(field[1]))
elif field[0] == 3:
ignored.append(_lookup_topic(field[1]))
elif field[0] == 4:
oppertunistic.append(_lookup_topic(field[1]))
elif field[0] == 5:
paused.append(_lookup_topic(field[1]))
else:
pass # whatever, there's a 6 but it's not documented
# print(f"Unknown field ID: {field[0]}")
# Remove None values
enabled = [topic.strip() for topic in enabled if topic is not None]
ignored = [topic.strip() for topic in ignored if topic is not None]
oppertunistic = [topic.strip() for topic in oppertunistic if topic is not None]
paused = [topic.strip() for topic in paused if topic is not None]
enabled = ", ".join(enabled)
ignored = ", ".join(ignored)
oppertunistic = ", ".join(oppertunistic)
paused = ", ".join(paused)
if not enabled:
enabled = "None"
if not ignored:
ignored = "None"
if not oppertunistic:
oppertunistic = "None"
if not paused:
paused = "None"
# Trim the list of topics
if len(enabled) > 100:
enabled = enabled[:100] + "..."
if len(ignored) > 100:
ignored = ignored[:100] + "..."
if len(oppertunistic) > 100:
oppertunistic = oppertunistic[:100] + "..."
if len(paused) > 100:
paused = paused[:100] + "..."
# (Token: {token.decode()})
print(
f"{bcolors.OKGREEN}{prefix}{bcolors.ENDC}: {bcolors.OKCYAN}Filter{bcolors.ENDC} {bcolors.WARNING}Enabled{bcolors.ENDC}: {enabled} {bcolors.FAIL}Ignored{bcolors.ENDC}: {ignored} {bcolors.OKBLUE}Oppertunistic{bcolors.ENDC}: {oppertunistic} {bcolors.OKGREEN}Paused{bcolors.ENDC}: {paused}"
)
import apns
def pretty_print_payload(
prefix, payload: tuple[int, list[tuple[int, bytes]]]
) -> bytes | None:
id = payload[0]
if id == 9:
_p_filter(prefix, payload[1])
elif id == 8:
token_str = ""
if _get_field(payload[1], 3):
token_str = f"{bcolors.WARNING}Token{bcolors.ENDC}: {b64encode(_get_field(payload[1], 3)).decode()}"
print(
f"{bcolors.OKGREEN}{prefix}{bcolors.ENDC}: {bcolors.OKCYAN}Connected{bcolors.ENDC} {token_str} {bcolors.OKBLUE}{_get_field(payload[1], 1).hex()}{bcolors.ENDC}"
)
elif id == 7:
print(
f"{bcolors.OKGREEN}{prefix}{bcolors.ENDC}: {bcolors.OKCYAN}Connect Request{bcolors.ENDC}",
end="",
)
if _get_field(payload[1], 1):
print(
f" {bcolors.WARNING}Token{bcolors.ENDC}: {b64encode(_get_field(payload[1], 1)).decode()}",
end="",
)
if _get_field(payload[1], 0x0C):
print(f" {bcolors.OKBLUE}SIGNED{bcolors.ENDC}", end="")
if (
_get_field(payload[1], 0x5)
and int.from_bytes(_get_field(payload[1], 0x5)) & 0x4
):
print(f" {bcolors.FAIL}ROOT{bcolors.ENDC}", end="")
print()
# for field in payload[1]:
# print(f"Field ID: {field[0]}")
# print(f"Field Value: {field[1]}")
# 65 (user) or 69 (root)
for i in range(len(payload[1])):
# if payload[1][i][0] == 5:
# if payload[1][i][1] == b'\x00\x00\x00A': # user
# payload[1][i][1] = b'\x00\x00\x00E'
# elif payload[1][i][1] == b'\x00\x00\x00E': # root
# payload[1][i][1] = b'\x00\x00\x00A'
# else:
# print("Unknown field value: ", payload[1][i][1])
if payload[1][i][0] == 1:
pass
# payload[1][i] = (None, None)
# payload[1][i] = (1, b64decode("D3MtN3e18QE8rve3n92wp+CwK7u/bWk/5WjQUOBN640="))
out = apns._serialize_payload(payload[0], payload[1])
# return out
elif id == 0xC:
print(
f"{bcolors.OKGREEN}{prefix}{bcolors.ENDC}: {bcolors.OKCYAN}Keep Alive{bcolors.ENDC}"
)
elif id == 0xD:
print(
f"{bcolors.OKGREEN}{prefix}{bcolors.ENDC}: {bcolors.OKCYAN}Keep Alive Ack{bcolors.ENDC}"
)
elif id == 0x14:
print(
f"{bcolors.OKGREEN}{prefix}{bcolors.ENDC}: {bcolors.OKCYAN}Set State{bcolors.ENDC}: {_get_field(payload[1], 1).hex()}"
)
elif id == 0x1D or id == 0x20:
print(
f"{bcolors.OKGREEN}{prefix}{bcolors.ENDC}: {bcolors.WARNING}PubSub ??{bcolors.ENDC}"
)
elif id == 0xE:
print(
f"{bcolors.OKGREEN}{prefix}{bcolors.ENDC}: {bcolors.WARNING}Token Confirmation{bcolors.ENDC}"
)
elif id == 0xA:
topic = ""
# topic = _lookup_topic(_get_field(payload[1], 1))
# if it has apsd -> APNs in the prefix, it's an outgoing notification
if "apsd -> APNs" in prefix:
print(
f"{bcolors.OKGREEN}{prefix}{bcolors.ENDC}: {bcolors.OKBLUE}OUTGOING Notification{bcolors.ENDC}",
end="",
)
topic = _lookup_topic(_get_field(payload[1], 1))
# topic = _lookup_topic(_get_field(payload[1], 1))
# if b"bplist" in _get_field(payload[1], 3):
# print(f" {bcolors.OKCYAN}Binary{bcolors.ENDC}", end="")
# if topic == "com.apple.madrid":
# print(f" {bcolors.FAIL}Madrid{bcolors.ENDC}", end="")
# import plistlib
# plist = plistlib.loads(_get_field(payload[1], 3))
# #payload = plist["P"]
# #print(f" {bcolors.WARNING}Payload{bcolors.ENDC}: {payload}", end="")
# for key in plist:
# print(f" {bcolors.OKBLUE}{key}{bcolors.ENDC}: {plist[key]}", end="")
else:
print(
f"{bcolors.OKGREEN}{prefix}{bcolors.ENDC}: {bcolors.OKCYAN}Notification{bcolors.ENDC}",
end="",
)
topic = _lookup_topic(_get_field(payload[1], 2))
# if b"bplist" in _get_field(payload[1], 3):
# print(f" {bcolors.OKBLUE}Binary{bcolors.ENDC}", end="")
# print(f" {bcolors.WARNING}Topic{bcolors.ENDC}: {_lookup_topic(_get_field(payload[1], 2))}")
print(f" {bcolors.WARNING}Topic{bcolors.ENDC}: {topic}", end="")
if topic == "com.apple.madrid":
print(f" {bcolors.FAIL}Madrid{bcolors.ENDC}", end="")
orig_payload = payload
payload = plistlib.loads(_get_field(payload[1], 3))
# print(payload)
if "cT" in payload and False:
# It's HTTP over APNs
if "hs" in payload:
print(
f" {bcolors.WARNING}HTTP Response{bcolors.ENDC}: {payload['hs']}",
end="",
)
else:
print(f" {bcolors.WARNING}HTTP Request{bcolors.ENDC}", end="")
# print(f" {bcolors.WARNING}HTTP{bcolors.ENDC} {payload['hs']}", end="")
if "u" in payload:
print(f" {bcolors.OKCYAN}URL{bcolors.ENDC}: {payload['u']}", end="")
print(
f" {bcolors.FAIL}Content Type{bcolors.ENDC}: {payload['cT']}",
end="",
)
if "h" in payload:
print(
f" {bcolors.FAIL}Headers{bcolors.ENDC}: {payload['h']}", end=""
)
if "b" in payload:
# What am I really supposed to put in WBITS? Got this from a random SO answer
# print(payload["b"])
body = zlib.decompress(payload["b"], 16 + zlib.MAX_WBITS)
if b"plist" in body:
body = plistlib.loads(body)
print(f" {bcolors.FAIL}Body{bcolors.ENDC}: {body}", end="")
#if not "cT" in payload:
for key in payload:
print(f" {bcolors.OKBLUE}{key}{bcolors.ENDC}: {payload[key]}")
if 'dtl' in payload:
print("OVERRIDE DTL")
payload['dtl'][0].update({'sT': b64decode("jJ86jTYbv1mGVwO44PyfuZ9lh3o56QjOE39Jk8Z99N8=")})
# Re-serialize the payload
payload = plistlib.dumps(payload, fmt=plistlib.FMT_BINARY)
# Construct APNS message
# Get the original fields except 3
fields = orig_payload[1]
fields = [field for field in fields if field[0] != 3]
# Add the new field
fields.append((3, payload))
payload = apns._serialize_payload(0xA, fields)
# Use the override payload
#print(payload, orig_payload)
#print(payload == orig_payload)
return payload
print()
# print(f" {bcolors.WARNING}{bcolors.ENDC}: {payload['cT']}")
# for field in payload[1]:
# print(f"Field ID: {field[0]}")
# print(f"Field Value: {field[1]}")
elif id == 0xB:
print(
f"{bcolors.OKGREEN}{prefix}{bcolors.ENDC}: {bcolors.OKCYAN}Notification Ack{bcolors.ENDC} {bcolors.OKBLUE}{_get_field(payload[1], 8).hex()}{bcolors.ENDC}"
)
else:
print(prefix, f"Payload ID: {hex(payload[0])}")
for field in payload[1]:
print(f"Field ID: {field[0]}")
print(f"Field Value: {field[1]}") | null |
22,300 | import socket
import sys
import threading
import tlslite
cert: str = None
key: str = None
import printer
import apns
def handle(conn: socket.socket):
# Wrap the socket in TLS
s_conn = tlslite.TLSConnection(conn)
global cert, key
chain = tlslite.X509CertChain()
chain.parsePemList(cert)
# print(chain)
# cert = tlslite.X509CertChain([tlslite.X509().parse(cert)])
key_parsed = tlslite.parsePEMKey(key, private=True)
# print(key_parsed)
s_conn.handshakeServer(
certChain=chain, privateKey=key_parsed, reqCert=False, alpn=[ALPN]
)
print("Handling connection")
# Connect to the APNs server
apns = connect()
print("Connected to APNs")
threading.Thread(target=repl, args=(s_conn, apns)).start()
global global_cnt
global_cnt += 1
# Proxy data between the connections
# Create a thread to proxy data from the APNs server to the client
threading.Thread(
target=proxy, args=(s_conn, apns, f"{global_cnt} apsd -> APNs")
).start()
# Just proxy data from the client to the APNs server in this thread
proxy(apns, s_conn, f"{global_cnt} APNs -> apsd")
def serve():
# Create a socket to listen for connections
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allow the socket to be reused
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("localhost", 5223))
sock.listen()
print("Listening for connections...")
# Handshake with the client
# Read the certificate and private key from the config
with open("push_certificate_chain.pem", "r") as f:
global cert
cert = f.read()
# NEED TO USE OPENSSL, SEE CORETRUST CMD, MIMIC ENTRUST? OR AT LEAST SEE PUSHPROXY FOR EXTRACTION & REPLACEMENT
with open("push_key.pem", "r") as f:
global key
key = f.read()
conns = []
# Accept connections
try:
while True:
# Accept a connection
conn, addr = sock.accept()
conns.append(conn)
# Create a thread to handle the connection
# handle(conn)
thread = threading.Thread(target=handle, args=(conn,))
thread.start()
except KeyboardInterrupt:
print("Keyboard interrupt, closing sockets")
for conn in conns:
conn.close()
sock.close() | null |
22,301 | import hashlib
from . import mparser as macholibre
from .jelly import Jelly
import plistlib
import logging
logger = logging.getLogger("nac")
import struct
import requests, plistlib
def hook_code(uc, address: int, size: int, user_data):
logger.debug(">>> Tracing instruction at 0x%x, instruction size = 0x%x" % (address, size)) | null |
22,302 | import hashlib
from . import mparser as macholibre
from .jelly import Jelly
import plistlib
import logging
import struct
import requests, plistlib
class Jelly:
# Constants
UC_ARCH = unicorn.UC_ARCH_X86
UC_MODE = unicorn.UC_MODE_64
BINARY_BASE = 0x0
HOOK_BASE = 0xD00000
HOOK_SIZE = 0x1000
STACK_BASE = 0x00300000
STACK_SIZE = 0x00100000
HEAP_BASE = 0x00400000
HEAP_SIZE = 0x00100000
STOP_ADDRESS = 0x00900000
# Public variables
_hooks: dict[str, callable] = {}
"""Symbol name to hook function mapping"""
instr: VirtualInstructions = None
uc: unicorn.Uc = None
# Private variables
_binary: bytes = b""
_heap_use: int = 0
def __init__(self, binary: bytes):
self._binary = binary
def setup(self, hooks: dict[str, callable] = {}):
self._hooks = hooks
self._setup_unicorn()
self.instr = VirtualInstructions(self.uc)
self._setup_hooks()
self._map_binary()
self._setup_stack()
self._setup_heap()
self._setup_stop()
def _setup_unicorn(self):
self.uc = unicorn.Uc(self.UC_ARCH, self.UC_MODE)
def _setup_stack(self):
self.uc.mem_map(self.STACK_BASE, self.STACK_SIZE)
self.uc.mem_write(self.STACK_BASE, b"\x00" * self.STACK_SIZE)
self.uc.reg_write(unicorn.x86_const.UC_X86_REG_ESP, self.STACK_BASE + self.STACK_SIZE)
self.uc.reg_write(unicorn.x86_const.UC_X86_REG_EBP, self.STACK_BASE + self.STACK_SIZE)
def _setup_heap(self):
self.uc.mem_map(self.HEAP_BASE, self.HEAP_SIZE)
self.uc.mem_write(self.HEAP_BASE, b"\x00" * self.HEAP_SIZE)
def debug_registers(self):
logger.debug(f"""
RAX: {hex(self.uc.reg_read(unicorn.x86_const.UC_X86_REG_RAX))}
RBX: {hex(self.uc.reg_read(unicorn.x86_const.UC_X86_REG_RBX))}
RCX: {hex(self.uc.reg_read(unicorn.x86_const.UC_X86_REG_RCX))}
RDX: {hex(self.uc.reg_read(unicorn.x86_const.UC_X86_REG_RDX))}
RSI: {hex(self.uc.reg_read(unicorn.x86_const.UC_X86_REG_RSI))}
RDI: {hex(self.uc.reg_read(unicorn.x86_const.UC_X86_REG_RDI))}
RSP: {hex(self.uc.reg_read(unicorn.x86_const.UC_X86_REG_RSP))}
RBP: {hex(self.uc.reg_read(unicorn.x86_const.UC_X86_REG_RBP))}
RIP: {hex(self.uc.reg_read(unicorn.x86_const.UC_X86_REG_RIP))}
R8: {hex(self.uc.reg_read(unicorn.x86_const.UC_X86_REG_R8))}
R9: {hex(self.uc.reg_read(unicorn.x86_const.UC_X86_REG_R9))}
""")
def wrap_hook(self, func: callable) -> callable:
# Get the number of arguments the function takes
arg_count = func.__code__.co_argcount
#print(f"Wrapping {arg_count} argument function {func.__name__}")
# Create a wrapper function that reads the arguments from registers and the stack
def wrapper(self: 'Jelly'):
args = []
for i in range(1, arg_count):
if i < 6:
args.append(self.uc.reg_read(ARG_REGISTERS[i-1]))
else:
args.append(self.instr.pop())
#print(ARG_REGISTERS[1])
#self.debug_registers()
logger.debug(f"calling {func.__name__}")
if args != []:
logger.debug(f" with args: {args}")
ret = func(self, *args)
if ret is not None:
self.uc.reg_write(unicorn.x86_const.UC_X86_REG_RAX, ret)
return
return wrapper
def malloc(self, size: int) -> int:
# Very naive malloc implementation
addr = self.HEAP_BASE + self._heap_use
self._heap_use += size
return addr
def _setup_stop(self):
self.uc.mem_map(self.STOP_ADDRESS, 0x1000)
self.uc.mem_write(self.STOP_ADDRESS, b"\xc3" * 0x1000)
def _resolve_hook(uc: unicorn.Uc, address: int, size: int, self: 'Jelly'):
for name, addr in self._resolved_hooks.items():
if addr == address:
logger.debug(f"{name}: ")
self._hooks[name](self)
def _setup_hooks(self):
# Wrap all hooks
for name, func in self._hooks.items():
self._hooks[name] = self.wrap_hook(func)
self.uc.mem_map(self.HOOK_BASE, self.HOOK_SIZE)
# Write 'ret' instruction to all hook addresses
self.uc.mem_write(self.HOOK_BASE, b"\xc3" * self.HOOK_SIZE)
# Assign address in hook space to each hook
current_address = self.HOOK_BASE
self._resolved_hooks = {}
for hook in self._hooks:
self._resolved_hooks[hook] = current_address
current_address += 1
# Add unicorn instruction hook to entire hook space
self.uc.hook_add(unicorn.UC_HOOK_CODE, Jelly._resolve_hook, begin=self.HOOK_BASE, end=self.HOOK_BASE + self.HOOK_SIZE, user_data=self)
def _map_binary(self):
self.uc.mem_map(self.BINARY_BASE, round_to_page_size(len(self._binary), self.uc.ctl_get_page_size()))
self.uc.mem_write(self.BINARY_BASE, self._binary)
# Unmap the first page so we can catch NULL derefs
self.uc.mem_unmap(0x0, self.uc.ctl_get_page_size())
# Parse the binary so we can process binds
p = macholibre.Parser(self._binary)
p.parse()
for seg in p.segments:
for section in seg['sects']:
if section['type'] == 'LAZY_SYMBOL_POINTERS' or section['type'] == 'NON_LAZY_SYMBOL_POINTERS':
self._parse_lazy_binds(self.uc, section['r1'], section, self._binary[p.dysymtab['indirectsymoff']:], self._binary[p.symtab['stroff']:], self._binary[p.symtab['symoff']:])
self._parse_binds(self.uc, self._binary[p.dyld_info['bind_off']:p.dyld_info['bind_off']+p.dyld_info['bind_size']], p.segments)
def _do_bind(self, mu: unicorn.Uc, type, location, name):
if type == 1: # BIND_TYPE_POINTER
if name in self._hooks:
#print(f"Hooking {name} at {hex(location)}")
mu.mem_write(location, self._resolved_hooks[name].to_bytes(8, byteorder='little'))
else:
#print(f"Unknown symbol {name}")
pass
else:
raise NotImplementedError(f"Unknown bind type {type}")
def _parse_lazy_binds(self, mu: unicorn.Uc, indirect_offset, section, dysimtab, strtab, symtab):
logger.debug(f"Doing binds for {section['name']}")
for i in range(0, int(section['size']/8)):
# Parse into proper list?
dysym = dysimtab[(indirect_offset + i)*4:(indirect_offset + i)*4+4]
dysym = int.from_bytes(dysym, 'little')
index = dysym & 0x3fffffff
# Proper list too?
symbol = symtab[index * 16:(index * 16) + 4]
strx = int.from_bytes(symbol, 'little')
name = c_string(strtab, strx) # Remove _ at beginning
#print(f"Lazy bind for {hex(section['offset'] + (i * 8))} : {name}")
self._do_bind(mu, 1, section['offset'] + (i * 8), name)
def _parse_binds(self, mu: unicorn.Uc, binds: bytes, segments):
blen = len(binds)
binds: BytesIO = BytesIO(binds)
ordinal = 0
symbolName = ''
type = BIND_TYPE_POINTER
addend = 0
segIndex = 0
segOffset = 0
while binds.tell() < blen:
current = binds.read(1)[0]
opcode = current & BIND_OPCODE_MASK
immediate = current & BIND_IMMEDIATE_MASK
#print(f"{hex(offset)}: {hex(opcode)} {hex(immediate)}")
if opcode == BIND_OPCODE_DONE:
logger.debug("BIND_OPCODE_DONE")
break
elif opcode == BIND_OPCODE_SET_DYLIB_ORDINAL_IMM:
ordinal = immediate
elif opcode == BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB:
#ordinal = uLEB128(&p);
ordinal = decodeULEB128(binds)
#raise NotImplementedError("BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB")
elif opcode == BIND_OPCODE_SET_DYLIB_SPECIAL_IMM:
if (immediate == 0):
ordinal = 0
else:
ordinal = BIND_OPCODE_MASK | immediate
elif opcode == BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM:
# Parse string until null terminator
symbolName = ''
while True:
b = binds.read(1)[0]
if b == 0:
break
symbolName += chr(b)
#while binds[offset] != 0:
# symbolName += chr(binds[offset])
# offset += 1
#offset += 1
#print(f"Symbol name: {symbolName}")
elif opcode == BIND_OPCODE_SET_TYPE_IMM:
type = immediate
elif opcode == BIND_OPCODE_SET_ADDEND_SLEB:
#addend = sLEB128(&p);
raise NotImplementedError("BIND_OPCODE_SET_ADDEND_SLEB")
elif opcode == BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
segIndex = immediate
segOffset = decodeULEB128(binds)
#raise NotImplementedError("BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB")
elif opcode == BIND_OPCODE_ADD_ADDR_ULEB:
segOffset += decodeULEB128(binds)
#segOffset += uLEB128(&p);
#raise NotImplementedError("BIND_OPCODE_ADD_ADDR_ULEB")
elif opcode == BIND_OPCODE_DO_BIND:
self._do_bind(mu, type, segments[segIndex]['offset'] + segOffset, symbolName)
segOffset += 8
elif opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB:
self._do_bind(mu, type, segments[segIndex]['offset'] + segOffset, symbolName)
segOffset += decodeULEB128(binds) + 8
#bind(type, (cast(void**) &segments[segIndex][segOffset]), symbolName, addend, generateFallback);
#segOffset += uLEB128(&p) + size_t.sizeof;
#raise NotImplementedError("BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB")
elif opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED:
#bind(type, (cast(void**) &segments[segIndex][segOffset]), symbolName, addend, generateFallback);
self._do_bind(mu, type, segments[segIndex]['offset'] + segOffset, symbolName)
segOffset += immediate * 8 + 8
elif opcode == BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
count = decodeULEB128(binds)
skip = decodeULEB128(binds)
for i in range(count):
self._do_bind(mu, type, segments[segIndex]['offset'] + segOffset, symbolName)
segOffset += skip + 8
# uint64_t count = uLEB128(&p);
# uint64_t skip = uLEB128(&p);
# for (uint64_t i = 0; i < count; i++) {
# bind(type, (cast(void**) &segments[segIndex][segOffset]), symbolName, addend, generateFallback);
# segOffset += skip + size_t.sizeof;
# }
#raise NotImplementedError("BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB")
else:
logger.error(f"Unknown bind opcode {opcode}")
def sysctlbyname(j: Jelly):
return 0 # The output is not checked | null |
22,303 | import hashlib
from . import mparser as macholibre
from .jelly import Jelly
import plistlib
import logging
logger = logging.getLogger("nac")
def nac_init(j: Jelly, cert: bytes):
# Allocate memory for the cert
cert_addr = j.malloc(len(cert))
j.uc.mem_write(cert_addr, cert)
# Allocate memory for the outputs
out_validation_ctx_addr = j.malloc(8)
out_request_bytes_addr = j.malloc(8)
out_request_len_addr = j.malloc(8)
# Call the function
ret = j.instr.call(
0xB1DB0,
[
cert_addr,
len(cert),
out_validation_ctx_addr,
out_request_bytes_addr,
out_request_len_addr,
],
)
#print(hex(ret))
if ret != 0:
n = ret & 0xffffffff
n = (n ^ 0x80000000) - 0x80000000
raise Exception(f"Error calling nac_init: {n}")
# Get the outputs
validation_ctx_addr = j.uc.mem_read(out_validation_ctx_addr, 8)
request_bytes_addr = j.uc.mem_read(out_request_bytes_addr, 8)
request_len = j.uc.mem_read(out_request_len_addr, 8)
request_bytes_addr = int.from_bytes(request_bytes_addr, 'little')
request_len = int.from_bytes(request_len, 'little')
logger.debug(f"Request @ {hex(request_bytes_addr)} : {hex(request_len)}")
request = j.uc.mem_read(request_bytes_addr, request_len)
validation_ctx_addr = int.from_bytes(validation_ctx_addr, 'little')
return validation_ctx_addr, request
def nac_key_establishment(j: Jelly, validation_ctx: int, response: bytes):
response_addr = j.malloc(len(response))
j.uc.mem_write(response_addr, response)
ret = j.instr.call(
0xB1DD0,
[
validation_ctx,
response_addr,
len(response),
],
)
if ret != 0:
n = ret & 0xffffffff
n = (n ^ 0x80000000) - 0x80000000
raise Exception(f"Error calling nac_submit: {n}")
def nac_sign(j: Jelly, validation_ctx: int):
#void *validation_ctx, void *unk_bytes, int unk_len,
# void **validation_data, int *validation_data_len
out_validation_data_addr = j.malloc(8)
out_validation_data_len_addr = j.malloc(8)
ret = j.instr.call(
0xB1DF0,
[
validation_ctx,
0,
0,
out_validation_data_addr,
out_validation_data_len_addr,
],
)
if ret != 0:
n = ret & 0xffffffff
n = (n ^ 0x80000000) - 0x80000000
raise Exception(f"Error calling nac_generate: {n}")
validation_data_addr = j.uc.mem_read(out_validation_data_addr, 8)
validation_data_len = j.uc.mem_read(out_validation_data_len_addr, 8)
validation_data_addr = int.from_bytes(validation_data_addr, 'little')
validation_data_len = int.from_bytes(validation_data_len, 'little')
validation_data = j.uc.mem_read(validation_data_addr, validation_data_len)
return validation_data
import struct
import requests, plistlib
def get_cert():
resp = requests.get("http://static.ess.apple.com/identity/validation/cert-1.0.plist")
resp = plistlib.loads(resp.content)
return resp["cert"]
def get_session_info(req: bytes) -> bytes:
body = {
'session-info-request': req,
}
body = plistlib.dumps(body)
resp = requests.post("https://identity.ess.apple.com/WebObjects/TDIdentityService.woa/wa/initializeValidation", data=body, verify=False)
resp = plistlib.loads(resp.content)
return resp["session-info"]
def load_nac() -> Jelly:
binary = load_binary()
binary = get_x64_slice(binary)
# Create a Jelly object from the binary
j = Jelly(binary)
hooks = {
"_malloc": malloc,
"___stack_chk_guard": lambda: 0,
"___memset_chk": memset_chk,
"_sysctlbyname": lambda _: 0,
"_memcpy": memcpy,
"_kIOMasterPortDefault": lambda: 0,
"_IORegistryEntryFromPath": lambda _: 1,
"_kCFAllocatorDefault": lambda: 0,
"_IORegistryEntryCreateCFProperty": IORegistryEntryCreateCFProperty,
"_CFGetTypeID": CFGetTypeID,
"_CFStringGetTypeID": lambda _: 2,
"_CFDataGetTypeID": lambda _: 1,
"_CFDataGetLength": CFDataGetLength,
"_CFDataGetBytes": CFDataGetBytes,
"_CFRelease": lambda _: 0,
"_IOObjectRelease": lambda _: 0,
"_statfs$INODE64": lambda _: 0,
"_DASessionCreate": lambda _: 201,
"_DADiskCreateFromBSDName": lambda _: 202,
"_kDADiskDescriptionVolumeUUIDKey": lambda: 0,
"_DADiskCopyDescription": DADiskCopyDescription,
"_CFDictionaryGetValue": CFDictionaryGetValue,
"_CFUUIDCreateString": lambda _, __, uuid: uuid,
"_CFStringGetLength": CFStringGetLength,
"_CFStringGetMaximumSizeForEncoding": lambda _, length, __: length,
"_CFStringGetCString": CFStringGetCString,
"_free": lambda _: 0,
"_IOServiceMatching": IOServiceMatching,
"_IOServiceGetMatchingService": IOServiceGetMatchingService,
"_CFDictionaryCreateMutable": CFDictionaryCreateMutable,
"_kCFBooleanTrue": lambda: 0,
"_CFDictionarySetValue": CFDictionarySetValue,
"_IOServiceGetMatchingServices": IOServiceGetMatchingServices,
"_IOIteratorNext": IOIteratorNext,
"___bzero": bzero,
"_IORegistryEntryGetParentEntry": IORegistryEntryGetParentEntry,
"_arc4random": arc4random
}
j.setup(hooks)
return j
def generate_validation_data() -> bytes:
j = load_nac()
logger.debug("Loaded NAC library")
val_ctx, req = nac_init(j,get_cert())
logger.debug("Initialized NAC")
session_info = get_session_info(req)
logger.debug("Got session info")
nac_key_establishment(j, val_ctx, session_info)
logger.debug("Submitted session info")
val_data = nac_sign(j, val_ctx)
logger.info("Generated validation data")
return bytes(val_data) | null |
22,304 | from io import BytesIO
import unicorn
from . import mparser as macholibre
import logging
def round_to_page_size(size: int, page_size: int) -> int:
return (size + page_size - 1) & ~(page_size - 1) | null |
22,305 | from io import BytesIO
import unicorn
from . import mparser as macholibre
import logging
def decodeULEB128(bytes: BytesIO) -> int:
result = 0
shift = 0
while True:
b = bytes.read(1)[0]
result |= (b & 0x7F) << shift
if (b & 0x80) == 0:
break
shift += 7
return result | null |
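Worked example: the ULEB128 byte sequence E5 8E 26 decodes to 624485 (0x65 + (0x0E << 7) + (0x26 << 14); the continuation bit is set on the first two bytes):
assert decodeULEB128(BytesIO(b"\xE5\x8E\x26")) == 624485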
22,306 | from io import BytesIO
import unicorn
from . import mparser as macholibre
import logging
def c_string(bytes, start: int = 0) -> str:
out = ''
i = start
while True:
if i >= len(bytes) or bytes[i] == 0:  # >= avoids an IndexError when the buffer has no NUL terminator
break
out += chr(bytes[i])
#print(start)
#print(chr(bytes[i]))
i += 1
return out | null |
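Small examples (the second one relies on the bounds check above stopping at the end of the buffer):
assert c_string(b"hello\x00world") == "hello"
assert c_string(b"hello\x00world", 6) == "world"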
22,307 | import json
import logging
import os
import threading
import time
from base64 import b64decode, b64encode
from getpass import getpass
from rich.logging import RichHandler
import apns
import ids
import imessage
def safe_b64decode(s):
try:
return b64decode(s)
except:
return None | null |
22,308 | import json
import logging
import os
import threading
import time
from base64 import b64decode, b64encode
from getpass import getpass
from rich.logging import RichHandler
import apns
import ids
import imessage
INPUT_QUEUE = apns.IncomingQueue()
while True:
msg = im.receive()
if msg is not None:
# print(f'[{msg.sender}] {msg.text}')
print(msg.to_string())
attachments = msg.attachments()
if len(attachments) > 0:
attachments_path = f"attachments/{msg.id}/"
os.makedirs(attachments_path, exist_ok=True)
for attachment in attachments:
with open(attachments_path + attachment.name, "wb") as attachment_file:
attachment_file.write(attachment.versions[0].data())
print(f"({len(attachments)} attachment{'s have' if len(attachments) != 1 else ' has'} been downloaded and put "
f"in {attachments_path})")
if len(INPUT_QUEUE) > 0:
msg = INPUT_QUEUE.pop()
if msg == '': continue
if msg == 'help' or msg == 'h':
print('help (h): show this message')
print('quit (q): quit')
#print('send (s) [recipient] [message]: send a message')
print('filter (f) [recipient]: set the current chat')
print('effect (e): adds an iMessage effect to the next sent message')
print('note: recipient must start with tel: or mailto: and include the country code')
print('handle <handle>: set the current handle (for sending messages)')
print('\\: escape commands (will be removed from message)')
elif msg == 'quit' or msg == 'q':
break
elif msg == 'effect' or msg == 'e' or msg.startswith("effect ") or msg.startswith("e "):
msg = msg.split(" ")
if len(msg) < 2 or msg[1] == "":
print("effect [effect namespace]")
else:
print(f"next message will be sent with [{msg[1]}]")
current_effect = msg[1]
elif msg == 'filter' or msg == 'f' or msg.startswith('filter ') or msg.startswith('f '):
# Set the current chat
msg = msg.split(' ')
if len(msg) < 2 or msg[1] == '':
print('filter [recipients]')
else:
print(f'Filtering to {[fixup_handle(h) for h in msg[1:]]}')
current_participants = [fixup_handle(h) for h in msg[1:]]
elif msg == 'handle' or msg.startswith('handle '):
msg = msg.split(' ')
if len(msg) < 2 or msg[1] == '':
print('handle [handle]')
print('Available handles:')
for h in user.handles:
if h == user.current_handle:
print(f'\t{h} (current)')
else:
print(f'\t{h}')
else:
h = msg[1]
h = fixup_handle(h)
if h in user.handles:
print(f'Using {h} as handle')
user.current_handle = h
else:
print(f'Handle {h} not found')
elif current_participants != []:
if msg.startswith('\\'):
msg = msg[1:]
im.send(imessage.iMessage(
text=msg,
participants=current_participants,
sender=user.current_handle,
effect=current_effect
))
current_effect = None
else:
print('No chat selected, use help for help')
time.sleep(0.1)
# elif msg.startswith('send') or msg.startswith('s'):
# msg = msg.split(' ')
# if len(msg) < 3:
# print('send [recipient] [message]')
# else:
# im.send(imessage.iMessage(
# text=' '.join(msg[2:]),
# participants=[msg[1], user.handles[0]],
# #sender=user.handles[0]
# ))
def input_thread():
from prompt_toolkit import prompt
while True:
try:
msg = prompt('>> ')
except:
msg = 'quit'
INPUT_QUEUE.append(msg) | null |
22,309 | import json
import logging
import os
import threading
import time
from base64 import b64decode, b64encode
from getpass import getpass
from rich.logging import RichHandler
import apns
import ids
import imessage
def fixup_handle(handle):
if handle.startswith('tel:+'):
return handle
elif handle.startswith('mailto:'):
return handle
elif handle.startswith('tel:'):
return 'tel:+' + handle[4:]
elif handle.startswith('+'):
return 'tel:' + handle
# If the handle starts with a number
elif handle[0].isdigit():
# If the handle is 10 digits, assume it's a US number
if len(handle) == 10:
return 'tel:+1' + handle
# If the handle is 11 digits, assume it's a US number with country code
elif len(handle) == 11:
return 'tel:+' + handle
else: # Assume it's an email
return 'mailto:' + handle | null |
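Examples of the normalisation rules (US-centric defaults are assumed for bare 10- and 11-digit numbers, as the code above encodes):
assert fixup_handle("5551234567") == "tel:+15551234567"
assert fixup_handle("15551234567") == "tel:+15551234567"
assert fixup_handle("+15551234567") == "tel:+15551234567"
assert fixup_handle("user@example.com") == "mailto:user@example.com"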
22,310 | import plistlib
import re
import uuid
from base64 import b64decode, b64encode
import requests
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.x509.oid import NameOID
import logging
logger = logging.getLogger("albert")
FAIRPLAY_PRIVATE_KEY = b64decode(
"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlDV3dJQkFBS0JnUUMzQktyTFBJQmFiaHByKzRTdnVRSG5iRjBzc3FSSVE2Ny8xYlRmQXJWdVVGNnA5c2RjdjcwTityOHlGeGVzRG1wVG1LaXRMUDA2c3pLTkFPMWs1SlZrOS9QMWVqejA4Qk1lOWVBYjRqdUFoVldkZkFJeWFKN3NHRmplU0wwMTVtQXZyeFRGY09NMTBGL3FTbEFSQmljY3hIalBYdHVXVnIwZkxHcmhNKy9BTVFJREFRQUJBb0dBQ0dXM2JISFBOZGI5Y1Z6dC9wNFBmMDNTakoxNXVqTVkwWFk5d1VtL2gxczZyTE84Ky8xME1ETUVHTWxFZGNtSGlXUmt3T1ZpalJIeHpOUnhFQU1JODdBcnVvZmhqZGRiTlZMdDZwcFcybkxDSzdjRURRSkZhaFRXOUdRRnpwVlJRWFhmeHI0Y3MxWDNrdXRsQjZ1WTJWR2x0eFFGWXNqNWRqdjdEK0E3MkEwQ1FRRFpqMVJHZHhiZU9vNFh6eGZBNm40MkdwWmF2VGxNM1F6R0ZvQkpnQ3FxVnUxSlFPem9vQU1SVCtOUGZnb0U4K3VzSVZWQjRJbzBiQ1VUV0xwa0V5dFRBa0VBMTFyeklwR0loRmtQdE5jLzMzZnZCRmd3VWJzalRzMVY1RzZ6NWx5L1huRzlFTmZMYmxnRW9iTG1TbXozaXJ2QlJXQURpd1V4NXpZNkZOL0RtdGk1NndKQWRpU2Nha3VmY255dnp3UVo3UndwLzYxK2VyWUpHTkZ0YjJDbXQ4Tk82QU9laGNvcEhNWlFCQ1d5MWVjbS83dUovb1ozYXZmSmRXQkkzZkd2L2twZW13SkFHTVh5b0RCanB1M2oyNmJEUno2eHRTczc2N3IrVmN0VExTTDYrTzRFYWFYbDNQRW1DcngvVSthVGpVNDVyN0RuaThaK3dkaElKRlBkbkpjZEZrd0dId0pBUFErd1ZxUmpjNGgzSHd1OEk2bGxrOXdocEs5TzcwRkxvMUZNVmRheXRFbE15cXpRMi8wNWZNYjdGNnlhV2h1K1EyR0dYdmRsVVJpQTN0WTBDc2ZNMHc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQ=="
)
FAIRPLAY_CERT_CHAIN = b64decode(
"MIIC8zCCAlygAwIBAgIKAlKu1qgdFrqsmzANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJVUzETMBEGA1UEChMKQXBwbGUgSW5jLjEVMBMGA1UECxMMQXBwbGUgaVBob25lMR8wHQYDVQQDExZBcHBsZSBpUGhvbmUgRGV2aWNlIENBMB4XDTIxMTAxMTE4NDczMVoXDTI0MTAxMTE4NDczMVowgYMxLTArBgNVBAMWJDE2MEQzRkExLUM3RDUtNEY4NS04NDQ4LUM1Q0EzQzgxMTE1NTELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlDdXBlcnRpbm8xEzARBgNVBAoTCkFwcGxlIEluYy4xDzANBgNVBAsTBmlQaG9uZTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAtwSqyzyAWm4aa/uEr7kB52xdLLKkSEOu/9W03wK1blBeqfbHXL+9Dfq/MhcXrA5qU5iorSz9OrMyjQDtZOSVZPfz9Xo89PATHvXgG+I7gIVVnXwCMmie7BhY3ki9NeZgL68UxXDjNdBf6kpQEQYnHMR4z17blla9Hyxq4TPvwDECAwEAAaOBlTCBkjAfBgNVHSMEGDAWgBSy/iEjRIaVannVgSaOcxDYp0yOdDAdBgNVHQ4EFgQURyh+oArXlcLvCzG4m5/QxwUFzzMwDAYDVR0TAQH/BAIwADAOBgNVHQ8BAf8EBAMCBaAwIAYDVR0lAQH/BBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMBAGCiqGSIb3Y2QGCgIEAgUAMA0GCSqGSIb3DQEBBQUAA4GBAKwB9DGwHsinZu78lk6kx7zvwH5d0/qqV1+4Hz8EG3QMkAOkMruSRkh8QphF+tNhP7y93A2kDHeBSFWk/3Zy/7riB/dwl94W7vCox/0EJDJ+L2SXvtB2VEv8klzQ0swHYRV9+rUCBWSglGYlTNxfAsgBCIsm8O1Qr5SnIhwfutc4MIIDaTCCAlGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB5MQswCQYDVQQGEwJVUzETMBEGA1UEChMKQXBwbGUgSW5jLjEmMCQGA1UECxMdQXBwbGUgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxLTArBgNVBAMTJEFwcGxlIGlQaG9uZSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNzA0MTYyMjU0NDZaFw0xNDA0MTYyMjU0NDZaMFoxCzAJBgNVBAYTAlVTMRMwEQYDVQQKEwpBcHBsZSBJbmMuMRUwEwYDVQQLEwxBcHBsZSBpUGhvbmUxHzAdBgNVBAMTFkFwcGxlIGlQaG9uZSBEZXZpY2UgQ0EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAPGUSsnquloYYK3Lok1NTlQZaRdZB2bLl+hmmkdfRq5nerVKc1SxywT2vTa4DFU4ioSDMVJl+TPhl3ecK0wmsCU/6TKqewh0lOzBSzgdZ04IUpRai1mjXNeT9KD+VYW7TEaXXm6yd0UvZ1y8Cxi/WblshvcqdXbSGXH0KWO5JQuvAgMBAAGjgZ4wgZswDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLL+ISNEhpVqedWBJo5zENinTI50MB8GA1UdIwQYMBaAFOc0Ki4i3jlga7SUzneDYS8xoHw1MDgGA1UdHwQxMC8wLaAroCmGJ2h0dHA6Ly93d3cuYXBwbGUuY29tL2FwcGxlY2EvaXBob25lLmNybDANBgkqhkiG9w0BAQUFAAOCAQEAd13PZ3pMViukVHe9WUg8Hum+0I/0kHKvjhwVd/IMwGlXyU7DhUYWdja2X/zqj7W24Aq57dEKm3fqqxK5XCFVGY5HI0cRsdENyTP7lxSiiTRYj2mlPedheCn+k6T5y0U4Xr40FXwWb2nWqCF1AgIudhgvVbxlvqcxUm8Zz7yDeJ0JFovXQhyO5fLUHRLCQFssAbf8B4i8rYYsBUhYTspVJcxVpIIltkYpdIRSIARA49HNvKK4hzjzMS/OhKQpVKw+OCEZxptCVeN2pjbdt9uzi175oVo/u6B2ArKAW17u6XEHIdDMOe7cb33peVI6TD15W4MIpyQPbp8orlXe+tA8JDCCA/MwggLboAMCAQICARcwDQYJKoZIhvcNAQEFBQAwYjELMAkGA1UEBhMCVVMxEzARBgNVBAoTCkFwcGxlIEluYy4xJjAkBgNVBAsTHUFwcGxlIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRYwFAYDVQQDEw1BcHBsZSBSb290IENBMB4XDTA3MDQxMjE3NDMyOFoXDTIyMDQxMjE3NDMyOFoweTELMAkGA1UEBhMCVVMxEzARBgNVBAoTCkFwcGxlIEluYy4xJjAkBgNVBAsTHUFwcGxlIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS0wKwYDVQQDEyRBcHBsZSBpUGhvbmUgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCjHr7wR8C0nhBbRqS4IbhPhiFwKEVgXBzDyApkY4j7/Gnu+FT86Vu3Bk4EL8NrM69ETOpLgAm0h/ZbtP1k3bNy4BOz/RfZvOeo7cKMYcIq+ezOpV7WaetkC40Ij7igUEYJ3Bnk5bCUbbv3mZjE6JtBTtTxZeMbUnrc6APZbh3aEFWGpClYSQzqR9cVNDP2wKBESnC+LLUqMDeMLhXr0eRslzhVVrE1K1jqRKMmhe7IZkrkz4nwPWOtKd6tulqz3KWjmqcJToAWNWWkhQ1jez5jitp9SkbsozkYNLnGKGUYvBNgnH9XrBTJie2htodoUraETrjIg+z5nhmrs8ELhsefAgMBAAGjgZwwgZkwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOc0Ki4i3jlga7SUzneDYS8xoHw1MB8GA1UdIwQYMBaAFCvQaUeUdgn+9GuNLkCm90dNfwheMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cuYXBwbGUuY29tL2FwcGxlY2Evcm9vdC5jcmwwDQYJKoZIhvcNAQEFBQADggEBAB3R1XvddE7XF/yCLQyZm15CcvJp3NVrXg0Ma0s+exQl3rOU6KD6D4CJ8hc9AAKikZG+dFfcr5qfoQp9ML4AKswhWev9SaxudRnomnoD0Yb25/awDktJ+qO3QbrX0eNWoX2Dq5eu+FFKJsGFQhMmjQNUZhBeYIQFEjEra1TAoMhBvFQe51StEwDSSse7wYqvgQiO8EYKvyemvtzPOTqAcBkjMqNrZl2eTahHSbJ7RbVRM6d0ZwlOtmxvSPcsuTMFRGtFvnRLb7KGkbQ+JSglnrPCUYb8T+WvO6q7RCwBSeJ0szT6RO8UwhHyLRkaUYnTCEpBbFhW3ps64QVX5WLP0g8wggS7MIIDo6ADAgECAg
ECMA0GCSqGSIb3DQEBBQUAMGIxCzAJBgNVBAYTAlVTMRMwEQYDVQQKEwpBcHBsZSBJbmMuMSYwJAYDVQQLEx1BcHBsZSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEWMBQGA1UEAxMNQXBwbGUgUm9vdCBDQTAeFw0wNjA0MjUyMTQwMzZaFw0zNTAyMDkyMTQwMzZaMGIxCzAJBgNVBAYTAlVTMRMwEQYDVQQKEwpBcHBsZSBJbmMuMSYwJAYDVQQLEx1BcHBsZSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEWMBQGA1UEAxMNQXBwbGUgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOSRqQkfkdseR1DrBe1eeYQt6zaiV0xV7IsZid75S2z1B6siMALoGD74UAnTf0GomPnRymacJGsR0KO75Bsqwx+VnnoMpEeLW9QWNzPLxA9NzhRp0ckZcvVdDtV/X5vyJQO6VY9NXQ3xZDUjFUsVWR2zlPf2nJ7PULrBWFBnjwi0IPfLrCwgb3C2PwEwjLdDzw+dPfMrSSgayP7OtbkO2V4c1ss9tTqt9A8OAJILsSEWLnTVPA3bYharo3GSR1NVwa8vQbP4++NwzeajTEV+H0xrUJZBicR0YgsQg0GHM4qBsTBY7FoEMoxos48d3mVz/2deZbxJ2HafMxRloXeUyS0CAwEAAaOCAXowggF2MA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQr0GlHlHYJ/vRrjS5ApvdHTX8IXjAfBgNVHSMEGDAWgBQr0GlHlHYJ/vRrjS5ApvdHTX8IXjCCAREGA1UdIASCAQgwggEEMIIBAAYJKoZIhvdjZAUBMIHyMCoGCCsGAQUFBwIBFh5odHRwczovL3d3dy5hcHBsZS5jb20vYXBwbGVjYS8wgcMGCCsGAQUFBwICMIG2GoGzUmVsaWFuY2Ugb24gdGhpcyBjZXJ0aWZpY2F0ZSBieSBhbnkgcGFydHkgYXNzdW1lcyBhY2NlcHRhbmNlIG9mIHRoZSB0aGVuIGFwcGxpY2FibGUgc3RhbmRhcmQgdGVybXMgYW5kIGNvbmRpdGlvbnMgb2YgdXNlLCBjZXJ0aWZpY2F0ZSBwb2xpY3kgYW5kIGNlcnRpZmljYXRpb24gcHJhY3RpY2Ugc3RhdGVtZW50cy4wDQYJKoZIhvcNAQEFBQADggEBAFw2mUwteLftjJvc83eb8nbSdzBPwR+Fg4UbmT1HN/Kpm0COLNSxkBLYvvRzm+7SZA/LeU802KI++Xj/a8gH7H05g4tTINM4xLG/mk8Ka/8r/FmnBQl8F0BWER5007eLIztHo9VvJOLr0bdw3w9F4SfK8W147ee1Fxeo3H4iNcol1dkP1mvUoiQjEfehrI9zgWDGG1sJL5Ky+ERI8GA4nhX1PSZnIIozavcNgs/e66Mv+VNqW2TAYzN39zoHLFbr2g8hDtq6cxlPtdk2f8GHVdmnmbkyQvvY1XGefqFStxu9k0IkEirHDx22TZxeY8hLgBdQqorV2uT80AkHN7B1dSE="
)
def _generate_csr(private_key: rsa.RSAPrivateKey) -> str:
csr = (
x509.CertificateSigningRequestBuilder()
.subject_name(
x509.Name(
[
x509.NameAttribute(NameOID.COUNTRY_NAME, "US"),
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, "CA"),
x509.NameAttribute(NameOID.LOCALITY_NAME, "Cupertino"),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Apple Inc."),
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, "iPhone"),
x509.NameAttribute(NameOID.COMMON_NAME, str(uuid.uuid4())),
]
)
)
.sign(private_key, hashes.SHA256())
)
return csr.public_bytes(serialization.Encoding.PEM).decode("utf-8")
def generate_push_cert() -> tuple[str, str]:
private_key = rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=default_backend()
)
csr = _generate_csr(private_key)
activation_info = {
"ActivationRandomness": str(uuid.uuid4()),
"ActivationState": "Unactivated",
"BuildVersion": "10.6.4",
"DeviceCertRequest": csr.encode("utf-8"),
"DeviceClass": "Windows",
"ProductType": "windows1,1",
"ProductVersion": "10.6.4",
"SerialNumber": "WindowSerial",
"UniqueDeviceID": str(uuid.uuid4()),
}
logger.debug(f"Generated activation info (with UUID: {activation_info['UniqueDeviceID']})")
activation_info = plistlib.dumps(activation_info)
# Load the private key
fairplay_key = serialization.load_pem_private_key(
FAIRPLAY_PRIVATE_KEY, password=None, backend=default_backend()
)
# Sign the activation info
signature = fairplay_key.sign(activation_info, padding.PKCS1v15(), hashes.SHA1()) # type: ignore
body = {
"ActivationInfoComplete": True,
"ActivationInfoXML": activation_info,
"FairPlayCertChain": FAIRPLAY_CERT_CHAIN,
"FairPlaySignature": signature,
}
resp = requests.post(
"https://albert.apple.com/WebObjects/ALUnbrick.woa/wa/deviceActivation?device=Windows",
data={"activation-info": plistlib.dumps(body)},
verify=False,
)
protocol = re.search("<Protocol>(.*)</Protocol>", resp.text).group(1) # type: ignore
protocol = plistlib.loads(protocol.encode("utf-8"))
logger.debug("Recieved push certificate from Albert")
return (
private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
.decode("utf-8")
.strip(),
protocol["device-activation"]["activation-record"]["DeviceCertificate"]
.decode("utf-8")
.strip(),
) | null |
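A minimal sketch of how the pair returned by generate_push_cert above could be inspected, assuming that function is available (re-indented) in scope and the machine can reach Apple's Albert endpoint; the attribute accesses below are standard cryptography-library APIs, nothing project-specific:
from cryptography import x509
from cryptography.hazmat.primitives import serialization

private_key_pem, device_cert_pem = generate_push_cert()
# Parse the device certificate issued by Albert and the locally generated key.
cert = x509.load_pem_x509_certificate(device_cert_pem.encode("utf-8"))
print(cert.subject.rfc4514_string())  # subject of the issued device certificate
print(cert.not_valid_after)           # expiry of the push certificate
key = serialization.load_pem_private_key(private_key_pem.encode("utf-8"), password=None)
print(key.key_size)                   # 2048, matching the key generated above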
22,311 | import atexit
import numpy as np
import queue
import torch
import torch.multiprocessing as mp
import slowfast.utils.logging as logging
from slowfast.datasets import cv2_transform
from slowfast.visualization.predictor import Predictor
The provided code snippet includes necessary dependencies for implementing the `draw_predictions` function. Write a Python function `def draw_predictions(task, video_vis)` to solve the following problem:
Draw prediction for the given task. Args: task (TaskInfo object): task object that contains the necessary information for visualization (e.g. frames, preds). All attributes must lie on CPU devices. video_vis (VideoVisualizer object): the video visualizer object.
Here is the function:
def draw_predictions(task, video_vis):
"""
Draw prediction for the given task.
Args:
task (TaskInfo object): task object that contains
the necessary information for visualization (e.g. frames, preds).
All attributes must lie on CPU devices.
video_vis (VideoVisualizer object): the video visualizer object.
"""
boxes = task.bboxes
frames = task.frames
preds = task.action_preds
if boxes is not None:
img_width = task.img_width
img_height = task.img_height
if boxes.device != torch.device("cpu"):
boxes = boxes.cpu()
boxes = cv2_transform.revert_scaled_boxes(
task.crop_size, boxes, img_height, img_width
)
keyframe_idx = len(frames) // 2 - task.num_buffer_frames
draw_range = [
keyframe_idx - task.clip_vis_size,
keyframe_idx + task.clip_vis_size,
]
buffer = frames[: task.num_buffer_frames]
frames = frames[task.num_buffer_frames :]
if boxes is not None:
if len(boxes) != 0:
frames = video_vis.draw_clip_range(
frames,
preds,
boxes,
keyframe_idx=keyframe_idx,
draw_range=draw_range,
)
else:
frames = video_vis.draw_clip_range(
frames, preds, keyframe_idx=keyframe_idx, draw_range=draw_range
)
del task
return buffer + frames | Draw prediction for the given task. Args: task (TaskInfo object): task object that contains the necessary information for visualization (e.g. frames, preds). All attributes must lie on CPU devices. video_vis (VideoVisualizer object): the video visualizer object.
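A self-contained sketch of driving draw_predictions without a detector: with bboxes set to None the slowfast cv2_transform path is skipped, so only the function above is needed. The stub visualizer, the fake frames, and the 400-class score tensor are illustrative assumptions, not the real VideoVisualizer or TaskInfo.
from types import SimpleNamespace
import numpy as np
import torch

class _StubVisualizer:
    # A real VideoVisualizer would overlay labels here; the stub just passes frames through.
    def draw_clip_range(self, frames, preds, keyframe_idx=None, draw_range=None):
        return list(frames)

task = SimpleNamespace(
    frames=[np.zeros((224, 224, 3), dtype=np.uint8) for _ in range(8)],
    action_preds=torch.rand(1, 400),  # fake per-clip class scores
    bboxes=None,                      # no detection boxes in this sketch
    num_buffer_frames=2,
    clip_vis_size=2,
)
out_frames = draw_predictions(task, _StubVisualizer())
print(len(out_frames))  # 8: buffered frames plus visualized frames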
22,312 | import itertools
import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.metrics import confusion_matrix
import slowfast.utils.logging as logging
from slowfast.datasets.utils import pack_pathway_output, tensor_normalize
The provided code snippet includes necessary dependencies for implementing the `get_confusion_matrix` function. Write a Python function `def get_confusion_matrix(preds, labels, num_classes, normalize="true")` to solve the following problem:
Calculate confusion matrix on the provided preds and labels. Args: preds (tensor or lists of tensors): predictions. Each tensor is in the shape of (n_batch, num_classes). Tensor(s) must be on CPU. labels (tensor or lists of tensors): corresponding labels. Each tensor is in the shape of either (n_batch,) or (n_batch, num_classes). num_classes (int): number of classes. Tensor(s) must be on CPU. normalize (Optional[str]) : {‘true’, ‘pred’, ‘all’}, default="true" Normalizes confusion matrix over the true (rows), predicted (columns) conditions or all the population. If None, confusion matrix will not be normalized. Returns: cmtx (ndarray): confusion matrix of size (num_classes x num_classes)
Here is the function:
def get_confusion_matrix(preds, labels, num_classes, normalize="true"):
"""
Calculate confusion matrix on the provided preds and labels.
Args:
preds (tensor or lists of tensors): predictions. Each tensor is in
the shape of (n_batch, num_classes). Tensor(s) must be on CPU.
labels (tensor or lists of tensors): corresponding labels. Each tensor is
in the shape of either (n_batch,) or (n_batch, num_classes).
num_classes (int): number of classes. Tensor(s) must be on CPU.
normalize (Optional[str]) : {‘true’, ‘pred’, ‘all’}, default="true"
Normalizes confusion matrix over the true (rows), predicted (columns)
conditions or all the population. If None, confusion matrix
will not be normalized.
Returns:
cmtx (ndarray): confusion matrix of size (num_classes x num_classes)
"""
if isinstance(preds, list):
preds = torch.cat(preds, dim=0)
if isinstance(labels, list):
labels = torch.cat(labels, dim=0)
# If labels are one-hot encoded, get their indices.
if labels.ndim == preds.ndim:
labels = torch.argmax(labels, dim=-1)
# Get the predicted class indices for examples.
preds = torch.flatten(torch.argmax(preds, dim=-1))
labels = torch.flatten(labels)
cmtx = confusion_matrix(
labels, preds, labels=list(range(num_classes)), normalize=normalize
)
return cmtx | Calculate confusion matrix on the provided preds and labels. Args: preds (tensor or lists of tensors): predictions. Each tensor is in the shape of (n_batch, num_classes). Tensor(s) must be on CPU. labels (tensor or lists of tensors): corresponding labels. Each tensor is in the shape of either (n_batch,) or (n_batch, num_classes). num_classes (int): number of classes. Tensor(s) must be on CPU. normalize (Optional[str]) : {‘true’, ‘pred’, ‘all’}, default="true" Normalizes confusion matrix over the true (rows), predicted (columns) conditions or all the population. If None, confusion matrix will not be normalized. Returns: cmtx (ndarray): confusion matrix of size (num_classes x num_classes)
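A quick way to exercise get_confusion_matrix above on fake data; the batch size and class count below are arbitrary assumptions:
import torch

num_classes = 5
preds = torch.rand(32, num_classes)            # fake per-example class scores
labels = torch.randint(0, num_classes, (32,))  # fake integer labels
cmtx = get_confusion_matrix(preds, labels, num_classes, normalize="true")
print(cmtx.shape)        # (5, 5)
print(cmtx.sum(axis=1))  # each true-class row sums to 1 (0 if that class never appears)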
22,313 | import itertools
import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.metrics import confusion_matrix
import slowfast.utils.logging as logging
from slowfast.datasets.utils import pack_pathway_output, tensor_normalize
def pack_pathway_output(cfg, frames):
"""
Prepare output as a list of tensors. Each tensor corresponding to a
unique pathway.
Args:
frames (tensor): frames of images sampled from the video. The
dimension is `channel` x `num frames` x `height` x `width`.
Returns:
frame_list (list): list of tensors with the dimension of
`channel` x `num frames` x `height` x `width`.
"""
if cfg.DATA.REVERSE_INPUT_CHANNEL:
frames = frames[[2, 1, 0], :, :, :]
if cfg.MODEL.ARCH in cfg.MODEL.SINGLE_PATHWAY_ARCH:
frame_list = [frames]
elif cfg.MODEL.ARCH in cfg.MODEL.MULTI_PATHWAY_ARCH:
fast_pathway = frames
# Perform temporal sampling from the fast pathway.
slow_pathway = torch.index_select(
frames,
1,
torch.linspace(
0, frames.shape[1] - 1, frames.shape[1] // cfg.SLOWFAST.ALPHA
).long(),
)
frame_list = [slow_pathway, fast_pathway]
else:
raise NotImplementedError(
"Model arch {} is not in {}".format(
cfg.MODEL.ARCH,
cfg.MODEL.SINGLE_PATHWAY_ARCH + cfg.MODEL.MULTI_PATHWAY_ARCH,
)
)
return frame_list
def tensor_normalize(tensor, mean, std, func=None):
"""
Normalize a given tensor by subtracting the mean and dividing the std.
Args:
tensor (tensor): tensor to normalize.
mean (tensor or list): mean value to subtract.
std (tensor or list): std to divide.
"""
if tensor.dtype == torch.uint8:
tensor = tensor.float()
tensor = tensor / 255.0
if type(mean) == list:
mean = torch.tensor(mean)
if type(std) == list:
std = torch.tensor(std)
if func is not None:
tensor = func(tensor)
tensor = tensor - mean
tensor = tensor / std
return tensor
The provided code snippet includes necessary dependencies for implementing the `process_cv2_inputs` function. Write a Python function `def process_cv2_inputs(frames, cfg)` to solve the following problem:
Normalize and prepare inputs as a list of tensors. Each tensor corresponds to a unique pathway. Args: frames (list of array): list of input images (corresponding to one clip) in range [0, 255]. cfg (CfgNode): configs. Details can be found in slowfast/config/defaults.py
Here is the function:
def process_cv2_inputs(frames, cfg):
"""
Normalize and prepare inputs as a list of tensors. Each tensor
corresponds to a unique pathway.
Args:
frames (list of array): list of input images (corresponding to one clip) in range [0, 255].
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
inputs = torch.from_numpy(np.array(frames)).float() / 255
inputs = tensor_normalize(inputs, cfg.DATA.MEAN, cfg.DATA.STD)
# T H W C -> C T H W.
inputs = inputs.permute(3, 0, 1, 2)
# Sample frames for num_frames specified.
index = torch.linspace(0, inputs.shape[1] - 1, cfg.DATA.NUM_FRAMES).long()
inputs = torch.index_select(inputs, 1, index)
inputs = pack_pathway_output(cfg, inputs)
inputs = [inp.unsqueeze(0) for inp in inputs]
return inputs | Normalize and prepare inputs as a list of tensors. Each tensor corresponds to a unique pathway. Args: frames (list of array): list of input images (corresponding to one clip) in range [0, 255]. cfg (CfgNode): configs. Details can be found in slowfast/config/defaults.py
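A sketch of calling process_cv2_inputs without the full SlowFast config system: a SimpleNamespace stands in for the CfgNode, and only the fields touched by the functions above are filled in. The architecture lists, mean/std values and frame counts are illustrative assumptions, not the library defaults.
from types import SimpleNamespace
import numpy as np

cfg = SimpleNamespace(
    DATA=SimpleNamespace(
        MEAN=[0.45, 0.45, 0.45],
        STD=[0.225, 0.225, 0.225],
        NUM_FRAMES=8,
        REVERSE_INPUT_CHANNEL=False,
    ),
    MODEL=SimpleNamespace(
        ARCH="slowfast",
        SINGLE_PATHWAY_ARCH=["c2d", "i3d", "slow", "x3d"],
        MULTI_PATHWAY_ARCH=["slowfast"],
    ),
    SLOWFAST=SimpleNamespace(ALPHA=4),
)
# One fake clip of 32 frames, as OpenCV would deliver them.
frames = [np.random.randint(0, 256, (256, 340, 3), dtype=np.uint8) for _ in range(32)]
slow, fast = process_cv2_inputs(frames, cfg)
print(slow.shape, fast.shape)  # (1, 3, 2, 256, 340) and (1, 3, 8, 256, 340)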
22,314 | import itertools
import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.metrics import confusion_matrix
import slowfast.utils.logging as logging
from slowfast.datasets.utils import pack_pathway_output, tensor_normalize
The provided code snippet includes necessary dependencies for implementing the `get_layer` function. Write a Python function `def get_layer(model, layer_name)` to solve the following problem:
Return the targeted layer (nn.Module Object) given a hierarchical layer name, separated by /. Args: model (model): model to get layers from. layer_name (str): name of the layer. Returns: prev_module (nn.Module): the layer from the model with `layer_name` name.
Here is the function:
def get_layer(model, layer_name):
"""
Return the targeted layer (nn.Module Object) given a hierarchical layer name,
separated by /.
Args:
model (model): model to get layers from.
layer_name (str): name of the layer.
Returns:
prev_module (nn.Module): the layer from the model with `layer_name` name.
"""
layer_ls = layer_name.split("/")
prev_module = model
for layer in layer_ls:
prev_module = prev_module._modules[layer]
return prev_module | Return the targeted layer (nn.Module Object) given a hierarchical layer name, separated by /. Args: model (model): model to get layers from. layer_name (str): name of the layer. Returns: prev_module (nn.Module): the layer from the model with `layer_name` name. |
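A tiny demo of get_layer above: hierarchical names are separated by "/" and must match the keys of each module's _modules dict; the toy model below is purely illustrative.
from collections import OrderedDict
import torch.nn as nn

model = nn.Sequential(OrderedDict([
    ("backbone", nn.Sequential(OrderedDict([
        ("conv1", nn.Conv2d(3, 8, kernel_size=3)),
        ("relu", nn.ReLU()),
    ]))),
    ("head", nn.Linear(8, 10)),
]))
print(get_layer(model, "backbone/conv1"))  # Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1))
print(get_layer(model, "head"))            # Linear(in_features=8, out_features=10, bias=True)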
22,315 | import logging as log
import math
import os
import matplotlib.pyplot as plt
import torch
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid
import slowfast.utils.logging as logging
import slowfast.visualization.utils as vis_utils
from slowfast.utils.misc import get_class_names
The provided code snippet includes necessary dependencies for implementing the `add_confusion_matrix` function. Write a Python function `def add_confusion_matrix( writer, cmtx, num_classes, global_step=None, subset_ids=None, class_names=None, tag="Confusion Matrix", figsize=None, )` to solve the following problem:
Calculate and plot confusion matrix to a SummaryWriter. Args: writer (SummaryWriter): the SummaryWriter to write the matrix to. cmtx (ndarray): confusion matrix. num_classes (int): total number of classes. global_step (Optional[int]): current step. subset_ids (list of ints): a list of label indices to keep. class_names (list of strs, optional): a list of all class names. tag (str or list of strs): name(s) of the confusion matrix image. figsize (Optional[float, float]): the figure size of the confusion matrix. If None, default to [6.4, 4.8].
Here is the function:
def add_confusion_matrix(
writer,
cmtx,
num_classes,
global_step=None,
subset_ids=None,
class_names=None,
tag="Confusion Matrix",
figsize=None,
):
"""
Calculate and plot confusion matrix to a SummaryWriter.
Args:
writer (SummaryWriter): the SummaryWriter to write the matrix to.
cmtx (ndarray): confusion matrix.
num_classes (int): total number of classes.
global_step (Optional[int]): current step.
subset_ids (list of ints): a list of label indices to keep.
class_names (list of strs, optional): a list of all class names.
tag (str or list of strs): name(s) of the confusion matrix image.
figsize (Optional[float, float]): the figure size of the confusion matrix.
If None, default to [6.4, 4.8].
"""
if subset_ids is None or len(subset_ids) != 0:
# If class names are not provided, use class indices as class names.
if class_names is None:
class_names = [str(i) for i in range(num_classes)]
# If subset is not provided, take every classes.
if subset_ids is None:
subset_ids = list(range(num_classes))
sub_cmtx = cmtx[subset_ids, :][:, subset_ids]
sub_names = [class_names[j] for j in subset_ids]
sub_cmtx = vis_utils.plot_confusion_matrix(
sub_cmtx,
num_classes=len(subset_ids),
class_names=sub_names,
figsize=figsize,
)
# Add the confusion matrix image to writer.
writer.add_figure(tag=tag, figure=sub_cmtx, global_step=global_step) | Calculate and plot confusion matrix to a SummaryWriter. Args: writer (SummaryWriter): the SummaryWriter to write the matrix to. cmtx (ndarray): confusion matrix. num_classes (int): total number of classes. global_step (Optional[int]): current step. subset_ids (list of ints): a list of label indices to keep. class_names (list of strs, optional): a list of all class names. tag (str or list of strs): name(s) of the confusion matrix image. figsize (Optional[float, float]): the figure size of the confusion matrix. If None, default to [6.4, 4.8]. |
22,316 | import logging as log
import math
import os
import matplotlib.pyplot as plt
import torch
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid
import slowfast.utils.logging as logging
import slowfast.visualization.utils as vis_utils
from slowfast.utils.misc import get_class_names
The provided code snippet includes necessary dependencies for implementing the `plot_hist` function. Write a Python function `def plot_hist( writer, cmtx, num_classes, k=10, global_step=None, subset_ids=None, class_names=None, figsize=None, )` to solve the following problem:
Given all predictions and all true labels, plot histograms of top-k most frequently predicted classes for each true class. Args: writer (SummaryWriter object): a tensorboard SummaryWriter object. cmtx (ndarray): confusion matrix. num_classes (int): total number of classes. k (int): top k to plot histograms. global_step (Optional[int]): current step. subset_ids (list of ints, optional): class indices to plot histogram. class_names (list of strings): names of all classes. figsize (Optional[float, float]): the figure size of the confusion matrix. If None, default to [6.4, 4.8].
Here is the function:
def plot_hist(
writer,
cmtx,
num_classes,
k=10,
global_step=None,
subset_ids=None,
class_names=None,
figsize=None,
):
"""
Given all predictions and all true labels, plot histograms of top-k most
frequently predicted classes for each true class.
Args:
writer (SummaryWriter object): a tensorboard SummaryWriter object.
cmtx (ndarray): confusion matrix.
num_classes (int): total number of classes.
k (int): top k to plot histograms.
global_step (Optional[int]): current step.
subset_ids (list of ints, optional): class indices to plot histogram.
class_names (list of strings): names of all classes.
figsize (Optional[float, float]): the figure size of the confusion matrix.
If None, default to [6.4, 4.8].
"""
if subset_ids is None or len(subset_ids) != 0:
if subset_ids is None:
subset_ids = set(range(num_classes))
else:
subset_ids = set(subset_ids)
# If class names are not provided, use their indices as names.
if class_names is None:
class_names = list(range(num_classes))
for i in subset_ids:
pred = cmtx[i]
hist = vis_utils.plot_topk_histogram(
class_names[i],
torch.Tensor(pred),
k,
class_names,
figsize=figsize,
)
writer.add_figure(
tag="Top {} predictions by classes/{}".format(
k, class_names[i]
),
figure=hist,
global_step=global_step,
) | Given all predictions and all true labels, plot histograms of top-k most frequently predicted classes for each true class. Args: writer (SummaryWriter object): a tensorboard SummaryWriter object. cmtx (ndarray): confusion matrix. num_classes (int): total number of classes. k (int): top k to plot histograms. global_step (Optional[int]): current step. subset_ids (list of ints, optional): class indices to plot histogram. class_names (list of strings): names of all classes. figsize (Optional[float, float]): the figure size of the confusion matrix. If None, default to [6.4, 4.8].
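A sketch of logging both visualizations above to TensorBoard. It assumes the slowfast package is installed (add_confusion_matrix and plot_hist delegate to slowfast.visualization.utils) and that both functions are in scope; the log directory, class names and matrix values are made up.
import numpy as np
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir="runs/confmat_demo")
cmtx = np.array([
    [0.8, 0.1, 0.1],
    [0.2, 0.7, 0.1],
    [0.0, 0.3, 0.7],
])  # a pre-normalized 3-class confusion matrix
class_names = ["walk", "run", "jump"]  # illustrative labels
add_confusion_matrix(writer, cmtx, num_classes=3, class_names=class_names, global_step=0)
plot_hist(writer, cmtx, num_classes=3, k=3, class_names=class_names, global_step=0)
writer.close()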
22,317 | import logging as log
import math
import os
import matplotlib.pyplot as plt
import torch
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid
import slowfast.utils.logging as logging
import slowfast.visualization.utils as vis_utils
from slowfast.utils.misc import get_class_names
def add_heatmap(tensor):
"""
Add heatmap to 2D tensor.
Args:
tensor (tensor): a 2D tensor. Tensor value must be in [0..1] range.
Returns:
heatmap (tensor): a 3D tensor. Result of applying heatmap to the 2D tensor.
"""
assert tensor.ndim == 2, "Only support 2D tensors."
# Move tensor to cpu if necessary.
if tensor.device != torch.device("cpu"):
arr = tensor.cpu()
else:
arr = tensor
arr = arr.numpy()
# Get the color map by name.
cm = plt.get_cmap("viridis")
heatmap = cm(arr)
heatmap = heatmap[:, :, :3]
# Convert (H, W, C) to (C, H, W)
heatmap = torch.Tensor(heatmap).permute(2, 0, 1)
return heatmap
The provided code snippet includes necessary dependencies for implementing the `add_ndim_array` function. Write a Python function `def add_ndim_array( writer, array, name, nrow=None, normalize=False, global_step=None, heat_map=True, )` to solve the following problem:
Visualize and add n-dimensional tensors to a Tensorboard SummaryWriter. Tensors will be visualized as a 2D grid image. Args: writer (SummaryWriter): Tensorboard SummaryWriter. array (tensor): tensor to visualize. name (str): name of the tensor. nrow (Optional[int]): number of 2D filters in each row in the grid image. normalize (bool): whether to normalize when we have multiple 2D filters. Default to False. global_step (Optional[int]): current step. heat_map (bool): whether to add a heat map to each 2D filter in the array.
Here is the function:
def add_ndim_array(
writer,
array,
name,
nrow=None,
normalize=False,
global_step=None,
heat_map=True,
):
"""
Visualize and add n-dimensional tensors to a Tensorboard SummaryWriter. Tensors
will be visualized as a 2D grid image.
Args:
writer (SummaryWriter): Tensorboard SummaryWriter.
array (tensor): tensor to visualize.
name (str): name of the tensor.
nrow (Optional[int]): number of 2D filters in each row in the grid image.
normalize (bool): whether to normalize when we have multiple 2D filters.
Default to False.
global_step (Optional[int]): current step.
heat_map (bool): whether to add a heat map to each 2D filter in the array.
"""
if array is not None and array.ndim != 0:
if array.ndim == 1:
reshaped_array = array.unsqueeze(0)
if nrow is None:
nrow = int(math.sqrt(reshaped_array.size()[1]))
reshaped_array = reshaped_array.view(-1, nrow)
if heat_map:
reshaped_array = add_heatmap(reshaped_array)
writer.add_image(
name,
reshaped_array,
global_step=global_step,
dataformats="CHW",
)
else:
writer.add_image(
name,
reshaped_array,
global_step=global_step,
dataformats="HW",
)
elif array.ndim == 2:
reshaped_array = array
if heat_map:
heatmap = add_heatmap(reshaped_array)
writer.add_image(
name, heatmap, global_step=global_step, dataformats="CHW"
)
else:
writer.add_image(
name,
reshaped_array,
global_step=global_step,
dataformats="HW",
)
else:
last2_dims = array.size()[-2:]
reshaped_array = array.view(-1, *last2_dims)
if heat_map:
reshaped_array = [
add_heatmap(array_2d).unsqueeze(0)
for array_2d in reshaped_array
]
reshaped_array = torch.cat(reshaped_array, dim=0)
else:
reshaped_array = reshaped_array.unsqueeze(1)
if nrow is None:
nrow = int(math.sqrt(reshaped_array.size()[0]))
img_grid = make_grid(
reshaped_array, nrow, padding=1, normalize=normalize
)
writer.add_image(name, img_grid, global_step=global_step) | Visualize and add n-dimensional tensors to a Tensorboard SummaryWriter. Tensors will be visualized as a 2D grid image. Args: writer (SummaryWriter): Tensorboard SummaryWriter. array (tensor): tensor to visualize. name (str): name of the tensor. nrow (Optional[int]): number of 2D filters in each row in the grid image. normalize (bool): whether to normalize when we have multiple 2D filters. Default to False. global_step (Optional[int]): current step. heat_map (bool): whether to add a heat map to each 2D filter in the array.
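A short sketch of pushing a weight tensor through add_ndim_array, assuming the imports and helpers of this entry (including add_heatmap and make_grid) are in scope; the tensor shape and log directory are illustrative.
import torch
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir="runs/weights_demo")
filters = torch.rand(16, 3, 3)  # e.g. sixteen 3x3 filters, values already in [0, 1]
# A 3D input takes the generic branch: each 2D slice is heat-mapped and tiled into a grid.
add_ndim_array(writer, filters, "demo/filters", global_step=0, heat_map=True)
writer.close()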
22,318 | import numpy as np
import os
import cv2
import torch
import tqdm
import slowfast.utils.checkpoint as cu
import slowfast.utils.logging as logging
from slowfast.datasets.ava_helper import parse_bboxes_file
from slowfast.datasets.cv2_transform import scale, scale_boxes
from slowfast.datasets.utils import get_sequence
from slowfast.models import build_model
from slowfast.utils import misc
from slowfast.utils.env import pathmgr
from slowfast.visualization.utils import process_cv2_inputs
from slowfast.visualization.video_visualizer import VideoVisualizer
The provided code snippet includes necessary dependencies for implementing the `merge_pred_gt_boxes` function. Write a Python function `def merge_pred_gt_boxes(pred_dict, gt_dict=None)` to solve the following problem:
Merge data from precomputed and ground-truth boxes dictionaries. Args: pred_dict (dict): a dict which maps from `frame_idx` to a list of `boxes` and `labels`. Each `box` is a list of 4 box coordinates. `labels[i]` is a list of labels for `boxes[i]`. gt_dict (Optional[dict]): a dict which maps from `frame_idx` to a list of `boxes` and `labels`. Each `box` is a list of 4 box coordinates. `labels[i]` is a list of labels for `boxes[i]`. Note that label is -1 for predicted boxes. Returns: merged_dict (dict): merged dictionary from `pred_dict` and `gt_dict` if given. It is a dict which maps from `frame_idx` to a list of [`is_gt`, `boxes`, `labels`], where `is_gt` is a boolean indicating whether the `boxes` and `labels` are ground-truth.
Here is the function:
def merge_pred_gt_boxes(pred_dict, gt_dict=None):
"""
Merge data from precomputed and ground-truth boxes dictionaries.
Args:
pred_dict (dict): a dict which maps from `frame_idx` to a list of `boxes`
and `labels`. Each `box` is a list of 4 box coordinates. `labels[i]` is
a list of labels for `boxes[i]`.
gt_dict (Optional[dict]): a dict which maps from `frame_idx` to a list of `boxes`
and `labels`. Each `box` is a list of 4 box coordinates. `labels[i]` is
a list of labels for `boxes[i]`. Note that label is -1 for predicted boxes.
Returns:
merged_dict (dict): merged dictionary from `pred_dict` and `gt_dict` if given.
It is a dict which maps from `frame_idx` to a list of [`is_gt`, `boxes`, `labels`],
where `is_gt` is a boolean indicating whether the `boxes` and `labels` are ground-truth.
"""
merged_dict = {}
for key, item in pred_dict.items():
merged_dict[key] = [[False, item[0], item[1]]]
if gt_dict is not None:
for key, item in gt_dict.items():
if merged_dict.get(key) is None:
merged_dict[key] = [[True, item[0], item[1]]]
else:
merged_dict[key].append([True, item[0], item[1]])
return merged_dict | Merge data from precomputed and ground-truth boxes dictionaries. Args: pred_dict (dict): a dict which maps from `frame_idx` to a list of `boxes` and `labels`. Each `box` is a list of 4 box coordinates. `labels[i]` is a list of labels for `boxes[i]`. gt_dict (Optional[dict]): a dict which maps from `frame_idx` to a list of `boxes` and `labels`. Each `box` is a list of 4 box coordinates. `labels[i]` is a list of labels for `boxes[i]`. Note that label is -1 for predicted boxes. Returns: merged_dict (dict): merged dictionary from `pred_dict` and `gt_dict` if given. It is a dict which maps from `frame_idx` to a list of [`is_gt`, `boxes`, `labels`], where `is_gt` is a boolean indicating whether the `boxes` and `labels` are ground-truth.
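A toy run of merge_pred_gt_boxes; the frame indices, box coordinates and labels are arbitrary:
pred_dict = {
    0: ([[10, 20, 110, 220]], [[5]]),  # one predicted box with label 5
    4: ([[30, 40, 130, 240]], [[2]]),
}
gt_dict = {
    0: ([[12, 22, 108, 218]], [[5]]),  # ground truth for the same frame
    8: ([[50, 60, 150, 260]], [[7]]),  # frame that has ground truth only
}
merged = merge_pred_gt_boxes(pred_dict, gt_dict)
for frame_idx, entries in sorted(merged.items()):
    for is_gt, boxes, labels in entries:
        print(frame_idx, "gt" if is_gt else "pred", boxes, labels)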