id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
188,934 | import os
import gradio as gr
import nltk
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import FAISS
from paddle_embedding import PaddleNLPEmbeddings
from chatllm import ChatLLM
def init_knowledge_vector_store(embedding_model, filepath):
    """Build a FAISS vector store from the document at *filepath*.

    ``embedding_model`` is a key into the module-level ``embedding_model_dict``
    (defined elsewhere in this file) selecting a PaddleNLP embedding model.
    """
    # "elements" mode splits the document into fine-grained elements.
    embedder = PaddleNLPEmbeddings(
        model=embedding_model_dict[embedding_model])
    documents = UnstructuredFileLoader(filepath, mode="elements").load()
    return FAISS.from_documents(documents, embedder)
def get_knowledge_based_answer(query,
                               large_language_model,
                               vector_store,
                               VECTOR_SEARCH_TOP_K,
                               chat_history=None):
    """Answer *query* with retrieval-augmented generation over *vector_store*.

    Args:
        query: The user's question.
        large_language_model: Currently unused; kept so existing callers
            that pass it keep working.
        vector_store: Vector store exposing ``as_retriever``.
        VECTOR_SEARCH_TOP_K: Number of documents to retrieve per query.
        chat_history: Currently unused. Defaults to ``None`` instead of the
            previous mutable default ``[]`` (shared-list pitfall).

    Returns:
        The answer string produced by the RetrievalQA chain.
    """
    if chat_history is None:
        chat_history = []
    prompt_template = """基于以下已知信息,请简洁并专业地回答用户的问题。
如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息"。不允许在答案中添加编造成分。另外,答案请使用中文。
已知内容:
{context}
问题:
{question}"""
    prompt = PromptTemplate(template=prompt_template,
                            input_variables=["context", "question"])
    chatLLM = ChatLLM()
    knowledge_chain = RetrievalQA.from_llm(
        llm=chatLLM,
        retriever=vector_store.as_retriever(
            search_kwargs={"k": VECTOR_SEARCH_TOP_K}),
        prompt=prompt)
    # Feed the raw page content (no metadata) into the combine step.
    knowledge_chain.combine_documents_chain.document_prompt = PromptTemplate(
        input_variables=["page_content"], template="{page_content}")
    knowledge_chain.return_source_documents = True
    result = knowledge_chain({"query": query})
    return result['result']
def predict(input: str,
            large_language_model: str,
            embedding_model: str,
            file_obj,
            VECTOR_SEARCH_TOP_K: int,
            history=None):
    """Gradio callback: index the uploaded file and answer *input*.

    Returns:
        Tuple of ('', history, history) as expected by the Gradio UI.
    """
    if history is None:
        history = []
    vector_store = init_knowledge_vector_store(embedding_model, file_obj.name)
    resp = get_knowledge_based_answer(
        query=input,
        large_language_model=large_language_model,
        vector_store=vector_store,
        VECTOR_SEARCH_TOP_K=VECTOR_SEARCH_TOP_K,
        chat_history=history,
    )
    # get_knowledge_based_answer returns the answer *string* itself, so
    # the previous ``resp['result']`` raised TypeError (string indices
    # must be integers); append the string directly.
    history.append((input, resp))
    return '', history, history
188,936 | import os
import gradio as gr
import nltk
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import FAISS
from paddle_embedding import PaddleNLPEmbeddings
from chatllm import ChatLLM
def init_knowledge_vector_store(embedding_model, filepath):
    """Create a FAISS store over the elements of the file at *filepath*.

    The embedding model is resolved through the module-level
    ``embedding_model_dict`` mapping.
    """
    selected = embedding_model_dict[embedding_model]
    embedder = PaddleNLPEmbeddings(model=selected)
    # Element-level loading yields one document per structural element.
    elements = UnstructuredFileLoader(filepath, mode="elements").load()
    return FAISS.from_documents(elements, embedder)
def get_knowledge_based_answer(query,
                               large_language_model,
                               vector_store,
                               VECTOR_SEARCH_TOP_K,
                               chat_history=None):
    """Answer *query* with retrieval-augmented generation over *vector_store*.

    Args:
        query: The user's question.
        large_language_model: Currently unused; kept for caller compatibility.
        vector_store: Vector store exposing ``as_retriever``.
        VECTOR_SEARCH_TOP_K: Number of documents to retrieve per query.
        chat_history: Currently unused. Defaults to ``None`` instead of the
            previous mutable default ``[]`` (shared-list pitfall).

    Returns:
        The full RetrievalQA result dict (includes 'result' and source docs).
    """
    if chat_history is None:
        chat_history = []
    prompt_template = """基于以下已知信息,请简洁并专业地回答用户的问题。
如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息"。不允许在答案中添加编造成分。另外,答案请使用中文。
已知内容:
{context}
问题:
{question}"""
    prompt = PromptTemplate(template=prompt_template,
                            input_variables=["context", "question"])
    chatLLM = ChatLLM()
    knowledge_chain = RetrievalQA.from_llm(
        llm=chatLLM,
        retriever=vector_store.as_retriever(
            search_kwargs={"k": VECTOR_SEARCH_TOP_K}),
        prompt=prompt)
    # Feed the raw page content (no metadata) into the combine step.
    knowledge_chain.combine_documents_chain.document_prompt = PromptTemplate(
        input_variables=["page_content"], template="{page_content}")
    knowledge_chain.return_source_documents = True
    # Debug prints removed: the old code printed the literal string
    # 'result' followed by the raw result dict.
    result = knowledge_chain({"query": query})
    return result
def predict(input: str,
            large_language_model: str,
            embedding_model: str,
            file_obj,
            VECTOR_SEARCH_TOP_K: int,
            history=None):
    """Gradio callback: index the uploaded file and answer *input*.

    Returns:
        Tuple of ('', history, history) as expected by the Gradio UI.
    """
    # ``history is None`` instead of ``== None`` (identity check for the
    # None singleton; also avoids surprising __eq__ overloads).
    if history is None:
        history = []
    vector_store = init_knowledge_vector_store(embedding_model, file_obj.name)
    resp = get_knowledge_based_answer(
        query=input,
        large_language_model=large_language_model,
        vector_store=vector_store,
        VECTOR_SEARCH_TOP_K=VECTOR_SEARCH_TOP_K,
        chat_history=history,
    )
    # resp is the RetrievalQA result dict; 'result' holds the answer text.
    # Leftover debug prints (file name / answer) removed.
    history.append((input, resp['result']))
    return '', history, history
188,937 | import datetime
import os
from typing import List
import nltk
import qdrant_client
import sentence_transformers
import torch
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import Qdrant
from lcserve import serving
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
def init_model():
    """Load the initial model config and smoke-test the LLM with a greeting.

    Returns a Chinese status message for the UI; any failure during loading
    is reported as a message rather than propagated.
    """
    try:
        knowladge_based_chat_llm.init_model_config()
        # One trivial call ("你好") verifies the LLM actually responds.
        knowladge_based_chat_llm.llm._call("你好")
    except Exception:
        return """模型未成功加载,请检查后重新尝试"""
    return """初始模型已成功加载"""
188,938 | import datetime
import os
from typing import List
import nltk
import qdrant_client
import sentence_transformers
import torch
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import Qdrant
from lcserve import serving
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
def reinit_model(large_language_model: str, embedding_model: str):
    """Reload the LLM and embedding model; return a UI status message.

    Any exception during reconfiguration is converted into a failure
    message instead of propagating to the caller.
    """
    try:
        knowladge_based_chat_llm.init_model_config(
            large_language_model=large_language_model,
            embedding_model=embedding_model)
    except Exception:
        return """模型未成功加载,请检查后重新尝试"""
    return """模型已成功重新加载"""
188,939 | import datetime
import os
from typing import List
import nltk
import qdrant_client
import sentence_transformers
import torch
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import Qdrant
from lcserve import serving
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
def vector_store(file_path: "Union[str, List[str]]"):
    """Index *file_path* (one path or a list of paths) into the knowledge base.

    The previous annotation ``str or List[str]`` evaluated to plain ``str``
    (``or`` picks the first truthy operand at def time); a string annotation
    documents the union without requiring ``typing.Union`` in scope.

    Returns:
        The status message produced by ``init_knowledge_vector_store``.
    """
    # Second element (the list of successfully loaded files) is unused here.
    vector_store_state, _loaded_files = knowladge_based_chat_llm.init_knowledge_vector_store(
        file_path)
    return vector_store_state
188,940 | import datetime
import os
from typing import List
import nltk
import qdrant_client
import sentence_transformers
import torch
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import Qdrant
from lcserve import serving
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
def search_web(query):
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
def predict(input: str,
            use_web: bool, top_k: int, history_len: int, temperature: float,
            top_p: float, history: list):
    """Answer *input*, optionally augmenting the context with a web search.

    Returns:
        The answer string from the knowledge-based chain.
    """
    if history is None:
        history = []
    # The UI passes the flag as the string 'True'; also accept a real bool,
    # matching the declared ``use_web: bool`` annotation (previously a True
    # bool silently skipped the web search).
    if use_web is True or use_web == 'True':
        web_content = search_web(query=input)
    else:
        web_content = ''
    resp = knowladge_based_chat_llm.get_knowledge_based_answer(
        query=input,
        web_content=web_content,
        top_k=top_k,
        history_len=history_len,
        temperature=temperature,
        top_p=top_p,
        history=history)
    history.append((input, resp['result']))
    print(resp['result'])
    return resp['result']
188,947 | import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
def _get_coco_stuff_meta():
def register_all_coco_stuff_10k(root):
    """Register the COCO-Stuff-10k train/test semantic-segmentation splits.

    Registers a dataset loader and metadata under detectron2's
    DatasetCatalog / MetadataCatalog for each split found under
    ``<root>/coco/coco_stuff_10k``.
    """
    root = os.path.join(root, "coco", "coco_stuff_10k")
    meta = _get_coco_stuff_meta()
    for name, image_dirname, sem_seg_dirname in [
        ("train", "images_detectron2/train", "annotations_detectron2/train"),
        ("test", "images_detectron2/test", "annotations_detectron2/test"),
    ]:
        image_dir = os.path.join(root, image_dirname)
        gt_dir = os.path.join(root, sem_seg_dirname)
        name = f"coco_2017_{name}_stuff_10k_sem_seg"
        # Bind this iteration's dirs as lambda defaults so each registered
        # loader keeps its own paths (avoids the late-binding closure pitfall).
        DatasetCatalog.register(
            name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
        )
        MetadataCatalog.get(name).set(
            image_root=image_dir,
            sem_seg_root=gt_dir,
            evaluator_type="sem_seg",
            ignore_label=255,  # 255 marks pixels excluded from loss/eval
            **meta,
        )
188,948 | import copy
import logging
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.transforms import TransformGen
from detectron2.structures import BitMasks, Instances
from pycocotools import mask as coco_mask
def convert_coco_poly_to_mask(segmentations, height, width):
    """Rasterize COCO polygon segmentations into an (N, H, W) binary tensor.

    Each entry of *segmentations* is a list of polygons for one instance;
    overlapping polygon parts are merged with a per-pixel ``any``.
    Returns a uint8/bool mask tensor, empty (0, H, W) when no instances.
    """
    per_instance = []
    for polygons in segmentations:
        rle = coco_mask.frPyObjects(polygons, height, width)
        decoded = coco_mask.decode(rle)
        # decode() drops the last axis for a single polygon; restore it.
        if len(decoded.shape) < 3:
            decoded = decoded[..., None]
        as_tensor = torch.as_tensor(decoded, dtype=torch.uint8)
        per_instance.append(as_tensor.any(dim=2))
    if not per_instance:
        return torch.zeros((0, height, width), dtype=torch.uint8)
    return torch.stack(per_instance, dim=0)
188,949 | import copy
import logging
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.transforms import TransformGen
from detectron2.structures import BitMasks, Instances
from pycocotools import mask as coco_mask
The provided code snippet includes necessary dependencies for implementing the `build_transform_gen` function. Write a Python function `def build_transform_gen(cfg, is_train)` to solve the following problem:
Create a list of default :class:`Augmentation` from config. Now it includes resizing and flipping. Returns: list[Augmentation]
Here is the function:
def build_transform_gen(cfg, is_train):
    """
    Create a list of default :class:`Augmentation` from config.
    Now it includes resizing and flipping.
    Returns:
        list[Augmentation]
    """
    assert is_train, "Only support training augmentation"
    size = cfg.INPUT.IMAGE_SIZE
    flip_mode = cfg.INPUT.RANDOM_FLIP
    augmentations = []
    if flip_mode != "none":
        augmentations.append(
            T.RandomFlip(
                horizontal=flip_mode == "horizontal",
                vertical=flip_mode == "vertical",
            )
        )
    # Large-scale jitter followed by a fixed-size crop/pad to a square.
    augmentations.append(
        T.ResizeScale(
            min_scale=cfg.INPUT.MIN_SCALE,
            max_scale=cfg.INPUT.MAX_SCALE,
            target_height=size,
            target_width=size,
        )
    )
    augmentations.append(T.FixedSizeCrop(crop_size=(size, size)))
    return augmentations
188,950 | import copy
import logging
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.transforms import TransformGen
from detectron2.structures import BitMasks, Boxes, Instances
The provided code snippet includes necessary dependencies for implementing the `build_transform_gen` function. Write a Python function `def build_transform_gen(cfg, is_train)` to solve the following problem:
Create a list of default :class:`Augmentation` from config. Now it includes resizing and flipping. Returns: list[Augmentation]
Here is the function:
def build_transform_gen(cfg, is_train):
    """
    Create a list of default :class:`Augmentation` from config.
    Now it includes resizing and flipping.
    Returns:
        list[Augmentation]
    """
    assert is_train, "Only support training augmentation"
    size = cfg.INPUT.IMAGE_SIZE
    flip_mode = cfg.INPUT.RANDOM_FLIP
    augmentations = []
    if flip_mode != "none":
        augmentations.append(
            T.RandomFlip(
                horizontal=flip_mode == "horizontal",
                vertical=flip_mode == "vertical",
            )
        )
    # Large-scale jitter followed by a fixed-size crop/pad to a square.
    augmentations.append(
        T.ResizeScale(
            min_scale=cfg.INPUT.MIN_SCALE,
            max_scale=cfg.INPUT.MAX_SCALE,
            target_height=size,
            target_width=size,
        )
    )
    augmentations.append(T.FixedSizeCrop(crop_size=(size, size)))
    return augmentations
188,951 | import logging
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.utils.comm import get_world_size
from detectron2.projects.point_rend.point_features import (
get_uncertain_point_coords_with_randomness,
point_sample,
)
from ..utils.misc import is_dist_avail_and_initialized, nested_tensor_from_tensor_list
The provided code snippet includes necessary dependencies for implementing the `dice_loss` function. Write a Python function `def dice_loss( inputs: torch.Tensor, targets: torch.Tensor, num_masks: float, )` to solve the following problem:
Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class).
Here is the function:
def dice_loss(
    inputs: torch.Tensor,
    targets: torch.Tensor,
    num_masks: float,
):
    """
    Compute the DICE loss, similar to generalized IOU for masks.
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example (raw logits).
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                 (0 for the negative class and 1 for the positive class).
        num_masks: Normalizer for the summed per-mask losses.
    """
    probs = inputs.sigmoid().flatten(1)
    intersection = (probs * targets).sum(-1)
    totals = probs.sum(-1) + targets.sum(-1)
    # +1 in numerator and denominator smooths the ratio for empty masks.
    per_mask = 1 - (2 * intersection + 1) / (totals + 1)
    return per_mask.sum() / num_masks
188,952 | import logging
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.utils.comm import get_world_size
from detectron2.projects.point_rend.point_features import (
get_uncertain_point_coords_with_randomness,
point_sample,
)
from ..utils.misc import is_dist_avail_and_initialized, nested_tensor_from_tensor_list
The provided code snippet includes necessary dependencies for implementing the `sigmoid_ce_loss` function. Write a Python function `def sigmoid_ce_loss( inputs: torch.Tensor, targets: torch.Tensor, num_masks: float, )` to solve the following problem:
Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). Returns: Loss tensor
Here is the function:
def sigmoid_ce_loss(
    inputs: torch.Tensor,
    targets: torch.Tensor,
    num_masks: float,
):
    """
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example (raw logits).
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                 (0 for the negative class and 1 for the positive class).
        num_masks: Normalizer for the summed per-mask losses.
    Returns:
        Loss tensor
    """
    per_element = F.binary_cross_entropy_with_logits(
        inputs, targets, reduction="none")
    # Average over points within a mask, then sum masks and normalize.
    per_mask = per_element.mean(1)
    return per_mask.sum() / num_masks
188,953 | import logging
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.utils.comm import get_world_size
from detectron2.projects.point_rend.point_features import (
get_uncertain_point_coords_with_randomness,
point_sample,
)
from ..utils.misc import is_dist_avail_and_initialized, nested_tensor_from_tensor_list
The provided code snippet includes necessary dependencies for implementing the `calculate_uncertainty` function. Write a Python function `def calculate_uncertainty(logits)` to solve the following problem:
We estimate uncertainty as L1 distance between 0.0 and the logit prediction in 'logits' for the foreground class in `classes`. Args: logits (Tensor): A tensor of shape (R, 1, ...) for class-specific or class-agnostic, where R is the total number of predicted masks in all images and C is the number of foreground classes. The values are logits. Returns: scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with the most uncertain locations having the highest uncertainty score.
Here is the function:
def calculate_uncertainty(logits):
    """
    We estimate uncertainty as the L1 distance between 0.0 and the logit
    prediction in 'logits' for the foreground class in `classes`.
    Args:
        logits (Tensor): A tensor of shape (R, 1, ...) for class-specific or
            class-agnostic, where R is the total number of predicted masks in
            all images. The values are logits.
    Returns:
        scores (Tensor): A tensor of shape (R, 1, ...) that contains
            uncertainty scores with the most uncertain locations having the
            highest uncertainty score.
    """
    assert logits.shape[1] == 1
    # Logits near 0 are the most uncertain; negate |logit| so they score highest.
    scores = logits.clone()
    return -torch.abs(scores)
188,956 | import logging
import fvcore.nn.weight_init as weight_init
from typing import Optional
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d
from .position_encoding import PositionEmbeddingSine
from .maskformer_transformer_decoder import TRANSFORMER_DECODER_REGISTRY
The provided code snippet includes necessary dependencies for implementing the `_get_activation_fn` function. Write a Python function `def _get_activation_fn(activation)` to solve the following problem:
Return an activation function given a string
Here is the function:
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.") | Return an activation function given a string |
188,957 | import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d
from detectron2.utils.registry import Registry
from .position_encoding import PositionEmbeddingSine
from .transformer import Transformer
TRANSFORMER_DECODER_REGISTRY = Registry("TRANSFORMER_MODULE")
TRANSFORMER_DECODER_REGISTRY.__doc__ = """
Registry for transformer module in MaskFormer.
"""
The provided code snippet includes necessary dependencies for implementing the `build_transformer_decoder` function. Write a Python function `def build_transformer_decoder(cfg, in_channels, mask_classification=True)` to solve the following problem:
Build a instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`.
Here is the function:
def build_transformer_decoder(cfg, in_channels, mask_classification=True):
    """
    Build a transformer decoder from `cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME`.

    Looks the name up in ``TRANSFORMER_DECODER_REGISTRY`` and instantiates the
    registered class with ``(cfg, in_channels, mask_classification)``.
    (The old docstring referenced ``cfg.MODEL.INS_EMBED_HEAD.NAME``, which the
    code does not read.)
    """
    name = cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME
    return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, mask_classification)
188,960 | import torch
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from torch import nn
from torch.cuda.amp import autocast
from detectron2.projects.point_rend.point_features import point_sample
The provided code snippet includes necessary dependencies for implementing the `batch_dice_loss` function. Write a Python function `def batch_dice_loss(inputs: torch.Tensor, targets: torch.Tensor)` to solve the following problem:
Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class).
Here is the function:
def batch_dice_loss(inputs: torch.Tensor, targets: torch.Tensor):
    """
    Compute the pairwise DICE loss between every prediction and every target,
    similar to generalized IOU for masks.
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example (raw logits).
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                 (0 for the negative class and 1 for the positive class).
    Returns:
        An (N, M) loss matrix over N predictions and M targets.
    """
    probs = inputs.sigmoid().flatten(1)
    # einsum builds the full N x M intersection matrix in one pass.
    numerator = 2 * torch.einsum("nc,mc->nm", probs, targets)
    denominator = probs.sum(-1)[:, None] + targets.sum(-1)[None, :]
    return 1 - (numerator + 1) / (denominator + 1)
188,961 | import torch
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from torch import nn
from torch.cuda.amp import autocast
from detectron2.projects.point_rend.point_features import point_sample
The provided code snippet includes necessary dependencies for implementing the `batch_sigmoid_ce_loss` function. Write a Python function `def batch_sigmoid_ce_loss(inputs: torch.Tensor, targets: torch.Tensor)` to solve the following problem:
Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). Returns: Loss tensor
Here is the function:
def batch_sigmoid_ce_loss(inputs: torch.Tensor, targets: torch.Tensor):
    """
    Pairwise sigmoid cross-entropy between every prediction and every target.
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example (raw logits).
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                 (0 for the negative class and 1 for the positive class).
    Returns:
        An (N, M) loss matrix, averaged over the hw points of each mask.
    """
    hw = inputs.shape[1]
    # Per-point loss assuming the target is all-ones / all-zeros; the einsum
    # below then mixes the two according to each actual target mask.
    pos = F.binary_cross_entropy_with_logits(
        inputs, torch.ones_like(inputs), reduction="none"
    )
    neg = F.binary_cross_entropy_with_logits(
        inputs, torch.zeros_like(inputs), reduction="none"
    )
    pairwise = torch.einsum("nc,mc->nm", pos, targets)
    pairwise = pairwise + torch.einsum("nc,mc->nm", neg, (1 - targets))
    return pairwise / hw
188,962 | import logging
import numpy as np
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from torch.cuda.amp import autocast
from detectron2.config import configurable
from detectron2.layers import Conv2d, DeformConv, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer_decoder.position_encoding import PositionEmbeddingSine
from ..transformer_decoder.transformer import TransformerEncoder, TransformerEncoderLayer, _get_clones, _get_activation_fn
The provided code snippet includes necessary dependencies for implementing the `build_pixel_decoder` function. Write a Python function `def build_pixel_decoder(cfg, input_shape)` to solve the following problem:
Build a pixel decoder from `cfg.MODEL.MASK_FORMER.PIXEL_DECODER_NAME`.
Here is the function:
def build_pixel_decoder(cfg, input_shape):
    """
    Build a pixel decoder from `cfg.MODEL.MASK_FORMER.PIXEL_DECODER_NAME`.

    The registered head must expose a callable ``forward_features``; heads
    without it cannot serve as a pixel decoder.
    """
    name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME
    model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape)
    if not callable(getattr(model, "forward_features", None)):
        raise ValueError(
            "Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. "
            f"Please implement forward_features for {name} to only return mask features."
        )
    return model
188,963 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import warnings
import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_, constant_
from ..functions import MSDeformAttnFunction
from ..functions.ms_deform_attn_func import ms_deform_attn_core_pytorch
def _is_power_of_2(n):
if (not isinstance(n, int)) or (n < 0):
raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n)))
return (n & (n-1) == 0) and n != 0 | null |
188,969 | import argparse
import glob
import multiprocessing as mp
import os
import sys
import tempfile
import time
import warnings
import cv2
import numpy as np
import tqdm
from torch.cuda.amp import autocast
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.logger import setup_logger
from mask2former import add_maskformer2_config
from mask2former_video import add_maskformer2_video_config
from predictor import VisualizationDemo
def setup_cfg(args):
    """Build a frozen detectron2 config from the config file and CLI opts."""
    cfg = get_cfg()
    # Extend the base config with the project-specific option groups.
    for extend in (add_deeplab_config, add_maskformer2_config,
                   add_maskformer2_video_config):
        extend(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    return cfg
188,970 | import argparse
import glob
import multiprocessing as mp
import os
import sys
import tempfile
import time
import warnings
import cv2
import numpy as np
import tqdm
from torch.cuda.amp import autocast
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.logger import setup_logger
from mask2former import add_maskformer2_config
from mask2former_video import add_maskformer2_video_config
from predictor import VisualizationDemo
def get_parser():
    """Build the CLI argument parser for the video demo."""
    p = argparse.ArgumentParser(description="maskformer2 demo for builtin configs")
    p.add_argument(
        "--config-file",
        metavar="FILE",
        default="configs/youtubevis_2019/video_maskformer2_R50_bs16_8ep.yaml",
        help="path to config file",
    )
    p.add_argument("--video-input", help="Path to video file.")
    p.add_argument(
        "--input",
        nargs="+",
        help="A list of space separated input images; "
        "or a single glob pattern such as 'directory/*.jpg'"
        "this will be treated as frames of a video",
    )
    p.add_argument(
        "--output",
        help="A file or directory to save output visualizations. "
        "If not given, will show output in an OpenCV window.",
    )
    p.add_argument(
        "--save-frames",
        default=False,
        help="Save frame level image outputs.",
    )
    p.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.5,
        help="Minimum score for instance predictions to be shown",
    )
    # REMAINDER: everything after --opts is forwarded verbatim to the config.
    p.add_argument(
        "--opts",
        nargs=argparse.REMAINDER,
        default=[],
        help="Modify config options using the command-line 'KEY VALUE' pairs",
    )
    return p
188,971 | import argparse
import glob
import multiprocessing as mp
import os
import sys
import tempfile
import time
import warnings
import cv2
import numpy as np
import tqdm
from torch.cuda.amp import autocast
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.logger import setup_logger
from mask2former import add_maskformer2_config
from mask2former_video import add_maskformer2_video_config
from predictor import VisualizationDemo
def test_opencv_video_format(codec, file_ext):
    """Return True if OpenCV can write a *codec* video with extension *file_ext*.

    Writes 30 dummy frames to a temporary file and checks that the file was
    actually created (VideoWriter fails silently for unsupported codecs).
    """
    with tempfile.TemporaryDirectory(prefix="video_format_test") as tmpdir:
        path = os.path.join(tmpdir, "test_file" + file_ext)
        writer = cv2.VideoWriter(
            filename=path,
            fourcc=cv2.VideoWriter_fourcc(*codec),
            fps=float(30),
            frameSize=(10, 10),
            isColor=True,
        )
        frame = np.zeros((10, 10, 3), np.uint8)
        for _ in range(30):
            writer.write(frame)
        writer.release()
        return os.path.isfile(path)
188,972 | import argparse
import glob
import multiprocessing as mp
import os
import sys
import tempfile
import time
import warnings
import cv2
import numpy as np
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.logger import setup_logger
from mask2former import add_maskformer2_config
from predictor import VisualizationDemo
def setup_cfg(args):
    """Build a frozen detectron2 config from the config file and CLI opts."""
    cfg = get_cfg()
    # Extend the base config with the project-specific option groups.
    for extend in (add_deeplab_config, add_maskformer2_config):
        extend(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    return cfg
188,973 | import argparse
import glob
import multiprocessing as mp
import os
import sys
import tempfile
import time
import warnings
import cv2
import numpy as np
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.logger import setup_logger
from mask2former import add_maskformer2_config
from predictor import VisualizationDemo
def get_parser():
    """Build the CLI argument parser for the image demo."""
    p = argparse.ArgumentParser(description="maskformer2 demo for builtin configs")
    p.add_argument(
        "--config-file",
        metavar="FILE",
        default="configs/coco/panoptic-segmentation/maskformer2_R50_bs16_50ep.yaml",
        help="path to config file",
    )
    p.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
    p.add_argument("--video-input", help="Path to video file.")
    p.add_argument(
        "--input",
        nargs="+",
        help="A list of space separated input images; "
        "or a single glob pattern such as 'directory/*.jpg'",
    )
    p.add_argument(
        "--output",
        help="A file or directory to save output visualizations. "
        "If not given, will show output in an OpenCV window.",
    )
    p.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.5,
        help="Minimum score for instance predictions to be shown",
    )
    # REMAINDER: everything after --opts is forwarded verbatim to the config.
    p.add_argument(
        "--opts",
        nargs=argparse.REMAINDER,
        default=[],
        help="Modify config options using the command-line 'KEY VALUE' pairs",
    )
    return p
188,974 | import argparse
import glob
import multiprocessing as mp
import os
import sys
import tempfile
import time
import warnings
import cv2
import numpy as np
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.logger import setup_logger
from mask2former import add_maskformer2_config
from predictor import VisualizationDemo
def test_opencv_video_format(codec, file_ext):
    """Return True if OpenCV can write video with the given codec/extension pair.

    Writes one second of blank frames into a temporary directory and checks
    whether the output file was actually created, because ``cv2.VideoWriter``
    fails silently when the codec is unsupported.

    Args:
        codec (str): four-character codec code, e.g. "mp4v".
        file_ext (str): file extension including the dot, e.g. ".mp4".

    Returns:
        bool: whether the file was successfully written.
    """
    with tempfile.TemporaryDirectory(prefix="video_format_test") as tmp_dir:
        filename = os.path.join(tmp_dir, "test_file" + file_ext)
        writer = cv2.VideoWriter(
            filename=filename,
            fourcc=cv2.VideoWriter_fourcc(*codec),
            fps=float(30),
            frameSize=(10, 10),
            isColor=True,
        )
        # Use a plain loop (not a list comprehension) since we only care
        # about the write() side effect, not the return values.
        for _ in range(30):
            writer.write(np.zeros((10, 10, 3), np.uint8))
        writer.release()
        return os.path.isfile(filename)
188,975 | import copy
import itertools
import logging
import os
from collections import OrderedDict
from typing import Any, Dict, List, Set
import torch
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import (
DefaultTrainer,
default_argument_parser,
default_setup,
launch,
)
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
SemSegEvaluator,
verify_results,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.logger import setup_logger
from mask2former import (
COCOInstanceNewBaselineDatasetMapper,
COCOPanopticNewBaselineDatasetMapper,
InstanceSegEvaluator,
MaskFormerInstanceDatasetMapper,
MaskFormerPanopticDatasetMapper,
MaskFormerSemanticDatasetMapper,
SemanticSegmentorWithTTA,
add_maskformer2_config,
)
The provided code snippet includes necessary dependencies for implementing the `setup` function. Write a Python function `def setup(args)` to solve the following problem:
Create configs and perform basic setups.
Here is the function:
def setup(args):
    """
    Create configs and perform basic setups.
    """
    config = get_cfg()
    # for poly lr schedule
    add_deeplab_config(config)
    add_maskformer2_config(config)
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    # Standard detectron2 environment setup (logging, seed, output dir, ...).
    default_setup(config, args)
    # Setup logger for "mask_former" module
    setup_logger(output=config.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="mask2former")
    return config
188,976 | import os
from pathlib import Path
import numpy as np
import tqdm
from PIL import Image
def convert(input, output):
    """Shift all label ids in a uint8 label image down by one and save it."""
    arr = np.asarray(Image.open(input))
    assert arr.dtype == np.uint8
    # uint8 arithmetic wraps around: 0 (ignore) becomes 255. others are shifted by 1
    shifted = arr - 1
    Image.fromarray(shifted).save(output)
188,978 | import copy
import itertools
import logging
import os
from collections import OrderedDict
from typing import Any, Dict, List, Set
import torch
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import (
DefaultTrainer,
default_argument_parser,
default_setup,
launch,
)
from detectron2.evaluation import (
DatasetEvaluator,
inference_on_dataset,
print_csv_format,
verify_results,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.logger import setup_logger
from mask2former import add_maskformer2_config
from mask2former_video import (
YTVISDatasetMapper,
YTVISEvaluator,
add_maskformer2_video_config,
build_detection_train_loader,
build_detection_test_loader,
get_detection_dataset_dicts,
)
The provided code snippet includes necessary dependencies for implementing the `setup` function. Write a Python function `def setup(args)` to solve the following problem:
Create configs and perform basic setups.
Here is the function:
def setup(args):
    """
    Create configs and perform basic setups.
    """
    config = get_cfg()
    # for poly lr schedule
    add_deeplab_config(config)
    add_maskformer2_config(config)
    add_maskformer2_video_config(config)
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    # Standard detectron2 environment setup (logging, seed, output dir, ...).
    default_setup(config, args)
    # Setup logger for "mask_former" module
    setup_logger(name="mask2former")
    setup_logger(output=config.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="mask2former_video")
    return config
188,980 | import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
from collections import OrderedDict
import pycocotools.mask as mask_util
import torch
from .datasets.ytvis_api.ytvos import YTVOS
from .datasets.ytvis_api.ytvoseval import YTVOSeval
from tabulate import tabulate
import detectron2.utils.comm as comm
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.evaluation import DatasetEvaluator
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table
class YTVOSeval:
    # Interface for evaluating video instance segmentation on the YouTubeVIS dataset.
    #
    # The usage for YTVOSeval is as follows:
    #  cocoGt=..., cocoDt=...       # load dataset and results
    #  E = YTVOSeval(cocoGt,cocoDt); # initialize YTVOSeval object
    #  E.params.recThrs = ...;      # set parameters as desired
    #  E.evaluate();                # run per image evaluation
    #  E.accumulate();              # accumulate per image results
    #  E.summarize();               # display summary metrics of results
    # For example usage see evalDemo.m and http://mscoco.org/.
    #
    # The evaluation parameters are as follows (defaults in brackets):
    #  imgIds     - [all] N img ids to use for evaluation
    #  catIds     - [all] K cat ids to use for evaluation
    #  iouThrs    - [.5:.05:.95] T=10 IoU thresholds for evaluation
    #  recThrs    - [0:.01:1] R=101 recall thresholds for evaluation
    #  areaRng    - [...] A=4 object area ranges for evaluation
    #  maxDets    - [1 10 100] M=3 thresholds on max detections per image
    #  iouType    - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'
    #  iouType replaced the now DEPRECATED useSegm parameter.
    #  useCats    - [1] if true use category labels for evaluation
    # Note: if useCats=0 category labels are ignored as in proposal scoring.
    # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.
    #
    # evaluate(): evaluates detections on every image and every category and
    # concats the results into the "evalImgs" with fields:
    #  dtIds      - [1xD] id for each of the D detections (dt)
    #  gtIds      - [1xG] id for each of the G ground truths (gt)
    #  dtMatches  - [TxD] matching gt id at each IoU or 0
    #  gtMatches  - [TxG] matching dt id at each IoU or 0
    #  dtScores   - [1xD] confidence of each dt
    #  gtIgnore   - [1xG] ignore flag for each gt
    #  dtIgnore   - [TxD] ignore flag for each dt at each IoU
    #
    # accumulate(): accumulates the per-image, per-category evaluation
    # results in "evalImgs" into the dictionary "eval" with fields:
    #  params     - parameters used for evaluation
    #  date       - date evaluation was performed
    #  counts     - [T,R,K,A,M] parameter dimensions (see above)
    #  precision  - [TxRxKxAxM] precision for every evaluation setting
    #  recall     - [TxKxAxM] max recall for every evaluation setting
    # Note: precision and recall==-1 for settings with no gt objects.
    #
    # See also coco, mask, pycocoDemo, pycocoEvalDemo
    #
    # Microsoft COCO Toolbox.      version 2.0
    # Data, paper, and tutorials available at:  http://mscoco.org/
    # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
    # Licensed under the Simplified BSD License [see coco/license.txt]
    def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):
        '''
        Initialize CocoEval using coco APIs for gt and dt
        :param cocoGt: coco object with ground truth annotations
        :param cocoDt: coco object with detection results
        :return: None
        '''
        if not iouType:
            print('iouType not specified. use default iouType segm')
        self.cocoGt   = cocoGt              # ground truth COCO API
        self.cocoDt   = cocoDt              # detections COCO API
        self.params   = {}                  # evaluation parameters
        self.evalVids = defaultdict(list)   # per-image per-category evaluation results [KxAxI] elements
        self.eval     = {}                  # accumulated evaluation results
        self._gts = defaultdict(list)       # gt for evaluation
        self._dts = defaultdict(list)       # dt for evaluation
        self.params = Params(iouType=iouType) # parameters
        self._paramsEval = {}               # parameters for evaluation
        self.stats = []                     # result summarization
        self.ious = {}                      # ious between all gts and dts
        if cocoGt is not None:
            self.params.vidIds = sorted(cocoGt.getVidIds())
            self.params.catIds = sorted(cocoGt.getCatIds())

    def _prepare(self):
        '''
        Prepare ._gts and ._dts for evaluation based on params
        :return: None
        '''
        def _toMask(anns, coco):
            # modify ann['segmentation'] by reference
            for ann in anns:
                for i, a in enumerate(ann['segmentations']):
                    if a:
                        rle = coco.annToRLE(ann, i)
                        ann['segmentations'][i] = rle
                # avg_area is the mean area over frames where the object is present
                l = [a for a in ann['areas'] if a]
                if len(l)==0:
                    ann['avg_area'] = 0
                else:
                    ann['avg_area'] = np.array(l).mean()
        p = self.params
        if p.useCats:
            gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(vidIds=p.vidIds, catIds=p.catIds))
            dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(vidIds=p.vidIds, catIds=p.catIds))
        else:
            gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(vidIds=p.vidIds))
            dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(vidIds=p.vidIds))
        # convert ground truth to mask if iouType == 'segm'
        if p.iouType == 'segm':
            _toMask(gts, self.cocoGt)
            _toMask(dts, self.cocoDt)
        # set ignore flag
        for gt in gts:
            gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0
            gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']
            if p.iouType == 'keypoints':
                gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']
        self._gts = defaultdict(list)       # gt for evaluation
        self._dts = defaultdict(list)       # dt for evaluation
        for gt in gts:
            self._gts[gt['video_id'], gt['category_id']].append(gt)
        for dt in dts:
            self._dts[dt['video_id'], dt['category_id']].append(dt)
        self.evalVids = defaultdict(list)   # per-image per-category evaluation results
        self.eval     = {}                  # accumulated evaluation results

    def evaluate(self):
        '''
        Run per image evaluation on given images and store results (a list of dict) in self.evalVids
        :return: None
        '''
        tic = time.time()
        print('Running per image evaluation...')
        p = self.params
        # add backward compatibility if useSegm is specified in params
        if p.useSegm is not None:
            p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
            print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
        print('Evaluate annotation type *{}*'.format(p.iouType))
        p.vidIds = list(np.unique(p.vidIds))
        if p.useCats:
            p.catIds = list(np.unique(p.catIds))
        p.maxDets = sorted(p.maxDets)
        self.params=p

        self._prepare()
        # loop through images, area range, max detection number
        catIds = p.catIds if p.useCats else [-1]

        if p.iouType == 'segm' or p.iouType == 'bbox':
            computeIoU = self.computeIoU
        elif p.iouType == 'keypoints':
            computeIoU = self.computeOks
        self.ious = {(vidId, catId): computeIoU(vidId, catId) \
                        for vidId in p.vidIds
                        for catId in catIds}

        evaluateVid = self.evaluateVid
        maxDet = p.maxDets[-1]

        self.evalImgs = [evaluateVid(vidId, catId, areaRng, maxDet)
                 for catId in catIds
                 for areaRng in p.areaRng
                 for vidId in p.vidIds
             ]
        self._paramsEval = copy.deepcopy(self.params)
        toc = time.time()
        print('DONE (t={:0.2f}s).'.format(toc-tic))

    def computeIoU(self, vidId, catId):
        """Compute the IoU matrix [D x G] between detections and ground truth of one video/category."""
        p = self.params
        if p.useCats:
            gt = self._gts[vidId,catId]
            dt = self._dts[vidId,catId]
        else:
            gt = [_ for cId in p.catIds for _ in self._gts[vidId,cId]]
            dt = [_ for cId in p.catIds for _ in self._dts[vidId,cId]]
        if len(gt) == 0 and len(dt) ==0:
            return []
        inds = np.argsort([-d['score'] for d in dt], kind='mergesort')
        dt = [dt[i] for i in inds]
        if len(dt) > p.maxDets[-1]:
            dt=dt[0:p.maxDets[-1]]

        if p.iouType == 'segm':
            g = [g['segmentations'] for g in gt]
            d = [d['segmentations'] for d in dt]
        elif p.iouType == 'bbox':
            g = [g['bboxes'] for g in gt]
            d = [d['bboxes'] for d in dt]
        else:
            raise Exception('unknown iouType for iou computation')

        # compute iou between each dt and gt region
        iscrowd = [int(o['iscrowd']) for o in gt]
        #ious = maskUtils.iou(d,g,iscrowd)
        def iou_seq(d_seq, g_seq):
            # spatio-temporal IoU: sum intersections / sum unions over frames
            i = .0
            u = .0
            for d, g in zip(d_seq, g_seq):
                if d and g:
                    i += maskUtils.area(maskUtils.merge([d, g], True))
                    u += maskUtils.area(maskUtils.merge([d, g], False))
                elif not d and g:
                    u += maskUtils.area(g)
                elif d and not g:
                    u += maskUtils.area(d)
            if not u > .0:
                print("Mask sizes in video {} and category {} may not match!".format(vidId, catId))
            iou = i / u if u > .0 else .0
            return iou
        ious = np.zeros([len(d), len(g)])
        for i, j in np.ndindex(ious.shape):
            ious[i, j] = iou_seq(d[i], g[j])
        #print(vidId, catId, ious.shape, ious)
        return ious

    def computeOks(self, imgId, catId):
        """Compute the OKS (object keypoint similarity) matrix for one image/category."""
        p = self.params
        # dimention here should be Nxm
        gts = self._gts[imgId, catId]
        dts = self._dts[imgId, catId]
        inds = np.argsort([-d['score'] for d in dts], kind='mergesort')
        dts = [dts[i] for i in inds]
        if len(dts) > p.maxDets[-1]:
            dts = dts[0:p.maxDets[-1]]
        # if len(gts) == 0 and len(dts) == 0:
        if len(gts) == 0 or len(dts) == 0:
            return []
        ious = np.zeros((len(dts), len(gts)))
        sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0
        # NOTE: renamed from `vars` to avoid shadowing the builtin
        variances = (sigmas * 2)**2
        k = len(sigmas)
        # compute oks between each detection and ground truth object
        for j, gt in enumerate(gts):
            # create bounds for ignore regions(double the gt bbox)
            g = np.array(gt['keypoints'])
            xg = g[0::3]; yg = g[1::3]; vg = g[2::3]
            k1 = np.count_nonzero(vg > 0)
            bb = gt['bbox']
            x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2
            y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2
            for i, dt in enumerate(dts):
                d = np.array(dt['keypoints'])
                xd = d[0::3]; yd = d[1::3]
                if k1>0:
                    # measure the per-keypoint distance if keypoints visible
                    dx = xd - xg
                    dy = yd - yg
                else:
                    # measure minimum distance to keypoints in (x0,y0) & (x1,y1)
                    z = np.zeros((k))
                    dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)
                    dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)
                e = (dx**2 + dy**2) / variances / (gt['avg_area']+np.spacing(1)) / 2
                if k1 > 0:
                    e=e[vg > 0]
                ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]
        return ious

    def evaluateVid(self, vidId, catId, aRng, maxDet):
        '''
        perform evaluation for single category and image
        :return: dict (single image results)
        '''
        p = self.params
        if p.useCats:
            gt = self._gts[vidId,catId]
            dt = self._dts[vidId,catId]
        else:
            gt = [_ for cId in p.catIds for _ in self._gts[vidId,cId]]
            dt = [_ for cId in p.catIds for _ in self._dts[vidId,cId]]
        if len(gt) == 0 and len(dt) ==0:
            return None

        for g in gt:
            if g['ignore'] or (g['avg_area']<aRng[0] or g['avg_area']>aRng[1]):
                g['_ignore'] = 1
            else:
                g['_ignore'] = 0

        # sort dt highest score first, sort gt ignore last
        gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
        gt = [gt[i] for i in gtind]
        dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
        dt = [dt[i] for i in dtind[0:maxDet]]
        iscrowd = [int(o['iscrowd']) for o in gt]
        # load computed ious
        ious = self.ious[vidId, catId][:, gtind] if len(self.ious[vidId, catId]) > 0 else self.ious[vidId, catId]

        T = len(p.iouThrs)
        G = len(gt)
        D = len(dt)
        gtm  = np.zeros((T,G))
        dtm  = np.zeros((T,D))
        gtIg = np.array([g['_ignore'] for g in gt])
        dtIg = np.zeros((T,D))
        if not len(ious)==0:
            for tind, t in enumerate(p.iouThrs):
                for dind, d in enumerate(dt):
                    # information about best match so far (m=-1 -> unmatched)
                    iou = min([t,1-1e-10])
                    m   = -1
                    for gind, g in enumerate(gt):
                        # if this gt already matched, and not a crowd, continue
                        if gtm[tind,gind]>0 and not iscrowd[gind]:
                            continue
                        # if dt matched to reg gt, and on ignore gt, stop
                        if m>-1 and gtIg[m]==0 and gtIg[gind]==1:
                            break
                        # continue to next gt unless better match made
                        if ious[dind,gind] < iou:
                            continue
                        # if match successful and best so far, store appropriately
                        iou=ious[dind,gind]
                        m=gind
                    # if match made store id of match for both dt and gt
                    if m ==-1:
                        continue
                    dtIg[tind,dind] = gtIg[m]
                    dtm[tind,dind]  = gt[m]['id']
                    gtm[tind,m]     = d['id']
        # set unmatched detections outside of area range to ignore
        a = np.array([d['avg_area']<aRng[0] or d['avg_area']>aRng[1] for d in dt]).reshape((1, len(dt)))
        dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))
        # store results for given image and category
        return {
                'video_id':     vidId,
                'category_id':  catId,
                'aRng':         aRng,
                'maxDet':       maxDet,
                'dtIds':        [d['id'] for d in dt],
                'gtIds':        [g['id'] for g in gt],
                'dtMatches':    dtm,
                'gtMatches':    gtm,
                'dtScores':     [d['score'] for d in dt],
                'gtIgnore':     gtIg,
                'dtIgnore':     dtIg,
            }

    def accumulate(self, p = None):
        '''
        Accumulate per image evaluation results and store the result in self.eval
        :param p: input params for evaluation
        :return: None
        '''
        print('Accumulating evaluation results...')
        tic = time.time()
        if not self.evalImgs:
            print('Please run evaluate() first')
        # allows input customized parameters
        if p is None:
            p = self.params
        p.catIds = p.catIds if p.useCats == 1 else [-1]
        T           = len(p.iouThrs)
        R           = len(p.recThrs)
        K           = len(p.catIds) if p.useCats else 1
        A           = len(p.areaRng)
        M           = len(p.maxDets)
        precision   = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories
        recall      = -np.ones((T,K,A,M))
        scores      = -np.ones((T,R,K,A,M))

        # create dictionary for future indexing
        _pe = self._paramsEval
        catIds = _pe.catIds if _pe.useCats else [-1]
        setK = set(catIds)
        setA = set(map(tuple, _pe.areaRng))
        setM = set(_pe.maxDets)
        setI = set(_pe.vidIds)
        # get inds to evaluate
        k_list = [n for n, k in enumerate(p.catIds)  if k in setK]
        m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
        a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
        i_list = [n for n, i in enumerate(p.vidIds)  if i in setI]
        I0 = len(_pe.vidIds)
        A0 = len(_pe.areaRng)
        # retrieve E at each category, area range, and max number of detections
        for k, k0 in enumerate(k_list):
            Nk = k0*A0*I0
            for a, a0 in enumerate(a_list):
                Na = a0*I0
                for m, maxDet in enumerate(m_list):
                    E = [self.evalImgs[Nk + Na + i] for i in i_list]
                    E = [e for e in E if e is not None]
                    if len(E) == 0:
                        continue
                    dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])

                    # different sorting method generates slightly different results.
                    # mergesort is used to be consistent as Matlab implementation.
                    inds = np.argsort(-dtScores, kind='mergesort')
                    dtScoresSorted = dtScores[inds]

                    dtm  = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]
                    dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet]  for e in E], axis=1)[:,inds]
                    gtIg = np.concatenate([e['gtIgnore'] for e in E])
                    npig = np.count_nonzero(gtIg==0 )
                    if npig == 0:
                        continue
                    tps = np.logical_and(               dtm,  np.logical_not(dtIg) )
                    fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )

                    # NOTE: np.float was removed in NumPy 1.24; use the builtin float
                    tp_sum = np.cumsum(tps, axis=1).astype(dtype=float)
                    fp_sum = np.cumsum(fps, axis=1).astype(dtype=float)
                    for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
                        tp = np.array(tp)
                        fp = np.array(fp)
                        nd = len(tp)
                        rc = tp / npig
                        pr = tp / (fp+tp+np.spacing(1))
                        q  = np.zeros((R,))
                        ss = np.zeros((R,))

                        if nd:
                            recall[t,k,a,m] = rc[-1]
                        else:
                            recall[t,k,a,m] = 0

                        # numpy is slow without cython optimization for accessing elements
                        # use python array gets significant speed improvement
                        pr = pr.tolist(); q = q.tolist()

                        for i in range(nd-1, 0, -1):
                            if pr[i] > pr[i-1]:
                                pr[i-1] = pr[i]

                        inds = np.searchsorted(rc, p.recThrs, side='left')
                        try:
                            for ri, pi in enumerate(inds):
                                q[ri] = pr[pi]
                                ss[ri] = dtScoresSorted[pi]
                        except Exception:
                            # searchsorted can return len(pr) when recall never
                            # reaches the threshold; remaining entries stay 0
                            pass
                        precision[t,:,k,a,m] = np.array(q)
                        scores[t,:,k,a,m] = np.array(ss)
        self.eval = {
            'params': p,
            'counts': [T, R, K, A, M],
            'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'precision': precision,
            'recall':   recall,
            'scores': scores,
        }
        toc = time.time()
        print('DONE (t={:0.2f}s).'.format( toc-tic))

    def summarize(self):
        '''
        Compute and display summary metrics for evaluation results.
        Note this function can *only* be applied on the default parameter setting
        '''
        def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):
            p = self.params
            iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
            titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
            typeStr = '(AP)' if ap==1 else '(AR)'
            iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
                if iouThr is None else '{:0.2f}'.format(iouThr)

            aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
            mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
            if ap == 1:
                # dimension of precision: [TxRxKxAxM]
                s = self.eval['precision']
                # IoU
                if iouThr is not None:
                    t = np.where(iouThr == p.iouThrs)[0]
                    s = s[t]
                s = s[:,:,:,aind,mind]
            else:
                # dimension of recall: [TxKxAxM]
                s = self.eval['recall']
                if iouThr is not None:
                    t = np.where(iouThr == p.iouThrs)[0]
                    s = s[t]
                s = s[:,:,aind,mind]
            if len(s[s>-1])==0:
                mean_s = -1
            else:
                mean_s = np.mean(s[s>-1])
            print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
            return mean_s

        def _summarizeDets():
            stats = np.zeros((12,))
            stats[0] = _summarize(1)
            stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])
            stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])
            stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])
            stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])
            stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])
            stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
            stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
            stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
            stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])
            stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])
            stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])
            return stats

        def _summarizeKps():
            stats = np.zeros((10,))
            stats[0] = _summarize(1, maxDets=20)
            stats[1] = _summarize(1, maxDets=20, iouThr=.5)
            stats[2] = _summarize(1, maxDets=20, iouThr=.75)
            stats[3] = _summarize(1, maxDets=20, areaRng='medium')
            stats[4] = _summarize(1, maxDets=20, areaRng='large')
            stats[5] = _summarize(0, maxDets=20)
            stats[6] = _summarize(0, maxDets=20, iouThr=.5)
            stats[7] = _summarize(0, maxDets=20, iouThr=.75)
            stats[8] = _summarize(0, maxDets=20, areaRng='medium')
            stats[9] = _summarize(0, maxDets=20, areaRng='large')
            return stats

        if not self.eval:
            raise Exception('Please run accumulate() first')
        iouType = self.params.iouType
        if iouType == 'segm' or iouType == 'bbox':
            summarize = _summarizeDets
        elif iouType == 'keypoints':
            summarize = _summarizeKps
        self.stats = summarize()

    def __str__(self):
        # NOTE(review): legacy pycocotools behavior — prints the summary but
        # returns None, so str(obj) raises TypeError; kept for compatibility.
        self.summarize()
The provided code snippet includes necessary dependencies for implementing the `_evaluate_predictions_on_coco` function. Write a Python function `def _evaluate_predictions_on_coco( coco_gt, coco_results, img_ids=None, )` to solve the following problem:
Evaluate the coco results using COCOEval API.
Here is the function:
def _evaluate_predictions_on_coco(
    coco_gt,
    coco_results,
    img_ids=None,
):
    """
    Evaluate the coco results using COCOEval API.
    """
    assert len(coco_results) > 0

    results = copy.deepcopy(coco_results)
    # When evaluating mask AP, if the results contain bbox, cocoapi will
    # use the box area as the area of the instance, instead of the mask area.
    # This leads to a different definition of small/medium/large.
    # We remove the bbox field to let mask AP use mask area.
    for entry in results:
        entry.pop("bbox", None)

    coco_dt = coco_gt.loadRes(results)
    evaluator = YTVOSeval(coco_gt, coco_dt)
    # For COCO, the default max_dets_per_image is [1, 10, 100].
    evaluator.params.maxDets = [1, 10, 100]  # Default from COCOEval

    if img_ids is not None:
        evaluator.params.imgIds = img_ids

    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()

    return evaluator
188,981 | import numpy as np
import logging
import sys
from fvcore.transforms.transform import (
HFlipTransform,
NoOpTransform,
VFlipTransform,
)
from PIL import Image
from detectron2.data import transforms as T
class ResizeShortestEdge(T.Augmentation):
    """
    Scale the shorter edge to the given size, with a limit of `max_size` on the longer edge.
    If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.
    """
    def __init__(
        self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR, clip_frame_cnt=1
    ):
        """
        Args:
            short_edge_length (list[int]): If ``sample_style=="range"``,
                a [min, max] interval from which to sample the shortest edge length.
                If ``sample_style=="choice"``, a list of shortest edge lengths to sample from.
            max_size (int): maximum allowed longest edge length.
            sample_style (str): either "range" or "choice".
        """
        super().__init__()
        # "*_by_clip" styles re-sample the size only once per video clip
        # (every `clip_frame_cnt` calls) so all frames share the same resize.
        assert sample_style in ["range", "choice", "range_by_clip", "choice_by_clip"], sample_style
        self.is_range = ("range" in sample_style)
        if isinstance(short_edge_length, int):
            short_edge_length = (short_edge_length, short_edge_length)
        if self.is_range:
            assert len(short_edge_length) == 2, (
                "short_edge_length must be two values using 'range' sample style."
                f" Got {short_edge_length}!"
            )
        self._cnt = 0
        # NOTE: _init(locals()) stores every local (short_edge_length, max_size,
        # interp, clip_frame_cnt, ...) as an attribute of self.
        self._init(locals())
    def get_transform(self, image):
        # Re-sample the target size only on the first frame of each clip so
        # that all `clip_frame_cnt` consecutive frames get the same resize.
        if self._cnt % self.clip_frame_cnt == 0:
            if self.is_range:
                self.size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            else:
                self.size = np.random.choice(self.short_edge_length)
            if self.size == 0:
                return NoOpTransform()
            self._cnt = 0   # avoiding overflow
        self._cnt += 1
        h, w = image.shape[:2]
        # Scale so the shorter edge equals self.size, then clamp the longer
        # edge to max_size (re-scaling both dims to preserve aspect ratio).
        scale = self.size * 1.0 / min(h, w)
        if h < w:
            newh, neww = self.size, scale * w
        else:
            newh, neww = scale * h, self.size
        if max(newh, neww) > self.max_size:
            scale = self.max_size * 1.0 / max(newh, neww)
            newh = newh * scale
            neww = neww * scale
        neww = int(neww + 0.5)
        newh = int(newh + 0.5)
        return T.ResizeTransform(h, w, newh, neww, self.interp)
class RandomFlip(T.Augmentation):
    """
    Flip the image horizontally or vertically with the given probability.
    """
    def __init__(self, prob=0.5, *, horizontal=True, vertical=False, clip_frame_cnt=1):
        """
        Args:
            prob (float): probability of flip.
            horizontal (boolean): whether to apply horizontal flipping
            vertical (boolean): whether to apply vertical flipping
        """
        super().__init__()
        if horizontal and vertical:
            raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
        if not horizontal and not vertical:
            raise ValueError("At least one of horiz or vert has to be True!")
        self._cnt = 0
        # NOTE: _init(locals()) stores prob/horizontal/vertical/clip_frame_cnt
        # as attributes of self.
        self._init(locals())
    def get_transform(self, image):
        # Draw the flip decision only on the first frame of each clip so that
        # all `clip_frame_cnt` consecutive frames are flipped consistently.
        if self._cnt % self.clip_frame_cnt == 0:
            self.do = self._rand_range() < self.prob
            self._cnt = 0   # avoiding overflow
        self._cnt += 1
        h, w = image.shape[:2]
        if self.do:
            if self.horizontal:
                return HFlipTransform(w)
            elif self.vertical:
                return VFlipTransform(h)
        else:
            return NoOpTransform()
def build_augmentation(cfg, is_train):
    """Build the list of augmentations for video training or testing.

    At training time: optional crop, clip-consistent resize/flip, and any
    extra augmentations listed in ``cfg.INPUT.AUGMENTATIONS``. At test time:
    only a deterministic shortest-edge resize.

    Args:
        cfg: detectron2 config node (reads the INPUT.* keys).
        is_train (bool): whether to build the training-time pipeline.

    Returns:
        list: detectron2 `Augmentation` objects.
    """
    aug_list = []
    if is_train:
        # Crop
        if cfg.INPUT.CROP.ENABLED:
            aug_list.append(T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))

        # Resize
        min_size = cfg.INPUT.MIN_SIZE_TRAIN
        max_size = cfg.INPUT.MAX_SIZE_TRAIN
        sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
        # "by_clip" sampling keeps one resize per clip of SAMPLING_FRAME_NUM frames.
        ms_clip_frame_cnt = cfg.INPUT.SAMPLING_FRAME_NUM if "by_clip" in cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING else 1
        aug_list.append(ResizeShortestEdge(min_size, max_size, sample_style, clip_frame_cnt=ms_clip_frame_cnt))

        # Flip
        if cfg.INPUT.RANDOM_FLIP != "none":
            if cfg.INPUT.RANDOM_FLIP == "flip_by_clip":
                flip_clip_frame_cnt = cfg.INPUT.SAMPLING_FRAME_NUM
            else:
                flip_clip_frame_cnt = 1

            aug_list.append(
                # NOTE using RandomFlip modified for the support of flip maintenance
                RandomFlip(
                    horizontal=(cfg.INPUT.RANDOM_FLIP == "horizontal") or (cfg.INPUT.RANDOM_FLIP == "flip_by_clip"),
                    vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
                    clip_frame_cnt=flip_clip_frame_cnt,
                )
            )

        # Additional augmentations : brightness, contrast, saturation, rotation
        augmentations = cfg.INPUT.AUGMENTATIONS
        if "brightness" in augmentations:
            aug_list.append(T.RandomBrightness(0.9, 1.1))
        if "contrast" in augmentations:
            aug_list.append(T.RandomContrast(0.9, 1.1))
        if "saturation" in augmentations:
            aug_list.append(T.RandomSaturation(0.9, 1.1))
        if "rotation" in augmentations:
            aug_list.append(
                T.RandomRotation(
                    [-15, 15], expand=False, center=[(0.4, 0.4), (0.6, 0.6)], sample_style="range"
                )
            )
    else:
        # Resize
        min_size = cfg.INPUT.MIN_SIZE_TEST
        max_size = cfg.INPUT.MAX_SIZE_TEST
        sample_style = "choice"
        aug_list.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
    return aug_list
188,987 | import os
from .ytvis import (
register_ytvis_instances,
_get_ytvis_2019_instances_meta,
_get_ytvis_2021_instances_meta,
)
# Mapping: split name -> (image root, json annotation file), both relative to
# the detectron2 datasets root.
_PREDEFINED_SPLITS_YTVIS_2019 = {
    "ytvis_2019_train": ("ytvis_2019/train/JPEGImages",
                         "ytvis_2019/train.json"),
    "ytvis_2019_val": ("ytvis_2019/valid/JPEGImages",
                       "ytvis_2019/valid.json"),
    "ytvis_2019_test": ("ytvis_2019/test/JPEGImages",
                        "ytvis_2019/test.json"),
}
def _get_ytvis_2019_instances_meta():
    """Collect metadata (contiguous id mapping, class names, colors) for YTVIS 2019 thing classes."""
    thing_ids = []
    thing_colors = []
    thing_classes = []
    # Single pass over the category table, keeping only "thing" entries.
    for cat in YTVIS_CATEGORIES_2019:
        if cat["isthing"] == 1:
            thing_ids.append(cat["id"])
            thing_colors.append(cat["color"])
            thing_classes.append(cat["name"])
    assert len(thing_ids) == 40, len(thing_ids)
    # Mapping from the incontiguous YTVIS category id to an id in [0, 39]
    return {
        "thing_dataset_id_to_contiguous_id": {cat_id: idx for idx, cat_id in enumerate(thing_ids)},
        "thing_classes": thing_classes,
        "thing_colors": thing_colors,
    }
def register_ytvis_instances(name, metadata, json_file, image_root):
    """
    Register a dataset in YTVIS's json annotation format for
    instance tracking.

    Args:
        name (str): the name that identifies a dataset, e.g. "ytvis_train".
        metadata (dict): extra metadata associated with this dataset. You can
            leave it as an empty dict.
        json_file (str): path to the json instance annotation file.
        image_root (str or path-like): directory which contains all the images.
    """
    assert isinstance(name, str), name
    assert isinstance(json_file, (str, os.PathLike)), json_file
    assert isinstance(image_root, (str, os.PathLike)), image_root

    def _load():
        # Deferred loading keeps registration cheap; data is read on first use.
        return load_ytvis_json(json_file, image_root, name)

    # 1. register a function which returns dicts
    DatasetCatalog.register(name, _load)
    # 2. Optionally, add metadata about this dataset,
    # since they might be useful in evaluation, visualization or logging
    MetadataCatalog.get(name).set(
        json_file=json_file, image_root=image_root, evaluator_type="ytvis", **metadata
    )
def register_all_ytvis_2019(root):
    """Register every predefined YTVIS-2019 split under the given datasets root."""
    meta = _get_ytvis_2019_instances_meta()
    for split_name, (image_root, json_file) in _PREDEFINED_SPLITS_YTVIS_2019.items():
        # Assume pre-defined datasets live in `./datasets`.
        # URIs (containing "://") are passed through unchanged.
        json_path = json_file if "://" in json_file else os.path.join(root, json_file)
        register_ytvis_instances(
            split_name,
            meta,
            json_path,
            os.path.join(root, image_root),
        )
188,988 | import os
from .ytvis import (
register_ytvis_instances,
_get_ytvis_2019_instances_meta,
_get_ytvis_2021_instances_meta,
)
# Mapping: split name -> (image root, json annotation file), both relative to
# the detectron2 datasets root.
_PREDEFINED_SPLITS_YTVIS_2021 = {
    "ytvis_2021_train": ("ytvis_2021/train/JPEGImages",
                         "ytvis_2021/train.json"),
    "ytvis_2021_val": ("ytvis_2021/valid/JPEGImages",
                       "ytvis_2021/valid.json"),
    "ytvis_2021_test": ("ytvis_2021/test/JPEGImages",
                        "ytvis_2021/test.json"),
}
def _get_ytvis_2021_instances_meta():
    """Collect metadata (contiguous id mapping, class names, colors) for YTVIS 2021 thing classes."""
    thing_ids = []
    thing_colors = []
    thing_classes = []
    # Single pass over the category table, keeping only "thing" entries.
    for cat in YTVIS_CATEGORIES_2021:
        if cat["isthing"] == 1:
            thing_ids.append(cat["id"])
            thing_colors.append(cat["color"])
            thing_classes.append(cat["name"])
    assert len(thing_ids) == 40, len(thing_ids)
    # Mapping from the incontiguous YTVIS category id to an id in [0, 39]
    return {
        "thing_dataset_id_to_contiguous_id": {cat_id: idx for idx, cat_id in enumerate(thing_ids)},
        "thing_classes": thing_classes,
        "thing_colors": thing_colors,
    }
def register_ytvis_instances(name, metadata, json_file, image_root):
    """
    Register a dataset given in YTVIS's json annotation format for
    instance tracking.
    Args:
        name (str): the name that identifies a dataset, e.g. "ytvis_train".
        metadata (dict): extra metadata associated with this dataset. May be
            an empty dict.
        json_file (str): path to the json instance annotation file.
        image_root (str or path-like): directory which contains all the images.
    """
    assert isinstance(name, str), name
    assert isinstance(json_file, (str, os.PathLike)), json_file
    assert isinstance(image_root, (str, os.PathLike)), image_root

    def _loader():
        # Deferred so the (possibly expensive) json parse happens on first use.
        return load_ytvis_json(json_file, image_root, name)

    # 1. register a function which returns dicts
    DatasetCatalog.register(name, _loader)
    # 2. attach metadata used in evaluation, visualization and logging
    MetadataCatalog.get(name).set(
        json_file=json_file,
        image_root=image_root,
        evaluator_type="ytvis",
        **metadata,
    )
def register_all_ytvis_2021(root):
    """Register every pre-defined YTVIS-2021 split found under ``root``."""
    for name, (img_dir, ann_file) in _PREDEFINED_SPLITS_YTVIS_2021.items():
        # Remote annotation paths (containing "://") are kept verbatim;
        # local ones are resolved relative to the dataset root.
        json_path = ann_file if "://" in ann_file else os.path.join(root, ann_file)
        register_ytvis_instances(
            name,
            _get_ytvis_2021_instances_meta(),
            json_path,
            os.path.join(root, img_dir),
        )
188,989 | import itertools
import logging
import torch.utils.data
from detectron2.config import CfgNode, configurable
from detectron2.data.build import (
build_batch_data_loader,
load_proposals_into_dataset,
trivial_batch_collator,
)
from detectron2.data.catalog import DatasetCatalog
from detectron2.data.common import DatasetFromList, MapDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.samplers import InferenceSampler, TrainingSampler
from detectron2.utils.comm import get_world_size
def _compute_num_images_per_worker(cfg: CfgNode):
    """Return the per-worker batch size implied by ``cfg.SOLVER.IMS_PER_BATCH``.

    The total batch size must be a positive multiple of the number of
    distributed workers; otherwise an ``AssertionError`` is raised.
    """
    workers = get_world_size()
    total = cfg.SOLVER.IMS_PER_BATCH
    assert total % workers == 0, (
        "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format(
            total, workers
        )
    )
    assert total >= workers, (
        "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format(
            total, workers
        )
    )
    return total // workers
188,990 | import itertools
import logging
import torch.utils.data
from detectron2.config import CfgNode, configurable
from detectron2.data.build import (
build_batch_data_loader,
load_proposals_into_dataset,
trivial_batch_collator,
)
from detectron2.data.catalog import DatasetCatalog
from detectron2.data.common import DatasetFromList, MapDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.samplers import InferenceSampler, TrainingSampler
from detectron2.utils.comm import get_world_size
def get_detection_dataset_dicts(
dataset_names, filter_empty=True, proposal_files=None
):
def _train_loader_from_config(cfg, mapper, *, dataset=None, sampler=None):
    # Translate a detectron2 CfgNode into the explicit keyword arguments
    # expected by `build_detection_train_loader` (configurable-style factory).
    # `dataset`, `mapper` and `sampler` may be passed explicitly to override
    # the config-derived defaults.
    if dataset is None:
        dataset = get_detection_dataset_dicts(
            cfg.DATASETS.TRAIN,
            filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
            proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
        )
    if mapper is None:
        # True -> training-time transforms/augmentations.
        mapper = DatasetMapper(cfg, True)
    if sampler is None:
        sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
        logger = logging.getLogger(__name__)
        logger.info("Using training sampler {}".format(sampler_name))
        # NOTE(review): `sampler_name` is logged but not used to select a
        # sampler class — every configured name falls through to
        # TrainingSampler. Confirm whether other samplers should be supported.
        sampler = TrainingSampler(len(dataset))
    return {
        "dataset": dataset,
        "sampler": sampler,
        "mapper": mapper,
        "total_batch_size": cfg.SOLVER.IMS_PER_BATCH,
        "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING,
        "num_workers": cfg.DATALOADER.NUM_WORKERS,
    }
188,991 | import itertools
import logging
import torch.utils.data
from detectron2.config import CfgNode, configurable
from detectron2.data.build import (
build_batch_data_loader,
load_proposals_into_dataset,
trivial_batch_collator,
)
from detectron2.data.catalog import DatasetCatalog
from detectron2.data.common import DatasetFromList, MapDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.samplers import InferenceSampler, TrainingSampler
from detectron2.utils.comm import get_world_size
The provided code snippet includes necessary dependencies for implementing the `build_detection_train_loader` function. Write a Python function `def build_detection_train_loader( dataset, *, mapper, sampler=None, total_batch_size, aspect_ratio_grouping=True, num_workers=0 )` to solve the following problem:
Build a dataloader for object detection with some default features. This interface is experimental. Args: dataset (list or torch.utils.data.Dataset): a list of dataset dicts, or a map-style pytorch dataset. They can be obtained by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. mapper (callable): a callable which takes a sample (dict) from dataset and returns the format to be consumed by the model. When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``. sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices to be applied on ``dataset``. Default to :class:`TrainingSampler`, which coordinates a random shuffle sequence across all workers. total_batch_size (int): total batch size across all workers. Batching simply puts data into a list. aspect_ratio_grouping (bool): whether to group images with similar aspect ratio for efficiency. When enabled, it requires each element in dataset be a dict with keys "width" and "height". num_workers (int): number of parallel data loading workers Returns: torch.utils.data.DataLoader: a dataloader. Each output from it is a ``list[mapped_element]`` of length ``total_batch_size / num_workers``, where ``mapped_element`` is produced by the ``mapper``.
Here is the function:
def build_detection_train_loader(
    dataset, *, mapper, sampler=None, total_batch_size, aspect_ratio_grouping=True, num_workers=0
):
    """
    Build a dataloader for object detection with some default features.
    This interface is experimental.
    Args:
        dataset (list or torch.utils.data.Dataset): dataset dicts, or a
            map-style pytorch dataset, e.g. from :func:`DatasetCatalog.get`
            or :func:`get_detection_dataset_dicts`.
        mapper (callable): maps one sample (dict) from ``dataset`` to the
            format consumed by the model. With a cfg, the default choice is
            ``DatasetMapper(cfg, is_train=True)``.
        sampler (torch.utils.data.sampler.Sampler or None): produces indices
            into ``dataset``. Defaults to :class:`TrainingSampler`, which
            coordinates an infinite random shuffle across all workers.
        total_batch_size (int): total batch size across all workers. Batching
            simply puts data into a list.
        aspect_ratio_grouping (bool): group images with similar aspect ratio
            for efficiency; requires each element to be a dict with "width"
            and "height" keys.
        num_workers (int): number of parallel data loading workers.
    Returns:
        torch.utils.data.DataLoader: each output is a ``list[mapped_element]``
        of length ``total_batch_size / num_workers``, where ``mapped_element``
        is produced by ``mapper``.
    """
    if isinstance(dataset, list):
        # Wrap raw dataset dicts; copy=False avoids duplicating the list.
        dataset = DatasetFromList(dataset, copy=False)
    dataset = dataset if mapper is None else MapDataset(dataset, mapper)
    sampler = TrainingSampler(len(dataset)) if sampler is None else sampler
    assert isinstance(sampler, torch.utils.data.sampler.Sampler)
    return build_batch_data_loader(
        dataset,
        sampler,
        total_batch_size,
        aspect_ratio_grouping=aspect_ratio_grouping,
        num_workers=num_workers,
    )
188,992 | import itertools
import logging
import torch.utils.data
from detectron2.config import CfgNode, configurable
from detectron2.data.build import (
build_batch_data_loader,
load_proposals_into_dataset,
trivial_batch_collator,
)
from detectron2.data.catalog import DatasetCatalog
from detectron2.data.common import DatasetFromList, MapDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.samplers import InferenceSampler, TrainingSampler
from detectron2.utils.comm import get_world_size
def get_detection_dataset_dicts(
    dataset_names, filter_empty=True, proposal_files=None
):
    """
    Load and prepare dataset dicts for instance detection/segmentation and
    semantic segmentation.
    Args:
        dataset_names (str or list[str]): a dataset name or a list of dataset names
        filter_empty (bool): whether to filter out images without instance annotations
        proposal_files (list[str]): if given, a list of object proposal files
            that match each dataset in `dataset_names`.
    Returns:
        list[dict]: a list of dicts following the standard dataset dict format.
    """
    if isinstance(dataset_names, str):
        dataset_names = [dataset_names]
    assert len(dataset_names)

    per_dataset = [DatasetCatalog.get(n) for n in dataset_names]
    for n, dicts in zip(dataset_names, per_dataset):
        assert len(dicts), "Dataset '{}' is empty!".format(n)

    if proposal_files is not None:
        # One precomputed-proposal file per dataset, in the same order.
        assert len(dataset_names) == len(proposal_files)
        per_dataset = [
            load_proposals_into_dataset(dicts, pf)
            for dicts, pf in zip(per_dataset, proposal_files)
        ]

    dataset_dicts = list(itertools.chain.from_iterable(per_dataset))

    # Only instance-style datasets carry "annotations"; semantic-segmentation
    # datasets do not, and are never filtered.
    if filter_empty and "annotations" in dataset_dicts[0]:
        dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts, dataset_names)

    assert len(dataset_dicts), "No valid data found in {}.".format(",".join(dataset_names))
    return dataset_dicts
The provided code snippet includes necessary dependencies for implementing the `_test_loader_from_config` function. Write a Python function `def _test_loader_from_config(cfg, dataset_name, mapper=None)` to solve the following problem:
Uses the given `dataset_name` argument (instead of the names in cfg), because the standard practice is to evaluate each test set individually (not combining them).
Here is the function:
def _test_loader_from_config(cfg, dataset_name, mapper=None):
    """
    Uses the given `dataset_name` argument (instead of the names in cfg), because the
    standard practice is to evaluate each test set individually (not combining them).
    """
    if cfg.MODEL.LOAD_PROPOSALS:
        # Pick the proposal file whose position matches this dataset's
        # position in cfg.DATASETS.TEST.
        idx = list(cfg.DATASETS.TEST).index(dataset_name)
        proposal_files = [cfg.DATASETS.PROPOSAL_FILES_TEST[idx]]
    else:
        proposal_files = None
    dataset = get_detection_dataset_dicts(
        [dataset_name],
        filter_empty=False,
        proposal_files=proposal_files,
    )
    return {
        "dataset": dataset,
        "mapper": DatasetMapper(cfg, False) if mapper is None else mapper,
        "num_workers": cfg.DATALOADER.NUM_WORKERS,
    }
188,993 | import itertools
import logging
import torch.utils.data
from detectron2.config import CfgNode, configurable
from detectron2.data.build import (
build_batch_data_loader,
load_proposals_into_dataset,
trivial_batch_collator,
)
from detectron2.data.catalog import DatasetCatalog
from detectron2.data.common import DatasetFromList, MapDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.samplers import InferenceSampler, TrainingSampler
from detectron2.utils.comm import get_world_size
The provided code snippet includes necessary dependencies for implementing the `build_detection_test_loader` function. Write a Python function `def build_detection_test_loader(dataset, *, mapper, num_workers=0)` to solve the following problem:
Similar to `build_detection_train_loader`, but uses a batch size of 1. This interface is experimental. Args: dataset (list or torch.utils.data.Dataset): a list of dataset dicts, or a map-style pytorch dataset. They can be obtained by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. mapper (callable): a callable which takes a sample (dict) from dataset and returns the format to be consumed by the model. When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``. num_workers (int): number of parallel data loading workers Returns: DataLoader: a torch DataLoader, that loads the given detection dataset, with test-time transformation and batching. Examples: :: data_loader = build_detection_test_loader( DatasetRegistry.get("my_test"), mapper=DatasetMapper(...)) # or, instantiate with a CfgNode: data_loader = build_detection_test_loader(cfg, "my_test")
Here is the function:
def build_detection_test_loader(dataset, *, mapper, num_workers=0):
    """
    Similar to `build_detection_train_loader`, but uses a batch size of 1.
    This interface is experimental.
    Args:
        dataset (list or torch.utils.data.Dataset): dataset dicts, or a
            map-style pytorch dataset, e.g. from :func:`DatasetCatalog.get`
            or :func:`get_detection_dataset_dicts`.
        mapper (callable): maps one sample (dict) from ``dataset`` to the
            format consumed by the model. With a cfg, the default choice is
            ``DatasetMapper(cfg, is_train=False)``.
        num_workers (int): number of parallel data loading workers.
    Returns:
        DataLoader: a torch DataLoader that serves the given detection
        dataset with test-time transformation and batching.
    Examples:
    ::
        data_loader = build_detection_test_loader(
            DatasetRegistry.get("my_test"),
            mapper=DatasetMapper(...))
        # or, instantiate with a CfgNode:
        data_loader = build_detection_test_loader(cfg, "my_test")
    """
    if isinstance(dataset, list):
        dataset = DatasetFromList(dataset, copy=False)
    if mapper is not None:
        dataset = MapDataset(dataset, mapper)
    # Inference always uses 1 image per worker, the standard setting when
    # reporting inference time in papers.
    batch_sampler = torch.utils.data.sampler.BatchSampler(
        InferenceSampler(len(dataset)), 1, drop_last=False
    )
    return torch.utils.data.DataLoader(
        dataset,
        num_workers=num_workers,
        batch_sampler=batch_sampler,
        collate_fn=trivial_batch_collator,
    )
188,994 | import logging
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.utils.comm import get_world_size
from detectron2.projects.point_rend.point_features import (
get_uncertain_point_coords_with_randomness,
point_sample,
)
from mask2former.utils.misc import is_dist_avail_and_initialized
The provided code snippet includes necessary dependencies for implementing the `dice_loss` function. Write a Python function `def dice_loss( inputs: torch.Tensor, targets: torch.Tensor, num_masks: float, )` to solve the following problem:
Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class).
Here is the function:
def dice_loss(
    inputs: torch.Tensor,
    targets: torch.Tensor,
    num_masks: float,
):
    """
    Compute the DICE loss, similar to generalized IOU for masks.
    Args:
        inputs: A float tensor of arbitrary shape (logits).
            The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
            classification label for each element in inputs
            (0 for the negative class and 1 for the positive class).
        num_masks: normalization constant, typically the number of masks in
            the batch.
    Returns:
        Scalar tensor: summed per-mask dice loss divided by ``num_masks``.
    """
    inputs = inputs.sigmoid()
    inputs = inputs.flatten(1)
    # Fix: flatten targets as well. Previously only `inputs` was flattened,
    # which broke for the >2-D "arbitrary shape" inputs the docstring
    # promises (shape mismatch in the elementwise product below). For
    # already-2-D targets this is a no-op, so callers are unaffected.
    targets = targets.flatten(1)
    numerator = 2 * (inputs * targets).sum(-1)
    denominator = inputs.sum(-1) + targets.sum(-1)
    # +1 smoothing keeps the loss defined when both masks are empty.
    loss = 1 - (numerator + 1) / (denominator + 1)
    return loss.sum() / num_masks
188,995 | import logging
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.utils.comm import get_world_size
from detectron2.projects.point_rend.point_features import (
get_uncertain_point_coords_with_randomness,
point_sample,
)
from mask2former.utils.misc import is_dist_avail_and_initialized
The provided code snippet includes necessary dependencies for implementing the `sigmoid_ce_loss` function. Write a Python function `def sigmoid_ce_loss( inputs: torch.Tensor, targets: torch.Tensor, num_masks: float, )` to solve the following problem:
Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). Returns: Loss tensor
Here is the function:
def sigmoid_ce_loss(
    inputs: torch.Tensor,
    targets: torch.Tensor,
    num_masks: float,
):
    """
    Binary cross-entropy on logits, averaged per mask and normalized.
    Args:
        inputs: A float tensor of arbitrary shape (logits).
            The predictions for each example.
        targets: A float tensor with the same shape as inputs; binary labels
            (0 for the negative class, 1 for the positive class).
    Returns:
        Loss tensor (scalar).
    """
    per_element = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    per_mask = per_element.mean(1)
    return per_mask.sum() / num_masks
188,996 | import logging
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.utils.comm import get_world_size
from detectron2.projects.point_rend.point_features import (
get_uncertain_point_coords_with_randomness,
point_sample,
)
from mask2former.utils.misc import is_dist_avail_and_initialized
The provided code snippet includes necessary dependencies for implementing the `calculate_uncertainty` function. Write a Python function `def calculate_uncertainty(logits)` to solve the following problem:
We estimate uncertainty as L1 distance between 0.0 and the logit prediction in 'logits' for the foreground class in `classes`. Args: logits (Tensor): A tensor of shape (R, 1, ...) for class-specific or class-agnostic, where R is the total number of predicted masks in all images and C is the number of foreground classes. The values are logits. Returns: scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with the most uncertain locations having the highest uncertainty score.
Here is the function:
def calculate_uncertainty(logits):
    """
    Estimate uncertainty as the L1 distance between 0.0 and the logit
    prediction in `logits` for the foreground class in `classes`.
    Args:
        logits (Tensor): shape (R, 1, ...), class-agnostic (or class-specific
            with a single class), where R is the total number of predicted
            masks over all images. Values are logits.
    Returns:
        scores (Tensor): shape (R, 1, ...); the most uncertain locations
            carry the highest (least negative) score.
    """
    # Exactly one channel expected: uncertainty is defined per mask.
    assert logits.shape[1] == 1
    # |logit| is large where the model is confident, so negate it: values
    # near zero (undecided) map to the highest uncertainty scores.
    return torch.abs(logits.clone()).neg()
188,997 | import logging
import fvcore.nn.weight_init as weight_init
from typing import Optional
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d
from mask2former.modeling.transformer_decoder.maskformer_transformer_decoder import TRANSFORMER_DECODER_REGISTRY
from .position_encoding import PositionEmbeddingSine3D
The provided code snippet includes necessary dependencies for implementing the `_get_activation_fn` function. Write a Python function `def _get_activation_fn(activation)` to solve the following problem:
Return an activation function given a string
Here is the function:
def _get_activation_fn(activation):
    """Return the activation function matching a config string.

    Args:
        activation (str): one of "relu", "gelu" or "glu".
    Returns:
        The corresponding ``torch.nn.functional`` callable.
    Raises:
        RuntimeError: if ``activation`` is not a supported name.
    """
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    # Fix: the old message said "relu/gelu" even though "glu" is supported.
    raise RuntimeError(F"activation should be relu/gelu/glu, not {activation}.")
189,001 | import logging
from contextlib import contextmanager
from functools import wraps
import torch
from torch.cuda.amp import autocast
@contextmanager
def _ignore_torch_cuda_oom():
    """
    A context which ignores CUDA OOM exception from pytorch.

    Fix: the ``@contextmanager`` decorator was missing, so calling the
    function returned a bare generator with no ``__enter__``/``__exit__``
    and every ``with _ignore_torch_cuda_oom():`` site (see
    ``retry_if_cuda_oom``) would fail with an AttributeError.
    """
    try:
        yield
    except RuntimeError as e:
        # NOTE: matching on the message text is fragile — the string may
        # change across pytorch versions.
        if "CUDA out of memory. " in str(e):
            pass
        else:
            raise
The provided code snippet includes necessary dependencies for implementing the `retry_if_cuda_oom` function. Write a Python function `def retry_if_cuda_oom(func)` to solve the following problem:
Makes a function retry itself after encountering pytorch's CUDA OOM error. It will first retry after calling `torch.cuda.empty_cache()`. If that still fails, it will then retry by trying to convert inputs to CPUs. In this case, it expects the function to dispatch to CPU implementation. The return values may become CPU tensors as well and it's user's responsibility to convert it back to CUDA tensor if needed. Args: func: a stateless callable that takes tensor-like objects as arguments Returns: a callable which retries `func` if OOM is encountered. Examples: :: output = retry_if_cuda_oom(some_torch_function)(input1, input2) # output may be on CPU even if inputs are on GPU Note: 1. When converting inputs to CPU, it will only look at each argument and check if it has `.device` and `.to` for conversion. Nested structures of tensors are not supported. 2. Since the function might be called more than once, it has to be stateless.
Here is the function:
def retry_if_cuda_oom(func):
    """
    Makes a function retry itself after encountering
    pytorch's CUDA OOM error.
    It will first retry after calling `torch.cuda.empty_cache()`.
    If that still fails, it will then retry by trying to convert inputs to CPUs.
    In this case, it expects the function to dispatch to CPU implementation.
    The return values may become CPU tensors as well and it's user's
    responsibility to convert it back to CUDA tensor if needed.
    Args:
        func: a stateless callable that takes tensor-like objects as arguments
    Returns:
        a callable which retries `func` if OOM is encountered.
    Examples:
    ::
        output = retry_if_cuda_oom(some_torch_function)(input1, input2)
        # output may be on CPU even if inputs are on GPU
    Note:
        1. When converting inputs to CPU, it will only look at each argument and check
           if it has `.device` and `.to` for conversion. Nested structures of tensors
           are not supported.
        2. Since the function might be called more than once, it has to be
           stateless.
    """
    def maybe_to_cpu(x):
        # Only objects that look like CUDA tensors (a `.device` whose type is
        # "cuda" plus a `.to` method) are moved; everything else passes through.
        try:
            like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to")
        except AttributeError:
            like_gpu_tensor = False
        if like_gpu_tensor:
            # Cast to float32 on CPU — presumably because some ops lack
            # fp16 CPU kernels. TODO(review): confirm this is the intent.
            return x.to(device="cpu").to(torch.float32)
        else:
            return x
    @wraps(func)
    def wrapped(*args, **kwargs):
        # Attempt 1: run as-is; only a CUDA OOM RuntimeError is swallowed.
        with _ignore_torch_cuda_oom():
            return func(*args, **kwargs)
        # Attempt 2: free cached allocator blocks, then retry on GPU.
        torch.cuda.empty_cache()
        with _ignore_torch_cuda_oom():
            return func(*args, **kwargs)
        # Attempt 3 (last resort): copy tensor args to CPU. This slows down
        # the code significantly, therefore print a notice.
        logger = logging.getLogger(__name__)
        logger.info("Attempting to copy inputs to CPU due to CUDA OOM")
        new_args = (maybe_to_cpu(x) for x in args)
        new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}
        # autocast disabled so mixed precision cannot reintroduce fp16 here.
        with autocast(enabled=False):
            return func(*new_args, **new_kwargs)
    return wrapped
189,002 | import argparse
import json
import os
from collections import defaultdict
from tqdm import tqdm
import numpy as np
import torch
from detectron2.data import MetadataCatalog
from detectron2.data.detection_utils import read_image
from detectron2.utils.file_io import PathManager
from pycocotools import mask as maskUtils
from panopticapi.evaluation import PQStat
The provided code snippet includes necessary dependencies for implementing the `default_argument_parser` function. Write a Python function `def default_argument_parser()` to solve the following problem:
Creates a parser with some common arguments used by analysis tools. Returns: argparse.ArgumentParser:
Here is the function:
def default_argument_parser():
    """
    Creates a parser with some common arguments used by analysis tools.
    Returns:
        argparse.ArgumentParser:
    """
    parser = argparse.ArgumentParser(description="Evaluate PQ metric for semantic segmentation.")
    # NOTE: Cityscapes is not supported; its prediction format would have to
    # be converted to Detectron2's prediction format first.
    supported = [
        "ade20k_sem_seg_val",
        "coco_2017_test_stuff_10k_sem_seg",
        "ade20k_full_sem_seg_val",
    ]
    parser.add_argument(
        "--dataset-name",
        default=supported[0],
        choices=supported,
        help="dataset name you want to evaluate",
    )
    parser.add_argument("--json-file", default="", help="path to detection json file")
    return parser
189,003 | import argparse
import json
import os
from collections import defaultdict
from tqdm import tqdm
import numpy as np
import torch
from detectron2.data import MetadataCatalog
from detectron2.data.detection_utils import read_image
from detectron2.utils.file_io import PathManager
from pycocotools import mask as maskUtils
from panopticapi.evaluation import PQStat
def pq_compute_single_image(segm_gt, segm_dt, categories, ignore_label):
    """Compute panoptic-quality statistics for one GT/prediction pair.

    Each semantic category present in a map is treated as one segment
    (segment id == category id), and GT is matched against the prediction
    with the standard PQ rule (IoU > 0.5).

    Args:
        segm_gt (np.ndarray): per-pixel ground-truth category ids.
        segm_dt (np.ndarray): per-pixel predicted category ids, same shape.
        categories: container of valid category ids (membership-tested).
        ignore_label (int): label treated as VOID / ignored.

    Returns:
        PQStat: accumulated TP / FP / FN / IoU statistics for this image.

    Raises:
        KeyError: if the prediction contains labels inconsistent with its
            own segment list or unknown categories.
    """
    pq_stat = PQStat()

    VOID = ignore_label
    # Encodes a (gt_label, pred_label) pair into a single integer.
    OFFSET = 256 * 256 * 256
    pan_gt = segm_gt
    pan_pred = segm_dt

    # Build per-image "annotations": one segment per category present.
    gt_ann = {'segments_info': []}
    labels, labels_cnt = np.unique(segm_gt, return_counts=True)
    for cat_id, cnt in zip(labels, labels_cnt):
        if cat_id == VOID:
            continue
        gt_ann['segments_info'].append(
            {"id": cat_id, "category_id": cat_id, "area": cnt, "iscrowd": 0}
        )
    pred_ann = {'segments_info': []}
    for cat_id in np.unique(segm_dt):
        pred_ann['segments_info'].append({"id": cat_id, "category_id": cat_id})

    gt_segms = {el['id']: el for el in gt_ann['segments_info']}
    pred_segms = {el['id']: el for el in pred_ann['segments_info']}

    # Predicted segment area calculation + prediction sanity checks.
    pred_labels_set = set(el['id'] for el in pred_ann['segments_info'])
    labels, labels_cnt = np.unique(pan_pred, return_counts=True)
    for label, label_cnt in zip(labels, labels_cnt):
        if label not in pred_segms:
            if label == VOID:
                continue
            # Fix: the original messages referenced an undefined `image_id`,
            # turning these sanity checks into NameErrors.
            raise KeyError('Segment with ID {} is presented in PNG and not presented in JSON.'.format(label))
        pred_segms[label]['area'] = label_cnt
        pred_labels_set.remove(label)
        if pred_segms[label]['category_id'] not in categories:
            raise KeyError('Segment with ID {} has unknown category_id {}.'.format(label, pred_segms[label]['category_id']))
    if len(pred_labels_set) != 0:
        raise KeyError('The following segment IDs {} are presented in JSON and not presented in PNG.'.format(list(pred_labels_set)))

    # Confusion "matrix": pixel counts per (gt_label, pred_label) pair.
    pan_gt_pred = pan_gt.astype(np.uint64) * OFFSET + pan_pred.astype(np.uint64)
    gt_pred_map = {}
    labels, labels_cnt = np.unique(pan_gt_pred, return_counts=True)
    for label, intersection in zip(labels, labels_cnt):
        gt_id = label // OFFSET
        pred_id = label % OFFSET
        gt_pred_map[(gt_id, pred_id)] = intersection

    # Count all matched pairs (same category, IoU > 0.5).
    gt_matched = set()
    pred_matched = set()
    for label_tuple, intersection in gt_pred_map.items():
        gt_label, pred_label = label_tuple
        if gt_label not in gt_segms:
            continue
        if pred_label not in pred_segms:
            continue
        if gt_segms[gt_label]['iscrowd'] == 1:
            continue
        if gt_segms[gt_label]['category_id'] != pred_segms[pred_label]['category_id']:
            continue
        # Union excludes pixels that are VOID in the ground truth.
        union = pred_segms[pred_label]['area'] + gt_segms[gt_label]['area'] - intersection - gt_pred_map.get((VOID, pred_label), 0)
        iou = intersection / union
        if iou > 0.5:
            pq_stat[gt_segms[gt_label]['category_id']].tp += 1
            pq_stat[gt_segms[gt_label]['category_id']].iou += iou
            gt_matched.add(gt_label)
            pred_matched.add(pred_label)

    # Count false negatives (fix: this loop was mislabeled "false positives").
    crowd_labels_dict = {}
    for gt_label, gt_info in gt_segms.items():
        if gt_label in gt_matched:
            continue
        # Crowd segments are ignored (never triggered here: iscrowd is
        # always 0 above, kept for parity with panopticapi).
        if gt_info['iscrowd'] == 1:
            crowd_labels_dict[gt_info['category_id']] = gt_label
            continue
        pq_stat[gt_info['category_id']].fn += 1

    # Count false positives.
    for pred_label, pred_info in pred_segms.items():
        if pred_label in pred_matched:
            continue
        # Intersection of the segment with VOID...
        intersection = gt_pred_map.get((VOID, pred_label), 0)
        # ...plus intersection with the corresponding CROWD region, if any.
        if pred_info['category_id'] in crowd_labels_dict:
            intersection += gt_pred_map.get((crowd_labels_dict[pred_info['category_id']], pred_label), 0)
        # A predicted segment is ignored if more than half of it overlaps
        # VOID/CROWD regions.
        if intersection / pred_info['area'] > 0.5:
            continue
        pq_stat[pred_info['category_id']].fp += 1
    return pq_stat
189,004 | import logging
import numpy as np
from collections import Counter
import tqdm
from fvcore.nn import flop_count_table
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate
from detectron2.data import build_detection_test_loader
from detectron2.engine import default_argument_parser
from detectron2.modeling import build_model
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.analysis import (
FlopCountAnalysis,
activation_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
import os
import sys
from mask2former import add_maskformer2_config
def setup(args):
    """Build a detectron2 config from CLI args (yaml CfgNode or LazyConfig)."""
    is_yaml = args.config_file.endswith(".yaml")
    if not is_yaml:
        cfg = LazyConfig.load(args.config_file)
        cfg = LazyConfig.apply_overrides(cfg, args.opts)
    else:
        cfg = get_cfg()
        add_deeplab_config(cfg)
        add_maskformer2_config(cfg)
        cfg.merge_from_file(args.config_file)
        cfg.DATALOADER.NUM_WORKERS = 0  # analysis runs single-process
        cfg.merge_from_list(args.opts)
        cfg.freeze()
    # initialize loggers for both fvcore and detectron2
    setup_logger(name="fvcore")
    setup_logger()
    return cfg
189,005 | import logging
import numpy as np
from collections import Counter
import tqdm
from fvcore.nn import flop_count_table
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate
from detectron2.data import build_detection_test_loader
from detectron2.engine import default_argument_parser
from detectron2.modeling import build_model
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.analysis import (
FlopCountAnalysis,
activation_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
import os
import sys
from mask2former import add_maskformer2_config
logger = logging.getLogger("detectron2")
def do_flop(cfg):
    """Estimate model FLOPs over ``args.num_inputs`` samples and log statistics.

    Relies on the module-level ``args`` from the CLI parser (hence the noqa).
    Assumes the data loader yields at least one sample.
    """
    if isinstance(cfg, CfgNode):
        data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    else:
        data_loader = instantiate(cfg.dataloader.test)
        model = instantiate(cfg.model)
        model.to(cfg.train.device)
        DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
    model.eval()

    per_op_counts = Counter()
    flop_totals = []
    for idx, data in zip(tqdm.trange(args.num_inputs), data_loader):  # noqa
        if args.use_fixed_input_size and isinstance(cfg, CfgNode):
            import torch

            crop_size = cfg.INPUT.CROP.SIZE[0]
            data[0]["image"] = torch.zeros((3, crop_size, crop_size))
        flops = FlopCountAnalysis(model, data)
        if idx > 0:
            # only emit fvcore warnings for the first sample
            flops.unsupported_ops_warnings(False).uncalled_modules_warnings(False)
        per_op_counts += flops.by_operator()
        flop_totals.append(flops.total())

    # the table is derived from the last analyzed sample only
    logger.info("Flops table computed from only one input sample:\n" + flop_count_table(flops))
    logger.info(
        "Average GFlops for each type of operators:\n"
        + str([(k, v / (idx + 1) / 1e9) for k, v in per_op_counts.items()])
    )
    logger.info(
        "Total GFlops: {:.1f}±{:.1f}".format(np.mean(flop_totals) / 1e9, np.std(flop_totals) / 1e9)
    )
189,006 | import logging
import numpy as np
from collections import Counter
import tqdm
from fvcore.nn import flop_count_table
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate
from detectron2.data import build_detection_test_loader
from detectron2.engine import default_argument_parser
from detectron2.modeling import build_model
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.analysis import (
FlopCountAnalysis,
activation_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
import os
import sys
from mask2former import add_maskformer2_config
logger = logging.getLogger("detectron2")
def do_activation(cfg):
    """Count model activations over ``args.num_inputs`` samples and log statistics.

    Mirrors ``do_flop``: builds the model and data loader from either a yaml
    CfgNode or a LazyConfig, accumulates per-operator activation counts, and
    logs the per-operator average plus the total mean/std. Relies on the
    module-level ``args`` from the CLI parser (hence the noqa).
    """
    if isinstance(cfg, CfgNode):
        data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    else:
        data_loader = instantiate(cfg.dataloader.test)
        model = instantiate(cfg.model)
        model.to(cfg.train.device)
        DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
    model.eval()

    counts = Counter()
    total_activations = []
    for idx, data in zip(tqdm.trange(args.num_inputs), data_loader):  # noqa
        count = activation_count_operators(model, data)
        counts += count
        total_activations.append(sum(count.values()))
    logger.info(
        "(Million) Activations for Each Type of Operators:\n"
        # BUG FIX: average over the number of processed samples (idx + 1), not
        # idx — the old "v / idx" was off by one and raised ZeroDivisionError
        # when only one input was processed. Matches do_flop's averaging.
        + str([(k, v / (idx + 1)) for k, v in counts.items()])
    )
    logger.info(
        "Total (Million) Activations: {}±{}".format(
            np.mean(total_activations), np.std(total_activations)
        )
    )
189,007 | import logging
import numpy as np
from collections import Counter
import tqdm
from fvcore.nn import flop_count_table
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate
from detectron2.data import build_detection_test_loader
from detectron2.engine import default_argument_parser
from detectron2.modeling import build_model
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.analysis import (
FlopCountAnalysis,
activation_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
import os
import sys
from mask2former import add_maskformer2_config
logger = logging.getLogger("detectron2")
def do_parameter(cfg):
    """Log a table of parameter counts for the model described by ``cfg``."""
    model = build_model(cfg) if isinstance(cfg, CfgNode) else instantiate(cfg.model)
    logger.info("Parameter Count:\n" + parameter_count_table(model, max_depth=5))
189,008 | import logging
import numpy as np
from collections import Counter
import tqdm
from fvcore.nn import flop_count_table
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate
from detectron2.data import build_detection_test_loader
from detectron2.engine import default_argument_parser
from detectron2.modeling import build_model
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.analysis import (
FlopCountAnalysis,
activation_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
import os
import sys
from mask2former import add_maskformer2_config
logger = logging.getLogger("detectron2")
def do_structure(cfg):
    """Log the model's module structure (its repr)."""
    model = build_model(cfg) if isinstance(cfg, CfgNode) else instantiate(cfg.model)
    logger.info("Model Structure:\n" + str(model))
189,009 | import json
import os
import subprocess
import tempfile
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING, List, Literal, Optional, Union
from zoneinfo import ZoneInfo
import requests
from channels.auth import AuthMiddlewareStack
from channels.db import database_sync_to_async
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.http import FileResponse
from knox.auth import TokenAuthentication
from rest_framework.response import Response
from agents.models import Agent
from core.utils import get_core_settings, token_is_valid
from logs.models import DebugLog
from tacticalrmm.constants import (
MONTH_DAYS,
MONTHS,
REDIS_LOCK_EXPIRE,
WEEK_DAYS,
WEEKS,
AgentPlat,
CustomFieldType,
DebugLogType,
ScriptShell,
)
from tacticalrmm.helpers import (
get_certs,
get_nats_hosts,
get_nats_internal_protocol,
get_nats_ports,
notify_error,
)
def token_is_valid() -> tuple[str, bool]:
    """
    Return (token, is_valid). Token will be an empty string when not valid.
    """
    from core.models import CodeSignToken

    t: "Optional[CodeSignToken]" = CodeSignToken.objects.first()
    # valid only when a row exists, it carries a token, and the token checks out
    if t and t.token and t.is_valid:
        return t.token, True
    return "", False
class DebugLog(models.Model):
    """Debug/diagnostic log entries, optionally tied to an agent.

    The info/warning/error/critical helpers persist an entry only when the
    globally configured debug level (``get_debug_level()``) permits that
    severity.
    """

    objects = PermissionQuerySet.as_manager()
    id = models.BigAutoField(primary_key=True)
    entry_time = models.DateTimeField(auto_now_add=True)
    # null for system-wide entries not tied to a specific agent
    agent = models.ForeignKey(
        "agents.Agent",
        related_name="debuglogs",
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    log_level = models.CharField(
        max_length=50, choices=DebugLogLevel.choices, default=DebugLogLevel.INFO
    )
    log_type = models.CharField(
        max_length=50, choices=DebugLogType.choices, default=DebugLogType.SYSTEM_ISSUES
    )
    message = models.TextField(null=True, blank=True)

    # BUG FIX: these helpers are invoked on the class (e.g. DebugLog.error(...)
    # in generate_winagent_exe) and take ``cls``, so they must be classmethods;
    # the @classmethod decorators were missing.
    @classmethod
    def info(
        cls,
        message: str,
        agent: "Optional[Agent]" = None,
        log_type: str = DebugLogType.SYSTEM_ISSUES,
    ) -> None:
        if get_debug_level() == DebugLogLevel.INFO:
            cls.objects.create(
                log_level=DebugLogLevel.INFO,
                agent=agent,
                log_type=log_type,
                message=message,
            )

    @classmethod
    def warning(
        cls,
        message: str,
        agent: "Optional[Agent]" = None,
        log_type: str = DebugLogType.SYSTEM_ISSUES,
    ) -> None:
        if get_debug_level() in (DebugLogLevel.INFO, DebugLogLevel.WARN):
            cls.objects.create(
                # BUG FIX: warning entries were stored with INFO level
                log_level=DebugLogLevel.WARN,
                agent=agent,
                log_type=log_type,
                message=message,
            )

    @classmethod
    def error(
        cls,
        message: str,
        agent: "Optional[Agent]" = None,
        log_type: str = DebugLogType.SYSTEM_ISSUES,
    ) -> None:
        if get_debug_level() in (
            DebugLogLevel.INFO,
            DebugLogLevel.WARN,
            DebugLogLevel.ERROR,
        ):
            cls.objects.create(
                log_level=DebugLogLevel.ERROR,
                agent=agent,
                log_type=log_type,
                message=message,
            )

    @classmethod
    def critical(
        cls,
        message: str,
        agent: "Optional[Agent]" = None,
        log_type: str = DebugLogType.SYSTEM_ISSUES,
    ) -> None:
        if get_debug_level() in (
            DebugLogLevel.INFO,
            DebugLogLevel.WARN,
            DebugLogLevel.ERROR,
            DebugLogLevel.CRITICAL,
        ):
            cls.objects.create(
                log_level=DebugLogLevel.CRITICAL,
                agent=agent,
                log_type=log_type,
                message=message,
            )
def get_agent_url(*, goarch: str, plat: str, token: str = "") -> str:
    """Return the agent download URL.

    With a code-sign token, point at the private agents endpoint; otherwise
    fall back to the public GitHub release asset for the latest version.
    """
    ver = settings.LATEST_AGENT_VER
    if not token:
        return f"https://github.com/amidaware/rmmagent/releases/download/v{ver}/tacticalagent-v{ver}-{plat}-{goarch}.exe"
    params = {
        "version": ver,
        "arch": goarch,
        "token": token,
        "plat": plat,
        "api": settings.ALLOWED_HOSTS[0],
    }
    return settings.AGENTS_URL + urllib.parse.urlencode(params)
def generate_winagent_exe(
    *,
    client: int,
    site: int,
    agent_type: str,
    rdp: int,
    ping: int,
    power: int,
    goarch: str,
    token: str,
    api: str,
    file_name: str,
) -> Union[Response, FileResponse]:
    """Build a customized Windows agent installer via the exe generation service.

    Posts the deployment parameters to ``settings.EXE_GEN_URL``, streams the
    resulting installer into a temporary file, and returns it as a
    ``FileResponse`` named *file_name*. If the HTTP request itself fails, the
    error is recorded via ``DebugLog.error`` and an error ``Response`` is
    returned through ``notify_error`` instead.
    """
    from agents.utils import get_agent_url
    # inno setup installer filename passed to the generation service
    inno = (
        f"tacticalagent-v{settings.LATEST_AGENT_VER}-{AgentPlat.WINDOWS}-{goarch}.exe"
    )
    # code-sign token is optional; empty string when not valid
    codetoken, _ = token_is_valid()
    dl_url = get_agent_url(goarch=goarch, plat=AgentPlat.WINDOWS, token=codetoken)
    data = {
        "client": client,
        "site": site,
        "agenttype": agent_type,
        "rdp": str(rdp),
        "ping": str(ping),
        "power": str(power),
        "goarch": goarch,
        "token": token,
        "inno": inno,
        "url": dl_url,
        "api": api,
        "codesigntoken": codetoken,
    }
    headers = {"Content-type": "application/json"}
    with tempfile.NamedTemporaryFile() as fp:
        try:
            r = requests.post(
                settings.EXE_GEN_URL,
                json=data,
                headers=headers,
                stream=True,
                timeout=900,  # generation can be slow; allow up to 15 minutes
            )
        except Exception as e:
            DebugLog.error(message=str(e))
            return notify_error(
                "Something went wrong. Check debug error log for exact error message"
            )
        # stream the generated exe to disk in 1 KiB chunks
        with open(fp.name, "wb") as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
        del r
        # NOTE(review): returning inside the `with` means NamedTemporaryFile
        # unlinks the path on exit, but the FileResponse already holds an open
        # fd — presumably relies on POSIX unlink-while-open semantics; confirm
        # this is never run on Windows.
        return FileResponse(open(fp.name, "rb"), as_attachment=True, filename=file_name)
189,010 | import json
import os
import subprocess
import tempfile
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING, List, Literal, Optional, Union
from zoneinfo import ZoneInfo
import requests
from channels.auth import AuthMiddlewareStack
from channels.db import database_sync_to_async
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.http import FileResponse
from knox.auth import TokenAuthentication
from rest_framework.response import Response
from agents.models import Agent
from core.utils import get_core_settings, token_is_valid
from logs.models import DebugLog
from tacticalrmm.constants import (
MONTH_DAYS,
MONTHS,
REDIS_LOCK_EXPIRE,
WEEK_DAYS,
WEEKS,
AgentPlat,
CustomFieldType,
DebugLogType,
ScriptShell,
)
from tacticalrmm.helpers import (
get_certs,
get_nats_hosts,
get_nats_internal_protocol,
get_nats_ports,
notify_error,
)
def get_core_settings() -> "CoreSettings":
    """Fetch the singleton CoreSettings row, preferring the cache.

    Raises CoreSettingsNotFound when no row exists in the database.
    """
    from core.models import CORESETTINGS_CACHE_KEY, CoreSettings

    cached = cache.get(CORESETTINGS_CACHE_KEY)
    if cached and isinstance(cached, CoreSettings):
        return cached
    coresettings = CoreSettings.objects.first()
    if not coresettings:
        raise CoreSettingsNotFound("CoreSettings not found.")
    # repopulate the cache for 10 minutes
    cache.set(CORESETTINGS_CACHE_KEY, coresettings, 600)
    return cast(CoreSettings, coresettings)
def get_default_timezone():
    """Return a ZoneInfo for the core settings' default time zone."""
    tz_name = get_core_settings().default_time_zone
    return ZoneInfo(tz_name)
189,011 | import json
import os
import subprocess
import tempfile
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING, List, Literal, Optional, Union
from zoneinfo import ZoneInfo
import requests
from channels.auth import AuthMiddlewareStack
from channels.db import database_sync_to_async
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.http import FileResponse
from knox.auth import TokenAuthentication
from rest_framework.response import Response
from agents.models import Agent
from core.utils import get_core_settings, token_is_valid
from logs.models import DebugLog
from tacticalrmm.constants import (
MONTH_DAYS,
MONTHS,
REDIS_LOCK_EXPIRE,
WEEK_DAYS,
WEEKS,
AgentPlat,
CustomFieldType,
DebugLogType,
ScriptShell,
)
from tacticalrmm.helpers import (
get_certs,
get_nats_hosts,
get_nats_internal_protocol,
get_nats_ports,
notify_error,
)
def get_bit_days(days: list[str]) -> int:
    """Combine a list of weekday names into a WEEK_DAYS bitmask.

    Unknown day names contribute nothing (bit value 0).
    """
    mask = 0
    for day_name in days:
        mask = mask | WEEK_DAYS.get(day_name, 0)
    return mask
189,012 | import json
import os
import subprocess
import tempfile
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING, List, Literal, Optional, Union
from zoneinfo import ZoneInfo
import requests
from channels.auth import AuthMiddlewareStack
from channels.db import database_sync_to_async
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.http import FileResponse
from knox.auth import TokenAuthentication
from rest_framework.response import Response
from agents.models import Agent
from core.utils import get_core_settings, token_is_valid
from logs.models import DebugLog
from tacticalrmm.constants import (
MONTH_DAYS,
MONTHS,
REDIS_LOCK_EXPIRE,
WEEK_DAYS,
WEEKS,
AgentPlat,
CustomFieldType,
DebugLogType,
ScriptShell,
)
from tacticalrmm.helpers import (
get_certs,
get_nats_hosts,
get_nats_internal_protocol,
get_nats_ports,
notify_error,
)
def bitdays_to_string(day: int) -> str:
    """Render a WEEK_DAYS bitmask as a comma-separated list of day names."""
    if day == 127:
        # all seven day bits set
        return "Every day"
    return ", ".join(name for name, bit in WEEK_DAYS.items() if day & bit)
189,013 | import json
import os
import subprocess
import tempfile
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING, List, Literal, Optional, Union
from zoneinfo import ZoneInfo
import requests
from channels.auth import AuthMiddlewareStack
from channels.db import database_sync_to_async
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.http import FileResponse
from knox.auth import TokenAuthentication
from rest_framework.response import Response
from agents.models import Agent
from core.utils import get_core_settings, token_is_valid
from logs.models import DebugLog
from tacticalrmm.constants import (
MONTH_DAYS,
MONTHS,
REDIS_LOCK_EXPIRE,
WEEK_DAYS,
WEEKS,
AgentPlat,
CustomFieldType,
DebugLogType,
ScriptShell,
)
from tacticalrmm.helpers import (
get_certs,
get_nats_hosts,
get_nats_internal_protocol,
get_nats_ports,
notify_error,
)
def bitmonths_to_string(month: int) -> str:
    """Render a MONTHS bitmask as a comma-separated list of month names."""
    if month == 4095:
        # all twelve month bits set
        return "Every month"
    return ", ".join(name for name, bit in MONTHS.items() if month & bit)
189,014 | import json
import os
import subprocess
import tempfile
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING, List, Literal, Optional, Union
from zoneinfo import ZoneInfo
import requests
from channels.auth import AuthMiddlewareStack
from channels.db import database_sync_to_async
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.http import FileResponse
from knox.auth import TokenAuthentication
from rest_framework.response import Response
from agents.models import Agent
from core.utils import get_core_settings, token_is_valid
from logs.models import DebugLog
from tacticalrmm.constants import (
MONTH_DAYS,
MONTHS,
REDIS_LOCK_EXPIRE,
WEEK_DAYS,
WEEKS,
AgentPlat,
CustomFieldType,
DebugLogType,
ScriptShell,
)
from tacticalrmm.helpers import (
get_certs,
get_nats_hosts,
get_nats_internal_protocol,
get_nats_ports,
notify_error,
)
def bitweeks_to_string(week: int) -> str:
    """Render a WEEKS bitmask as a comma-separated list of week names."""
    if week == 31:
        # all five week bits set
        return "Every week"
    return ", ".join(name for name, bit in WEEKS.items() if week & bit)
189,015 | import json
import os
import subprocess
import tempfile
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING, List, Literal, Optional, Union
from zoneinfo import ZoneInfo
import requests
from channels.auth import AuthMiddlewareStack
from channels.db import database_sync_to_async
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.http import FileResponse
from knox.auth import TokenAuthentication
from rest_framework.response import Response
from agents.models import Agent
from core.utils import get_core_settings, token_is_valid
from logs.models import DebugLog
from tacticalrmm.constants import (
MONTH_DAYS,
MONTHS,
REDIS_LOCK_EXPIRE,
WEEK_DAYS,
WEEKS,
AgentPlat,
CustomFieldType,
DebugLogType,
ScriptShell,
)
from tacticalrmm.helpers import (
get_certs,
get_nats_hosts,
get_nats_internal_protocol,
get_nats_ports,
notify_error,
)
def bitmonthdays_to_string(day: int) -> str:
    """Render a MONTH_DAYS bitmask as a comma-separated list of day-of-month names."""
    if day == MONTH_DAYS["Last Day"]:
        return "Last day"
    if day in (2147483647, 4294967295):
        # full 31-bit / 32-bit masks mean every day of the month
        return "Every day"
    return ", ".join(name for name, bit in MONTH_DAYS.items() if day & bit)
189,016 | import json
import os
import subprocess
import tempfile
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING, List, Literal, Optional, Union
from zoneinfo import ZoneInfo
import requests
from channels.auth import AuthMiddlewareStack
from channels.db import database_sync_to_async
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.http import FileResponse
from knox.auth import TokenAuthentication
from rest_framework.response import Response
from agents.models import Agent
from core.utils import get_core_settings, token_is_valid
from logs.models import DebugLog
from tacticalrmm.constants import (
MONTH_DAYS,
MONTHS,
REDIS_LOCK_EXPIRE,
WEEK_DAYS,
WEEKS,
AgentPlat,
CustomFieldType,
DebugLogType,
ScriptShell,
)
from tacticalrmm.helpers import (
get_certs,
get_nats_hosts,
get_nats_internal_protocol,
get_nats_ports,
notify_error,
)
def convert_to_iso_duration(string: str) -> str:
    """Convert shorthand like '30m', '2h', or '1d6h' to ISO 8601 duration form.

    A 'D' component moves before the time designator ('1d6h' -> 'P1DT6H');
    otherwise everything is a time component ('30m' -> 'PT30M').
    """
    upper = string.upper()
    if "D" not in upper:
        return f"PT{upper}"
    return f"P{upper.replace('D', 'DT')}"
189,017 | import json
import os
import subprocess
import tempfile
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING, List, Literal, Optional, Union
from zoneinfo import ZoneInfo
import requests
from channels.auth import AuthMiddlewareStack
from channels.db import database_sync_to_async
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.http import FileResponse
from knox.auth import TokenAuthentication
from rest_framework.response import Response
from agents.models import Agent
from core.utils import get_core_settings, token_is_valid
from logs.models import DebugLog
from tacticalrmm.constants import (
MONTH_DAYS,
MONTHS,
REDIS_LOCK_EXPIRE,
WEEK_DAYS,
WEEKS,
AgentPlat,
CustomFieldType,
DebugLogType,
ScriptShell,
)
from tacticalrmm.helpers import (
get_certs,
get_nats_hosts,
get_nats_internal_protocol,
get_nats_ports,
notify_error,
)
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
def save(self, *args, **kwargs):
# prevent recursion since calling set_alert_template() also calls save()
if not hasattr(self, "_processing_set_alert_template"):
self._processing_set_alert_template = False
if self.pk and not self._processing_set_alert_template:
orig = Agent.objects.get(pk=self.pk)
mon_type_changed = self.monitoring_type != orig.monitoring_type
site_changed = self.site_id != orig.site_id
if mon_type_changed or site_changed:
self._processing_set_alert_template = True
self.set_alert_template()
self._processing_set_alert_template = False
super().save(*args, **kwargs)
def client(self) -> "Client":
return self.site.client
def timezone(self) -> str:
# return the default timezone unless the timezone is explicity set per agent
if self.time_zone:
return self.time_zone
return get_core_settings().default_time_zone
def is_posix(self) -> bool:
return self.plat in {AgentPlat.LINUX, AgentPlat.DARWIN}
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
ver = settings.LATEST_AGENT_VER
if not self.goarch:
DebugLog.warning(
agent=self,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
)
return "noarch"
if pyver.parse(self.version) <= pyver.parse("1.3.0"):
return "not supported"
url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"
if not force:
if self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).exists():
self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).delete()
PendingAction.objects.create(
agent=self,
action_type=PAAction.AGENT_UPDATE,
details={
"url": url,
"version": ver,
"inno": bin,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": ver,
"inno": bin,
},
}
asyncio.run(self.nats_cmd(nats_data, wait=False))
return "created"
def status(self) -> str:
now = djangotime.now()
offline = now - djangotime.timedelta(minutes=self.offline_time)
overdue = now - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return AGENT_STATUS_OFFLINE
elif (self.last_seen < offline) and (self.last_seen < overdue):
return AGENT_STATUS_OVERDUE
else:
return AGENT_STATUS_ONLINE
else:
return AGENT_STATUS_OFFLINE
def checks(self) -> Dict[str, Any]:
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
if (
not hasattr(check.check_result, "status")
or isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.PASSING
):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
)
else check.alert_severity
)
if alert_severity == AlertSeverity.ERROR:
failing += 1
elif alert_severity == AlertSeverity.WARNING:
warning += 1
elif alert_severity == AlertSeverity.INFO:
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
def pending_actions_count(self) -> int:
ret = cache.get(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}")
if ret is None:
ret = self.pendingactions.filter(status=PAStatus.PENDING).count()
cache.set(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}", ret, 600)
return ret
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
name = [x["Name"] for x in cpu if "Name" in x][0]
lp, nc = "", ""
with suppress(Exception):
lp = [
x["NumberOfLogicalProcessors"]
for x in cpu
if "NumberOfCores" in x
][0]
nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
if lp and nc:
cpu_string = f"{name}, {nc}C/{lp}T"
else:
cpu_string = name
ret.append(cpu_string)
return ret
except:
return ["unknown cpu model"]
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
# only return this if no other graphics cards
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
with suppress(Exception):
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
with suppress(Exception):
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
def serial_number(self) -> str:
if self.is_posix:
try:
return self.wmi_detail["serialnumber"]
except:
return ""
try:
return self.wmi_detail["bios"][0][0]["SerialNumber"]
except:
return ""
def hex_mesh_node_id(self) -> str:
return _b64_to_hex(self.mesh_node_id)
def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == AGENT_STATUS_ONLINE
]
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if i.status == AGENT_STATUS_ONLINE
]
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
    """Return the four policy slots that can apply to this agent, keyed by
    "agent_policy", "site_policy", "client_policy" and "default_policy".

    A slot is None when no policy is set there, the agent is excluded from
    that policy, or inheritance is blocked at the agent/site/client level
    (each level also blocks the levels above it).
    """
    from checks.models import Check

    site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
    client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
    default_policy = getattr(
        get_core_settings(), f"{self.monitoring_type}_policy", None
    )

    # prefetch excluded objects on policies, only for policies that are not None
    models.prefetch_related_objects(
        [
            policy
            for policy in (self.policy, site_policy, client_policy, default_policy)
            if policy
        ],
        "excluded_agents",
        "excluded_sites",
        "excluded_clients",
        models.Prefetch(
            "policychecks", queryset=Check.objects.select_related("script")
        ),
        "autotasks",
    )

    return {
        "agent_policy": (
            self.policy
            if self.policy and not self.policy.is_agent_excluded(self)
            else None
        ),
        "site_policy": (
            site_policy
            if (site_policy and not site_policy.is_agent_excluded(self))
            and not self.block_policy_inheritance
            else None
        ),
        "client_policy": (
            client_policy
            if (client_policy and not client_policy.is_agent_excluded(self))
            and not self.block_policy_inheritance
            and not self.site.block_policy_inheritance
            else None
        ),
        "default_policy": (
            default_policy
            if (default_policy and not default_policy.is_agent_excluded(self))
            and not self.block_policy_inheritance
            and not self.site.block_policy_inheritance
            and not self.client.block_policy_inheritance
            else None
        ),
    }
def check_run_interval(self) -> int:
    """Effective check-run interval in seconds: the agent default, lowered
    to the smallest custom per-check interval (floored at 15s)."""
    interval = self.check_interval
    # determine if any agent checks have a custom interval and use the lowest
    for check in self.get_checks_with_policies():
        custom = check.run_interval
        if custom and custom < interval:
            # never run checks more often than every 15 seconds
            interval = max(custom, 15)

    return interval
def run_script(
    self,
    scriptpk: int,
    args: Optional[List[str]] = None,
    timeout: int = 120,
    full: bool = False,
    wait: bool = False,
    run_on_any: bool = False,
    history_pk: int = 0,
    run_as_user: bool = False,
    env_vars: Optional[list[str]] = None,
) -> Any:
    """Run a saved Script on this agent over NATS.

    Args:
        scriptpk: pk of the Script to run.
        args / env_vars: optional script args / env vars (placeholders are
            resolved via the Script's parse helpers before sending).
        timeout: seconds the agent allows the script to run.
        full: request the full run payload ("runscriptfull") vs stdout only.
        wait: block for and return the agent's response.
        run_on_any: if this agent doesn't answer a ping, run on any online agent.
        history_pk: existing history row id to attach the result to (0 = none).
        run_as_user: run in the logged-in user's context (forced True when
            set on the Script itself).

    Returns the agent response when wait=True, "ok" when fired async, or an
    error string when no online agent could be found.
    """
    from scripts.models import Script

    # fix: the old signature used mutable default lists ([]), which are
    # shared between calls
    args = [] if args is None else args
    env_vars = [] if env_vars is None else env_vars

    script = Script.objects.get(pk=scriptpk)

    # always override if set on script model
    if script.run_as_user:
        run_as_user = True

    parsed_args = script.parse_script_args(self, script.shell, args)
    parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)

    data = {
        "func": "runscriptfull" if full else "runscript",
        "timeout": timeout,
        "script_args": parsed_args,
        "payload": {
            "code": script.code,
            "shell": script.shell,
        },
        "run_as_user": run_as_user,
        "env_vars": parsed_env_vars,
        "nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
        "deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
    }

    if history_pk != 0:
        data["id"] = history_pk

    running_agent = self
    if run_on_any:
        nats_ping = {"func": "ping"}

        # try on self first
        r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))

        if r == "pong":
            running_agent = self
        else:
            for agent in Agent.online_agents():
                r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
                if r == "pong":
                    running_agent = agent
                    break

            # still self here means neither self nor any other agent ponged
            if running_agent.pk == self.pk:
                return "Unable to find an online agent"

    if wait:
        return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
    else:
        asyncio.run(running_agent.nats_cmd(data, wait=False))

    return "ok"
# auto approves updates
def approve_updates(self) -> None:
    """Auto-approve pending Windows updates whose severity the effective
    patch policy marks as "approve"."""
    policy = self.get_patch_policy()

    # map policy fields to the severity labels stored on WinUpdate rows
    # ("" covers updates with no severity)
    approved_severities = [
        label
        for field, label in (
            ("critical", "Critical"),
            ("important", "Important"),
            ("moderate", "Moderate"),
            ("low", "Low"),
            ("other", ""),
        )
        if getattr(policy, field) == "approve"
    ]

    self.winupdates.filter(
        severity__in=approved_severities, installed=False
    ).exclude(action="approve").update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
    """Return the effective Windows-update policy for this agent.

    Takes the highest-priority applied policy's WinUpdatePolicy (per
    get_agent_policies() ordering), then overlays every agent-level
    setting that is not "inherit". Falls back to the agent's own policy
    row (created lazily) when no applied policy has one.
    """
    from winupdate.models import WinUpdatePolicy

    # check if site has a patch policy and if so use it
    patch_policy = None
    agent_policy = self.winupdatepolicy.first()

    # every agent gets a WinUpdatePolicy row; create one on first use
    if not agent_policy:
        agent_policy = WinUpdatePolicy.objects.create(agent=self)

    # Get the list of policies applied to the agent and select the
    # highest priority one.
    policies = self.get_agent_policies()
    for _, policy in policies.items():
        if policy and policy.active and policy.winupdatepolicy.exists():
            patch_policy = policy.winupdatepolicy.first()
            break

    # if policy still doesn't exist return the agent patch policy
    if not patch_policy:
        return agent_policy

    # patch policy exists. check if any agent settings are set to override patch policy
    if agent_policy.critical != "inherit":
        patch_policy.critical = agent_policy.critical

    if agent_policy.important != "inherit":
        patch_policy.important = agent_policy.important

    if agent_policy.moderate != "inherit":
        patch_policy.moderate = agent_policy.moderate

    if agent_policy.low != "inherit":
        patch_policy.low = agent_policy.low

    if agent_policy.other != "inherit":
        patch_policy.other = agent_policy.other

    # schedule settings travel together with the frequency override
    if agent_policy.run_time_frequency != "inherit":
        patch_policy.run_time_frequency = agent_policy.run_time_frequency
        patch_policy.run_time_hour = agent_policy.run_time_hour
        patch_policy.run_time_days = agent_policy.run_time_days

    if agent_policy.reboot_after_install != "inherit":
        patch_policy.reboot_after_install = agent_policy.reboot_after_install

    if not agent_policy.reprocess_failed_inherit:
        patch_policy.reprocess_failed = agent_policy.reprocess_failed
        patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
        patch_policy.email_if_fail = agent_policy.email_if_fail

    return patch_policy
def get_approved_update_guids(self) -> list[str]:
    """GUIDs of updates approved but not yet installed; sent to the agent
    when it runs Windows-update installs."""
    return list(
        self.winupdates.filter(action="approve", installed=False).values_list(  # type: ignore
            "guid", flat=True
        )
    )
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
    """Assign and persist this agent's alert template.

    Walks the policy slots from get_agent_policies() in priority order:
    the global default template overrides a default policy, an applied
    policy's template wins for its slot, otherwise the site's then the
    client's own template applies. Saves and returns the chosen template,
    or None when nothing applies / the agent is excluded everywhere.
    """
    core = get_core_settings()

    policies = self.get_agent_policies()

    # loop through all policies applied to agent and return an alert_template if found
    processed_policies: List[int] = []
    for key, policy in policies.items():
        # default alert_template will override a default policy with alert template applied
        if (
            "default" in key
            and core.alert_template
            and core.alert_template.is_active
            and not core.alert_template.is_agent_excluded(self)
        ):
            self.alert_template = core.alert_template
            self.save(update_fields=["alert_template"])
            return core.alert_template
        elif (
            policy
            and policy.active
            and policy.pk not in processed_policies
            and policy.alert_template
            and policy.alert_template.is_active
            and not policy.alert_template.is_agent_excluded(self)
        ):
            self.alert_template = policy.alert_template
            self.save(update_fields=["alert_template"])
            return policy.alert_template
        elif (
            "site" in key
            and self.site.alert_template
            and self.site.alert_template.is_active
            and not self.site.alert_template.is_agent_excluded(self)
        ):
            self.alert_template = self.site.alert_template
            self.save(update_fields=["alert_template"])
            return self.site.alert_template
        elif (
            "client" in key
            and self.site.client.alert_template
            and self.site.client.alert_template.is_active
            and not self.site.client.alert_template.is_agent_excluded(self)
        ):
            self.alert_template = self.site.client.alert_template
            self.save(update_fields=["alert_template"])
            return self.site.client.alert_template

    # no alert templates found or agent has been excluded
    self.alert_template = None
    self.save(update_fields=["alert_template"])
    return None
def get_or_create_alert_if_needed(
    self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
    """Return the open availability alert for this agent, creating one
    only when should_create_alert(alert_template) allows it."""
    from alerts.models import Alert

    return Alert.create_or_return_availability_alert(
        self, skip_create=not self.should_create_alert(alert_template)
    )
def get_checks_from_policies(self) -> "List[Check]":
    """Return policy-inherited checks for this agent, cached for 10 min.

    Agents that block inheritance or have their own checks get a
    per-agent cache key, since policy checks can be overridden per agent.
    """
    from automation.models import Policy

    # check if agent is blocking inheritance
    if self.block_policy_inheritance or self.agentchecks.exists():
        cache_key = f"agent_{self.agent_id}_checks"
    elif self.policy:
        cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
    else:
        cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"

    cached_checks = cache.get(cache_key)

    if isinstance(cached_checks, list):
        return cached_checks
    else:
        # clear agent checks that have overridden_by_policy set
        self.agentchecks.update(overridden_by_policy=False)  # type: ignore

        # get agent checks based on policies
        checks = Policy.get_policy_checks(self)
        cache.set(cache_key, checks, 600)
        return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
    """Return policy-inherited automated tasks for this agent, cached for
    10 minutes under a key scoped to the inheritance situation."""
    from automation.models import Policy

    # per-agent when inheritance is blocked; otherwise shared per
    # site/monitoring-type/platform (and direct policy, when assigned)
    if self.block_policy_inheritance:
        cache_key = f"agent_{self.agent_id}_tasks"
    elif self.policy:
        cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
    else:
        cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"

    cached = cache.get(cache_key)
    if isinstance(cached, list):
        return cached

    tasks = Policy.get_policy_tasks(self)
    cache.set(cache_key, tasks, 600)
    return tasks
async def nats_cmd(
    self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
    """Send *data* (msgpack-encoded) to this agent's NATS subject.

    wait=True does request/reply and returns the decoded response,
    "timeout" when the agent does not answer, or "natsdown" when the NATS
    server is unreachable. wait=False publishes fire-and-forget and
    returns None.
    """
    opts = setup_nats_options()
    try:
        nc = await nats.connect(**opts)
    except Exception:
        # fix: was a bare except (also swallowed KeyboardInterrupt/SystemExit)
        return "natsdown"

    if wait:
        try:
            msg = await nc.request(
                self.agent_id, msgpack.dumps(data), timeout=timeout
            )
        except TimeoutError:
            ret = "timeout"
        else:
            try:
                ret = msgpack.loads(msg.data)
            except Exception as e:
                # agent replied with something we could not decode
                ret = str(e)
                logger.error(e)

        await nc.close()
        return ret
    else:
        await nc.publish(self.agent_id, msgpack.dumps(data))
        await nc.flush()
        await nc.close()
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
    """
    Attempt to recover the agent.

    mode "tacagent": restart the tactical agent service by sending a
    platform-specific command through MeshCentral (fire-and-forget).
    mode "mesh": ask the agent over NATS to recover its mesh agent.

    Return type: tuple(message: str, error: bool)
    """
    if mode == "tacagent":
        if self.plat == AgentPlat.LINUX:
            cmd = "systemctl restart tacticalagent.service"
            shell = 3
        elif self.plat == AgentPlat.DARWIN:
            cmd = "launchctl kickstart -k system/tacticalagent"
            shell = 3
        else:
            # windows: full stop/kill/start cycle
            cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
            shell = 1

        asyncio.run(
            send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
        )
        # sent via mesh with no reply channel; assume success
        return "ok", False

    elif mode == "mesh":
        data = {"func": "recover", "payload": {"mode": mode}}
        if wait:
            r = asyncio.run(self.nats_cmd(data, timeout=20))
            if r == "ok":
                return "ok", False
            else:
                return str(r), True
        else:
            asyncio.run(self.nats_cmd(data, timeout=20, wait=False))

        return "ok", False

    return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
    # serializes the agent and returns json (used for audit logging)
    # NOTE(review): takes `agent` rather than `self` — presumably a
    # @staticmethod in the original source (decorators are missing from
    # this extract); confirm against upstream
    from .serializers import AgentAuditSerializer

    return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
    """Remove duplicate WinUpdate rows for the same KB, keeping only the
    newest version as parsed from the update title. Best-effort: any
    unexpected failure is suppressed entirely."""
    with suppress(Exception):
        pks = []  # list of pks to delete
        kbs = list(self.winupdates.values_list("kb", flat=True))
        d = Counter(kbs)
        dupes = [k for k, v in d.items() if v > 1]

        for dupe in dupes:
            titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
            # extract the version from the title and sort from oldest to newest
            # skip if no version info is available therefore nothing to parse
            try:
                # titles look like "... (Version 1.2.3)" — "Versão" covers
                # Portuguese-localized titles
                matches = r"(Version|Versão)"
                pattern = r"\(" + matches + r"(.*?)\)"
                vers = [
                    re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
                    for i in titles
                ]
                sorted_vers = sorted(vers, key=LooseVersion)
            except:
                # re.search returned None (no version info) -> nothing to dedupe
                continue

            # append all but the latest version to our list of pks to delete
            for ver in sorted_vers[:-1]:
                q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
                pks.append(q.first().pk)

        pks = list(set(pks))
        self.winupdates.filter(pk__in=pks).delete()
def should_create_alert(
    self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
    """True when any overdue-alert flag is set on the agent, or the given
    template forces always-on agent alerts."""
    if (
        self.overdue_dashboard_alert
        or self.overdue_email_alert
        or self.overdue_text_alert
    ):
        return True

    if not alert_template:
        return False

    return bool(
        alert_template.agent_always_alert
        or alert_template.agent_always_email
        or alert_template.agent_always_text
    )
def send_outage_email(self) -> None:
    """Email an "agent data overdue" notification via core settings."""
    CORE = get_core_settings()
    CORE.send_mail(
        f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
        (
            f"Data has not been received from client {self.client.name}, "
            f"site {self.site.name}, "
            f"agent {self.hostname} "
            "within the expected time."
        ),
        alert_template=self.alert_template,
    )
def send_recovery_email(self) -> None:
    """Email an "agent data received again" recovery notification."""
    CORE = get_core_settings()
    CORE.send_mail(
        f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
        (
            f"Data has been received from client {self.client.name}, "
            f"site {self.site.name}, "
            f"agent {self.hostname} "
            "after an interruption in data transmission."
        ),
        alert_template=self.alert_template,
    )
def send_outage_sms(self) -> None:
    """SMS an "agent data overdue" notification via core settings."""
    CORE = get_core_settings()
    CORE.send_sms(
        f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
        alert_template=self.alert_template,
    )
def send_recovery_sms(self) -> None:
    """SMS an "agent data received again" recovery notification."""
    CORE = get_core_settings()
    CORE.send_sms(
        f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
        alert_template=self.alert_template,
    )
class DebugLog(models.Model):
    """Server-side debug log entries, optionally tied to an agent.

    Use the info/warning/error/critical helpers; each writes a row only
    when the globally-configured debug level includes that severity.
    """

    objects = PermissionQuerySet.as_manager()

    id = models.BigAutoField(primary_key=True)
    entry_time = models.DateTimeField(auto_now_add=True)
    agent = models.ForeignKey(
        "agents.Agent",
        related_name="debuglogs",
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    log_level = models.CharField(
        max_length=50, choices=DebugLogLevel.choices, default=DebugLogLevel.INFO
    )
    log_type = models.CharField(
        max_length=50, choices=DebugLogType.choices, default=DebugLogType.SYSTEM_ISSUES
    )
    message = models.TextField(null=True, blank=True)

    # @classmethod restored: callers invoke these directly on the class
    # (e.g. DebugLog.critical(...) in reload_nats), which requires bound cls
    @classmethod
    def info(
        cls,
        message: str,
        agent: "Optional[Agent]" = None,
        log_type: str = DebugLogType.SYSTEM_ISSUES,
    ) -> None:
        if get_debug_level() == DebugLogLevel.INFO:
            cls.objects.create(
                log_level=DebugLogLevel.INFO,
                agent=agent,
                log_type=log_type,
                message=message,
            )

    @classmethod
    def warning(
        cls,
        message: str,
        agent: "Optional[Agent]" = None,
        log_type: str = DebugLogType.SYSTEM_ISSUES,
    ) -> None:
        if get_debug_level() in (DebugLogLevel.INFO, DebugLogLevel.WARN):
            cls.objects.create(
                # BUG FIX: warnings were stored with log_level INFO
                log_level=DebugLogLevel.WARN,
                agent=agent,
                log_type=log_type,
                message=message,
            )

    @classmethod
    def error(
        cls,
        message: str,
        agent: "Optional[Agent]" = None,
        log_type: str = DebugLogType.SYSTEM_ISSUES,
    ) -> None:
        if get_debug_level() in (
            DebugLogLevel.INFO,
            DebugLogLevel.WARN,
            DebugLogLevel.ERROR,
        ):
            cls.objects.create(
                log_level=DebugLogLevel.ERROR,
                agent=agent,
                log_type=log_type,
                message=message,
            )

    @classmethod
    def critical(
        cls,
        message: str,
        agent: "Optional[Agent]" = None,
        log_type: str = DebugLogType.SYSTEM_ISSUES,
    ) -> None:
        if get_debug_level() in (
            DebugLogLevel.INFO,
            DebugLogLevel.WARN,
            DebugLogLevel.ERROR,
            DebugLogLevel.CRITICAL,
        ):
            cls.objects.create(
                log_level=DebugLogLevel.CRITICAL,
                agent=agent,
                log_type=log_type,
                message=message,
            )
def reload_nats() -> None:
    """Regenerate the NATS server config (nats-rmm.conf) with one NATS
    user per agent, then signal nats-server to reload it."""
    users = [
        {
            "user": "tacticalrmm",
            "password": settings.SECRET_KEY,
            "permissions": {"publish": ">", "subscribe": ">"},
        }
    ]

    agents = Agent.objects.prefetch_related("user").only(
        "pk", "agent_id"
    )  # type:ignore
    for agent in agents:
        try:
            users.append(
                {
                    "user": agent.agent_id,
                    "password": agent.user.auth_token.key,
                    "permissions": {
                        # each agent may only publish/subscribe on its own subject
                        "publish": {"allow": agent.agent_id},
                        "subscribe": {"allow": agent.agent_id},
                        "allow_responses": {
                            "expires": getattr(
                                settings, "NATS_ALLOW_RESPONSE_EXPIRATION", "1435m"
                            )
                        },
                    },
                }
            )
        except:
            # agent has no linked user/auth token; log and keep going
            DebugLog.critical(
                agent=agent,
                log_type=DebugLogType.AGENT_ISSUES,
                message=f"{agent.hostname} does not have a user account, NATS will not work",
            )

    cert_file, key_file = get_certs()
    nats_std_host, nats_ws_host, _ = get_nats_hosts()
    nats_std_port, nats_ws_port = get_nats_ports()

    config = {
        "authorization": {"users": users},
        "max_payload": 67108864,
        "host": nats_std_host,
        "port": nats_std_port,  # internal only
        "websocket": {
            "host": nats_ws_host,
            "port": nats_ws_port,
            "no_tls": True,  # TLS is handled by nginx, so not needed here
        },
    }

    if get_nats_internal_protocol() == "tls":
        config["tls"] = {
            "cert_file": cert_file,
            "key_file": key_file,
        }

    # optional monitoring port / websocket compression via env or settings
    if "NATS_HTTP_PORT" in os.environ:
        config["http_port"] = int(os.getenv("NATS_HTTP_PORT"))  # type: ignore
    elif hasattr(settings, "NATS_HTTP_PORT"):
        config["http_port"] = settings.NATS_HTTP_PORT  # type: ignore

    if "NATS_WS_COMPRESSION" in os.environ or hasattr(settings, "NATS_WS_COMPRESSION"):
        config["websocket"]["compression"] = True

    conf = os.path.join(settings.BASE_DIR, "nats-rmm.conf")
    with open(conf, "w") as f:
        json.dump(config, f)

    if not settings.DOCKER_BUILD:
        time.sleep(0.5)
        # ask the running nats-server process to re-read its config
        subprocess.run(
            ["/usr/local/bin/nats-server", "-signal", "reload"], capture_output=True
        )
189,018 | import json
import os
import subprocess
import tempfile
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING, List, Literal, Optional, Union
from zoneinfo import ZoneInfo
import requests
from channels.auth import AuthMiddlewareStack
from channels.db import database_sync_to_async
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.http import FileResponse
from knox.auth import TokenAuthentication
from rest_framework.response import Response
from agents.models import Agent
from core.utils import get_core_settings, token_is_valid
from logs.models import DebugLog
from tacticalrmm.constants import (
MONTH_DAYS,
MONTHS,
REDIS_LOCK_EXPIRE,
WEEK_DAYS,
WEEKS,
AgentPlat,
CustomFieldType,
DebugLogType,
ScriptShell,
)
from tacticalrmm.helpers import (
get_certs,
get_nats_hosts,
get_nats_internal_protocol,
get_nats_ports,
notify_error,
)
def get_user(access_token):
    """Resolve a Django user from a knox token passed as an
    ``access_token=`` query-string value; any failure yields AnonymousUser."""
    try:
        raw_token = access_token.decode().split("access_token=")[1]
        result = TokenAuthentication().authenticate_credentials(raw_token.encode())
    except Exception:
        # missing/garbled/invalid token -> treat as unauthenticated
        return AnonymousUser()
    return result[0]
189,019 | import json
import os
import subprocess
import tempfile
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING, List, Literal, Optional, Union
from zoneinfo import ZoneInfo
import requests
from channels.auth import AuthMiddlewareStack
from channels.db import database_sync_to_async
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.http import FileResponse
from knox.auth import TokenAuthentication
from rest_framework.response import Response
from agents.models import Agent
from core.utils import get_core_settings, token_is_valid
from logs.models import DebugLog
from tacticalrmm.constants import (
MONTH_DAYS,
MONTHS,
REDIS_LOCK_EXPIRE,
WEEK_DAYS,
WEEKS,
AgentPlat,
CustomFieldType,
DebugLogType,
ScriptShell,
)
from tacticalrmm.helpers import (
get_certs,
get_nats_hosts,
get_nats_internal_protocol,
get_nats_ports,
notify_error,
)
class DebugLog(models.Model):
    """Server-side debug log entries, optionally tied to an agent.

    Use the info/warning/error/critical helpers; each writes a row only
    when the globally-configured debug level includes that severity.
    """

    objects = PermissionQuerySet.as_manager()

    id = models.BigAutoField(primary_key=True)
    entry_time = models.DateTimeField(auto_now_add=True)
    agent = models.ForeignKey(
        "agents.Agent",
        related_name="debuglogs",
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    log_level = models.CharField(
        max_length=50, choices=DebugLogLevel.choices, default=DebugLogLevel.INFO
    )
    log_type = models.CharField(
        max_length=50, choices=DebugLogType.choices, default=DebugLogType.SYSTEM_ISSUES
    )
    message = models.TextField(null=True, blank=True)

    # @classmethod restored: callers invoke these directly on the class,
    # which requires bound cls
    @classmethod
    def info(
        cls,
        message: str,
        agent: "Optional[Agent]" = None,
        log_type: str = DebugLogType.SYSTEM_ISSUES,
    ) -> None:
        if get_debug_level() == DebugLogLevel.INFO:
            cls.objects.create(
                log_level=DebugLogLevel.INFO,
                agent=agent,
                log_type=log_type,
                message=message,
            )

    @classmethod
    def warning(
        cls,
        message: str,
        agent: "Optional[Agent]" = None,
        log_type: str = DebugLogType.SYSTEM_ISSUES,
    ) -> None:
        if get_debug_level() in (DebugLogLevel.INFO, DebugLogLevel.WARN):
            cls.objects.create(
                # BUG FIX: warnings were stored with log_level INFO
                log_level=DebugLogLevel.WARN,
                agent=agent,
                log_type=log_type,
                message=message,
            )

    @classmethod
    def error(
        cls,
        message: str,
        agent: "Optional[Agent]" = None,
        log_type: str = DebugLogType.SYSTEM_ISSUES,
    ) -> None:
        if get_debug_level() in (
            DebugLogLevel.INFO,
            DebugLogLevel.WARN,
            DebugLogLevel.ERROR,
        ):
            cls.objects.create(
                log_level=DebugLogLevel.ERROR,
                agent=agent,
                log_type=log_type,
                message=message,
            )

    @classmethod
    def critical(
        cls,
        message: str,
        agent: "Optional[Agent]" = None,
        log_type: str = DebugLogType.SYSTEM_ISSUES,
    ) -> None:
        if get_debug_level() in (
            DebugLogLevel.INFO,
            DebugLogLevel.WARN,
            DebugLogLevel.ERROR,
            DebugLogLevel.CRITICAL,
        ):
            cls.objects.create(
                log_level=DebugLogLevel.CRITICAL,
                agent=agent,
                log_type=log_type,
                message=message,
            )
def get_latest_trmm_ver() -> str:
    """Fetch the latest released TRMM version from the upstream settings
    file on GitHub.

    Returns the version string, or "error" on any network or parse failure.
    """
    url = "https://raw.githubusercontent.com/amidaware/tacticalrmm/master/api/tacticalrmm/tacticalrmm/settings.py"
    try:
        r = requests.get(url, timeout=5)
    except Exception:
        # fix: was a bare except; network/DNS/timeout failure
        return "error"

    try:
        for line in r.text.splitlines():
            # settings.py contains a line like: TRMM_VERSION = "0.x.y"
            if "TRMM_VERSION" in line:
                return line.split(" ")[2].strip('"')
    except Exception as e:
        DebugLog.error(message=str(e))

    return "error"
189,020 | import json
import os
import subprocess
import tempfile
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING, List, Literal, Optional, Union
from zoneinfo import ZoneInfo
import requests
from channels.auth import AuthMiddlewareStack
from channels.db import database_sync_to_async
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.http import FileResponse
from knox.auth import TokenAuthentication
from rest_framework.response import Response
from agents.models import Agent
from core.utils import get_core_settings, token_is_valid
from logs.models import DebugLog
from tacticalrmm.constants import (
MONTH_DAYS,
MONTHS,
REDIS_LOCK_EXPIRE,
WEEK_DAYS,
WEEKS,
AgentPlat,
CustomFieldType,
DebugLogType,
ScriptShell,
)
from tacticalrmm.helpers import (
get_certs,
get_nats_hosts,
get_nats_internal_protocol,
get_nats_ports,
notify_error,
)
def get_db_value(
    *, string: str, instance: Optional[Union["Agent", "Client", "Site"]] = None
) -> Union[str, List[str], Literal[True], Literal[False], None]:
    """Resolve a dotted lookup string to a stored value.

    Supported forms:
      * "global.<name>"       -> GlobalKVStore entry
      * "<model>.<field>"     -> CustomField value for *instance*
      * "<model>.<attr>..."   -> chained attribute lookup on *instance*

    Returns None when the lookup cannot be resolved; callables are never
    returned.
    """
    from core.models import CustomField, GlobalKVStore

    # get properties into an array
    props = string.strip().split(".")

    # value is in the global keystore and replace value
    if props[0] == "global" and len(props) == 2:
        try:
            return GlobalKVStore.objects.get(name=props[1]).value
        except GlobalKVStore.DoesNotExist:
            DebugLog.error(
                log_type=DebugLogType.SCRIPTING,
                message=f"Couldn't lookup value for: {string}. Make sure it exists in CoreSettings > Key Store",
            )
            return None

    if not instance:
        # instance must be set if not global property
        return None

    # custom field lookup
    try:
        # looking up custom field directly on this instance
        if len(props) == 2:
            field = CustomField.objects.get(model=props[0], name=props[1])
            model_fields = getattr(field, f"{props[0]}_fields")
            try:
                # resolve the correct model id
                if props[0] != instance.__class__.__name__.lower():
                    # e.g. resolving a client-level field from an agent instance
                    value = model_fields.get(
                        **{props[0]: getattr(instance, props[0])}
                    ).value
                else:
                    value = model_fields.get(**{f"{props[0]}_id": instance.id}).value

                if field.type != CustomFieldType.CHECKBOX:
                    # fall back to the field default when unset/empty
                    if value:
                        return value
                    else:
                        return field.default_value
                else:
                    return bool(value)
            except:
                # no stored value for this instance -> use the default
                return (
                    field.default_value
                    if field.type != CustomFieldType.CHECKBOX
                    else bool(field.default_value)
                )
    except CustomField.DoesNotExist:
        pass

    # if the instance is the same as the first prop. We remove it.
    if props[0] == instance.__class__.__name__.lower():
        del props[0]

    instance_value = instance
    # look through all properties and return the value
    for prop in props:
        if hasattr(instance_value, prop):
            value = getattr(instance_value, prop)
            if callable(value):
                # never expose methods through lookups
                return None
            instance_value = value

    if not instance_value:
        return None

    return instance_value
def format_shell_array(value: list[str]) -> str:
    """Join *value* into a comma-separated string with no trailing comma."""
    # str.join replaces the old manual concat-and-strip loop, which also
    # accidentally stripped a leading comma inside the first element
    return ",".join(value)
def format_shell_bool(value: bool, shell: Optional[str]) -> str:
    """Render a boolean as a shell literal: "$True"/"$False" for
    PowerShell, "1"/"0" for every other shell."""
    if shell == ScriptShell.POWERSHELL:
        return "$True" if value else "$False"

    return "1" if value else "0"
class DebugLog(models.Model):
    """Server-side debug log entries, optionally tied to an agent.

    Use the info/warning/error/critical helpers; each writes a row only
    when the globally-configured debug level includes that severity.
    """

    objects = PermissionQuerySet.as_manager()

    id = models.BigAutoField(primary_key=True)
    entry_time = models.DateTimeField(auto_now_add=True)
    agent = models.ForeignKey(
        "agents.Agent",
        related_name="debuglogs",
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    log_level = models.CharField(
        max_length=50, choices=DebugLogLevel.choices, default=DebugLogLevel.INFO
    )
    log_type = models.CharField(
        max_length=50, choices=DebugLogType.choices, default=DebugLogType.SYSTEM_ISSUES
    )
    message = models.TextField(null=True, blank=True)

    # @classmethod restored: callers invoke these directly on the class,
    # which requires bound cls
    @classmethod
    def info(
        cls,
        message: str,
        agent: "Optional[Agent]" = None,
        log_type: str = DebugLogType.SYSTEM_ISSUES,
    ) -> None:
        if get_debug_level() == DebugLogLevel.INFO:
            cls.objects.create(
                log_level=DebugLogLevel.INFO,
                agent=agent,
                log_type=log_type,
                message=message,
            )

    @classmethod
    def warning(
        cls,
        message: str,
        agent: "Optional[Agent]" = None,
        log_type: str = DebugLogType.SYSTEM_ISSUES,
    ) -> None:
        if get_debug_level() in (DebugLogLevel.INFO, DebugLogLevel.WARN):
            cls.objects.create(
                # BUG FIX: warnings were stored with log_level INFO
                log_level=DebugLogLevel.WARN,
                agent=agent,
                log_type=log_type,
                message=message,
            )

    @classmethod
    def error(
        cls,
        message: str,
        agent: "Optional[Agent]" = None,
        log_type: str = DebugLogType.SYSTEM_ISSUES,
    ) -> None:
        if get_debug_level() in (
            DebugLogLevel.INFO,
            DebugLogLevel.WARN,
            DebugLogLevel.ERROR,
        ):
            cls.objects.create(
                log_level=DebugLogLevel.ERROR,
                agent=agent,
                log_type=log_type,
                message=message,
            )

    @classmethod
    def critical(
        cls,
        message: str,
        agent: "Optional[Agent]" = None,
        log_type: str = DebugLogType.SYSTEM_ISSUES,
    ) -> None:
        if get_debug_level() in (
            DebugLogLevel.INFO,
            DebugLogLevel.WARN,
            DebugLogLevel.ERROR,
            DebugLogLevel.CRITICAL,
        ):
            cls.objects.create(
                log_level=DebugLogLevel.CRITICAL,
                agent=agent,
                log_type=log_type,
                message=message,
            )
def replace_arg_db_values(
    string: str, instance=None, shell: str = None, quotes=True  # type:ignore
) -> Union[str, None]:
    """Resolve a script-argument lookup string to a shell-ready value.

    Strings/lists are optionally single-quoted, booleans are rendered per
    shell, dicts are JSON-encoded, everything else is str()'d. A failed
    lookup logs an error and yields "".
    """
    # resolve the value
    value = get_db_value(string=string, instance=instance)

    if value is None:
        DebugLog.error(
            log_type=DebugLogType.SCRIPTING,
            message=f"Couldn't lookup value for: {string}. Make sure it exists",
        )
        return ""

    if isinstance(value, str):
        # escape embedded single quotes for powershell
        if shell == ScriptShell.POWERSHELL and "'" in value:
            value = value.replace("'", "''")
        return f"'{value}'" if quotes else value

    if isinstance(value, list):
        joined = format_shell_array(value)
        return f"'{joined}'" if quotes else joined

    if value is True or value is False:
        return format_shell_bool(value, shell)

    if isinstance(value, dict):
        return json.dumps(value)

    # everything else: best-effort string conversion
    try:
        return str(value)
    except Exception:
        return ""
189,021 | import json
import os
import subprocess
import tempfile
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING, List, Literal, Optional, Union
from zoneinfo import ZoneInfo
import requests
from channels.auth import AuthMiddlewareStack
from channels.db import database_sync_to_async
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.http import FileResponse
from knox.auth import TokenAuthentication
from rest_framework.response import Response
from agents.models import Agent
from core.utils import get_core_settings, token_is_valid
from logs.models import DebugLog
from tacticalrmm.constants import (
MONTH_DAYS,
MONTHS,
REDIS_LOCK_EXPIRE,
WEEK_DAYS,
WEEKS,
AgentPlat,
CustomFieldType,
DebugLogType,
ScriptShell,
)
from tacticalrmm.helpers import (
get_certs,
get_nats_hosts,
get_nats_internal_protocol,
get_nats_ports,
notify_error,
)
def redis_lock(lock_id, oid):
    """Distributed lock backed by Django's cache (generator; presumably
    wrapped with @contextmanager in the original source — decorators are
    missing from this extract, confirm upstream).

    Yields True when the lock was acquired. The key auto-expires after
    REDIS_LOCK_EXPIRE seconds; on exit the lock is deleted only if we hold
    it and it has not expired yet, so we never free someone else's lock.
    """
    timeout_at = time.monotonic() + REDIS_LOCK_EXPIRE - 3
    # cache.add is atomic: it returns False when the key already exists
    status = cache.add(lock_id, oid, REDIS_LOCK_EXPIRE)
    try:
        yield status
    finally:
        if time.monotonic() < timeout_at and status:
            cache.delete(lock_id)
189,022 | import json
import os
import subprocess
import tempfile
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING, List, Literal, Optional, Union
from zoneinfo import ZoneInfo
import requests
from channels.auth import AuthMiddlewareStack
from channels.db import database_sync_to_async
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.http import FileResponse
from knox.auth import TokenAuthentication
from rest_framework.response import Response
from agents.models import Agent
from core.utils import get_core_settings, token_is_valid
from logs.models import DebugLog
from tacticalrmm.constants import (
MONTH_DAYS,
MONTHS,
REDIS_LOCK_EXPIRE,
WEEK_DAYS,
WEEKS,
AgentPlat,
CustomFieldType,
DebugLogType,
ScriptShell,
)
from tacticalrmm.helpers import (
get_certs,
get_nats_hosts,
get_nats_internal_protocol,
get_nats_ports,
notify_error,
)
def runcmd_placeholder_text() -> dict[str, str]:
    """Placeholder examples shown in the "run command" UI for each shell,
    overridable through Django settings."""
    return {
        "cmd": getattr(
            settings,
            "CMD_PLACEHOLDER_TEXT",
            "rmdir /S /Q C:\\Windows\\System32",
        ),
        "powershell": getattr(
            settings,
            "POWERSHELL_PLACEHOLDER_TEXT",
            "Remove-Item -Recurse -Force C:\\Windows\\System32",
        ),
        "shell": getattr(
            settings, "SHELL_PLACEHOLDER_TEXT", "rm -rf --no-preserve-root /"
        ),
    }
189,023 | from __future__ import absolute_import, unicode_literals
import os
from datetime import timedelta
from celery import Celery
from celery.schedules import crontab
from django.conf import settings
def debug_task(self):
    """Celery debug task: print the bound task request for troubleshooting."""
    print(f"Request: {self.request!r}")
189,024 | from typing import TYPE_CHECKING
from django.db.models import Q
from django.shortcuts import get_object_or_404
from agents.models import Agent
from tacticalrmm.constants import AGENT_DEFER
def _has_perm(request, perm: str) -> bool:
if request.user.is_superuser or (
request.user.role and getattr(request.user.role, "is_superuser")
):
return True
# make sure non-superusers with empty roles aren't permitted
elif not request.user.role:
return False
return request.user.role and getattr(request.user.role, perm) | null |
189,025 | from typing import TYPE_CHECKING
from django.db.models import Q
from django.shortcuts import get_object_or_404
from agents.models import Agent
from tacticalrmm.constants import AGENT_DEFER
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
    """Display the agent as its hostname."""
    display_name: str = self.hostname
    return display_name
def save(self, *args, **kwargs):
    """Persist the agent, re-resolving its alert template when the
    monitoring type or site changed (both feed into policy lookup)."""
    # prevent recursion since calling set_alert_template() also calls save()
    if not hasattr(self, "_processing_set_alert_template"):
        self._processing_set_alert_template = False

    # only on updates (self.pk set), never on first insert
    if self.pk and not self._processing_set_alert_template:
        # fetch a fresh copy from the DB for change detection
        orig = Agent.objects.get(pk=self.pk)
        mon_type_changed = self.monitoring_type != orig.monitoring_type
        site_changed = self.site_id != orig.site_id

        if mon_type_changed or site_changed:
            self._processing_set_alert_template = True
            self.set_alert_template()
            self._processing_set_alert_template = False

    super().save(*args, **kwargs)
def client(self) -> "Client":
    """Convenience accessor for the client that owns this agent's site."""
    owning_site = self.site
    return owning_site.client
def timezone(self) -> str:
    """Per-agent timezone when explicitly set, otherwise the global default."""
    agent_tz = self.time_zone
    if agent_tz:
        return agent_tz
    # fall back to the core-settings default
    return get_core_settings().default_time_zone
def is_posix(self) -> bool:
    """True when the agent runs on a POSIX platform (Linux or macOS)."""
    posix_platforms = {AgentPlat.LINUX, AgentPlat.DARWIN}
    return self.plat in posix_platforms
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
    """Legacy bitness: goarch on POSIX, "64"/"32" parsed from the OS string
    on Windows, or None when undeterminable."""
    if self.is_posix:
        return self.goarch

    os_string = self.operating_system
    if os_string is None:
        return None

    # 64-bit markers take precedence over 32-bit ones, as before
    for needle, bits in (
        ("64 bit", "64"),
        ("64bit", "64"),
        ("32 bit", "32"),
        ("32bit", "32"),
    ):
        if needle in os_string:
            return bits
    return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
    """Queue an agent self-update to the latest version over NATS.

    Returns "noarch" when the architecture is unknown, "not supported" for
    agents at or below 1.3.0, otherwise "created".  Unless force=True a
    PendingAction row is (re)created to track the update.
    """
    ver = settings.LATEST_AGENT_VER

    if not self.goarch:
        DebugLog.warning(
            agent=self,
            log_type=DebugLogType.AGENT_ISSUES,
            message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
        )
        return "noarch"

    if pyver.parse(self.version) <= pyver.parse("1.3.0"):
        return "not supported"

    url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
    bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"

    if not force:
        # replace any stale pending update action with a fresh one
        if self.pendingactions.filter(  # type: ignore
            action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
        ).exists():
            self.pendingactions.filter(  # type: ignore
                action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
            ).delete()
        PendingAction.objects.create(
            agent=self,
            action_type=PAAction.AGENT_UPDATE,
            details={
                "url": url,
                "version": ver,
                "inno": bin,
            },
        )

    nats_data = {
        "func": "agentupdate",
        "payload": {
            "url": url,
            "version": ver,
            "inno": bin,
        },
    }
    # fire-and-forget: the agent downloads and installs the update itself
    asyncio.run(self.nats_cmd(nats_data, wait=False))
    return "created"
def status(self) -> str:
    """Availability bucket for this agent based on last_seen.

    online:  seen within offline_time minutes
    offline: older than offline_time but newer than overdue_time minutes
    overdue: older than overdue_time minutes (or never seen -> offline)
    """
    if self.last_seen is None:
        return AGENT_STATUS_OFFLINE

    now = djangotime.now()
    offline_cutoff = now - djangotime.timedelta(minutes=self.offline_time)
    overdue_cutoff = now - djangotime.timedelta(minutes=self.overdue_time)

    if self.last_seen >= offline_cutoff:
        return AGENT_STATUS_ONLINE
    # bugfix: a last_seen exactly equal to the overdue cutoff previously
    # matched neither branch and fell through to "online"
    if self.last_seen > overdue_cutoff:
        return AGENT_STATUS_OFFLINE
    return AGENT_STATUS_OVERDUE
def checks(self) -> Dict[str, Any]:
    """Summarize this agent's check results into pass/fail/warn/info counters."""
    total, passing, failing, warning, info = 0, 0, 0, 0, 0

    for check in self.get_checks_with_policies(exclude_overridden=True):
        total += 1
        # a check with no result yet, or a PASSING result, counts as passing
        if (
            not hasattr(check.check_result, "status")
            or isinstance(check.check_result, CheckResult)
            and check.check_result.status == CheckStatus.PASSING
        ):
            passing += 1
        elif (
            isinstance(check.check_result, CheckResult)
            and check.check_result.status == CheckStatus.FAILING
        ):
            # these check types carry severity on the result; all others on the check
            alert_severity = (
                check.check_result.alert_severity
                if check.check_type
                in (
                    CheckType.MEMORY,
                    CheckType.CPU_LOAD,
                    CheckType.DISK_SPACE,
                    CheckType.SCRIPT,
                )
                else check.alert_severity
            )
            if alert_severity == AlertSeverity.ERROR:
                failing += 1
            elif alert_severity == AlertSeverity.WARNING:
                warning += 1
            elif alert_severity == AlertSeverity.INFO:
                info += 1

    ret = {
        "total": total,
        "passing": passing,
        "failing": failing,
        "warning": warning,
        "info": info,
        # warnings also count as "failing" for dashboard purposes
        "has_failing_checks": failing > 0 or warning > 0,
    }
    return ret
def pending_actions_count(self) -> int:
    """Number of pending actions for this agent, cached for 10 minutes."""
    cache_key = f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}"
    count = cache.get(cache_key)
    if count is None:
        count = self.pendingactions.filter(status=PAStatus.PENDING).count()
        cache.set(cache_key, count, 600)
    return count
def cpu_model(self) -> List[str]:
    """Return a list of CPU model strings parsed from wmi_detail.

    POSIX agents report a ready-made list under "cpus"; Windows agents
    report raw WMI data under "cpu" that is summarized here as
    "<name>, <cores>C/<threads>T" when core/thread counts are available.
    Falls back to ["unknown cpu model"] on any malformed data.
    """
    if self.is_posix:
        try:
            return cast(List[str], self.wmi_detail["cpus"])
        except Exception:  # was a bare except; missing/malformed wmi_detail
            return ["unknown cpu model"]

    ret = []
    try:
        cpus = self.wmi_detail["cpu"]
        for cpu in cpus:
            name = [x["Name"] for x in cpu if "Name" in x][0]
            lp, nc = "", ""
            with suppress(Exception):
                # NOTE(review): the filter checks "NumberOfCores" but reads
                # "NumberOfLogicalProcessors" -- this only works when both
                # keys live in the same dict; confirm wmi_detail entry shape
                lp = [
                    x["NumberOfLogicalProcessors"]
                    for x in cpu
                    if "NumberOfCores" in x
                ][0]
                nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
            if lp and nc:
                cpu_string = f"{name}, {nc}C/{lp}T"
            else:
                cpu_string = name
            ret.append(cpu_string)
        return ret
    except Exception:  # was a bare except; unexpected WMI layout
        return ["unknown cpu model"]
def graphics(self) -> str:
    """Comma-separated GPU names, filtering out the Microsoft Remote
    Display Adapter unless it is the only adapter present."""
    if self.is_posix:
        try:
            if not self.wmi_detail["gpus"]:
                return "No graphics cards"
            return ", ".join(self.wmi_detail["gpus"])
        except Exception:  # was a bare except; missing/malformed wmi_detail
            return "Error getting graphics cards"

    ret, mrda = [], []
    try:
        graphics = self.wmi_detail["graphics"]
        for i in graphics:
            caption = [x["Caption"] for x in i if "Caption" in x][0]
            if "microsoft remote display adapter" in caption.lower():
                mrda.append("yes")
                continue
            ret.append([x["Caption"] for x in i if "Caption" in x][0])

        # only return this if no other graphics cards
        if not ret and mrda:
            return "Microsoft Remote Display Adapter"
        return ", ".join(ret)
    except Exception:  # was a bare except; old agents don't report "graphics"
        return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
    """Comma-separated IPv4 addresses from the agent's network config,
    or "error getting local ips" when nothing usable is reported."""
    if self.is_posix:
        try:
            return ", ".join(self.wmi_detail["local_ips"])
        except Exception:  # was a bare except; missing/malformed wmi_detail
            return "error getting local ips"

    ret = []
    try:
        ips = self.wmi_detail["network_config"]
    except Exception:  # was a bare except
        return "error getting local ips"

    for i in ips:
        try:
            addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
        except Exception:  # adapter entry without an IPAddress key
            continue

        if addr is None:
            continue

        # each adapter reports a list of addresses; keep only valid IPv4
        for ip in addr:
            if validators.ipv4(ip):
                ret.append(ip)

    if len(ret) == 1:
        return cast(str, ret[0])

    return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
    """Best-effort "<vendor> <model>" string for the hardware.

    Windows agents: computed from WMI comp_sys/comp_sys_prod, falling back
    to the base board when the model is a BIOS placeholder, and to the
    SystemFamily for Lenovo machines.  Returns "unknown make/model" when
    nothing can be parsed.
    """
    if self.is_posix:
        try:
            return cast(str, self.wmi_detail["make_model"])
        except Exception:  # was a bare except; missing/malformed wmi_detail
            return "error getting make/model"

    with suppress(Exception):
        comp_sys = self.wmi_detail["comp_sys"][0]
        comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
        make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
        model = [x["Model"] for x in comp_sys if "Model" in x][0]

        # BIOS placeholder model -> use the motherboard's vendor/product
        if "to be filled" in model.lower():
            mobo = self.wmi_detail["base_board"][0]
            make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
            model = [x["Product"] for x in mobo if "Product" in x][0]

        # Lenovo reports the marketable name under SystemFamily
        if make.lower() == "lenovo":
            sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
            if "to be filled" not in sysfam.lower():
                model = sysfam

        return f"{make} {model}"

    with suppress(Exception):
        comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
        return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])

    return "unknown make/model"
def physical_disks(self) -> "Sequence[Disk]":
    """Physical (non-USB) disks as "<model> <size>GB <interface>" strings
    on Windows, or the raw "disks" list on POSIX.  Falls back to
    ["unknown disk"] on malformed data."""
    if self.is_posix:
        try:
            return cast(List[Disk], self.wmi_detail["disks"])
        except Exception:  # was a bare except; missing/malformed wmi_detail
            return ["unknown disk"]

    try:
        disks = self.wmi_detail["disk"]
        ret = []
        for disk in disks:
            interface_type = [
                x["InterfaceType"] for x in disk if "InterfaceType" in x
            ][0]

            # removable USB media are not "physical disks" for inventory
            if interface_type == "USB":
                continue

            model = [x["Caption"] for x in disk if "Caption" in x][0]
            size = [x["Size"] for x in disk if "Size" in x][0]

            size_in_gb = round(int(size) / 1_073_741_824)
            ret.append(f"{model} {size_in_gb:,}GB {interface_type}")

        return ret
    except Exception:  # was a bare except
        return ["unknown disk"]
def serial_number(self) -> str:
    """Hardware serial number from wmi_detail, or "" when unavailable."""
    if self.is_posix:
        try:
            return self.wmi_detail["serialnumber"]
        except Exception:  # was a bare except; missing/malformed wmi_detail
            return ""

    try:
        return self.wmi_detail["bios"][0][0]["SerialNumber"]
    except Exception:  # was a bare except
        return ""
def hex_mesh_node_id(self) -> str:
    """MeshCentral node id converted from its base64 form to hex (see _b64_to_hex)."""
    return _b64_to_hex(self.mesh_node_id)
# NOTE(review): takes cls, so this is presumably decorated @classmethod
# in the full file -- confirm
def online_agents(cls, min_version: str = "") -> "List[Agent]":
    """All agents whose status is online, optionally restricted to
    agents at or above min_version."""
    if min_version:
        return [
            i
            for i in cls.objects.only(*ONLINE_AGENTS)
            if pyver.parse(i.version) >= pyver.parse(min_version)
            and i.status == AGENT_STATUS_ONLINE
        ]

    return [
        i
        for i in cls.objects.only(*ONLINE_AGENTS)
        if i.status == AGENT_STATUS_ONLINE
    ]
def is_supported_script(self, platforms: List[str]) -> bool:
    """True when platforms is empty (no restriction) or contains this
    agent's platform, compared case-insensitively."""
    if not platforms:
        return True
    return self.plat.lower() in platforms
def get_checks_with_policies(
    self, exclude_overridden: bool = False
) -> "List[Check]":
    """Agent checks plus policy-derived checks, each annotated with its
    latest result.

    When exclude_overridden is True, agent checks flagged as overridden
    by a policy are dropped.
    """
    agent_checks = list(self.agentchecks.all())
    if exclude_overridden:
        agent_checks = [c for c in agent_checks if not c.overridden_by_policy]

    combined = agent_checks + self.get_checks_from_policies()
    return self.add_check_results(combined)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
    """All tasks (agent-level plus policy-derived) annotated with results."""
    combined = [*self.autotasks.all(), *self.get_tasks_from_policies()]
    return self.add_task_results(combined)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
    """Attach the first matching TaskResult (if any) to each task, in place."""
    results = self.taskresults.all()  # type: ignore

    # index results by task id, keeping the FIRST result per id (matches
    # the original first-match-wins scan)
    by_task_id = {}
    for result in results:
        by_task_id.setdefault(result.task.id, result)

    for task in tasks:
        if task.pk in by_task_id:
            task.task_result = by_task_id[task.pk]

    return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
    """Attach the first matching CheckResult (if any) to each check, in place."""
    results = self.checkresults.all()  # type: ignore

    # index results by assigned check id, keeping the FIRST result per id
    by_check_id = {}
    for result in results:
        by_check_id.setdefault(result.assigned_check.id, result)

    for check in checks:
        if check.pk in by_check_id:
            check.check_result = by_check_id[check.pk]

    return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
    """Resolve the four policy layers (agent, site, client, default) that
    can apply to this agent, honoring per-policy exclusions and the
    block_policy_inheritance flags at agent/site/client level."""
    from checks.models import Check

    # site/client/default policy fields are named "<monitoring_type>_policy"
    site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
    client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
    default_policy = getattr(
        get_core_settings(), f"{self.monitoring_type}_policy", None
    )

    # prefetch excluded objects on policies only if policy is not None
    models.prefetch_related_objects(
        [
            policy
            for policy in (self.policy, site_policy, client_policy, default_policy)
            if policy
        ],
        "excluded_agents",
        "excluded_sites",
        "excluded_clients",
        models.Prefetch(
            "policychecks", queryset=Check.objects.select_related("script")
        ),
        "autotasks",
    )

    return {
        # the agent's own policy ignores inheritance blocking (it is direct)
        "agent_policy": (
            self.policy
            if self.policy and not self.policy.is_agent_excluded(self)
            else None
        ),
        "site_policy": (
            site_policy
            if (site_policy and not site_policy.is_agent_excluded(self))
            and not self.block_policy_inheritance
            else None
        ),
        # client policy also requires the site not to block inheritance
        "client_policy": (
            client_policy
            if (client_policy and not client_policy.is_agent_excluded(self))
            and not self.block_policy_inheritance
            and not self.site.block_policy_inheritance
            else None
        ),
        # default policy requires agent, site AND client to allow inheritance
        "default_policy": (
            default_policy
            if (default_policy and not default_policy.is_agent_excluded(self))
            and not self.block_policy_inheritance
            and not self.site.block_policy_inheritance
            and not self.client.block_policy_inheritance
            else None
        ),
    }
def check_run_interval(self) -> int:
    """Effective check interval in seconds: the agent's own interval,
    lowered to the smallest per-check custom interval, floored at 15s."""
    interval = self.check_interval

    custom_intervals = [
        check.run_interval
        for check in self.get_checks_with_policies()
        if check.run_interval and check.run_interval < interval
    ]
    if custom_intervals:
        # don't allow check runs less than 15s
        interval = max(15, min(custom_intervals))

    return interval
def run_script(
    self,
    scriptpk: int,
    args: Optional[List[str]] = None,
    timeout: int = 120,
    full: bool = False,
    wait: bool = False,
    run_on_any: bool = False,
    history_pk: int = 0,
    run_as_user: bool = False,
    env_vars: Optional[list[str]] = None,
) -> Any:
    """Execute a stored Script on this agent (or any online agent) via NATS.

    Args:
        scriptpk: pk of the scripts.Script to run.
        args: script arguments (template-parsed by the Script model).
        timeout: seconds the agent allows the script to run.
        full: request the full result payload ("runscriptfull").
        wait: block for and return the script output.
        run_on_any: if this agent doesn't answer a ping, run on any online agent.
        history_pk: optional history row id attached to the payload.
        run_as_user: run in the logged-on user's context (forced on when the
            script model sets run_as_user).
        env_vars: extra environment variables (template-parsed like args).

    Returns:
        The script result when wait=True, otherwise "ok", or
        "Unable to find an online agent" when run_on_any finds none.
    """
    from scripts.models import Script

    # bugfix: mutable default arguments ([]) replaced with None sentinels
    args = [] if args is None else args
    env_vars = [] if env_vars is None else env_vars

    script = Script.objects.get(pk=scriptpk)

    # always override if set on script model
    if script.run_as_user:
        run_as_user = True

    parsed_args = script.parse_script_args(self, script.shell, args)
    parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)

    data = {
        "func": "runscriptfull" if full else "runscript",
        "timeout": timeout,
        "script_args": parsed_args,
        "payload": {
            "code": script.code,
            "shell": script.shell,
        },
        "run_as_user": run_as_user,
        "env_vars": parsed_env_vars,
        "nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
        "deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
    }

    if history_pk != 0:
        data["id"] = history_pk

    running_agent = self
    if run_on_any:
        nats_ping = {"func": "ping"}

        # try on self first
        r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))

        if r == "pong":
            running_agent = self
        else:
            for agent in Agent.online_agents():
                r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
                if r == "pong":
                    running_agent = agent
                    break

            # still pointing at self -> nobody answered
            if running_agent.pk == self.pk:
                return "Unable to find an online agent"

    if wait:
        return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
    else:
        asyncio.run(running_agent.nats_cmd(data, wait=False))

    return "ok"
# auto approves updates
def approve_updates(self) -> None:
    """Mark not-yet-installed Windows updates as approved per the effective
    patch policy (severity "" covers uncategorized/"other" updates)."""
    patch_policy = self.get_patch_policy()

    severity_list = [
        label
        for attr, label in (
            ("critical", "Critical"),
            ("important", "Important"),
            ("moderate", "Moderate"),
            ("low", "Low"),
            ("other", ""),
        )
        if getattr(patch_policy, attr) == "approve"
    ]

    self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
        action="approve"
    ).update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
    """Effective Windows-update policy for this agent.

    Takes the patch settings from the highest-priority applied policy
    (get_agent_policies() order) and overrides each field with the
    agent-level value whenever that value is not "inherit".
    """
    from winupdate.models import WinUpdatePolicy

    # check if site has a patch policy and if so use it
    patch_policy = None
    agent_policy = self.winupdatepolicy.first()

    # every agent gets its own WinUpdatePolicy row, created lazily here
    if not agent_policy:
        agent_policy = WinUpdatePolicy.objects.create(agent=self)

    # Get the list of policies applied to the agent and select the
    # highest priority one.
    policies = self.get_agent_policies()
    for _, policy in policies.items():
        if policy and policy.active and policy.winupdatepolicy.exists():
            patch_policy = policy.winupdatepolicy.first()
            break

    # if policy still doesn't exist return the agent patch policy
    if not patch_policy:
        return agent_policy

    # patch policy exists. check if any agent settings are set to override patch policy
    if agent_policy.critical != "inherit":
        patch_policy.critical = agent_policy.critical

    if agent_policy.important != "inherit":
        patch_policy.important = agent_policy.important

    if agent_policy.moderate != "inherit":
        patch_policy.moderate = agent_policy.moderate

    if agent_policy.low != "inherit":
        patch_policy.low = agent_policy.low

    if agent_policy.other != "inherit":
        patch_policy.other = agent_policy.other

    # schedule settings travel together with the frequency override
    if agent_policy.run_time_frequency != "inherit":
        patch_policy.run_time_frequency = agent_policy.run_time_frequency
        patch_policy.run_time_hour = agent_policy.run_time_hour
        patch_policy.run_time_days = agent_policy.run_time_days

    if agent_policy.reboot_after_install != "inherit":
        patch_policy.reboot_after_install = agent_policy.reboot_after_install

    if not agent_policy.reprocess_failed_inherit:
        patch_policy.reprocess_failed = agent_policy.reprocess_failed
        patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
        patch_policy.email_if_fail = agent_policy.email_if_fail

    return patch_policy
def get_approved_update_guids(self) -> list[str]:
    """GUIDs of Windows updates approved but not yet installed."""
    approved = self.winupdates.filter(action="approve", installed=False)
    return list(approved.values_list("guid", flat=True))  # type: ignore
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
    """Pick and persist the alert template for this agent.

    Iterates the layers returned by get_agent_policies() (insertion
    order: agent, site, client, default) and assigns the first active,
    non-excluded alert template found; clears the field when none applies.
    Each assignment saves only the alert_template field.
    """
    core = get_core_settings()
    policies = self.get_agent_policies()

    # loop through all policies applied to agent and return an alert_template if found
    # NOTE(review): processed_policies is never appended to, so the
    # "policy.pk not in processed_policies" check below is always True --
    # confirm whether de-duplication was intended
    processed_policies: List[int] = []
    for key, policy in policies.items():
        # default alert_template will override a default policy with alert template applied
        if (
            "default" in key
            and core.alert_template
            and core.alert_template.is_active
            and not core.alert_template.is_agent_excluded(self)
        ):
            self.alert_template = core.alert_template
            self.save(update_fields=["alert_template"])
            return core.alert_template
        elif (
            policy
            and policy.active
            and policy.pk not in processed_policies
            and policy.alert_template
            and policy.alert_template.is_active
            and not policy.alert_template.is_agent_excluded(self)
        ):
            self.alert_template = policy.alert_template
            self.save(update_fields=["alert_template"])
            return policy.alert_template
        elif (
            "site" in key
            and self.site.alert_template
            and self.site.alert_template.is_active
            and not self.site.alert_template.is_agent_excluded(self)
        ):
            self.alert_template = self.site.alert_template
            self.save(update_fields=["alert_template"])
            return self.site.alert_template
        elif (
            "client" in key
            and self.site.client.alert_template
            and self.site.client.alert_template.is_active
            and not self.site.client.alert_template.is_agent_excluded(self)
        ):
            self.alert_template = self.site.client.alert_template
            self.save(update_fields=["alert_template"])
            return self.site.client.alert_template

    # no alert templates found or agent has been excluded
    self.alert_template = None
    self.save(update_fields=["alert_template"])
    return None
def get_or_create_alert_if_needed(
    self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
    """Create (or fetch) an availability alert, honoring the agent's and
    template's alert settings via should_create_alert()."""
    from alerts.models import Alert

    skip = not self.should_create_alert(alert_template)
    return Alert.create_or_return_availability_alert(self, skip_create=skip)
def get_checks_from_policies(self) -> "List[Check]":
    """Policy-derived checks for this agent, cached for 10 minutes.

    The cache key is agent-specific when inheritance is blocked or the
    agent has its own checks; otherwise results are shared per
    site/montype/platform (and policy, when one is assigned).
    """
    from automation.models import Policy

    # check if agent is blocking inheritance
    if self.block_policy_inheritance or self.agentchecks.exists():
        cache_key = f"agent_{self.agent_id}_checks"
    elif self.policy:
        cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
    else:
        cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"

    cached_checks = cache.get(cache_key)
    # isinstance guard distinguishes a real cached list from a cache miss
    if isinstance(cached_checks, list):
        return cached_checks
    else:
        # clear agent checks that have overridden_by_policy set
        self.agentchecks.update(overridden_by_policy=False)  # type: ignore

        # get agent checks based on policies
        checks = Policy.get_policy_checks(self)
        cache.set(cache_key, checks, 600)
        return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
    """Policy-derived automated tasks for this agent, cached for 10 minutes.

    Mirrors get_checks_from_policies() caching, except the agent-specific
    key is used only when inheritance is blocked.
    """
    from automation.models import Policy

    # check if agent is blocking inheritance
    if self.block_policy_inheritance:
        cache_key = f"agent_{self.agent_id}_tasks"
    elif self.policy:
        cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
    else:
        cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"

    cached_tasks = cache.get(cache_key)
    # isinstance guard distinguishes a real cached list from a cache miss
    if isinstance(cached_tasks, list):
        return cached_tasks
    else:
        # get agent tasks based on policies
        tasks = Policy.get_policy_tasks(self)
        cache.set(cache_key, tasks, 600)
        return tasks
async def nats_cmd(
    self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
    """Send a msgpack-encoded command to this agent over NATS.

    Returns "natsdown" when the NATS server is unreachable, "timeout" on
    request timeout, the decoded reply when wait=True, and None when
    wait=False (fire-and-forget).
    """
    opts = setup_nats_options()
    try:
        nc = await nats.connect(**opts)
    except Exception:  # was a bare except; NATS server down/unreachable
        return "natsdown"

    if wait:
        try:
            msg = await nc.request(
                self.agent_id, msgpack.dumps(data), timeout=timeout
            )
        except TimeoutError:
            ret = "timeout"
        else:
            try:
                ret = msgpack.loads(msg.data)
            except Exception as e:
                # reply wasn't valid msgpack; surface the error text
                ret = str(e)
                logger.error(e)

        await nc.close()
        return ret
    else:
        await nc.publish(self.agent_id, msgpack.dumps(data))
        await nc.flush()
        await nc.close()
        return None
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
    """
    Return type: tuple(message: str, error: bool)
    """
    if mode == "tacagent":
        # restart the agent service out-of-band through MeshCentral
        if self.plat == AgentPlat.LINUX:
            cmd, shell = "systemctl restart tacticalagent.service", 3
        elif self.plat == AgentPlat.DARWIN:
            cmd, shell = "launchctl kickstart -k system/tacticalagent", 3
        else:
            cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
            shell = 1
        asyncio.run(
            send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
        )
        return "ok", False

    if mode == "mesh":
        data = {"func": "recover", "payload": {"mode": mode}}
        if not wait:
            asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
            return "ok", False
        reply = asyncio.run(self.nats_cmd(data, timeout=20))
        if reply == "ok":
            return "ok", False
        return str(reply), True

    return "invalid", True
# NOTE(review): takes `agent` rather than self -- presumably decorated
# @staticmethod in the full file; confirm
def serialize(agent: "Agent") -> Dict[str, Any]:
    # serializes the agent and returns json
    from .serializers import AgentAuditSerializer

    return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
    """Remove duplicate Windows-update rows for the same KB, keeping only
    the newest version (best-effort: the outer suppress makes any
    unexpected error a no-op by design)."""
    with suppress(Exception):
        pks = []  # list of pks to delete
        kbs = list(self.winupdates.values_list("kb", flat=True))
        d = Counter(kbs)
        dupes = [k for k, v in d.items() if v > 1]

        for dupe in dupes:
            titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
            # extract the version from the title and sort from oldest to newest
            # skip if no version info is available therefore nothing to parse
            try:
                # matches "(Version ...)" and the Portuguese "(Versão ...)"
                matches = r"(Version|Versão)"
                pattern = r"\(" + matches + r"(.*?)\)"
                vers = [
                    re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
                    for i in titles
                ]
                sorted_vers = sorted(vers, key=LooseVersion)
            except Exception:  # was a bare except; title had no version info
                continue

            # append all but the latest version to our list of pks to delete
            for ver in sorted_vers[:-1]:
                q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
                pks.append(q.first().pk)

        pks = list(set(pks))
        self.winupdates.filter(pk__in=pks).delete()
def should_create_alert(
    self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
    """True when any agent-level overdue alert is enabled, or the given
    alert template forces always-alerting."""
    agent_wants_alert = (
        self.overdue_dashboard_alert
        or self.overdue_email_alert
        or self.overdue_text_alert
    )
    if agent_wants_alert:
        return True

    if not alert_template:
        return False

    return bool(
        alert_template.agent_always_alert
        or alert_template.agent_always_email
        or alert_template.agent_always_text
    )
def send_outage_email(self) -> None:
    """Email an overdue (data not received) notification for this agent."""
    core_settings = get_core_settings()
    subject = f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue"
    body = (
        f"Data has not been received from client {self.client.name}, "
        f"site {self.site.name}, "
        f"agent {self.hostname} "
        "within the expected time."
    )
    core_settings.send_mail(subject, body, alert_template=self.alert_template)
def send_recovery_email(self) -> None:
    """Email a recovery (data received again) notification for this agent."""
    core_settings = get_core_settings()
    subject = f"{self.client.name}, {self.site.name}, {self.hostname} - data received"
    body = (
        f"Data has been received from client {self.client.name}, "
        f"site {self.site.name}, "
        f"agent {self.hostname} "
        "after an interruption in data transmission."
    )
    core_settings.send_mail(subject, body, alert_template=self.alert_template)
def send_outage_sms(self) -> None:
    """SMS an overdue (data not received) notification for this agent."""
    core_settings = get_core_settings()
    message = f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue"
    core_settings.send_sms(message, alert_template=self.alert_template)
def send_recovery_sms(self) -> None:
    """SMS a recovery (data received again) notification for this agent."""
    core_settings = get_core_settings()
    message = f"{self.client.name}, {self.site.name}, {self.hostname} - data received"
    core_settings.send_sms(message, alert_template=self.alert_template)
def _has_perm_on_agent(user: "User", agent_id: str) -> bool:
    """Object-level permission check: may `user` act on agent `agent_id`?

    Superusers always pass; users without a role never pass.  Otherwise
    the agent's client or site must appear in the role's allow-lists;
    empty allow-lists mean "no restriction".
    """
    from agents.models import Agent

    role = user.get_and_set_role_cache()
    # fix: getattr(role, "is_superuser") with no default is just attribute
    # access with extra steps -- use the attribute directly
    if user.is_superuser or (role and role.is_superuser):
        return True

    # make sure non-superusers with empty roles aren't permitted
    if not role:
        return False

    agent = get_object_or_404(
        Agent.objects.defer(*AGENT_DEFER).select_related("site__client"),
        agent_id=agent_id,
    )

    can_view_clients = role.can_view_clients.all() if role else None
    can_view_sites = role.can_view_sites.all() if role else None

    if not can_view_clients and not can_view_sites:
        return True
    if can_view_clients and agent.client in can_view_clients:
        return True
    if can_view_sites and agent.site in can_view_sites:
        return True

    return False
189,026 | from typing import TYPE_CHECKING
from django.db.models import Q
from django.shortcuts import get_object_or_404
from agents.models import Agent
from tacticalrmm.constants import AGENT_DEFER
class Client(BaseAuditModel):
def save(self, *args, **kwargs):
def __str__(self):
def live_agent_count(self) -> int:
def serialize(client):
def _has_perm_on_client(user: "User", client_id: int) -> bool:
    """Object-level permission check: may `user` act on client `client_id`?

    Superusers always pass; users without a role never pass.  Otherwise
    the client must appear in the role's client allow-list; an empty
    allow-list means "no restriction".
    """
    from clients.models import Client

    role = user.get_and_set_role_cache()
    # fix: getattr(role, "is_superuser") with no default is just attribute
    # access with extra steps -- use the attribute directly
    if user.is_superuser or (role and role.is_superuser):
        return True

    # make sure non-superusers with empty roles aren't permitted
    if not role:
        return False

    client = get_object_or_404(Client, pk=client_id)
    can_view_clients = role.can_view_clients.all() if role else None

    if not can_view_clients:
        return True
    if client in can_view_clients:
        return True

    return False
189,027 | from typing import TYPE_CHECKING
from django.db.models import Q
from django.shortcuts import get_object_or_404
from agents.models import Agent
from tacticalrmm.constants import AGENT_DEFER
class Site(BaseAuditModel):
    """A location belonging to a Client; agents attach to sites."""

    objects = PermissionQuerySet.as_manager()
    client = models.ForeignKey(Client, related_name="sites", on_delete=models.CASCADE)
    name = models.CharField(max_length=255)
    # when True, client/default policies do not flow down to this site's agents
    block_policy_inheritance = models.BooleanField(default=False)
    failing_checks = models.JSONField(default=_default_failing_checks_data)
    # per-monitoring-type default policies for agents at this site
    workstation_policy = models.ForeignKey(
        "automation.Policy",
        related_name="workstation_sites",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
    )
    server_policy = models.ForeignKey(
        "automation.Policy",
        related_name="server_sites",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
    )
    alert_template = models.ForeignKey(
        "alerts.AlertTemplate",
        related_name="sites",
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
    )

    def save(self, *args, **kwargs):
        """Persist the site; on policy/template changes, kick off cache
        invalidation and re-resolution of agent alert templates."""
        from alerts.tasks import cache_agents_alert_template

        # get old site if exists (None on first insert)
        old_site = Site.objects.get(pk=self.pk) if self.pk else None
        super().save(old_model=old_site, *args, **kwargs)

        # check if policies have changed and initiate task to reapply policies if so
        if old_site:
            if (
                old_site.alert_template != self.alert_template
                or old_site.workstation_policy != self.workstation_policy
                or old_site.server_policy != self.server_policy
            ):
                cache_agents_alert_template.delay()

            # drop cached policy checks/tasks keyed on this site (see
            # Agent.get_checks_from_policies cache-key scheme)
            if old_site.workstation_policy != self.workstation_policy:
                cache.delete_many_pattern(f"site_workstation_*{self.pk}_*")

            if old_site.server_policy != self.server_policy:
                cache.delete_many_pattern(f"site_server_*{self.pk}_*")

    class Meta:
        ordering = ("name",)
        # a site name is unique within its client, not globally
        unique_together = (("client", "name"),)

    def __str__(self):
        return self.name

    def live_agent_count(self) -> int:
        """Number of agents attached to this site."""
        return self.agents.defer(*AGENT_DEFER).count()  # type: ignore

    def serialize(site):
        from .serializers import SiteAuditSerializer

        # serializes the site and returns json
        return SiteAuditSerializer(site).data
def _has_perm_on_site(user: "User", site_id: int) -> bool:
    """Object-level permission check: may `user` act on site `site_id`?

    Superusers always pass; users without a role never pass.  Otherwise
    the site (or its client) must appear in the role's allow-lists;
    empty allow-lists mean "no restriction".
    """
    from clients.models import Site

    role = user.get_and_set_role_cache()
    # fix: getattr(role, "is_superuser") with no default is just attribute
    # access with extra steps -- use the attribute directly
    if user.is_superuser or (role and role.is_superuser):
        return True

    # make sure non-superusers with empty roles aren't permitted
    if not role:
        return False

    site = get_object_or_404(Site, pk=site_id)
    can_view_clients = role.can_view_clients.all() if role else None
    can_view_sites = role.can_view_sites.all() if role else None

    if not can_view_clients and not can_view_sites:
        return True
    if can_view_sites and site in can_view_sites:
        return True
    if can_view_clients and site.client in can_view_clients:
        return True

    return False
189,028 | from typing import TYPE_CHECKING
from django.db.models import Q
from django.shortcuts import get_object_or_404
from agents.models import Agent
from tacticalrmm.constants import AGENT_DEFER
class Agent(BaseAuditModel):
class Meta:
    # monitoring_type is filtered on frequently (policy/cache keys)
    indexes = [
        models.Index(fields=["monitoring_type"]),
    ]

objects = PermissionQuerySet.as_manager()

# agent software version string; compared with packaging.version parsing
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
# OS platform; LINUX/DARWIN count as POSIX (see is_posix)
plat: "AgentPlat" = models.CharField(  # type: ignore
    max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
# architecture, choices from GoArch; used to pick the update binary
goarch: "GoArch" = models.CharField(  # type: ignore
    max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
# unique id generated at install time; also used as the NATS subject
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
# server vs workstation; selects which "<montype>_policy" fields apply
monitoring_type = models.CharField(
    max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
# MeshCentral node id (base64 form; hex form via hex_mesh_node_id())
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
# minutes without contact before the agent counts as offline / overdue
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
# seconds between check runs; may be lowered per-check (see check_run_interval)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
# raw WMI inventory; parsed by cpu_model()/graphics()/local_ips()/etc.
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
# per-agent timezone override; falls back to core settings (see timezone())
time_zone = models.CharField(
    max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
# when True, site/client/default policies are not inherited
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
    "alerts.AlertTemplate",
    related_name="agents",
    null=True,
    blank=True,
    on_delete=models.SET_NULL,
)
site = models.ForeignKey(
    "clients.Site",
    related_name="agents",
    on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
    "automation.Policy",
    related_name="agents",
    null=True,
    blank=True,
    on_delete=models.SET_NULL,
)
def __str__(self) -> str:
    """Display the agent as its hostname."""
    display_name: str = self.hostname
    return display_name
def save(self, *args, **kwargs):
    """Persist the agent, re-resolving its alert template when the
    monitoring type or site changed (both feed into policy lookup)."""
    # prevent recursion since calling set_alert_template() also calls save()
    if not hasattr(self, "_processing_set_alert_template"):
        self._processing_set_alert_template = False

    # only on updates (self.pk set), never on first insert
    if self.pk and not self._processing_set_alert_template:
        # fetch a fresh copy from the DB for change detection
        orig = Agent.objects.get(pk=self.pk)
        mon_type_changed = self.monitoring_type != orig.monitoring_type
        site_changed = self.site_id != orig.site_id

        if mon_type_changed or site_changed:
            self._processing_set_alert_template = True
            self.set_alert_template()
            self._processing_set_alert_template = False

    super().save(*args, **kwargs)
def client(self) -> "Client":
    """Convenience accessor for the client that owns this agent's site."""
    owning_site = self.site
    return owning_site.client
def timezone(self) -> str:
    """Per-agent timezone when explicitly set, otherwise the global default."""
    agent_tz = self.time_zone
    if agent_tz:
        return agent_tz
    # fall back to the core-settings default
    return get_core_settings().default_time_zone
def is_posix(self) -> bool:
    """True when the agent runs on a POSIX platform (Linux or macOS)."""
    posix_platforms = {AgentPlat.LINUX, AgentPlat.DARWIN}
    return self.plat in posix_platforms
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
    """Legacy bitness: goarch on POSIX, "64"/"32" parsed from the OS string
    on Windows, or None when undeterminable."""
    if self.is_posix:
        return self.goarch

    os_string = self.operating_system
    if os_string is None:
        return None

    # 64-bit markers take precedence over 32-bit ones, as before
    for needle, bits in (
        ("64 bit", "64"),
        ("64bit", "64"),
        ("32 bit", "32"),
        ("32bit", "32"),
    ):
        if needle in os_string:
            return bits
    return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
    """Queue an agent self-update to the latest version over NATS.

    Returns "noarch" when the architecture is unknown, "not supported" for
    agents at or below 1.3.0, otherwise "created".  Unless force=True a
    PendingAction row is (re)created to track the update.
    """
    ver = settings.LATEST_AGENT_VER

    if not self.goarch:
        DebugLog.warning(
            agent=self,
            log_type=DebugLogType.AGENT_ISSUES,
            message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
        )
        return "noarch"

    if pyver.parse(self.version) <= pyver.parse("1.3.0"):
        return "not supported"

    url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
    bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"

    if not force:
        # replace any stale pending update action with a fresh one
        if self.pendingactions.filter(  # type: ignore
            action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
        ).exists():
            self.pendingactions.filter(  # type: ignore
                action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
            ).delete()
        PendingAction.objects.create(
            agent=self,
            action_type=PAAction.AGENT_UPDATE,
            details={
                "url": url,
                "version": ver,
                "inno": bin,
            },
        )

    nats_data = {
        "func": "agentupdate",
        "payload": {
            "url": url,
            "version": ver,
            "inno": bin,
        },
    }
    # fire-and-forget: the agent downloads and installs the update itself
    asyncio.run(self.nats_cmd(nats_data, wait=False))
    return "created"
def status(self) -> str:
    """Availability bucket for this agent based on last_seen.

    online:  seen within offline_time minutes
    offline: older than offline_time but newer than overdue_time minutes
    overdue: older than overdue_time minutes (or never seen -> offline)
    """
    if self.last_seen is None:
        return AGENT_STATUS_OFFLINE

    now = djangotime.now()
    offline_cutoff = now - djangotime.timedelta(minutes=self.offline_time)
    overdue_cutoff = now - djangotime.timedelta(minutes=self.overdue_time)

    if self.last_seen >= offline_cutoff:
        return AGENT_STATUS_ONLINE
    # bugfix: a last_seen exactly equal to the overdue cutoff previously
    # matched neither branch and fell through to "online"
    if self.last_seen > overdue_cutoff:
        return AGENT_STATUS_OFFLINE
    return AGENT_STATUS_OVERDUE
def checks(self) -> Dict[str, Any]:
    """Summary counts of this agent's checks (agent + policy, overridden
    excluded): total/passing/failing/warning/info plus a has_failing_checks
    flag (true when any check is failing OR warning)."""
    total, passing, failing, warning, info = 0, 0, 0, 0, 0
    for check in self.get_checks_with_policies(exclude_overridden=True):
        total += 1
        # NB: `and` binds tighter than `or` — a check with no result object
        # yet (not hasattr) is counted as passing.
        if (
            not hasattr(check.check_result, "status")
            or isinstance(check.check_result, CheckResult)
            and check.check_result.status == CheckStatus.PASSING
        ):
            passing += 1
        elif (
            isinstance(check.check_result, CheckResult)
            and check.check_result.status == CheckStatus.FAILING
        ):
            # for threshold-style checks the severity lives on the result;
            # for the rest it lives on the check definition itself
            alert_severity = (
                check.check_result.alert_severity
                if check.check_type
                in (
                    CheckType.MEMORY,
                    CheckType.CPU_LOAD,
                    CheckType.DISK_SPACE,
                    CheckType.SCRIPT,
                )
                else check.alert_severity
            )
            if alert_severity == AlertSeverity.ERROR:
                failing += 1
            elif alert_severity == AlertSeverity.WARNING:
                warning += 1
            elif alert_severity == AlertSeverity.INFO:
                info += 1
    ret = {
        "total": total,
        "passing": passing,
        "failing": failing,
        "warning": warning,
        "info": info,
        "has_failing_checks": failing > 0 or warning > 0,
    }
    return ret
def pending_actions_count(self) -> int:
    """Number of pending actions for this agent, cached for 10 minutes."""
    cache_key = f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}"
    count = cache.get(cache_key)
    if count is None:
        count = self.pendingactions.filter(status=PAStatus.PENDING).count()
        cache.set(cache_key, count, 600)
    return count
def cpu_model(self) -> List[str]:
    """Human-readable CPU model strings from the agent's inventory data.

    Windows WMI data is a list of CPUs, each a list of single-key dicts.
    Returns ["unknown cpu model"] when the data is missing or malformed.

    Fixes: the logical-processor lookup filtered on "NumberOfCores" while
    reading "NumberOfLogicalProcessors", so with single-key WMI dicts the
    "<n>C/<n>T" suffix could never be produced; bare `except:` narrowed to
    `except Exception:` so SystemExit/KeyboardInterrupt propagate.
    """
    if self.is_posix:
        try:
            return cast(List[str], self.wmi_detail["cpus"])
        except Exception:
            return ["unknown cpu model"]
    ret = []
    try:
        cpus = self.wmi_detail["cpu"]
        for cpu in cpus:
            name = [x["Name"] for x in cpu if "Name" in x][0]
            lp, nc = "", ""
            with suppress(Exception):
                lp = [
                    x["NumberOfLogicalProcessors"]
                    for x in cpu
                    if "NumberOfLogicalProcessors" in x
                ][0]
                nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
            if lp and nc:
                cpu_string = f"{name}, {nc}C/{lp}T"
            else:
                cpu_string = name
            ret.append(cpu_string)
        return ret
    except Exception:
        return ["unknown cpu model"]
def graphics(self) -> str:
    """Comma-separated GPU names from inventory; the Microsoft Remote
    Display Adapter is reported only when no real card is present."""
    if self.is_posix:
        try:
            gpus = self.wmi_detail["gpus"]
            return ", ".join(gpus) if gpus else "No graphics cards"
        except:
            return "Error getting graphics cards"
    cards, remote_adapters = [], []
    try:
        for gpu in self.wmi_detail["graphics"]:
            caption = [x["Caption"] for x in gpu if "Caption" in x][0]
            if "microsoft remote display adapter" in caption.lower():
                remote_adapters.append("yes")
                continue
            cards.append(caption)
        # only return this if no other graphics cards
        if remote_adapters and not cards:
            return "Microsoft Remote Display Adapter"
        return ", ".join(cards)
    except:
        return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
    """Comma-separated IPv4 addresses from the agent's network inventory."""
    if self.is_posix:
        try:
            return ", ".join(self.wmi_detail["local_ips"])
        except:
            return "error getting local ips"
    ret = []
    try:
        ips = self.wmi_detail["network_config"]
    except:
        return "error getting local ips"
    for i in ips:
        try:
            # each adapter entry is a list of single-key dicts from WMI;
            # IPAddress holds a list of addresses (or None)
            addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
        except:
            continue
        if addr is None:
            continue
        for ip in addr:
            # keep only valid IPv4 addresses (IPv6 entries are skipped)
            if validators.ipv4(ip):
                ret.append(ip)
    if len(ret) == 1:
        return cast(str, ret[0])
    return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
    """Best-effort hardware make/model string from inventory data."""
    if self.is_posix:
        try:
            return cast(str, self.wmi_detail["make_model"])
        except:
            return "error getting make/model"
    with suppress(Exception):
        comp_sys = self.wmi_detail["comp_sys"][0]
        comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
        make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
        model = [x["Model"] for x in comp_sys if "Model" in x][0]
        # OEMs sometimes ship placeholder strings ("To Be Filled By O.E.M.");
        # fall back to the baseboard vendor/product in that case
        if "to be filled" in model.lower():
            mobo = self.wmi_detail["base_board"][0]
            make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
            model = [x["Product"] for x in mobo if "Product" in x][0]
        # Lenovo puts the marketing name in SystemFamily
        if make.lower() == "lenovo":
            sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
            if "to be filled" not in sysfam.lower():
                model = sysfam
        return f"{make} {model}"
    with suppress(Exception):
        # last resort: the computer system product version string
        comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
        return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
    return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
    """Summaries of the agent's physical disks ("<model> <size>GB <bus>");
    USB disks are skipped, errors yield ["unknown disk"]."""
    if self.is_posix:
        try:
            return cast(List[Disk], self.wmi_detail["disks"])
        except:
            return ["unknown disk"]
    try:
        summaries = []
        for entries in self.wmi_detail["disk"]:
            interface = [
                x["InterfaceType"] for x in entries if "InterfaceType" in x
            ][0]
            if interface == "USB":
                continue
            caption = [x["Caption"] for x in entries if "Caption" in x][0]
            raw_size = [x["Size"] for x in entries if "Size" in x][0]
            size_gb = round(int(raw_size) / 1_073_741_824)
            summaries.append(f"{caption} {size_gb:,}GB {interface}")
        return summaries
    except:
        return ["unknown disk"]
def serial_number(self) -> str:
    """Device serial number from inventory data; empty string when unavailable."""
    if self.is_posix:
        try:
            return self.wmi_detail["serialnumber"]
        except:
            return ""
    try:
        bios = self.wmi_detail["bios"]
        return bios[0][0]["SerialNumber"]
    except:
        return ""
def hex_mesh_node_id(self) -> str:
    """MeshCentral node id converted from base64 to hex (via _b64_to_hex)."""
    return _b64_to_hex(self.mesh_node_id)
# NOTE(review): takes `cls` — presumably decorated with @classmethod in the
# original source (decorators appear stripped in this copy); confirm.
def online_agents(cls, min_version: str = "") -> "List[Agent]":
    """All currently-online agents, optionally filtered to agents running at
    least *min_version*. Only the ONLINE_AGENTS fields are loaded."""
    if min_version:
        return [
            i
            for i in cls.objects.only(*ONLINE_AGENTS)
            if pyver.parse(i.version) >= pyver.parse(min_version)
            and i.status == AGENT_STATUS_ONLINE
        ]
    return [
        i
        for i in cls.objects.only(*ONLINE_AGENTS)
        if i.status == AGENT_STATUS_ONLINE
    ]
def is_supported_script(self, platforms: List[str]) -> bool:
    """True when this agent's platform is listed in *platforms*; an empty
    list means the script supports every platform."""
    if not platforms:
        return True
    return self.plat.lower() in platforms
def get_checks_with_policies(
    self, exclude_overridden: bool = False
) -> "List[Check]":
    """Agent checks plus policy-inherited checks, with results attached.

    When exclude_overridden is True, agent checks flagged as overridden by
    a policy check are left out.
    """
    if exclude_overridden:
        checks = (
            list(
                check
                for check in self.agentchecks.all()
                if not check.overridden_by_policy
            )
            + self.get_checks_from_policies()
        )
    else:
        checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
    # attach each check's latest CheckResult for this agent
    return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
    """Agent tasks plus policy-inherited tasks, with task results attached."""
    tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
    return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
    """Attach this agent's task results to the matching tasks (mutated in
    place) and return the same list."""
    task_results = self.taskresults.all()  # type: ignore
    for task in tasks:
        match = next((r for r in task_results if r.task.id == task.pk), None)
        if match is not None:
            task.task_result = match
    return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
    """Attach this agent's check results to the matching checks (mutated in
    place) and return the same list."""
    check_results = self.checkresults.all()  # type: ignore
    for check in checks:
        match = next(
            (r for r in check_results if r.assigned_check.id == check.pk), None
        )
        if match is not None:
            check.check_result = match
    return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
    """Resolve the automation policies that apply to this agent.

    Returns a dict with keys "agent_policy", "site_policy", "client_policy"
    and "default_policy"; each value is the Policy for that level or None
    when no policy applies (missing policy, agent excluded from it, or
    inheritance blocked at some level).
    """
    from checks.models import Check

    # the site/client/core each have a <monitoring_type>_policy attribute
    site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
    client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
    default_policy = getattr(
        get_core_settings(), f"{self.monitoring_type}_policy", None
    )
    # prefetch excluded objects on policies, only for policies that are not None
    models.prefetch_related_objects(
        [
            policy
            for policy in (self.policy, site_policy, client_policy, default_policy)
            if policy
        ],
        "excluded_agents",
        "excluded_sites",
        "excluded_clients",
        models.Prefetch(
            "policychecks", queryset=Check.objects.select_related("script")
        ),
        "autotasks",
    )
    return {
        # agent-level policy: honored only when the agent isn't excluded from it
        "agent_policy": (
            self.policy
            if self.policy and not self.policy.is_agent_excluded(self)
            else None
        ),
        # site policy: also dropped when the agent blocks inheritance
        "site_policy": (
            site_policy
            if (site_policy and not site_policy.is_agent_excluded(self))
            and not self.block_policy_inheritance
            else None
        ),
        # client policy: dropped when the agent or its site blocks inheritance
        "client_policy": (
            client_policy
            if (client_policy and not client_policy.is_agent_excluded(self))
            and not self.block_policy_inheritance
            and not self.site.block_policy_inheritance
            else None
        ),
        # global default policy: dropped when agent, site, or client blocks it
        "default_policy": (
            default_policy
            if (default_policy and not default_policy.is_agent_excluded(self))
            and not self.block_policy_inheritance
            and not self.site.block_policy_inheritance
            and not self.client.block_policy_inheritance
            else None
        ),
    }
def check_run_interval(self) -> int:
    """Effective check-run interval in seconds: the smallest custom
    run_interval across all checks (floored at 15s), defaulting to the
    agent's check_interval."""
    lowest = self.check_interval
    for chk in self.get_checks_with_policies():
        custom = chk.run_interval
        # a falsy run_interval (0/None) means "use the agent default"
        if custom and custom < lowest:
            lowest = max(custom, 15)
    return lowest
def run_script(
    self,
    scriptpk: int,
    args: Optional[List[str]] = None,
    timeout: int = 120,
    full: bool = False,
    wait: bool = False,
    run_on_any: bool = False,
    history_pk: int = 0,
    run_as_user: bool = False,
    env_vars: Optional[list[str]] = None,
) -> Any:
    """Run the Script with pk *scriptpk* on this agent over NATS.

    Args:
        args / env_vars: optional script arguments / environment variables
            (None is treated as an empty list).
        full: use the "runscriptfull" agent function instead of "runscript".
        wait: when True, block for the script result and return it;
            otherwise fire-and-forget and return "ok".
        run_on_any: if this agent is unreachable, run on any online agent.
        history_pk: optional ScriptHistory pk forwarded to the agent.
        run_as_user: run in the logged-on user's context (forced on when
            the script model sets run_as_user).

    Fix: the original used mutable default arguments (args=[], env_vars=[]),
    a shared-state hazard; defaults are now None and normalized locally.
    """
    from scripts.models import Script

    args = [] if args is None else args
    env_vars = [] if env_vars is None else env_vars
    script = Script.objects.get(pk=scriptpk)
    # always override if set on script model
    if script.run_as_user:
        run_as_user = True
    parsed_args = script.parse_script_args(self, script.shell, args)
    parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)
    data = {
        "func": "runscriptfull" if full else "runscript",
        "timeout": timeout,
        "script_args": parsed_args,
        "payload": {
            "code": script.code,
            "shell": script.shell,
        },
        "run_as_user": run_as_user,
        "env_vars": parsed_env_vars,
        "nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
        "deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
    }
    if history_pk != 0:
        data["id"] = history_pk
    running_agent = self
    if run_on_any:
        nats_ping = {"func": "ping"}
        # try on self first
        r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))
        if r == "pong":
            running_agent = self
        else:
            for agent in Agent.online_agents():
                r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
                if r == "pong":
                    running_agent = agent
                    break
            # still pointing at self means nobody answered the ping
            if running_agent.pk == self.pk:
                return "Unable to find an online agent"
    if wait:
        return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
    else:
        asyncio.run(running_agent.nats_cmd(data, wait=False))
    return "ok"
# auto approves updates
def approve_updates(self) -> None:
    """Mark not-yet-installed Windows updates as approved for every severity
    the effective patch policy sets to "approve"."""
    policy = self.get_patch_policy()
    label_map = (
        ("critical", "Critical"),
        ("important", "Important"),
        ("moderate", "Moderate"),
        ("low", "Low"),
        ("other", ""),  # updates with no severity string
    )
    severity_list = [
        label for field, label in label_map if getattr(policy, field) == "approve"
    ]
    self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
        action="approve"
    ).update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
    """Effective Windows-update policy for this agent.

    Takes the highest-priority applied automation policy's patch settings
    and overlays any agent-level settings that are not "inherit". An agent
    WinUpdatePolicy row is created on the fly if one doesn't exist.
    """
    from winupdate.models import WinUpdatePolicy

    # check if site has a patch policy and if so use it
    patch_policy = None
    agent_policy = self.winupdatepolicy.first()
    if not agent_policy:
        agent_policy = WinUpdatePolicy.objects.create(agent=self)
    # Get the list of policies applied to the agent and select the
    # highest priority one (dict order: agent, site, client, default).
    policies = self.get_agent_policies()
    for _, policy in policies.items():
        if policy and policy.active and policy.winupdatepolicy.exists():
            patch_policy = policy.winupdatepolicy.first()
            break
    # if policy still doesn't exist return the agent patch policy
    if not patch_policy:
        return agent_policy
    # patch policy exists. check if any agent settings are set to override patch policy
    if agent_policy.critical != "inherit":
        patch_policy.critical = agent_policy.critical
    if agent_policy.important != "inherit":
        patch_policy.important = agent_policy.important
    if agent_policy.moderate != "inherit":
        patch_policy.moderate = agent_policy.moderate
    if agent_policy.low != "inherit":
        patch_policy.low = agent_policy.low
    if agent_policy.other != "inherit":
        patch_policy.other = agent_policy.other
    if agent_policy.run_time_frequency != "inherit":
        patch_policy.run_time_frequency = agent_policy.run_time_frequency
        patch_policy.run_time_hour = agent_policy.run_time_hour
        patch_policy.run_time_days = agent_policy.run_time_days
    if agent_policy.reboot_after_install != "inherit":
        patch_policy.reboot_after_install = agent_policy.reboot_after_install
    if not agent_policy.reprocess_failed_inherit:
        patch_policy.reprocess_failed = agent_policy.reprocess_failed
        patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
        patch_policy.email_if_fail = agent_policy.email_if_fail
    # NOTE: the merged object is returned unsaved — callers read it only
    return patch_policy
def get_approved_update_guids(self) -> list[str]:
    """GUIDs of Windows updates approved for this agent but not yet installed."""
    approved = self.winupdates.filter(action="approve", installed=False)  # type: ignore
    return list(approved.values_list("guid", flat=True))
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
    """Assign and persist this agent's alert template.

    Walks the resolved policies (agent, site, client, default) and uses the
    first active, non-excluded alert template found. A global default
    template takes precedence over the default policy's template. The
    chosen template (or None) is saved on the agent and returned.
    """
    core = get_core_settings()
    policies = self.get_agent_policies()
    # loop through all policies applied to agent and return an alert_template if found
    # NOTE(review): processed_policies is consulted below but nothing is ever
    # appended to it, so the duplicate-policy guard is currently a no-op —
    # confirm whether an append was intended.
    processed_policies: List[int] = []
    for key, policy in policies.items():
        # default alert_template will override a default policy with alert template applied
        if (
            "default" in key
            and core.alert_template
            and core.alert_template.is_active
            and not core.alert_template.is_agent_excluded(self)
        ):
            self.alert_template = core.alert_template
            self.save(update_fields=["alert_template"])
            return core.alert_template
        elif (
            policy
            and policy.active
            and policy.pk not in processed_policies
            and policy.alert_template
            and policy.alert_template.is_active
            and not policy.alert_template.is_agent_excluded(self)
        ):
            self.alert_template = policy.alert_template
            self.save(update_fields=["alert_template"])
            return policy.alert_template
        elif (
            "site" in key
            and self.site.alert_template
            and self.site.alert_template.is_active
            and not self.site.alert_template.is_agent_excluded(self)
        ):
            self.alert_template = self.site.alert_template
            self.save(update_fields=["alert_template"])
            return self.site.alert_template
        elif (
            "client" in key
            and self.site.client.alert_template
            and self.site.client.alert_template.is_active
            and not self.site.client.alert_template.is_agent_excluded(self)
        ):
            self.alert_template = self.site.client.alert_template
            self.save(update_fields=["alert_template"])
            return self.site.client.alert_template
    # no alert templates found or agent has been excluded
    self.alert_template = None
    self.save(update_fields=["alert_template"])
    return None
def get_or_create_alert_if_needed(
    self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
    """Return the agent's availability alert, creating one only when
    should_create_alert() says an alert is warranted."""
    from alerts.models import Alert

    return Alert.create_or_return_availability_alert(
        self, skip_create=not self.should_create_alert(alert_template)
    )
def get_checks_from_policies(self) -> "List[Check]":
    """Checks inherited from automation policies, cached for 10 minutes."""
    from automation.models import Policy

    # cache key granularity: per-agent when inheritance is blocked or the
    # agent has its own checks; otherwise shared per site/monitoring/platform
    # (optionally per explicit agent policy)
    if self.block_policy_inheritance or self.agentchecks.exists():
        cache_key = f"agent_{self.agent_id}_checks"
    elif self.policy:
        cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
    else:
        cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"
    cached_checks = cache.get(cache_key)
    if isinstance(cached_checks, list):
        return cached_checks
    else:
        # clear agent checks that have overridden_by_policy set
        self.agentchecks.update(overridden_by_policy=False)  # type: ignore
        # get agent checks based on policies
        checks = Policy.get_policy_checks(self)
        cache.set(cache_key, checks, 600)
        return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
    """Automated tasks inherited from policies, cached for 10 minutes."""
    from automation.models import Policy

    # check if agent is blocking inheritance; cache per-agent in that case,
    # otherwise share the cache entry per site/monitoring/platform
    if self.block_policy_inheritance:
        cache_key = f"agent_{self.agent_id}_tasks"
    elif self.policy:
        cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
    else:
        cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"
    cached_tasks = cache.get(cache_key)
    if isinstance(cached_tasks, list):
        return cached_tasks
    else:
        # get agent tasks based on policies
        tasks = Policy.get_policy_tasks(self)
        cache.set(cache_key, tasks, 600)
        return tasks
async def nats_cmd(
    self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
    """Send *data* (msgpack-encoded) to this agent's NATS subject.

    With wait=True: returns the decoded reply, "timeout" when the agent
    doesn't answer within *timeout* seconds, or "natsdown" when the NATS
    server can't be reached. With wait=False: publish fire-and-forget
    (returns None implicitly).
    """
    opts = setup_nats_options()
    try:
        nc = await nats.connect(**opts)
    except:
        # NOTE(review): bare except also swallows CancelledError — consider
        # narrowing to Exception
        return "natsdown"
    if wait:
        try:
            msg = await nc.request(
                self.agent_id, msgpack.dumps(data), timeout=timeout
            )
        except TimeoutError:
            ret = "timeout"
        else:
            try:
                ret = msgpack.loads(msg.data)
            except Exception as e:
                # reply arrived but couldn't be decoded; surface the error text
                ret = str(e)
                logger.error(e)
        await nc.close()
        return ret
    else:
        await nc.publish(self.agent_id, msgpack.dumps(data))
        await nc.flush()
        await nc.close()
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
    """
    Attempt to recover the agent.

    mode "tacagent": restart the Tactical agent service via MeshCentral
    (fire-and-forget). mode "mesh": ask the agent over NATS to recover its
    mesh component.

    Return type: tuple(message: str, error: bool)
    """
    if mode == "tacagent":
        # platform-specific restart command; shell 3 = POSIX shell, 1 = cmd
        if self.plat == AgentPlat.LINUX:
            cmd = "systemctl restart tacticalagent.service"
            shell = 3
        elif self.plat == AgentPlat.DARWIN:
            cmd = "launchctl kickstart -k system/tacticalagent"
            shell = 3
        else:
            cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
            shell = 1
        asyncio.run(
            send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
        )
        return "ok", False
    elif mode == "mesh":
        data = {"func": "recover", "payload": {"mode": mode}}
        if wait:
            r = asyncio.run(self.nats_cmd(data, timeout=20))
            if r == "ok":
                return "ok", False
            else:
                return str(r), True
        else:
            asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
        return "ok", False
    return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
    """Serialize the agent for audit logging.

    NOTE(review): the first parameter is named `agent` but occupies the
    instance slot (i.e. called as some_agent.serialize()); confirm against
    callers before renaming.
    """
    # serializes the agent and returns json
    from .serializers import AgentAuditSerializer

    return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
    """Delete duplicate Windows-update rows, keeping only the newest version
    of each KB. Best-effort: any error silently aborts (outer suppress)."""
    with suppress(Exception):
        pks = []  # list of pks to delete
        kbs = list(self.winupdates.values_list("kb", flat=True))
        d = Counter(kbs)
        dupes = [k for k, v in d.items() if v > 1]
        for dupe in dupes:
            titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
            # extract the version from the title and sort from oldest to newest
            # skip if no version info is available therefore nothing to parse
            try:
                # matches "(Version x)" and the Portuguese "(Versão x)"
                matches = r"(Version|Versão)"
                pattern = r"\(" + matches + r"(.*?)\)"
                vers = [
                    re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
                    for i in titles
                ]
                sorted_vers = sorted(vers, key=LooseVersion)
            except:
                continue
            # append all but the latest version to our list of pks to delete
            for ver in sorted_vers[:-1]:
                q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
                pks.append(q.first().pk)
        pks = list(set(pks))
        self.winupdates.filter(pk__in=pks).delete()
def should_create_alert(
    self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
    """True when an availability alert should be created: any agent-level
    overdue alert flag is set, or the alert template always alerts."""
    agent_overdue = (
        self.overdue_dashboard_alert
        or self.overdue_email_alert
        or self.overdue_text_alert
    )
    template_always = alert_template and (
        alert_template.agent_always_alert
        or alert_template.agent_always_email
        or alert_template.agent_always_text
    )
    return bool(agent_overdue or template_always)
def send_outage_email(self) -> None:
    """Email an overdue-data notification for this agent via core settings."""
    CORE = get_core_settings()
    CORE.send_mail(
        f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
        (
            f"Data has not been received from client {self.client.name}, "
            f"site {self.site.name}, "
            f"agent {self.hostname} "
            "within the expected time."
        ),
        alert_template=self.alert_template,
    )
def send_recovery_email(self) -> None:
    """Email a data-received (recovery) notification for this agent."""
    CORE = get_core_settings()
    CORE.send_mail(
        f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
        (
            f"Data has been received from client {self.client.name}, "
            f"site {self.site.name}, "
            f"agent {self.hostname} "
            "after an interruption in data transmission."
        ),
        alert_template=self.alert_template,
    )
def send_outage_sms(self) -> None:
    """SMS an overdue-data notification for this agent via core settings."""
    CORE = get_core_settings()
    CORE.send_sms(
        f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
        alert_template=self.alert_template,
    )
def send_recovery_sms(self) -> None:
    """SMS a data-received (recovery) notification for this agent."""
    CORE = get_core_settings()
    CORE.send_sms(
        f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
        alert_template=self.alert_template,
    )
def _audit_log_filter(user: "User") -> Q:
    """Build a Q filter restricting audit-log visibility for *user* based on
    the sites/clients the user's role can view.

    Fix: the client-permission branch assigned its Q to `sites_queryset`,
    clobbering the site-permission filter and leaving `clients_queryset`
    permanently empty; it now assigns `clients_queryset` as intended.
    """
    role = user.get_and_set_role_cache()
    if user.is_superuser or (role and getattr(role, "is_superuser")):
        return Q()
    # make sure non-superusers with empty roles aren't permitted
    # NOTE(review): an empty Q() matches everything — confirm this branch is
    # gated elsewhere, otherwise role-less users see all entries.
    elif not role:
        return Q()
    sites_queryset = Q()
    clients_queryset = Q()
    agent_filter = Q()
    can_view_clients = role.can_view_clients.all() if role else None
    can_view_sites = role.can_view_sites.all() if role else None
    if can_view_sites:
        agents = Agent.objects.filter(site__in=can_view_sites).values_list(
            "agent_id", flat=True
        )
        sites_queryset = Q(agent_id__in=agents)
        # also include entries with no agent at all
        agent_filter = Q(agent_id=None)
    if can_view_clients:
        agents = Agent.objects.filter(site__client__in=can_view_clients).values_list(
            "agent_id", flat=True
        )
        clients_queryset = Q(agent_id__in=agents)  # was: sites_queryset (bug)
        agent_filter = Q(agent_id=None)
    return Q(sites_queryset | clients_queryset | agent_filter)
189,029 | import threading
from contextlib import suppress
from typing import Any, Dict, Optional
from django.conf import settings
from python_ipware import IpWare
from rest_framework.exceptions import AuthenticationFailed
from tacticalrmm.constants import DEMO_NOT_ALLOWED
from tacticalrmm.helpers import notify_error
request_local = threading.local()
def get_username() -> Optional[str]:
    """Username stored on the current thread-local request context, or None."""
    return getattr(request_local, "username", None)
189,030 | import threading
from contextlib import suppress
from typing import Any, Dict, Optional
from django.conf import settings
from python_ipware import IpWare
from rest_framework.exceptions import AuthenticationFailed
from tacticalrmm.constants import DEMO_NOT_ALLOWED
from tacticalrmm.helpers import notify_error
request_local = threading.local()
def get_debug_info() -> Dict[str, Any]:
    """Debug info dict stored on the current thread-local request context
    (empty dict when unset)."""
    return getattr(request_local, "debug_info", {})
189,031 | import json
import random
from django.conf import settings
from rest_framework.response import Response
SVC_FILE = settings.BASE_DIR.joinpath("tacticalrmm/test_data/winsvcs.json")
def demo_get_services():
    """Demo mode: return canned Windows services from the test-data file."""
    with open(SVC_FILE, "r") as f:
        svcs = json.load(f)
    return Response(svcs)
189,032 | import json
import random
from django.conf import settings
from rest_framework.response import Response
PROCS_FILE = settings.BASE_DIR.joinpath("tacticalrmm/test_data/procs.json")
def demo_get_procs():
    """Demo mode: return the canned process list with randomized memory and
    CPU figures.

    Fix: the original rebuilt the same output dict once per key of each
    process record (a pointless inner loop whose last iteration won); each
    record is now mapped exactly once.
    """
    with open(PROCS_FILE, "r") as f:
        procs = json.load(f)
    ret = []
    for proc in procs:
        if not proc:
            # preserve prior behavior: an empty record produced an empty entry
            ret.append({})
            continue
        ret.append(
            {
                "name": proc["name"],
                "pid": proc["pid"],
                "membytes": random.randrange(423424, 938921325),
                "username": proc["username"],
                "id": proc["id"],
                "cpu_percent": "{:.2f}".format(random.uniform(0.1, 99.4)),
            }
        )
    return Response(ret)
189,033 | import json
import random
from django.conf import settings
from rest_framework.response import Response
EVT_LOG_FILE = settings.BASE_DIR.joinpath("tacticalrmm/test_data/appeventlog.json")
def demo_get_eventlog():
    """Demo mode: return the canned application event log from test data."""
    with open(EVT_LOG_FILE, "r") as f:
        logs = json.load(f)
    return Response(logs)
189,034 | import asyncio
from typing import TYPE_CHECKING, Any
import msgpack
import nats
from nats.errors import TimeoutError as NatsTimeout
from tacticalrmm.exceptions import NatsDown
from tacticalrmm.helpers import setup_nats_options
async def _anats_message(*, nc: "NClient", subject: str, data: "NATS_DATA") -> None:
    """Publish *data* (msgpack-encoded) on *subject*; silently drop the
    message when it cannot be serialized (deliberate best-effort).

    Fix: bare `except:` narrowed to `except Exception:` so CancelledError /
    KeyboardInterrupt are no longer swallowed.
    """
    try:
        payload = msgpack.dumps(data)
    except Exception:
        return
    await nc.publish(subject=subject, payload=payload)
The provided code snippet includes necessary dependencies for implementing the `abulk_nats_command` function. Write a Python function `async def abulk_nats_command(*, items: "BULK_NATS_TASKS") -> None` to solve the following problem:
Fire and forget
Here is the function:
async def abulk_nats_command(*, items: "BULK_NATS_TASKS") -> None:
    """Fire and forget: publish each (subject, data) pair over one NATS
    connection, flush, and close.

    Raises:
        NatsDown: when the NATS server cannot be reached (fix: the original
        connection error is now chained as the cause for debuggability).
    """
    opts = setup_nats_options()
    try:
        nc = await nats.connect(**opts)
    except Exception as e:
        raise NatsDown from e
    tasks = [_anats_message(nc=nc, subject=item[0], data=item[1]) for item in items]
    await asyncio.gather(*tasks)
    await nc.flush()
    await nc.close()
189,035 | import asyncio
from typing import TYPE_CHECKING, Any
import msgpack
import nats
from nats.errors import TimeoutError as NatsTimeout
from tacticalrmm.exceptions import NatsDown
from tacticalrmm.helpers import setup_nats_options
NATS_DATA = dict[str, Any]
async def a_nats_cmd(
    *, nc: "NClient", sub: str, data: NATS_DATA, timeout: int = 10
) -> str | Any:
    """Send a msgpack-encoded request on *sub* and return the decoded reply.

    Returns the literal string "timeout" when no reply arrives within
    *timeout* seconds, or the stringified exception when decoding fails.
    """
    try:
        msg = await nc.request(
            subject=sub, payload=msgpack.dumps(data), timeout=timeout
        )
    except NatsTimeout:
        return "timeout"
    try:
        return msgpack.loads(msg.data)
    except Exception as e:
        return str(e)
189,036 | import os
import sys
from contextlib import suppress
from datetime import timedelta
from pathlib import Path
LOG_DIR = os.path.join(BASE_DIR, "tacticalrmm/private/log")
TRMM_LOG_TO = "file"
if "GHACTIONS" in os.environ:
DEBUG = False
ADMIN_ENABLED = False
DEMO = False
import warnings
from django.core.cache import CacheKeyWarning
def get_log_level() -> str:
    """Log level from the TRMM_LOG_LEVEL env var, falling back to the
    module-level TRMM_LOG_LEVEL default."""
    if "TRMM_LOG_LEVEL" in os.environ:
        return os.environ["TRMM_LOG_LEVEL"]
    return TRMM_LOG_LEVEL
if "GHACTIONS" in os.environ:
print("-----------------------GHACTIONS----------------------------")
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": "pipeline",
"USER": "pipeline",
"PASSWORD": "pipeline123456",
"HOST": "127.0.0.1",
"PORT": "",
}
}
SECRET_KEY = "abcdefghijklmnoptravis123456789"
ALLOWED_HOSTS = ["api.example.com"]
ADMIN_URL = "abc123456/"
CORS_ORIGIN_WHITELIST = ["https://rmm.example.com"]
MESH_USERNAME = "pipeline"
MESH_SITE = "https://example.com"
MESH_TOKEN_KEY = "bd65e957a1e70c622d32523f61508400d6cd0937001a7ac12042227eba0b9ed625233851a316d4f489f02994145f74537a331415d00047dbbf13d940f556806dffe7a8ce1de216dc49edbad0c1a7399c"
REDIS_HOST = "localhost"
def configure_logging_handler():
    """Build the Django LOGGING handler config: stream to stdout when
    TRMM_LOG_TO is "stdout", otherwise write to trmm_debug.log in LOG_DIR."""
    handler = {
        "level": get_log_level(),
        "formatter": "verbose",
    }
    destination = os.getenv("TRMM_LOG_TO", TRMM_LOG_TO)
    if destination == "stdout":
        handler["class"] = "logging.StreamHandler"
        handler["stream"] = sys.stdout
    else:
        handler["class"] = "logging.FileHandler"
        handler["filename"] = os.path.join(LOG_DIR, "trmm_debug.log")
    return handler
189,037 | import os
import random
import secrets
import string
from pathlib import Path
from typing import TYPE_CHECKING, Any
from urllib.parse import urlparse
from zoneinfo import ZoneInfo
from cryptography import x509
from django.conf import settings
from django.utils import timezone as djangotime
from rest_framework import status
from rest_framework.response import Response
def notify_error(msg: str) -> Response:
    """Wrap *msg* in a DRF 400 Bad Request response."""
    return Response(msg, status=status.HTTP_400_BAD_REQUEST)
189,038 | import os
import random
import secrets
import string
from pathlib import Path
from typing import TYPE_CHECKING, Any
from urllib.parse import urlparse
from zoneinfo import ZoneInfo
from cryptography import x509
from django.conf import settings
from django.utils import timezone as djangotime
from rest_framework import status
from rest_framework.response import Response
The provided code snippet includes necessary dependencies for implementing the `date_is_in_past` function. Write a Python function `def date_is_in_past(*, datetime_obj: "datetime", agent_tz: str) -> bool` to solve the following problem:
datetime_obj must be a naive datetime
Here is the function:
def date_is_in_past(*, datetime_obj: "datetime", agent_tz: str) -> bool:
    """
    datetime_obj must be a naive datetime
    """
    # interpret the naive datetime in the agent's timezone, then compare in UTC
    localized = datetime_obj.replace(tzinfo=ZoneInfo(agent_tz))
    utc_time = localized.astimezone(ZoneInfo("UTC"))
    return djangotime.now() > utc_time
189,039 | import os
import random
import secrets
import string
from pathlib import Path
from typing import TYPE_CHECKING, Any
from urllib.parse import urlparse
from zoneinfo import ZoneInfo
from cryptography import x509
from django.conf import settings
from django.utils import timezone as djangotime
from rest_framework import status
from rest_framework.response import Response
def get_webdomain() -> str:
    """Hostname of the first CORS-whitelisted frontend origin."""
    return urlparse(settings.CORS_ORIGIN_WHITELIST[0]).netloc
189,040 | import os
import random
import secrets
import string
from pathlib import Path
from typing import TYPE_CHECKING, Any
from urllib.parse import urlparse
from zoneinfo import ZoneInfo
from cryptography import x509
from django.conf import settings
from django.utils import timezone as djangotime
from rest_framework import status
from rest_framework.response import Response
The provided code snippet includes necessary dependencies for implementing the `rand_range` function. Write a Python function `def rand_range(min: int, max: int) -> float` to solve the following problem:
Input is milliseconds. Returns a float rounded to 2 decimal places.
Here is the function:
def rand_range(min: int, max: int) -> float:
    """
    Input is milliseconds.
    Returns seconds as a float rounded (not truncated) to 2 decimal places.
    """
    # NOTE: `min`/`max` shadow builtins but are kept for interface compatibility.
    return round(random.uniform(min, max) / 1000, 2)
189,041 | import os
import random
import secrets
import string
from pathlib import Path
from typing import TYPE_CHECKING, Any
from urllib.parse import urlparse
from zoneinfo import ZoneInfo
from cryptography import x509
from django.conf import settings
from django.utils import timezone as djangotime
from rest_framework import status
from rest_framework.response import Response
def get_nats_url() -> str:
    """NATS server URL assembled from the configured host, protocol, and port."""
    _, _, connect_host = get_nats_hosts()
    proto = get_nats_internal_protocol()
    port, _ = get_nats_ports()
    return f"{proto}://{connect_host}:{port}"
def setup_nats_options() -> dict[str, Any]:
    """Connection options for nats.connect() used by the Django backend.

    Authenticates as the "tacticalrmm" user with SECRET_KEY as password.
    """
    opts = {
        "servers": get_nats_url(),
        "user": "tacticalrmm",
        "name": "trmm-django",
        "password": settings.SECRET_KEY,
        "connect_timeout": 3,
        "max_reconnect_attempts": 2,
    }
    return opts
189,042 | import os
import random
import secrets
import string
from pathlib import Path
from typing import TYPE_CHECKING, Any
from urllib.parse import urlparse
from zoneinfo import ZoneInfo
from cryptography import x509
from django.conf import settings
from django.utils import timezone as djangotime
from rest_framework import status
from rest_framework.response import Response
def make_random_password(*, len: int) -> str:
    """Cryptographically secure random alphanumeric string of length *len*.

    NOTE: `len` shadows the builtin but is part of the keyword-only interface.
    """
    charset = string.ascii_letters + string.digits
    return "".join(secrets.choice(charset) for _ in range(len))
189,043 | import os
import random
import secrets
import string
from pathlib import Path
from typing import TYPE_CHECKING, Any
from urllib.parse import urlparse
from zoneinfo import ZoneInfo
from cryptography import x509
from django.conf import settings
from django.utils import timezone as djangotime
from rest_framework import status
from rest_framework.response import Response
def get_certs() -> tuple[str, str]:
    """Paths to the TLS certificate and key: explicit CERT_FILE/KEY_FILE
    settings when present, otherwise the Let's Encrypt live paths derived
    from the first allowed host's parent domain."""
    domain = settings.ALLOWED_HOSTS[0].split(".", 1)[1]
    cert_file = f"/etc/letsencrypt/live/{domain}/fullchain.pem"
    key_file = f"/etc/letsencrypt/live/{domain}/privkey.pem"
    if hasattr(settings, "CERT_FILE") and hasattr(settings, "KEY_FILE"):
        cert_file = settings.CERT_FILE
        key_file = settings.KEY_FILE
    return cert_file, key_file
def days_until_cert_expires() -> int:
    """Whole days until the configured TLS certificate expires (may be
    negative when already expired)."""
    cert_file, _ = get_certs()
    cert_bytes = Path(cert_file).read_bytes()
    cert = x509.load_pem_x509_certificate(cert_bytes)
    # NOTE(review): `not_valid_after` is deprecated in cryptography >= 42 in
    # favor of `not_valid_after_utc` — confirm the pinned version.
    expires = cert.not_valid_after.replace(tzinfo=ZoneInfo("UTC"))
    delta = expires - djangotime.now()
    return delta.days
189,044 | from django.utils import timezone as djangotime
from django.utils.translation import gettext_lazy as _
from rest_framework import exceptions
from rest_framework.authentication import HTTP_HEADER_ENCODING, BaseAuthentication
from accounts.models import APIKey
The provided code snippet includes necessary dependencies for implementing the `get_authorization_header` function. Write a Python function `def get_authorization_header(request) -> str` to solve the following problem:
Return request's 'Authorization:' header, as a bytestring. Hide some test client ickyness where the header can be unicode.
Here is the function:
def get_authorization_header(request) -> bytes:
    """
    Return the request's 'X-API-KEY' header as a bytestring.
    Hide some test client ickyness where the header can be unicode.

    Fixes: the return annotation said `str` but the function always returns
    bytes (the default is b"" and str values are encoded); the docstring
    also claimed the 'Authorization:' header while the code reads X-API-KEY.
    """
    auth = request.META.get("HTTP_X_API_KEY", b"")
    if isinstance(auth, str):
        # Work around django test client oddness
        auth = auth.encode(HTTP_HEADER_ENCODING)
    return auth
189,045 | import datetime
import inspect
import json
import re
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union, cast
from zoneinfo import ZoneInfo
import yaml
from django.apps import apps
from jinja2 import Environment, FunctionLoader
from rest_framework.serializers import ValidationError
from weasyprint import CSS, HTML
from weasyprint.text.fonts import FontConfiguration
from tacticalrmm.utils import get_db_value
from . import custom_filters
from .constants import REPORTING_MODELS
from .markdown.config import Markdown
from .models import ReportAsset, ReportDataQuery, ReportHTMLTemplate, ReportTemplate
for name, func in inspect.getmembers(custom_filters, inspect.isfunction):
env.filters[name] = func
class ReportTemplate(models.Model):
    """A report definition: its markdown/html source plus rendering options."""

    # Unique display name; also used as the template lookup key.
    name = models.CharField(max_length=200, unique=True)
    # Main template body (markdown or html depending on `type`).
    template_md = models.TextField()
    # Optional stylesheet applied when rendering.
    template_css = models.TextField(null=True, blank=True)
    # Optional base HTML document this template extends.
    template_html = models.ForeignKey(
        "ReportHTMLTemplate",
        related_name="htmltemplate",
        on_delete=models.DO_NOTHING,
        null=True,
        blank=True,
    )
    # Source format of template_md.
    type = models.CharField(
        max_length=15,
        choices=ReportFormatType.choices,
        default=ReportFormatType.MARKDOWN,
    )
    # Variables made available at render time.
    # NOTE(review): appears to be a YAML document — confirm against the renderer.
    template_variables = models.TextField(blank=True, default="")
    # NOTE(review): presumably names of entities the template depends on
    # (resolved before rendering) — confirm against callers.
    depends_on = ArrayField(
        models.CharField(max_length=20, blank=True), blank=True, default=list
    )

    def __str__(self) -> str:
        return self.name
class ReportHTMLTemplate(models.Model):
    """A reusable base HTML document that report templates can extend."""

    # Unique name; also the lookup key used by the template loader.
    name = models.CharField(max_length=200, unique=True)
    # Full HTML source.
    html = models.TextField()

    def __str__(self) -> str:
        return self.name
def db_template_loader(template_name: str) -> Optional[str]:
    """Jinja2 FunctionLoader callback that resolves templates from the DB.

    Checks ReportHTMLTemplate first, then ReportTemplate; returns None when
    neither table has a row with the given name.
    """
    try:
        return ReportHTMLTemplate.objects.get(name=template_name).html
    except ReportHTMLTemplate.DoesNotExist:
        try:
            return ReportTemplate.objects.get(name=template_name).template_md
        except ReportTemplate.DoesNotExist:
            return None
189,046 | import datetime
import inspect
import json
import re
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union, cast
from zoneinfo import ZoneInfo
import yaml
from django.apps import apps
from jinja2 import Environment, FunctionLoader
from rest_framework.serializers import ValidationError
from weasyprint import CSS, HTML
from weasyprint.text.fonts import FontConfiguration
from tacticalrmm.utils import get_db_value
from . import custom_filters
from .constants import REPORTING_MODELS
from .markdown.config import Markdown
from .models import ReportAsset, ReportDataQuery, ReportHTMLTemplate, ReportTemplate
def generate_pdf(*, html: str, css: str = "") -> bytes:
    """Render an HTML document (plus optional CSS) to PDF bytes via WeasyPrint."""
    fonts = FontConfiguration()
    stylesheet = CSS(string=css, font_config=fonts)
    document = HTML(string=html)
    result: bytes = document.write_pdf(stylesheets=[stylesheet], font_config=fonts)
    return result
189,047 | import datetime
import inspect
import json
import re
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union, cast
from zoneinfo import ZoneInfo
import yaml
from django.apps import apps
from jinja2 import Environment, FunctionLoader
from rest_framework.serializers import ValidationError
from weasyprint import CSS, HTML
from weasyprint.text.fonts import FontConfiguration
from tacticalrmm.utils import get_db_value
from . import custom_filters
from .constants import REPORTING_MODELS
from .markdown.config import Markdown
from .models import ReportAsset, ReportDataQuery, ReportHTMLTemplate, ReportTemplate
# Shared Jinja2 environment for all report rendering.  Templates are resolved
# from the database via db_template_loader.  Comment delimiters are remapped
# to {= ... =} so Jinja comments don't collide with other template syntax.
env = Environment(
    loader=FunctionLoader(db_template_loader),
    comment_start_string="{=",
    comment_end_string="=}",
    extensions=["jinja2.ext.do", "jinja2.ext.loopcontrols"],
)
# Expose project globals, and register every function defined in
# custom_filters as a Jinja filter under its own name.
env.globals.update(custom_globals)
for name, func in inspect.getmembers(custom_filters, inspect.isfunction):
    env.filters[name] = func
def prep_variables_for_template(
*,
variables: str,
dependencies: Optional[Dict[str, Any]] = None,
limit_query_results: Optional[int] = None,
) -> Dict[str, Any]:
Markdown = markdown.Markdown(extensions=markdown_ext)
class ReportHTMLTemplate(models.Model):
def __str__(self) -> str:
def generate_html(
    *,
    template: str,
    template_type: str,
    css: str = "",
    html_template: Optional[int] = None,
    variables: str = "",
    dependencies: Optional[Dict[str, int]] = None,
) -> Tuple[str, Dict[str, Any]]:
    """Render a report template to HTML.

    Converts markdown sources to HTML first, optionally prepends an
    ``{% extends %}`` tag for the selected base template, then renders with
    the resolved variables.  Returns (rendered html, variables dict).
    Raises a jinja2 error if the template fails to parse.
    """
    dependencies = dependencies or {}

    # Validate syntax before doing any conversion work.
    env.parse(template)

    if template_type == "markdown":
        source = Markdown.convert(template)
    else:
        source = template

    # Wire up the base template via {% extends %} when one is configured.
    if html_template:
        try:
            base_name = ReportHTMLTemplate.objects.get(pk=html_template).name
        except ReportHTMLTemplate.DoesNotExist:
            pass
        else:
            source = f"""{{% extends "{base_name}" %}}\n{source}"""

    resolved = prep_variables_for_template(
        variables=variables, dependencies=dependencies
    )
    return env.from_string(source).render(css=css, **resolved), resolved
189,048 | import datetime
import inspect
import json
import re
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union, cast
from zoneinfo import ZoneInfo
import yaml
from django.apps import apps
from jinja2 import Environment, FunctionLoader
from rest_framework.serializers import ValidationError
from weasyprint import CSS, HTML
from weasyprint.text.fonts import FontConfiguration
from tacticalrmm.utils import get_db_value
from . import custom_filters
from .constants import REPORTING_MODELS
from .markdown.config import Markdown
from .models import ReportAsset, ReportDataQuery, ReportHTMLTemplate, ReportTemplate
for name, func in inspect.getmembers(custom_filters, inspect.isfunction):
env.filters[name] = func
import yaml
yaml.SafeLoader.add_constructor("!now", construct_yaml_now)
yaml.SafeDumper.add_representer(datetime, represent_datetime_now)
class ReportDataQuery(models.Model):
    """A named, reusable JSON data-source query for report templates."""

    name = models.CharField(max_length=50, unique=True)
    # JSON query definition; inlined into template variables by
    # make_dataqueries_inline when referenced by name.
    json_query = models.JSONField()
def make_dataqueries_inline(*, variables: str) -> str:
    """Replace named data-source references with their stored JSON queries.

    *variables* is a YAML document whose ``data_sources`` mapping may contain
    string values naming a ReportDataQuery row; each such name is replaced
    inline with that row's json_query.  Unknown names are left untouched and
    unparseable YAML is treated as an empty document.  Returns the re-dumped
    YAML string.
    """
    try:
        variables_obj = yaml.safe_load(variables) or {}
    except yaml.YAMLError:
        # yaml.parser.ParserError is a YAMLError subclass, so a single catch
        # covers both cases the previous version listed separately.
        variables_obj = {}

    data_sources = variables_obj.get("data_sources", {})
    if isinstance(data_sources, dict):
        for key, value in data_sources.items():
            if isinstance(value, str):
                try:
                    # data_sources is the same dict object stored in
                    # variables_obj, so mutating it updates the document.
                    data_sources[key] = ReportDataQuery.objects.get(
                        name=value
                    ).json_query
                except ReportDataQuery.DoesNotExist:
                    continue
    return yaml.dump(variables_obj)
189,049 | import datetime
import inspect
import json
import re
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union, cast
from zoneinfo import ZoneInfo
import yaml
from django.apps import apps
from jinja2 import Environment, FunctionLoader
from rest_framework.serializers import ValidationError
from weasyprint import CSS, HTML
from weasyprint.text.fonts import FontConfiguration
from tacticalrmm.utils import get_db_value
from . import custom_filters
from .constants import REPORTING_MODELS
from .markdown.config import Markdown
from .models import ReportAsset, ReportDataQuery, ReportHTMLTemplate, ReportTemplate
# Matches inline asset references of the form "asset://<uuid>".  Group 1 is
# the whole "asset://..." URL, group 2 just the UUID.  NOTE(review): the
# version nibble accepts 0-5 and the variant nibble 0/8/9/a/b, so this is
# looser than a strict UUIDv4 pattern — confirm that is intentional.
RE_ASSET_URL = re.compile(
    r"(asset://([0-9a-f]{8}-[0-9a-f]{4}-[0-5][0-9a-f]{3}-[089ab][0-9a-f]{3}-[0-9a-f]{12}))"
)
class ReportAsset(models.Model):
    """A user-uploaded file referenced from report templates via asset:// URLs."""

    # UUID primary key so templates can reference assets as asset://<uuid>.
    id = models.UUIDField(
        primary_key=True, unique=True, default=uuid.uuid4, editable=False
    )
    # Stored on the dedicated report-assets filesystem storage.
    file = models.FileField(storage=get_report_assets_fs, unique=True)

    def __str__(self) -> str:
        return f"{self.id} - {self.file}"
def normalize_asset_url(text: str, type: Literal["pdf", "html", "plaintext"]) -> str:
    """Rewrite asset:// references in *text* for the requested output type.

    For "html" output each reference becomes the asset's public URL (with its
    id appended as a query param); otherwise it becomes a file:// path so the
    renderer can read the file directly.  References to unknown asset ids are
    left unchanged.
    """
    result = text
    for full_url, asset_id in RE_ASSET_URL.findall(text):
        try:
            asset = ReportAsset.objects.get(id=asset_id)
        except ReportAsset.DoesNotExist:
            continue
        if type == "html":
            result = result.replace(
                f"asset://{asset_id}", f"{asset.file.url}?id={asset_id}"
            )
        else:
            result = result.replace(f"{full_url}", f"file://{asset.file.path}")
    return result
189,050 | import datetime
import inspect
import json
import re
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union, cast
from zoneinfo import ZoneInfo
import yaml
from django.apps import apps
from jinja2 import Environment, FunctionLoader
from rest_framework.serializers import ValidationError
from weasyprint import CSS, HTML
from weasyprint.text.fonts import FontConfiguration
from tacticalrmm.utils import get_db_value
from . import custom_filters
from .constants import REPORTING_MODELS
from .markdown.config import Markdown
from .models import ReportAsset, ReportDataQuery, ReportHTMLTemplate, ReportTemplate
RE_ASSET_URL = re.compile(
r"(asset://([0-9a-f]{8}-[0-9a-f]{4}-[0-5][0-9a-f]{3}-[089ab][0-9a-f]{3}-[0-9a-f]{12}))"
)
for name, func in inspect.getmembers(custom_filters, inspect.isfunction):
env.filters[name] = func
class ReportAsset(models.Model):
id = models.UUIDField(
primary_key=True, unique=True, default=uuid.uuid4, editable=False
)
file = models.FileField(storage=get_report_assets_fs, unique=True)
def __str__(self) -> str:
return f"{self.id} - {self.file}"
def base64_encode_assets(template: str) -> List[Dict[str, Any]]:
    """Collect every asset referenced in *template* as a base64 payload.

    Returns a list of {"id", "name", "file"} dicts — "file" being the base64
    string of the file contents — de-duplicated by asset id.  References to
    assets that no longer exist are skipped.
    """
    import base64

    assets: List[Dict[str, Any]] = []
    # set instead of list: O(1) membership checks while de-duplicating.
    seen_ids: set = set()
    for _, asset_id in RE_ASSET_URL.findall(template):
        if asset_id in seen_ids:
            continue
        try:
            asset = ReportAsset.objects.get(pk=asset_id)
        except ReportAsset.DoesNotExist:
            continue
        assets.append(
            {
                "id": asset.id,
                "name": asset.file.name,
                "file": base64.b64encode(asset.file.file.read()).decode("utf-8"),
            }
        )
        # need to convert uuid to str for comparison with the regex match
        seen_ids.add(str(asset.id))
    return assets
189,051 | import datetime
import inspect
import json
import re
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union, cast
from zoneinfo import ZoneInfo
import yaml
from django.apps import apps
from jinja2 import Environment, FunctionLoader
from rest_framework.serializers import ValidationError
from weasyprint import CSS, HTML
from weasyprint.text.fonts import FontConfiguration
from tacticalrmm.utils import get_db_value
from . import custom_filters
from .constants import REPORTING_MODELS
from .markdown.config import Markdown
from .models import ReportAsset, ReportDataQuery, ReportHTMLTemplate, ReportTemplate
for name, func in inspect.getmembers(custom_filters, inspect.isfunction):
env.filters[name] = func
def _generate_random_string(length: int = 6) -> str:
import random
import string
return "".join(random.choice(string.ascii_lowercase) for i in range(length))
class ReportHTMLTemplate(models.Model):
    """A reusable base HTML document that report templates can extend."""

    # Unique name; also the lookup key used by the template loader.
    name = models.CharField(max_length=200, unique=True)
    # Full HTML source.
    html = models.TextField()

    def __str__(self) -> str:
        return self.name
def _import_base_template(
    base_template_data: Optional[Dict[str, Any]] = None,
    overwrite: bool = False,
) -> Optional[int]:
    """Create or update a ReportHTMLTemplate from exported template data.

    Returns the resulting row's pk, or None when no data was supplied.  On a
    name collision the existing row is updated when *overwrite* is set;
    otherwise a new row is created under a randomized name.

    Raises ValidationError when 'name' or 'html' is missing.
    """
    if not base_template_data:
        return None

    name = base_template_data.get("name")
    html = base_template_data.get("html")
    if not name:
        raise ValidationError("base_template is missing 'name' key")
    if not html:
        raise ValidationError("base_template is missing 'html' field")

    existing = ReportHTMLTemplate.objects.filter(name=name).first()
    if existing is None:
        base_template = ReportHTMLTemplate.objects.create(name=name, html=html)
    elif overwrite:
        existing.html = html
        existing.save()
        base_template = existing
    else:
        # Keep both: import under a randomized, non-colliding name.
        base_template = ReportHTMLTemplate.objects.create(
            name=f"{name}_{_generate_random_string()}", html=html
        )

    base_template.refresh_from_db()
    return base_template.id
189,052 | import datetime
import inspect
import json
import re
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union, cast
from zoneinfo import ZoneInfo
import yaml
from django.apps import apps
from jinja2 import Environment, FunctionLoader
from rest_framework.serializers import ValidationError
from weasyprint import CSS, HTML
from weasyprint.text.fonts import FontConfiguration
from tacticalrmm.utils import get_db_value
from . import custom_filters
from .constants import REPORTING_MODELS
from .markdown.config import Markdown
from .models import ReportAsset, ReportDataQuery, ReportHTMLTemplate, ReportTemplate
for name, func in inspect.getmembers(custom_filters, inspect.isfunction):
env.filters[name] = func
def _generate_random_string(length: int = 6) -> str:
import random
import string
return "".join(random.choice(string.ascii_lowercase) for i in range(length))
class ReportTemplate(models.Model):
    """A report definition: its markdown/html source plus rendering options."""

    # Unique display name; also used as the template lookup key.
    name = models.CharField(max_length=200, unique=True)
    # Main template body (markdown or html depending on `type`).
    template_md = models.TextField()
    # Optional stylesheet applied when rendering.
    template_css = models.TextField(null=True, blank=True)
    # Optional base HTML document this template extends.
    template_html = models.ForeignKey(
        "ReportHTMLTemplate",
        related_name="htmltemplate",
        on_delete=models.DO_NOTHING,
        null=True,
        blank=True,
    )
    # Source format of template_md.
    type = models.CharField(
        max_length=15,
        choices=ReportFormatType.choices,
        default=ReportFormatType.MARKDOWN,
    )
    # Variables made available at render time.
    # NOTE(review): appears to be a YAML document — confirm against the renderer.
    template_variables = models.TextField(blank=True, default="")
    # NOTE(review): presumably names of entities the template depends on
    # (resolved before rendering) — confirm against callers.
    depends_on = ArrayField(
        models.CharField(max_length=20, blank=True), blank=True, default=list
    )

    def __str__(self) -> str:
        return self.name
def _import_report_template(
    report_template_data: Dict[str, Any],
    base_template_id: Optional[int] = None,
    overwrite: bool = False,
) -> "ReportTemplate":
    """Create or update a ReportTemplate from exported template data.

    Pops 'name' out of the data dict; the remaining keys are applied as model
    fields.  On a name collision the existing row is updated when *overwrite*
    is set; otherwise a new row is created under a randomized name.

    Raises ValidationError when the data, its 'name', or its 'template_md'
    is missing.
    """
    # Guard clause: reject an empty payload up front.
    if not report_template_data:
        raise ValidationError("'template' key is required in input")

    name = report_template_data.pop("name", None)
    if not name:
        raise ValidationError("template requires a 'name' key")
    if not report_template_data.get("template_md"):
        raise ValidationError("template requires a 'template_md' field")

    existing = ReportTemplate.objects.filter(name=name).first()
    if existing is None:
        report_template = ReportTemplate.objects.create(
            name=name, template_html_id=base_template_id, **report_template_data
        )
    elif overwrite:
        for field, value in report_template_data.items():
            setattr(existing, field, value)
        existing.save()
        report_template = existing
    else:
        # Keep both: import under a randomized, non-colliding name.
        report_template = ReportTemplate.objects.create(
            name=f"{name}_{_generate_random_string()}",
            template_html_id=base_template_id,
            **report_template_data,
        )

    report_template.refresh_from_db()
    return report_template
189,053 | import datetime
import inspect
import json
import re
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union, cast
from zoneinfo import ZoneInfo
import yaml
from django.apps import apps
from jinja2 import Environment, FunctionLoader
from rest_framework.serializers import ValidationError
from weasyprint import CSS, HTML
from weasyprint.text.fonts import FontConfiguration
from tacticalrmm.utils import get_db_value
from . import custom_filters
from .constants import REPORTING_MODELS
from .markdown.config import Markdown
from .models import ReportAsset, ReportDataQuery, ReportHTMLTemplate, ReportTemplate
for name, func in inspect.getmembers(custom_filters, inspect.isfunction):
env.filters[name] = func
def decode_base64_asset(asset: str) -> bytes:
    """Decode a base64 string (as produced by base64_encode_assets) to bytes."""
    import base64

    raw = asset.encode("utf-8")
    return base64.b64decode(raw)
class ReportAsset(models.Model):
    """A user-uploaded file referenced from report templates via asset:// URLs."""

    # UUID primary key so templates can reference assets as asset://<uuid>.
    id = models.UUIDField(
        primary_key=True, unique=True, default=uuid.uuid4, editable=False
    )
    # Stored on the dedicated report-assets filesystem storage.
    file = models.FileField(storage=get_report_assets_fs, unique=True)

    def __str__(self) -> str:
        return f"{self.id} - {self.file}"
# Module-level storage instance for report assets: files live under
# REPORTING_ASSETS_BASE_PATH and are served beneath the reporting base URL.
report_assets_fs = ReportAssetStorage(
    location=settings.REPORTING_ASSETS_BASE_PATH,
    base_url=f"{settings.REPORTING_BASE_URL}/reporting/assets/",
)
def _import_assets(assets: List[Dict[str, Any]]) -> None:
    """Recreate ReportAsset rows (and their files) from exported payloads.

    Each entry must carry 'id', 'name' and a base64-encoded 'file' body.
    Silently does nothing when *assets* is not a list.
    """
    import io
    import os

    from django.core.files import File

    from .storage import report_assets_fs

    if not isinstance(assets, list):
        return

    for asset in assets:
        folder = report_assets_fs.getreldir(path=asset["name"])
        # Let the storage pick a free filename if the target already exists.
        target = report_assets_fs.get_available_name(
            os.path.join(folder, asset["name"])
        )
        content = io.BytesIO(decode_base64_asset(asset["file"]))
        ReportAsset(id=asset["id"], file=File(content, name=target)).save()
189,054 | import re
from typing import Any, List
from markdown import Extension, Markdown
from markdown.postprocessors import Postprocessor
from markdown.preprocessors import Preprocessor
class IgnoreJinjaExtension(Extension):
    """Extension for looking up {% block tag %}"""

    def extendMarkdown(self, md: Markdown) -> None:
        """Add IgnoreJinjaExtension to Markdown instance."""
        # Priority 0 registers these at the very end of each pipeline, so the
        # pre-processor runs after the built-in preprocessors and the
        # post-processor after the built-in postprocessors.
        md.preprocessors.register(IgnoreJinjaPreprocessor(md), "preignorejinja", 0)
        md.postprocessors.register(IgnoreJinjaPostprocessor(md), "postignorejinja", 0)
The provided code snippet includes necessary dependencies for implementing the `makeExtension` function. Write a Python function `def makeExtension(*args: Any, **kwargs: Any) -> IgnoreJinjaExtension` to solve the following problem:
set up extension.
Here is the function:
def makeExtension(*args: Any, **kwargs: Any) -> IgnoreJinjaExtension:
    """Entry point used by python-markdown to instantiate the extension."""
    extension = IgnoreJinjaExtension(*args, **kwargs)
    return extension
189,055 | import json
from typing import TYPE_CHECKING, Any, Dict, List, Tuple
from django.apps import apps
from django.conf import settings as djangosettings
from django.core.management.base import BaseCommand
from ...constants import REPORTING_MODELS
def traverse_model_fields(
    *, model: "Model", prefix: str = "", depth: int = 3
) -> Tuple[Dict[str, Any], Dict[str, Any], List[str], List[str]]:
    """Recursively map a Django model's fields to JSON-schema definitions.

    Returns a 4-tuple of (properties, patternProperties, select_related
    paths, flat field list).  ForeignKeys are followed up to *depth* levels,
    with *prefix* accumulating the Django "__" lookup path; many-to-many and
    reverse (one-to-many) relations are skipped.
    """
    filterObj: Dict[str, Any] = {}
    patternObj: Dict[str, Any] = {}
    select_related: List[str] = []
    field_list: List[str] = []

    if depth < 1:
        return filterObj, patternObj, select_related, field_list

    for field in model._meta.get_fields():
        field_type = field.get_internal_type()  # type: ignore
        if field_type == "CharField" and field.choices:  # type: ignore
            propDefinition = {
                "type": "string",
                "enum": [index for index, _ in field.choices],  # type: ignore
            }
        elif field_type == "BooleanField":
            propDefinition = {
                "type": "boolean",
            }
        elif field.many_to_many or field.one_to_many:
            # Many/reverse relations can't be expressed as flat filters.
            continue
        elif (
            field_type == "ForeignKey" or field.name == "id" or "Integer" in field_type
        ):
            propDefinition = {
                "type": "integer",
            }
            if field_type == "ForeignKey":
                select_related.append(prefix + field.name)
                related_model = field.related_model
                # Recurse into the related model.  Locals renamed from the
                # previous `filter`/`list` etc., which shadowed the builtins.
                sub_filter, sub_pattern, sub_select, sub_fields = traverse_model_fields(
                    model=related_model,  # type: ignore
                    prefix=prefix + field.name + "__",
                    depth=depth - 1,
                )
                filterObj = {**filterObj, **sub_filter}
                patternObj = {**patternObj, **sub_pattern}
                select_related += sub_select
                field_list += sub_fields
        else:
            propDefinition = {
                "type": "string",
            }

        filterObj[prefix + field.name] = propDefinition
        # Also allow lookup suffixes like "__gt", "__icontains", etc.
        patternObj["^" + prefix + field.name + "(__[a-zA-Z]+)*$"] = propDefinition
        field_list.append(prefix + field.name)

    return filterObj, patternObj, select_related, field_list
# (model name, app label) pairs exposed to the reporting engine.  Only models
# listed here can be targeted by report data queries / the schema generator.
REPORTING_MODELS = (
    ("Agent", "agents"),
    ("AgentCustomField", "agents"),
    ("AgentHistory", "agents"),
    ("Alert", "alerts"),
    ("Policy", "automation"),
    ("AutomatedTask", "autotasks"),
    ("TaskResult", "autotasks"),
    ("Check", "checks"),
    ("CheckResult", "checks"),
    ("CheckHistory", "checks"),
    ("Client", "clients"),
    ("ClientCustomField", "clients"),
    ("Site", "clients"),
    ("SiteCustomField", "clients"),
    ("GlobalKVStore", "core"),
    ("AuditLog", "logs"),
    ("DebugLog", "logs"),
    ("PendingAction", "logs"),
    ("ChocoSoftware", "software"),
    ("InstalledSoftware", "software"),
    ("WinUpdate", "winupdate"),
    ("WinUpdatePolicy", "winupdate"),
)
def generate_schema() -> None:
    """Build the JSON schema for report data queries and write it to staticfiles.

    Emits one `oneOf` entry per model in REPORTING_MODELS describing the
    filters, field selections and ordering that model supports, then writes
    the combined schema to reporting/schemas/query_schema.json.
    """
    oneOf = []
    for model, app in REPORTING_MODELS:
        Model = apps.get_model(app_label=app, model_name=model)
        filterObj, patternObj, select_related, field_list = traverse_model_fields(
            model=Model, depth=3
        )

        # Every field may be ordered ascending or descending ("-" prefix).
        order_by = [direction + f for f in field_list for direction in ("", "-")]

        oneOf.append(
            {
                "properties": {
                    "model": {"type": "string", "enum": [model.lower()]},
                    "filter": {
                        "type": "object",
                        "properties": filterObj,
                        "patternProperties": patternObj,
                    },
                    "exclude": {
                        "type": "object",
                        "properties": filterObj,
                        "patternProperties": patternObj,
                    },
                    "defer": {
                        "type": "array",
                        "items": {
                            "type": "string",
                            "minimum": 1,
                            "enum": field_list,
                        },
                    },
                    "only": {
                        "type": "array",
                        "items": {"type": "string", "minimum": 1, "enum": field_list},
                    },
                    "select_related": {
                        "type": "array",
                        "items": {
                            "type": "string",
                            "minimum": 1,
                            "enum": select_related,
                        },
                    },
                    "order_by": {"type": "string", "enum": order_by},
                },
            }
        )

    schema = {
        "$id": f"https://{djangosettings.ALLOWED_HOSTS[0]}/static/reporting/schemas/query_schema.json",
        "type": "object",
        "properties": {
            "model": {
                "type": "string",
                "enum": [name.lower() for name, _ in REPORTING_MODELS],
            },
            "custom_fields": {
                "type": "array",
                "items": {"type": "string", "minimum": 1},
            },
            "limit": {"type": "integer"},
            "count": {"type": "boolean"},
            "get": {"type": "boolean"},
            "first": {"type": "boolean"},
        },
        "required": ["model"],
        "oneOf": oneOf,
    }

    out_path = f"{djangosettings.STATICFILES_DIRS[0]}reporting/schemas/query_schema.json"
    with open(out_path, "w") as outfile:
        json.dump(schema, outfile)
189,056 | import os
import shutil
from django.core.files.storage import FileSystemStorage
from .settings import settings
report_assets_fs = ReportAssetStorage(
location=settings.REPORTING_ASSETS_BASE_PATH,
base_url=f"{settings.REPORTING_BASE_URL}/reporting/assets/",
)
def get_report_assets_fs():
    """Return the module-level report-assets storage instance.

    NOTE(review): presumably used as a FileField `storage=` callable so the
    storage is resolved lazily — confirm against the model definitions.
    """
    return report_assets_fs
189,057 | from contextlib import suppress
from zoneinfo import ZoneInfo
import validators
def as_tz(date_obj, tz, format="%b %d %Y, %I:%M %p"):
    """Template filter: convert *date_obj* to IANA timezone *tz* and format it."""
    localized = date_obj.astimezone(ZoneInfo(tz))
    return localized.strftime(format)
189,058 | from contextlib import suppress
from zoneinfo import ZoneInfo
import validators
def local_ips(wmi_detail):
    """Extract IPv4 addresses from an agent's WMI ``network_config`` detail.

    Returns a (possibly empty) list of address strings; malformed or missing
    entries are skipped rather than raising.
    """
    ret = []
    with suppress(Exception):
        for iface in wmi_detail["network_config"]:
            try:
                addr = [x["IPAddress"] for x in iface if "IPAddress" in x][0]
            except (IndexError, TypeError, KeyError):
                # Narrowed from a bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit) to the lookup failures we
                # actually expect from a malformed WMI structure.
                continue
            if addr is None:
                continue
            for ip in addr:
                if validators.ipv4(ip):
                    ret.append(ip)
    return ret
189,059 | import json
import os
import shutil
import uuid
from typing import Any, Dict, List, Literal, Optional, Union
import requests
from django.conf import settings as djangosettings
from django.core.exceptions import (
ObjectDoesNotExist,
PermissionDenied,
SuspiciousFileOperation,
)
from django.core.files.base import ContentFile
from django.db import transaction
from django.http import FileResponse, HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404
from jinja2.exceptions import TemplateError
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.serializers import (
BooleanField,
CharField,
ChoiceField,
IntegerField,
JSONField,
ListField,
ModelSerializer,
Serializer,
ValidationError,
)
from rest_framework.views import APIView
from tacticalrmm.utils import notify_error
from .models import ReportAsset, ReportDataQuery, ReportHTMLTemplate, ReportTemplate
from .permissions import GenerateReportPerms, ReportingPerms
from .storage import report_assets_fs
from .utils import (
_import_assets,
_import_base_template,
_import_report_template,
base64_encode_assets,
generate_html,
generate_pdf,
normalize_asset_url,
prep_variables_for_template,
)
report_assets_fs = ReportAssetStorage(
location=settings.REPORTING_ASSETS_BASE_PATH,
base_url=f"{settings.REPORTING_BASE_URL}/reporting/assets/",
)
def path_exists(value: str) -> None:
    """Serializer validator: reject paths absent from the report-assets storage."""
    if not report_assets_fs.exists(value):
        raise ValidationError("Path does not exist on the file system")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.