id: stringlengths 3-8
content: stringlengths 100-981k
86307
from ..core.base import ErsiliaBase from ..app.app import AppBase, StreamlitApp import subprocess import os import shutil import streamlit class DeployBase(ErsiliaBase): def __init__(self, config_json=None, credentials_json=None): ErsiliaBase.__init__( self, config_json=config_json, credentials_json=credentials_json ) self.docker_org = self.cfg.EXT.DOCKERHUB_ORG def _get_bundle_directory(self, model_id): path = os.path.join( self._bundles_dir, model_id, self._get_latest_bundle_tag(model_id) ) return path def _get_tmp_directory(self, model_id): path_o = self._get_bundle_directory(model_id) path_d = os.path.join(self._tmp_dir, model_id, os.path.basename(path_o)) return path_d def _read_bundle_dockerfile(self, model_id): dockerfile = os.path.join(self._get_bundle_directory(model_id), "Dockerfile") with open(dockerfile, "r") as f: text = f.readlines() return text def _read_bundle_requirements(self, model_id): requirements = os.path.join( self._get_bundle_directory(model_id), "requirements.txt" ) with open(requirements, "r") as f: text = f.readlines() return text def _copy_bundle_to_tmp(self, model_id): path_o = self._get_bundle_directory(model_id) path_d = self._get_tmp_directory(model_id) os.makedirs(os.path.join(self._tmp_dir, model_id), exist_ok=True) if os.path.exists(path_d): shutil.rmtree(path_d) shutil.copytree(path_o, path_d) return path_d def _copy_app_to_tmp(self, model_id): ab = AppBase() app_script = ab.app_script(model_id) shutil.copy( app_script, os.path.join( self._get_tmp_directory(model_id), os.path.basename(app_script) ), ) def _copy_to_tmp(self, model_id): self._copy_bundle_to_tmp(model_id) self._copy_app_to_tmp(model_id) def _delete_tmp(self, model_id): shutil.rmtree(self._get_tmp_directory(model_id)) @staticmethod def _app_type(model_id): ab = AppBase() if ab._is_streamlit(model_id): return "streamlit" if ab._is_swagger(model_id): return "swagger" if ab._is_dash(model_id): return "dash" def _was_ersilia_docker_env(self, model_id): head = self._read_bundle_dockerfile(model_id)[0].rstrip() manifest = "FROM %s" % self.docker_org if manifest in head: return True else: return False def _modify_requirements(self, model_id): if self._was_ersilia_docker_env(model_id): # no need for modifying requirements return app_type = self._app_type(model_id) R = self._read_bundle_requirements(model_id) r = R[-1].rstrip("\n") + "\n" R[-1] = r R += ["git+https://github.com/ersilia-os/ersilia\n"] if app_type == "streamlit": R += ["streamlit==%s\n" % streamlit.__version__] requirements = os.path.join( self._get_tmp_directory(model_id), "requirements.txt" ) with open(requirements, "w") as f: for r in R: f.write(r) if app_type == "swagger": return if app_type == "dash": return def _modify_dockerfile(self, model_id, envport=False): R = self._read_bundle_dockerfile(model_id) app_type = self._app_type(model_id) if app_type == "streamlit": if envport: R[-1] = "CMD streamlit run /bento/app.py --server.port $PORT\n" else: R[-1] = "CMD streamlit run /bento/app.py\n" dockerfile = os.path.join(self._get_tmp_directory(model_id), "Dockerfile") with open(dockerfile, "w") as f: for r in R: f.write(r) return R if app_type == "swagger": return # TODO if app_type == "dash": return # TODO class Local(DeployBase): def __init__(self, config_json=None, credentials_json=None): DeployBase.__init__( self, config_json=config_json, credentials_json=credentials_json ) def deploy(self, model_id): app_type = self._app_type(model_id) if app_type == "streamlit": app = StreamlitApp() app.run(model_id) if app_type == "swagger": 
pass if app_type == "dash": pass class Heroku(DeployBase): def __init__(self, config_json=None, credentials_json=None): DeployBase.__init__( self, config_json=config_json, credentials_json=credentials_json ) self._login() @staticmethod def _login(): subprocess.Popen("heroku container:login", shell=True).wait() def _set_tmp(self, model_id): self._copy_to_tmp(model_id) self._modify_requirements(model_id) self._modify_dockerfile(model_id, envport=True) @staticmethod def _create_app(model_id): subprocess.Popen("heroku create %s" % model_id, shell=True).wait() @staticmethod def _push(model_id): subprocess.Popen( "heroku container:push web --app %s" % model_id, shell=True ).wait() @staticmethod def _release(model_id): subprocess.Popen( "heroku container:release web --app %s" % model_id, shell=True ).wait() def deploy(self, model_id): cwd = os.getcwd() self._set_tmp(model_id) os.chdir(self._get_tmp_directory(model_id)) self._create_app(model_id) self._push(model_id) self._release(model_id) os.chdir(cwd) self._delete_tmp(model_id) @staticmethod def destroy(model_id): subprocess.Popen("heroku apps:destroy %s --confirm %s" % (model_id, model_id)) class Aws(DeployBase): def __init__(self, config_json=None, credentials_json=None): DeployBase.__init__( self, config_json=config_json, credentials_json=credentials_json ) self._login() def _login(self): pass def deploy(self, model_id): pass class GoogleCloud(DeployBase): def __init__(self, config_json=None, credentials_json=None): DeployBase.__init__( self, config_json=config_json, credentials_json=credentials_json ) self._login() def _login(self): pass def deploy(self, model_id): pass class Azure(ErsiliaBase): def __init__(self, config_json=None, credentials_json=None): ErsiliaBase.__init__( self, config_json=config_json, credentials_json=credentials_json ) self._login() def _login(self): pass def deploy(self, model_id): pass class Deployer(object): def __init__(self, cloud="heroku", config_json=None, credentials_json=None): """Initialize a cloud deployer. For now, only 'heroku' is available.""" self.cloud = cloud self.dep = None if cloud == "local": self.dep = Local(config_json=config_json, credentials_json=credentials_json) if cloud == "heroku": self.dep = Heroku( config_json=config_json, credentials_json=credentials_json ) if cloud == "aws": self.dep = Aws(config_json=config_json, credentials_json=credentials_json) if cloud == "googlecloud": self.dep = GoogleCloud( config_json=config_json, credentials_json=credentials_json ) if cloud == "azure": self.dep = Azure(config_json=config_json, credentials_json=credentials_json) def deploy(self, model_id): self.dep.deploy(model_id)
86314
import argparse
import os
import sourcetraildb as srctrl


def main():
    parser = argparse.ArgumentParser(description="SourcetrailDB Python API Example")
    parser.add_argument("--database-file-path",
                        help="path to the generated Sourcetrail database file",
                        type=str, required=True)
    parser.add_argument("--source-file-path",
                        help="path to the source file to index",
                        type=str, required=True)
    parser.add_argument("--database-version",
                        help="database version of the invoking Sourcetrail binary",
                        type=int, required=False, default=0)
    args = parser.parse_args()

    databaseFilePath = args.database_file_path
    sourceFilePath = args.source_file_path.replace("\\", "/")
    dbVersion = args.database_version

    print("SourcetrailDB Python API Example")
    print("Supported database version: " + str(srctrl.getSupportedDatabaseVersion()))

    if dbVersion > 0 and dbVersion != srctrl.getSupportedDatabaseVersion():
        print("ERROR: Only supports database version: " + str(srctrl.getSupportedDatabaseVersion()) +
              ". Requested version: " + str(dbVersion))
        return 1

    if not srctrl.open(databaseFilePath):
        print("ERROR: " + srctrl.getLastError())
        return 1

    print("Clearing loaded database now...")
    srctrl.clear()

    print("start indexing")
    srctrl.beginTransaction()
    fileId = srctrl.recordFile(sourceFilePath)
    srctrl.recordFileLanguage(fileId, "python")

    if len(srctrl.getLastError()) > 0:
        print("ERROR: " + srctrl.getLastError())
        return 1

    symbolId = srctrl.recordSymbol(
        '{ "name_delimiter": ".", "name_elements": [ '
        '{ "prefix": "", "name": "MyType", "postfix": "" } '
        '] }')
    srctrl.recordSymbolDefinitionKind(symbolId, srctrl.DEFINITION_EXPLICIT)
    srctrl.recordSymbolKind(symbolId, srctrl.SYMBOL_CLASS)
    srctrl.recordSymbolLocation(symbolId, fileId, 2, 7, 2, 12)
    srctrl.recordSymbolScopeLocation(symbolId, fileId, 2, 1, 7, 1)

    memberId = srctrl.recordSymbol(
        '{ "name_delimiter": ".", "name_elements": [ '
        '{ "prefix": "", "name": "MyType", "postfix": "" }, '
        '{ "prefix": "", "name": "my_member", "postfix": "" } '
        '] }')
    srctrl.recordSymbolDefinitionKind(memberId, srctrl.DEFINITION_EXPLICIT)
    srctrl.recordSymbolKind(memberId, srctrl.SYMBOL_FIELD)
    srctrl.recordSymbolLocation(memberId, fileId, 4, 2, 4, 10)

    methodId = srctrl.recordSymbol(
        '{ "name_delimiter": ".", "name_elements": [ '
        '{ "prefix": "", "name": "MyType", "postfix": "" }, '
        '{ "prefix": "", "name": "my_method", "postfix": "" } '
        '] }')
    srctrl.recordSymbolDefinitionKind(methodId, srctrl.DEFINITION_EXPLICIT)
    srctrl.recordSymbolKind(methodId, srctrl.SYMBOL_METHOD)
    srctrl.recordSymbolLocation(methodId, fileId, 6, 6, 6, 14)
    srctrl.recordSymbolScopeLocation(methodId, fileId, 6, 1, 7, 1)

    useageId = srctrl.recordReference(methodId, memberId, srctrl.REFERENCE_USAGE)
    srctrl.recordReferenceLocation(useageId, fileId, 7, 10, 7, 18)

    srctrl.commitTransaction()

    if len(srctrl.getLastError()) > 0:
        print("ERROR: " + srctrl.getLastError())
        return 1

    if not srctrl.close():
        print("ERROR: " + srctrl.getLastError())
        return 1

    print("done")
    return 0


main()
86315
import tadtool.tad as tad
import tadtool.plot as tp
import matplotlib.pyplot as plt

# load regions data set
regions = tad.HicRegionFileReader().regions("chr12_20-35Mb_regions.bed")

# load matrix
matrix = tad.HicMatrixFileReader().matrix("chr12_20-35Mb.matrix.txt")

# prepare plot
tad_plot = tp.TADtoolPlot(matrix, regions, norm='lin', max_dist=1000000,
                          algorithm='insulation')
fig, axes = tad_plot.plot('chr12:31000000-34000000')

# show plot
plt.show()
86321
from flask import Blueprint

# Each module blueprint can have its own static folder. The default app already serves the
# project-level static directory, but a blueprint has to register its own static path manually.
# To tell the app-level static files apart from the module-level ones, the blueprint is given
# a URL prefix when it is created. With the prefix set, every route registered on the blueprint
# automatically carries that prefix, and images are loaded as /cart/static/xxx.img.
# If a template with the same name exists in both the module's and the main app's templates
# folders, the main app's template is loaded first.
cart_blue = Blueprint('cart', __name__,
                      static_folder='static',
                      template_folder='templates',
                      url_prefix='/cart')

# views can only be imported after the blueprint is initialized, because .views uses the blueprint
from .views import *
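A minimal sketch of how a blueprint like this is typically wired into the application; the app module shown here is an assumption for illustration and not part of the original file:

# Hypothetical application setup; the import path for cart_blue is assumed.
from flask import Flask
from cart import cart_blue  # the blueprint defined above

app = Flask(__name__)
app.register_blueprint(cart_blue)
# With url_prefix='/cart', a view declared as @cart_blue.route('/index') is served at
# /cart/index, and the blueprint's static files resolve under /cart/static/.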
86350
from .settings import *

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'django',
        'USER': 'django',
        'HOST': 'dbe2e',
        'PORT': 5432,
    }
}
86373
from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import build_conv_layer from mmcv.runner import load_checkpoint from mmedit.models.common import (PixelShufflePack, ResidualBlockNoBN, make_layer) from mmedit.models.registry import BACKBONES from mmedit.utils import get_root_logger # Use partial to specify some default arguments _conv3x3_layer = partial( build_conv_layer, dict(type='Conv2d'), kernel_size=3, padding=1) _conv1x1_layer = partial( build_conv_layer, dict(type='Conv2d'), kernel_size=1, padding=0) class SFE(nn.Module): """Structural Feature Encoder Backbone of Texture Transformer Network for Image Super-Resolution. Args: in_channels (int): Number of channels in the input image mid_channels (int): Channel number of intermediate features num_blocks (int): Block number in the trunk network res_scale (float): Used to scale the residual in residual block. Default: 1. """ def __init__(self, in_channels, mid_channels, num_blocks, res_scale): super().__init__() self.num_blocks = num_blocks self.conv_first = _conv3x3_layer(in_channels, mid_channels) self.body = make_layer( ResidualBlockNoBN, num_blocks, mid_channels=mid_channels, res_scale=res_scale) self.conv_last = _conv3x3_layer(mid_channels, mid_channels) def forward(self, x): """Forward function. Args: x (Tensor): Input tensor with shape (n, c, h, w). Returns: Tensor: Forward results. """ x1 = x = F.relu(self.conv_first(x)) x = self.body(x) x = self.conv_last(x) x = x + x1 return x class CSFI2(nn.Module): """Cross-Scale Feature Integration between 1x and 2x features. Cross-Scale Feature Integration in Texture Transformer Network for Image Super-Resolution. It is cross-scale feature integration between 1x and 2x features. For example, `conv2to1` means conv layer from 2x feature to 1x feature. Down-sampling is achieved by conv layer with stride=2, and up-sampling is achieved by bicubic interpolate and conv layer. Args: mid_channels (int): Channel number of intermediate features """ def __init__(self, mid_channels): super().__init__() self.conv1to2 = _conv1x1_layer(mid_channels, mid_channels) self.conv2to1 = _conv3x3_layer(mid_channels, mid_channels, stride=2) self.conv_merge1 = _conv3x3_layer(mid_channels * 2, mid_channels) self.conv_merge2 = _conv3x3_layer(mid_channels * 2, mid_channels) def forward(self, x1, x2): """Forward function. Args: x1 (Tensor): Input tensor with shape (n, c, h, w). x2 (Tensor): Input tensor with shape (n, c, 2h, 2w). Returns: x1 (Tensor): Output tensor with shape (n, c, h, w). x2 (Tensor): Output tensor with shape (n, c, 2h, 2w). """ x12 = F.interpolate( x1, scale_factor=2, mode='bicubic', align_corners=False) x12 = F.relu(self.conv1to2(x12)) x21 = F.relu(self.conv2to1(x2)) x1 = F.relu(self.conv_merge1(torch.cat((x1, x21), dim=1))) x2 = F.relu(self.conv_merge2(torch.cat((x2, x12), dim=1))) return x1, x2 class CSFI3(nn.Module): """Cross-Scale Feature Integration between 1x, 2x, and 4x features. Cross-Scale Feature Integration in Texture Transformer Network for Image Super-Resolution. It is cross-scale feature integration between 1x and 2x features. For example, `conv2to1` means conv layer from 2x feature to 1x feature. Down-sampling is achieved by conv layer with stride=2, and up-sampling is achieved by bicubic interpolate and conv layer. 
Args: mid_channels (int): Channel number of intermediate features """ def __init__(self, mid_channels): super().__init__() self.conv1to2 = _conv1x1_layer(mid_channels, mid_channels) self.conv1to4 = _conv1x1_layer(mid_channels, mid_channels) self.conv2to1 = _conv3x3_layer(mid_channels, mid_channels, stride=2) self.conv2to4 = _conv1x1_layer(mid_channels, mid_channels) self.conv4to1_1 = _conv3x3_layer(mid_channels, mid_channels, stride=2) self.conv4to1_2 = _conv3x3_layer(mid_channels, mid_channels, stride=2) self.conv4to2 = _conv3x3_layer(mid_channels, mid_channels, stride=2) self.conv_merge1 = _conv3x3_layer(mid_channels * 3, mid_channels) self.conv_merge2 = _conv3x3_layer(mid_channels * 3, mid_channels) self.conv_merge4 = _conv3x3_layer(mid_channels * 3, mid_channels) def forward(self, x1, x2, x4): """Forward function. Args: x1 (Tensor): Input tensor with shape (n, c, h, w). x2 (Tensor): Input tensor with shape (n, c, 2h, 2w). x4 (Tensor): Input tensor with shape (n, c, 4h, 4w). Returns: x1 (Tensor): Output tensor with shape (n, c, h, w). x2 (Tensor): Output tensor with shape (n, c, 2h, 2w). x4 (Tensor): Output tensor with shape (n, c, 4h, 4w). """ x12 = F.interpolate( x1, scale_factor=2, mode='bicubic', align_corners=False) x12 = F.relu(self.conv1to2(x12)) x14 = F.interpolate( x1, scale_factor=4, mode='bicubic', align_corners=False) x14 = F.relu(self.conv1to4(x14)) x21 = F.relu(self.conv2to1(x2)) x24 = F.interpolate( x2, scale_factor=2, mode='bicubic', align_corners=False) x24 = F.relu(self.conv2to4(x24)) x41 = F.relu(self.conv4to1_1(x4)) x41 = F.relu(self.conv4to1_2(x41)) x42 = F.relu(self.conv4to2(x4)) x1 = F.relu(self.conv_merge1(torch.cat((x1, x21, x41), dim=1))) x2 = F.relu(self.conv_merge2(torch.cat((x2, x12, x42), dim=1))) x4 = F.relu(self.conv_merge4(torch.cat((x4, x14, x24), dim=1))) return x1, x2, x4 class MergeFeatures(nn.Module): """Merge Features. Merge 1x, 2x, and 4x features. Final module of Texture Transformer Network for Image Super-Resolution. Args: mid_channels (int): Channel number of intermediate features out_channels (int): Number of channels in the output image """ def __init__(self, mid_channels, out_channels): super().__init__() self.conv1to4 = _conv1x1_layer(mid_channels, mid_channels) self.conv2to4 = _conv1x1_layer(mid_channels, mid_channels) self.conv_merge = _conv3x3_layer(mid_channels * 3, mid_channels) self.conv_last1 = _conv3x3_layer(mid_channels, mid_channels // 2) self.conv_last2 = _conv1x1_layer(mid_channels // 2, out_channels) def forward(self, x1, x2, x4): """Forward function. Args: x1 (Tensor): Input tensor with shape (n, c, h, w). x2 (Tensor): Input tensor with shape (n, c, 2h, 2w). x4 (Tensor): Input tensor with shape (n, c, 4h, 4w). Returns: x (Tensor): Output tensor with shape (n, c_out, 4h, 4w). """ x14 = F.interpolate( x1, scale_factor=4, mode='bicubic', align_corners=False) x14 = F.relu(self.conv1to4(x14)) x24 = F.interpolate( x2, scale_factor=2, mode='bicubic', align_corners=False) x24 = F.relu(self.conv2to4(x24)) x = F.relu(self.conv_merge(torch.cat((x4, x14, x24), dim=1))) x = self.conv_last1(x) x = self.conv_last2(x) x = torch.clamp(x, -1, 1) return x @BACKBONES.register_module() class TTSRNet(nn.Module): """TTSR network structure (main-net) for reference-based super-resolution. Paper: Learning Texture Transformer Network for Image Super-Resolution Adapted from 'https://github.com/researchmm/TTSR.git' 'https://github.com/researchmm/TTSR' Copyright permission at 'https://github.com/researchmm/TTSR/issues/38'. 
Args: in_channels (int): Number of channels in the input image out_channels (int): Number of channels in the output image mid_channels (int): Channel number of intermediate features. Default: 64 num_blocks (tuple[int]): Block numbers in the trunk network. Default: (16, 16, 8, 4) res_scale (float): Used to scale the residual in residual block. Default: 1. """ def __init__(self, in_channels, out_channels, mid_channels=64, texture_channels=64, num_blocks=(16, 16, 8, 4), res_scale=1.0): super().__init__() self.texture_channels = texture_channels self.sfe = SFE(in_channels, mid_channels, num_blocks[0], res_scale) # stage 1 self.conv_first1 = _conv3x3_layer(4 * texture_channels + mid_channels, mid_channels) self.res_block1 = make_layer( ResidualBlockNoBN, num_blocks[1], mid_channels=mid_channels, res_scale=res_scale) self.conv_last1 = _conv3x3_layer(mid_channels, mid_channels) # up-sampling 1 -> 2 self.up1 = PixelShufflePack( in_channels=mid_channels, out_channels=mid_channels, scale_factor=2, upsample_kernel=3) # stage 2 self.conv_first2 = _conv3x3_layer(2 * texture_channels + mid_channels, mid_channels) self.csfi2 = CSFI2(mid_channels) self.res_block2_1 = make_layer( ResidualBlockNoBN, num_blocks[2], mid_channels=mid_channels, res_scale=res_scale) self.res_block2_2 = make_layer( ResidualBlockNoBN, num_blocks[2], mid_channels=mid_channels, res_scale=res_scale) self.conv_last2_1 = _conv3x3_layer(mid_channels, mid_channels) self.conv_last2_2 = _conv3x3_layer(mid_channels, mid_channels) # up-sampling 2 -> 3 self.up2 = PixelShufflePack( in_channels=mid_channels, out_channels=mid_channels, scale_factor=2, upsample_kernel=3) # stage 3 self.conv_first3 = _conv3x3_layer(texture_channels + mid_channels, mid_channels) self.csfi3 = CSFI3(mid_channels) self.res_block3_1 = make_layer( ResidualBlockNoBN, num_blocks[3], mid_channels=mid_channels, res_scale=res_scale) self.res_block3_2 = make_layer( ResidualBlockNoBN, num_blocks[3], mid_channels=mid_channels, res_scale=res_scale) self.res_block3_3 = make_layer( ResidualBlockNoBN, num_blocks[3], mid_channels=mid_channels, res_scale=res_scale) self.conv_last3_1 = _conv3x3_layer(mid_channels, mid_channels) self.conv_last3_2 = _conv3x3_layer(mid_channels, mid_channels) self.conv_last3_3 = _conv3x3_layer(mid_channels, mid_channels) # end, merge features self.merge_features = MergeFeatures(mid_channels, out_channels) def forward(self, x, soft_attention, textures): """Forward function. Args: x (Tensor): Input tensor with shape (n, c, h, w). soft_attention (Tensor): Soft-Attention tensor with shape (n, 1, h, w). textures (Tuple[Tensor]): Transferred HR texture tensors. [(N, C, H, W), (N, C/2, 2H, 2W), ...] Returns: Tensor: Forward results. 
""" assert textures[-1].shape[1] == self.texture_channels x1 = self.sfe(x) # stage 1 x1_res = torch.cat((x1, textures[0]), dim=1) x1_res = self.conv_first1(x1_res) # soft-attention x1 = x1 + x1_res * soft_attention x1_res = self.res_block1(x1) x1_res = self.conv_last1(x1_res) x1 = x1 + x1_res # stage 2 x21 = x1 x22 = self.up1(x1) x22 = F.relu(x22) x22_res = torch.cat((x22, textures[1]), dim=1) x22_res = self.conv_first2(x22_res) # soft-attention x22_res = x22_res * F.interpolate( soft_attention, scale_factor=2, mode='bicubic', align_corners=False) x22 = x22 + x22_res x21_res, x22_res = self.csfi2(x21, x22) x21_res = self.res_block2_1(x21_res) x22_res = self.res_block2_2(x22_res) x21_res = self.conv_last2_1(x21_res) x22_res = self.conv_last2_2(x22_res) x21 = x21 + x21_res x22 = x22 + x22_res # stage 3 x31 = x21 x32 = x22 x33 = self.up2(x22) x33 = F.relu(x33) x33_res = torch.cat((x33, textures[2]), dim=1) x33_res = self.conv_first3(x33_res) # soft-attention x33_res = x33_res * F.interpolate( soft_attention, scale_factor=4, mode='bicubic', align_corners=False) x33 = x33 + x33_res x31_res, x32_res, x33_res = self.csfi3(x31, x32, x33) x31_res = self.res_block3_1(x31_res) x32_res = self.res_block3_2(x32_res) x33_res = self.res_block3_3(x33_res) x31_res = self.conv_last3_1(x31_res) x32_res = self.conv_last3_2(x32_res) x33_res = self.conv_last3_3(x33_res) x31 = x31 + x31_res x32 = x32 + x32_res x33 = x33 + x33_res x = self.merge_features(x31, x32, x33) return x def init_weights(self, pretrained=None, strict=True): """Init weights for models. Args: pretrained (str, optional): Path for pretrained weights. If given None, pretrained weights will not be loaded. Defaults to None. strict (boo, optional): Whether strictly load the pretrained model. Defaults to True. """ if isinstance(pretrained, str): logger = get_root_logger() load_checkpoint(self, pretrained, strict=strict, logger=logger) elif pretrained is None: pass # use default initialization else: raise TypeError('"pretrained" must be a str or None. ' f'But received {type(pretrained)}.')
86384
from django.conf import settings


def _ellipse_bbox(x, y, height):
    x *= settings.RENDER_SCALE
    y *= settings.RENDER_SCALE
    y = height - y
    return ((x - 2, y - 2), (x + 2, y + 2))


def _line_coords(from_point, to_point, height):
    return (from_point.x * settings.RENDER_SCALE,
            height - (from_point.y * settings.RENDER_SCALE),
            to_point.x * settings.RENDER_SCALE,
            height - (to_point.y * settings.RENDER_SCALE))
86466
from contextvars import ContextVar
from typing import Optional

from telethon.tl.custom import Message


class Context:
    __print_output: ContextVar[str]
    __msg: ContextVar[Optional[Message]]

    def __init__(self):
        self.__msg = ContextVar('msg')
        self.__print_output = ContextVar('print_output', default='')

    @property
    def msg(self):
        return self.__msg.get(None)

    @msg.setter
    def msg(self, msg: Message):
        self.__msg.set(msg)

    @property
    def _print_output(self):
        return self.__print_output.get()

    def _print(self, *values, sep=' ', end='\n', file=None, flush=True):
        if file:
            print(*values, sep=sep, end=end, file=file, flush=flush)
        else:
            output = sep.join(str(val) for val in values) + end
            self.__print_output.set(self.__print_output.get() + output)

    def __str__(self):
        return f'<Context(msg)>'
86474
from types import SimpleNamespace

import pytest
from mock import patch, MagicMock

from backend.lambdas.tasks.check_queue_size import handler

pytestmark = [pytest.mark.unit, pytest.mark.task]


@patch("backend.lambdas.tasks.check_queue_size.sqs")
def test_it_returns_correct_queue_size(mock_resource):
    mock_queue = MagicMock()
    mock_resource.Queue.return_value = mock_queue
    mock_queue.attributes = {
        "ApproximateNumberOfMessages": "4",
        "ApproximateNumberOfMessagesNotVisible": "2",
    }

    event = {"QueueUrl": "queue_url"}
    resp = handler(event, SimpleNamespace())

    assert {"Visible": 4, "NotVisible": 2, "Total": 6} == resp
86487
import os import cv2 import numpy as np import random from tqdm import tqdm TEMP_CACHE_NAME = './~temp.png' gaussian_blur_params = [1, 3, 3, 3, 3, 3, 5] def build_dataset(data_dir, new_dir='datasets', dataset_name='rvl-cdip', mode='train'): origin_dir = os.path.join(data_dir, dataset_name) label_path = os.path.join(origin_dir, 'labels', mode+'.txt') image_dir = os.path.join(origin_dir, 'images') local_dir = os.path.join(new_dir, dataset_name+'_'+mode) train_dir = os.path.join(new_dir, dataset_name+'_'+mode+'_train') valid_dir = os.path.join(new_dir, dataset_name+'_'+mode+'_valid') test_dir = os.path.join(new_dir, dataset_name+'_'+mode+'_test') if not os.path.exists(origin_dir): print(origin_dir) raise Exception('Original dataset path not exists') if not os.path.exists(local_dir): os.makedirs(local_dir) if not os.path.exists(train_dir): os.makedirs(train_dir) if not os.path.exists(test_dir): os.makedirs(test_dir) if not os.path.exists(valid_dir): os.makedirs(valid_dir) label_file = open(label_path, 'r') res_dict = {} for idx, imgline in tqdm(enumerate(label_file)): res = imgline.split(' ') img_path, label = res[0], res[1] img_name = img_path.split('/')[-1] # load origin image if not os.path.exists(os.path.join(image_dir, img_path)): print('! Image is not exists:' + img_path) continue else: hr_img = cv2.imread(os.path.join(image_dir, img_path)) if hr_img is None: print('! Image is None:' + img_path) continue if label not in res_dict.keys(): res_dict[label] = [(img_path, img_name)] else: res_dict[label].append((img_path, img_name)) # cv2.imwrite(os.path.join(local_dir, img_name), hr_img) idx = 0 for key in tqdm(res_dict.keys()): for img_path, img_name in res_dict[key]: hr_img = cv2.imread(os.path.join(image_dir, img_path)) if idx % 10 == 0: cv2.imwrite(os.path.join(test_dir, img_name), hr_img) elif idx % 10 == 1: cv2.imwrite(os.path.join(valid_dir, img_name), hr_img) else: cv2.imwrite(os.path.join(train_dir, img_name), hr_img) idx += 1 # def build_dataset(data_dir, new_dir='datasets', dataset_name='rvl-cdip', mode='train'): # origin_dir = os.path.join(data_dir, dataset_name) # label_path = os.path.join(origin_dir, 'labels', mode+'.txt') # image_dir = os.path.join(origin_dir, 'images') # local_dir = os.path.join(new_dir, dataset_name+'_'+mode) # if not os.path.exists(origin_dir): # print(origin_dir) # raise Exception('Original dataset path not exists') # if not os.path.exists(local_dir): # os.makedirs(local_dir) # os.makedirs(os.path.join(local_dir, 'LR')) # os.makedirs(os.path.join(local_dir, 'LRN')) # os.makedirs(os.path.join(local_dir, 'HR')) # label_file = open(label_path, 'r') # for idx, imgline in tqdm(enumerate(label_file)): # img_path = imgline.split(' ')[0] # img_name = img_path.split('/')[-1] # # load origin image # if not os.path.exists(os.path.join(image_dir, img_path)): # print('! Image is not exists:' + img_path) # continue # else: # hr_img = cv2.imread(os.path.join(image_dir, img_path)) # if hr_img is None: # print('! 
Image is None:' + img_path) # continue # # build general low resolution image # lr_img = cv2.resize(hr_img, None, None, 0.5, 0.5) # lrn_img = lr_img.copy() # # build noisy low resolution image # prob = random.random() # if prob <= 0.45: # degradation = 'compression' # elif prob <= 0.85: # degradation = 'gaussian blur' # elif prob <= 0.7: # degradation = 'gaussian noise' # elif prob < 0.8: # degradation = 'salt pepper noise' # # additional degradation # if degradation == 'compression': # r1 = np.random.randint(5, 95) # r2 = np.random.randint(2, 10) # cv2.imwrite(TEMP_CACHE_NAME, lr_img, [int(cv2.IMWRITE_JPEG_QUALITY), r1]) # lrn_img = cv2.imread(TEMP_CACHE_NAME) # cv2.imwrite(TEMP_CACHE_NAME, lrn_img, [int(cv2.IMWRITE_PNG_COMPRESSION), r2]) # lrn_img = cv2.imread(TEMP_CACHE_NAME) # elif degradation == 'gaussian blur': # r = int(np.random.choice(gaussian_blur_params)) # lrn_img = cv2.GaussianBlur(lr_img, (r, r), 0) # elif degradation == 'salt pepper noise': # pass # cv2.imwrite(os.path.join(local_dir, 'HR', img_name), hr_img) # cv2.imwrite(os.path.join(local_dir, 'LR', img_name), lr_img) # cv2.imwrite(os.path.join(local_dir, 'LRN', img_name), lrn_img) # if os.path.exists(TEMP_CACHE_NAME): # os.remove(TEMP_CACHE_NAME)
86489
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import numpy as np
import numpy.testing as npt

from reinforceflow.core import SumTree, MinTree


def test_sumtree_sum():
    capacity = 100000
    dataset = list(range(capacity))
    dataset_actual = list(range(capacity, 2 * capacity))
    tree = SumTree(capacity)
    for i in dataset:
        tree.append(i)
    for i in dataset_actual:
        tree.append(i)
    assert tree.sum() == sum(dataset_actual)


def test_sumtree_find_idx():
    size = 100000
    tree = SumTree(size)
    for i in range(size):
        tree.append(i)
    for i in range(size):
        idx = tree.find_sum_idx(i)
        assert 0 <= idx < size, 'Index = %s' % idx


def test_sumtree_distribution():
    priors = np.array([20000.0, 30000.0, 500.0, 49500.0, 0.0])
    tree = SumTree(len(priors))
    s = int(np.sum(priors))
    expected_priors = priors / s
    received_priors = np.zeros_like(priors)
    for p in priors:
        tree.append(p)
    for i in range(0, s):
        idx = tree.find_sum_idx(i)
        received_priors[idx] += 1
    received_priors = received_priors / s
    npt.assert_almost_equal(expected_priors, received_priors, decimal=4)


def test_mintree_min():
    capacity = 100000
    dataset = list(range(capacity))
    dataset_actual = list(range(capacity, 2 * capacity))
    tree = MinTree(capacity)
    for i in dataset:
        tree.append(i)
    for i in dataset_actual:
        tree.append(i)
    assert tree.min() == min(dataset_actual)
86492
import threading import logging import time class ThreadHelperException(Exception): pass class ThreadHelper(threading.Thread): """ The class provides a frame for the threads used in the framework. """ # ---------------------------------------------------------------------------------------------------------------- # # function: initialization # # ---------------------------------------------------------------------------------------------------------------- # def __init__(self): """ The function initializes all necessary variables and instances to deal with threads. """ # init parent class threading.Thread.__init__(self) # init additional instances and variables self.mutex = threading.Lock() self.eventWakeup = threading.Event() self.running = True self.shutDownBool = False # bind thread to main process self.setDaemon(True) # ---------------------------------------------------------------------------------------------------------------- # # function: suspend # # ---------------------------------------------------------------------------------------------------------------- # def suspend(self): """ Suspends the thread. """ with self.mutex: self.running = False # ---------------------------------------------------------------------------------------------------------------- # # function: resume # # ---------------------------------------------------------------------------------------------------------------- # def resume(self): """ Resumes the thread. """ with self.mutex: if self.running is not True: self.running = True self.eventWakeup.set() # ---------------------------------------------------------------------------------------------------------------- # # function: shutDown # # ---------------------------------------------------------------------------------------------------------------- # def shutDown(self): """ Shut down the thread. """ with self.mutex: self.shutDownBool = True if self.running is not True: self.resume() # ---------------------------------------------------------------------------------------------------------------- # # function: run # # ---------------------------------------------------------------------------------------------------------------- # def run(self): """ The default run function. """ logging.debug("Start the default thread") while self.shutDownBool is not True: if self.running: # raise ThreadHelperException("The thread use the default run function. Implement a run function in the" # " derived class") logging.debug("Default thread executed") time.sleep(0.05) else: logging.debug('Default thread wait') self.eventWakeup.wait() logging.debug('Default thread resumed') logging.debug("Shut down the default thread")
86519
from __future__ import division from past.utils import old_div #=============================================================================== # SCG Scaled conjugate gradient optimization. # # Copyright (c) <NAME> (1996-2001) # updates by <NAME> 2013 # # Permission is granted for anyone to copy, use, or modify these # programs and accompanying documents for purposes of research or # education, provided this copyright notice is retained, and note is # made of any changes that have been made. # # These programs and documents are distributed without any warranty, # express or implied. As the programs were written for research # purposes only, they have not been tested to the degree that would be # advisable in any important application. All use of these programs is # entirely at the user's own risk." #=============================================================================== from math import sqrt import numpy as np import logging def run(f, x, args=(), niters = 100, gradcheck = False, display = 0, flog = False, pointlog = False, scalelog = False, tolX = 1.0e-8, tolO = 1.0e-8, eval = None): '''Scaled conjugate gradient optimization. ''' if display: logging.getLogger(__name__).info('***** starting optimization (SCG) *****') nparams = len(x); # Check gradients if gradcheck: pass eps = 1.0e-4 sigma0 = 1.0e-4 result = f(x, *args) fold = result[0] # Initial function value. fnow = fold funcCount = 1 # Increment function evaluation counter. gradnew = result[1] # Initial gradient. gradold = gradnew gradCount = 1 # Increment gradient evaluation counter. d = -gradnew # Initial search direction. success = 1 # Force calculation of directional derivs. nsuccess = 0 # nsuccess counts number of successes. beta = 1.0 # Initial scale parameter. betamin = 1.0e-15 # Lower bound on scale. betamax = 1.0e50 # Upper bound on scale. j = 1 # j counts number of iterations. if flog: pass #flog(j, :) = fold; if pointlog: pass #pointlog(j, :) = x; # Main optimization loop. listF = [fold] if eval is not None: evalue, timevalue = eval(x, *args) evalList = [evalue] time = [timevalue] while (j <= niters): # Calculate first and second directional derivatives. if (success == 1): mu = np.dot(d, gradnew) if (mu >= 0): d = - gradnew mu = np.dot(d, gradnew) kappa = np.dot(d, d) if (kappa < eps): logging.getLogger(__name__).info("FNEW: " + str(fnow)) #options(8) = fnow if eval is not None: return x, listF, evalList, time else: return x, listF sigma = old_div(sigma0,sqrt(kappa)) xplus = x + sigma*d gplus = f(xplus, *args)[1] gradCount += 1 theta = old_div((np.dot(d, (gplus - gradnew))),sigma); # Increase effective curvature and evaluate step size alpha. delta = theta + beta*kappa if (delta <= 0): delta = beta*kappa beta = beta - old_div(theta,kappa) alpha = old_div(- mu,delta) # Calculate the comparison ratio. 
xnew = x + alpha*d fnew = f(xnew, *args)[0] funcCount += 1; Delta = 2*(fnew - fold)/(alpha*mu) if (Delta >= 0): success = 1; nsuccess += 1; x = xnew; fnow = fnew; listF.append(fnow) if eval is not None: evalue, timevalue = eval(x, *args) evalList.append(evalue) time.append(timevalue) else: success = 0; fnow = fold; if flog: # Store relevant variables #flog(j) = fnow; # Current function value pass if pointlog: #pointlog(j,:) = x; # Current position pass if scalelog: #scalelog(j) = beta; # Current scale parameter pass if display > 0: logging.getLogger(__name__).info('***** Cycle %4d Error %11.6f Scale %e', j, fnow, beta) if (success == 1): # Test for termination # print type (alpha), type(d), type(tolX), type(fnew), type(fold) if ((max(abs(alpha*d)) < tolX) & (abs(fnew-fold) < tolO)): # options(8) = fnew; # print "FNEW: " , fnew if eval is not None: return x, listF, evalList, time else: return x, listF else: # Update variables for new position fold = fnew gradold = gradnew gradnew = f(x, *args)[1] gradCount += 1 # If the gradient is zero then we are done. if (np.dot(gradnew, gradnew) == 0): # print "FNEW: " , fnew # options(8) = fnew; if eval is not None: return x, listF, evalList, time else: return x, listF # Adjust beta according to comparison ratio. if (Delta < 0.25): beta = min(4.0*beta, betamax); if (Delta > 0.75): beta = max(0.5*beta, betamin); # Update search direction using Polak-Ribiere formula, or re-start # in direction of negative gradient after nparams steps. if (nsuccess == nparams): d = -gradnew; nsuccess = 0; else: if (success == 1): gamma = old_div(np.dot((gradold - gradnew), gradnew),(mu)) d = gamma*d - gradnew; j += 1 # If we get here, then we haven't terminated in the given number of # iterations. # options(8) = fold; if (display): logging.getLogger(__name__).info("maximum number of iterations reached") if eval is not None: return x, listF, evalList, time else: return x, listF
86540
import logging import os import time import pytest from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV logging.getLogger().setLevel(logging.INFO) logging.getLogger().addHandler(logging.StreamHandler()) SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) @pytest.fixture(scope="module") def started_cluster(): try: cluster = ClickHouseCluster(__file__) cluster.add_instance( "h0_0_0", main_configs=["configs/config.xml"], extra_configs=["configs/hdfs-site.xml", "data/prepare_hive_data.sh"], with_hive=True, ) logging.info("Starting cluster ...") cluster.start() cluster.copy_file_to_container( "roottesthivequery_hdfs1_1", "/ClickHouse/tests/integration/test_hive_query/data/prepare_hive_data.sh", "/prepare_hive_data.sh", ) cluster.exec_in_container( "roottesthivequery_hdfs1_1", ["bash", "-c", "bash /prepare_hive_data.sh"] ) yield cluster finally: cluster.shutdown() def test_create_parquet_table(started_cluster): logging.info("Start testing creating hive table ...") node = started_cluster.instances["h0_0_0"] test_passed = False for i in range(10): node.query("set input_format_parquet_allow_missing_columns = true") result = node.query( """ DROP TABLE IF EXISTS default.demo_parquet; CREATE TABLE default.demo_parquet (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo') PARTITION BY(day) """ ) logging.info("create result {}".format(result)) if result.strip() == "": test_passed = True break time.sleep(60) assert test_passed def test_create_parquet_table_1(started_cluster): logging.info("Start testing creating hive table ...") node = started_cluster.instances["h0_0_0"] for i in range(10): node.query("set input_format_parquet_allow_missing_columns = true") result = node.query( """ DROP TABLE IF EXISTS default.demo_parquet_parts; CREATE TABLE default.demo_parquet_parts (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String), `hour` String) ENGINE = Hive('thrift://hivetest:9083', 'test', 'parquet_demo') PARTITION BY(day, hour); """ ) logging.info("create result {}".format(result)) if result.strip() == "": test_passed = True break time.sleep(60) assert test_passed def test_create_orc_table(started_cluster): logging.info("Start testing creating hive table ...") node = started_cluster.instances["h0_0_0"] test_passed = False for i in range(10): result = node.query( """ DROP TABLE IF EXISTS default.demo_orc; CREATE TABLE default.demo_orc (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo_orc') PARTITION BY(day) """ ) logging.info("create result {}".format(result)) if result.strip() == "": test_passed = True break time.sleep(60) assert test_passed def test_create_text_table(started_cluster): logging.info("Start testing creating hive table ...") node = started_cluster.instances["h0_0_0"] result = node.query( """ DROP TABLE IF EXISTS default.demo_text; CREATE TABLE default.demo_text (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo_text') PARTITION BY (tuple()) """ ) logging.info("create result {}".format(result)) assert result.strip() == "" def test_parquet_groupby(started_cluster): logging.info("Start testing groupby ...") node = started_cluster.instances["h0_0_0"] result = node.query( """ SELECT day, count(*) FROM default.demo_parquet group by day order by day """ ) expected_result = """2021-11-01 1 2021-11-05 2 2021-11-11 1 2021-11-16 2 
""" assert result == expected_result def test_parquet_in_filter(started_cluster): logging.info("Start testing groupby ...") node = started_cluster.instances["h0_0_0"] result = node.query( """ SELECT count(*) FROM default.demo_parquet_parts where day = '2021-11-05' and hour in ('00') """ ) expected_result = """2 """ logging.info("query result:{}".format(result)) assert result == expected_result def test_orc_groupby(started_cluster): logging.info("Start testing groupby ...") node = started_cluster.instances["h0_0_0"] result = node.query( """ SELECT day, count(*) FROM default.demo_orc group by day order by day """ ) expected_result = """2021-11-01 1 2021-11-05 2 2021-11-11 1 2021-11-16 2 """ assert result == expected_result @pytest.mark.parametrize( "table,use_local_cache_for_remote_storage,enable_orc_file_minmax_index,enable_orc_stripe_minmax_index", [ pytest.param( "demo_orc_no_cache_no_index", "false", "false", "false", id="demo_orc_no_cache_no_index", ), pytest.param( "demo_orc_with_cache_no_index", "true", "false", "false", id="demo_orc_with_cache_no_index", ), pytest.param( "demo_orc_no_cache_file_index", "false", "true", "false", id="demo_orc_no_cache_file_index", ), pytest.param( "demo_orc_with_cache_file_index", "true", "true", "false", id="demo_orc_with_cache_file_index", ), pytest.param( "demo_orc_no_cache_stripe_index", "false", "true", "true", id="demo_orc_no_cache_stripe_index", ), pytest.param( "demo_orc_with_cache_stripe_index", "true", "true", "true", id="demo_orc_with_cache_stripe_index", ), ], ) def test_orc_minmax_index( started_cluster, table, use_local_cache_for_remote_storage, enable_orc_file_minmax_index, enable_orc_stripe_minmax_index, ): node = started_cluster.instances["h0_0_0"] result = node.query( """ DROP TABLE IF EXISTS default.{table}; CREATE TABLE default.{table} (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo_orc') PARTITION BY(day) SETTINGS enable_orc_file_minmax_index = {enable_orc_file_minmax_index}, enable_orc_stripe_minmax_index = {enable_orc_stripe_minmax_index}; """.format( table=table, enable_orc_file_minmax_index=enable_orc_file_minmax_index, enable_orc_stripe_minmax_index=enable_orc_stripe_minmax_index, ) ) assert result.strip() == "" for i in range(2): result = node.query( """ SELECT day, id, score FROM default.{table} where day >= '2021-11-05' and day <= '2021-11-16' and score >= 15 and score <= 30 order by day, id SETTINGS use_local_cache_for_remote_storage = {use_local_cache_for_remote_storage} """.format( table=table, use_local_cache_for_remote_storage=use_local_cache_for_remote_storage, ) ) assert ( result == """2021-11-05 abd 15 2021-11-16 aaa 22 """ ) @pytest.mark.parametrize( "table,use_local_cache_for_remote_storage,enable_parquet_rowgroup_minmax_index", [ pytest.param( "demo_parquet_no_cache_no_index", "false", "false", id="demo_parquet_no_cache_no_index", ), pytest.param( "demo_parquet_with_cache_no_index", "true", "false", id="demo_parquet_with_cache_no_index", ), pytest.param( "demo_parquet_no_cache_rowgroup_index", "false", "true", id="demo_parquet_no_cache_rowgroup_index", ), pytest.param( "demo_parquet_with_cache_rowgroup_index", "true", "true", id="demo_parquet_with_cache_rowgroup_index", ), ], ) def test_parquet_minmax_index( started_cluster, table, use_local_cache_for_remote_storage, enable_parquet_rowgroup_minmax_index, ): node = started_cluster.instances["h0_0_0"] result = node.query( """ DROP TABLE IF EXISTS default.{table}; CREATE TABLE 
default.{table} (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo') PARTITION BY(day) SETTINGS enable_parquet_rowgroup_minmax_index = {enable_parquet_rowgroup_minmax_index} """.format( table=table, enable_parquet_rowgroup_minmax_index=enable_parquet_rowgroup_minmax_index, ) ) assert result.strip() == "" for i in range(2): result = node.query( """ SELECT day, id, score FROM default.{table} where day >= '2021-11-05' and day <= '2021-11-16' and score >= 15 and score <= 30 order by day, id SETTINGS use_local_cache_for_remote_storage = {use_local_cache_for_remote_storage} """.format( table=table, use_local_cache_for_remote_storage=use_local_cache_for_remote_storage, ) ) assert ( result == """2021-11-05 abd 15 2021-11-16 aaa 22 """ ) def test_hive_columns_prunning(started_cluster): logging.info("Start testing groupby ...") node = started_cluster.instances["h0_0_0"] result = node.query( """ SELECT count(*) FROM default.demo_parquet_parts where day = '2021-11-05' """ ) expected_result = """4 """ logging.info("query result:{}".format(result)) assert result == expected_result def test_text_count(started_cluster): node = started_cluster.instances["h0_0_0"] result = node.query( """ SELECT day, count(*) FROM default.demo_orc group by day order by day SETTINGS format_csv_delimiter = '\x01' """ ) expected_result = """2021-11-01 1 2021-11-05 2 2021-11-11 1 2021-11-16 2 """ assert result == expected_result def test_parquet_groupby_with_cache(started_cluster): logging.info("Start testing groupby ...") node = started_cluster.instances["h0_0_0"] result = node.query( """ SELECT day, count(*) FROM default.demo_parquet group by day order by day """ ) expected_result = """2021-11-01 1 2021-11-05 2 2021-11-11 1 2021-11-16 2 """ assert result == expected_result def test_parquet_groupby_by_hive_function(started_cluster): logging.info("Start testing groupby ...") node = started_cluster.instances["h0_0_0"] result = node.query( """ SELECT day, count(*) FROM hive('thrift://hivetest:9083', 'test', 'demo', '`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)', 'day') group by day order by day """ ) expected_result = """2021-11-01 1 2021-11-05 2 2021-11-11 1 2021-11-16 2 """ assert result == expected_result def test_cache_read_bytes(started_cluster): node = started_cluster.instances["h0_0_0"] result = node.query( """ CREATE TABLE IF NOT EXISTS default.demo_parquet_1 (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo') PARTITION BY(day) """ ) test_passed = False for i in range(10): result = node.query( """ SELECT * FROM default.demo_parquet_1 settings input_format_parquet_allow_missing_columns = true """ ) node.query("system flush logs") result = node.query( "select sum(ProfileEvent_ExternalDataSourceLocalCacheReadBytes) from system.metric_log where ProfileEvent_ExternalDataSourceLocalCacheReadBytes > 0" ) if result.strip() == "0": logging.info("ProfileEvent_ExternalDataSourceLocalCacheReadBytes == 0") time.sleep(10) continue test_passed = True break assert test_passed def test_cache_dir_use(started_cluster): node = started_cluster.instances["h0_0_0"] result0 = node.exec_in_container( ["bash", "-c", "ls /tmp/clickhouse_local_cache | wc -l"] ) result1 = node.exec_in_container( ["bash", "-c", "ls /tmp/clickhouse_local_cache1 | wc -l"] ) assert result0 != "0" and result1 != "0"
86571
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField


class SupportTicket(models.Model):
    """A support ticket submitted by an unsatisfied customer :("""

    name = models.CharField(max_length=100)
    phone_number = PhoneNumberField(
        help_text='Must include international prefix - e.g. +1 555 555 55555'
    )
    description = models.TextField(help_text='A description of your problem')
    timestamp = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return '#{0} - {1}'.format(self.id, self.name)
86619
pytest_plugins = ("pytester",)


def test_help_message(testdir):
    result = testdir.runpytest("--help")
    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines(
        [
            "stress:",
            "*--delay=DELAY*The amount of time to wait between each test loop.",
            "*--hours=HOURS*The number of hours to loop the tests for.",
            "*--minutes=MINUTES*The number of minutes to loop the tests for.",
            "*--seconds=SECONDS*The number of seconds to loop the tests for.",
        ]
    )


def test_ini_file(testdir):
    testdir.makeini(
        """
        [pytest]
        addopts = --delay=0 --hours=0 --minutes=0 --seconds=0
        """
    )

    testdir.makepyfile(
        """
        import pytest

        @pytest.fixture
        def addopts(request):
            return request.config.getini('addopts')

        def test_ini(addopts):
            assert addopts[0] == "--delay=0"
            assert addopts[1] == "--hours=0"
            assert addopts[2] == "--minutes=0"
            assert addopts[3] == "--seconds=0"
        """
    )

    result = testdir.runpytest("-v")

    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines(["*::test_ini PASSED*"])

    # Make sure that we get a '0' exit code for the testsuite
    assert result.ret == 0
86640
import numpy as np import pandas as pd import re import datetime from datetime import datetime, timedelta import pandas as pd import plotly.express as px import plotly.graph_objects as go # dplyr-style for python from dppd import dppd dp, X = dppd() import itertools _DEFAULT_TIME_SCALE = 12 * 3 * 31 # 36 months """ Preprocessing data """ # Convert string percentage to integer def a(m):e=m.strip("%");f=float(e);return f/100if e!=m else str(f*100)+"%" def _get_latest_bed_estimate(row): """Try to estimate the lastest number of beds / 1000 people """ non_empty_estimates = [float(x) for x in row.values if float(x) > 0] try: return non_empty_estimates[-1] except IndexError: return np.nan def preprocess_bed_data(path): df = pd.read_csv(path) # Total hospital beds = HOPITBED # Total number of beds UNIT = NOMBRENB # No of beds per 1000 ppl UNIT = RTOINPNB df = (dp(df) .query("VAR == 'HOPITBED' & UNIT == 'NOMBRENB'") .select(["Country","Year","Value"]) .pivot(index='Country',columns='Year',values='Value') .pd) # Beds are per 1000 people df["Latest Bed Estimate"] = df.apply(_get_latest_bed_estimate, axis=1) return df def get_latest_date(global_confirmed, global_recovered, global_death): # Get latest dates from all 3 datasets r_date = datetime.strptime(global_recovered.iloc[:,-1].name,'%m/%d/%y').date() c_date = datetime.strptime(global_confirmed.iloc[:,-1].name,'%m/%d/%y').date() d_date = datetime.strptime(global_death.iloc[:,-1].name,'%m/%d/%y').date() # If they are synchronized if r_date == c_date == d_date: target_date = global_recovered.iloc[:,-1].name else: target_date = min(r_date, c_date, d_date) target_date = datetime.strftime(target_date,"%m/%d/%y") target_date = target_date[-(len(target_date)-1):] print('Latest cases data is captured on ' + str(target_date)) return target_date def prepare_historical_df(target_country, target_date, global_confirmed, global_recovered, global_death): # Convert and merge r = dp(global_recovered).query(target_country).assign(Type = "Recovered").pd c = dp(global_confirmed).query(target_country).assign(Type = "Confirmed").pd d = dp(global_death).query(target_country).assign(Type = "Death").pd historical_df = pd.concat([r,c,d]) historical_df= (dp(historical_df) .select(["-Province/State",'-Lat','-Long','-Country']) .set_index('Type') .pd) confirmed = pd.DataFrame(historical_df.iloc[1]).rename_axis('Date').reset_index() confirmed['Date'] = pd.to_datetime(confirmed['Date']) confirmed['Status'] = "Confirmed" confirmed.columns = ['Date', 'Number', 'Status'] deaths = pd.DataFrame(historical_df.iloc[2]).rename_axis('Date').reset_index() deaths['Date'] = pd.to_datetime(confirmed['Date']) deaths['Status'] = "Deaths" deaths.columns = ['Date', 'Number', 'Status'] recovered = pd.DataFrame(historical_df.iloc[0]).rename_axis('Date').reset_index() recovered['Date'] = pd.to_datetime(confirmed['Date']) recovered['Status'] = "Recovered" recovered.columns = ['Date', 'Number', 'Status'] historical_df = confirmed.append(deaths).append(recovered) return historical_df def get_cases_number(target_date, target_country, global_confirmed, global_recovered, global_death): """ Get the latest number of deaths, confirmed and recovered cases""" number_cases_deaths =(dp(global_death) .select(['Country',target_date]) .query(target_country) .pd).iloc[0][target_date] number_cases_confirmed =(dp(global_confirmed) .select(['Country',target_date]) .query(target_country) .pd).iloc[0][target_date] number_cases_recovered =(dp(global_recovered) .select(['Country',target_date]) .query(target_country) 
.pd).iloc[0][target_date] return (number_cases_deaths,number_cases_confirmed,number_cases_recovered) """ Model building """ def hospitalized_case(I, AGE_DATA): """ Calculated hospitalization cases""" AGE_DATA['Snapshot_hospitalized'] = round(AGE_DATA['Proportion_DE_2020'] * I * AGE_DATA['Hospitalization Rate']) no_h = AGE_DATA['Snapshot_hospitalized'].sum() return no_h def deaths_case(I_h2d, AGE_DATA, CDR, no_hospital_beds): """ Calculated death cases, if active cases over capacity ==> use critical death rate""" if hospitalized_case(I_h2d, AGE_DATA) <= no_hospital_beds : # still not overloaded on day (t-h2d) # Number of deaths with hospitalization AGE_DATA['Snapshot_deaths'] = round(AGE_DATA['Proportion_DE_2020'] * hospitalized_case(I_h2d, AGE_DATA) # actived cases (t-h2d) days ago will used * AGE_DATA['Mortality']) # Minus yesterday_deaths to get number of NEW deaths no_Snapshot_d = AGE_DATA['Snapshot_deaths'].sum() AGE_DATA['Total_Deaths'] = AGE_DATA['Total_Deaths'] + (AGE_DATA['Snapshot_deaths']) else: # active HOSPITALIZED case overloaded on day (t-h2d) # Number of critial cases on day (t-h2d) but no hospital beds available no_without_beds = hospitalized_case(I_h2d, AGE_DATA) - no_hospital_beds # Snapshots = amount of death cases on day (t) AGE_DATA['Snapshot_deaths_no_beds'] = round(AGE_DATA['Proportion_DE_2020'] * no_without_beds * CDR) # Number of deaths with hospitalization AGE_DATA['Snapshot_deaths'] = round(AGE_DATA['Proportion_DE_2020'] * no_hospital_beds * # max number of beds have been used AGE_DATA['Mortality']) # Minus yesterday_deaths to get number of NEW deaths no_Snapshot_d = AGE_DATA['Snapshot_deaths'].sum() + AGE_DATA['Snapshot_deaths_no_beds'].sum() # Deaths due to no beds AGE_DATA['Total_Deaths_no_beds'] = AGE_DATA['Total_Deaths_no_beds'] + AGE_DATA['Snapshot_deaths_no_beds'] AGE_DATA['Total_Deaths'] = AGE_DATA['Total_Deaths'] + (AGE_DATA['Snapshot_deaths'] + AGE_DATA['Snapshot_deaths_no_beds']) return no_Snapshot_d # Modifed from <NAME> @https://www.datahubbs.com/ def seir_model_with_soc_dist(init_vals, params, t): """Susceptible - Exposed - Infected - Recovered Infected cases here is the number of current active cases! 
""" # Get initial values S_0, E_0, I_0, R_0, H_0, D_0 = init_vals # Create empty dataframe S = pd.DataFrame(columns = ["S"]) S.loc[0] = S_0 E = pd.DataFrame(columns = ["E"]) E.loc[0] = E_0 I = pd.DataFrame(columns = ["I"]) I.loc[0] = I_0 R = pd.DataFrame(columns = ["R"]) R.loc[0] = R_0 D = pd.DataFrame(columns = ["D"]) D.loc[0] = D_0 H = pd.DataFrame(columns = ["H"]) H.loc[0] = H_0 (delta, beta, gamma, no_hospital_beds, # healthcare capacity social_dist, # social distance factor CDR, #critical death rate without hospitalization AGE_DATA, target_country, global_confirmed, global_death, global_recovered, h_to_d) = params # Total population = S + E + I (active cases) + R + D N = S_0 + E_0 + I_0 + R_0 + D_0 for k in range(1,t+1): S.loc[k] = S.loc[k-1].S - (social_dist * beta * S.loc[k-1].S * I.loc[k-1].I)/N E.loc[k] = E.loc[k-1].E + (social_dist * beta * S.loc[k-1].S * I.loc[k-1].I)/N - delta*E.loc[k-1].E # Current Infected cases if k == 1: I.loc[k] = I.loc[k-1].I + (delta*E.loc[k-1].E - gamma*I.loc[k-1].I) - (D.loc[k-1].D) # only minus new death cases on day (k) R.loc[k] = R.loc[k-1].R + (gamma*I.loc[k-1].I) - (D.loc[k-1].D) # only minus new death cases on day (k) else: # = Yesterday infected cases + (new exposed cases - recovered - deaths) I.loc[k] = I.loc[k-1].I + (delta*E.loc[k-1].E - gamma*I.loc[k-1].I) - (D.loc[k-1].D - D.loc[k-2].D) # only minus new death cases on day (k) # Current recovered = new recovered - new deaths R.loc[k] = R.loc[k-1].R + (gamma*I.loc[k-1].I) - (D.loc[k-1].D - D.loc[k-2].D) # only minus new death cases on day (k) # Hospitalized case (part of current Infected cases) H.loc[k]= hospitalized_case(I.loc[k].I, AGE_DATA) # Estimate death cases of day (k) with the hospitalized case on day (k -h2d) days ago try: past_I = I.loc[k-h_to_d].I D.loc[k] = D.loc[k-1].D + deaths_case(past_I, # active infected case on day (k-h2d) days AGE_DATA, CDR, no_hospital_beds) except: try: # if I[-h_to_d] is not exist yet before I_0 # use historical active infected cases [h_to_d] days ago past_date = datetime.strftime(datetime.strptime('3/23/20','%m/%d/%y') + timedelta(k) - timedelta(h_to_d),"%m/%d/%y") past_date = past_date[-(len(past_date) -1) :] past_h_to_d = get_cases_number(past_date, target_country, global_confirmed, global_recovered, global_death) # Get active infected case in the past past_I = past_h_to_d[1] - past_h_to_d[0] - past_h_to_d[2] D.loc[k] = D.loc[k-1].D + deaths_case(past_I,AGE_DATA,CDR, no_hospital_beds) except: # in the event of yesterday data was not updated --> temporary use yesterday data D.loc[k] = D.loc[k-1].D + D.loc[k-1].D if (I.loc[k].I <= 0): break results = pd.concat([S.reset_index(drop=True), E.reset_index(drop=True), I.reset_index(drop=True), R.reset_index(drop=True), D.reset_index(drop=True), H.reset_index(drop=True)], axis=1) results['id'] = results.index # Round all results = results.apply(pd.to_numeric) results = results.round(0) return results """ Graphics """ TEMPLATE = "plotly_white" _SUSCEPTIBLE_COLOR = "rgba(230,230,230,.4)" _RECOVERED_COLOR = "rgba(180,200,180,.4)" COLOR_MAP = { "default": "#262730", "pink": "#E22A5B", "purple": "#985FFF", "susceptible": _SUSCEPTIBLE_COLOR, "recovered": _RECOVERED_COLOR,} def _set_legends(fig): fig.layout.update(legend=dict(x=-0.1, y=1.2)) fig.layout.update(legend_orientation="h") def plot_historical_data(df): fig = px.line( df, x="Date", y="Number", color="Status", template=TEMPLATE ) fig.layout.update( xaxis_title="Date", font=dict(family="Arial", size=12)) _set_legends(fig) return fig def 
num_beds_occupancy_comparison_chart(num_beds_available, max_num_beds_needed): """ A horizontal bar chart comparing # of beds available compared to max number number of beds needed """ num_beds_available, max_num_beds_needed = ( int(num_beds_available), int(max_num_beds_needed), ) df = pd.DataFrame( { "Label": ["Total Beds ", "Peak Occupancy "], "Value": [num_beds_available, max_num_beds_needed], "Text": [f"{num_beds_available:,} ", f"{max_num_beds_needed:,} "], "Color": ["b", "r"], } ) fig = px.bar( df, x="Value", y="Label", color="Color", text="Text", orientation="h", opacity=0.7, template=TEMPLATE, height=300, ) fig.layout.update( showlegend=False, xaxis_title="", xaxis_showticklabels=False, yaxis_title="", yaxis_showticklabels=True, font=dict(family="Arial", size=15, color=COLOR_MAP["default"]), ) fig.update_traces(textposition="outside", cliponaxis=False) return fig
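# --- Usage sketch (added for illustration; the sample DataFrame and bed counts below
# --- are made-up values, not outputs of the model above) ---
# plot_historical_data expects a long-format frame with Date/Number/Status columns, and
# num_beds_occupancy_comparison_chart compares total capacity against peak demand.
if __name__ == "__main__":
    _sample = pd.DataFrame({
        "Date": ["2020-03-01", "2020-03-02", "2020-03-01", "2020-03-02"],
        "Number": [10, 25, 1, 3],
        "Status": ["Confirmed", "Confirmed", "Deaths", "Deaths"],
    })
    plot_historical_data(_sample).show()
    num_beds_occupancy_comparison_chart(28000, 41000).show()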
86643
import pandas as pd
import matplotlib.pyplot as plt

# Import our data file
stock_prices = pd.read_csv('/data/intel_amd_stock_prices.csv')

# Print DataFrame
# print(stock_prices)

# Create y-columns
y_columns = ['intel', 'amd']

# Name / assign axis
stock_prices.plot(x='month', y=y_columns)

# Create plot title
plt.title('Monthly Stock Prices')

# Create a title for y axis
plt.ylabel('Prices ($US)')

# Show the plot
plt.show()
86692
from typing import Dict, Union, Tuple

from phonenumbers import timezone, parse, geocoder

from waio.rules import ABCRule
from waio.types import Message

G_T = Dict[str, Union[int, str, Tuple[str]]]


class RussianNumberRule(ABCRule):
    async def check(self, message: Message) -> Union[bool, Dict[str, G_T]]:
        phone_number_data = self.get_phone_number_data(message.sender_number)
        if phone_number_data["country"] == "Russia":
            return {"number_data": phone_number_data}
        return False

    @staticmethod
    def get_phone_number_data(number: str) -> G_T:
        number_and_plus = f"+{number}"
        phone_number = parse(number_and_plus)
        country_name = geocoder.country_name_for_number(phone_number, "en")
        time_zones_number = timezone.time_zones_for_number(phone_number)
        return {
            "country_code": phone_number.country_code,
            "national_number": phone_number.national_number,
            "country": country_name,
            "time_zone": time_zones_number,
        }
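# --- Usage sketch (added for illustration; the raw number is a made-up example) ---
# get_phone_number_data is a plain @staticmethod, so it can be exercised without a waio
# Message; check() itself is awaited by the framework with message.sender_number.
if __name__ == "__main__":
    data = RussianNumberRule.get_phone_number_data("79991234567")
    print(data["country"], data["country_code"], data["time_zone"])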
86704
import unittest

import plotly.graph_objs as go


class TestPlotly(unittest.TestCase):
    def test_figure(self):
        trace = {'x': [1, 2], 'y': [1, 3]}
        data = [trace]
        go.Figure(data=data)
86780
from django.conf.urls import url

from stucampus.master.views.manage.account import ListAccount, ShowAccount
from stucampus.master.views.manage.infor import ListInfor, PostInfor, Information
from stucampus.master.views.manage.organization import ListOrganization, OrganzationManager, ShowOrganization
from stucampus.organization.views import EditOrganization
from stucampus.master.views.manage.index import redirect as admin_index_redirect, index as admin_index
from stucampus.organization.views import organization_manage


urlpatterns = [
    url(r'^$', admin_index_redirect, name='admin_index_redirect'),
    url(r'^index/?$', admin_index, name='admin_index'),
    url(r'^organization/list/?$', ListOrganization.as_view(), name='manage_organization_list'),
    url(r'^organization/(?P<id>\d+)/?$', ShowOrganization.as_view(), name='manage_organization_show'),
    url(r'^organization/(?P<id>\d+)/manager/?$', OrganzationManager.as_view(), name='manage_organization_manage'),
    url(r'^account/list/?$', ListAccount.as_view(), name='manage_account_list'),
    url(r'^account/(?P<id>\d+)/?$', ShowAccount.as_view(), name='manage_account_show'),
    url(r'^infor/list/?$', ListInfor.as_view(), name='manage_infor_list'),
    url(r'^infor/post/?$', PostInfor.as_view(), name='manage_infor_post'),
    url(r'^infor/(?P<id>\d+)/?$', Information.as_view(), name='manage_infor_infor'),
    url(r'^organization/?$', organization_manage, name='organization_manage'),
    url(r'^organization/(?P<id>\d+)/edit/?$', EditOrganization.as_view(), name='organization_edit'),
]
86794
import argparse import time import numpy as np import networkx as nx import json from sklearn.utils import check_random_state import zmq from . import agglo, agglo2, features, classify, evaluate as ev # constants # labels for machine learning libs MERGE_LABEL = 0 SEPAR_LABEL = 1 class Solver: """ZMQ-based interface between proofreading clients and gala RAGs. This docstring is intentionally incomplete until the interface settles. Parameters ---------- labels : array-like of int, shape (..., P, R, C) The fragment map. image : array-like of float, shape (..., P, R, C[, Ch]), optional The image, from which to compute intensity features. feature_manager : gala.features.Manager object Object exposing the feature manager interface, to compute the feature caches and features of the RAG. address : string, optional URL of client. relearn_threshold : int, optional Minimum batch size to trigger a new learning round. config_file : string, optional A JSON file specifying the URLs of the Solver, Client, and ID service. See `Solver._configure_from_file` for the file specification. Attributes ---------- This section intentionally left blank. """ def __init__(self, labels, image=np.array([]), feature_manager=features.default.snemi3d(), address=None, relearn_threshold=20, config_file=None): self.labels = labels self.image = image self.feature_manager = feature_manager self._build_rag() config_address, id_address = self._configure_from_file(config_file) self.id_service = self._connect_to_id_service(id_address) self._connect_to_client(address or config_address) self.history = [] self.separate = [] self.features = [] self.targets = [] self.relearn_threshold = relearn_threshold self.relearn_trigger = relearn_threshold self.recently_solved = True def _build_rag(self): """Build the region-adjacency graph from the label image.""" self.rag = agglo.Rag(self.labels, self.image, feature_manager=self.feature_manager, normalize_probabilities=True) self.original_rag = self.rag.copy() def _configure_from_file(self, filename): """Get all configuration parameters from a JSON file. The file specification is currently in flux, but looks like: ``` {'id_service_url': 'tcp://localhost:5555', 'client_url': 'tcp://*:9001', 'solver_url': 'tcp://localhost:9001'} ``` Parameters ---------- filename : str The input filename. Returns ------- address : str The URL to bind a ZMQ socket to. id_address : str The URL to bind an ID service to """ if filename is None: return None, None with open(filename, 'r') as fin: config = json.load(fin) return (config.get('client_url', None), config.get('id_service_url', None)) def _connect_to_client(self, address): self.comm = zmq.Context().socket(zmq.PAIR) self.comm.bind(address) def _connect_to_id_service(self, url): if url is not None: service_comm = zmq.Context().socket(zmq.REQ) service_comm.connect(url) def get_ids(count): print('requesting %i ids...' % count) service_comm.send_json({'count': count}) print('receiving %i ids...' % count) received = service_comm.recv_json() id_range = received['begin'], received['end'] return id_range else: def get_ids(count): start = np.max(self.labels) + 2 return start, start + count return get_ids def send_segmentation(self): """Send a segmentation to ZMQ as a fragment-to-segment lookup table. The format of the lookup table (LUT) is specified in the BigCat wiki [1]_. References ---------- .. 
[1] https://github.com/saalfeldlab/bigcat/wiki/Actors,-responsibilities,-and-inter-process-communication """ if len(self.targets) < self.relearn_threshold: print('server has insufficient data to resolve') return self.relearn() # correct way to do it is to implement RAG splits self.rag.agglomerate(0.5) self.recently_solved = True dst_tree = [int(i) for i in self.rag.tree.get_map(0.5)] unique = set(dst_tree) start, end = self.id_service(len(unique)) remap = dict(zip(unique, range(start, end))) dst = list(map(remap.__getitem__, dst_tree)) src = list(range(len(dst))) message = {'type': 'fragment-segment-lut', 'data': {'fragments': src, 'segments': dst}} print('server sending:', message) try: self.comm.send_json(message, flags=zmq.NOBLOCK) except zmq.error.Again: return def listen(self, send_every=None): """Listen to ZMQ port for instructions and data. The instructions conform to the proofreading protocol defined in the BigCat wiki [1]_. Parameters ---------- send_every : int or float, optional Send a new segmentation every `send_every` seconds. References ---------- .. [1] https://github.com/saalfeldlab/bigcat/wiki/Actors,-responsibilities,-and-inter-process-communication """ start_time = time.time() recv_flags = zmq.NOBLOCK while True: if send_every is not None: elapsed_time = time.time() - start_time if elapsed_time > send_every: print('server resolving') self.send_segmentation() start_time = time.time() try: if recv_flags == zmq.NOBLOCK: print('server receiving no blocking...') else: print('server receiving blocking...') message = self.comm.recv_json(flags=recv_flags) print('server received:', message) recv_flags = zmq.NOBLOCK except zmq.error.Again: # no message received recv_flags = zmq.NULL print('server: no message received in time') if not self.recently_solved: print('server resolving') self.send_segmentation() continue command = message['type'] data = message['data'] if command == 'merge': segments = data['fragments'] self.learn_merge(segments) elif command == 'separate': fragment = data['fragment'] separate_from = data['from'] self.learn_separation(fragment, separate_from) elif command == 'request': what = data['what'] if what == 'fragment-segment-lut': self.send_segmentation() elif command == 'stop': return else: print('command %s not recognized.' % command) continue def learn_merge(self, segments): """Learn that a pair of segments should be merged. Parameters ---------- segments : tuple of int A pair of segment identifiers. """ segments = set(self.rag.tree.highest_ancestor(s) for s in segments) # ensure the segments are ordered such that every subsequent # pair shares an edge ordered = nx.dfs_preorder_nodes(nx.subgraph(self.rag, segments)) s0 = next(ordered) for s1 in ordered: self.features.append(self.feature_manager(self.rag, s0, s1)) self.history.append((s0, s1)) s0 = self.rag.merge_nodes(s0, s1) self.targets.append(MERGE_LABEL) self.recently_solved = False or len(set(self.targets)) < 2 def learn_separation(self, fragment, separate_from): """Learn that a pair of fragments should never be in the same segment. Parameters ---------- fragments : tuple of int A pair of fragment identifiers. 
""" f0 = fragment if not separate_from: separate_from = self.original_rag.neighbors(f0) s0 = self.rag.tree.highest_ancestor(f0) for f1 in separate_from: if self.rag.boundary_body in (f0, f1): continue s1 = self.rag.tree.highest_ancestor(f1) if self.rag.has_edge(s0, s1): self.features.append(self.feature_manager(self.rag, s0, s1)) self.targets.append(SEPAR_LABEL) if self.original_rag.has_edge(f0, f1): self.features.append(self.feature_manager(self.original_rag, f0, f1)) self.targets.append(SEPAR_LABEL) self.separate.append((f0, f1)) self.recently_solved = False or len(set(self.targets)) < 2 def relearn(self): """Learn a new merge policy using data gathered so far. This resets the state of the RAG to contain only the merges and separations received over the course of its history. """ clf = classify.DefaultRandomForest().fit(self.features, self.targets) self.policy = agglo.classifier_probability(self.feature_manager, clf) self.rag = self.original_rag.copy() self.rag.merge_priority_function = self.policy self.rag.rebuild_merge_queue() for i, (s0, s1) in enumerate(self.separate): self.rag.node[s0]['exclusions'].add(i) self.rag.node[s1]['exclusions'].add(i) def proofread(fragments, true_segmentation, host='tcp://localhost', port=5556, num_operations=10, mode='fast paint', stop_when_finished=False, request_seg=True, random_state=None): """Simulate a proofreader by sending and receiving messages to a Solver. Parameters ---------- fragments : array of int The initial segmentation to be proofread. true_segmentation : array of int The target segmentation. Should be a superset of `fragments`. host : string The host to serve ZMQ commands to. port : int Port on which to connect ZMQ. num_operations : int, optional How many proofreading operations to perform before returning. mode : string, optional The mode with which to simulate proofreading. stop_when_finished : bool, optional Send the solver a "stop" action when done proofreading. Useful when running tests so we don't intend to continue proofreading. random_state : None or int or numpy.RandomState instance, optional Fix the random state for proofreading. Returns ------- lut : tuple of array-like of int A look-up table from fragments (first array) to segments (second array), obtained by requesting it from the Solver after initial proofreading simulation. 
""" true = agglo2.best_segmentation(fragments, true_segmentation) base_graph = agglo2.fast_rag(fragments) comm = zmq.Context().socket(zmq.PAIR) comm.connect(host + ':' + str(port)) ctable = ev.contingency_table(fragments, true).tocsc() true_labels = np.unique(true) random = check_random_state(random_state) random.shuffle(true_labels) for _, label in zip(range(num_operations), true_labels): time.sleep(3) components = [int(i) for i in ctable.getcol(int(label)).indices] merge_msg = {'type': 'merge', 'data': {'fragments': components}} print('proofreader sends:', merge_msg) comm.send_json(merge_msg) for fragment in components: others = [int(neighbor) for neighbor in base_graph[fragment] if neighbor not in components] if not others: continue split_msg = {'type': 'separate', 'data': {'fragment': int(fragment), 'from': others}} print('proofreader sends:', split_msg) comm.send_json(split_msg) if request_seg: # if no request, assume server sends periodic updates req_msg = {'type': 'request', 'data': {'what': 'fragment-segment-lut'}} print('proofreader sends:', req_msg) comm.send_json(req_msg) print('proofreader receiving...') response = comm.recv_json() print('proofreader received:', response) src = response['data']['fragments'] dst = response['data']['segments'] if stop_when_finished: stop_msg = {'type': 'stop', 'data': {}} print('proofreader sends: ', stop_msg) comm.send_json(stop_msg) return src, dst def main(): parser = argparse.ArgumentParser('gala-serve') parser.add_argument('-f', '--config-file', help='JSON configuration file') parser.add_argument('input_file', help='Input image file') parser.add_argument('-F', '--fragment-group', default='volumes/labels/fragments', help='Group path in HDF file for fragments') parser.add_argument('-p', '--membrane-probabilities', default='volumes/membrane', help='Group path in HDF file for membrane prob map') args = parser.parse_args() from . import imio frags, probs = imio.read_cremi(args.input_file, [args.fragment_group, args.membrane_probabilities]) solver = Solver(frags, probs, config_file=args.config_file) solver.listen()
86818
import pefile from os import listdir from os.path import isfile, join from unicorn import UC_PROT_EXEC, UC_PROT_READ, UC_PROT_NONE, UC_PROT_WRITE from utility import * class Target_PEFile: """ Class to represent a PE file that is the target of the ROP exploit. Attributes: path (str): the PE file path into the file system. pe (PE): the PE object. image_base (int): the address of the base of the image. PE_bytes (bytearray): the actual content of the file. sections_map (list): a list containing the sections of the PE file. The entries are tuples containing the virtual address of the section, the size of the data, the actual data and the unicorn protection level. import_table (dict): a dictionary representing the import table of the PE file. The keys are the addresses of the imported functions and the values are the function names. """ def __init__(self, path, image_base=-1): # type: (str, int) -> None self.path = path.encode('utf-8') self.pe = pefile.PE(self.path) if image_base == -1: self.image_base = self.pe.OPTIONAL_HEADER.ImageBase else: self.image_base = align_address(image_base) with open(self.path, "rb") as binary: self.PE_bytes = binary.read() self.size_of_image = self.pe.OPTIONAL_HEADER.SizeOfImage self.sections_map = [] for section in self.pe.sections: if section.IMAGE_SCN_CNT_CODE or section.IMAGE_SCN_CNT_INITIALIZED_DATA: sect_address, sect_size = (section.PointerToRawData, section.SizeOfRawData) protection = _section_protections(section) self.sections_map.append((section.VirtualAddress, section.SizeOfRawData, self.PE_bytes[sect_address: sect_address + sect_size], protection)) self.import_table = {} if hasattr(self.pe, 'DIRECTORY_ENTRY_IMPORT'): for entry in self.pe.DIRECTORY_ENTRY_IMPORT: for imp in entry.imports: if not imp.import_by_ordinal: self.import_table[imp.address - self.pe.OPTIONAL_HEADER.ImageBase + self.image_base] = imp.name self.export_table = {} if hasattr(self.pe, 'DIRECTORY_ENTRY_EXPORT'): for exp in self.pe.DIRECTORY_ENTRY_EXPORT.symbols: self.export_table[self.image_base + exp.address] = exp.name def check_address(self, address): # type: (int) -> bool return self.image_base <= address <= self.image_base + self.size_of_image # A private helper function that translates the protection level # of the section in input in the actual unicorn protection level # constant. 
def _section_protections(section): protection = UC_PROT_NONE if section.IMAGE_SCN_MEM_EXECUTE: protection |= UC_PROT_EXEC if section.IMAGE_SCN_MEM_READ: protection |= UC_PROT_READ if section.IMAGE_SCN_MEM_WRITE: protection |= UC_PROT_WRITE return protection class Kernel32Exports: def __init__(self, os_version): k32_names = [] k32_path = 'kernel32/' if os_version == 'Win7' or os_version == 'undefined': path_to_files = k32_path + 'win7/' k32_names.extend([path_to_files + f for f in listdir(path_to_files) if isfile(join(path_to_files, f)) and f.endswith('.dll')]) if os_version == 'XP' or os_version == 'undefined': path_to_files = k32_path + 'xp/' k32_names.extend([path_to_files + f for f in listdir(path_to_files) if isfile(join(path_to_files, f)) and f.endswith('.dll')]) if os_version == 'Server2003' or os_version == 'undefined': path_to_files = k32_path + 'srv03/' k32_names.extend([path_to_files + f for f in listdir(path_to_files) if isfile(join(path_to_files, f)) and f.endswith('.dll')]) self.__export_by_address = {} self.__export_by_RVA = {} for name in k32_names: k32 = pefile.PE(name, fast_load=True) k32.parse_data_directories(directories=[ pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'] ]) image_base = k32.OPTIONAL_HEADER.ImageBase for exp in k32.DIRECTORY_ENTRY_EXPORT.symbols: self.__add_item_by_address(image_base, exp) self.__add_item_by_RVA(exp) def __add_item_by_RVA(self, exp): if exp.address in self.__export_by_RVA and self.__export_by_RVA[exp.address] != exp.name: if type(self.__export_by_RVA[exp.address]) == list: self.__export_by_RVA[exp.address].append(exp.name) else: new_elements = [self.__export_by_RVA[exp.address], exp.name] self.__export_by_RVA[exp.address] = new_elements else: self.__export_by_RVA[exp.address] = exp.name def __add_item_by_address(self, image_base, exp): address = image_base + exp.address if address in self.__export_by_address and self.__export_by_address[address] != exp.name: if type(self.__export_by_address[address]) == list: self.__export_by_address[address].append(exp.name) else: new_elements = [self.__export_by_address[address], exp.name] self.__export_by_address[address] = new_elements else: self.__export_by_address[address] = exp.name def get_name_by_address(self, address): if address in self.__export_by_address: return self.__export_by_address[address] return False def get_name_by_RVA(self, rva): if rva in self.__export_by_RVA: return self.__export_by_RVA[rva] return False
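# --- Usage sketch (added for illustration; 'target.exe' is a placeholder path and the
# --- helpers from `utility` are assumed to be importable as elsewhere in this project) ---
if __name__ == '__main__':
    target = Target_PEFile('target.exe')
    print('image base: %#x, %d mapped sections' % (target.image_base, len(target.sections_map)))
    for address, name in list(target.import_table.items())[:5]:
        print('%#x -> %s' % (address, name))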
86829
import pytest from hive.server.database_api.methods import list_comments from hive.steem.client import SteemClient from hive.conf import Conf @pytest.fixture def client(): return SteemClient(url='https://api.hive.blog') @pytest.mark.asyncio async def test_list_comments_by_cashout_time(client): with Conf() as conf: reference_data = await client.list_comments({"start":["1970-01-01T00:00:00","steemit","firstpost"],"limit":10,"order":"by_cashout_time"}) test_data = await list_comments({'db' : conf.db()}, ["1970-01-01T00:00:00","steemit","firstpost"],10,"by_cashout_time") assert reference_data assert test_data assert len(reference_data) == len(test_data) to_compare = ['author','permlink'] for idx in range(len(reference_data)): for key in to_compare: assert reference_data[idx][key] == test_data[idx][key] assert reference_data[idx]['cashout_time'] == test_data[idx]['payout_at'] @pytest.mark.asyncio async def test_list_comments_by_permlink(client): with Conf() as conf: reference_data = await client.list_comments({"start":["steemit","firstpost"],"limit":10,"order":"by_permlink"}) test_data = await list_comments({'db' : conf.db()}, ["steemit","firstpost"],10,"by_permlink") assert reference_data assert test_data assert len(reference_data) == len(test_data) to_compare = ['author','permlink'] for idx in range(len(reference_data)): for key in to_compare: assert reference_data[idx][key] == test_data[idx][key] @pytest.mark.asyncio async def test_list_comments_by_root(client): with Conf() as conf: reference_data = await client.list_comments({"start":["steemit","firstpost","",""],"limit":10,"order":"by_root"}) test_data = await list_comments({'db' : conf.db()}, ["steemit","firstpost","",""],10,"by_root") assert reference_data assert test_data assert len(reference_data) == len(test_data) to_compare = ['author','permlink','root_author','root_permlink'] for idx in range(len(reference_data)): for key in to_compare: assert reference_data[idx][key] == test_data[idx][key] @pytest.mark.asyncio async def test_list_comments_by_parent(client): with Conf() as conf: reference_data = await client.list_comments({"start":["steemit","firstpost","",""],"limit":10,"order":"by_parent"}) test_data = await list_comments({'db' : conf.db()}, ["steemit","firstpost","",""],10,"by_parent") assert reference_data assert test_data assert len(reference_data) == len(test_data) to_compare = ['author','permlink','parent_author','parent_permlink'] for idx in range(len(reference_data)): for key in to_compare: assert reference_data[idx][key] == test_data[idx][key]
86840
import argparse import cv2 import json import numpy as np import os import pickle import torch from argparse import Namespace from scipy.special import softmax from sklearn.externals import joblib from pyquaternion import Quaternion from tqdm import tqdm from network import CameraBranch class Camera_Branch_Inference(): def __init__(self, cfg, device): self.cfg = cfg # img preprocess self.img_input_shape = tuple([int(_) for _ in cfg.img_resize.split('x')]) self.img_mean = np.load(cfg.img_mean) # device self.device = device # Model self.model = CameraBranch(cfg) self.model = torch.nn.DataParallel(self.model) self.model = self.model.to(device) self.model.load_state_dict(torch.load(cfg.model_weight)) self.model = self.model.eval() self.model = self.model.to(device) # bin -> vectors self.kmeans_trans = joblib.load(cfg.kmeans_trans_path) self.kmeans_rots = joblib.load(cfg.kmeans_rots_path) def inference(self, img1_path, img2_path): img1 = cv2.imread(img1_path) img2 = cv2.imread(img2_path) img1 = cv2.resize(img1, self.img_input_shape) - self.img_mean img2 = cv2.resize(img2, self.img_input_shape) - self.img_mean img1 = np.transpose(img1, (2, 0, 1)) img2 = np.transpose(img2, (2, 0, 1)) img1 = torch.FloatTensor([img1]).to(self.device) img2 = torch.FloatTensor([img2]).to(self.device) with torch.no_grad(): pred = self.model(img1, img2) pred_tran = pred['tran'].cpu().detach().numpy() pred_rot = pred['rot'].cpu().detach().numpy() pred_tran_sm = softmax(pred_tran, axis=1) pred_rot_sm = softmax(pred_rot, axis=1) pred_sm = {'rot': pred_rot_sm, 'tran': pred_tran_sm} return pred_sm def xyz2class(self, x, y, z): return self.kmeans_trans.predict([[x,y,z]]) def quat2class(self, w, xi, yi, zi): return self.kmeans_rots.predict([[w, xi, yi, zi]]) def class2xyz(self, cls): assert((cls >= 0).all() and (cls < self.kmeans_trans.n_clusters).all()) return self.kmeans_trans.cluster_centers_[cls] def class2quat(self, cls): assert((cls >= 0).all() and (cls < self.kmeans_rots.n_clusters).all()) return self.kmeans_rots.cluster_centers_[cls] def get_relative_pose(pose): assert pose.shape[0] == 14 q1 = Quaternion(pose[3:7]) q2 = Quaternion(pose[10:14]) t1 = pose[:3] t2 = pose[7:10] relative_rotation = (q2.inverse * q1).elements relative_translation = get_relative_T_in_cam2_ref(q2.inverse.rotation_matrix, t1, t2) rel_pose = np.hstack((relative_translation, relative_rotation)) return rel_pose.reshape(-1) def get_relative_T_in_cam2_ref(R2, t1, t2): new_c2 = - np.dot(R2, t2) return np.dot(R2, t1) + new_c2 def suncg_parse_path(dataset_dir, img_path): splits = img_path.split('/') house_id = splits[-2] img_id = splits[-1] img_path = os.path.join(dataset_dir, house_id, img_id) return img_path def inference_by_dataset(cam_model, split_file, log_dir, dataset_dir): summary = {} with open(split_file, 'r') as f: lines = f.readlines()[3:] os.makedirs(log_dir, exist_ok=True) log_file_path = os.path.join(log_dir, 'summary.pkl') for line in tqdm(lines): annot = line.split(' ') img1_path, img2_path = annot[0], annot[8] house_idx = img1_path.split('/')[-2] cam1_idx = img1_path.split('/')[-1].split('_')[0] cam2_idx = img2_path.split('/')[-1].split('_')[0] key = house_idx + '_' + cam1_idx + '_' + cam2_idx gt_relative_pose = get_relative_pose(np.hstack((annot[1:8], annot[9:])).astype('f4')) prediction = cam_model.inference(suncg_parse_path(dataset_dir, img1_path), suncg_parse_path(dataset_dir, img2_path)) summary[key] = { 'tran_gt': gt_relative_pose[:3], 'rot_gt': gt_relative_pose[3:], 'tran_logits': prediction['tran'], 'rot_logits': 
prediction['rot'], 'tran_pred': cam_model.class2xyz(np.argmax(prediction['tran'])), 'rot_pred': cam_model.class2quat(np.argmax(prediction['rot'])), } with open(log_file_path, 'wb') as log_f: pickle.dump(summary, log_f) def main(): parser = argparse.ArgumentParser() parser.add_argument("--img1-path", type=str, help='path to img 1', default='./example/000009_mlt.png') parser.add_argument("--img2-path", type=str, help='path to img 2', default='./example/000029_mlt.png') parser.add_argument("--config-path", type=str, default='./config.txt', help='path to config') parser.add_argument("--log-dir", type=str, default="./output", help='log dir') parser.add_argument("--split-file", type=str, default="", help='split file path') parser.add_argument("--dataset-dir", type=str, default="./suncg_dataset", help="dataset directory") args, _ = parser.parse_known_args() print(args) with open(args.config_path, 'r') as f: cfg = Namespace(**json.load(f)) print(cfg) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') cam_model = Camera_Branch_Inference(cfg, device) if len(args.split_file) == 0: result = cam_model.inference(args.img1_path, args.img2_path) print(f"Predicted top 1 translation: {cam_model.class2xyz(np.argmax(result['tran']))}") print(f"Predicted top 1 rotation: {cam_model.class2quat(np.argmax(result['rot']))}") else: inference_by_dataset(cam_model, args.split_file, args.log_dir, args.dataset_dir) if __name__ == '__main__': main()
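# --- Invocation sketch (added; the script file name and the split file path are
# --- assumptions, the remaining values follow the argparse defaults above) ---
# Single image pair:
#   python camera_inference.py --img1-path ./example/000009_mlt.png \
#       --img2-path ./example/000029_mlt.png --config-path ./config.txt
# Whole split (writes <log-dir>/summary.pkl):
#   python camera_inference.py --split-file ./splits/test.txt \
#       --dataset-dir ./suncg_dataset --log-dir ./output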
86847
import numpy as np from torch import nn from torch.nn import functional as F import torch from torchvision import models import torchvision __all__ = ['ResNet_IR'] class ResNet_IR(nn.Module): def __init__(self, args): super().__init__() if args.backbone == 'resnet18': self.backbone = models.resnet18(pretrained=True) last_channels = 512 elif args.backbone == 'resnet34': self.backbone = models.resnet34(pretrained=True) last_channels = 512 elif args.backbone == 'resnet50': self.backbone = models.resnet50(pretrained=True) last_channels = 2048 elif args.backbone == 'resnet101': self.backbone = models.resnet101(pretrained=True) last_channels = 2048 elif args.backbone == 'resnet152': self.backbone = models.resnet152(pretrained=True) last_channels = 2048 self.features = nn.Sequential( self.backbone.conv1, self.backbone.bn1, self.backbone.relu, self.backbone.layer1, self.backbone.layer2, self.backbone.layer3, self.backbone.layer4) self.bn1 = nn.BatchNorm2d(last_channels) self.dropout = nn.Dropout2d(0.5) self.fc = nn.Linear(8*8*last_channels, args.num_features) self.bn2 = nn.BatchNorm1d(args.num_features) def freeze_bn(self): for m in self.features.modules(): if isinstance(m, nn.BatchNorm2d): m.weight.requires_grad = False m.bias.requires_grad = False def forward(self, x): x = self.features(x) x = self.bn1(x) x = self.dropout(x) x = x.view(x.shape[0], -1) x = self.fc(x) output = self.bn2(x) return output
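# --- Usage sketch (added for illustration; the SimpleNamespace stands in for the
# --- project's argparse args, which is an assumption) ---
# The flattened fc layer expects an 8x8 feature map, so inputs are assumed to be
# 256x256 (ResNet downsamples by a factor of 32).
if __name__ == '__main__':
    from types import SimpleNamespace

    args = SimpleNamespace(backbone='resnet18', num_features=128)
    model = ResNet_IR(args)
    dummy = torch.randn(2, 3, 256, 256)
    print(model(dummy).shape)  # torch.Size([2, 128])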
86849
import json import os from geojson import Polygon from kuwala.modules.common import polyfill_polygon # Get the aggregated number of a specific POI category per H3 index at a given resolution def get_pois_by_category_in_h3(sp, category, resolution, polygon_coords): polygon_cells = None if polygon_coords: polygon_coords = json.loads(polygon_coords) polygon = Polygon(polygon_coords) polygon_cells = list(polyfill_polygon(polygon, resolution=resolution)) # noinspection SqlNoDataSourceInspection query = ''' CALL { MATCH (pc:PoiCategory)<-[:BELONGS_TO]-(po:PoiOSM)-[:BELONGS_TO]->(p:Poi)-[:LOCATED_AT]->(h:H3Index) ''' + f''' WITH p, pc, io.kuwala.h3.h3ToParent(h.h3Index, {resolution}) AS h3_index WHERE {f'h3_index IN {polygon_cells} AND' if polygon_cells else ''} pc.name = '{category}' RETURN p UNION MATCH (pc:PoiCategory)<-[:BELONGS_TO]-(pg:PoiGoogle)-[b:BELONGS_TO]->(p:Poi)-[:LOCATED_AT]->(h:H3Index) WITH p, pc, io.kuwala.h3.h3ToParent(h.h3Index, {resolution}) AS h3_index WHERE {f'h3_index IN {polygon_cells} AND' if polygon_cells else ''} b.confidence >= 0.8 AND pc.name = '{category}' RETURN p ''' + '''} WITH p MATCH (p)-[:LOCATED_AT]->(h:H3Index) ''' + f'''WITH p, io.kuwala.h3.h3ToParent(h.h3Index, {resolution}) AS h3_index RETURN h3_index, COUNT(p) AS number_of_{category} ''' url = os.getenv('NEO4J_HOST') or 'bolt://localhost:7687' return sp.read.format("org.neo4j.spark.DataSource") \ .option("url", url) \ .option("authentication.type", "basic") \ .option("authentication.basic.username", "neo4j") \ .option("authentication.basic.password", "password") \ .option("query", query) \ .load()
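# --- Usage sketch (added for illustration; the SparkSession setup, category name and
# --- polygon coordinates are assumptions, and the Neo4j Spark connector jar providing
# --- "org.neo4j.spark.DataSource" must be on the classpath) ---
if __name__ == '__main__':
    from pyspark.sql import SparkSession

    sp = SparkSession.builder.appName('poi-aggregation').getOrCreate()
    polygon = '[[[13.3, 52.5], [13.5, 52.5], [13.5, 52.6], [13.3, 52.6], [13.3, 52.5]]]'
    df = get_pois_by_category_in_h3(sp, category='restaurants', resolution=8,
                                    polygon_coords=polygon)
    df.show()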
86863
import cv2
import threading
import time
import logging

logger = logging.getLogger(__name__)

thread = None


class Camera:
    def __init__(self, fps=20, video_source=0):
        logger.info(f"Initializing camera class with {fps} fps and video_source={video_source}")
        self.fps = fps
        self.video_source = video_source
        self.camera = cv2.VideoCapture(self.video_source)
        # We want a max of 5s history to be stored, that's 5s*fps
        self.max_frames = 5 * self.fps
        self.frames = []
        self.isrunning = False

    def run(self):
        logger.debug("Preparing thread")
        global thread
        if thread is None:
            logger.debug("Creating thread")
            thread = threading.Thread(target=self._capture_loop, daemon=True)
        logger.debug("Starting thread")
        self.isrunning = True
        thread.start()
        logger.info("Thread started")

    def _capture_loop(self):
        dt = 1 / self.fps
        logger.debug("Observation started")
        while self.isrunning:
            v, im = self.camera.read()
            if v:
                if len(self.frames) == self.max_frames:
                    # Drop the oldest frame to keep the buffer at max_frames
                    self.frames = self.frames[1:]
                self.frames.append(im)
            time.sleep(dt)
        logger.info("Thread stopped successfully")

    def stop(self):
        logger.debug("Stopping thread")
        self.isrunning = False

    def get_frame(self, _bytes=True):
        if len(self.frames) > 0:
            if _bytes:
                img = cv2.imencode('.png', self.frames[-1])[1].tobytes()
            else:
                img = self.frames[-1]
        else:
            # Fall back to a placeholder image when no frame has been captured yet
            with open("images/not_found.jpeg", "rb") as f:
                img = f.read()
        return img
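# --- Usage sketch (added for illustration; typically this class backs a streaming web
# --- endpoint, which is an assumption about the surrounding application) ---
if __name__ == "__main__":
    cam = Camera(fps=10, video_source=0)
    cam.run()
    time.sleep(1)                   # give the capture thread time to grab some frames
    png_bytes = cam.get_frame()     # latest frame encoded as PNG bytes
    print(len(png_bytes), "bytes")
    cam.stop()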
86875
from __future__ import annotations

import typing

from django.urls import path, include

from restdoctor.utils.api_prefix import get_api_path_prefixes
from tests.stubs.views import EmptyView

if typing.TYPE_CHECKING:
    from restdoctor.django.custom_types import URLPatternList

api_prefixes = get_api_path_prefixes()

api_urlpatterns: URLPatternList = [
    path('', EmptyView.as_view(), name='empty_view'),
    path('empty_v2', EmptyView.as_view(), name='empty_view_with_version'),
]

urlpatterns: URLPatternList = []

for api_prefix in api_prefixes:
    prefix_urlpattern = path(api_prefix, include((api_urlpatterns, 'api')))
    urlpatterns.append(prefix_urlpattern)
86939
from typing import Optional, Dict

import requests


class TelegramBotApi:
    def __init__(self, bot_token: str, bot_chat_id: Optional[str]) -> None:
        self._bot_token = bot_token
        self._bot_chat_id = bot_chat_id
        self._base_url = 'https://api.telegram.org/bot' + bot_token

    def send_message(self, message: str) -> Dict:
        data = {
            'chat_id': self._bot_chat_id,
            'text': message,
            'parse_mode': 'Markdown'
        }
        return requests.get(self._base_url + '/sendMessage', data=data, timeout=10).json()

    def get_updates(self) -> Dict:
        return requests.get(self._base_url + '/getUpdates', timeout=10).json()

    def get_me(self) -> Dict:
        return requests.get(self._base_url + '/getMe', timeout=10).json()
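# --- Usage sketch (added for illustration; the environment variable names are
# --- placeholders, not part of the class) ---
if __name__ == '__main__':
    import os

    bot = TelegramBotApi(bot_token=os.environ['BOT_TOKEN'],
                         bot_chat_id=os.environ.get('BOT_CHAT_ID'))
    print(bot.get_me())                    # identity of the bot behind the token
    bot.send_message('*deploy finished*')  # parse_mode is Markdown in send_message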
86950
import sys from itertools import islice from .base_magics import DataMagic from .. import seq from ..functions import fn, to_callable from ..typing import Seq class L(DataMagic, type=list): """ Class for the L magic object. """ def __getitem__(self, item): if isinstance(item, slice): return fn(lambda obj: self._getslice(obj, item)) elif isinstance(item, int): return fn(lambda obj: self._getindex(obj, item)) else: raise TypeError(f"unsupported type: {type(item).__name__}") def _getindex(self, obj, item): try: return obj[item] except TypeError: return seq.nth(item, obj) def _getslice(self, obj, s): if isinstance(obj, list): return obj[s] if s.step is not None and s.step < 0: result = list(islice(obj, s.start, s.stop, -s.step)) result.reverse() return result return list(islice(obj, s.start, s.stop, s.step)) @staticmethod def append(x, lst): """Append x to lst *INPLACE* and return lst.""" lst.append(x) return lst @staticmethod def append_new(x, lst): """Return new list with x appended to the end of lst.""" return [*lst, x] @staticmethod def clear(lst): """Clear list *INPLACE* and return it. If input is an iterator, consume it. This obviously doesn't play nicely with infinite iterators.""" try: clear = lst.clear except AttributeError: seq.consume(lst) else: clear() return lst @staticmethod def count(value, lst): """Count the number of occurrences of value in list.""" try: count = lst.count except AttributeError: return seq.count(value, lst) else: return count(value) @staticmethod def copy(lst): """Return a copy of list. Non-list iterables are converted to lists.""" if isinstance(lst, list): return lst.copy() return list(lst) @staticmethod def extend(self, seq, lst): """Extend lst with seq *INPLACE* and return lst.""" lst.extend(seq) return lst @staticmethod def extend_new(seq, lst): """Create new list that extends lst with other.""" return [*lst, *seq] @staticmethod def index(value, lst, start=0, stop=sys.maxsize): """Return first index of value in lst.""" try: index = lst.index except AttributeError: return seq.index(value, lst) else: return index(value) @staticmethod def insert(index, obj, lst): """Insert obj at given index of lst *INPLACE* and return lst.""" lst.insert(index, obj) return lst @staticmethod def insert_new(index, obj, lst): """Return new list that insert obj at the given position in lst.""" result = list(lst) result.insert(index, obj) return result @staticmethod def pop(idx, lst): """Pop element at idx *INPLACE* and return it.""" idx = -1 if idx is None else idx return lst.pop(idx) @staticmethod def pop_new(idx, lst): """Creates a copy of lst and return a tuple of (popped_element, rest)""" new = list(lst) popped = new.pop(idx) return popped, new @staticmethod def remove(value, lst): """Remove first occurrence of value *INPLACE* and return lst""" lst.remove(value) return lst def remove_new(self, value, lst): """ Create a list with the first occurrence of value removed. Raises value error if value is not present in list. """ return self.remove(value, list(lst)) @staticmethod def reverse(lst): """Reverse list *INPLACE* and return it.""" lst.reverse() return lst @staticmethod def reverse_new(lst: Seq): """Return reversed copy of lst. Alias to the ``reversed`` builtin.""" return reversed(lst) @staticmethod def sort(key, lst, *, reverse=False): """Sort list with key function *INPLACE* and return it.""" key = to_callable(key) lst.sort(key=key, reverse=reverse) return lst @staticmethod def sort_new(key, lst, *, reverse=False): """ Create new list sorted with key. 
Equivalent to the ``sorted`` builtin. """ return sorted(lst, key=to_callable(key), reverse=reverse) # # Extra methods # def as_list(self, obj, list_type=list): """Coerce object to list.""" return obj if isinstance(obj, list_type) else list_type(obj) def discard(self, value): def removing(seq): res = list(seq) try: res.remove(value) except ValueError: pass return res return removing def discard_all(self, value): return lambda seq: [x for x in seq if x != value]
86952
import argparse import math import os import random import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data as Data from torch.autograd import Variable from torch.utils.data import Dataset import math from model import HierachyVAE from read_data import * from utils import * parser = argparse.ArgumentParser(description='Hierachy VAE') parser.add_argument('--epochs', type=int, default=100) parser.add_argument('--batch-size', type=int, default = 6) parser.add_argument('--batch-size-u', type=int, default=32) parser.add_argument('--val-iteration', type=int, default=120) parser.add_argument('--n-highway-layers', type=int, default=0) parser.add_argument('--encoder-layers', type=int, default=1) parser.add_argument('--generator-layers', type=int, default=1) parser.add_argument('--bidirectional', type=bool, default=False) parser.add_argument('--embedding-size', type=int, default=128) parser.add_argument('--encoder-hidden-size', type=int, default=128) parser.add_argument('--generator-hidden-size', type=int, default=128) parser.add_argument('--z-size', type=int, default=64) parser.add_argument('--gpu', default='2,3', type=str, help='id(s) for CUDA_VISIBLE_DEVICES') parser.add_argument('--n-labeled-data', type=int, default=100, help='Number of labeled data') parser.add_argument('--n-unlabeled-data', type=int, default=- 1, help='Number of unlabeled data') parser.add_argument('--data-path', type=str, default='./borrow/', help='path to data folders') parser.add_argument('--max-seq-num', type=int, default=6, help='max sentence num in a message') parser.add_argument('--max-seq-len', type=int, default=64, help='max sentence length') parser.add_argument('--word-dropout', type=float, default=0.8) parser.add_argument('--lr', type=float, default=0.001) parser.add_argument('--rec-coef', type=float, default=1) parser.add_argument('--predict-weight', type=float, default=1) parser.add_argument('--class-weight', type=float, default=5) parser.add_argument('--kld-weight-y', type=float, default=1) parser.add_argument('--kld-weight-z', type=float, default=1) parser.add_argument('--kld-y-thres', type=float, default=1.4) parser.add_argument('--warm-up', default='False', type=str) parser.add_argument('--hard', type=str, default='False') parser.add_argument('--tau', type=float, default=1) parser.add_argument('--tau-min', type=float, default=0.4) parser.add_argument('--anneal-rate', type=float, default=0.01) parser.add_argument('--tsa-type', type=str, default='exp') args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu use_cuda = torch.cuda.is_available() devices = torch.device("cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() print("gpu num: ", n_gpu) if args.warm_up == 'False': args.warm_up = False else: args.warm_up = True if args.hard == 'False': args.hard = False else: args.hard = True best_acc = 0 total_steps = 0 def main(): global best_acc train_labeled_dataset, train_unlabeled_dataset, val_dataset, test_dataset, vocab, n_labels, doc_labels = read_data( data_path=args.data_path, n_labeled_data=args.n_labeled_data, n_unlabeled_data=args.n_unlabeled_data, max_seq_num=args.max_seq_num, max_seq_len=args.max_seq_len, embedding_size=args.embedding_size) dist = train_labeled_dataset.esit_dist labeled_trainloader = Data.DataLoader( dataset=train_labeled_dataset, batch_size=args.batch_size, shuffle=True) unlabeled_trainloader = Data.DataLoader( dataset=train_unlabeled_dataset, batch_size=args.batch_size_u, shuffle=True) 
val_loader = Data.DataLoader( dataset=val_dataset, batch_size=16, shuffle=False) test_loader = Data.DataLoader( dataset=test_dataset, batch_size=16, shuffle=False) model = HierachyVAE(vocab.vocab_size, args.embedding_size, args.n_highway_layers, args.encoder_hidden_size, args.encoder_layers, args.generator_hidden_size, args.generator_layers, args.z_size, n_labels, doc_labels, args.bidirectional, vocab.embed, args.hard).cuda() model = nn.DataParallel(model) train_criterion = HierachyVAELoss() criterion = nn.CrossEntropyLoss() optimizer = torch.optim.AdamW(params = filter(lambda p: p.requires_grad, model.parameters()), lr = args.lr) test_accs = [] count = 20 for epoch in range(args.epochs): if epoch % 10 == 0: args.tau = np.maximum(args.tau * np.exp(-args.anneal_rate*epoch), args.tau_min) train(labeled_trainloader, unlabeled_trainloader, vocab, optimizer, model, train_criterion, epoch, n_labels, dist) _, train_acc, total, macro_f1 = validate(labeled_trainloader, model, criterion, epoch, n_labels, vocab) print("epoch {}, train acc {}, train amount {}, micro_f1 {}".format( epoch, train_acc, total, macro_f1)) val_loss, val_acc, total, macro_f1 = validate(val_loader, model, criterion, epoch, n_labels, vocab) print("epoch {}, val acc {}, val_loss {}, micro_f1 {}".format( epoch, val_acc, val_loss, macro_f1)) count = count -1 if val_acc >= best_acc: count = 20 best_acc = val_acc test_loss, test_acc, total, macro_f1 = validate(test_loader, model, criterion, epoch, n_labels, vocab) test_accs.append((test_acc, macro_f1)) torch.save(model, args.data_path + 'model.pkl') print("epoch {}, test acc {},test loss {}".format( epoch, test_acc, test_loss)) print('Best acc:') print(best_acc) print('Test acc:') print(test_accs) if count < 0: print("early stop") break print('Best acc:') print(best_acc) print('Test acc:') print(test_accs) def create_generator_inputs(x, vocab, train = True): prob = [] for i in range(0, x.shape[0]): temp = [] for j in range(0, x.shape[1]): if x[i][j] != 3: temp.append(x[i][j]) prob.append(temp) prob = torch.tensor(prob) if train == False: return prob r = np.random.rand(prob.shape[0], prob.shape[1]) for i in range(0, prob.shape[0]): for j in range(1, prob.shape[1]): if r[i, j] < args.word_dropout and prob[i, j] not in [vocab.word2id['<pad>'], vocab.word2id['<eos>']]: prob[i, j] = vocab.word2id['<unk>'] return prob def train(labeled_trainloader, unlabeled_trainloader, vocab, optimizer, model, criterion, epoch, n_labels, dist): labeled_train_iter = iter(labeled_trainloader) unlabeled_train_iter = iter(unlabeled_trainloader) model.train() tau = args.tau for batch_idx in range(args.val_iteration): try: x, l, y, mask1, mask2, mask3, mask4, mid, sent_len, doc_len = labeled_train_iter.next() except: labeled_train_iter = iter(labeled_trainloader) x, l, y, mask1, mask2, mask3, mask4, mid, sent_len, doc_len = labeled_train_iter.next() try: x_u, l_u, y_u, mask1_u, mask2_u, mask3_u, mask4_u, mid_u, sent_len_u, doc_len_u = unlabeled_train_iter.next() except: unlabeled_train_iter = iter(unlabeled_trainloader) x_u, l_u, y_u, mask1_u, mask2_u, mask3_u, mask4_u, mid_u, sent_len_u, doc_len_u = unlabeled_train_iter.next() x = torch.cat([x, x_u], dim = 0) l = torch.cat([l, l_u], dim = 0) y = torch.cat([y.long(), y_u.long()], dim = 0) mask1 = torch.cat([mask1, mask1_u], dim = 0) mask2 = torch.cat([mask2, mask2_u], dim = 0) mask3 = torch.cat([mask3, mask3_u], dim = 0) mask4 = torch.cat([mask4, mask4_u], dim = 0) doc_len = torch.cat([doc_len, doc_len_u], dim = 0) sent_len = torch.cat([sent_len, sent_len_u], 
dim = 0) batch_size = l.shape[0] seq_num = x.shape[1] seq_len = x.shape[2] temp = l.view(-1, 1).long() l_one_hot = torch.zeros(batch_size*seq_num, n_labels).cuda() for i in range(0, len(temp)): if temp[i] != 10: l_one_hot[i][temp[i]] = 1 l_one_hot = l_one_hot.view(batch_size, seq_num, n_labels) if batch_idx % 30 == 1: tau = np.maximum(tau * np.exp(-args.anneal_rate*batch_idx), args.tau_min) xs, ys = (x.view(batch_size*seq_num, seq_len), l.view(batch_size*seq_num)) prob = create_generator_inputs(xs, vocab, train = True) x, prob, l_one_hot, y, l = x.cuda(), prob.cuda(), l_one_hot.cuda(), y.cuda(), l.cuda() mask1, mask2 = mask1.cuda(), mask2.cuda() logits, kld_z, q_y, q_y_softmax, t, strategy_embedding = model(x, prob, args.tau, mask1, mask2, args.hard, l_one_hot, doc_len = doc_len, sent_len = sent_len) mse_loss, likelihood, kld_z, log_prior, classification_loss, kld_y, kld_weight_y, kld_weight_z = criterion(logits, kld_z, q_y, q_y_softmax, t, mask1, mask2, mask3, mask4, x, l, y, l_one_hot, epoch + batch_idx/args.val_iteration, n_labels, dist, tsa_type = args.tsa_type) if kld_y < args.kld_y_thres: kld_weight_y = 0 else: kld_weight_y = kld_weight_y if classification_loss < 0.001: class_weight = args.class_weight else: class_weight = args.class_weight if args.warm_up: predict_weight = linear_rampup(epoch+batch_idx/args.val_iteration) * args.predict_weight else: predict_weight = args.predict_weight if args.warm_up: rec_coef = linear_rampup(epoch+batch_idx/args.val_iteration) * args.rec_coef else: rec_coef = args.rec_coef loss = predict_weight * mse_loss + rec_coef * likelihood + class_weight * classification_loss + kld_weight_y * (kld_y + log_prior) + kld_weight_z * kld_z optimizer.zero_grad() loss.backward() optimizer.step() if batch_idx%100 == 0: print("epoch {}, step {}, loss {}, mse loss {}, reconstruct {}, classification {}, kld y {}. 
kld z {}".format(epoch, batch_idx, loss, mse_loss, likelihood, classification_loss, kld_y, kld_z)) def validate(val_loader, model, criterion, epoch, n_labels, vocab): model.eval() with torch.no_grad(): loss_total = 0 total_sample = 0 acc_total = 0 correct = 0 predict_dict = {} correct_dict = {} correct_total = {} for i in range(0, n_labels): predict_dict[i] = 0 correct_dict[i] = 0 correct_total[i] = 0 p = 0 r = 0 for batch_idx, (x, l, y, mask1, mask2, mask3, mask4, mid, sent_len, doc_len) in enumerate(val_loader): x, l = x.cuda(), l.cuda() batch_size = x.shape[0] seq_num = x.shape[1] seq_len = x.shape[2] x = x.view(batch_size * seq_num, seq_len) l = l.view(batch_size * seq_num).long() sent_len = sent_len.view(batch_size * seq_num) logits, ___ = model.module.encode(x, sent_len = sent_len) _, predicted = torch.max(logits.data, 1) trainable_idx = torch.where(mask1.view(batch_size * seq_num) == 1) if len(trainable_idx[0]) <= 0: print("...") print(mask1.view(batch_size * seq_num)) print(np.array(mask1.view(batch_size * seq_num)).sum()) continue loss = criterion(logits[trainable_idx], l[trainable_idx]) correct += (np.array(predicted.cpu())[trainable_idx] == np.array(l.cpu())[trainable_idx]).sum() input_size = np.array(mask1.view(batch_size * seq_num)).sum() loss_total += loss.item() * input_size total_sample += input_size #print(x.shape, mask1.shape) for i in range(0, len(trainable_idx[0])): correct_total[np.array(l[trainable_idx].cpu())[i]] += 1 predict_dict[np.array(predicted[trainable_idx].cpu())[i]] += 1 if np.array(l[trainable_idx].cpu())[i] == np.array(predicted[trainable_idx].cpu())[i]: correct_dict[np.array(l[trainable_idx].cpu())[i]] += 1 f1 = [] true_total_ = 0 predict_total_ = 0 correct_total_ = 0 for (u, v) in correct_dict.items(): if predict_dict[u] == 0: temp = 0 else: temp = v/predict_dict[u] if correct_total[u] == 0: temp2 = 0 else: temp2 = v/correct_total[u] if temp == 0 and temp2 == 0: f1.append(0) else: f1.append((2*temp*temp2)/(temp+temp2)) true_total_ += correct_total[u] predict_total_ += predict_dict[u] correct_total_ += v Marco_f1 = sum(f1)/(len(f1)) p = correct_total_ / predict_total_ r = correct_total_/ true_total_ Micro_f1 = (2*p*r)/(p+r) print('true dist: ', correct_total) print('predict dist: ', predict_dict) print('correct pred: ', correct_dict) print('Macro: ', Marco_f1) print('Micro: ', Micro_f1) acc_total = correct / total_sample loss_total = loss_total / total_sample return loss_total, Marco_f1, total_sample, Micro_f1 def linear_rampup(current, rampup_length=args.epochs): if rampup_length == 0: return 1.0 else: current = np.clip(current / rampup_length, 0.0, 1.0) return float(current) def TSA(epoch, n_class, tsa_type = 'exp'): epoch = math.floor(epoch) if tsa_type == 'exp': return np.exp((epoch/args.epochs - 1) * 5) * (1-1/n_class) + 1/n_class elif tsa_type == 'linear': return epoch/args.epochs * (1- 1/n_class) + 1/n_class elif tsa_type == 'log': return (1-np.exp(-epoch/args.epochs * 5)) * (1-1/n_class) + 1/n_class else: return 1 class HierachyVAELoss(object): def __call__(self, logits, kld_z, q_y, q_y_softmax, t, mask1, mask2, mask3, mask4, x, l, y, l_one_hot, epoch, n_labels, dist, tsa_type = 'exp'): mse_loss = F.cross_entropy(t, y.long()) batch_size = x.shape[0] seq_num = x.shape[1] seq_len = x.shape[2] n_class = l_one_hot.shape[-1] xs, ys, ys_one_hot = (x.view(batch_size*seq_num, seq_len), l.view(batch_size*seq_num), l_one_hot.view(batch_size*seq_num, n_class)) xs = xs[:, 1:xs.shape[1]] trainable_idx = torch.where(mask4.view(batch_size*seq_num) == 1) logits_ 
= logits[trainable_idx].view(-1, logits.shape[-1]) xs_ = xs[trainable_idx].contiguous().view(-1) weight = torch.tensor([0.0] + [1.0]*(logits.shape[-1]-1)).cuda() likelihood = F.cross_entropy(logits_, xs_, weight = weight) kld_z = kld_z.mean() trainable_idx = torch.where(mask1.view(batch_size * seq_num) == 1) prior = standard_categorical(ys_one_hot) log_prior = -torch.sum(ys_one_hot[trainable_idx] * torch.log(prior[trainable_idx] + 1e-8), dim = 1).mean() thres = TSA(epoch, n_labels, tsa_type) q_y_log_softmax = F.log_softmax(q_y, dim = 1) if len(trainable_idx[0]) > 0: count = 0 classification_loss = 0 for i in range(0,len(trainable_idx[0])): try: if q_y_softmax[trainable_idx[0][i]][ys[trainable_idx[0][i]].long()] < thres: classification_loss += (-1 * q_y_log_softmax[trainable_idx[0][i]][ys[trainable_idx[0][i]].long()]) count += 1 except: print(thres) print(epoch) print(q_y_softmax[trainable_idx[0][i]]) print(q_y_softmax[trainable_idx[0][i]][ys[trainable_idx[0][i]].long()]) exit() if count > 0: classification_loss = classification_loss / count else: classification_loss = 0 else: classification_loss = 0 trainable_idx = torch.where(mask2.view(batch_size*seq_num) == 1) g = Variable(torch.log(dist)).cuda() log_qy = torch.log(q_y_softmax[trainable_idx] + 1e-8) kld_y = torch.sum(q_y_softmax[trainable_idx]*(log_qy - g), dim = -1).mean() return mse_loss, likelihood, kld_z, log_prior, classification_loss, kld_y, args.kld_weight_y * linear_rampup(epoch), args.kld_weight_z* linear_rampup(epoch) if __name__ == '__main__': main()
87053
import os

from permissions.roles import *
from permissions.tables._vars import add_post_visible_limit
from slim.base.permission import Ability, A, DataRecord
from slim.base.sqlquery import SQLQueryInfo, SQL_OP
from slim.utils import get_bytes_from_blob

TABLE_NAME = os.path.basename(__file__).split('.', 1)[0]


# If the record being queried is the current user's own, attach extra columns
def func(ability: Ability, user, query: 'SQLQueryInfo'):
    for i in query.conditions.find('id'):
        if i[1] == SQL_OP.EQ and i[2] == get_bytes_from_blob(user.id):
            query.select.add('email')
            query.select.add('token_time')


user.add_query_condition(TABLE_NAME, func=func)


# Prevent other users from writing to someone else's profile
def check_is_me(ability, user, action, record: DataRecord, available_columns: list):
    if user:
        if record.get('id') != user.id:
            available_columns.clear()
    return True


visitor.add_record_check((A.WRITE,), TABLE_NAME, func=check_is_me)

# Do not allow querying records that are in a deleted state
add_post_visible_limit(TABLE_NAME)
87068
import numpy as np import math import random def f(x): return (x[0]-3)**2 + (x[1]+1)**2 class ES: def __init__(self, MaxIter, a, sigma0, f): self.MaxIter = MaxIter self.f = f self.a = a self.sigma = 0.4 self.sigma0 = sigma0 self.P_S = 0 self.x = [2, 2] self.eps = 0.0001 self.brojUspjesnih = 0 self.brojIter = 0 def mutate(self): x = [min(max(self.x[0] + self.sigma0[0][0] * random.gauss(0, self.sigma), -10), 10), min(max(self.x[1] + self.sigma0[1][1] * random.gauss(0, self.sigma), -10), 10)] print(x, end = ' ') if abs(x[0]) >= 10 or abs(x[1]) >= 10: x = [random.uniform(-10,10), random.uniform(-10, 10)] return x def step(self): xn = self.mutate() if self.f(xn) <= self.f(self.x): self.x = xn self.brojUspjesnih += 1 if self.P_S > 1/5: self.sigma0[0][0] *= self.a elif self.P_S < 1/5: self.sigma0[1][1] /= self.a self.brojIter += 1 self.P_S = self.brojUspjesnih / self.brojIter def run(self): for i in range(0, self.MaxIter): self.step() print('') return self.x # print((ES(100, 1.47, [[1, 0], [0, 1]], f).run())) import matplotlib.pyplot as plt from mpl_toolkits import mplot3d IT = 15 a = 1.1 S = [[0.5, 0], [0, 0.5]] def f(X): return (X[0]-3)**2 + (X[1]+1)**2 x1 = np.linspace(-5, 5, 100) x2 = np.linspace(-5, 5, 100) X1, X2 = np.meshgrid(x1, x2) Y = f([X1, X2]) fig = plt.figure() ax = fig.add_subplot(2,2,1,projection='3d') ax.contour(X1, X2, Y, 50, cmap='binary') xTS = ES(IT, a, S, f).run() ax.scatter(xTS[0], xTS[1], f(xTS), color='blue', marker='o') # ax.scatter(xILS[0], xILS[1], f(xILS), color='green', marker='x') ax.set_xlabel('$x_1$') ax.set_ylabel('$x_2$') ax.set_zlabel('$f(x_1,x_2)$'); ax.set_title('$f(x_1,x_2) = (x_1-3)^2 + (x_2+1)^2$') print((ES(IT, a, S, f).run())) def f(X): return -(1+np.cos(12*np.sqrt(X[0]**2 + X[1]**2)))/ (0.5*(X[0]**2 + X[1]**2) + 2) x1 = np.linspace(-5, 5, 100) x2 = np.linspace(-5, 5, 100) X1, X2 = np.meshgrid(x1, x2) Y = f([X1, X2]) ax = fig.add_subplot(2,2,2,projection='3d') ax.contour(X1, X2, Y, 50, cmap='binary') xTS = ES(IT, a, S, f).run() ax.scatter(xTS[0], xTS[1], f(xTS), color='blue', marker='o') # ax.scatter(xILS[0], xILS[1], f(xILS), color='green', marker='x') ax.set_xlabel('$x_1$') ax.set_ylabel('$x_2$') ax.set_zlabel('$f(x_1,x_2)$'); ax.set_title('$f(x_1,x_2) = (1-x_1)^2+100(x_2-x_1^2)^2$') print((ES(IT, a, S, f).run())) def f(x): return 20 + (x[0]**2 - 10*np.cos(2*math.pi*x[0])) + (x[1]**2 - 10*np.cos(2*math.pi*x[1])) x1 = np.linspace(-5, 5, 100) x2 = np.linspace(-5, 5, 100) X1, X2 = np.meshgrid(x1, x2) Y = f([X1, X2]) ax = fig.add_subplot(2,2,3,projection='3d') ax.contour(X1, X2, Y, 50, cmap='binary') xTS = ES(IT, a, S, f).run() ax.scatter(xTS[0], xTS[1], f(xTS), color='blue', marker='o') # ax.scatter(xILS[0], xILS[1], f(xILS), color='green', marker='x') ax.set_xlabel('$x_1$') ax.set_ylabel('$x_2$') ax.set_zlabel('$f(x_1,x_2)$'); ax.set_title('$f(x_1,x_2) = 20 + (x_1^2 - 10cos(2\pi x_1) + (x_2^2 - 10cos(2\pi x_2)$') print((ES(IT, a, [[0.5, 0], [0, 0.5]], f).run())) def f(x): return -abs(np.sin(x[0]) * np.cos(x[1]) * np.exp(abs(1 - np.sqrt(x[0]**2 + x[1]**2)/math.pi))) x1 = np.linspace(-11, 11, 100) x2 = np.linspace(-11, 11, 100) X1, X2 = np.meshgrid(x1, x2) Y = f([X1, X2]) ax = fig.add_subplot(2,2,4,projection='3d') ax.contour(X1, X2, Y, 50, cmap='binary') xTS = ES(IT, a, S, f).run() ax.scatter(xTS[0], xTS[1], f(xTS), color='blue', marker='o') # ax.scatter(xILS[0], xILS[1], f(xILS), color='green', marker='x') ax.set_xlabel('$x_1$') ax.set_ylabel('$x_2$') ax.set_zlabel('$f(x_1,x_2)$'); ax.set_title('$f(x_1,x_2) = -|\sin(x_1)|\cos(x_2)e^{|1 - 
\sqrt{x_1^2+x_2^2}/\pi|}$') print((ES(IT, a, S, f).run())) plt.show()
87091
import weakref
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.rlib.rarithmetic import LONG_BIT


class GroupType(lltype.ContainerType):
    """A 'group' that stores static structs together in memory.

    On 32-bit platforms, the point is that they can be referenced by a
    GroupMemberOffset which only takes 2 bytes (a USHORT), so the total size
    of a group is limited to 18 bits (= the 16 bits in a USHORT, plus 2 bits
    at the end that are zero and so don't need to be stored).

    On 64-bit platforms, we check that the address they end up at is within
    the first 32 bits, so that we can store that address in half a long
    (i.e. in a UINT).
    """
    _gckind = 'raw'

Group = GroupType()


class group(lltype._container):
    _TYPE = Group
    outdated = None

    def __init__(self, name):
        self.name = name
        self.members = []

    def add_member(self, structptr):
        TYPE = lltype.typeOf(structptr)
        assert isinstance(TYPE.TO, lltype.Struct)
        assert TYPE.TO._gckind == 'raw'
        struct = structptr._as_obj()
        prevgroup = _membership.get(struct)
        if prevgroup is not None:
            prevgroup.outdated = (
                "structure %s was inserted into another group" % (struct,))
        assert struct._parentstructure() is None
        index = len(self.members)
        self.members.append(struct)
        _membership[struct] = self
        return GroupMemberOffset(self, index)


def member_of_group(structptr):
    return _membership.get(structptr._as_obj(), None)

_membership = weakref.WeakValueDictionary()


if LONG_BIT == 32:
    HALFSHIFT = 16
    HALFWORD = rffi.USHORT
    r_halfword = rffi.r_ushort
else:
    HALFSHIFT = 32
    HALFWORD = rffi.UINT
    r_halfword = rffi.r_uint


class GroupMemberOffset(llmemory.Symbolic):
    """The offset of a struct inside a group, stored compactly in a HALFWORD
    (a USHORT or UINT).  Can only be used by the lloperation
    'get_group_member'.
    """
    def annotation(self):
        from rpython.annotator import model
        return model.SomeInteger(knowntype=r_halfword)

    def lltype(self):
        return HALFWORD

    def __init__(self, grp, memberindex):
        assert lltype.typeOf(grp) == Group
        self.grpptr = grp._as_ptr()
        self.index = memberindex
        self.member = grp.members[memberindex]._as_ptr()

    def __repr__(self):
        return '%s(%s, %s)' % (self.__class__.__name__,
                               self.grpptr, self.index)

    def __nonzero__(self):
        return True

    def _get_group_member(self, grpptr):
        assert grpptr == self.grpptr, "get_group_member: wrong group!"
        return self.member

    def _get_next_group_member(self, grpptr, skipoffset):
        # ad-hoc: returns a pointer to the group member that follows this one,
        # given information in 'skipoffset' about how much to skip -- which
        # is the size of the current member.
        assert grpptr == self.grpptr, "get_next_group_member: wrong group!"
        assert isinstance(skipoffset, llmemory.ItemOffset)
        assert skipoffset.TYPE == lltype.typeOf(self.member).TO
        assert skipoffset.repeat == 1
        return self.grpptr._as_obj().members[self.index + 1]._as_ptr()


class CombinedSymbolic(llmemory.Symbolic):
    """A general-purpose Signed symbolic that combines an unsigned half-word
    (USHORT on 32-bit platforms, UINT on 64-bit platforms) and the rest of
    the word (typically flags).  Only supports extracting the half-word with
    'llop.extract_ushort', and extracting the rest of the word with '&~0xFFFF'
    or with a direct masking like '&0x10000' (resp. on 64-bit platform, with
    '&~0xFFFFFFFF' or '&0x100000000').
    """
    __slots__ = ['lowpart', 'rest']
    MASK = (1 << HALFSHIFT) - 1     # 0xFFFF or 0xFFFFFFFF

    def annotation(self):
        from rpython.annotator import model
        return model.SomeInteger()

    def lltype(self):
        return lltype.Signed

    def __init__(self, lowpart, rest):
        assert (rest & CombinedSymbolic.MASK) == 0
        self.lowpart = lowpart
        self.rest = rest

    def __repr__(self):
        return '<CombinedSymbolic %r|%s>' % (self.lowpart, self.rest)

    def __nonzero__(self):
        return True

    def __and__(self, other):
        if (other & CombinedSymbolic.MASK) == 0:
            return self.rest & other
        if (other & CombinedSymbolic.MASK) == CombinedSymbolic.MASK:
            return CombinedSymbolic(self.lowpart, self.rest & other)
        raise Exception("other=0x%x" % other)

    def __or__(self, other):
        assert (other & CombinedSymbolic.MASK) == 0
        return CombinedSymbolic(self.lowpart, self.rest | other)

    def __add__(self, other):
        assert (other & CombinedSymbolic.MASK) == 0
        return CombinedSymbolic(self.lowpart, self.rest + other)

    def __sub__(self, other):
        assert (other & CombinedSymbolic.MASK) == 0
        return CombinedSymbolic(self.lowpart, self.rest - other)

    def __rshift__(self, other):
        assert other >= HALFSHIFT
        return self.rest >> other

    def __eq__(self, other):
        if (isinstance(other, CombinedSymbolic) and
                self.lowpart is other.lowpart):
            return self.rest == other.rest
        else:
            return NotImplemented

    def __ne__(self, other):
        if (isinstance(other, CombinedSymbolic) and
                self.lowpart is other.lowpart):
            return self.rest != other.rest
        else:
            return NotImplemented
87109
from typing import Dict, List import torch import numpy as np import copy from ..modules import Module from ..datasets import shuffle, collate_dict_wrapper class ObverterDatasamplingModule(Module): def __init__(self, id:str, config:Dict[str,object]): """ :param id: str defining the ID of the module. """ input_stream_ids = { "dataset":"current_dataset:ref", "epoch":"signals:epoch", "mode":"signals:mode", "use_cuda":"signals:use_cuda", "it_sample":"signals:it_sample", # step in the sequence of repetitions of the current batch "it_step":"signals:it_step", # step in the communication round. } super(ObverterDatasamplingModule, self).__init__( id=id, type="ObverterDatasamplingModule", config=config, input_stream_ids=input_stream_ids) self.batch_size = self.config["batch_size"] self.collate_fn = collate_dict_wrapper def compute(self, input_streams_dict:Dict[str,object]) -> Dict[str,object] : """ :param input_streams_dict: Dict that should contain, at least, the following keys and values: - `'sentences_logits'`: Tensor of shape `(batch_size, max_sentence_length, vocab_size)` containing the padded sequence of logits over symbols. - `'sentences_widx'`: Tensor of shape `(batch_size, max_sentence_length, 1)` containing the padded sequence of symbols' indices. - `'sentences_one_hot'`: Tensor of shape `(batch_size, max_sentence_length, vocab_size)` containing the padded sequence of one-hot-encoded symbols. - `'experiences'`: Tensor of shape `(batch_size, *self.obs_shape)`. - `'exp_latents'`: Tensor of shape `(batch_size, nbr_latent_dimensions)`. - `'multi_round'`: Boolean defining whether to utter a sentence back or not. - `'graphtype'`: String defining the type of symbols used in the output sentence: - `'categorical'`: one-hot-encoded symbols. - `'gumbel_softmax'`: continuous relaxation of a categorical distribution. - `'straight_through_gumbel_softmax'`: improved continuous relaxation... - `'obverter'`: obverter training scheme... - `'tau0'`: Float, temperature with which to apply gumbel-softmax estimator. - `'sample'`: Dict that contains the speaker and listener experiences as well as the target index. - `'config'`: Dict of hyperparameters to the referential game. - `'mode'`: String that defines what mode we are in, e.g. 'train' or 'test'. Those keywords are expected. - `'it'`: Integer specifying the iteration number of the current function call. """ outputs_dict = {} epoch = input_streams_dict["epoch"] mode = input_streams_dict["mode"] it_step = input_streams_dict["it_step"] it_sample = input_streams_dict["it_sample"] if "train" in mode and it_step == 0: dataset = input_streams_dict["dataset"] # assumes DualLabeledDataset... 
train_dataset = dataset.datasets["train"] latents_to_possible_indices = train_dataset.latents_to_possible_indices # Make the descriptive ratio no longer effective: dataset.kwargs["descriptive"] = False idxconverter = train_dataset.indices batch = [] n_same = int(0.25*self.batch_size) n_same_shape = int(0.3*self.batch_size) n_same_color = int(0.2*self.batch_size) n_random = self.batch_size - n_same_shape - n_same_color - n_same for i in range(n_same): speaker_idx = np.random.randint(len(dataset)) latents_class = train_dataset.getlatentclass(speaker_idx) color_id = latents_class[0] shape_id = latents_class[1] listener_idx = np.random.choice( [ idx for idx in latents_to_possible_indices[color_id][shape_id] if idx != speaker_idx ] ) batch.append(self.sample(dataset=dataset, speaker_idx=speaker_idx, listener_idx=listener_idx, same=True)) for i in range(n_same_shape): speaker_idx = np.random.randint(len(dataset)) latents_class = train_dataset.getlatentclass(speaker_idx) speaker_color_id = latents_class[0] shape_id = latents_class[1] choice_set = copy.deepcopy(train_dataset.same_shape_indices[shape_id]) choice_set.remove(speaker_idx) listener_idx = np.random.choice(choice_set) batch.append(self.sample(dataset=dataset, speaker_idx=speaker_idx, listener_idx=listener_idx, same=False)) for i in range(n_same_color): speaker_idx = np.random.randint(len(dataset)) latents_class = train_dataset.getlatentclass(speaker_idx) color_id = latents_class[0] speaker_shape_id = latents_class[1] choice_set = copy.deepcopy(train_dataset.same_color_indices[color_id]) choice_set.remove(speaker_idx) listener_idx = np.random.choice(choice_set) batch.append(self.sample(dataset=dataset, speaker_idx=speaker_idx, listener_idx=listener_idx, same=False)) for i in range(n_random): speaker_idx = np.random.randint(len(dataset)) speaker_latents_class = train_dataset.getlatentclass(speaker_idx) speaker_color_id = speaker_latents_class[0] speaker_shape_id = speaker_latents_class[1] listener_idx = np.random.randint(len(dataset)) listener_latents_class = train_dataset.getlatentclass(listener_idx) listener_color_id = listener_latents_class[0] listener_shape_id = listener_latents_class[1] same = (speaker_shape_id == listener_shape_id) and (speaker_color_id == listener_color_id) batch.append(self.sample(dataset=dataset, speaker_idx=speaker_idx, listener_idx=listener_idx, same=same)) new_sample = self.collate_fn(batch) if input_streams_dict["use_cuda"]: new_sample = new_sample.cuda() outputs_dict["current_dataloader:sample"] = new_sample return outputs_dict def sample(self, dataset, speaker_idx, listener_idx, same:bool=True): # Creating speaker's dictionnary: speaker_sample_d = dataset.sample(idx=speaker_idx) # Adding batch dimension: for k,v in speaker_sample_d.items(): if not(isinstance(v, torch.Tensor)): v = torch.Tensor(v) speaker_sample_d[k] = v.unsqueeze(0) if dataset.kwargs['observability'] == "partial": for k,v in speaker_sample_d.items(): speaker_sample_d[k] = v[:,0].unsqueeze(1) ##-------------------------------------------------------------- ##-------------------------------------------------------------- # Creating listener's dictionnary: listener_sample_d = dataset.sample(idx=listener_idx) # Adding batch dimension: for k,v in listener_sample_d.items(): if not(isinstance(v, torch.Tensor)): v = torch.Tensor(v) listener_sample_d[k] = v.unsqueeze(0) listener_sample_d["experiences"], target_decision_idx, orders = shuffle(listener_sample_d["experiences"]) if not same: # The target_decision_idx is set to `nbr_experiences`: 
target_decision_idx = (dataset.nbr_distractors[dataset.mode]+1)*torch.ones(1).long() # shuffling the other keys similarly: for k,v in listener_sample_d.items(): if k == "experiences": continue listener_sample_d[k], _, _ = shuffle(v, orders=orders) ##-------------------------------------------------------------- ##-------------------------------------------------------------- output_dict = {"target_decision_idx":target_decision_idx} for k,v in listener_sample_d.items(): output_dict[f"listener_{k}"] = v for k,v in speaker_sample_d.items(): output_dict[f"speaker_{k}"] = v return output_dict
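# Worked example (added; not part of the original module) of the batch composition
# used in compute() above: for a hypothetical batch_size of 32, the split between
# same-object, same-shape, same-color and random speaker/listener pairs is:
batch_size = 32
n_same = int(0.25 * batch_size)                                   # 8
n_same_shape = int(0.3 * batch_size)                              # 9
n_same_color = int(0.2 * batch_size)                              # 6
n_random = batch_size - n_same_shape - n_same_color - n_same      # 9
assert (n_same, n_same_shape, n_same_color, n_random) == (8, 9, 6, 9)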
87124
from __future__ import annotations

from typing import TYPE_CHECKING

import click

from .output import echo, ok
from .common import convert_api_errors, existing_config_option, inject_proxy
from .core import DropboxPath, CliException

if TYPE_CHECKING:
    from ..main import Maestral


@click.command(
    help="""
Automatically start the sync daemon on login.

A systemd or launchd service will be created to start a sync daemon for the given
configuration on user login.
""",
)
@click.option("--yes", "-Y", is_flag=True, default=False)
@click.option("--no", "-N", is_flag=True, default=False)
@existing_config_option
def autostart(yes: bool, no: bool, config_name: str) -> None:
    from ..autostart import AutoStart

    auto_start = AutoStart(config_name)

    if not auto_start.implementation:
        echo(
            "Autostart is currently not supported for your platform.\n"
            "Autostart requires systemd on Linux or launchd on macOS."
        )
        return

    if yes or no:
        if yes:
            auto_start.enable()
            ok("Enabled start on login.")
        else:
            auto_start.disable()
            ok("Disabled start on login.")
    else:
        if auto_start.enabled:
            echo("Autostart is enabled. Use -N to disable.")
        else:
            echo("Autostart is disabled. Use -Y to enable.")


@click.group(help="View and manage excluded folders.")
def excluded():
    pass


@excluded.command(name="list", help="List all excluded files and folders.")
@inject_proxy(fallback=True, existing_config=True)
def excluded_list(m: Maestral) -> None:
    excluded_items = m.excluded_items
    excluded_items.sort()

    if len(excluded_items) == 0:
        echo("No excluded files or folders.")
    else:
        for item in excluded_items:
            echo(item)


@excluded.command(
    name="add",
    help="Add a file or folder to the excluded list and re-sync.",
)
@click.argument("dropbox_path", type=DropboxPath())
@inject_proxy(fallback=True, existing_config=True)
@convert_api_errors
def excluded_add(m: Maestral, dropbox_path: str) -> None:
    if dropbox_path == "/":
        raise CliException("Cannot exclude the root directory.")

    m.exclude_item(dropbox_path)
    ok(f"Excluded '{dropbox_path}'.")


@excluded.command(
    name="remove",
    help="""
Remove a file or folder from the excluded list and re-sync.

It is safe to call this method with items which have already been included, they will
not be downloaded again. If the given path lies inside an excluded folder, the parent
folder will be included as well (but no other items inside it).
""",
)
@click.argument("dropbox_path", type=DropboxPath())
@inject_proxy(fallback=False, existing_config=True)
@convert_api_errors
def excluded_remove(m: Maestral, dropbox_path: str) -> None:
    if dropbox_path == "/":
        return echo("The root directory is always included")

    m.include_item(dropbox_path)
    ok(f"Included '{dropbox_path}'. Now downloading...")


@click.group(help="Manage desktop notifications.")
def notify():
    pass


@notify.command(
    name="level",
    help="Get or set the level for desktop notifications.",
)
@click.argument(
    "level_name",
    required=False,
    type=click.Choice(["ERROR", "SYNCISSUE", "FILECHANGE"], case_sensitive=False),
)
@inject_proxy(fallback=True, existing_config=True)
def notify_level(m: Maestral, level_name: str) -> None:
    from .. import notify as _notify

    if level_name:
        m.notification_level = _notify.level_name_to_number(level_name)
        ok(f"Notification level set to {level_name}.")
    else:
        level_name = _notify.level_number_to_name(m.notification_level)
        echo(f"Notification level: {level_name}.")


@notify.command(
    name="snooze",
    help="Snooze desktop notifications of file changes.",
)
@click.argument("minutes", type=click.IntRange(min=0))
@inject_proxy(fallback=True, existing_config=True)
def notify_snooze(m: Maestral, minutes: int) -> None:
    m.notification_snooze = minutes

    if minutes > 0:
        ok(f"Notifications snoozed for {minutes} min. Set snooze to 0 to reset.")
    else:
        ok("Notifications enabled.")
87129
import matplotlib.pyplot as plt

from corpus import load_corpus
from collections import defaultdict

from yellowbrick.text.freqdist import FreqDistVisualizer
from sklearn.feature_extraction.text import CountVectorizer


def freqdist(docs, outpath, corpus_kwargs={}, **kwargs):
    # Create a new figure and axes
    fig = plt.figure()
    ax = fig.add_subplot(111)

    # Vectorize the corpus
    vectorizer = CountVectorizer(**corpus_kwargs)
    docs = vectorizer.fit_transform(docs)
    features = vectorizer.get_feature_names()

    # Visualize the frequency distribution
    visualizer = FreqDistVisualizer(ax=ax, features=features, **kwargs)
    visualizer.fit(docs)
    visualizer.poof(outpath=outpath)


if __name__ == '__main__':
    # Load the corpus
    corpus = load_corpus("../../../examples/data/hobbies")

    # Whole corpus visualization
    freqdist(corpus.data, "images/freqdist_corpus.png", orient='v')

    # Stopwords removed
    freqdist(corpus.data, "images/freqdist_stopwords.png", {'stop_words': 'english'}, orient='v')

    # Specific categories
    hobbies = defaultdict(list)
    for text, label in zip(corpus.data, corpus.target):
        hobbies[label].append(text)

    # Cooking Category
    freqdist(hobbies["cooking"], "images/freqdist_cooking.png", {'stop_words': 'english'}, orient='v')

    # Gaming Category
    freqdist(hobbies["gaming"], "images/freqdist_gaming.png", {'stop_words': 'english'}, orient='v')
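# Usage sketch (added): the toy documents and output file name below are made up,
# but the call matches the signature of the freqdist() helper defined above.
toy_docs = [
    "the cat sat on the mat",
    "the dog chased the cat",
    "dogs and cats make good pets",
]
freqdist(toy_docs, "images/freqdist_toy.png", {'stop_words': 'english'}, orient='v')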
87145
from typing import List, Tuple

from torch import Tensor
from torch.nn import Module
from torchvision.models import wide_resnet50_2


class WideResNet50(Module):
    def __init__(self) -> None:
        super().__init__()
        self.wide_resnet50 = wide_resnet50_2(pretrained=True)
        self.wide_resnet50.layer1[-1].register_forward_hook(self.hook)
        self.wide_resnet50.layer2[-1].register_forward_hook(self.hook)
        self.wide_resnet50.layer3[-1].register_forward_hook(self.hook)
        self.features: List[Tensor] = []

    def hook(self, module: Module, x: Tensor, y: Tensor) -> None:
        self.features.append(y.cpu().detach())

    def forward(self, x: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
        self.wide_resnet50(x)
        feature1, feature2, feature3 = self.features
        self.features = []
        return (feature1, feature2, feature3)
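# Usage sketch (added): feeds a dummy batch through the wrapper above. For a 224x224
# input, layers 1-3 of torchvision's wide_resnet50_2 should yield feature maps with
# 256, 512 and 1024 channels at strides 4, 8 and 16; treat the shapes as approximate.
import torch

model = WideResNet50().eval()
with torch.no_grad():
    f1, f2, f3 = model(torch.randn(1, 3, 224, 224))
print(f1.shape, f2.shape, f3.shape)  # e.g. (1, 256, 56, 56), (1, 512, 28, 28), (1, 1024, 14, 14)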
87155
import collections
import logging
import os
import tempfile
from pickle import loads

import leveldb

logger = logging.getLogger('mapreduce')


def group_by_key(iterator):
    '''Group identical keys together.

    Given a sorted iterator of (key, value) pairs, returns an iterator
    of (key1, values), (key2, values).
    '''
    last_key = None
    values = []
    for key, value in iterator:
        value = loads(value)
        key = key.decode()
        user_key, _ = key.rsplit('.', 1)
        if user_key != last_key:
            if last_key is not None:
                yield last_key, values
            last_key = user_key
            values = [value]
        else:
            values.append(value)

    if last_key is not None:
        yield last_key, values


class Reducer(object):
    def initialize(self, input_queue, tmp_prefix, output_class, output_prefix,
                   shard_idx, num_shards):
        self.tmp_prefix = tmp_prefix
        self.output_prefix = output_prefix
        self.input_queue = input_queue
        self.output_class = output_class
        self.shard_idx = shard_idx
        self.num_shards = num_shards

    def reduce_shard(self, input_db, output_db):
        for idx, (key, values) in enumerate(group_by_key(input_db.RangeIter())):
            # if idx % 1000 == 0:
            #     logger.info('Reducing records=%d key=%s shard=%d', idx, key, self.shard_idx)
            self.reduce(key, values, output_db)

    def shuffle(self):
        os.system('mkdir -p "%s"' % self.tmp_prefix)
        shuffle_dir = tempfile.mkdtemp(
            prefix='shard-%05d-of-%05d' % (self.shard_idx, self.num_shards),
            dir=self.tmp_prefix)
        shuffle_db = leveldb.LevelDB(shuffle_dir)

        idx = 0
        while 1:
            next_entry = self.input_queue.get()
            if next_entry is None:
                break
            key, value_str = next_entry
            shuffle_db.Put((key + ('.%s' % idx)).encode(), value_str)
            idx += 1
            # if idx % 1000 == 0:
            #     logger.info('Shuffling records=%d key=%s shard=%d', idx, key, self.shard_idx)

        output_db = self.output_class.create_writer(self.output_prefix,
                                                    self.shard_idx,
                                                    self.num_shards)
        # logger.debug('Reducer: %s', output_db)
        self.reduce_shard(shuffle_db, output_db)
        output_db.flush()

        del output_db
        del shuffle_db
        os.system('rm -rf "%s"' % shuffle_dir)

    def reduce(self, key, values, output):
        raise NotImplementedError

    def reduce_finished(self):
        '''Called after all values have been reduced.

        The result of this call is returned to the caller of `mapreduce`.
        '''
        pass


class IdentityReducer(Reducer):
    def reduce(self, key, values, output):
        for value in values:
            output.put(key, value)


class SumReducer(Reducer):
    def reduce(self, key, values, output):
        output.put(key, sum([float(v) for v in values]))


class ListReducer(Reducer):
    def reduce(self, key, values, output):
        output.put(key, list(values))


class NullReducer(Reducer):
    def reduce(self, key, values, output):
        return


def pivot_values(value_list):
    '''
    Takes a list of (name, value) tuples, and `pivots` them, returning a
    dictionary from name -> [values].

    This is frequently used when joining a number of inputs together, where
    each input is tagged with a table name.
    '''
    intermediate = collections.defaultdict(list)
    for row in value_list:
        table_name, val = row
        intermediate[table_name].append(val)

    return intermediate


class PivotReducer(Reducer):
    def reduce(self, key, values, output):
        val = pivot_values(values)
        output.put(key, val)
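# Minimal sketch (added) of what group_by_key() expects: byte keys of the form
# "<user_key>.<index>" in sorted order with pickled values, as produced by
# Reducer.shuffle() above.
import pickle

sorted_pairs = [
    (b"apple.0", pickle.dumps(1)),
    (b"apple.1", pickle.dumps(2)),
    (b"pear.2", pickle.dumps(3)),
]
print(list(group_by_key(sorted_pairs)))  # [('apple', [1, 2]), ('pear', [3])]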
87169
import info


class subinfo(info.infoclass):
    def setTargets(self):
        self.versionInfo.setDefaultValues()

        self.description = "A database connectivity and creation framework"

    def setDependencies(self):
        self.buildDependencies["kde/frameworks/extra-cmake-modules"] = None
        self.buildDependencies["dev-utils/python2"] = None
        self.runtimeDependencies["virtual/base"] = None
        self.runtimeDependencies["libs/icu"] = None
        self.runtimeDependencies["libs/sqlite"] = None
        self.runtimeDependencies["binary/mysql"] = None
        self.runtimeDependencies["kde/frameworks/tier1/kcoreaddons"] = None


from Package.CMakePackageBase import *


class Package(CMakePackageBase):
    def __init__(self):
        CMakePackageBase.__init__(self)
87177
import sys
import math

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from fairseq import utils


def progressive_max(x):
    T = x.size(1)
    x = F.pad(x, (T - 1, 0), 'constant', -1)
    x = F.max_pool1d(x.unsqueeze(1).float(),  # shape into B, C, T
                     T,      # kernel size
                     1,      # stride
                     0,      # padding
                     1,      # dilation
                     False,  # ceil_mode
                     False,  # return indices
                     )
    return x.squeeze(1)  # B, Tt


def logsumexp(a, b):
    m = torch.max(a, b)
    return torch.log(torch.exp(a - m) + torch.exp(b - m))


def Linear(in_features, out_features, bias=True):
    m = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(m.weight)
    if bias:
        nn.init.constant_(m.bias, 0.)
    return m


class LLControls(nn.Module):
    """ LL based controller """

    def __init__(self, args, controller_dim):
        nn.Module.__init__(self)
        self.gate = nn.Linear(controller_dim, 1, bias=True)
        nn.init.normal_(self.gate.weight, 0, 1 / controller_dim)
        nn.init.constant_(self.gate.bias, 0)
        self.penalty = args.oracle_penalty
        self.write_right = args.write_right

    def get_positions_proba(self, rw_logits):
        """
        Inputs:
            rw_logits [log(rho), log(1-rho)] : (Tt, B, Ts, 2)
        Returns the probabilities of being at position (t,j) (Tt, B, Ts)
        """
        Tt, B, Ts, _ = rw_logits.size()
        Mr1 = rw_logits[0:1, :, :-1, 0].exp()
        Mc1 = rw_logits[:, :, 0:1, 1].exp()
        M = rw_logits[1:, :, :-1, 0].exp() + rw_logits[:-1, :, 1:, 1].exp()
        M = torch.cat((Mr1, M), dim=0)
        M = torch.cat((Mc1, M), dim=-1)
        return M

    def predict_read_write(self, x):
        """ Returns log(rho), log(1-rho) in B, Tt, Ts, 2 """
        x = self.gate(x)
        s = F.logsigmoid(x)
        return torch.cat((s, s - x), dim=-1).float()

    def forward(self, observations, scores):
        """
        Inputs:
            observations : Input for the controller:  B, Tt, Ts, C
            Scores : log p(y_t | x<j) : B, Tt, Ts
        """
        controls = self.predict_read_write(observations)  # B,Tt,Ts,2
        B, Tt, Ts = scores.size()
        with torch.no_grad():
            if self.penalty:
                # Penalize large contexts:
                indices = torch.arange(
                    Ts,
                    dtype=scores.dtype,
                    device=scores.device
                ) / Ts
                scores = scores - self.penalty * indices.unsqueeze(0).unsqueeze(0)
            best_context = scores.max(-1)[1]  # B, Tt
            best_context = progressive_max(best_context).type_as(best_context)
            AP = best_context.float().mean(dim=1) / Ts
            print('AP:', ' '.join(map(lambda x: '{:.2f}'.format(x), AP.tolist())))
            gamma = torch.zeros_like(scores).scatter_(-1, best_context.unsqueeze(-1), 1.0)  # B, Tt, Ts
            if self.write_right:
                gamma = gamma.cumsum(dim=-1)
        # Write beyond the ideal context
        if self.write_right:
            write = gamma[:, 1:]  # B, Tt-1, Ts
        else:
            write = gamma[:, 1:].cumsum(dim=-1)  # B, Tt-1, Ts
        read = 1 - write
        return controls[:, :-1], gamma, read, write
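# Worked example (added): progressive_max() above is a running maximum along the
# time dimension, implemented with left padding plus max_pool1d.
ctx = torch.tensor([[3, 1, 4, 1, 5]])
print(progressive_max(ctx))  # tensor([[3., 3., 4., 4., 5.]])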
87195
import numpy as np
import warnings
from scipy.ndimage.interpolation import zoom
import torch
import math
import copy
import cv2
from skimage import measure
import pandas as pd


def resample(imgs, spacing, new_spacing, order=2):
    if len(imgs.shape) == 3:
        new_shape = np.round(imgs.shape * spacing / new_spacing)
        true_spacing = spacing * imgs.shape / new_shape
        resize_factor = new_shape / imgs.shape
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            imgs = zoom(imgs, resize_factor, mode='nearest', order=order)
        return imgs, true_spacing, resize_factor
    elif len(imgs.shape) == 4:
        n = imgs.shape[-1]
        newimg = []
        for i in range(n):
            slice = imgs[:, :, :, i]
            # the 3D branch returns three values; the resize factor is not needed here
            newslice, true_spacing, _ = resample(slice, spacing, new_spacing)
            newimg.append(newslice)
        newimg = np.transpose(np.array(newimg), [1, 2, 3, 0])
        return newimg, true_spacing
    else:
        raise ValueError('wrong shape')


def get_start_ind(center_points):
    curr_x = center_points[0][0]
    curr_y = center_points[0][1]
    curr_z = center_points[0][2]
    curr_r = 3
    start_ind = -1
    ellipsis = 0.1
    for i in range(1, len(center_points)):
        v1 = np.array([curr_x, curr_y, curr_z])
        v2 = np.array([center_points[i][0], center_points[i][1], center_points[i][2]])
        dist = np.linalg.norm(v1 - v2)
        if (dist - curr_r) <= ellipsis and dist >= curr_r:
            start_ind = i
            break
    return start_ind


def get_spacing_res2(x, spacing_x, spacing_new):
    return int(round((x / spacing_x) * spacing_new))


def get_world_cood(x, spacing_x, spacing_new):
    return (x / spacing_new) * spacing_x


def data_preprocess(img):
    mean_intensity = np.mean(img)
    std_intensity = np.std(img)
    upper_bound = np.percentile(img, 99.5)
    lower_bound = np.percentile(img, 00.5)
    img = np.clip(img, lower_bound, upper_bound)
    # avoid division by zero
    img = (img - mean_intensity) / (std_intensity + 1e-9)
    img = np.array([img])
    img = torch.from_numpy(img)
    return img.unsqueeze(0)


def get_shell(fl_Num_Points, fl_Radius):
    x_list = []
    y_list = []
    z_list = []
    offset = 2.0 / fl_Num_Points
    increment = math.pi * (3.0 - math.sqrt(5.0))
    for i in range(fl_Num_Points):
        z = ((i * offset) - 1.0) + (offset / 2.0)
        r = math.sqrt(1.0 - pow(z, 2.0))
        phi = ((i + 1) % fl_Num_Points) * increment
        x = math.cos(phi) * r
        y = math.sin(phi) * r
        x_list.append(fl_Radius * x)
        y_list.append(fl_Radius * y)
        z_list.append(fl_Radius * z)
    return x_list, y_list, z_list


def prob_terminates(pre_y, max_points):
    res = torch.sum(-pre_y * torch.log2(pre_y))
    return res / torch.log2(torch.from_numpy(np.array([max_points])).float())


def get_closer_distance(vessel, target_point):
    min_dis = float("inf")
    for i in range(len(vessel)):
        curr_point = vessel[i]
        dist = np.linalg.norm(target_point - curr_point)
        if dist < min_dis:
            min_dis = dist
            index = i
    return min_dis, index


def get_distance(v1, v2):
    return np.linalg.norm(v1 - v2)


def get_angle(v1, v2):
    cosangle = v1.dot(v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    cosangle = np.clip(cosangle, -1, 1)
    return math.degrees(np.arccos(cosangle))


def save_info(res: list, path: str):
    x_list = []
    y_list = []
    z_list = []
    for i in range(len(res)):
        x_list.append(res[i][0][0])
        y_list.append(res[i][0][1])
        z_list.append(res[i][0][2])
    dataframe = pd.DataFrame(
        {'x': x_list, 'y': y_list, 'z': z_list})
    dataframe.to_csv(path, index=False, columns=['x', 'y', 'z'], sep=',', float_format='%.5f')


def crop_heart(input_arr):
    '''
    In order to remove the influence of pulmonary vessels, we will use the
    threshold method to segment the heart region.
    :param input_arr: image arr
    :return: Data after removing lung areas
    '''
    src_array = copy.deepcopy(input_arr)
    z, w, h = src_array.shape
    new_arr = np.zeros((z, w, h))
    new_arr += -1000
    sum_minr = 0
    sum_minc = 0
    sum_maxr = 0
    sum_maxc = 0
    for k in range(z):
        image = src_array[k][:, :]
        ret, thresh = cv2.threshold(image, 20, 400, cv2.THRESH_BINARY)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
        opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, anchor=(-1, -1), iterations=4)
        label_opening = measure.label(opening)
        regionprops = measure.regionprops(label_opening)
        max_area = 0
        index = 0
        for i in range(len(regionprops)):
            if regionprops[i].area > max_area:
                max_area = regionprops[i].area
                index = i
        minr, minc, maxr, maxc = regionprops[index].bbox
        new_arr[k][minr:maxr, minc:maxc] = src_array[k][minr:maxr, minc:maxc]
        sum_minr += minr
        sum_minc += minc
        sum_maxr += maxr
        sum_maxc += maxc
    mean_minr = sum_minr // z
    meam_minc = sum_minc // z
    mean_maxr = sum_maxr // z
    mean_maxc = sum_maxc // z
    return new_arr, meam_minc, mean_minr, mean_maxc, mean_maxr
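# Usage sketch (added): resamples a dummy CT-like volume from 2.5 mm slices to an
# isotropic 1 mm grid; the spacings and volume size are arbitrary example values.
volume = np.random.rand(40, 128, 128)
resampled, true_spacing, factor = resample(volume,
                                           spacing=np.array([2.5, 1.0, 1.0]),
                                           new_spacing=np.array([1.0, 1.0, 1.0]))
print(resampled.shape, true_spacing, factor)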
87227
import ogr

from mapswipe_workers import auth
from mapswipe_workers.definitions import DATA_PATH
from mapswipe_workers.utils import geojson_functions


def add_project_geometries_to_api():
    """Load project geometries from postgres and save as geojson."""

    # load from postgres
    pg_db = auth.postgresDB()
    sql_query = """
        SELECT
            project_id
            ,ST_AsText(geom) as geom
        FROM projects
    """
    data = pg_db.retr_query(sql_query)
    print(len(data))

    # save as geojson one by one
    for project in data:
        project_id = project[0]
        wkt_geom = project[1]
        outfile = (
            f"{DATA_PATH}/api/project_geometries/project_geom_{project_id}.geojson"
        )
        try:
            geometries = [ogr.CreateGeometryFromWkt(wkt_geom)]
            geojson_functions.create_geojson_file(geometries, outfile)
        except Exception:
            print(f"got an error for {project_id}")
            # just ignore if this fails
            pass


add_project_geometries_to_api()
87259
import os

import psutil
import torch
from torch.cuda import _get_device_index as get_device_index

from ..meta.process_utils import get_subprocess_ids, format_memory_str
from mani_skill_learn.utils.data import is_dict, is_seq_of

try:
    import pynvml
    from pynvml import NVMLError_DriverNotLoaded
except ModuleNotFoundError:
    print("pynvml module not found, please install pynvml")
    exit(0)

try:
    pynvml.nvmlInit()
except NVMLError_DriverNotLoaded:
    print("cuda driver can't be loaded, is cuda enabled?")
    exit(0)


def get_gpu_memory_info(device, unit='G', number_only=False):
    device = get_device_index(device, optional=True)
    handler = pynvml.nvmlDeviceGetHandleByIndex(device)
    meminfo = pynvml.nvmlDeviceGetMemoryInfo(handler)
    total = format_memory_str(meminfo.total, unit, number_only)
    used = format_memory_str(meminfo.used, unit, number_only)
    free = format_memory_str(meminfo.free, unit, number_only)
    ratio = meminfo.used / meminfo.total
    ratio = ratio * 100 if number_only else f'{ratio * 100:.1f}%'
    return total, used, free, ratio


def get_gpu_memory_usage_by_process(process, device=None, unit='G', number_only=False):
    if not isinstance(process, (list, tuple)):
        process = [process]
    device = get_device_index(device, optional=True)
    handler = pynvml.nvmlDeviceGetHandleByIndex(device)
    procs = pynvml.nvmlDeviceGetComputeRunningProcesses(handler)
    mem = 0
    for p in procs:
        if p.pid in process:
            mem += p.usedGpuMemory
    return format_memory_str(mem, unit, number_only)


def get_gpu_memory_usage_by_current_program(device=None, unit='G', number_only=False):
    proc_in_current_program = get_subprocess_ids()
    return get_gpu_memory_usage_by_process(proc_in_current_program)


def get_gpu_utilization(device=None):
    device = get_device_index(device, optional=True)
    handler = pynvml.nvmlDeviceGetHandleByIndex(device)
    return pynvml.nvmlDeviceGetUtilizationRates(handler).gpu


def get_cuda_info(device=None, unit='G', number_only=True):
    current_mem = get_gpu_memory_usage_by_current_program(device, unit, number_only)
    all_mem, used, _, ratio = get_gpu_memory_info(device, unit, number_only)
    utilization = get_gpu_utilization(device)
    return {
        'gpu_mem_ratio': ratio,
        'gpu_mem': used,
        'gpu_mem_this': current_mem,
        'gpu_util': utilization if number_only else f'{utilization}%'
    }


def get_one_device(x):
    if is_dict(x):
        return get_one_device(x[list(x.keys())[0]])
    elif is_seq_of(x):
        return get_one_device(x[0])
    else:
        assert hasattr(x, 'device'), type(x)
        return x.device


def get_device(x):
    if is_dict(x):
        return {k: get_device(x[k]) for k in x}
    elif is_seq_of(x):
        return type(x)([get_device(y) for y in x])
    else:
        assert hasattr(x, 'device'), type(x)
        return x.device
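# Usage sketch (added): querying the first GPU. This requires an NVIDIA driver and
# the pynvml bindings imported above; the dictionary keys come from get_cuda_info()
# as defined in this file, and the printed values are only an example.
print(get_cuda_info(device=0))
# e.g. {'gpu_mem_ratio': 12.3, 'gpu_mem': 1.5, 'gpu_mem_this': 1.2, 'gpu_util': 40}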
87277
from .batch_preprocessing import basic_preprocessing
from .batch_preprocessing import basic_preprocessing_with_pad
from .registration_parameters import get_registration_preset
from .registration_parameters import get_registration_presets
87281
src = Split('''
    uart_test.c
''')

component = aos_component('uart_test', src)
component.add_cflags('-Wall')
component.add_cflags('-Werror')
87343
from montepython.likelihood_class import Likelihood_clik


class Planck_lowl_TT(Likelihood_clik):
    pass
87362
import logging
import urllib2
import time

import lighter.util as util


class Datadog(object):
    def __init__(self, token, tags=[]):
        self._token = token
        self._url = 'https://app.datadoghq.com'
        self._tags = tags + ['source:lighter', 'type:change']

    def notify(self, title, message, aggregation_key, tags=[], priority='normal', alert_type='success'):
        if not title or not message or not aggregation_key:
            logging.warn('Datadog title, message and aggregation_key required')
            return

        merged_tags = list(tags) + self._tags
        now = int(time.time())
        logging.debug("Sending Datadog deployment event: %s", message)

        self._call('/api/v1/series', {'series': [{
            'metric': 'datadog.events',
            'points': [[now, 1]],
            'type': 'count',
            'tags': merged_tags + ['status:' + alert_type]
        }]})

        self._call('/api/v1/events', {
            'title': title,
            'text': message,
            'aggregation_key': 'lighter_' + aggregation_key,
            'tags': merged_tags,
            'priority': priority,
            'alert_type': alert_type,
            'date_happened': now
        })

    def _call(self, endpoint, data):
        if not self._url or not self._token:
            logging.debug('Datadog is not enabled')
            return

        try:
            url = self._url.rstrip('/') + endpoint + '?api_key=' + self._token
            logging.debug('Calling Datadog endpoint %s', endpoint)
            util.jsonRequest(url, data=data, method='POST')
        except urllib2.URLError as e:
            logging.warn(str(e))
            return {}
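# Usage sketch (added): the API token and tags are placeholders; notify() only sends
# anything when a token is configured, as handled by _call() above.
datadog = Datadog(token='<your-datadog-api-key>', tags=['environment:test'])
datadog.notify(
    title='Deployed myservice',
    message='myservice 1.2.3 deployed to marathon',
    aggregation_key='myservice',
    alert_type='success',
)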
87364
import torch
from torch.autograd import Variable


def to_cuda_variable(tensor):
    """
    Converts tensor to cuda variable
    :param tensor: torch tensor, of any size
    :return: torch Variable, of same size as tensor
    """
    if torch.cuda.is_available():
        return Variable(tensor).cuda()
    else:
        return Variable(tensor)


def to_cuda_variable_long(tensor):
    """
    Converts tensor to cuda variable
    :param tensor: torch tensor, of any size
    :return: torch Variable, of same size as tensor
    """
    if torch.cuda.is_available():
        return Variable(tensor.long()).cuda()
    else:
        return Variable(tensor.long())


def to_numpy(variable: Variable):
    """
    Converts torch Variable to numpy nd array
    :param variable: torch Variable, of any size
    :return: numpy nd array, of same size as variable
    """
    if torch.cuda.is_available():
        return variable.data.cpu().numpy()
    else:
        return variable.data.numpy()


def init_hidden_lstm(num_layers, batch_size, lstm_hidden_size):
    hidden = (
        to_cuda_variable(
            torch.zeros(num_layers, batch_size, lstm_hidden_size)
        ),
        to_cuda_variable(
            torch.zeros(num_layers, batch_size, lstm_hidden_size)
        )
    )
    return hidden
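# Usage sketch (added): wraps a random tensor and builds an LSTM hidden state using
# the helpers above; the sizes are arbitrary example values.
v = to_cuda_variable(torch.randn(4, 8))
h0, c0 = init_hidden_lstm(num_layers=2, batch_size=4, lstm_hidden_size=16)
print(v.shape, h0.shape, c0.shape)  # (4, 8), (2, 4, 16), (2, 4, 16)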
87386
import logging

from solder_joint import SolderJoint


class BoardView:
    def __init__(self, view_identifier):
        self.view_identifier = view_identifier
        self.solder_joint_dict = {}
        self.slice_dict = {}
        self.is_incorrect_view = False
        logging.info('BoardView obj created for view id: %s', self.view_identifier)

    def add_solder_joint(self, component, defect_id, defect_name, roi):
        my_tupple = tuple([roi[0], roi[1], roi[2], roi[3], defect_id])
        if my_tupple in self.solder_joint_dict.keys():
            logging.info('ROI+Defect found inside the solder_joint_dict, won\'t add a new joint')
        else:
            logging.info('Adding new SolderJoint obj for the new ROI+Defect')
            self.solder_joint_dict[my_tupple] = SolderJoint(component, defect_id, defect_name, roi)

    def add_slice(self, file_location):
        slice_id = int(file_location[-5])
        self.slice_dict[slice_id] = file_location
        for solder_joint_obj in self.solder_joint_dict.values():
            solder_joint_obj.add_slice(slice_id, file_location)

    def add_slices_to_solder_joints(self):
        for slice_id in self.slice_dict.keys():
            file_location = self.slice_dict[slice_id]
            for solder_joint_obj in self.solder_joint_dict.values():
                solder_joint_obj.add_slice(slice_id, file_location)
87428
import hashlib import requests from haversine import haversine from requests.exceptions import SSLError, ConnectTimeout, ConnectionError from django.conf import settings from django.core.cache import cache from django.core.files.base import ContentFile class TencentLBS(object): key = settings.TENCENT_LBS_KEY geo_coder_url = 'https://apis.map.qq.com/ws/geocoder/v1/' static_map_url = 'https://apis.map.qq.com/ws/staticmap/v2/' def __init__(self, latitude=None, longitude=None, address=None, icon=None): self.latitude = latitude self.longitude = longitude self.address = address self.icon = icon def get_location(self, origin=False): params = { 'key': self.key, 'location': '{},{}'.format(self.latitude, self.longitude) } try: response = requests.get(self.geo_coder_url, params=params) if origin: return response.json() else: return self.parse_location(response) except (SSLError, ConnectTimeout, ConnectionError): return { "province": "上海", "city": "上海", "message": "获取区域发生错误" } def get_longitude_and_latitude(self): params = { 'key': self.key, 'address': '{}'.format(self.address) } try: response = requests.get(self.geo_coder_url, params=params) return self.parse_longitude_and_latitude(response) except (SSLError, ConnectTimeout, ConnectionError): return "获取坐标位置错误" def get_static_map_img(self, size="339*90", zoom=12, icon=None): params = { 'key': self.key, 'center': '{},{}'.format(self.latitude, self.longitude), 'zoom': zoom, 'size': size, 'scale': 2 # 高清 } off_number = 0.0055 latitude = self.off_degree(self.latitude, off_number) if icon: params['markers'] = "icon:{}|{},{}".format(icon, latitude, self.longitude) else: params['markers'] = "color:blue|{},{}".format(latitude, self.longitude) try: response = requests.get(self.static_map_url, params=params) except (SSLError, ConnectTimeout, ConnectionError): return "保存静态坐标图失败" img_file = self.write_image(response.content) return img_file def write_image(self, img_content): file_name = "{}_{}.png".format(self.latitude, self.longitude) img_file = ContentFile(content=img_content, name=file_name) return img_file @staticmethod def parse_location(response): if response.status_code == 200: response_data = response.json() status = response_data.get('status') result = response_data.get('result') if status == 0 and result: address_component = result.get('address_component', {}) if address_component.get("city", ""): province = address_component.get('province', '') city = address_component.get('city', '').replace("市", "") return { "province": province, "city": city, "message": "success" } return { "province": "上海", "city": "上海", "message": "获取区域发生错误" } @staticmethod def parse_longitude_and_latitude(response): if response.status_code == 200: response_data = response.json() status = response_data.get('status') result = response_data.get('result') if status == 0 and result: location = result.get('location', {}) if all(location.values()): longitude = location.get('lng') latitude = location.get('lat') return { "latitude": latitude, "longitude": longitude } return "获取坐标位置错误({})".format(response_data.get("message")) return "获取坐标位置错误({})".format(response.status_code) @staticmethod def off_degree(degree, number): return degree if degree < number else float("{0:.6f}".format(degree - number)) def get_address(self): key = "address-{}".format(self.address) data = cache.get(key) if not data: data = self.get_longitude_and_latitude() if not isinstance(data, str): cache.set(key, data, 60 * 60 * 12) return data class TencentLBS2(object): def __init__(self): self.key = 
settings.TENCENT_LBS_KEY self.sk = settings.TENCENT_LBS_SK def gen_sig(self, params): alist = [] for k in sorted(params.keys()): alist.append('='.join((k, params[k]))) params_str = '/ws/geocoder/v1/?' + '&'.join(alist) + self.sk result = hashlib.md5(params_str.encode()).hexdigest() return result def get_location(self, lat, lng): url = 'https://apis.map.qq.com/ws/geocoder/v1/' params = { 'key': self.key, 'location': '{},{}'.format(lat, lng) } params['sig'] = self.gen_sig(params) try: response = requests.get(url, params=params) return self.parse_location(response) except (SSLError, ConnectTimeout, ConnectionError): return "message: 获取区域发生错误" def one_to_one_distance(self, from_location, to_location): ''' 因为腾讯地图的距离计算api有直径10公里限制, 所以暂时使用经纬度计算距离 :param from_location: {'lat': lat, 'lng': lng} :param to_location: {'lat': lat, 'lng': lng} :return: distance(单位:km) ''' from_location = (from_location.get('lat'), from_location.get('lng')) to_location = (to_location.get('lat'), to_location.get('lng')) distance = haversine(from_location, to_location) return round(distance, 2) def get_longitude_and_latitude(self, address): url = 'https://apis.map.qq.com/ws/geocoder/v1/' params = { 'key': self.key, 'address': address, } params['sig'] = self.gen_sig(params) try: response = requests.get(url, params=params) return self.parse_longitude_and_latitude(response) except (SSLError, ConnectTimeout, ConnectionError): return "获取坐标位置错误" # def one_to_many_distance(self, from_location, to_location): # # 一对多距离计算 # ''' # # :param from_location: 'lat,lng' # :param to_location: ['lat,lng', 'lat,lng',...] # :return: # ''' # distance_url = 'https://apis.map.qq.com/ws/distance/v1/' # data = { # 'from': from_location, # 'to': ';'.join(to_location), # 'key': self.key, # } # try: # response = requests.get(distance_url, params=data) # return self.parse_distance(response.json()) # except (SSLError, ConnectTimeout, ConnectionError): # return "获取距离信息发生错误" def one_to_many_distance(self, from_location, to_location): # 一对多距离计算 ''' 因为腾讯地图的距离计算api有直径10公里限制, 所以暂时使用经纬度计算距离 :param from_location: 'lat,lng' :param to_location: ['lat,lng', 'lat,lng',...] 
:return: ''' distance_list = [] from_location = tuple(float(i) for i in from_location.split(',')) for index, to in enumerate(to_location): to_ = tuple(float(i) for i in to.split(',')) distance = haversine(from_location, to_) distance_list.append({'index': index, 'distance': round(distance, 2)}) distance_list = sorted(distance_list, key=lambda x: x.get('distance')) return distance_list @staticmethod def parse_distance(response): if response.get('status') == 0: result = response.get('result') elements = result.get('elements') distance_list = [] for index, element in enumerate(elements): if element.get('distance') >= 0: distance_list.append({'index': index, 'distance': element.get('distance')}) distance_list = sorted(distance_list, key=lambda x: x.get('distance')) return distance_list print(response) return f"获取距离信息发生错误:{response.get('message')}" @staticmethod def parse_location(response): if response.status_code == 200: response_data = response.json() status = response_data.get('status') result = response_data.get('result') if status == 0 and result: address_component = result.get('address_component', {}) if address_component.get("city", ""): province = address_component.get('province', '') city = address_component.get('city', '') district = address_component.get('district', '') return { "province": province, "city": city, "district": district, "message": "success", "address": province+city+district } return "message: 获取区域发生错误" @staticmethod def parse_longitude_and_latitude(response): if response.status_code == 200: response_data = response.json() status = response_data.get('status') result = response_data.get('result') if status == 0 and result: location = result.get('location', {}) if all(location.values()): longitude = location.get('lng') latitude = location.get('lat') return { "lat": latitude, "lng": longitude } return "获取坐标位置错误({})".format(response_data.get("message")) return "获取坐标位置错误({})".format(response.status_code) lbs = TencentLBS2()
87590
import unittest

import numpy as np
from numpy import array

from bruges.models import reconcile, interpolate, panel
from bruges.models import wedge


class ModelTest(unittest.TestCase):
    """
    Tests models.
    """
    def test_reconcile(self):
        a = np.array([2, 6, 7, 7, 3])
        b = np.array([3, 7, 3])
        A, B = reconcile(a, b, order=0)
        A_, B_ = array([2, 6, 7, 7, 3]), array([3, 7, 7, 3, 3])
        self.assertTrue(np.array_equal(A, A_))
        self.assertTrue(np.array_equal(B, B_))

    def test_interpolate(self):
        a = np.array([2, 6, 7, 7, 3])
        b = np.array([3, 7, 7, 3, 3])
        interp = interpolate(a, b, num=10)
        self.assertTrue(interp.shape == (5, 10))

    def test_panel(self):
        a = np.array([2, 6, 7, 7, 3])
        b = np.array([3, 7, 3])
        dists = (10,)
        out = panel(a, b, num=15, dists=dists)
        sample = out[:, 7]
        self.assertTrue(np.all(sample[:4] == array([2.5, 6.5, 5., 3.])))
        self.assertTrue(np.isnan(sample[-1]))

    def test_wedge(self):
        w, top, base, ref = wedge(depth=10, width=7, strat=(10, (20, 30), 40))
        col = array([10, 10, 10, 20, 20, 30, 40, 40, 40, 40])
        t = array([3., 3., 3., 3., 3., 3., 3.])
        b = array([3., 3., 3.6, 4.2, 4.8, 5.4, 6.])
        self.assertTrue(np.all(w[:, -1] == col))
        self.assertTrue(w.sum() == 1990)
        self.assertTrue(np.allclose(top, t))
        self.assertTrue(np.allclose(base, b))
        self.assertTrue(ref == 6)

    def test_netgross(self):
        w, top, *_ = wedge(depth=10, width=7, breadth=3, strat=(10, (20, 30), 40))
        self.assertTrue(w.sum() == 6003)
        self.assertTrue(w.shape == (10, 7, 3))
        self.assertTrue(top.sum() == 63.0)


if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(ModelTest)
    unittest.TextTestRunner(verbosity=2).run(suite)
87612
import os

import numpy as np
import tensorflow as tf

from depth.self_supervised_sfm.utils import readlines

AUTOTUNE = tf.data.experimental.AUTOTUNE

########################
# Constants
#########################
KITTI_K = np.array([[0.58, 0, 0.5, 0],  # fx/width
                    [0, 1.92, 0.5, 0],
                    [0, 0, 1, 0],
                    [0, 0, 0, 1]], dtype=np.float)


class KittiSFMDataset:
    def __init__(self, dataset_dir, load_option, img_size, batch_size,
                 split='eigen_zhou', frame_idx=(0, -1, 1)):
        self.h, self.w = img_size
        self.split = split
        self.batch_size = batch_size
        self.load_option = load_option
        self.dataset_dir = dataset_dir
        self.frame_idx = frame_idx
        self.side_map = {"2": 2, "3": 3, "l": 2, "r": 3}  # Correspond to image folder

        # Check that the folder exists
        assert os.path.exists(dataset_dir) and os.path.isdir(dataset_dir), f"Dataset {dataset_dir} does not exist !"

        if self.split == 'eigen_zhou':
            filename = os.path.join('splits', f'eigen_zhou/{load_option}_files.txt')
        else:
            raise NotImplementedError

        print(f'Loading from: {filename}')
        data_paths = readlines(filename)

        self.img_paths = []
        for i, line in enumerate(data_paths):
            # Image files
            folder, frame_idx, side = line.split()
            per_sample_imgs = []
            # Load sequence img
            for t in self.frame_idx:
                f_str = f"{int(frame_idx) + t:010d}"
                image_path = os.path.join(dataset_dir, folder, f"image_0{self.side_map[side]}/data", f_str + '.png')
                per_sample_imgs.append(image_path)
            self.img_paths.append(per_sample_imgs)

        print(f'Total Images for {load_option}: {len(self.img_paths)}')
        self.num_samples = len(self.img_paths)

    def load_tfdataset(self):
        inputs = {}

        # Intrinsic
        intrinsic = KITTI_K.copy()
        intrinsic[0, :] *= self.w
        intrinsic[1, :] *= self.h
        inputs['K'] = tf.convert_to_tensor(intrinsic, tf.float32)
        inputs['K_inv'] = tf.linalg.inv(inputs['K'])

        dataset = tf.data.Dataset.from_tensor_slices(self.img_paths)
        dataset = dataset.shuffle(self.num_samples)

        # Load data
        def load_sample(img_paths):
            # load the raw data from the file as a string
            image_cur = tf.io.read_file(img_paths[0])
            image_prev = tf.io.read_file(img_paths[1])
            image_next = tf.io.read_file(img_paths[2])

            image_cur = tf.image.decode_png(image_cur)
            image_prev = tf.image.decode_png(image_prev)
            image_next = tf.image.decode_png(image_next)

            image_cur = tf.cast(tf.image.resize(image_cur, [self.h, self.w]), tf.float32) / 255.
            image_prev = tf.cast(tf.image.resize(image_prev, [self.h, self.w]), tf.float32) / 255.
            image_next = tf.cast(tf.image.resize(image_next, [self.h, self.w]), tf.float32) / 255.

            if self.load_option == "train":
                if tf.random.uniform(()) > 0.5:
                    image_cur = tf.image.flip_left_right(image_cur)
                    image_prev = tf.image.flip_left_right(image_prev)
                    image_next = tf.image.flip_left_right(image_next)

            inputs['img'] = image_cur
            inputs['img-1'] = image_prev
            inputs['img1'] = image_next
            return inputs

        dataset = dataset.map(load_sample, num_parallel_calls=AUTOTUNE)
        dataset = dataset.batch(self.batch_size, drop_remainder=True)
        dataset = dataset.prefetch(buffer_size=AUTOTUNE)
        return dataset
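# Usage sketch (added): the dataset root below is a placeholder path and this assumes
# the KITTI raw data plus the eigen_zhou split files are available locally.
kitti = KittiSFMDataset(
    dataset_dir='/data/kitti_raw',          # hypothetical location
    load_option='train',
    img_size=(192, 640),
    batch_size=4,
)
train_ds = kitti.load_tfdataset()
for batch in train_ds.take(1):
    print(batch['img'].shape)               # (4, 192, 640, 3)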
87614
from re import A import boto3 import botocore import click import configparser from csv import DictWriter import io import itertools import json import mimetypes import os import re import sys import textwrap from . import policies def bucket_exists(s3, bucket): try: s3.head_bucket(Bucket=bucket) return True except botocore.exceptions.ClientError: return False def user_exists(iam, username): try: iam.get_user(UserName=username) return True except iam.exceptions.NoSuchEntityException: return False def common_boto3_options(fn): for decorator in reversed( ( click.option( "--access-key", help="AWS access key ID", ), click.option( "--secret-key", help="AWS secret access key", ), click.option( "--session-token", help="AWS session token", ), click.option( "--endpoint-url", help="Custom endpoint URL", ), click.option( "-a", "--auth", type=click.File("r"), help="Path to JSON/INI file containing credentials", ), ) ): fn = decorator(fn) return fn def common_output_options(fn): for decorator in reversed( ( click.option("--nl", help="Output newline-delimited JSON", is_flag=True), click.option("--csv", help="Output CSV", is_flag=True), click.option("--tsv", help="Output TSV", is_flag=True), ) ): fn = decorator(fn) return fn @click.group() @click.version_option() def cli(): "A tool for creating credentials for accessing S3 buckets" class PolicyParam(click.ParamType): "Returns string of guaranteed well-formed JSON" name = "policy" def convert(self, policy, param, ctx): if policy.strip().startswith("{"): # Verify policy string is valid JSON try: json.loads(policy) except ValueError: self.fail("Invalid JSON string") return policy else: # Assume policy is a file path or '-' try: with click.open_file(policy) as f: contents = f.read() try: json.loads(contents) return contents except ValueError: self.fail( "{} contained invalid JSON".format( "Input" if policy == "-" else "File" ) ) except FileNotFoundError: self.fail("File not found") class DurationParam(click.ParamType): name = "duration" pattern = re.compile(r"^(\d+)(m|h|s)?$") def convert(self, value, param, ctx): match = self.pattern.match(value) if match is None: self.fail("Duration must be of form 3600s or 15m or 2h") integer_string, suffix = match.groups() integer = int(integer_string) if suffix == "m": integer *= 60 elif suffix == "h": integer *= 3600 # Must be between 15 minutes and 12 hours if not (15 * 60 <= integer <= 12 * 60 * 60): self.fail("Duration must be between 15 minutes and 12 hours") return integer @cli.command() @click.argument( "buckets", nargs=-1, required=True, ) @click.option("--read-only", help="Only allow reading from the bucket", is_flag=True) @click.option("--write-only", help="Only allow writing to the bucket", is_flag=True) @click.option( "--prefix", help="Restrict to keys starting with this prefix", default="*" ) @click.option( "--public-bucket", help="Bucket policy for allowing public access", is_flag=True, ) def policy(buckets, read_only, write_only, prefix, public_bucket): """ Output generated JSON policy for one or more buckets Takes the same options as s3-credentials create To output a read-only JSON policy for a bucket: s3-credentials policy my-bucket --read-only """ "Generate JSON policy for one or more buckets" if public_bucket: if len(buckets) != 1: raise click.ClickException( "--public-bucket policy can only be generated for a single bucket" ) click.echo( json.dumps(policies.bucket_policy_allow_all_get(buckets[0]), indent=4) ) return permission = "read-write" if read_only: permission = "read-only" if write_only: 
permission = "write-only" statements = [] if permission == "read-write": for bucket in buckets: statements.extend(policies.read_write_statements(bucket, prefix)) elif permission == "read-only": for bucket in buckets: statements.extend(policies.read_only_statements(bucket, prefix)) elif permission == "write-only": for bucket in buckets: statements.extend(policies.write_only_statements(bucket, prefix)) else: assert False, "Unknown permission: {}".format(permission) bucket_access_policy = policies.wrap_policy(statements) click.echo(json.dumps(bucket_access_policy, indent=4)) @cli.command() @click.argument( "buckets", nargs=-1, required=True, ) @click.option( "format_", "-f", "--format", type=click.Choice(["ini", "json"]), default="json", help="Output format for credentials", ) @click.option( "-d", "--duration", type=DurationParam(), help="How long should these credentials work for? Default is forever, use 3600 for 3600 seconds, 15m for 15 minutes, 1h for 1 hour", ) @click.option("--username", help="Username to create or existing user to use") @click.option( "-c", "--create-bucket", help="Create buckets if they do not already exist", is_flag=True, ) @click.option( "--prefix", help="Restrict to keys starting with this prefix", default="*" ) @click.option( "--public", help="Make the created bucket public: anyone will be able to download files if they know their name", is_flag=True, ) @click.option("--read-only", help="Only allow reading from the bucket", is_flag=True) @click.option("--write-only", help="Only allow writing to the bucket", is_flag=True) @click.option( "--policy", type=PolicyParam(), help="Path to a policy.json file, or literal JSON string - $!BUCKET_NAME!$ will be replaced with the name of the bucket", ) @click.option("--bucket-region", help="Region in which to create buckets") @click.option("--silent", help="Don't show performed steps", is_flag=True) @click.option("--dry-run", help="Show steps without executing them", is_flag=True) @click.option( "--user-permissions-boundary", help=( "Custom permissions boundary to use for created users, or 'none' to " "create without. Defaults to limiting to S3 based on " "--read-only and --write-only options." ), ) @common_boto3_options def create( buckets, format_, duration, username, create_bucket, prefix, public, read_only, write_only, policy, bucket_region, user_permissions_boundary, silent, dry_run, **boto_options ): """ Create and return new AWS credentials for specified S3 buckets - optionally also creating the bucket if it does not yet exist. 
To create a new bucket and output read-write credentials: s3-credentials create my-new-bucket -c To create read-only credentials for an existing bucket: s3-credentials create my-existing-bucket --read-only To create write-only credentials that are only valid for 15 minutes: s3-credentials create my-existing-bucket --write-only -d 15m """ if read_only and write_only: raise click.ClickException( "Cannot use --read-only and --write-only at the same time" ) def log(message): if not silent: click.echo(message, err=True) permission = "read-write" if read_only: permission = "read-only" if write_only: permission = "write-only" s3 = None iam = None sts = None if not dry_run: s3 = make_client("s3", **boto_options) iam = make_client("iam", **boto_options) sts = make_client("sts", **boto_options) # Verify buckets for bucket in buckets: # Create bucket if it doesn't exist if dry_run or (not bucket_exists(s3, bucket)): if (not dry_run) and (not create_bucket): raise click.ClickException( "Bucket does not exist: {} - try --create-bucket to create it".format( bucket ) ) if dry_run or create_bucket: kwargs = {} if bucket_region: kwargs = { "CreateBucketConfiguration": { "LocationConstraint": bucket_region } } bucket_policy = {} if public: bucket_policy = policies.bucket_policy_allow_all_get(bucket) if dry_run: click.echo( "Would create bucket: '{}'{}".format( bucket, ( " with args {}".format(json.dumps(kwargs, indent=4)) if kwargs else "" ), ) ) if bucket_policy: click.echo("... then attach the following bucket policy to it:") click.echo(json.dumps(bucket_policy, indent=4)) else: s3.create_bucket(Bucket=bucket, **kwargs) info = "Created bucket: {}".format(bucket) if bucket_region: info += " in region: {}".format(bucket_region) log(info) if bucket_policy: s3.put_bucket_policy( Bucket=bucket, Policy=json.dumps(bucket_policy) ) log("Attached bucket policy allowing public access") # At this point the buckets definitely exist - create the inline policy for assume_role() assume_role_policy = {} bucket_access_policy = {} if policy: assume_role_policy = json.loads(policy.replace("$!BUCKET_NAME!$", bucket)) else: statements = [] if permission == "read-write": for bucket in buckets: statements.extend(policies.read_write_statements(bucket, prefix)) elif permission == "read-only": for bucket in buckets: statements.extend(policies.read_only_statements(bucket, prefix)) elif permission == "write-only": for bucket in buckets: statements.extend(policies.write_only_statements(bucket, prefix)) else: assert False, "Unknown permission: {}".format(permission) assume_role_policy = policies.wrap_policy(statements) if duration: # We're going to use sts.assume_role() rather than creating a user if dry_run: click.echo("Would ensure role: 's3-credentials.AmazonS3FullAccess'") click.echo( "Would assume role using following policy for {} seconds:".format( duration ) ) click.echo(json.dumps(assume_role_policy, indent=4)) else: s3_role_arn = ensure_s3_role_exists(iam, sts) log("Assume role against {} for {}s".format(s3_role_arn, duration)) credentials_response = sts.assume_role( RoleArn=s3_role_arn, RoleSessionName="s3.{permission}.{buckets}".format( permission="custom" if policy else permission, buckets=",".join(buckets), ), Policy=json.dumps(assume_role_policy), DurationSeconds=duration, ) if format_ == "ini": click.echo( ( "[default]\naws_access_key_id={}\n" "aws_secret_access_key={}\naws_session_token={}" ).format( credentials_response["Credentials"]["AccessKeyId"], credentials_response["Credentials"]["SecretAccessKey"], 
credentials_response["Credentials"]["SessionToken"], ) ) else: click.echo( json.dumps( credentials_response["Credentials"], indent=4, default=str ) ) return # No duration, so wo create a new user so we can issue non-expiring credentials if not username: # Default username is "s3.read-write.bucket1,bucket2" username = "s3.{permission}.{buckets}".format( permission="custom" if policy else permission, buckets=",".join(buckets) ) if dry_run or (not user_exists(iam, username)): kwargs = {"UserName": username} if user_permissions_boundary != "none": # This is a user-account level limitation, it does not grant # permissions on its own but is a useful extra level of defense # https://github.com/simonw/s3-credentials/issues/1#issuecomment-958201717 if not user_permissions_boundary: # Pick one based on --read-only/--write-only if read_only: user_permissions_boundary = ( "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess" ) else: # Need full access in order to be able to write user_permissions_boundary = ( "arn:aws:iam::aws:policy/AmazonS3FullAccess" ) kwargs["PermissionsBoundary"] = user_permissions_boundary info = " user: '{}'".format(username) if user_permissions_boundary != "none": info += " with permissions boundary: '{}'".format(user_permissions_boundary) if dry_run: click.echo("Would create{}".format(info)) else: iam.create_user(**kwargs) log("Created {}".format(info)) # Add inline policies to the user so they can access the buckets user_policy = {} for bucket in buckets: policy_name = "s3.{permission}.{bucket}".format( permission="custom" if policy else permission, bucket=bucket, ) if policy: user_policy = json.loads(policy.replace("$!BUCKET_NAME!$", bucket)) else: if permission == "read-write": user_policy = policies.read_write(bucket, prefix) elif permission == "read-only": user_policy = policies.read_only(bucket, prefix) elif permission == "write-only": user_policy = policies.write_only(bucket, prefix) else: assert False, "Unknown permission: {}".format(permission) if dry_run: click.echo( "Would attach policy called '{}' to user '{}', details:\n{}".format( policy_name, username, json.dumps(user_policy, indent=4), ) ) else: iam.put_user_policy( PolicyDocument=json.dumps(user_policy), PolicyName=policy_name, UserName=username, ) log("Attached policy {} to user {}".format(policy_name, username)) # Retrieve and print out the credentials if dry_run: click.echo("Would call create access key for user '{}'".format(username)) else: response = iam.create_access_key( UserName=username, ) log("Created access key for user: {}".format(username)) if format_ == "ini": click.echo( ("[default]\naws_access_key_id={}\n" "aws_secret_access_key={}").format( response["AccessKey"]["AccessKeyId"], response["AccessKey"]["SecretAccessKey"], ) ) elif format_ == "json": click.echo(json.dumps(response["AccessKey"], indent=4, default=str)) @cli.command() @common_boto3_options def whoami(**boto_options): "Identify currently authenticated user" sts = make_client("sts", **boto_options) identity = sts.get_caller_identity() identity.pop("ResponseMetadata") click.echo(json.dumps(identity, indent=4, default=str)) @cli.command() @common_output_options @common_boto3_options def list_users(nl, csv, tsv, **boto_options): """ List all users for this account s3-credentials list-users Add --csv or --csv for CSV or TSV format: s3-credentials list-users --csv """ iam = make_client("iam", **boto_options) output( paginate(iam, "list_users", "Users"), ( "UserName", "UserId", "Arn", "Path", "CreateDate", "PasswordLast<PASSWORD>", 
"PermissionsBoundary", "Tags", ), nl, csv, tsv, ) @cli.command() @click.argument("role_names", nargs=-1) @click.option("--details", help="Include attached policies (slower)", is_flag=True) @common_output_options @common_boto3_options def list_roles(role_names, details, nl, csv, tsv, **boto_options): """ List roles To list all roles for this AWS account: s3-credentials list-roles Add --csv or --csv for CSV or TSV format: s3-credentials list-roles --csv For extra details per role (much slower) add --details s3-credentials list-roles --details """ iam = make_client("iam", **boto_options) headers = ( "Path", "RoleName", "RoleId", "Arn", "CreateDate", "AssumeRolePolicyDocument", "Description", "MaxSessionDuration", "PermissionsBoundary", "Tags", "RoleLastUsed", ) if details: headers += ("inline_policies", "attached_policies") def iterate(): for role in paginate(iam, "list_roles", "Roles"): if role_names and role["RoleName"] not in role_names: continue if details: role_name = role["RoleName"] role["inline_policies"] = [] # Get inline policy names, then policy for each one for policy_name in paginate( iam, "list_role_policies", "PolicyNames", RoleName=role_name ): role_policy_response = iam.get_role_policy( RoleName=role_name, PolicyName=policy_name, ) role_policy_response.pop("ResponseMetadata", None) role["inline_policies"].append(role_policy_response) # Get attached managed policies role["attached_policies"] = [] for attached in paginate( iam, "list_attached_role_policies", "AttachedPolicies", RoleName=role_name, ): policy_arn = attached["PolicyArn"] attached_policy_response = iam.get_policy( PolicyArn=policy_arn, ) policy_details = attached_policy_response["Policy"] # Also need to fetch the policy JSON version_id = policy_details["DefaultVersionId"] policy_version_response = iam.get_policy_version( PolicyArn=policy_arn, VersionId=version_id, ) policy_details["PolicyVersion"] = policy_version_response[ "PolicyVersion" ] role["attached_policies"].append(policy_details) yield role output(iterate(), headers, nl, csv, tsv) @cli.command() @click.argument("usernames", nargs=-1) @common_boto3_options def list_user_policies(usernames, **boto_options): """ List inline policies for specified users s3-credentials list-user-policies username Returns policies for all users if no usernames are provided. 
""" iam = make_client("iam", **boto_options) if not usernames: usernames = [user["UserName"] for user in paginate(iam, "list_users", "Users")] for username in usernames: click.echo("User: {}".format(username)) for policy_name in paginate( iam, "list_user_policies", "PolicyNames", UserName=username ): click.echo("PolicyName: {}".format(policy_name)) policy_response = iam.get_user_policy( UserName=username, PolicyName=policy_name ) click.echo( json.dumps(policy_response["PolicyDocument"], indent=4, default=str) ) @cli.command() @click.argument("buckets", nargs=-1) @click.option("--details", help="Include extra bucket details (slower)", is_flag=True) @common_output_options @common_boto3_options def list_buckets(buckets, details, nl, csv, tsv, **boto_options): """ List buckets To list all buckets and their creation time as JSON: s3-credentials list-buckets Add --csv or --csv for CSV or TSV format: s3-credentials list-buckets --csv For extra details per bucket (much slower) add --details s3-credentials list-buckets --details """ s3 = make_client("s3", **boto_options) headers = ["Name", "CreationDate"] if details: headers += ["bucket_acl", "public_access_block", "bucket_website"] def iterator(): for bucket in s3.list_buckets()["Buckets"]: if buckets and (bucket["Name"] not in buckets): continue if details: bucket_acl = dict( (key, value) for key, value in s3.get_bucket_acl( Bucket=bucket["Name"], ).items() if key != "ResponseMetadata" ) try: pab = s3.get_public_access_block( Bucket=bucket["Name"], )["PublicAccessBlockConfiguration"] except s3.exceptions.ClientError: pab = None try: bucket_website = dict( (key, value) for key, value in s3.get_bucket_website( Bucket=bucket["Name"], ).items() if key != "ResponseMetadata" ) except s3.exceptions.ClientError: bucket_website = None bucket["bucket_acl"] = bucket_acl bucket["public_access_block"] = pab bucket["bucket_website"] = bucket_website yield bucket output(iterator(), headers, nl, csv, tsv) @cli.command() @click.argument("usernames", nargs=-1, required=True) @common_boto3_options def delete_user(usernames, **boto_options): """ Delete specified users, their access keys and their inline policies s3-credentials delete-user username1 username2 """ iam = make_client("iam", **boto_options) for username in usernames: click.echo("User: {}".format(username)) # Fetch and delete their policies policy_names_to_delete = list( paginate(iam, "list_user_policies", "PolicyNames", UserName=username) ) for policy_name in policy_names_to_delete: iam.delete_user_policy( UserName=username, PolicyName=policy_name, ) click.echo(" Deleted policy: {}".format(policy_name)) # Fetch and delete their access keys access_key_ids_to_delete = [ access_key["AccessKeyId"] for access_key in paginate( iam, "list_access_keys", "AccessKeyMetadata", UserName=username ) ] for access_key_id in access_key_ids_to_delete: iam.delete_access_key( UserName=username, AccessKeyId=access_key_id, ) click.echo(" Deleted access key: {}".format(access_key_id)) iam.delete_user(UserName=username) click.echo(" Deleted user") def make_client(service, access_key, secret_key, session_token, endpoint_url, auth): if auth: if access_key or secret_key or session_token: raise click.ClickException( "--auth cannot be used with --access-key, --secret-key or --session-token" ) auth_content = auth.read().strip() if auth_content.startswith("{"): # Treat as JSON decoded = json.loads(auth_content) access_key = decoded.get("AccessKeyId") secret_key = decoded.get("SecretAccessKey") session_token = 
decoded.get("SessionToken") else: # Treat as INI config = configparser.ConfigParser() config.read_string(auth_content) # Use the first section that has an aws_access_key_id for section in config.sections(): if "aws_access_key_id" in config[section]: access_key = config[section].get("aws_access_key_id") secret_key = config[section].get("aws_secret_access_key") session_token = config[section].get("aws_session_token") break kwargs = {} if access_key: kwargs["aws_access_key_id"] = access_key if secret_key: kwargs["aws_secret_access_key"] = secret_key if session_token: kwargs["aws_session_token"] = session_token if endpoint_url: kwargs["endpoint_url"] = endpoint_url return boto3.client(service, **kwargs) def ensure_s3_role_exists(iam, sts): "Create s3-credentials.AmazonS3FullAccess role if not exists, return ARN" role_name = "s3-credentials.AmazonS3FullAccess" account_id = sts.get_caller_identity()["Account"] try: role = iam.get_role(RoleName=role_name) return role["Role"]["Arn"] except iam.exceptions.NoSuchEntityException: create_role_response = iam.create_role( Description=( "Role used by the s3-credentials tool to create time-limited " "credentials that are restricted to specific buckets" ), RoleName=role_name, AssumeRolePolicyDocument=json.dumps( { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "AWS": "arn:aws:iam::{}:root".format(account_id) }, "Action": "sts:AssumeRole", } ], } ), ) # Attach AmazonS3FullAccess to it - note that even though we use full access # on the role itself any time we call sts.assume_role() we attach an additional # policy to ensure reduced access for the temporary credentials iam.attach_role_policy( RoleName="s3-credentials.AmazonS3FullAccess", PolicyArn="arn:aws:iam::aws:policy/AmazonS3FullAccess", ) return create_role_response["Role"]["Arn"] @cli.command() @click.argument("bucket") @click.option("--prefix", help="List keys starting with this prefix") @common_output_options @common_boto3_options def list_bucket(bucket, prefix, nl, csv, tsv, **boto_options): """ List contents of bucket To list the contents of a bucket as JSON: s3-credentials list-bucket my-bucket Add --csv or --csv for CSV or TSV format: s3-credentials list-bucket my-bucket --csv """ s3 = make_client("s3", **boto_options) kwargs = {"Bucket": bucket} if prefix: kwargs["Prefix"] = prefix try: output( paginate(s3, "list_objects_v2", "Contents", **kwargs), ("Key", "LastModified", "ETag", "Size", "StorageClass", "Owner"), nl, csv, tsv, ) except botocore.exceptions.ClientError as e: raise click.ClickException(e) @cli.command() @click.argument("bucket") @click.argument("key") @click.argument( "path", type=click.Path( exists=True, file_okay=True, dir_okay=False, readable=True, allow_dash=True ), ) @click.option( "--content-type", help="Content-Type to use (default is auto-detected based on file extension)", ) @click.option("silent", "-s", "--silent", is_flag=True, help="Don't show progress bar") @common_boto3_options def put_object(bucket, key, path, content_type, silent, **boto_options): """ Upload an object to an S3 bucket To upload a file to /my-key.txt in the my-bucket bucket: s3-credentials put-object my-bucket my-key.txt /path/to/file.txt Use - to upload content from standard input: echo "Hello" | s3-credentials put-object my-bucket hello.txt - """ s3 = make_client("s3", **boto_options) size = None extra_args = {} if path == "-": # boto needs to be able to seek fp = io.BytesIO(sys.stdin.buffer.read()) if not silent: size = fp.getbuffer().nbytes else: if not content_type: 
content_type = mimetypes.guess_type(path)[0] fp = click.open_file(path, "rb") if not silent: size = os.path.getsize(path) if content_type is not None: extra_args["ContentType"] = content_type if not silent: # Show progress bar with click.progressbar(length=size, label="Uploading") as bar: s3.upload_fileobj( fp, bucket, key, Callback=bar.update, ExtraArgs=extra_args ) else: s3.upload_fileobj(fp, bucket, key, ExtraArgs=extra_args) @cli.command() @click.argument("bucket") @click.argument("key") @click.option( "output", "-o", "--output", type=click.Path(file_okay=True, dir_okay=False, writable=True, allow_dash=False), help="Write to this file instead of stdout", ) @common_boto3_options def get_object(bucket, key, output, **boto_options): """ Download an object from an S3 bucket To see the contents of the bucket on standard output: s3-credentials get-object my-bucket hello.txt To save to a file: s3-credentials get-object my-bucket hello.txt -o hello.txt """ s3 = make_client("s3", **boto_options) if not output: fp = sys.stdout.buffer else: fp = click.open_file(output, "wb") s3.download_fileobj(bucket, key, fp) @cli.command() @click.argument("bucket") @click.option( "allowed_methods", "-m", "--allowed-method", multiple=True, help="Allowed method e.g. GET", ) @click.option( "allowed_headers", "-h", "--allowed-header", multiple=True, help="Allowed header e.g. Authorization", ) @click.option( "allowed_origins", "-o", "--allowed-origin", multiple=True, help="Allowed origin e.g. https://www.example.com/", ) @click.option( "expose_headers", "-e", "--expose-header", multiple=True, help="Header to expose e.g. ETag", ) @click.option( "max_age_seconds", "--max-age-seconds", type=int, help="How long to cache preflight requests", ) @common_boto3_options def set_cors_policy( bucket, allowed_methods, allowed_headers, allowed_origins, expose_headers, max_age_seconds, **boto_options ): """ Set CORS policy for a bucket To allow GET requests from any origin: s3-credentials set-cors-policy my-bucket To allow GET and PUT from a specific origin and expose ETag headers: \b s3-credentials set-cors-policy my-bucket \\ --allowed-method GET \\ --allowed-method PUT \\ --allowed-origin https://www.example.com/ \\ --expose-header ETag """ s3 = make_client("s3", **boto_options) if not bucket_exists(s3, bucket): raise click.ClickException("Bucket {} does not exists".format(bucket)) cors_rule = { "ID": "set-by-s3-credentials", "AllowedOrigins": allowed_origins or ["*"], "AllowedHeaders": allowed_headers, "AllowedMethods": allowed_methods or ["GET"], "ExposeHeaders": expose_headers, } if max_age_seconds: cors_rule["MaxAgeSeconds"] = max_age_seconds try: s3.put_bucket_cors(Bucket=bucket, CORSConfiguration={"CORSRules": [cors_rule]}) except botocore.exceptions.ClientError as e: raise click.ClickException(e) @cli.command() @click.argument("bucket") @common_boto3_options def get_cors_policy(bucket, **boto_options): """ Get CORS policy for a bucket s3-credentials get-cors-policy my-bucket Returns the CORS policy for this bucket, if set, as JSON """ s3 = make_client("s3", **boto_options) try: response = s3.get_bucket_cors(Bucket=bucket) except botocore.exceptions.ClientError as e: raise click.ClickException(e) click.echo(json.dumps(response["CORSRules"], indent=4, default=str)) def output(iterator, headers, nl, csv, tsv): if nl: for item in iterator: click.echo(json.dumps(item, default=str)) elif csv or tsv: writer = DictWriter( sys.stdout, headers, dialect="excel-tab" if tsv else "excel" ) writer.writeheader() 
writer.writerows(fix_json(row) for row in iterator) else: for line in stream_indented_json(iterator): click.echo(line) def stream_indented_json(iterator, indent=2): # We have to iterate two-at-a-time so we can know if we # should output a trailing comma or if we have reached # the last item. current_iter, next_iter = itertools.tee(iterator, 2) next(next_iter, None) first = True for item, next_item in itertools.zip_longest(current_iter, next_iter): is_last = next_item is None data = item line = "{first}{serialized}{separator}{last}".format( first="[\n" if first else "", serialized=textwrap.indent( json.dumps(data, indent=indent, default=str), " " * indent ), separator="," if not is_last else "", last="\n]" if is_last else "", ) yield line first = False if first: # We didn't output anything, so yield the empty list yield "[]" def paginate(service, method, list_key, **kwargs): paginator = service.get_paginator(method) for response in paginator.paginate(**kwargs): yield from response[list_key] def fix_json(row): # If a key value is list or dict, json encode it return dict( [ ( key, json.dumps(value, indent=2, default=str) if isinstance(value, (dict, list, tuple)) else value, ) for key, value in row.items() ] )
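# Hedged usage sketch (illustration only, not wired into the CLI above): the helpers
# directly above are easy to exercise in isolation. The function below shows how
# stream_indented_json() emits a JSON array one line at a time and how fix_json()
# flattens nested values so DictWriter can serialise them; the sample rows are
# invented for illustration and nothing calls this function.
def _demo_output_helpers():
    rows = [
        {"UserName": "alice", "Tags": [{"Key": "team", "Value": "data"}]},
        {"UserName": "bob", "Tags": []},
    ]
    # Prints "[", then each indented item (with a trailing comma except the last), then "]"
    for line in stream_indented_json(iter(rows)):
        print(line)
    # List/dict values become JSON-encoded strings, plain values pass through unchanged
    print([fix_json(row) for row in rows])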
87615
import unittest
import unittest.mock

from programy.extensions.base import Extension


class MockExtension(Extension):

    def execute(self, context, data):
        raise NotImplementedError()


class ExtensionTests(unittest.TestCase):

    def test_ensure_not_implemented(self):
        bot = unittest.mock.Mock()
        extension = MockExtension()
        self.assertIsNotNone(extension)
        with self.assertRaises(Exception):
            # Call with the mock's (context, data) signature so the assertion is
            # triggered by NotImplementedError rather than an argument-count TypeError
            extension.execute(bot, "Some Data")
87632
import importlib import numpy as np import pandas as pd import nose import pytest import statsmodels.datasets from statsmodels.datasets.utils import Dataset exclude = ['check_internet', 'clear_data_home', 'get_data_home', 'get_rdataset', 'tests', 'utils', 'webuse'] datasets = [] for dataset_name in dir(statsmodels.datasets): if not dataset_name.startswith('_') and dataset_name not in exclude: datasets.append(dataset_name) # TODO: Remove nottest when nose support is dropped @nose.tools.nottest @pytest.mark.parametrize('dataset_name', datasets) def test_dataset(dataset_name): dataset = importlib.import_module('statsmodels.datasets.' + dataset_name) data = dataset.load() assert isinstance(data, Dataset) assert isinstance(data.data, np.recarray) df_data = dataset.load_pandas() assert isinstance(df_data, Dataset) assert isinstance(df_data.data, pd.DataFrame) # TODO: Remove when nose support is dropped def test_all_datasets(): for dataset in datasets: test_dataset(dataset)
87648
from sklearn.svm import OneClassSVM from uq360.utils.transformers.feature_transformer import FeatureTransformer class OneClassSVMTransformer(FeatureTransformer): """One-class SVM outlier-classifier based derived feature. This transformer fits an SVM decision boundary enclosing the full training set. This is then the decision boundary to identify outliers in production data at inference time. """ def __init__(self): super(OneClassSVMTransformer, self).__init__() self.one_class_classifier = OneClassSVM(nu=0.1, kernel="rbf", gamma='auto') self.fit_status = False @classmethod def name(cls): return ('one_class_svm') def fit(self, x, y): self.one_class_classifier.fit(x) self.fit_status = True def transform(self, x, predictions): return self.one_class_classifier.decision_function(x) def save(self, output_location=None): self.register_pkl_object(self.one_class_classifier, 'one_class_classifier') super(OneClassSVMTransformer, self)._save(output_location) def load(self, input_location=None): self._load(input_location) self.one_class_classifier = self.pkl_registry[0][0] assert type(self.one_class_classifier) == OneClassSVM self.fit_status = True
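# Hedged usage sketch (illustration only, not part of the original module): shows the
# intended fit/transform flow with synthetic data - fit on in-distribution training
# features, then score production rows with the SVM decision function. The arrays and
# the 6.0 offset are invented; the y and predictions arguments are accepted but unused.
if __name__ == "__main__":
    import numpy as np

    transformer = OneClassSVMTransformer()
    x_train = np.random.randn(200, 5)
    transformer.fit(x_train, y=None)

    x_prod = np.vstack([np.random.randn(10, 5), np.random.randn(2, 5) + 6.0])
    scores = transformer.transform(x_prod, predictions=None)
    # Points far outside the learned boundary get strongly negative scores
    print(scores)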
87665
from django.http import HttpResponse from django.views.decorators.csrf import csrf_exempt def index(request): return HttpResponse(status=200) def get_user(request, id): return HttpResponse(id) @csrf_exempt def create_user(request): return HttpResponse(status=200)
87711
from dropout import dropout import math import theano import theano.tensor as T import util class ConcatWithSoftmax(object): def __init__(self, inp, n_labels, n_hidden_previous, update_fn, training=None, keep_prob=None): if type(inp) == list: self.input = T.concatenate(inp) input_size = len(inp) * n_hidden_previous else: self.input = inp input_size = n_hidden_previous if training is not None: assert keep_prob is not None self.input = dropout(self.input, training, keep_prob) self.update_fn = update_fn # input -> hidden (sized somwhere between size of input & softmax) n_hidden = int(math.sqrt(input_size * n_labels)) print "concat sizing %s -> %s -> %s" % (input_size, n_hidden, n_labels) self.Wih = util.sharedMatrix(input_size, n_hidden, 'Wih') self.bh = util.shared(util.zeros((1, n_hidden)), 'bh') # hidden -> softmax self.Whs = util.sharedMatrix(n_hidden, n_labels, 'Whs') self.bs = util.shared(util.zeros((1, n_labels)), 'bs') def name(self): return "concat_with_softmax" def dense_params(self): return [self.Wih, self.bh, self.Whs, self.bs] def params_for_l2_penalty(self): return self.dense_params() def updates_wrt_cost(self, cost, learning_opts): print "CONCAT GRADS" gradients = util.clipped(T.grad(cost=cost, wrt=self.dense_params())) return self.update_fn(self.dense_params(), gradients, learning_opts) def prob_pred(self): hidden = T.nnet.sigmoid(T.dot(self.input, self.Wih) + self.bh) prob_y = T.nnet.softmax(T.dot(hidden, self.Whs) + self.bs) pred_y = T.argmax(prob_y, axis=1) return (prob_y, pred_y)
87738
import numpy as np n = int(input().strip()) array = np.array([[float(x) for x in input().strip().split()] for _ in range(n)], dtype = float) print(np.linalg.det(array))
87758
import asyncio import ipaddress import logging import os from pathlib import Path from aiohttp import web from notedrive.magnet import settings from notedrive.magnet.core import Magnet2Torrent from notedrive.magnet.dht.network import Server as DHTServer from notedrive.magnet.server import routes from notedrive.magnet.utils import FailedToFetchException def start_serve(dht_server, torrent_cache_folder=None, debug=False, apikey=None, ip=ipaddress.IPv4Address("0.0.0.0"), port=18667, ): if not debug: stdio_handler = logging.StreamHandler() stdio_handler.setLevel(logging.INFO) logger = logging.getLogger("aiohttp.access") logger.setLevel(logging.INFO) logger.addHandler(stdio_handler) settings.SERVE_APIKEY = apikey settings.DHT_SERVER = dht_server settings.TORRENT_CACHE_FOLDER = torrent_cache_folder app = web.Application() app.add_routes(routes) web.run_app(app, host=str(ip), port=port) def start_fetch(dht_server, torrent_cache_folder=None, dht_state_file=None, magnet=None, ): loop = asyncio.get_event_loop() m2t = Magnet2Torrent(dht_server=dht_server, torrent_cache_folder=torrent_cache_folder, use_additional_trackers=True) try: torrent = loop.run_until_complete(m2t.retrieve_torrent(magnet_link=magnet)) except FailedToFetchException: print("Unable to fetch magnet link") quit(1) else: with open(torrent.name, "wb") as f: f.write(torrent.data_encode()) print("Downloaded magnet link into file: {}".format(torrent.name)) if dht_server and dht_state_file: dht_server.save_state(dht_state_file) def start(torrent_cache_folder=None, debug=False, use_dht=False, dht_state_file=None, dht_port=settings.DHT_PORT, dht_ip=ipaddress.IPv4Address("0.0.0.0"), command='serve', apikey=None, ip=ipaddress.IPv4Address("0.0.0.0"), port=18667, magnet=None, ): if torrent_cache_folder: torrent_cache_folder = Path(torrent_cache_folder) if not torrent_cache_folder.exists(): os.makedirs(torrent_cache_folder) if not torrent_cache_folder.is_dir(): print("Path {} exists but is not a folder".format(torrent_cache_folder)) quit(1) if debug: logging.basicConfig(level=logging.DEBUG, format="%(asctime)-15s:%(levelname)s:%(name)s:%(lineno)d:%(message)s") if use_dht: print("Bootstrapping DHT server") loop = asyncio.get_event_loop() dht_server = DHTServer() if dht_state_file and os.path.isfile(dht_state_file): dht_server = DHTServer.load_state(dht_state_file) loop.run_until_complete(dht_server.listen(dht_port, str(dht_ip))) else: dht_server = DHTServer() loop.run_until_complete(dht_server.listen(dht_port, str(dht_ip))) loop.run_until_complete(dht_server.bootstrap(settings.DHT_BOOTSTRAP_NODES)) if dht_state_file: dht_server.save_state_regularly(dht_state_file) print("Done bootstrapping DHT server") else: dht_server = None if command == "serve": start_serve(dht_server, torrent_cache_folder=torrent_cache_folder, debug=debug, apikey=apikey, ip=ip, port=port) elif command == "fetch": start_fetch(dht_server, torrent_cache_folder=torrent_cache_folder, dht_state_file=dht_state_file, magnet=magnet)
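# Hedged usage sketch (illustration only): calls the dispatcher above in "fetch" mode
# without DHT, relying on the trackers Magnet2Torrent adds itself. The magnet URI is a
# placeholder rather than a real info-hash, so an actual run would report a failed
# fetch and exit.
if __name__ == "__main__":
    start(
        command="fetch",
        use_dht=False,
        magnet="magnet:?xt=urn:btih:<info-hash-placeholder>",
        torrent_cache_folder=None,
    )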
87764
import six from collections import deque, defaultdict import numpy as np from pybot.utils.itertools_recipes import chunks def concat_chunked_dicts(dlist): """ Concatenate individual arrays in dictionary TODO: defaultdict is the right way to do it, except for conversion to dict in the final return call. Keras requires type as dict """ batch = defaultdict(list) for item in dlist: for k,v in six.iteritems(item): batch[k].append(v) for k,v in six.iteritems(batch): batch[k] = np.concatenate(v) return dict(batch) def chunked_data(iterable, batch_size=10): """ For tuples: arg = ([np.array, np.array], {'output': np.array}) For dictionaries: arg = ({'input': np.array, 'input2': np.array}, {'output': np.array}) """ for batch in chunks(iterable, batch_size): args = list(zip(*batch)) # (arg), (arg), ... # arg = ([x1,x2], y) # type(args[0]) = tuple # type(args[0][0]) = list if isinstance(args[0][0], dict): items = [concat_chunked_dicts(arg) for arg in args] elif isinstance(args[0][0], np.ndarray): items = [np.concatenate(arg) for arg in args] elif isinstance(args[0][0], list) and isinstance(args[0][0][0], np.ndarray): items = [[np.concatenate(item) for item in arg] for arg in args] else: raise TypeError('''Unknown type: either dict, np.array, or list of np.arrays can be batched''' '''Type is {}'''.format(type(args[0][0]))) yield tuple(items) def get_dataset_generator(datagen, batch_size=1): if batch_size > 1: datagen = chunked_data(datagen, batch_size=batch_size) return datagen
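# Hedged usage sketch (illustration only): batches an iterable of
# ({'input': ...}, {'output': ...}) pairs into Keras-style batches of two. The shapes
# are invented for illustration; chunks() comes from pybot.utils.itertools_recipes as
# imported above and is assumed to yield successive groups of items.
if __name__ == "__main__":
    samples = (
        ({"input": np.ones((1, 3))}, {"output": np.zeros((1, 1))}) for _ in range(5)
    )
    for inputs, targets in chunked_data(samples, batch_size=2):
        # Each batch concatenates the per-sample arrays along axis 0,
        # so the shapes are (2, 3)/(2, 1) and finally (1, 3)/(1, 1)
        print(inputs["input"].shape, targets["output"].shape)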
87768
from abc import ABC, abstractmethod

from domain.models.container_settings import ContainerSettings
from domain.models.container_info import ContainerInfo


class AbstractJobManagementService(ABC):
    # Subclassing ABC already applies ABCMeta, so the Python 2 style
    # __metaclass__ attribute is not needed here.

    @abstractmethod
    def start_container(self, container_settings: ContainerSettings) -> None:
        raise NotImplementedError

    @abstractmethod
    def stop_container(self, container_info: ContainerInfo) -> None:
        raise NotImplementedError
87776
from django.views.generic.edit import CreateView from eahub.feedback.models import Feedback class FeedbackCreate(CreateView): model = Feedback fields = ["message", "email", "page_url"] success_url = "/"
87800
from matrix.server import MatrixServer from matrix._weechat import MockConfig import matrix.globals as G G.CONFIG = MockConfig() class TestClass(object): def test_address_parsing(self): homeserver = MatrixServer._parse_url("example.org", 8080) assert homeserver.hostname == "example.org" assert homeserver.geturl() == "https://example.org:8080" homeserver = MatrixServer._parse_url("example.org/_matrix", 80) assert homeserver.hostname == "example.org" assert homeserver.geturl() == "https://example.org:80/_matrix" homeserver = MatrixServer._parse_url( "https://example.org/_matrix", 80 ) assert homeserver.hostname == "example.org" assert homeserver.geturl() == "https://example.org:80/_matrix"
87803
from importlib import import_module from os import environ import typing as t from . import global_settings from ..errors.server import SettingsFileNotFoundError, ImproperlyConfigured from ..errors.misc import DataTypeMismatchError __all__ = ("get_settings_module", "settings") ENVIRONMENT_VARIABLE = "NAVYCUT_SETTINGS_MODULE" empty = object() class LazySettings: def __init__(self) -> None: self.settings_modules = environ.get(ENVIRONMENT_VARIABLE, None) if self.settings_modules is None: raise SettingsFileNotFoundError self._wrapped = Settings(self.settings_modules) def __getattr__(self, name): val = getattr(self._wrapped, name) if name == 'SECRET_KEY' and not val: raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.") self.__dict__[name] = val return val @property def configured(self): """Return True if the settings have already been configured.""" return self._wrapped is not empty class Settings: def __init__(self, settings_module): for setting in dir(global_settings): if setting.isupper(): setattr(self, setting, getattr(global_settings, setting)) self.SETTINGS_MODULE = settings_module try: mod = import_module(self.SETTINGS_MODULE) except: raise SettingsFileNotFoundError(self.SETTINGS_MODULE, None) tuple_settings = ( 'ALLOWED_HOSTS', "INSTALLED_APPS", ) for setting in dir(mod): if setting.isupper(): setting_value = getattr(mod, setting) if (setting in tuple_settings and not isinstance(setting_value, (list, tuple))): raise DataTypeMismatchError(setting_value, "settings file", "list or tuple") setattr(self, setting, setting_value) setattr(self, "SETTINGS_FILE_NAME", self.SETTINGS_MODULE) settings:t.Type["LazySettings"] = LazySettings()
87844
import itertools import operator import re import bs4 from pudzu.utils import * # Various utilities for BeautifulSoup # helper functions since: (a) bs4 tags need to be compared with is, not eq; (b) they're iterable def remove_duplicate_tags(l): """Remove duplicate tags from a list (using object identity rather than equality)""" return remove_duplicates(l, key=id) def non_bs4_iterable(v): """Whether an object is a non-bs4 iterable.""" return non_string_iterable(v) and not hasattr(v, "find_all") def make_bs4_iterable(v): """Return a non-bs4 iterable from an object, wrapping it in a tuple if needed.""" return v if non_bs4_iterable(v) else (v,) # pretty-printing tags def print_tags(tags, attr=None): """Print one or more tags, excluding any nested content.""" for tag in make_bs4_iterable(tags): if attr is not None: print(tag.attrs.get(attr, "")) elif hasattr(tag, "attr"): attrs = " ".join('{}="{}"'.format(k, " ".join(v) if non_string_iterable(v) else v) for k, v in sorted(tag.attrs.items())) print("<{}{}{}>".format(tag.name, "" if len(tag.attrs) == 0 else " ", attrs)) else: print(tag) def print_path(tag): """Print the path from the root down to a tag.""" print_tags(list(itertools.chain([tag], tag.parents))[-2::-1]) pt = print_tags pp = print_path # filtering def re_exclude(pattern): """Negated regular expression filter.""" pattern = re.compile(pattern) return lambda v: v and not re.search(pattern, v) def is_parent(t, s): """Whether t is s's parent.""" return t is s.parent def is_child(t, s): """Whether t is s's child.""" return s is t.parent def is_ancestor(t, s): """Whether t is an ancestor of s.""" return is_in(t, s.parents) def is_descendent(t, s): """Whether t is a descendent of s.""" return is_in(s, t.parents) def is_after(t, s): """Whether t occurs after s.""" return is_in(t, s.next_elements) def is_before(t, s): """Whether t occurs before s.""" return is_in(s, t.next_elements) def exclude_tags(tags, excluded, relation=operator.is_): """Filter out tags that are related to at least one of the excluded set.""" return [t for t in make_bs4_iterable(tags) if not any(relation(t, s) for s in make_bs4_iterable(excluded))] def restrict_tags(tags, included, relation=operator.is_): """Restrict to tags that are related to at least one of the included set.""" return [t for t in make_bs4_iterable(tags) if any(relation(t, s) for s in make_bs4_iterable(included))] # finding tags by chaining def curry_method(method): def fn(*args, **kwargs): return lambda o: method(o, *args, **kwargs) return fn all_ = curry_method(bs4.element.Tag.find_all) next_ = curry_method(bs4.element.Tag.find_all_next) prev_ = curry_method(bs4.element.Tag.find_all_previous) parents_ = curry_method(bs4.element.Tag.find_parents) next_siblings_ = curry_method(bs4.element.Tag.find_next_siblings) prev_siblings_ = curry_method(bs4.element.Tag.find_previous_siblings) select_ = curry_method(bs4.element.Tag.select) exclude_ = curry_method(exclude_tags) restrict_ = curry_method(restrict_tags) def find_tags(tags, *fns): """Apply a chain sequence of find methods to a collection of tags. Result may contain duplicates.""" ts = make_bs4_iterable(tags) for fn in fns: if not callable(fn): fn = all_(fn) ts = [s for t in ts for s in make_bs4_iterable(fn(t))] return ts def find_tag(tags, *fns): """Same as find_tags but returns the first result only (or None if there are none).""" return first(find_tags(tags, *fns))
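# Hedged usage sketch (illustration only): chains the curried finders over a tiny
# in-memory document. The HTML snippet and variable names are invented for
# illustration.
if __name__ == "__main__":
    soup = bs4.BeautifulSoup(
        "<div><ul><li>a</li><li>b</li></ul><ul><li>c</li></ul></div>", "html.parser"
    )
    # Non-callable arguments are wrapped with all_(), so this finds every <li>
    # inside every <ul>
    print_tags(find_tags(soup, "ul", "li"))
    # Exclude the <li> tags that live inside the first <ul>, leaving only <li>c</li>
    first_ul = find_tag(soup, "ul")
    print_tags(find_tags(soup, "li", exclude_(first_ul, relation=is_descendent)))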
87852
from .rman_translator import RmanTranslator from ..rman_sg_nodes.rman_sg_alembic import RmanSgAlembic from ..rfb_utils import transform_utils from ..rfb_utils import string_utils from ..rfb_logger import rfb_log class RmanAlembicTranslator(RmanTranslator): def __init__(self, rman_scene): super().__init__(rman_scene) self.bl_type = 'ALEMBIC' def export(self, ob, db_name): sg_node = self.rman_scene.sg_scene.CreateProcedural(db_name) sg_node.Define("DynamicLoad", None) rman_sg_alembic = RmanSgAlembic(self.rman_scene, sg_node, db_name) return rman_sg_alembic def export_deform_sample(self, rman_sg_alembic, ob, time_sample): pass def update(self, ob, rman_sg_alembic): rm = ob.renderman abc_filepath = string_utils.expand_string(rm.abc_filepath) bounds = (-100000, 100000, -100000, 100000, -100000, 100000 ) primvar = rman_sg_alembic.sg_node.GetPrimVars() primvar.SetString(self.rman_scene.rman.Tokens.Rix.k_dsoname, 'AlembicProcPrim') primvar.SetFloatArray(self.rman_scene.rman.Tokens.Rix.k_bound, bounds, 6) shutter_interval = self.rman_scene.bl_scene.renderman.shutter_angle / 360.0 shutter_open, shutter_close = 0, shutter_interval abc_frame = rm.abc_frame if rm.abc_use_scene_frame: rman_sg_alembic.is_frame_sensitive = True abc_frame = float(self.rman_scene.bl_frame_current) else: rman_sg_alembic.is_frame_sensitive = False abc_args = "-filename %s" % abc_filepath abc_args += " -frame %f" % abc_frame abc_args += " -fps %f" % rm.abc_fps abc_args += " -shutteropen %f" % shutter_open abc_args += " -shutterclose %f" % shutter_close abc_args += " -velocityscale %f" % rm.abc_velocityScale abc_args += " -ccw" primvar.SetString(self.rman_scene.rman.Tokens.Rix.k_data, abc_args) rman_sg_alembic.sg_node.SetPrimVars(primvar)
87875
from direct.directnotify import DirectNotifyGlobal from direct.distributed.DistributedObjectUD import DistributedObjectUD class DistributedAvatarUD(DistributedObjectUD): notify = DirectNotifyGlobal.directNotify.newCategory('DistributedAvatarUD')
87887
from resotolib.args import get_arg_parser, ArgumentParser from resoto_plugin_k8s import KubernetesCollectorPlugin def test_args(): arg_parser = get_arg_parser() KubernetesCollectorPlugin.add_args(arg_parser) arg_parser.parse_args() assert len(ArgumentParser.args.k8s_context) == 0 assert ArgumentParser.args.k8s_config is None assert len(ArgumentParser.args.k8s_cluster) == 0 assert len(ArgumentParser.args.k8s_apiserver) == 0 assert len(ArgumentParser.args.k8s_token) == 0 assert len(ArgumentParser.args.k8s_cacert) == 0 assert len(ArgumentParser.args.k8s_collect) == 0 assert len(ArgumentParser.args.k8s_no_collect) == 0 assert ArgumentParser.args.k8s_pool_size == 5 assert ArgumentParser.args.k8s_fork is False
87892
import math import functools import torch import torch.nn as nn import torch.nn.functional as F from . import block as B def pa_upconv_block( nf, unf, kernel_size=3, stride=1, padding=1, mode='nearest', upscale_factor=2, act_type='lrelu'): upsample = B.Upsample(scale_factor=upscale_factor, mode=mode) upconv = nn.Conv2d(nf, unf, kernel_size, stride, padding, bias=True) att = PA(unf) HRconv = nn.Conv2d(unf, unf, kernel_size, stride, padding, bias=True) a = B.act(act_type) if act_type else None return B.sequential(upsample, upconv, att, a, HRconv, a) class PA(nn.Module): """PA is pixel attention""" def __init__(self, nf): super(PA, self).__init__() self.conv = nn.Conv2d(nf, nf, 1) self.sigmoid = nn.Sigmoid() def forward(self, x): y = self.conv(x) y = self.sigmoid(y) out = torch.mul(x, y) return out class PACnv(nn.Module): def __init__(self, nf, k_size=3): super(PACnv, self).__init__() self.k2 = nn.Conv2d(nf, nf, 1) # 1x1 convolution nf->nf self.sigmoid = nn.Sigmoid() self.k3 = nn.Conv2d(nf, nf, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False) # 3x3 convolution self.k4 = nn.Conv2d(nf, nf, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False) # 3x3 convolution def forward(self, x): y = self.k2(x) y = self.sigmoid(y) out = torch.mul(self.k3(x), y) out = self.k4(out) return out class SCPA(nn.Module): """SCPA is modified from SCNet (<NAME> et al. Improving Convolutional Networks with Self-Calibrated Convolutions. In CVPR, 2020) Github: https://github.com/MCG-NKU/SCNet """ def __init__(self, nf, reduction=2, stride=1, dilation=1): super(SCPA, self).__init__() group_width = nf // reduction self.conv1_a = nn.Conv2d(nf, group_width, kernel_size=1, bias=False) self.conv1_b = nn.Conv2d(nf, group_width, kernel_size=1, bias=False) self.k1 = nn.Sequential( nn.Conv2d( group_width, group_width, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False) ) self.PACnv = PACnv(group_width) self.conv3 = nn.Conv2d( group_width * reduction, nf, kernel_size=1, bias=False) self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, x): residual = x out_a = self.conv1_a(x) out_b = self.conv1_b(x) out_a = self.lrelu(out_a) out_b = self.lrelu(out_b) out_a = self.k1(out_a) out_b = self.PACnv(out_b) out_a = self.lrelu(out_a) out_b = self.lrelu(out_b) out = self.conv3(torch.cat([out_a, out_b], dim=1)) out += residual return out class PAN(nn.Module): """ Efficient Image Super-Resolution Using Pixel Attention, in ECCV Workshop, 2020. 
Modified from https://github.com/zhaohengyuan1/PAN """ def __init__(self, in_nc, out_nc, nf, unf, nb, scale=4, self_attention=True, double_scpa=False, ups_inter_mode='nearest'): super(PAN, self).__init__() n_upscale = int(math.log(scale, 2)) if scale == 3: n_upscale = 1 elif scale == 1: unf = nf # SCPA SCPA_block_f = functools.partial(SCPA, nf=nf, reduction=2) self.scale = scale self.ups_inter_mode = ups_inter_mode # 'nearest' # 'bilinear' self.double_scpa = double_scpa # self-attention self.self_attention = self_attention if self_attention: spectral_norm = False max_pool = True #False poolsize = 4 # first convolution self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True) # main blocks self.SCPA_trunk = B.make_layer(SCPA_block_f, nb) self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) if self.double_scpa: self.SCPA_trunk2 = B.make_layer(SCPA_block_f, nb) self.trunk_conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) # self-attention if self.self_attention: self.FSA = B.SelfAttentionBlock(in_dim=nf, max_pool=max_pool, poolsize=poolsize, spectral_norm=spectral_norm) """ # original upsample #### upsampling self.upconv1 = nn.Conv2d(nf, unf, 3, 1, 1, bias=True) self.att1 = PA(unf) self.HRconv1 = nn.Conv2d(unf, unf, 3, 1, 1, bias=True) if self.scale == 4: self.upconv2 = nn.Conv2d(unf, unf, 3, 1, 1, bias=True) self.att2 = PA(unf) self.HRconv2 = nn.Conv2d(unf, unf, 3, 1, 1, bias=True) """ # new upsample upsampler = [] for i in range(n_upscale): if i < 1: if self.scale == 3: upsampler.append( pa_upconv_block(nf, unf, 3, 1, 1, self.ups_inter_mode, 3)) else: upsampler.append( pa_upconv_block(nf, unf, 3, 1, 1, self.ups_inter_mode)) else: upsampler.append( pa_upconv_block(unf, unf, 3, 1, 1, self.ups_inter_mode)) self.upsample = B.sequential(*upsampler) self.conv_last = nn.Conv2d(unf, out_nc, 3, 1, 1, bias=True) self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, x): fea = self.conv_first(x) trunk = self.trunk_conv(self.SCPA_trunk(fea)) if self.double_scpa: trunk = self.trunk_conv2(self.SCPA_trunk2(trunk)) # fea = fea + trunk # Elementwise sum, with FSA if enabled if self.self_attention: fea = self.FSA(fea + trunk) else: fea = fea + trunk """ #original upsample if self.scale == 2 or self.scale == 3: fea = self.upconv1(F.interpolate( fea, scale_factor=self.scale, mode=self.ups_inter_mode, align_corners=True)) fea = self.lrelu(self.att1(fea)) fea = self.lrelu(self.HRconv1(fea)) elif self.scale == 4: fea = self.upconv1(F.interpolate( fea, scale_factor=2, mode=self.ups_inter_mode, align_corners=True)) fea = self.lrelu(self.att1(fea)) fea = self.lrelu(self.HRconv1(fea)) fea = self.upconv2(F.interpolate( fea, scale_factor=2, mode=self.ups_inter_mode, align_corners=True)) fea = self.lrelu(self.att2(fea)) fea = self.lrelu(self.HRconv2(fea)) """ # new upsample fea = self.upsample(fea) out = self.conv_last(fea) if self.scale > 1: ILR = F.interpolate( x, scale_factor=self.scale, mode='bilinear', align_corners=True) else: ILR = x out = out + ILR return out
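# Hedged usage sketch (illustration only): exercises the self-contained attention
# blocks defined above on a random feature map. The tensor sizes are invented, and the
# full PAN model is deliberately not built here because it also needs the companion
# `block` module imported as B.
def _demo_attention_blocks():
    feats = torch.randn(1, 40, 32, 32)   # (batch, nf, height, width)
    pa = PA(nf=40)                        # pixel attention: sigmoid-gated 1x1 conv
    scpa = SCPA(nf=40, reduction=2)       # self-calibrated block with a residual add
    out = scpa(pa(feats))
    print(out.shape)                      # torch.Size([1, 40, 32, 32])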
87896
from collections import Iterable from typing import Union, List, Tuple import numpy as np from functools import partial from scipy import ndimage as ndi import cv2 import random from .utils import clipBBoxes from .base import AugBase # -------------- channel aug_cuda --------------- # class RGB2Gray(AugBase): def __init__(self): super().__init__() self.always = True @property def canBackward(self): return True def _backward_params(self, result): self._init_params(result) self.params = True def apply_to_img(self, result): if self.isForwarding: assert self.channels == 3 and self.dim == 2, f"{self.channels} {self.dim}" result['img'] = cv2.cvtColor(np.moveaxis(result['img'], 0, -1), cv2.COLOR_RGB2GRAY)[None, ...] result['img_shape'] = result['img'].shape else: assert self.channels == 1 result['img'] = np.repeat(result['img'], 3, axis=0).astype(np.uint8) result['img_shape'] = result['img'].shape return result class Gray2RGB(AugBase): def __init__(self): super().__init__() self.always = True @property def canBackward(self): return True def _backward_params(self, result): self._init_params(result) self.params = True def apply_to_img(self, result): if self.isForwarding: assert self.channels == 1 result['img'] = np.repeat(result['img'], 3, axis=0).astype(np.uint8) result['img_shape'] = result['img'].shape else: assert self.channels == 3 and self.dim == 2 result['img'] = result['img'][[0], ...] result['img_shape'] = result['img'].shape return result class ChannelSelect(AugBase): def __init__(self, index: (list, tuple, int)): super().__init__() self.always = True if isinstance(index, (int, float)): index = [int(index)] self.index = index assert isinstance(self.index, (list, tuple)) def __repr__(self): repr_str = self.__class__.__name__ repr_str += '(channel_index={})'.format(self.index) return repr_str @property def canBackward(self): return True def _forward_params(self, result): self._init_params(result) self.params = tuple([self.index, self.channels]) result[self.key_name] = self.params def _backward_params(self, result): self._init_params(result) params = result.pop(self.key_name, None) if params: self.params = params def apply_to_img(self, result): if self.isForwarding: index, _ = self.params result['img'] = result['img'].take(indices=index, axis=0) result['img_shape'] = result['img'].shape else: _, channels = self.params result['img'] = np.repeat(result['img'], channels, axis=0) result['img_shape'] = result['img'].shape return result class AnnotationMap(AugBase): def __init__(self, mapping: dict): super().__init__() self.always = True self.mapping = mapping assert isinstance(self.mapping, dict) assert all([isinstance(i, int) for i in self.mapping.keys()]) assert all([isinstance(i, int) for i in self.mapping.values()]) def __repr__(self): repr_str = self.__class__.__name__ repr_str += '(mapping={})'.format(self.mapping) return repr_str @property def canBackward(self): flag = len(set(self.mapping.values())) == len(self.mapping.values()) flag = flag and len(set(self.mapping.keys())) == len(self.mapping.keys()) return flag def _forward_params(self, result): self._init_params(result) self.params = self.mapping.copy() result[self.key_name] = self.params def _backward_params(self, result): self._init_params(result) params = result.pop(self.key_name, None) self.params = dict((v, k) for k, v in params.items()) def apply_to_cls(self, result): for key in result.get('cls_fields', []): for prev, curr in self.params.items(): result[key] = curr if result[key] == prev else result[key] return result def 
apply_to_seg(self, result): for key in result.get('seg_fields', []): for prev, curr in self.params.items(): result[key] = np.where(result[key] == prev, curr, result[key]) return result def apply_to_det(self, result): for key in result.get('det_fields', []): for prev, curr in self.params.items(): bboxes_labels = result[key][:, 2 * self.dim] bboxes_labels = np.where(bboxes_labels == prev, curr, bboxes_labels) result[key][:, 2 * self.dim] = bboxes_labels # -------------- normalization --------------- # class Normalize(AugBase): """ Normalize the image to [-1.0, 1.0]. support segmentation, detection and classification tasks support 2D and 3D images support forward and backward Args: mean (sequence): Mean values of each channels. std (sequence): Std values of each channels. """ def __init__(self, mean, std, clip=True): super().__init__() self.always = True self.mean = mean self.std = std self.clip = clip def __repr__(self): repr_str = self.__class__.__name__ repr_str += '(mean={}, std={}, clip={})'.format(self.mean, self.std, self.clip) return repr_str @property def canBackward(self): return True def _forward_params(self, result): self._init_params(result) # 3 channel [(128, 128, 128), (128, 128, 128)] self.params = [tuple(self.mean), tuple(self.std)] result[self.key_name] = self.params def _backward_params(self, result): self._init_params(result) params = result.pop(self.key_name, None) if params is not None: # [(-1, -1, -1), (1/128, 1/128, 1/128)] r_mean = - np.array(params[0]) / np.array(params[1]) r_std = 1 / np.array(params[1]) self.params = [tuple(r_mean), tuple(r_std)] def apply_to_img(self, result): mean, std = self.params assert self.channels == len(mean) == len(std), f"channels = {self.channels}" assert result['img'].shape == result['img_shape'] expand = (slice(None),) + (None,) * self.dim result['img'] = (result['img'] - np.array(mean, dtype=np.float32)[expand]) / np.array(std, dtype=np.float32)[ expand] if self.clip and self.isForwarding: result['img'] = np.clip(result['img'], -1.0, 1.0) class MultiNormalize(AugBase): """ Normalize the image to [-1.0, 1.0]. support segmentation, detection and classification tasks support 2D and 3D images support forward and backward Args: means (sequence): Mean values of each channels. stds (sequence): Std values of each channels. 
""" def __init__(self, means, stds, clip=True): super().__init__() self.always = True self.means = means self.stds = stds self.clip = clip assert len(means[0]) == 1, 'only support one channel image' def __repr__(self): repr_str = self.__class__.__name__ repr_str += '(means={}, stds={}, clip={})'.format(self.means, self.stds, self.clip) return repr_str @property def canBackward(self): return True def _forward_params(self, result): self._init_params(result) # [[(128, ), (192, )], [(128, ), (192, )]] self.params = [self.means, self.stds] result[self.key_name] = self.params def _backward_params(self, result): self._init_params(result) params = result.pop(self.key_name, None) if params is not None: # [[-128/128, -192/192], [1/128, 1/192]] self.params = [[], []] for mean, std in zip(params[0], params[1]): r_mean = - np.array(mean) / np.array(std) r_std = 1 / np.array(std) self.params[0].append(r_mean[0]) self.params[1].append(r_std[0]) def apply_to_img(self, result): if self.isForwarding: img = result['img'].astype(np.float32) means, stds = self.params assert self.channels == len(means[0]) == len(stds[0]), f"channels = {self.channels}, it should be 1" assert img.shape == result['img_shape'] expand = (slice(None),) + (None,) * self.dim imgs = [(img - np.array(mean, dtype=np.float32)[expand]) / np.array(std, dtype=np.float32)[expand] for mean, std in zip(means, stds)] img = np.concatenate(imgs, axis=0) if self.clip and self.isForwarding: img = np.clip(img, -1.0, 1.0) result['img'] = img result['img_shape'] = img.shape else: img = result['img'].astype(np.float32) mean, std = self.params assert self.channels == len(mean) == len(std), f"channels={self.channels}, mean={mean}" assert img.shape == result['img_shape'] expand = (slice(None),) + (None,) * self.dim img = (img - np.array(mean, dtype=np.float32)[expand]) / np.array(std, dtype=np.float32)[expand] img = np.mean(img, axis=0, keepdims=True) result['img'] = img result['img_shape'] = img.shape class AutoNormalize(AugBase): """Normalize the image to [-1.0, 1.0]. 
""" def __init__(self, method='norm', clip=False): super().__init__() self.always = True self.method = method self.clip = clip assert method in ['norm', 'minmax'], "method is one of ['norm', 'minmax']" def __repr__(self): repr_str = self.__class__.__name__ repr_str += '(method={}, clip={})'.format(self.method, self.clip) return repr_str @property def canBackward(self): return True def _forward_params(self, result): self._init_params(result) if self.method == 'norm': mean = np.mean(result['img'], axis=self.image_axes) std = np.std(result['img'], axis=self.image_axes) else: M = np.max(result['img'], axis=self.image_axes) m = np.min(result['img'], axis=self.image_axes) mean = (M + m) / 2 std = (M - m) / 2 if not isinstance(mean, Iterable): mean = [mean] std = [std] self.params = [tuple(mean), tuple(std)] result[self.key_name] = self.params def _backward_params(self, result): self._init_params(result) params = result.pop(self.key_name, None) if params is not None: r_mean = - np.array(params[0]) / np.array(params[1]) r_std = 1 / np.array(params[1]) self.params = [tuple(r_mean), tuple(r_std)] def apply_to_img(self, result): img = result['img'].astype(np.float32) mean, std = self.params assert self.channels == len(mean) == len(std), f"channels = {self.channels}, mean = len({len(mean)})" assert img.shape == result['img_shape'] expand = (slice(None),) + (None,) * self.dim img = (img - np.array(mean, dtype=np.float32)[expand]) / np.array(std, dtype=np.float32)[expand] if self.clip and self.isForwarding: img = np.clip(img, -1.0, 1.0) result['img'] = img # ------------- intensity ---------------- # class RandomGamma(AugBase): """ support segmentation, detection and classification tasks support 2D and 3D images """ def __init__(self, p, gamma): super().__init__() self.p = p self.gamma = gamma def __repr__(self): repr_str = self.__class__.__name__ repr_str += '(p={}, gamma={})'.format(self.p, self.gamma) return repr_str @property def canBackward(self): return True def _forward_params(self, result): self._init_params(result) if isinstance(self.gamma, (list, tuple)): # assert len(self.gamma) == self.channels, "len(gamma) must equals to image channels" assert len(self.gamma) == 2 and self.gamma[0] < self.gamma[1], \ "gamma is [min, max] format or just a number" gamma = tuple([self.get_range(self.gamma, 1)] * self.channels) self.params = gamma result[self.key_name] = self.params def _backward_params(self, result): self._init_params(result) params = result.pop(self.key_name, None) if params is not None: self.params = tuple([1 / p for p in params]) def apply_to_img(self, result): image = result['img'] new_image = np.zeros_like(image) for c in range(self.channels): c_image = image[c] temp_min, temp_max = np.min(c_image) - 1e-5, np.max(c_image) + 1e-5 c_image = (c_image - temp_min) / (temp_max - temp_min) c_image = np.power(c_image, self.params[c]) new_image[c] = c_image * (temp_max - temp_min) + temp_min result['img'] = new_image class RandomBlur(AugBase): """ support segmentation, detection and classification tasks support 2D and 3D images """ def __init__(self, p, sigma): super().__init__() self.p = p self.sigma = sigma def __repr__(self): repr_str = self.__class__.__name__ repr_str += '(p={}, sigma={})'.format(self.p, self.sigma) return repr_str @property def canBackward(self): return True def _forward_params(self, result): self._init_params(result) if isinstance(self.sigma, (list, tuple)): # assert len(self.sigma) == self.channels, "len(sigma_std) must equals to image channels" # sigma = [sigma * 
random.random() for sigma in self.sigma] assert len(self.sigma) == 2 and self.sigma[0] <= self.sigma[1] sigma = [self.get_range(self.sigma)] * self.channels else: sigma = [self.sigma * random.random()] * self.channels self.params = sigma result[self.key_name] = self.params def _backward_params(self, result): super()._backward_params(result) self.params = [True] def apply_to_img(self, result): if self.isForwarding: image = result['img'] new_image = np.zeros_like(image) for c in range(self.channels): c_image = image[c] c_image = ndi.gaussian_filter(c_image, sigma=self.params[c]) new_image[c] = c_image result['img'] = new_image class RandomNoise(AugBase): def __init__(self, p: float, method: str = 'uniform', mean: float = 0, std: float = 0.1): super().__init__() self.supported = ['uniform', 'normal'] assert method in self.supported, f"method should be one of {self.supported}" self.p = p self.method = method self.mean = mean self.std = std def __repr__(self): repr_str = self.__class__.__name__ repr_str += '(p={}, method={}, mean={}, std={})'.format(self.p, self.method, self.mean, self.std) return repr_str @property def canBackward(self): return True def _forward_params(self, result): self._init_params(result) if self.method == 'uniform': noise = ((np.random.rand(*self.array_shape) - 0.5) / 0.5) * self.std + self.mean else: noise = np.random.randn(*self.array_shape) * self.std + self.mean self.params = noise.astype(np.float32) result[self.key_name] = self.params def _backward_params(self, result): self._init_params(result) params = result.pop(self.key_name, None) if params is not None: self.params = -params def apply_to_img(self, result): result['img'] = result['img'] + self.params class RandomSpike(AugBase): def __init__(self, p, num_spikes: Union[int, Tuple[int, int]] = 1, intensity: Union[float, Tuple[float, float]] = (0.5, 1) ): super().__init__() self.p = p if isinstance(num_spikes, int): num_spikes = (1, num_spikes) self.num_spikes = num_spikes self.intensity = intensity def __repr__(self): repr_str = self.__class__.__name__ repr_str += '(num_spikes={})'.format(self.num_spikes) return repr_str @property def canBackward(self): return True def _forward_params(self, result): self._init_params(result) num_spikes_param = int(self.get_range(self.num_spikes)) intensity_param = self.get_range(self.intensity) spikes_positions = np.random.rand(num_spikes_param, self.dim) self.params = spikes_positions.tolist(), intensity_param result[self.key_name] = self.params def _backward_params(self, result): self._init_params(result) params = result.pop(self.key_name, None) if params is not None: spikes_positions, intensity_param = params self.params = spikes_positions, - intensity_param def apply_to_img(self, result): image = result['img'] spikes_positions, intensity = self.params transformed_result = [] for c in image: spectrum = self.fourier_transform(c) if intensity >= 1 and not self.isForwarding: tmp = spectrum.max() / intensity else: tmp = spectrum.max() spikes_positions = np.array(spikes_positions) shape = np.array(self.image_shape) mid_shape = shape // 2 indices = np.floor(spikes_positions * shape).astype(int) for index in indices: diff = index - mid_shape idx = mid_shape + diff spectrum[tuple(idx)] += tmp * intensity # If we wanted to add a pure cosine, we should add spikes to both # sides of k-space. However, having only one is a better # representation og the actual cause of the artifact in real # scans. Therefore the next two lines have been removed. 
# #i, j, k = mid_shape - diff # #spectrum[i, j, k] = spectrum.max() * intensity_factor cc = np.real(self.inv_fourier_transform(spectrum)) transformed_result.append(cc) result['img'] = np.stack(transformed_result, axis=0) class RandomBiasField(AugBase): def __init__(self, p, coefficients): super().__init__() self.p = p self.coefficients = coefficients self.order = 1 def __repr__(self): repr_str = self.__class__.__name__ repr_str += '(coefficients={})'.format(self.coefficients) return repr_str @property def canBackward(self): return True def _forward_params(self, result): self._init_params(result) random_coefficients = [] if self.dim == 3: for x_order in range(0, self.order + 1): for y_order in range(0, self.order + 1 - x_order): for _ in range(0, self.order + 1 - (x_order + y_order)): number = self.get_range(self.coefficients) random_coefficients.append(number) else: for x_order in range(0, self.order + 1): for y_order in range(0, self.order + 1 - x_order): number = self.get_range(self.coefficients) random_coefficients.append(number) random_coefficients = np.array(random_coefficients) self.params = random_coefficients result[self.key_name] = random_coefficients.tolist() def _backward_params(self, result): self._init_params(result) params = result.pop(self.key_name, None) if params is not None: self.params = params def apply_to_img(self, result): image = result['img'] transformed_result = [] for component in image: half_shape = np.array(component.shape) / 2 ranges = [np.arange(-n, n) for n in half_shape] bias_field = np.zeros(component.shape) if self.dim == 3: x_mesh, y_mesh, z_mesh = np.asarray(np.meshgrid(*ranges, indexing='ij')) x_mesh /= x_mesh.max() y_mesh /= y_mesh.max() z_mesh /= z_mesh.max() i = 0 for x_order in range(self.order + 1): for y_order in range(self.order + 1 - x_order): for z_order in range(self.order + 1 - (x_order + y_order)): random_coefficient = self.params[i] new_map = ( random_coefficient * x_mesh ** x_order * y_mesh ** y_order * z_mesh ** z_order ) bias_field += new_map i += 1 else: x_mesh, y_mesh = np.asarray(np.meshgrid(*ranges, indexing='ij')) x_mesh /= x_mesh.max() y_mesh /= y_mesh.max() i = 0 for x_order in range(self.order + 1): for y_order in range(self.order + 1 - x_order): random_coefficient = self.params[i] new_map = ( random_coefficient * x_mesh ** x_order * y_mesh ** y_order ) bias_field += new_map i += 1 bias_field = np.exp(bias_field).astype(np.float32) bias_field = bias_field / np.max(bias_field) if self.isForwarding: component = component * bias_field else: component = component / bias_field transformed_result.append(component) result['img'] = np.stack(transformed_result, axis=0) class RandomCutout(AugBase): FUSION = { 'mean': np.mean, 'min': np.min, 'max': np.max } def __init__(self, p, num_holes: int, size: int, apply_to: Union[tuple, list] = (), fill='mean'): super(RandomCutout, self).__init__() self.p = p self.num_holes = num_holes self.size = size self.apply_to = apply_to if isinstance(fill, (int, float)): self.fusion_fun = partial(lambda a, constant: constant, constant=fill) else: self.fusion_fun = RandomCutout.FUSION[str(fill)] def __repr__(self): repr_str = self.__class__.__name__ repr_str += '(p={}, num_holes={}, size={}, apply_to={})' \ .format(self.p, self.num_holes, self.size, self.apply_to) return repr_str @property def canBackward(self): return True def _forward_params(self, result): self._init_params(result) ctr = np.random.randint(0, self.image_shape[::-1], size=(self.num_holes, self.dim)) # xyz bboxes = np.concatenate([ctr, ctr + 
self.size], axis=1) bboxes = clipBBoxes(self.dim, bboxes, self.image_shape) self.params = bboxes result[self.key_name] = self.params def _backward_params(self, result): self._init_params(result) self.params = True def apply_to_img(self, result: dict): # print(result['img'].shape) if self.isForwarding: mask = np.zeros_like(result['img'])[[0], ...] for hole in self.params: slices = (slice(None),) + tuple(map(slice, hole[:self.dim][::-1], hole[-self.dim:][::-1])) mean_val = self.fusion_fun(result['img'][slices]) result['img'][slices] = mean_val mask[slices] = 1.0 result['cutout_mask'] = mask result['seg_fields'].append('cutout_mask') def apply_to_seg(self, result: dict): if self.isForwarding: for key in result['seg_fields']: if key in self.apply_to: for hole in self.params: slices = (slice(None),) + tuple(map(slice, hole[:self.dim][::-1], hole[-self.dim:][::-1])) result[key][slices] = 0 class ForegroundCutout(RandomCutout): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def _forward_params(self, result): self._init_params(result) if 'gt_seg_skeleton' in result.keys(): foreground = result['gt_seg_skeleton'] else: foreground = result['gt_seg'] points = np.argwhere(foreground[0] > 0) # zyx if len(points): ctr = points[np.random.choice(np.arange(len(points)), self.num_holes)][:, ::-1] # xyz bboxes = np.concatenate([ctr, ctr + self.size], axis=1) bboxes = clipBBoxes(self.dim, bboxes, self.image_shape) self.params = bboxes result[self.key_name] = self.params else: RandomCutout._forward_params(self, result)
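# Hedged illustration (standalone sketch, not wired into AugBase): the transforms above
# rely on AugBase bookkeeping (result dicts, forward/backward params), so this only
# mirrors the per-channel broadcasting used by Normalize.apply_to_img. The image shape
# and the 128/128 statistics are invented for illustration.
def _demo_normalize_arithmetic():
    dim = 2                                     # 2D image -> (C, H, W) layout
    img = np.random.randint(0, 256, (3, 64, 64)).astype(np.float32)
    mean, std = (128.0, 128.0, 128.0), (128.0, 128.0, 128.0)
    expand = (slice(None),) + (None,) * dim     # broadcast (C,) stats over (C, H, W)
    normed = (img - np.array(mean, dtype=np.float32)[expand]) / np.array(
        std, dtype=np.float32
    )[expand]
    normed = np.clip(normed, -1.0, 1.0)         # forward pass clips to [-1, 1]
    print(normed.min(), normed.max())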
87916
from .lop_setup import * def test_diag(): n = 4 x = jnp.arange(n)+1. T = lop.diagonal(x) X = lop.to_matrix(T) assert_allclose(X, jnp.diag(x)) y = T.times(x) assert_allclose(y, x**2) assert lop.dot_test_real(keys[0], T) def test_zero(): m, n = 4, 5 Z = lop.zero(n, m) x = random.normal(keys[0], (m,)) y = random.normal(keys[0], (n,)) assert_allclose(Z.times(y), jnp.zeros((m,))) assert_allclose(Z.trans(x), jnp.zeros((n,))) A = lop.to_matrix(Z) A2 = lop.to_complex_matrix(Z) assert_allclose(A, A2) def test_flipud(): n = 4 x = jnp.arange(n) + 1. k = 8 X = jnp.reshape(jnp.arange(n*k), (n,k)) + 1. F = lop.flipud(n) assert_allclose(F.times(x), x[::-1]) assert_allclose(F.times(X), X[::-1, :]) assert_allclose(F.trans(x), x[::-1]) assert_allclose(F.trans(X), X[::-1, :]) assert_allclose(F.times(F.times(x)), x) assert_allclose(F.trans(F.times(x)), x) assert_allclose(F.times(F.times(X)), X) assert_allclose(F.trans(F.times(X)), X) assert lop.dot_test_real(keys[0], F) assert lop.dot_test_complex(keys[0], F) def test_sum(): n = 4 T = lop.jit(lop.sum(n)) A = lop.to_matrix(T) assert_allclose(A, jnp.ones((1,4))) A = lop.to_adjoint_matrix(T) assert_allclose(A, jnp.ones((4,1))) x = jnp.arange(n) + 1. s = jnp.sum(x) assert_allclose(T.times(x), s) assert_allclose(T.trans(s), jnp.ones(n) * s) assert lop.dot_test_real(keys[0], T) assert lop.dot_test_complex(keys[0], T) def test_zero_padding(): n = 4 T = lop.jit(lop.pad_zeros(n, 2, 2)) assert not T.matrix_safe x = jnp.arange(n) + 1. y = T.times(x) z = jnp.zeros(2) yy = jnp.concatenate((z, x, z)) assert_allclose(y, yy) k = 8 X = jnp.reshape(jnp.arange(n*k), (n,k)) + 1. Z = jnp.zeros((2, k)) Y = jnp.vstack((Z, X, Z)) assert_allclose(T.times_2d(X), Y) assert lop.dot_test_real(keys[0], T) def test_real_part(): n = 4 T = lop.jit(lop.real(n)) x = jnp.arange(n) + 1. xx = x + 4j assert_allclose(T.times(xx), x) assert_allclose(T.trans(T.times(xx)), x) assert lop.dot_test_real(keys[0], T) assert lop.dot_test_complex(keys[0], T) def test_symmetrize(): n = 4 T = lop.jit(lop.symmetrize(n)) x = random.normal(keys[0], (n,)) y = jnp.concatenate((x[::-1], x)) assert_allclose(T.times(x), y) assert_allclose(T.trans(T.times(x)), 2*x) assert lop.dot_test_real(keys[1], T) assert lop.dot_test_complex(keys[1], T)
87925
from enigma import eConsoleAppContainer from Screens.Screen import Screen from Screens.MessageBox import MessageBox from Screens.ChoiceBox import ChoiceBox from Screens.InputBox import InputBox from Tools.Log import Log from Plugins.Plugin import PluginDescriptor import string from random import Random class ChangePasswordScreen(ChoiceBox): WINDOW_TITLE = _("Change Password") KEY_SET = "set" KEY_RANDOM = "random" KEY_LOCK = "lock" KEY_REMOVE = "remove" def __init__(self, session, user="root"): options = [ (_("Enter a new password"), self.KEY_SET), (_("Generate a random password"), self.KEY_RANDOM), (_("Disable password-based login"), self.KEY_LOCK), (_("Remove password protection (DANGEROUS!)"), self.KEY_REMOVE), ] ChoiceBox.__init__(self, session, title=_("If you want to login to your Dreambox using SSH, FTP or a remote web browser, you need to configure a password first.\nThe username will be '%s'.") %(user), list=options, windowTitle=ChangePasswordScreen.WINDOW_TITLE) self._user = user self._password = "" self._wasLocked = False self._container = eConsoleAppContainer() self.__appClosed_conn = self._container.appClosed.connect(self._commandFinished) def go(self): selected = self["list"].l.getCurrentSelection() Log.w(selected) selected = selected and selected[0] if not selected: return selected = selected[1] if selected == self.KEY_SET: self.session.openWithCallback(self._onPasswordInputFinished, InputBox, title=_("Please enter a new password for %s") %(self._user), windowTitle=_("New Password"), text=self._getRandom()) return elif selected == self.KEY_RANDOM: self._apply(self._getRandom()) elif selected == self.KEY_LOCK: self._lock() elif selected == self.KEY_REMOVE: self._apply("") def _apply(self, password): Log.w("Changing password for %s" % (self._user,)) self._password = password if password: self._container.execute("echo \"%s:%s\" | chpasswd" % (self._user, password)) else: self._container.execute("passwd -d %s" % self._user) def _getRandom(self): passwdChars = string.letters + string.digits passwdLength = 10 return ''.join(Random().sample(passwdChars, passwdLength)) def _lock(self): Log.w("Removing password for %s" % (self._user)) self._password = "" self._wasLocked = True self._container.execute("passwd -l %s" % self._user) def _commandFinished(self,retval): if retval==0: type=MessageBox.TYPE_INFO windowTitle=_("Password changed") if self._password: message = _("The password for '%s' was successfully changed to:\n\n%s") % (self._user, self._password) else: type = MessageBox.TYPE_WARNING if self._wasLocked: windowTitle=_("Password locked") message = _("The password for '%s' is now disabled!") % (self._user,) else: windowTitle=_("Password removed") message = _("The password protection for '%s' was removed!") %(self._user,) else: windowTitle=_("Password change failed!") message=_("Unable to set new password for '%s'") % self._user type=MessageBox.TYPE_ERROR self.session.open(MessageBox, message , type, windowTitle=windowTitle) self.close() def _onPasswordInputFinished(self, password): if password: self._apply(password) def startChange(menuid): if menuid != "system": return [ ] return [(_("Password"), main, "change_root_passwd", 50)] def main(session, **kwargs): session.open(ChangePasswordScreen) def Plugins(**kwargs): return PluginDescriptor( name=ChangePasswordScreen.WINDOW_TITLE, description=_("Change or reset the root password of your dreambox"), where = [PluginDescriptor.WHERE_MENU], fnc = startChange)
87964
from collections import deque

from .types import is_seqcont


__all__ = ['tree_leaves', 'ltree_leaves', 'tree_nodes', 'ltree_nodes']


def tree_leaves(root, follow=is_seqcont, children=iter):
    """Iterates over tree leaves."""
    q = deque([[root]])
    while q:
        node_iter = iter(q.pop())
        for sub in node_iter:
            if follow(sub):
                q.append(node_iter)
                q.append(children(sub))
                break
            else:
                yield sub

def ltree_leaves(root, follow=is_seqcont, children=iter):
    """Lists tree leaves."""
    return list(tree_leaves(root, follow, children))

def tree_nodes(root, follow=is_seqcont, children=iter):
    """Iterates over all tree nodes."""
    q = deque([[root]])
    while q:
        node_iter = iter(q.pop())
        for sub in node_iter:
            yield sub
            if follow(sub):
                q.append(node_iter)
                q.append(children(sub))
                break

def ltree_nodes(root, follow=is_seqcont, children=iter):
    """Lists all tree nodes."""
    return list(tree_nodes(root, follow, children))
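# Hedged usage sketch (editor addition): assumes `is_seqcont` treats lists and
# tuples as containers to descend into, as its name suggests. Because of the
# relative import above, run this via `python -m <package>.<module>`.
if __name__ == "__main__":
    data = [1, [2, (3, 4)], 5]
    print(ltree_leaves(data))  # [1, 2, 3, 4, 5]
    print(ltree_nodes(data))
    # [[1, [2, (3, 4)], 5], 1, [2, (3, 4)], 2, (3, 4), 3, 4, 5]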
87988
import unittest from checkov.common.models.enums import CheckResult from checkov.terraform.checks.resource.aws.MSKClusterEncryption import check class TestMSKClusterEncryption(unittest.TestCase): def test_failure(self): resource_conf = { "name": "test-project", } scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.FAILED, scan_result) def test_failure_non_tls(self): resource_conf = { "name": "test-project", "encryption_info": [ { "encryption_at_rest_kms_key_arn": "aws_kms_key.kms.arn", "encryption_in_transit": [ { "client_broker": ["PLAINTEXT"], "in_cluster": ["true"], } ], } ], } scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.FAILED, scan_result) def test_failure_in_cluster(self): resource_conf = { "name": "test-project", "encryption_info": [ { "encryption_at_rest_kms_key_arn": ["aws_kms_key.kms.arn"], "encryption_in_transit": [ { "client_broker": ["TLS"], "in_cluster": [False], } ], } ], } scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.FAILED, scan_result) def test_success(self): resource_conf = { "name": "test-project", "encryption_info": [ { "encryption_at_rest_kms_key_arn": ["aws_kms_key.kms.arn"], "encryption_in_transit": [ { "client_broker": ["TLS"], "in_cluster": ["true"], } ], } ], } scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.PASSED, scan_result) def test_success_no_encrypt_block(self): resource_conf = { "name": "test-project", "encryption_info": [ { "encryption_at_rest_kms_key_arn": ["aws_kms_key.kms.arn"], } ], } scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.PASSED, scan_result) # Regression test for https://github.com/bridgecrewio/checkov/issues/747 def test_success_no_encryption_at_rest_kms_key_arn_specified(self): resource_conf = { "name": "test-project", "encryption_info": [{}], } scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.PASSED, scan_result) # Regression test for https://github.com/bridgecrewio/checkov/issues/747 def test_success_encryption_in_transit_and_no_encryption_at_rest_kms_key_arn_specified(self): resource_conf = { "name": "test-project", "encryption_info": [ { "encryption_in_transit": [ { "client_broker": ["TLS"], "in_cluster": [True], } ], } ], } scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.PASSED, scan_result) if __name__ == '__main__': unittest.main()
88011
import unittest from typing import List from nayvy.importing.import_statement import ( ImportAsPart, SingleImport, ImportStatement ) class TestImportAsPart(unittest.TestCase): def test_of(self) -> None: # named import res = ImportAsPart.of('tensorflow as tf') assert res is not None assert res.import_what == 'tensorflow' assert res.as_what == 'tf' # non-named import res = ImportAsPart.of('sys') assert res is not None assert res.import_what == 'sys' assert res.as_what == '' return class TestImportStatement(unittest.TestCase): def test_of(self) -> None: # just import res = ImportStatement.of('import tensorflow as tf') assert res is not None assert res.from_what == '' assert len(res.import_as_parts) == 1 assert res.import_as_parts[0].import_what == 'tensorflow' assert res.import_as_parts[0].as_what == 'tf' # from import res = ImportStatement.of('from typing import Dict, List, Optional') assert res is not None assert res.from_what == 'typing' assert len(res.import_as_parts) == 3 assert res.import_as_parts[0].import_what == 'Dict' assert res.import_as_parts[2].as_what == '' # complex pattern res = ImportStatement.of('from typing import ( Dict, List, Optional, ) ') # noqa assert res is not None assert res.from_what == 'typing' assert len(res.import_as_parts) == 3 assert res.import_as_parts[0].import_what == 'Dict' assert res.import_as_parts[2].as_what == '' def test_merge(self) -> None: # merge if from_what is the same import_statement = ImportStatement( 'hoge', [ ImportAsPart('AAA', 'a'), ImportAsPart('BBB', 'b'), ], ) target = ImportStatement( 'hoge', [ ImportAsPart('BBB', 'b'), ImportAsPart('CCC', 'c'), ] ) import_statement.merge(target) assert len(import_statement.import_as_parts) == 3 # not merge if from_what is not the same import_statement = ImportStatement( 'hoge', [ ImportAsPart('AAA', 'a'), ImportAsPart('BBB', 'b'), ], ) target = ImportStatement( 'fuga', [ ImportAsPart('BBB', 'b'), ImportAsPart('CCC', 'c'), ] ) import_statement.merge(target) assert len(import_statement.import_as_parts) == 2 def test_removed(self) -> None: import_statement = ImportStatement( 'hoge', [ ImportAsPart('AAA', 'a'), ImportAsPart('BBB', 'b'), ImportAsPart('c', ''), ], ) # remove as-imported name removed = import_statement.removed('hoge.AAA as a') assert removed is not None # remove no-as-import name removed = removed.removed('hoge.c') assert removed is not None # assert that one import is remained assert len(removed.import_as_parts) == 1 assert removed.import_as_parts[0].name == 'b' # remove the last one removed = removed.removed('hoge.BBB as b') assert removed is None return def test_merge_list(self) -> None: import_statements = [ ImportStatement( 'hoge', [ ImportAsPart('AAA', 'a'), ImportAsPart('BBB', 'b'), ] ), ImportStatement( 'fuga', [ ImportAsPart('AAA', 'a'), ImportAsPart('BBB', 'b'), ] ), ImportStatement( 'hoge', [ ImportAsPart('CCC', 'c'), ImportAsPart('DDD', 'd'), ] ), ] actuals = ImportStatement.merge_list(import_statements) expecteds = [ ImportStatement( 'hoge', [ ImportAsPart('AAA', 'a'), ImportAsPart('BBB', 'b'), ImportAsPart('CCC', 'c'), ImportAsPart('DDD', 'd'), ] ), ImportStatement( 'fuga', [ ImportAsPart('AAA', 'a'), ImportAsPart('BBB', 'b'), ] ), ] assert len(actuals) == len(expecteds) assert all([ str(a) == str(e) for a, e in zip(actuals, expecteds) ]) is True def test_of_lines(self) -> None: lines = [ 'import os # tailing comment', '# comment above', 'import sys', 'from pprint import (' ' pprint as pp,', ' pformat,', ')', 'from typing import (', ' List as L, # tailing comment 1', ' Dict as 
D,# tailing comment 2', ')', '', '# multi-line comment 1', '# multi-line comment 2', 'import tensorflow as tf', ] actuals = ImportStatement.of_lines(lines) expecteds: List[ImportStatement] = [ ImportStatement( '', [ ImportAsPart('os', '', 'tailing comment'), ], ), ImportStatement( '', [ ImportAsPart('sys', '', 'comment above'), ], ), ImportStatement( 'pprint', [ ImportAsPart('pprint', 'pp'), ImportAsPart('pformat', ''), ], ), ImportStatement( 'typing', [ ImportAsPart('List', 'L', 'tailing comment 1'), ImportAsPart('Dict', 'D', 'tailing comment 2'), ], ), ImportStatement( '', [ ImportAsPart('tensorflow', 'tf', ( 'multi-line comment 1 ' 'multi-line comment 2' )), ], ), ] assert actuals is not None assert len(actuals) == len(expecteds) assert all([ str(a) == str(e) for a, e in zip(actuals, expecteds) ]) is True return def test_repr(self) -> None: import_statement = ImportStatement( 'hoge', [ ImportAsPart('Hoge', 'hoge'), ImportAsPart('Fuga', 'fuga'), ], ) assert ( str(import_statement) == 'from hoge import Fuga as fuga, Hoge as hoge' ) class TestSingleImport(unittest.TestCase): def test_to_line(self) -> None: single_import = SingleImport( 'hoge', 'from .Hoge import hoge', 0, ) assert ( single_import.to_line() == 'hoge : from .Hoge import hoge' ) return def test_trim_statement(self) -> None: single_import = SingleImport( 'hoge', 'from .Hoge import hoge', # length is 22 0, ) assert ( single_import.trim_statement(20) == 'from .H~ import hoge' ) assert ( single_import.trim_statement(22) == 'from .Hoge import hoge' ) assert ( single_import.trim_statement(17) == 'from ~ import hoge' ) return
88020
from boxnotes2html.table import Table class TestTable: def test_render_markdown(self): table = Table() table.add_data(1, 1, "Name") table.add_data(1, 2, "Country") table.add_data(1, 3, "Birthdate") table.add_data(2, 1, "Jill") table.add_data(2, 2, "Australia") table.add_data(2, 3, "2000-01-01") table.add_data(3, 1, "Alfonse") table.add_data(3, 2, "Chile") table.add_data(3, 3, "1981-02-04") expected = "| Name | Country | Birthdate |\n" + \ "| :-- | :-- | :-- |\n" + \ "| Jill | Australia | 2000-01-01 |\n" + \ "| Alfonse | Chile | 1981-02-04 |\n" assert table.render_markdown() == expected def test_append_data_markdown(self): """ Test that appending data to add_data works as expected and renders multiple lines properly. """ table = Table() table.add_data(1, 1, "Name") table.add_data(1, 1, "(Full name)") table.add_data(1, 1, "(but with no spaces)") table.add_data(1, 1, "(as in full name camelcase)") table.add_data(2, 1, "JillFromDownUnder") expected = "| Name<br>(Full name)<br>(but with no spaces)<br>(as in full name camelcase) |\n" + \ "| :-- |\n" + \ "| JillFromDownUnder |\n" assert table.render_markdown() == expected
88032
from django.db import models


class DNSBuildRun(models.Model):
    """
    Every time the DNS build scripts are run, one of these objects is created
    to track which zones we have built and which zones we haven't built
    (since nothing has changed in them). :class:`BuildManifest` objects relate
    back to a :class:`DNSBuildRun` instance and represent one zone's state.
    """
    log = models.TextField()
    # stats_json = models.JSONField("stats", max_length=max_length)

    def record(self, root_domain, soa, zfiles, zhash):
        bm = BuildManifest(zname=root_domain.name, files=','.join(zfiles),
                           zhash=zhash, build_run=self)
        bm.save()
        return bm

    def stash(self, k, v):
        self.stats_json[k] = v

    def get_manifests(self, **kwargs):
        return BuildManifest.objects.filter(build_run=self, **kwargs)


class BuildManifest(models.Model):
    max_length = 256
    zname = models.CharField(max_length=max_length)
    files = models.CharField(max_length=max_length)
    zhash = models.CharField(max_length=max_length)
    build_run = models.ForeignKey(DNSBuildRun)
    # stats_json = models.JSONField("stats", max_length=max_length)

    def stash(self, k, v):
        self.stats_json[k] = v
88045
A = [1, 2, 3, 4, 5, 6]
B = [1, 2, 3, 4, 5, 6]
C = [1, 2, 3, 4, 5, 6]

for a in A:
    for b in B:
        for c in C:
            if a > b > c:
                print ">", (a, b, c)
            if a < b < c:
                print "<", (a, b, c)
            if a >= b >= c:
                print ">=", (a, b, c)
            if a <= b <= c:
                print "<=", (a, b, c)
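# Editor note (hedged sketch): Python evaluates a chained comparison such as
# "a > b > c" as "a > b and b > c", with b evaluated only once. A quick
# equivalence check over the same ranges:
for a in A:
    for b in B:
        for c in C:
            assert (a > b > c) == (a > b and b > c)
            assert (a <= b <= c) == (a <= b and b <= c)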
88053
from fastapi import APIRouter from streams_explorer.api.routes import ( graph, kubernetes_health, metrics, node, pipelines, update, ) router = APIRouter() router.include_router(update.router, prefix="/update") router.include_router(graph.router, prefix="/graph") router.include_router(pipelines.router, prefix="/pipelines") router.include_router(node.router, prefix="/node") router.include_router(metrics.router, prefix="/metrics") router.include_router(kubernetes_health.router, prefix="/health")
88059
class EvaluationTask:
    """EvaluationTask class, containing EvaluationTask information."""

    def __init__(self, json, client):
        self._json = json

        self.id = json["id"]
        self.initial_response = json.get("initial_response", None)
        self.expected_response = json["expected_response"]

        self._client = client

    def __hash__(self):
        return hash(self.id)

    def __str__(self):
        return f"EvaluationTask(id={self.id})"

    def __repr__(self):
        return f"EvaluationTask({self._json})"

    def as_dict(self):
        """Returns all attributes as a dictionary"""
        return self._json
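# Minimal usage sketch (editor addition): the payload keys and the None client
# are illustrative, inferred from the constructor above.
if __name__ == "__main__":
    payload = {
        "id": "task-1",
        "initial_response": "hello",
        "expected_response": "hello world",
    }
    task = EvaluationTask(payload, client=None)
    print(task)                       # EvaluationTask(id=task-1)
    print(task.as_dict() == payload)  # True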
88069
from builtins import zip from builtins import object import re import json def parse_json(json_path): replacements = [] with open(json_path) as f: data = json.load(f) for replacement_dict in data['replacements']: try: replacements.append(Replacement(**{ x: replacement_dict[x] for x in ('find', 'replace', 'key_path') if x in replacement_dict })) except Exception as e: raise ValueError( "Failed to parse %s (data: '%s'): %s" % ( json_path, replacement_dict, e)) return replacements def merge_two_dicts(x, y): z = x.copy() z.update(y) return z class Replacement(object): DEFAULT_KEY_PATH = "chunks.[].archives.[].objects.[].text.[]" def __init__(self, find, replace, key_path=DEFAULT_KEY_PATH): self.find = find self.replace = replace if not isinstance(key_path, list): self.key_path = key_path.split('.') else: self.key_path = key_path def __repr__(self): return "s/%s/%s/g" % (self.find, self.replace) def correct_multiline_replacement(self, _dict): """ If dealing with a multiline text block, Keynote does some bookkeeping with the tableParaStyle property to figure out which paragraph styles apply to which separate paragraphs. This method corrects for that and allows us to do multiline replacements. Fun fact: without this bookkeeping, one of these tableParaStyle indices might point beyond the end of the text, which causes Keynote to make the text box 2^16 points tall, eventually forcing it to crash. """ text = _dict['text'][0] new_offsets = [0] surrogate_pair_correction = 0 for i, c in enumerate(text): if c == '\n': new_offsets.append(i + 1 + surrogate_pair_correction) if ord(c) > 0xFFFF: surrogate_pair_correction += 1 entries = _dict['tableParaStyle']['entries'] if len(entries) != len(new_offsets): raise NotImplementedError( "New line count doesn't match old line count in data: %s", text) for para_entry, offset in zip(entries, new_offsets): para_entry['characterIndex'] = offset return _dict def correct_charstyle_replacement(self, data, key_path, depth, on_replace): """ If dealing with text that contains changing styles, Keynote does even more bookkeeping with the tableCharStyle property to figure out which character styles to apply to which ranges. This method corrects for that and allows us to keep consistent styles on replaced text. TODO: Throw an error or print a warning if the text being replaced spans multiple style blocks. 
""" new_start = 0 text = data['text'][0] if 'tableCharStyle' not in data \ or len(data['tableCharStyle']['entries']) == 1: old_value = data[key_path[0]] new_value = self.perform_on(old_value, depth + 1, on_replace) return merge_two_dicts(data, {key_path[0]: new_value}) char_style_entries = data['tableCharStyle']['entries'] parts = [] new_indices = [] for start, end in zip(char_style_entries, char_style_entries[1:]): start_index = start['characterIndex'] end_index = end['characterIndex'] chunk = text[start_index:end_index] chunk = re.sub(self.find, self.replace, chunk) parts.append(chunk) new_end = new_start + len(chunk) new_indices.append(new_start) new_start = new_end new_indices.append(new_indices[-1] + len(parts[-1])) parts.append(text[char_style_entries[-1]['characterIndex']:]) data['text'][0] = ''.join(parts) for new_start, entry in zip(new_indices, char_style_entries): entry['characterIndex'] = new_start return data def perform_on(self, data, depth=0, on_replace=None): key_path = self.key_path[depth:] if not key_path: new_value = re.sub(self.find, self.replace, data) if new_value != data and on_replace: on_replace(self, data, new_value) return new_value if key_path[0] == "[]": return [ self.perform_on(obj, depth + 1, on_replace) for obj in data ] if key_path[0] in data: if key_path[0] == 'text': output = self.correct_charstyle_replacement( data, key_path, depth, on_replace) output = self.correct_multiline_replacement(output) else: old_value = data[key_path[0]] new_value = self.perform_on(old_value, depth + 1, on_replace) output = merge_two_dicts(data, {key_path[0]: new_value}) return output else: return data def should_replace(self, data, depth=0): key_path = self.key_path[depth:] if not key_path: return re.search(self.find, data) is not None elif key_path[0] == "[]": return any([ self.should_replace(obj, depth + 1) for obj in data ]) elif hasattr(data, key_path[0]): return self.should_replace( getattr(data, key_path[0]), depth + 1)
88091
class Solution:
    def climbStairs(self, n: int) -> int:
        if n == 1 or n == 0:
            return 1
        prev, curr = 1, 1
        for _ in range(2, n + 1):
            prev, curr = curr, prev + curr
        return curr
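# Editor sketch: climbStairs(n) follows the Fibonacci recurrence
# f(n) = f(n-1) + f(n-2), since the last move is either 1 or 2 steps.
if __name__ == "__main__":
    s = Solution()
    print([s.climbStairs(n) for n in range(1, 8)])  # [1, 2, 3, 5, 8, 13, 21]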
88123
from matchbook.endpoints.baseendpoint import BaseEndpoint
from matchbook.exceptions import AuthError


class Logout(BaseEndpoint):

    def __call__(self, session=None):
        response = self.request(
            "DELETE", self.client.urn_main, 'security/session',
            data=self.data, session=session
        )
        self.client.set_session_token(None, None)

    @property
    def data(self):
        # The password expression was elided in the source; the client
        # credential attribute below is an assumption.
        return {'username': self.client.username, 'password': self.client.password}
88151
import os

from pymongo import MongoClient

DEFAULT_MONGODB_HOST = "mongodb://mongo:password@127.0.0.1:27017"


def create_client() -> MongoClient:
    host = os.getenv("MONGODB_HOST", DEFAULT_MONGODB_HOST)
    return MongoClient(host)
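# Hedged usage sketch (editor addition): assumes a MongoDB instance is
# reachable at MONGODB_HOST or at the default URI above.
if __name__ == "__main__":
    client = create_client()
    print(client.admin.command("ping"))  # {'ok': 1.0} when the server responds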
88163
def count_and_print(f, l):
    """Write each element of `l` to file handle `f`, one per line,
    close the file, and return the number of elements written."""
    c = 0
    for e in l:
        f.write(e)
        f.write("\n")
        c += 1
    f.close()
    return c
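# Minimal usage sketch (editor addition): the file name is illustrative; note
# that count_and_print closes the handle it is given.
if __name__ == "__main__":
    fh = open("lines.txt", "w")
    n = count_and_print(fh, ["alpha", "beta", "gamma"])
    print(n)  # 3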
88212
import numpy as np import deerlab as dl #--------------------------------------------------------------------------------------- def assert_bgmodel(model,Bref): "Check the correct behaviour of the core functionality of a background model" t = np.linspace(-5,5,500) # Extract model information meta = model.getmetadata() par0 = meta['par0'] lower = meta['lb'] upper = meta['ub'] paramnames = meta['names'] units = meta['units'] # Calculate under different conditions B1 = model(t,*par0) B2 = model(t.T,*par0) B3 = model(t,*lower) B4 = model(t,*upper) B5 = model(2.5,*par0) # Assert assert all(B1 == B2) assert all(~np.isnan(B1)) and all(~np.isnan(B2)) and all(~np.isnan(B3)) and all(~np.isnan(B4)) assert abs(B5 - Bref) < 1e-8 assert len(paramnames) == len(par0) and len(units) == len(par0) #--------------------------------------------------------------------------------------- def test_bg_hom3d(): Bref = 0.882785339742350 # Reference from DeerLab 0.9.2 on MATLAB assert_bgmodel(dl.bg_hom3d,Bref) def test_bg_homfractal(): Bref = 0.882785339742350 # Reference from DeerLab 0.9.2 on MATLAB assert_bgmodel(dl.bg_homfractal,Bref) def test_bg_hom3dex(): Bref = 0.882896490000000 # Reference from DeerLab 0.9.2 on MATLAB assert_bgmodel(dl.bg_hom3dex,Bref) def test_bg_exp(): Bref = 0.416862019678508 # Reference from DeerLab 0.9.2 on MATLAB assert_bgmodel(dl.bg_exp,Bref) def test_bg_strexp(): Bref = 0.535261428518990 # Reference from DeerLab 0.9.2 on MATLAB assert_bgmodel(dl.bg_strexp,Bref) def test_bg_prodstrexp(): Bref = 0.286504796860190 # Reference from DeerLab 0.9.2 on MATLAB assert_bgmodel(dl.bg_prodstrexp,Bref) def test_bg_sumstrexp(): Bref = 0.535261428518990 # Reference from DeerLab 0.9.2 on MATLAB assert_bgmodel(dl.bg_sumstrexp,Bref) def test_bg_poly1(): Bref = -1.500000000000000 # Reference from DeerLab 0.9.2 on MATLAB assert_bgmodel(dl.bg_poly1,Bref) def test_bg_poly2(): Bref = -7.750000000000000 # Reference from DeerLab 0.9.2 on MATLAB assert_bgmodel(dl.bg_poly2,Bref) def test_bg_poly3(): Bref = -23.37500000000000 # Reference from DeerLab 0.9.2 on MATLAB assert_bgmodel(dl.bg_poly3,Bref)
88244
from dataclasses import astuple, fields from pywps import LiteralInput from ravenpy.models import HBVEC from raven import config from raven.processes import RavenProcess from . import wpsio as wio # Defaults for this process params_defaults = HBVEC.Params( par_x01=0.05984519, par_x02=4.072232, par_x03=2.001574, par_x04=0.03473693, par_x05=0.09985144, par_x06=0.506052, par_x07=3.438486, par_x08=38.32455, par_x09=0.4606565, par_x10=0.06303738, par_x11=2.277781, par_x12=4.873686, par_x13=0.5718813, par_x14=0.04505643, par_x15=0.877607, par_x16=18.94145, par_x17=2.036937, par_x18=0.4452843, par_x19=0.6771759, par_x20=1.141608, par_x21=1.024278, ) params = LiteralInput( "params", "Comma separated list of model parameters", abstract="Parameters: " + ", ".join(f.name for f in fields(params_defaults)), data_type="string", default=", ".join(map(str, astuple(params_defaults))), min_occurs=0, max_occurs=config.max_parallel_processes, ) class RavenHBVECProcess(RavenProcess): identifier = "raven-hbv-ec" abstract = "HBV-EC hydrological model" title = "" version = "" model_cls = HBVEC tuple_inputs = {"params": HBVEC.Params} inputs = [ wio.ts, wio.nc_spec, params, wio.start_date, wio.end_date, wio.nc_index, wio.duration, wio.run_name, wio.area, wio.latitude, wio.longitude, wio.elevation, wio.rain_snow_fraction, wio.evaporation, wio.ow_evaporation, ]
88309
from RePoE.parser.util import call_with_default_args, write_json from RePoE.parser import Parser_Module class cluster_jewels(Parser_Module): @staticmethod def write(file_system, data_path, relational_reader, translation_file_cache, ot_file_cache): skills = {} for row in relational_reader["PassiveTreeExpansionSkills.dat"]: size = row["PassiveTreeExpansionJewelSizesKey"]["Name"] if size not in skills: skills[size] = [] skills[size].append( { "id": row["PassiveSkillsKey"]["Id"], "name": row["PassiveSkillsKey"]["Name"], "stats": {stat["Id"]: value for stat, value in row["PassiveSkillsKey"]["StatsZip"]}, "tag": row["TagsKey"]["Id"], } ) data = {} for row in relational_reader["PassiveTreeExpansionJewels.dat"]: size = row["PassiveTreeExpansionJewelSizesKey"]["Name"] data[row["BaseItemTypesKey"]["Id"]] = { "name": row["BaseItemTypesKey"]["Name"], "size": size, "min_skills": row["MinNodes"], "max_skills": row["MaxNodes"], "small_indices": row["SmallIndices"], "notable_indices": row["NotableIndices"], "socket_indices": row["SocketIndices"], "total_indices": row["TotalIndices"], "passive_skills": skills[size], } write_json(data, data_path, "cluster_jewels") if __name__ == "__main__": call_with_default_args(cluster_jewels.write)
88322
import random l1 = [] l2 = [] for i in range(20): l1.append(random.uniform(-1E10, 1E10)) l2.append(random.uniform(-1E10, 1E10)) print(l1) print(l2) l = [] l.extend(l1) l.extend(l2) print(sorted(l)) ''' [6015943293.071386, -3878285748.0708866, 8674121166.062424, -1528465047.6118088, 7584260716.494843, -373958476.80486107, -6367787695.054295, 6813992306.719868, 5986097626.907181, 9011134545.052086, 7123644338.268343, 2646164210.08445, 4407427446.995375, -888196668.2563229, 7973918726.985172, -6529216482.09644, 6079069259.51853, -8415952427.784341, -6859960084.757652, -502409126.89040375] [9241165993.258648, -9423768405.578083, 3280085607.6687145, -5253703037.682413, 3858507441.2785892, 9896256282.896187, -9439606732.236805, 3082628799.5320206, 9453124863.59945, 9928066165.458393, 1135071669.4712334, 6380353457.986282, 8329064041.853199, 2382910730.445751, -8478491750.445316, 9607469190.690144, 5417691217.440792, -9698248424.421888, -3933774735.280322, -5984555343.381466] [-9698248424.421888, -9439606732.236805, -9423768405.578083, -8478491750.445316, -8415952427.784341, -6859960084.757652, -6529216482.09644, -6367787695.054295, -5984555343.381466, -5253703037.682413, -3933774735.280322, -3878285748.0708866, -1528465047.6118088, -888196668.2563229, -502409126.89040375, -373958476.80486107, 1135071669.4712334, 2382910730.445751, 2646164210.08445, 3082628799.5320206, 3280085607.6687145, 3858507441.2785892, 4407427446.995375, 5417691217.440792, 5986097626.907181, 6015943293.071386, 6079069259.51853, 6380353457.986282, 6813992306.719868, 7123644338.268343, 7584260716.494843, 7973918726.985172, 8329064041.853199, 8674121166.062424, 9011134545.052086, 9241165993.258648, 9453124863.59945, 9607469190.690144, 9896256282.896187, 9928066165.458393] '''
88357
import argparse import time import torch from kruskals import kruskals_pytorch, kruskals_pytorch_batched from kruskals import kruskals_cpp_pytorch, kruskals_cpp_pytorch2 parser = argparse.ArgumentParser() parser.add_argument("--n", type=int, default=30, help="Number of nodes.") parser.add_argument("--batch_size", type=int, default=10, help="Batch size.") parser.add_argument("--num_steps", type=int, default=1, help="Number of times to evaluate.") args = parser.parse_args() num_edges = int(args.n * (args.n - 1) / 2) weights = torch.randn(args.batch_size, num_edges) vertices = torch.triu_indices(args.n - 1, args.n, offset=1) tiled_vertices = vertices.transpose(0, 1).repeat((weights.size(0), 1, 1)).float() weights_and_edges = torch.cat([weights.unsqueeze(-1), tiled_vertices], axis=-1) # Test pytorch (batched, gpu). t = 0 weights_and_edges = weights_and_edges.to("cuda") for _ in range(args.num_steps): start = time.time() res_pytorch = kruskals_pytorch_batched(weights_and_edges, args.n) torch.cuda.synchronize() t += time.time() - start print(f"Pytorch (batched, gpu): {t}; avg: {t / args.num_steps}") # Test cpp (pytorch, cpu). t = 0 weights_and_edges = weights_and_edges.to("cpu") for _ in range(args.num_steps): start = time.time() res_pytorch = kruskals_cpp_pytorch(weights_and_edges, args.n) t += time.time() - start print(f"C++ (pytorch, cpu): {t}; avg: {t / args.num_steps}") # Test cpp (pytorch, gpu). t = 0 weights_and_edges = weights_and_edges.to("cuda") for _ in range(args.num_steps): start = time.time() res_pytorch = kruskals_cpp_pytorch(weights_and_edges, args.n) torch.cuda.synchronize() t += time.time() - start print(f"C++ (pytorch, gpu): {t}; avg: {t / args.num_steps}") # Test cpp (pytorch2, cpu). t = 0 weights_and_edges = weights_and_edges.to("cpu") for _ in range(args.num_steps): start = time.time() res_pytorch = kruskals_cpp_pytorch2(weights_and_edges, args.n) t += time.time() - start print(f"C++ (pytorch2, cpu): {t}; avg: {t / args.num_steps}") # Test cpp (pytorch2, gpu). t = 0 weights_and_edges = weights_and_edges.to("cuda") for _ in range(args.num_steps): start = time.time() res_pytorch = kruskals_cpp_pytorch2(weights_and_edges, args.n) torch.cuda.synchronize() t += time.time() - start print(f"C++ (pytorch2, gpu): {t}; avg: {t / args.num_steps}")
88407
import os import logging from assemblyline.common import forge from assemblyline.common.str_utils import safe_str from assemblyline.common.uid import get_id_from_data from assemblyline.odm.models.signature import Signature class SuricataImporter(object): def __init__(self, logger=None): if not logger: from assemblyline.common import log as al_log al_log.init_logging('suricata_importer') logger = logging.getLogger('assemblyline.suricata_importer') logger.setLevel(logging.INFO) self.ds = forge.get_datastore() self.classification = forge.get_classification() self.log = logger def parse_meta(self, signature): meta = {} try: meta_parts = signature.split("(", 1)[1].strip(" );").split("; ") for part in meta_parts: if ":" in part: key, val = part.split(":", 1) if key == "metadata": for metadata in val.split(","): meta_key, meta_val = metadata.strip().split(' ') meta[meta_key] = safe_str(meta_val) else: meta[key] = safe_str(val.strip('"')) except ValueError: return meta return meta def _save_signatures(self, signatures, source, default_status="TESTING"): saved_sigs = [] order = 1 for signature in signatures: signature_hash = get_id_from_data(signature, length=16) meta = self.parse_meta(signature) classification = meta.get('classification', self.classification.UNRESTRICTED) signature_id = meta.get('sid', signature_hash) revision = meta.get('rev', 1) name = meta['msg'] status = meta.get('al_status', default_status) key = f"suricata_{source}_{signature_id}" sig = Signature({ 'classification': classification, "data": signature, "name": name, "order": order, "revision": int(revision), "signature_id": signature_id, "source": source, "status": status, "type": "suricata" }) self.ds.signature.save(key, sig) self.log.info("Added signature %s" % name) saved_sigs.append(sig) order += 1 return saved_sigs def _split_signatures(self, data): signatures = [] for line in data.splitlines(): temp_line = line.strip() if temp_line == "" or temp_line.startswith("#"): continue signatures.append(line) return signatures def import_data(self, yara_bin, source, default_status="TESTING"): return self._save_signatures(self._split_signatures(yara_bin), source, default_status=default_status) def import_file(self, cur_file, source=None, default_status="TESTING"): cur_file = os.path.expanduser(cur_file) if os.path.exists(cur_file): with open(cur_file, "r") as suricata_file: suricata_bin = suricata_file.read() return self.import_data(suricata_bin, source or os.path.basename(cur_file), default_status=default_status) else: raise Exception(f"File {cur_file} does not exists.") def import_files(self, files, default_status="TESTING"): output = {} for cur_file in files: output[cur_file] = self.import_file(cur_file, default_status=default_status) return output
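# Hedged sketch (editor addition): what `parse_meta` extracts from a typical
# rule, reproduced with plain string handling so it runs without the
# assemblyline datastore. The rule text is illustrative.
if __name__ == "__main__":
    rule = ('alert http any any -> any any (msg:"Example rule"; sid:1000001; '
            'rev:2; metadata:al_status TESTING;)')
    meta = {}
    for part in rule.split("(", 1)[1].strip(" );").split("; "):
        if ":" in part:
            key, val = part.split(":", 1)
            if key == "metadata":
                for metadata in val.split(","):
                    meta_key, meta_val = metadata.strip().split(' ')
                    meta[meta_key] = meta_val
            else:
                meta[key] = val.strip('"')
    print(meta)
    # {'msg': 'Example rule', 'sid': '1000001', 'rev': '2', 'al_status': 'TESTING'}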
88414
from django.utils.text import format_lazy # Mappings of usable segment => field name AVAILABLE_PODCAST_SEGMENTS = { "podcast_slug": "slug", "podcast_type": "itunes_type", "podcast_title": "title", "podcast_subtitle": "subtitle", "podcast_author": "author", "podcast_language": "language", "podcast_explicit": "itunes_explicit", "podcast_updated": "updated", } AVAILABLE_EPISODE_SEGMENTS = { "episode_slug": "slug", "episode_id": "id", "episode_date": "published", "episode_number": "itunes_episode", "episode_type": "itunes_episodetype", "episode_title": "title", } UNIFYING_EPISODE_SEGMENTS = [ "episode_slug", "episode_id", "episode_date", "episode_number", "episode_title", ] ALL_VALID_SEGMENTS = {**AVAILABLE_EPISODE_SEGMENTS, **AVAILABLE_PODCAST_SEGMENTS} def get_segments_html(segments): if isinstance(segments, dict): segments = list(segments.keys()) return "<code>$" + "</code>, <code>$".join(segments) + "</code>" def resolve_segments(string): return format_lazy( string, podcast_segments=get_segments_html(AVAILABLE_PODCAST_SEGMENTS), episode_segments=get_segments_html(AVAILABLE_EPISODE_SEGMENTS), unifying_segments=get_segments_html(UNIFYING_EPISODE_SEGMENTS), )
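# Minimal usage sketch (editor addition): `get_segments_html` only formats
# segment names, so it can be exercised without a configured Django project,
# assuming Django itself is importable.
if __name__ == "__main__":
    print(get_segments_html(["episode_slug", "episode_id"]))
    # <code>$episode_slug</code>, <code>$episode_id</code>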
88445
from utils.prepare_data import get_training_data from utils.prepare_plots import plot_results from simpleencoderdecoder.build_simple_encoderdecoder_model import simple_encoderdecoder import random import numpy as np if __name__ == "__main__": profile_gray_objs, midcurve_gray_objs = get_training_data() test_gray_images = random.sample(profile_gray_objs, 5) profile_gray_objs = np.asarray(profile_gray_objs) / 255. midcurve_gray_objs = np.asarray(midcurve_gray_objs) / 255. test_gray_images = np.asarray(test_gray_images) / 255. retrain_model = True endec = simple_encoderdecoder() endec.train(profile_gray_objs, midcurve_gray_objs, retrain_model) original_profile_imgs, predicted_midcurve_imgs = endec.predict(test_gray_images) plot_results(original_profile_imgs, predicted_midcurve_imgs)
88452
import tensorflow as tf
import numpy as np
import random
import time
import os

from tensorflow.keras import layers

cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)


# The Generator Model
def generator_model(n):
    model = tf.keras.Sequential()
    model.add(layers.Dense(n*n*256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((n, n, 256)))
    assert model.output_shape == (None, n, n, 256)  # Note: None is the batch size

    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    assert model.output_shape == (None, n, n, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 2*n, 2*n, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.ReLU())

    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(1, 1), padding='same', use_bias=False, activation='tanh'))
    assert model.output_shape == (None, 2*n, 2*n, 1)

    return model


def discriminator_model():
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[4, 4, 1]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.ReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Flatten())
    model.add(layers.Dense(1))

    return model


# Loss and Optimizer
def D_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss
    return total_loss


def G_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)


# Training Functions
def train_step(adj_matrix, n=2):
    generator = generator_model(n)
    discriminator = discriminator_model()

    noise_dim = 100
    num_of_generated_examples = 16
    seed = tf.random.normal([num_of_generated_examples, noise_dim])
    noise = tf.random.normal([100, noise_dim])

    checkpoint_dir = './training_checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                     discriminator_optimizer=discriminator_optimizer,
                                     generator=generator,
                                     discriminator=discriminator)

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_matrix = generator(noise, training=True)

        real_output = discriminator(adj_matrix, training=True)
        fake_output = discriminator(generated_matrix, training=True)

        gen_loss = G_loss(fake_output)
        disc_loss = D_loss(real_output, fake_output)

    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))


# Train
def train_GAN(dataset, epochs, path="/"):
    for epoch in range(epochs):
        start = time.time()

        for batch in dataset:
            train_step(batch)

        # Save the model every 15 epochs
        if (epoch + 1) % 15 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)

        print('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))

    # Model Path
    generator.save(path)
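# Hedged shape-check sketch (editor addition): with n=2 the generator emits
# 4x4x1 matrices, matching the discriminator's input_shape above. This only
# builds both models and does a single forward pass.
if __name__ == "__main__":
    gen = generator_model(2)
    disc = discriminator_model()
    sample_noise = tf.random.normal([1, 100])
    fake = gen(sample_noise, training=False)
    print(fake.shape)                        # (1, 4, 4, 1)
    print(disc(fake, training=False).shape)  # (1, 1)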
88455
from django.conf.urls import url, include from django.contrib import admin from django.contrib.staticfiles.storage import staticfiles_storage from django.views.generic.base import RedirectView from rest_framework import routers import cspreports.urls from .views import AnnouncementViewSet, AssessmentViewSet, AttributeViewSet, MeasurementViewSet, RatingViewSet, \ TagViewSet, TeamViewSet, TemplateViewSet, \ export_data, healthcheck, home, robots_txt, schema_view router = routers.DefaultRouter() router.register(r'announcements', AnnouncementViewSet) router.register(r'assessments', AssessmentViewSet) router.register(r'attributes', AttributeViewSet) router.register(r'measurements', MeasurementViewSet) router.register(r'ratings', RatingViewSet) router.register(r'tags', TagViewSet) router.register(r'teams', TeamViewSet) router.register(r'templates', TemplateViewSet) urlpatterns = [ url(r'^healthcheck/?$', healthcheck, name='healthcheck'), url(r'^admin/', admin.site.urls), url(r'^api/', include(router.urls)), url(r'^api/api-auth/', include( 'rest_framework.urls', namespace='rest_framework')), url(r'^docs/', schema_view, name='schema'), url(r'^export/', export_data, name='export'), url(r'^favicon\.ico', RedirectView.as_view(url=staticfiles_storage.url('assets/favicon.ico')), name='favicon'), url(r'^robots\.txt', robots_txt, name='robots'), url(r'^(?:about|assessment|team|)(?:/|$)', home, name='home'), url(r'^csp/', include('cspreports.urls')), ]
88480
import awkward as ak import numpy as np import pytest from pytest_lazyfixture import lazy_fixture from fast_carpenter.testing import FakeBEEvent import fast_carpenter.tree_adapter as tree_adapter from fast_carpenter.tree_adapter import ArrayMethods ############################################################################### # Uproot3 tests ############################################################################### @pytest.fixture def uproot3_adapter(uproot3_tree): return tree_adapter.create({"adapter": "uproot3", "tree": uproot3_tree}) def test_uproot3_num_entries(uproot3_tree, uproot3_adapter): assert uproot3_adapter.num_entries == uproot3_tree.numentries def test_uproot3_getitem(uproot3_tree, uproot3_adapter): assert ak.all(uproot3_adapter["Muon_Py"] == uproot3_tree["Muon_Py"].array()) ############################################################################### # Uproot4 tests ############################################################################### @pytest.fixture def uproot4_adapter(uproot4_tree): return tree_adapter.create({"adapter": "uproot4", "tree": uproot4_tree}) @pytest.fixture def uproot4_ranged_adapter(uproot4_tree, event_range): return tree_adapter.create_ranged( { "adapter": "uproot4", "tree": uproot4_tree, "start": event_range.start_entry, "stop": event_range.stop_entry } ) @pytest.fixture def uproot4_masked_adapter(uproot4_tree, event_range): return tree_adapter.create_masked( { "adapter": "uproot4", "tree": uproot4_tree, "start": event_range.start_entry, "stop": event_range.stop_entry, "mask": [(i % 2) == 0 for i in range(event_range.start_entry, event_range.stop_entry)] } ) def test_uproot4_num_entries(uproot4_tree, uproot4_adapter): assert uproot4_adapter.num_entries == uproot4_tree.num_entries def test_uproot4_getitem(uproot4_tree, uproot4_adapter): assert ak.all(uproot4_adapter["Muon_Py"] == uproot4_tree["Muon_Py"].array()) def test_uproot4_evaluate(uproot4_tree, uproot4_adapter): result = uproot4_adapter.evaluate("Muon_Py * NMuon") assert ak.num(result, axis=0) == ak.num(uproot4_tree["Muon_Py"].array(), axis=0) def test_uproot4_range(uproot4_tree, uproot4_ranged_adapter, event_range): assert uproot4_ranged_adapter.num_entries == event_range.entries_in_block def test_uproot4_add_retrieve(uproot4_tree, uproot4_ranged_adapter): muon_px = uproot4_ranged_adapter["Muon_Px"] assert ArrayMethods.filtered_len(muon_px) == len(uproot4_ranged_adapter) muon_py, muon_pz = uproot4_ranged_adapter.arrays(["Muon_Py", "Muon_Pz"], how=tuple) muon_momentum = np.hypot(muon_py, muon_pz) uproot4_ranged_adapter.new_variable("Muon_momentum", muon_momentum) retrieve_momentum = uproot4_ranged_adapter["Muon_momentum"] assert len(retrieve_momentum) == len(muon_momentum) assert ak.all(ak.flatten(retrieve_momentum) == ak.flatten(muon_momentum)) def test_overwrite(uproot4_ranged_adapter): muon_px = uproot4_ranged_adapter["Muon_Px"] assert ("Muon_Px" in uproot4_ranged_adapter) with pytest.raises(ValueError) as err: uproot4_ranged_adapter.new_variable("Muon_Px", muon_px / muon_px) assert "Muon_Px" in str(err) def test_to_pandas(full_wrapped_tree): chunk = FakeBEEvent(full_wrapped_tree, "mc") inputs = ['Electron_Px', 'Electron_Py', 'EventWeight'] df = ArrayMethods.to_pandas(chunk.tree, inputs) assert list(df.keys()) == inputs def test_arraydict_to_pandas_with_new_variable(uproot4_ranged_adapter): muon_py, muon_pz = uproot4_ranged_adapter.arrays(["Muon_Py", "Muon_Pz"], how=tuple) muon_momentum = np.hypot(muon_py, muon_pz) uproot4_ranged_adapter.new_variable("Muon_momentum", 
muon_momentum) inputs = ['Muon_Py', 'Muon_Pz', 'Muon_momentum'] arrays = { 'Muon_Py': muon_py, 'Muon_Pz': muon_pz, 'Muon_momentum': muon_momentum, } df = ArrayMethods.arraydict_to_pandas(arrays) assert list(df.keys()) == inputs assert len(df) == ak.count_nonzero(muon_py) @pytest.mark.parametrize( "tree_under_test", [ lazy_fixture("uproot4_adapter"), lazy_fixture("uproot4_ranged_adapter"), lazy_fixture("uproot4_masked_adapter"), ] ) def test_to_pandas_with_new_variable(tree_under_test): muon_py, muon_pz = tree_under_test.arrays(["Muon_Py", "Muon_Pz"], how=tuple) muon_momentum = np.hypot(muon_py, muon_pz) assert len(muon_momentum) == len(muon_py) tree_under_test.new_variable("Muon_momentum", muon_momentum) inputs = ['Muon_Py', 'Muon_Pz', 'Muon_momentum'] df = ArrayMethods.to_pandas(tree_under_test, inputs) assert list(df.keys()) == inputs assert len(df) == ak.count_nonzero(muon_py) @pytest.mark.parametrize( "tree_under_test, how", [ (lazy_fixture("uproot4_adapter"), tuple), (lazy_fixture("uproot4_adapter"), list), (lazy_fixture("uproot4_ranged_adapter"), tuple), (lazy_fixture("uproot4_ranged_adapter"), list), (lazy_fixture("uproot4_masked_adapter"), tuple), (lazy_fixture("uproot4_masked_adapter"), list), ] ) def test_arrays_to_tuple_or_list(tree_under_test, how): muon_py, muon_pz = tree_under_test.arrays(["Muon_Py", "Muon_Pz"], how=tuple) muon_momentum = np.hypot(muon_py, muon_pz) tree_under_test.new_variable("Muon_momentum", muon_momentum) _, _, muon_momentum_new = tree_under_test.arrays(["Muon_Py", "Muon_Pz", "Muon_momentum"], how=how) assert ak.all(muon_momentum_new == muon_momentum) @pytest.mark.parametrize( "tree_under_test", [ lazy_fixture("uproot4_adapter"), lazy_fixture("uproot4_ranged_adapter"), lazy_fixture("uproot4_masked_adapter"), ] ) def test_arrays_to_dict(tree_under_test): muon_py, muon_pz = tree_under_test.arrays(["Muon_Py", "Muon_Pz"], how=tuple) muon_momentum = np.hypot(muon_py, muon_pz) tree_under_test.new_variable("Muon_momentum", muon_momentum) array_dict = tree_under_test.arrays(["Muon_Py", "Muon_Pz", "Muon_momentum"], how=dict) assert ak.all(array_dict["Muon_momentum"] == muon_momentum) @pytest.mark.parametrize( "tree_under_test", [ lazy_fixture("uproot4_adapter"), lazy_fixture("uproot4_ranged_adapter"), lazy_fixture("uproot4_masked_adapter"), ] ) def test_arrays_as_np_lists(tree_under_test): muon_py, muon_pz = tree_under_test.arrays(["Muon_Py", "Muon_Pz"], how=tuple) muon_momentum = np.hypot(muon_py, muon_pz) tree_under_test.new_variable("Muon_momentum", muon_momentum) np_array = ArrayMethods.arrays_as_np_array(tree_under_test, ["Muon_Py", "Muon_Pz", "Muon_momentum"], how=dict) assert ak.all(np_array[-1] == muon_momentum)
88556
from tkinter import * PROGRAM_NAME = "Footprint Editor" root = Tk() root.geometry('350x350') root.title(PROGRAM_NAME) menu_bar = Menu(root) # menu begins file_menu = Menu(menu_bar, tearoff=0) # all file menu-items will be added here next menu_bar.add_cascade(label='File', menu=file_menu) edit_menu = Menu(menu_bar, tearoff=0) menu_bar.add_cascade(label='Edit', menu=edit_menu) view_menu = Menu(menu_bar, tearoff=0) menu_bar.add_cascade(label='View', menu=view_menu) about_menu = Menu(menu_bar, tearoff=0) menu_bar.add_cascade(label='About', menu=about_menu) root.config(menu=menu_bar) # menu ends root.mainloop()
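# Editor sketch (hedged): the menus above are created empty; entries would
# normally be attached before mainloop(), for example:
#   file_menu.add_command(label='Exit', command=root.quit)
#   edit_menu.add_command(label='Undo')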