Dataset columns (observed value ranges):

    content           string    length 1 to 1.05M characters
    input_ids         list      length 1 to 883k tokens
    ratio_char_token  float64   1 to 22.9
    token_count       int64     1 to 883k

Each sample row below gives its fields in that order: content, input_ids,
ratio_char_token, token_count.
from .jsonc import load, loads, dump, dumps
[ 6738, 764, 17752, 66, 1330, 3440, 11, 15989, 11, 10285, 11, 45514, 198 ]
3.384615
13
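The four fields of each row are mutually consistent: input_ids is a BPE tokenization of content, token_count is the length of input_ids, and ratio_char_token is len(content) / token_count. As a sanity check against the row above, here is a minimal sketch; the choice of the GPT-2 tokenizer from Hugging Face transformers is an assumption (the ids are consistent with a GPT-2-style vocabulary, e.g. 198 for "\n"), not something the dump states.

    # Hypothetical verification of the row above, assuming a GPT-2 BPE tokenizer;
    # the dump itself does not name the tokenizer.
    from transformers import GPT2TokenizerFast

    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

    content = "from .jsonc import load, loads, dump, dumps\n"
    input_ids = tokenizer(content)["input_ids"]

    token_count = len(input_ids)                   # expected: 13
    ratio_char_token = len(content) / token_count  # expected: 44 / 13 = 3.384615...

    print(input_ids, token_count, round(ratio_char_token, 6))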
##########################################################################
#
# Copyright 2008-2009 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/

"""d3d9caps.h"""

from winapi import *
from d3d9types import *

D3DVS20CAPS = Flags(DWORD, [
    "D3DVS20CAPS_PREDICATION",
])

D3DVSHADERCAPS2_0 = Struct("D3DVSHADERCAPS2_0", [
    (D3DVS20CAPS, "Caps"),
    (INT, "DynamicFlowControlDepth"),
    (INT, "NumTemps"),
    (INT, "StaticFlowControlDepth"),
])

D3DPS20CAPS = Flags(DWORD, [
    "D3DPS20CAPS_ARBITRARYSWIZZLE", "D3DPS20CAPS_GRADIENTINSTRUCTIONS",
    "D3DPS20CAPS_PREDICATION", "D3DPS20CAPS_NODEPENDENTREADLIMIT",
    "D3DPS20CAPS_NOTEXINSTRUCTIONLIMIT",
])

D3DPSHADERCAPS2_0 = Struct("D3DPSHADERCAPS2_0", [
    (D3DPS20CAPS, "Caps"),
    (INT, "DynamicFlowControlDepth"),
    (INT, "NumTemps"),
    (INT, "StaticFlowControlDepth"),
    (INT, "NumInstructionSlots"),
])

D3DCAPS = Flags(DWORD, [
    "D3DCAPS_READ_SCANLINE",
])

D3DCAPS2 = Flags(DWORD, [
    "D3DCAPS2_FULLSCREENGAMMA", "D3DCAPS2_CANCALIBRATEGAMMA", "D3DCAPS2_RESERVED",
    "D3DCAPS2_CANMANAGERESOURCE", "D3DCAPS2_DYNAMICTEXTURES", "D3DCAPS2_CANAUTOGENMIPMAP",
    "D3DCAPS2_CANSHARERESOURCE",
])

D3DCAPS3 = Flags(DWORD, [
    "D3DCAPS3_RESERVED", "D3DCAPS3_ALPHA_FULLSCREEN_FLIP_OR_DISCARD",
    "D3DCAPS3_LINEAR_TO_SRGB_PRESENTATION", "D3DCAPS3_COPY_TO_VIDMEM",
    "D3DCAPS3_COPY_TO_SYSTEMMEM",
])

D3DPRESENT_INTERVAL = Flags(DWORD, [
    #"D3DPRESENT_INTERVAL_DEFAULT", # 0
    "D3DPRESENT_INTERVAL_ONE", "D3DPRESENT_INTERVAL_TWO", "D3DPRESENT_INTERVAL_THREE",
    "D3DPRESENT_INTERVAL_FOUR", "D3DPRESENT_INTERVAL_IMMEDIATE",
])

D3DCURSORCAPS = Flags(DWORD, [
    "D3DCURSORCAPS_COLOR", "D3DCURSORCAPS_LOWRES",
])

D3DDEVCAPS = Flags(DWORD, [
    "D3DDEVCAPS_EXECUTESYSTEMMEMORY", "D3DDEVCAPS_EXECUTEVIDEOMEMORY",
    "D3DDEVCAPS_TLVERTEXSYSTEMMEMORY", "D3DDEVCAPS_TLVERTEXVIDEOMEMORY",
    "D3DDEVCAPS_TEXTURESYSTEMMEMORY", "D3DDEVCAPS_TEXTUREVIDEOMEMORY",
    "D3DDEVCAPS_DRAWPRIMTLVERTEX", "D3DDEVCAPS_CANRENDERAFTERFLIP",
    "D3DDEVCAPS_TEXTURENONLOCALVIDMEM", "D3DDEVCAPS_DRAWPRIMITIVES2",
    "D3DDEVCAPS_SEPARATETEXTUREMEMORIES", "D3DDEVCAPS_DRAWPRIMITIVES2EX",
    "D3DDEVCAPS_HWTRANSFORMANDLIGHT", "D3DDEVCAPS_CANBLTSYSTONONLOCAL",
    "D3DDEVCAPS_HWRASTERIZATION", "D3DDEVCAPS_PUREDEVICE",
    "D3DDEVCAPS_QUINTICRTPATCHES", "D3DDEVCAPS_RTPATCHES",
    "D3DDEVCAPS_RTPATCHHANDLEZERO", "D3DDEVCAPS_NPATCHES",
])

D3DPMISCCAPS = Flags(DWORD, [
    "D3DPMISCCAPS_MASKZ", "D3DPMISCCAPS_CULLNONE", "D3DPMISCCAPS_CULLCW",
    "D3DPMISCCAPS_CULLCCW", "D3DPMISCCAPS_COLORWRITEENABLE",
    "D3DPMISCCAPS_CLIPPLANESCALEDPOINTS", "D3DPMISCCAPS_CLIPTLVERTS",
    "D3DPMISCCAPS_TSSARGTEMP", "D3DPMISCCAPS_BLENDOP", "D3DPMISCCAPS_NULLREFERENCE",
    "D3DPMISCCAPS_INDEPENDENTWRITEMASKS", "D3DPMISCCAPS_PERSTAGECONSTANT",
    "D3DPMISCCAPS_FOGANDSPECULARALPHA", "D3DPMISCCAPS_SEPARATEALPHABLEND",
    "D3DPMISCCAPS_MRTINDEPENDENTBITDEPTHS", "D3DPMISCCAPS_MRTPOSTPIXELSHADERBLENDING",
    "D3DPMISCCAPS_FOGVERTEXCLAMPED", "D3DPMISCCAPS_POSTBLENDSRGBCONVERT",
])

D3DLINECAPS = Flags(DWORD, [
    "D3DLINECAPS_TEXTURE", "D3DLINECAPS_ZTEST", "D3DLINECAPS_BLEND",
    "D3DLINECAPS_ALPHACMP", "D3DLINECAPS_FOG", "D3DLINECAPS_ANTIALIAS",
])

D3DPRASTERCAPS = Flags(DWORD, [
    "D3DPRASTERCAPS_DITHER", "D3DPRASTERCAPS_ZTEST", "D3DPRASTERCAPS_FOGVERTEX",
    "D3DPRASTERCAPS_FOGTABLE", "D3DPRASTERCAPS_MIPMAPLODBIAS",
    "D3DPRASTERCAPS_ZBUFFERLESSHSR", "D3DPRASTERCAPS_FOGRANGE",
    "D3DPRASTERCAPS_ANISOTROPY", "D3DPRASTERCAPS_WBUFFER", "D3DPRASTERCAPS_WFOG",
    "D3DPRASTERCAPS_ZFOG", "D3DPRASTERCAPS_COLORPERSPECTIVE",
    "D3DPRASTERCAPS_SCISSORTEST", "D3DPRASTERCAPS_SLOPESCALEDEPTHBIAS",
    "D3DPRASTERCAPS_DEPTHBIAS", "D3DPRASTERCAPS_MULTISAMPLE_TOGGLE",
])

D3DPCMPCAPS = Flags(DWORD, [
    "D3DPCMPCAPS_NEVER", "D3DPCMPCAPS_LESS", "D3DPCMPCAPS_EQUAL",
    "D3DPCMPCAPS_LESSEQUAL", "D3DPCMPCAPS_GREATER", "D3DPCMPCAPS_NOTEQUAL",
    "D3DPCMPCAPS_GREATEREQUAL", "D3DPCMPCAPS_ALWAYS",
])

D3DPBLENDCAPS = Flags(DWORD, [
    "D3DPBLENDCAPS_ZERO", "D3DPBLENDCAPS_ONE", "D3DPBLENDCAPS_SRCCOLOR",
    "D3DPBLENDCAPS_INVSRCCOLOR", "D3DPBLENDCAPS_SRCALPHA", "D3DPBLENDCAPS_INVSRCALPHA",
    "D3DPBLENDCAPS_DESTALPHA", "D3DPBLENDCAPS_INVDESTALPHA", "D3DPBLENDCAPS_DESTCOLOR",
    "D3DPBLENDCAPS_INVDESTCOLOR", "D3DPBLENDCAPS_SRCALPHASAT",
    "D3DPBLENDCAPS_BOTHSRCALPHA", "D3DPBLENDCAPS_BOTHINVSRCALPHA",
    "D3DPBLENDCAPS_BLENDFACTOR", "D3DPBLENDCAPS_SRCCOLOR2", "D3DPBLENDCAPS_INVSRCCOLOR2",
])

D3DPSHADECAPS = Flags(DWORD, [
    "D3DPSHADECAPS_COLORGOURAUDRGB", "D3DPSHADECAPS_SPECULARGOURAUDRGB",
    "D3DPSHADECAPS_ALPHAGOURAUDBLEND", "D3DPSHADECAPS_FOGGOURAUD",
])

D3DPTEXTURECAPS = Flags(DWORD, [
    "D3DPTEXTURECAPS_PERSPECTIVE", "D3DPTEXTURECAPS_POW2", "D3DPTEXTURECAPS_ALPHA",
    "D3DPTEXTURECAPS_SQUAREONLY", "D3DPTEXTURECAPS_TEXREPEATNOTSCALEDBYSIZE",
    "D3DPTEXTURECAPS_ALPHAPALETTE", "D3DPTEXTURECAPS_NONPOW2CONDITIONAL",
    "D3DPTEXTURECAPS_PROJECTED", "D3DPTEXTURECAPS_CUBEMAP", "D3DPTEXTURECAPS_VOLUMEMAP",
    "D3DPTEXTURECAPS_MIPMAP", "D3DPTEXTURECAPS_MIPVOLUMEMAP",
    "D3DPTEXTURECAPS_MIPCUBEMAP", "D3DPTEXTURECAPS_CUBEMAP_POW2",
    "D3DPTEXTURECAPS_VOLUMEMAP_POW2", "D3DPTEXTURECAPS_NOPROJECTEDBUMPENV",
])

D3DPTFILTERCAPS = Flags(DWORD, [
    "D3DPTFILTERCAPS_MINFPOINT", "D3DPTFILTERCAPS_MINFLINEAR",
    "D3DPTFILTERCAPS_MINFANISOTROPIC", "D3DPTFILTERCAPS_MINFPYRAMIDALQUAD",
    "D3DPTFILTERCAPS_MINFGAUSSIANQUAD", "D3DPTFILTERCAPS_MIPFPOINT",
    "D3DPTFILTERCAPS_MIPFLINEAR", "D3DPTFILTERCAPS_CONVOLUTIONMONO",
    "D3DPTFILTERCAPS_MAGFPOINT", "D3DPTFILTERCAPS_MAGFLINEAR",
    "D3DPTFILTERCAPS_MAGFANISOTROPIC", "D3DPTFILTERCAPS_MAGFPYRAMIDALQUAD",
    "D3DPTFILTERCAPS_MAGFGAUSSIANQUAD",
])

D3DPTADDRESSCAPS = Flags(DWORD, [
    "D3DPTADDRESSCAPS_WRAP", "D3DPTADDRESSCAPS_MIRROR", "D3DPTADDRESSCAPS_CLAMP",
    "D3DPTADDRESSCAPS_BORDER", "D3DPTADDRESSCAPS_INDEPENDENTUV",
    "D3DPTADDRESSCAPS_MIRRORONCE",
])

D3DSTENCILCAPS = Flags(DWORD, [
    "D3DSTENCILCAPS_KEEP", "D3DSTENCILCAPS_ZERO", "D3DSTENCILCAPS_REPLACE",
    "D3DSTENCILCAPS_INCRSAT", "D3DSTENCILCAPS_DECRSAT", "D3DSTENCILCAPS_INVERT",
    "D3DSTENCILCAPS_INCR", "D3DSTENCILCAPS_DECR", "D3DSTENCILCAPS_TWOSIDED",
])

D3DTEXOPCAPS = Flags(DWORD, [
    "D3DTEXOPCAPS_DISABLE", "D3DTEXOPCAPS_SELECTARG1", "D3DTEXOPCAPS_SELECTARG2",
    "D3DTEXOPCAPS_MODULATE", "D3DTEXOPCAPS_MODULATE2X", "D3DTEXOPCAPS_MODULATE4X",
    "D3DTEXOPCAPS_ADD", "D3DTEXOPCAPS_ADDSIGNED", "D3DTEXOPCAPS_ADDSIGNED2X",
    "D3DTEXOPCAPS_SUBTRACT", "D3DTEXOPCAPS_ADDSMOOTH", "D3DTEXOPCAPS_BLENDDIFFUSEALPHA",
    "D3DTEXOPCAPS_BLENDTEXTUREALPHA", "D3DTEXOPCAPS_BLENDFACTORALPHA",
    "D3DTEXOPCAPS_BLENDTEXTUREALPHAPM", "D3DTEXOPCAPS_BLENDCURRENTALPHA",
    "D3DTEXOPCAPS_PREMODULATE", "D3DTEXOPCAPS_MODULATEALPHA_ADDCOLOR",
    "D3DTEXOPCAPS_MODULATECOLOR_ADDALPHA", "D3DTEXOPCAPS_MODULATEINVALPHA_ADDCOLOR",
    "D3DTEXOPCAPS_MODULATEINVCOLOR_ADDALPHA", "D3DTEXOPCAPS_BUMPENVMAP",
    "D3DTEXOPCAPS_BUMPENVMAPLUMINANCE", "D3DTEXOPCAPS_DOTPRODUCT3",
    "D3DTEXOPCAPS_MULTIPLYADD", "D3DTEXOPCAPS_LERP",
])

D3DFVFCAPS = Flags(DWORD, [
    "D3DFVFCAPS_TEXCOORDCOUNTMASK", "D3DFVFCAPS_DONOTSTRIPELEMENTS", "D3DFVFCAPS_PSIZE",
])

D3DVTXPCAPS = Flags(DWORD, [
    "D3DVTXPCAPS_TEXGEN", "D3DVTXPCAPS_MATERIALSOURCE7", "D3DVTXPCAPS_DIRECTIONALLIGHTS",
    "D3DVTXPCAPS_POSITIONALLIGHTS", "D3DVTXPCAPS_LOCALVIEWER", "D3DVTXPCAPS_TWEENING",
    "D3DVTXPCAPS_TEXGEN_SPHEREMAP", "D3DVTXPCAPS_NO_TEXGEN_NONLOCALVIEWER",
])

D3DDEVCAPS2 = Flags(DWORD, [
    "D3DDEVCAPS2_STREAMOFFSET", "D3DDEVCAPS2_DMAPNPATCH",
    "D3DDEVCAPS2_ADAPTIVETESSRTPATCH", "D3DDEVCAPS2_ADAPTIVETESSNPATCH",
    "D3DDEVCAPS2_CAN_STRETCHRECT_FROM_TEXTURES", "D3DDEVCAPS2_PRESAMPLEDDMAPNPATCH",
    "D3DDEVCAPS2_VERTEXELEMENTSCANSHARESTREAMOFFSET",
])

D3DDTCAPS = Flags(DWORD, [
    "D3DDTCAPS_UBYTE4", "D3DDTCAPS_UBYTE4N", "D3DDTCAPS_SHORT2N", "D3DDTCAPS_SHORT4N",
    "D3DDTCAPS_USHORT2N", "D3DDTCAPS_USHORT4N", "D3DDTCAPS_UDEC3", "D3DDTCAPS_DEC3N",
    "D3DDTCAPS_FLOAT16_2", "D3DDTCAPS_FLOAT16_4",
])

#D3DPS_VERSION = Enum("DWORD", [
#    "D3DPS_VERSION(0,0)",
#    "D3DPS_VERSION(1,0)",
#    "D3DPS_VERSION(1,1)",
#    "D3DPS_VERSION(1,2)",
#    "D3DPS_VERSION(1,3)",
#    "D3DPS_VERSION(1,4)",
#    "D3DPS_VERSION(2,0)",
#    "D3DPS_VERSION(3,0)",
#])
D3DPS_VERSION = DWORD

#D3DVS_VERSION = Enum("DWORD", [
#    "D3DVS_VERSION(0,0)",
#    "D3DVS_VERSION(1,0)",
#    "D3DVS_VERSION(1,1)",
#    "D3DVS_VERSION(2,0)",
#    "D3DVS_VERSION(3,0)",
#])
D3DVS_VERSION = DWORD

D3DCAPS9 = Struct("D3DCAPS9", [
    (D3DDEVTYPE, "DeviceType"), (UINT, "AdapterOrdinal"),
    (D3DCAPS, "Caps"), (D3DCAPS2, "Caps2"), (D3DCAPS3, "Caps3"),
    (D3DPRESENT_INTERVAL, "PresentationIntervals"), (D3DCURSORCAPS, "CursorCaps"),
    (D3DDEVCAPS, "DevCaps"), (D3DPMISCCAPS, "PrimitiveMiscCaps"),
    (D3DPRASTERCAPS, "RasterCaps"), (D3DPCMPCAPS, "ZCmpCaps"),
    (D3DPBLENDCAPS, "SrcBlendCaps"), (D3DPBLENDCAPS, "DestBlendCaps"),
    (D3DPCMPCAPS, "AlphaCmpCaps"), (D3DPSHADECAPS, "ShadeCaps"),
    (D3DPTEXTURECAPS, "TextureCaps"), (D3DPTFILTERCAPS, "TextureFilterCaps"),
    (D3DPTFILTERCAPS, "CubeTextureFilterCaps"), (D3DPTFILTERCAPS, "VolumeTextureFilterCaps"),
    (D3DPTADDRESSCAPS, "TextureAddressCaps"), (D3DPTADDRESSCAPS, "VolumeTextureAddressCaps"),
    (D3DLINECAPS, "LineCaps"),
    (DWORD, "MaxTextureWidth"), (DWORD, "MaxTextureHeight"), (DWORD, "MaxVolumeExtent"),
    (DWORD, "MaxTextureRepeat"), (DWORD, "MaxTextureAspectRatio"), (DWORD, "MaxAnisotropy"),
    (Float, "MaxVertexW"),
    (Float, "GuardBandLeft"), (Float, "GuardBandTop"),
    (Float, "GuardBandRight"), (Float, "GuardBandBottom"),
    (Float, "ExtentsAdjust"),
    (D3DSTENCILCAPS, "StencilCaps"), (D3DFVFCAPS, "FVFCaps"), (D3DTEXOPCAPS, "TextureOpCaps"),
    (DWORD, "MaxTextureBlendStages"), (DWORD, "MaxSimultaneousTextures"),
    (D3DVTXPCAPS, "VertexProcessingCaps"),
    (DWORD, "MaxActiveLights"), (DWORD, "MaxUserClipPlanes"),
    (DWORD, "MaxVertexBlendMatrices"), (DWORD, "MaxVertexBlendMatrixIndex"),
    (Float, "MaxPointSize"),
    (DWORD, "MaxPrimitiveCount"), (DWORD, "MaxVertexIndex"),
    (DWORD, "MaxStreams"), (DWORD, "MaxStreamStride"),
    (D3DVS_VERSION, "VertexShaderVersion"), (DWORD, "MaxVertexShaderConst"),
    (D3DPS_VERSION, "PixelShaderVersion"), (Float, "PixelShader1xMaxValue"),
    (D3DDEVCAPS2, "DevCaps2"),
    (Float, "MaxNpatchTessellationLevel"), (DWORD, "Reserved5"),
    (UINT, "MasterAdapterOrdinal"), (UINT, "AdapterOrdinalInGroup"),
    (UINT, "NumberOfAdaptersInGroup"),
    (D3DDTCAPS, "DeclTypes"), (DWORD, "NumSimultaneousRTs"),
    (D3DPTFILTERCAPS, "StretchRectFilterCaps"),
    (D3DVSHADERCAPS2_0, "VS20Caps"), (D3DPSHADERCAPS2_0, "PS20Caps"),
    (D3DPTFILTERCAPS, "VertexTextureFilterCaps"),
    (DWORD, "MaxVShaderInstructionsExecuted"), (DWORD, "MaxPShaderInstructionsExecuted"),
    (DWORD, "MaxVertexShader30InstructionSlots"), (DWORD, "MaxPixelShader30InstructionSlots"),
])
[ 29113, 29113, 7804, 2235, 198, 2, 198, 2, 15069, 3648, 12, 10531, 37754, 11, 3457, 13, 198, 2, 1439, 6923, 33876, 13, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 4866, 198, 2,...
1.937797
6527
__version__ = '4.64.0'
[ 834, 9641, 834, 796, 705, 19, 13, 2414, 13, 15, 6, 198 ]
1.916667
12
import gym
from gym import spaces, error, utils
from gym.utils import seeding
import numpy as np
import configparser
from os import path
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca

font = {'family': 'sans-serif',
        'weight': 'bold',
        'size': 14}
[ 11748, 11550, 198, 6738, 11550, 1330, 9029, 11, 4049, 11, 3384, 4487, 198, 6738, 11550, 13, 26791, 1330, 384, 8228, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 4566, 48610, 198, 6738, 28686, 1330, 3108, 198, 11748, 2603, 29487, 8019, ...
2.83
100
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Lu."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test

if __name__ == "__main__":
  test.main()
[ 2, 15069, 2864, 383, 309, 22854, 37535, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, ...
3.826303
403
import multiprocessing as mp
import subprocess
import shutil
import os

from ..helper import make_path_safe, thirdparty_binary, filter_scp
from ..exceptions import CorpusError


def mfcc(mfcc_directory, num_jobs, feature_config, frequency_configs):
    """
    Multiprocessing function that converts wav files into MFCCs

    See http://kaldi-asr.org/doc/feat.html and
    http://kaldi-asr.org/doc/compute-mfcc-feats_8cc.html for more details on
    how MFCCs are computed.

    Also see https://github.com/kaldi-asr/kaldi/blob/master/egs/wsj/s5/steps/make_mfcc.sh
    for the bash script this function was based on.

    Parameters
    ----------
    mfcc_directory : str
        Directory to save MFCC feature matrices
    log_directory : str
        Directory to store log files
    num_jobs : int
        The number of processes to use in calculation
    mfcc_configs : list of :class:`~aligner.config.MfccConfig`
        Configuration object for generating MFCCs

    Raises
    ------
    CorpusError
        If the files per speaker exceeds the number of files that are
        allowed to be open on the computer (for Unix-based systems)
    """
    child_env = os.environ.copy()
    os.makedirs(os.path.join(mfcc_directory, 'log'), exist_ok=True)
    paths = []
    for j, p in frequency_configs:
        paths.append(feature_config.write(mfcc_directory, j, p))
    jobs = [(mfcc_directory, x, paths[x]) for x in range(num_jobs)]
    with mp.Pool(processes=num_jobs, initializer=init, initargs=(child_env,)) as pool:
        r = False
        try:
            results = [pool.apply_async(mfcc_func, args=i) for i in jobs]
            output = [p.get() for p in results]
        except OSError as e:
            print(dir(e))
            if e.errno == 24:
                r = True
            else:
                raise
    if r:
        raise (CorpusError(
            'There were too many files per speaker to process based on your OS settings. Please try to split your data into more speakers.'))
[ 11748, 18540, 305, 919, 278, 355, 29034, 198, 11748, 850, 14681, 198, 11748, 4423, 346, 198, 11748, 28686, 198, 198, 6738, 11485, 2978, 525, 1330, 787, 62, 6978, 62, 21230, 11, 2368, 10608, 62, 39491, 11, 8106, 62, 1416, 79, 198, 6738...
2.435774
833
""" A number of static methods for interpretting the state of the fantasy football pitch that aren't required directly by the client """ from ffai.core import Game, Action, ActionType from ffai.core.procedure import * from ffai.util.pathfinding import * from typing import Optional, List, Dict def blitz_used(game: Game) -> bool: for action in game.state.available_actions: if action.action_type == ActionType.START_BLITZ: return False return True def handoff_used(game: Game) -> bool: for action in game.state.available_actions: if action.action_type == ActionType.START_HANDOFF: return False return True def foul_used(game: Game) -> bool: for action in game.state.available_actions: if action.action_type == ActionType.START_FOUL: return False return True def pass_used(game: Game) -> bool: for action in game.state.available_actions: if action.action_type == ActionType.START_PASS: return False return True def get_players(game: Game, team: Team, include_own: bool = True, include_opp: bool = True, include_stunned: bool = True, include_used: bool = True, include_off_pitch: bool = False, only_blockable: bool = False, only_used: bool = False) -> List[Player]: players: List[Player] = [] selected_players: List[Player] = [] for iteam in game.state.teams: if iteam == team and include_own: players.extend(iteam.players) if iteam != team and include_opp: players.extend(iteam.players) for player in players: if only_blockable and not player.state.up: continue if only_used and not player.state.used: continue if include_stunned or not player.state.stunned: if include_used or not player.state.used: if include_off_pitch or (player.position is not None and not game.is_out_of_bounds(player.position)): selected_players.append(player) return selected_players def caging_squares_north_east(game: Game, protect_square: Square) -> List[Square]: # * At it's simplest, a cage requires 4 platers in the North-East, South-East, South-West and North-West # * positions, relative to the ball carrier, such that there is no more than 3 squares between the players in # * each of those adjacent compass directions. # * # * 1 3 # * xx-xx # * xx-xx # * --o-- # * xx-xx # * xx-xx # * 3 4 # * # * pitch is 26 long # * # * # * Basically we need one player in each of the corners: 1-4, but spaced such that there is no gap of 3 squares. # * If the caging player is in 1-4, but next to ball carrier, he ensures this will automatically be me # * # * The only exception to this is when the ball carrier is on, or near, the sideline. Then return the squares # * that can otherwise form the cage. 
# * caging_squares: List[Square] = [] x = protect_square.x y = protect_square.y if x <= game.state.pitch.width - 3: if y == game.state.pitch.height-2: caging_squares.append(game.get_square(x + 1, y + 1)) caging_squares.append(game.get_square(x + 2, y + 1)) caging_squares.append(game.get_square(x + 1, y)) caging_squares.append(game.get_square(x + 2, y)) elif y == game.state.pitch.height-1: caging_squares.append(game.get_square(x + 1, y)) caging_squares.append(game.get_square(x + 2, y)) else: caging_squares.append(game.get_square(x + 1, y + 1)) caging_squares.append(game.get_square(x + 1, y + 2)) caging_squares.append(game.get_square(x + 2, y + 1)) # caging_squares.append(game.state.pitch.get_square(x + 3, y + 3)) return caging_squares def caging_squares_north_west(game: Game, protect_square: Square) -> List[Square]: caging_squares: List[Square] = [] x = protect_square.x y = protect_square.y if x >= 3: if y == game.state.pitch.height-2: caging_squares.append(game.get_square(x - 1, y + 1)) caging_squares.append(game.get_square(x - 2, y + 1)) caging_squares.append(game.get_square(x - 1, y)) caging_squares.append(game.get_square(x - 2, y)) elif y == game.state.pitch.height-1: caging_squares.append(game.get_square(x - 1, y)) caging_squares.append(game.get_square(x - 2, y)) else: caging_squares.append(game.get_square(x - 1, y + 1)) caging_squares.append(game.get_square(x - 1, y + 2)) caging_squares.append(game.get_square(x - 2, y + 1)) # caging_squares.append(game.state.pitch.get_square(x - 3, y + 3)) return caging_squares def caging_squares_south_west(game: Game, protect_square: Square) -> List[Square]: caging_squares: List[Square] = [] x = protect_square.x y = protect_square.y if x >= 3: if y == 2: caging_squares.append(game.get_square(x - 1, y - 1)) caging_squares.append(game.get_square(x - 2, y - 1)) caging_squares.append(game.get_square(x - 1, y)) caging_squares.append(game.get_square(x - 2, y)) elif y == 1: caging_squares.append(game.get_square(x - 1, y)) caging_squares.append(game.get_square(x - 2, y)) else: caging_squares.append(game.get_square(x - 1, y - 1)) caging_squares.append(game.get_square(x - 1, y - 2)) caging_squares.append(game.get_square(x - 2, y - 1)) # caging_squares.append(game.state.pitch.get_square(x - 3, y - 3)) return caging_squares def caging_squares_south_east(game: Game, protect_square: Square) -> List[Square]: caging_squares: List[Square] = [] x = protect_square.x y = protect_square.y if x <= game.state.pitch.width-3: if y == 2: caging_squares.append(game.get_square(x + 1, y - 1)) caging_squares.append(game.get_square(x + 2, y - 1)) caging_squares.append(game.get_square(x + 1, y)) caging_squares.append(game.get_square(x + 2, y)) elif y == 1: caging_squares.append(game.get_square(x + 1, y)) caging_squares.append(game.get_square(x + 2, y)) else: caging_squares.append(game.get_square(x + 1, y - 1)) caging_squares.append(game.get_square(x + 1, y - 2)) caging_squares.append(game.get_square(x + 2, y - 1)) # caging_squares.append(game.get_square(x + 3, y - 3)) return caging_squares def is_caging_position(game: Game, player: Player, protect_player: Player) -> bool: return player.position.distance(protect_player.position) <= 2 and not is_castle_position_of(game, player, protect_player) def has_player_within_n_squares(game: Game, units: List[Player], square: Square, num_squares: int) -> bool: for cur in units: if cur.position.distance(square) <= num_squares: return True return False def has_adjacent_player(game: Game, square: Square) -> bool: return not 
game.get_adjacent_players(square) def is_castle_position_of(game: Game, player1: Player, player2: Player) -> bool: return player1.position.x == player2.position.x or player1.position.y == player2.position.y def is_bishop_position_of(game: Game, player1: Player, player2: Player) -> bool: return abs(player1.position.x - player2.position.x) == abs(player1.position.y - player2.position.y) def attacker_would_surf(game: Game, attacker: Player, defender: Player) -> bool: if (defender.has_skill(Skill.SIDE_STEP) and not attacker.has_skill(Skill.GRAB)) or defender.has_skill(Skill.STAND_FIRM): return False if not attacker.position.is_adjacent(defender.position): return False return direct_surf_squares(game, attacker.position, defender.position) def direct_surf_squares(game: Game, attack_square: Square, defend_square: Square) -> bool: defender_on_sideline: bool = on_sideline(game, defend_square) defender_in_endzone: bool = on_endzone(game, defend_square) if defender_on_sideline and defend_square.x == attack_square.x: return True if defender_in_endzone and defend_square.y == attack_square.y: return True if defender_in_endzone and defender_on_sideline: return True return False def reverse_x_for_right(game: Game, team: Team, x: int) -> int: if not game.is_team_side(Square(13, 3), team): res = game.state.pitch.width - 1 - x else: res = x return res def reverse_x_for_left(game: Game, team: Team, x: int) -> int: if game.is_team_side(Square(13, 3), team): res = game.state.pitch.width - 1 - x else: res = x return res def on_sideline(game: Game, square: Square) -> bool: return square.y == 1 or square.y == game.state.pitch.height - 1 def on_endzone(game: Game, square: Square) -> bool: return square.x == 1 or square.x == game.state.pitch.width - 1 def on_los(game: Game, team: Team, square: Square) -> bool: return (reverse_x_for_right(game, team, square.x) == 13) and 4 < square.y < 21 def los_squares(game: Game, team: Team) -> List[Square]: squares: List[Square] = [ game.get_square(reverse_x_for_right(game, team, 13), 5), game.get_square(reverse_x_for_right(game, team, 13), 6), game.get_square(reverse_x_for_right(game, team, 13), 7), game.get_square(reverse_x_for_right(game, team, 13), 8), game.get_square(reverse_x_for_right(game, team, 13), 9), game.get_square(reverse_x_for_right(game, team, 13), 10), game.get_square(reverse_x_for_right(game, team, 13), 11) ] return squares def distance_to_sideline(game: Game, square: Square) -> int: return min(square.y - 1, game.state.pitch.height - square.y - 2) def is_endzone(game, square: Square) -> bool: return square.x == 1 or square.x == game.state.pitch.width - 1 def last_block_proc(game) -> Optional[Block]: for i in range(len(game.state.stack.items) - 1, -1, -1): if isinstance(game.state.stack.items[i], Block): block_proc = game.state.stack.items[i] return block_proc return None def is_adjacent_ball(game: Game, square: Square) -> bool: ball_square = game.get_ball_position() return ball_square is not None and ball_square.is_adjacent(square) def squares_within(game: Game, square: Square, distance: int) -> List[Square]: squares: List[Square] = [] for i in range(-distance, distance+1): for j in range(-distance, distance+1): cur_square = game.get_square(square.x+i, square.y+j) if cur_square != square and not game.is_out_of_bounds(cur_square): squares.append(cur_square) return squares def distance_to_defending_endzone(game: Game, team: Team, position: Square) -> int: res = reverse_x_for_right(game, team, position.x) - 1 return res def distance_to_scoring_endzone(game: Game, 
team: Team, position: Square) -> int: res = reverse_x_for_left(game, team, position.x) - 1 return res #return game.state.pitch.width - 1 - reverse_x_for_right(game, team, position.x) def players_in_scoring_endzone(game: Game, team: Team, include_own: bool = True, include_opp: bool = False) -> List[Player]: players: List[Player] = get_players(game, team, include_own=include_own, include_opp=include_opp) selected_players: List[Player] = [] for player in players: if in_scoring_endzone(game, team, player.position): selected_players.append(player) return selected_players def in_scoring_endzone(game: Game, team: Team, square: Square) -> bool: return reverse_x_for_left(game, team, square.x) == 1 def players_in_scoring_distance(game: Game, team: Team, include_own: bool = True, include_opp: bool = True, include_stunned: bool = False) -> List[Player]: players: List[Player] = get_players(game, team, include_own=include_own, include_opp=include_opp, include_stunned=include_stunned) selected_players: List[Player] = [] for player in players: if distance_to_scoring_endzone(game, team, player.position) <= player.num_moves_left(): selected_players.append(player) return selected_players def distance_to_nearest_player(game: Game, team: Team, square: Square, include_own: bool = True, include_opp: bool = True, only_used: bool = False, include_used: bool = True, include_stunned: bool = True, only_blockable: bool = False) -> int: opps: List[Player] = get_players(game, team, include_own=include_own, include_opp=include_opp, only_used=only_used, include_used=include_used, include_stunned=include_stunned, only_blockable=only_blockable) cur_max = 100 for opp in opps: dist = opp.position.distance(square) cur_max = min(cur_max, dist) return cur_max def screening_distance(game: Game, from_square: Square, to_square: Square) -> float: # Return the "screening distance" between 3 squares. 
(To complete) # float dist =math.sqrt(math.pow(square.x - cur.position.x, 3) + math.pow(square.y - cur.position.y, 3)) return 0.0 def num_opponents_can_reach(game: Game, team: Team, square: Square) -> int: opps: List[Player] = get_players(game, team, include_own=False, include_opp=True) num_opps_reach: int = 0 for cur in opps: dist = max(square.x - cur.position.x, square.y - cur.position.y) if cur.state.stunned: continue move_allowed = cur.get_ma() + 2 if not cur.state.up: move_allowed -= 3 if dist < move_allowed: num_opps_reach += 1 return num_opps_reach def num_opponents_on_field(game: Game, team: Team) -> int: opps: List[Player] = get_players(game, team, include_own=False, include_opp=True) num_opponents = 0 for cur in opps: if cur.position is not None: num_opponents += 1 return num_opponents def number_opponents_closer_than_to_endzone(game: Game, team: Team, square: Square) -> int: opponents: List[Player] = get_players(game, team, include_own=False, include_opp=True) num_opps = 0 distance_square_endzone = distance_to_defending_endzone(game, team, square) for opponent in opponents: distance_opponent_endzone = distance_to_defending_endzone(game, team, opponent.position) if distance_opponent_endzone < distance_square_endzone: num_opps += 1 return num_opps def in_scoring_range(game: Game, player: Player) -> bool: return player.num_moves_left() >= distance_to_scoring_endzone(game, player.team, player.position) def players_in_scoring_range(game: Game, team: Team, include_own=True, include_opp=True, include_used=True, include_stunned=True) -> List[Player]: players: List[Player] = get_players(game, team, include_own=include_own, include_opp=include_opp, include_stunned=include_stunned, include_used=include_used) res: List[Player] = [] for player in players: if in_scoring_range(game, player): res.append(player) return res def players_in(game: Game, team: Team, squares: List[Square], include_own=True, include_opp=True, include_used=True, include_stunned=True, only_blockable=False) -> List[Player]: allowed_players: List[Player] = get_players(game, team, include_own=include_own, include_opp=include_opp, include_used=include_used, include_stunned=include_stunned, only_blockable=only_blockable) res: List[Player] = [] for square in squares: player: Optional[Player] = game.get_player_at(square) if player is None: continue if player in allowed_players: res.append(player) return res
[ 37811, 198, 32, 1271, 286, 9037, 5050, 329, 6179, 889, 262, 1181, 286, 262, 8842, 4346, 7078, 326, 3588, 470, 2672, 3264, 416, 198, 1169, 5456, 198, 37811, 198, 6738, 31246, 1872, 13, 7295, 1330, 3776, 11, 7561, 11, 7561, 6030, 198, ...
2.462126
6416
# -*- coding: utf-8 -*-
"""sb-fastapi CLI root."""
import logging

import click

from sb_backend.cli.commands.serve import serve


@click.group()
def cli():
    """Root command group (assumed definition: the sample uses `cli` without defining it)."""


cli.add_command(serve)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 36299, 12, 7217, 15042, 43749, 6808, 526, 15931, 198, 11748, 18931, 198, 198, 11748, 3904, 198, 6738, 264, 65, 62, 1891, 437, 13, 44506, 13, 9503, 1746, 13, 2655,...
2.566667
60
import dash
from dash import html

app = dash.Dash(__name__)

app.layout = html.Div(children=[
    html.H1('Data Science',
            style={'textAlign': 'center', 'color': '#0FD08D', 'font-size': '50px'}),
    html.H2('La carrera mas sexy del siglo XXI',
            style={'textAlign': 'center', 'color': '#009A64'}),
    html.P('Factores clave:'),
    html.Ul(children=[
        html.Li('Factor 1'),
        html.Li('Factor 2'),
        html.Li('Factor 3'),
        html.Li(['Source: ',
                 html.A('https://www.excelsior.com.mx/nacional/ciencia-de-datos-la-carrera-mas-sexy-del-xxi-en-la-unam/1323946',
                        href='https://www.excelsior.com.mx/nacional/ciencia-de-datos-la-carrera-mas-sexy-del-xxi-en-la-unam/1323946')
                 ])
    ])
])

if __name__ == '__main__':
    app.run_server(debug=True)
[ 11748, 14470, 198, 6738, 14470, 1330, 27711, 198, 198, 1324, 796, 14470, 13, 43041, 7, 834, 3672, 834, 8, 198, 198, 1324, 13, 39786, 796, 27711, 13, 24095, 7, 17197, 41888, 6494, 13, 39, 16, 10786, 6601, 5800, 3256, 198, 220, 220, 2...
1.437956
959
# -*- coding: utf-8 -*- ################################################################################### from gluon import current from helper import get_constant, execute_remote_cmd, config, get_datetime, \ log_exception, is_pingable, get_context_path from libvirt import * # @UnusedWildImport from log_handler import logger from nat_mapper import create_mapping, remove_mapping import math, shutil, libvirt, os, time, random import xml.etree.ElementTree as etree def _choose_datastore(): """ Chooses datastore from a list of available datastores """ # datastore_capacity = current.db(current.db.datastore.id >= 0).select(orderby = current.db.datastore.used datastores = current.db(current.db.datastore.id >= 0).select() datastore_length = len(datastores) logger.debug("datastore_lengtn" + str(datastore_length)) if(datastore_length == 0): raise Exception("No datastore found.") else: count = datastore_length available_datastores = {} while count != 0: available = datastores[datastore_length-count].capacity - datastores[datastore_length-count].used available_datastores[datastores[datastore_length-count]] = available count = count-1 z = [(i,available_datastores[i]) for i in available_datastores] z.sort(key=lambda x: x[1]) available_datastores = z logger.debug("available d" + str(available_datastores[-1])) first_elts = available_datastores[-1] first_elts = first_elts[0] logger.debug("selected database" + str(first_elts)) return first_elts def host_resources_used(host_id): """ Returns resources utilization of a host in MB, Count """ RAM = 0.0 CPU = 0.0 vms = current.db((current.db.vm_data.host_id == host_id) & (current.db.vm_data.status != current.VM_STATUS_UNKNOWN) & (current.db.vm_data.status != current.VM_STATUS_IN_QUEUE)).select() logger.debug("vms selected are: " + str(vms)) for vm_data in vms: RAM += vm_data.RAM CPU += vm_data.vCPU return (math.ceil(RAM),math.ceil(CPU)) def getVirshDomainConn(vm_details, host_ip=None, domain_name=None): """ Generic method to establish libvirt connection """ if vm_details != None: host_ip = vm_details.host_id.host_ip.private_ip domain_name = vm_details.vm_identity connection_object = libvirt.open("qemu+ssh://root@" + host_ip + "/system") domain = connection_object.lookupByName(domain_name) return (connection_object, domain) def getVirshDomain(vm_details): """ Generic method to establish libvirt connection """ (connection_object, domain) = getVirshDomainConn(vm_details) connection_object.close() return domain def _set_portgroup_in_vm(domain_name, portgroup, host_ip, vlan_tag): """ Set the vlan tag in network configuration of VM This is required to ensure that VM fetches IP of its vlan from DHCP """ (connection_object, domain) = getVirshDomainConn(None, host_ip, domain_name) xml = etree.fromstring(domain.XMLDesc(0)) source_network_element = xml.find('.//interface/source') source_network_string=etree.tostring(source_network_element) logger.debug("Source network is " + source_network_string) if source_network_string.find(" bridge=") != -1: logger.debug("Source is set to bridge adding <vlan><tag_id> to the interface tag ") root_new = xml.find('.//interface') root_new_vlan= etree.SubElement(root_new, 'vlan') root_new_tag= etree.SubElement(root_new_vlan, 'tag') root_new_tag.set('id',vlan_tag) logger.debug("After append root_new_vlan is " + etree.tostring(root_new_vlan)) elif source_network_string.find(" network=") != -1: logger.debug("Source is set to network adding portgroup to the source tag ") source_network_element.set('portgroup', portgroup) logger.debug("Changed 
source network is " + etree.tostring(source_network_element)) else: logger.debug("Neither VM nor vlan tagId is added in the xml" ) domain = connection_object.defineXML(etree.tostring(xml)) domain.destroy() domain.create() domain.isActive() connection_object.close() def _get_private_ip_mac(security_domain_id): """ Chooses a random Private IP from the pool, such that: - It is not assigned to any VM or host - It belongs to VLAN of given security domain """ vlans = current.db(current.db.security_domain.id == security_domain_id)._select(current.db.security_domain.vlan) private_ip_pool = current.db((~current.db.private_ip_pool.id.belongs(current.db(current.db.vm_data.private_ip != None)._select(current.db.vm_data.private_ip))) & (~current.db.private_ip_pool.id.belongs(current.db(current.db.host.host_ip != None)._select(current.db.host.host_ip))) & (current.db.private_ip_pool.vlan.belongs(vlans))).select(current.db.private_ip_pool.ALL, orderby='<random>').first() if private_ip_pool: return private_ip_pool else: sd = current.db.security_domain[security_domain_id] raise Exception(("Available MACs are exhausted for security domain '%s'." % sd.name)) def _choose_random_public_ip(): """ Chooses a random Public IP from the pool, such that: - It is not assigned to any VM - It is not assigned to any host - IP is marked active. """ public_ip_pool = current.db((~current.db.public_ip_pool.id.belongs(current.db(current.db.vm_data.public_ip != None)._select(current.db.vm_data.public_ip))) & (~current.db.public_ip_pool.id.belongs(current.db(current.db.host.public_ip != None)._select(current.db.host.public_ip))) & (current.db.public_ip_pool.is_active == True)) \ .select(current.db.public_ip_pool.ALL, orderby='<random>').first() return public_ip_pool def _choose_mac_ip(vm_properties): """ Chooses mac address and ip address for a vm to be installed. It also chooses a random public IP if requested """ if not 'private_ip' in vm_properties: private_ip_info = _get_private_ip_mac(vm_properties['security_domain']) vm_properties['private_ip'] = private_ip_info.private_ip vm_properties['mac_addr'] = private_ip_info.mac_addr vm_properties['vlan_name'] = private_ip_info.vlan.name vm_properties['vlan_tag'] = private_ip_info.vlan.vlan_tag if vm_properties['public_ip_req']: if 'public_ip' not in vm_properties: public_ip_pool = _choose_random_public_ip() if public_ip_pool: vm_properties['public_ip'] = public_ip_pool.public_ip else: raise Exception("Available Public IPs are exhausted.") else: vm_properties['public_ip'] = None def _choose_mac_ip_vncport(vm_properties): """ Chooses mac address, ip address and vncport for a vm to be installed """ _choose_mac_ip(vm_properties) start_range = int(get_constant('vncport_start_range')) end_range = int(get_constant('vncport_end_range')) vnc_ports_taken = current.db().select(current.db.vm_data.vnc_port) while True: random_vnc_port = random.randrange(start_range, end_range, 1) if not random_vnc_port in vnc_ports_taken: break; vm_properties['vnc_port'] = str(random_vnc_port) def find_new_host(RAM, vCPU): """ Select a random host from list of 3 hosts with available RAM and CPU Availability is checked with 200 percent over-commitment. 
""" hosts = current.db(current.db.host.status == 1).select() hosts = hosts.as_list(True,False) count = 3 selected_hosts = [] while count != 0 and hosts: host = random.choice(hosts) logger.debug("Checking host =" + host['host_name']) (used_ram, used_cpu) = host_resources_used(host['id']) logger.debug("used ram: " + str(used_ram) + " used cpu: " + str(used_cpu) + " host ram: " + str(host['RAM']) + " host cpu "+ str(host['CPUs'])) host_ram_after_200_percent_overcommitment = math.floor((host['RAM'] * 1024) * 2) host_cpu_after_200_percent_overcommitment = math.floor(host['CPUs'] * 2) logger.debug("ram available: %s cpu available: %s cpu < max cpu: %s" % ((( host_ram_after_200_percent_overcommitment - used_ram) >= RAM), ((host_cpu_after_200_percent_overcommitment - used_cpu) >= vCPU), (vCPU <= host['CPUs']) )) if((( host_ram_after_200_percent_overcommitment - used_ram) >= RAM) and ((host_cpu_after_200_percent_overcommitment - used_cpu) >= vCPU) and (vCPU <= host['CPUs'])): selected_hosts.append(host) count = count -1 hosts.remove(host) if selected_hosts: #Sort selected host list by Ram first then Cpu selected_host = sorted(selected_hosts,key=lambda k: k['RAM'])[0] return selected_host['id'] #If no suitable host found raise Exception("No active host is available for a new vm.") def allocate_vm_properties(vm_details): """ Allocates vm properties ( datastore, host, ip address, mac address, vnc port, ram, vcpus) """ logger.debug("Inside allocate_vm_properties()...") vm_properties = {} vm_properties['datastore'] = _choose_datastore() logger.debug("Datastore selected is: " + str(vm_properties['datastore'])) vm_properties['host'] = find_new_host(vm_details.RAM, vm_details.vCPU) logger.debug("Host selected is: " + str(vm_properties['host'])) vm_properties['public_ip_req'] = False if (vm_details.public_ip == None) else True vm_properties['security_domain'] = vm_details.security_domain _choose_mac_ip_vncport(vm_properties) logger.debug("MAC is : " + str(vm_properties['mac_addr']) + " IP is : " + str(vm_properties['private_ip']) + " VNCPORT is : " \ + str(vm_properties['vnc_port']) + " Vlan tag is " + str(vm_properties['vlan_tag']) ) vm_properties['ram'] = vm_details.RAM vm_properties['vcpus'] = vm_details.vCPU return vm_properties def create_vm_image(vm_details, datastore): """ Create a VM image - Creates a directory for the new VM using vm_identity - Find the location of template image requested for - Copy the template image from its location to new vm directory """ # Creates a directory for the new vm vm_directory_path = datastore.system_mount_point + '/' + get_constant('vms') + '/' + vm_details.vm_identity logger.debug("Creating vm directory...") if not os.path.exists (vm_directory_path): os.makedirs(vm_directory_path) else: raise Exception("Directory with same name as vmname already exists.") # Finds the location of template image that the user has requested for its vm. 
template = current.db.template[vm_details.template_id] vm_image_name = vm_directory_path + '/' + vm_details.vm_identity + '.qcow2' # Copies the template image from its location to new vm directory storage_type = config.get("GENERAL_CONF","storage_type") copy_command = 'ndmpcopy ' if storage_type == current.STORAGE_NETAPP_NFS else 'cp ' #template_dir = get_constant('vm_templates_datastore') if copy_command == 'cp ': template_location = datastore.system_mount_point + '/' + get_constant('templates_dir') + '/' + template.hdfile logger.debug("cp %s %s" % (template_location, vm_image_name)) rc = os.system("cp %s %s" % (template_location, vm_image_name)) if rc != 0: logger.error("Copy not successful") raise Exception("Copy not successful") else: logger.debug("Copied successfully") elif copy_command == 'ndmpcopy ': template_dir = template.datastore_id.path logger.debug(template_dir) logger.debug("Copy in progress when storage type is " + str(storage_type)) command_to_execute = copy_command + template_dir + '/' + get_constant("templates_dir") + '/' + \ template.hdfile + ' ' + datastore.path + '/' + get_constant('vms') + '/' + \ vm_details.vm_identity logger.debug("ndmpcopy command: " + str(command_to_execute)) command_output = execute_remote_cmd(datastore.ds_ip, datastore.username, command_to_execute, datastore.password) logger.debug(command_output) logger.debug("Copied successfully.") try: vm_template_name = datastore.system_mount_point + '/' + get_constant('vms') + '/' + vm_details.vm_identity + '/' + template.hdfile os.rename(vm_template_name, vm_image_name) logger.debug("Template renamed successfully") except: logger.debug("Template rename not successful") raise Exception("Template rename not successful") return (template, vm_image_name) def _get_install_command(vm_details, vm_image_location, vm_properties): """ Generates install command for vm """ template = vm_properties['template'] bus = ',bus=virtio' optional = ' --import --os-type=' + template.os model = ',model=virtio' if (template.arch != 'amd64' and template.os == 'Linux'): optional = optional + ' --arch=' + template.arch + ' ' format_command = '' if (template.type == 'QCOW2'): format_command = ',format=qcow2' if (template.os == 'Windows'): bus = '' model = '' install_command = 'virt-install \ --name=' + vm_details.vm_identity + ' \ --ram=' + str(vm_properties['ram']) + ' \ --vcpus=' + str(vm_properties['vcpus']) + optional + ' \ --disk path=' + vm_image_location + format_command + bus + ',cache=none' + ' \ --network network='+current.LIBVIRT_NETWORK + model + ',mac=' + vm_properties['mac_addr'] + ' \ --graphics vnc,port=' + vm_properties['vnc_port'] + ',listen=0.0.0.0,password=duolc \ --noautoconsole \ --autostart \ --force' return install_command def _generate_disk_xml(diskpath,target_disk): """ Generates xml for defining new disk """ root_element = etree.Element('disk',attrib = {'type':'block','device':'disk'}) etree.SubElement(root_element, 'driver',attrib = {'name':'qemu','cache':'none', 'type':'qcow2'}) etree.SubElement(root_element, 'source', attrib = {'dev':diskpath}) etree.SubElement(root_element, 'target', attrib = {'dev': target_disk}) return (etree.tostring(root_element)) def create_extra_disk_image(vm_details, disk_name, size, datastore): """ Create extra disk image """ vm_extra_disks_directory_path = datastore.system_mount_point + '/' + get_constant('extra_disks_dir') + '/' + \ datastore.ds_name + '/' + vm_details.vm_identity if not os.path.exists (vm_extra_disks_directory_path): logger.debug("Making Directory") 
os.makedirs(vm_extra_disks_directory_path) diskpath = vm_extra_disks_directory_path + '/' + disk_name command= "qemu-img create -f qcow2 "+ diskpath + " " + str(size) + "G" output = os.system(command) return False if output != 0 else True def attach_disk(vm_details, disk_name, hostip, already_attached_disks, new_vm): """ Attach given disk to the VM """ try: (connection_object, domain) = getVirshDomainConn(None, hostip, vm_details.vm_identity) #already_attached_disks = len(current.db(current.db.attached_disks.vm_id == vm.id).select()) logger.debug("Value of alreadyattached is : " + str(already_attached_disks)) (diskpath, device_present, disk_size) = get_extra_disk_location(vm_details.datastore_id, vm_details.vm_identity, disk_name, True) if not device_present: raise Exception("Device to be attached %s missing" %(diskpath)) # Attaching disk to vm using libvirt API target_disk = "vd" + chr(97 + already_attached_disks + 1) logger.debug(target_disk) logger.debug("...................") xmlDescription = _generate_disk_xml(diskpath, target_disk) logger.debug(xmlDescription) logger.debug("new vm is %s " % new_vm) if new_vm: logger.debug("Starting to attach disk on new vm request.") domain.destroy() logger.debug("VM destroyed") domain.attachDeviceFlags(xmlDescription, VIR_DOMAIN_AFFECT_CONFIG) logger.debug("Disk attached") logger.debug("Turn on vm") domain.create() logger.debug("VM started") domain.isActive() elif vm_details.status == current.VM_STATUS_SHUTDOWN: logger.debug("Starting to attach disk while vm is shutdown.") domain.attachDeviceFlags(xmlDescription, VIR_DOMAIN_AFFECT_CONFIG) logger.debug("Disk attached") else: raise Exception("VM is not in shutdown state. Check its status on host") xmlfile = domain.XMLDesc(0) domain = connection_object.defineXML(xmlfile) logger.debug("VM XML redefined") connection_object.close() return disk_size except: logger.exception('Exception: ') return 0 def serve_extra_disk_request(vm_details, disk_size, host_ip, new_vm = False): """ Serves extra disk request and updates db """ logger.debug("Starting to serve extra disk request...") logger.debug("new vm is %s " % new_vm) datastore = _choose_datastore() already_attached_disks = len(current.db(current.db.attached_disks.vm_id == vm_details.id).select()) disk_name = vm_details.vm_identity + "_disk" + str(already_attached_disks + 1) + ".qcow2" disk_created = create_extra_disk_image(vm_details, disk_name, disk_size, datastore) vm_details.datastore_id = datastore.id if disk_created: if (attach_disk(vm_details, disk_name, host_ip, already_attached_disks, new_vm)): current.db.attached_disks.insert(vm_id = vm_details.id, datastore_id = datastore.id , attached_disk_name = disk_name, capacity = disk_size) current.db(current.db.datastore.id == datastore.id).update(used = int(datastore.used) + int(disk_size)) return True return False def launch_vm_on_host(vm_details, vm_image_location, vm_properties): """ Launches a vm image on host """ attach_disk_status_message = '' install_command = _get_install_command(vm_details, vm_image_location, vm_properties) # Starts installing a vm host_ip = current.db.host[vm_properties['host']].host_ip.private_ip logger.debug("Installation started...") logger.debug("Host is "+ host_ip) logger.debug("Installation command : " + install_command) command_output = execute_remote_cmd(host_ip, 'root', install_command) logger.debug(command_output) logger.debug("Starting to set portgroup in vm...") _set_portgroup_in_vm(vm_details['vm_identity'], vm_properties['vlan_name'], host_ip, 
vm_properties['vlan_tag']) logger.debug("Portgroup set in vm") # Serving HDD request if (int(vm_details.extra_HDD) != 0): if (serve_extra_disk_request(vm_details, vm_details.extra_HDD, host_ip, new_vm = True)): message = "Attached extra disk successfully." attach_disk_status_message += message logger.debug(message) else: attach_disk_status_message += "Attached extra disk failed." return attach_disk_status_message def check_if_vm_defined(hostip, vmname): """ Checks if a newly created vm is successfully defined """ vm_defined = False try: connection_object = libvirt.openReadOnly('qemu+ssh://root@'+ hostip +'/system') domain = connection_object.lookupByName(vmname) if domain.ID() in connection_object.listDomainsID(): vm_defined = True connection_object.close() return vm_defined except: return False def _free_vm_properties(vm_details, vm_properties): """ Frees vm properties in-case installation has failed mid-way """ logger.debug("VM installation fails..Starting to free vm properties") if vm_properties: host_ip_of_vm = current.db.host[vm_properties['host']].host_ip.private_ip logger.debug("Host IP of vm is " + str(host_ip_of_vm)) if check_if_vm_defined(host_ip_of_vm, vm_details.vm_identity): connection_object = libvirt.open('qemu+ssh://root@'+ host_ip_of_vm +'/system') domain = connection_object.lookupByName(vm_details.vm_identity) logger.debug("Starting to delete vm from host..") domain.destroy() domain.undefine() connection_object.close() logger.debug("VM deleted.") current.db(current.db.attached_disks.vm_id == vm_details.id).delete() if 'datastore' in vm_properties: vm_directory_path = vm_properties['datastore'].system_mount_point + '/' + get_constant('vms') + '/' + vm_details.vm_identity vm_extra_disk_dir_path = vm_properties['datastore'].system_mount_point + '/' + get_constant('extra_disks_dir') + '/' + vm_properties['datastore'].ds_name + '/' + vm_details.vm_identity if os.path.exists (vm_directory_path): logger.debug("Starting to delete vm directory.") shutil.rmtree(vm_directory_path) if os.path.exists (vm_extra_disk_dir_path): logger.debug("Starting to delete vm extra disk directory.") shutil.rmtree(vm_extra_disk_dir_path) return def update_db_after_vm_installation(vm_details, vm_properties, parent_id = None): """ Updates db after a vm is installed successfully """ logger.debug("Starting to update db after vm installation..") hostid = vm_properties['host'] datastore = vm_properties['datastore'] template_hdd = vm_properties['template'].hdd logger.debug("Inside update db after installation") logger.debug(vm_properties) # Updating the used entry of datastore current.db(current.db.datastore.id == datastore.id).update(used = int(datastore.used) + int(vm_details.extra_HDD) + int(template_hdd)) private_ip_id = current.db.private_ip_pool(private_ip=vm_properties['private_ip']).id public_ip_id = None if vm_properties['public_ip'] != None: public_ip_id = current.db.public_ip_pool(public_ip=vm_properties['public_ip']).id if parent_id: vm_status = current.VM_STATUS_SHUTDOWN else: vm_status = current.VM_STATUS_RUNNING # Update vm_data table current.db(current.db.vm_data.id == vm_details.id).update( host_id = hostid, extra_HDD = vm_details.extra_HDD, datastore_id = datastore.id, vnc_port = vm_properties['vnc_port'], private_ip = private_ip_id, public_ip = public_ip_id, start_time = get_datetime(), parent_id = parent_id, status = vm_status) logger.debug("Updated db") return # Installs a vm def install(parameters): """ Installs a vm """ vmid = parameters['vm_id'] logger.debug("In install() 
function...") vm_details = current.db.vm_data[vmid] vm_properties = None try: # Fetches vm details from vm_data table logger.debug("VM details are: " + str(vm_details)) # Calling allocate_vm_properties function vm_properties = allocate_vm_properties(vm_details) # Calling create_vm_image function (vm_properties['template'], vm_image_location) = create_vm_image(vm_details, vm_properties['datastore']) # Calling launch_vm_on_host attach_disk_status_message = launch_vm_on_host(vm_details, vm_image_location, vm_properties) # Checking if vm has been installed successfully assert(check_if_vm_defined(current.db.host[vm_properties['host']].host_ip.private_ip, vm_details.vm_identity)), "VM is not installed. Check logs." if vm_properties['public_ip_req']: create_mapping(vm_properties['public_ip'], vm_properties['private_ip']) # Update database after vm installation update_db_after_vm_installation(vm_details, vm_properties) message = "VM is installed successfully." + attach_disk_status_message logger.debug("Task Status: SUCCESS Message: %s " % message) return (current.TASK_QUEUE_STATUS_SUCCESS, message) except: if vm_properties != None: _free_vm_properties(vm_details, vm_properties) logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (current.TASK_QUEUE_STATUS_FAILED, log_exception()) def start(parameters): """ Starts a vm """ logger.debug("Inside start() function") vm_id = parameters['vm_id'] vm_details = current.db.vm_data[vm_id] try: domain = getVirshDomain(vm_details) if domain.info()[0] == VIR_DOMAIN_RUNNING: raise Exception("VM is already running. Check vm status on host.") domain.create() current.db(current.db.vm_data.id == vm_id).update(status = current.VM_STATUS_RUNNING) message = vm_details.vm_identity + " is started successfully." logger.debug("Task Status: SUCCESS Message: %s " % message) return (current.TASK_QUEUE_STATUS_SUCCESS, message) except: logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (current.TASK_QUEUE_STATUS_FAILED, log_exception()) def suspend(parameters): """ Suspends a vm """ logger.debug("Inside suspend() function") vm_id = parameters['vm_id'] vm_details = current.db.vm_data[vm_id] try: domain = getVirshDomain(vm_details) if domain.info()[0] == VIR_DOMAIN_PAUSED: raise Exception("VM is already paused. Check vm status on host.") domain.suspend() current.db(current.db.vm_data.id == vm_id).update(status = current.VM_STATUS_SUSPENDED) message = vm_details.vm_identity + " is suspended successfully." logger.debug("Task Status: SUCCESS Message: %s " % message) return (current.TASK_QUEUE_STATUS_SUCCESS, message) except: logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (current.TASK_QUEUE_STATUS_FAILED, log_exception()) def resume(parameters): """ Resumes a vm """ logger.debug("Inside resume() function") vm_id = parameters['vm_id'] vm_details = current.db.vm_data[vm_id] try: domain = getVirshDomain(vm_details) if domain.info()[0] == VIR_DOMAIN_RUNNING: raise Exception("VM is already running. Check vm status on host.") domain.resume() current.db(current.db.vm_data.id == vm_id).update(status = current.VM_STATUS_RUNNING) message = vm_details.vm_identity + " is resumed successfully." 
logger.debug("Task Status: SUCCESS Message: %s " % message) return (current.TASK_QUEUE_STATUS_SUCCESS, message) except: logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (current.TASK_QUEUE_STATUS_FAILED, log_exception()) def destroy(parameters): """ Destroys a vm forcefully """ logger.debug("Inside destroy() function") vm_id = parameters['vm_id'] vm_details = current.db.vm_data[vm_id] logger.debug(str(vm_details)) try: domain = getVirshDomain(vm_details) if domain.info()[0] == VIR_DOMAIN_SHUTOFF: raise Exception("VM is already shutoff. Check vm status on host.") domain.destroy() current.db(current.db.vm_data.id == vm_id).update(status = current.VM_STATUS_SHUTDOWN) message = vm_details.vm_identity + " is destroyed successfully." logger.debug("Task Status: SUCCESS Message: %s " % message) return (current.TASK_QUEUE_STATUS_SUCCESS, message) except: logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (current.TASK_QUEUE_STATUS_FAILED, log_exception()) def shutdown(parameters): """ Destroys a vm gracefully """ logger.debug("Inside shutdown() function") vm_id = parameters['vm_id'] vm_details = current.db.vm_data[vm_id] logger.debug(str(vm_details)) try: domain = getVirshDomain(vm_details) if domain.info()[0] == VIR_DOMAIN_SHUTOFF: raise Exception("VM is already shutoff. Check vm status on host.") domain.managedSave() current.db(current.db.vm_data.id == vm_id).update(status = current.VM_STATUS_SHUTDOWN) message = vm_details.vm_identity + " is shutdown successfully." logger.debug("Task Status: SUCCESS Message: %s " % message) return (current.TASK_QUEUE_STATUS_SUCCESS, message) except: logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (current.TASK_QUEUE_STATUS_FAILED, log_exception()) def _clean_up_database_after_vm_deletion(vm_details): """ Cleans up database after vm deletion """ logger.debug("Inside clean up database after vm deletion () function...") # moving vm image folder to archives folder archive_directory_path = vm_details.datastore_id.system_mount_point + '/' + get_constant('archives_dir') if not os.path.exists(archive_directory_path): os.makedirs(archive_directory_path) source_file = vm_details.datastore_id.system_mount_point + '/' + get_constant('vms') + '/' + vm_details.vm_identity archive_filename = vm_details.vm_identity + str(get_datetime()) logger.debug(archive_filename) destination_file = archive_directory_path + '/' + archive_filename shutil.move(source_file, destination_file) # removing hdd vm_extra_disks_directory_path = vm_details.datastore_id.system_mount_point + '/' + get_constant('extra_disks_dir') + '/' + \ vm_details.datastore_id.ds_name + "/" + vm_details.vm_identity if os.path.exists(vm_extra_disks_directory_path): shutil.rmtree(vm_extra_disks_directory_path) # updating the used entry of database current.db(current.db.datastore.id == vm_details.datastore_id).update(used = int(vm_details.datastore_id.used) - \ (int(vm_details.extra_HDD) + int(vm_details.template_id.hdd))) # updating task_queue_event entry to remove reference of VM current.db(current.db.task_queue_event.vm_id == vm_details.id).update(vm_id = None) # deleting entry of extra disk of vm current.db(current.db.attached_disks.vm_id == vm_details.id).delete() logger.debug("Database cleaned") def vm_has_snapshots(vm_id): """ Checks if a vm has snapshot(s) """ if (current.db(current.db.snapshot.vm_id == vm_id).select()): return True else: return False def delete(parameters): """ Deletes a vm """ logger.debug("Inside delete() function") vm_id = 
parameters['vm_id'] vm_details = current.db.vm_data[vm_id] try: domain = getVirshDomain(vm_details) logger.debug(str(vm_details.status)) if (vm_details.status == current.VM_STATUS_RUNNING or vm_details.status == current.VM_STATUS_SUSPENDED): logger.debug("Vm is not shutoff. Shutting it off first.") domain.destroy() logger.debug("Starting to delete it...") domain.undefineFlags(VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA ) if vm_details.public_ip: remove_mapping(vm_details.public_ip.public_ip, vm_details.private_ip.private_ip) message = vm_details.vm_identity + " is deleted successfully." logger.debug(message) _clean_up_database_after_vm_deletion(vm_details) current.db(current.db.vm_data.id == vm_id).delete() current.db.commit() logger.debug("Task Status: SUCCESS Message: %s " % message) return (current.TASK_QUEUE_STATUS_SUCCESS, message) except: logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (current.TASK_QUEUE_STATUS_FAILED, log_exception()) def migrate_domain_with_snapshots(vm_details, destination_host_ip, domain, domain_snapshots_list, current_snapshot_name, flags, vm_backup_during_migration): """ Migrate domain with snapshots """ # XML dump of snapshot(s) of the vm logger.debug("Starting to take xml dump of the snapshot(s) of the vm... ") if not os.path.exists(vm_backup_during_migration): os.makedirs(vm_backup_during_migration) for domain_snapshot in domain_snapshots_list: logger.debug("snapshot name is " + str(domain_snapshot)) dump_xml_path = vm_backup_during_migration + '/' + 'dump_' + domain_snapshot snapshot_dumpxml_command = 'virsh snapshot-dumpxml %s %s > %s' % ( vm_details.vm_identity, domain_snapshot, dump_xml_path) logger.debug("Taking xml dump of " + str(domain_snapshot)) command_output = execute_remote_cmd(vm_details.host_id.host_ip.private_ip, 'root', snapshot_dumpxml_command) logger.debug(command_output) logger.debug("XML dump of " + str(domain_snapshot) + " succeeded.") # Delete snapshot(s) of the vm and migrate it to destination host logger.debug("Starting to delete snapshots of the vm....") for domain_snapshot in domain_snapshots_list: snapshot = domain.snapshotLookupByName(domain_snapshot, 0) snapshot.delete(0) logger.debug("Migrating the vm to destination host...") domain.migrateToURI("qemu+ssh://root@" + destination_host_ip + "/system", flags , None, 0) # Redefine all the snapshot(s) of the vm on the destination host and set current snapshot logger.debug("Starting to redefine all the snapshot(s) of the domain...") for domain_snapshot in domain_snapshots_list: redefine_xml_path = vm_backup_during_migration + '/' + 'dump_' + domain_snapshot snapshot_redefine_command = 'virsh snapshot-create --redefine %s %s ' % (vm_details.vm_identity, redefine_xml_path) command_output = execute_remote_cmd(destination_host_ip, 'root', snapshot_redefine_command) logger.debug(command_output) snapshot_current_command = 'virsh snapshot-current %s %s' % (vm_details.vm_identity, current_snapshot_name) command_output = execute_remote_cmd(destination_host_ip, 'root', snapshot_current_command) logger.debug(command_output) return def _clean_migration_directory(vm_backup_during_migration): """ Delete directory created for storing dumpxml of vm snapshots """ if os.path.exists(vm_backup_during_migration): shutil.rmtree(vm_backup_during_migration) return def undo_migration(vm_details, domain_snapshots_list, current_snapshot_name, vm_backup_during_migration): """ Undo the migration """ if domain_snapshots_list: # Redefine the snapshots of the vm on the source host
logger.debug("Starting to redefine all the snapshot(s) of the vm on the source host...") for domain_snapshot in domain_snapshots_list: redefine_xml_path = vm_backup_during_migration + '/' + 'dump_' + domain_snapshot snapshot_redefine_command = 'virsh snapshot-create --redefine %s %s ' % (vm_details.vm_identity, redefine_xml_path) command_output = execute_remote_cmd(vm_details.host_id.host_ip.private_ip, 'root', snapshot_redefine_command, None, True) logger.debug(command_output) snapshot_current_command = 'virsh snapshot-current %s %s' % (vm_details.vm_identity, current_snapshot_name) command_output = execute_remote_cmd(vm_details.host_id.host_ip.private_ip, 'root', snapshot_current_command, None, True) logger.debug(command_output) # Delete directory created for storing dumpxml of vm snapshots _clean_migration_directory(vm_backup_during_migration) return def migrate_domain(vm_id, destination_host_id=None, live_migration=False): """ Migrate domain """ vm_details = current.db.vm_data[vm_id] domain_snapshots_list = [] current_snapshot_name = '' vm_migration_directory = get_constant('vm_migration_data') vm_backup_during_migration = vm_details.datastore_id.system_mount_point + '/' + vm_migration_directory + '/' + \ vm_details.vm_identity if destination_host_id == None: destination_host_id = find_new_host(vm_details.RAM, vm_details.vCPU) destination_host_ip = current.db.host[destination_host_id].host_ip.private_ip flags = VIR_MIGRATE_PEER2PEER|VIR_MIGRATE_PERSIST_DEST|VIR_MIGRATE_UNDEFINE_SOURCE|VIR_MIGRATE_UNSAFE if live_migration: flags |= VIR_MIGRATE_TUNNELLED|VIR_MIGRATE_LIVE if vm_details.status == current.VM_STATUS_SUSPENDED: logger.debug("Vm is suspended") flags |= VIR_MIGRATE_TUNNELLED|VIR_MIGRATE_PAUSED elif vm_details.status == current.VM_STATUS_SHUTDOWN: logger.debug("Vm is shut off") flags |= VIR_MIGRATE_OFFLINE logger.debug("Flags: " + str(flags)) try: domain = getVirshDomain(vm_details) dom_snapshot_names = domain.snapshotListNames(0) for snapshot in current.db(current.db.snapshot.vm_id == vm_id).select(): logger.debug("snapshot:" + str(snapshot.snapshot_name)) domain_snapshots_list.append(snapshot.snapshot_name) dom_snapshot_names.remove(snapshot.snapshot_name) logger.debug("domain snapshot list is " + str(domain_snapshots_list)) for dom_snapshot in dom_snapshot_names: logger.debug("Deleting orphan snapshot %s" %(dom_snapshot)) snapshot = domain.snapshotLookupByName(dom_snapshot, 0) snapshot.delete(0) if domain_snapshots_list: current_snapshot = domain.snapshotCurrent(0) current_snapshot_name = current_snapshot.getName() migrate_domain_with_snapshots(vm_details, destination_host_ip, domain, domain_snapshots_list, current_snapshot_name, flags, vm_backup_during_migration) else: domain.migrateToURI("qemu+ssh://root@" + destination_host_ip + "/system", flags , None, 0) vm_details.update_record(host_id = destination_host_id) current.db.commit() # Delete directory created for storing dumpxml of vm snapshot _clean_migration_directory(vm_backup_during_migration) message = vm_details.vm_identity + " is migrated successfully." 
logger.debug("Task Status: SUCCESS Message: %s " % message) return (current.TASK_QUEUE_STATUS_SUCCESS, message) except: undo_migration(vm_details, domain_snapshots_list, current_snapshot_name, vm_backup_during_migration) logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (current.TASK_QUEUE_STATUS_FAILED, log_exception()) def migrate_domain_datastore(vmid, destination_datastore_id, live_migration=False): """ Migrate VM domain from one datastore to another. - Copy VM Image to new datastore - Update VM XML definition - Update database """ logger.debug(sys.path) vm_details = current.db.vm_data[vmid] # datastore_id = vm_details["datastore_id"] logger.debug("Inside live disk migration block") try: (connection_object, domain) = getVirshDomainConn(vm_details) datastore = current.db.datastore[destination_datastore_id] vm_directory_path = datastore.system_mount_point + get_constant('vms') + '/' + vm_details.vm_identity logger.debug("Creating vm directory on other datastore...") if not os.path.exists (vm_directory_path): os.makedirs(vm_directory_path) diskpath = vm_directory_path + '/' + vm_details.vm_identity + '.qcow2' current_disk_path = vm_details.datastore_id.system_mount_point + get_constant('vms') + '/' + vm_details.vm_identity current_disk_file = current_disk_path + '/' + vm_details.vm_identity + '.qcow2' logger.debug(current_disk_file) xmlfile = domain.XMLDesc(0) if(live_migration==False): rc = os.system("cp %s %s" % (current_disk_file, diskpath)) if rc != 0: logger.error("Copy not successful") raise Exception("Copy not successful") else: logger.debug("Copied successfully") else: if domain.isActive: domain.undefine() root = etree.fromstring(xmlfile) target_elem = root.find("devices/disk/target") target_disk = target_elem.get('dev') # # destxml = generate_blockcopy_xml(diskpath,target_disk) flag = VIR_DOMAIN_BLOCK_REBASE_SHALLOW | VIR_DOMAIN_BLOCK_REBASE_COPY domain.blockRebase(target_disk, diskpath, 0, flag) block_info_list = domain.blockJobInfo(current_disk_file,0) while(block_info_list['end'] != block_info_list['cur']): logger.debug("time to sleep") time.sleep(60) block_info_list = domain.blockJobInfo(current_disk_file,0) domain.blockJobAbort(current_disk_file, VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT) source_elem = root.find("devices/disk/source") source_elem.set('file',diskpath) newxml_file = etree.tostring(root) domain = connection_object.defineXML(newxml_file) vm_details.update_record(datastore_id=destination_datastore_id) if os.path.exists (diskpath): os.remove(current_disk_file) restore_symboltable_path = current_disk_path+"/restore_symboltable" if os.path.exists (restore_symboltable_path): logger.debug(restore_symboltable_path) os.remove(restore_symboltable_path) os.rmdir(current_disk_path) connection_object.close() message = vm_details.vm_identity + " is migrated successfully to new datastore." 
logger.debug("Task Status: SUCCESS Message: %s " % message) return (current.TASK_QUEUE_STATUS_SUCCESS, message) except: #undo_datastore_migration(vm_details, domain, diskpath, current_disk_file, vm_directory_path, datastore_id) connection_object.close() logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (current.TASK_QUEUE_STATUS_FAILED, log_exception()) def undo_datastore_migration(vm_details, domain, diskpath, current_disk_file, vm_directory_path, datastore_id): """ Undo migration in case of any issue """ # undo databse changes vm_details.update_record(datastore_id=datastore_id) if domain.isActive: logger.debug("domain is active") block_info_list = domain.blockJobInfo(current_disk_file,0) if(bool(block_info_list) == True): while(block_info_list['end'] != block_info_list['cur']): logger.debug("time to sleep") time.sleep(60) block_info_list = domain.blockJobInfo(current_disk_file,0) if(block_info_list['end'] == block_info_list['cur']): domain.blockJobAbort(current_disk_file) block_info_list = domain.blockJobInfo(current_disk_file,0) if os.path.exists (diskpath): os.remove(diskpath) os.rmdir(vm_directory_path) def migrate(parameters): """ Migrates VM to new host """ vmid = parameters['vm_id'] logger.debug("Inside migrate() function for vm_id: "+str(vmid)) destination_host_id = parameters['destination_host'] if parameters['live_migration'] == 'on': live_migration = True else: live_migration = False return migrate_domain(vmid, destination_host_id, live_migration) def migrate_datastore(parameters): """ Migrates VM to new datastore """ logger.debug("Inside migrate_datastore() function") vmid = parameters['vm_id'] destination_ds_id = parameters['destination_ds'] if parameters['live_migration'] == 'on': live_migration = True else: live_migration = False return migrate_domain_datastore(vmid, destination_ds_id, live_migration) def snapshot(parameters): """ Snapshots a vm """ logger.debug("Inside snapshot() function") vm_id = parameters['vm_id'] snapshot_type = parameters['snapshot_type'] try: vm_details = current.db.vm_data[vm_id] if is_pingable(str(vm_details.private_ip.private_ip)): logger.debug("VM is pingable. Starting to start with snapshotting...") if snapshot_type != current.SNAPSHOT_USER: snapshots = current.db((current.db.snapshot.vm_id == vm_id) & (current.db.snapshot.type == snapshot_type)).select() #Delete the existing Daily/Monthly/Yearly snapshot for snapshot_cron in snapshots: logger.debug(snapshot_cron) delete_snapshot({'vm_id':vm_id, 'snapshot_id':snapshot_cron.id}) snapshot_name = get_datetime().strftime("%I:%M%p_%B%d,%Y") domain = getVirshDomain(vm_details) xmlDesc = "<domainsnapshot><name>%s</name></domainsnapshot>" % (snapshot_name) domain.snapshotCreateXML(xmlDesc, 0) message = "Snapshotted successfully." 
current.db.snapshot.insert(vm_id = vm_id, datastore_id = vm_details.datastore_id, snapshot_name = snapshot_name, type = snapshot_type) logger.debug("Task Status: SUCCESS Message: %s " % message) return (current.TASK_QUEUE_STATUS_SUCCESS, message) else: message = "Unable to ping VM before snapshoting: %s" % (vm_details.private_ip.private_ip) raise Exception("Unable to ping VM before snapshoting: %s" % (vm_details.private_ip.private_ip)) except: logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (current.TASK_QUEUE_STATUS_FAILED, log_exception()) def revert(parameters): """ Reverts to snapshot """ logger.debug("Inside revert snapshot() function") vm_id = parameters['vm_id'] snapshotid = parameters['snapshot_id'] vm_details = current.db.vm_data[vm_id] try: domain = getVirshDomain(vm_details) snapshot_name = current.db(current.db.snapshot.id == snapshotid).select().first()['snapshot_name'] snapshot = domain.snapshotLookupByName(snapshot_name, 0) domain.revertToSnapshot(snapshot, 0) message = "Reverted to snapshot successfully." logger.debug("Task Status: SUCCESS Message: %s " % message) return (current.TASK_QUEUE_STATUS_SUCCESS, message) except: logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (current.TASK_QUEUE_STATUS_FAILED, log_exception()) def delete_snapshot(parameters): """ Deletes a snapshot """ logger.debug("Inside delete snapshot() function") vm_id = parameters['vm_id'] snapshotid = parameters['snapshot_id'] vm_details = current.db.vm_data[vm_id] logger.debug(str(vm_details)) try: domain = getVirshDomain(vm_details) snapshot_name = current.db(current.db.snapshot.id == snapshotid).select().first()['snapshot_name'] snapshot = None try: snapshot = domain.snapshotLookupByName(snapshot_name, 0) except libvirtError: logger.debug("Snapshot %s not found" %(snapshot_name)) if snapshot != None: snapshot.delete(0) message = "Deleted snapshot successfully." logger.debug(message) current.db(current.db.snapshot.id == snapshotid).delete() logger.debug("Task Status: SUCCESS Message: %s " % message) return (current.TASK_QUEUE_STATUS_SUCCESS, message) except: logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (current.TASK_QUEUE_STATUS_FAILED, log_exception()) def update_security_domain(vm_details, security_domain_id, xmlDesc=None): """ Get new IP for given security domain. Update the VM XML with new mac_address and update the information in DB """ # fetch new private IP from db from given security domain private_ip_info = _get_private_ip_mac(security_domain_id) # update vm config to add new mac address. 
root = etree.fromstring(xmlDesc) mac_elem = root.find("devices/interface[@type='bridge']/mac") mac_elem.set('address', private_ip_info.mac_addr) vlan_tag_elem = root.find("devices/interface[@type='bridge']/vlan/tag") vlan_tag_elem.set('id', private_ip_info.vlan.vlan_tag) # update NAT IP mapping, if public IP present if vm_details.public_ip: remove_mapping(vm_details.public_ip.public_ip, vm_details.private_ip.private_ip) create_mapping(vm_details.public_ip.public_ip, private_ip_info.private_ip) # update vm_data current.db(current.db.vm_data.id == vm_details.id).update(security_domain = security_domain_id, private_ip = private_ip_info.id) return etree.tostring(root) def edit_vm_config(parameters): """ Edits vm configuration """ logger.debug("Inside edit vm config() function") vm_id = parameters['vm_id'] vm_details = current.db.vm_data[vm_id] message = "" try: connection_object, domain = getVirshDomainConn(vm_details) if 'vcpus' in parameters: new_vcpus = int(parameters['vcpus']) domain.setVcpusFlags(new_vcpus, VIR_DOMAIN_VCPU_MAXIMUM) domain.setVcpusFlags(new_vcpus, VIR_DOMAIN_AFFECT_CONFIG) message += "Edited vCPU successfully." current.db(current.db.vm_data.id == vm_id).update(vCPU = new_vcpus) if 'ram' in parameters: new_ram = int(parameters['ram']) * 1024 logger.debug(str(new_ram)) domain.setMemoryFlags(new_ram, VIR_DOMAIN_MEM_MAXIMUM) domain.setMemoryFlags(new_ram, VIR_DOMAIN_AFFECT_CONFIG) message += " And edited RAM successfully." current.db(current.db.vm_data.id == vm_id).update(RAM = int(parameters['ram'])) if 'public_ip' in parameters: enable_public_ip = parameters['public_ip'] if enable_public_ip: public_ip_pool = _choose_random_public_ip() if public_ip_pool: create_mapping(public_ip_pool.public_ip, vm_details.private_ip.private_ip) current.db.vm_data[vm_id] = dict(public_ip=public_ip_pool.id) message += "Edited Public IP successfully." else: raise Exception("Available Public IPs are exhausted.") else: remove_mapping(vm_details.public_ip.public_ip, vm_details.private_ip.private_ip) current.db.vm_data[vm_id] = dict(public_ip = None) if 'security_domain' in parameters: logger.debug('Updating security domain') xmlfile = update_security_domain(vm_details, parameters['security_domain'], domain.XMLDesc(0)) domain = connection_object.defineXML(xmlfile) if domain.isActive(): domain.reboot(0) message += "Edited security domain successfully" connection_object.close() logger.debug("Task Status: SUCCESS Message: %s " % message) return (current.TASK_QUEUE_STATUS_SUCCESS, message) except: logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (current.TASK_QUEUE_STATUS_FAILED, log_exception()) def _get_clone_properties(vm_details, cloned_vm_details, vm_properties): """ Get properties for Cloned VM. 
""" datastore = _choose_datastore() vm_properties['datastore'] = datastore logger.debug("Datastore selected is: " + str(datastore)) vm_properties['security_domain'] = vm_details.security_domain vm_properties['public_ip_req'] = False # Finds mac address, ip address and vnc port for the cloned vm _choose_mac_ip_vncport(vm_properties) logger.debug("MAC is : " + str(vm_properties['mac_addr']) + " IP is : " + str(vm_properties['private_ip']) + \ " VNCPORT is : " + str(vm_properties['vnc_port'])) # Template and host of parent vm vm_properties['template'] = current.db(current.db.template.id == vm_details.template_id).select()[0] vm_properties['vm_host_details'] = current.db.host[vm_details.host_id] vm_properties['host'] = vm_properties['vm_host_details'].id # Creates a directory for the cloned vm logger.debug("Creating directory for cloned vm...") cloned_vm_directory_path = datastore.system_mount_point + '/' + get_constant('vms') + '/' + cloned_vm_details.vm_identity if not os.path.exists (cloned_vm_directory_path): os.makedirs(cloned_vm_directory_path) clone_file_parameters = ' --file ' + cloned_vm_directory_path + '/' + cloned_vm_details.vm_identity + '.qcow2' else: raise Exception("Directory with same name as vmname already exists.") # Creates a folder for additional disks of the cloned vm vm = current.db(current.db.vm_data.vm_identity == vm_details.vm_identity).select().first() disk_details_of_cloning_vm = current.db(current.db.attached_disks.vm_id == vm.id).select(orderby=current.db.attached_disks.attached_disk_name) logger.debug(disk_details_of_cloning_vm) already_attached_disks = len(disk_details_of_cloning_vm) cloned_vm_extra_disks_directory = datastore.system_mount_point + '/' + get_constant('extra_disks_dir') + '/' + \ datastore.ds_name + '/' + cloned_vm_details.vm_identity if already_attached_disks > 0: if not os.path.exists (cloned_vm_extra_disks_directory): logger.debug("Making Directory") os.makedirs(cloned_vm_extra_disks_directory) count = already_attached_disks while already_attached_disks > 0: disk_name = cloned_vm_details.vm_identity + '_disk' + str(count - already_attached_disks + 1) + '.qcow2' clone_file_parameters += ' --file ' + cloned_vm_extra_disks_directory + '/' + disk_name current.db.attached_disks.insert(vm_id = cloned_vm_details.id, datastore_id = datastore.id , attached_disk_name = disk_name, capacity = disk_details_of_cloning_vm[count - already_attached_disks].capacity) already_attached_disks -= 1 return (clone_file_parameters) def migrate_clone_to_new_host(vm_details, cloned_vm_details, new_host_id_for_cloned_vm,vm_properties): """ Migrates cloned vm to new host """ try: new_host_ip_for_cloned_vm = current.db.host[new_host_id_for_cloned_vm].host_ip.private_ip logger.debug("New host ip for cloned vm is: " + str(new_host_ip_for_cloned_vm)) flags = VIR_MIGRATE_PEER2PEER|VIR_MIGRATE_PERSIST_DEST|VIR_MIGRATE_UNDEFINE_SOURCE|VIR_MIGRATE_OFFLINE|VIR_MIGRATE_UNSAFE logger.debug("Clone currently on: " + str(vm_details.host_id.host_ip)) (current_host_connection_object, domain) = getVirshDomainConn(None, vm_details.host_id.host_ip, cloned_vm_details.vm_identity) logger.debug("Starting to migrate cloned vm to host " + str(new_host_ip_for_cloned_vm)) domain.migrateToURI("qemu+ssh://root@" + new_host_ip_for_cloned_vm + "/system", flags , None, 0) current_host_connection_object.close() logger.debug("Successfully migrated cloned vm to host " + str(new_host_ip_for_cloned_vm)) cloned_vm_details.update_record(host_id = new_host_id_for_cloned_vm) vm_properties['host'] = 
new_host_id_for_cloned_vm return True except libvirt.libvirtError,e: message = e.get_error_message() logger.debug("Error: " + message) return False def clone(vmid): """ Clones vm """ vm_properties = {} logger.debug("Inside clone() function") cloned_vm_details = current.db.vm_data[vmid] vm_details = current.db(current.db.vm_data.id == cloned_vm_details.parent_id).select().first() try: domain = getVirshDomain(vm_details) if domain.info()[0] != VIR_DOMAIN_SHUTOFF: raise Exception("VM is not shutoff. Check vm status.") clone_file_parameters = _get_clone_properties(vm_details, cloned_vm_details, vm_properties) logger.debug("cloned vm properties after clone_file_parameters" + str(vm_properties)) host = vm_properties['vm_host_details'] logger.debug("host is: " + str(host)) logger.debug("host details are: " + str(host)) (used_ram, used_cpu) = host_resources_used(host.id) logger.debug("uram: " + str(used_ram) + " used_cpu: " + str(used_cpu) + " host ram: " + str(host.RAM) +" host cpu: " + str(host.CPUs)) host_ram_after_200_percent_overcommitment = math.floor((host.RAM * 1024) * 2) host_cpu_after_200_percent_overcommitment = math.floor(host.CPUs * 2) logger.debug("host_ram_after_200_percent_overcommitment in MB " + str(host_ram_after_200_percent_overcommitment)) logger.debug("host_cpu_after_200_percent_overcommitment " + str(host_cpu_after_200_percent_overcommitment)) logger.debug("Available RAM on host: %s, Requested RAM: %s" % ((host_ram_after_200_percent_overcommitment - used_ram), vm_details.RAM)) logger.debug("Available CPUs on host: %s, Requested CPU: %s " % ((host_cpu_after_200_percent_overcommitment - used_cpu), vm_details.vCPU)) if((( host_ram_after_200_percent_overcommitment - used_ram) >= vm_details.RAM) and ((host_cpu_after_200_percent_overcommitment - used_cpu) >= vm_details.vCPU) and (vm_details.vCPU <= host.CPUs)): clone_command = "virt-clone --original " + vm_details.vm_identity + " --name " + cloned_vm_details.vm_identity + \ clone_file_parameters + " --mac " + vm_properties['mac_addr'] command_output = execute_remote_cmd(vm_details.host_id.host_ip.private_ip, 'root', clone_command, None, True) logger.debug(command_output) logger.debug("Updating db after cloning") update_db_after_vm_installation(cloned_vm_details, vm_properties, parent_id = vm_details.id) message = "Cloned successfully. " try: new_host_id_for_cloned_vm = find_new_host(cloned_vm_details.RAM, cloned_vm_details.vCPU) if new_host_id_for_cloned_vm != host.id: if migrate_clone_to_new_host(vm_details, cloned_vm_details, new_host_id_for_cloned_vm,vm_properties): message += "Found new host and migrated successfully." else: message += "Found new host but not migrated successfully." else: message += "New host selected to migrate cloned vm is same as the host on which it currently resides." except: message += "Could not find host to migrate cloned vm." logger.debug("Task Status: SUCCESS Message: %s " % message) return (current.TASK_QUEUE_STATUS_SUCCESS, message) else: raise Exception("Host resources exhausted. 
Migrate the host vms and then try.") except: _free_vm_properties(cloned_vm_details, vm_properties) logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (current.TASK_QUEUE_STATUS_FAILED, log_exception()) def attach_extra_disk(parameters): """ Attaches extra disk to VM """ logger.debug("Inside attach extra disk() function") vmid = parameters['vm_id'] disk_size = parameters['disk_size'] vm_details = current.db.vm_data[vmid] logger.debug(str(vm_details)) try: if (serve_extra_disk_request(vm_details, disk_size, vm_details.host_id.host_ip.private_ip)): current.db(current.db.vm_data.id == vmid).update(extra_HDD = vm_details.extra_HDD + disk_size) message = "Attached extra disk successfully" logger.debug(message) return (current.TASK_QUEUE_STATUS_SUCCESS, message) else: message = " Your request for additional HDD could not be completed at this moment. Check logs." logger.debug("Task Status: FAILED Message: %s " % message) return (current.TASK_QUEUE_STATUS_FAILED, message) except: logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (current.TASK_QUEUE_STATUS_FAILED, log_exception()) def get_vm_image_location(datastore_id, vm_identity): """ Get the file path for qcow2 image of a VM """ datastore = current.db.datastore[datastore_id] vm_directory_path = datastore.system_mount_point + '/' + get_constant('vms') + '/' + vm_identity vm_image_name = vm_directory_path + '/' + vm_identity + '.qcow2' image_present = True if os.path.exists(vm_image_name) else False return (vm_image_name, image_present) def get_extra_disk_location(datastore_id, vm_identity, disk_name, get_disk_size=False): """ Get the file path for qcow2 image of the extra disk """ datastore = current.db.datastore[datastore_id] if datastore: vm_extra_disks_directory_path = datastore.system_mount_point + '/' + get_constant('extra_disks_dir') + '/' + \ datastore.ds_name + '/' + vm_identity ext = '' if disk_name.endswith('.qcow2') else '.qcow2' disk_image_path = vm_extra_disks_directory_path + '/' + disk_name + ext image_present = True if os.path.exists(disk_image_path) else False disk_size = 0 if image_present and get_disk_size: command = "qemu-img info " + disk_image_path + " | grep 'virtual size'" ret = os.popen(command).read() # Returns e.g.
virtual size: 40G (42949672960 bytes) disk_size = int(ret[ret.index(':')+1:ret.index('G ')].strip()) return (disk_image_path, image_present, disk_size) else: return (None, False, 0) def launch_existing_vm_image(vm_details): """ Launch existing VM image - Choose new private_ip & mac_addr if not provided - Get location for VM image - Launch VM on given host - Attach extra disk to VM if defined - Create mapping between public IP and private IP if required """ logger.debug('Launch existing VM image') vm_properties = {} vm_properties['ram'] = vm_details.RAM vm_properties['vcpus'] = vm_details.vCPU vm_properties['security_domain'] = vm_details.security_domain #If Private IP was already chosen previously and DHCP entry is done if vm_details.private_ip != None: private_ip_info = current.db.private_ip_pool[vm_details.private_ip] if private_ip_info: vm_properties['private_ip'] = private_ip_info.private_ip vm_properties['mac_addr'] = private_ip_info.mac_addr vm_properties['vlan_name'] = private_ip_info.vlan.name vm_properties['vlan_tag'] = private_ip_info.vlan.vlan_tag if vm_details.public_ip == None: vm_properties['public_ip_req'] = False else: vm_properties['public_ip_req'] = True if vm_details.public_ip.is_active: vm_properties['public_ip'] = vm_details.public_ip.public_ip _choose_mac_ip_vncport(vm_properties) vm_properties['template'] = current.db.template[vm_details.template_id] vm_properties['datastore'] = current.db.datastore[vm_details.datastore_id] vm_properties['host'] = find_new_host(vm_details.RAM, vm_details.vCPU) (vm_image_name, image_present) = get_vm_image_location(vm_details.datastore_id, vm_details.vm_identity) if image_present: launch_vm_on_host(vm_details, vm_image_name, vm_properties) #Check if extra disk needs to be attached attached_disks = current.db((current.db.attached_disks.vm_id == vm_details.id)).select() if attached_disks: #Extra disk to be attached to the VM host_ip = current.db.host[vm_properties['host']].host_ip.private_ip disk_counter = 1 for attached_disk in attached_disks: disk_size = attach_disk(vm_details, attached_disk.attached_disk_name, host_ip, disk_counter, True) current.db((current.db.attached_disks.vm_id == attached_disk.vm_id) & (current.db.attached_disks.attached_disk_name == attached_disk.attached_disk_name)).update(capacity = disk_size) vm_details.extra_HDD += disk_size disk_counter += 1 #Create mapping of Private_IP and Public_IP if vm_properties['public_ip_req']: create_mapping(vm_properties['public_ip'], vm_properties['private_ip']) update_db_after_vm_installation(vm_details, vm_properties) def save_vm_as_template(parameters): """ Save VM as template If template for given VM already exists, replace with new template.
""" logger.debug("Inside save_as_template() function") vm_id = parameters['vm_id'] vm_data = current.db.vm_data[vm_id] user_list = [] vm_details = current.db.vm_data[vm_id] logger.debug(str(vm_details)) try: (is_templated_created, new_template, old_template) = create_new_template(vm_details) if (is_templated_created): #remove old template if os.path.exists (old_template): os.remove(old_template) else: for user in current.db(current.db.user_vm_map.vm_id == vm_id).select(current.db.user_vm_map.user_id): user_list.append(user.user_id) new_template_id = current.db.template.insert(name = vm_data.vm_name + "_template" , os = vm_data.template_id.os , os_name = vm_data.template_id.os_name , os_version = vm_data.template_id.os_version , os_type = vm_data.template_id.os_type , arch = vm_data.template_id.arch , hdd = vm_data.template_id.hdd , hdfile = new_template , type = vm_data.template_id.type , tag = vm_data.vm_name + "_template" , datastore_id = vm_data.template_id.datastore_id, owner = user_list) current.db.vm_data[vm_id] = dict(saved_template = new_template_id) message = "User Template saved successfully" logger.debug(message) return (current.TASK_QUEUE_STATUS_SUCCESS, message) else: message = " Vm Template not saved " logger.debug("Task Status: %s " % message) return (current.TASK_QUEUE_STATUS_FAILED, message) except: logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (current.TASK_QUEUE_STATUS_FAILED, log_exception()) def delete_template(parameters): """ Delete template """ logger.debug("Inside delete_template() function") template_id = parameters['template_id'] template_details = current.db.template[template_id] template_path = template_details["hdfile"] if os.path.exists(template_path): os.remove(template_path) # set value in db also parent_vm = current.db.vm_data(saved_template = template_id) if parent_vm: parent_vm.update_record(saved_template = None) del current.db.template[template_id] return (current.TASK_QUEUE_STATUS_SUCCESS, "") def create_new_template(vm_details): """ Create a new template from the VM image - Create template directory - Copy VM Image to directory(Live copy if VM is running) - Update database to define new template """ try: (connection_object, domain) = getVirshDomainConn(vm_details) xmlfile = domain.XMLDesc(0) logger.debug("connection object created") datastore = _choose_datastore() logger.debug(datastore) new_template_dir = datastore.system_mount_point + '/' +get_constant('templates_dir') + '/' + vm_details.requester_id.first_name logger.debug("Creating user template directory...") if not os.path.exists (new_template_dir): os.makedirs(new_template_dir) template = new_template_dir + '/' + vm_details.vm_identity + '_template.qcow2' template_location = '/' + vm_details.requester_id.first_name + '/' + vm_details.vm_identity + '_template.qcow2' old_template = new_template_dir + '/' + vm_details.vm_identity + '_template_old.qcow2' if os.path.exists (template): # move template to some other path logger.debug("move template to some other file") shutil.move(template, old_template) logger.debug("template " + template) current_disk_path = vm_details.datastore_id.system_mount_point + get_constant('vms') + '/' + vm_details.vm_identity current_disk_file = current_disk_path + '/' + vm_details.vm_identity + '.qcow2' if (vm_details.status == current.VM_STATUS_RUNNING or vm_details.status == current.VM_STATUS_SUSPENDED): logger.debug("vm is active in db") if domain.isActive(): domain.undefine() root = etree.fromstring(xmlfile) target_elem = 
root.find("devices/disk/target") target_disk = target_elem.get('dev') flag = VIR_DOMAIN_BLOCK_REBASE_SHALLOW | VIR_DOMAIN_BLOCK_REBASE_COPY domain.blockRebase(target_disk, template, 0, flag) block_info_list = domain.blockJobInfo(current_disk_file,0) while(block_info_list['end'] != block_info_list['cur']): logger.debug("time to sleep") time.sleep(60) block_info_list = domain.blockJobInfo(current_disk_file,0) domain.blockJobAbort(current_disk_file) domain = connection_object.defineXML(xmlfile) connection_object.close() return (True, template_location, old_template) else: logger.debug("domain is not running on host") return (False, template_location, old_template) elif(vm_details.status == current.VM_STATUS_SHUTDOWN): if domain.isActive(): logger.debug("Domain is still active...Please try again after some time!!!") return (False, template_location, old_template) else: logger.debug("copying") copy_command = "cp "+current_disk_file+" "+template logger.debug("copy_command"+copy_command) #rc = os.system("cp %s %s" % (current_disk_file, template)) logger.debug("copy command running on " + vm_details.host_id.host_ip.private_ip + " host") command_output = execute_remote_cmd(vm_details.host_id.host_ip.private_ip, 'root', copy_command) logger.debug(command_output) return (True, template_location, old_template) except: if not domain.isPersistent(): domain = connection_object.defineXML(xmlfile) connection_object.close() logger.debug("Task Status: FAILED Error: %s " % log_exception()) return (False, template_location, old_template)
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """This module wraps the Android Asset Packaging Tool.""" import os from devil.utils import cmd_helper from pylib import constants _AAPT_PATH = os.path.join(constants.ANDROID_SDK_TOOLS, 'aapt') def _RunAaptCmd(args): """Runs an aapt command. Args: args: A list of arguments for aapt. Returns: The output of the command. """ cmd = [_AAPT_PATH] + args status, output = cmd_helper.GetCmdStatusAndOutput(cmd) if status != 0: raise Exception('Failed running aapt command: "%s" with output "%s".' % (' '.join(cmd), output)) return output def Dump(what, apk, assets=None): """Returns the output of the aapt dump command. Args: what: What you want to dump. apk: Path to apk you want to dump information for. assets: List of assets in apk you want to dump information for. """ assets = assets or [] if isinstance(assets, basestring): assets = [assets] return _RunAaptCmd(['dump', what, apk] + assets).splitlines()
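# --- Hedged usage sketch (not part of the original module) ---
# 'badging' is one of the standard `aapt dump` sub-commands; the apk path
# below is a placeholder. This assumes constants.ANDROID_SDK_TOOLS points at
# a real Android SDK checkout.
if __name__ == '__main__':
  for line in Dump('badging', '/tmp/example.apk'):  # hypothetical apk path
    print line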
setvar('nsamples', getvar('a') + getvar('b'))
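# Hedged note: assuming getvar/setvar read and write named values in the
# enclosing tool's variable store, the line above amounts to:
#   nsamples = a + b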
"""Core recipes for Psi4""" from __future__ import annotations from dataclasses import dataclass from typing import Any, Dict from ase.atoms import Atoms from ase.calculators.psi4 import Psi4 from jobflow import Maker, job from monty.dev import requires try: import psi4 except: psi4 = None from quacc.schemas.calc import summarize_run from quacc.util.basics import merge_dicts from quacc.util.calc import run_calc
#Main Program from Class import Barang import Menu histori = list() listBarang = [ Barang('Rinso', 5000, 20), Barang('Sabun', 3000, 20), Barang('Pulpen', 2500, 20), Barang('Tisu', 10000, 20), Barang('Penggaris', 1000, 20) ] while True: print(''' Menu 1. Show Items 2. Add Item 3. Add Item Stock 4. Delete Item 5. Search Items by Keyword 6. Calculate Shopping Total 7. Item In/Out History 0. Exit Program ''') choice = input('Enter menu number: ') if choice == '1': Menu.menu1(listBarang) elif choice == '2': Menu.menu2(listBarang, histori) elif choice == '3': Menu.menu3(listBarang, histori) elif choice == '4': Menu.menu4(listBarang, histori) elif choice == '5': Menu.menu5(listBarang) elif choice == '6': Menu.menu6(listBarang, histori) elif choice == '7': Menu.menu7(histori) elif choice == '0': print('Exiting program') break else: print('Invalid Input!')
#coding:utf-8 import numpy as np import tensorflow as tf import os import time import datetime import ctypes import threading import json ll1 = ctypes.cdll.LoadLibrary lib_cnn = ll1("./init_cnn.so") ll2 = ctypes.cdll.LoadLibrary lib_kg = ll2("./init_know.so") bags_sum = 0.0 bags_hit_NA = 0.0 sum_NA = 0.0 sum_fNA = 0.0 bags_hit = 0.0 loss_sum = 0.0 if __name__ == "__main__": lib_cnn.readWordVec() lib_cnn.readFromFile() lib_kg.init() np.random.seed(0) tf.set_random_seed(0) config = Config() word_embeddings = np.zeros(config.num_words * config.word_size, dtype = np.float32) lib_cnn.getWordVec.argtypes = [ctypes.c_void_p] lib_cnn.getWordVec(word_embeddings.__array_interface__['data'][0]) word_embeddings.resize((config.num_words,config.word_size)) config.batch_size = lib_kg.getTripleTotal() / config.nbatches config.entityTotal = lib_kg.getEntityTotal() config.relationTotal = lib_kg.getRelationTotal() with tf.Graph().as_default(): conf = tf.ConfigProto() sess = tf.Session(config=conf) with sess.as_default(): initializer = tf.contrib.layers.xavier_initializer() with tf.variable_scope("model", reuse=None, initializer = initializer): m = Model(config = config) global_step_cnn = tf.Variable(0, name="global_step_cnn", trainable=False) optimizer_cnn = tf.train.GradientDescentOptimizer(0.01) grads_and_vars_cnn = optimizer_cnn.compute_gradients(m.loss_cnn) train_op_cnn = optimizer_cnn.apply_gradients(grads_and_vars_cnn, global_step = global_step_cnn) global_step_kg = tf.Variable(0, name="global_step_kg", trainable=False) optimizer_kg = tf.train.GradientDescentOptimizer(0.001) grads_and_vars_kg = optimizer_kg.compute_gradients(m.loss_kg) train_op_kg = optimizer_kg.apply_gradients(grads_and_vars_kg, global_step=global_step_kg) sess.run(tf.initialize_all_variables()) x_batch = np.zeros((config.instanceTot,config.sequence_size), dtype = np.int32) p_t_batch = np.zeros((config.instanceTot,config.sequence_size), dtype = np.int32) p_h_batch = np.zeros((config.instanceTot,config.sequence_size), dtype = np.int32) r_batch = np.zeros((1, 1), dtype = np.int32) y_batch = np.zeros((1, config.num_classes), dtype = np.int32) r_n_batch = np.zeros((1, 1), dtype = np.float32) h_batch = np.zeros((1, 1), dtype = np.int32) t_batch = np.zeros((1, 1), dtype = np.int32) x_batch_addr = x_batch.__array_interface__['data'][0] p_t_batch_addr = p_t_batch.__array_interface__['data'][0] p_h_batch_addr = p_h_batch.__array_interface__['data'][0] y_batch_addr = y_batch.__array_interface__['data'][0] r_batch_addr = r_batch.__array_interface__['data'][0] r_n_batch_addr = r_n_batch.__array_interface__['data'][0] h_batch_addr = h_batch.__array_interface__['data'][0] t_batch_addr = t_batch.__array_interface__['data'][0] lib_cnn.batch_iter.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] tipTotal = lib_cnn.getTipTotal() loop = 0 ph = np.zeros(config.batch_size * 2, dtype = np.int32) pt = np.zeros(config.batch_size * 2, dtype = np.int32) pr = np.zeros(config.batch_size * 2, dtype = np.int32) nh = np.zeros(config.batch_size * 2, dtype = np.int32) nt = np.zeros(config.batch_size * 2, dtype = np.int32) nr = np.zeros(config.batch_size * 2, dtype = np.int32) ph_addr = ph.__array_interface__['data'][0] pt_addr = pt.__array_interface__['data'][0] pr_addr = pr.__array_interface__['data'][0] nh_addr = nh.__array_interface__['data'][0] nt_addr = nt.__array_interface__['data'][0] nr_addr = nr.__array_interface__['data'][0] lib_kg.getBatch.argtypes = 
[ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int] times_kg = 0 coord = tf.train.Coordinator() threads = [] threads.append(threading.Thread(target=train_kg, args=(coord,))) threads.append(threading.Thread(target=train_cnn, args=(coord,))) for t in threads: t.start() coord.join(threads)
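# --- Hedged standalone sketch (not part of the original script) ---
# The script above passes NumPy buffers to C libraries by raw address via
# __array_interface__['data'][0]. The same pattern in isolation, against a
# hypothetical ./libfoo.so exporting void fill_buffer(int32_t *buf, int n):
#
#   import ctypes
#   import numpy as np
#   lib = ctypes.cdll.LoadLibrary("./libfoo.so")  # hypothetical library
#   lib.fill_buffer.argtypes = [ctypes.c_void_p, ctypes.c_int]
#   buf = np.zeros(16, dtype=np.int32)
#   lib.fill_buffer(buf.__array_interface__['data'][0], buf.size)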
#!/usr/bin/env python3 """get tag from http://demo.illustration2vec.net/.""" # note: # - error 'ERROR: Request Entity Too Large' for file 1.1 mb # <span style="color:red;">ERROR: Request Entity Too Large</span> from collections import OrderedDict from pathlib import Path from pprint import pformat import imghdr import logging import os import shutil import time import urllib import hashlib import click import requests import structlog import peewee from PIL import Image from i2vec_cli import models from i2vec_cli.requests_session import Session, convert_raw_to_hydrus from i2vec_cli.sha256 import sha256_checksum from i2vec_cli.utils import user_data_dir, thumb_folder def is_url(path): """Return True if path is url, False otherwise.""" scheme = urllib.parse.urlparse(path).scheme if scheme in ('http', 'https'): return True return False def is_ext_equal(file_ext, imghdr_ext): """compare file extension with result from imghdr_ext.""" if not imghdr_ext: return False if file_ext.lower() == '.{}'.format(imghdr_ext): return True if file_ext.lower() in ('.jpg', '.jpeg') and imghdr_ext == 'jpeg': return True return False def download(url, no_clobber): """download url. Args: url: URL to be downloaded. no_clobber: Skip download if file already exist. Returns: Downloaded filename or existing file if `no_clobber` is `True` """ log = structlog.getLogger() basename = os.path.basename(url) if os.path.isfile(basename) and no_clobber: return basename response = requests.get(url, stream=True) with open(basename, 'wb') as out_file: shutil.copyfileobj(response.raw, out_file) name, ext = os.path.splitext(basename) imghdr_ext = imghdr.what(basename) ext_equal = is_ext_equal(file_ext=ext, imghdr_ext=imghdr_ext) if not imghdr_ext: log.debug("imghdr can't recognize file", file=basename) return basename else: new_basename = '{}.{}'.format(name, imghdr_ext) new_basename_exist = os.path.isfile(new_basename) if ext_equal: log.debug('Extension is equal', file_ext=ext, imghdr_ext=imghdr_ext) return basename elif not ext_equal: if new_basename_exist and not no_clobber: log.debug('Replace existing file', old=basename, new=new_basename) shutil.move(basename, new_basename) elif not new_basename_exist: log.debug('Rename file ext', file=basename, new_ext=imghdr_ext) shutil.move(basename, new_basename) else: log.debug('Not replace/rename file', no_clobber=no_clobber, new_basename=new_basename) return new_basename else: log.debug( 'Unknown condition', file=basename, ext_equal=ext_equal, new_basename_exist=new_basename_exist, imghdr_ext=imghdr_ext ) # just return base name if any error happen return basename def validate_close_delay(ctx, param, value): """validate close delay.""" try: value = int(value) except Exception as e: raise click.BadParameter( 'Error when validate close delay: value={}, error={}'.format(value, e)) if value >= -1: return value else: raise click.BadParameter('Close delay have to be bigger or equal than -1') def delay_close(close_delay): """delay when closing the program.""" log = structlog.getLogger() if close_delay == -1: click.pause() elif close_delay == 0: log.debug('No close delay') elif close_delay > 0: time.sleep(close_delay) else: log.error('Invalid close delay', v=close_delay) def create_thumbnail(path, thumb_path): """create thumbnail.""" size = 320, 320 try: im = Image.open(path) im.thumbnail(size) im.save(thumb_path, "JPEG") except IOError: raise IOError("cannot create thumbnail for", path) def get_print_result(path, db_path, format, session): """get print result.""" # compatibility p = path sha256 
= sha256_checksum(p) md5 = md5_checksum(p) log = structlog.getLogger() thumb_path = os.path.join(user_data_dir, 'thumb', '{}.jpg'.format(sha256)) try: load_res = models.load_result(db=db_path, sha256=sha256, md5=md5) except models.Image.DoesNotExist: load_res = None if load_res: tags = {'prediction': load_res} else: tags = session.get_tags(path=p) try: models.save_result( db=db_path, sha256=sha256, md5=md5, prediction=tags['prediction']) except peewee.IntegrityError as e: log.debug(str(e)) except KeyError as e: log.debug(str(tags)) if not os.path.isfile(thumb_path): create_thumbnail(p, thumb_path) if format == 'dict': return tags if format == 'hydrus': return convert_raw_to_hydrus(tags) else: return pformat(tags['prediction']) if __name__ == '__main__': main()
"""Functions for builtin CherryPy tools.""" import logging import re from hashlib import md5 import six from six.moves import urllib import cherrypy from cherrypy._cpcompat import text_or_bytes from cherrypy.lib import httputil as _httputil from cherrypy.lib import is_iterator # Conditional HTTP request support # def validate_etags(autotags=False, debug=False): """Validate the current ETag against If-Match, If-None-Match headers. If autotags is True, an ETag response-header value will be provided from an MD5 hash of the response body (unless some other code has already provided an ETag header). If False (the default), the ETag will not be automatic. WARNING: the autotags feature is not designed for URL's which allow methods other than GET. For example, if a POST to the same URL returns no content, the automatic ETag will be incorrect, breaking a fundamental use for entity tags in a possibly destructive fashion. Likewise, if you raise 304 Not Modified, the response body will be empty, the ETag hash will be incorrect, and your application will break. See :rfc:`2616` Section 14.24. """ response = cherrypy.serving.response # Guard against being run twice. if hasattr(response, 'ETag'): return status, reason, msg = _httputil.valid_status(response.status) etag = response.headers.get('ETag') # Automatic ETag generation. See warning in docstring. if etag: if debug: cherrypy.log('ETag already set: %s' % etag, 'TOOLS.ETAGS') elif not autotags: if debug: cherrypy.log('Autotags off', 'TOOLS.ETAGS') elif status != 200: if debug: cherrypy.log('Status not 200', 'TOOLS.ETAGS') else: etag = response.collapse_body() etag = '"%s"' % md5(etag).hexdigest() if debug: cherrypy.log('Setting ETag: %s' % etag, 'TOOLS.ETAGS') response.headers['ETag'] = etag response.ETag = etag # "If the request would, without the If-Match header field, result in # anything other than a 2xx or 412 status, then the If-Match header # MUST be ignored." if debug: cherrypy.log('Status: %s' % status, 'TOOLS.ETAGS') if status >= 200 and status <= 299: request = cherrypy.serving.request conditions = request.headers.elements('If-Match') or [] conditions = [str(x) for x in conditions] if debug: cherrypy.log('If-Match conditions: %s' % repr(conditions), 'TOOLS.ETAGS') if conditions and not (conditions == ['*'] or etag in conditions): raise cherrypy.HTTPError(412, 'If-Match failed: ETag %r did ' 'not match %r' % (etag, conditions)) conditions = request.headers.elements('If-None-Match') or [] conditions = [str(x) for x in conditions] if debug: cherrypy.log('If-None-Match conditions: %s' % repr(conditions), 'TOOLS.ETAGS') if conditions == ['*'] or etag in conditions: if debug: cherrypy.log('request.method: %s' % request.method, 'TOOLS.ETAGS') if request.method in ('GET', 'HEAD'): raise cherrypy.HTTPRedirect([], 304) else: raise cherrypy.HTTPError(412, 'If-None-Match failed: ETag %r ' 'matched %r' % (etag, conditions)) def validate_since(): """Validate the current Last-Modified against If-Modified-Since headers. If no code has set the Last-Modified response header, then no validation will be performed. 
""" response = cherrypy.serving.response lastmod = response.headers.get('Last-Modified') if lastmod: status, reason, msg = _httputil.valid_status(response.status) request = cherrypy.serving.request since = request.headers.get('If-Unmodified-Since') if since and since != lastmod: if (status >= 200 and status <= 299) or status == 412: raise cherrypy.HTTPError(412) since = request.headers.get('If-Modified-Since') if since and since == lastmod: if (status >= 200 and status <= 299) or status == 304: if request.method in ('GET', 'HEAD'): raise cherrypy.HTTPRedirect([], 304) else: raise cherrypy.HTTPError(412) # Tool code # def allow(methods=None, debug=False): """Raise 405 if request.method not in methods (default ['GET', 'HEAD']). The given methods are case-insensitive, and may be in any order. If only one method is allowed, you may supply a single string; if more than one, supply a list of strings. Regardless of whether the current method is allowed or not, this also emits an 'Allow' response header, containing the given methods. """ if not isinstance(methods, (tuple, list)): methods = [methods] methods = [m.upper() for m in methods if m] if not methods: methods = ['GET', 'HEAD'] elif 'GET' in methods and 'HEAD' not in methods: methods.append('HEAD') cherrypy.response.headers['Allow'] = ', '.join(methods) if cherrypy.request.method not in methods: if debug: cherrypy.log('request.method %r not in methods %r' % (cherrypy.request.method, methods), 'TOOLS.ALLOW') raise cherrypy.HTTPError(405) else: if debug: cherrypy.log('request.method %r in methods %r' % (cherrypy.request.method, methods), 'TOOLS.ALLOW') def proxy(base=None, local='X-Forwarded-Host', remote='X-Forwarded-For', scheme='X-Forwarded-Proto', debug=False): """Change the base URL (scheme://host[:port][/path]). For running a CP server behind Apache, lighttpd, or other HTTP server. For Apache and lighttpd, you should leave the 'local' argument at the default value of 'X-Forwarded-Host'. For Squid, you probably want to set tools.proxy.local = 'Origin'. If you want the new request.base to include path info (not just the host), you must explicitly set base to the full base path, and ALSO set 'local' to '', so that the X-Forwarded-Host request header (which never includes path info) does not override it. Regardless, the value for 'base' MUST NOT end in a slash. cherrypy.request.remote.ip (the IP address of the client) will be rewritten if the header specified by the 'remote' arg is valid. By default, 'remote' is set to 'X-Forwarded-For'. If you do not want to rewrite remote.ip, set the 'remote' arg to an empty string. """ request = cherrypy.serving.request if scheme: s = request.headers.get(scheme, None) if debug: cherrypy.log('Testing scheme %r:%r' % (scheme, s), 'TOOLS.PROXY') if s == 'on' and 'ssl' in scheme.lower(): # This handles e.g. 
webfaction's 'X-Forwarded-Ssl: on' header scheme = 'https' else: # This is for lighttpd/pound/Mongrel's 'X-Forwarded-Proto: https' scheme = s if not scheme: scheme = request.base[:request.base.find('://')] if local: lbase = request.headers.get(local, None) if debug: cherrypy.log('Testing local %r:%r' % (local, lbase), 'TOOLS.PROXY') if lbase is not None: base = lbase.split(',')[0] if not base: default = urllib.parse.urlparse(request.base).netloc base = request.headers.get('Host', default) if base.find('://') == -1: # add http:// or https:// if needed base = scheme + '://' + base request.base = base if remote: xff = request.headers.get(remote) if debug: cherrypy.log('Testing remote %r:%r' % (remote, xff), 'TOOLS.PROXY') if xff: if remote == 'X-Forwarded-For': # Grab the first IP in a comma-separated list. Ref #1268. xff = next(ip.strip() for ip in xff.split(',')) request.remote.ip = xff def ignore_headers(headers=('Range',), debug=False): """Delete request headers whose field names are included in 'headers'. This is a useful tool for working behind certain HTTP servers; for example, Apache duplicates the work that CP does for 'Range' headers, and will doubly-truncate the response. """ request = cherrypy.serving.request for name in headers: if name in request.headers: if debug: cherrypy.log('Ignoring request header %r' % name, 'TOOLS.IGNORE_HEADERS') del request.headers[name] def response_headers(headers=None, debug=False): """Set headers on the response.""" if debug: cherrypy.log('Setting response headers: %s' % repr(headers), 'TOOLS.RESPONSE_HEADERS') for name, value in (headers or []): cherrypy.serving.response.headers[name] = value response_headers.failsafe = True def referer(pattern, accept=True, accept_missing=False, error=403, message='Forbidden Referer header.', debug=False): """Raise HTTPError if Referer header does/does not match the given pattern. pattern A regular expression pattern to test against the Referer. accept If True, the Referer must match the pattern; if False, the Referer must NOT match the pattern. accept_missing If True, permit requests with no Referer header. error The HTTP error code to return to the client on failure. message A string to include in the response body on failure. """ try: ref = cherrypy.serving.request.headers['Referer'] match = bool(re.match(pattern, ref)) if debug: cherrypy.log('Referer %r matches %r' % (ref, pattern), 'TOOLS.REFERER') if accept == match: return except KeyError: if debug: cherrypy.log('No Referer header', 'TOOLS.REFERER') if accept_missing: return raise cherrypy.HTTPError(error, message) session_auth.__doc__ = ( """Session authentication hook. Any attribute of the SessionAuth class may be overridden via a keyword arg to this function: """ + '\n'.join(['%s: %s' % (k, type(getattr(SessionAuth, k)).__name__) for k in dir(SessionAuth) if not k.startswith('__')]) ) def log_traceback(severity=logging.ERROR, debug=False): """Write the last error's traceback to the cherrypy error log.""" cherrypy.log('', 'HTTP', severity=severity, traceback=True) def log_request_headers(debug=False): """Write request headers to the cherrypy error log.""" h = [' %s: %s' % (k, v) for k, v in cherrypy.serving.request.header_list] cherrypy.log('\nRequest Headers:\n' + '\n'.join(h), 'HTTP') def log_hooks(debug=False): """Write request.hooks to the cherrypy error log.""" request = cherrypy.serving.request msg = [] # Sort by the standard points if possible. 
from cherrypy import _cprequest points = _cprequest.hookpoints for k in request.hooks.keys(): if k not in points: points.append(k) for k in points: msg.append(' %s:' % k) v = request.hooks.get(k, []) v.sort() for h in v: msg.append(' %r' % h) cherrypy.log('\nRequest Hooks for ' + cherrypy.url() + ':\n' + '\n'.join(msg), 'HTTP') def redirect(url='', internal=True, debug=False): """Raise InternalRedirect or HTTPRedirect to the given url.""" if debug: cherrypy.log('Redirecting %sto: %s' % ({True: 'internal ', False: ''}[internal], url), 'TOOLS.REDIRECT') if internal: raise cherrypy.InternalRedirect(url) else: raise cherrypy.HTTPRedirect(url) def trailing_slash(missing=True, extra=False, status=None, debug=False): """Redirect if path_info has (missing|extra) trailing slash.""" request = cherrypy.serving.request pi = request.path_info if debug: cherrypy.log('is_index: %r, missing: %r, extra: %r, path_info: %r' % (request.is_index, missing, extra, pi), 'TOOLS.TRAILING_SLASH') if request.is_index is True: if missing: if not pi.endswith('/'): new_url = cherrypy.url(pi + '/', request.query_string) raise cherrypy.HTTPRedirect(new_url, status=status or 301) elif request.is_index is False: if extra: # If pi == '/', don't redirect to ''! if pi.endswith('/') and pi != '/': new_url = cherrypy.url(pi[:-1], request.query_string) raise cherrypy.HTTPRedirect(new_url, status=status or 301) def accept(media=None, debug=False): """Return the client's preferred media-type (from the given Content-Types). If 'media' is None (the default), no test will be performed. If 'media' is provided, it should be the Content-Type value (as a string) or values (as a list or tuple of strings) which the current resource can emit. The client's acceptable media ranges (as declared in the Accept request header) will be matched in order to these Content-Type values; the first such string is returned. That is, the return value will always be one of the strings provided in the 'media' arg (or None if 'media' is None). If no match is found, then HTTPError 406 (Not Acceptable) is raised. Note that most web browsers send */* as a (low-quality) acceptable media range, which should match any Content-Type. In addition, "...if no Accept header field is present, then it is assumed that the client accepts all media types." Matching types are checked in order of client preference first, and then in the order of the given 'media' values. Note that this function does not honor accept-params (other than "q"). """ if not media: return if isinstance(media, text_or_bytes): media = [media] request = cherrypy.serving.request # Parse the Accept request header, and try to match one # of the requested media-ranges (in order of preference). ranges = request.headers.elements('Accept') if not ranges: # Any media type is acceptable. if debug: cherrypy.log('No Accept header elements', 'TOOLS.ACCEPT') return media[0] else: # Note that 'ranges' is sorted in order of preference for element in ranges: if element.qvalue > 0: if element.value == '*/*': # Matches any type or subtype if debug: cherrypy.log('Match due to */*', 'TOOLS.ACCEPT') return media[0] elif element.value.endswith('/*'): # Matches any subtype mtype = element.value[:-1] # Keep the slash for m in media: if m.startswith(mtype): if debug: cherrypy.log('Match due to %s' % element.value, 'TOOLS.ACCEPT') return m else: # Matches exact value if element.value in media: if debug: cherrypy.log('Match due to %s' % element.value, 'TOOLS.ACCEPT') return element.value # No suitable media-range found. 
    ah = request.headers.get('Accept')
    if ah is None:
        msg = 'Your client did not send an Accept header.'
    else:
        msg = 'Your client sent this Accept header: %s.' % ah
    msg += (' But this resource only emits these media types: %s.' %
            ', '.join(media))
    raise cherrypy.HTTPError(406, msg)


def autovary(ignore=None, debug=False):
    """Auto-populate the Vary response header based on request.header access.
    """
    request = cherrypy.serving.request

    req_h = request.headers
    request.headers = MonitoredHeaderMap()
    request.headers.update(req_h)
    if ignore is None:
        ignore = set(['Content-Disposition', 'Content-Length', 'Content-Type'])

    request.hooks.attach('before_finalize', set_response_header, 95)


def convert_params(exception=ValueError, error=400):
    """Convert request params based on function annotations, with error handling.

    exception
        Exception class to catch.

    error
        The HTTP error code to return to the client on failure.
    """
    request = cherrypy.serving.request
    types = request.handler.callable.__annotations__
    with cherrypy.HTTPError.handle(exception, error):
        for key in set(types).intersection(request.params):
            request.params[key] = types[key](request.params[key])
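The `autovary` tool above references `MonitoredHeaderMap` and `set_response_header`, which the excerpt never defines; upstream CherryPy defines both inside `autovary`'s body. A minimal sketch of what they plausibly look like, reconstructed under that assumption rather than copied from CherryPy:

```python
# Hedged reconstruction -- not the verbatim CherryPy implementation.
import cherrypy
from cherrypy.lib import httputil as _httputil


class MonitoredHeaderMap(_httputil.HeaderMap):
    """HeaderMap that records every header field the handler reads."""

    def __init__(self):
        self.accessed_headers = set()
        super(MonitoredHeaderMap, self).__init__()

    def transform_key(self, key):
        self.accessed_headers.add(key)
        return super(MonitoredHeaderMap, self).transform_key(key)


def set_response_header(ignore=frozenset()):
    """before_finalize hook: fill in Vary from the accessed request headers."""
    request = cherrypy.serving.request
    resp_h = cherrypy.serving.response.headers
    v = set(e.value for e in resp_h.elements('Vary'))
    v = v.union(getattr(request.headers, 'accessed_headers', set()))
    v = v.difference(ignore)
    resp_h['Vary'] = ', '.join(sorted(v))
```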
'''Utility functions'''

import multiprocessing

from .globalVariables import *


def readMathIOmicaData(fileName):

    '''Read text files exported by MathIOmica and convert to Python data

    Parameters:
        fileName: str
            Path of directories and name of the file containing data

    Returns:
        data
            Python data

    Usage:
        data = readMathIOmicaData("../../MathIOmica/MathIOmica/MathIOmicaData/ExampleData/rnaExample")
    '''

    if os.path.isfile(fileName):
        with open(fileName, 'r') as tempFile:
            data = tempFile.read()

        data = data.replace('\n','').replace('{','(').replace('}',')').replace('->',':').replace('|>','}')
        data = data.replace('<|','{').replace('^','*').replace('`','*').replace('Missing[]','"Missing[]"')
        data = data.replace("\\",'')
    else:
        print('File not found (%s)'%(fileName))

    returning = None

    try:
        returning = eval(data)
    except:
        print('Error occurred while converting data (%s)'%(fileName))

    return returning


def runCPUs(NumberOfAvailableCPUs, func, list_of_tuples_of_func_params):

    """Parallelize function call with multiprocessing.Pool.

    Parameters:
        NumberOfAvailableCPUs: int
            Number of processes to create

        func: function
            Function to apply, must take at most one argument

        list_of_tuples_of_func_params: list
            Function parameters

    Returns:
        2d numpy.array
            Results of func in a numpy array

    Usage:
        results = runCPUs(4, pAutocorrelation, [(times[i], data[i], allTimes) for i in range(10)])
    """

    instPool = multiprocessing.Pool(processes = NumberOfAvailableCPUs)
    return_values = instPool.map(func, list_of_tuples_of_func_params)
    instPool.close()
    instPool.join()

    return np.vstack(return_values)


def createReverseDictionary(inputDictionary):

    """Efficient way to create a reverse dictionary from a dictionary.
    Uses pandas.DataFrame.groupby and NumPy array indexing.

    Parameters:
        inputDictionary: dictionary
            Dictionary to reverse

    Returns:
        dictionary
            Reversed dictionary

    Usage:
        revDict = createReverseDictionary(Dict)
    """

    keys, values = np.array(list(inputDictionary.keys())), np.array(list(inputDictionary.values()))
    df = pd.DataFrame(np.array([[keys[i], value] for i in range(len(keys)) for value in values[i]]))
    dfGrouped = df.groupby(df.columns[1])
    keys, values = list(dfGrouped.indices.keys()), list(dfGrouped.indices.values())
    GOs = df.values.T[0]

    return dict(zip(keys, [GOs[value].tolist() for value in values]))


def createDirectories(path):

    """Create a path of directories, unless the path already exists.

    Parameters:
        path: str
            Path directory

    Returns:
        None

    Usage:
        createDirectories("/pathToFolder1/pathToSubFolder2")
    """

    if path=='':
        return None

    if not os.path.exists(path):
        os.makedirs(path)

    return None
#! /usr/bin/env python #adam-does# runs SeeingClearly to get the seeing and rms of the image, then uses those to get sextractor thresholds for CR detection #adam-use# use with CRNitschke pipeline #adam-call_example# call it like ./get_sextract_thresholds.py /path/flname.fits output_file.txt #IO stuff: import sys ; sys.path.append('/u/ki/awright/InstallingSoftware/pythons') ###saveout = sys.stdout saveout = sys.stdout ###logout = open('SeeingClearly_stdout.log','w') ###sys.stdout = logout saveerr = sys.stderr ###logerr = open('SeeingClearly_stderr.log','w') ###sys.stderr = logerr sys.stdout = sys.stderr #the basics import hashlib import os import SeeingClearly from copy import deepcopy import imagetools import glob import astropy from astropy.io import ascii from numpy import asarray if __name__ == "__main__": args=deepcopy(sys.argv[1:]) for false_arg in ['-i', '--']: if false_arg in args: args.remove(false_arg) if len(args)<1: sys.exit() if not os.path.isfile(args[0]): print "sys.argv[1]=",args[0] raise Exception(args[0]+" is not a file!") else: fl=args[0] fl2save=args[1] #start tmp print "Using SeeingClearly to get seeing for: "+fl print "saving output to: " +fl2save try: FILTER=astropy.io.fits.open(fl)[0].header['FILTER'] except: FILTER="UnknownFilt" BASE,ending=os.path.basename(fl).split('OCF') ending="OCF"+ending ending=ending.replace('.fits','') fls_dir=os.path.dirname(fl) basename=os.path.basename(fl) CCDnum=imagetools.GetCCD(fl) globthis='_'+str(CCDnum) glob_basename=basename.replace(globthis,'_*') fls=sorted(glob.glob(fls_dir+"/"+glob_basename)) if not len(fls)==10: raise Exception('cannot find 10 files like this from different CCDs') #adam-old# seeing,back_rms=SeeingClearly.seeing_clearly_withplot(fls,checkplots=1,saveas='pltSeeingClearly_%s_%s' % (FILTER,BASE[:-1]+"ALL")) import adam_stars_from_cat import numpy seeing,back_rms=adam_stars_from_cat.get_seeing_backrms(fls) back_rms=numpy.array(back_rms) ft,dt=seeing_to_ft_dt(seeing) detect_thresh=dt/back_rms #convert to S2N ratio filter_thresh=ft/back_rms #convert to S2N ratio if FILTER=='W-J-B': detect_thresh=asarray([min(170.0,detect_thresh[i]) for i in range(len(detect_thresh))]) filter_thresh=asarray([min(20.0,filter_thresh[i]) for i in range(len(filter_thresh))]) elif (detect_thresh>170.0).any() or (filter_thresh>20.0).any(): print 'checkit: filter=%s and %.2f %% of the detection thresholds are above 170.0 and %.2f %% of the filter thresholds are above 20.0' % (FILTER,(detect_thresh>170.0).mean()*100, (filter_thresh>20.0).mean()*100) dict_out={} dict_out['seeing']=[seeing]*10 dict_out['rms']=back_rms dict_out['dt']=detect_thresh dict_out['ft']=filter_thresh dict_out['#files']=fls t=astropy.table.Table(data=dict_out,names=['#files','rms','seeing','dt','ft'],dtype=[str,float,float,float,float]) t.write(fl2save,format="ascii.basic") #adam-2014#detect_thresh_cap=min(detect_thresh,150.0) #cap is now set in the function seeing_to_ft_dt #PIXSCALE=float(os.environ['PIXSCALE']) #if seeing>PIXSCALE*2.5: #I have no check for being undersampled, should I? #if seeing>.4: # sys.stdout=saveout #back to printing to terminal # ###sys.stdout.write(str(seeing)) # print "'0 "+str(back_rms)+" "+str(seeing)+" "+str(detect_thresh)+" "+str(filter_thresh)+"'" # #else: # #print "exit 1;" # #raise Exception('Seeing less than 2.5xPIXSCALE. 
The image is undersampled') # #sys.stderr=saveerr #back to printing to terminal # #sys.stderr.write('1') # sys.stdout=saveout #back to printing to terminal # print "0 "+str(back_rms)+" "+str(seeing)+" "+str(detect_thresh)+" "+str(filter_thresh)
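The script above calls `seeing_to_ft_dt(seeing)`, which is never defined in the excerpt; the commented-out 2014 line only hints that the 150.0 cap on the detection threshold now lives inside it. A hypothetical stand-in so the script can run (it would need to be defined before its first use); the linear scale factors are assumptions, not the CRNitschke pipeline's real numbers:

```python
# Hypothetical seeing -> (filter_thresh, detect_thresh) mapping. The real
# function lives elsewhere in the pipeline; only the 150.0 cap is hinted
# at by the comments above. The linear factors here are made up.
def seeing_to_ft_dt(seeing):
    ft = 20.0 * seeing                # assumed filter-threshold scaling
    dt = min(170.0 * seeing, 150.0)   # assumed scaling, capped per the comments
    return ft, dt
```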
import time import multiprocessing
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from unittest import skipIf

try:
    from django.core.urlresolvers import reverse
except ImportError:  # django.core.urlresolvers was removed in Django 2.0
    from django.urls import reverse
from django.db import transaction

from aldryn_reversion.core import create_revision as aldryn_create_revision
from parler.utils.context import switch_language

import six

from . import NewsBlogTestCase
from aldryn_newsblog.cms_appconfig import NewsBlogConfig

from ..settings import ENABLE_REVERSION

if ENABLE_REVERSION:
    try:
        from reversion import create_revision
        from reversion import default_revision_manager
    except ImportError:
        from reversion.revisions import create_revision
        from reversion.revisions import default_revision_manager
# Library for the dynamics of a lumen network
# The lumens are 2-dimensional, symmetric, and connected by 1-dimensional tubes
#
# Created by A. Mielke, 2018
# Modified by M. Le Verge--Serandour on 8/04/2019

"""
network.py
conf.init

Defines the network class and associated functions

Imports
-------
Libraries : numpy, os, math

Created by A. Mielke
Modified by H. Turlier on 8/06/2018
Modified by M. Le Verge--Serandour on 8/04/2019
"""

import numpy as np
import math
import os
# Illustrate upsampling in 2d
# Code from Jason Brownlee
# https://machinelearningmastery.com/generative_adversarial_networks/

import tensorflow as tf
from tensorflow import keras
from numpy import asarray
#from keras.models import Sequential
from tensorflow.keras.models import Sequential
#from keras.layers import UpSampling2D
from tensorflow.keras.layers import UpSampling2D

X = asarray([[1, 2],
             [3, 4]])
X = asarray([[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]])  # replaces the 2x2 example above
print(X)
nr = X.shape[0]
nc = X.shape[1]
# reshape input data into one sample with a channel
X = X.reshape((1, nr, nc, 1))

model = Sequential()
model.add(UpSampling2D(input_shape=(nr, nc, 1)))  # nearest neighbor
yhat = model.predict(X)
yhat = yhat.reshape((2*nr, 2*nc))
print(yhat)

model = Sequential()
# use (nr, nc, 1) so the bilinear model also works for non-square inputs
model.add(UpSampling2D(input_shape=(nr, nc, 1), interpolation='bilinear'))
yhat = model.predict(X)
yhat = yhat.reshape((2*nr, 2*nc))
print(yhat)
__all__ = ['scaffold', 'command_set'] from gevent import monkey monkey.patch_all() import csv import os import sys import time import shutil from typing import List import gevent from src.BusinessCentralLayer.setting import logger, DEFAULT_POWER, CHROMEDRIVER_PATH, \ REDIS_MASTER, SERVER_DIR_DATABASE_CACHE, SERVER_DIR_CLIENT_DEPORT, SERVER_PATH_DEPOT_VCS, SERVER_DIR_CACHE_BGPIC, \ REDIS_SLAVER_DDT, CRAWLER_SEQUENCE, terminal_echo, SERVER_DIR_DATABASE_LOG, SERVER_DIR_SSPANEL_MINING command_set = { # --------------------------------------------- # # --------------------------------------------- 'deploy': "/Flask yaml", # --------------------------------------------- # # --------------------------------------------- "clear": "", "decouple": "subs_ddt", "overdue": "", "run": "[spawn]", "force_run": "[spawn]", "remain": "", "ping": "", "entropy": "", "exile": "", "spawn": "", "mining": "STAFF hostSEO", # --------------------------------------------- # # --------------------------------------------- # usage: python main.py --parse https://domain/link/token?sub=3 # usage: python main.py --parse https://domain/link/token?sub=3 https://domain/link/token2?sub=3 # "--parse": """ping""", # --------------------------------------------- # Windows # --------------------------------------------- "panel": "[for Windows] ", "ash": "[for Windows] ,Clash yaml," "URL SchemeClash", # --------------------------------------------- # # --------------------------------------------- "example": "python main.py ping" } _ConfigQuarantine().run() scaffold = _ScaffoldGuider()
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2016 Daniel Estevez <daniel@destevez.net>. # # This is free and unencumbered software released into the public domain. # # Anyone is free to copy, modify, publish, use, compile, sell, or # distribute this software, either in source code form or as a compiled # binary, for any purpose, commercial or non-commercial, and by any # means. # # In jurisdictions that recognize copyright laws, the author or authors # of this software dedicate any and all copyright interest in the # software to the public domain. We make this dedication for the benefit # of the public at large and to the detriment of our heirs and # successors. We intend this dedication to be an overt act of # relinquishment in perpetuity of all present and future rights to this # software under copyright law. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # # For more information, please refer to <http://unlicense.org> # import numpy from gnuradio import gr import pmt import array
#!/usr/bin/python # # Start dfplayer. import argparse import os import shutil import subprocess import sys import time _PROJ_DIR = os.path.dirname(__file__) main()
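The launcher above ends with a bare `main()` call that the excerpt never defines. A hypothetical skeleton consistent with the imports and the `_PROJ_DIR` constant (it would need to sit above the call); the binary path and the `--gdb` flag are illustrative guesses, not the real dfplayer interface:

```python
# Hypothetical entry point; the real dfplayer launch sequence is not in
# the excerpt. Binary location and the --gdb flag are illustrative only.
def main():
    parser = argparse.ArgumentParser(description='Start dfplayer.')
    parser.add_argument('--gdb', action='store_true',
                        help='run the player under gdb')
    args = parser.parse_args()

    binary = os.path.join(_PROJ_DIR, 'dfplayer', 'dfplayer')
    cmd = ['gdb', '--args', binary] if args.gdb else [binary]
    sys.exit(subprocess.call(cmd))
```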
#!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A demo of the Google CloudSpeech recognizer."""

import aiy.audio
import aiy.cloudspeech
import aiy.voicehat
import aiy.i18n

CONFIRM_SOUND_PATH = '/home/pi/Music/R2D2/R2_Understood.wav'
CONFUSED_SOUND_PATH = '/home/pi/Music/R2D2/R2_Confused.wav'
UNRECOGNISED_SOUND_PATH = '/home/pi/Music/R2D2/R2_FastBip.wav'

if __name__ == '__main__':
    main()
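The demo never defines the `main()` it calls. A sketch in the style of the classic AIY voice-kit examples, to be placed above the `__main__` guard; treat every `aiy.*` call here as an assumption about that legacy API rather than a verified listing:

```python
# Assumed legacy AIY voice-kit API; not a verified listing.
def main():
    recognizer = aiy.cloudspeech.get_recognizer()
    button = aiy.voicehat.get_button()
    aiy.audio.get_recorder().start()

    while True:
        print('Press the button and speak')
        button.wait_for_press()
        aiy.audio.play_wave(CONFIRM_SOUND_PATH)   # acknowledge the press
        text = recognizer.recognize()
        if text is None:
            aiy.audio.play_wave(UNRECOGNISED_SOUND_PATH)
        else:
            print('You said: "%s"' % text)
```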
import argparse import models model_names = sorted(name for name in models.__dict__ if name.islower() and not name.startswith("__") and callable(models.__dict__[name]))
from flask import Flask, Response from flask_basicauth import BasicAuth from flask_cors import CORS, cross_origin import os #from flask_admin import Admin,AdminIndexView #from flask_admin.contrib.sqla import ModelView from flask_sqlalchemy import SQLAlchemy as _BaseSQLAlchemy from flask_migrate import Migrate, MigrateCommand from flask_script import Manager from werkzeug.exceptions import HTTPException from flask_login import LoginManager from itsdangerous import URLSafeSerializer # import psycopg2 # import pymysql # import logging # import warnings # warnings.filterwarnings("ignore") # Initializing Flask App app = Flask(__name__) app.secret_key="Vampire" # This video demonstrates why we use CORS in our Flask App - https://www.youtube.com/watch?v=vWl5XcvQBx0 CORS(app) app.config.from_object("config.DevelopmentConfig") # Creating and Initializing db object of SQLAlchemy class db = SQLAlchemy(app) db.init_app(app) migrate = Migrate(app, db, render_as_batch=True) with app.app_context(): if db.engine.url.drivername == 'sqlite': migrate.init_app(app, db, render_as_batch=True) else: migrate.init_app(app, db) manager = Manager(app) manager.add_command('db', MigrateCommand) # Creating serializer object of URLSafeSerializer class for serializing session_token serializer = URLSafeSerializer(app.secret_key) # Here we set session_token as our user_loader. from bookstore.client.views import client from bookstore.admin.views import admin app.register_blueprint(client) app.register_blueprint(admin)
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.core.management import BaseCommand from cobl.lexicon.models import LanguageList, \ MeaningList, \ Meaning, \ Lexeme, \ CognateClass, \ CognateJudgement, \ LanguageClade, \ Clade
import pandas as pd from icu import Collator, Locale, RuleBasedCollator ddf = pd.read_csv("../word_frequency/unilex/din.txt", sep='\t', skiprows = range(2,5)) collator = Collator.createInstance(Locale('en_AU.UTF-8')) # https://stackoverflow.com/questions/13838405/custom-sorting-in-pandas-dataframe/27009771#27009771 # https://gist.github.com/seanpue/e1cb846f676194ae77eb sort_by_custom_dict = sort_pd(key=collator.getSortKey) #ddf.iloc[sort_by_custom_dict(ddf.index)] # ddf.iloc[sort_by_custom_dict(ddf['Form'])] ddf.iloc[sort_by_custom_dict(ddf['Form'])] #https://python3.wannaphong.com/2015/03/sort-python.html # https://pyerror.com/detail/1316/ lexemes = ddf.Form #lexemes2 = ddf['Form'] temp = lexemes.sort_values() collation_rules = "&A<<aa<<<aA<<<Aa<<<AA<<<<<<<<<<<<<<<<\n&D<dh<<<dH<<<Dh<<<DH\n&E<<ee<<<eE<<<Ee<<<EE<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n&G<<<<\n&I<<ii<<<iI<<<Ii<<<II<<<<<<<<<<<<<<<<\n&N<nh<<<nH<<<Nh<<<NH<ny<<<nY<<<Ny<<<NH<<<<\n&O<<oo<<<oO<<<Oo<<<OO<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n&T<th<<<tH<<<Th<<<TH\n&U<<uu<<<uU<<<Uu<<<UU" custom_collator = RuleBasedCollator(collation_rules) temp.sort_values(key=lambda x: custom_collator.getSortKey(x) ) sort_by_custom_dict = sort_pd(key=custom_collator.getSortKey)
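`sort_pd` is used throughout the script above but never defined in the excerpt; the commented gist link points at the usual pattern. A minimal version that would need to appear before its first use, returning positional indices suitable for `.iloc` (note that `list.index()` resolves duplicate values to their first occurrence):

```python
# Minimal sort_pd in the spirit of the linked gist: returns the positional
# order that sorts a Series under an arbitrary key function.
def sort_pd(key=None, reverse=False):
    def sorter(series):
        series_list = list(series)
        return [series_list.index(i)
                for i in sorted(series_list, key=key, reverse=reverse)]
    return sorter
```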
# Python import unittest from copy import deepcopy from unittest.mock import Mock # ATS from pyats.topology import Device # Genie from genie.libs.ops.dot1x.ios.dot1x import Dot1X from genie.libs.ops.dot1x.ios.tests.dot1x_output import Dot1xOutput # Parser from genie.libs.parser.ios.show_dot1x import ShowDot1xAllDetail, \ ShowDot1xAllStatistics, \ ShowDot1xAllSummary, \ ShowDot1xAllCount if __name__ == '__main__': unittest.main()
# ====================================================================== # copyright 2020. Triad National Security, LLC. All rights # reserved. This program was produced under U.S. Government contract # 89233218CNA000001 for Los Alamos National Laboratory (LANL), which # is operated by Triad National Security, LLC for the U.S. Department # of Energy/National Nuclear Security Administration. All rights in # the program are reserved by Triad National Security, LLC, and the # U.S. Department of Energy/National Nuclear Security # Administration. The Government is granted for itself and others # acting on its behalf a nonexclusive, paid-up, irrevocable worldwide # license in this material to reproduce, prepare derivative works, # distribute copies to the public, perform publicly and display # publicly, and to permit others to do so. # ====================================================================== # Authors: Oleg Korobkin (korobkin@lanl.gov) # Purpose: # Provides a check of whether a coordinate transformation of the metric # from code coordinates to Kerr-Schild coordinates produces correct # metric, consistent with the closed form (as in e.g. Eq.(3) # McKinney & Gammie 2004, https://arxiv.org/abs/astro-ph/0404512) # # Functions: # - print_matrix # - check_transformation_matrices # from math import * import numpy as np def print_matrix(matrix,fmt="%19.11e",tostdout=True) -> str: """Pretty-prints a matrix to a string (optinally, to stdout) Parameters ---------- matrix : numpy.array([N,M]) matrix to print fmt : str C-style format of each element (default: "%19.11e") tostdout : bool output to stdout (default: true) Returns ------- str formatted output string """ N = matrix.shape[0] M = matrix.shape[1] s = "[" for i in range(N): s+= "[" for j in range(M): s+= (fmt % matrix[i,j]) if j < M - 1: s += ", " s+= "]" if i < N - 1: s += ",\n " s+="]" if tostdout: print(s) return s def check_transformation_matrices(geom, a, ir, jth, verbose=True, tol=1e-12) -> bool: """Transforms the metric to spherical KS and compares with analytic formula Test 1: covariant metric, gcov, at A = {ir, jth} 1.1 sample gcov and Lambda_h2bl_cov at A 1.2 transform gcov to gks using transofmration matrices 1.3 compare to expected values at {r,th} at A Parameters ---------- geom : dictionary nubhlight geom object a : Float dimensionless Kerr spin parameter ir : Integer index of sample point in radial direction jth : Integer index of sample point in angular theta-direction verbose : bool output steps to stdout tol : Float tolerance to relative error (wrt det g) Returns ------- bool True if all checks passed Examples -------- import hdf5_to_dict as io hdr = io.load_hdr("dump_00000010.h5") geom = io.load_geom(hdr,recalc=True) check_transformation_matrices(geom, -1, 64) """ # sample gcov and h2bl at point A gcov_A = geom['gcov'][ir,jth] h2bl_A = geom['Lambda_h2bl_cov'][ir,jth] # sample r and theta, compute BL metric-related quantities r = geom['r'][ir,jth,0]; r2 = r*r a2 = a*a th= geom['th'][ir,jth,0] sth2= sin(th)**2 Delta= r2 - 2*r + a2 Sigma= r2 + a2*cos(th)**2 A = (r2 + a2)**2 - a2*Delta*sin(th)**2 if verbose: print ("r = %19.11e" % r) print ("theta = %19.11e" % th) print ("a = %19.11e" % a) print ("Delta = %19.11e" % Delta) print ("Sigma = %19.11e" % Sigma) print ("A = %19.11e" % A) # output metric print ("gcov_A = ") print_matrix (gcov_A) print ("") # output transformation matrix print ("h2bl_A = ") print_matrix (h2bl_A) print ("") # compute BL metric at A gks_A = np.zeros([4,4]) for i in range(4): for j in range(4): for k 
in range(4): for l in range(4): gks_A[i,j] = gks_A[i,j] + h2bl_A[k,i]*h2bl_A[l,j]*gcov_A[k,l] if verbose: print ("gks_A = ") print_matrix (gks_A) print("") # expected values at {r, th} g_tt = -1. + 2.*r/Sigma g_rr = 1. + 2.*r/Sigma g_ff = sth2*(Sigma + a2*g_rr*sth2) g_thth = Sigma g_tr = 2*r/Sigma g_tf = -2*a*r*sth2/Sigma g_rf = -a*g_rr*sth2 det_g = -Sigma**2*sth2 if verbose: print ("Expected:") print (" g_tt = %19.11e" % g_tt ) print (" g_rr = %19.11e" % g_rr ) print (" g_thth = %19.11e" % g_thth) print (" g_ff = %19.11e" % g_ff ) print (" g_tr = %19.11e" % g_tr ) print (" g_rf = %19.11e" % g_rf ) print (" g_tf = %19.11e" % g_tf ) print ("") # check gks_A gks_expected = np.array( [[ g_tt, g_tr, 0.0, g_tf], [ g_tr, g_rr, 0.0, g_rf], [ 0.0, 0.0, g_thth, 0.0], [ g_tf, g_rf, 0.0, g_ff]] ) passed = True for i in range(4): for j in range(4): if abs(gks_A[i,j] - gks_expected[i,j])/abs(det_g) > tol: passed = False if verbose: print (f"WARNING: Significant mismatch in gks_A[{i},{j}]:") print (" -- expected: %19.11e" % gks_expected[i,j]) print (" -- actual: %19.11e" % gks_A[i,j]) return passed
from dataclasses import dataclass
# -*- coding: utf-8 -*- ########################################################################## # NSAp - Copyright (C) CEA, 2020 # Distributed under the terms of the CeCILL-B license, as published by # the CEA-CNRS-INRIA. Refer to the LICENSE file or to # http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html # for details. ########################################################################## """ 3D MRI Brain Generation with Generative Adversarial Networks (BGGAN) with Variational Auto Encoder (VAE). """ # Imports import logging import collections import numpy as np import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as func from pynet.utils import Networks # Global parameters logger = logging.getLogger("pynet") def _downsample_shape(shape, nb_iterations=1, scale_factor=2): shape = np.asarray(shape) all_shapes = [shape.astype(int).tolist()] for idx in range(nb_iterations): shape = np.floor(shape / scale_factor) all_shapes.append(shape.astype(int).tolist()) return all_shapes
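A quick check of `_downsample_shape` on a 64³ volume; the values follow directly from the floor-divide loop above:

```python
# Illustrative only: three halvings of a 64x64x64 volume.
shapes = _downsample_shape((64, 64, 64), nb_iterations=3, scale_factor=2)
print(shapes)  # [[64, 64, 64], [32, 32, 32], [16, 16, 16], [8, 8, 8]]
```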
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.freezable_batch_norm.""" import numpy as np import tensorflow as tf from object_detection.core import freezable_batch_norm if __name__ == '__main__': tf.test.main()
# from PyQt5.QtWidgets import QMessageBox # def raise_error(message: str = "DEFAULT:Error Description:More Information"): # box = QMessageBox() # kind, msg, info = message.split(":") # box.setIcon(QMessageBox.Critical) # box.setWindowTitle(kind + " Error") # box.setText(msg) # box.setInformativeText(info) # box.exec_()
""" 759. Employee Free Time We are given a list schedule of employees, which represents the working time for each employee. Each employee has a list of non-overlapping Intervals, and these intervals are in sorted order. Return the list of finite intervals representing common, positive-length free time for all employees, also in sorted order. (Even though we are representing Intervals in the form [x, y], the objects inside are Intervals, not lists or arrays. For example, schedule[0][0].start = 1, schedule[0][0].end = 2, and schedule[0][0][0] is not defined). Also, we wouldn't include intervals like [5, 5] in our answer, as they have zero length. """ # Line Swap method # if we met a start, cnt += 1 # if we met an end, cnt -= 1 # time complexity -- O(NlogN), need sort all intervals # Runtime: 96 ms, faster than 87.95% of Python3 online submissions for Employee Free Time. # Memory Usage: 14.7 MB, less than 25.00% of Python3 online submissions for Employee Free Time. """ # Definition for an Interval. class Interval: def __init__(self, start: int = None, end: int = None): self.start = start self.end = end """ # priority queue # if the current end is less than the smallest start # then means there is a free time # use priority queue to maintain the smallest start # also only stort one of jobs of each person in the queue to save memory # time complexity -- O(NlogC), C is the number of employee """ # Definition for an Interval. class Interval: def __init__(self, start: int = None, end: int = None): self.start = start self.end = end """ import heapq
import os, yaml config = { 'debug': False, 'port': 5000, 'store_path': '/var/storitch', 'pool_size': 5, 'logging': { 'level': 'warning', 'path': None, 'max_size': 100 * 1000 * 1000,# ~ 95 mb 'num_backups': 10, }, 'image_exts': [ '.jpg', '.jpeg', '.png', '.tiff', '.tif', '.gif', '.bmp', '.bmp2', '.bmp3', '.dcm', '.dicom', '.webp', ], }
import io import os import numpy as np import pandas import json import logging #<== Optional. Log to console, file, kafka from pipeline_monitor import prometheus_monitor as monitor #<== Optional. Monitor runtime metrics from pipeline_logger import log import tensorflow as tf from tensorflow.contrib import predictor from keras.models import Sequential, load_model from keras.preprocessing import sequence from keras.preprocessing.text import Tokenizer from collections import OrderedDict _logger = logging.getLogger('pipeline-logger') _logger.setLevel(logging.INFO) _logger_stream_handler = logging.StreamHandler() _logger_stream_handler.setLevel(logging.INFO) _logger.addHandler(_logger_stream_handler) __all__ = ['invoke'] #<== Optional. Being a good Python citizen. _labels = { #<== Optional. Used for metrics/labels 'name': 'injection', 'tag': 'v1', 'type': 'tensorflow', 'runtime': 'python', 'chip': 'cpu', } def _initialize_upon_import(): #<== Optional. Called once upon server startup ''' Initialize / Restore Model Object. ''' model = load_model('securitai-lstm-model.h5') model.load_weights('securitai-lstm-weights.h5') model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy']) return model # This is called unconditionally at *module import time*... _model = _initialize_upon_import() #@log(labels=_labels, logger=_logger) #<== Optional. Sample and compare predictions def invoke(request): #<== Required. Called on every prediction '''Where the magic happens...''' with monitor(labels=_labels, name="transform_request"): #<== Optional. Expose fine-grained metrics transformed_request = _transform_request(request) #<== Optional. Transform input (json) into TensorFlow (tensor) with monitor(labels=_labels, name="invoke"): #<== Optional. Calls _model.predict() response = _model.predict(transformed_request) with monitor(labels=_labels, name="transform_response"): #<== Optional. Transform TensorFlow (tensor) into output (json) transformed_response = _transform_response(response) return transformed_response #<== Required. Returns the predicted value(s) if __name__ == '__main__': with open('./pipeline_test_request.csv', 'rb') as fb: request_bytes = fb.read() response_bytes = invoke(request_bytes) print(response_bytes)
import argparse import importlib import os import re import signal import subprocess import sys import time import logging from act.common import aCTLogger from act.common.aCTConfig import aCTConfigAPP from act.arc import aCTDBArc if __name__ == '__main__': main()
from rest_framework.test import APITestCase, APIClient from django.urls import reverse from rest_framework.authtoken.models import Token
from flask import Flask from src.models import db from . import config
from dependencies import Injector from dependencies import this from dependencies.contrib.celery import shared_task from examples.order.commands import ProcessOrder
ano = int(input('Digite o ano: '))
# leap years are divisible by 4, except century years not divisible by 400
if ano % 4 == 0 and (ano % 100 != 0 or ano % 400 == 0):
  print('Ele é bissexto')
else:
  print('Ele não é bissexto')
__version__ = '0.0.1' __license__ = 'BSD'
from datetime import timedelta from typing import Union, List, Optional import click import pandas as pd from flask import current_app as app from flask.cli import with_appcontext from flexmeasures import Sensor from flexmeasures.data import db from flexmeasures.data.schemas.generic_assets import GenericAssetIdField from flexmeasures.data.schemas.sensors import SensorIdField from flexmeasures.data.models.generic_assets import GenericAsset from flexmeasures.data.models.time_series import TimedBelief from flexmeasures.data.utils import save_to_db app.cli.add_command(fm_edit_data) def parse_attribute_value( attribute_null_value: bool, attribute_float_value: Optional[float] = None, attribute_bool_value: Optional[bool] = None, attribute_str_value: Optional[str] = None, attribute_int_value: Optional[int] = None, ) -> Union[float, int, bool, str, None]: """Parse attribute value.""" if not single_true( [attribute_null_value] + [ v is not None for v in [ attribute_float_value, attribute_bool_value, attribute_str_value, attribute_int_value, ] ] ): raise ValueError("Cannot set multiple values simultaneously.") if attribute_null_value: return None elif attribute_float_value is not None: return float(attribute_float_value) elif attribute_bool_value is not None: return bool(attribute_bool_value) elif attribute_int_value is not None: return int(attribute_int_value) return attribute_str_value
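`parse_attribute_value` above leans on a `single_true` helper (and registers a `fm_edit_data` CLI group) that the excerpt drops. A minimal sketch of the helper under the obvious reading of the surrounding check — exactly one element of the iterable is truthy:

```python
# Assumed semantics for the missing helper: True iff exactly one element
# of the iterable is truthy.
def single_true(iterable) -> bool:
    it = iter(iterable)
    return any(it) and not any(it)  # first any() stops at the first truthy
```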
mail_settings = { "MAIL_SERVER": 'smtp.gmail.com', "MAIL_PORT": 465, "MAIL_USE_TLS": False, "MAIL_USE_SSL": True, "MAIL_USERNAME": 'c003.teste.jp@gmail.com', "MAIL_PASSWORD": 'C003.teste' }
#!/usr/bin/env python3.6
# -*- encoding=utf8 -*-
import pyquery

"""
Parser configuration keys (descriptions reconstructed from the identifier
names; the original non-ASCII text was stripped in extraction):

1. PARSER_PASSAGE_URL       passage URL
2. PARSER_PASSAGE_TITLE     passage title
3. PARSER_PASSAGE_DATE      passage date
4. PARSER_PASSAGE_CATEGORY  passage category
5. PARSER_PASSAGE_TAG       passage tags
6. PARSER_PASSAGE_CONTENT   passage content
7. PARSER_PASSAGE_IMGURL    image URL
"""
#!/usr/bin/env python3 # ---------------------------------------------------------------------------- # Copyright 2019 Drunella # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- import os import sys import glob import subprocess import argparse import hashlib import traceback import pprint if __name__ == '__main__': try: retval = main(sys.argv) sys.exit(retval) except Exception as e: print(e) traceback.print_exc() sys.exit(1)
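The `__main__` wrapper above calls a `main(argv)` the excerpt never defines. A hypothetical skeleton that matches the call signature (takes `sys.argv`, returns an exit status); the argument handling and checksum body are purely illustrative:

```python
# Placeholder main(); the real program body is not in the excerpt.
def main(argv):
    parser = argparse.ArgumentParser(description='illustrative skeleton')
    parser.add_argument('files', nargs='*', help='files to checksum (example)')
    args = parser.parse_args(argv[1:])
    for name in args.files:
        with open(name, 'rb') as f:
            print(hashlib.sha256(f.read()).hexdigest(), name)
    return 0
```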
# SPDX-License-Identifier: BSD-3-Clause """ Text decode functions. These functions can be used to get Unicode strings from a series of bytes. """ from codecs import ( BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE, CodecInfo, lookup as lookup_codec, ) from collections import OrderedDict from typing import Dict, Iterable, Optional, Tuple from apetest.typing import LoggerT def encoding_from_bom(data: bytes) -> Optional[str]: """ Look for a byte-order-marker at the start of the given C{bytes}. If found, return the encoding matching that BOM, otherwise return C{None}. """ if data.startswith(BOM_UTF8): return "utf-8" elif data.startswith(BOM_UTF16_LE) or data.startswith(BOM_UTF16_BE): return "utf-16" elif data.startswith(BOM_UTF32_LE) or data.startswith(BOM_UTF32_BE): return "utf-32" else: return None def standard_codec_name(name: str) -> str: """ Map a codec name to the preferred standardized version. The preferred names were taken from this list published by IANA: U{http://www.iana.org/assignments/character-sets/character-sets.xhtml} @param name: Text encoding name, in lower case. """ if name.startswith("iso8859"): return "iso-8859" + name[7:] return { "ascii": "us-ascii", "euc_jp": "euc-jp", "euc_kr": "euc-kr", "iso2022_jp": "iso-2022-jp", "iso2022_jp_2": "iso-2022-jp-2", "iso2022_kr": "iso-2022-kr", }.get(name, name) def try_decode(data: bytes, encodings: Iterable[str]) -> Tuple[str, str]: """ Attempt to decode text using the given encodings in order. @param data: Encoded version of the text. @param encodings: Names of the encodings to try. Must all be lower case. @return: C{(text, encoding)} The decoded string and the encoding used to decode it. The returned encoding is name the preferred name, which could differ from the name used in the C{encodings} argument. @raise ValueError: If the text could not be decoded. """ # Build sequence of codecs to try. codecs: Dict[str, CodecInfo] = OrderedDict() for encoding in encodings: try: codec = lookup_codec(encoding) except LookupError: pass else: codecs[standard_codec_name(codec.name)] = codec # Apply decoders to the document. for name, codec in codecs.items(): try: text, consumed = codec.decode(data, "strict") except UnicodeDecodeError: continue if consumed == len(data): return text, name raise ValueError("Unable to determine document encoding") def decode_and_report( data: bytes, encoding_options: Iterable[Tuple[Optional[str], str]], logger: LoggerT, ) -> Tuple[str, str]: """ Attempt to decode text using several encoding options in order. @param data: Encoded version of the text. @param encoding_options: C{(encoding | None, source)*} Each option is a pair of encoding name and a description of where this encoding suggestion originated. If the encoding name is C{None}, the option is skipped. @param logger: Non-fatal problems are logged here. Such problems include an unknown or differing encodings among the options. @return: C{(text, encoding)} The decoded string and the encoding used to decode it. @raise ValueError: If the text could not be decoded. """ # Filter and remember encoding options. options = [ (encoding, source) for encoding, source in encoding_options if encoding is not None ] encodings = [encoding for encoding, source in options] # Always try to decode as UTF-8, since that is the most common encoding # these days, plus it's a superset of ASCII so it also works for old or # simple documents. 
encodings.append("utf-8") text, used_encoding = try_decode(data, encodings) # Report differences between suggested encodings and the one we # settled on. for encoding, source in options: try: codec = lookup_codec(encoding) except LookupError: logger.warning( '%s specifies encoding "%s", which is unknown to Python', source, encoding, ) continue std_name = standard_codec_name(codec.name) if std_name != used_encoding: logger.warning( '%s specifies encoding "%s", ' 'while actual encoding seems to be "%s"', source, encoding, used_encoding, ) elif std_name != encoding: logger.info( '%s specifies encoding "%s", ' 'which is not the standard name "%s"', source, encoding, used_encoding, ) return text, used_encoding
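A small usage sketch for `decode_and_report`; the logger name and the "HTTP Content-Type header" source label are illustrative choices, not part of the module:

```python
import logging

logger = logging.getLogger("apetest.demo")  # illustrative logger
data = "café".encode("latin-1")
text, encoding = decode_and_report(
    data,
    [("latin-1", "HTTP Content-Type header")],  # (encoding, where it came from)
    logger,
)
assert text == "café"
assert encoding == "iso-8859-1"  # the standardized name for latin-1
```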
import os import json import gzip from copy import deepcopy, copy import numpy as np import csv import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader, RandomSampler from transformers.tokenization_utils import trim_batch # Special symbols SOS_token = "<SOS>" # start of sentence EOS_token = "<EOS>" # end of sentence PAD_token = SOS_token # padding symbol INPUT_TOKENS_SCAN = ['jump', 'opposite', 'right', 'twice', 'and', 'turn', 'thrice', 'run', 'after', 'around', 'left', 'walk', 'look'] OUTPUT_TOKENS_SCAN = ['I_TURN_RIGHT', 'I_JUMP', 'I_TURN_LEFT', 'I_RUN', 'I_WALK', 'I_LOOK'] # ACTION_TO_TEXT = {'I_TURN_RIGHT': 'right', 'I_JUMP': 'jump', 'I_TURN_LEFT': 'left', 'I_RUN': 'run', 'I_WALK': 'walk', 'I_LOOK': 'look'} # def encode_file_iterator(tokenizer, data_path, max_length, pad_to_max_length=True, return_tensors="pt", max_examples=None): # ''' # This provides a low-memory usage way of iterating thru all of the source/target lines for processing by JIT loader. # ''' # if data_path[-3:] == '.gz': # print('Data file is gzipped') # f = gzip.open(data_path, "rt") # else: # print('Data file is plain text') # f = open(data_path, "r", encoding='utf-8') # # for i, text in enumerate(f): # # tokenized = tokenizer.batch_encode_plus( [text + ' </s>'], max_length=max_length, # pad_to_max_length=pad_to_max_length, return_tensors=return_tensors ) # # yield tokenized # # if max_examples and i >= max_examples: # break # # f.close() # def convert_scan_actions_to_text(actions): # return ' '.join([ACTION_TO_TEXT[_action] for _action in actions.split(' ')]) # def encode_scan_file(tokenizer, data, io_type, max_length, pad_to_max_length=True, return_tensors="pt", max_examples=None): # examples = [] # # a = tokenizer.batch_encode_plus( ['right jump left run walk look' + ' <s> </s>'], max_length=max_length, # # pad_to_max_length=pad_to_max_length, return_tensors=return_tensors ) # # print(a) # # exit() # for dp in data: # input, output = dp[0], dp[1] # if io_type == 'input': # raw = input # else: # assert io_type == 'output' # raw = convert_scan_actions_to_text(output) # # tokenized = tokenizer.batch_encode_plus( [raw + ' </s>'], max_length=max_length, # pad_to_max_length=pad_to_max_length, return_tensors=return_tensors ) # # if max_examples and i >= max_examples: # break # examples.append(tokenized) # # return examples
from typing import Dict from rest_framework import serializers from rest_framework.fields import empty from rest_framework.relations import ManyRelatedField from rest_framework.request import Request from .mixins import BridgerSerializerFieldMixin from .types import BridgerType, ReturnContentType
import networkx.algorithms.operators.tests.test_product import pytest from graphscope.experimental.nx.utils.compat import import_as_graphscope_nx import_as_graphscope_nx(networkx.algorithms.operators.tests.test_product, decorators=pytest.mark.usefixtures("graphscope_session"))
# {team} -> Name of team # {name} -> Name of person who supports team teamMatchStarted: list[str] = [ "{team} are shit", "{team} cunts", "Dirty {team}", "Dirty {team}, dirty {name}", ] drawing: list[str] = [ "{team} level, this is a shit match", "Boring old {team}", "Happy with how it's going, {name}?", "Yawn...", "{team} wankers", "How can you support this rubbish, {name}?", "You get the feeling that {team} don't really want this", "No passion from {team}, {name}", "If a game of football is like making love to a beautiful woman, this {team} game is a 10 hand job from a swivel-eyed misfit", "This {team} match is like a game of chess. But with more players and only one piece", ] teamLeadByOne: list[str] = [ "{team} cheats, the ref's a cunt", "That was never a goal for {team}", "{team} don't deserve that", "Bollocks", "That should go to VAR", "Bit fortunuate for {team}", "Can't imagine {team} will keep this lead", "Lucky goal for {team}", "{team} got lucky there", "{team} aren't good enough to stay ahead", "Offside!", ] teamExtendingLead: list[str] = [ "There's no way {team} deserve this lead", "Have {team} paid the ref?", "This is bullshit", "The ref's a cunt, {name}'s a cunt", "The ref's a cunt, {team} are cunts, {name}'s a cunt", "Something has gone seriously wrong with this country", "When I voted for Brexit, I didn't vote for this", "At least Boris remains in charge, we've still got that", "Richard Wanger would be turning in his grave", "Liberal elite bullshit", "That was so offside", "VAR!", "Is the linesman OK?", "If only {name}'s wife was as dirty as this game", ] teamLosingLead: list[str] = [ "Lazy old {team}, lazy old {name}", "{team} are throwing it away", "{team} are rubbish", "{team} fucking it up again", "We really are being treated to some world class flouncing from {team} today", "Brace yourself, {name}. This is going to hurt", "This is brown trouser time for {team}", "I hope {name} brought a spare pair of underpants", "I see {team} are playing their B Team. B for Bullshit", ] teamDeficitOfOne: list[str] = [ "This is more like it from {team}", "Oh dear...", "{team} wankers", "How are you feeling, {name}?", "Bit disappointing, {name}?", "Not looking good for {team}, {name}", "You must be furious, {name}", "{team} have just got no heart", "This is what happens when you try to buy success", "All that money spent, {name}, and for what?", ] teamExtendingDeficit: list[str] = [ "Starting to feel a bit sorry for {team}", "Never mind, {name}, there's always the next game", "Poor {team}", "Whoops...", "Oh dear, everything OK, {name}?", "Hey {name}, where'd you get your keeper?\nPOUNDSHOP !! 
POUNDSHOP !!", "{team} can't raise themselves for this game, typical", "A team like {team} have such a proud history, but what we see today is just embarrassing", "{team} clearly not up for it today", "{team} are letting you down, {name}", "Watching {team} is like watching a bunch of cavemen: Neanderthal", ] teamLosingDeficit: list[str] = [ "Too little too late for {team}", "{team} won't come back from here", "The ref's a cunt", "This is awful", "What a mess", "Well this is an unmitigated shit show", ] teamWon: list[str] = [ "That was a shit game", "There's no way {team} deserved that", "Fuck you, {name} !!", "This will go down in history...\nAs the most tedious game I have ever had the misfortune to witness", ] teamLost: list[str] = [ "Justice done, {team} lost", "Job done for {team}?", "Job done, {name}?", "{name} !!?", "Probably the best {team} could hope for", "Everything OK, {name}?", "{team} continue to disappoint", "Well if the football thing doesn't work out for {team}, they can always consider a career on the stage", "{team} set the bar low", "{team} fail to meet their already low expectations", ] teamDrew: list [str] = [ "Another uninspiring result for {team}", "Thanks for nothing, {team}", "Well that's 90 minutes we won't get back, thanks {team}", "Another draw for {team}", "Boring old {team}", "You should be happy with that result, {name}", "If I could pick one highlight from this {team} game it would be when it finally ended.", "I think {name} will be happy with {team}'s performance today.", ]
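A minimal sketch of how these tables might be consumed: pick a random line for the current match state and fill the placeholders. The `taunt` function name and the event-to-list mapping are assumptions; the excerpt does not show the bot's dispatch code:

```python
import random

def taunt(lines, team, name):
    # Every template uses at most {team} and {name}, so formatting with
    # both keywords is always safe here.
    return random.choice(lines).format(team=team, name=name)

print(taunt(teamLosingLead, 'Spurs', 'Dave'))
```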
from bench import bench print(bench(100, ''' def fib(n): return n if n < 2 else fib(n-1) + fib(n-2) ''', ''' fib(20) '''))
# coding=utf-8 from nlpir.native.nlpir_base import NLPIRBase from ctypes import c_bool, c_char_p, c_int, POINTER, Structure, c_float
# -*- coding: utf-8 -*- import pytest from mock import Mock from bravado_core.exception import SwaggerMappingError from bravado_core.operation import Operation from bravado_core.param import get_param_type_spec from bravado_core.param import Param from bravado_core.spec import Spec
# Copyright (c) 2019 Princeton University
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from markdown import markdown
import base64
import json
from pydub import AudioSegment import os import math from pathlib import Path ''' Splice wav files into multiple segments. ''' LENGTH = 3 # Set splice length in seconds
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: Wu Liang
@contact:
@date: 2014/06/23
"""

import os
import sqlite3
import shutil
import tarfile
import hashlib
import codecs

from mako.template import Template
from pyquery import PyQuery

currentPath = os.path.join(os.path.dirname(os.path.realpath(__file__)))
name = "manpages"
baseName = "manpages-zh"
output = baseName + ".docset"
appName = "dash-" + baseName
tarFileName = baseName + ".tgz"
feedName = baseName + ".xml"
version = "1.5.0"
docsetPath = os.path.join(currentPath, output, "Contents", "Resources", "Documents")

# Step 2: Build the index.html that links every page under html/
fin = codecs.open(os.path.join(docsetPath, "index.html"), "r", "utf-8")
content = fin.read()
fin.close()

jQuery = PyQuery(content)
jQuery.find("body").empty()

fileNames = []
itemTemplate = Template("<a href='html/${fileName}'>${name}</a><br />\n")
for fileName in os.listdir(os.path.join(docsetPath, "html")):
    fileNames.append({
        "name": fileName.split(".")[0],
        "fileName": fileName
    })
    jQuery.find("body").append(
        itemTemplate.render(name=fileName.split(".")[0], fileName=fileName))

fout = codecs.open(os.path.join(docsetPath, "index.html"), "w", "utf-8")
newContent = jQuery.html()
fout.write(newContent)
fout.close()

# Step 3: create the Info.plist file
infoTemplate = Template('''<?xml version="1.0" encoding="UTF-8"?>
<plist version="1.0">
<dict>
    <key>CFBundleIdentifier</key>
    <string>${name}</string>
    <key>CFBundleName</key>
    <string>${name}</string>
    <key>DocSetPlatformFamily</key>
    <string>${name}</string>
    <key>dashIndexFilePath</key>
    <string>index.html</string>
    <key>isDashDocset</key><true/>
    <key>isJavaScriptEnabled</key><true/>
</dict>
</plist>''')
infoPlistFile = os.path.join(currentPath, output, "Contents", "Info.plist")
fout = open(infoPlistFile, "w")
fout.write(infoTemplate.render(name=name))
fout.close()

# Step 4: Create the SQLite Index
dbFile = os.path.join(currentPath, output, "Contents", "Resources", "docSet.dsidx")
if os.path.exists(dbFile):
    os.remove(dbFile)
db = sqlite3.connect(dbFile)
cursor = db.cursor()
try:
    cursor.execute("DROP TABLE searchIndex;")
except Exception:
    pass
cursor.execute('CREATE TABLE searchIndex(id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT);')
cursor.execute('CREATE UNIQUE INDEX anchor ON searchIndex (name, type, path);')
insertTemplate = Template(
    "INSERT OR IGNORE INTO searchIndex(name, type, path) VALUES ('${name}', '${type}', '${path}');")

# Step 5: Populate the SQLite Index
for result in fileNames:
    sql = insertTemplate.render(name=result["name"], type="Builtin",
                                path="html/" + result["fileName"])
    print(sql)
    cursor.execute(sql)
db.commit()
db.close()

# Step 6: copy icon
shutil.copyfile(os.path.join(currentPath, "icon.png"),
                os.path.join(currentPath, output, "icon.png"))
shutil.copyfile(os.path.join(currentPath, "icon@2x.png"),
                os.path.join(currentPath, output, "icon@2x.png"))

# Step 7: pack the docset into a tarball under dist/
if not os.path.exists(os.path.join(currentPath, "dist")):
    os.makedirs(os.path.join(currentPath, "dist"))
tarFile = tarfile.open(os.path.join(currentPath, "dist", tarFileName), "w:gz")
for root, dirNames, fileNames in os.walk(output):
    for fileName in fileNames:
        fullPath = os.path.join(root, fileName)
        tarFile.add(fullPath)
tarFile.close()

# Step 8: feed url
feedTemplate = Template('''<entry>
    <version>${version}</version>
    <sha1>${sha1Value}</sha1>
    <url>https://raw.githubusercontent.com/magicsky/${appName}/master/dist/${tarFileName}</url>
</entry>''')
fin = open(os.path.join(currentPath, "dist", tarFileName), "rb")
sha1Value = hashlib.sha1(fin.read()).hexdigest()
fin.close()

fout = open(os.path.join(currentPath, feedName), "w")
fout.write(feedTemplate.render(sha1Value=sha1Value, appName=appName,
                               tarFileName=tarFileName, version=version))
fout.close()
import os
import sys

sys.path.insert(0, os.path.abspath(".."))

# --------------------------------------------------------------------------------------

project = "nisystemlink"
copyright = "2020, National Instruments"
author = "National Instruments"

# The short X.Y version
version = "0.1"
# The full version, including alpha/beta/rc tags
release = "0.1.3"

# --------------------------------------------------------------------------------------

extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.napoleon",
    "sphinx.ext.viewcode",
    "sphinx_autodoc_typehints",
    "docs.cleanup",
]

master_doc = "index"

html_theme = "sphinx_rtd_theme"

html_extra_path = [
    "../LICENSE",
]

nitpicky = True
nitpick_ignore = [
    ("py:class", "datetime.datetime"),
    ("py:class", "datetime.timedelta"),
    ("py:class", "pathlib.Path"),
    ("py:data", "typing.Any"),
    ("py:data", "typing.Awaitable"),
    ("py:data", "typing.Dict"),
    ("py:data", "typing.Iterable"),
    ("py:data", "typing.List"),
    ("py:data", "typing.Optional"),
    ("py:data", "typing.Sequence"),
    ("py:data", "typing.Tuple"),
    ("py:data", "typing.Union"),
]

autodoc_default_options = {
    "inherited-members": True,
    "special-members": "__init__",
    "no-private-members": True,
}

# Don't let napoleon force methods to be included in the docs; use autodoc flags and our
# own docs.cleanup module for that.
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
from typing import Dict, List

from arango.cursor import Cursor
from django.http.response import Http404
from django.shortcuts import get_object_or_404
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.request import Request
from rest_framework_extensions.mixins import NestedViewSetMixin

from multinet.api.models import Workspace, WorkspaceRole
from multinet.api.utils.arango import ArangoQuery
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import argparse
import logging
import os
import sys


def ParseArgs():
  # Hypothetical argument parser (the original definition is not shown);
  # the flags are inferred from how `args` is used below.
  parser = argparse.ArgumentParser(description='Generate host files.')
  parser.add_argument('--template', required=True)
  parser.add_argument('--destination', required=True)
  parser.add_argument('--projects', required=True)
  parser.add_argument('--storage_bucket', required=True)
  parser.add_argument('--storage_prefix', required=True)
  return parser.parse_args()


def ConfigureLogging(args):
  # Assumed minimal setup; the original definition is not shown.
  logging.basicConfig(level=logging.INFO)


if __name__ == '__main__':
  args = ParseArgs()
  ConfigureLogging(args)
  logging.info("Arguments: %s" % args)

  if not os.path.exists(args.template):
    raise ValueError('Template host file not found: %s' % args.template)
  if not os.path.exists(args.destination):
    raise ValueError('Destination directory not found: %s' % args.destination)

  # Generate all the host files based off the arguments passed.
  with open(args.template, 'r') as f:
    template = f.read()
  for project_id in args.projects.split(';'):
    filename = "%s.host.textpb" % project_id
    destination = os.path.join(args.destination, filename)
    with open(destination, 'w') as f:
      logging.info("Generating %s" % destination)
      content = template.replace("<project_id>", project_id)
      content = content.replace("<storage_bucket>", args.storage_bucket)
      content = content.replace("<storage_prefix>", args.storage_prefix)
      f.write(content)

  sys.exit(0)
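# Example invocation of the generator above (hypothetical paths and values,
# matching the flags sketched in ParseArgs):
#
#   python generate_hosts.py \
#       --template host.template.textpb \
#       --destination out/ \
#       --projects "project-a;project-b" \
#       --storage_bucket example-bucket \
#       --storage_prefix builds
#
# This would write out/project-a.host.textpb and out/project-b.host.textpb,
# each with the three placeholders substituted.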
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from django.urls import reverse

from django_extensions.db.models import TimeStampedModel
from mptt.models import MPTTModel, TreeForeignKey

from .managers import UserProfileManager, DepartmentManager, PositionManager

User = settings.AUTH_USER_MODEL
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------

from msrest import Serializer, Deserializer

from ...client import Client
from ...v5_1.build import models
import unittest

from gamesopt.train import train, TrainConfig
import pandas as pd
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from enum import Enum
from setuptools import setup, find_packages

install_requires = [
    "allennlp>=0.9.0",
    "wandb==0.8.15",
]

setup(
    name='allennlp_wandb',
    version='0.0.1',
    description='Utilities to use allennlp with wandb',
    packages=find_packages(
        exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    package_data={'allennlp_wandb': ['py.typed']},
    install_requires=install_requires,
    zip_safe=False)
#!/usr/bin/env python

from setuptools import setup, find_packages

setup(
    name="django-axes",
    description="Keep track of failed login attempts in Django-powered sites.",
    long_description="\n".join(
        [
            open("README.rst", encoding="utf-8").read(),
            open("CHANGES.rst", encoding="utf-8").read(),
        ]
    ),
    keywords="authentication django pci security",
    author=", ".join(
        [
            "Josh VanderLinden",
            "Philip Neustrom",
            "Michael Blume",
            "Alex Clark",
            "Camilo Nova",
            "Aleksi Hakli",
        ]
    ),
    author_email="security@jazzband.co",
    maintainer="Jazzband",
    maintainer_email="security@jazzband.co",
    url="https://github.com/jazzband/django-axes",
    project_urls={
        "Documentation": "https://django-axes.readthedocs.io/",
        "Source": "https://github.com/jazzband/django-axes",
        "Tracker": "https://github.com/jazzband/django-axes/issues",
    },
    license="MIT",
    package_dir={"axes": "axes"},
    use_scm_version=True,
    setup_requires=["setuptools_scm"],
    python_requires="~=3.6",
    install_requires=["django>=1.11", "django-appconf>=1.0.3", "django-ipware>=2.0.2"],
    include_package_data=True,
    packages=find_packages(),
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Web Environment",
        "Environment :: Plugins",
        "Framework :: Django",
        "Framework :: Django :: 1.11",
        "Framework :: Django :: 2.2",
        "Framework :: Django :: 3.0",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Internet :: Log Analysis",
        "Topic :: Security",
        "Topic :: System :: Logging",
    ],
    zip_safe=False,
)
from django.contrib import admin

from achievements import models

admin.site.register(models.Achievement)
from torch.nn import CrossEntropyLoss
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt


def main(csv_file):
    # Hypothetical helper (its original definition is not part of this
    # excerpt): load one benchmark CSV and return the mean and standard
    # deviation of the content-transfer and total times. The column names
    # used here are assumptions.
    df = pd.read_csv(csv_file)
    return (df['contentTransfer'].mean(), df['total'].mean(),
            df['contentTransfer'].std(), df['total'].std())


if __name__ == '__main__':
    files_http1 = ['./results/benchmark_size/http1_txt1.csv',
                   './results/benchmark_size/http1_txt2.csv',
                   './results/benchmark_size/http1_txt3.csv',
                   './results/benchmark_size/http1_txt4.csv',
                   './results/benchmark_size/http1_txt5.csv']
    files_http1_tls = ['./results/benchmark_size/http1_tls_txt1.csv',
                       './results/benchmark_size/http1_tls_txt2.csv',
                       './results/benchmark_size/http1_tls_txt3.csv',
                       './results/benchmark_size/http1_tls_txt4.csv',
                       './results/benchmark_size/http1_tls_txt5.csv']
    files_http2 = ['./results/benchmark_size/http2_txt1.csv',
                   './results/benchmark_size/http2_txt2.csv',
                   './results/benchmark_size/http2_txt3.csv',
                   './results/benchmark_size/http2_txt4.csv',
                   './results/benchmark_size/http2_txt5.csv']
    files_http2_tls = ['./results/benchmark_size/http2_tls_txt1.csv',
                       './results/benchmark_size/http2_tls_txt2.csv',
                       './results/benchmark_size/http2_tls_txt3.csv',
                       './results/benchmark_size/http2_tls_txt4.csv',
                       './results/benchmark_size/http2_tls_txt5.csv']

    time_tot_http2, time_contentTransfer_http2 = [], []
    std_tot_http2, std_contentTransfer_http2 = [], []
    time_tot_http1, time_contentTransfer_http1 = [], []
    std_tot_http1, std_contentTransfer_http1 = [], []
    time_tot_http2_tls, time_contentTransfer_http2_tls = [], []
    std_tot_http2_tls, std_contentTransfer_http2_tls = [], []
    time_tot_http1_tls, time_contentTransfer_http1_tls = [], []
    std_tot_http1_tls, std_contentTransfer_http1_tls = [], []

    for f in files_http2:
        t1, t2, std1, std2 = main(f)
        time_contentTransfer_http2.append(t1)
        time_tot_http2.append(t2)
        std_contentTransfer_http2.append(2*std1)
        std_tot_http2.append(2*std2)

    for f in files_http1:
        t1, t2, std1, std2 = main(f)
        time_contentTransfer_http1.append(t1)
        time_tot_http1.append(t2)
        std_contentTransfer_http1.append(2*std1)
        std_tot_http1.append(2*std2)

    for f in files_http2_tls:
        t1, t2, std1, std2 = main(f)
        time_contentTransfer_http2_tls.append(t1)
        time_tot_http2_tls.append(t2)
        std_contentTransfer_http2_tls.append(2*std1)
        std_tot_http2_tls.append(2*std2)

    for f in files_http1_tls:
        t1, t2, std1, std2 = main(f)
        time_contentTransfer_http1_tls.append(t1)
        time_tot_http1_tls.append(t2)
        std_contentTransfer_http1_tls.append(2*std1)
        std_tot_http1_tls.append(2*std2)

    x = [100, 1000, 10000, 100000, 1000000]

    time_tot_http2, time_contentTransfer_http2 = np.array(time_tot_http2), np.array(time_contentTransfer_http2)
    std_tot_http2, std_contentTransfer_http2 = np.array(std_tot_http2), np.array(std_contentTransfer_http2)
    time_tot_http1, time_contentTransfer_http1 = np.array(time_tot_http1), np.array(time_contentTransfer_http1)
    std_tot_http1, std_contentTransfer_http1 = np.array(std_tot_http1), np.array(std_contentTransfer_http1)
    time_tot_http2_tls, time_contentTransfer_http2_tls = np.array(time_tot_http2_tls), np.array(time_contentTransfer_http2_tls)
    std_tot_http2_tls, std_contentTransfer_http2_tls = np.array(std_tot_http2_tls), np.array(std_contentTransfer_http2_tls)
    time_tot_http1_tls, time_contentTransfer_http1_tls = np.array(time_tot_http1_tls), np.array(time_contentTransfer_http1_tls)
    std_tot_http1_tls, std_contentTransfer_http1_tls = np.array(std_tot_http1_tls), np.array(std_contentTransfer_http1_tls)

    fig, ax = plt.subplots()
    ax.grid()
    ax.plot(x, time_contentTransfer_http1, 'o-', color='r', label="HTTP1")
    ax.plot(x, time_contentTransfer_http1_tls, 'o-', color='g', label="HTTP1_with_tls")
    ax.plot(x, time_contentTransfer_http2, 'o-', color='b', label="SPDY")
    ax.plot(x, time_contentTransfer_http2_tls, 'o-', color='k', label="SPDY_with_tls")
    ax.fill_between(x, time_contentTransfer_http1 - std_contentTransfer_http1,
                    time_contentTransfer_http1 + std_contentTransfer_http1, color='gray', alpha=0.3)
    ax.fill_between(x, time_contentTransfer_http2 - std_contentTransfer_http2,
                    time_contentTransfer_http2 + std_contentTransfer_http2, color='gray', alpha=0.3)
    ax.fill_between(x, time_contentTransfer_http1_tls - std_contentTransfer_http1_tls,
                    time_contentTransfer_http1_tls + std_contentTransfer_http1_tls, color='gray', alpha=0.3)
    ax.fill_between(x, time_contentTransfer_http2_tls - std_contentTransfer_http2_tls,
                    time_contentTransfer_http2_tls + std_contentTransfer_http2_tls, color='gray', alpha=0.3)
    # ax.errorbar(x, time_contentTransfer_http2, yerr=std_contentTransfer_http2, fmt='-', color='r', label="HTTP2")
    # ax.errorbar(x, time_contentTransfer_quic, yerr=std_contentTransfer_quic, fmt='-', color='b', label="QUIC")
    ax.set_xlabel('Size of data (Length)')
    ax.set_ylabel('Time (in ms)')
    ax.legend()
    ax.set_xscale('log')
    ax.set_title('Comparison of Time Taken for Data Transfer with TLS ON/OFF')
    fig.savefig('results/plots/time_contentTransfer_tls.png', dpi=fig.dpi)

    fig, ax = plt.subplots()
    ax.grid()
    ax.plot(x, time_tot_http1, 'o-', color='r', label="HTTP1")
    ax.plot(x, time_tot_http1_tls, 'o-', color='g', label="HTTP1_with_tls")
    ax.plot(x, time_tot_http2, 'o-', color='b', label="SPDY")
    ax.plot(x, time_tot_http2_tls, 'o-', color='k', label="SPDY_with_tls")
    ax.fill_between(x, time_tot_http1 - std_tot_http1, time_tot_http1 + std_tot_http1,
                    color='gray', alpha=0.3)
    ax.fill_between(x, time_tot_http2 - std_tot_http2, time_tot_http2 + std_tot_http2,
                    color='gray', alpha=0.3)
    ax.fill_between(x, time_tot_http1_tls - std_tot_http1_tls, time_tot_http1_tls + std_tot_http1_tls,
                    color='gray', alpha=0.3)
    ax.fill_between(x, time_tot_http2_tls - std_tot_http2_tls, time_tot_http2_tls + std_tot_http2_tls,
                    color='gray', alpha=0.3)
    # ax.errorbar(x, time_tot_http2, yerr=std_tot_http2, fmt='-', color='r', label="HTTP2")
    # ax.errorbar(x, time_tot_quic, yerr=std_tot_quic, fmt='-', color='b', label="QUIC")
    ax.set_xlabel('Size of data (Length)')
    ax.set_ylabel('Time (in ms)')
    ax.legend()
    ax.set_xscale('log')
    ax.set_title('Comparison of Total Time with TLS ON/OFF')
    fig.savefig('results/plots/total_time_tls.png', dpi=fig.dpi)
import unittest
import os

from matplotlib import pyplot as plt
from shapely import geometry, affinity

X_COORDINATE = 0
Y_COORDINATE = 1


def extract_x_y(polygon: list) -> (list, list):
    """Extract the x and y coordinates as two separate lists"""
    x_list = []
    y_list = []
    for vertex in polygon:
        x_list.append(vertex[X_COORDINATE])
        y_list.append(vertex[Y_COORDINATE])
    return (x_list, y_list)


def save_fig(dir: str):
    """Save the current plt figure in the given directory under the name: m1.png"""
    plt.savefig(dir + '/m1.png')
    plt.clf()


def plot_polygons(hull: list, min_hull: list, perceived_poly: list,
                  real_poly: list, dir: str = None):
    """Plot the given two polygons, in a single figure, with different colors"""
    h1_x, h1_y = extract_x_y(hull)
    h2_x, h2_y = extract_x_y(min_hull)
    p1_x, p1_y = extract_x_y(perceived_poly)
    p2_x, p2_y = extract_x_y(real_poly)

    # Figure settings
    fig = plt.figure()
    # fig.suptitle('Convex hull area (red) VS real representation area (blue)')
    plt.xlabel('x')
    plt.ylabel('y')

    # Plotting hulls
    plt.fill(h1_x, h1_y, color="#FF000020")
    plt.fill(h2_x, h2_y, color="#0000FF20")

    # Plotting polygons lines
    plt.plot(p1_x, p1_y, color="#FF000060")  # Red perceived poly
    plt.plot(p2_x, p2_y, color="#0000FF60")  # Blue real poly

    # Plotting polygons points
    for p in perceived_poly:
        plt.plot(p[X_COORDINATE], p[Y_COORDINATE], 'o', color="#FF0000A0")
    for p in real_poly:
        plt.plot(p[X_COORDINATE], p[Y_COORDINATE], 'x', color="#0000FFA0")

    # plt.show()
    if dir is not None:
        save_fig(dir)


def surveyor_formula(polygon: list) -> float:
    """Find the area of the given polygon using the surveyor formula"""
    # Check if first and last points of polygon are equal
    parsed_poly = polygon[0:-1]\
        if polygon[0] == polygon[len(polygon)-1]\
        else polygon
    area = 0
    for i in range(-1, len(parsed_poly)-1):
        area += parsed_poly[i][X_COORDINATE] * parsed_poly[i+1][Y_COORDINATE] -\
            parsed_poly[i][Y_COORDINATE] * parsed_poly[i+1][X_COORDINATE]
    return abs(area / 2)


def polygon_to_vertices_list(polygon: geometry.Polygon) -> list:
    """Extract the polygon vertices as a list"""
    return list(polygon.exterior.coords)


def apply_transformations(initial_representation: list, events: list) -> float:
    """Apply the transformations in the events list to the initial representation"""
    scale = 1
    rot_angle = 0
    trans_vector = [0, 0]
    for item in events:
        for event in item["events"]:
            if event["type"] == "TRANSLATION":
                trans_vector[X_COORDINATE] += event["trigger"]["transformation"][X_COORDINATE]
                trans_vector[Y_COORDINATE] += event["trigger"]["transformation"][Y_COORDINATE]
            elif event["type"] == "ROTATION":
                rot_angle += event["trigger"]["transformation"]
            elif event["type"] == "UNIFORM_SCALE":
                scale *= event["trigger"]["transformation"]

    # Apply multiplication
    polygon = geometry.Polygon(initial_representation)
    s_polygon = affinity.scale(polygon, xfact=scale, yfact=scale, origin=(0, 0))
    r_s_polygon = affinity.rotate(s_polygon, rot_angle, origin=(0, 0))
    t_r_s_polygon = affinity.translate(r_s_polygon, xoff=trans_vector[0],
                                       yoff=trans_vector[1])
    return polygon_to_vertices_list(t_r_s_polygon)


def apply_m1(real_representation: list, perceived_representation: list,
             dir: str = None) -> float:
    """Apply the metric M1 and obtain its result, between 0 and 1"""
    joint_point_set = real_representation + perceived_representation

    # Getting necessary hulls
    real_convex_hull = geometry.MultiPoint(real_representation).convex_hull
    perceived_hull = geometry.MultiPoint(perceived_representation).convex_hull
    convex_hull = geometry.MultiPoint(joint_point_set).convex_hull

    # Getting vertices of hulls
    real_vertices = polygon_to_vertices_list(real_convex_hull)
    perceived_vertices = polygon_to_vertices_list(perceived_hull)
    joint_vertices = polygon_to_vertices_list(convex_hull)

    # Getting the min area
    real_area = surveyor_formula(real_vertices)
    perceived_area = surveyor_formula(perceived_vertices)
    if real_area <= perceived_area:
        min_area = real_area
        min_vertices = real_vertices
    else:
        min_area = perceived_area
        min_vertices = perceived_vertices

    plot_polygons(hull=joint_vertices, min_hull=min_vertices,
                  perceived_poly=perceived_representation,
                  real_poly=real_representation, dir=dir)
    return min_area / surveyor_formula(joint_vertices)


if __name__ == '__main__':
    unittest.main()
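# Sketch (not part of the original module): a minimal check of apply_m1 with
# two hypothetical unit squares. An identical perceived shape scores exactly
# 1.0; shifting it enlarges the joint convex hull and lowers the score.
def demo_apply_m1():
    square = [(0, 0), (1, 0), (1, 1), (0, 1)]
    shifted = [(x + 0.5, y) for (x, y) in square]
    # Same shape: min hull area 1, joint hull area 1 -> ratio 1.0
    assert abs(apply_m1(square, square) - 1.0) < 1e-9
    # Shifted shape: joint hull is a 1.5 x 1 rectangle -> ratio 2/3
    assert apply_m1(square, shifted) < 1.0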
import os
from PIL import Image
from array import *
from random import shuffle
import shutil

# Load from and save to
Names = [['./training-images', 'train'], ['./test-images', 'test']]

for name in Names:
    FileList = []
    for dirname in os.listdir(name[0]):
        path = os.path.join(name[0], dirname)
        print(path, ":", len(os.listdir(path)))
import sys
from pyspark import SparkContext
import json

print('spark got python path -> ' + str(sys.executable))

logfile = sys.argv[1]
sc = SparkContext()
logdata = sc.textFile(logfile).cache()

a_count = logdata.filter(lambda s: 'a' in s).count()
b_count = logdata.filter(lambda s: 'b' in s).count()

print(json.dumps({'a': a_count, 'b': b_count}))
import bpy
from bpy.props import *

from ...nodes.BASE.node_base import RenderNodeBase
from keras.models import load_model
import cv2
import pickle
import keras.backend as K
import numpy as np

from src.model_path import MODEL_PATH

'''def predict(self, cell):
    model = load_model('./model/Model.h5')
    f = K.function([model.layers[0].input, K.learning_phase()],
                   [model.layers[-1].output])
    rescaled_cell = self.rescale(cell)
    result = []
    for _ in range(10):
        result.append(f([rescaled_cell, 1]))
    result = np.array(result)
    prediction = result.mean(axis=0)
    uncertainty = result.var(axis=0)
    if uncertainty.argmax() > 3:
        new_prediction = 0
        print(prediction.argmax(), uncertainty.argmax(), new_prediction)
    else:
        print(prediction.argmax(), uncertainty.argmax())'''
# -*- coding: utf-8 -*-
"""A module for plotting penguins data for modelling with scikit-learn."""

# Imports ---------------------------------------------------------------------

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Constants -------------------------------------------------------------------

SPECIES_COLORS = {
    'Adelie': '#4daf4a',
    'Gentoo': '#ffb000',
    'Chinstrap': '#0084f7'
}

X_AXIS = [30, 60]
Y_AXIS = [12, 22]

# Set style -------------------------------------------------------------------

# Load the style from a file
plt.style.use('./style/eda.mplstyle')

# Alternatively, load the style from the library in ~/.matplotlib/stylelib
# plt.style.use(['eda'])

# Functions -------------------------------------------------------------------

def get_contour_data(model, pipeline, n_points=1000):
    """Create the data used to show the boundary of the decision function."""
    x0s = np.linspace(X_AXIS[0], X_AXIS[1], n_points)
    x1s = np.linspace(Y_AXIS[0], Y_AXIS[1], n_points)
    x0, x1 = np.meshgrid(x0s, x1s)
    X = np.c_[x0.ravel(), x1.ravel()]
    df_X = pd.DataFrame(X, columns=['bill_length_mm', 'bill_depth_mm'])
    X = pipeline.transform(df_X)
    y_pred = model.predict(X).reshape(x0.shape)
    y_decision = model.decision_function(X).reshape(x0.shape)
    return x0, x1, y_pred, y_decision

def get_target_colors(target):
    """Create a dictionary of colors to use in binary classification plots."""
    return {
        target: '#984ea3',
        'Other': '#ff7f00'
    }

# Plots -----------------------------------------------------------------------

def plot_target_by_features(df):
    """Plot the different target species."""
    fig, ax = plt.subplots()
    ax.set_title(
        label='Palmer penguins by species and bill characteristics',
        loc='center')
    ax.get_xaxis().set_major_formatter(
        mpl.ticker.FormatStrFormatter('%.0f'))
    ax.set_xlim(X_AXIS[0], X_AXIS[1])
    ax.set_xlabel('Bill length (mm)')
    ax.get_yaxis().set_major_formatter(
        mpl.ticker.FormatStrFormatter('%.0f'))
    ax.set_ylim(Y_AXIS[0], Y_AXIS[1])
    ax.set_ylabel('Bill depth (mm)')
    grouped = df.groupby('species')
    for key, group in grouped:
        ax.scatter(
            group['bill_length_mm'],
            group['bill_depth_mm'],
            c=SPECIES_COLORS[key],
            s=40,
            label=key,
            alpha=0.55)
    ax.legend(loc='lower left', handletextpad=0.2)
    fig.savefig('plots/target-by-features.png', format='png')
    plt.close()

def plot_model(df, model, pipeline, f_score, target, title, filename):
    """Plot the results of a binary classification model."""
    fig, ax = plt.subplots()
    ax.set_title(title, loc='center')
    ax.get_xaxis().set_major_formatter(
        mpl.ticker.FormatStrFormatter('%.0f'))
    ax.set_xlim(X_AXIS[0], X_AXIS[1])
    ax.set_xlabel('Bill length (mm)')
    ax.get_yaxis().set_major_formatter(
        mpl.ticker.FormatStrFormatter('%.0f'))
    ax.set_ylim(Y_AXIS[0], Y_AXIS[1])
    ax.set_ylabel('Bill depth (mm)')

    # Plot the boundary of the decision function
    x0, x1, y_pred, y_decision = get_contour_data(model, pipeline)
    ax.contourf(x0, x1, y_pred, cmap=plt.cm.PuOr, alpha=0.2)
    # This plots the decision score, if needed
    # ax.contourf(x0, x1, y_decision, cmap=plt.cm.PuOr, alpha=0.1)

    df = df.copy()
    df['species'] = df['target'].apply(lambda t: target if t == 1 else 'Other')
    colors = get_target_colors(target)
    grouped = df.groupby('species')
    for key, group in grouped:
        ax.scatter(
            group['bill_length_mm'],
            group['bill_depth_mm'],
            c=colors[key],
            s=40,
            label=key,
            alpha=0.55)
    ax.legend(loc='lower left', handletextpad=0.2)

    bbox_style = {
        'boxstyle': 'round',
        'facecolor': '#ffffff',
        'edgecolor': '#d4d4d4',
        'alpha': 0.8
    }
    ax.text(53, 12.415, '$F_1$ score: {0}'.format(f_score), bbox=bbox_style)

    fig.savefig('plots/{0}.png'.format(filename), format='png')
    plt.close()
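# Sketch (not part of the original module): driving plot_model from
# scikit-learn. The pipeline contents, the LinearSVC settings, the f_score
# value, and the assumption that df['target'] holds 0/1 labels are all
# illustrative; only the column names come from the code above.
def demo_plot_model(df):
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.svm import LinearSVC

    features = df[['bill_length_mm', 'bill_depth_mm']]
    pipeline = make_pipeline(StandardScaler()).fit(features)
    model = LinearSVC(C=1.0).fit(pipeline.transform(features), df['target'])
    plot_model(df, model, pipeline, f_score=0.9, target='Adelie',
               title='Linear SVC decision boundary', filename='linear-svc')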
from django.db import migrations
import unittest

from forestgame.game.world import World
# Copyright 2021 Richard Johnston <techpowerawaits@outlook.com>
# SPDX-license-identifier: 0BSD

import string

from loguru import logger

try:
    import cell_pos
    from exceptions import InvconvMissingHeaders
    import ftype
    import msg_handler
except ModuleNotFoundError:
    import invconv.cell_pos as cell_pos
    from invconv.exceptions import InvconvMissingHeaders
    import invconv.ftype as ftype
    import invconv.msg_handler as msg_handler

used = True
try:
    from openpyxl import load_workbook
except ModuleNotFoundError:
    used = False

# load_workbook is used repeatedly with similar settings
# every time.
WB_SETTINGS = {
    "read_only": True,
    "keep_vba": False,
    "data_only": True,
    "keep_links": False,
}

# Will store a file, worksheet tuple-like class
# with additional data accessible.
xlsx_data_list = ftype.FtypeDataList()
# Contains just a list of file, worksheet tuples.
xlsx_tuple_list = []

# xlsx files always start counting at 1.
INVALID_ROW = 0

if used:
    ftype.add("xlsx", start)
import os
from tkinter import *
import tkinter.filedialog as tkfd
from PIL import Image
import numpy as np

import solvers.generation_solver.image_seperation as IS

if __name__ == '__main__':
    print(show_interface())
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq
import re, json


def find_space(s):
    # Hypothetical helper (the original definition is not shown). The
    # comparison below against "Brightest Star" suggests it returns the index
    # where a parenthesised suffix begins, e.g. "Sirius (Alpha Canis
    # Majoris)" -> 6.
    idx = s.find(" (")
    return idx if idx != -1 else len(s)


# Getting the page
URL = "https://www.astronomytrek.com/star-constellations-brightest-stars/"
uClient = uReq(url=URL)
page_html = uClient.read()
page_soup = soup(page_html, "html.parser")

# Opening a file to write in
stars_file = open("brightest_stars.txt", 'w')

brightest_uncleaned = page_soup.find_all("tr")

for html in brightest_uncleaned:
    col_4 = html.contents[4].contents[0]
    col_5 = html.contents[5].string
    if col_5 is not None:
        idx = find_space(col_5)
        col_5 = col_5[0:idx]
        if col_5 == "Brightest Star":
            continue
        stars_file.write(col_5 + "\n")
    else:
        idx = find_space(col_4)
        col_4 = col_4[0:idx]
        stars_file.write(col_4 + "\n")

stars_file.close()
if __name__ == '__main__':
    sol = Solution()
    # height = [1, 1]
    height = [1, 3, 2, 5, 25, 24, 5]
    print(sol.maxArea(height))
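# Sketch (not part of the original snippet): a Solution class matching what
# the driver above expects; in a runnable file it must be defined before the
# __main__ block. This is the standard two-pointer approach to "container
# with most water": keep the wider span, move the shorter side inward.
class Solution:
    def maxArea(self, height):
        left, right = 0, len(height) - 1
        best = 0
        while left < right:
            best = max(best, min(height[left], height[right]) * (right - left))
            if height[left] < height[right]:
                left += 1
            else:
                right -= 1
        return best


# For height = [1, 3, 2, 5, 25, 24, 5] this prints 24: the walls of height
# 25 and 24 are one index apart, so min(25, 24) * 1 = 24 is the maximum.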
# coding=utf-8
# Copyright 2020 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# encoding=utf-8
r"""Tests for BertTokenizer."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl.testing import parameterized

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow_text.python.ops import bert_tokenizer


def _utf8(x):
  # Added so the vocab below is well-defined (the excerpt used _utf8 without
  # showing it); encodes a unicode string to UTF-8 bytes.
  return x.encode('utf-8')


# TODO(thuang513): It appears there isn't a Ragged version of substr; consider
#                  checking this into core TF.
_VOCAB = [
    b'[unused1]', b'[unused23]', b"'", b'##%', b'##af', b'##book', b'##c',
    b'##fr', b'##hey', b'##is', b'##o', b'##ost', b'##s', b'##tri', b'##y',
    b'$', b'%', b'&', b'(', b')', b'*', b'-', b'.', b'20', b':', b'?',
    b'[CLS]', b'[SEP]',
    _utf8(u''), _utf8(u''), _utf8(u''), _utf8(u''),
    _utf8(u''), _utf8(u''), _utf8(u''), _utf8(u''),
    b'^', b'a', b'ago', b'among', b'an', b'and', b'are', b'aren', b'awesome',
    b'between', b'candy', b'china', b'companies', b'company', b'crushed',
    b'dug', b'earnings', b'engaged', b'even', b'few', b'forecast',
    b'getting', b'had', b'han', b'has', b'hers', b'high', b'hit', b'hs',
    b'hurting', b'in', b'indie', b'is', b'isn', b'ka', b'ku', b'major',
    b'maker', b'moth', b'nearly', b'new', b'now', b'president', b'record',
    b'regulators', b'reported', b'rift', b'rust', b'sales', b'shares',
    b'slightly', b'sprint', b'states', b'stock', b't', b'taste', b'tension',
    b'that', b'the', b'this', b'today', b'told', b'topped', b'trade',
    b'trump', b'united', b'up', b'weeks', b'what', b'why', b'with', b'year',
    b'yo', b'yu',
    _utf8(u'\u7231'), _utf8(u'\u4e0a'), _utf8(u'\u4e00'), _utf8(u'\u4e2a'),
    _utf8(u'\u4e0d'), _utf8(u'\u56de'), _utf8(u'\u5bb6'), _utf8(u'\u7684'),
    _utf8(u'\u4eba'),
]

if __name__ == '__main__':
  test.main()
"""Unit test for the index.py file.""" import unittest from datetime import datetime, timedelta, timezone import json from unittest.mock import patch, mock_open import requests_mock from src import index from src import github_services
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import six
import funcvote as vote

votes = input(" >>>")
# print(votes)
# print(type(votes))

result = vote.str2int(votes)
print(vote.countvotes(result))
result = vote.countvotes(result)
vote.printvote(result)
# Day 2: Operators
# Start using arithmetic operators.
#
# https://www.hackerrank.com/challenges/30-operators/problem
#

#!/bin/python3

import sys

if __name__ == "__main__":
    meal_cost = float(input().strip())
    tip_percent = int(input().strip())
    tax_percent = int(input().strip())

    cost = meal_cost * (1 + tip_percent / 100 + tax_percent / 100)
    print("The total meal cost is {:.0f} dollars.".format(cost))
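# Worked example (input values assumed for illustration): with
# meal_cost = 12.00, tip_percent = 20, and tax_percent = 8,
#   12.00 * (1 + 0.20 + 0.08) = 15.36,
# which "{:.0f}" rounds to "The total meal cost is 15 dollars."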
from django.http import HttpResponse
from django.template import RequestContext, loader
from django.shortcuts import render, get_object_or_404, redirect
from django.core.urlresolvers import reverse
from django.core.cache import cache

from articles.models import CIFArticle
from .forms import CIFArticleForm


def index(request):
    """ Handle requests to the homepage """
    article = None

    # If a user has submitted a URL...
    if request.POST:
        form = CIFArticleForm(request.POST)
        if (form.is_valid()):
            try:
                article = form.save(commit=False)
                existing_articles = CIFArticle.objects.filter(url=article.url).count()
                if existing_articles:
                    article = CIFArticle.objects.get(url=article.url)
                else:
                    article.measure_ego()
                    article.save()
            except ValueError as e:
                article = None
                form._errors["url"] = form.error_class([str(e)])
    # If no URL submitted, just set up a blank form
    else:
        form = CIFArticleForm()

    # If an article is found or created due to a user submission, redirect there
    if article:
        return redirect(reverse("articles:detail", args=(article.id,)))
    # Else show the homepage & rendered form
    else:
        top_articles = cache.get('cim:top_articles')
        if top_articles is None:
            top_articles = CIFArticle.objects.filter(is_cif=1).order_by('-score')[:10]
            cache.set('cim:top_articles', top_articles, 60)

        latest_articles = cache.get('cim:latest_articles')
        if latest_articles is None:
            latest_articles = CIFArticle.objects.filter(is_cif=1).order_by('-id')[:5]
            cache.set('cim:latest_articles', latest_articles, 30)

        return render(request, 'articles/index.html', {
            'form': form,
            'top_articles': top_articles,
            'latest_articles': latest_articles
        })


def detail(request, article_id):
    """ Handle detail view for an article """
    # Quite simple, set up article and form
    form = CIFArticleForm()
    article_key = 'cim:article:%s' % article_id
    article = cache.get(article_key)
    if article is None:
        article = get_object_or_404(CIFArticle, id=article_id)
        cache.set(article_key, article, 300)

    return render(request, 'articles/detail.html', {
        'article': article,
        'form': form
    })
# Copyright (c) 2021, Omid Erfanmanesh, All rights reserved.
# noinspection PyPep8
"""pydoc_fork
A fork of pydoc that is optimized for generating html documentation in a CI context

Usage:
  pydoc_fork <package>... [options]
  pydoc_fork (-h | --help)
  pydoc_fork --version

Options:
  -h --help                   Show this screen.
  -v --version                Show version.
  --quiet                     No printing or logging.
  --verbose                   Crank up the logging.
  --config <config>           pyproject.toml or other toml config.
  --document_internals        respect underscore or __all__ private
  --prefer_docs_python_org    link to python.org or generate own stdlib docs
  -o --output <folder>        where to write files
"""
# TODO: implement this
# pydoc_fork dot_notation <importable>... [--output=<folder>] [--document_internals]
# pydoc_fork source_path <path>... [--output=<folder>] [--document_internals]
import logging
import sys

import docopt

from pydoc_fork import commands, settings
from pydoc_fork.settings import load_config

LOGGER = logging.getLogger(__name__)
LOGGERS = []
__version__ = "3.0.0"


def main() -> int:
    """Get the args object from command parameters"""
    arguments = docopt.docopt(__doc__, version=f"pydoc_fork {__version__}")
    config_path = arguments.get("<config>")
    if config_path:
        load_config(config_path)
    LOGGER.debug(f"Invoking with docopts: {str(arguments)}")
    output_folder = arguments["--output"]

    # TODO: add lists of packages
    package = arguments["<package>"] or []
    # quiet = bool(arguments.get("--quiet", False))
    if arguments.get("--document_internals"):
        settings.DOCUMENT_INTERNALS = arguments["--document_internals"]
    if arguments.get("--prefer_docs_python_org"):
        settings.PREFER_DOCS_PYTHON_ORG = arguments["--prefer_docs_python_org"]

    if arguments.get("--verbose"):
        # root logger, all modules
        for root in ("pydoc_fork", "__main__"):
            logger = logging.getLogger(root)
            logger.setLevel(logging.DEBUG)
            handler = logging.StreamHandler()
            handler.setLevel(logging.DEBUG)
            log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
            formatter = logging.Formatter(log_format)
            handler.setFormatter(formatter)
            logger.addHandler(handler)
            LOGGERS.append(logger)

    commands.process_path_or_dot_name(
        package,
        output_folder=output_folder,
    )
    # # TODO
    # print("Don't recognize that command.")
    # return -1
    return 0


if __name__ == "__main__":
    sys.exit(main())
""" libs.strings By default, uses `en-gb.json` file inside the `strings` top-level folder. If language changes, set `libs.strings.default_locale` and run `libs.strings.refresh()`. """ import json default_locale = "en-us" cached_strings = {} refresh()
# Copyright 2013-2021 The Salish Sea MEOPAR Contributors
# and The University of British Columbia

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NEMO-Cmd command plug-in for deflate sub-command.

Deflate variables in netCDF files using Lempel-Ziv compression.
"""
import logging
import math
import multiprocessing
from pathlib import Path
import shlex
import subprocess
import time

import attr
import cliff.command

logger = logging.getLogger(__name__)


def deflate(filepaths, max_concurrent_jobs):
    """Deflate variables in each of the netCDF files in filepaths using
    Lempel-Ziv compression.

    Converts files to netCDF-4 format.
    The deflated file replaces the original file.

    :param sequence filepaths: Paths/names of files to be deflated.

    :param int max_concurrent_jobs: Maximum number of concurrent deflation
                                    processes allowed.
    """
    logger.info(
        "Deflating in up to {} concurrent sub-processes".format(
            int(max_concurrent_jobs)
        )
    )
    jobs = [DeflateJob(fp) for fp in filepaths if fp.exists()]
    jobs_in_progress = _launch_initial_jobs(jobs, max_concurrent_jobs)
    while jobs or jobs_in_progress:
        time.sleep(1)
        _poll_and_launch(jobs, jobs_in_progress)
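# Sketch (not part of the excerpt above): minimal versions of the helpers
# that deflate() relies on. The real NEMO-Cmd implementations differ in
# detail; in particular, the ncks command line below is an assumption about
# how netCDF-4 Lempel-Ziv deflation is invoked.
@attr.s
class DeflateJob:
    """One deflation task wrapping a subprocess."""

    filepath = attr.ib()
    process = attr.ib(init=False, default=None)

    def start(self):
        # ncks -4 converts to netCDF-4; -L 4 applies Lempel-Ziv deflation.
        cmd = "ncks -4 -L 4 -O {fp} {fp}".format(fp=self.filepath)
        self.process = subprocess.Popen(shlex.split(cmd))

    @property
    def done(self):
        return self.process.poll() is not None


def _launch_initial_jobs(jobs, max_concurrent_jobs):
    """Start up to max_concurrent_jobs jobs, removing them from the queue."""
    jobs_in_progress = []
    while jobs and len(jobs_in_progress) < max_concurrent_jobs:
        job = jobs.pop(0)
        job.start()
        jobs_in_progress.append(job)
    return jobs_in_progress


def _poll_and_launch(jobs, jobs_in_progress):
    """Retire finished jobs and launch queued ones in their place."""
    for job in jobs_in_progress[:]:
        if job.done:
            jobs_in_progress.remove(job)
            logger.info("deflated {fp}".format(fp=job.filepath))
            if jobs:
                next_job = jobs.pop(0)
                next_job.start()
                jobs_in_progress.append(next_job)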
"""distributions module.""" from UQpy.distributions.collection.Beta import Beta from UQpy.distributions.collection.Binomial import Binomial from UQpy.distributions.collection.Cauchy import Cauchy from UQpy.distributions.collection.ChiSquare import ChiSquare from UQpy.distributions.collection.Exponential import Exponential from UQpy.distributions.collection.Gamma import Gamma from UQpy.distributions.collection.GeneralizedExtreme import GeneralizedExtreme from UQpy.distributions.collection.InverseGaussian import InverseGauss from UQpy.distributions.collection.Laplace import Laplace from UQpy.distributions.collection.Levy import Levy from UQpy.distributions.collection.Logistic import Logistic from UQpy.distributions.collection.Lognormal import Lognormal from UQpy.distributions.collection.Maxwell import Maxwell from UQpy.distributions.collection.Multinomial import Multinomial from UQpy.distributions.collection.MultivariateNormal import MultivariateNormal from UQpy.distributions.collection.Normal import Normal from UQpy.distributions.collection.Pareto import Pareto from UQpy.distributions.collection.Poisson import Poisson from UQpy.distributions.collection.Rayleigh import Rayleigh from UQpy.distributions.collection.TruncatedNormal import TruncatedNormal from UQpy.distributions.collection.Uniform import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.distributions.collection.JointCopula import JointCopula
#
# Example file for HelloWorld
#


def main():
    # Minimal body (assumed; the original left main undefined).
    print("Hello World")


if __name__ == "__main__":
    main()
import sys
import csv
import json
import math

import pygame
import numpy as np
from pygame.locals import *
import pandas as pd

from data import *
from agent import agentsList, Agent

global screenSize
screenSize = [1920, 1080]


def run_simulation(burn_time):
    params = load_parameters("RocketSimulationData/info.json")
    env = Environment(params[1])
    s = System(params[0], env, burn_time)
    s.launch()


def renderAgents(screen, res, ratio):
    screen.fill((0, 0, 0))
    pygame.draw.rect(screen, (0, 0, 255), (0, 1080-108, 1920, 108))
    pos = screenSize[1]-158 - res["altitude"]*ratio
    # print("altitude: "+str(res["altitude"])+", pos: "+str(pos))
    pygame.draw.rect(screen, (255, 255, 255), (940, pos, 20, 50))
    pygame.display.update()


def simulateRocket(screen):
    run_simulation(150)
    df = pd.read_csv('RocketSimulationData/Flight.csv')
    result = df.to_dict("index")
    ratio = screenSize[1]/1000000
    interestingPoint = None
    for res in result:
        # print("time: "+str(result[res]["t"])+" Altitude: "+str(result[res]["altitude"]))
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
        renderAgents(screen, result[res], ratio)
        if result[res]["altitude"] < 800000:
            interestingPoint = result[res]
        pygame.display.update()
    return interestingPoint
""" Package has implementation for the FID score calculation """ from GAN.FID import fid_score from GAN.FID import inception