content stringlengths 0 1.05M | origin stringclasses 2
values | type stringclasses 2
values |
|---|---|---|
import os
import lab_test
def mean(list_a):
    """Return the arithmetic mean of a non-empty sequence of numbers."""
    count = len(list_a)
    return sum(list_a) / count
def create_md_file(path, bpp_mine, psnr_mine, ssim_mine, bpp_jpg, psnr_jpg, ssim_jpg):
    """Write <path>/res.md with mean metrics and a per-image comparison table.

    Each *_mine / *_jpg argument is a sequence of per-image values (bpp,
    PSNR, SSIM) for the learned codec and the JPEG baseline respectively.
    """
    # portable replacement for os.system('mkdir -p ...')
    if not os.path.isdir(path):
        os.makedirs(path)
    file_p = os.path.join(path, 'res.md')
    res = []
    res.append('MyModel: mean bpp is {:.4f}, mean psnr is {:.4f}, mean ssim is {:.4f}\n'.format(mean(bpp_mine), mean(psnr_mine), mean(ssim_mine)))
    res.append('JPEG: mean bpp is {:.4f}, mean psnr is {:.4f}, mean ssim is {:.4f}\n'.format(mean(bpp_jpg), mean(psnr_jpg), mean(ssim_jpg)))
    res.append('|BPP_Mine |PSNR_Mine |SSIM_Mine |BPP_JPG |PSNR_JPG |SSIM_JPG |\n')
    res.append('|----|----|----|----|-----|----|\n')
    # one table row per image; zip keeps the six metric streams aligned
    # (the original indexed by range(len(psnr_mine)) and left `comb` unused)
    for bm, pm, sm, bj, pj, sj in zip(bpp_mine, psnr_mine, ssim_mine, bpp_jpg, psnr_jpg, ssim_jpg):
        res.append('|{:.4f} | {:.4f} | {:.4f} | {:.4f}| {:.4f} | {:.4f} | \n'.format(bm, pm, sm, bj, pj, sj))
    # 'with' guarantees the report is flushed and closed (the original leaked the handle)
    with open(file_p, 'w') as mdfile:
        mdfile.writelines(res)
def process(model, version, args, run=True):
    """Run evaluation for *version* plus the JPEG baseline, then write the report.

    model/version select the trained model; args.jpg selects the JPEG quality
    folder. When run is False, results already on disk are reused instead of
    re-running the tests.
    """
    if run:
        lab_test.test_kodak(version, model)
        lab_test.test_jpg(int(args.jpg))
    png_path = 'res/{}'.format(version)
    jpg_path = 'jpg_res/{}'.format(args.jpg)
    bpp_mine = lab_test.get_bpp('codes/{}'.format(version))
    psnr_mine = lab_test.get_psnr(png_path)
    ssim_mine = lab_test.get_ssim(png_path)
    bpp_jpg = lab_test.get_bpp(jpg_path, jpeg=True)
    psnr_jpg = lab_test.get_psnr(jpg_path, jpeg=True)
    ssim_jpg = lab_test.get_ssim(jpg_path, jpeg=True)
    save_path = 'report/{}'.format(version)
    # portable replacement for os.system('mkdir -p ...')
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    create_md_file(save_path, bpp_mine, psnr_mine, ssim_mine, bpp_jpg, psnr_jpg, ssim_jpg)
def CABAC_res():
    """Write CABAC.md comparing CABAC sizes (lines of CABAC.txt) against the
    on-disk sizes of the Huffman-coded codes/entropy-1/NN.npz files.

    Line i of CABAC.txt is paired with codes/entropy-1/<i zero-padded to 2>.npz.
    """
    # open('w') below creates CABAC.md, so the original os.system('touch ...') is unneeded
    with open('CABAC.txt', 'r') as res1:
        size1 = res1.readlines()
    res = []
    res.append('|CABAC(kb) |Huffman(kb) |\n')
    res.append('|----|----|\n')
    # enumerate replaces the manual counter; {:02d} replaces the manual zero-padding
    for i, x in enumerate(size1, start=1):
        n_id = '{:02d}'.format(i)
        res.append('|{} |{:d} |\n'.format(x.strip('\n'), os.path.getsize('codes/entropy-1/{}.npz'.format(n_id))))
    # 'with' guarantees the markdown file is flushed and closed (the original leaked both handles)
    with open('CABAC.md', 'w') as md_file:
        md_file.writelines(res)
if __name__ == '__main__':
    # CLI entry point: all three options are mandatory.
    import argparse
    arg_parser = argparse.ArgumentParser()
    for flag, short in (('--model', '-m'), ('--version', '-v'), ('--jpg', '-j')):
        arg_parser.add_argument(flag, short, required=True, type=str)
    args = arg_parser.parse_args()
    process(args.model, args.version, args)
| nilq/baby-python | python |
#! /usr/bin/env python
#coding: utf-8
######################################################################################
#Script for download and convert to fastq SRA datasets serially. #
#Authors: David Peris UW-Madison, Dept Genetics #
#Usage: python download_SRA_serially.py INPUT OUTPUTFOLDER YES/NO #
# #
#INPUT a SRA accession number or a text file with a list of SRAs #
#OUTPUTFOLDER the folder where your fastq will be saved #
#YES or NO if your input is a list or just an accession number #
######################################################################################
import sys,os
# Positional CLI arguments: accession (or list file), output dir, list flag.
SRA_files = sys.argv[1]
output_folder = sys.argv[2]
list_file = sys.argv[3]
# prefetch drops downloads here by default; '~' is expanded by the shell in os.system
downloaded_path = '~/ncbi/public/sra/'
if list_file == "NO":
    # single accession: wrap it so the loop below works either way
    SRA_list = [SRA_files]
else:
    # list mode: iterate the file line by line
    SRA_list = open(SRA_files)
def prefetch(SRA_file):  # It is downloaded into the directory user/ncbi/public/sra/
    """Build the shell command that downloads *SRA_file* with sra-tools prefetch."""
    return 'prefetch -v ' + SRA_file
def convert_fastq(SRA_file, output_folder):
    """Build the fastq-dump command converting the downloaded .sra into split FASTQ files."""
    return ('fastq-dump --outdir ' + output_folder
            + ' --split-files ' + downloaded_path + SRA_file + '.sra')
# Serially download each accession, then convert it to FASTQ.
for SRA_file in SRA_list:
    SRA_file = SRA_file.strip()  # drop the trailing newline from list-file lines
    os.system(prefetch(SRA_file))
    os.system(convert_fastq(SRA_file, output_folder))
# print() (not the py2-only statement form) works on both Python 2 and 3
print("SRA files downloaded")
| nilq/baby-python | python |
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
from __future__ import print_function
import itk
from sys import argv, stderr, exit
itk.auto_progress(2)
# --- command-line arguments: inputImage outputImage [RMS] [numberOfIterations]
if len(argv) < 3:
    print((
        "Missing Parameters \n Usage: AntiAliasBinaryImageFilter"
        " inputImage outputImage [RMS] [numberOfIterations]"), file=stderr)
    exit(1)
inputFilename = argv[1]
outputFilename = argv[2]
maximumRMSError = 0.01     # default convergence threshold for the anti-alias solver
numberOfIterations = 50    # default iteration cap
if len(argv) > 3:
    maximumRMSError = float(argv[3])
if len(argv) > 4:
    numberOfIterations = int(argv[4])
# --- wrapped ITK types: 3-D unsigned-char for I/O, float for the level-set solver
CharPixelType = itk.UC
RealPixelType = itk.F
Dimension = 3
CharImageType = itk.Image[CharPixelType, Dimension]
RealImageType = itk.Image[RealPixelType, Dimension]
ReaderType = itk.ImageFileReader[CharImageType]
WriterType = itk.ImageFileWriter[CharImageType]
CastToRealFilterType = itk.CastImageFilter[CharImageType, RealImageType]
RescaleFilter = itk.RescaleIntensityImageFilter[RealImageType, CharImageType]
antiAliasFilter = itk.AntiAliasBinaryImageFilter[RealImageType, RealImageType]
antiAliasFilter = antiAliasFilter.New()
# --- instantiate the remaining pipeline stages
reader = ReaderType.New()
writer = WriterType.New()
toReal = CastToRealFilterType.New()
rescale = RescaleFilter.New()
reader.SetFileName(inputFilename)
writer.SetFileName(outputFilename)
rescale.SetOutputMinimum(0)      # map the smoothed float image back to [0, 255]
rescale.SetOutputMaximum(255)
# pipeline: read (uchar) -> cast to float -> anti-alias -> rescale -> write (uchar)
toReal.SetInput(reader.GetOutput())
antiAliasFilter.SetInput(toReal.GetOutput())
antiAliasFilter.SetMaximumRMSError(maximumRMSError)
antiAliasFilter.SetNumberOfIterations(numberOfIterations)
antiAliasFilter.SetNumberOfLayers(2)
rescale.SetInput(antiAliasFilter.GetOutput())
writer.SetInput(rescale.GetOutput())
writer.Update()  # triggers execution of the whole pipeline
| nilq/baby-python | python |
from typing import Callable
def test_hello_default(hello: Callable[..., str]) -> None:
assert hello() == "Hello !"
def test_hello_name(hello: Callable[..., str], name: str) -> None:
assert hello(name) == "Hello {0}!".format(name)
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""
equip.analysis.python
~~~~~~~~~~~~~~~~~~~~~
Python related information for analysis.
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
| nilq/baby-python | python |
""" XVM (c) www.modxvm.com 2013-2017 """
#####################################################################
# MOD INFO
# Descriptor consumed by the XFW mod loader (version/compatibility metadata).
XFW_MOD_INFO = {
    # mandatory
    'VERSION': '0.9.19.0.1',
    'URL': 'http://www.modxvm.com/',
    'UPDATE_URL': 'http://www.modxvm.com/en/download-xvm/',
    'GAME_VERSIONS': ['0.9.19.0.1'],
    # optional
}
#####################################################################
# imports
import traceback
import sys
from math import degrees, pi
import BigWorld
import game
import gui.shared.tooltips.vehicle as tooltips_vehicle
from gun_rotation_shared import calcPitchLimitsFromDesc
from helpers import i18n
from gui import g_htmlTemplates
from gui.shared import g_eventBus
from gui.shared.formatters import text_styles
from gui.shared.tooltips import formatters
from gui.shared.gui_items import GUI_ITEM_TYPE
from gui.Scaleform.locale.MENU import MENU
from gui.shared.items_parameters import formatters as param_formatter
from gui.shared.items_parameters.formatters import measureUnitsForParameter
from gui.shared.items_parameters.params_helper import getParameters as getParameters_helper
from gui.shared.items_parameters.params_helper import idealCrewComparator as idealCrewComparator_helper
from gui.shared.utils.requesters.ItemsRequester import ItemsRequester
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS
from gui.Scaleform.locale.TOOLTIPS import TOOLTIPS
from gui.Scaleform.framework.ToolTip import ToolTip
from gui.Scaleform.daapi.view.battle.shared.consumables_panel import ConsumablesPanel
from gui.Scaleform.daapi.view.meta.ModuleInfoMeta import ModuleInfoMeta
from gui.shared.tooltips.module import ModuleBlockTooltipData
from helpers import dependency
from skeletons.gui.shared import IItemsCache
from xfw import *
import xvm_main.python.config as config
from xvm_main.python.consts import *
from xvm_main.python.logger import *
from xvm_main.python.vehinfo import _getRanges
from xvm_main.python.vehinfo_tiers import getTiers
from xvm_main.python.xvm import l10n
#####################################################################
# globals
shells_vehicles_compatibility = {}  # shell compactDescr -> list of compatible vehicle user names
carousel_tooltips_cache = {}        # vehicle.intCD -> prebuilt tooltip blocks
styles_templates = {}               # style name -> patched html template (font size/name applied)
toolTipDelayIntervalId = None       # pending BigWorld.callback id for the delayed tooltip show
weightTooHeavy = False              # True while rendering a module that doesn't fit by weight
p_replacement = None # will be something like <font size... color...>
#####################################################################
# initialization/finalization
def start():
    """Register the config-reload listener that clears tooltip caches."""
    g_eventBus.addListener(XVM_EVENT.CONFIG_LOADED, tooltips_clear_cache)

# NOTE(review): assumed module-level one-shot deferral (run start on the next
# game tick) - confirm against the original indentation.
BigWorld.callback(0, start)
@registerEvent(game, 'fini')
def fini():
    """On game shutdown, unregister the config-reload listener."""
    g_eventBus.removeListener(XVM_EVENT.CONFIG_LOADED, tooltips_clear_cache)
#####################################################################
# handlers
# tooltip delay to resolve performance issue
@overrideMethod(ToolTip, 'onCreateComplexTooltip')
def ToolTip_onCreateComplexTooltip(base, self, tooltipId, stateType):
    """Defer complex-tooltip creation through the configurable delay."""
    # log('ToolTip_onCreateComplexTooltip')
    _createTooltip(self, lambda:_onCreateComplexTooltip_callback(base, self, tooltipId, stateType))
# tooltip delay to resolve performance issue
# suppress carousel tooltips
@overrideMethod(ToolTip, 'onCreateTypedTooltip')
def ToolTip_onCreateTypedTooltip(base, self, type, *args):
    """Optionally suppress carousel tooltips; otherwise defer creation."""
    # log('ToolTip_onCreateTypedTooltip')
    try:
        # swallow the whole tooltip when the user disabled carousel tooltips
        if type == TOOLTIPS_CONSTANTS.CAROUSEL_VEHICLE and config.get('hangar/carousel/suppressCarouselTooltips'):
            return
    except Exception as ex:
        err(traceback.format_exc())
    _createTooltip(self, lambda:_onCreateTypedTooltip_callback(base, self, type, *args))
# adds delay for tooltip appearance
def _createTooltip(self, func):
    """Cancel any pending tooltip and schedule *func* after the configured delay."""
    try:
        global toolTipDelayIntervalId
        self.xvm_hide()  # cancel a previously scheduled show, if any
        tooltipDelay = config.get('tooltips/tooltipsDelay', 0.4)
        toolTipDelayIntervalId = BigWorld.callback(tooltipDelay, func)
    except Exception as ex:
        err(traceback.format_exc())
def _onCreateTypedTooltip_callback(base, self, type, *args):
    """Delayed body: clear the pending-callback id and run the original handler."""
    # log('ToolTip_onCreateTypedTooltip_callback')
    global toolTipDelayIntervalId
    toolTipDelayIntervalId = None
    base(self, type, *args)
def _onCreateComplexTooltip_callback(base, self, tooltipId, stateType):
    """Delayed body: clear the pending-callback id and run the original handler."""
    # log('_onCreateComplexTooltip_callback')
    global toolTipDelayIntervalId
    toolTipDelayIntervalId = None
    base(self, tooltipId, stateType)
def _ToolTip_xvm_hide(self):
    """Cancel the pending delayed tooltip callback, if one is scheduled."""
    # log('_ToolTip_xvm_hide')
    global toolTipDelayIntervalId
    if toolTipDelayIntervalId is not None:
        BigWorld.cancelCallback(toolTipDelayIntervalId)
        toolTipDelayIntervalId = None

# attach as a method so ToolTip code can cancel its own delayed show
ToolTip.xvm_hide = _ToolTip_xvm_hide
#############################
# carousel events
@overrideMethod(tooltips_vehicle.VehicleInfoTooltipData, '_packBlocks')
def VehicleInfoTooltipData_packBlocks(base, self, *args, **kwargs):
    """Drop tooltip blocks whose 'data'->'blocksData' is missing or empty."""
    packed = base(self, *args, **kwargs)
    kept = []
    for block in packed:
        if block.get('data', {}).get('blocksData'):
            kept.append(block)
    return kept
@overrideMethod(tooltips_vehicle.SimplifiedStatsBlockConstructor, 'construct')
def SimplifiedStatsBlockConstructor_construct(base, self):
    """Return no simplified-parameter blocks when the user hides them in config."""
    if not config.get('tooltips/hideSimplifiedVehParams'):
        return base(self)
    return []
@overrideMethod(tooltips_vehicle.AdditionalStatsBlockConstructor, 'construct')
def AdditionalStatsBlockConstructor_construct(base, self):
    """Return no bottom-text blocks when the user hides them in config."""
    if not config.get('tooltips/hideBottomText'):
        return base(self)
    return []
@overrideMethod(text_styles, "_getStyle")
def text_styles_getStyle(base, style, ctx = None):
    """Patch lobby text styles with the XVM-configured tooltip font.

    Templates using the stock 14px $FieldFont get 'tooltips/fontSize' /
    'tooltips/fontName' substituted; patched templates are cached in
    styles_templates. Falls back to the original style on any error.
    """
    if ctx is None:
        ctx = {}
    try:
        if style not in styles_templates:
            template = g_htmlTemplates['html_templates:lobby/textStyle'][style].source
            # template may be a plain string or a dict with a 'text' key
            template_string = template if type(template) is str else template['text']
            if "size='14'" in template_string and "face='$FieldFont'" in template_string:
                template_string = template_string \
                .replace("size='14'", "size='%s'" % config.get('tooltips/fontSize', 14)) \
                .replace("face='$FieldFont'", "face='%s'" % config.get('tooltips/fontName', '$FieldFont'))
            styles_templates[style] = template_string if type(template) is str else {'text': template_string}
        if type(styles_templates[style]) is str:
            return styles_templates[style]
        else:
            if ctx:
                # dict template with context: interpolate the patched text
                return styles_templates[style]['text'] % ctx
            else:
                return base(style, ctx)
    except Exception as ex:
        err(traceback.format_exc())
        return base(style, ctx)
def tooltip_add_param(self, result, param0, param1):
    """Append one name/value parameter row block to *result*."""
    result.append(formatters.packTextParameterBlockData(name=text_styles.main(param0), value=text_styles.stats(param1), valueWidth=107, padding=formatters.packPadding(left=self.leftPadding, right=self.rightPadding)))
def tooltip_with_units(value, units):
    """Return '<value> <units>' with the units rendered in the dimmed 'standard' style."""
    return '%s %s' % (value, text_styles.standard(units))
def getParameterValue(paramName):
    """Localized parameter title plus its measure units, styled for tooltips."""
    return text_styles.main(i18n.makeString(MENU.tank_params(paramName))) + text_styles.standard(measureUnitsForParameter(paramName))
def formatNumber(value):
    """Round *value* with precision shrinking as magnitude grows, then localize.

    >99 -> whole number, >9 -> 1 decimal, else 2 decimals; the result is
    formatted by the client's locale-aware wg_getNiceNumberFormat.
    """
    if value > 99:
        value = round(value)
    elif value > 9:
        value = round(value, 1)
    else:
        value = round(value, 2)
    return str(BigWorld.wg_getNiceNumberFormat(value))
# replace <h>text1 <p>text2</p></h> with: text1 text_styles.standard(text2)
def replace_p(text):
    """Rewrite <p>...</p> spans into the 'standard' font tag and strip <h> tags."""
    global p_replacement
    if not p_replacement:
        # lazily derive the opening tag of text_styles.standard(''), e.g. "<font ...>"
        p_replacement = text_styles.standard('').split('>', 1)[0] + '>'
    return text.replace('<p>', p_replacement).replace('</p>', '</font>').replace('<h>', '').replace('</h>', '')
# overriding tooltips for tanks in hangar, configuration in tooltips.xc
@overrideMethod(tooltips_vehicle.CommonStatsBlockConstructor, 'construct')
def CommonStatsBlockConstructor_construct(base, self):
    """Build the hangar carousel tooltip stat rows for a vehicle.

    The row list comes from XVM config ('tooltips/<vehicleClass>'), falling
    back to the client's stock PARAMS table. Built rows are cached per
    vehicle.intCD in carousel_tooltips_cache. On any error the stock
    constructor is used instead.
    """
    try:
        self.leftPadding = -15
        vehicle = self.vehicle
        cache_result = carousel_tooltips_cache.get(vehicle.intCD)
        if cache_result:
            return cache_result
        result = []
        if not config.get('tooltips/hideSimplifiedVehParams'):
            result.append(formatters.packTitleDescBlock(text_styles.middleTitle(i18n.makeString(TOOLTIPS.TANKCARUSEL_MAINPROPERTY)), padding=formatters.packPadding(left=0, right=self.rightPadding, bottom=8)))
        params = self.configuration.params
        veh_descr = vehicle.descriptor
        gun = vehicle.gun.descriptor
        turret = vehicle.turret.descriptor
        comparator = idealCrewComparator_helper(vehicle)
        vehicleCommonParams = getParameters_helper(vehicle)
        veh_type_inconfig = vehicle.type.replace('AT-SPG', 'TD')
        clipGunInfoShown = False
        premium_shells = {}
        # remember which shells are premium so their values can be gold-padded
        for shell in vehicle.shells:
            premium_shells[shell.intCompactDescr] = shell.isPremium
        if params:
            values = config.get('tooltips/%s' % veh_type_inconfig)
            if values and len(values):
                params_list = values # overriding parameters
            else:
                params_list = self.PARAMS.get(vehicle.type, 'default') # original parameters
            paramInfo = None
            for paramName in params_list:
                if paramName is None:
                    continue
                # map config aliases onto the client's internal parameter names
                if paramName == 'rateOfFire':
                    paramName = 'reloadTime'
                elif paramName == 'traverseLimits':
                    paramName = 'gunYawLimits' if 'gunYawLimits' in vehicleCommonParams else 'turretYawLimits'
                elif paramName == 'radioRange':
                    paramName = 'radioDistance'
                elif paramName == 'reloadTimeSecs' and vehicle.gun.isClipGun():
                    paramName = 'clipFireRate'
                elif paramName == 'turretRotationSpeed' and not vehicle.hasTurrets:
                    paramName = 'gunRotationSpeed'
                if paramName in vehicleCommonParams:
                    paramInfo = comparator.getExtendedData(paramName)
                if paramName == 'turretArmor' and not vehicle.hasTurrets:
                    continue
                #maxHealth
                elif paramName == 'maxHealth':
                    tooltip_add_param(self, result, i18n.makeString('#menu:vehicleInfo/params/maxHealth'), formatNumber(veh_descr.maxHealth))
                #battle tiers
                elif paramName == 'battleTiers':
                    (minTier, maxTier) = getTiers(vehicle.level, vehicle.type, vehicle.name)
                    tooltip_add_param(self, result, l10n('Battle tiers'), '%s..%s' % (minTier, maxTier))
                #explosionRadius
                elif paramName == 'explosionRadius':
                    explosionRadiusMin = 999
                    explosionRadiusMax = 0
                    for shot in gun['shots']:
                        if 'explosionRadius' in shot['shell']:
                            if shot['shell']['explosionRadius'] < explosionRadiusMin:
                                explosionRadiusMin = shot['shell']['explosionRadius']
                            if shot['shell']['explosionRadius'] > explosionRadiusMax:
                                explosionRadiusMax = shot['shell']['explosionRadius']
                    if explosionRadiusMax == 0: # no HE
                        continue
                    explosionRadius_str = formatNumber(explosionRadiusMin)
                    if explosionRadiusMin != explosionRadiusMax:
                        # differing radii imply a premium HE shell; gold-pad the larger value
                        explosionRadius_str += '/%s' % gold_pad(formatNumber(explosionRadiusMax))
                    tooltip_add_param(self, result, getParameterValue(paramName), explosionRadius_str)
                #shellSpeedSummary
                elif paramName == 'shellSpeedSummary':
                    shellSpeedSummary_arr = []
                    for shot in gun['shots']:
                        # NOTE(review): 1.25 appears to be the internal-to-displayed
                        # shell-speed factor - confirm against client data
                        shellSpeed_str = '%g' % round(shot['speed'] * 1.25)
                        if premium_shells[shot['shell']['compactDescr']]:
                            shellSpeed_str = gold_pad(shellSpeed_str)
                        shellSpeedSummary_arr.append(shellSpeed_str)
                    shellSpeedSummary_str = '/'.join(shellSpeedSummary_arr)
                    tooltip_add_param(self, result, tooltip_with_units(l10n('shellSpeed'), l10n('(m/sec)')), shellSpeedSummary_str)
                #piercingPowerAvg
                elif paramName == 'piercingPowerAvg':
                    piercingPowerAvg = formatNumber(veh_descr.shot['piercingPower'][0])
                    tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/avgPiercingPower')), piercingPowerAvg)
                #piercingPowerAvgSummary
                elif paramName == 'piercingPowerAvgSummary':
                    piercingPowerAvgSummary_arr = []
                    for shot in gun['shots']:
                        piercingPower_str = formatNumber(shot['piercingPower'][0])
                        if premium_shells[shot['shell']['compactDescr']]:
                            piercingPower_str = gold_pad(piercingPower_str)
                        piercingPowerAvgSummary_arr.append(piercingPower_str)
                    piercingPowerAvgSummary_str = '/'.join(piercingPowerAvgSummary_arr)
                    tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/avgPiercingPower')), piercingPowerAvgSummary_str)
                #damageAvgSummary
                elif paramName == 'damageAvgSummary':
                    damageAvgSummary_arr = []
                    for shot in gun['shots']:
                        damageAvg_str = formatNumber(shot['shell']['damage'][0])
                        if premium_shells[shot['shell']['compactDescr']]:
                            damageAvg_str = gold_pad(damageAvg_str)
                        damageAvgSummary_arr.append(damageAvg_str)
                    damageAvgSummary_str = '/'.join(damageAvgSummary_arr)
                    tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/avgDamage')), damageAvgSummary_str)
                #magazine loading
                # elif (paramName == 'reloadTimeSecs' or paramName == 'rateOfFire') and vehicle.gun.isClipGun():
                #     if clipGunInfoShown:
                #         continue
                #     (shellsCount, shellReloadingTime) = gun['clip']
                #     reloadMagazineTime = gun['reloadTime']
                #     shellReloadingTime_str = formatNumber(shellReloadingTime)
                #     reloadMagazineTime_str = formatNumber(reloadMagazineTime)
                #     tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/shellsCount')), shellsCount)
                #     tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/shellReloadingTime')), shellReloadingTime_str)
                #     tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/reloadMagazineTime')), reloadMagazineTime_str)
                #     clipGunInfoShown = True
                #rate of fire
                # elif paramName == 'rateOfFire' and not vehicle.gun.isClipGun():
                #     rateOfFire_str = formatNumber(60 / gun['reloadTime'])
                #     tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/reloadTime')), rateOfFire_str)
                # gun traverse limits
                # elif paramName == 'traverseLimits' and gun['turretYawLimits']:
                #     (traverseMin, traverseMax) = gun['turretYawLimits']
                #     traverseLimits_str = '%g..+%g' % (round(degrees(traverseMin)), round(degrees(traverseMax)))
                #     tooltip_add_param(self, result, l10n('traverseLimits'), traverseLimits_str)
                # elevation limits (front)
                # elif paramName == 'pitchLimits':
                #     (pitchMax, pitchMin) = calcPitchLimitsFromDesc(0, gun['pitchLimits'])
                #     pitchLimits_str = '%g..+%g' % (round(degrees(-pitchMin)), round(degrees(-pitchMax)))
                #     tooltip_add_param(self, result, l10n('pitchLimits'), pitchLimits_str)
                # elevation limits (side)
                elif paramName == 'pitchLimitsSide':
                    if gun['turretYawLimits'] and abs(degrees(gun['turretYawLimits'][0])) < 89: continue # can't look aside 90 degrees
                    (pitchMax, pitchMin) = calcPitchLimitsFromDesc(pi / 2, gun['pitchLimits'])
                    pitchLimits_str = '%g..+%g' % (round(degrees(-pitchMin)), round(degrees(-pitchMax)))
                    tooltip_add_param(self, result, l10n('pitchLimitsSide'), pitchLimits_str)
                # elevation limits (rear)
                elif paramName == 'pitchLimitsRear':
                    if gun['turretYawLimits']: continue # can't look back
                    (pitchMax, pitchMin) = calcPitchLimitsFromDesc(pi, gun['pitchLimits'])
                    pitchLimits_str = '%g..+%g' % (round(degrees(-pitchMin)), round(degrees(-pitchMax)))
                    tooltip_add_param(self, result, l10n('pitchLimitsRear'), pitchLimits_str)
                # shooting range
                elif paramName == 'shootingRadius':
                    viewRange, shellRadius, artiRadius = _getRanges(turret, gun, vehicle.nationName, vehicle.type)
                    if vehicle.type == 'SPG':
                        tooltip_add_param(self, result, tooltip_with_units(l10n('shootingRadius'), l10n('(m)')), artiRadius)
                    elif shellRadius < 707:
                        # 707 m is the flat-trajectory cap; larger values are not shown
                        tooltip_add_param(self, result, tooltip_with_units(l10n('shootingRadius'), l10n('(m)')), shellRadius)
                #reverse max speed
                elif paramName == 'speedLimits':
                    (speedLimitForward, speedLimitReverse) = veh_descr.physics['speedLimits']
                    # physics stores m/s; 3.6 converts to km/h
                    speedLimits_str = str(int(speedLimitForward * 3.6)) + '/' + str(int(speedLimitReverse * 3.6))
                    tooltip_add_param(self, result, getParameterValue(paramName), speedLimits_str)
                #turret rotation speed
                # elif paramName == 'turretRotationSpeed' or paramName == 'gunRotationSpeed':
                #     if not vehicle.hasTurrets:
                #         paramName = 'gunRotationSpeed'
                #     turretRotationSpeed_str = str(int(degrees(veh_descr.turret['rotationSpeed'])))
                #     tooltip_add_param(self, result, tooltip_with_units(i18n.makeString('#menu:tank_params/%s' % paramName).rstrip(), i18n.makeString('#menu:tank_params/gps')), turretRotationSpeed_str)
                #terrain resistance
                elif paramName == 'terrainResistance':
                    resistances_arr = []
                    for key in veh_descr.chassis['terrainResistance']:
                        resistances_arr.append(formatNumber(key))
                    terrainResistance_str = '/'.join(resistances_arr)
                    tooltip_add_param(self, result, l10n('terrainResistance'), terrainResistance_str)
                #radioRange
                # elif paramName == 'radioRange':
                #     radioRange_str = '%s' % int(vehicle.radio.descriptor['distance'])
                #     tooltip_add_param(self, result, replace_p(i18n.makeString('#menu:moduleInfo/params/radioDistance')), radioRange_str)
                #gravity
                elif paramName == 'gravity':
                    gravity_str = formatNumber(veh_descr.shot['gravity'])
                    tooltip_add_param(self, result, l10n('gravity'), gravity_str)
                #inner name, for example - ussr:R100_SU122A
                elif paramName == 'innerName':
                    tooltip_add_param(self, result, vehicle.name, '')
                #custom text
                elif paramName.startswith('TEXT:'):
                    customtext = paramName[5:]
                    tooltip_add_param(self, result, l10n(customtext), '')
                elif paramInfo is not None and paramName in paramInfo.name:
                    # generic path: let the client format any other known parameter
                    valueStr = str(param_formatter.formatParameter(paramName, paramInfo.value))
                    tooltip_add_param(self, result, getParameterValue(paramName), valueStr)
        if vehicle.isInInventory:
            # NOTE(review): params_list (and optDevicesIcons_str below) can be
            # unbound on some paths; the resulting NameError is swallowed by the
            # except below and the stock tooltip is shown - confirm intended.
            # optional devices icons, must be in the end
            if 'optDevicesIcons' in params_list:
                optDevicesIcons_arr = []
                for key in vehicle.optDevices:
                    if key:
                        imgPath = 'img://gui' + key.icon.lstrip('.')
                    else:
                        imgPath = 'img://gui/maps/icons/artefact/empty.png'
                    optDevicesIcons_arr.append('<img src="%s" height="16" width="16">' % imgPath)
                optDevicesIcons_str = ' '.join(optDevicesIcons_arr)
                tooltip_add_param(self, result, optDevicesIcons_str, '')
            # equipment icons, must be in the end
            if 'equipmentIcons' in params_list:
                equipmentIcons_arr = []
                for key in vehicle.eqs:
                    if key:
                        imgPath = 'img://gui' + key.icon.lstrip('.')
                    else:
                        imgPath = 'img://gui/maps/icons/artefact/empty.png'
                    equipmentIcons_arr.append('<img src="%s" height="16" width="16">' % imgPath)
                equipmentIcons_str = ' '.join(equipmentIcons_arr)
                if config.get('tooltips/combineIcons') and optDevicesIcons_str:
                    # merge equipment icons into the optional-devices row
                    tmp_list = []
                    tooltip_add_param(self, tmp_list, equipmentIcons_str, '')
                    result[-1]['data']['name'] += ' ' + tmp_list[0]['data']['name']
                else:
                    tooltip_add_param(self, result, equipmentIcons_str, '')
            # crew roles icons, must be in the end
            if 'crewRolesIcons' in params_list:
                imgPath = 'img://../mods/shared_resources/xvm/res/icons/tooltips/roles'
                crewRolesIcons_arr = []
                for tankman_role in vehicle.descriptor.type.crewRoles:
                    crewRolesIcons_arr.append('<img src="%s/%s.png" height="16" width="16">' % (imgPath, tankman_role[0]))
                crewRolesIcons_str = ''.join(crewRolesIcons_arr)
                tooltip_add_param(self, result, crewRolesIcons_str, '')
        # the tooltip widget can only display a limited number of rows
        if (len(result) > 30) and config.get('tooltips/hideBottomText'): # limitation
            result = result[:30]
        elif (len(result) > 29) and not config.get('tooltips/hideBottomText'): # limitation
            result = result[:29]
        carousel_tooltips_cache[vehicle.intCD] = result
        return result
    except Exception as ex:
        err(traceback.format_exc())
        return base(self)
# in battle, add tooltip for HE shells - explosion radius
@overrideMethod(ConsumablesPanel, '_ConsumablesPanel__makeShellTooltip')
def ConsumablesPanel__makeShellTooltip(base, self, descriptor, piercingPower):
    """Append an explosion-radius line to HE shell tooltips in battle."""
    result = base(self, descriptor, piercingPower)
    try:
        if 'explosionRadius' in descriptor:
            key_str = i18n.makeString('#menu:tank_params/explosionRadius')
            # inject the extra line just before the closing {/BODY} tag
            result = result.replace('{/BODY}', '\n%s: %s{/BODY}' % (key_str, formatNumber(descriptor['explosionRadius'])))
    except Exception as ex:
        err(traceback.format_exc())
    return result
# show compatible vehicles for shells info window in warehouse and shop
@overrideMethod(ModuleInfoMeta, 'as_setModuleInfoS')
def ModuleInfoMeta_as_setModuleInfoS(base, self, moduleInfo):
    """Add a 'compatible vehicles' row to shell info windows."""
    try:
        if moduleInfo.get('type') == 'shell':
            if not shells_vehicles_compatibility:
                relate_shells_vehicles()  # lazily build the shell->vehicles map
            if self.moduleCompactDescr in shells_vehicles_compatibility:
                moduleInfo['compatible'].append({'type': i18n.makeString('#menu:moduleInfo/compatible/vehicles'), 'value': ', '.join(shells_vehicles_compatibility[self.moduleCompactDescr])})
    except Exception as ex:
        err(traceback.format_exc())
    base(self, moduleInfo)
# # add '#menu:moduleInfo/params/weightTooHeavy' (red 'weight (kg)')
# @overrideMethod(i18n, 'makeString')
# def makeString(base, key, *args, **kwargs):
# if key == '#menu:moduleInfo/params/weightTooHeavy':
# global weightTooHeavy
# if weightTooHeavy is None:
# weightTooHeavy = '<h>%s</h>' % red_pad(strip_html_tags(i18n.makeString('#menu:moduleInfo/params/weight'))) # localized red 'weight (kg)'
# return weightTooHeavy
# return base(key, *args, **kwargs)
##########################################################################
# paint 'weight (kg)' with red if module does not fit due to overweight
@overrideMethod(param_formatter, 'formatModuleParamName')
def formatters_formatModuleParamName(base, paramName):
    """Render a module parameter title; 'weight' turns red when overweight.

    Reads the module-global weightTooHeavy set by
    ModuleBlockTooltipData_packBlocks.
    """
    builder = text_styles.builder()
    if weightTooHeavy and paramName == 'weight':
        builder.addStyledText(text_styles.error, MENU.moduleinfo_params(paramName))
        builder.addStyledText(text_styles.error, param_formatter.MEASURE_UNITS.get(paramName, ''))
    else:
        builder.addStyledText(text_styles.main, MENU.moduleinfo_params(paramName))
        builder.addStyledText(text_styles.standard, param_formatter.MEASURE_UNITS.get(paramName, ''))
    return builder.render()
@overrideMethod(ModuleBlockTooltipData, '_packBlocks')
def ModuleBlockTooltipData_packBlocks(base, self, *args, **kwargs):
    """Record whether the module is too heavy before packing its tooltip.

    Sets the module-global weightTooHeavy, which
    formatters_formatModuleParamName uses to color the weight row.
    """
    try:
        global weightTooHeavy
        module = self.context.buildItem(*args, **kwargs)
        statusConfig = self.context.getStatusConfiguration(module)
        vehicle = statusConfig.vehicle
        slotIdx = statusConfig.slotIdx
        if vehicle is not None:
            isFit, reason = module.mayInstall(vehicle, slotIdx)
            weightTooHeavy = not isFit and reason == 'too heavy'
    except Exception as ex:
        err(traceback.format_exc())
    return base(self, *args, **kwargs)
#####################################################################
# Utility functions
def h1_pad(text):
    """Wrap *text* in an <h1> heading tag."""
    return '<h1>{}</h1>'.format(text)
def gold_pad(text):
    """Wrap *text* in the configured 'gold' font color (premium-shell values)."""
    return "<font color='%s'>%s</font>" % (config.get('tooltips/goldColor', '#FFC363'), text)
def red_pad(text):
    """Wrap *text* in a red font tag (error/overweight highlighting)."""
    return "<font color='#FF0000'>{}</font>".format(text)
# make dict: shells => compatible vehicles
def relate_shells_vehicles():
    """Populate shells_vehicles_compatibility: shell id -> vehicle user names.

    Skips IGR and training vehicles. On any error the mapping is reset to
    empty so the next lookup retries the build.
    """
    global shells_vehicles_compatibility
    try:
        shells_vehicles_compatibility = {}
        itemsCache = dependency.instance(IItemsCache)
        for vehicle in itemsCache.items.getVehicles().values():
            if vehicle.name.find('_IGR') > 0 or vehicle.name.find('_training') > 0:
                continue
            # walk every gun/shot combination the vehicle can mount
            for turrets in vehicle.descriptor.type.turrets:
                for turret in turrets:
                    for gun in turret['guns']:
                        for shot in gun['shots']:
                            shell_id = shot['shell']['compactDescr']
                            if shell_id in shells_vehicles_compatibility:
                                if vehicle.userName not in shells_vehicles_compatibility[shell_id]:
                                    shells_vehicles_compatibility[shell_id].append(vehicle.userName)
                            else:
                                shells_vehicles_compatibility[shell_id] = [vehicle.userName]
    except Exception as ex:
        err(traceback.format_exc())
        shells_vehicles_compatibility = {}
@registerEvent(ItemsRequester, '_invalidateItems')
def ItemsRequester_invalidateItems(self, itemTypeID, uniqueIDs):
    """Invalidate cached tooltips for changed vehicles (full clear on error)."""
    try:
        if itemTypeID == GUI_ITEM_TYPE.VEHICLE:
            for veh_id in uniqueIDs:
                # empty dict is falsy, so the cache lookup treats it as a miss
                carousel_tooltips_cache[veh_id] = {}
    except Exception as ex:
        err(traceback.format_exc())
        carousel_tooltips_cache.clear()
@registerEvent(ItemsRequester, 'clear')
def ItemsRequester_clear(*args, **kwargs):
    """When the client items cache is cleared, drop all tooltip caches too."""
    tooltips_clear_cache(*args, **kwargs)
def tooltips_clear_cache(*args, **kwargs):
    """Drop cached tooltip blocks and patched style templates (config reload)."""
    carousel_tooltips_cache.clear()
    styles_templates.clear()
| nilq/baby-python | python |
import numpy as np
import pandas as pd
import pytest
from dku_timeseries import WindowAggregator
from recipe_config_loading import get_windowing_params
@pytest.fixture
def columns():
    """Namespace of column names shared by the windowing-test fixtures."""
    return type("COLUMNS", (), {
        "date": "Date",
        "category": "country",
        "aggregation": "value1_avg",
    })
@pytest.fixture
def df(columns):
    """4-row frame: two countries over four month-end dates from 1959-01."""
    values = [315.58, 316.39, 316.79, 316.2]
    groups = ["first", "first", "second", "second"]
    dates = pd.date_range("1-1-1959", periods=4, freq="M")
    return pd.DataFrame({
        "value1": values,
        "value2": values,
        columns.category: groups,
        columns.date: dates,
    })
@pytest.fixture
def long_df(columns):
    """8-row long-format frame: two countries, each over the same 4 daily dates."""
    values = [315.58, 316.39, 316.79, 316.2, 345, 234, 100, 299]
    groups = ["first"] * 4 + ["second"] * 4
    four_days = pd.date_range("1-1-1959", periods=4, freq="D")
    dates = four_days.append(four_days)
    return pd.DataFrame({
        "value1": values,
        "value2": values,
        columns.category: groups,
        columns.date: dates,
    })
@pytest.fixture
def long_df_2(columns):
    """6-row frame with two group keys (country, item): three groups of two monthly rows."""
    values = [315.58, 316.39, 316.79, 316.2, 9, 10]
    groups = ["first", "first", "second", "second", "third", "third"]
    two_months = pd.date_range("1-1-1959", periods=2, freq="M")
    dates = two_months.append(two_months).append(two_months)
    return pd.DataFrame({
        "value1": values,
        "value2": values,
        columns.category: groups,
        "item": list(groups),
        columns.date: dates,
    })
@pytest.fixture
def long_df_3(columns):
    """8-row frame with three group keys (country, item, store): four groups of two monthly rows."""
    values = [315.58, 316.39, 316.79, 316.2, 9, 10, 2, 3]
    groups = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"]
    two_months = pd.date_range("1-1-1959", periods=2, freq="M")
    dates = two_months.append(two_months).append(two_months).append(two_months)
    return pd.DataFrame({
        "value1": values,
        "value2": values,
        columns.category: groups,
        "item": list(groups),
        "store": list(groups),
        columns.date: dates,
    })
@pytest.fixture
def long_df_4(columns):
    """8-row frame (2020 dates) whose three group keys deliberately disagree on the last rows."""
    values = [315.58, 316.39, 316.79, 316.2, 9, 10, 2, 3]
    countries = ["first", "first", "second", "second", "third", "third", "first", "first"]
    items = ["first", "first", "second", "second", "third", "third", "second", "first"]
    stores = ["first", "first", "second", "second", "third", "third", "third", "fourth"]
    two_months = pd.date_range("1-1-2020", periods=2, freq="M")
    dates = two_months.append(two_months).append(two_months).append(two_months)
    return pd.DataFrame({
        "value1": values,
        "value2": values,
        columns.category: countries,
        "item": items,
        "store": stores,
        columns.date: dates,
    })
@pytest.fixture
def long_df_numerical(columns):
    """Fixture: same layout as ``long_df`` but with integer group labels."""
    values = [315.58, 316.39, 316.79, 316.2, 345, 234, 100, 299]
    groups = [1] * 4 + [2] * 4
    four_days = pd.date_range("1-1-1959", periods=4, freq="D")
    dates = four_days.append(four_days)
    return pd.DataFrame.from_dict({
        "value1": values,
        "value2": values,
        columns.category: groups,
        columns.date: dates,
    })
@pytest.fixture
def recipe_config(columns):
    """Fixture: recipe config for a causal, 3-day, left-closed window with
    'retrieve' and 'average' aggregations, grouped by country."""
    return {
        "window_type": "none",
        "groupby_columns": ["country"],
        "closed_option": "left",
        "window_unit": "days",
        "window_width": 3,
        "causal_window": True,
        "datetime_column": "Date",
        "advanced_activated": True,
        "aggregation_types": ["retrieve", "average"],
        "gaussian_std": 1,
    }
@pytest.fixture
def params(recipe_config):
    """Windowing parameters parsed from the default (causal) recipe config."""
    return get_windowing_params(recipe_config)
@pytest.fixture
def params_no_causal(recipe_config):
    """Windowing parameters with the causal-window option switched off.

    Note: mutates the ``recipe_config`` fixture dict in place, so a test
    requesting both this fixture and ``recipe_config`` sees the change.
    """
    recipe_config["causal_window"] = False
    return get_windowing_params(recipe_config)
class TestWindowingLongFormat:
    """Window aggregation over long-format data (one row per entity per
    timestamp): 1-3 grouping identifiers, causal vs non-causal windows,
    and numerical group labels."""

    def test_long_format(self, long_df, params, recipe_config,columns):
        """Causal window per category: first row of each group has no
        preceding data, hence NaN."""
        window_aggregator = WindowAggregator(params)
        groupby_columns = [columns.category]
        datetime_column = recipe_config.get('datetime_column')
        output_df = window_aggregator.compute(long_df, datetime_column, groupby_columns=groupby_columns)
        np.testing.assert_array_equal(np.round(output_df[columns.aggregation].values, 2), np.array([np.nan, 315.58, 315.98, 316.25, np.nan, 345.,
                                                                                                    289.5, 226.33]))
        np.testing.assert_array_equal(output_df.country.values, np.array(['first', 'first', 'first', 'first', 'second', 'second', 'second', 'second']))

    def test_two_identifiers(self, long_df_2, params, recipe_config,columns):
        """Grouping by two identifier columns keeps every (date, group) row."""
        window_aggregator = WindowAggregator(params)
        groupby_columns = ["country", "item"]
        datetime_column = recipe_config.get('datetime_column')
        output_df = window_aggregator.compute(long_df_2, datetime_column, groupby_columns=groupby_columns)
        np.testing.assert_array_equal(output_df[datetime_column].values,
                                      pd.DatetimeIndex(['1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000',
                                                        '1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000',
                                                        '1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000']))

    def test_three_identifiers(self, long_df_3, params, recipe_config,columns):
        """Grouping by three aligned identifier columns."""
        window_aggregator = WindowAggregator(params)
        groupby_columns = ["country", "item", "store"]
        datetime_column = recipe_config.get('datetime_column')
        output_df = window_aggregator.compute(long_df_3, datetime_column, groupby_columns=groupby_columns)
        np.testing.assert_array_equal(output_df[datetime_column].values,
                                      pd.DatetimeIndex(['1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000',
                                                        '1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000',
                                                        '1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000',
                                                        '1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000']))

    def test_mix_identifiers(self, long_df_4, params, recipe_config,columns):
        """Identifier columns that do not line up: rows are re-ordered by
        the resulting composite group keys."""
        window_aggregator = WindowAggregator(params)
        groupby_columns = ["country", "item", "store"]
        datetime_column = recipe_config.get('datetime_column')
        output_df = window_aggregator.compute(long_df_4, datetime_column, groupby_columns=groupby_columns)
        expected_dates = pd.DatetimeIndex(['2020-01-31T00:00:00.000000000', '2020-02-29T00:00:00.000000000',
                                           '2020-02-29T00:00:00.000000000', '2020-01-31T00:00:00.000000000',
                                           '2020-01-31T00:00:00.000000000', '2020-02-29T00:00:00.000000000',
                                           '2020-01-31T00:00:00.000000000', '2020-02-29T00:00:00.000000000'])
        np.testing.assert_array_equal(output_df[datetime_column].values, expected_dates)

    def test_empty_identifiers(self, df, params, recipe_config,columns):
        """[], None and omitted groupby_columns all behave the same."""
        window_aggregator = WindowAggregator(params)
        datetime_column = recipe_config.get('datetime_column')
        output_df = window_aggregator.compute(df, datetime_column, groupby_columns=[])
        assert output_df.shape == (4, 5)
        output_df = window_aggregator.compute(df, datetime_column)
        assert output_df.shape == (4, 5)
        output_df = window_aggregator.compute(df, datetime_column, groupby_columns=None)
        assert output_df.shape == (4, 5)

    def test_long_format_no_causal(self, long_df, params_no_causal, recipe_config,columns):
        """Non-causal (centered) window: NaNs appear at both group edges."""
        window_aggregator = WindowAggregator(params_no_causal)
        groupby_columns = ["country"]
        datetime_column = recipe_config.get('datetime_column')
        output_df = window_aggregator.compute(long_df, datetime_column, groupby_columns=groupby_columns)
        np.testing.assert_array_equal(np.round(output_df[columns.aggregation].values, 2), np.array([np.nan, 316.25, 316.46, np.nan, np.nan, 226.33,
                                                                                                    211., np.nan]))
        np.testing.assert_array_equal(output_df.country.values, np.array(['first', 'first', 'first', 'first', 'second', 'second', 'second', 'second']))

    def test_long_format_numerical(self, long_df_numerical, params, recipe_config,columns):
        """Integer group labels survive the aggregation unchanged."""
        window_aggregator = WindowAggregator(params)
        groupby_columns = ["country"]
        datetime_column = recipe_config.get('datetime_column')
        output_df = window_aggregator.compute(long_df_numerical, datetime_column, groupby_columns=groupby_columns)
        np.testing.assert_array_equal(output_df.country.values, np.array([1, 1, 1, 1, 2, 2, 2, 2]))
| nilq/baby-python | python |
#!/usr/bin/env python
import pyinotify
import os, sys
import logging
import json
import thread, threading
import time, datetime
import hashlib
import mimetypes
import traceback
# google stuff
from ServiceProviders.Google import GoogleServiceProvider
from apiclient.http import BatchHttpRequest
from apiclient import errors
#logging stuff
class NotImplementedError(Exception):
    """Generic exception for placeholder functions.

    NOTE(review): this shadows the builtin ``NotImplementedError`` for the
    rest of this module — consider renaming (e.g. ``PlaceholderError``).
    """
class GenericEventHandler(pyinotify.ProcessEvent):
    """Catch-all pyinotify handler that just logs every event.

    Overrides the per-event hooks of :class:`pyinotify.ProcessEvent`.
    Relies on a ``logger`` attribute being provided by the instantiator.
    """

    def process_IN_CREATE(self, event):
        """Log a file/directory creation event."""
        message = "-> Creating: %s" % event.name
        self.logger.info(message)

    def process_IN_DELETE(self, event):
        """Log a file/directory removal event."""
        message = "-> Removing: %s" % event.name
        self.logger.info(message)

    def process_default(self, event):
        """Log any event type without a dedicated handler."""
        message = "->Unknown event: %s" % event.maskname
        self.logger.info(message)
class GoogleEventHandler(pyinotify.ProcessEvent):
    """pyinotify handler that mirrors a watched directory tree to Google
    Drive: folders are created on IN_CREATE, files are uploaded by a
    background sync thread, and deletions/modifications are propagated
    through the Drive API.
    """
def __init__(self, options_dict, watch_descriptors):
    """Build the Drive service, logging, shared state and worker threads.

    ``options_dict`` carries every parameter necessary for
    ``GoogleServiceProvider.__init__()`` plus the keys read below
    (``LOGFILE``, ``DEFAULT_PARENT``, ``treefile``, ``FILE_SYNC_INTERVAL``).
    ``watch_descriptors`` is a list of {path: watch-descriptor} dicts;
    the entry whose descriptor equals 1 becomes the protected (root) dir.
    """
    self.sp = GoogleServiceProvider(**options_dict)
    self.credentials = self.sp.get_stored_credentials('testid')
    self.service = self.sp.build_service(self.credentials)
    # build_service() returns an (http, service) pair — unpack it.
    self.http = self.service[0]
    self.service = self.service[1]
    self.options_dict = options_dict
    # The watch with descriptor 1 is the root of the watched tree.
    for key, value in watch_descriptors[0].items():
        if value == 1:
            self.protected_dir = key
    self.descriptors = watch_descriptors
    # Flattened {path: descriptor} view of all watches.
    self.descriptors_dict = {}
    for desc in self.descriptors:
        self.descriptors_dict.update(desc)
    ### logging stuff:
    self.logger = logging.getLogger('main')
    self.logger.setLevel(logging.DEBUG)
    # create console handler and set level to debug
    self.ch = logging.StreamHandler()
    self.ch.setLevel(logging.DEBUG)
    # create formatter
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                        filename=options_dict['LOGFILE'])
    # add formatter to ch
    self.ch.setFormatter(formatter)
    # add ch to logger
    # NOTE(review): the console handler is configured but never attached
    # via addHandler(), and these addLevelName() calls map each level to
    # its existing name (no-ops) — confirm intent.
    logging.addLevelName( logging.WARNING, "%s" % logging.getLevelName(logging.WARNING))
    logging.addLevelName( logging.ERROR, "%s" % logging.getLevelName(logging.ERROR))
    logging.addLevelName( logging.DEBUG, "%s" % logging.getLevelName(logging.DEBUG))
    logging.addLevelName( logging.INFO, "%s" % logging.getLevelName(logging.INFO))
    # we need this mutex for the files_dict dictionary
    self.mutex = threading.Lock()
    # this is by default the SyncThemAll folder on GoogleDrive
    if self.options_dict['DEFAULT_PARENT'] != 'root':
        self.default_pid = self.sp.query_entity(self.service,"title = '"+self.options_dict['DEFAULT_PARENT']+"'")[0]['id']
    else:
        self.default_pid = 'root'
    # this will have to be loaded from json
    ## the structure of the json is:
    """
    {
    'folder_pathname' :
    {
    'files' : {
    'file1': {'md5sum': None,
    'ownId': None,
    'parent':None,
    'alreadyUploaded': False,
    'alreadyUpdated': False,
    'upLoadinProgress': False,
    'progressBar': int,
    'fullpath':None,
    'fileBody': {},
    'googleBody': {} } } },
    'file2': {'md5sum': None,
    'ownId': None,
    'parent':None,
    'alreadyUploaded': False,
    'alreadyUpdated': False,
    'upLoadinProgress': False,
    'progressBar': int,
    'fullpath':None,
    'fileBody': {},
    'googleBody': {} } } },
    'file3': {'md5sum': None,
    'ownId': None,
    'parent':None,
    'alreadyUploaded': False,
    'alreadyUpdated': False,
    'upLoadinProgress': False,
    'progressBar': int,
    'fullpath':None,
    'fileBody': {},
    'googleBody': {} } } }
    },
    'parent': None
    'alreadyCreated': False,
    'alreadyUpdated':False,
    'grive': {'own_google_id':None, 'parent_google_id': None }
    'folderBody': {}
    'googleMetaData': {}
    },
    }
    """
    self.jsonfile = self.options_dict['treefile']
    self.files_dict = {}
    if not os.path.exists(self.jsonfile):
        # Seed an empty entry for every watched directory.
        # NOTE(review): dict.fromkeys() shares ONE value object across all
        # keys, so every directory aliases the same nested dicts — confirm
        # this is intended.
        self.files_dict.update(self.descriptors_dict.fromkeys(self.descriptors_dict.keys(),
            {'files': {}, 'grive':{'own_google_id': None, 'parent_google_id': None}, 'folderBody':{}, 'googleMetaData':{} }))
    else:
        with open(self.jsonfile, 'r') as infile:
            try:
                self.files_dict = json.loads(infile.read())
                infile.close()
            except ValueError as e:
                self.logger.info("Jsonfile %s not found or corrupted!\n Please remove, or stash it." % self.jsonfile)
    # Background workers: periodic JSON persistence and file-upload sync.
    self.syncthread = thread.start_new_thread(self._save_to_json, ())
    self.filesyncthread = thread.start_new_thread(self._synchronize_files, ())
    # [thread.start_new_thread(self._synchronize_files, ()) for i in range(10)]
def _save_to_json(self):
    """Background worker: persist ``self.files_dict`` to ``self.jsonfile``
    every 10 seconds, forever.

    Errors are logged and swallowed so the thread keeps running.
    NOTE(review): the snapshot is serialized without holding
    ``self.mutex``, so a concurrent mutation can be written half-applied —
    confirm whether that is acceptable.
    """
    while True:
        self.logger.info("%s save_to_json()" % datetime.datetime.now())
        try:
            with open(self.jsonfile, 'w') as outfile:
                try:
                    json.dump(self.files_dict, outfile)
                except Exception as e:
                    self.logger.info("%s" % e)
        except Exception as e:
            # Fixed: this was ``traceback.self.logger.info_exc()``, which is
            # not a valid attribute chain and raised AttributeError inside
            # the handler; format_exc() is what was intended.
            tb = traceback.format_exc()
            t = (datetime.datetime.now(), tb, e)
            self.logger.info("%s" % t)
        time.sleep(10)
def _synchronize_files(self):
    """Background worker: walk the protected directory every
    ``FILE_SYNC_INTERVAL`` seconds and upload any file not yet on Drive.

    Runs forever.  Builds its own GoogleServiceProvider/service so it does
    not share HTTP state with the inotify callbacks.
    """
    self.file_sp = GoogleServiceProvider(**self.options_dict)
    self.file_credentials = self.file_sp.get_stored_credentials('testid')
    self.file_service = self.file_sp.build_service(self.file_credentials)[1]
    while True:
        # self.logger.info("%s %s -> _synchronize_files() " % (datetime.datetime.now(), threading.current_thread()))
        for (fullpath, directory, file_list) in os.walk(self.protected_dir):
            try:
                # Only directories already registered in files_dict are synced.
                if fullpath not in self.files_dict.keys():
                    continue
                for fisier in file_list:
                    fp = os.path.join(fullpath, fisier)
                    self.mutex.acquire()
                    if fisier not in self.files_dict[fullpath]['files']:
                        # First sighting of this file: record metadata plus
                        # the Drive request body used for the upload.
                        self.files_dict[fullpath]['files'][fisier] = {
                            'md5sum': hashlib.md5(open(fp).read()).hexdigest(),
                            'ownId': None,
                            'parent': fullpath,
                            'alreadyUploaded': False,
                            'alreadyUpdated': False,
                            'upLoadinProgress': False,
                            'progressBar': 0,
                            'fullpath': fp,
                            'fileBody': {
                                'title': fisier,
                                'description': fp,
                                'mimeType': mimetypes.guess_type(fp)[0] or 'text/plain',
                                'parents': [
                                    {
                                        "kind": "drive#parentReference",
                                        "id": None,
                                    }
                                ],
                            },
                            'googleBody': {},
                        }
                    if self.files_dict[fullpath]['files'][fisier]['alreadyUploaded']:
                        self.mutex.release()
                        continue
                    if os.path.getsize(fp) == 0:
                        self.logger.info("%s is 0 bytes in size, skipping" % fp)
                        self.mutex.release()
                        continue
                    # Drive parent: the containing folder's own id if known,
                    # else the default parent when the file sits in a watch root.
                    if self.files_dict[fullpath]['grive']['own_google_id']:
                        self.files_dict[fullpath]['files'][fisier]['fileBody']['parents'][0]['id'] = self.files_dict[fullpath]['grive']['own_google_id']
                    if self.files_dict[fullpath]['grive']['own_google_id'] is None and fullpath in self.descriptors[0]:
                        self.files_dict[fullpath]['files'][fisier]['fileBody']['parents'][0]['id'] = self.default_pid
                    self.mutex.release()
                    # NOTE(review): if all 5 attempts raise, googleReturnBody
                    # stays unbound and the check below raises NameError.
                    for retry in range(5):
                        try:
                            self.logger.debug("Uploading file: %s" % fisier)
                            googleReturnBody = self.file_sp.upload_file(fisier,
                                self.files_dict[fullpath]['files'][fisier]['fullpath'],
                                self.file_service,
                                self.files_dict[fullpath]['files'][fisier]['fileBody'])
                            break
                        except Exception as e:
                            self.logger.error("%s" % e)
                            traceback.print_exc()
                    if googleReturnBody:
                        try:
                            self.mutex.acquire()
                            self.files_dict[fullpath]['files'][fisier]['googleBody'] = googleReturnBody
                            self.files_dict[fullpath]['files'][fisier]['ownId'] = googleReturnBody['id']
                            self.files_dict[fullpath]['files'][fisier]['alreadyUploaded'] = True
                            self.logger.info("Successfully uploaded file: %s " % fp)
                            self.mutex.release()
                        except KeyError as e:
                            self.logger.info("File has already been deleted from the filesytem: %s" % e)
                            self.mutex.release()
                            continue
            except IOError as e:
                # NOTE(review): releasing here assumes the mutex is held at
                # the point of failure — confirm (an IOError before acquire()
                # would release an unheld lock).
                self.logger.info("File has already been deleted from the filesystem: %s " % e)
                self.mutex.release()
                continue
            # finally:
            #     # if self.mutex._is_owned():
            #     self.mutex.release()
        time.sleep(self.options_dict['FILE_SYNC_INTERVAL'])
def callb(request_id, response, exception):
    """BatchHttpRequest callback: record a successfully created folder in
    ``files_dict`` or log the failure.

    NOTE(review): indentation was lost in this file.  This function and the
    ``self.batch`` assignment below reference ``self`` without receiving
    it, so they were almost certainly nested inside a method (probably
    ``__init__``) in the original source — confirm placement before use.
    """
    t = (request_id, self.batch._requests, exception)
    def upd():
        # Mark the folder created and remember its Drive id/metadata.
        self.files_dict[response['description']]['alreadyCreated'] = True
        self.files_dict[response['description']]['grive']['own_google_id'] = response['id']
        self.files_dict[response['description']]['googleMetaData'].update(response)
    if exception is not None:
        self.logger.info("Error occured during BatchHttpRequest %s" % (t,))
    else:
        self.mutex.acquire()
        upd()
        self.mutex.release()
self.batch = BatchHttpRequest(callback=callb)
def process_IN_CREATE(self, event):
    """pyinotify hook: register a newly created path.

    Only directory events are acted on here: the folder is created on
    Drive and recorded in ``files_dict``.  Plain files are ignored and
    picked up later by the ``_synchronize_files`` worker.
    """
    t = {'event.pathname': event.pathname,
         'event.maskname': event.maskname,
         'event.wd': event.wd,
         'event.dir': event.dir }
    self.logger.info("-> Creating: %s" % t)
    parent = os.path.abspath(os.path.join(event.pathname, os.pardir))
    # Skeleton entry for a new directory, mirroring the JSON tree layout.
    folderbody = {'files': {},
                  'parent': parent,
                  'alreadyCreated': False,
                  'alreadyUpdated':False,
                  'grive': {'own_google_id':None, 'parent_google_id': None },
                  'folderBody': {
                      'title': os.path.basename(event.pathname),
                      'description': event.pathname,
                      'mimeType': 'application/vnd.google-apps.folder',
                      "parents": [{
                          "kind": "drive#parentReference",
                          "id": None,
                      }],
                  },
                  'googleMetaData': {}}
    if event.dir:
        # we populate the structure first
        self.mutex.acquire()
        try:
            if self.files_dict[event.pathname]['alreadyCreated']:
                self.mutex.release()
                return 0
        except KeyError as e:
            # Unknown path: register the skeleton entry.
            self.files_dict[event.pathname] = folderbody
        self.mutex.release()
        # let's get the parent id
        if parent != self.protected_dir and parent in self.files_dict.keys():
            pid = self.files_dict[parent]['grive']['own_google_id']
        else:
            pid = None
        if parent == self.protected_dir:
            pid = self.default_pid
        self.mutex.acquire()
        # update structure first
        self.files_dict[event.pathname]['grive']['parent_google_id'] = pid
        self.files_dict[event.pathname]['folderBody']['parents'][0]['id'] = pid
        self.mutex.release()
        # NOTE(review): create_folder() is a network round-trip performed
        # while holding the mutex — confirm this blocking is intended.
        self.mutex.acquire()
        own_id = self.sp.create_folder(self.service, self.files_dict[event.pathname]['folderBody'])
        self.mutex.release()
        if own_id:
            self.mutex.acquire()
            t = (own_id['id'], own_id['title'])
            self.logger.info("Acquired own_id and title: %s" % (t,))
            self.files_dict[event.pathname]['grive']['own_google_id'] = own_id['id']
            self.files_dict[event.pathname]['googleMetaData'] = own_id
            self.files_dict[event.pathname]['alreadyCreated'] = True
            self.mutex.release()
def process_IN_DELETE(self, event):
    """pyinotify hook: propagate a local deletion to Google Drive.

    Directories are removed by their recorded Drive id and dropped from
    ``files_dict``; files are removed from their parent's ``files`` map.
    Both paths retry the Drive call up to 5 times on HttpError.

    NOTE(review): a directory path missing from ``files_dict`` raises
    KeyError here with the mutex held — confirm the event stream
    guarantees prior registration.
    """
    t = {'event.pathname': event.pathname,
         'event.maskname': event.maskname,
         'event.wd': event.wd,
         'event.dir': event.dir }
    self.logger.info("-> Removing: %s" % t)
    parent = os.path.abspath(os.path.join(event.pathname, os.pardir))
    if event.dir:
        self.mutex.acquire()
        #if parent in self.files_dict.keys() and self.files_dict[event.pathname]['grive']['own_google_id']:
        if self.files_dict[event.pathname]['grive']['own_google_id']:
            for retry in range(5):
                try:
                    self.service.files().delete(fileId=self.files_dict[event.pathname]['grive']['own_google_id']).execute()
                    # Fixed: without this break a successful delete was
                    # re-issued on every retry iteration (the file branch
                    # below already breaks on success).
                    break
                except errors.HttpError as e:
                    self.logger.info("%s" % e)
                    continue
        self.files_dict.pop(event.pathname)
        self.mutex.release()
    else:
        if parent in self.files_dict.keys():
            self.mutex.acquire()
            try:
                if self.files_dict[parent]['files'][os.path.basename(event.pathname)]['ownId']:
                    for retry in range(5):
                        try:
                            self.service.files().delete(fileId=self.files_dict[parent]['files'][os.path.basename(event.pathname)]['ownId']).execute()
                            break
                        except errors.HttpError as e:
                            self.logger.info("%s" % e)
                            continue
            except KeyError as e:
                self.mutex.release()
                return 0 # parent folder has been deleted
            try:
                self.files_dict[parent]['files'].pop(os.path.basename(event.pathname))
            except KeyError as e:
                self.mutex.release()
                return 0
            self.mutex.release()
def process_IN_MODIFY(self, event):
    """pyinotify hook: push a changed file's new content to Drive.

    Re-hashes the file and, when the md5 differs from the recorded one,
    calls ``update_file`` with the stored Drive id and request body.
    """
    t = {'event.pathname': event.pathname,
         'event.maskname': event.maskname,
         'event.wd': event.wd,
         'event.dir': event.dir }
    self.logger.info("-> Modified: %s" % t)
    parent = os.path.abspath(os.path.join(event.pathname, os.pardir))
    self.mutex.acquire()
    # Unknown file (not yet picked up by the sync thread): nothing to update.
    if event.name not in self.files_dict[parent]['files']:
        self.mutex.release()
        return 0
    try:
        if not event.dir:
            # NOTE(review): the file is read and hashed twice, the
            # update_file result is unused, and the network call runs with
            # the mutex held — consider hashing once and reviewing locking.
            if hashlib.md5(open(event.pathname).read()).hexdigest() != self.files_dict[parent]['files'][event.name]['md5sum']:
                self.files_dict[parent]['files'][event.name]['md5sum'] = hashlib.md5(open(event.pathname).read()).hexdigest()
                updated_file = self.sp.update_file(self.service, event.pathname, self.files_dict[parent]['files'][event.name]['ownId'],
                                                   new_body=self.files_dict[parent]['files'][event.name]['fileBody'])
    except (KeyError, IOError) as e:
        self.mutex.release()
        self.logger.info("Modify error: %s" % e)
        return 0
    self.mutex.release()
def __del__(self):
    """Drop service references and log the handler shutdown."""
    self.sp = self.credentials = self.service = None
    self.logger.info("Shutting down %s" % self.__class__.__name__)
| nilq/baby-python | python |
# CS4120 NLP, Northeastern University 2020
import spacy
from tqdm import tqdm
from spacy.analysis import Token, Doc, Span
from data_management import output_filepath, input_filepath
def main():
    """Tag each sentence of samplesentences.txt with spaCy POS tags and
    entity types, writing one "TAG/ENT" token per word, one line per doc.

    NOTE(review): output is written via ``input_filepath()`` although
    ``output_filepath`` is imported (and unused) — confirm the intended
    location.  Also note the module imports Token/Doc from
    ``spacy.analysis``; spaCy exposes them from ``spacy.tokens``.
    """
    nlp = spacy.load("en_core_web_sm")
    docs = []
    with open(input_filepath("samplesentences.txt")) as f:
        for line in tqdm(f, desc="Parsing dataset"):
            if line.isspace():
                # skip blank lines
                continue
            else:
                doc: Doc = nlp(line)
                docs.append(doc)
    with open(input_filepath("training_tags_out.txt"), "w") as f:
        for doc in docs: # type: Doc
            def token_info_string(token: Token) -> str:
                # "<fine-grained POS tag>/<entity type>" for one token.
                return f"{token.tag_}/{token.ent_type_}"
            f.write(" ".join([token_info_string(token) for token in doc]))
            f.write("\n")
# Script entry point.
if __name__ == "__main__":
    main()
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""Tests for CommandChainDispatcher."""
from __future__ import absolute_import
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.core.error import TryNext
from IPython.core.hooks import CommandChainDispatcher
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
# Define two classes, one which succeeds and one which raises TryNext. Each
# sets the attribute `called` to True when it is called.
class Okay(object):
    """Succeeding hook: records that it was invoked, returns its message."""

    def __init__(self, message):
        self.called = False
        self.message = message

    def __call__(self):
        self.called = True
        return self.message
class Fail(object):
    """Failing hook: records that it was invoked, then raises TryNext."""

    def __init__(self, message):
        self.called = False
        self.message = message

    def __call__(self):
        self.called = True
        raise TryNext(self.message)
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
def test_command_chain_dispatcher_ff():
    """Test two failing hooks"""
    fail1, fail2 = Fail(u'fail1'), Fail(u'fail2')
    dp = CommandChainDispatcher([(0, fail1), (10, fail2)])
    # When every hook raises TryNext, the dispatcher must re-raise the
    # TryNext of the last hook tried.
    try:
        dp()
    except TryNext as e:
        nt.assert_equal(str(e), u'fail2')
    else:
        assert False, "Expected exception was not raised."
    # Both hooks must have been attempted, in priority order.
    nt.assert_true(fail1.called)
    nt.assert_true(fail2.called)
def test_command_chain_dispatcher_fofo():
    """Test a mixture of failing and succeeding hooks."""
    fail1, fail2 = Fail(u'fail1'), Fail(u'fail2')
    okay1, okay2 = Okay(u'okay1'), Okay(u'okay2')
    dp = CommandChainDispatcher([(0, fail1),
                                 # (5, okay1), # add this later
                                 (10, fail2),
                                 (15, okay2)])
    dp.add(okay1, 5)
    # Dispatch stops at the first hook that does not raise TryNext.
    nt.assert_equal(dp(), u'okay1')
    nt.assert_true(fail1.called)
    nt.assert_true(okay1.called)
    nt.assert_false(fail2.called)
    nt.assert_false(okay2.called)
def test_command_chain_dispatcher_eq_priority():
    """Hooks registered at an equal priority must coexist in the chain.

    NOTE(review): this test builds the dispatcher but makes no assertion —
    it appears truncated; it presumably should assert dp() == u'okay1'.
    """
    okay1 = Okay(u'okay1')
    okay2 = Okay(u'okay2')
    dp = CommandChainDispatcher([(1, okay1)])
    dp.add(okay2, 1)
| nilq/baby-python | python |
from datetime import date
import uuid
from typing import Optional, List
from pydantic import BaseModel, Field
def generate_invoice_id():
    """Return a fresh random UUID4, stringified, for use as an invoice id."""
    new_id = uuid.uuid4()
    return str(new_id)
class InvoiceInfo(BaseModel):
    """Header-level fields of an extracted invoice (pydantic v1 model)."""

    # Auto-generated UUID4 string when not supplied by the caller.
    invoice_id: str = Field(default_factory=generate_invoice_id)
    issuer_name: str
    issuer_address: Optional[str]
    recipient_name: Optional[str]
    document_date: Optional[date]
    payment_date: Optional[date]
    due_date: Optional[date]
    currency: Optional[str]
    amount_total: float
    amount_paid: Optional[float]
    amount_tax: Optional[float]
    amount_due: Optional[float]
    amount_sum: Optional[float]
    num_items: Optional[int]

    class Config:
        # pydantic v1: allow construction from ORM/attribute objects.
        orm_mode = True
class InvoiceItem(BaseModel):
    """One line item of an invoice, keyed to its parent by invoice_id."""

    invoice_id: str
    item_name: str
    sub_total: float

    class Config:
        # pydantic v1: allow construction from ORM/attribute objects.
        orm_mode = True
class Invoice(BaseModel):
    """A complete invoice: header info plus its line items."""

    info: InvoiceInfo
    items: List[InvoiceItem]
# Generated by Django 3.0.7 on 2020-07-17 15:52
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the judge app (0011).

    Relaxes several Problem text fields (blank/null allowed, larger
    max_length) and SelectProblem.solution.  Applied migrations should not
    be edited by hand.
    """

    dependencies = [
        ('judge', '0010_auto_20200717_1735'),
    ]

    operations = [
        migrations.AlterField(
            model_name='problem',
            name='create_sql',
            field=models.CharField(blank=True, max_length=20000, null=True),
        ),
        migrations.AlterField(
            model_name='problem',
            name='insert_sql',
            field=models.CharField(blank=True, max_length=20000, null=True),
        ),
        migrations.AlterField(
            model_name='problem',
            name='text_md',
            field=models.CharField(blank=True, max_length=5000),
        ),
        migrations.AlterField(
            model_name='problem',
            name='title_md',
            field=models.CharField(blank=True, max_length=100),
        ),
        migrations.AlterField(
            model_name='selectproblem',
            name='solution',
            field=models.CharField(blank=True, max_length=5000, validators=[django.core.validators.MinLengthValidator(1)]),
        ),
    ]
| nilq/baby-python | python |
import sys
import numpy as np
import matplotlib.pyplot as plt
# Load the timing tables for the two algorithms from the paths given on
# the command line (argv[1] = brute force, argv[2] = closest pair).
#bf = np.loadtxt('data/times/brute_force.txt')
#cp = np.loadtxt('data/times/closest_pair.txt')
bf = np.loadtxt(sys.argv[1])
cp = np.loadtxt(sys.argv[2])
# Reshape to one row per problem size (6 sizes).
# NOTE(review): assumes the flat file length is an exact multiple of 6 and
# rows are ordered by size — confirm the data layout.
bf = bf.reshape(6, len(bf) // 6)
cp = cp.reshape(6, len(cp) // 6)
# Mean time per size, excluding the first column (warm-up run? — confirm).
bf_mean = np.mean(bf[:,1:], axis=1)
cp_mean = np.mean(cp[:,1:], axis=1)
# Problem sizes: 10^1 .. 10^6 aircraft.
N = np.array([10**i for i in range(1, 7)])
# Log-log plot of the measured means against theoretical complexity curves.
plt.plot(N, bf_mean, "r-x", label="Brute Force")
plt.plot(N, cp_mean, "b-o", label="Closest Pair")
plt.plot(N,1e-8*N**2, "r-.", label=r"$O(n^2)$")
plt.plot(N,1.5e-7*N*np.log(N)/np.log(2), "b-.", label=r"$O(n\log_2n)$")
plt.xscale('log')
plt.yscale('log')
plt.xlabel("N Aircrafts")
plt.ylabel("Time [s]")
plt.grid(True)
plt.legend()
plt.show()
from core.testing import APITestCase
class TestAPITestCase(APITestCase):
    """Sanity check for the project's APITestCase base class."""

    def test_tests(self):
        # The base class is expected to attach pytest marks and a `mixer`
        # fixture to every test case.
        self.assertTrue(hasattr(self, 'pytestmark'))
        self.assertTrue(hasattr(self, 'mixer'))
| nilq/baby-python | python |
# NOT FINISHED, barely started
import copy
import time
import random
import math
from typing import List
import jax.numpy as np
from pomdp_py.framework.basics import Action, Agent, POMDP, State, Observation,\
ObservationModel, TransitionModel, GenerativeDistribution, PolicyModel
from pomdp_py.framework.planner import Planner
from pomdp_py.representations.distribution.particles import Particles
from pomdp_py.algorithms.po_uct import VNode, RootVNode, QNode, POUCT, RandomRollout
from pomdp_py.algorithms.pomcp import VNodeParticles, RootVNodeParticles, POMCP
def particle_reinvigoration(particles, num_particles, state_transform_func=None):
    """Top the particle set up to at least `num_particles` particles.

    The given particles should already reflect the transition caused by the
    real action, so they form part of the reinvigorated set as-is.  Having
    more than `num_particles` already is fine.  Raises ValueError on an
    empty set (particle deprivation).
    """
    refilled = copy.deepcopy(particles)
    if len(refilled) == 0:
        raise ValueError("Particle deprivation.")
    if len(refilled) > num_particles:
        return refilled
    print("Particle reinvigoration for %d particles" % (num_particles - len(refilled)))
    while len(refilled) < num_particles:
        # Copy so the optional noise transform cannot mutate a state still
        # held by the source particle set.
        candidate = copy.deepcopy(particles.random())
        if state_transform_func is not None:
            candidate = state_transform_func(candidate)
        refilled.add(candidate)
    return refilled
def update_particles_belief(
    current_particles,
    real_action,
    real_observation=None,
    observation_model=None,
    transition_model=None,
    blackbox_model=None,
    state_transform_func=None,
):
    """Explicit particle-filter belief update.

    Either `blackbox_model` is given, or both `transition_model` and
    `observation_model` are.  Each particle is propagated through the
    model(s); only particles whose sampled observation matches
    `real_observation` are kept, and the surviving set is reinvigorated
    back up to the original particle count.  You do NOT need to call this
    if you are using POMCP.

    Args:
        state_transform_func (State->State): used to add artificial noise
            to the reinvigorated particles.
    """
    # Fixed: filtered_particles was used without ever being initialized,
    # which raised NameError on every call.
    filtered_particles = []
    for particle in current_particles.particles:
        # particle represents a state
        if blackbox_model is not None:
            # We're using a blackbox generator; (s',o,r) ~ G(s,a)
            result = blackbox_model.sample(particle, real_action)
            next_state = result[0]
            observation = result[1]
        else:
            # We're using explicit models
            next_state = transition_model.sample(particle, real_action)
            observation = observation_model.sample(next_state, real_action)
        # If observation matches real, then the next_state is accepted
        if observation == real_observation:
            filtered_particles.append(next_state)
    # Particle reinvigoration
    return particle_reinvigoration(Particles(filtered_particles), len(current_particles.particles),
                                   state_transform_func=state_transform_func)
def sample_explicit_models(T, O, R, state, action, discount_factor=1.):
    """Sample one step from explicit models.

    Returns ``(next_state, observation, reward, nsteps)`` when an
    observation model ``O`` is supplied, else ``(next_state, reward,
    nsteps)``.  ``discount_factor`` is accepted for API compatibility but
    is not used here.
    """
    next_state = T.sample(state, action)
    reward = R.sample(state, action, next_state)
    nsteps = 1
    if O is None:
        return next_state, reward, nsteps
    observation = O.sample(next_state, action)
    return next_state, observation, reward, nsteps
class ParticlesJax(Particles):
    """Particle belief over states backed by a JAX weight array.

    ``_values`` maps integer indices back to State objects; ``_weights``
    holds the per-state weights, which may be unnormalized counts.
    """
    # represents a belief / distribution over states
    def __init__(self, values: List[State], weights: np.ndarray):
        self._values = values # used to convert from integer to State
        self._weights = weights # can be unnormalized, i.e. counts
    def add(self, particle, weight=1):
        # not sure we want to use this API
        # NOTE(review): assumes `particle` is already an integer index into
        # _weights; the commented-out code below hints a State -> index
        # conversion was planned — confirm what callers pass.
        self._weights = self._weights.at[particle].add(weight)
        #self._values.index(particle)
        #if isinstance(particle, State)
        #else particle
        #].add(weight)
class PomcpJax(POMCP):
"""POMCP is POUCT + particle belief representation.
This POMCP version only works for problems
with action space that can be enumerated."""
def __init__(self,
max_depth=5, planning_time=-1., num_sims=-1,
discount_factor=0.9, exploration_const=math.sqrt(2),
num_visits_init=0, value_init=0,
rollout_policy=RandomRollout(), action_prior=None,
show_progress=False, pbar_update_interval=5):
super().__init__(max_depth=max_depth,
planning_time=planning_time,
num_sims=num_sims,
discount_factor=discount_factor,
exploration_const=exploration_const,
num_visits_init=num_visits_init,
value_init=value_init,
rollout_policy=rollout_policy,
action_prior=action_prior,
show_progress=show_progress,
pbar_update_interval=pbar_update_interval)
# TODO: can remove all when convert to cython
#self._show_progress = show_progress
def plan(self, agent):
# Only works if the agent's belief is particles
if not isinstance(agent.belief, ParticlesJax):
raise TypeError("Agent's belief is not represented in particles.\n"\
"POMCP not usable. Please convert it to particles.")
return POUCT.plan(self, agent)
def update(self, agent, real_action, real_observation,
state_transform_func=None):
"""
Assume that the agent's history has been updated after taking real_action
and receiving real_observation.
`state_transform_func`: Used to add artificial transform to states during
particle reinvigoration. Signature: s -> s_transformed
"""
if not isinstance(agent.belief, ParticlesJax):
raise TypeError("agent's belief is not represented in particles.\n"\
"POMCP not usable. Please convert it to particles.")
if not hasattr(agent, "tree"):
print("Warning: agent does not have tree. Have you planned yet?")
return
if agent.tree[real_action][real_observation] is None:
# Never anticipated the real_observation. No reinvigoration can happen.
raise ValueError("Particle deprivation.")
# Update the tree; Reinvigorate the tree's belief and use it
# as the updated belief for the agent.
agent.tree = RootVNodeParticles.from_vnode(agent.tree[real_action][real_observation],
agent.history)
tree_belief = agent.tree.belief
agent.set_belief(particle_reinvigoration(
tree_belief,
len(agent.init_belief.particles),
state_transform_func=state_transform_func))
# If observation was never encountered in simulation, then tree will be None;
# particle reinvigoration will occur.
if agent.tree is not None:
agent.tree.belief = copy.deepcopy(agent.belief)
def _search(self):
if self._show_progress:
if stop_by_sims:
total = int(self._num_sims)
else:
total = self._planning_time
pbar = tqdm(total=total)
start_time = time.time()
while True:
## Note: the tree node with () history will have
## the init belief given to the agent.
state = self._agent.sample_belief()
self._simulate(state, self._agent.history, self._agent.tree,
None, None, 0)
sims_count +=1
time_taken = time.time() - start_time
if self._show_progress and sims_count % self._pbar_update_interval == 0:
if stop_by_sims:
pbar.n = sims_count
else:
pbar.n = time_taken
pbar.refresh()
if stop_by_sims:
if sims_count >= self._num_sims:
break
else:
if time_taken > self._planning_time:
if self._show_progress:
pbar.n = self._planning_time
pbar.refresh()
break
if self._show_progress:
pbar.close()
best_action = self._agent.tree.argmax()
return best_action, time_taken, sims_count
    def _simulate(self,
                  state, history, root, parent,
                  observation, depth):
        """One MCTS simulation pass (POMCP variant).

        Recursively descends the search tree from `root`; when an
        unvisited node is reached, creates its VNode, expands its action
        edges and estimates its value with a rollout. The discounted
        return is then backed up along the visited path.

        Args:
            state: sampled state to simulate from.
            history: action-observation history leading to `root`.
            root: VNode to simulate from (None if not created yet).
            parent: QNode whose `observation` edge leads to `root`.
            observation: observation on the edge from `parent` to `root`.
            depth: current depth in the search tree.

        Returns:
            The (discounted) return collected at and below this node.
        """
        if depth > self._max_depth:
            return 0
        if root is None:
            # Leaf without a VNode yet: create it (the very first node
            # becomes the agent's tree root), expand, then rollout.
            if self._agent.tree is None:
                root = self._VNode(agent=self._agent, root=True)
                self._agent.tree = root
                if self._agent.tree.history != self._agent.history:
                    raise ValueError("Unable to plan for the given history.")
            else:
                root = self._VNode()
            if parent is not None:
                parent[observation] = root
            self._expand_vnode(root, history, state=state)
            rollout_reward = self._rollout(state, history, root, depth)
            return rollout_reward
        # Select an action by UCB1 and sample (s', o, r, nsteps).
        action = self._ucb(root)
        next_state, observation, reward, nsteps = sample_generative_model(self._agent, state, action)
        if nsteps == 0:
            # This indicates the provided action didn't lead to transition
            # Perhaps the action is not allowed to be performed for the given state
            # (for example, the state is not in the initiation set of the option,
            # or the state is a terminal state)
            return reward
        total_reward = reward + (self._discount_factor**nsteps)*self._simulate(
            next_state,
            history + ((action, observation),),
            root[action][observation],
            root[action],
            observation,
            depth+nsteps)
        # Back up visit counts and a running-average value estimate.
        root.num_visits += 1
        root[action].num_visits += 1
        root[action].value = root[action].value + (total_reward - root[action].value) / (root[action].num_visits)
        # POMCP simulate, need to update belief as well
        if depth == 1 and root is not None:
            root.belief.add(state) # belief update happens as simulation goes.
        return total_reward
def _rollout(self, state, history, root, depth):
while depth < self._max_depth:
action = self._rollout_policy.rollout(state, history)
next_state, observation, reward, nsteps = sample_generative_model(self._agent, state, action)
history = history + ((action, observation),)
depth += nsteps
total_discounted_reward += reward * discount
discount *= (self._discount_factor**nsteps)
state = next_state
return total_discounted_reward
def _ucb(self, root):
"""UCB1"""
best_action, best_value = None, float('-inf')
for action in root.children:
if root[action].num_visits == 0:
val = float('inf')
else:
val = root[action].value + \
self._exploration_const * math.sqrt(math.log(root.num_visits + 1) / root[action].num_visits)
if val > best_value:
best_action = action
best_value = val
return best_action
def _sample_generative_model(self, state, action):
'''
(s', o, r) ~ G(s, a)
'''
if self._agent.transition_model is None:
next_state, observation, reward = self._agent.generative_model.sample(state, action)
else:
next_state = self._agent.transition_model.sample(state, action)
observation = self._agent.observation_model.sample(next_state, action)
reward = self._agent.reward_model.sample(state, action, next_state)
return next_state, observation, reward
def _VNode(self, agent=None, root=False, **kwargs):
"""Returns a VNode with default values; The function naming makes it clear
that this function is about creating a VNode object."""
if root:
# agent cannot be None.
return RootVNodeParticles(self._num_visits_init,
agent.history,
belief=copy.deepcopy(agent.belief))
else:
if agent is None:
return VNodeParticles(self._num_visits_init,
belief=Particles([]))
else:
return VNodeParticles(self._num_visits_init,
belief=copy.deepcopy(agent.belief))
# ---- file boundary (extraction artifact) ----
# Generated by Django 3.2.3 on 2021-11-11 14:04
from django.db import migrations, models
import django.db.models.deletion
def copy_funding_instruments_from_calls_to_projects(apps, schema_editor):
    """Data migration: backfill each Project's new ``funding_instrument``
    field from the funding instrument of the call it belongs to.

    Uses the historical model via ``apps.get_model`` as required inside
    migrations. Saves row by row so any model save logic applies.
    """
    Project = apps.get_model('project_core', 'Project')
    for project in Project.objects.all():
        project.funding_instrument = project.call.funding_instrument
        project.save()
class Migration(migrations.Migration):
    """Adds ``funding_instrument`` to Project (and its history table) and
    backfills it from each project's call."""
    dependencies = [
        ('project_core', '0167_organisation_display_name'),
    ]
    operations = [
        # 'historicalproject' is presumably the audit/history copy of
        # Project; its FK mirrors the real field but with DO_NOTHING and
        # no DB constraint, as history tables must not block deletions.
        migrations.AddField(
            model_name='historicalproject',
            name='funding_instrument',
            field=models.ForeignKey(blank=True, db_constraint=False, help_text='Funding instrument to which the call belongs', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='project_core.fundinginstrument'),
        ),
        migrations.AddField(
            model_name='project',
            name='funding_instrument',
            field=models.ForeignKey(blank=True, help_text='Funding instrument to which the call belongs', null=True, on_delete=django.db.models.deletion.PROTECT, to='project_core.fundinginstrument'),
        ),
        # NOTE(review): no reverse_code is provided, so this migration
        # cannot be unapplied cleanly -- confirm that is intended.
        migrations.RunPython(
            copy_funding_instruments_from_calls_to_projects
        )
    ]
# ---- file boundary (extraction artifact) ----
import json
import argparse
def contains(splits):
    """Build an O(1)-membership map of the image ids selected by `splits`.

    Args:
        splits: list of strings, each either a single id (e.g. ``'7'``)
            or an inclusive range (e.g. ``'3-9'``).

    Returns:
        tuple: ``(A, MIN, MAX)`` where ``A[i] == 1`` iff image id
        ``i + MIN`` is selected, and MIN/MAX are the smallest/largest
        ids mentioned anywhere in `splits`.
    """
    MAX = max(int(x.split('-')[-1]) for x in splits)
    MIN = min(int(x.split('-')[0]) for x in splits)
    A = [0] * (MAX - MIN + 1)
    for sp in splits:
        if '-' in sp:
            beg, end = (int(x) for x in sp.split('-'))
        else:
            beg = end = int(sp)
        # Fix: removed a leftover debug print() of every selected index.
        for idx in range(beg - MIN, end + 1 - MIN):
            A[idx] = 1
    return A, MIN, MAX
# CLI: cut a JSON dataset down to the images (and their annotations)
# whose ids fall in the given split ranges, e.g. `script.py data.json 1-100 150`.
if __name__=='__main__':
    ap = argparse.ArgumentParser()
    ap.add_argument('json', help='Path to JSON dataset file')
    ap.add_argument('split', nargs='+', help='Dataset split for splitting')
    ap.add_argument('--out', help='Path to output JSON file', default='cut_dataset.json')
    args = ap.parse_args()
    with open(args.json, 'r') as f:
        obj = json.load(f)
    # Binary selection map over the id range covered by the splits.
    A, MIN, MAX = contains(args.split)
    imgs, anns = [], []
    for img in obj['images']:
        if img['id'] >= MIN and img['id'] <= MAX:
            if A[img['id']-MIN]:
                # NOTE(review): rescanning all annotations per selected image
                # is O(images * annotations); fine for small datasets.
                ANN = [ann for ann in obj['annotations'] if ann['image_id']==img['id']]
                anns.extend(ANN)
                imgs.append(img)
    # NOTE(review): assumes the input JSON has a 'classes' key -- confirm
    # against the dataset format (standard COCO uses 'categories').
    with open(args.out, 'w') as f:
        json.dump({'images': imgs, 'annotations': anns, 'classes': obj['classes'], 'categories': []}, f)
# ---- file boundary (extraction artifact) ----
from sklearn.base import BaseEstimator
import numpy as np
from sklearn.base import clone
from .logs.loggers import get_logger
import math
class DeepModel(BaseEstimator):
    """Boosting-style ensemble: one clone of `estimator` per entry of
    `depths` is fit on the residual left by the previous stages, and
    predictions are the sum over all stages.

    NOTE(review): `n_estimators` and `learning_rate` are stored for the
    sklearn API but are currently unused by fit/predict -- confirm
    whether they should influence training.
    """

    def __init__(self, estimator, depths, n_estimators=100,
                 learning_rate=0.01, verbose=True, logging=None, logging_params={}):
        # NOTE(review): mutable default `logging_params={}` is only read
        # and forwarded here, so it is harmless, but consider None.
        self.n_estimators = n_estimators
        self.learning_rate = learning_rate
        self.verbose = verbose
        self.depths = depths
        self.estimator = estimator
        self.logger = get_logger(logging, 'DeepModel', logging_params)

    def fit(self, X_train, y_train):
        """Fit one cloned estimator per entry of ``self.depths`` on the
        running residual of ``y_train``.

        Returns:
            self, per the scikit-learn estimator contract.
        """
        self.models = []
        self.logger('Training...')
        # Work on a copy so the caller's target array is not mutated.
        feed = y_train.copy()
        for depth in self.depths:
            self.logger(f"Depth: {depth}")
            model = clone(self.estimator)
            model.fit(X_train, feed)
            self.models.append(model)
            preds = model.predict(X_train)
            feed -= preds
            self.logger('%.15f' % np.mean(abs(feed)))
        # Fix: fit() now returns self (sklearn convention); it returned
        # None before.
        return self

    def predict(self, X_test):
        """Sum the predictions of all fitted stage models."""
        preds = np.zeros(X_test.shape[0])
        for model in self.models:
            preds += model.predict(X_test)
        # Fix: removed an unreachable duplicated `return preds`.
        return preds
class EarlyStoppingError(Exception):
    """Raised by EarlyStopping once the tracked metric has stopped
    improving for more than `patience` consecutive updates."""
    pass
class EarlyStopping:
    """Tracks a metric and raises EarlyStoppingError after more than
    `patience` consecutive updates whose improvement over the best value
    seen so far is below `threshold`.
    """

    def __init__(self, direction, patience=100, threshold=1e-3):
        if direction == 'maximize':
            self.best = -math.inf
            self.fn = max
        else:
            self.best = math.inf
            self.fn = min
        self.count = 0
        self.threshold = threshold
        self.patience = patience

    def __call__(self, value):
        candidate = self.fn(self.best, value)
        improved = abs(candidate - self.best) >= self.threshold
        if improved:
            # Significant improvement: reset the stall counter.
            self.count = 0
            self.best = candidate
        else:
            self.count += 1
            if self.count > self.patience:
                raise EarlyStoppingError()
################################################################################
# COPYRIGHT(c) 2018 STMicroelectronics #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# 1. Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# 2. Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# 3. Neither the name of STMicroelectronics nor the names of its #
# contributors may be used to endorse or promote products derived from #
# this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" #
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE #
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
################################################################################
"""ble_advertising_data_parser
The ble_advertising_data_parser module contains tools to parse the advertising
data coming from Bluetooth devices and recognized by the BlueSTSDK.
"""
# IMPORT
import blue_st_sdk.node
from blue_st_sdk.utils.blue_st_exceptions import InvalidBLEAdvertisingDataException
# CLASSES
class BLEAdvertisingDataParser(object):
    """Parse the advertising data sent by a device that follows the BlueST
    protocol.

    It raises an exception if the advertising data is not valid.
    """

    # Note: the Bluepy library hides the field-type.
    ADVERTISING_DATA_MANUFACTURER_LENGTH_1 = 7
    """Allowed length for the advertising data manufacturer in bytes."""

    ADVERTISING_DATA_MANUFACTURER_LENGTH_2 = 13
    """Allowed length for the advertising data manufacturer in bytes."""

    VERSION_PROTOCOL_SUPPORTED_MIN = 0x01
    """Minimum version protocol supported."""

    VERSION_PROTOCOL_SUPPORTED_MAX = 0x01
    """Maximum version protocol supported."""

    _COMPLETE_LOCAL_NAME = 0x09
    """Code identifier for the complete local name."""

    _TX_POWER = 0x0A
    """Code identifier for the transmission power."""

    _MANUFACTURER_SPECIFIC_DATA = 0xFF
    """Code identifier for the manufacturer data."""

    _NAME_UNKNOWN = 'UNKNOWN'
    """Unknown name."""

    def __init__(self, advertising_data):
        """Constructor.

        Args:
            advertising_data (str): BLE advertising_data.

        Raises:
            :exc:`blue_st_sdk.utils.blue_st_exceptions.InvalidBLEAdvertisingDataException`
            is raised if the advertising data is not well formed.
        """
        # Device name (str).
        self._name = self._NAME_UNKNOWN
        # Device transmission power (int).
        self._tx_power = -1
        # Device MAC address (str).
        self._address = None
        # Bitmask that keeps track of the available features (int).
        self._feature_mask = -1
        # Device identifier (int).
        self._device_id = -1
        # Device Protocol Version (int).
        self._protocol_version = -1
        # Board's type (NodeType).
        self._board_type = None
        # Board in sleeping status (bool).
        self._board_sleeping = None
        # Manufacturer specific data (str).
        self._manufacturer_specific_data = None

        # Each entry is a (field-type, description, value) tuple as
        # exposed by the underlying BLE library.
        for data in advertising_data:
            if data[0] == self._COMPLETE_LOCAL_NAME:
                self._name = data[2].encode('utf-8')
            elif data[0] == self._TX_POWER:
                self._tx_power = data[2]
            elif data[0] == self._MANUFACTURER_SPECIFIC_DATA:
                self._manufacturer_specific_data = data[2]

        if self._manufacturer_specific_data is None:
            raise InvalidBLEAdvertisingDataException(
                ' ' + self._name + ': ' \
                '"Manufacturer specific data" is mandatory: ' \
                'the advertising data does not contain it.'
            )

        # Parse manufacturer specific data.
        # Fix: removed a redundant `try/except ... raise e` wrapper that
        # only re-raised the same exception.
        self._parse_manufacturer_specific_data(self._manufacturer_specific_data)

    def _parse_manufacturer_specific_data(self, manufacturer_specific_data):
        """Parse the manufacturer specific data.

        Args:
            manufacturer_specific_data (str): The manufacturer specific data.

        Raises:
            :exc:`blue_st_sdk.utils.blue_st_exceptions.InvalidBLEAdvertisingDataException`
            is raised if the advertising data is not well formed.
        """
        # Adding 1 byte of the field-type, which is hidden by the Bluepy
        # library.
        # NOTE(review): str.decode('hex') exists only on Python 2; this
        # module appears to target Python 2 -- confirm before porting.
        length = len(manufacturer_specific_data.decode('hex')) + 1
        if length != self.ADVERTISING_DATA_MANUFACTURER_LENGTH_1 and length != self.ADVERTISING_DATA_MANUFACTURER_LENGTH_2:
            raise InvalidBLEAdvertisingDataException(
                ' ' + self._name + ': ' \
                '"Manufacturer specific data" must be of length "' \
                + str(self.ADVERTISING_DATA_MANUFACTURER_LENGTH_1) + '" or "' \
                + str(self.ADVERTISING_DATA_MANUFACTURER_LENGTH_2) + '", not "' + str(length) + '".'
            )

        self._protocol_version = int(manufacturer_specific_data[0:2], 16)
        if (self._protocol_version < self.VERSION_PROTOCOL_SUPPORTED_MIN) or \
           (self._protocol_version > self.VERSION_PROTOCOL_SUPPORTED_MAX):
            raise InvalidBLEAdvertisingDataException(
                ' ' + self._name + ': ' \
                'Protocol version "' + str(self._protocol_version) + '" unsupported. ' \
                'Version must be in [' + str(self.VERSION_PROTOCOL_SUPPORTED_MIN) + '..' + str(self.VERSION_PROTOCOL_SUPPORTED_MAX) + '].'
            )

        self._device_id = int(manufacturer_specific_data[2:4], 16)
        # Devices with bit 0x80 set keep the full byte (Nucleo range in
        # _get_node_type); others keep only the low 5 bits.
        self._device_id = self._device_id & 0xFF if self._device_id & 0x80 == 0x80 else self._device_id & 0x1F
        # Fix: removed a redundant `try/except ... raise e` wrapper.
        self._board_type = self._get_node_type(self._device_id)
        self._board_sleeping = self._get_node_sleeping_status(int(manufacturer_specific_data[2:4], 16))
        self._feature_mask = int(manufacturer_specific_data[4:12], 16)
        self._address = manufacturer_specific_data[12:24] if length == self.ADVERTISING_DATA_MANUFACTURER_LENGTH_2 else None

    def _get_node_type(self, device_id):
        """Get the node's type.

        Args:
            device_id (int): Device identifier.

        Returns:
            :class:`blue_st_sdk.node.NodeType`: The node's type.
        """
        temp = int(device_id & 0xFF)
        if temp == 0x01:
            return blue_st_sdk.node.NodeType.STEVAL_WESU1
        if temp == 0x02:
            return blue_st_sdk.node.NodeType.SENSOR_TILE
        if temp == 0x03:
            return blue_st_sdk.node.NodeType.BLUE_COIN
        if temp == 0x04:
            return blue_st_sdk.node.NodeType.STEVAL_IDB008VX
        if temp >= 0x80 and temp <= 0xFF:
            return blue_st_sdk.node.NodeType.NUCLEO
        return blue_st_sdk.node.NodeType.GENERIC

    @classmethod
    def _get_node_sleeping_status(cls, node_type):
        """Parse the node type field to check whether the board is sleeping.

        Args:
            node_type (int): Node type.

        Returns:
            True if the board is sleeping, False otherwise.
        """
        # Fix: first parameter of this classmethod was misleadingly named
        # `self`; renamed to `cls` (behavior unchanged).
        return ((node_type & 0x80) != 0x80 and ((node_type & 0x40) == 0x40))

    def get_name(self):
        """Get the device name.

        Returns:
            str: The device name.
        """
        return self._name

    def get_tx_power(self):
        """Get the device transmission power in mdb.

        Returns:
            int: The device transmission power in mdb.
        """
        return self._tx_power

    def get_address(self):
        """Get the device MAC address.

        Returns:
            str: The device MAC address.
        """
        return self._address

    def get_protocol_version(self):
        """Get the device protocol version.

        Returns:
            int: The device protocol version.
        """
        return self._protocol_version

    def get_board_type(self):
        """Get the board's type.

        Returns:
            The board's type.
        """
        return self._board_type

    def get_board_sleeping(self):
        """Get the sleeping status.

        Returns:
            True if the board is sleeping, False otherwise.
        """
        return self._board_sleeping

    def get_device_id(self):
        """Get the device identifier.

        Returns:
            int: The device identifier.
        """
        return self._device_id

    def get_feature_mask(self):
        """Get the bitmask that keeps track of the available features.

        Returns:
            The bitmask that keeps track of the available features.
        """
        return self._feature_mask

    def __str__(self):
        """Print the advertising_data.

        Returns:
            str: A string that contains the advertising_data.
        """
        # Fix: the original concatenated non-string fields (tx power,
        # feature mask, protocol version) to str with '+', which raises
        # TypeError on any call.
        return "Name: {}\n\tTxPower: {}\n\tAddress: {}\n\tFeature Mask: {}\n\tProtocol Version: {}".format(
            self._name, self._tx_power, self._address,
            self._feature_mask, self._protocol_version)
# ---- file boundary (extraction artifact) ----
from django.core.mail import send_mail
from django.shortcuts import render,redirect,reverse
from django.http import HttpResponse
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views import generic
from .models import AgentModel, LeadModel,CategoryModel
from .forms import (
LeadCreationForm,UserCreationForm,AssignAgentForm,
CategoryUpdateForm,
)
from agents.mixin import OrganisorAndLoginRequiredMixin
from django.views.generic import (
TemplateView,CreateView,ListView,
UpdateView,DeleteView,DetailView,
)
class LandingPageView(TemplateView):
    """Public landing page."""
    template_name="leads/index.html"
class SignupView(CreateView):
    """User registration form; redirects to the login page on success."""
    template_name="registration/signup.html"
    form_class=UserCreationForm
    def get_success_url(self):
        return reverse("login")
def index(request):
    """Function-based equivalent of LandingPageView."""
    return render(request,"leads/index.html")
class LeadListView(LoginRequiredMixin,ListView):
    """Lists assigned leads visible to the logged-in user.

    Organisors see every assigned lead of their organisation (unassigned
    ones are exposed separately via context); agents see only the leads
    assigned to themselves.
    """
    template_name="leads/leads_list.html"
    context_object_name="leads"
    def get_queryset(self):
        user=self.request.user
        #queryset of leads for the entire organisation
        if user.is_organisor:
            queryset=LeadModel.objects.filter(
                organisation=user.userprofile,
                agent__isnull=False,
            )
        else:
            queryset=LeadModel.objects.filter(
                organisation = user.agent.organisation,
                agent__isnull=False,
            )
            #filter for the agent that is logged in
            queryset=queryset.filter(agent__user=user)
        return queryset
    def get_context_data(self,**kwargs):
        """Adds `unassigned_leads` to the context for organisor users."""
        context=super(LeadListView,self).get_context_data(**kwargs)
        user=self.request.user
        if user.is_organisor:
            queryset=LeadModel.objects.filter(
                organisation=user.userprofile,
                agent__isnull=True,
            )
            context.update({
                'unassigned_leads':queryset
            })
        return context
def LeadList(request):
    """Function-based lead list (unscoped: shows ALL leads, unlike
    LeadListView which filters by organisation)."""
    leads=LeadModel.objects.all()
    context={
        "leads":leads
    }
    return render(request,"leads/leads_list.html",context)
class LeadCreateView(OrganisorAndLoginRequiredMixin,CreateView):
    """Organisor-only lead creation; attaches the organisor's
    organisation and sends a notification email."""
    template_name="leads/leads_create.html"
    form_class=LeadCreationForm
    def get_success_url(self):
        return reverse("leads:leadlist")
    def form_valid(self,form):
        # Attach the creating organisor's organisation before saving.
        lead = form.save(commit=False)
        lead.organisation = self.request.user.userprofile
        lead.save()
        # NOTE(review): hard-coded placeholder addresses -- confirm these
        # should come from settings before production use.
        send_mail(
            subject="A lead has been created",
            message="Go to the site to see the new lead",
            from_email="test@test.com",
            recipient_list=['test2@test.com']
        )
        return super(LeadCreateView,self).form_valid(form)
def LeadCreate(request):
    """Function-based lead creation form handler."""
    if request.POST:
        form =LeadCreationForm(request.POST)
        if form.is_valid():
            form.save()
            # NOTE(review): redirects to "leadlist" while class-based views
            # use the namespaced "leads:leadlist" -- confirm the URL conf.
            return redirect("leadlist")
    context={
        "LeadCreationForm":LeadCreationForm
    }
    return render(request,"leads/leads_create.html",context)
class LeadDetailView(LoginRequiredMixin,DetailView):
    """Detail page for a single lead, scoped to what the user may see.

    Organisors can open any lead of their organisation; agents only the
    leads assigned to themselves (same scoping as LeadListView).
    """
    template_name="leads/leads_detail.html"
    context_object_name="lead"
    def get_queryset(self):
        user=self.request.user
        #queryset of leads for the entire organisation
        if user.is_organisor:
            queryset=LeadModel.objects.filter(organisation=user.userprofile)
        else:
            queryset=LeadModel.objects.filter(organisation=user.agent.organisation)
            # Fix: was `queryset.filte(...)` (AttributeError at runtime).
            # The agent filter is applied only on the agent branch, so
            # organisors keep access to the whole organisation, matching
            # LeadListView.
            queryset=queryset.filter(agent__user=user)
        return queryset
def LeadDetail(request,pk):
    """Function-based lead detail (unscoped: no organisation check)."""
    lead=LeadModel.objects.get(id=pk)
    context={
        "lead":lead
    }
    return render(request,"leads/leads_detail.html",context)
class LeadUpdateView(OrganisorAndLoginRequiredMixin,UpdateView):
    """Organisor-only lead edit form, scoped to the organisor's
    organisation."""
    template_name="leads/leads_update.html"
    form_class=LeadCreationForm
    context_object_name="lead"
    def get_queryset(self):
        user=self.request.user
        # Only leads of the organisor's own organisation are editable.
        queryset=LeadModel.objects.filter(organisation=user.userprofile)
        return queryset
    def get_success_url(self):
        return reverse("leads:leadlist")
def LeadUpdate(request,pk):
    """Function-based lead edit form handler (unscoped)."""
    lead=LeadModel.objects.get(id=pk)
    form=LeadCreationForm(instance=lead)
    if request.POST:
        form =LeadCreationForm(request.POST,instance=lead)
        if form.is_valid():
            form.save()
            # NOTE(review): non-namespaced "leadlist" vs "leads:leadlist"
            # used elsewhere -- confirm the URL conf.
            return redirect("leadlist")
    context={
        "form":form,
        "lead":lead,
    }
    return render(request,"leads/leads_update.html",context)
class LeadDeleteView(LoginRequiredMixin,DeleteView):
    """Deletes a lead belonging to the organisor's organisation."""
    template_name="leads/leads_delete.html"
    def get_queryset(self):
        user=self.request.user
        # Fix: the queryset was built but never returned, so the view
        # received None from get_queryset and crashed.
        return LeadModel.objects.filter(organisation=user.userprofile)
    def get_success_url(self):
        return reverse("leads:leadlist")
def LeadDelete(request,pk):
    """Function-based lead deletion (unscoped: no organisation check,
    no confirmation page)."""
    lead=LeadModel.objects.get(id=pk)
    lead.delete()
    return redirect("leads:leadlist")
class AssignAgentView(OrganisorAndLoginRequiredMixin,generic.FormView):
    """Organisor-only form assigning an agent to the lead given by the
    `pk` URL kwarg."""
    template_name="leads/assign_agent.html"
    form_class=AssignAgentForm
    def get_form_kwargs(self,**kwargs):
        # Pass the request through so the form can limit the agent
        # choices to the organisor's organisation.
        kwargs=super(AssignAgentView,self).get_form_kwargs(**kwargs)
        kwargs.update({
            "request":self.request
        })
        return kwargs
    def form_valid(self, form):
        agent=form.cleaned_data["agents"]
        lead=LeadModel.objects.get(id=self.kwargs["pk"])
        lead.agent=agent
        lead.save()
        return super(AssignAgentView,self).form_valid(form)
    def get_success_url(self):
        return reverse("leads:leadlist")
class CategoryListView(LoginRequiredMixin,generic.ListView):
    """Lists the categories of the user's organisation, plus the count
    of leads not yet assigned to any category."""
    template_name="leads/category_list.html"
    context_object_name="category_list"
    def get_context_data(self, **kwargs):
        """Adds `unassigned_lead_count` (leads without a category)."""
        context= super(CategoryListView,self).get_context_data(**kwargs)
        user=self.request.user
        if user.is_organisor:
            queryset=LeadModel.objects.filter(organisation=user.userprofile,)
        else:
            queryset=LeadModel.objects.filter(organisation = user.agent.organisation,)
        context.update({
            "unassigned_lead_count":queryset.filter(category__isnull=True).count()
        })
        return context
    def get_queryset(self):
        user=self.request.user
        # Categories scoped to the user's organisation.
        if user.is_organisor:
            queryset=CategoryModel.objects.filter(organisation=user.userprofile,)
        else:
            queryset=CategoryModel.objects.filter(organisation = user.agent.organisation,)
        return queryset
class CategoryDetailView(LoginRequiredMixin,generic.DetailView):
    """Detail page for a category, with the leads that belong to it."""
    template_name="leads/category_detail.html"
    context_object_name="category"
    #direct relation query from models can used to achieve same result <category.leads.all>
    def get_context_data(self, **kwargs):
        context= super(CategoryDetailView,self).get_context_data(**kwargs)
        # `leads` is the reverse FK accessor from the lead model.
        leads=self.get_object().leads.all()
        context.update({
            "leads":leads
        })
        return context
    def get_queryset(self):
        user=self.request.user
        # Categories scoped to the user's organisation.
        if user.is_organisor:
            queryset=CategoryModel.objects.filter(organisation=user.userprofile,)
        else:
            queryset=CategoryModel.objects.filter(organisation = user.agent.organisation,)
        return queryset
class CategoryUpdateView(LoginRequiredMixin,generic.UpdateView):
    """Updates the category of a single lead (the form edits the lead's
    category field, hence the lead queryset below)."""
    template_name="leads/category_update.html"
    form_class=CategoryUpdateForm
    context_object_name="lead"
    def get_queryset(self):
        user=self.request.user
        # Leads scoped to the user's organisation.
        if user.is_organisor:
            queryset=LeadModel.objects.filter(organisation=user.userprofile,)
        else:
            queryset=LeadModel.objects.filter(organisation = user.agent.organisation,)
        return queryset
    def get_success_url(self):
        return reverse("leads:leaddetail",kwargs={"pk":self.get_object().id})
# ---- file boundary (extraction artifact) ----
# Copyright 2016 VMware, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.services.qos import qos_plugin
from oslo_config import cfg
from oslo_log import log as logging
from vmware_nsx._i18n import _
from vmware_nsx.common import exceptions as nsx_exc
LOG = logging.getLogger(__name__)
class NsxVQosPlugin(qos_plugin.QoSPlugin):
    """Service plugin for VMware NSX-v to implement Neutron's Qos API."""
    # Neutron extension aliases served by this plugin.
    supported_extension_aliases = ["qos"]
    def __init__(self):
        LOG.info("Loading VMware NSX-V Qos Service Plugin")
        super(NsxVQosPlugin, self).__init__()
        # QoS on NSX-v relies on DVS features; refuse to load without them.
        if not cfg.CONF.nsxv.use_dvs_features:
            error = _("Cannot use the NSX-V QoS plugin without "
                      "enabling the dvs features")
            raise nsx_exc.NsxPluginException(err_msg=error)
# ---- file boundary (extraction artifact) ----
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import logging
logger = logging.getLogger(__name__)
class RestartHandler:
    """Runs `command` as a subprocess and restarts it whenever the
    observer reports events (e.g. watched files changed).

    NOTE(review): run() loops forever; termination is expected to come
    from outside (signal / KeyboardInterrupt).
    """
    def __init__(self, observer, command):
        # `observer` must provide observe_and_update() returning a truthy
        # value when a restart is needed.
        self.observer = observer
        # `command` is passed straight to subprocess.Popen (list or str).
        self.command = command
    def run(self):
        """Start the command and restart it on every observed change."""
        logger.info("Running restart handler")
        command_process = subprocess.Popen(self.command)
        while True:
            events = self.observer.observe_and_update()
            if events:
                logger.info("Restarting the process")
                # Terminate and reap the old process before respawning.
                command_process.terminate()
                command_process.wait()
                command_process = subprocess.Popen(self.command)
# ---- file boundary (extraction artifact) ----
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = [
'ConnectionAliasAssociation',
'ConnectionAliasTag',
'WorkspaceProperties',
'WorkspaceTag',
]
@pulumi.output_type
class ConnectionAliasAssociation(dict):
    # Auto-generated Pulumi output type (see file header: do not edit by
    # hand). Maps camelCase wire keys to snake_case property getters and
    # warns on direct camelCase dict access.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "associatedAccountId":
            suggest = "associated_account_id"
        elif key == "associationStatus":
            suggest = "association_status"
        elif key == "connectionIdentifier":
            suggest = "connection_identifier"
        elif key == "resourceId":
            suggest = "resource_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ConnectionAliasAssociation. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ConnectionAliasAssociation.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ConnectionAliasAssociation.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 associated_account_id: Optional[str] = None,
                 association_status: Optional['ConnectionAliasAssociationAssociationStatus'] = None,
                 connection_identifier: Optional[str] = None,
                 resource_id: Optional[str] = None):
        if associated_account_id is not None:
            pulumi.set(__self__, "associated_account_id", associated_account_id)
        if association_status is not None:
            pulumi.set(__self__, "association_status", association_status)
        if connection_identifier is not None:
            pulumi.set(__self__, "connection_identifier", connection_identifier)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)
    @property
    @pulumi.getter(name="associatedAccountId")
    def associated_account_id(self) -> Optional[str]:
        return pulumi.get(self, "associated_account_id")
    @property
    @pulumi.getter(name="associationStatus")
    def association_status(self) -> Optional['ConnectionAliasAssociationAssociationStatus']:
        return pulumi.get(self, "association_status")
    @property
    @pulumi.getter(name="connectionIdentifier")
    def connection_identifier(self) -> Optional[str]:
        return pulumi.get(self, "connection_identifier")
    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[str]:
        return pulumi.get(self, "resource_id")
@pulumi.output_type
class ConnectionAliasTag(dict):
    # Auto-generated Pulumi output type: a required key/value tag pair.
    def __init__(__self__, *,
                 key: str,
                 value: str):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> str:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> str:
        return pulumi.get(self, "value")
@pulumi.output_type
class WorkspaceProperties(dict):
    # Auto-generated Pulumi output type (see file header: do not edit by
    # hand). Same camelCase-to-snake_case warning pattern as
    # ConnectionAliasAssociation.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "computeTypeName":
            suggest = "compute_type_name"
        elif key == "rootVolumeSizeGib":
            suggest = "root_volume_size_gib"
        elif key == "runningMode":
            suggest = "running_mode"
        elif key == "runningModeAutoStopTimeoutInMinutes":
            suggest = "running_mode_auto_stop_timeout_in_minutes"
        elif key == "userVolumeSizeGib":
            suggest = "user_volume_size_gib"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceProperties. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        WorkspaceProperties.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        WorkspaceProperties.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 compute_type_name: Optional[str] = None,
                 root_volume_size_gib: Optional[int] = None,
                 running_mode: Optional[str] = None,
                 running_mode_auto_stop_timeout_in_minutes: Optional[int] = None,
                 user_volume_size_gib: Optional[int] = None):
        if compute_type_name is not None:
            pulumi.set(__self__, "compute_type_name", compute_type_name)
        if root_volume_size_gib is not None:
            pulumi.set(__self__, "root_volume_size_gib", root_volume_size_gib)
        if running_mode is not None:
            pulumi.set(__self__, "running_mode", running_mode)
        if running_mode_auto_stop_timeout_in_minutes is not None:
            pulumi.set(__self__, "running_mode_auto_stop_timeout_in_minutes", running_mode_auto_stop_timeout_in_minutes)
        if user_volume_size_gib is not None:
            pulumi.set(__self__, "user_volume_size_gib", user_volume_size_gib)
    @property
    @pulumi.getter(name="computeTypeName")
    def compute_type_name(self) -> Optional[str]:
        return pulumi.get(self, "compute_type_name")
    @property
    @pulumi.getter(name="rootVolumeSizeGib")
    def root_volume_size_gib(self) -> Optional[int]:
        return pulumi.get(self, "root_volume_size_gib")
    @property
    @pulumi.getter(name="runningMode")
    def running_mode(self) -> Optional[str]:
        return pulumi.get(self, "running_mode")
    @property
    @pulumi.getter(name="runningModeAutoStopTimeoutInMinutes")
    def running_mode_auto_stop_timeout_in_minutes(self) -> Optional[int]:
        return pulumi.get(self, "running_mode_auto_stop_timeout_in_minutes")
    @property
    @pulumi.getter(name="userVolumeSizeGib")
    def user_volume_size_gib(self) -> Optional[int]:
        return pulumi.get(self, "user_volume_size_gib")
@pulumi.output_type
class WorkspaceTag(dict):
    # Auto-generated Pulumi output type: a required key/value tag pair.
    def __init__(__self__, *,
                 key: str,
                 value: str):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> str:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> str:
        return pulumi.get(self, "value")
# ---- file boundary (extraction artifact) ----
import numpy as np
from scipy.constants import mu_0
# TODO: make this to take a vector rather than a single frequency
def rTEfunfwd(nlay, f, lamda, sig, chi, depth, HalfSwitch):
    """
    Compute reflection coefficients for Transverse Electric (TE) mode.
    Only one for loop for multiple layers. Do not use for loop for lambda,
    which has 801 times of loops (actually, this makes the code really slow).

    Parameters
    ----------
    nlay : int
        Number of layers.
    f : float
        Frequency (Hz).
    lamda : ndarray
        Hankel-transform variable, one entry per evaluation point.
    sig : ndarray
        Layer conductivities (length nlay).
    chi : ndarray
        Layer magnetic susceptibilities (length nlay).
    depth : ndarray
        Layer-top depths (length nlay); successive differences give the
        layer thicknesses.
    HalfSwitch : bool
        If True, treat the model as a homogeneous half-space and skip the
        layer recursion.

    Returns
    -------
    ndarray of complex
        rTE evaluated at each entry of `lamda`.
    """
    # Cleanup: removed dead pre-allocations that were immediately
    # overwritten and the unused M00/M10/M01/M11 lists of the original.
    w = 2*np.pi*f
    thick = -np.diff(depth)

    # Air/first-layer interface matrix M0 (stored entrywise; each entry
    # is a vector over lamda).
    utemp0 = lamda
    utemp1 = np.sqrt(lamda**2 + 1j*w*mu_0*(1+chi[0])*sig[0])
    const = mu_0*utemp1/(mu_0*(1+chi[0])*utemp0)
    M0sum00 = 0.5*(1+const)
    M0sum10 = 0.5*(1-const)
    M0sum01 = 0.5*(1-const)
    M0sum11 = 0.5*(1+const)

    if HalfSwitch:
        # Homogeneous half-space: the propagation product is just M0.
        M1sum00, M1sum10, M1sum01, M1sum11 = M0sum00, M0sum10, M0sum01, M0sum11
    else:
        # Accumulate the interface matrices layer by layer:
        # M <- M * Mtemp(j), entrywise 2x2 multiplication.
        for j in range(nlay-1):
            utemp0 = np.sqrt(lamda**2 + 1j*w*mu_0*(1+chi[j])*sig[j])
            utemp1 = np.sqrt(lamda**2 + 1j*w*mu_0*(1+chi[j+1])*sig[j+1])
            const = mu_0*(1+chi[j])*utemp1/(mu_0*(1+chi[j+1])*utemp0)
            h0 = thick[j]
            Mtemp00 = 0.5*(1. + const)*np.exp(-2.*utemp0*h0)
            Mtemp10 = 0.5*(1. - const)
            Mtemp01 = 0.5*(1. - const)*np.exp(-2.*utemp0*h0)
            Mtemp11 = 0.5*(1. + const)
            M1sum00 = M0sum00*Mtemp00 + M0sum01*Mtemp10
            M1sum10 = M0sum10*Mtemp00 + M0sum11*Mtemp10
            M1sum01 = M0sum00*Mtemp01 + M0sum01*Mtemp11
            M1sum11 = M0sum10*Mtemp01 + M0sum11*Mtemp11
            M0sum00, M0sum10, M0sum01, M0sum11 = M1sum00, M1sum10, M1sum01, M1sum11
        # Robustness: with nlay == 1 the loop never runs; fall back to M0
        # (the original left M1sum as zeros, yielding 0/0 -> NaN).
        M1sum00, M1sum10, M1sum01, M1sum11 = M0sum00, M0sum10, M0sum01, M0sum11

    rTE = M1sum01/M1sum11
    return rTE
def matmul(a00, a10, a01, a11, b00, b10, b01, b11):
    """
    Element-wise 2x2 matrix product C = A * B.

    Each argument is one entry of a 2x2 matrix (scalar or vector):
        C = [a00 a01] * [b00 b01]
            [a10 a11]   [b10 b11]
    Returns the tuple (c00, c10, c01, c11).
    """
    return (
        a00*b00 + a01*b10,
        a10*b00 + a11*b10,
        a00*b01 + a01*b11,
        a10*b01 + a11*b11,
    )
# TODO: make this to take a vector rather than a single frequency
def rTEfunjac(nlay, f, lamda, sig, chi, depth, HalfSwitch):
    """
    Compute reflection coefficients for Transverse Electric (TE) mode.
    Only one for loop for multiple layers. Do not use for loop for lambda,
    which has 801 times of loops (actually, this makes the code really slow).

    Returns drTE, the (nlay, lamda.size) complex sensitivity of the TE
    reflection coefficient with respect to each layer conductivity sig[i],
    built from per-layer propagator matrices M and their derivatives dJ.
    """
    # Initializing arrays
    Mtemp00 = np.zeros(lamda.size, dtype=complex)
    Mtemp10 = np.zeros(lamda.size, dtype=complex)
    Mtemp01 = np.zeros(lamda.size, dtype=complex)
    Mtemp11 = np.zeros(lamda.size, dtype=complex)
    M1sum00 = np.zeros(lamda.size, dtype=complex)
    M1sum10 = np.zeros(lamda.size, dtype=complex)
    M1sum01 = np.zeros(lamda.size, dtype=complex)
    M1sum11 = np.zeros(lamda.size, dtype=complex)
    M0sum00 = np.zeros(lamda.size, dtype=complex)
    M0sum10 = np.zeros(lamda.size, dtype=complex)
    M0sum01 = np.zeros(lamda.size, dtype=complex)
    M0sum11 = np.zeros(lamda.size, dtype=complex)
    # NOTE(review): the dMtemp*/dj0temp*/dj1temp* buffers below appear unused
    # (the loop builds dj0Mtemp*/dj1Mtemp* locals instead); kept as-is.
    dMtemp00 = np.zeros(lamda.size, dtype=complex)
    dMtemp10 = np.zeros(lamda.size, dtype=complex)
    dMtemp01 = np.zeros(lamda.size, dtype=complex)
    dMtemp11 = np.zeros(lamda.size, dtype=complex)
    dj0temp00 = np.zeros(lamda.size, dtype=complex)
    dj0temp10 = np.zeros(lamda.size, dtype=complex)
    dj0temp01 = np.zeros(lamda.size, dtype=complex)
    dj0temp11 = np.zeros(lamda.size, dtype=complex)
    dj1temp00 = np.zeros(lamda.size, dtype=complex)
    dj1temp10 = np.zeros(lamda.size, dtype=complex)
    dj1temp01 = np.zeros(lamda.size, dtype=complex)
    dj1temp11 = np.zeros(lamda.size, dtype=complex)
    # Layer thicknesses from the depth vector (depths decrease downward).
    thick = -np.diff(depth)
    w = 2*np.pi*f
    rTE = np.zeros(lamda.size, dtype=complex)
    drTE = np.zeros((nlay, lamda.size) , dtype=complex)
    utemp0 = np.zeros(lamda.size, dtype=complex)
    utemp1 = np.zeros(lamda.size, dtype=complex)
    const = np.zeros(lamda.size, dtype=complex)
    # Air/layer-0 interface quantities (u0 = lamda in the air half-space).
    utemp0 = lamda
    utemp1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[0])*sig[0])
    const = mu_0*utemp1/(mu_0*(1+chi[0])*utemp0)
    # Compute M1
    Mtemp00 = 0.5*(1+const)
    Mtemp10 = 0.5*(1-const)
    Mtemp01 = 0.5*(1-const)
    Mtemp11 = 0.5*(1+const)
    # NOTE(review): utemp0/utemp1/const are recomputed here with identical
    # values; redundant but harmless.
    utemp0 = lamda
    utemp1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[0])*sig[0])
    const = mu_0*utemp1/(mu_0*(1+chi[0])*utemp0)
    # Compute dM1du1
    dj0Mtemp00 = 0.5*(mu_0/(mu_0*(1+chi[0])*utemp0))
    dj0Mtemp10 = -0.5*(mu_0/(mu_0*(1+chi[0])*utemp0))
    dj0Mtemp01 = -0.5*(mu_0/(mu_0*(1+chi[0])*utemp0))
    dj0Mtemp11 = 0.5*(mu_0/(mu_0*(1+chi[0])*utemp0))
    # TODO: for computing Jacobian
    # Per-layer matrices (M*) and per-layer derivative products (dJ*),
    # accumulated one interface at a time.
    M00 = []
    M10 = []
    M01 = []
    M11 = []
    dJ00 = []
    dJ10 = []
    dJ01 = []
    dJ11 = []
    M00.append(Mtemp00)
    M01.append(Mtemp01)
    M10.append(Mtemp10)
    M11.append(Mtemp11)
    M0sum00 = Mtemp00.copy()
    M0sum10 = Mtemp10.copy()
    M0sum01 = Mtemp01.copy()
    M0sum11 = Mtemp11.copy()
    if HalfSwitch == True:
        # Half-space: the full propagator is just the first interface matrix.
        M1sum00 = np.zeros(lamda.size, dtype=complex)
        M1sum10 = np.zeros(lamda.size, dtype=complex)
        M1sum01 = np.zeros(lamda.size, dtype=complex)
        M1sum11 = np.zeros(lamda.size, dtype=complex)
        M1sum00 = M0sum00.copy()
        M1sum10 = M0sum10.copy()
        M1sum01 = M0sum01.copy()
        M1sum11 = M0sum11.copy()
    else:
        # Forward pass: accumulate the propagator product and per-layer
        # derivative blocks for each internal interface.
        for j in range (nlay-1):
            dJ_10Mtemp00 = np.zeros(lamda.size, dtype=complex)
            dJ_10Mtemp10 = np.zeros(lamda.size, dtype=complex)
            dJ_10Mtemp01 = np.zeros(lamda.size, dtype=complex)
            dJ_10Mtemp11 = np.zeros(lamda.size, dtype=complex)
            dJ01Mtemp00 = np.zeros(lamda.size, dtype=complex)
            dJ01Mtemp10 = np.zeros(lamda.size, dtype=complex)
            dJ01Mtemp01 = np.zeros(lamda.size, dtype=complex)
            dJ01Mtemp11 = np.zeros(lamda.size, dtype=complex)
            utemp0 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[j])*sig[j])
            utemp1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[j+1])*sig[j+1])
            const = mu_0*(1+chi[j])*utemp1/(mu_0*(1+chi[j+1])*utemp0)
            h0 = thick[j]
            Mtemp00 = 0.5*(1.+ const)*np.exp(-2.*utemp0*h0)
            Mtemp10 = 0.5*(1.- const)
            Mtemp01 = 0.5*(1.- const)*np.exp(-2.*utemp0*h0)
            Mtemp11 = 0.5*(1.+ const)
            M1sum00, M1sum10, M1sum01, M1sum11 = matmul(
                M0sum00, M0sum10, M0sum01, M0sum11,
                Mtemp00, Mtemp10, Mtemp01, Mtemp11
            )
            M0sum00 = M1sum00
            M0sum10 = M1sum10
            M0sum01 = M1sum01
            M0sum11 = M1sum11
            # TODO: for Computing Jacobian
            # Chain rule: du/dsig for layer j.
            dudsig = 0.5*1j*w*mu_0*(1+chi[j])/utemp0
            if j==0:
                const1a = mu_0*(1+chi[j])*utemp1/(mu_0*(1+chi[j+1])*utemp0**2)
                const1b = const1a*utemp0
                dj1Mtemp00 = -0.5*const1a*np.exp(-2.*utemp0*h0)-h0*(1+const1b)*np.exp(-2.*utemp0*h0)
                dj1Mtemp10 = 0.5*const1a
                dj1Mtemp01 = 0.5*const1a*np.exp(-2.*utemp0*h0)-h0*(1-const1b)*np.exp(-2.*utemp0*h0)
                dj1Mtemp11 = -0.5*const1a
                #Compute dM1dm1*M2
                dJ_10Mtemp00, dJ_10Mtemp10, dJ_10Mtemp01, dJ_10Mtemp11 = matmul(dj0Mtemp00, dj0Mtemp10, dj0Mtemp01, dj0Mtemp11, Mtemp00, Mtemp10, Mtemp01, Mtemp11)
                #Compute M1*dM2dm1
                dJ01Mtemp00, dJ01Mtemp10, dJ01Mtemp01, dJ01Mtemp11 = matmul(M00[j], M10[j], M01[j], M11[j], dj1Mtemp00, dj1Mtemp10, dj1Mtemp01, dj1Mtemp11)
                dJ00.append(dudsig*(dJ_10Mtemp00+dJ01Mtemp00))
                dJ10.append(dudsig*(dJ_10Mtemp10+dJ01Mtemp10))
                dJ01.append(dudsig*(dJ_10Mtemp01+dJ01Mtemp01))
                dJ11.append(dudsig*(dJ_10Mtemp11+dJ01Mtemp11))
            else:
                # Interior layer j: its conductivity enters both the j-1 -> j
                # interface (via dj0Mtemp*) and the j -> j+1 interface (dj1Mtemp*).
                h_1 = thick[j-1]
                utemp_1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[j-1])*sig[j-1])
                const0 = mu_0*(1+chi[j-1])/(mu_0*(1+chi[j])*utemp_1)
                dj0Mtemp00 = 0.5*(const0)*np.exp(-2.*utemp_1*h_1)
                dj0Mtemp10 = -0.5*(const0)
                dj0Mtemp01 = -0.5*(const0)*np.exp(-2.*utemp_1*h_1)
                dj0Mtemp11 = 0.5*(const0)
                const1a = mu_0*(1+chi[j])*utemp1/(mu_0*(1+chi[j+1])*utemp0**2)
                const1b = const1a*utemp0
                dj1Mtemp00 = -0.5*const1a*np.exp(-2.*utemp0*h0)-h0*(1+const1b)*np.exp(-2.*utemp0*h0)
                dj1Mtemp10 = 0.5*const1a
                dj1Mtemp01 = 0.5*const1a*np.exp(-2.*utemp0*h0)-h0*(1-const1b)*np.exp(-2.*utemp0*h0)
                dj1Mtemp11 = -0.5*const1a
                #Compute dMjdmj*Mj+1
                dJ_10Mtemp00, dJ_10Mtemp10, dJ_10Mtemp01, dJ_10Mtemp11 = matmul(dj0Mtemp00, dj0Mtemp10, dj0Mtemp01, dj0Mtemp11, Mtemp00, Mtemp10, Mtemp01, Mtemp11)
                #Compute Mj*dMj+1dmj
                dJ01Mtemp00, dJ01Mtemp10, dJ01Mtemp01, dJ01Mtemp11 = matmul(M00[j], M10[j], M01[j], M11[j], dj1Mtemp00, dj1Mtemp10, dj1Mtemp01, dj1Mtemp11)
                dJ00.append(dudsig*(dJ_10Mtemp00+dJ01Mtemp00))
                dJ10.append(dudsig*(dJ_10Mtemp10+dJ01Mtemp10))
                dJ01.append(dudsig*(dJ_10Mtemp01+dJ01Mtemp01))
                dJ11.append(dudsig*(dJ_10Mtemp11+dJ01Mtemp11))
            M00.append(Mtemp00)
            M01.append(Mtemp01)
            M10.append(Mtemp10)
            M11.append(Mtemp11)
    # rTE = M1sum01/M1sum11
    if HalfSwitch == True:
        # Half-space derivative: single interface, single conductivity.
        utemp0 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[0])*sig[0])
        dudsig = 0.5*1j*w*mu_0*(1+chi[0])/utemp0
        dJ1sum00 = np.zeros(lamda.size, dtype=complex)
        dJ1sum10 = np.zeros(lamda.size, dtype=complex)
        dJ1sum01 = np.zeros(lamda.size, dtype=complex)
        dJ1sum11 = np.zeros(lamda.size, dtype=complex)
        dJ1sum00 = dudsig*dj0Mtemp00
        dJ1sum10 = dudsig*dj0Mtemp10
        dJ1sum01 = dudsig*dj0Mtemp01
        dJ1sum11 = dudsig*dj0Mtemp11
        # Quotient rule on rTE = M01/M11 (rebinds drTE to a 1-D array here).
        drTE = dJ1sum01/M1sum11 - M1sum01/(M1sum11**2)*dJ1sum11
    else:
        #j = nlay
        # Derivative blocks for the deepest layer (index nlay-1).
        utemp0 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[nlay-1])*sig[nlay-1])
        # NOTE(review): chi[j] below uses the stale loop index (j == nlay-2
        # after the loop ends), while the line above uses chi[nlay-1] --
        # presumably chi[nlay-1] was intended here too; confirm before fixing.
        dudsig = 0.5*1j*w*mu_0*(1+chi[j])/utemp0
        h_1 = thick[nlay-2]
        utemp_1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[nlay-2])*sig[nlay-2])
        const0 = mu_0*(1+chi[nlay-2])/(mu_0*(1+chi[nlay-1])*utemp_1)
        dj0Mtemp00 = 0.5*(const0)*np.exp(-2.*utemp_1*h_1)
        dj0Mtemp10 = -0.5*(const0)
        dj0Mtemp01 = -0.5*(const0)*np.exp(-2.*utemp_1*h_1)
        dj0Mtemp11 = 0.5*(const0)
        dJ_10Mtemp00 = dj0Mtemp00
        dJ_10Mtemp10 = dj0Mtemp10
        dJ_10Mtemp01 = dj0Mtemp01
        dJ_10Mtemp11 = dj0Mtemp11
        dJ00.append(dudsig*dJ_10Mtemp00)
        dJ10.append(dudsig*dJ_10Mtemp10)
        dJ01.append(dudsig*dJ_10Mtemp01)
        dJ11.append(dudsig*dJ_10Mtemp11)
        # For each layer i, rebuild the full matrix product with layer i's
        # matrix replaced by its derivative block dJ[i], then apply the
        # quotient rule on rTE = M01/M11.
        for i in range (nlay):
            dJ0sum00 = np.zeros(lamda.size, dtype=complex)
            dJ0sum10 = np.zeros(lamda.size, dtype=complex)
            dJ0sum01 = np.zeros(lamda.size, dtype=complex)
            dJ0sum11 = np.zeros(lamda.size, dtype=complex)
            dJ1sum00 = np.zeros(lamda.size, dtype=complex)
            dJ1sum10 = np.zeros(lamda.size, dtype=complex)
            dJ1sum01 = np.zeros(lamda.size, dtype=complex)
            dJ1sum11 = np.zeros(lamda.size, dtype=complex)
            if i==0:
                # First layer: dJ[0] already contains M1-related factors,
                # so multiply by the remaining matrices M[2:].
                for j in range (nlay-2):
                    if j==0:
                        dJ1sum00, dJ1sum10, dJ1sum01, dJ1sum11 = matmul(
                            dJ00[i], dJ10[i], dJ01[i], dJ11[i], M00[j+2], M10[j+2], M01[j+2], M11[j+2]
                        )
                    else:
                        dJ1sum00, dJ1sum10, dJ1sum01, dJ1sum11 = matmul(
                            dJ0sum00, dJ0sum10, dJ0sum01, dJ0sum11, M00[j+2], M10[j+2], M01[j+2], M11[j+2]
                        )
                    dJ0sum00 = dJ1sum00
                    dJ0sum10 = dJ1sum10
                    dJ0sum01 = dJ1sum01
                    dJ0sum11 = dJ1sum11
            elif (i>0) & (i<nlay-1):
                # Interior layer: product runs M[0] ... dJ[i] ... M[nlay-1],
                # skipping the two matrices dJ[i] already accounts for.
                dJ0sum00 = M00[0]
                dJ0sum10 = M10[0]
                dJ0sum01 = M01[0]
                dJ0sum11 = M11[0]
                for j in range (nlay-2):
                    if j==i-1:
                        dJ1sum00, dJ1sum10, dJ1sum01, dJ1sum11 = matmul(
                            dJ0sum00, dJ0sum10, dJ0sum01, dJ0sum11, dJ00[i], dJ10[i], dJ01[i], dJ11[i]
                        )
                    elif j < i-1:
                        dJ1sum00, dJ1sum10, dJ1sum01, dJ1sum11 = matmul(
                            dJ0sum00, dJ0sum10, dJ0sum01, dJ0sum11, M00[j+1], M10[j+1], M01[j+1], M11[j+1]
                        )
                    elif j > i-1:
                        dJ1sum00, dJ1sum10, dJ1sum01, dJ1sum11 = matmul(
                            dJ0sum00, dJ0sum10, dJ0sum01, dJ0sum11, M00[j+2], M10[j+2], M01[j+2], M11[j+2]
                        )
                    dJ0sum00 = dJ1sum00
                    dJ0sum10 = dJ1sum10
                    dJ0sum01 = dJ1sum01
                    dJ0sum11 = dJ1sum11
            elif i==nlay-1:
                # Deepest layer: multiply all shallower matrices, then dJ[i].
                dJ0sum00 = M00[0]
                dJ0sum10 = M10[0]
                dJ0sum01 = M01[0]
                dJ0sum11 = M11[0]
                for j in range (nlay-1):
                    if j < nlay-2:
                        dJ1sum00, dJ1sum10, dJ1sum01, dJ1sum11 = matmul(
                            dJ0sum00, dJ0sum10, dJ0sum01, dJ0sum11, M00[j+1], M10[j+1], M01[j+1], M11[j+1]
                        )
                    elif j == nlay-2:
                        dJ1sum00, dJ1sum10, dJ1sum01, dJ1sum11 = matmul(
                            dJ0sum00, dJ0sum10, dJ0sum01, dJ0sum11, dJ00[i], dJ10[i], dJ01[i], dJ11[i]
                        )
                    dJ0sum00 = dJ1sum00
                    dJ0sum10 = dJ1sum10
                    dJ0sum01 = dJ1sum01
                    dJ0sum11 = dJ1sum11
            drTE[i, :] = dJ1sum01/M1sum11 - M1sum01/(M1sum11**2)*dJ1sum11
    return drTE
# return rTE, drTE
| nilq/baby-python | python |
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseForbidden
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from . import models
from . import serializers
class JSONResponse(HttpResponse):
    """
    An HttpResponse whose body is the JSON rendering of the given data.
    """
    def __init__(self, data, **kwargs):
        kwargs['content_type'] = 'application/json'
        rendered = JSONRenderer().render(data)
        super(JSONResponse, self).__init__(rendered, **kwargs)
def root(request):
    """Return basic service metadata as a JSON document."""
    payload = {"name": "The DataShed Annotation Store.", "version": "0.0.1"}
    return JSONResponse(payload)
@csrf_exempt
def index_create(request):
    """GET: list all annotations as JSON. POST: create one from the JSON body."""
    if request.method == "GET":
        annotations = models.Annotation.objects.all()
        serializer = serializers.AnnotationSerializer(annotations, many=True)
        return JSONResponse(serializer.data)
    if request.method == "POST":
        data = JSONParser().parse(request)
        serializer = serializers.AnnotationSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
            return JSONResponse(serializer.data, status=201)
            # TODO: The below is what *should* happen...
            # NOTE(review): unreachable -- the return above exits first; kept as
            # the author's illustration of the intended 303 redirect.
            response = HttpResponse(status=303)
            response["Location"] = reverse("read_update_delete",
                                           kwargs={"pk": serializer.data["id"]})
            return response
        else:
            return HttpResponseForbidden(str(serializer.errors))
    else:
        return HttpResponseForbidden()
@csrf_exempt
def read_update_delete(request, pk):
    """Retrieve (GET), replace (PUT) or remove (DELETE) a single annotation."""
    if request.method == "GET":
        annotation = get_object_or_404(models.Annotation, pk=pk)
        serializer = serializers.AnnotationSerializer(annotation)
        return JSONResponse(serializer.data, status=200)
    elif request.method == "PUT":
        annotation = get_object_or_404(models.Annotation, pk=pk)
        data = JSONParser().parse(request)
        serializer = serializers.AnnotationSerializer(annotation, data=data)
        # NOTE(review): unlike index_create, an invalid PUT falls through and
        # returns None (server error) -- presumably a Forbidden response was
        # intended here too; confirm.
        if serializer.is_valid():
            serializer.save()
            return JSONResponse(serializer.data, status=200)
            # TODO: The below is what *should* happen...
            # NOTE(review): unreachable -- the return above exits first.
            response = HttpResponse(status=303)
            response["Location"] = reverse("read_update_delete",
                                           kwargs={"pk": serializer.data["id"]})
            return response
    elif request.method == "DELETE":
        annotation = get_object_or_404(models.Annotation, pk=pk)
        annotation.delete()
        return HttpResponse(status=204)
    else:
        return HttpResponseForbidden()
def search(request):
    """GET: filter annotations by query-string parameters; return rows and total."""
    if request.method != "GET":
        return HttpResponseForbidden()
    # NOTE(review): query params are passed straight to .filter(), so an
    # unexpected field name raises FieldError; consider whitelisting fields.
    filters = dict(request.GET.items())
    matches = models.Annotation.objects.filter(**filters)
    serializer = serializers.AnnotationSerializer(matches, many=True)
    return JSONResponse({"total": len(serializer.data), "rows": serializer.data})
class DemoView(TemplateView):
    """Render the static demo page."""
    template_name = "demo.html"
| nilq/baby-python | python |
import cv2 as cv
import numpy as np
import math
import time
# Wall-clock start of the whole batch run (reported at the end of the script).
beg=time.time()
def readimg(xmin, xmax, ymin, ymax):
    """Average brightness of img[xmin:xmax, ymin:ymax], mapped to a glyph index.

    Reads the module-level greyscale image `img`; 21.25 == 255/12 spreads the
    0-255 mean over the 13-entry glyph table used by basicTransform().
    """
    # NOTE(review): the divisor counts an inclusive (max-min+1) extent while
    # the loops cover the half-open [min, max) range -- presumably deliberate
    # dimming of the mapping; confirm before changing.
    n = (xmax - xmin + 1) * (ymax - ymin + 1) * 21.25
    total = 0
    for row in range(xmin, xmax):
        for col in range(ymin, ymax):
            total = total + img[row, col]
    return math.floor(total / n)
def basicTransform(input):
    """Map a brightness index (0 = darkest, 12 = lightest) to its art glyph."""
    glyphs = ('鑪', '罚', '朋', '同', '团', '田', '口', '厂', '十', '一', '、', '。', ',')
    return glyphs[input]
def imageTransform(xCharN,yCharN):
    """Render the module-level greyscale image `img` as a string of glyphs.

    xCharN/yCharN set the character-grid density; each output glyph summarises
    one tile of pixels via readimg()/basicTransform(). Uses the module-level
    `size` (np.shape(img)) set by the driver loop below.
    """
    # Pixel strides per character cell (floats; math.ceil re-grids them below).
    xStep = size[1]/xCharN
    yStep = size[0]/yCharN
    print(xStep,yStep)
    i=0
    j=0
    finalstr=''
    # i walks rows (size[0]) by yStep, j walks columns (size[1]) by xStep.
    # NOTE(review): readimg() receives (i, i+xStep, ...) although i advances by
    # yStep -- the step naming looks swapped relative to the axes; confirm.
    while i < size[0]:
        while j < size[1] :
            finalstr=finalstr+basicTransform(readimg(math.ceil(i),math.ceil(i+xStep),math.ceil(j),math.ceil(j+yStep)))
            j=j+xStep
        i=i+yStep
        j=0
    return finalstr
def textwrite(name, msg, file_path='D:/TestFiles/'):
    """Write msg to <file_path><name>.txt and print 'Done'.

    Parameters
    ----------
    name : str
        Output file name (without the .txt extension).
    msg : str
        Text to write.
    file_path : str, optional
        Destination directory prefix (must end with a separator); defaults to
        the original hard-coded location for backward compatibility.
    """
    full_path = file_path + name + '.txt'
    # with-statement guarantees the handle is closed even if write() fails;
    # explicit utf-8 keeps the CJK glyph table writable regardless of the
    # platform's default locale encoding.
    with open(full_path, 'w', encoding='utf-8') as file:
        file.write(msg)
    print('Done')
# Driver: convert frames 10000..13595 to glyph-art text files, then report the
# total elapsed time.
number = 10000
while number <= 13595:
    print(number)
    # Greyscale read keeps readimg()'s single-channel brightness averaging valid.
    img = cv.imread("D:/[WPF]JJDown/Download/rua/" + str(number) + ".jpg", cv.IMREAD_GRAYSCALE)
    size = np.shape(img)
    print(size)
    text = imageTransform(157, 77)
    textwrite(str(number), text)
    number += 1
end = time.time()
# BUG FIX: elapsed time is end - beg (the original computed beg - end and
# printed a negative duration).
runTime = end - beg
print(runTime)
#!/usr/local/bin/python
import ogr, osr
import datetime
print "Start: ", datetime.datetime.now()
for i in range(10000):
pointX = -84
pointY = 38
inputESPG = 4267
outputEPSG = 2246
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(pointX, pointY)
inSpatialRef = osr.SpatialReference()
inSpatialRef.ImportFromEPSG(inputESPG)
outSpatialRef = osr.SpatialReference()
outSpatialRef.ImportFromEPSGA(outputEPSG)
coordTransform = osr.CoordinateTransformation(inSpatialRef, outSpatialRef)
point.Transform(coordTransform)
print "end: ", datetime.datetime.now()
print point.GetX(), point.GetY()
| nilq/baby-python | python |
#!/usr/local/bin/python3
from MPL3115A2 import MPL3115A2
from si7021 import Si7021
from pms5003 import PMS5003
from smbus import SMBus
import influxdb_client
from influxdb_client import InfluxDBClient
import time
import logging
hostname="indoors"
logging.basicConfig(level=logging.DEBUG)
mpl = MPL3115A2(1, fetchPressure=False)
si = Si7021(SMBus(1))
pms5003 = PMS5003(device='/dev/ttyAMA0', baudrate=9600, pin_enable=22, pin_reset=27)
influxdb = InfluxDBClient(url="http://filtr.home.rkas.net:9999", token="dyuhAG11e2qX7dAvsZx9DvmZT8kG006pgyaTnYQ62_I9uwHitjy7PnGW8gLEZctZGCLKbgqcsJKOuJYNfEvGnA==")
influx_write_client = influxdb.write_api()
def readMPL():
    """Read pressure and temperature from the MPL3115A2; return influx line-protocol points."""
    #print("🗻 Altitude is %.3f" % mpl.altitude)
    baro = mpl.pressure
    celsius = mpl.temperature
    print("🌬 Pressure is %.2f" % baro)
    print("🌡 Temp is %.3f°C (%.3f°F)" % (celsius, (celsius * 1.8 + 32.0)))
    return [
        f"weather,host={hostname},sensor=MPL3115A2 pressure={baro}",
        f"weather,host={hostname},sensor=MPL3115A2 temperature={celsius}",
    ]
def readSi():
    """Read humidity and temperature from the Si7021; drop bogus 100% humidity readings."""
    hum, degc = si.read()
    print("🌡 Temp is %.3f°C (%.3f°F)" % (degc, (degc * 1.8 + 32.0)))
    print("🌫 Relative humidity is %0.2f%%" % hum)
    points = [f"weather,host={hostname},sensor=Si7021 temperature={degc}"]
    # Filter out undiagnosed spikes of 100% humidity
    if hum < 100:
        points.append(f"weather,host={hostname},sensor=Si7021 humidity={hum}")
    return points
def readPMS():
    """Read particulate-matter concentrations (PM1.0/2.5/10) from the PMS5003."""
    reading = pms5003.read()
    ug_1_0 = reading.pm_ug_per_m3(1.0)
    ug_2_5 = reading.pm_ug_per_m3(2.5)
    ug_10 = reading.pm_ug_per_m3(10)
    print("✨ PM1.0 ug/m3: %d" % ug_1_0)
    print("✨ PM2.5 ug/m3: %d" % ug_2_5)
    print("✨ PM10 ug/m3: %d" % ug_10)
    return [
        f"airquality,host={hostname},sensor=PMS5003 pm10={ug_1_0}",
        f"airquality,host={hostname},sensor=PMS5003 pm25={ug_2_5}",
        f"airquality,host={hostname},sensor=PMS5003 pm100={ug_10}",
    ]
# Main loop: poll each sensor once a minute and push the batch to InfluxDB.
# Each sensor is read best-effort so one failing sensor does not stop the rest.
while True:
    print("-----")
    datapoints = []
    try:
        datapoints += readMPL()
    except Exception as e:
        print(f"Exception: {e}")
    # BUG FIX: the two handlers below were bare `except:` clauses that printed
    # f"Exception: {e}" without binding e, so any sensor error raised a
    # NameError inside the handler itself.
    try:
        datapoints += readSi()
    except Exception as e:
        print(f"Exception: {e}")
    try:
        datapoints += readPMS()
    except Exception as e:
        print(f"Exception: {e}")
    print("Writing datapoints:\n%s" % ",\n".join(datapoints))
    influx_write_client.write("FWAP", "farkhome", datapoints)
    print("-----")
    time.sleep(60)
| nilq/baby-python | python |
#! /usr/bin/env python
#coding=utf8
import os
import sys
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'USAGE: commit message'
sys.exit()
commit_msg = sys.argv[1]
os.system('git pull origin master')
os.system('git status')
os.system('git add ./')
os.system('git commit * -m "%s"'%commit_msg)
os.system('git push origin master') | nilq/baby-python | python |
#!/usr/bin/env python
from csv import DictReader
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from wordcloud import WordCloud
from snli_cooccur import mkdirp_parent
# Rendering defaults for the generated word cloud; each is overridable from
# the command line in main().
DEFAULT_COLOR_NAME = '#1f497d'
DEFAULT_RELATIVE_SCALING = 1.
DEFAULT_WIDTH = 800
DEFAULT_HEIGHT = 400
DEFAULT_MAX_WORDS = 50
# (low, high) fraction of the colour map that random word colours come from.
DEFAULT_COLOR_MAP_RANGE = (0., 1.)
def parse_color_map_range(s):
    """Parse 'lo,hi' into a (lo, hi) float tuple with 0 <= lo <= hi <= 1.

    Raises ValueError for non-numeric parts, a wrong number of parts, an
    inverted range, or bounds outside [0, 1].
    """
    bounds = tuple(float(part) for part in s.split(','))
    if len(bounds) != 2:
        raise ValueError('color map range must be two comma-delimited numbers')
    lo, hi = bounds
    if lo > hi:
        raise ValueError('lower bound of color map range must be no greater '
                         'than upper bound')
    if lo < 0 or hi > 1:
        raise ValueError('color map range must be within [0, 1]')
    return bounds
def top_y_csv_to_word_cloud(input_path, query, x, output_path,
                            mask_path=None,
                            color_name=DEFAULT_COLOR_NAME,
                            color_map_name=None,
                            color_map_range=DEFAULT_COLOR_MAP_RANGE,
                            relative_scaling=DEFAULT_RELATIVE_SCALING,
                            background_color_name=None,
                            max_words=DEFAULT_MAX_WORDS,
                            width=DEFAULT_WIDTH,
                            height=DEFAULT_HEIGHT):
    """Render the top-y scores for one (query, x) pair from a CSV as a PNG word cloud.

    The CSV must have columns `query`, `x`, `y`, `score`; rows matching the
    given query and x are collected as {y: float(score)} frequencies.

    Raises:
        ValueError: if no row matches the requested query/x pair.
    """
    y_scores = dict()
    with open(input_path) as f:
        reader = DictReader(f)
        for row in reader:
            if row['query'] == query and row['x'] == x:
                y_scores[row['y']] = float(row['score'])
    if not y_scores:
        raise ValueError('found no rows matching query %s and row %s' %
                         (query, x))
    # Optional image mask and colour map (None means: solid color_name).
    mask = None if mask_path is None else np.array(Image.open(mask_path))
    cmap = None if color_map_name is None else plt.get_cmap(color_map_name)
    def color_func(word, font_size, position, orientation, font_path,
                   random_state):
        # Per-word colour: the fixed colour, or a random sample from the
        # configured sub-range of the colour map.
        if cmap is None:
            return color_name
        else:
            u = random_state.uniform(*color_map_range)
            (r, g, b, a) = 255 * np.array(cmap(u))
            return 'rgb(%.0f, %.0f, %.0f)' % (r, g, b)
    wordcloud = WordCloud(
        max_words=max_words,
        stopwords=(),
        prefer_horizontal=0.9,
        width=width,
        height=height,
        margin=2,
        relative_scaling=relative_scaling,
        mode='RGBA',
        color_func=color_func,
        background_color=background_color_name,
        mask=mask,
        collocations=False,
        normalize_plurals=False,
        regexp=r'\S+',
    )
    wordcloud.generate_from_frequencies(y_scores)
    image = wordcloud.to_image()
    # Create missing parent directories before writing the PNG.
    mkdirp_parent(output_path)
    with open(output_path, 'wb') as f:
        image.save(f, format='png')
def main():
    """CLI entry point: parse arguments and render the requested word cloud."""
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    parser = ArgumentParser(
        description='Generate word cloud from CSV top-y results',
        formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('input_path', help='path to input CSV file')
    parser.add_argument('query',
                        help='query for which top y will be visualized')
    parser.add_argument('x',
                        help='x for which top y will be visualized '
                             '(must appear in specified query)')
    parser.add_argument('output_path', help='path to output PNG file')
    parser.add_argument('--mask-path', help='path to image mask PNG file')
    parser.add_argument('--background-color-name',
                        help='name of background color (default: transparent)')
    parser.add_argument('--color-name', default=DEFAULT_COLOR_NAME,
                        help='name of text color')
    parser.add_argument('--color-map-name',
                        help='name of color map to select word colors from '
                             '(randomly) (default: use color-name for all '
                             'words)')
    parser.add_argument('--color-map-range', type=parse_color_map_range,
                        default=DEFAULT_COLOR_MAP_RANGE,
                        help='range of color map to use (as two '
                             'comma-delimited floats, a lower bound and an '
                             'upper bound)')
    parser.add_argument('--max-words', type=int, default=DEFAULT_MAX_WORDS,
                        help='number of words to display')
    parser.add_argument('--width', type=int, default=DEFAULT_WIDTH,
                        help='width of image, in pixels')
    parser.add_argument('--height', type=int, default=DEFAULT_HEIGHT,
                        help='height of image, in pixels')
    parser.add_argument('--relative-scaling', type=float,
                        default=DEFAULT_RELATIVE_SCALING,
                        help='degree to which score (rather than rank) is '
                             'used to scale words')
    args = parser.parse_args()
    top_y_csv_to_word_cloud(args.input_path, args.query, args.x,
                            args.output_path, mask_path=args.mask_path,
                            background_color_name=args.background_color_name,
                            color_name=args.color_name,
                            color_map_name=args.color_map_name,
                            color_map_range=args.color_map_range,
                            # BUG FIX: --max-words was parsed but never passed
                            # through, so the flag silently had no effect.
                            max_words=args.max_words,
                            width=args.width,
                            height=args.height,
                            relative_scaling=args.relative_scaling)
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
from unittest import TestCase
from flaky import flaky
from polyaxon_schemas.ops.build_job import BuildConfig
from polyaxon_schemas.ops.environments.pods import EnvironmentConfig
from polyaxon_schemas.ops.environments.resources import K8SResourcesConfig, PodResourcesConfig
from polyaxon_schemas.ops.experiment.frameworks import ExperimentFramework
from polyaxon_schemas.ops.group.early_stopping_policies import EarlyStoppingConfig
from polyaxon_schemas.ops.group.hptuning import HPTuningConfig, SearchAlgorithms
from polyaxon_schemas.ops.group.matrix import MatrixConfig
from polyaxon_schemas.ops.logging import LoggingConfig
from polyaxon_schemas.polyaxonfile import PolyaxonFile
from polyaxon_schemas.specs.frameworks import TensorflowSpecification
from polyaxon_schemas.utils import TaskType
class TestPolyaxonfileDeprecation(TestCase):
    """Checks that deprecated polyaxonfile fixtures still parse into the
    expected specification objects (environment, framework, resources)."""
    def test_simple_file_framework_passes(self):
        """Minimal experiment file with a framework and GPU resources."""
        plxfile = PolyaxonFile(os.path.abspath('tests/fixtures/deprecated/simple_file_framework.yml'))
        spec = plxfile.specification
        spec.apply_context()
        assert spec.version == 1
        assert spec.logging is None
        assert spec.tags is None
        assert spec.build.dockerfile == 'Dockerfile'
        assert spec.run.cmd == 'video_prediction_train --model=DNA --num_masks=1'
        assert spec.environment is not None
        assert spec.environment.resources.gpu.to_dict() == {'requests': 1, 'limits': 1}
        assert spec.environment.outputs.to_dict() == {'jobs': [111], 'experiments': None}
        assert spec.framework is not None
        assert spec.is_experiment is True
    def test_deprecated_advanced_file_passes(self):
        """Distributed TensorFlow experiment with default (empty) resources."""
        plxfile = PolyaxonFile(os.path.abspath('tests/fixtures/deprecated/advanced_file.yml'))
        spec = plxfile.specification
        spec.apply_context()
        assert spec.version == 1
        assert isinstance(spec.logging, LoggingConfig)
        assert spec.is_experiment
        assert isinstance(spec.environment, EnvironmentConfig)
        assert spec.framework == ExperimentFramework.TENSORFLOW
        assert spec.config.tensorflow.n_workers == 5
        assert spec.config.tensorflow.n_ps == 10
        # check properties for returning worker configs and resources
        assert spec.config.tensorflow.worker_resources == {}
        assert spec.config.tensorflow.ps_resources == {}
        cluster, is_distributed = spec.cluster_def
        assert TensorflowSpecification.get_worker_resources(
            environment=spec.config.tensorflow,
            cluster=cluster,
            is_distributed=is_distributed
        ) == {}
        assert TensorflowSpecification.get_ps_resources(
            environment=spec.config.tensorflow,
            cluster=cluster,
            is_distributed=is_distributed
        ) == {}
        assert spec.cluster_def == ({TaskType.MASTER: 1,
                                     TaskType.WORKER: 5,
                                     TaskType.PS: 10}, True)
    def test_deprecated_notebook_job_with_node_selectors(self):
        """Notebook job: node selector, resources, affinity and tolerations."""
        plxfile = PolyaxonFile(os.path.abspath(
            'tests/fixtures/deprecated/notebook_with_custom_environment.yml'))
        spec = plxfile.specification
        spec.apply_context()
        assert spec.version == 1
        assert spec.is_notebook
        assert spec.is_notebook is True
        assert spec.backend is None
        assert spec.logging is None
        assert sorted(spec.tags) == sorted(['foo', 'bar'])
        assert isinstance(spec.build, BuildConfig)
        assert isinstance(spec.environment, EnvironmentConfig)
        assert spec.artifact_refs == ['outputs1']
        assert spec.data_refs == ['data1', 'data2']
        assert spec.config_map_refs == ['config_map1', 'config_map2']
        node_selector = {'polyaxon.com': 'node_for_notebook_jobs'}
        assert spec.environment.node_selector == node_selector
        assert spec.node_selector == node_selector
        resources = {
            'cpu': {'requests': 1, 'limits': 2},
            'memory': {'requests': 200, 'limits': 200},
        }
        assert spec.environment.resources.to_dict() == resources
        assert spec.resources.to_dict() == resources
        affinity = {
            'nodeAffinity': {'requiredDuringSchedulingIgnoredDuringExecution': {}}
        }
        assert spec.environment.affinity == affinity
        assert spec.affinity == affinity
        tolerations = [{'key': 'key', 'operator': 'Exists'}]
        assert spec.environment.tolerations == tolerations
        assert spec.tolerations == tolerations
    def test_deprecated_advanced_file_with_custom_configs_and_resources_passes(self):
        """Distributed TF experiment with per-replica selectors/tolerations/resources."""
        plxfile = PolyaxonFile(os.path.abspath(
            'tests/fixtures/deprecated/advanced_file_with_custom_configs_and_resources.yml'))
        spec = plxfile.specification
        spec.apply_context()
        assert spec.version == 1
        assert isinstance(spec.logging, LoggingConfig)
        assert spec.is_experiment
        assert isinstance(spec.environment, EnvironmentConfig)
        assert spec.framework == ExperimentFramework.TENSORFLOW
        assert spec.artifact_refs == ['outputs1']
        assert spec.data_refs == ['data1', 'data2']
        assert spec.config_map_refs == ['config_map1', 'config_map2']
        assert spec.config.tensorflow.n_workers == 5
        assert spec.config.tensorflow.n_ps == 10
        assert isinstance(spec.environment.resources, PodResourcesConfig)
        assert isinstance(spec.environment.resources.cpu, K8SResourcesConfig)
        assert spec.environment.resources.cpu.requests == 1
        assert spec.environment.resources.cpu.limits == 2
        assert spec.config.tensorflow.default_worker_node_selector == {
            'foo': True
        }
        assert spec.config.tensorflow.worker_resources == {}
        assert spec.config.tensorflow.worker_affinities == {}
        # Worker 3 overrides the default node selector.
        assert isinstance(spec.config.tensorflow.worker_node_selectors[3], dict)
        assert spec.config.tensorflow.worker_node_selectors[3] == {
            'foo': False
        }
        assert isinstance(spec.config.tensorflow.worker_tolerations[4], list)
        assert spec.config.tensorflow.worker_tolerations[4] == [{
            'key': 'key',
            'operator': 'Exists',
            'effect': 'NoSchedule',
        }]
        assert isinstance(spec.config.tensorflow.default_ps_resources, PodResourcesConfig)
        assert isinstance(spec.config.tensorflow.default_ps_resources.cpu, K8SResourcesConfig)
        assert spec.config.tensorflow.default_ps_resources.cpu.requests == 2
        assert spec.config.tensorflow.default_ps_resources.cpu.limits == 4
        assert spec.config.tensorflow.ps_node_selectors == {}
        assert isinstance(spec.config.tensorflow.ps_tolerations[7], list)
        assert spec.config.tensorflow.ps_tolerations[7] == [{
            'operator': 'Exists'
        }]
        assert isinstance(spec.config.tensorflow.ps_affinities[7], dict)
        # PS 9 overrides the default resources with a memory spec.
        assert isinstance(spec.config.tensorflow.ps_resources[9], PodResourcesConfig)
        assert isinstance(spec.config.tensorflow.ps_resources[9].memory, K8SResourcesConfig)
        assert spec.config.tensorflow.ps_resources[9].memory.requests == 512
        assert spec.config.tensorflow.ps_resources[9].memory.limits == 1024
        # check that properties for return list of configs and resources is working
        cluster, is_distributed = spec.cluster_def
        worker_node_selectors = TensorflowSpecification.get_worker_node_selectors(
            environment=spec.config.tensorflow,
            cluster=cluster,
            is_distributed=is_distributed
        )
        assert len(worker_node_selectors) == spec.config.tensorflow.n_workers
        assert set([i['foo'] for i in worker_node_selectors.values()]) == {
            spec.config.tensorflow.default_worker_node_selector['foo'],
            spec.config.tensorflow.worker_node_selectors[3]['foo']}
        assert TensorflowSpecification.get_worker_resources(
            environment=spec.config.tensorflow,
            cluster=cluster,
            is_distributed=is_distributed
        ) == {}
        ps_resources = TensorflowSpecification.get_ps_resources(
            environment=spec.config.tensorflow,
            cluster=cluster,
            is_distributed=is_distributed
        )
        assert len(ps_resources) == spec.config.tensorflow.n_ps
        assert set(ps_resources.values()) == {
            spec.config.tensorflow.default_ps_resources,
            spec.config.tensorflow.ps_resources[9]}
        # Check total resources
        assert spec.total_resources == {
            'cpu': {'requests': 1 + 2 * 9, 'limits': 2 + 4 * 9},
            'memory': {'requests': 512, 'limits': 1024},
        }
        assert spec.cluster_def == ({TaskType.MASTER: 1,
                                     TaskType.WORKER: 5,
                                     TaskType.PS: 10}, True)
| nilq/baby-python | python |
#!/usr/bin/env python
from typing import NamedTuple
from hummingbot.market.market_base import MarketBase
class ArbitrageMarketPair(NamedTuple):
    """
    Specifies a pair of markets for arbitrage
    """
    # First leg: market connector plus its trading pair and asset symbols.
    market_1: MarketBase
    market_1_trading_pair: str
    market_1_base_asset: str
    market_1_quote_asset: str
    # Second leg: same fields for the counterpart market.
    market_2: MarketBase
    market_2_trading_pair: str
    market_2_base_asset: str
    market_2_quote_asset: str
| nilq/baby-python | python |
# Ordered model-parameter names.  Each name is exported below as a
# module-level integer equal to its position in this list, so e.g.
# Kon_IL13Rec == 0 and len_f_params == 17 (the count of real parameters).
param_names = [
    'Kon_IL13Rec',
    'Rec_phosphorylation',
    'pRec_intern',
    'pRec_degradation',
    'Rec_intern',
    'Rec_recycle',
    'JAK2_phosphorylation',
    'pJAK2_dephosphorylation',
    'STAT5_phosphorylation',
    'pSTAT5_dephosphorylation',
    'SOCS3mRNA_production',
    'DecoyR_binding',
    'JAK2_p_inhibition',
    'SOCS3_translation',
    'SOCS3_accumulation',
    'SOCS3_degradation',
    'CD274mRNA_production',
    'len_f_params',
]
for idx, name in enumerate(param_names):
    # Bind each name at module scope directly instead of building and
    # exec()-ing assignment strings -- same effect, no code-string execution.
    globals()[name] = idx
| nilq/baby-python | python |
import datetime
import genshin
async def test_diary(lclient: genshin.Client, genshin_uid: int):
    """Smoke-test the traveler's diary summary for the fixture account."""
    diary = await lclient.get_diary()
    # The diary must belong to the client's own Genshin UID.
    assert diary.uid == genshin_uid == lclient.uids[genshin.Game.GENSHIN]
    assert diary.nickname == "sadru"
    assert diary.month == datetime.datetime.now().month
    assert diary.data.current_mora > 0
async def test_diary_log(lclient: genshin.Client, genshin_uid: int):
    """Smoke-test the paginated diary log and its account metadata."""
    log = lclient.diary_log(limit=10)
    data = await log.flatten()
    assert data[0].amount > 0
    # Paginator metadata should match the fixture account as well.
    assert log.data.uid == genshin_uid == lclient.uids[genshin.Game.GENSHIN]
    assert log.data.nickname == "sadru"
    assert log.data.month == datetime.datetime.now().month
| nilq/baby-python | python |
"""
A :class:`~miso.data.dataset_readers.dataset_reader.DatasetReader`
reads a file and converts it to a collection of
:class:`~miso.data.instance.Instance` s.
The various subclasses know how to read specific filetypes
and produce datasets in the formats required by specific models.
"""
# pylint: disable=line-too-long
from .decomp import DecompDatasetReader
| nilq/baby-python | python |
import numpy as np
from napari.components import Camera
def test_camera():
    """Defaults and round-tripping of center/zoom/angles on Camera."""
    cam = Camera()
    # A freshly constructed camera exposes its documented defaults.
    assert cam.center == (0, 0, 0)
    assert cam.zoom == 1
    assert cam.angles == (0, 0, 90)
    new_center = (10, 20, 30)
    cam.center = new_center
    assert cam.center == new_center
    # Moving the center must leave the orientation untouched.
    assert cam.angles == (0, 0, 90)
    new_zoom = 200
    cam.zoom = new_zoom
    assert cam.zoom == new_zoom
    new_angles = (20, 90, 45)
    cam.angles = new_angles
    assert cam.angles == new_angles
def test_calculate_view_direction_3d():
    """View direction follows camera angles, independent of zoom and center."""
    # Same angles with varying zoom/center must all give the same direction.
    for view_center, view_zoom in (((0, 0, 0), 1), ((0, 0, 0), 10), ((15, 15, 15), 1)):
        cam = Camera(center=view_center, angles=(90, 0, 0), zoom=view_zoom)
        assert np.allclose(cam.view_direction, (0, 1, 0))
def test_calculate_view_direction_nd():
    """nD view direction embeds the 3D direction in the displayed dims."""
    camera = Camera(center=(0, 0, 0), angles=(90, 0, 0), zoom=1)

    # ndim == 2: no 3D direction exists.
    direction = camera.calculate_nd_view_direction(ndim=2, dims_displayed=[0, 1])
    assert direction is None

    # ndim == 3: plain 3D direction.
    direction = camera.calculate_nd_view_direction(ndim=3, dims_displayed=[0, 1, 2])
    assert len(direction) == 3
    assert np.allclose(direction, (0, 1, 0))

    # ndim > 3: 3D direction embedded at the displayed dimensions.
    direction = camera.calculate_nd_view_direction(ndim=5, dims_displayed=[0, 2, 4])
    assert len(direction) == 5
    assert np.allclose(direction[[0, 2, 4]], (0, 1, 0))
| nilq/baby-python | python |
class APIError(Exception):
    """Exception carrying an HTTP status code plus a human-readable message."""

    # Friendly names for the HTTP status codes the API may return; anything
    # else is reported as a generic communication error.
    codes = {
        204: 'No Results',
        400: 'Bad Request',
        401: 'Unauthorized',
        402: 'Unauthorized (Payment Required)',
        403: 'Forbidden',
        404: 'Not Found',
        413: 'Too Much Data Given',
        429: 'Too Many Requests (Rate Limiting)',
        500: 'Internal Server Error',
        501: 'Not Implemented',
        503: 'Service Unavailable'
    }

    def __init__(self, msg, code=0):
        super().__init__()
        self.msg = msg
        self.code = code

    def __str__(self):
        reason = self.codes.get(self.code, 'Communication Error')
        return "HTTP error code %s: %s (%s)" % (self.code, reason, self.msg)
ta = [1, 2, 3]
tb = [9, 8, 7]

# cluster: pair up the elements of the two lists
zipped = zip(ta, tb)
# BUG FIX: printing the zip object itself only shows its repr
# ("<zip object at 0x...>") in Python 3 because zip is lazy; materialise a
# fresh zip as a list so the pairs are actually displayed.
print('zip(ta,tb)=', list(zip(ta, tb)))

# decompose: unzip back into the original sequences (as tuples)
na, nb = zip(*zipped)
print(na, nb)
| nilq/baby-python | python |
import os, logging, math
import numpy as np
import torch
import torch.nn as nn
from volsim.base_models import *
from volsim.simulation_dataset import *
from volsim.params import *
class DistanceModel(nn.Module):
    """CNN-based perceptual distance metric for volumetric (3D) data.

    A feature-extracting base network is applied to two inputs; the distance
    is a learned, per-channel weighted L1/L2 difference of the (normalized)
    feature maps, averaged over all spatial positions and summed over layers
    (an LPIPS-style metric extended to 3D volumes).
    """

    def __init__(self, modelParams:Params, useGPU:bool=True):
        super(DistanceModel, self).__init__()
        self.hp = modelParams
        self.useGPU = useGPU

        # --- base network selection -------------------------------------
        # hp.mBase encodes the architecture, e.g. "multiScale_12_1.0" or
        # "alex_5_1"; malformed layer/width fields fall back to defaults.
        if "multiScale" in self.hp.mBase:
            base = self.hp.mBase.split("_")
            try:
                layers = int(base[1])
            except ValueError:
                layers = 12
            try:
                width = float(base[2])
            except ValueError:
                width = 1
            useSkip = "Skip" in self.hp.mBase
            self.basenet = MultiScaleNet(widthFactor=width, layers=layers, firstChannels=3, useSkip=useSkip)

        elif "alex" in self.hp.mBase:
            base = self.hp.mBase.split("_")
            try:
                layers = int(base[1])
            except ValueError:
                layers = 5
            try:
                width = float(base[2])
            except ValueError:
                width = 1
            convKernel, maxPoolKernel, firstStride = (12, 4, 4)
            self.basenet = AlexNetLike(widthFactor=width, layers=layers, convKernel=convKernel, maxPoolKernel=maxPoolKernel,
                                        firstStride=firstStride)
        else:
            raise ValueError('Unknown base network type.')

        # --- per-layer accumulators for feature-map normalization --------
        self.normAcc = []  # running mean accumulator (normMode "normMeanLayerGlobal")
        self.normM2 = []   # running sum of squared deviations (Welford-style M2)
        for i in range(self.basenet.layers):
            if self.useGPU:
                self.normAcc += [torch.tensor([0.0], requires_grad=False).cuda()]
                self.normM2 += [torch.tensor([0.0], requires_grad=False).cuda()]
            else:
                self.normAcc += [torch.tensor([0.0], requires_grad=False)]
                self.normM2 += [torch.tensor([0.0], requires_grad=False)]
        self.normCount = [0] * self.basenet.layers  # samples seen per layer

        # --- learned per-channel weighting (1x1x1 conv) per layer --------
        # Previously avg0..avg19 were created by 20 hand-written, nearly
        # identical statements, silently capping the model at 20 layers.
        # The loop below creates one weighting layer per base-network layer
        # and registers each under its historic attribute name ("avg0",
        # "avg1", ...) via setattr, so state_dicts saved by the old code
        # keep loading unchanged.
        self.avgs = []
        for i in range(self.basenet.layers):
            weightLayer = self.avgLayer(self.basenet.channels[i])
            setattr(self, "avg%d" % i, weightLayer)  # registers as a submodule
            self.avgs += [weightLayer]

        # initialize learned average weight layers
        for avgLayer in self.avgs:
            for layer in avgLayer:
                if isinstance(layer, nn.Conv3d):
                    layer.weight.data.fill_(self.hp.mLinInit)

        if self.useGPU:
            self.cuda()

    @classmethod
    def load(cls, path:str, useGPU:bool=True):
        """Restore a model (weights, hyperparams and, if used, the
        normalization accumulators) from a checkpoint written by save()."""
        if useGPU:
            print('Loading model from %s' % path)
            loaded = torch.load(path)
        else:
            print('CPU - Loading model from %s' % path)
            loaded = torch.load(path, map_location=torch.device('cpu'))
        params = Params.fromDict(loaded['hyperparams'])
        stateDict = loaded['stateDict']
        model = cls(params, useGPU)
        model.load_state_dict(stateDict)
        model.eval()
        if params.mNormMode != "norm":
            model.normAcc = loaded['normAcc']
            model.normM2 = loaded['normM2']
            model.normCount = loaded['normCount']
        return model

    def forward(self, x:dict) -> torch.Tensor:
        """Compute distances for the sample pairs described by batch dict *x*.

        x["data"] holds all volumes; x["indexA"]/x["indexB"] select the pair
        members, x["idxMin"]/x["idxMax"] the active slice. Returns a tensor of
        shape [batch, pairs]. (Return annotation fixed: the old `Tuple[...]`
        annotation did not match the single tensor actually returned.)
        """
        full = x["data"].cuda() if self.useGPU else x["data"]
        # only use index of first batch element for entire batch
        idxA = x["indexA"][0,x["idxMin"]:x["idxMax"]].long()
        idxB = x["indexB"][0,x["idxMin"]:x["idxMax"]].long()
        idxA = idxA.cuda() if self.useGPU else idxA
        idxB = idxB.cuda() if self.useGPU else idxB

        dataA = torch.index_select(full, 1, idxA)
        dataB = torch.index_select(full, 1, idxB)
        # collapse batch and pair dimensions for the base network
        dataA = dataA.view(-1,full.shape[2],full.shape[3],full.shape[4],full.shape[5])
        dataB = dataB.view(-1,full.shape[2],full.shape[3],full.shape[4],full.shape[5])
        dataA = dataA.permute(0,4,1,2,3) # change shape to [batch*sampleSlice,3,128,128,128]
        dataB = dataB.permute(0,4,1,2,3)

        self.clampWeights()
        outBaseA = self.basenet(dataA)
        outBaseB = self.basenet(dataB)

        # accumulate the weighted feature distance over all layers
        result = torch.tensor([[0.0]]).cuda() if self.useGPU else torch.tensor([[0.0]])
        for i in range( len(outBaseA) ):
            if i in self.hp.mIgnoreLayers:
                continue
            normalized1 = self.normalizeTensor(outBaseA[i], i)
            normalized2 = self.normalizeTensor(outBaseB[i], i)
            if self.hp.mFeatDist == "L1":
                diff = torch.abs(normalized2 - normalized1)
            elif self.hp.mFeatDist == "L2" or self.hp.mFeatDist == "L2Sqrt":
                diff = (normalized2 - normalized1)**2
            else:
                raise ValueError('Unknown feature distance.')
            weightedDiff = self.avgs[i](diff)
            result = result + torch.mean(weightedDiff, dim=[2,3,4])
        if self.hp.mFeatDist == "L2Sqrt":
            result = torch.sqrt(result)
        return torch.squeeze(result, dim=1).view(full.shape[0],-1)

    # input two numpy arrays with shape [width, height, depth, channel] or shape
    # [batch, width, height, depth, channel] where channel = 1 or channel = 3
    # and return a distance of shape [1] or [batch]
    # If true, normalize performs a normalization to the models native data range jointly for the full data batch
    # If true, interpolate performs a spatial interpolation to the models native data size jointly for the full data batch
    def computeDistance(self, input1:np.ndarray, input2:np.ndarray, normalize:bool, interpolate:bool) -> np.ndarray:
        assert (not self.training), "Distance computation should happen in evaluation mode!"
        assert (input1.shape == input2.shape), "Input shape mismatch!"
        # promote unbatched inputs to a batch of one
        in1 = input1[None,...] if input1.ndim == 4 else input1
        in2 = input2[None,...] if input2.ndim == 4 else input2

        data_transform = TransformsInference("single", 3, self.hp)
        if not normalize:
            data_transform.normalize = "none"
        if not interpolate:
            data_transform.outputSize = -1

        data = np.concatenate([in1, in2], axis=0) # stack along param dimension
        dataDict = {"data": data, "path": None, "distance": None, "indexA" : None, "indexB" : None, "idxMin" : None, "idxMax" : None}
        data = data_transform(dataDict)["data"]

        # build a synthetic sample dict pairing element k of in1 with element k of in2
        nPairs = in1.shape[0]
        distance = torch.from_numpy(np.zeros(nPairs, dtype=np.float32))
        indexA = torch.from_numpy(np.arange(nPairs, dtype=np.int32))
        indexB = torch.from_numpy(np.arange(nPairs, dtype=np.int32) + nPairs)
        path = np.array([""]*nPairs)
        sample = {"data": data[None,...], "path": path, "distance": distance[None,...],
                    "indexA" : indexA[None,...], "indexB" : indexB[None,...], "idxMin" : 0, "idxMax" : nPairs}
        output = self(sample)
        output = output.cpu().detach().view(-1).numpy()
        return output

    # ensures that avg layer weights are greater or equal to zero
    def clampWeights(self):
        for avgLayer in self.avgs:
            for layer in avgLayer:
                if isinstance(layer, nn.Conv3d):
                    layer.weight.data = torch.clamp(layer.weight.data, min=0)

    # 1x1 convolution layer to scale feature maps channel-wise
    def avgLayer(self, channelsIn:int) -> nn.Sequential:
        if self.hp.mLinDropout:
            return nn.Sequential(
                nn.Dropout(),
                nn.Conv3d(channelsIn, 1, 1, stride=1, padding=0, bias=False),
            )
        else:
            return nn.Sequential(
                nn.Conv3d(channelsIn, 1, 1, stride=1, padding=0, bias=False),
            )

    # preprocessing step that updates internal accumulators for feature map normalization
    def updateNorm(self, sample:dict):
        full = sample["data"].cuda() if self.useGPU else sample["data"]
        for i in range(full.shape[1]): # do not use index here, only iterate over all data once
            data = full[:, i]
            data = data.permute(0,4,1,2,3) # change shape to [batch,3,128,128,128]
            self.clampWeights()
            outBase = self.basenet(data)
            for j in range( len(outBase) ):
                self.normalizeTensor(outBase[j], j, updateAcc=True)

    # normalizes feature map tensor along channel dimension with different methods
    def normalizeTensor(self, tensorIn:torch.Tensor, layer:int, epsilon:float=1e-10,
                        updateAcc:bool=False) -> torch.Tensor:
        size = tensorIn.size()
        # unit normalize tensor in channel dimension
        if self.hp.mNormMode == "normUnit":
            norm = torch.sqrt( torch.sum(tensorIn**2,dim=1) )
            norm = norm.view(size[0], 1, size[2], size[3], size[4])
            return tensorIn / (norm.expand_as(tensorIn) + epsilon)
        elif self.hp.mNormMode == "normMeanLayerGlobal":
            if updateAcc:
                # Welford-style streaming mean/M2 update over channel means.
                # NOTE(review): the M2 term uses the *updated* mean while the
                # mean update divides by the *new* count — inherited batched
                # variant; verify against a reference before changing.
                self.normCount[layer] = self.normCount[layer] + size[0]
                delta = tensorIn - self.normAcc[layer].expand_as(tensorIn)
                self.normAcc[layer] = self.normAcc[layer] + torch.sum( torch.mean(delta / self.normCount[layer], dim=1) , dim=0)
                self.normM2[layer] = self.normM2[layer] + torch.sum( torch.mean(delta *(tensorIn - self.normAcc[layer].expand_as(tensorIn)), dim=1) , dim=0)
            # rescale norm accumulators for differently sized inputs
            if size[2] != self.normAcc[layer].shape[0] or size[3] != self.normAcc[layer].shape[1] or size[4] != self.normAcc[layer].shape[2]:
                up = nn.Upsample(size=(size[2], size[3], size[4]), mode="trilinear", align_corners=True)
                normAcc = torch.squeeze(up( torch.unsqueeze(torch.unsqueeze(self.normAcc[layer].detach(), dim=0), dim=0) ))
                normM2 = torch.squeeze(up( torch.unsqueeze(torch.unsqueeze(self.normM2[layer].detach(), dim=0), dim=0) ))
                mean = normAcc
                mean = mean.view(1, 1, size[2], size[3], size[4])
                std = torch.sqrt( normM2 / (self.normCount[layer] - 1) )
                std = std.view(1, 1, size[2], size[3], size[4])
            # directly use norm accumulators for matching input size
            else:
                mean = self.normAcc[layer]
                mean = mean.view(1, 1, size[2], size[3], size[4])
                std = torch.sqrt( self.normM2[layer] / (self.normCount[layer] - 1) )
                std = std.view(1, 1, size[2], size[3], size[4])
            normalized = (tensorIn - mean.expand_as(tensorIn)) / (std.expand_as(tensorIn) + epsilon)
            # NOTE(review): `math.sqrt(size[1]) - 1` (not sqrt(size[1]-1)) —
            # looks intentional but confirm against the training setup.
            normalized2 = normalized / (math.sqrt(size[1]) - 1)
            return normalized2
        elif self.hp.mNormMode == "normNone":
            return tensorIn
        else:
            raise ValueError('Unknown norm mode.')

    def printModelInfo(self):
        """Print and log the number of trainable parameters and the model layout."""
        parameters = filter(lambda p: p.requires_grad, self.parameters())
        params = sum([np.prod(p.size()) for p in parameters])
        print("Trainable parameters: %d" % params)
        print(self)
        print("")
        logging.info("Trainable parameters: %d" % params)
        logging.info(self)
        logging.info("")

    def save(self, path:str, override:bool=False, noPrint:bool=False):
        """Write weights + hyperparams (+ norm accumulators) to *path*;
        refuses to overwrite an existing file unless *override* is set."""
        if not noPrint:
            print('Saving model to %s' % path)
        if not override and os.path.isfile(path):
            raise ValueError("Override warning!")
        else:
            saveDict = {'stateDict' : self.state_dict(), 'hyperparams' : self.hp.asDict(),}
            if self.hp.mNormMode != "norm":
                saveDict['normAcc'] = self.normAcc
                saveDict['normM2'] = self.normM2
                saveDict['normCount'] = self.normCount
            torch.save(saveDict, path)

    def resume(self, path:str):
        """Load weights and hyperparams into this instance (in-place load())."""
        if self.useGPU:
            print('Resuming model from %s' % path)
            loaded = torch.load(path)
        else:
            print('CPU - Resuming model from %s' % path)
            loaded = torch.load(path, map_location=torch.device('cpu'))
        self.load_state_dict(loaded['stateDict'])
        self.hp = Params().fromDict(loaded['hyperparams'])
        if self.hp.mNormMode != "norm":
            self.normAcc = loaded['normAcc']
            self.normM2 = loaded['normM2']
            self.normCount = loaded['normCount']
| nilq/baby-python | python |
"""Add hostname column to the resources table
Revision ID: 58a12e45663e
Revises: 06ce06e9bb85
Create Date: 2020-10-20 18:24:40.267394
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '58a12e45663e'
down_revision = '06ce06e9bb85'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add a nullable 64-char ``hostname`` column to ``resources``."""
    with op.batch_alter_table('resources') as batch_op:
        batch_op.add_column(sa.Column('hostname', sa.String(length=64), nullable=True))
def downgrade():
    """Revert the migration: drop the ``hostname`` column from ``resources``."""
    with op.batch_alter_table('resources') as batch_op:
        batch_op.drop_column('hostname')
| nilq/baby-python | python |
from cantoolz.module import *
from cantoolz.uds import *
import json
class control_ecu_doors(CANModule):
    """Virtual door-lock ECU module for the 'vircar' simulated CAN bus.

    Watches door status report frames and can inject lock/unlock command
    frames; all CAN ids and payloads come from the init params (see ``help``).
    """
    name = "Doors trigger for vircar"
    help = """

    This module emulating lock control.

    Init params (example):

    {
        'id_report': {0x91:'Left', 0x92:'Right'},
        'id_command': 0x81,
        'commands': {
            'lock':'1000',
            'unlock':'10ff',
            'init': '00ff',

        },
        'reports': {
            'Locked': '2000',
            'Unlocked': '20ff'
        }
    }

    """

    _active = True

    def do_init(self, params):
        """Store the config, reset state and register the console commands."""
        self._status2 = params
        self.frames = []      # queue of command frames waiting to be written
        self._doors = {}      # last known status per door name
        self._cmdList['status'] = Command("Get doors status", 0, "", self.control_get_status, True)
        self._cmdList['central_lock'] = Command("Lock doors", 0, "", self.control_lock, True)
        self._cmdList['central_unlock'] = Command("Unlock doors", 0, "", self.control_unlock, True)

    def control_lock(self, flag):
        """Queue a 'lock' command frame for injection on the bus."""
        self.frames.append(CANMessage(self._status2['id_command'],int(len(self._status2['commands']['lock'])/2),bytes.fromhex(self._status2['commands']['lock']),False, CANMessage.DataFrame))
        return ""

    def control_unlock(self, flag):
        """Queue an 'unlock' command frame for injection on the bus."""
        self.frames.append(CANMessage(self._status2['id_command'],int(len(self._status2['commands']['unlock'])/2),bytes.fromhex(self._status2['commands']['unlock']),False, CANMessage.DataFrame))
        return ""

    def control_get_status(self, flag):
        """Return the collected door statuses as a JSON string."""
        json_string = json.dumps({'status': self._doors})
        return json_string

    # Effect (could be fuzz operation, sniff, filter or whatever)
    def do_effect(self, can_msg, args):
        """Pipeline hook: on 'read' record door statuses, on 'write' inject
        one queued command frame (if any) into the empty message slot."""
        if args['action'] == 'read' and can_msg.CANData:  # READ
            if can_msg.CANFrame.frame_id in self._status2.get('id_report', {}).keys():
                # match the payload against the known report codes
                for status, code in self._status2['reports'].items():
                    if can_msg.CANFrame.frame_length == int(len(code)/2) and code == self.get_hex(can_msg.CANFrame.frame_raw_data):
                        self._doors.update(
                            {self._status2['id_report'][can_msg.CANFrame.frame_id]: status}
                        )
        if args['action'] == 'write' and not can_msg.CANData:
            if len(self.frames) > 0:
                can_msg.CANFrame = self.frames.pop(0)
                can_msg.CANData = True
                can_msg.bus = self._bus
        return can_msg
| nilq/baby-python | python |
# Under MIT licence, see LICENCE.txt
import random
from typing import List
from Util import Pose, Position
from Util.ai_command import MoveTo
from Util.constant import BALL_RADIUS, ROBOT_RADIUS, POSITION_DEADZONE, ANGLE_TO_HALT
from Util.geometry import compare_angle
from ai.GameDomainObjects.player import Player
from ai.STA.Tactic.tactic import Tactic
from ai.states.game_state import GameState
# Tolerance on orientation (rad) before a pose is considered reached.
ORIENTATION_DEADZONE = 0.2
# Kick-trigger distances for the real robot vs. the simulator.
DISTANCE_TO_KICK_REAL = ROBOT_RADIUS * 3.4
DISTANCE_TO_KICK_SIM = ROBOT_RADIUS + BALL_RADIUS
# Delay (s) between successive commands.
COMMAND_DELAY = 1.5
class GoToRandomPosition(Tactic):
    """Tactic that repeatedly sends the robot to random points on a discrete
    grid covering a rectangular zone around *center_of_zone*."""

    def __init__(self, game_state: GameState,
                 player: Player,
                 args: List[str]=None,
                 center_of_zone=Position(0, 0),
                 height_of_zone=800,
                 width_of_zone=800):
        # NOTE(review): the default center_of_zone is a single shared
        # Position instance (evaluated once) — fine as long as Position is
        # never mutated in place; confirm.
        super().__init__(game_state, player, args=args)
        self.current_state = self.main_state
        self.next_state = self.main_state
        self.center_of_zone = center_of_zone
        self.height_of_zone = height_of_zone
        self.width_of_zone = width_of_zone
        self.bottom_left_corner = Position(self.center_of_zone[0] - self.width_of_zone / 2,
                                           self.center_of_zone[1] - self.height_of_zone / 2)
        # Precompute a grid of candidate targets, 100 mm apart (presumably
        # millimetres — verify against field units).
        self.grid_of_positions = []
        discretisation = 100
        for i in range(int(self.width_of_zone / discretisation)):
            for j in range(int(self.height_of_zone / discretisation)):
                self.grid_of_positions.append(self.bottom_left_corner + Position(discretisation * i,
                                                                                 discretisation * j))
        # Pick the first random target; orientation is fixed at 0.
        self.current_position_index_to_go = random.randint(0, len(self.grid_of_positions) - 1)
        self.current_position_to_go = self.grid_of_positions[self.current_position_index_to_go]
        self.current_angle_to_go = 0 #random.randint(0, 100) * np.pi / 100.
        self.next_pose = Pose(self.current_position_to_go, self.current_angle_to_go)

    def main_state(self):
        """Issue a MoveTo towards the current target; pick a new random
        target once the previous one has been reached."""
        if self.check_success():
            self.current_position_index_to_go = random.randint(0, len(self.grid_of_positions) - 1)
            self.current_position_to_go = self.grid_of_positions[self.current_position_index_to_go]
            #self.current_angle_to_go = random.randint(-1, 1) * np.pi / 100.
            self.next_pose = Pose(self.current_position_to_go, self.current_angle_to_go)
        return MoveTo(self.next_pose, cruise_speed=2)

    def check_success(self):
        """Return True when the player is within the position deadzone and
        aligned with the target orientation."""
        distance = (self.player.pose.position - self.next_pose.position).norm
        if distance < POSITION_DEADZONE and compare_angle(self.player.pose.orientation, self.next_pose.orientation, abs_tol=ANGLE_TO_HALT):
            return True
        return False
| nilq/baby-python | python |
import factory
import json
from django.test import TestCase, Client
from django.urls import reverse
from django.test import RequestFactory
from django.contrib.auth.models import AnonymousUser
from movies.models import Movie
from movies.views import home
from movies.forms import SearchMovieForm
from movie_database.users.models import User
class MovieFactory(factory.DjangoModelFactory):
    """Factory producing ``Movie`` rows for tests (get-or-create keyed on title)."""
    class Meta:
        model = Movie
        django_get_or_create = ('title',)
    # Default title; override per-test via MovieFactory(title=...).
    title = 'Spiderman'
    # data = json.dumps({'Year': '2001'})
class FavouriteTests(TestCase):
    """Integration tests for the movie search home page."""

    def setUp(self):
        """Create a movie, a user and an HTTP client for each test."""
        self.movie = MovieFactory()
        self.factory = RequestFactory()
        self.user = User.objects.create_user(
            username='jacob',
            email='jacob@gmail.com',
            password='topsecret'
        )
        self.client = Client()

    def test_home_page(self):
        """Logged-in GET of the home page renders the search form."""
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('home'))
        self.assertEqual(response.status_code, 200)
        self.assertIsInstance(response.context['form'], SearchMovieForm)
        self.assertTemplateUsed(response, template_name="pages/home.html")

    def test_home_page_form(self):
        """The search form validates with a plain title."""
        data = {'title': 'Spiderman'}
        form = SearchMovieForm(data=data)
        self.assertTrue(form.is_valid())
| nilq/baby-python | python |
import spatialfacet
import numpy as np
from matplotlib import pyplot as plt
from shapely.geometry import Polygon, Point
# Two unit rectangles tiling [-1,1] x [-1,1]: U is the left half, V the right.
U = Polygon([[-1,-1],
             [-1,1],
             [0,1],
             [0,-1],
             [-1,-1]])
V = Polygon([[0,-1],
             [0,1],
             [1,1],
             [1,-1],
             [0,-1]])
# A thin strip along the top edge of U (y in [0.75, 1]).
U_minus = Polygon([[-1,0.75],
                   [-1,1],
                   [0,1],
                   [0,0.75],
                   [-1,0.75]])
print(U)

# Run a spatially-faceted full-text query against the sample database,
# then dump the spy data (coordinates, document ids, weights, strings).
s = spatialfacet.SpatialFacetMiner()
s.add_database("databases/simple","german")
s.query("red blue",1,20,1000)
c0, c1, docs,wt = s.getSpyData();
v1,values = s.getSpyStringData();
print(c0)
print("="*50)
print(c1)
print("="*50)
print(docs)
print("="*50)
print(wt)
print("="*50)
print(v1)
print("="*50)
print(values)
print("="*50)
# Scatter plot of result points, marker size scaled by weight.
plt.scatter(c0,c1, s=(15*wt)**2+5)
plt.savefig("test.png")
## facet
def get_facet(c0, c1, U):
    """Return the row indices of all points (c0[i], c1[i]) lying inside polygon *U*.

    Generalized: iterates the coordinate sequences directly with
    enumerate/zip instead of indexing via ``c0.shape[0]``, so plain lists
    work as well as numpy arrays (behavior on arrays is unchanged).
    """
    return [rowid for rowid, (x, y) in enumerate(zip(c0, c1))
            if Point([x, y]).within(U)]
# Bucket the result points into the three spatial facets defined above.
facet = {
    "U": get_facet(c0,c1,U),
    "V": get_facet(c0,c1,V),
    "U-": get_facet(c0,c1,U_minus),
}
print(facet)

## now propose query terms
# Ask the miner to augment "red" with terms from documents 1-3 (top 5).
out = s.augment("red", [1,2,3], 5)
print(out)
print(s.query_with_data("red green",1,10,1))
print(s.query("red green",1,10,1))
| nilq/baby-python | python |
import numpy
def diff(features1, features2):
    """Return the Euclidean (L2) distance between two feature vectors."""
    delta = numpy.asarray(features1) - numpy.asarray(features2)
    return numpy.linalg.norm(delta)
def highOrSober(soberFeatures, highFeatures, queryFeatures):
    """Classify *queryFeatures* as "sober" or "high" by nearest reference set.

    Ties (equal distances) are classified as "high", matching the strict
    less-than comparison.
    """
    sober_distance = diff(soberFeatures, queryFeatures)
    high_distance = diff(highFeatures, queryFeatures)
    return "sober" if sober_distance < high_distance else "high"
| nilq/baby-python | python |
# Prefer setuptools; fall back to distutils on minimal environments.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Package metadata for the mias-loader distribution.
config = {
    'description': 'A tiny Python/C library for loading MIAS images from file.',
    'author': 'Samuel Jackson',
    'url': 'http://github.com/samueljackson92/mias-loader',
    'download_url': 'http://github.com/samueljackson92/mias-loader',
    'author_email': 'samueljackson@outlook.com',
    'version': '0.1.0',
    'install_requires': [
        'numpy'
    ],
    'py_modules': ['mias_load'],
    'name': 'mias_loader'
}

setup(**config)
| nilq/baby-python | python |
from PIL import Image
import gym
import gym_pacman
import time
# Run random-agent episodes of Berkeley Pacman forever, rendering each step.
env = gym.make('BerkeleyPacmanPO-v0')
env.seed(1)
done = False
while True:
    done = False
    env.reset()
    i = 0
    # Fixed 100 random actions per episode.
    # NOTE(review): `done` returned by env.step is assigned but never used
    # to end an episode early — presumably intentional for this demo; confirm.
    while i < 100:
        i += 1
        s_, r, done, info = env.step(env.action_space.sample())
        env.render()
    print("Iteration over")
| nilq/baby-python | python |
from asyncio import sleep
from requests import get
from main import bot, reg_handler, des_handler, par_handler
async def _rant(message, url):
    """Fetch a random quote from *url* and edit *message* with it.

    Shared implementation for the ``diss``/``biss`` commands (previously
    duplicated verbatim). Retries up to 20 times; on total failure shows an
    error message briefly and deletes the message.
    """
    await message.edit("获取中 . . .")
    for _ in range(20):
        req = get(url)
        if req.status_code == 200:
            await message.edit(req.text, parse_mode='html')
            return
    # all attempts failed
    await message.edit("出错了呜呜呜 ~ 试了好多好多次都无法访问到 API 服务器 。")
    await sleep(2)
    await message.delete()


async def diss(message, args, origin_text):
    """Mild ("level=min") quote command."""
    await _rant(message, "https://nmsl.shadiao.app/api.php?level=min&from=tntcrafthim")


async def biss(message, args, origin_text):
    """Strong (default level) quote command."""
    await _rant(message, "https://nmsl.shadiao.app/api.php?from=tntcrafthim")


reg_handler('diss', diss)
reg_handler('biss', biss)
des_handler('diss', "儒雅随和版祖安语录。")
# BUG FIX: this description was previously registered under 'diss' again,
# overwriting the line above and leaving 'biss' without a description.
des_handler('biss', '加带力度版祖安语录。')
par_handler('diss', '')
par_handler('biss', '')
| nilq/baby-python | python |
# Generated by Django 2.1 on 2018-10-03 01:54
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the Article and Capability models and strip profile-style
    fields (city, country, description, image, slug, state, url) from Client."""

    dependencies = [
        ('unlabel_backend', '0021_auto_20180625_1904'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Article',
        ),
        migrations.DeleteModel(
            name='Capability',
        ),
        migrations.RemoveField(
            model_name='client',
            name='city',
        ),
        migrations.RemoveField(
            model_name='client',
            name='country',
        ),
        migrations.RemoveField(
            model_name='client',
            name='description',
        ),
        migrations.RemoveField(
            model_name='client',
            name='image',
        ),
        migrations.RemoveField(
            model_name='client',
            name='slug',
        ),
        migrations.RemoveField(
            model_name='client',
            name='state',
        ),
        migrations.RemoveField(
            model_name='client',
            name='url',
        ),
    ]
| nilq/baby-python | python |
#!/usr/bin/env python
# vim:set ts=2 sw=2 expandtab:
#
# the pre.py script permits the reconstruction to specify the server, user
# name, and password used for connection. This is achieved by setting
# the parameters in a dictionary named jobArgs.
#
# Optional output variable : jobArgs
#
# Default dictionary; only those being changed need to be specified.
#
# jobArgs = { 'machines' : ('ip address 1', 'ip address 2'),
# 'user' : 'reconuser',
# 'keyfile' : None } (Which implies id_recon next to autorec.py)
print("********* Entering pre.py *************")
print("# Objects present at pre.py:")
# Dump every name visible at this point, sorted for stable output.
# NOTE: at module level locals() is globals(); sorted() snapshots the keys
# once, so names bound by the loop itself are not listed.
for x in sorted(locals().keys()):
    print("# {0:20} : {1}".format(x, locals()[x]))
print("")

# To override the DEFAULT_MACHINES_LIST for this reconstruction, for example:
#jobArgs = { 'machines' : ('127.0.0.1',) } # Note trailing ',' if only one.
print("********* Exiting pre.py *************")
| nilq/baby-python | python |
class Solution(object):
    """LeetCode 524: longest word in dictionary achievable by deleting
    characters of a given string (ties broken lexicographically)."""

    def findLongestWord(self, s, d):
        """
        Return the longest word in *d* that is a subsequence of *s*; among
        equally long candidates the lexicographically smallest wins, and ""
        is returned when nothing matches.

        :type s: str
        :type d: List[str]
        :rtype: str
        """
        best = ""
        # Single pass instead of sorting by length: keep the current best and
        # replace it only by a strictly longer match, or an equally long but
        # lexicographically smaller one.
        for word in d:
            if not self.scanWord(s, word):
                continue
            if len(word) > len(best) or (len(word) == len(best) and word < best):
                best = word
        return best

    def scanWord(self, sIn, s):
        """Return True if *s* is a subsequence of *sIn*.

        Bug fix: the old index-based scan returned False for an empty *s*,
        but the empty string is a subsequence of everything.
        """
        it = iter(sIn)
        # `ch in it` advances the iterator past the match, so characters of
        # *s* must appear in *sIn* in order.
        return all(ch in it for ch in s)
# Quick smoke test against the LeetCode examples (expected output in comments).
a = Solution()
print(a.findLongestWord("abpcplea", [
    "ale", "apple", "monkey", "plea"]))  # apple
print(a.findLongestWord("abpcplea", ["a", "b", "c"]))  # a
print(a.findLongestWord("apple", ["zxc", "vbn"]))  # ""
| nilq/baby-python | python |
"""
@author: Andrea Domenico Giuliano
@contact: andreadomenico.giuliano@studenti.unipd.it
@organization: University of Padua
"""
import datetime
import math
from collections import defaultdict
# File containing the functions that create the dicts describing the item groups and the user groups
def Nr_items(c):
    """Return the total number of rows in the ``items`` table."""
    c.execute('select count(id) as co from items')
    total = 0
    for row in c:
        total = int(row['co'])
    return total
def Jobroles_list(c):
    """Return [[user_id, jobrole], ...] for every row of ``us_jobroles``."""
    c.execute('select * from us_jobroles')
    pairs = []
    for row in c:
        pairs.append([int(row['user_id']), int(row['jobrole'])])
    return pairs
def Jobroles_Dist_list(c):
    """Return the distinct jobrole ids found in ``us_jobroles``."""
    c.execute('select DISTINCT jobrole as jb from us_jobroles')
    return [int(row['jb']) for row in c]
"""
# Versione senza divisione temporale
def Jobroles_d_creation(c,jbr_l,items_upop_score_d):
#print "Inizio calcolo Jobroles";
a = datetime.datetime.now();
jobroles_d = {};
jbr_d_l = Jobroles_Dist_list(c);
items_upop_sc_d = defaultdict(lambda: 0);
if (len(jbr_d_l)>0):
for i in range(0,len(jbr_d_l)):
jobroles_d[jbr_d_l[i]] = [int(0),[],items_upop_sc_d.copy()];
if (len(jbr_l)>0):
for i in range(0,len(jbr_l)):
if jbr_l[i][0] not in jobroles_d[jbr_l[i][1]][1]:
jobroles_d[jbr_l[i][1]][0] += 1;
l = jobroles_d[jbr_l[i][1]][1];
l.append(jbr_l[i][0]);
jobroles_d[jbr_l[i][1]][1] = l;
for key in jobroles_d:
if (jobroles_d[key][0]>0):
jobroles_d[key][0] = float(1.00/jobroles_d[key][0]);
b = datetime.datetime.now();
#print "Fine calcolo Jobroles";
#print (b-a);
return jobroles_d;
"""
def Jobroles_d_creation(c,jbr_l):
    """Build {jobrole: [weight, [user_ids], per-type score dicts]}.

    *weight* ends up as 1/group_size (0 for empty groups); the third slot
    maps the keys 1..6 (presumably interaction types — confirm) each to a
    defaultdict of item scores initialised to 0.0.
    """
    #print "Inizio calcolo Jobroles";
    a = datetime.datetime.now();
    jobroles_d = {};
    jbr_d_l = Jobroles_Dist_list(c);
    #items_upop_sc_d = defaultdict(lambda: 0);
    # one empty entry per distinct jobrole
    if (len(jbr_d_l)>0):
        for i in range(0,len(jbr_d_l)):
            p = {};
            for k in range(1,7):
                p[k] = defaultdict(lambda: 0.0);
            jobroles_d[jbr_d_l[i]] = [int(0),[],p];
    # count and collect the (unique) users belonging to each jobrole
    if (len(jbr_l)>0):
        for i in range(0,len(jbr_l)):
            if jbr_l[i][0] not in jobroles_d[jbr_l[i][1]][1]:
                jobroles_d[jbr_l[i][1]][0] += 1;
                l = jobroles_d[jbr_l[i][1]][1];
                l.append(jbr_l[i][0]);
                jobroles_d[jbr_l[i][1]][1] = l;
    # turn counts into 1/count weights
    for key in jobroles_d:
        if (jobroles_d[key][0]>0):
            jobroles_d[key][0] = float(1.00/jobroles_d[key][0]);
    b = datetime.datetime.now();
    #print "Fine calcolo Jobroles";
    #print (b-a);
    return jobroles_d;
def Fos_list(c):
    """Return [[user_id, fos], ...] for every row of ``us_fos``."""
    c.execute('select * from us_fos')
    pairs = []
    for row in c:
        pairs.append([int(row['user_id']), int(row['fos'])])
    return pairs
def Fos_Dist_list(c):
    """Return the distinct field-of-study ids found in ``us_fos``."""
    c.execute('select DISTINCT fos as f from us_fos')
    return [int(row['f']) for row in c]
"""
# Versione senza divisione temporale
def Fos_d_creation(c,fos_l,items_upop_score_d):
#print "Inizio calcolo Fos";
a = datetime.datetime.now();
fos_d = {};
fos_di_l = Fos_Dist_list(c);
items_upop_sc_d = defaultdict(lambda: 0);
if (len(fos_di_l)>0):
for i in range(0,len(fos_di_l)):
fos_d[fos_di_l[i]] = [int(0),[],items_upop_sc_d.copy()];
if (len(fos_l)>0):
for i in range(0,len(fos_l)):
if fos_l[i][0] not in fos_d[fos_l[i][1]][1]:
fos_d[fos_l[i][1]][0] += 1;
l = fos_d[fos_l[i][1]][1];
l.append(fos_l[i][0]);
fos_d[fos_l[i][1]][1] = l;
for key in fos_d:
if (fos_d[key][0]>0):
fos_d[key][0] = float(1.00/fos_d[key][0]);
b = datetime.datetime.now();
#print "Fine calcolo Fos";
#print (b - a);
return fos_d;
"""
def Fos_d_creation(c,fos_l):
    """Build {fos: [weight, [user_ids], per-type score dicts]}.

    Mirrors Jobroles_d_creation for the field-of-study grouping: *weight*
    becomes 1/group_size (0 for empty groups); the third slot maps keys 1..6
    to defaultdicts of item scores initialised to 0.0.
    """
    #print "Inizio calcolo Fos";
    a = datetime.datetime.now();
    fos_d = {};
    fos_di_l = Fos_Dist_list(c);
    # one empty entry per distinct field of study
    if (len(fos_di_l)>0):
        for i in range(0,len(fos_di_l)):
            p = {};
            for k in range(1,7):
                p[k] = defaultdict(lambda: 0.0);
            fos_d[fos_di_l[i]] = [int(0),[],p];
    # count and collect the (unique) users belonging to each group
    if (len(fos_l)>0):
        for i in range(0,len(fos_l)):
            if fos_l[i][0] not in fos_d[fos_l[i][1]][1]:
                fos_d[fos_l[i][1]][0] += 1;
                l = fos_d[fos_l[i][1]][1];
                l.append(fos_l[i][0]);
                fos_d[fos_l[i][1]][1] = l;
    # turn counts into 1/count weights
    for key in fos_d:
        if (fos_d[key][0]>0):
            fos_d[key][0] = float(1.00/fos_d[key][0]);
    b = datetime.datetime.now();
    #print "Fine calcolo Fos";
    #print (b - a);
    return fos_d;
def Tag_Not_used(c):
    """Return max(tag)+1 — an id guaranteed not to be used by any real tag."""
    c.execute('select max(tag) as t from it_tags where tag != ""')
    highest = 0
    for row in c:
        highest = int(row['t'])
    return highest + 1
def Tags_Dist_list(c, n_u_t):
    """Return the distinct tag ids plus the sentinel *n_u_t* for untagged items."""
    c.execute('select DISTINCT tag as t from it_tags')
    tags = [int(row['t']) for row in c if str(row['t']) != '']
    tags.append(n_u_t)
    return tags
def Tags_list(c, n_u_t):
    """Return [[item_id, tag], ...]; empty tags are replaced by the sentinel *n_u_t*."""
    c.execute('select * from it_tags')
    pairs = []
    for row in c:
        tag = n_u_t if str(row['tag']) == '' else int(row['tag'])
        pairs.append([int(row['item_id']), tag])
    return pairs
def Tags_d_creation(c,tags_l,n_u_t,nr_items):
    """Build {tag: [group_size, [item_ids], idf]} for tf-idf style scoring.

    idf = log(nr_items / group_size) for non-empty groups.
    NOTE(review): the surrounding comments suggest Python 2; if so,
    ``nr_items/nr_users_group`` is integer division before log — confirm
    this is intended.
    """
    #print "Inizio calcolo Tags";
    a = datetime.datetime.now();
    tags_d = {};
    tags_di_l = Tags_Dist_list(c,n_u_t);
    # one empty entry per distinct tag (incl. the "no tag" sentinel)
    if (len(tags_di_l)>0):
        for i in range(0,len(tags_di_l)):
            tf_idf_sc = float(0.0);
            tags_d[tags_di_l[i]] = [int(0),[],tf_idf_sc];
    # count and collect the (unique) items carrying each tag
    if (len(tags_l)>0):
        for i in range(0,len(tags_l)):
            if tags_l[i][0] not in tags_d[tags_l[i][1]][1]:
                tags_d[tags_l[i][1]][0] += 1;
                l = tags_d[tags_l[i][1]][1];
                l.append(tags_l[i][0]);
                tags_d[tags_l[i][1]][1] = l;
    # compute the idf term per tag group
    for tag_id in tags_d:
        nr_users_group = tags_d[tag_id][0];
        if (nr_users_group > 0):
            idf = math.log(nr_items/nr_users_group);
            tags_d[tag_id][2] = idf;
    b = datetime.datetime.now();
    #print "Fine calcolo Tags";
    #print (b - a);
    return tags_d;
def Title_Not_used(c):
    """Return max(title)+1 — an id guaranteed not to be used by any real title."""
    c.execute('select max(title) as t from it_titles where title != ""')
    highest = 0
    for row in c:
        highest = int(row['t'])
    return highest + 1
def Titles_Dist_list(c, n_u_t):
    """Return the distinct title ids plus a trailing 0 for untitled items.

    Note: unlike Tags_Dist_list, the sentinel appended here is 0 and the
    *n_u_t* parameter is unused — preserved as-is.
    """
    c.execute('select DISTINCT title as t from it_titles')
    titles = [int(row['t']) for row in c if str(row['t']) != '']
    titles.append(int(0))
    return titles
def Titles_list(c, n_u_t):
    """Return [item_id, title_id] pairs for every row of it_titles.

    Rows with an empty title get the placeholder id 0 (n_u_t is unused,
    kept for symmetry with Tags_list).
    """
    c.execute('select * from it_titles')
    pairs = []
    for row in c:
        title = 0 if str(row['title']) == '' else int(row['title'])
        pairs.append([int(row['item_id']), title])
    return pairs
def Titles_d_creation(c, titles_l, n_u_t, nr_items):
    """Build {title_id: [item_count, [item_ids], idf]} for every distinct title.

    Mirrors Tags_d_creation(): titles_l is the [item_id, title_id] pair list
    from Titles_list(); idf = log(nr_items / count) for non-empty groups.
    """
    titles_d = {}
    for title_id in Titles_Dist_list(c, n_u_t):
        # [number of distinct items carrying the title, those item ids, idf]
        titles_d[title_id] = [0, [], 0.0]
    for item_id, title_id in titles_l:
        members = titles_d[title_id][1]
        if item_id not in members:
            titles_d[title_id][0] += 1
            members.append(item_id)
    for title_id in titles_d:
        group_size = titles_d[title_id][0]
        if group_size > 0:
            # keep the file's original division semantics unchanged
            titles_d[title_id][2] = math.log(nr_items / group_size)
    return titles_d
import logging
import os
import shutil
import numpy as np
import torch
from pytorch_metric_learning.utils import common_functions as pml_cf
from sklearn.model_selection import train_test_split
from torchmetrics.functional import accuracy as tmf_accuracy
from ..adapters import Finetuner
from ..containers import Models, Optimizers
from ..datasets import DataloaderCreator, SourceDataset
from ..models import Discriminator
from ..utils import common_functions as c_f
from ..utils.savers import Saver
from .accuracy_validator import AccuracyValidator
from .base_validator import BaseValidator
from .score_history import ScoreHistory
class DeepEmbeddedValidator(BaseValidator):
    """
    Implementation of
    [Towards Accurate Model Selection in Deep Unsupervised Domain Adaptation](http://proceedings.mlr.press/v97/you19a.html)

    Scores a checkpoint by the negated importance-weighted source-validation
    risk (DEV risk), so that a higher returned score is better.
    """
    def __init__(
        self,
        temp_folder,
        layer="features",
        num_workers=0,
        batch_size=32,
        error_fn=None,
        error_layer="logits",
        framework_cls=None,
        **kwargs,
    ):
        # temp_folder: scratch directory for the domain-classifier checkpoints
        # layer: feature key used to train the domain classifier
        # error_fn: per-sample loss on src_val; defaults to unreduced CE
        # error_layer: output key fed into error_fn
        # framework_cls: training-framework wrapper; defaults to Ignite
        super().__init__(**kwargs)
        self.temp_folder = temp_folder
        self.layer = layer
        self.num_workers = num_workers
        self.batch_size = batch_size
        self.error_fn = c_f.default(
            error_fn, torch.nn.CrossEntropyLoss(reduction="none")
        )
        self.error_layer = error_layer
        self.framework_cls = framework_cls
        if self.framework_cls is None:
            # lazy import so the ignite dependency is only needed by default use
            from ..frameworks.ignite import Ignite
            self.framework_cls = Ignite
        # diagnostics filled in by compute_score, exposed as recordables
        self.D_accuracy_val = None
        self.D_accuracy_test = None
        self.mean_error = None
        self._DEV_recordable = ["D_accuracy_val", "D_accuracy_test", "mean_error"]
        pml_cf.add_to_recordable_attributes(self, list_of_names=self._DEV_recordable)
    def compute_score(self, src_train, src_val, target_train):
        # silence the inner domain-classifier training logs, restore afterwards
        init_logging_level = c_f.LOGGER.level
        c_f.LOGGER.setLevel(logging.WARNING)
        weights, self.D_accuracy_val, self.D_accuracy_test = get_weights(
            src_train[self.layer],
            src_val[self.layer],
            target_train[self.layer],
            self.num_workers,
            self.batch_size,
            self.temp_folder,
            self.framework_cls,
        )
        # per-sample validation error, importance-weighted into the DEV risk;
        # negated so callers can maximize the score
        error_per_sample = self.error_fn(src_val[self.error_layer], src_val["labels"])
        output = get_dev_risk(weights, error_per_sample[:, None])
        self.mean_error = torch.mean(error_per_sample).item()
        c_f.LOGGER.setLevel(init_logging_level)
        return -output
    def extra_repr(self):
        x = super().extra_repr()
        x += f"\n{c_f.extra_repr(self, self._DEV_recordable)}"
        return x
#########################################################################
#### ADAPTED FROM https://github.com/thuml/Deep-Embedded-Validation #####
#########################################################################
def get_dev_risk(weight, error):
    """
    Control-variate estimate of the importance-weighted risk (DEV risk).

    :param weight: shape [N, 1], the importance weight for N source samples in the validation set
    :param error: shape [N, 1], the error value for each source sample in the validation set
        (typically 0 for correct classification and 1 for wrong classification)
    """
    if torch.any(weight < 0) or torch.any(error < 0):
        raise ValueError("weights and errors must be positive")
    w = pml_cf.to_numpy(weight)
    e = pml_cf.to_numpy(error)
    n_w, d_w = w.shape
    n_e, d_e = e.shape
    assert n_w == n_e and d_w == d_e, "dimension mismatch!"
    weighted = w * e
    # off-diagonal entry of the 2x2 covariance of (weighted error, weight)
    cov = np.cov(np.concatenate((weighted, w), axis=1), rowvar=False)[0][1]
    eta = -cov / (np.var(w, ddof=1) + 1e-6)
    # control-variate correction: E[weight] is 1 in expectation
    return np.mean(weighted) + eta * np.mean(w) - eta
def get_weights(
    source_feature,
    validation_feature,
    target_feature,
    num_workers,
    batch_size,
    temp_folder,
    framework_cls,
):
    """
    Train a source-vs-target domain classifier and derive importance weights
    for the source validation samples (density-ratio estimates).

    :param source_feature: shape [N_tr, d], features from training set
    :param validation_feature: shape [N_v, d], features from validation set
    :param target_feature: shape [N_te, d], features from test set
    :return: (weights [N_v, 1], best val accuracy of the domain classifier,
        accuracy of that classifier on the validation features)
    """
    device = source_feature.device
    source_feature = pml_cf.to_numpy(source_feature)
    validation_feature = pml_cf.to_numpy(validation_feature)
    target_feature = pml_cf.to_numpy(target_feature)
    N_s, d = source_feature.shape
    N_t, _d = target_feature.shape
    source_feature = source_feature.copy()
    target_feature = target_feature.copy()
    # label 1 = source, 0 = target; the classifier learns to tell them apart
    all_feature = np.concatenate((source_feature, target_feature))
    all_label = np.asarray([1] * N_s + [0] * N_t, dtype=np.int64)
    (
        feature_for_train,
        feature_for_test,
        label_for_train,
        label_for_test,
    ) = train_test_split(all_feature, all_label, train_size=0.8)
    train_set = SourceDataset(
        pml_cf.EmbeddingDataset(feature_for_train, label_for_train)
    )
    val_set = SourceDataset(pml_cf.EmbeddingDataset(feature_for_test, label_for_test))
    # grid-search the discriminator's weight decay; keep the best by val accuracy
    decays = [1e-1, 3e-2, 1e-2, 3e-3, 1e-3, 3e-4, 1e-4, 3e-5, 1e-5]
    val_acc, trainers, savers, folders = [], [], [], []
    epochs = 100
    patience = 2
    for i, decay in enumerate(decays):
        torch.cuda.empty_cache()
        curr_folder = os.path.join(temp_folder, f"DeepEmbeddedValidation{i}")
        models = Models(
            {
                "G": torch.nn.Identity(),
                "C": Discriminator(d, h=d, out_size=2).to(device),
            }
        )
        optimizers = Optimizers(
            (torch.optim.Adam, {"lr": 0.001, "weight_decay": decay})
        )
        trainer = Finetuner(models=models, optimizers=optimizers)
        validator = AccuracyValidator(
            torchmetric_kwargs={"average": "macro", "num_classes": 2}
        )
        validator = ScoreHistory(validator)
        saver = Saver(folder=curr_folder)
        trainer = framework_cls(
            trainer, validator=validator, saver=saver, with_pbars=False
        )
        datasets = {"train": train_set, "src_val": val_set}
        # batch size may not exceed either dataset's length
        bs = int(np.min([len(train_set), len(val_set), batch_size]))
        acc, _ = trainer.run(
            datasets,
            dataloader_creator=DataloaderCreator(
                num_workers=num_workers, batch_size=bs
            ),
            max_epochs=epochs,
            validation_interval=1,
            patience=patience,
        )
        val_acc.append(acc)
        trainers.append(trainer)
        savers.append(saver)
        folders.append(curr_folder)
        torch.cuda.empty_cache()
    # reload the best discriminator and score the source validation features
    D_accuracy_val = max(val_acc)
    index = val_acc.index(D_accuracy_val)
    labels = torch.ones(len(validation_feature), dtype=int)
    validation_set = SourceDataset(pml_cf.EmbeddingDataset(validation_feature, labels))
    trainer, saver = trainers[index], savers[index]
    saver.load_adapter(trainer.adapter, "best")
    bs = min(len(validation_set), batch_size)
    dataloader = torch.utils.data.DataLoader(
        validation_set, num_workers=num_workers, batch_size=bs
    )
    domain_out = trainer.get_all_outputs(dataloader, "val")
    domain_out = domain_out["val"]["preds"]
    # density ratio p_target/p_source via the classifier's class probabilities,
    # rescaled by the source/target sample-count ratio
    weights = (domain_out[:, :1] / domain_out[:, 1:]) * (float(N_s) / N_t)
    # clean up the per-decay checkpoint folders
    [shutil.rmtree(f) for f in folders]
    D_accuracy_test = tmf_accuracy(domain_out, labels.to(domain_out.device)).item()
    return weights, D_accuracy_val, D_accuracy_test
| nilq/baby-python | python |
#!/usr/bin/env python
# $Id$
""" Abstract base class for driver classes"""
import exceptions
class DriverError(exceptions.Exception):
    """Exception raised by Driver implementations on device failures."""
    def __init__(self, arg):
        # delegate to the (Python 2) exceptions.Exception base constructor
        exceptions.Exception.__init__(self,arg)
class Driver:
    """Abstract interface for media (tape) drivers.

    Concrete drivers must override every operation below; the base versions
    raise NotImplementedError.  The exceptions are mount_delay (class
    attribute: seconds to wait after mounting, subclasses may override) and
    get_cleaning_bit, which defaults to "no cleaning needed".
    """

    mount_delay = 0

    def fileno(self):
        """Return the OS-level file descriptor of the open device."""
        raise NotImplementedError

    def tell(self):
        """Return the current position on the medium."""
        raise NotImplementedError

    def open(self, device, mode, retry_count=10):
        """Open the given device in the given mode, retrying on failure."""
        raise NotImplementedError

    def flush(self, device):
        """Flush buffered data out to the device."""
        raise NotImplementedError

    def close(self):
        """Close the device."""
        raise NotImplementedError

    def rewind(self):
        """Rewind the medium to the beginning."""
        raise NotImplementedError

    def seek(self, where, eot_ok=0):
        """Seek to a position; eot_ok permits seeking to end-of-tape."""
        raise NotImplementedError

    def skipfm(self, n):
        """Skip n filemarks."""
        raise NotImplementedError

    def get_status(self):
        """Return driver/device status information."""
        raise NotImplementedError

    def verify_label(self, volume_label, mode, expected_length=80):
        """Check that the medium carries the expected volume label."""
        raise NotImplementedError

    def set_mode(self, density=None, compression=None, blocksize=None):
        """Configure density, compression and blocksize for the device."""
        raise NotImplementedError

    def rates(self):
        """Return read/write transfer rates."""
        raise NotImplementedError

    def get_cleaning_bit(self):
        """Return 1 if the drive requests cleaning; base drivers never do."""
        return 0
| nilq/baby-python | python |
#
# Copyright (c) 2015-2016 Erik Derr [derr@cs.uni-saarland.de]
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
#!/usr/bin/python
#
# Crawler for libraries hosted at mvn central
# Retrieves jar|aar files along with some meta data
import json
import urllib2
import datetime
import os
import errno
import zipfile
import traceback
from retrying import retry # may require "pip install retrying"
## functions ##
def unix2Date(unixTime):
    """Convert a millisecond Unix timestamp to a 'DD.MM.YYYY' date string.

    mvn central reports timestamps in milliseconds; integer division by 1000
    drops them (replacing the old string-slicing trick, which broke for
    values shorter than four digits).
    """
    seconds = int(unixTime) // 1000
    return datetime.datetime.fromtimestamp(seconds).strftime('%d.%m.%Y')
def make_sure_path_exists(path):
    """Create path (and all missing parents) if it does not exist yet.

    An already-existing directory is fine; every other OSError propagates.
    """
    if os.path.isdir(path):
        return
    try:
        os.makedirs(path)
    except OSError as err:
        # a concurrent creator may have won the race; that is not an error
        if err.errno != errno.EEXIST:
            raise
def write_library_description(fileName, libName, category, version, date, comment):
    """Write the library.xml descriptor for one library version.

    The parent directory is created on demand; the output format matches the
    descriptor schema expected by the library-detection pipeline.
    """
    make_sure_path_exists(os.path.dirname(fileName))
    content = [
        "<?xml version=\"1.0\"?>\n",
        "<library>\n",
        " <!-- library name -->\n",
        " <name>{}</name>\n".format(libName),
        "\n",
        " <!-- Advertising, Analytics, Android, SocialMedia, Cloud, Utilities -->\n",
        " <category>{}</category>\n".format(category),
        "\n",
        " <!-- optional: version string -->\n",
        " <version>{}</version>\n".format(version),
        "\n",
        " <!-- optional: date (format: DD/MM/YYYY) -->\n",
        " <releasedate>{}</releasedate>\n".format(date),
        "\n",
        " <!-- optional: comment -->\n",
        " <comment>{}</comment>\n".format(comment),
        "</library>\n",
    ]
    with open(fileName, "w") as desc:
        desc.writelines(content)
# FIX: the old decorator call used tries/delay/backoff keywords, which belong
# to the separate "retry" package -- the retrying package's @retry does not
# accept them (this is why the original TODO said it "does not work").
# Use retrying's real keywords: retry only on URLError, at most 3 attempts,
# 3 seconds between attempts.
@retry(retry_on_exception=lambda exc: isinstance(exc, urllib2.URLError),
       stop_max_attempt_number=3, wait_fixed=3000)
def urlopen_with_retry(URL):
    """Open URL with urllib2, retrying transient network failures."""
    return urllib2.urlopen(URL)
def downloadFile(targetDir, groupid, artefactid, version, filetype):
    """Download one artifact version from mvn central into targetDir.

    Returns 0 on success, 1 on any failure (HTTP error, URL error, or
    anything else raised during download/unpacking).
    """
    make_sure_path_exists(os.path.dirname(targetDir + "/"))
    # assemble download URL
    baseURL = "http://search.maven.org/remotecontent?filepath="
    artefactid_r = artefactid.replace(".","/")
    groupid_r = groupid.replace(".","/")
    URL = baseURL + groupid_r + "/" + artefactid_r + "/"
    # sometimes it just returns the type "bundle", we then access the jar file
    if filetype == "bundle":
        filetype = "jar"
    # NOTE(review): fileName is built from artefactid_r (dots already replaced
    # by slashes) -- for an artifactId containing dots this would embed a '/'
    # in the file name; confirm artifactIds with dots never occur here.
    fileName = artefactid_r + "-" + version + "." + filetype
    URL = URL + version + "/" + fileName
    # retrieve and save file
    targetFile = targetDir + "/" + fileName
    try:
        libFile = urllib2.urlopen(URL)
        with open(targetFile,'wb') as output:
            output.write(libFile.read())
        # if filetype is aar unzip classes.jar (since WALA currently does not handle aar's directly)
        if filetype == "aar":
            fh = open(targetFile, 'rb')
            z = zipfile.ZipFile(fh)
            for f in z.namelist():
                if f == "classes.jar":
                    z.extract(f, targetDir)
            fh.close()
        return 0
    except urllib2.HTTPError as e:
        print 'HTTPError = ' + str(e.code)
        return 1
    except urllib2.URLError as e:
        print 'URLError = ' + str(e.reason)
        return 1
    except Exception, excp:
        print 'Download failed (' + str(excp) + ')'
        return 1
def updateLibrary(libName, category, comment, groupId, artefactId):
    """Fetch meta data for one library from mvn central and download any
    versions that are not yet present under rootDir/category/libName/.

    For each newly downloaded version a library.xml descriptor is written
    next to the artifact.  Failures to retrieve the meta data skip the
    library entirely.
    """
    # replace all blanks with dash
    libName = libName.replace(" ", "-")
    print " # check library " + libName + " [" + category + "] (g:\"" + groupId + "\" AND a:\"" + artefactId + "\")"
    baseDirName = rootDir + category + "/" + libName + "/"
    dir = os.path.dirname(baseDirName)
    make_sure_path_exists(dir);
    # Assemble mvn central search URL and retrieve meta data
    try:
        mvnSearchURL = "http://search.maven.org/solrsearch/select?q=g:%22" + groupId + "%22+AND+a:%22" + artefactId + "%22&rows=100&core=gav"
        response = urllib2.urlopen(mvnSearchURL)
        data = json.loads(response.read())
    except urllib2.URLError, e:
        print 'URLError = ' + str(e.reason)
        return
    except Exception, excp:
        print 'Could not retrieve meta data for ' + libName + ' [SKIP] (' + str(excp) + ')'
        return
    # DEBUG: pretty print json
    #print json.dumps(data, indent=4, sort_keys=True)
    #print
    numberOfVersions = data["response"]["numFound"]
    print " - retrieved meta data for " + str(numberOfVersions) + " versions:"
    numberOfUpdates = 0
    if numberOfVersions > 0:
        for version in data["response"]["docs"]:
            # skip lib version if already existing (presence of library.xml
            # marks a completed download)
            if not os.path.isfile(baseDirName + "/" + version["v"] + "/" + libDescriptorFileName):
                numberOfUpdates += 1
                date = unix2Date(version["timestamp"])
                targetDir = baseDirName + version["v"]
                print "   - update version: {}   type: {}   date: {}  target-dir: {}".format(version["v"], version["p"], date, targetDir)
                result = downloadFile(targetDir, groupId, artefactId, version["v"], version["p"])
                if result == 0:
                    # write lib description only after a successful download
                    fileName = targetDir + "/" + "library.xml"
                    write_library_description(fileName, libName, category, version["v"], date, comment)
    if numberOfUpdates == 0:
        print "   -> all versions up-to-date"
## Main functionality ##
# input catalogue of libraries to crawl and the per-version descriptor name
inputFile = "libraries-ILC.json"
libDescriptorFileName = "library.xml"
# NOTE(review): the backslashes in this Windows path are literal here, but
# the pattern is fragile (e.g. "\t" would become a tab) -- prefer a raw
# string or forward slashes when changing it.
rootDir = "E:\gradute\libs-ILC/"    ### change this directory to your lib-sdks dir ###
print "== mvn central crawler =="
# load iterate over lib json
with open(inputFile) as ifile:
    data = json.load(ifile)
# update each lib
for lib in data["libraries"]:
    updateLibrary(lib["name"], lib["category"], lib["comment"], lib["groupid"], lib["artefactid"])
| nilq/baby-python | python |
def train_interupter():
    """Poll train_interupter.ini for a training-interrupt request.

    Returns True (and resets the flag back to '0') when the file contains
    '1', False when it contains '0'.  Any other content raises ValueError.
    """
    with open('train_interupter.ini', 'r', encoding='utf-8') as fh:
        flag = fh.read().strip()
    if flag == '1':
        # acknowledge the interrupt by clearing the flag
        with open('train_interupter.ini', 'w', encoding='utf-8') as fh:
            fh.write('0')
        return True
    if flag == '0':
        return False
    raise ValueError('Wrong flag value.')
| nilq/baby-python | python |
# The MIT License (MIT)
# Copyright (c) 2021 Jonah Yolles-Murphy (TG-Techie)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import builtins
from typing import *
__version__ = "0.1.0"
T = TypeVar("T")
Sliceable = Union["Slice[T]", MutableSequence[T]]
class _SliceConstructor(Generic[T]):
    """
    Intermediate helper returned by ``Slice(seq)``: it remembers the sequence
    to be sliced and turns the ``.slice[...]`` / ``.slice(...)`` syntax into
    an actual :class:`Slice`.
    """

    __slots__ = {"_seq"}

    def __init__(self, seq: Sliceable) -> None:
        self._seq = seq

    def __getitem__(self, item: Union[int, builtins.slice]) -> "Slice[T]":
        # a single integer index becomes a length-1 slice
        if isinstance(item, int) or hasattr(item, "__index__"):
            at = item.__index__()
            item = builtins.slice(at, at + 1)
        assert (
            item.step is None
        ), f"slicing cannot be non-contiguous (got `{item.step!r}` for step)"
        base = self._seq
        lo = item.start
        hi = item.stop
        # default and wrap negative endpoints against the base length
        if lo is None:
            lo = 0
        while lo < 0:
            lo += len(base)
        if hi is None:
            hi = len(base)
        while hi < 0:
            hi += len(base)
        return self(
            start=lo,
            length=hi - lo,
        )

    def __call__(self, *, length, start) -> "Slice[T]":
        return Slice(self._seq, start=start, length=length)
class Slice(Generic[T]):
    """
    A more tradition slice of sequences where the created slice mutates the sliced object.
    When using a Slice to mutate the base Sequence the Slice assumes the base will not change size
    ex:
    ```
    ls = [0, 3, -1, 1, 4]
    slc = Slice(ls)[1:4]
    slc[0] = 1
    slc[2] = 3
    assert ls == [0, 1, -1, 3, 4]
    ```
    By default, slicing Slice object will return whatever slicing the base object would normally be.
    ```
    assert type(slc[0:1]) == list # evaluates as True
    ```
    If you want a "sub slice" use .slice to make a further slice
    ```
    sub = slc.slice[1:2]
    sub[0] = 2
    assert ls == [0, 1, 2, 3, 4]
    ```
    """
    Self = Union["Slice"]
    # _seq: the base sequence; _start/_length: the window into it
    _seq: Sliceable
    _start: int
    _length: int
    _constructor: Optional[_SliceConstructor[T]]
    __slots__ = {"_seq", "_start", "_length", "_constructor"}
    def __new__(
        cls: Type[Self],
        seq: Sliceable,
        start=None,
        length=None,
    ):
        # Slice(seq) with no start/length returns the constructor helper so
        # that Slice(seq)[a:b] works; with both it builds a real Slice.
        if start is not None and length is not None:
            return super(Slice, cls).__new__(cls)  # type: ignore
        elif start is None and length is None:
            return _SliceConstructor(seq)
        else:
            raise ValueError(
                f"{cls.__name__} cannot be called with only one of start= and length=, "
                f"got only {'start=' if start is not None else 'length='}"
            )
    def __init__(
        self,
        seq: Sliceable,
        *,
        start=None,  # type: ignore
        length=None,  # type: ignore
    ) -> None:
        # sanitize the inputs, as they must be integers
        start = start.__index__()
        length = length.__index__()
        # verify that the given start and length are in bounds
        if not length >= 1:
            raise ValueError(
                f"Slices cannot be created with lengths less than 1, got {length}"
            )
        if not (0 <= start < len(seq)):
            raise ValueError(f"start index out of bounds, got {start}")
        if not ((start + length) <= len(seq)):
            raise ValueError(
                f"slice out of bounds. starting at {start}, a slice of length {length} extends"
                f" past the end of the sliced sequence "
            )
        # if this is slicing a slice, instead driectly slice the original object
        if isinstance(seq, Slice):
            self._seq = seq._seq
            start += seq._start
        else:
            self._seq = seq
        self._start = start
        self._length = length
        # sanitization
        assert hasattr(start, "__index__"), (
            "start must be an integer, " + f"got {start!r}"
        )
        assert hasattr(length, "__index__"), (
            "length must be an integer, " + f"got {length!r}"
        )
        # this will be lazily evaluated later
        self._constructor = None
    @property
    def slice(self) -> _SliceConstructor[T]:
        # lazily create a constructor for sub slices of this slice
        constructor = self._constructor
        if constructor is None:
            self._constructor = constructor = _SliceConstructor(self)
        return constructor
    def _isinited(self) -> bool:
        # guards against use of a half-constructed instance (see __new__)
        return hasattr(self, "_start") and hasattr(self, "_length")
    def __getitem__(self, index: Union[int, builtins.slice]):
        if isinstance(index, int) or hasattr(index, "__index__"):
            return self._get_item(index.__index__())  # type: ignore
        # idk to test for SupportsIndex in 3.6 yet
        elif isinstance(index, slice):
            return self._get_slice(index)
        else:
            raise TypeError(
                f"{type(self).__name__} indices must be integers or slices, "
                f"not {type(index).__name__}"
            )
    def __setitem__(self, index: Union[int, builtins.slice], value: T) -> None:
        # check for slice assignment as it is not yet supported
        if isinstance(index, builtins.slice):
            # NOTE(review): slice assignment assumes start/stop are non-None
            # and does no bounds checking against the slice length -- confirm
            # this is intentional before relying on it.
            offset = self._start
            self._seq.__setitem__(
                builtins.slice(
                    index.start + offset,
                    index.stop + offset,
                    index.step,
                ),
                value,
            )
            return
        elif isinstance(index, int) or hasattr(index, "__index__"):
            index = self._bounds_check_and_mod(index)
            self._seq[self._start + index] = value
        else:
            raise NotImplementedError()
    def _get_slice(self, s: builtins.slice) -> MutableSequence[T]:
        # NOTE(review): this bounds-checks the *base* start index against the
        # slice's own length, and `stop % self._length` wraps a stop equal to
        # the length around to 0 -- both look buggy for slices that do not
        # begin near the base sequence's start; verify against intended use.
        offset = self._bounds_check_and_mod(self._start)
        stop = s.stop % self._length
        return self._seq[s.start + offset : stop + offset : s.step]
    def _get_item(self, index: int) -> T:
        # check that the index is in range assuming the base sequence has not changed
        index = self._bounds_check_and_mod(index)
        return self._seq[self._start + index]
    def __len__(self) -> int:
        assert self._isinited()
        return self._length
    def __iter__(self) -> Generator[T, None, None]:
        seq = self._seq
        for index in range(self._start, self._start + self._length):
            yield seq[index]
        else:
            # the for-else always runs here; the explicit return is redundant
            return None
    def __repr__(self) -> str:
        # renders as the materialized window prefixed with '$'
        return f"${self._seq[self._start : self._start+self._length]}"
    def _bounds_check_and_mod(self, index: int) -> int:
        # normalize a slice-relative index: reject too-large, wrap negative
        if index >= self._length:
            raise IndexError(
                f"Slice index out of range, got [{index}] in slice of length {self._length}"
            )
        elif index < 0:
            index %= self._length
        else:
            pass
        return index
    def sort(self, **kwargs) -> None:
        # sort the window in place by writing the sorted values back
        for index, value in enumerate(sorted(self, **kwargs)):
            self[index] = value
if __name__ == "__main__":
    # smoke tests for the Slice class, run when the module is executed directly
    # test basic slicing
    ls = [0, 3, -1, 1, 4]
    slc = Slice(ls)[1:4]
    slc[0] = 1
    slc[2] = 3
    assert ls == [0, 1, -1, 3, 4]
    # test sub-slicing
    sub = slc.slice[1:2]
    sub[0] = 2
    assert ls == [0, 1, 2, 3, 4]
    # test slicing types
    ls = [*range(8)]
    # test default start and stop
    slc = Slice(ls)[:]
    assert [*slc] == ls
    # test negative end
    slc = Slice(ls)[0:-1]
    assert [*slc] == ls[0:-1]
    # test negative start
    slc = Slice(ls)[-8:]
    assert [*slc] == ls[-8:]
    # test slice sorting
    ls = [0, 4, 3, 2, 1, 5]
    slc = Slice(ls)[1:-1]
    assert [*slc] == [4, 3, 2, 1]
    slc.sort()
    assert ls == [0, 1, 2, 3, 4, 5]
| nilq/baby-python | python |
import argparse
import logging
from sqlalchemy.orm import Session
from ...db import yield_connection_from_env_ctx
from ..indices import update_installation_default_indices
from ..models import SlackOAuthEvent
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def upgrade_one(
    db_session: Session, bot_installation: SlackOAuthEvent
) -> SlackOAuthEvent:
    """Bring a single version-1 installation up to version 2.

    Creates the default search indices for the installation, bumps its
    version field, and commits the change.  Returns the updated row.
    """
    update_installation_default_indices(db_session, bot_installation)
    bot_installation.version = 2
    db_session.add(bot_installation)
    db_session.commit()
    return bot_installation
def main(args: argparse.Namespace) -> None:
    """Upgrade all version-1 installations (optionally one workspace) to v2.

    Installations marked as deleted are skipped.
    """
    with yield_connection_from_env_ctx() as db_session:
        query = (
            db_session.query(SlackOAuthEvent)
            .filter(SlackOAuthEvent.version == 1)
            # BUG FIX: the previous `SlackOAuthEvent.deleted is not False`
            # tested Python identity on the column object (always True), so
            # deleted installations were never filtered out.  Use a real SQL
            # comparison instead.
            .filter(SlackOAuthEvent.deleted.is_(False))
        )
        if args.workspace is not None:
            query = query.filter(SlackOAuthEvent.team_id == args.workspace)
        installations_for_upgrade = query.all()
        for bot_installation in installations_for_upgrade:
            logger.info(
                f"Upgrading installation {bot_installation.id} for team {bot_installation.team_id} "
                f"({bot_installation.team_name}) to version 2"
            )
            upgrade_one(db_session, bot_installation)
        logger.info("Done!")
if __name__ == "__main__":
    # CLI entry point: optionally restrict the upgrade to one workspace (-w)
    parser = argparse.ArgumentParser(
        description="Set up default search indices for fresh @bugout slack installations"
    )
    parser.add_argument(
        "-w",
        "--workspace",
        required=False,
        type=str,
        default=None,
        help="ID for the bot installation",
    )
    args = parser.parse_args()
    main(args)
| nilq/baby-python | python |
from extractors.blockextractor import BlockExtractor
from extractors.characterfactory import CharacterFactory
from extractors.emojiextractor import EmojiExtractor
from extractors.mathcollectionextractor import MathExtractor
from extractors.nerdextractor import NerdExtractor
if __name__ == "__main__":
    # run every extractor; the block and math extractors share one
    # CharacterFactory so unicode character metadata is loaded only once
    character_factory = CharacterFactory()
    EmojiExtractor().extract()
    BlockExtractor(character_factory).extract()
    MathExtractor(character_factory).extract()
    NerdExtractor().extract()
| nilq/baby-python | python |
"""AnimeSuki Media models"""
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.text import slugify
from animesuki.core.models import ArtworkModel
from animesuki.core.utils import DatePrecision
from animesuki.history.models import HistoryModel
class Media(HistoryModel):
    """A media entry (anime, manga or novel) with release data and artwork.

    History tracking comes from HistoryModel; changes to the fields listed in
    HISTORY_MODERATE_FIELDS require moderation.
    """
    class Type:
        # top-level media kind
        ANIME = 1
        MANGA = 2
        NOVEL = 3
        choices = (
            (ANIME, 'Anime'),
            (MANGA, 'Manga'),
            (NOVEL, 'Novel'),
        )
    class SubType:
        # fine-grained format, grouped per Type in the choices below
        UNKNOWN = 0
        # Anime
        TV = 1
        OVA = 2
        MOVIE = 3
        WEB = 4
        SPECIAL = 5
        MUSIC = 6
        # Manga
        MANGA = 7
        MANHUA = 8
        MANHWA = 9
        WEB_MANGA = 10
        ONE_SHOT = 11
        DOUJIN = 12
        # Novel
        LIGHT_NOVEL = 13
        WEB_NOVEL = 14
        NOVEL = 15
        choices = (
            (UNKNOWN, 'Unknown'),
            ('Anime', (
                (TV, 'TV'),
                (OVA, 'OVA'),
                (MOVIE, 'Movie'),
                (WEB, 'Web'),
                (SPECIAL, 'Special'),
                (MUSIC, 'Music'),
            )),
            ('Manga', (
                (MANGA, 'Manga'),
                (MANHUA, 'Manhua'),
                (MANHWA, 'Manhwa'),
                (WEB_MANGA, 'Web Manga'),
                (ONE_SHOT, 'One Shot'),
                (DOUJIN, 'Doujin'),
            )),
            ('Novel', (
                (LIGHT_NOVEL, 'Light Novel'),
                (WEB_NOVEL, 'Web Novel'),
                (NOVEL, 'Novel'),
            ))
        )
    class Status:
        # AUTO derives the display status from start/end dates (see get_status)
        AUTO = 1
        HIATUS = 2
        CANCELLED = 3
        choices = (
            (AUTO, 'Automatic'),
            (HIATUS, 'On Hiatus'),
            (CANCELLED, 'Cancelled')
        )
    class Season:
        WINTER = 1
        SPRING = 2
        SUMMER = 3
        FALL = 4
        choices = (
            (WINTER, 'Winter'),
            (SPRING, 'Spring'),
            (SUMMER, 'Summer'),
            (FALL, 'Fall')
        )
    title = models.CharField('title', max_length=250, blank=True)
    media_type = models.PositiveSmallIntegerField('type', choices=Type.choices, default=Type.ANIME)
    sub_type = models.PositiveSmallIntegerField('sub Type', choices=SubType.choices, default=SubType.UNKNOWN)
    status = models.PositiveSmallIntegerField('status', choices=Status.choices, default=Status.AUTO)
    is_adult = models.BooleanField('r-18', default=False)
    # episode/duration apply to anime; volumes/chapters to manga and novels
    episodes = models.IntegerField('episodes', null=True, blank=True)
    duration = models.IntegerField('duration', null=True, blank=True)
    volumes = models.IntegerField('volumes', null=True, blank=True)
    chapters = models.IntegerField('chapters', null=True, blank=True)
    # dates carry an explicit precision since release info may be partial
    start_date = models.DateField('start date', null=True, blank=True)
    start_precision = models.PositiveSmallIntegerField('precision', choices=DatePrecision.choices,
                                                       default=DatePrecision.FULL)
    end_date = models.DateField('end date', null=True, blank=True)
    end_precision = models.PositiveSmallIntegerField('precision', choices=DatePrecision.choices,
                                                     default=DatePrecision.FULL)
    season_year = models.IntegerField('season year', null=True, blank=True)
    season = models.PositiveSmallIntegerField('season', choices=Season.choices, null=True, blank=True)
    description = models.TextField('description', blank=True)
    synopsis = models.TextField('synopsis', blank=True)
    # currently selected artwork; SET_NULL so deleting artwork keeps the media
    artwork_active = models.ForeignKey('MediaArtwork', related_name='media_artwork', on_delete=models.SET_NULL,
                                       null=True, blank=True, default=None)
    # edits to these fields require moderation (enforced by HistoryModel)
    HISTORY_MODERATE_FIELDS = ('title', 'media_type', 'sub_type', 'is_adult')
    def __str__(self):
        return self.title
    def get_status(self):
        """Return the display status; Status.AUTO derives it from the dates."""
        if self.status != self.Status.AUTO:
            return self.get_status_display()
        # per-type wording for past/present/future releases
        status = {
            self.Type.ANIME: {
                'future': 'Not yet aired',
                'present': 'Currently airing',
                'past': 'Finished'
            },
            self.Type.MANGA: {
                'future': 'Not yet published',
                'present': 'Currently publishing',
                'past': 'Finished'
            },
        }
        status[self.Type.NOVEL] = status[self.Type.MANGA]
        now = timezone.now().date()
        if self.end_date and self.end_date <= now:
            return status[self.media_type]['past']
        elif not self.start_date or self.start_date > now:
            return status[self.media_type]['future']
        else:
            return status[self.media_type]['present']
    def get_absolute_url(self, view='media:detail'):
        """Return the canonical URL: /<type-slug>/<pk>/<title-slug>/."""
        return reverse(view, args=[slugify(self.get_media_type_display()), self.pk, slugify(self.title)])
    class Meta:
        db_table = 'media'
        verbose_name_plural = 'media'
class MediaArtwork(ArtworkModel):
    """Artwork image attached to a Media entry.

    ARTWORK_SIZES lists the (width, height, label) variants generated by
    ArtworkModel: square thumbnails (tNNN) and width-constrained covers.
    """
    media = models.ForeignKey(Media, on_delete=models.PROTECT)
    ARTWORK_FOLDER = 'media'
    ARTWORK_SIZES = ((75, 75, 't75'), (150, 150, 't150'), (225, 225, 't225'), (450, 450, 't450'),
                     (292, 600, '292w'), (352, 800, '352w'), (438, 1000, '438w'),
                     (528, 1200, '528w'), (584, 1200, '584w'), (704, 1400, '704w'))
    def sub_folder(self):
        # images are stored per media id under ARTWORK_FOLDER
        return self.media.pk
    class Meta:
        db_table = 'media_artwork'
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-06-12 07:41
from __future__ import unicode_literals
import bluebottle.files.fields
import bluebottle.utils.fields
from decimal import Decimal
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import djmoney.models.fields
class Migration(migrations.Migration):
    """Auto-generated Django migration for the funding app.

    Creates the BudgetLine, Fundraiser and Reward models, narrows the money
    field currencies to EUR, and wires up the new foreign keys.  Do not edit
    the generated operations by hand.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('files', '0001_initial'),
        ('funding', '0007_auto_20190605_1639'),
    ]

    operations = [
        migrations.CreateModel(
            name='BudgetLine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(default=b'', max_length=255, verbose_name='description')),
                ('amount_currency', djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=3)),
                ('amount', bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'budget line',
                'verbose_name_plural': 'budget lines',
            },
        ),
        migrations.CreateModel(
            name='Fundraiser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('description', models.TextField(blank=True, verbose_name='description')),
                ('amount_currency', djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=3)),
                ('amount', bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12, verbose_name='amount')),
                ('deadline', models.DateTimeField(null=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'fundraiser',
                'verbose_name_plural': 'fundraisers',
            },
        ),
        migrations.CreateModel(
            name='Reward',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amount_currency', djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=3)),
                ('amount', bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12, verbose_name='Amount')),
                ('title', models.CharField(max_length=200, verbose_name='Title')),
                ('description', models.CharField(max_length=500, verbose_name='Description')),
                ('limit', models.IntegerField(blank=True, help_text='How many of this rewards are available', null=True, verbose_name='Limit')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ['-project__created', 'amount'],
                'verbose_name': 'Gift',
                'verbose_name_plural': 'Gifts',
                'permissions': (('api_read_reward', 'Can view reward through the API'), ('api_add_reward', 'Can add reward through the API'), ('api_change_reward', 'Can change reward through the API'), ('api_delete_reward', 'Can delete reward through the API'), ('api_read_own_reward', 'Can view own reward through the API'), ('api_add_own_reward', 'Can add own reward through the API'), ('api_change_own_reward', 'Can change own reward through the API'), ('api_delete_own_reward', 'Can delete own reward through the API')),
            },
        ),
        migrations.AlterField(
            model_name='donation',
            name='amount',
            field=bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12),
        ),
        migrations.AlterField(
            model_name='donation',
            name='amount_currency',
            field=djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=3),
        ),
        migrations.AlterField(
            model_name='funding',
            name='target',
            field=bluebottle.utils.fields.MoneyField(blank=True, currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=None, max_digits=12, null=True),
        ),
        migrations.AlterField(
            model_name='funding',
            name='target_currency',
            field=djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=3),
        ),
        migrations.AddField(
            model_name='reward',
            name='activity',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='funding.Funding', verbose_name='Activity'),
        ),
        migrations.AddField(
            model_name='fundraiser',
            name='activity',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fundraisers', to='funding.Funding', verbose_name='activity'),
        ),
        migrations.AddField(
            model_name='fundraiser',
            name='image',
            field=bluebottle.files.fields.ImageField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='files.Image'),
        ),
        migrations.AddField(
            model_name='fundraiser',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='funding_fundraisers', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='budgetline',
            name='activity',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='funding.Funding'),
        ),
    ]
| nilq/baby-python | python |
from __future__ import annotations
import subprocess
import sys
def test_same_version():
    """Check that the version pip reports for the installed cptk
    distribution matches ``cptk.__version__``."""
    completed = subprocess.run(
        [sys.executable, '-m', 'pip', 'show', 'cptk'],
        stdout=subprocess.PIPE,
        check=True,
        encoding='utf8',
    )
    # Parse the "Key: value" lines emitted by `pip show` into a dict.
    metadata = {}
    for raw_line in completed.stdout.split('\n'):
        key, _, value = raw_line.partition(':')
        metadata[key] = value.strip()
    from cptk import __version__
    assert __version__ == metadata['Version']
| nilq/baby-python | python |
# coding=utf-8 ##以utf-8编码储存中文字符
import jieba.analyse
import codecs,sys
import itertools
from work import match
from io import BufferedReader
from work import simplyParticiple
def Synonym():  # synonym-table builder
    """Load ``same_word.txt`` (one synonym group per line) and return a dict
    mapping every word of a group to the first word of that group (its
    canonical form).

    Later lines override earlier ones on conflicting words, matching the
    original merge order.
    """
    mapping = {}
    # Context manager fixes the original's leaked file handle.
    with codecs.open("same_word.txt", "r", "utf-8") as thesaurus:  # synonym thesaurus
        for line in thesaurus:
            words = line.split()
            for word in words:
                # every word points at the canonical (first) word of its line
                mapping[word] = words[0]
    return mapping
def _synonym(txt):
    """Segment *txt* into tokens and replace every token that appears in
    the synonym table by its canonical form; unknown tokens pass through."""
    tokens = simplyParticiple.participle(txt).split("/")
    table = Synonym()
    return [table.get(token, token) for token in tokens]
def getkeyword(list_A, list_B):
    """Return the items of list_A that also occur in list_B.

    Order and duplicates of list_A are preserved (matching the original
    nested-loop behaviour).  A set is used for the membership test, turning
    the previous O(len_A * len_B) scan into O(len_A + len_B).
    """
    allowed = set(list_B)
    return [item for item in list_A if item in allowed]
def combination(the_list):
    """Concatenate the strings in *the_list* into one string.

    Uses ``str.join`` instead of the original quadratic ``+=`` loop, which
    also shadowed the builtin name ``str``.
    """
    return "".join(the_list)
# list_keyword holds the initial matching keywords loaded from final_keyword.txt.
# The redundant fp.close() inside the with-block was removed: the context
# manager already closes the file on exit.
with open("final_keyword.txt", encoding="utf_8") as fp:
    list_keyword = []
    for line in fp:
        list_keyword.extend(line.split())
def result(txt):
    """Return the knowledge-point keywords recognised in *txt*.

    The text is segmented and synonym-normalised first, then matched
    against the global keyword list ``list_keyword``.
    """
    return getkeyword(_synonym(txt), list_keyword)
| nilq/baby-python | python |
from .common import (
AskHandler,
CommonHandler,
AskCommutativeHandler,
TautologicalHandler,
test_closed_group,
)
__all__ = [
"AskHandler",
"CommonHandler",
"AskCommutativeHandler",
"TautologicalHandler",
"test_closed_group",
]
| nilq/baby-python | python |
from matching_algorithm import matching_algorithm
import json
import copy
class top_trading_cycle(matching_algorithm):
    """Top-trading-cycle matching, runnable in favour of either group."""
    def _run(self, proposers, receivers):
        # Deep copies keep the stored preference structures untouched.
        return self.match(copy.deepcopy(proposers), copy.deepcopy(receivers),
                          'top_trading_cycle', False)
    def group_1_optimal(self):
        return self._run(self.group_1, self.group_2)
    def group_2_optimal(self):
        return self._run(self.group_2, self.group_1)
def get_top_trading_cycle(file_name):
    """Load preferences from the JSON file *file_name* and return the pair
    (group_1-optimal matching, group_2-optimal matching)."""
    with open(file_name) as handle:
        preferences = json.load(handle)
    algorithm = top_trading_cycle(preferences, ('group_1', 'group_2'))
    return algorithm.group_1_optimal(), algorithm.group_2_optimal()
| nilq/baby-python | python |
from setuptools import find_packages, setup
# Packaging metadata for the Skaak chess library (setuptools entry point).
setup(
    name="Skaak",
    packages=find_packages(include=["skaak"]),  # ship only the skaak package
    version="0.12.5",
    description="A Python Chess Library",
    author="George Munyoro",
    license="MIT",
    install_requires=[],  # no runtime dependencies
    setup_requires=["pytest-runner"],
    tests_require=["pytest==6.1.1"],
    test_suite="tests",
)
| nilq/baby-python | python |
"""
Ticket numbers usually consist of an even number of digits. A ticket number is considered lucky if the sum of the first
half of the digits is equal to the sum of the second half.
Given a ticket number n, determine if it's lucky or not.
Example
For n = 1230, the output should be
solution(n) = true;
For n = 239017, the output should be
solution(n) = false.
"""
def solution(n):
    """Return True when ticket number *n* is "lucky": the digit sum of the
    first half equals the digit sum of the second half.

    For an odd number of digits the final digit is ignored, matching the
    original index arithmetic.
    """
    digits = str(n)
    half = len(digits) // 2
    left = sum(int(d) for d in digits[:half])
    right = sum(int(d) for d in digits[half:2 * half])
    return left == right
print(solution(239017))
import graphene
from ipam import filtersets, models
from netbox.graphql.scalars import BigInt
from netbox.graphql.types import BaseObjectType, OrganizationalObjectType, PrimaryObjectType
__all__ = (
'ASNType',
'AggregateType',
'FHRPGroupType',
'FHRPGroupAssignmentType',
'IPAddressType',
'IPRangeType',
'PrefixType',
'RIRType',
'RoleType',
'RouteTargetType',
'ServiceType',
'VLANType',
'VLANGroupType',
'VRFType',
)
class ASNType(PrimaryObjectType):
    """GraphQL type for the ASN model; exposes the AS number as a BigInt."""
    asn = graphene.Field(BigInt)
    class Meta:
        model = models.ASN
        fields = '__all__'
        filterset_class = filtersets.ASNFilterSet
class AggregateType(PrimaryObjectType):
    """GraphQL type for the Aggregate model."""
    class Meta:
        model = models.Aggregate
        fields = '__all__'
        filterset_class = filtersets.AggregateFilterSet
class FHRPGroupType(PrimaryObjectType):
    """GraphQL type for the FHRPGroup model."""
    class Meta:
        model = models.FHRPGroup
        fields = '__all__'
        filterset_class = filtersets.FHRPGroupFilterSet
    def resolve_auth_type(self, info):
        # Map a falsy (blank) auth_type to None so GraphQL returns null.
        return self.auth_type or None
class FHRPGroupAssignmentType(BaseObjectType):
    """GraphQL type for the FHRPGroupAssignment model."""
    class Meta:
        model = models.FHRPGroupAssignment
        fields = '__all__'
        filterset_class = filtersets.FHRPGroupAssignmentFilterSet
class IPAddressType(PrimaryObjectType):
    """GraphQL type for the IPAddress model."""
    class Meta:
        model = models.IPAddress
        fields = '__all__'
        filterset_class = filtersets.IPAddressFilterSet
    def resolve_role(self, info):
        # Map a falsy (blank) role to None so GraphQL returns null.
        return self.role or None
class IPRangeType(PrimaryObjectType):
    """GraphQL type for the IPRange model."""
    class Meta:
        model = models.IPRange
        fields = '__all__'
        filterset_class = filtersets.IPRangeFilterSet
    def resolve_role(self, info):
        # Map a falsy (blank) role to None so GraphQL returns null.
        return self.role or None
class PrefixType(PrimaryObjectType):
    """GraphQL type for the Prefix model."""
    class Meta:
        model = models.Prefix
        fields = '__all__'
        filterset_class = filtersets.PrefixFilterSet
class RIRType(OrganizationalObjectType):
    """GraphQL type for the RIR (Regional Internet Registry) model."""
    class Meta:
        model = models.RIR
        fields = '__all__'
        filterset_class = filtersets.RIRFilterSet
class RoleType(OrganizationalObjectType):
    """GraphQL type for the Role model."""
    class Meta:
        model = models.Role
        fields = '__all__'
        filterset_class = filtersets.RoleFilterSet
class RouteTargetType(PrimaryObjectType):
    """GraphQL type for the RouteTarget model."""
    class Meta:
        model = models.RouteTarget
        fields = '__all__'
        filterset_class = filtersets.RouteTargetFilterSet
class ServiceType(PrimaryObjectType):
    """GraphQL type for the Service model."""
    class Meta:
        model = models.Service
        fields = '__all__'
        filterset_class = filtersets.ServiceFilterSet
class VLANType(PrimaryObjectType):
    """GraphQL type for the VLAN model."""
    class Meta:
        model = models.VLAN
        fields = '__all__'
        filterset_class = filtersets.VLANFilterSet
class VLANGroupType(OrganizationalObjectType):
    """GraphQL type for the VLANGroup model."""
    class Meta:
        model = models.VLANGroup
        fields = '__all__'
        filterset_class = filtersets.VLANGroupFilterSet
class VRFType(PrimaryObjectType):
    """GraphQL type for the VRF model."""
    class Meta:
        model = models.VRF
        fields = '__all__'
        filterset_class = filtersets.VRFFilterSet
| nilq/baby-python | python |
from utils import *
import matplotlib.pyplot as plt
# import matplotlib.colors
from sklearn.preprocessing import StandardScaler
from skimage.transform import resize
from PIL import Image
# Plot the QQE iteration snapshots for the face/glasses experiment.
# NOTE(review): load_variable / evaluate_embedding come from the star import
# of `utils` above -- presumably pickle-style loaders; verify in utils.
path_save = "./results/face_glasses_separation2/"
if not os.path.exists(path_save):
    os.makedirs(path_save)
# color_map = matplotlib.colors.hsv_to_rgb(plt.cm.hsv) # plt.cm.bwr #--> plt.cm.brg, plt.cm.hsv
# color_map = plt.cm.bwr
# Load the initial matched samples of both classes from fixed local paths.
path_1 = "C:/Users/benya/Desktop/my_PhD/QQE/codes/4_results/17_face_glasses_transform_inputSpace/run2_good/algorithm_files/class_" + str(0) + "/fuzzy_QQplot/"
X0 = load_variable(name_of_variable="X_matched_initial", path=path_1)
path_1 = "C:/Users/benya/Desktop/my_PhD/QQE/codes/4_results/17_face_glasses_transform_inputSpace/run2_good/algorithm_files/class_" + str(1) + "/fuzzy_QQplot/"
X1 = load_variable(name_of_variable="X_matched_initial", path=path_1)
# Columns of X are samples; y labels each column with its class (0 or 1).
X = np.column_stack((X0, X1))
y = [0]*X0.shape[1] + [1]*X1.shape[1]
y = np.asarray(y)
# Iterations 0/20/30 refresh class 0; iteration 10 refreshes class 1.
for i, plot_name in enumerate(["X_matched_iteration_0", "X_matched_iteration_20", "X_matched_iteration_30", "X_matched_iteration_10"]):
    if i <= 2:
        class_index_of_plot = 0
    else:
        class_index_of_plot = 1
    path_1 = "C:/Users/benya/Desktop/my_PhD/QQE/codes/4_results/17_face_glasses_transform_inputSpace/run2_good/algorithm_files/class_" + str(class_index_of_plot) + "/fuzzy_QQplot/iterations_numpy/"
    X_class = load_variable(name_of_variable=plot_name, path=path_1)
    # The first snapshot keeps the initial data; later ones overwrite the
    # columns of the class being updated (X is mutated across iterations).
    if i != 0:
        X[:, y==class_index_of_plot] = X_class
    # plt.scatter(X[0, :], X[1, :], c=y, cmap=color_map, edgecolors='k')
    markers = ["v", "o"]
    colors = ["r", "b"]
    for class_index in range(2):
        sample_of_this_class = X[:, y == class_index]
        # c = class_index * np.ones((sample_of_this_class.shape[1],))
        plt.scatter(sample_of_this_class[0, :], sample_of_this_class[1, :], s=30, color=colors[class_index], alpha=1.0, marker=markers[class_index])
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    # plt.show()
    plt.savefig(path_save + str(i) + ".png")
    plt.clf()
    plt.close()
    evaluate_embedding(embedding=X.T, labels=y, path_save_accuracy_of_test_data=path_save, k_list=[1, 2, 4, 8, 16], name=str(i))
| nilq/baby-python | python |
from flask_apscheduler import APScheduler
from actions import *
from context import *
from config import Config
class Executor:
    """
    Drives a pipeline composed of a sequence of actions sharing one context.
    """
    def __init__(self, config: Config, pipeline_name, pipeline):
        self.config = config
        self.pipeline_name = pipeline_name
        self.pipeline = pipeline
        self.__context = Context()
        from logger import Logger
        # Each Executor has its own log file
        self.logger = Logger("%s.log" % pipeline_name).get_logger()
    def start(self):
        pass
    def get_context(self):
        """Return the Context object shared by all actions of this pipeline."""
        return self.__context
    def execute(self, args):
        """
        Public entry point: run the configured action chain.
        """
        self.__on_execute(args)
    def __on_execute(self, args):
        """
        Walk the action chain from the configured start action, executing
        each action against the shared context until an action has no
        successor or its config is missing.
        """
        action_name = Config.get_start_action_name(self.pipeline)
        while action_name:
            action_config = Config.get_action_config(self.pipeline, action_name)
            if not action_config:
                break
            if 'type' not in action_config:
                print("No action type for ", action_name)
                # NOTE(review): exits with success code 0 on a config error;
                # consider a nonzero code or raising instead.
                exit(0)
            # fixed: the original assigned action_type twice in a row
            action_type = action_config['type']
            action = BaseAction.create_action(action_type, action_config)
            print(action_name, action)
            action.try_execute(self.get_context())
            action_name = action.get_next()
| nilq/baby-python | python |
#!/usr/bin/env python
import os
try:
import cplex
except ImportError:
cplex = None
import numpy as np
from mapel.voting.metrics.inner_distances import hamming
# FOR SUBELECTIONS
def solve_lp_voter_subelection(election_1, election_2, metric_name='0'):
    """LP solver for the voter-subelection problem.

    Builds a binary program pairing voters (N variables) and candidates
    (M variables) of the two elections, and maximises the number of paired
    voters whose matched candidates' positions differ by at most
    int(metric_name) everywhere.  Returns the objective value, or None
    when CPLEX raises during solve.
    """
    cp = cplex.Cplex()
    cp.parameters.threads.set(1)
    # OBJECTIVE FUNCTION: maximise the number of matched voter pairs
    cp.objective.set_sense(cp.objective.sense.maximize)
    objective = []
    names = []
    for v1 in range(election_1.num_voters):
        for v2 in range(election_2.num_voters):
            names.append('N' + str(v1) + '_' + str(v2))
            objective.append(1.)
    cp.variables.add(obj=objective, names=names,
                     types=[cp.variables.type.binary] * election_1.num_voters * election_2.num_voters)
    # FIRST CONSTRAINT FOR VOTERS: each voter of election 1 matched at most once
    lin_expr = []
    for v1 in range(election_1.num_voters):
        ind = []
        for v2 in range(election_2.num_voters):
            ind.append('N' + str(v1) + '_' + str(v2))
        lin_expr.append(cplex.SparsePair(ind=ind, val=[1.0] * election_2.num_voters))
    cp.linear_constraints.add(lin_expr=lin_expr,
                              senses=['L'] * election_1.num_voters,
                              rhs=[1.0] * election_1.num_voters,
                              names=['C1_' + str(i) for i in range(election_1.num_voters)])
    # SECOND CONSTRAINT FOR VOTERS: each voter of election 2 matched at most once
    lin_expr = []
    for v2 in range(election_2.num_voters):
        ind = []
        for v1 in range(election_1.num_voters):
            ind.append('N' + str(v1) + '_' + str(v2))
        lin_expr.append(cplex.SparsePair(ind=ind, val=[1.0] * election_1.num_voters))
    cp.linear_constraints.add(lin_expr=lin_expr,
                              senses=['L'] * election_2.num_voters,
                              rhs=[1.0] * election_2.num_voters,
                              names=['C2_' + str(i) for i in range(election_2.num_voters)])
    # ADD VARIABLES FOR CANDIDATES
    names = []
    for c1 in range(election_1.num_candidates):
        for c2 in range(election_2.num_candidates):
            names.append('M' + str(c1) + '_' + str(c2))
    cp.variables.add(names=list(names),
                     types=[cp.variables.type.binary] * election_1.num_candidates * election_2.num_candidates)
    # FIRST CONSTRAINT FOR CANDIDATES: each candidate of election 1 matched exactly once
    lin_expr = []
    for c1 in range(election_1.num_candidates):
        ind = []
        for c2 in range(election_2.num_candidates):
            ind.append('M' + str(c1) + '_' + str(c2))
        lin_expr.append(cplex.SparsePair(ind=ind, val=[1.0] * election_2.num_candidates))
    cp.linear_constraints.add(lin_expr=lin_expr,
                              senses=['E'] * election_1.num_candidates,
                              rhs=[1.0] * election_1.num_candidates,
                              names=['C3_' + str(i) for i in range(election_1.num_candidates)])
    # SECOND CONSTRAINT FOR CANDIDATES: each candidate of election 2 matched exactly once
    lin_expr = []
    for c2 in range(election_2.num_candidates):
        ind = []
        for c1 in range(election_1.num_candidates):
            ind.append('M' + str(c1) + '_' + str(c2))
        lin_expr.append(cplex.SparsePair(ind=ind, val=[1.0] * election_1.num_candidates))
    cp.linear_constraints.add(lin_expr=lin_expr,
                              senses=['E'] * election_2.num_candidates,
                              rhs=[1.0] * election_2.num_candidates,
                              names=['C4_' + str(i) for i in range(election_2.num_candidates)])
    # MAIN CONSTRAINT FOR VOTES: N_v1_v2 may be 1 only if the candidate matching
    # aligns the two votes within the allowed positional distance everywhere
    lin_expr = []
    for v1 in range(election_1.num_voters):
        for v2 in range(election_2.num_voters):
            ind = []
            val = []
            for c1 in range(election_1.num_candidates):
                for c2 in range(election_2.num_candidates):
                    ind.append('M' + str(c1) + '_' + str(c2))
                    if abs(election_1.potes[v1][c1] - election_2.potes[v2][c2]) <= int(metric_name):
                        val.append(1.)
                    else:
                        val.append(0.)
            ind.append('N' + str(v1) + '_' + str(v2))
            val.append(-election_1.num_candidates)
            lin_expr.append(cplex.SparsePair(ind=ind, val=val))
    cp.linear_constraints.add(lin_expr=lin_expr,
                              senses=['G'] * election_1.num_voters * election_2.num_voters,
                              rhs=[0.0] * election_1.num_voters * election_2.num_voters,
                              names=['C5_' + str(i) for i in
                                     range(election_1.num_voters * election_2.num_voters)])
    # SOLVE THE ILP
    cp.set_results_stream(None)
    try:
        cp.solve()
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        print("Exception raised while solving")
        return
    objective_value = cp.solution.get_objective_value()
    return objective_value
def solve_lp_candidate_subelections(lp_file_name, election_1, election_2):
    """LP solver for the candidate-subelection problem.

    Writes a binary program to *lp_file_name*, solves it with CPLEX and
    returns the size of the best candidate matching (the sum of the M
    assignment matrix), or None when the solver fails.

    Large blocks of dead commented-out code from the original were removed;
    the emitted LP file is unchanged.
    """
    # PRECOMPUTING: P[v][u][c1][d1][c2][d2] == 1 iff voter v (election 1) and
    # voter u (election 2) order the pairs (c1, c2) and (d1, d2) consistently.
    P = np.zeros([election_1.num_voters, election_2.num_voters, election_1.num_candidates,
                  election_2.num_candidates,
                  election_1.num_candidates, election_2.num_candidates])
    for v in range(election_1.num_voters):
        for u in range(election_2.num_voters):
            for c1 in range(election_1.num_candidates):
                for d1 in range(election_2.num_candidates):
                    for c2 in range(election_1.num_candidates):
                        for d2 in range(election_2.num_candidates):
                            if (election_1.potes[v][c1] > election_1.potes[v][c2] and
                                    election_2.potes[u][d1] > election_2.potes[u][d2]) or \
                                    (election_1.potes[v][c1] < election_1.potes[v][c2] and
                                     election_2.potes[u][d1] < election_2.potes[u][d2]):
                                P[v][u][c1][d1][c2][d2] = 1
    # CREATE LP FILE
    lp_file = open(lp_file_name, 'w')
    lp_file.write("Maximize\nobj: ")
    first = True
    for c in range(election_1.num_candidates):
        for d in range(election_2.num_candidates):
            if not first:
                lp_file.write(" + ")
            first = False
            lp_file.write(" M_" + str(c) + "_" + str(d))
    lp_file.write("\n")
    lp_file.write("Subject To\n")
    ctr_c = 0
    # FIRST CONSTRAINT FOR VOTERS: each voter of election 1 matched exactly once
    for v in range(election_1.num_voters):
        lp_file.write("c" + str(ctr_c) + ":")
        first = True
        for u in range(election_2.num_voters):
            if not first:
                lp_file.write(" +")
            first = False
            lp_file.write(" N_" + str(v) + "_" + str(u))
        lp_file.write(" = 1" + "\n")
        ctr_c += 1
    # SECOND CONSTRAINT FOR VOTERS: each voter of election 2 matched exactly once
    for u in range(election_2.num_voters):
        lp_file.write("c" + str(ctr_c) + ":")
        first = True
        for v in range(election_1.num_voters):
            if not first:
                lp_file.write(" +")
            first = False
            lp_file.write(" N_" + str(v) + "_" + str(u))
        lp_file.write(" = 1" + "\n")
        ctr_c += 1
    # FIRST CONSTRAINT FOR CANDIDATES: candidate of election 1 matched at most once
    for c in range(election_1.num_candidates):
        lp_file.write("c" + str(ctr_c) + ":")
        first = True
        for d in range(election_2.num_candidates):
            if not first:
                lp_file.write(" +")
            first = False
            lp_file.write(" M_" + str(c) + "_" + str(d))
        lp_file.write(" <= 1" + "\n")
        ctr_c += 1
    # SECOND CONSTRAINT FOR CANDIDATES: candidate of election 2 matched at most once
    for d in range(election_2.num_candidates):
        lp_file.write("c" + str(ctr_c) + ":")
        first = True
        for c in range(election_1.num_candidates):
            if not first:
                lp_file.write(" +")
            first = False
            lp_file.write(" M_" + str(c) + "_" + str(d))
        lp_file.write(" <= 1" + "\n")
        ctr_c += 1
    # FIRST CONSTRAINT FOR P: P can be 1 only when N and both M variables are 1
    for v in range(election_1.num_voters):
        for u in range(election_2.num_voters):
            for c1 in range(election_1.num_candidates):
                for d1 in range(election_2.num_candidates):
                    for c2 in range(election_1.num_candidates):
                        if c1 == c2:
                            continue
                        for d2 in range(election_2.num_candidates):
                            if d1 == d2:
                                continue
                            lp_file.write("c" + str(ctr_c) + ":")
                            lp_file.write(" P_" + str(v) + "_" + str(u) + "_" +
                                          str(c1) + "_" + str(d1) + "_" + str(c2) + "_" + str(d2))
                            lp_file.write(" - 0.34 N_" + str(v) + "_" + str(u))
                            lp_file.write(" - 0.34 M_" + str(c1) + "_" + str(d1))
                            lp_file.write(" - 0.34 M_" + str(c2) + "_" + str(d2))
                            lp_file.write(" <= 0" + "\n")
                            ctr_c += 1
    # SECOND CONSTRAINT FOR P: P must be 1 when N and both M variables are 1
    for v in range(election_1.num_voters):
        for u in range(election_2.num_voters):
            for c1 in range(election_1.num_candidates):
                for d1 in range(election_2.num_candidates):
                    for c2 in range(election_1.num_candidates):
                        if c1 == c2:
                            continue
                        for d2 in range(election_2.num_candidates):
                            if d1 == d2:
                                continue
                            lp_file.write("c" + str(ctr_c) + ":")
                            lp_file.write(" P_" + str(v) + "_" + str(u) + "_" +
                                          str(c1) + "_" + str(d1) + "_" + str(c2) + "_" + str(d2))
                            lp_file.write(" - 0.34 N_" + str(v) + "_" + str(u))
                            lp_file.write(" - 0.34 M_" + str(c1) + "_" + str(d1))
                            lp_file.write(" - 0.34 M_" + str(c2) + "_" + str(d2))
                            lp_file.write(" > -1" + "\n")
                            ctr_c += 1
    # THIRD CONSTRAINT FOR P: P is bounded by the precomputed agreement value
    for v in range(election_1.num_voters):
        for u in range(election_2.num_voters):
            for c1 in range(election_1.num_candidates):
                for d1 in range(election_2.num_candidates):
                    for c2 in range(election_1.num_candidates):
                        if c1 == c2:
                            continue
                        for d2 in range(election_2.num_candidates):
                            if d1 == d2:
                                continue
                            lp_file.write("c" + str(ctr_c) + ":")
                            lp_file.write(" P_" + str(v) + "_" + str(u) + "_" +
                                          str(c1) + "_" + str(d1) + "_" + str(c2) + "_" + str(d2))
                            lp_file.write(" <= " + str(P[v][u][c1][d1][c2][d2]) + "\n")
                            ctr_c += 1
    lp_file.write("Binary\n")
    for v in range(election_1.num_voters):
        for u in range(election_2.num_voters):
            for c1 in range(election_1.num_candidates):
                for d1 in range(election_2.num_candidates):
                    for c2 in range(election_1.num_candidates):
                        if c1 == c2:
                            continue
                        for d2 in range(election_2.num_candidates):
                            if d1 == d2:
                                continue
                            lp_file.write("P_" + str(v) + "_" + str(u) + "_" +
                                          str(c1) + "_" + str(d1) + "_" + str(c2) + "_" + str(
                                d2) + "\n")
    for v in range(election_1.num_voters):
        for u in range(election_2.num_voters):
            lp_file.write("N_" + str(v) + "_" + str(u) + "\n")
    for c in range(election_1.num_candidates):
        for d in range(election_2.num_candidates):
            lp_file.write("M_" + str(c) + "_" + str(d) + "\n")
    lp_file.write("End\n")
    lp_file.close()
    # SECOND PART: solve the generated model
    cp_lp = cplex.Cplex(lp_file_name)
    cp_lp.parameters.threads.set(1)
    cp_lp.set_results_stream(None)
    try:
        cp_lp.solve()
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        print("Exception raised during solve")
        return
    # Read back the candidate-matching matrix and return its size.
    result = np.zeros([election_1.num_candidates, election_1.num_candidates])
    for i in range(election_1.num_candidates):
        for j in range(election_1.num_candidates):
            name = 'M_' + str(i) + '_' + str(j)
            result[i][j] = cp_lp.solution.get_values(name)
    return sum(sum(result))
# FOR METRICS
def solve_lp_matching_vector_with_lp(cost_table, length):
    """LP solver for vectors' matching.

    Solves the assignment problem over the *length* x *length* cost_table
    and returns the minimal total cost, or None when CPLEX fails.
    """
    cp = cplex.Cplex()
    cp.parameters.threads.set(1)
    # OBJECTIVE FUNCTION: minimise the cost of the chosen assignment
    cp.objective.set_sense(cp.objective.sense.minimize)
    objective = []
    names = []
    pos = 0
    for i in range(length):
        for j in range(length):
            names.append('x' + str(pos))
            objective.append(cost_table[i][j])
            pos += 1
    cp.variables.add(obj=objective, names=names,
                     types=[cp.variables.type.binary] * length ** 2)
    # FIRST GROUP OF CONSTRAINTS: each row matched exactly once
    lin_expr = []
    for i in range(length):
        ind = []
        for j in range(length):
            pos = i * length + j
            ind.append('x' + str(pos))
        lin_expr.append(cplex.SparsePair(ind=ind, val=[1.0] * length))
    cp.linear_constraints.add(lin_expr=lin_expr,
                              senses=['E'] * length,
                              rhs=[1.0] * length)
    # SECOND GROUP OF CONSTRAINTS: each column matched exactly once
    lin_expr = []
    for j in range(length):
        ind = []
        for i in range(length):
            pos = i * length + j
            ind.append('x' + str(pos))
        lin_expr.append(cplex.SparsePair(ind=ind, val=[1.0] * length))
    cp.linear_constraints.add(lin_expr=lin_expr,
                              senses=['E'] * length,
                              rhs=[1.0] * length)
    # SOLVE THE ILP
    cp.set_results_stream(None)
    try:
        cp.solve()
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        print("Exception raised while solving")
        return
    objective_value = cp.solution.get_objective_value()
    return objective_value
def solve_lp_matching_interval(cost_table, length_1, length_2):
    """Solve the many-to-many interval matching over cost_table.

    Every row is matched length_2 times and every column length_1 times;
    returns objective / (length_1 * length_2), or None on solver failure.
    """
    precision = length_1 * length_2
    c = cplex.Cplex()
    c.parameters.threads.set(1)
    # OBJECTIVE FUNCTION
    c.objective.set_sense(c.objective.sense.minimize)
    c.objective.set_name("Obj")
    objective = []
    names = []
    pos = 0
    for i in range(length_1):
        for j in range(length_2):
            names.append('x' + str(pos))
            objective.append(cost_table[i][j])
            pos += 1
    c.variables.add(obj=objective, names=names,
                    types=[c.variables.type.integer] * precision)
    # FIRST GROUP OF CONSTRAINTS: each row used exactly length_2 times
    lin_expr = []
    c_names = []
    for i in range(length_1):
        ind = []
        for j in range(length_2):
            pos = i * length_2 + j
            ind.append('x' + str(pos))
        lin_expr.append(cplex.SparsePair(ind=ind, val=[1.0] * length_2))
        c_names.append('c1_' + str(i))
    c.linear_constraints.add(lin_expr=lin_expr,
                             senses=['E'] * length_1,
                             rhs=[length_2] * length_1,
                             names=c_names)
    # SECOND GROUP OF CONSTRAINTS: each column used exactly length_1 times
    lin_expr = []
    c_names = []
    for j in range(length_2):
        ind = []
        for i in range(length_1):
            pos = i * length_2 + j
            ind.append('x' + str(pos))
        lin_expr.append(cplex.SparsePair(ind=ind, val=[1.0] * length_1))
        c_names.append('c2_' + str(j))
    c.linear_constraints.add(lin_expr=lin_expr,
                             senses=['E'] * length_2,
                             rhs=[length_1] * length_2,
                             names=c_names)
    # Debug dumps of the model, kept for backward compatibility.
    c.write('interval.lp')
    c.write('interval.mps')
    # SOLVE THE ILP
    c.set_results_stream(None)
    try:
        c.solve()
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        print("Exception raised while solving")
        return
    result = c.solution.get_objective_value() / precision
    return result
# DODGSON SCORE
def generate_lp_file_dodgson_score(lp_file_name, N=None, e=None, D=None):
    """Write the Dodgson-score ILP to *lp_file_name*.

    N[i]  -- multiplicity of vote type i (fixes y{i}_0),
    e[i][j][k] -- cumulative support of candidate k after j swaps in type i,
    D[k]  -- deficit candidate k must gain.

    The file is now written through a context manager: the original left the
    handle open, so the solver could read a partially flushed file.
    """
    with open(lp_file_name, 'w') as lp_file:
        # objective: minimise the total number of swaps (j swaps cost j each)
        lp_file.write("Minimize\nobj: ")
        first = True
        for i in range(len(N)):
            for j in range(1, len(D)):
                if not first:
                    lp_file.write(" + ")
                first = False
                lp_file.write(str(j) + " y" + str(i) + "_" + str(j))
        lp_file.write("\n")
        lp_file.write("Subject To\n")
        ctr_c = 0
        # fix y{i}_0 to the multiplicity of vote type i
        for i in range(len(N)):
            lp_file.write("c" + str(ctr_c) + ":")
            lp_file.write(" y" + str(i) + "_" + str(0) + " = " + str(N[i]) + "\n")
            ctr_c += 1
        # each candidate k must gain at least D[k] additional support
        for k in range(len(D)):
            lp_file.write("c" + str(ctr_c) + ":")
            first = True
            for i in range(len(N)):
                for j in range(1, len(D)):
                    if not first:
                        lp_file.write(" +")
                    first = False
                    lp_file.write(" " + str(e[i][j][k] - e[i][j - 1][k]) + " y" + str(i) + "_" + str(j))
            lp_file.write(" >= " + str(D[k]) + "\n")
            ctr_c += 1
        # monotonicity: y{i}_{j-1} >= y{i}_{j}
        for i in range(len(N)):
            for j in range(1, len(D)):
                lp_file.write("c" + str(ctr_c) + ":")
                lp_file.write(
                    " y" + str(i) + "_" + str(j - 1) + " - y" + str(i) + "_" + str(j) + " >= 0" + "\n")
                ctr_c += 1
        # non-negativity (kept although the General section already implies it)
        for i in range(len(N)):
            for j in range(len(D)):
                lp_file.write("c" + str(ctr_c) + ":")
                lp_file.write(" y" + str(i) + "_" + str(j) + " >= 0" + "\n")
                ctr_c += 1
        lp_file.write("General\n")
        for i in range(len(N)):
            for j in range(len(D)):
                lp_file.write("y" + str(i) + "_" + str(j) + "\n")
                ctr_c += 1
        lp_file.write("End\n")
def solve_lp_dodgson_score(lp_file_name):
    """Solve the Dodgson-score LP stored in *lp_file_name* with CPLEX and
    return its objective value (None when the solver fails)."""
    cp_lp = cplex.Cplex(lp_file_name)
    cp_lp.parameters.threads.set(1)
    cp_lp.set_results_stream(None)  # silence solver output
    try:
        cp_lp.solve()
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        print("Exception raised during solve")
        return
    return cp_lp.solution.get_objective_value()
# FOR WINNERS - needs update
def generate_lp_file_borda_owa(owa, lp_file_name, params, votes):
    """Write the Borda-OWA winner-selection ILP to *lp_file_name*.

    params must provide 'voters', 'orders' and 'candidates'; votes is the
    voters' preference matrix; owa the OWA weight vector over orders.
    The file is now closed via a context manager -- the original never
    closed it, so the solver could read a partially flushed file.
    """
    with open(lp_file_name, 'w') as lp_file:
        lp_file.write("Maximize\nobj: ")
        pos = 0
        first = True
        for i in range(params['voters']):
            for j in range(params['orders']):
                for k in range(params['candidates']):
                    # NOTE(review): a negative owa[j] would emit a term without
                    # a separator, producing a malformed LP -- behaviour kept.
                    if not first and owa[j] >= 0.:
                        lp_file.write(" + ")
                    first = False
                    lp_file.write(str(owa[j]) + " x" + str(pos))
                    pos += 1
        lp_file.write("\n")
        lp_file.write("Subject To\n")
        # exactly params['orders'] winners are selected
        lp_file.write("c0:")
        first = True
        for i in range(params['candidates']):
            if not first:
                lp_file.write(" +")
            first = False
            lp_file.write(" y" + str(i))
        lp_file.write(' = ' + str(params['orders']) + '\n')
        # x variables may only count positions covered by selected winners
        for i in range(params['voters']):
            for j in range(params['candidates']):
                lp_file.write("c" + str(i * params['candidates'] + j + 1) + ": ")
                pos = i * params['orders'] * params['candidates'] + j
                first = True
                for k in range(params['orders']):
                    if not first:
                        lp_file.write(" +")
                    first = False
                    lp_file.write(" x" + str(pos + params['candidates'] * k))
                for k in range(0, j + 1):
                    lp_file.write(" - y" + str(int(votes[i][k])))
                lp_file.write(" <= 0 \n")
        lp_file.write("Binary\n")
        for i in range(params['voters'] * params['orders'] * params['candidates']):
            lp_file.write("x" + str(i) + "\n")
        for i in range(params['candidates']):
            lp_file.write("y" + str(i) + "\n")
        lp_file.write("End\n")
def generate_lp_file_bloc_owa(owa, lp_file_name, params, votes, t_bloc):
    """Write the Bloc-OWA winner-selection ILP to *lp_file_name*.

    Identical to the Borda variant except that only the position
    k == t_bloc - 1 contributes an objective term.  The file is now closed
    via a context manager -- the original never closed it, so the solver
    could read a partially flushed file.
    """
    with open(lp_file_name, 'w') as lp_file:
        lp_file.write("Maximize\nobj: ")
        pos = 0
        first = True
        for i in range(params['voters']):
            for j in range(params['orders']):
                for k in range(params['candidates']):
                    # only the t_bloc-th position contributes to the objective
                    if not first:
                        if k == t_bloc - 1:
                            lp_file.write(" + ")
                    first = False
                    if k == t_bloc - 1:
                        lp_file.write(str(owa[j]) + " x" + str(pos))
                    pos += 1
        lp_file.write("\n")
        lp_file.write("Subject To\n")
        # exactly params['orders'] winners are selected
        lp_file.write("c0:")
        first = True
        for i in range(params['candidates']):
            if not first:
                lp_file.write(" +")
            first = False
            lp_file.write(" y" + str(i))
        lp_file.write(' = ' + str(params['orders']) + '\n')
        # x variables may only count positions covered by selected winners
        for i in range(params['voters']):
            for j in range(params['candidates']):
                lp_file.write("c" + str(i * params['candidates'] + j + 1) + ": ")
                pos = i * params['orders'] * params['candidates'] + j
                first = True
                for k in range(params['orders']):
                    if not first:
                        lp_file.write(" +")
                    first = False
                    lp_file.write(" x" + str(pos + params['candidates'] * k))
                for k in range(0, j + 1):
                    lp_file.write(" - y" + str(int(votes[i][k])))
                lp_file.write(" <= 0 \n")
        lp_file.write("Binary\n")
        for i in range(params['voters'] * params['orders'] * params['candidates']):
            lp_file.write("x" + str(i) + "\n")
        for i in range(params['candidates']):
            lp_file.write("y" + str(i) + "\n")
        lp_file.write("End\n")
def get_winners_from_lp(tmp_file, params, candidates):
    """Solve the winner-selection ILP in *tmp_file* and return the sorted
    list of the params['orders'] winners as candidate indices.

    NOTE(review): mutates the caller's params dict (sets params['pure'] =
    True), which also makes the `candidates` lookup branch unreachable.
    """
    cp_lp = cplex.Cplex(tmp_file)
    cp_lp.parameters.threads.set(1)
    cp_lp.set_results_stream(None)  # silence solver output
    try:
        cp_lp.solve()
    except cplex.CplexSolverError:
        print("Exception raised during solve")
        return
    # Read back the y (winner-indicator) variables.
    result = [0.] * params['candidates']
    for i in range(params['candidates']):
        result[i] = cp_lp.solution.get_values('y' + str(i))
    # print(result)
    params['pure'] = True
    winner_id = 0
    winners = [0.] * params['orders']
    for i in range(params['candidates']):
        if result[i] == 1.:
            if params['pure']:
                winners[winner_id] = i
            else:
                winners[winner_id] = candidates[i]
            winner_id += 1
    winners = sorted(winners)
    return winners
"""
def generate_lp_file_matching_matrix_half(lp_file_name, matrix_1, matrix_2, length):
# [1, 4, 6, 9, 11]
# [1, 5, 6, 9, 11]
print(matrix_1)
print(matrix_2)
lp_file = open(lp_file_name, 'w')
lp_file.write("Minimize\n") # obj: ")
first = True
for k in range(length):
for l in range(length):
for i in range(k+1, length):
for j in range(l+1, length):
if not first:
lp_file.write(" + ")
first = False
weight = abs(matrix_1[k][i] - matrix_2[l][j])#**2
print(weight)
lp_file.write(str(weight) + " P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
lp_file.write("\n")
lp_file.write("Subject To\n")
for k in range(length):
for l in range(length):
for i in range(k+1, length):
for j in range(l+1, length):
lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
lp_file.write(" - " + "M" + "i" + str(i) + "j" + str(j) + " <= 0" + "\n")
lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
lp_file.write(" - " + "M" + "i" + str(k) + "j" + str(l) + " <= 0" + "\n")
for i in range(length):
first = True
for j in range(length):
if not first:
lp_file.write(" + ")
first = False
lp_file.write("M" + "i" + str(i) + "j" + str(j))
lp_file.write(" = 1" + "\n")
for j in range(length):
first = True
for i in range(length):
if not first:
lp_file.write(" + ")
first = False
lp_file.write("M" + "i" + str(i) + "j" + str(j))
lp_file.write(" = 1" + "\n")
# Not sure about this part #
for k in range(length):
for i in range(k+1, length):
if k == i:
continue
first = True
for l in range(length):
for j in range(l+1, length):
if l == j:
continue
if not first:
lp_file.write(" + ")
first = False
lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
lp_file.write(" = 1" + "\n")
# Not sure about this part #
for l in range(length):
for j in range(l+1, length):
if l == j:
continue
first = True
for k in range(length):
for i in range(k+1, length):
if k == i:
continue
if not first:
lp_file.write(" + ")
first = False
lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
lp_file.write(" = 1" + "\n")
lp_file.write("Binary\n")
for k in range(length):
for l in range(length):
for i in range(k+1, length):
for j in range(l+1, length):
lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j) + "\n")
for i in range(length):
for j in range(length):
lp_file.write("M" + "i" + str(i) + "j" + str(j) + "\n")
lp_file.write("End\n")
"""
def generate_lp_file_matching_matrix(lp_file_name, matrix_1, matrix_2, length, inner_distance):
    """Write the ILP (CPLEX LP format) for the optimal matrix-matching problem.

    Variables:
        Mi{i}j{j}          -- index i of matrix_1 is matched to index j of matrix_2.
        Pk{k}l{l}i{i}j{j}  -- the pair (k, i) of matrix_1 is matched to (l, j).

    Args:
        lp_file_name: path of the LP file to write.
        matrix_1, matrix_2: square matrices (indexable as m[a][b]) of size length.
        length: dimension of both matrices.
        inner_distance: callable(d1, d2) returning the cost of matching entry
            d1 of matrix_1 with entry d2 of matrix_2.
    """
    # 'with' guarantees the file is flushed and closed even on error; the
    # original handle was never closed.
    with open(lp_file_name, 'w') as lp_file:
        lp_file.write("Minimize\n")
        first = True
        for k in range(length):
            for l in range(length):
                for i in range(length):
                    if i == k:
                        continue
                    for j in range(length):
                        if j == l:
                            continue
                        if not first:
                            lp_file.write(" + ")
                        first = False
                        weight = inner_distance(matrix_1[k][i], matrix_2[l][j])
                        lp_file.write(
                            str(weight) + " P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(
                                j))
        lp_file.write("\n")
        lp_file.write("Subject To\n")
        # Linking constraints: a pair variable P may be 1 only if both of its
        # endpoint matchings M are selected.
        for k in range(length):
            for l in range(length):
                for i in range(length):
                    if i == k:
                        continue
                    for j in range(length):
                        if j == l:
                            continue
                        lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
                        lp_file.write(" - " + "M" + "i" + str(i) + "j" + str(j) + " <= 0" + "\n")
                        lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
                        lp_file.write(" - " + "M" + "i" + str(k) + "j" + str(l) + " <= 0" + "\n")
        # M is a permutation matrix: each row and each column sums to 1.
        for i in range(length):
            first = True
            for j in range(length):
                if not first:
                    lp_file.write(" + ")
                first = False
                lp_file.write("M" + "i" + str(i) + "j" + str(j))
            lp_file.write(" = 1" + "\n")
        for j in range(length):
            first = True
            for i in range(length):
                if not first:
                    lp_file.write(" + ")
                first = False
                lp_file.write("M" + "i" + str(i) + "j" + str(j))
            lp_file.write(" = 1" + "\n")
        # Not sure about this part # (comment kept from the original author)
        # Each ordered pair (k, i) of matrix_1 is matched to exactly one pair.
        for k in range(length):
            for i in range(length):
                if k == i:
                    continue
                first = True
                for l in range(length):
                    for j in range(length):
                        if l == j:
                            continue
                        if not first:
                            lp_file.write(" + ")
                        first = False
                        lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
                lp_file.write(" = 1" + "\n")
        # ... and symmetrically for every ordered pair (l, j) of matrix_2.
        for l in range(length):
            for j in range(length):
                if l == j:
                    continue
                first = True
                for k in range(length):
                    for i in range(length):
                        if k == i:
                            continue
                        if not first:
                            lp_file.write(" + ")
                        first = False
                        lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
                lp_file.write(" = 1" + "\n")
        lp_file.write("Binary\n")
        for k in range(length):
            for l in range(length):
                for i in range(length):
                    if i == k:
                        continue
                    for j in range(length):
                        if j == l:
                            continue
                        lp_file.write(
                            "P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j) + "\n")
        for i in range(length):
            for j in range(length):
                lp_file.write("M" + "i" + str(i) + "j" + str(j) + "\n")
        lp_file.write("End\n")
def solve_lp_matrix(lp_file_name, matrix_1, matrix_2, length):
    """Solve the matching LP stored in lp_file_name; return its objective value.

    Args:
        lp_file_name: path of the LP file produced by
            generate_lp_file_matching_matrix.
        matrix_1, matrix_2, length: unused here; kept so the signature mirrors
            the generator and existing callers keep working.

    Returns:
        The objective value, or None when the solver fails.
    """
    cp_lp = cplex.Cplex(lp_file_name)
    cp_lp.set_results_stream(None)
    cp_lp.parameters.threads.set(1)
    # cp_lp.parameters.mip.tolerances.mipgap = 0.0001
    # cp_lp.parameters.mip.strategy.probe.set(3)
    try:
        cp_lp.solve()
    # The original bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # catch only solver errors, consistent with the other solve_* helpers.
    except cplex.CplexSolverError:
        print("Exception raised during solve")
        return
    # (Commented-out debug dumps of the P/M solution variables removed.)
    # print(cp_lp.solution.get_objective_value())
    return cp_lp.solution.get_objective_value()
# SPEARMAN - old
def generate_ilp_distance(lp_file_name, votes_1, votes_2, params, metric_name):
    """Write the ILP (CPLEX LP format) for the distance between two elections.

    Variables:
        Nk{k}l{l}          -- voter k of election 1 matched to voter l of election 2.
        Mi{i}j{j}          -- candidate i of election 1 matched to candidate j.
        Pk{k}l{l}i{i}j{j}  -- both matchings above selected simultaneously.

    Args:
        lp_file_name: path of the LP file to write.
        votes_1, votes_2: votes[v][r] is the candidate ranked r-th by voter v.
        params: dict with 'voters' and 'candidates' counts.
        metric_name: 'spearman', 'alt' or 'hamming' ('hamming' relies on the
            module-level hamming() helper); any other value gives weight 0.
    """
    # 'with' guarantees the file is flushed and closed even on error.
    with open(lp_file_name, 'w') as lp_file:
        lp_file.write("Minimize\n")  # obj: ")
        first = True
        for k in range(params['voters']):
            for l in range(params['voters']):
                vote_1 = votes_1[k]
                vote_2 = votes_2[l]
                # Position vectors are needed by BOTH 'spearman' and 'alt';
                # the original computed them only for 'spearman', so the 'alt'
                # metric crashed with NameError on pote_1/pote_2.
                if metric_name in ('spearman', 'alt'):
                    pote_1 = [0] * params['candidates']
                    pote_2 = [0] * params['candidates']
                    for i in range(params['candidates']):
                        pote_1[vote_1[i]] = i
                        pote_2[vote_2[i]] = i
                for i in range(params['candidates']):
                    for j in range(params['candidates']):
                        if not first:
                            lp_file.write(" + ")
                        first = False
                        if metric_name == "spearman":
                            weight = abs(pote_1[i] - pote_2[j])
                        elif metric_name == "alt":
                            weight = float(abs(pote_1[i] - pote_2[j]) ** (2)) / float(
                                1. + min(pote_1[i], pote_2[j]))
                        elif metric_name == 'hamming':
                            weight = hamming(vote_1, vote_2)
                        else:
                            weight = 0
                        lp_file.write(
                            str(weight) + " P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(
                                j))
        lp_file.write("\n")
        lp_file.write("Subject To\n")
        # Linking: P may be 1 only if both the candidate matching M and the
        # voter matching N are selected.
        for k in range(params['voters']):
            for l in range(params['voters']):
                for i in range(params['candidates']):
                    for j in range(params['candidates']):
                        lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
                        lp_file.write(" - " + "M" + "i" + str(i) + "j" + str(j) + " <= 0" + "\n")
                        lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
                        lp_file.write(" - " + "N" + "k" + str(k) + "l" + str(l) + " <= 0" + "\n")
        # N is a permutation of voters: each row/column sums to 1.
        for k in range(params['voters']):
            first = True
            for l in range(params['voters']):
                if not first:
                    lp_file.write(" + ")
                first = False
                lp_file.write("N" + "k" + str(k) + "l" + str(l))
            lp_file.write(" = 1" + "\n")
        for l in range(params['voters']):
            first = True
            for k in range(params['voters']):
                if not first:
                    lp_file.write(" + ")
                first = False
                lp_file.write("N" + "k" + str(k) + "l" + str(l))
            lp_file.write(" = 1" + "\n")
        # M is a permutation of candidates: each row/column sums to 1.
        for i in range(params['candidates']):
            first = True
            for j in range(params['candidates']):
                if not first:
                    lp_file.write(" + ")
                first = False
                lp_file.write("M" + "i" + str(i) + "j" + str(j))
            lp_file.write(" = 1" + "\n")
        for j in range(params['candidates']):
            first = True
            for i in range(params['candidates']):
                if not first:
                    lp_file.write(" + ")
                first = False
                lp_file.write("M" + "i" + str(i) + "j" + str(j))
            lp_file.write(" = 1" + "\n")
        # IMPORTANT #
        # Each (voter k, candidate i) of election 1 appears in exactly one P.
        for k in range(params['voters']):
            for i in range(params['candidates']):
                first = True
                for l in range(params['voters']):
                    for j in range(params['candidates']):
                        if not first:
                            lp_file.write(" + ")
                        first = False
                        lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
                lp_file.write(" = 1" + "\n")
        # IMPORTANT #
        # ... and symmetrically for election 2.
        for l in range(params['voters']):
            for j in range(params['candidates']):
                first = True
                for k in range(params['voters']):
                    for i in range(params['candidates']):
                        if not first:
                            lp_file.write(" + ")
                        first = False
                        lp_file.write("P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j))
                lp_file.write(" = 1" + "\n")
        lp_file.write("Binary\n")
        for k in range(params['voters']):
            for l in range(params['voters']):
                for i in range(params['candidates']):
                    for j in range(params['candidates']):
                        lp_file.write(
                            "P" + "k" + str(k) + "l" + str(l) + "i" + str(i) + "j" + str(j) + "\n")
        for k in range(params['voters']):
            for l in range(params['voters']):
                lp_file.write("N" + "k" + str(k) + "l" + str(l) + "\n")
        for i in range(params['candidates']):
            for j in range(params['candidates']):
                lp_file.write("M" + "i" + str(i) + "j" + str(j) + "\n")
        lp_file.write("End\n")
def solve_ilp_distance(lp_file_name, votes_1, votes_2, params, metric_name):
    """Solve the election-distance ILP in lp_file_name; return its objective.

    votes_1, votes_2, params and metric_name are unused here; they are kept so
    the signature mirrors generate_ilp_distance. Returns None when the solver
    raises CplexSolverError.
    """
    solver = cplex.Cplex(lp_file_name)
    solver.set_results_stream(None)
    # solver.parameters.threads.set(1)
    # solver.parameters.timelimit.set(60)
    try:
        solver.solve()
    except cplex.CplexSolverError:
        print("Exception raised during solve")
        return None
    return solver.solution.get_objective_value()
def spearman_cost(single_votes_1, single_votes_2, params, perm):
    """Spearman distance between two votes under the candidate matching perm.

    perm[0] / perm[1] map the candidate ids of the first / second vote onto a
    common labelling; each vote lists candidates from most to least preferred.
    Returns the (float) sum over candidates of the absolute rank difference.
    """
    num_cands = params['candidates']
    position_1 = [0] * num_cands
    position_2 = [0] * num_cands
    for rank in range(num_cands):
        position_1[int(perm[0][single_votes_1[rank]])] = rank
        position_2[int(perm[1][single_votes_2[rank]])] = rank
    return float(sum(abs(position_1[c] - position_2[c]) for c in range(num_cands)))
def spearman_cost_per_cand(single_votes_1, single_votes_2, params, perm):
    """Per-candidate Spearman distance under the candidate matching perm.

    Same computation as spearman_cost, but returns the list of absolute rank
    differences (floats), one entry per candidate of the common labelling.
    """
    num_cands = params['candidates']
    position_1 = [0] * num_cands
    position_2 = [0] * num_cands
    for rank in range(num_cands):
        position_1[int(perm[0][single_votes_1[rank]])] = rank
        position_2[int(perm[1][single_votes_2[rank]])] = rank
    return [float(abs(a - b)) for a, b in zip(position_1, position_2)]
def remove_lp_file(path):
    """Safely remove a temporary LP file, ignoring OS-level failures.

    A missing file (already cleaned up) or a permission problem is not fatal
    here. The original bare `except:` also swallowed KeyboardInterrupt and
    SystemExit; narrow it to OSError, which covers all os.remove failures.
    """
    try:
        os.remove(path)
    except OSError:
        pass
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
# Author : Jesse Wei
# LastUpdate : 2020/10/04
# Impact : Jobs generated by SQLG
# Message : Humanity towards others, we live by sharing. Fear can hold you prisoner, only hope can set you free.
# from __future__ import print_function
import logging
import re
import airflow
import pendulum
from datetime import datetime, timedelta
from airflow.operators.sensors import ExternalTaskSensor
from airflow.operators.python_operator import PythonOperator
from airflow.operators.bash_operator import BashOperator
from airflow.contrib.sensors.file_sensor import FileSensor
from airflow import models
from airflow.models import Variable, DagModel, DagBag
from airflow.operators.python_operator import BranchPythonOperator
from airflow.operators.dummy_operator import DummyOperator
# For ODP platform
# from acme.operators.sqlg_oracle import OracleOperatorWithTemplatedParams
# from airflow.operators.oracle_operator import OracleOperator
from acme.operators.sqlg_mssql import MsSqlOperatorWithTemplatedParams
from airflow.operators.mssql_operator import MsSqlOperator
# DB_NAME = 'DWH' # for future xDB operator
# Project-wide scheduling constants. All DAGs in this file start on this date
# (UTC+8 wall clock).
proj_start_date = pendulum.datetime(2021, 1, 1, tzinfo="Etc/GMT-8")
# Root directory of the templated SQL files (Airflow Variable "sql_path").
tmpl_search_path = Variable.get("sql_path")
data_stage_imp_ptn = '_ODS_'
data_stage = []  # re-assigned per job flow below
# list for standard interval order sequence
# Maps a schedule interval to its frequency rank (smaller = more frequent).
# Used by the BRANCH_* callables to compare a DAG's frequency with its
# upstream's; intervals absent from this map are treated as non-standard.
std_interval = {
    '@once' :1,
    '@hourly' :2,
    '0 5 * * *' :3,
    '0 5 * * 0' :4,
    '0 5 1 * *' :5,
    '0 5 1 */3 *' :6,
    '0 5 1 1 *' :7,
}
# function to sync execution for diff frequency
def sqlg_exec_date_fn(dt, context):
    """Compute the upstream execution date an ExternalTaskSensor should wait on.

    When the sensor waits on D_STG_INIT and this DAG uses a '@...' preset
    interval, the date stored in the Airflow Variable "sqlg_execution_date"
    (set by the planner) is used; otherwise the sensor's own execution date.

    Args:
        dt: execution date passed by Airflow (unused; the context value is
            used instead).
        context: Airflow task context (needs 'ti', 'dag', 'execution_date').
    """
    var_date = Variable.get("sqlg_execution_date")
    ti = context['ti']
    dag = context['dag']
    ti_exec_date = context['execution_date']
    # NOTE(review): assumes schedule_interval is a string here; a None or
    # timedelta interval would make the '[0]' test below raise — confirm all
    # generated DAGs use cron/preset strings.
    schedule_interval = dag.schedule_interval
    # if wait INIT and standard freq then set as default {{ ds }} # set in planner
    # else use dag own execution date
    if ti.task.external_dag_id == 'D_STG_INIT' and schedule_interval[0] == '@':
        exec_date = pendulum.parse(var_date)
    else:
        exec_date = ti_exec_date
    print("sqlg_exec_date_fn::DEBUG:external_dag_id, exec_date:", ti.task.external_dag_id, exec_date)
    return exec_date
# Default arguments shared by every DAG defined in this file.
args = {
    "owner": "SPA010038",
    'start_date': proj_start_date,
    'provide_context': True
}
# XSLT:loop: declaration: END}
# XSLT:loop: JOB_FLOW_NAME: START{
# One DAG per job flow; the flow name split on '_' becomes the tag list
# (e.g. "D_ODS_SCM" -> ["D", "ODS", "SCM"]). All three flows run daily at
# 05:00 with a 4-hour run timeout and at most one active run.
job_flow_name = "D_ODS_SCM"
data_stage = job_flow_name.split('_')
tags = data_stage
D_ODS_SCM = airflow.DAG(
    "D_ODS_SCM",
    tags=tags,
    schedule_interval="0 5 * * *",
    dagrun_timeout=timedelta(minutes=60*4),
    template_searchpath=tmpl_search_path,
    default_args=args,
    # start_date=proj_start_date,
    max_active_runs=1
)
job_flow_name = "D_DM_SCM"
data_stage = job_flow_name.split('_')
tags = data_stage
D_DM_SCM = airflow.DAG(
    "D_DM_SCM",
    tags=tags,
    schedule_interval="0 5 * * *",
    dagrun_timeout=timedelta(minutes=60*4),
    template_searchpath=tmpl_search_path,
    default_args=args,
    # start_date=proj_start_date,
    max_active_runs=1
)
job_flow_name = "D_INT_SCM"
data_stage = job_flow_name.split('_')
tags = data_stage
D_INT_SCM = airflow.DAG(
    "D_INT_SCM",
    tags=tags,
    schedule_interval="0 5 * * *",
    dagrun_timeout=timedelta(minutes=60*4),
    template_searchpath=tmpl_search_path,
    default_args=args,
    # start_date=proj_start_date,
    max_active_runs=1
)
# XSLT:loop: JOB_FLOW_NAME: END}
# JOB_TYPE=ODS-MAIN
# Each generated task below executes the stored procedure SQLEXT.<task_id>_SP,
# passing the execution date rendered in Asia/Taipei time as YYYYMMDD. All
# tasks share the "sql_pool" pool and a 3-hour timeout.
my_taskid = "PNL_Revenue_Cost_A"
PNL_Revenue_Cost_A = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_ODS_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "NRE_Summary"
NRE_Summary = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_ODS_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "Daily_Revenue_F"
Daily_Revenue_F = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_ODS_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "RFQ_Master"
RFQ_Master = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_ODS_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "Inventory_A"
Inventory_A = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_ODS_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "DOI_Actual"
DOI_Actual = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_ODS_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "DM_PNL_Revenue_Cost_A"
DM_PNL_Revenue_Cost_A = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_DM_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "DM_NRE_Summary"
DM_NRE_Summary = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_DM_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "DM_Daily_Revenue_F"
DM_Daily_Revenue_F = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_DM_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "DM_RFQ_Master"
DM_RFQ_Master = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_DM_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "DM_Inventory_A"
DM_Inventory_A = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_DM_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "DM_DOI_Actual"
DM_DOI_Actual = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_DM_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "INT_PNL_Revenue_Cost_A"
INT_PNL_Revenue_Cost_A = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_INT_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "INT_NRE_Summary"
INT_NRE_Summary = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_INT_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "INT_Daily_Revenue_F"
INT_Daily_Revenue_F = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_INT_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "INT_RFQ_Master"
INT_RFQ_Master = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_INT_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "INT_Inventory_A"
INT_Inventory_A = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_INT_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "INT_DOI_Actual"
INT_DOI_Actual = MsSqlOperatorWithTemplatedParams(
    auto_commit=True,
    task_id=my_taskid,
    pool = "sql_pool",
    dag=D_INT_SCM,
    # parameters=({":END_DT_CHAR":"{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}"}),
    timeout=60*60*3,
    sql= "EXECUTE SQLEXT." + my_taskid + "_SP "+
    "{{ (execution_date.astimezone('Asia/Taipei')).strftime('%Y%m%d') }}" +
    ";"
)
# Render cross-DAG sensors white-on-blue so they stand out in the Airflow UI.
ExternalTaskSensor.ui_color = 'white'
ExternalTaskSensor.ui_fgcolor = 'blue'
# tmpl_search_path = Variable.get("sql_path")
# XSLT:loop: JOB_FLOW_NAME-and-PRE_JOB: External:START{{
def branch_D_ODS_SCMxD_STG_INIT__SYS_STS_STG(**context):
    """Decide whether to run the cross-DAG sensor on D_STG_INIT.SYS_STS_STG.

    Returns both the proxy and the sensor task ids when the upstream DAG
    should have a run matching this execution date (identical non-standard
    interval, or an upstream frequency at least as coarse — higher
    std_interval rank — whose latest execution date equals this run's);
    otherwise returns only the proxy so the sensor is skipped.
    """
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_STG_INIT")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG","D_ODS_SCMxD_STG_INIT__SYS_STS_STG"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG","D_ODS_SCMxD_STG_INIT__SYS_STS_STG"]
    return ["proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG"]
my_taskid = "BRANCH_D_ODS_SCMxD_STG_INIT__SYS_STS_STG"
BRANCH_D_ODS_SCMxD_STG_INIT__SYS_STS_STG= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_ODS_SCMxD_STG_INIT__SYS_STS_STG,
    dag=D_ODS_SCM,
    provide_context=True,
)
# Proxy joins the two branch paths regardless of whether the sensor ran.
my_taskid = "proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG"
proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_ODS_SCM,
)
# Cross dag sensor
my_taskid = "D_ODS_SCMxD_STG_INIT__SYS_STS_STG"
D_ODS_SCMxD_STG_INIT__SYS_STS_STG= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_STG_INIT",
    external_task_id="SYS_STS_STG",
    mode="reschedule",
    dag=D_ODS_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
BRANCH_D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG)
BRANCH_D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(D_ODS_SCMxD_STG_INIT__SYS_STS_STG)
D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG)
def branch_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A(**context):
    """Skip the cross-DAG sensor on D_INT_SCM.INT_PNL_Revenue_Cost_A unless the
    upstream DAG has a run matching this execution date (same non-standard
    interval, or an upstream frequency at least as coarse whose latest
    execution date coincides with this run's)."""
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_INT_SCM")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A","D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A","D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A"]
    return ["proxy_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A"]
my_taskid = "BRANCH_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A"
BRANCH_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A,
    dag=D_DM_SCM,
    provide_context=True,
)
my_taskid = "proxy_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A"
proxy_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_DM_SCM,
)
# Cross dag sensor
my_taskid = "D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A"
D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_INT_SCM",
    external_task_id="INT_PNL_Revenue_Cost_A",
    mode="reschedule",
    dag=D_DM_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
BRANCH_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A)
BRANCH_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A.set_downstream(D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A)
D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A)
def branch_D_DM_SCMxD_INT_SCM__INT_NRE_Summary(**context):
    """Skip the cross-DAG sensor on D_INT_SCM.INT_NRE_Summary unless the
    upstream DAG has a run matching this execution date (same non-standard
    interval, or an upstream frequency at least as coarse whose latest
    execution date coincides with this run's)."""
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_INT_SCM")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_NRE_Summary","D_DM_SCMxD_INT_SCM__INT_NRE_Summary"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_NRE_Summary","D_DM_SCMxD_INT_SCM__INT_NRE_Summary"]
    return ["proxy_D_DM_SCMxD_INT_SCM__INT_NRE_Summary"]
my_taskid = "BRANCH_D_DM_SCMxD_INT_SCM__INT_NRE_Summary"
BRANCH_D_DM_SCMxD_INT_SCM__INT_NRE_Summary= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_DM_SCMxD_INT_SCM__INT_NRE_Summary,
    dag=D_DM_SCM,
    provide_context=True,
)
my_taskid = "proxy_D_DM_SCMxD_INT_SCM__INT_NRE_Summary"
proxy_D_DM_SCMxD_INT_SCM__INT_NRE_Summary= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_DM_SCM,
)
# Cross dag sensor
my_taskid = "D_DM_SCMxD_INT_SCM__INT_NRE_Summary"
D_DM_SCMxD_INT_SCM__INT_NRE_Summary= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_INT_SCM",
    external_task_id="INT_NRE_Summary",
    mode="reschedule",
    dag=D_DM_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
BRANCH_D_DM_SCMxD_INT_SCM__INT_NRE_Summary.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_NRE_Summary)
BRANCH_D_DM_SCMxD_INT_SCM__INT_NRE_Summary.set_downstream(D_DM_SCMxD_INT_SCM__INT_NRE_Summary)
D_DM_SCMxD_INT_SCM__INT_NRE_Summary.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_NRE_Summary)
def branch_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F(**context):
    """Skip the cross-DAG sensor on D_INT_SCM.INT_Daily_Revenue_F unless the
    upstream DAG has a run matching this execution date (same non-standard
    interval, or an upstream frequency at least as coarse whose latest
    execution date coincides with this run's)."""
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_INT_SCM")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F","D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F","D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F"]
    return ["proxy_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F"]
my_taskid = "BRANCH_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F"
BRANCH_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F,
    dag=D_DM_SCM,
    provide_context=True,
)
my_taskid = "proxy_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F"
proxy_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_DM_SCM,
)
# Cross dag sensor
my_taskid = "D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F"
D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_INT_SCM",
    external_task_id="INT_Daily_Revenue_F",
    mode="reschedule",
    dag=D_DM_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
BRANCH_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F)
BRANCH_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F.set_downstream(D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F)
D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F)
def branch_D_DM_SCMxD_INT_SCM__INT_RFQ_Master(**context):
    """Skip the cross-DAG sensor on D_INT_SCM.INT_RFQ_Master unless the
    upstream DAG has a run matching this execution date (same non-standard
    interval, or an upstream frequency at least as coarse whose latest
    execution date coincides with this run's)."""
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_INT_SCM")
    # print("branch::DEBUG:upstream.latest_execution_date:", upstream.latest_execution_date)
    # print("branch::DEBUG:mydag.execution_date:", context['execution_date'])
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_RFQ_Master","D_DM_SCMxD_INT_SCM__INT_RFQ_Master"]
    elif std_interval[upstream.schedule_interval] >= std_interval[mydag.schedule_interval]:
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_RFQ_Master","D_DM_SCMxD_INT_SCM__INT_RFQ_Master"]
    return ["proxy_D_DM_SCMxD_INT_SCM__INT_RFQ_Master"]
my_taskid = "BRANCH_D_DM_SCMxD_INT_SCM__INT_RFQ_Master"
BRANCH_D_DM_SCMxD_INT_SCM__INT_RFQ_Master= BranchPythonOperator(
    task_id=my_taskid,
    python_callable=branch_D_DM_SCMxD_INT_SCM__INT_RFQ_Master,
    dag=D_DM_SCM,
    provide_context=True,
)
my_taskid = "proxy_D_DM_SCMxD_INT_SCM__INT_RFQ_Master"
proxy_D_DM_SCMxD_INT_SCM__INT_RFQ_Master= DummyOperator(
    task_id=my_taskid,
    trigger_rule="none_failed_or_skipped",
    dag=D_DM_SCM,
)
# Cross dag sensor
my_taskid = "D_DM_SCMxD_INT_SCM__INT_RFQ_Master"
D_DM_SCMxD_INT_SCM__INT_RFQ_Master= ExternalTaskSensor(
    pool = "sensor_pool",
    task_id=my_taskid,
    external_dag_id="D_INT_SCM",
    external_task_id="INT_RFQ_Master",
    mode="reschedule",
    dag=D_DM_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn
)
BRANCH_D_DM_SCMxD_INT_SCM__INT_RFQ_Master.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_RFQ_Master)
BRANCH_D_DM_SCMxD_INT_SCM__INT_RFQ_Master.set_downstream(D_DM_SCMxD_INT_SCM__INT_RFQ_Master)
D_DM_SCMxD_INT_SCM__INT_RFQ_Master.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_RFQ_Master)
def branch_D_DM_SCMxD_INT_SCM__INT_Inventory_A(**context):
    """Branch callable: decide whether to run the INT_Inventory_A sensor.

    Returns the task_ids to follow: both the proxy and the sensor when the
    upstream DAG (D_INT_SCM) is expected to have a run matching this
    execution_date, otherwise only the proxy (the sensor is skipped).
    """
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_INT_SCM")
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        # Non-standard schedule(s): only sense when both DAGs share the exact
        # same (non-standard) schedule_interval.
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_Inventory_A", "D_DM_SCMxD_INT_SCM__INT_Inventory_A"]
    elif up_sch_interval >= my_sch_interval:
        # Both intervals are standard: reuse the values fetched above instead
        # of re-indexing std_interval a second time (the original did both).
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_Inventory_A", "D_DM_SCMxD_INT_SCM__INT_Inventory_A"]
    return ["proxy_D_DM_SCMxD_INT_SCM__INT_Inventory_A"]
# Branch operator that evaluates the callable above at run time.
BRANCH_D_DM_SCMxD_INT_SCM__INT_Inventory_A = BranchPythonOperator(
    task_id="BRANCH_D_DM_SCMxD_INT_SCM__INT_Inventory_A",
    python_callable=branch_D_DM_SCMxD_INT_SCM__INT_Inventory_A,
    dag=D_DM_SCM,
    provide_context=True,
)
# Join point: downstream work proceeds whether or not the sensor was skipped.
proxy_D_DM_SCMxD_INT_SCM__INT_Inventory_A = DummyOperator(
    task_id="proxy_D_DM_SCMxD_INT_SCM__INT_Inventory_A",
    trigger_rule="none_failed_or_skipped",
    dag=D_DM_SCM,
)
# Cross-DAG sensor: waits for INT_Inventory_A in D_INT_SCM.
D_DM_SCMxD_INT_SCM__INT_Inventory_A = ExternalTaskSensor(
    pool="sensor_pool",
    task_id="D_DM_SCMxD_INT_SCM__INT_Inventory_A",
    external_dag_id="D_INT_SCM",
    external_task_id="INT_Inventory_A",
    mode="reschedule",
    dag=D_DM_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn,
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_DM_SCMxD_INT_SCM__INT_Inventory_A.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_Inventory_A)
BRANCH_D_DM_SCMxD_INT_SCM__INT_Inventory_A.set_downstream(D_DM_SCMxD_INT_SCM__INT_Inventory_A)
D_DM_SCMxD_INT_SCM__INT_Inventory_A.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_Inventory_A)
def branch_D_DM_SCMxD_INT_SCM__INT_DOI_Actual(**context):
    """Branch callable: decide whether to run the INT_DOI_Actual sensor.

    Returns both the proxy and the sensor when upstream D_INT_SCM is expected
    to have a matching run for this execution_date; otherwise only the proxy.
    """
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_INT_SCM")
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        # Non-standard schedule(s): only sense when both are identical.
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_DOI_Actual", "D_DM_SCMxD_INT_SCM__INT_DOI_Actual"]
    elif up_sch_interval >= my_sch_interval:
        # Reuse cached interval values instead of re-indexing std_interval.
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_DM_SCMxD_INT_SCM__INT_DOI_Actual", "D_DM_SCMxD_INT_SCM__INT_DOI_Actual"]
    return ["proxy_D_DM_SCMxD_INT_SCM__INT_DOI_Actual"]
# Branch operator that evaluates the callable above at run time.
BRANCH_D_DM_SCMxD_INT_SCM__INT_DOI_Actual = BranchPythonOperator(
    task_id="BRANCH_D_DM_SCMxD_INT_SCM__INT_DOI_Actual",
    python_callable=branch_D_DM_SCMxD_INT_SCM__INT_DOI_Actual,
    dag=D_DM_SCM,
    provide_context=True,
)
# Join point: downstream work proceeds whether or not the sensor was skipped.
proxy_D_DM_SCMxD_INT_SCM__INT_DOI_Actual = DummyOperator(
    task_id="proxy_D_DM_SCMxD_INT_SCM__INT_DOI_Actual",
    trigger_rule="none_failed_or_skipped",
    dag=D_DM_SCM,
)
# Cross-DAG sensor: waits for INT_DOI_Actual in D_INT_SCM.
D_DM_SCMxD_INT_SCM__INT_DOI_Actual = ExternalTaskSensor(
    pool="sensor_pool",
    task_id="D_DM_SCMxD_INT_SCM__INT_DOI_Actual",
    external_dag_id="D_INT_SCM",
    external_task_id="INT_DOI_Actual",
    mode="reschedule",
    dag=D_DM_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn,
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_DM_SCMxD_INT_SCM__INT_DOI_Actual.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_DOI_Actual)
BRANCH_D_DM_SCMxD_INT_SCM__INT_DOI_Actual.set_downstream(D_DM_SCMxD_INT_SCM__INT_DOI_Actual)
D_DM_SCMxD_INT_SCM__INT_DOI_Actual.set_downstream(proxy_D_DM_SCMxD_INT_SCM__INT_DOI_Actual)
def branch_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A(**context):
    """Branch callable: decide whether to run the PNL_Revenue_Cost_A sensor.

    Returns both the proxy and the sensor when upstream D_ODS_SCM is expected
    to have a matching run for this execution_date; otherwise only the proxy.
    """
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_ODS_SCM")
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        # Non-standard schedule(s): only sense when both are identical.
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A", "D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A"]
    elif up_sch_interval >= my_sch_interval:
        # Reuse cached interval values instead of re-indexing std_interval.
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A", "D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A"]
    return ["proxy_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A"]
# Branch operator that evaluates the callable above at run time.
BRANCH_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A = BranchPythonOperator(
    task_id="BRANCH_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A",
    python_callable=branch_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A,
    dag=D_INT_SCM,
    provide_context=True,
)
# Join point: downstream work proceeds whether or not the sensor was skipped.
proxy_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A = DummyOperator(
    task_id="proxy_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A",
    trigger_rule="none_failed_or_skipped",
    dag=D_INT_SCM,
)
# Cross-DAG sensor: waits for PNL_Revenue_Cost_A in D_ODS_SCM.
D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A = ExternalTaskSensor(
    pool="sensor_pool",
    task_id="D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A",
    external_dag_id="D_ODS_SCM",
    external_task_id="PNL_Revenue_Cost_A",
    mode="reschedule",
    dag=D_INT_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn,
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A)
BRANCH_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A.set_downstream(D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A)
D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A)
def branch_D_INT_SCMxD_ODS_SCM__NRE_Summary(**context):
    """Branch callable: decide whether to run the NRE_Summary sensor.

    Returns both the proxy and the sensor when upstream D_ODS_SCM is expected
    to have a matching run for this execution_date; otherwise only the proxy.
    """
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_ODS_SCM")
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        # Non-standard schedule(s): only sense when both are identical.
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_INT_SCMxD_ODS_SCM__NRE_Summary", "D_INT_SCMxD_ODS_SCM__NRE_Summary"]
    elif up_sch_interval >= my_sch_interval:
        # Reuse cached interval values instead of re-indexing std_interval.
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_INT_SCMxD_ODS_SCM__NRE_Summary", "D_INT_SCMxD_ODS_SCM__NRE_Summary"]
    return ["proxy_D_INT_SCMxD_ODS_SCM__NRE_Summary"]
# Branch operator that evaluates the callable above at run time.
BRANCH_D_INT_SCMxD_ODS_SCM__NRE_Summary = BranchPythonOperator(
    task_id="BRANCH_D_INT_SCMxD_ODS_SCM__NRE_Summary",
    python_callable=branch_D_INT_SCMxD_ODS_SCM__NRE_Summary,
    dag=D_INT_SCM,
    provide_context=True,
)
# Join point: downstream work proceeds whether or not the sensor was skipped.
proxy_D_INT_SCMxD_ODS_SCM__NRE_Summary = DummyOperator(
    task_id="proxy_D_INT_SCMxD_ODS_SCM__NRE_Summary",
    trigger_rule="none_failed_or_skipped",
    dag=D_INT_SCM,
)
# Cross-DAG sensor: waits for NRE_Summary in D_ODS_SCM.
D_INT_SCMxD_ODS_SCM__NRE_Summary = ExternalTaskSensor(
    pool="sensor_pool",
    task_id="D_INT_SCMxD_ODS_SCM__NRE_Summary",
    external_dag_id="D_ODS_SCM",
    external_task_id="NRE_Summary",
    mode="reschedule",
    dag=D_INT_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn,
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_INT_SCMxD_ODS_SCM__NRE_Summary.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__NRE_Summary)
BRANCH_D_INT_SCMxD_ODS_SCM__NRE_Summary.set_downstream(D_INT_SCMxD_ODS_SCM__NRE_Summary)
D_INT_SCMxD_ODS_SCM__NRE_Summary.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__NRE_Summary)
def branch_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F(**context):
    """Branch callable: decide whether to run the Daily_Revenue_F sensor.

    Returns both the proxy and the sensor when upstream D_ODS_SCM is expected
    to have a matching run for this execution_date; otherwise only the proxy.
    """
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_ODS_SCM")
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        # Non-standard schedule(s): only sense when both are identical.
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F", "D_INT_SCMxD_ODS_SCM__Daily_Revenue_F"]
    elif up_sch_interval >= my_sch_interval:
        # Reuse cached interval values instead of re-indexing std_interval.
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F", "D_INT_SCMxD_ODS_SCM__Daily_Revenue_F"]
    return ["proxy_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F"]
# Branch operator that evaluates the callable above at run time.
BRANCH_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F = BranchPythonOperator(
    task_id="BRANCH_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F",
    python_callable=branch_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F,
    dag=D_INT_SCM,
    provide_context=True,
)
# Join point: downstream work proceeds whether or not the sensor was skipped.
proxy_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F = DummyOperator(
    task_id="proxy_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F",
    trigger_rule="none_failed_or_skipped",
    dag=D_INT_SCM,
)
# Cross-DAG sensor: waits for Daily_Revenue_F in D_ODS_SCM.
D_INT_SCMxD_ODS_SCM__Daily_Revenue_F = ExternalTaskSensor(
    pool="sensor_pool",
    task_id="D_INT_SCMxD_ODS_SCM__Daily_Revenue_F",
    external_dag_id="D_ODS_SCM",
    external_task_id="Daily_Revenue_F",
    mode="reschedule",
    dag=D_INT_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn,
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F)
BRANCH_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F.set_downstream(D_INT_SCMxD_ODS_SCM__Daily_Revenue_F)
D_INT_SCMxD_ODS_SCM__Daily_Revenue_F.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F)
def branch_D_INT_SCMxD_ODS_SCM__RFQ_Master(**context):
    """Branch callable: decide whether to run the RFQ_Master sensor.

    Returns both the proxy and the sensor when upstream D_ODS_SCM is expected
    to have a matching run for this execution_date; otherwise only the proxy.
    """
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_ODS_SCM")
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        # Non-standard schedule(s): only sense when both are identical.
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_INT_SCMxD_ODS_SCM__RFQ_Master", "D_INT_SCMxD_ODS_SCM__RFQ_Master"]
    elif up_sch_interval >= my_sch_interval:
        # Reuse cached interval values instead of re-indexing std_interval.
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_INT_SCMxD_ODS_SCM__RFQ_Master", "D_INT_SCMxD_ODS_SCM__RFQ_Master"]
    return ["proxy_D_INT_SCMxD_ODS_SCM__RFQ_Master"]
# Branch operator that evaluates the callable above at run time.
BRANCH_D_INT_SCMxD_ODS_SCM__RFQ_Master = BranchPythonOperator(
    task_id="BRANCH_D_INT_SCMxD_ODS_SCM__RFQ_Master",
    python_callable=branch_D_INT_SCMxD_ODS_SCM__RFQ_Master,
    dag=D_INT_SCM,
    provide_context=True,
)
# Join point: downstream work proceeds whether or not the sensor was skipped.
proxy_D_INT_SCMxD_ODS_SCM__RFQ_Master = DummyOperator(
    task_id="proxy_D_INT_SCMxD_ODS_SCM__RFQ_Master",
    trigger_rule="none_failed_or_skipped",
    dag=D_INT_SCM,
)
# Cross-DAG sensor: waits for RFQ_Master in D_ODS_SCM.
D_INT_SCMxD_ODS_SCM__RFQ_Master = ExternalTaskSensor(
    pool="sensor_pool",
    task_id="D_INT_SCMxD_ODS_SCM__RFQ_Master",
    external_dag_id="D_ODS_SCM",
    external_task_id="RFQ_Master",
    mode="reschedule",
    dag=D_INT_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn,
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_INT_SCMxD_ODS_SCM__RFQ_Master.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__RFQ_Master)
BRANCH_D_INT_SCMxD_ODS_SCM__RFQ_Master.set_downstream(D_INT_SCMxD_ODS_SCM__RFQ_Master)
D_INT_SCMxD_ODS_SCM__RFQ_Master.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__RFQ_Master)
def branch_D_INT_SCMxD_ODS_SCM__Inventory_A(**context):
    """Branch callable: decide whether to run the Inventory_A sensor.

    Returns both the proxy and the sensor when upstream D_ODS_SCM is expected
    to have a matching run for this execution_date; otherwise only the proxy.
    """
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_ODS_SCM")
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        # Non-standard schedule(s): only sense when both are identical.
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_INT_SCMxD_ODS_SCM__Inventory_A", "D_INT_SCMxD_ODS_SCM__Inventory_A"]
    elif up_sch_interval >= my_sch_interval:
        # Reuse cached interval values instead of re-indexing std_interval.
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_INT_SCMxD_ODS_SCM__Inventory_A", "D_INT_SCMxD_ODS_SCM__Inventory_A"]
    return ["proxy_D_INT_SCMxD_ODS_SCM__Inventory_A"]
# Branch operator that evaluates the callable above at run time.
BRANCH_D_INT_SCMxD_ODS_SCM__Inventory_A = BranchPythonOperator(
    task_id="BRANCH_D_INT_SCMxD_ODS_SCM__Inventory_A",
    python_callable=branch_D_INT_SCMxD_ODS_SCM__Inventory_A,
    dag=D_INT_SCM,
    provide_context=True,
)
# Join point: downstream work proceeds whether or not the sensor was skipped.
proxy_D_INT_SCMxD_ODS_SCM__Inventory_A = DummyOperator(
    task_id="proxy_D_INT_SCMxD_ODS_SCM__Inventory_A",
    trigger_rule="none_failed_or_skipped",
    dag=D_INT_SCM,
)
# Cross-DAG sensor: waits for Inventory_A in D_ODS_SCM.
D_INT_SCMxD_ODS_SCM__Inventory_A = ExternalTaskSensor(
    pool="sensor_pool",
    task_id="D_INT_SCMxD_ODS_SCM__Inventory_A",
    external_dag_id="D_ODS_SCM",
    external_task_id="Inventory_A",
    mode="reschedule",
    dag=D_INT_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn,
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_INT_SCMxD_ODS_SCM__Inventory_A.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__Inventory_A)
BRANCH_D_INT_SCMxD_ODS_SCM__Inventory_A.set_downstream(D_INT_SCMxD_ODS_SCM__Inventory_A)
D_INT_SCMxD_ODS_SCM__Inventory_A.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__Inventory_A)
def branch_D_INT_SCMxD_ODS_SCM__DOI_Actual(**context):
    """Branch callable: decide whether to run the DOI_Actual sensor.

    Returns both the proxy and the sensor when upstream D_ODS_SCM is expected
    to have a matching run for this execution_date; otherwise only the proxy.
    """
    mydag = context["dag"]
    dagbag = DagBag()
    upstream = dagbag.get_dag("D_ODS_SCM")
    up_sch_interval = std_interval.get(upstream.schedule_interval)
    my_sch_interval = std_interval.get(mydag.schedule_interval)
    if up_sch_interval is None or my_sch_interval is None:
        # Non-standard schedule(s): only sense when both are identical.
        if (up_sch_interval is None and my_sch_interval is None) and (upstream.schedule_interval == mydag.schedule_interval):
            return ["proxy_D_INT_SCMxD_ODS_SCM__DOI_Actual", "D_INT_SCMxD_ODS_SCM__DOI_Actual"]
    elif up_sch_interval >= my_sch_interval:
        # Reuse cached interval values instead of re-indexing std_interval.
        if upstream.latest_execution_date == context["execution_date"]:
            return ["proxy_D_INT_SCMxD_ODS_SCM__DOI_Actual", "D_INT_SCMxD_ODS_SCM__DOI_Actual"]
    return ["proxy_D_INT_SCMxD_ODS_SCM__DOI_Actual"]
# Branch operator that evaluates the callable above at run time.
BRANCH_D_INT_SCMxD_ODS_SCM__DOI_Actual = BranchPythonOperator(
    task_id="BRANCH_D_INT_SCMxD_ODS_SCM__DOI_Actual",
    python_callable=branch_D_INT_SCMxD_ODS_SCM__DOI_Actual,
    dag=D_INT_SCM,
    provide_context=True,
)
# Join point: downstream work proceeds whether or not the sensor was skipped.
proxy_D_INT_SCMxD_ODS_SCM__DOI_Actual = DummyOperator(
    task_id="proxy_D_INT_SCMxD_ODS_SCM__DOI_Actual",
    trigger_rule="none_failed_or_skipped",
    dag=D_INT_SCM,
)
# Cross-DAG sensor: waits for DOI_Actual in D_ODS_SCM.
D_INT_SCMxD_ODS_SCM__DOI_Actual = ExternalTaskSensor(
    pool="sensor_pool",
    task_id="D_INT_SCMxD_ODS_SCM__DOI_Actual",
    external_dag_id="D_ODS_SCM",
    external_task_id="DOI_Actual",
    mode="reschedule",
    dag=D_INT_SCM,
    check_existence=True,
    timeout=60*60*1,
    retries=5,
    retry_delay=timedelta(minutes=3),
    execution_date_fn=sqlg_exec_date_fn,
)
# Wiring: BRANCH -> {proxy, sensor}; sensor -> proxy.
BRANCH_D_INT_SCMxD_ODS_SCM__DOI_Actual.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__DOI_Actual)
BRANCH_D_INT_SCMxD_ODS_SCM__DOI_Actual.set_downstream(D_INT_SCMxD_ODS_SCM__DOI_Actual)
D_INT_SCMxD_ODS_SCM__DOI_Actual.set_downstream(proxy_D_INT_SCMxD_ODS_SCM__DOI_Actual)
# XSLT:loop: JOB_FLOW_NAME-and-PRE_JOB: External: END}}
# XSLT:loop: JOB_FLOW_NAME: START{
# XSLT:loop: Rows-by-JOB_FLOW_NAME: JOB_NAME: START{{
# FLOW: D_ODS_SCM.PNL_Revenue_Cost_A
# Gate every D_ODS_SCM job behind the staging-init proxy task.
proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(PNL_Revenue_Cost_A)
proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(NRE_Summary)
proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(Daily_Revenue_F)
proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(RFQ_Master)
proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(Inventory_A)
proxy_D_ODS_SCMxD_STG_INIT__SYS_STS_STG.set_downstream(DOI_Actual)
# XSLT:loop: Rows-by-JOB_FLOW_NAME: JOB_NAME: START{{
# FLOW: D_DM_SCM.DM_PNL_Revenue_Cost_A
# Each DM_* job runs only after its matching D_INT_SCM sensor/proxy pair.
proxy_D_DM_SCMxD_INT_SCM__INT_PNL_Revenue_Cost_A.set_downstream(DM_PNL_Revenue_Cost_A)
proxy_D_DM_SCMxD_INT_SCM__INT_NRE_Summary.set_downstream(DM_NRE_Summary)
proxy_D_DM_SCMxD_INT_SCM__INT_Daily_Revenue_F.set_downstream(DM_Daily_Revenue_F)
proxy_D_DM_SCMxD_INT_SCM__INT_RFQ_Master.set_downstream(DM_RFQ_Master)
proxy_D_DM_SCMxD_INT_SCM__INT_Inventory_A.set_downstream(DM_Inventory_A)
proxy_D_DM_SCMxD_INT_SCM__INT_DOI_Actual.set_downstream(DM_DOI_Actual)
# XSLT:loop: Rows-by-JOB_FLOW_NAME: JOB_NAME: START{{
# FLOW: D_INT_SCM.INT_PNL_Revenue_Cost_A
# Each INT_* job runs only after its matching D_ODS_SCM sensor/proxy pair.
proxy_D_INT_SCMxD_ODS_SCM__PNL_Revenue_Cost_A.set_downstream(INT_PNL_Revenue_Cost_A)
proxy_D_INT_SCMxD_ODS_SCM__NRE_Summary.set_downstream(INT_NRE_Summary)
proxy_D_INT_SCMxD_ODS_SCM__Daily_Revenue_F.set_downstream(INT_Daily_Revenue_F)
proxy_D_INT_SCMxD_ODS_SCM__RFQ_Master.set_downstream(INT_RFQ_Master)
proxy_D_INT_SCMxD_ODS_SCM__Inventory_A.set_downstream(INT_Inventory_A)
proxy_D_INT_SCMxD_ODS_SCM__DOI_Actual.set_downstream(INT_DOI_Actual)
| nilq/baby-python | python |
from segmentTree import SumSegmentTree, MinSegmentTree
import numpy as np
import matplotlib.pyplot as plt
class RingBuffer(object):
    """Fixed-size circular buffer backed by a pre-allocated numpy array.

    Appends wrap around once `maxlen` items have been written; reads are by
    absolute slot index (0 <= idx < maxlen), not by insertion order.
    """
    def __init__(self, maxlen, shape, dtype='int32'):
        self.maxlen = maxlen
        # Allocate directly with the target dtype; the original allocated a
        # float64 array and then copied it with .astype, doing double work.
        self.data = np.zeros((maxlen,) + shape, dtype=dtype)
        self.next_idx = 0
    def append(self, v):
        """Store v in the next slot, overwriting the oldest entry when full."""
        self.data[self.next_idx] = v
        self.next_idx = (self.next_idx+1) % self.maxlen
    def __getitem__(self, idx):
        """Return the raw contents of slot `idx`.

        Raises KeyError for indices outside [0, maxlen); note this bounds
        check is against capacity, not against how many items were appended.
        """
        if idx < 0 or idx >= self.maxlen:
            raise KeyError()
        return self.data[idx]
def array_min2d(x):
    """Coerce *x* to an ndarray with at least two dimensions.

    Inputs that are already 2-D (or higher) pass through unchanged; 0-D and
    1-D inputs are reshaped into a single column vector.
    """
    arr = np.array(x)
    return arr if arr.ndim >= 2 else arr.reshape(-1, 1)
class Buffer(object):
    """A set of named RingBuffers of equal capacity sharing one write cursor."""
    def __init__(self, limit, content_shape):
        """Create one RingBuffer per (name, shape) entry in content_shape."""
        self.next_idx = 0
        self.limit = limit
        self.length = 0
        self.contents = {
            name: RingBuffer(limit, shape=shape)
            for name, shape in content_shape.items()
        }
    def append(self, buffer_item):
        """Write one record (a dict keyed like contents) and advance the cursor."""
        for name, ring in self.contents.items():
            ring.append(buffer_item[name])
        self.next_idx = (self.next_idx + 1) % self.limit
        # Track how many slots are occupied, capped at capacity.
        self.length = min(self.length + 1, self.limit)
class PrioritizedGoalBuffer(Buffer):
    """Goal buffer sampled proportionally to per-slot priority.

    Priorities live in a sum segment tree, so proportional sampling is
    O(log n) in the buffer capacity.
    """
    def __init__(self, limit, alpha):
        self.content = {'goal': (1,)}
        self.alpha = alpha  # priority exponent used as priority ** alpha
        super(PrioritizedGoalBuffer, self).__init__(limit, self.content)
        # Segment tree capacity must be a power of two >= limit.
        it_capacity = 1
        while it_capacity < limit:
            it_capacity *= 2
        self._it_sum = SumSegmentTree(it_capacity)
        self._max_priority = 1.0
    def append(self, buffer_item, priority=None):
        """See ReplayBuffer.store_effect"""
        idx = self.next_idx
        super().append(buffer_item)
        if priority is None:
            self._it_sum[idx] = self._max_priority ** self.alpha
        else:
            # NOTE(review): unlike update_priority, an explicit priority is
            # stored WITHOUT the ** alpha exponent — confirm this asymmetry
            # is intentional before relying on it.
            self._it_sum[idx] = priority
    def sample_proportional_idx(self):
        """Draw a slot index with probability proportional to its priority."""
        # Renamed local from `sum`, which shadowed the builtin.
        total = self._it_sum.sum()
        mass = np.random.random() * total
        idx = self._it_sum.find_prefixsum_idx(mass)
        return idx
    def sample(self):
        """Return (idx, record) for a priority-proportional draw."""
        # Draw such that we always have a proceeding element.
        idx = self.sample_proportional_idx()
        result = {}
        for name, value in self.contents.items():
            result[name] = array_min2d(value[idx])
        return idx, result
    def update_priority(self, idx, priority):
        """Set slot idx to priority ** alpha and track the running maximum."""
        self._it_sum[idx] = priority ** self.alpha
        self._max_priority = max(self._max_priority, priority)
def _demo():
    # Visual smoke test: sample frequencies should be proportional to the
    # priorities passed to append (here priority == goal value).
    buffer = PrioritizedGoalBuffer(11, 1)
    samples = np.zeros((100000), dtype=int)
    # NOTE(review): 15 appends into an 11-slot buffer — the first 4 goals are
    # overwritten by wrap-around; presumably intentional for this demo.
    for i in range(15):
        buffer_item = {'goal': i}
        buffer.append(buffer_item, i)
    for j in range(100000):
        idx, sample = buffer.sample()
        samples[j] = int(sample['goal'])
    bins = np.bincount(samples)
    plt.plot(range(bins.shape[0]), bins)
    # Blocking: the plot window must be closed before the demo continues.
    plt.show()
    # After boosting goal 6's priority it should dominate the histogram.
    buffer.update_priority(6,100)
    for j in range(100000):
        idx, sample = buffer.sample()
        samples[j] = int(sample['goal'])
    bins = np.bincount(samples)
    plt.plot(range(bins.shape[0]), bins)
    plt.show()
if __name__ == "__main__":
_demo() | nilq/baby-python | python |
from .plots import Plot,PlotError,PlotState
from .. import context
from .. import items
from .. import maps
from .. import randmaps
from .. import waypoints
from .. import monsters
from .. import dialogue
from .. import services
from .. import teams
from .. import characters
from .. import namegen
import random
from .. import cutscene
from .. import worlds
# BARDIC_DUNGEON
# This subplot will generate a dungeon of a given type. All these subplots
# should be unique in order to prevent dungeon types from repeating.
# - Generate dungeon
# - Generate connection to previous dungeon
# - Install dungeon
# - Add chapter resources, as appropriate
class BardicCaves( Plot ):
    """Bardic dungeon flavoured as a cave complex.

    Base class for the other BARDIC_DUNGEON flavours below; subclasses vary
    NAME_PATTERNS / DUNGEON_PATTERN and may override gen_name. All flavours
    are UNIQUE so a given dungeon type is not repeated in one campaign.
    """
    LABEL = "BARDIC_DUNGEON"
    NAME_PATTERNS = ( "Caverns of {0}", "Caves of {0}", "{0} Grotto", "{0} Chasm" )
    DUNGEON_PATTERN = (context.HAB_CAVE,)
    UNIQUE = True
    scope = True
    active = True
    def custom_init( self, nart ):
        """Load dungeon levels, and connect this dungeon to the adventure."""
        # Decide on a good name. Do this first in case we want to generate an antagonist
        # or boss monster to include in the dungeon. The name generator will generate
        # this antagonist, and it will be passed on to the levels of the dungeon.
        self.elements[ "ANTAGONIST" ] = False
        self.dname = self.gen_name()
        # Generate the levels
        self.levels = self.get_dungeon_levels( nart, self.DUNGEON_PATTERN, self.chapter.start_rank, self.chapter.end_rank )
        # Connect all the levels, and name them.
        self.add_sub_plot( nart, "BARDIC_CONNECTION",
            PlotState(elements={"LEVELS":self.levels,"DNAME":self.dname}, rank=self.chapter.start_rank).based_on( self ) )
        # Set the LAST_DUNGEON element, for use by the next dungeon.
        self.register_element( "LAST_DUNGEON", self.levels[-1] )
        return True
    def gen_name( self ):
        # Default naming: one of the patterns filled with a random style word.
        return random.choice( self.NAME_PATTERNS ).format( namegen.random_style_name() )
class BardicCrypt( BardicCaves ):
    """Bardic dungeon flavoured as an undead-infested crypt."""
    LABEL = "BARDIC_DUNGEON"
    NAME_PATTERNS = ( "Crypt of {0}", "Tomb of {0}", "{0} Boneyard", "{0} Catacombs" )
    DUNGEON_PATTERN = (context.HAB_TUNNELS,context.GEN_UNDEAD)
    UNIQUE = True
class AntagonisticForest( BardicCaves ):
    """Forest dungeon named after a generated antagonist faction."""
    LABEL = "BARDIC_DUNGEON"
    NAME_PATTERNS = ( "Forest","Woods","Wilds" )
    DUNGEON_PATTERN = (context.HAB_FOREST,)
    UNIQUE = True
    def gen_name( self ):
        # Register the faction as this plot's ANTAGONIST and name the
        # dungeon after it.
        faction = self.register_element( "ANTAGONIST", teams.AntagonistFaction(dungeon_type=self.NAME_PATTERNS) )
        return faction.name
class AntagonisticCaves( BardicCaves ):
    """Cave dungeon named after a generated antagonist faction."""
    LABEL = "BARDIC_DUNGEON"
    NAME_PATTERNS = ( "Caves","Caverns","Grotto","Chasm" )
    DUNGEON_PATTERN = (context.HAB_CAVE,)
    UNIQUE = True
    def gen_name( self ):
        # Register the faction as this plot's ANTAGONIST and name the
        # dungeon after it.
        faction = self.register_element( "ANTAGONIST", teams.AntagonistFaction(dungeon_type=self.NAME_PATTERNS) )
        return faction.name
class AntagonisticTunnels( BardicCaves ):
    """Tunnel/hideout dungeon named after a generated antagonist faction."""
    LABEL = "BARDIC_DUNGEON"
    NAME_PATTERNS = ( "Hideout", "Tunnels", "Catacombs" )
    DUNGEON_PATTERN = (context.HAB_CAVE,)
    UNIQUE = True
    def gen_name( self ):
        # Register the faction as this plot's ANTAGONIST and name the
        # dungeon after it.
        faction = self.register_element( "ANTAGONIST", teams.AntagonistFaction(dungeon_type=self.NAME_PATTERNS) )
        return faction.name
# BARDIC_CONNECTION
# This subplot will add a connection for the new bardic dungeon from the
# previous one. If no dungeons have yet been added, it will just connect to
# the city scene. Otherwise, it will likely add a boss encounter to the
# previous dungeon and a new set of resources (shops, etc) for the new level.
#
# DUTIES:
# - To activate the chapter
# - To connect the next dungeon to the previous
# - Provide access to needed resources: shops, temple, etc.
# - Provide rumours regarding the previous/current chapter.
class BC_DirectConnection( Plot ):
    """The first dungeon gets directly connected to the LOCALE scene."""
    LABEL = "BARDIC_CONNECTION"
    scope = True
    active = True
    @classmethod
    def matches( self, pstate ):
        """Requires LOCALE to exist, but no LAST_DUNGEON."""
        return pstate.elements.get( "LOCALE" ) and not pstate.elements.get( "LAST_DUNGEON" )
    def custom_init( self, nart ):
        """Install the dungeon."""
        self.install_dungeon( nart, self.elements[ "LEVELS" ], self.elements[ "LOCALE" ], self.elements["DNAME"] )
        # One-shot flag: t_START shows the intro alert only once.
        self._ready = True
        return True
    ### TESTING CUTSCENES HERE- FOR TESTING ONLY
    do_cutscene = False
    def t_START( self, explo ):
        if self._ready:
            self.chapter.activate()
            self._ready = False
            #explo.alert("[PORTENT]")
            explo.alert("They say that a journey of a thousand miles begins with a single step. Today your journey begins as you prepare to leave the city of [city] and begin your adventure.")
        # Print message, activate chapter upon entering city the first time.
        if self.do_cutscene:
            explo.alert( "You enter a ." )
            cs1=cutscene.Say( "This place stinks of death...", species=(characters.Human,characters.Elf,characters.Fuzzy,characters.Hurthling), children= [
                cutscene.Say( "You say that like it's a bad thing.", job=(characters.Necromancer,) ),
                cutscene.Say( "Yes, it reminds me of my mother's cooking.", species=(characters.Orc,) ),
                cutscene.Say( "The sooner we get this job finished, the sooner we can get out of here.", job=(characters.Warrior,) ),
            ])
            cutscene.roll_cutscene( explo, [cs1,] )
            #self.do_cutscene = False
    def get_dialogue_grammar( self, npc, explo ):
        # Contribute rumours (and antagonist chatter, when one exists) while
        # this chapter is active.
        if self.chapter.active:
            dname = self.elements.get("DNAME")
            mygram = {
                "[RUMOUR]": ["[rumourleadin] there are [monsters] coming from the {}.".format( dname )],
            }
            city = self.elements["LOCALE"]
            anti = self.elements.get( "ANTAGONIST" )
            if anti:
                mygram["[HOWAREYOU]"] = ["Heavens save {} from the {}.".format(city,anti),]
                mygram["[RUMOUR]"].append( "[rumourleadin] {} lives in fear of the {}.".format( city, anti ) )
            return mygram
class BC_DwarvenCity( Plot ):
    """Chapter connection: a boss lair below the previous dungeon leading to
    a dwarven city, with the next dungeon installed beneath the city."""
    LABEL = "BARDIC_CONNECTION"
    UNIQUE = True
    scope = True
    active = True
    NAME_PATTERNS = ( "{0} Deep", "{0} Halls" )
    # One-shot flag for the arrival message in t_START.
    _ready = True
    @classmethod
    def matches( self, pstate ):
        """Requires LAST_DUNGEON to exist and to not go up, and the next dungeon to go down."""
        return ( pstate.elements.get( "LAST_DUNGEON" )
            and context.MAP_GOUP not in pstate.elements["LAST_DUNGEON"].desctags
            and context.MAP_GODOWN in pstate.elements["LEVELS"][0].desctags )
    def custom_init( self, nart ):
        """Install the dungeon."""
        # Create the intermediary level.
        interior = maps.Scene( 65,65, sprites={maps.SPRITE_WALL: "terrain_wall_cave.png", maps.SPRITE_GROUND: "terrain_ground_under.png", maps.SPRITE_FLOOR: "terrain_floor_gravel.png" },
            biome=context.HAB_TUNNELS, setting=self.setting, desctags=(context.MAP_DUNGEON,context.MAP_GODOWN) )
        igen = randmaps.SubtleMonkeyTunnelScene( interior )
        self.register_scene( nart, interior, igen, ident="_LAIR" )
        # Create the guardian.
        btype = monsters.choose_monster_type(self.rank,self.rank+2,{(context.DES_EARTH,context.MTY_FIGHTER,context.MTY_CONSTRUCT):True,context.DES_EARTH:context.MAYBE})
        boss = self.register_element( "_BOSS", monsters.generate_boss( btype, self.rank+3 ) )
        interior.name = "{0}'s Lair".format( boss )
        # Connect to previous level.
        self.add_sub_plot( nart, "CONNECT", PlotState( elements={"PREV":self.elements["LAST_DUNGEON"],"NEXT":interior} ).based_on( self ) )
        # Create the goal room.
        team = teams.Team(default_reaction=-999, rank=self.rank, strength=150,
            habitat=interior.get_encounter_request(), respawn=False, boss=boss )
        int_goalroom = randmaps.rooms.SharpRoom( tags=(context.GOAL,), parent=interior )
        int_goalroom.contents.append( team )
        int_goalroom.contents.append( boss )
        boss.team = team
        stairs_1 = waypoints.SpiralStairsDown()
        int_goalroom.contents.append( stairs_1 )
        # Create the Dwarven City.
        myscene = maps.Scene( 65, 65,
            sprites={maps.SPRITE_WALL: "terrain_wall_cave.png", maps.SPRITE_GROUND: "terrain_ground_under.png", maps.SPRITE_FLOOR: "terrain_floor_gravel.png"},
            biome=context.HAB_BUILDING, setting=self.setting,
            name=random.choice( self.NAME_PATTERNS ).format( namegen.DWARF.gen_word() ),
            desctags=(context.MAP_DUNGEON,context.DES_CIVILIZED,context.MAP_GODOWN) )
        mymapgen = randmaps.CaveScene( myscene )
        self.register_scene( nart, myscene, mymapgen, ident="LOCALE" )
        castle = self.register_element( "CITY", randmaps.rooms.VillageRoom( width=35,height=35,tags=(context.CIVILIZED,context.ROOM_PUBLIC), parent=myscene ) )
        myroom = randmaps.rooms.FuzzyRoom( tags=(context.ENTRANCE,), parent=castle )
        myteam = teams.Team( strength=0, default_reaction=characters.SAFELY_FRIENDLY)
        castle.contents.append( myteam )
        stairs_2 = waypoints.SpiralStairsUp()
        myroom.contents.append( stairs_2 )
        myroom.contents.append( monsters.generate_npc(species=characters.Dwarf, team=myteam) )
        myroom.contents.append( monsters.generate_npc(species=characters.Dwarf, team=myteam) )
        # Connect the stairs.
        self.move_element( myscene, interior )
        stairs_1.destination = myscene
        stairs_1.otherside = stairs_2
        stairs_2.destination = interior
        stairs_2.otherside = stairs_1
        # Add some city services.
        self.add_sub_plot( nart, "CITY_GENERALSTORE" )
        self.add_sub_plot( nart, "CITY_LIBRARY" )
        self.add_sub_plot( nart, "CITY_INN" )
        self.add_sub_plot( nart, "CITY_TEMPLE" )
        self.add_sub_plot( nart, "CITY_EXTRASHOP" )
        # Install the dungeon in the city.
        self.install_dungeon( nart, self.elements[ "LEVELS" ], self.elements[ "LOCALE" ], self.elements["DNAME"] )
        return True
    def t_START( self, explo ):
        # Print message, activate chapter upon entering city the first time.
        if explo.scene is self.elements["LOCALE"] and self._ready:
            explo.alert( "You step into a bustling dwarven city." )
            self.chapter.activate()
            self._ready = False
    def get_dialogue_grammar( self, npc, explo ):
        # Rumours change depending on whether the previous chapter or this
        # one is currently active.
        dname = self.elements.get("DNAME")
        city = self.elements.get("LOCALE")
        monster = self.elements.get("_BOSS")
        if self.chapter.prev and self.chapter.prev.active:
            mygram = {
                "[RUMOUR]": ["[rumourleadin] the dwarves of {} protect the world from {}.".format( city, dname ),
                    "[rumourleadin] {} is now under siege from {} the {}.".format( city, monster, monster.monster_name )
                ],
            }
            return mygram
        elif self.chapter.active:
            mygram = {
                "[RUMOUR]": ["[rumourleadin] beneath {} lies {}.".format( city, dname )],
            }
            return mygram
class BC_AdvanceAgent( Plot ):
    """Chapter connection: defeat an agent of the next chapter's ANTAGONIST
    to discover the location of the next dungeon (via BARDIC_FRESHSTART)."""
    # Fight an agent of next chapter's ANTAGONIST.
    LABEL = "BARDIC_CONNECTION"
    scope = True
    active = True
    # One-shot guard (set False once the map reveal has fired).
    _ready = True
    @classmethod
    def matches( self, pstate ):
        """Requires LAST_DUNGEON and ANTAGONIST to exist"""
        return ( pstate.elements.get( "LAST_DUNGEON" )
            and pstate.elements.get( "ANTAGONIST" ) )
    def custom_init( self, nart ):
        """Install the dungeon."""
        # Create the intermediary level.
        interior = maps.Scene( 65,65, sprites={maps.SPRITE_WALL: "terrain_wall_darkbrick.png", maps.SPRITE_GROUND: "terrain_ground_under.png", maps.SPRITE_FLOOR: "terrain_floor_tile.png" },
            fac=self.elements["ANTAGONIST"],
            biome=context.HAB_TUNNELS, setting=self.setting, desctags=(context.MAP_DUNGEON,) )
        igen = randmaps.SubtleMonkeyTunnelScene( interior )
        self.register_scene( nart, interior, igen, ident="LOCALE" )
        # Create the goal room.
        team = teams.Team(default_reaction=-999, rank=self.rank, strength=50, habitat=interior.get_encounter_request(),
            fac=self.elements["ANTAGONIST"], respawn=False )
        int_goalroom = randmaps.rooms.SharpRoom( tags=(context.GOAL,), parent=interior )
        int_goalroom.contents.append( team )
        # Create the guardian.
        boss = self.register_element( "_BOSS", monsters.generate_npc(team=team,upgrade=True,rank=self.rank+3) )
        self.enemy_defeated = False
        interior.name = "{}'s Chamber".format( boss )
        int_goalroom.contents.append( boss )
        for t in range( random.randint(2,4) ):
            self.add_sub_plot( nart, "ENCOUNTER" )
        # Connect to previous level.
        self.add_sub_plot( nart, "CONNECT", PlotState( elements={"PREV":self.elements["LAST_DUNGEON"],"NEXT":interior} ).based_on( self ) )
        # Add a BARDIC_FRESHSTART to install the dungeon somewhere else.
        sp = self.add_sub_plot( nart, "BARDIC_FRESHSTART" )
        self.register_element( "DESTINATION", sp.elements.get( "LOCALE" ) )
        return True
    def _BOSS_DEATH( self, explo ):
        # Callback fired when the boss NPC dies; resolution happens after combat.
        self.enemy_defeated = True
    def t_COMBATOVER( self, explo ):
        if self.enemy_defeated:
            # Activate the resolution, whatever that is.
            explo.alert( "You discover that {} was carrying a map leading to {}. That should be your next destination.".format(self.elements["_BOSS"],self.elements["DESTINATION"]) )
            explo.alert( "New world map location discovered." )
            self.chapter.activate()
            self.active = False
    def get_dialogue_grammar( self, npc, explo ):
        # Rumours point at the old dungeon's corruption, then at the new location.
        dname = self.elements.get("DNAME")
        enemy = self.elements.get("ANTAGONIST")
        olddname = self.elements["LAST_DUNGEON"].dname
        monster = self.elements.get("_BOSS")
        newloc = self.elements.get("DESTINATION")
        if self.chapter.prev and self.chapter.prev.active:
            mygram = {
                "[RUMOUR]": ["[rumourleadin] the {} is in league with the {}.".format( olddname, enemy )],
            }
            return mygram
        elif self.chapter.active:
            mygram = {
                "[RUMOUR]": ["[rumourleadin] the {} is near {}.".format( dname, newloc )],
            }
            return mygram
#
# BARDIC_FRESHSTART
# This subplot opens up a new world map scene in which to place the next dungeon.
# Because of this, it installs the dungeon... normally BARDIC_CONNECTION is
# supposed to do that, but it can pawn off the responsibility to this subplot.
#
# The world map entrance should get activated when the chapter is activated.
# That scene should be stored as element LOCALE, in case the connection needs
# to do anything with it.
#
class BF_ForestVillage( Plot ):
    """A new world map scene, set in a forest."""
    LABEL = "BARDIC_FRESHSTART"
    scope = True
    active = True

    @classmethod
    def matches( self, pstate ):
        """Requires LEVELS[0] to be forest or not MAP_WILDERNESS."""
        dungeon = pstate.elements.get( "LEVELS" )
        return dungeon and ( dungeon[0].biome is context.HAB_FOREST
            or context.MAP_WILDERNESS not in dungeon[0].desctags )

    def custom_init( self, nart ):
        # Add the forest itself.
        # Map grows with rank, capped at 129x129.
        myscene = maps.Scene( min( 95 + self.rank * 3, 129 ), min( 95 + self.rank * 3, 129 ),
            sprites={maps.SPRITE_WALL: "terrain_wall_woodfort.png", maps.SPRITE_GROUND: "terrain_ground_forest.png",
            maps.SPRITE_FLOOR: "terrain_floor_gravel.png" },
            biome=context.HAB_FOREST, setting=self.setting, fac=None,
            desctags=(context.MAP_WILDERNESS,) )
        mymapgen = randmaps.ForestScene( myscene )
        self.register_scene( nart, myscene, mymapgen, ident="LOCALE" )
        # Add a village.
        castle = self.register_element( "CITY", randmaps.rooms.VillageRoom( width=35,
            height=35,tags=(context.CIVILIZED,context.ROOM_PUBLIC,context.MAP_ON_EDGE), parent=myscene ) )
        myroom = randmaps.rooms.FuzzyRoom( tags=(context.ENTRANCE,), parent=castle )
        # strength=0: the village team is non-combative and friendly.
        myteam = teams.Team( strength=0, default_reaction=characters.SAFELY_FRIENDLY)
        castle.contents.append( myteam )
        myent = waypoints.Well()
        myroom.contents.append( myent )
        myroom.contents.append( monsters.generate_npc(species=characters.Elf, team=myteam) )
        myroom.contents.append( monsters.generate_npc(species=characters.Elf, team=myteam) )
        self.add_sub_plot( nart, "CITY_GENERALSTORE" )
        self.add_sub_plot( nart, "CITY_LIBRARY" )
        self.add_sub_plot( nart, "CITY_INN" )
        self.add_sub_plot( nart, "CITY_EXTRASHOP" )
        # Add world map entrance.
        # Final False presumably marks the entrance hidden until the chapter
        # activates -- consistent with t_START setting `.visible = True` below.
        self._entrance = self.chapter.world.add_entrance( myscene, myscene.name, worlds.W_VILLAGE, myent, False )
        # Encounter count scales with rank, clamped by the min(...) terms.
        for t in range( random.randint(2+min(self.rank//3,6),4+min(self.rank//2,6)) ):
            self.add_sub_plot( nart, "ENCOUNTER" )
        self.add_sub_plot( nart, "SPECIAL_FEATURE" )
        # Install the dungeon here.
        self.install_dungeon( nart, self.elements[ "LEVELS" ], myscene, self.elements["DNAME"] )
        self._ready = True
        return True

    def t_START( self, explo ):
        # When the chapter activates, show the world map entrance.
        if self.chapter.active:
            self._entrance.visible = True
            self.active = False
# BARDIC_CONCLUSION
# This subplot will feature a big boss battle to take place after the LAST_DUNGEON.
class StraightBardicBalrog( Plot ):
    """Fight a boss encounter."""
    # BARDIC_CONCLUSION: the endgame lair attached to LAST_DUNGEON.
    LABEL = "BARDIC_CONCLUSION"
    active = True
    scope = True

    def custom_init( self, nart ):
        """Create the final dungeon, boss encounter, and resolution."""
        # Pick a boss-capable monster type a bit above party rank, then
        # generate it even stronger (rank+5).
        btype = monsters.choose_monster_type(self.rank+2,self.rank+4,{context.MTY_BOSS:True,context.MTY_LEADER:context.MAYBE})
        boss = monsters.generate_boss( btype, self.rank+5 )
        #print( "{0} the {1}".format( boss, boss.monster_name ) )
        # NOTE(review): unlike the other interiors, no SPRITE_GROUND entry is
        # given here, and MTY_HUMANOID (a monster-type tag) appears in the map
        # desctags -- confirm both are intentional.
        interior = maps.Scene( 65,65, sprites={maps.SPRITE_WALL: "terrain_wall_darkbrick.png",
            maps.SPRITE_FLOOR: "terrain_floor_dungeon.png", },
            biome=context.HAB_BUILDING, setting=self.setting, desctags=(context.MAP_DUNGEON,context.MTY_HUMANOID) )
        igen = randmaps.SubtleMonkeyTunnelScene( interior )
        interior.name = "{0}'s Lair".format( boss )
        self.register_scene( nart, interior, igen, ident="_LAIR" )
        self.add_sub_plot( nart, "CONNECT", PlotState( elements={"PREV":self.elements["LAST_DUNGEON"],"NEXT":interior} ).based_on( self ) )
        # strength=200 and respawn=False: a single, very tough showdown.
        team = teams.Team(default_reaction=-999, rank=self.rank, strength=200,
            habitat=interior.get_encounter_request(), respawn=False, boss=boss )
        int_goalroom = randmaps.rooms.SharpRoom( tags=(context.GOAL,), parent=interior )
        int_goalroom.contents.append( team )
        boss.team = team
        self.register_element( "_LAIR_ROOM", int_goalroom )
        # Registering with the room ident places the boss inside the goal room.
        self.register_element( "ENEMY", boss, "_LAIR_ROOM" )
        self.add_sub_plot( nart, "DUNGEON_ARMORY", PlotState( elements={"LOCALE":interior} ).based_on( self ) )
        self.enemy_defeated = False
        return True

    def ENEMY_DEATH( self, explo ):
        # Death callback for the ENEMY element; resolution happens in t_COMBATOVER.
        self.enemy_defeated = True

    def t_COMBATOVER( self, explo ):
        if self.enemy_defeated:
            # Activate the resolution, whatever that is.
            explo.alert( "With {0} defeated, peace soon returns to the land.".format( self.elements["ENEMY"] ) )
            explo.alert( "Thanks for playing Dungeon Monkey Eternal. You can follow development at www.gearheadrpg.com, or via @Pyrro12 on Twitter." )
            self.active = False

    def get_dialogue_grammar( self, npc, explo ):
        # While the conclusion is unresolved, seed NPC dialogue with rumours
        # about the boss (and its plans for LOCALE, when a city is present).
        if self.active:
            boss = self.elements["ENEMY"]
            mygram = {
                "[HOWAREYOU]": ["Heavens save us from {0}.".format(boss)],
                "[monsters]": ["{0}'s minions".format(boss)],
                "[RUMOUR]": ["[rumourleadin] {0} the {1} is the cause of our problems.".format( boss, boss.monster_name )],
            }
            city = self.elements.get( "LOCALE" )
            if city:
                mygram["[RUMOUR]"].append( "[rumourleadin] {0} the {1} plans to destroy {2}.".format( boss, boss.monster_name,city ) )
            return mygram
| nilq/baby-python | python |
from datetime import datetime, date, timezone
import dateutil
from dateutil.relativedelta import relativedelta
import re
from .util import calculate_price, DELIM_VALUE_REGEX, DOT_VALUE_REGEX
from isodate import parse_duration, parse_datetime
import pytz
def create_default_context(numeric, responseMetadata):
    """Build the default evaluation context of ``cff_*`` helper functions.

    The returned callables are injected into a py-expression-eval context, so
    their names and positional signatures are part of the public contract.

    Args:
        numeric: Flag forwarded to ``calculate_price``.
        responseMetadata: Mapping of response metadata; only ``"date_created"``
            (an ISO datetime string) is read, by ``cff_createdBetween``.

    Returns:
        dict mapping helper names to the closures defined below.
    """

    def cff_yeardiff(datestr1, datestr2):
        """Whole-year difference datestr1 - datestr2 (both "YYYY-MM-DD")."""
        if not (isinstance(datestr1, str) and isinstance(datestr2, str)):
            return 0
        d1 = datetime.strptime(datestr1, "%Y-%m-%d")
        d2 = datetime.strptime(datestr2, "%Y-%m-%d")
        return relativedelta(d1, d2).years

    def cff_nthOfNextMonth(datestr, n, maxDayDiff=None):
        """Returns nth day of the next month after datestr.

        If the return date is less than maxDayDiff away from date, then go to
        the next month.
        """
        if not isinstance(datestr, str) or not isinstance(n, int) or n <= 0:
            return None
        # Renamed from `date` to avoid shadowing the imported datetime.date.
        base_date = datetime.strptime(datestr, "%Y-%m-%d")
        new_date = (base_date + relativedelta(months=1)).replace(day=n)
        # `is not None` rather than truthiness so maxDayDiff=0 is honored.
        if maxDayDiff is not None and (new_date - base_date).days < maxDayDiff:
            new_date = new_date + relativedelta(months=1)
        return new_date.strftime("%Y-%m-%d")

    def cff_countArray(*args):
        """Count the array items for which `expression` evaluates truthy.

        py-expression-eval flattens the call, so the last positional argument
        is the expression and all preceding ones form the array.
        """
        items = list(args)
        expression = items.pop(-1)
        return len(
            [
                item
                for item in items
                if calculate_price(expression, item, numeric, responseMetadata)
            ]
        )

    def cff_today():
        """Today's date as "YYYY-MM-DD"."""
        return date.today().strftime("%Y-%m-%d")

    def cff_addDuration(dt, duration):
        """Add an ISO-8601 duration (e.g. "P1M") to a "YYYY-MM-DD" date string."""
        if not isinstance(dt, str):
            return None
        start = datetime.strptime(dt, "%Y-%m-%d")
        parsed = parse_duration(duration)
        new_time = start + relativedelta(
            months=int(getattr(parsed, "months", 0)),
            days=int(getattr(parsed, "days", 0)),
            years=int(getattr(parsed, "years", 0)),
        )
        return new_time.strftime("%Y-%m-%d")

    def cff_createdBetween(datestr1, datestr2):
        """True when the response's creation time lies within [datestr1, datestr2]."""
        if not (isinstance(datestr1, str) and isinstance(datestr2, str)):
            return 0
        # Undo the delimiter escaping applied when the strings were stored.
        datestr1 = re.sub(
            DOT_VALUE_REGEX, ".", re.sub(DELIM_VALUE_REGEX, ":", datestr1)
        )
        datestr2 = re.sub(
            DOT_VALUE_REGEX, ".", re.sub(DELIM_VALUE_REGEX, ":", datestr2)
        )
        d1 = parse_datetime(datestr1)
        d2 = parse_datetime(datestr2)
        date_created = responseMetadata.get("date_created", None)
        date_created = (
            parse_datetime(date_created) if date_created is not None else datetime.now()
        )
        # Convert date_created from a naive to an aware datetime so it can be
        # compared with d1 and d2. PyMongo always stores naive datetimes in
        # UTC, so tagging it as UTC is correct here.
        date_created = date_created.replace(tzinfo=pytz.utc)
        return (date_created >= d1) and (date_created <= d2)

    DEFAULT_CONTEXT = {
        "cff_yeardiff": cff_yeardiff,
        "cff_nthOfNextMonth": cff_nthOfNextMonth,
        "cff_countArray": cff_countArray,
        "cff_addDuration": cff_addDuration,
        "cff_today": cff_today,
        "cff_createdBetween": cff_createdBetween,
    }
    return DEFAULT_CONTEXT
| nilq/baby-python | python |
import container_crawler.utils
import mock
import unittest
class TestUtils(unittest.TestCase):
    """Tests for ``container_crawler.utils.create_internal_client``."""

    @mock.patch('container_crawler.utils.InternalClient')
    @mock.patch('container_crawler.utils.os')
    def test_internal_client_path(self, os_mock, ic_mock):
        """An existing config file is handed to InternalClient directly."""
        os_mock.path.exists.return_value = True
        os_mock.path.join.side_effect = lambda *parts: '/'.join(parts)

        settings = {'internal_client_logname': 'TestClient',
                    'internal_client_path': '/etc/swift/internal-client.conf'}
        container_crawler.utils.create_internal_client(settings, '/etc/swift')

        ic_mock.assert_called_once_with(settings['internal_client_path'],
                                        settings['internal_client_logname'],
                                        3)

    @mock.patch('container_crawler.utils.ConfigString')
    @mock.patch('container_crawler.utils.InternalClient')
    @mock.patch('container_crawler.utils.os')
    def test_internal_client_path_not_found(self, os_mock, ic_mock, conf_mock):
        """A missing config file falls back to the built-in config template."""
        os_mock.path.exists.return_value = False
        os_mock.path.join.side_effect = lambda *parts: '/'.join(parts)
        template = mock.Mock()
        conf_mock.return_value = template

        settings = {'internal_client_logname': 'TestClient',
                    'internal_client_path': '/etc/swift/internal-client.conf'}
        container_crawler.utils.create_internal_client(settings, '/etc/swift')

        os_mock.path.exists.assert_called_once_with(
            settings['internal_client_path'])
        conf_mock.assert_called_once_with(
            container_crawler.utils.INTERNAL_CLIENT_CONFIG)
        ic_mock.assert_called_once_with(
            template, settings['internal_client_logname'], 3)
| nilq/baby-python | python |
import random
from random import sample
import argparse
import numpy as np
import os
import pickle
from tqdm import tqdm
from collections import OrderedDict
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import precision_recall_curve
from sklearn.covariance import LedoitWolf
from scipy.spatial.distance import mahalanobis
from scipy.ndimage import gaussian_filter
from skimage import morphology
from skimage.segmentation import mark_boundaries
import matplotlib.pyplot as plt
import matplotlib
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision.models import wide_resnet50_2, resnet18
import datasets.mvtec as mvtec
# device setup
# Use the GPU when available; model and batches are moved to `device` below.
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
def parse_args(argv=None):
    """Parse the PaDiM command-line arguments.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse reads sys.argv[1:] -- so existing no-argument call
            sites behave exactly as before. Passing an explicit list makes
            the function testable and reusable.

    Returns:
        argparse.Namespace with data_path, save_path and arch.
    """
    parser = argparse.ArgumentParser('PaDiM')
    parser.add_argument('--data_path', type=str, default='./datasets')
    parser.add_argument('--save_path', type=str, default='./fundus_result')
    parser.add_argument('--arch', type=str, choices=['resnet18', 'wide_resnet50_2'], default='resnet18')
    return parser.parse_args(argv)
# Backbone feature maps combined into the PaDiM embedding.
# NOTE(review): _layer3 duplicates _layer2 ('layer3') and is never read below;
# it looks like a leftover from a three-layer configuration.
_layer1 = 'layer2'
_layer2 = 'layer3'
_layer3 = 'layer3'
def main():
    """Run PaDiM: fit a per-class multivariate Gaussian over backbone
    embeddings of the training set, then score test images by Mahalanobis
    distance and report image-level ROCAUC."""
    args = parse_args()

    # load model
    # t_d = total channels delivered by the hooked layers; d = channels kept
    # after random selection.
    if args.arch == 'resnet18':
        model = resnet18(pretrained=True, progress=True)
        t_d = 384
        d = 100
    elif args.arch == 'wide_resnet50_2':
        model = wide_resnet50_2(pretrained=True, progress=True)
        t_d = 1792
        d = 550
    model.to(device)
    model.eval()
    # Fixed seeds so the random channel selection is reproducible across runs
    # (required for reloading cached train statistics).
    random.seed(1024)
    torch.manual_seed(1024)
    if use_cuda:
        torch.cuda.manual_seed_all(1024)

    idx = torch.tensor(sample(range(0, t_d), d))
    # print(f'--> {idx.shape}')
    # set model's intermediate outputs
    # `outputs` is captured by the hook closure; it is rebound to [] after
    # each batch so the hook always appends to the current list.
    outputs = []

    def hook(module, input, output):
        outputs.append(output)

    model.layer2[-1].register_forward_hook(hook)
    model.layer3[-1].register_forward_hook(hook)
    # model.layer3[-1].register_forward_hook(hook)

    os.makedirs(os.path.join(args.save_path, 'temp_%s' % args.arch), exist_ok=True)
    fig, ax = plt.subplots(1, 2, figsize=(20, 10))
    fig_img_rocauc = ax[0]
    fig_pixel_rocauc = ax[1]

    total_roc_auc = []
    total_pixel_roc_auc = []

    for class_name in mvtec.CLASS_NAMES:

        train_dataset = mvtec.MVTecDataset(args.data_path, class_name=class_name, is_train=True)
        train_dataloader = DataLoader(train_dataset, batch_size=32, pin_memory=True)
        test_dataset = mvtec.MVTecDataset(args.data_path, class_name=class_name, is_train=False)
        test_dataloader = DataLoader(test_dataset, batch_size=32, pin_memory=True)

        train_outputs = OrderedDict([(_layer1, []), (_layer2, [])])
        test_outputs = OrderedDict([(_layer1, []), (_layer2, [])])
        # train_outputs = OrderedDict([('layer2', []), ('layer3', []), ('layer4', [])])
        # test_outputs = OrderedDict([('layer2', []), ('layer3', []), ('layer4', [])])

        # extract train set features
        # Train statistics are cached on disk per class and reused on re-runs.
        train_feature_filepath = os.path.join(args.save_path, 'temp_%s' % args.arch, 'train_%s.pkl' % class_name)
        if not os.path.exists(train_feature_filepath):
            for (x, _, _) in tqdm(train_dataloader, '| feature extraction | train | %s |' % class_name):
                # model prediction
                with torch.no_grad():
                    _ = model(x.to(device))
                # get intermediate layer outputs
                # Relies on hook registration order matching the dict key order.
                for k, v in zip(train_outputs.keys(), outputs):
                    train_outputs[k].append(v.cpu().detach())
                # initialize hook outputs
                outputs = []
            for k, v in train_outputs.items():
                train_outputs[k] = torch.cat(v, 0)
            # Embedding concat
            embedding_vectors = train_outputs[_layer1]
            for layer_name in [_layer2]:
                embedding_vectors = embedding_concat(embedding_vectors, train_outputs[layer_name])
            print(f'--> {embedding_vectors.shape}')
            # randomly select d dimension
            embedding_vectors = torch.index_select(embedding_vectors, 1, idx)
            # calculate multivariate Gaussian distribution
            B, C, H, W = embedding_vectors.size()
            embedding_vectors = embedding_vectors.view(B, C, H * W)
            mean = torch.mean(embedding_vectors, dim=0).numpy()
            cov = torch.zeros(C, C, H * W).numpy()
            I = np.identity(C)
            # Per-position covariance with 0.01*I regularization so the
            # matrix stays invertible for the Mahalanobis distance below.
            for i in range(H * W):
                # cov[:, :, i] = LedoitWolf().fit(embedding_vectors[:, :, i].numpy()).covariance_
                cov[:, :, i] = np.cov(embedding_vectors[:, :, i].numpy(), rowvar=False) + 0.01 * I
            # save learned distribution
            train_outputs = [mean, cov]
            with open(train_feature_filepath, 'wb') as f:
                pickle.dump(train_outputs, f)
        else:
            print('load train set feature from: %s' % train_feature_filepath)
            with open(train_feature_filepath, 'rb') as f:
                train_outputs = pickle.load(f)

        gt_list = []
        gt_mask_list = []
        test_imgs = []

        # extract test set features
        for (x, y, mask) in tqdm(test_dataloader, '| feature extraction | test | %s |' % class_name):
            test_imgs.extend(x.cpu().detach().numpy())
            gt_list.extend(y.cpu().detach().numpy())
            gt_mask_list.extend(mask.cpu().detach().numpy())
            # model prediction
            with torch.no_grad():
                _ = model(x.to(device))
            # get intermediate layer outputs
            for k, v in zip(test_outputs.keys(), outputs):
                test_outputs[k].append(v.cpu().detach())
            # initialize hook outputs
            outputs = []
        for k, v in test_outputs.items():
            test_outputs[k] = torch.cat(v, 0)

        # Embedding concat
        embedding_vectors = test_outputs[_layer1]
        for layer_name in [_layer2]:
            embedding_vectors = embedding_concat(embedding_vectors, test_outputs[layer_name])

        # randomly select d dimension
        embedding_vectors = torch.index_select(embedding_vectors, 1, idx)

        # calculate distance matrix
        B, C, H, W = embedding_vectors.size()
        embedding_vectors = embedding_vectors.view(B, C, H * W).numpy()
        dist_list = []
        for i in range(H * W):
            mean = train_outputs[0][:, i]
            conv_inv = np.linalg.inv(train_outputs[1][:, :, i])
            dist = [mahalanobis(sample[:, i], mean, conv_inv) for sample in embedding_vectors]
            dist_list.append(dist)

        dist_list = np.array(dist_list).transpose(1, 0).reshape(B, H, W)

        # upsample
        # NOTE(review): x here is the last test batch; x.size(2) is used only
        # as the common image height/width -- assumes square, uniform images.
        dist_list = torch.tensor(dist_list)
        score_map = F.interpolate(dist_list.unsqueeze(1), size=x.size(2), mode='bilinear',
                                  align_corners=False).squeeze().numpy()

        # apply gaussian smoothing on the score map
        for i in range(score_map.shape[0]):
            score_map[i] = gaussian_filter(score_map[i], sigma=4)

        # Normalization
        max_score = score_map.max()
        min_score = score_map.min()
        scores = (score_map - min_score) / (max_score - min_score)

        # calculate image-level ROC AUC score
        img_scores = scores.reshape(scores.shape[0], -1).max(axis=1)
        gt_list = np.asarray(gt_list)
        fpr, tpr, _ = roc_curve(gt_list, img_scores)
        img_roc_auc = roc_auc_score(gt_list, img_scores)
        total_roc_auc.append(img_roc_auc)
        print('image ROCAUC: %.3f' % (img_roc_auc))
        fig_img_rocauc.plot(fpr, tpr, label='%s img_ROCAUC: %.3f' % (class_name, img_roc_auc))

        # get optimal threshold
        # Pick the threshold maximizing pixel-level F1 on the PR curve.
        gt_mask = np.asarray(gt_mask_list)
        precision, recall, thresholds = precision_recall_curve(gt_mask.flatten(), scores.flatten())
        a = 2 * precision * recall
        b = precision + recall
        f1 = np.divide(a, b, out=np.zeros_like(a), where=b != 0)
        threshold = thresholds[np.argmax(f1)]

        # # calculate per-pixel level ROCAUC
        # fpr, tpr, _ = roc_curve(gt_mask.flatten(), scores.flatten())
        # per_pixel_rocauc = roc_auc_score(gt_mask.flatten(), scores.flatten())
        # total_pixel_roc_auc.append(per_pixel_rocauc)
        # print('pixel ROCAUC: %.3f' % (per_pixel_rocauc))
        # fig_pixel_rocauc.plot(fpr, tpr, label='%s ROCAUC: %.3f' % (class_name, per_pixel_rocauc))

        save_dir = args.save_path + '/' + f'pictures_{args.arch}'
        os.makedirs(save_dir, exist_ok=True)
        plot_fig(test_imgs, scores, gt_mask_list, threshold, save_dir, class_name)

    print('Average ROCAUC: %.3f' % np.mean(total_roc_auc))
    fig_img_rocauc.title.set_text('Average image ROCAUC: %.3f' % np.mean(total_roc_auc))
    fig_img_rocauc.legend(loc="lower right")

    # print('Average pixel ROCUAC: %.3f' % np.mean(total_pixel_roc_auc))
    # fig_pixel_rocauc.title.set_text('Average pixel ROCAUC: %.3f' % np.mean(total_pixel_roc_auc))
    # fig_pixel_rocauc.legend(loc="lower right")

    fig.tight_layout()
    fig.savefig(os.path.join(args.save_path, 'roc_curve.png'), dpi=100)
def plot_fig(test_img, scores, gts, threshold, save_dir, class_name):
    """Save one five-panel diagnostic figure per test image.

    Panels: input image, (disabled) ground truth, heat map overlay,
    thresholded binary mask, and the mask boundary drawn on the image.

    Args:
        test_img: sequence of normalized CHW images from the dataloader.
        scores: array (N, H, W) of anomaly scores in [0, 1].
        gts: ground-truth masks (only used by the commented-out GT panel).
        threshold: score cut-off separating anomalous pixels.
        save_dir: directory receiving the PNG files.
        class_name: prefix for the output file names.
    """
    num = len(scores)
    # Shared color scale across all images of the class.
    vmax = scores.max() * 255.
    vmin = scores.min() * 255.
    for i in range(num):
        img = denormalization(test_img[i])
        # gt = gts[i].transpose(1, 2, 0).squeeze()
        heat_map = scores[i] * 255
        # Bug fix: threshold a copy. The original aliased scores[i] and
        # mutated the caller's score array in place.
        mask = scores[i].copy()
        mask[mask > threshold] = 1
        mask[mask <= threshold] = 0
        # Morphological opening removes small isolated specks from the mask.
        kernel = morphology.disk(4)
        mask = morphology.opening(mask, kernel)
        mask *= 255
        vis_img = mark_boundaries(img, mask, color=(1, 0, 0), mode='thick')
        fig_img, ax_img = plt.subplots(1, 5, figsize=(12, 3))
        fig_img.subplots_adjust(right=0.9)
        norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
        for ax_i in ax_img:
            ax_i.axes.xaxis.set_visible(False)
            ax_i.axes.yaxis.set_visible(False)
        ax_img[0].imshow(img)
        ax_img[0].title.set_text('Image')
        # ax_img[1].imshow(gt, cmap='gray')
        ax_img[1].title.set_text('GroundTruth')
        ax = ax_img[2].imshow(heat_map, cmap='jet', norm=norm)
        ax_img[2].imshow(img, cmap='gray', interpolation='none')
        ax_img[2].imshow(heat_map, cmap='jet', alpha=0.5, interpolation='none')
        ax_img[2].title.set_text('Predicted heat map')
        ax_img[3].imshow(mask, cmap='gray')
        ax_img[3].title.set_text('Predicted mask')
        ax_img[4].imshow(vis_img)
        ax_img[4].title.set_text('Segmentation result')
        # Manually placed colorbar axis on the right edge of the figure.
        left = 0.92
        bottom = 0.15
        width = 0.015
        height = 1 - 2 * bottom
        rect = [left, bottom, width, height]
        cbar_ax = fig_img.add_axes(rect)
        cb = plt.colorbar(ax, shrink=0.6, cax=cbar_ax, fraction=0.046)
        cb.ax.tick_params(labelsize=8)
        font = {
            'family': 'serif',
            'color': 'black',
            'weight': 'normal',
            'size': 8,
        }
        cb.set_label('Anomaly Score', fontdict=font)
        fig_img.savefig(os.path.join(save_dir, class_name + '_{}'.format(i)), dpi=100)
        # Close the figure to free memory across the (potentially long) loop.
        plt.close()
def denormalization(x):
    """Undo ImageNet normalization on a CHW array and return an HWC uint8 image."""
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])
    hwc = x.transpose(1, 2, 0)
    return ((hwc * imagenet_std + imagenet_mean) * 255.).astype(np.uint8)
def embedding_concat(x, y):
    """Concatenate two feature maps channel-wise at x's spatial resolution.

    x (B, C1, H1, W1) is unfolded into stride-sized patches so each spatial
    position of y (B, C2, H2, W2) can be paired with its corresponding patch
    cell; the result is folded back to (B, C1 + C2, H1, W1).
    """
    batch, ch_x, h_x, w_x = x.size()
    _, ch_y, h_y, w_y = y.size()
    stride = int(h_x / h_y)
    patches = F.unfold(x, kernel_size=stride, dilation=1, stride=stride)
    patches = patches.view(batch, ch_x, -1, h_y, w_y)
    merged = torch.zeros(batch, ch_x + ch_y, patches.size(2), h_y, w_y)
    for cell in range(patches.size(2)):
        merged[:, :, cell, :, :] = torch.cat((patches[:, :, cell, :, :], y), 1)
    merged = merged.view(batch, -1, h_y * w_y)
    return F.fold(merged, kernel_size=stride, output_size=(h_x, w_x), stride=stride)
# Script entry point.
if __name__ == '__main__':
    main()
| nilq/baby-python | python |
"""
A QUANTIDADE DE UMA LETRA, A PRIMEIRA E A ÚLTIMA VEZ QUE APARECERAM NA FRASE!
"""
frase = str(input('Digite uma frase: ')).strip()
frase = frase.upper()
print('A quantidade de A é {} '.format(frase.count('A')))
print('A primeira vez que A apareceu foi: {} '.format(frase.find('A')+1))
print('A última vez que A apareceu foi: {} '.format(frase.rfind('A')+1))
'''
dividido = frase.split()
print(dividido)]
print(''.format(dividido[].count(['a'])))
'''
| nilq/baby-python | python |
# Demonstrate iterating a range and a string.
ll = range(5, 20, 5)
for value in ll:
    print(value)
# Printing the range object itself shows its repr, not its elements.
print(ll)

x = 'Python'
# Iterate characters directly instead of the range(len(...)) anti-pattern.
for ch in x:
    print(ch)
from typing import Sequence, Union
from PIL import Image
class BaseTransform:
    """
    Generic image transform type class
    """

    # Subclasses set this to a short identifier used to select the transform;
    # it stays None on the abstract base.
    slug: Union[None, str] = None  # unique string that identifies a given transform

    @staticmethod
    def apply_transform(
        img: Image.Image, parameters: Sequence[Union[str, int]]
    ) -> Image.Image:
        """Apply the transform to img using parameters; subclasses must override."""
        raise NotImplementedError

    @staticmethod
    def derive_parameters(query: str) -> Sequence[Union[str, int]]:
        """Extract apply_transform parameters from the raw query string; subclasses must override."""
        raise NotImplementedError
| nilq/baby-python | python |
from collections import Counter
input_data = open("day12.input").read().split("\n")
input_data = [tuple(a.split("-")) for a in input_data]
connections = []
for (a, b) in input_data:
if a != 'start':
connections.append((b, a))
connections += input_data
connections.sort()
def part1(path, b):
    """Part-1 visit rule: a cave may be entered only if not already on the path."""
    return all(visited != b for visited in path)
def part2(path, b):
    """Part-2 visit rule for appending cave `b` to `path`.

    'start' and 'end' may never repeat; at most one small (lowercase) cave
    may appear twice, and none may appear three times.
    """
    tally = Counter(path + [b])
    doubled_small = 0
    for cave, seen in tally.items():
        if cave in ('start', 'end') and seen > 1:
            return False
        if cave.islower() and seen > 1:
            if seen >= 3:
                return False
            doubled_small += 1
    return doubled_small <= 1
# Breadth-first enumeration of every path from 'start' to 'end'.
# Each round extends every frontier path by one cave; finished paths go to
# end_paths, extendable ones seed the next round.
start_paths = [['start']]
end_paths = []
while True:
    new_paths = []
    for path in start_paths:
        for (a, b) in connections:
            if path[-1] == a and b == 'end':
                end_paths.append(path + [b])
            elif path[-1] == a and (b.isupper() or part2(path, b)):
                # Big caves (uppercase) may always be revisited; small caves
                # are gated by the part2 rule (swap in part1 for part one).
                new_paths.append(path + [b])
    if len(new_paths) > 0:
        start_paths = new_paths
    else:
        break

print('Result')
for path in end_paths:
    print(path)
print(len(end_paths))
| nilq/baby-python | python |
import os
import numpy as np
import pandas as pd
from typing import Any, Dict, List, Optional, Tuple, NoReturn
import skfuzzy as fuzz
import skfuzzy.control as ctrl
from aggregation import OWA_T1
import matplotlib.pyplot as plt
class FLST1Model(object):
    """Type-1 fuzzy inference system classifying driving style.

    Inputs (antecedents): acceleration, deceleration, lateral jerk and
    velocity. Output (consequent): a Behavior score in [0, 1] mapped onto
    calm / moderate / aggressive fuzzy sets. Rules come from CSV files
    written by one ('single') or several ('multiple') human experts.
    """

    def __init__(self, rules_path:str, expert_mode:str):
        # expert_mode: 'single' (one rule file) or 'multiple' (OWA-aggregated
        # decisions across all rule files in rules_path).
        self.antecedent = {}
        self.consequent = {}
        self.expert_mode = expert_mode
        self.build_model()
        self.system = self.build_rules(rules_dir=rules_path)
        self.fuzz_inf = ctrl.ControlSystemSimulation(self.system,
                                                    flush_after_run=10)

    def build_model(self)->None:
        """Define the trapezoidal membership functions of every variable."""
        # ANTECENDENT
        self.antecedent = {}
        ### Acceleration
        # Universe 0..10 m/s^2, three overlapping trapezoids.
        self.antecedent['Acceleration'] = ctrl.Antecedent(universe=np.linspace(0,10, 11),
                                                          label='Acceleration')
        self.antecedent['Acceleration']['small'] = \
            fuzz.trapmf(self.antecedent['Acceleration'].universe, [0., 0., 3., 4.])
        self.antecedent['Acceleration']['medium'] = \
            fuzz.trapmf(self.antecedent['Acceleration'].universe, [3., 4., 6., 7.])
        self.antecedent['Acceleration']['large'] = \
            fuzz.trapmf(self.antecedent['Acceleration'].universe, [6., 7., 10., 10.])
        ### Deceleration
        # Same partitioning as Acceleration.
        self.antecedent['Deceleration'] = ctrl.Antecedent(universe=np.linspace(0,10, 11),
                                                          label='Deceleration')
        self.antecedent['Deceleration']['small'] = \
            fuzz.trapmf(self.antecedent['Deceleration'].universe, [0., 0., 3., 4.])
        self.antecedent['Deceleration']['medium'] = \
            fuzz.trapmf(self.antecedent['Deceleration'].universe, [3., 4., 6., 7.])
        self.antecedent['Deceleration']['large'] = \
            fuzz.trapmf(self.antecedent['Deceleration'].universe, [6., 7., 10., 10.])
        ### Lateral Jerk
        # Universe 0..16 (std of lateral jerk, m/s^3 -- see inference()).
        self.antecedent['LateralJerk'] = ctrl.Antecedent(universe=np.linspace(0,16, 17),
                                                         label='LateralJerk')
        self.antecedent['LateralJerk']['small'] = \
            fuzz.trapmf(self.antecedent['LateralJerk'].universe, [0., 0., 4., 6.])
        self.antecedent['LateralJerk']['medium'] = \
            fuzz.trapmf(self.antecedent['LateralJerk'].universe, [4., 6., 10., 12.])
        self.antecedent['LateralJerk']['large'] = \
            fuzz.trapmf(self.antecedent['LateralJerk'].universe, [10., 12., 16., 16.])
        ### Velocity
        # Universe 0..100 km/h, five sets.
        self.antecedent['Velocity'] = ctrl.Antecedent(universe=np.linspace(0,100, 101),
                                                      label='Velocity')
        self.antecedent['Velocity']['very_slow'] = fuzz.trapmf(
            self.antecedent['Velocity'].universe, [0., 0., 15., 20.])
        self.antecedent['Velocity']['slow'] = fuzz.trapmf(
            self.antecedent['Velocity'].universe, [15., 20., 30., 35.])
        self.antecedent['Velocity']['normal'] = fuzz.trapmf(
            self.antecedent['Velocity'].universe, [30., 35., 50., 55.])
        self.antecedent['Velocity']['fast'] = fuzz.trapmf(
            self.antecedent['Velocity'].universe, [50., 55., 70., 75.])
        self.antecedent['Velocity']['very_fast'] = fuzz.trapmf(
            self.antecedent['Velocity'].universe, [70., 75., 100., 100.])
        # CONSEQUENT
        ### Behavior (Driving Style)
        # Output score in [0, 1] partitioned into three styles.
        self.consequent['Behavior'] = ctrl.Consequent(universe=np.linspace(0,1., 11),
                                                      label='Behavior')
        self.consequent['Behavior']['calm'] = fuzz.trapmf(self.consequent['Behavior'].universe,
                                                          [0., 0., 0.2, 0.4])
        self.consequent['Behavior']['moderate'] = fuzz.trapmf(self.consequent['Behavior'].universe,
                                                              [0.2, 0.4, 0.6, 0.8])
        self.consequent['Behavior']['aggressive'] = fuzz.trapmf(self.consequent['Behavior'].universe,
                                                                [0.6, 0.8, 1., 1.])

    def build_rules(self, rules_dir:str)->ctrl.ControlSystem:
        """Load rule CSVs from rules_dir and assemble the control system.

        Raises (via assert) when the directory is missing or expert_mode is
        neither 'single' nor 'multiple'.
        """
        assert os.path.exists(rules_dir),\
            ('[Fuzzy Logic System T1 model][build_rules][ERROR]'
             ' rules_dir not found!{}').format(rules_dir)
        rules_files = os.listdir(rules_dir)
        rules = None
        if self.expert_mode=='single':
            # NOTE(review): single mode always reads 'rules_0.csv' regardless
            # of what os.listdir returned -- confirm this is intended.
            rules_files[0] = 'rules_0.csv'
            print('[Fuzzy Logic System T1 mode][build rules]', end='')
            print(f' single expert system! (rule:{rules_files[0]})')
            rules = self._single_expert_rules(os.path.join(rules_dir, rules_files[0]))
        elif self.expert_mode=='multiple':
            print('[Fuzzy Logic System - T1][build_rules]', end='')
            print(f' multiple expert system: (n_e: {len(rules_files)})')
            rules = self._multiple_expert_rules(rules_files, root_dir=rules_dir)
        else:
            assert False,\
                ('[Fuzzy Logic System T1 model][build_rules][ERROR]'
                 ' expert_mode invalid! {}').format(self.expert_mode)
        assert rules is not None,\
            ('[Fuzzy Logic System T1 model][build_rules][ERROR]'
             ' error while building rules..')
        system = ctrl.ControlSystem(rules=rules)
        return system

    def _single_expert_rules(self, rule_file:str)->List:
        """Build fuzzy rules from one expert's CSV (columns: velocity,
        acceleration, deceleration, lateral_jerk, driving_style)."""
        rules = pd.read_csv(rule_file)
        assert rules.shape[1] == 5,\
            ('[Fuzzy Logic System T1 model][build_rules] wrong rule_file shape'
             '{} != (m, 5)'.format(rules.shape))
        # Collapse the expert's 9-point scale onto the three Behavior sets.
        domain = {'calm':'calm',
                  'more_calm_than_moderate':'calm',
                  'between_calm_and_moderate':'moderate',
                  'more_moderate_than_calm':'moderate',
                  'moderate':'moderate',
                  'more_moderate_than_aggressive':'moderate',
                  'between_moderate_and_aggressive':'aggressive',
                  'more_aggressive_than_moderate':'aggressive',
                  'aggressive':'aggressive'}
        #self._check_rules(rules=rules)
        fuzz_rules = []
        for line in rules.iterrows():
            index, r = line[0], line[1]
            xs = domain[r['driving_style']]
            fr = ctrl.Rule(antecedent=(self.antecedent['Velocity'][r['velocity']] &\
                                       self.antecedent['Acceleration'][r['acceleration']] &\
                                       self.antecedent['Deceleration'][r['deceleration']] &\
                                       self.antecedent['LateralJerk'][r['lateral_jerk']]),
                           consequent=self.consequent['Behavior'][xs],
                           label=f'rule - {index}')
            fuzz_rules.append(fr)
        return fuzz_rules

    def _multiple_expert_function(self, label:str)->float:
        """Map an expert's 9-point label onto an equally spaced score in (0, 1]."""
        domain = {'calm':1,
                  'more_calm_than_moderate':2,
                  'between_calm_and_moderate':3,
                  'more_moderate_than_calm':4,
                  'moderate':5,
                  'more_moderate_than_aggressive':6,
                  'between_moderate_and_aggressive':7,
                  'more_aggressive_than_moderate':8,
                  'aggressive':9}
        return (1./9.)*domain[label]

    def _multiple_expert_rules(self, rules_files:List[str], root_dir:str)->List:
        """Aggregate several experts' rule files into one rule set.

        Per-rule decisions are scored, fused with an OWA operator, and the
        fused value fuzzified back to the best-matching Behavior set.
        """
        rules = None
        #get rules
        # Assumes every file shares the same antecedent rows; the last file
        # read supplies the antecedent columns.
        decisions = []
        for rule_file in rules_files:
            _file = pd.read_csv(os.path.join(root_dir,rule_file))
            decisions.append(_file['driving_style'].values)
            rules = _file[['velocity', 'acceleration', 'deceleration', 'lateral_jerk']]
        decisions = np.asarray(decisions).T
        #aggregate decisions
        y = []
        for d in decisions:
            #print(d, end="")
            xs = np.array([self._multiple_expert_function(label=l) for l in d])
            value = OWA_T1(X=xs,kind=2)
            memb_value, set_labels = self._fuzz_driving_style(value=value)
            y.append(set_labels[np.argmax(memb_value)])
            #print(y[-1])
        #create rules
        fuzz_rules = []
        for line, _y in zip(rules.iterrows(), y):
            index, r = line[0], line[1]
            fr = ctrl.Rule(antecedent=(self.antecedent['Velocity'][r['velocity']] &\
                                       self.antecedent['Acceleration'][r['acceleration']] &\
                                       self.antecedent['Deceleration'][r['deceleration']] &\
                                       self.antecedent['LateralJerk'][r['lateral_jerk']]),
                           consequent=self.consequent['Behavior'][_y],
                           label=f'rule - {index}')
            fuzz_rules.append(fr)
        return fuzz_rules

    def _fuzz_driving_style(self,value:float)->Tuple:
        """Return membership degrees of `value` in each Behavior set, plus labels."""
        memb_value = []
        set_labels = []
        for label, term in self.consequent['Behavior'].terms.items():
            mi =fuzz.interp_membership(self.consequent['Behavior'].universe,
                                       term.mf,
                                       value)
            memb_value.append(mi)
            set_labels.append(label)
        return memb_value, set_labels

    def inference(self, observation:Dict) -> Dict:
        """
        perform inference at the fuzzy system

        observation keys: 'velocity' (m/s), 'acceleration' (m/s^2),
        'deceleration' (m/s^2), 'lateral_jerk' (std, m/s^3).

        Returns a dict with the crisp Behavior 'value', its
        'membership_values' per set, and the matching 'set_labels'.
        """
        vel = observation['velocity']*3.6 #m/s -> Km/h
        acc = observation['acceleration'] #m/s^2
        dec = observation['deceleration'] #m/s^2
        ljk = observation['lateral_jerk'] #std (m/s^3)
        self.fuzz_inf.input['Acceleration'] = acc
        self.fuzz_inf.input['Deceleration'] = dec
        self.fuzz_inf.input['LateralJerk'] = ljk
        self.fuzz_inf.input['Velocity'] = vel
        self.fuzz_inf.compute()
        y = self.fuzz_inf.output['Behavior']
        memb_value, set_labels = self._fuzz_driving_style(value=y)
        result = {}
        result['value'] = y
        result['membership_values'] = np.asarray(memb_value)
        result['set_labels']=set_labels
        return result

    def plot(self)->None:
        """Display the membership functions of all variables (blocking)."""
        self.antecedent['Acceleration'].view()
        self.antecedent['Deceleration'].view()
        self.antecedent['Velocity'].view()
        self.consequent['Behavior'].view()
        plt.show()
| nilq/baby-python | python |
from distutils.core import setup
from Cython.Build import cythonize

# Build script for the Cython extension; run:
#   python setup.py build_ext --inplace
# NOTE(review): distutils is deprecated since Python 3.10 (PEP 632);
# setuptools.setup is the drop-in replacement.
setup(ext_modules = cythonize('fasterloop.pyx'))
| nilq/baby-python | python |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from enum import Enum, unique
@unique
class AnalyzeFieldIdx(Enum):
    """Column indexes of the fields describing one analyze entry."""

    IDX_MODULE_NAME = 0  # module the analysis belongs to
    # NOTE(review): "ANALYE" looks like a typo for "ANALYZE", but the member
    # name is public API -- renaming would break callers.
    IDX_ANALYE_NAME = 1  # analysis name
    IDX_COLUMN_INFO = 2  # column metadata
    IDX_IS_EXECUTE = 3   # whether the analysis should be executed
| nilq/baby-python | python |
import unittest
from . import day01
class TestDay1(unittest.TestCase):
    """Unit tests for the day01 fuel-requirement calculations."""

    def test_basic(self):
        # Sanity check that the test harness itself runs.
        self.assertEqual('hello', 'hello')

    def test_fuel_is_calculated_correctly_for_given_examples(self):
        # Worked examples from the puzzle statement.
        self.assertEqual(day01.get_fuel_required(module_mass=12), 2)
        self.assertEqual(day01.get_fuel_required(module_mass=14), 2)
        self.assertEqual(day01.get_fuel_required(module_mass=1969), 654)
        self.assertEqual(day01.get_fuel_required(module_mass=100756), 33583)

    def test_fuel_for_fuel_is_calculated_correctly_for_given_examples(self):
        # Expected values are the puzzle's totals minus the initial fuel,
        # i.e. only the extra fuel needed to carry the fuel itself.
        self.assertEqual(day01.get_fuel_required_for_fuel(fuel_volume=2), 0)
        self.assertEqual(day01.get_fuel_required_for_fuel(fuel_volume=654), 966-654)
        self.assertEqual(day01.get_fuel_required_for_fuel(fuel_volume=33583), 50346-33583)
if __name__ == "__main__":
unittest.main() | nilq/baby-python | python |
# Code by JohnXdator
def count_advancing(k, scores):
    """Return how many contestants advance to the next round.

    A contestant advances with a score at least equal to the k-th placed
    contestant's score, provided the score is positive (Codeforces 158A
    semantics; scores are assumed non-negative).

    Args:
        k: 1-based cut-off position.
        scores: list of scores in non-increasing order.
    """
    threshold = scores[k - 1]
    # The original code expressed this with a no-op `count>=count+0` branch;
    # the intent is simply: score >= threshold and score > 0.
    return sum(1 for s in scores if s >= threshold and s > 0)


def main():
    """Read n, k and the score list from stdin; print the advancer count."""
    n, k = map(int, input().split())
    scores = list(map(int, input().split()))
    print(count_advancing(k, scores))


if __name__ == '__main__':
    main()
| nilq/baby-python | python |
from django.test import TestCase
from django.core.management import call_command
class TestUi(TestCase):
    """Login endpoint tests: valid credentials succeed, bad ones return 401."""

    def setUp(self):
        # Load the fixtures every test depends on (users, base data, test world).
        call_command('loaddata', 'user', verbosity=0)
        call_command('loaddata', 'init', verbosity=0)
        call_command('loaddata', 'test/testWorld', verbosity=0)

    def test_ui(self):
        # Correct username/password: login succeeds with HTTP 200.
        response = self.client.post('/ui/login/', {'username': 'admin', 'password': 'admin1379'})
        self.assertEqual(response.status_code, 200)

    def test_wrong_pwd(self):
        # Known user but wrong password: rejected with HTTP 401.
        response = self.client.post('/ui/login/', {'username': 'admin', 'password': 'admin137xxx'})
        self.assertEqual(response.status_code, 401)

    def test_bad_user(self):
        # Unknown username: rejected with HTTP 401.
        response = self.client.post('/ui/login/', {'username': 'adminxxx', 'password': 'admin1379'})
        self.assertEqual(response.status_code, 401)
import math
import warnings
from torch import Tensor
import torch.nn as nn
def zeros_():
    """Return an initializer that fills the given tensor with the scalar 0."""
    def init_fn(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        # fan_in/fan_out are ignored; kept for a uniform initializer signature.
        return nn.init.zeros_(tensor)
    return init_fn
def ones_():
    """Return an initializer that fills the given tensor with the scalar 1."""
    def init_fn(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        # fan_in/fan_out are ignored; kept for a uniform initializer signature.
        return nn.init.ones_(tensor)
    return init_fn
def uniform_(a: float = 0., b: float = 1.):
    """Return an initializer drawing values i.i.d. from U(a, b).

    Args:
        a (float): lower bound of the uniform distribution. Defaults 0.0.
        b (float): upper bound of the uniform distribution. Defaults 1.0.
    """
    def init_fn(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        # The fan arguments are unused; the distribution is fixed by a and b.
        return nn.init.uniform_(tensor, a, b)
    return init_fn
def normal_(mean: float = 0., std: float = 1.):
    """Return an initializer drawing values i.i.d. from N(mean, std**2).

    Args:
        mean (float): mean of the normal distribution. Defaults 0.0.
        std (float): standard deviation of the normal distribution. Defaults 1.0.
    """
    def init_fn(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        # The fan arguments are unused; the distribution is fixed by mean/std.
        return nn.init.normal_(tensor, mean, std)
    return init_fn
def trunc_normal_(mean: float = 0., std: float = 1., a: float = -2., b: float = 2.):
    """Return a truncated-normal initializer.

    Values are drawn from N(mean, std**2) and redrawn while they fall outside
    [a, b]; the sampling works best when a <= mean <= b.

    Args:
        mean (float): mean of the underlying normal. Defaults 0.0.
        std (float): standard deviation of the underlying normal. Defaults 1.0.
        a (float): lower cutoff. Defaults -2.0.
        b (float): upper cutoff. Defaults 2.0.
    """
    def init_fn(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        # The fan arguments are unused; kept for a uniform initializer signature.
        return nn.init.trunc_normal_(tensor, mean, std, a, b)
    return init_fn
def kaiming_uniform_(a=0, mode='fan_in', nonlinearity='leaky_relu'):
    """Return a He (Kaiming) uniform initializer.

    Values are sampled from U(-bound, bound) where
    bound = gain * sqrt(3 / fan); *fan* is selected by *mode* and the gain
    comes from ``nn.init.calculate_gain(nonlinearity, a)``.

    Args:
        a: negative slope of the rectifier that follows (leaky_relu only).
        mode: 'fan_in' (default) preserves forward-pass variance;
            'fan_out' preserves backward-pass variance.
        nonlinearity: non-linear function name; recommended only with
            'relu' or 'leaky_relu' (default).
    """
    # adapted from torch.nn.init
    def init_fn(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        # Nothing to initialize in an empty tensor.
        if 0 in tensor.shape:
            warnings.warn("Initializing zero-element tensors is a no-op")
            return tensor

        fan_options = {'fan_in': ('Fan_in is not provided.', fan_in),
                       'fan_out': ('Fan_out is not provided.', fan_out)}
        if mode not in fan_options:
            raise ValueError(f'Invalid initialization mode \'{mode}\'')
        message, fan = fan_options[mode]
        assert fan is not None, message

        std = nn.init.calculate_gain(nonlinearity, a) / math.sqrt(fan)
        bound = math.sqrt(3.) * std
        return nn.init.uniform_(tensor, -bound, bound)

    return init_fn
def kaiming_normal_(a=0, mode='fan_in', nonlinearity='leaky_relu'):
    """Return a He (Kaiming) normal initializer.

    Values are sampled from N(0, std**2) with std = gain / sqrt(fan);
    *fan* is selected by *mode* and the gain comes from
    ``nn.init.calculate_gain(nonlinearity, a)``.

    Args:
        a: negative slope of the rectifier that follows (leaky_relu only).
        mode: 'fan_in' (default) preserves forward-pass variance;
            'fan_out' preserves backward-pass variance.
        nonlinearity: non-linear function name; recommended only with
            'relu' or 'leaky_relu' (default).
    """
    # adapted from torch.nn.init
    def init_fn(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        # Nothing to initialize in an empty tensor.
        if 0 in tensor.shape:
            warnings.warn("Initializing zero-element tensors is a no-op")
            return tensor

        if mode == 'fan_in':
            assert fan_in is not None, 'Fan_in is not provided.'
            fan = fan_in
        elif mode == 'fan_out':
            assert fan_out is not None, 'Fan_out is not provided.'
            fan = fan_out
        else:
            raise ValueError(f'Invalid initialization mode \'{mode}\'')

        sigma = nn.init.calculate_gain(nonlinearity, a) / math.sqrt(fan)
        return nn.init.normal_(tensor, 0, sigma)

    return init_fn
def xavier_uniform_(a: float = math.sqrt(3.), scale: float = 2., gain: float = 1.):
    """Return a Glorot (Xavier) uniform initializer.

    Samples U(-a*std, a*std) with std = gain * sqrt(scale / fan), where fan
    is fan_in plus fan_out when the latter is supplied, else fan_in alone.

    Args:
        a (float, optional): multiplier turning the std into the uniform
            bound. Defaults ``math.sqrt(3.)``.
        scale (float, optional): variance scale factor. Defaults 2.0.
        gain (float, optional): extra scaling factor. Defaults 1.0.
    """
    # adapted from torch.nn.init
    def init_fn(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        assert fan_in is not None, 'Fan_in is not provided.'
        # Sum the fans when both are known; otherwise use fan_in alone.
        fan = fan_in if fan_out is None else fan_in + fan_out
        std = gain * math.sqrt(scale / float(fan))
        bound = a * std
        return nn.init.uniform_(tensor, -bound, bound)

    return init_fn
def xavier_normal_(scale: float = 2., gain: float = 1.):
    """Return a Glorot (Xavier) normal initializer.

    Samples N(0, std**2) with std = gain * sqrt(scale / fan), where fan is
    fan_in plus fan_out when the latter is supplied, else fan_in alone.

    Args:
        scale (float, optional): variance scale factor. Defaults 2.0.
        gain (float, optional): extra scaling factor. Defaults 1.0.
    """
    # adapted from torch.nn.init
    def init_fn(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        assert fan_in is not None, 'Fan_in is not provided.'
        # Sum the fans when both are known; otherwise use fan_in alone.
        fan = fan_in if fan_out is None else fan_in + fan_out
        std = gain * math.sqrt(scale / float(fan))
        return nn.init.normal_(tensor, 0., std)

    return init_fn
def lecun_uniform_():
    """Return a LeCun uniform initializer: U(-b, b) with b = sqrt(3 / fan_in)."""
    # adapted from jax.nn.initializers
    def init_fn(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        assert fan_in is not None, 'Fan_in is not provided.'
        variance = 1.0 / fan_in
        limit = math.sqrt(3 * variance)
        return nn.init.uniform_(tensor, -limit, limit)
    return init_fn
def lecun_normal_():
    """Return a LeCun normal initializer (truncated normal, variance 1/fan_in)."""
    # adapted from jax.nn.initializers
    def init_fn(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        assert fan_in is not None, 'Fan_in is not provided.'
        # The magic constant comes from jax.nn.initializers; presumably it
        # compensates for the variance lost to truncation — confirm upstream.
        sigma = math.sqrt(1.0 / fan_in)
        return nn.init.trunc_normal_(tensor, std=sigma / .87962566103423978)
    return init_fn
#!/usr/bin/python
# Filename: mysqlfunc.py
# Purpose: All the mysql functions
# !!! need to encapsulate a cur with something like a using statement
# Database errors
import MySQLdb, pdb, logger, dnsCheck
from MySQLdb import Error
#All the variables for paths
from variables import *
def create_dbConnection():
    """Open a connection to the configured MySQL database.

    Connection parameters (databaseUser, databasePasswd, databaseName) come
    from the star import of ``variables``. Returns the connection object, or
    None when MySQLdb raises an error (the error is printed, not re-raised).
    """
    try:
        # trying to create a connection with the proceeding connection
        a = MySQLdb.connect(user=databaseUser, passwd=databasePasswd, db=databaseName, unix_socket="/opt/lampp/var/mysql/mysql.sock")
        return a
    except Error as e:
        print(e)
        return None
def sqlExeCommit(statem):
    """Execute *statem* on a fresh connection and commit immediately."""
    connection = create_dbConnection()
    connection.cursor().execute(statem)
    connection.commit()
def sqlCommit(conn):
    """Commit the current transaction on *conn*."""
    conn.commit()
# Only execute
def sqlExe(cur, statem):
    """Execute *statem* on an existing cursor without committing."""
    cur.execute(statem)
# Execute return
def sqlExeRet(statem):
    """Run *statem* on a fresh connection and return all result rows."""
    cursor = create_dbConnection().cursor()
    cursor.execute(statem)
    return cursor.fetchall()
def sqlExeRetOne(statem):
    """Run *statem* on a fresh connection and return the first result row."""
    cursor = create_dbConnection().cursor()
    cursor.execute(statem)
    return cursor.fetchone()
# Returns the domains based on the domainRangeId
def domainsBydomainRangeId(id):
    """Return the domain names of all domains in the given domain range.

    Args:
        id: domainRangeId value to filter on.
    Returns:
        list[str]: matching domainName values.
    """
    conn = create_dbConnection()
    cur = conn.cursor()
    # Parameterized query instead of string interpolation (SQL-injection safe).
    cur.execute("SELECT domainName FROM Domains WHERE domainRangeId = %s", (id,))
    return [row[0] for row in cur.fetchall()]
# Returns the domains based on the domainRangeId
def domainIdsBydomainRangeId(id):
    """Return the domainIds of all domains in the given domain range.

    Args:
        id: domainRangeId value to filter on.
    Returns:
        list[int]: matching domainId values.
    """
    conn = create_dbConnection()
    cur = conn.cursor()
    # Parameterized query instead of string interpolation (SQL-injection safe).
    cur.execute("SELECT domainId FROM Domains WHERE domainRangeId = %s", (id,))
    return [int(row[0]) for row in cur.fetchall()]
# Returns the domain name for a domainId
def domainNameByDomainId(id):
    """Return the domainName for the given domainId.

    Raises TypeError (subscript of None) if the id does not exist.
    """
    conn = create_dbConnection()
    cur = conn.cursor()
    # Parameterized query instead of string interpolation (SQL-injection safe).
    cur.execute("SELECT domainName FROM Domains WHERE domainId = %s", (id,))
    return cur.fetchone()[0]
# Return the domainRange value associated with the rangeId
def domainRangeByrangeId(cur, id):
    """Return the domainRange value for *id* using the provided cursor.

    Args:
        cur: an open DB-API cursor.
        id: domainRangeId to look up.
    """
    # Parameterized query instead of string interpolation (SQL-injection safe).
    cur.execute("SELECT domainRange FROM InScope WHERE domainRangeId = %s", (id,))
    return cur.fetchone()[0]
# Return all scope Ids
def AllScopeIds(self=None):
    """Return every domainRangeId present in the InScope table.

    ``self`` is unused and now optional: the original signature suggests this
    was copied from a class method, so the parameter is kept (with a default)
    for backward compatibility with any caller passing an instance.
    """
    # Fixed: the original called mysqlfunc.create_dbConnection(), which is a
    # NameError inside this module itself.
    conn = create_dbConnection()
    cur = conn.cursor()
    # Grab all the InScopeIds
    cur.execute("SELECT domainRangeId FROM InScope")
    return [int(row[0]) for row in cur.fetchall()]
#Good for iterates on own commit
def insertDomain(domain, domainRangeId):
conn = create_dbConnection()
cur = conn.cursor()
# checkInternet
if dnsCheck.checkHostByName(domain):
# pdb catch in case something goes wrong
# Find ips
try:
# Insert into Domains
statem = "INSERT IGNORE INTO Domains(domainRangeId, domainName, dateFound) VALUES (%s, \"%s\", CURDATE())"%(domainRangeId, domain)
cur.execute(statem)
print '[+] New Domain:',domain
logger.logNewDomain(domain)
except Exception,e:
print e
pdb.set_trace()
# Commit
conn.commit()
def removeDomain(domain):
    """Delete the Domains row matching *domain* and commit."""
    conn = create_dbConnection()
    cur = conn.cursor()
    # Parameter binding quotes the value safely (was string-interpolated,
    # i.e. vulnerable to SQL injection).
    cur.execute('DELETE FROM Domains WHERE domainName like %s', (domain,))
    conn.commit()
def removeDomainArray(domainArray):
    """Delete each domain in *domainArray* plus its Ips rows; commit once."""
    conn = create_dbConnection()
    cur = conn.cursor()
    for domain in domainArray:
        # Remove dependent Ips rows first, then the domain itself.
        # Parameterized (was string-interpolated, i.e. SQL-injectable).
        cur.execute('DELETE FROM Ips WHERE domainId = (SELECT domainId FROM Domains where domainName = %s)', (domain,))
        cur.execute('DELETE FROM Domains WHERE domainName like %s', (domain,))
    conn.commit()
def returnAllDomains(cur):
    """Return every domainName in the Domains table via the given cursor."""
    cur.execute("SELECT domainName FROM Domains")
    return [row[0] for row in cur.fetchall()]
# Returns an Array of inScope Ids based on the program
# oldName: returnInScopeIds
def InScopeIdsByProgramName(program):
    """Return all domainRangeIds in scope for the named program.

    Args:
        program: program name (untrusted input — bound as a parameter).
    Returns:
        list[int]: matching domainRangeId values.
    """
    conn = create_dbConnection()
    cur = conn.cursor()
    # Parameterized nested query (the original interpolated the program name).
    cur.execute(
        "SELECT domainRangeId FROM InScope WHERE programId = "
        "(SELECT programId FROM Programs WHERE name = %s)", (program,))
    return [int(row[0]) for row in cur.fetchall()]
def programNameByProgramId(programId):
    """Return the program name for *programId*."""
    conn = create_dbConnection()
    cur = conn.cursor()
    # Parameterized (was interpolated into the SQL string).
    cur.execute("SELECT name from Programs WHERE programId = %s", (programId,))
    return cur.fetchone()[0]
def ProgramIdByProgramName(programName):
    """Return the programId for the program named *programName*.

    Fixes the original, which referenced an undefined variable (``programId``,
    a NameError on every call) and compared against a non-existent column
    (``WHERE Programs = ...``); the name column is used elsewhere in this
    module (see InScopeIdsByProgramName).
    """
    conn = create_dbConnection()
    cur = conn.cursor()
    cur.execute("SELECT programId from Programs WHERE name = %s", (programName,))
    return cur.fetchone()[0]
def blacklistedByDomainRangeId(cur, id):
    """Return blacklisted content entries for the given domain range id.

    Args:
        cur: an open DB-API cursor.
        id: domainRangeId to filter on.
    Returns:
        list: blacklistedContent values.
    """
    # Parameterized query instead of string interpolation (SQL-injection safe).
    cur.execute("SELECT blacklistedContent FROM BlacklistedDomains WHERE domainRangeId = %s", (id,))
    return [row[0] for row in cur.fetchall()]
#!/usr/bin/env python
import functools
import os
import os.path
from datetime import timedelta
from functools import update_wrapper
from flask import Flask, abort, current_app, jsonify, make_response, request
import psycopg2
# Database connection settings are injected via environment variables.
DATABASE = os.environ['POSTGRES_DB']
USERNAME = os.environ['POSTGRES_USER']
PASSWORD = os.environ['POSTGRES_PASSWORD']

# Point-in-polygon lookup: which electoral division's geometry contains the
# given SRID-4283 point. Filled in with str.format before execution; the
# :f format specifiers force the values to be rendered as floats.
QUERY_FORMAT = """
SELECT elect_div FROM com_elb WHERE
ST_Contains(geom, ST_SetSRID(ST_Point({longitude:f}, {latitude:f}), 4283))
"""

app = Flask(__name__)

if not app.debug:
    # Outside debug mode, send log records to a rotating file at INFO level.
    import logging
    from logging.handlers import RotatingFileHandler
    logfile = os.path.expanduser('/home/docker/logs/division.log')
    file_handler = RotatingFileHandler(logfile)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)

# Translation table deleting spaces, hyphens and apostrophes — used to
# normalize division names before returning them.
translation_table = str.maketrans('', '', " -'")
def crossdomain(origin=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True):
    """Decorator that adds CORS (Access-Control-*) headers to a Flask view.

    Args:
        origin: allowed origin(s); a string, or an iterable joined with ", ".
        methods: explicit Access-Control-Allow-Methods list; when None the
            allowed methods are taken from Flask's default OPTIONS response.
        headers: allowed request headers (string or iterable of strings).
        max_age: Access-Control-Max-Age in seconds (or a timedelta).
        attach_to_all: when False, headers are only added to OPTIONS responses.
        automatic_options: when True, OPTIONS requests get Flask's default
            options response instead of invoking the wrapped view.
    """
    # Normalize each argument into the comma-separated string form that the
    # CORS headers expect.
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, str):
        headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, str):
        origin = ', '.join(origin)
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()

    def get_methods():
        # Prefer the explicit method list; otherwise mirror what Flask's
        # default OPTIONS response would advertise.
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']

    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp

            # Attach the CORS headers to the outgoing response.
            h = resp.headers
            h['Access-Control-Allow-Origin'] = origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
            return resp

        # Disable Flask's automatic OPTIONS handling so preflight requests
        # reach wrapped_function above.
        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)
    return decorator
@app.route('/division', methods=['GET', 'POST', 'OPTIONS'])
@crossdomain(origin='*', headers=['Content-Type', 'X-Requested-With'])
def division_lookup():
    """Look up the electoral division containing a latitude/longitude point.

    Accepts ``latitude``/``longitude`` as JSON (POST) or query-string (GET)
    parameters and returns ``{"division": <normalized name or null>}``.
    """
    if request.json is None and request.method == 'POST':
        abort(400, "Must provide JSON (did you set Content-type?)")
    args = request.json if request.method == 'POST' else request.args
    # Both coordinates are required (fixed typo: "Most provide" -> "Must").
    for param in ('latitude', 'longitude'):
        if param not in args:
            abort(400, "Must provide latitude and longitude")
    conn = psycopg2.connect(host='postgres', database=DATABASE,
                            user=USERNAME, password=PASSWORD)
    try:
        cursor = conn.cursor()
        # float() coerces/validates the inputs before they reach the query;
        # QUERY_FORMAT's :f specifiers only accept numbers.
        cursor.execute(QUERY_FORMAT.format(latitude=float(args['latitude']),
                                           longitude=float(args['longitude'])))
        result = cursor.fetchone()
    finally:
        # Fixed: the connection was previously leaked on every request.
        conn.close()
    if result is None:
        name = None
    else:
        # Normalize: lowercase, then strip spaces, hyphens, apostrophes.
        name = result[0].lower().translate(translation_table)
    return jsonify({'division': name})
if __name__ == '__main__':
    # Development entry point only; a WSGI server should run `app` in production.
    app.run(debug=True)
| nilq/baby-python | python |
from botcity.core import DesktopBot
# Uncomment the line below for integrations with BotMaestro
# Using the Maestro SDK
# from botcity.maestro import *
class Bot(DesktopBot):
    """Minimal BotCity desktop bot: opens the BotCity website when run.

    The commented-out sections are BotMaestro integration hooks kept from
    the project template.
    """

    def action(self, execution=None):
        # Fetch the Activity ID from the task:
        # task = self.maestro.get_task(execution.task_id)
        # activity_id = task.activity_id

        # Opens the BotCity website.
        self.browse("http://www.botcity.dev")

        # Uncomment to mark this task as finished on BotMaestro
        # self.maestro.finish_task(
        #     task_id=execution.task_id,
        #     status=AutomationTaskFinishStatus.SUCCESS,
        #     message="Task Finished OK."
        # )

    def not_found(self, label):
        # NOTE(review): presumably invoked by DesktopBot when a UI element
        # lookup fails — confirm against the botcity-core documentation.
        print(f"Element not found: {label}")


if __name__ == '__main__':
    Bot.main()
| nilq/baby-python | python |
import os
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import PersonalityInsightsV3
from services.base import BaseService, BaseServiceResult
class IBMWatson(BaseService):
    """
    IBM Watson Personality Insights service wrapper.
    """
    def __init__(self, service_wrapper, service_url):
        """
        Initiate the service engine
        :param service_wrapper: IBM Watson service engine
        :type service_wrapper: PersonalityInsightsV3
        :param service_url: IBM Watson service URL address
        :type service_url: str
        """
        # Register with the shared base class under the service key "ibm".
        super().__init__("ibm", service_url)
        self.service = service_wrapper

    def get_personality_scores(self, text_content):
        """
        Get personality scores from textual content
        :param text_content: Textual data of minimum 100 words
        :type text_content: str
        :return: Results from service engine
        :rtype: BaseServiceResult
        """
        result = self.service.profile(
            {"contentItems": [{"content": text_content}]},
            accept="application/json",
            raw_scores=True,
            consumption_preferences=True,
        ).get_result()
        # Wrap the raw engine response in a result object with status 200.
        return BaseServiceResult(200, result)
# Service credentials and endpoint come from the environment.
PERSONALITY_API_KEY = os.getenv("PERSONALITY_API_KEY")
PERSONALITY_URL = os.getenv("PERSONALITY_URL")

# Shared Personality Insights client; the API version is pinned.
PERSONALITY_ENGINE = PersonalityInsightsV3(
    version="2017-10-13", authenticator=IAMAuthenticator(apikey=PERSONALITY_API_KEY)
)
PERSONALITY_ENGINE.set_service_url(PERSONALITY_URL)

# Module-level service instance wrapping the configured engine.
IBMWatsonService = IBMWatson(PERSONALITY_ENGINE, PERSONALITY_URL)
| nilq/baby-python | python |
#!/usr/bin/python3
# creates the SQLite database file - run this first
import sqlite3

# create db file (sqlite3.connect creates the file if it does not exist)
con = sqlite3.connect('./db/ic_log1_2020-06-30_manual.db')
cur = con.cursor()

# create table holding the internet-connection log
cur.execute('''CREATE TABLE IF NOT EXISTS iclog (date real, ic integer, note text)''')

# close the connection
con.close()

'''
Legend:
date: a UNIX timestamp
ic: internet connection boolean true or false, 1 | 0
error: short description of the problem
'''
# NOTE(review): the legend's "error" field corresponds to the column named
# "note" in the schema above — one of the two names is likely stale.
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
def main():
    """Find non-negative integers x, y, z with x + y + z == n and
    2x + 3y + 4z == m; print the first solution found, or "-1 -1 -1"."""
    n, m = map(int, input().split())
    # Substituting z = n - x - y into 2x + 3y + 4z = m gives 2x + y = 4n - m.
    summed = 4 * n - m
    xy = list()
    # 2x + 3y + 4z = M
    # x + y + z = N  (solve this system; original comment was in Japanese)
    # See:
    # https://atcoder.jp/contests/abc006/submissions/1112016
    # Post-mortem (translated): the WA came from assuming the infeasible
    # boundary was <= 0 and from missing the second feasibility condition.
    if summed < 0:
        print(-1, -1, -1)
        exit()
    # Fix x and derive y; candidates are kept in ascending-x order so the
    # first feasible (x, y, z) printed below is deterministic.
    for x in range(summed // 2 + 1):
        y = summed - 2 * x
        if y >= 0:
            xy.append((x, y))
    for x, y in xy:
        z = n - (x + y)
        if z >= 0:
            print(x, y, z)
            exit()
    print(-1, -1, -1)


if __name__ == '__main__':
    main()
| nilq/baby-python | python |
#!/usr/bin/env python
"""
--------------------------------------------------------------------------------
Created: Jackson Lee 7/8/14
This script reads in a fasta or fastq and filters for sequences greater or less
than a threshold length
Input fastq file
@2402:1:1101:1392:2236/2
GATAGTCTTCGGCGCCATCGTCATCCTCTACACCCTCAAGGCGAGCGGCGCGATGGAGACAATCCAGTGGGGCATGCAGCAGGTGACACCGGACTCCCGGATCCA
+
@@CFFFFFGHHHHIJJIIJIHIJIIIIJIIGEIJJIJJJJJIIIJHFFDDBD8BBD>BCBCCDDDCDCCCDBDDDDDDDDDDD<CDDDDDDDDBBCDDBD<<BDD
--------------------------------------------------------------------------------
usage: filter_fasta_by_len.py -i sequence.fasta -g filter_greater_than -l filter_less_than
"""
#-------------------------------------------------------------------------------
#Header - Linkers, Libs, Constants
from string import strip
from Bio import SeqIO
from argparse import ArgumentParser, RawDescriptionHelpFormatter
#-------------------------------------------------------------------------------
#function declarations
def process_and_generate(input_iterator, threshold, greaterflag):
    """Yield the records from *input_iterator* that pass the length filter.

    When *greaterflag* is true, records longer than *threshold* are dropped
    (only len(seq) <= threshold pass); otherwise records shorter than
    *threshold* are dropped (only len(seq) >= threshold pass).
    """
    for record in input_iterator:
        length = len(record.seq)
        keep = length <= threshold if greaterflag else length >= threshold
        if keep:
            yield record
#-------------------------------------------------------------------------------
#Body
print "Running..."

if __name__ == '__main__':
    # Command-line interface; -i is required, and at least one of -g/-l must
    # be supplied (checked after parsing, below).
    parser = ArgumentParser(usage = "filter_fasta_by_len.py -i sequence.fasta -g filter_greater_than -l filter_less_than",
                            description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument("-i", "--input_fastq", action="store",
                        dest="inputfilename",
                        help="fastq file of input sequences")
    parser.add_argument("-g", "--filter_greater_than", action="store", type=int,
                        dest="greaterthan",
                        help="filter out sequences greater than or equal to \
                        this size")
    parser.add_argument("-l", "--filter_less_than", action="store", type=int,
                        dest="lessthan",
                        help="filter out sequences less than or equal this size")
    options = parser.parse_args()

    # Manual required-argument check (argparse required= was not used).
    mandatories = ["inputfilename"]
    for m in mandatories:
        if not options.__dict__[m]:
            print "\nError: Missing Arguments\n"
            parser.print_help()
            exit(-1)

    inputfilename = options.inputfilename
    # Infer the Biopython format name (fasta vs fastq) from the extension.
    # NOTE(review): an unrecognized extension leaves `ext` unbound and the
    # SeqIO.parse call below would raise NameError.
    left, __, right = inputfilename.rpartition('.')
    fasta =['fa','fasta','faa','fas', 'fna']
    fastq =['fq','fastq']
    if right in fasta:
        ext = "fasta"
    elif right in fastq:
        ext = "fastq"

    print "Processing read file: " + inputfilename
    with open(inputfilename,'U') as infile:
        parse_iterator = SeqIO.parse(infile, ext)

        # Four cases depending on which thresholds were supplied; when both
        # are given, two generators are chained to apply both filters.
        if options.greaterthan == None and options.lessthan == None:
            print "\nError: Missing Comparison Value\n"
            parser.print_help()
            exit(-1)
        elif options.greaterthan == None and options.lessthan != None:
            lessthan = options.lessthan
            print "and filtering out sequences less than ", lessthan
            outputfilename = left + '.filtered.lessthan.' + str(lessthan) + "." + right
            with open(outputfilename, 'w') as outfile:
                record_generator = process_and_generate(parse_iterator, lessthan, False)
                SeqIO.write(record_generator, outfile, ext)
        elif options.greaterthan != None and options.lessthan == None:
            greaterthan = options.greaterthan
            print "and filtering out sequences greater than ", greaterthan
            outputfilename = left + '.filtered.greaterthan.' + str(greaterthan) + "." + right
            with open(outputfilename, 'w') as outfile:
                record_generator = process_and_generate(parse_iterator, greaterthan, True)
                SeqIO.write(record_generator, outfile, ext)
        elif options.greaterthan != None and options.lessthan != None:
            greaterthan = options.greaterthan
            lessthan = options.lessthan
            print "and filtering out sequences less than ", lessthan, " and greater than ", greaterthan
            outputfilename = left + '.filtered.greaterthan.' + str(greaterthan) + ".filtered.lessthan." + str(lessthan) + '.' + right
            with open(outputfilename, 'w') as outfile:
                pre_record_generator = process_and_generate(parse_iterator, greaterthan, True)
                record_generator = process_and_generate(pre_record_generator, lessthan, False)
                SeqIO.write(record_generator, outfile, ext)

print "Done!"
| nilq/baby-python | python |
import numpy as np
from os import listdir
from os.path import join
def random_shift_events(events, f, max_shift=20, resolution=(195, 346)):
    """Randomly translate events in x/y and drop those shifted out of frame.

    Args:
        events: (N, 4) array with rows [x, y, t, p]; x/y columns are
            modified in place before filtering.
        f: unused; kept for interface compatibility with existing callers.
        max_shift: maximum absolute per-axis shift in pixels.
        resolution: (H, W) sensor size used for the validity crop.
    Returns:
        The shifted events with out-of-bounds rows removed.
    """
    # (Removed the commented-out debug prints from the original.)
    H, W = resolution
    x_shift, y_shift = np.random.randint(-max_shift, max_shift + 1, size=(2,))
    events[:, 0] += x_shift
    events[:, 1] += y_shift
    valid_events = (events[:, 0] >= 0) & (events[:, 0] < W) & (events[:, 1] >= 0) & (events[:, 1] < H)
    return events[valid_events]
def random_flip_events_along_x(events, resolution=(195, 346), p=0.5):
    """With probability *p*, mirror event x-coordinates horizontally (in place)."""
    H, W = resolution
    should_flip = np.random.random() < p
    if should_flip:
        events[:, 0] = W - 1 - events[:, 0]
    return events
class NCaltech101:
    """Event-camera N-Caltech101 dataset.

    Expects *root* to contain one sub-directory per class, each holding
    ``.npy`` files of event arrays whose rows are [x, y, t, p].
    """

    def __init__(self, root, augmentation=False):
        # Class labels are derived from the sub-directory names; each file in
        # a class directory gets that class's integer index as its label.
        self.classes = listdir(root)

        self.files = []
        self.labels = []
        self.augmentation = augmentation

        for i, c in enumerate(self.classes):
            new_files = [join(root, c, f) for f in listdir(join(root, c))]
            self.files += new_files
            self.labels += [i] * len(new_files)

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        """
        returns events and label, loading events from aedat
        :param idx:
        :return: x,y,t,p, label
        """
        label = self.labels[idx]
        f = self.files[idx]
        # Cast to float32 so the augmentation arithmetic below works in place.
        events = np.load(f).astype(np.float32)

        if self.augmentation:
            # Random translation followed by a random horizontal flip.
            events = random_shift_events(events, f)
            events = random_flip_events_along_x(events)
        return events, label
| nilq/baby-python | python |
import json
import cryptography.fernet
from django.conf import settings
from django.utils.encoding import force_bytes, force_text
from django_pgjson.fields import get_encoder_class
import six
# Allow the use of key rotation
if isinstance(settings.FIELD_ENCRYPTION_KEY, (tuple, list)):
    # Several keys supplied: build a Fernet for each one.
    keys = [
        cryptography.fernet.Fernet(k)
        for k in settings.FIELD_ENCRYPTION_KEY
    ]
elif isinstance(settings.FIELD_ENCRYPTION_KEY, dict):
    # allow the keys to be indexed in a dictionary
    keys = [
        cryptography.fernet.Fernet(k)
        for k in settings.FIELD_ENCRYPTION_KEY.values()
    ]
else:
    # else turn the single key into a list of one
    keys = [cryptography.fernet.Fernet(settings.FIELD_ENCRYPTION_KEY), ]

# MultiFernet encrypts with the first key and tries every key on decrypt,
# which is what makes rotation possible.
crypter = cryptography.fernet.MultiFernet(keys)
def no_op_encrypt_values(data, encrypter=None, skip_keys=None):
    """
    A noop function with the same call signature of `encrypt_values`.

    Used by `pick_encrypter` to skip encryption for selected keys.

    Returns:
        obj - returns the data parameter unaltered.
    """
    return data
def pick_encrypter(key, keys, encrypter):
    """
    Returns the encrypting function to use for *key*.

    Keys listed in *keys* are skipped during encryption, so they get the
    no-op function; every other key gets *encrypter*.

    Returns:
        function
    """
    return no_op_encrypt_values if key in keys else encrypter
def encrypt_values(data, encrypter=None, skip_keys=None):
    """
    Returns data with values it contains recursively encrypted.
    Note that this will use `json.dumps` to convert the data to a string type.
    The encoder class will be the value of `PGJSON_ENCODER_CLASS` in the
    settings or `django.core.serializers.json.DjangoJSONEncoder`.
    Arguments:
        data (object): the data to encrypt.
        encrypter (function): the encryption function to use. If not
            specified it will use the
            cryptography.fernet.MultiFernet.encrypt method
            with the keys being taken from settings.FIELD_ENCRYPTION_KEY
        skip_keys (list[str]): a list of keys that should not be encrypted
    Returns:
        object
    """
    if skip_keys is None:
        skip_keys = []

    encrypter = encrypter or crypter.encrypt

    # Sequences: encrypt each element (always returned as a list).
    if isinstance(data, (list, tuple, set)):
        return [encrypt_values(x, encrypter, skip_keys) for x in data]

    # Mappings: keys stay in the clear; values under skip_keys pass through
    # via the no-op chosen by pick_encrypter.
    if isinstance(data, dict):
        return {
            key: pick_encrypter(key, skip_keys, encrypt_values)(
                value, encrypter, skip_keys)
            for key, value in six.iteritems(data)
        }

    # Strings are encrypted directly (unicode_escape makes them bytes first).
    if isinstance(data, six.string_types):
        return force_text(encrypter(data.encode('unicode_escape')))

    # Everything else is serialized to JSON, then encrypted.
    return force_text(encrypter(
        force_bytes(json.dumps(data, cls=get_encoder_class()))
    ))
def decrypt_values(data, decrypter=None):
    """
    Returns data with values it contains recursively decrypted.
    Note that this will use `json.loads` to convert the decrypted data to
    its most likely python type.
    Arguments:
        data (object): the data to decrypt.
        decrypter (function): the decryption function to use. If not
            specified it will use the
            cryptography.fernet.MultiFernet.decrypt method
            with the keys being taken from settings.FIELD_ENCRYPTION_KEY
    Returns:
        object
    """
    decrypter = decrypter or crypter.decrypt

    # Sequences: decrypt each element (always returned as a list).
    if isinstance(data, (list, tuple, set)):
        return [decrypt_values(x, decrypter) for x in data]

    # Mappings: decrypt the values, keys are assumed to be in the clear.
    if isinstance(data, dict):
        return {
            key: decrypt_values(value, decrypter)
            for key, value in six.iteritems(data)
        }

    if isinstance(data, six.string_types):
        # string data! if we got a string or unicode convert it to
        # bytes first, as per http://stackoverflow.com/a/11174804.
        #
        # Note 1: This is required for the decrypter, it only accepts bytes.
        # Note 2: this is primarily needed because the decrypt method is called
        # on the value during the save as well as during the read, by the
        # django ORM.
        data = data.encode('unicode_escape')

    try:
        # decrypt the bytes data
        value = decrypter(data)
    except TypeError:
        # Not bytes data??! probably from a django field calling
        # to_python during value assignment
        value = data
    except cryptography.fernet.InvalidToken:
        # Either the data is corrupted, e.g. a lost key or the data
        # was never encrypted, this could be from django calling to_python
        # during value assignment.
        value = data

    try:
        # undo the unicode mess from earlier
        value = value.decode('unicode_escape')
    except AttributeError:
        # Already text (not bytes); nothing to decode.
        pass

    try:
        return json.loads(value)
    except (ValueError, TypeError):
        # Not valid json, just return the value
        return value
| nilq/baby-python | python |
from pptx import Presentation
from pptx.util import Inches
import pyexcel as pe

# Intro banner (user-facing text intentionally kept in Portuguese).
print("""
Exemplo de criação de apresentação PPTX em loop utilizando dados de Excel
Vish, o bagulho foi loko pra conseguir criar este aplicativo mano
-> agora aprendi, já era
Day 24 Code Python - 23/05/2018
""")

dadosExcel = pe.iget_records(file_name="apresentacao_automatica.xlsx")  # try to turn this into a function
prs = Presentation()  # if reading an existing PPTX, pass it as a parameter
title_only_slide_layout = prs.slide_layouts[5]
slide = prs.slides.add_slide(title_only_slide_layout)
shapes = slide.shapes
shapes.title.text = 'Idades -> Feito com Python'

# Column and cell configuration --> still need to figure out how to automate these sizes
rows = 5
cols = 2
left = top = Inches(2.0)
width = Inches(6.0)
height = Inches(0.8)
table = shapes.add_table(rows, cols, left, top, width, height).table

# column widths
table.columns[0].width = Inches(3.0)
table.columns[1].width = Inches(2.0)

# column header names - fixed at row 0
table.cell(0, 0).text = 'Nome'
table.cell(0, 1).text = 'Idade'

nome = []   # NOTE(review): unused accumulator
idade = []  # NOTE(review): unused accumulator
cont = 1
for itens in dadosExcel:
    # writing the data into the cells
    if cont > 0:  # row 0 is always the header and is fixed (cont starts at 1, so this is always true)
        table.cell(cont, 0).text = str(itens['nome'])
        table.cell(cont, 1).text = str(itens['idade'])
        cont += 1

# freeing the resource
pe.free_resources()

# saving the pptx file
prs.save('apresentacao_tabela_automatica.pptx')
print('-' * 34)
print('APRESENTAÇÃO CRIADA COM SUCESSO.')
| nilq/baby-python | python |
"""
This sample shows how to create a list in json
of all items in a group
Python 2.x/3.x
ArcREST 3.5,6
"""
from __future__ import print_function
from __future__ import absolute_import
import arcrest
import os
import json
from arcresthelper import orgtools, common
import csv
import sys
from arcresthelper.packages import six
def trace():
    """Return (line, filename, synerror) describing the active exception.

    Must be called from inside an ``except`` block; the information is read
    from the current ``sys.exc_info()`` traceback.
    """
    import traceback, inspect, sys
    tb = sys.exc_info()[2]
    tb_text = traceback.format_tb(tb)[0]
    filename = inspect.getfile(inspect.currentframe())
    # "line <N>" portion of the first traceback entry (script name + line number).
    line = tb_text.split(", ")[1]
    # The last line of the formatted traceback is the error message itself.
    synerror = traceback.format_exc().splitlines()[-1]
    return line, filename, synerror
def _unicode_convert(obj):
""" converts unicode to anscii """
if isinstance(obj, dict):
return {_unicode_convert(key): _unicode_convert(value) for key, value in obj.items()}
elif isinstance(obj, list):
return [_unicode_convert(element) for element in obj]
elif isinstance(obj, str):
return obj
elif isinstance(obj, six.text_type):
return obj.encode('utf-8')
elif isinstance(obj, six.integer_types):
return obj
else:
return obj
if __name__ == "__main__":
    # --- Portal connection settings -------------------------------------
    proxy_port = None
    proxy_url = None
    securityinfo = {}
    securityinfo['security_type'] = 'Portal'#LDAP, NTLM, OAuth, Portal, PKI
    securityinfo['username'] = ""#<UserName>
    securityinfo['password'] = ""#<Password>
    securityinfo['org_url'] = "http://www.arcgis.com"
    securityinfo['proxy_url'] = proxy_url
    securityinfo['proxy_port'] = proxy_port
    securityinfo['referer_url'] = None
    securityinfo['token_url'] = None
    securityinfo['certificatefile'] = None
    securityinfo['keyfile'] = None
    securityinfo['client_id'] = None
    securityinfo['secret_id'] = None

    groups = ["Demographic Content"] #Name of groups
    outputlocation = r"C:\TEMP"
    outputfilename = "group.json"   # JSON dump of every item found
    outputitemID = "id.csv"         # CSV of (title, id) pairs

    try:
        orgt = orgtools.orgtools(securityinfo)
        groupRes = []
        if orgt.valid:
            jsonPath = os.path.join(outputlocation, outputfilename)
            csvPath = os.path.join(outputlocation, outputitemID)
            iconPath = os.path.join(outputlocation, "icons")
            if not os.path.exists(iconPath):
                os.makedirs(iconPath)
            # csv needs binary mode on Python 2 but newline='' on Python 3.
            if sys.version_info[0] == 2:
                access = 'wb+'
                kwargs = {}
            else:
                access = 'wt+'
                kwargs = {'newline': ''}
            # BUG FIX: the CSV rows were previously written to the JSON
            # output path (open(fileName, ...)), clobbering the JSON file;
            # write them to the CSV path instead.
            with open(csvPath, access, **kwargs) as csvFile:
                idwriter = csv.writer(csvFile)
                for groupName in groups:
                    results = orgt.getGroupContent(groupName=groupName,
                                                   onlyInOrg=True,
                                                   onlyInUser=True)
                    if results is not None:
                        for result in results:
                            idwriter.writerow([result['title'], result['id']])
                            # Download the item thumbnail and record its
                            # local path in the JSON output.
                            thumbLocal = orgt.getThumbnailForItem(itemId=result['id'],
                                                                  fileName=result['title'],
                                                                  filePath=iconPath)
                            result['thumbnail'] = thumbLocal
                            groupRes.append(result)
            if len(groupRes) > 0:
                print("%s items found" % str(len(groupRes)))
                groupRes = _unicode_convert(groupRes)
                # Context manager ensures the JSON handle is closed even if
                # json.dumps raises.
                with open(jsonPath, "w") as jsonFile:
                    jsonFile.write(json.dumps(groupRes, ensure_ascii=False,
                                              sort_keys=True, indent=4,
                                              separators=(',', ': ')))
    except (common.ArcRestHelperError) as e:
        # e.args[0] works on Python 2 and 3; subscripting the exception
        # directly (e[0]) raises TypeError on Python 3.
        err = e.args[0]
        print("error in function: %s" % err['function'])
        print("error on line: %s" % err['line'])
        print("error in file name: %s" % err['filename'])
        print("with error message: %s" % err['synerror'])
        if 'arcpyError' in err:
            print("with arcpy message: %s" % err['arcpyError'])
    except:
        line, filename, synerror = trace()
        print("error on line: %s" % line)
        print("error in file name: %s" % filename)
        print("with error message: %s" % synerror)
def get_customized_mapping(cls):
    """Return the customized Elasticsearch field mapping for this source.

    Keyword fields that should be matched case-insensitively share the
    ``keyword_lowercase_normalizer``; ``name`` and ``reactome.id`` are
    additionally copied into the catch-all ``all`` field.
    """
    def lowercase_keyword(copy_to=None):
        # One lowercase-normalized keyword field; optionally copied to 'all'.
        field = {
            "normalizer": "keyword_lowercase_normalizer",
            "type": "keyword",
        }
        if copy_to:
            field["copy_to"] = copy_to
        return field

    gene_ids = ("mygene_id", "symbol", "ncbigene", "ensemblgene", "uniprot")
    gene_properties = {name: lowercase_keyword() for name in gene_ids}
    gene_properties["name"] = {"type": "text"}

    return {
        "name": {
            "type": "text",
            "copy_to": ["all"],
        },
        "is_public": {"type": "boolean"},
        "taxid": {"type": "integer"},
        "genes": {"properties": gene_properties},
        "reactome": {
            "properties": {
                "id": lowercase_keyword(copy_to=["all"]),
                "geneset_name": {"type": "text"},
            }
        },
    }
| nilq/baby-python | python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.