| id | text | dataset_id |
|---|---|---|
3278546 | '''
@author: davandev
'''
import abc
class ServiceIf(abc.ABC):
'''
Interface for services
'''
@abc.abstractmethod
def handle_request(self, request):
"""
Abstract method to override to handle received request.
"""
return
@abc.abstractmethod
def get_name(self):
"""
Abstract method returns name of the service
"""
return ""
@abc.abstractmethod
def stop_service(self):
'''
Abstract method to stop service
'''
pass
@abc.abstractmethod
def start_service(self):
'''
Abstract method to start service
'''
pass
@abc.abstractmethod
def is_service_running(self):
'''
Abstract method to determine if service is running
'''
pass
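# A minimal concrete implementation sketch of the interface above (the
# EchoService name and behaviour are illustrative assumptions, not part of
# the original code):
class EchoService(ServiceIf):
    def __init__(self):
        self._running = False
    def handle_request(self, request):
        # echo the request straight back
        return request
    def get_name(self):
        return "echo"
    def stop_service(self):
        self._running = False
    def start_service(self):
        self._running = True
    def is_service_running(self):
        return self._running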
| StarcoderdataPython |
1747639 | <reponame>revl/pants<gh_stars>1-10
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.register import build_file_aliases
from pants.base.exceptions import TargetDefinitionException
from pants.testutil.test_base import TestBase
class TestPythonBinary(TestBase):
@classmethod
def alias_groups(cls):
return build_file_aliases()
def setUp(self):
super().setUp()
# Force creation of SourceRootConfig global instance. PythonBinary uses source roots
# when computing entry points.
self.context()
def test_python_binary_must_have_some_entry_point(self):
self.add_to_build_file("", 'python_binary(name = "binary")')
with self.assertRaises(TargetDefinitionException):
self.target(":binary")
def test_python_binary_with_entry_point_no_source(self):
self.add_to_build_file("", 'python_binary(name = "binary", entry_point = "blork")')
assert self.target(":binary").entry_point == "blork"
def test_python_binary_with_source_no_entry_point(self):
self.create_file("blork.py")
self.create_file("bin/blork.py")
self.add_to_build_file(
"",
"""python_binary(
name = "binary1",
source = "blork.py",
)
python_binary(
name = "binary2",
source = "bin/blork.py",
)""",
)
assert self.target(":binary1").entry_point == "blork"
assert self.target(":binary2").entry_point == "bin.blork"
def test_python_binary_with_entry_point_and_source(self):
self.add_to_build_file(
"",
"""python_binary(
name = "binary1",
entry_point = "blork",
source = "blork.py",
)
python_binary(
name = "binary2",
entry_point = "blork:main",
source = "blork.py",
)
python_binary(
name = "binary3",
entry_point = "bin.blork:main",
source = "bin/blork.py",
)""",
)
assert "blork" == self.target(":binary1").entry_point
assert "blork:main" == self.target(":binary2").entry_point
assert "bin.blork:main" == self.target(":binary3").entry_point
def test_python_binary_with_entry_point_and_source_mismatch(self):
self.create_file("binary1/hork.py")
self.add_to_build_file(
"binary1", 'python_binary(entry_point = "blork", source = "hork.py")',
)
with self.assertRaises(TargetDefinitionException):
self.target("binary1")
self.create_file("binary2/hork.py")
self.add_to_build_file(
"binary2", 'python_binary(entry_point = "blork:main", source = "hork.py")',
)
with self.assertRaises(TargetDefinitionException):
self.target("binary2")
self.create_file("binary3/blork.py")
self.add_to_build_file(
"binary3", 'python_binary(entry_point = "bin.blork", source = "blork.py")',
)
with self.assertRaises(TargetDefinitionException):
self.target("binary3")
self.create_file("binary4/bin.py")
self.add_to_build_file(
"binary4", 'python_binary(entry_point = "bin.blork", source = "bin.py")',
)
with self.assertRaises(TargetDefinitionException):
self.target("binary4")
| StarcoderdataPython |
1633458 | from contract import Forge
FIXED_DEPOSIT_AMOUNT = 10000 * 10**18
print("投资数量为:",FIXED_DEPOSIT_AMOUNT)
def getEthSupply():
eth = Forge.functions.eth_supply().call()
print("Current ETH supply:", eth / 10**18)
ndao = Forge.functions.ndao_supply().call()
print("Current NDAO supply:", ndao / 10**18)
return eth, ndao
def calOutPrice():
eth, ndao = getEthSupply()
mul = eth * ndao
new_ndao = ndao + FIXED_DEPOSIT_AMOUNT
new_eth = mul / new_ndao
eth_price = eth - new_eth + 1
print("计算output价格为:",eth_price/10 ** 18)
def getOutprice():
out_price = Forge.functions.getOutputPrice(FIXED_DEPOSIT_AMOUNT).call()
print("合约output价格为:",out_price/10 ** 18)
calOutPrice()
getOutprice()
| StarcoderdataPython |
3347196 | from assemblyline import odm
from . import PerformanceTimer
MSG_TYPES = {"DispatcherHeartbeat"}
LOADER_CLASS = "assemblyline.odm.messages.dispatcher_heartbeat.DispatcherMessage"
@odm.model()
class Queues(odm.Model):
ingest = odm.Integer()
files = odm.Integer()
@odm.model()
class Inflight(odm.Model):
max = odm.Integer()
outstanding = odm.Integer()
@odm.model()
class Metrics(odm.Model):
files_completed = odm.Integer()
submissions_completed = odm.Integer()
cpu_seconds = PerformanceTimer()
cpu_seconds_count = odm.Integer()
busy_seconds = PerformanceTimer()
busy_seconds_count = odm.Integer()
@odm.model()
class Heartbeat(odm.Model):
inflight = odm.Compound(Inflight)
instances = odm.Integer()
metrics = odm.Compound(Metrics)
queues = odm.Compound(Queues)
component = odm.Keyword()
@odm.model()
class DispatcherMessage(odm.Model):
msg = odm.Compound(Heartbeat)
msg_loader = odm.Enum(values={LOADER_CLASS}, default=LOADER_CLASS)
msg_type = odm.Enum(values=MSG_TYPES, default="DispatcherHeartbeat")
sender = odm.Keyword()
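# A hedged sketch of the dict shape implied by the models above (field names are
# taken from the model definitions; all values are purely illustrative):
EXAMPLE_MESSAGE = {
    "msg": {
        "inflight": {"max": 100, "outstanding": 4},
        "instances": 2,
        "metrics": {
            "files_completed": 10, "submissions_completed": 3,
            "cpu_seconds": 1.5, "cpu_seconds_count": 10,
            "busy_seconds": 0.9, "busy_seconds_count": 10,
        },
        "queues": {"ingest": 0, "files": 2},
        "component": "dispatcher",
    },
    "msg_loader": LOADER_CLASS,
    "msg_type": "DispatcherHeartbeat",
    "sender": "dispatcher-0",
}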
| StarcoderdataPython |
4834568 | <filename>miriad/squint.py
#!/usr/bin/python3
import shutil, glob, os
import miriad
def split(uvo, uvc, so, lines=[]):
""" Split in different files LL and RR """
from subprocess import CalledProcessError
stks = ['ll', 'rr', 'lr', 'rl']
for stk in stks:
for lin in lines:
path = '{}/{}.{}.{}'.format(uvc, so, lin, stk)
if os.path.exists(path): shutil.rmtree(path)
try:
miriad.uvaver({
'vis' : '{}/{}.{}'.format(uvo, so, lin),
'out' : '{}/{}.{}.{}'.format(uvc, so, lin, stk),
'select' : 'pol({})'.format(stk)
})
except CalledProcessError:
print("### Retrying with stokes selection instead")
miriad.uvaver({
'vis' : '{}/{}.{}'.format(uvo, so, lin),
'out' : '{}/{}.{}.{}'.format(uvc, so, lin, stk),
'stokes' : stk
})
def selfcal(so, uvc, lines=[]):
"""
Original map used for selfcal in MAPS
Independent step for RR and LL (u,v) files
1. Self-calibration of continuum
2. Applying self-calibration for continuum
3. Copying gains to line data (all in the USB)
4. Applying self-calibration for lines
5. Concatenate LL and RR into one file
6. Resort data
lines: e.g. ['co3-2', 'sio8-7', 'cnt.usb', 'usb']
"""
calibrator = 'cnt.usb'
for stk in ['ll', 'rr']:
for sb in [calibrator]:
miriad.selfcal({
'vis' : '{}/{}.{}.{}'.format(uvc, so, sb, stk),
'model' : 'MAPS/{}.cont.usb.i.cc'.format(so),
'refant' : 6,
'interval' : 8,
'options' : 'phase'
})
miriad.gpplt({
'vis' : '{}/{}.{}.{}'.format(uvc, so, sb, stk),
'device' : '1/xs',
'yaxis' : 'phase',
'nxy' : '1,3'
})
input("Press enter to continue...")
path = '{}/{}.{}.{}.slfc'.format(uvc, so, sb, stk)
if os.path.exists(path): shutil.rmtree(path)
miriad.uvaver({
'vis' : '{}/{}.{}.{}'.format(uvc, so, sb, stk),
'out' : '{}/{}.{}.{}.slfc'.format(uvc, so, sb, stk)
})
for lin in [l for l in lines if l != calibrator]: # iterate over lines excluding the calibrator
path = '{}/{}.{}.{}.slfc'.format(uvc, so, lin, stk)
if os.path.exists(path): shutil.rmtree(path)
miriad.gpcopy({
'vis' : '{}/{}.cnt.usb.{}'.format(uvc, so, stk),
'out' : '{}/{}.{}.{}'.format(uvc, so, lin, stk),
})
miriad.uvaver({
'vis' : '{}/{}.{}.{}'.format(uvc, so, lin, stk),
'out' : '{}/{}.{}.{}.slfc'.format(uvc, so, lin, stk),
})
for lin in lines:
vis = '{}/{}.{}'.format(uvc, so, lin)
for folder in ['tmp.5', 'tmp.6', '{}/{}.{}.corrected.slfc'.format(uvc, so, lin)]:
if os.path.exists(folder): shutil.rmtree(folder)
miriad.uvcat({
'vis' : '{0}.rr.slfc,{0}.ll.slfc,{0}.rl,{0}.lr'.format(vis),
'out' : 'tmp.5',
})
miriad.uvsort({
'vis' : 'tmp.5',
'out' : 'tmp.6',
})
miriad.uvaver({
'vis' : 'tmp.6',
'out' : '{}/{}.{}.corrected.slfc'.format(uvc, so, lin),
'interval' : 5
})
def mapvis(uvo, uvc, so, mapdir, lines=[], lineSelection=[]):
"""
Make a map from visibilities
1. Continuum Stokes I,V Uncorrected & Corrected data
2. Map All lines. Corrected
3. Map All lines. Uncorrected
4. Continuum LL and RR independently, for non-selfcal and selfcal cases
"""
calibrator = 'cnt.usb'
if len(lines) != len(lineSelection):
lineSelection = [None for l in lines]
# 1.
src = '{}/{}.cnt'.format(mapdir, so)
tall = 0.03
for path in glob.glob('{}.*'.format(src)):
if os.path.exists(path): shutil.rmtree(path)
vis = '{}/{}.cnt.usb.corrected.slfc'.format(uvc, so)
for src in ['{}/{}.cnt'.format(mapdir, so), '{}/{}.cnt.uncorrected'.format(mapdir, so)]:
miriad.invert({
'vis': vis,
'stokes': 'i,v',
'beam': '{}.bm'.format(src),
'map': '{0}.i.mp,{0}.v.mp'.format(src),
'imsize': 128,
'cell': 0.3,
'options': 'systemp,double,mfs',
'sup': 0
})
for stk in ['i', 'v']:
miriad.clean({
'map': '{}.{}.mp'.format(src, stk),
'beam': '{}.bm'.format(src),
'out': '{}.{}.cc'.format(src, stk),
'niters': 3000,
'cutoff': tall
})
miriad.restor({
'map': '{}.{}.mp'.format(src, stk),
'beam': '{}.bm'.format(src),
'model': '{}.{}.cc'.format(src, stk),
'out': '{}.{}.cm'.format(src, stk),
})
vis = '{}/{}.cnt.usb'.format(uvo, so)
# 2. Map corrected line data
# 3. Map uncorrected line data with same parameters as in 2
tall = 0.50
# remove continuum, it's already been mapped
lines.remove(calibrator)
lines.remove('usb')
for i, lin in enumerate(lines):
vis = '{}/{}.{}.corrected.slfc'.format(uvc, so, lin)
for src in ['{}/{}.{}'.format(mapdir, so, lin), '{}/{}.{}.uncorrected'.format(mapdir, so, lin)]:
line = miriad.averageVelocityLine(vis, 2)
for path in glob.glob('{}.*'.format(src)):
if os.path.exists(path): shutil.rmtree(path)
invertOptions = {
'vis': vis,
'stokes': 'i,v',
'beam': '{}.bm'.format(src),
'map': '{0}.i.mp,{0}.v.mp'.format(src),
'imsize': 128,
'cell': 0.3,
'options': 'systemp,double,mfs',
'sup': 0,
}
if lineSelection[i] is not None:
invertOptions['line'] = lineSelection[i]
miriad.invert(invertOptions)
for stk in ['i', 'v']:
miriad.clean({
'map': '{}.{}.mp'.format(src, stk),
'beam': '{}.bm'.format(src),
'out': '{}.{}.cc'.format(src, stk),
'niters': 3000,
'cutoff': tall
})
miriad.restor({
'map': '{}.{}.mp'.format(src, stk),
'beam': '{}.bm'.format(src),
'model': '{}.{}.cc'.format(src, stk),
'out': '{}.{}.cm'.format(src, stk),
})
vis = '{}/{}.{}'.format(uvo, so, lin)
# 4. nopol is for selfcal case (this option is not used!)
tall = 0.03
for stk in ['ll', 'rr']:
src = '{}/{}.cnt.{}'.format(mapdir, so, stk)
for path in glob.glob('{}.*'.format(src)):
if os.path.exists(path): shutil.rmtree(path)
for pol in ['nopol', 'nocal']:
path = '{}.bm'.format(src)
if os.path.exists(path): shutil.rmtree(path)
miriad.invert({
'vis': '{}/{}.cnt.usb.{}'.format(uvc, so, stk),
'beam': '{}.bm'.format(src),
'map': '{}.{}.mp'.format(src, pol),
'imsize': 128,
'cell': 0.3,
'options': 'systemp,double,mfs,{}'.format(pol),
'sup': 0
})
miriad.clean({
'map': '{}.{}.mp'.format(src, pol),
'beam': '{}.bm'.format(src),
'out': '{}.{}.cc'.format(src, pol),
'niters': 3000,
'cutoff': tall
})
miriad.restor({
'map': '{}.{}.mp'.format(src, pol),
'beam': '{}.bm'.format(src),
'model': '{}.{}.cc'.format(src, pol),
'out': '{}.{}.cm'.format(src, pol),
})
def mapallvis(uvo, uvc, so, mapdir, lines=[], lineSelection=[], selects=[]):
"""
Similar to mapvis but doesn't do multiple frequency synthesis.
The frequency axis is preserved so you can get spectra from the image.
"""
from subprocess import CalledProcessError
if len(lines) != len(lineSelection):
lineSelection = [None for l in lines]
if len(lines) != len(selects):
selects = [None for l in lines]
calibrator = 'cnt.usb'
tall = 0.50
# remove continuum, it's already been mapped
lineSelection.pop(lines.index(calibrator))
lines.remove(calibrator)
for i, lin in enumerate(lines):
vis = '{}/{}.{}.corrected.slfc'.format(uvc, so, lin)
for src in ['{}/{}.{}'.format(mapdir, so, lin), '{}/{}.{}.uncorrected'.format(mapdir, so, lin)]:
for path in glob.glob('{}*.full.*'.format(src)):
if os.path.exists(path): shutil.rmtree(path)
invertOptions = {
'vis': vis,
'stokes': 'i,v',
'beam': '{}.full.bm'.format(src),
'map': '{0}.i.full.mp,{0}.v.full.mp'.format(src),
'imsize': 128,
'cell': 0.3,
'options': 'systemp,double',
'sup': 0,
}
if selects[i]:
invertOptions['select'] = selects[i]
try:
miriad.invert(invertOptions)
except CalledProcessError:
print("### Retrying invert with line selection")
line = miriad.averageVelocityLine(vis, 2)
sel = lineSelection[i] if lineSelection[i] is not None else line
invertOptions['line'] = sel
miriad.invert(invertOptions)
for stk in ['i', 'v']:
miriad.clean({
'map': '{}.{}.full.mp'.format(src, stk),
'beam': '{}.full.bm'.format(src),
'out': '{}.{}.full.cc'.format(src, stk),
'niters': 3000,
'cutoff': tall
})
miriad.restor({
'map': '{}.{}.full.mp'.format(src, stk),
'beam': '{}.full.bm'.format(src),
'model': '{}.{}.full.cc'.format(src, stk),
'out': '{}.{}.full.cm'.format(src, stk),
})
vis = '{}/{}.{}'.format(uvo, so, lin)
def disp(uvo, uvc, so, mapdir, lines=[], stokesVrms=[]):
"""
1. Plot uncorrected channel map
2. Plot corrected channel map
"""
for i, lin in enumerate(lines):
# for lin in ['cnt', 'co3-2', 'sio8-7']:
devicetype = 'ps/cps'
filename = lin
src = '{}/{}.{}'.format(mapdir, so, lin)
nxy = '1,1'
path = '{}.v-i.perc'.format(src)
if os.path.exists(path): shutil.rmtree(path)
path = '{}.v-i.perc.uncorrected'.format(src)
if os.path.exists(path): shutil.rmtree(path)
rms = stokesVrms[i]
if lin == 'cnt':
for suffix in ['', 'uncorrected.']:
opts = {
'exp': '100*<{0}.{1}v.cm>/<{0}.{1}i.cm>'.format(src, suffix),
'mask': '<{}.{}i.cm>.gt.0.4'.format(src, suffix),
}
suffix = '.uncorrected' if suffix != '' else ''
opts['out'] = '{}.v-i.perc{}'.format(src, suffix)
miriad.maths(opts)
else:
for suffix in ['', 'uncorrected.']:
val = 6 if suffix == '' else 8
opts = {
'exp': '100*<{0}.{1}v.cm>/<{0}.{1}i.cm>'.format(src, suffix),
'mask': '<{}.{}i.cm>.gt.{}'.format(src, suffix, val),
}
suffix = '.uncorrected' if suffix != '' else ''
opts['out'] = '{}.v-i.perc{}'.format(src, suffix)
miriad.maths(opts)
for datatype in ['uncorr', 'corr']:
cgdispOpts = {
'type': 'cont,cont',
'labtyp': 'arcsec,arcsec',
'options': 'full,beambl,3val',
'csize': '0,1,0,0',
'cols1': 2, 'cols2': 8,
'levs1': '-95,-75,-55,-35,-15,15,35,55,75,95',
'nxy': nxy,
}
if datatype == 'uncorr':
# flux plot
cgdispOpts['slev'] = 'p,1,a,{}'.format(rms)
cgdispOpts['device'] = '{}.uncorr.{}'.format(filename, devicetype)
cgdispOpts['in'] = '{0}.uncorrected.i.cm,{0}.uncorrected.v.cm'.format(src)
cgdispOpts['levs2'] = '-8,-7,-6,-5,-4,-3,-2,2,3,4,5,6,7,8'
miriad.cgdisp(cgdispOpts)
miriad.imstat({'in': '{}.i.cm'.format(src), 'region':'box(3,3,50,125)'})
miriad.imstat({'in': '{}.v.cm'.format(src), 'region':'box(3,3,50,125)'})
input("Press enter to continue...")
# v/i plot
cgdispOpts['slev'] = 'p,1,a,1'
cgdispOpts['device'] = '{}.uncorr.perc.{}'.format(filename, devicetype)
cgdispOpts['in'] = '{0}.uncorrected.i.cm,{0}.v-i.perc.uncorrected'.format(src)
cgdispOpts['levs2'] = '-6,-5,-4,-3,-2,-1,1,2,3,4,5,6'
miriad.cgdisp(cgdispOpts)
input("Press enter to continue...")
else:
# flux plot
cgdispOpts['slev'] = 'p,1,a,{}'.format(rms)
cgdispOpts['device'] = '{}.corr.{}'.format(filename, devicetype)
cgdispOpts['in'] = '{0}.i.cm,{0}.v.cm'.format(src)
cgdispOpts['levs2'] = '-8,-7,-6,-5,-4,-3,-2,2,3,4,5,6,7,8'
miriad.cgdisp(cgdispOpts)
miriad.imstat({'in': '{}.i.cm'.format(src), 'region':'box(3,3,50,125)'})
miriad.imstat({'in': '{}.v.cm'.format(src), 'region':'box(3,3,50,125)'})
input("Press enter to continue... ")
# v/i plot
cgdispOpts['slev'] = 'p,1,a,1'
cgdispOpts['device'] = '{}.corr.perc.{}'.format(filename, devicetype)
cgdispOpts['in'] = '{0}.i.cm,{0}.v-i.perc'.format(src)
cgdispOpts['levs2'] = '-6,-5,-4,-3,-2,-1,1,2,3,4,5,6'
miriad.cgdisp(cgdispOpts)
input("Press enter to continue... ")
def peak(so, mapdir):
src = '{}/{}.cnt'.format(mapdir, so)
for li in ['ll.nocal', 'rr.nocal', 'll.nopol', 'rr.nopol']:
miriad.maxfit({'in': '{}.{}.cm'.format(src, li), 'log': 'maxfit_{}.{}'.format(so, li)})
miriad.maxfit({'in': '{}.i.cm'.format(src), 'log': 'maxfit_{}.stokesI'.format(so)})
if __name__ == '__main__':
so = 'NGC7538S-s4'
uvo = 'UVDATA'
uvc = 'UVOffsetCorrect'
mapdir = 'MAPSCorrect'
lines = ['co3-2', 'ch2co17-16', 'cnt.usb', 'usb']
input("Press return to split")
split(uvo, uvc, so, lines)
input("Press return to selfcal")
selfcal(so, uvc, lines)
input("Press return to map visibilities")
mapvis(uvo, uvc, so, mapdir, lines[:],
lineSelection=[None, None, None, None]
)
input("Press return to map visibilities with frequency axis")
mapvis(uvo, uvc, so, mapdir, lines[:])
input("Press return to save plots")
disp(uvo, uvc, so, mapdir,
lines=['co3-2', 'ch2co17-16', 'cnt'],
stokesVrms=[0.044, 0.0089, 0.0055]
)
| StarcoderdataPython |
139298 | from setuptools import setup
# Version meaning (X.Y.Z)
# X: Major version (e.g. vastly different scene, platform, etc)
# Y: Minor version (e.g. new tasks, major changes to existing tasks, etc)
# Z: Patch version (e.g. small changes to tasks, bug fixes, etc)
setup(name='rlbench',
version='1.0.8',
description='RLBench',
author='<NAME>',
author_email='<EMAIL>',
url='https://www.doc.ic.ac.uk/~slj12',
packages=[
'rlbench',
'rlbench.backend',
'rlbench.tasks',
'rlbench.task_ttms',
'rlbench.robot_ttms',
'rlbench.sim2real',
'rlbench.assets',
'rlbench.gym'
],
package_data={'': ['*.ttm', '*.obj', '**/**/*.ttm', '**/**/*.obj'],
'rlbench': ['task_design.ttt']},
)
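# A short usage sketch (assumes the package has been installed, e.g. via
# `pip install .`): reading back the version declared above at runtime
# with the standard library.
# from importlib.metadata import version
# print(version("rlbench"))  # -> "1.0.8"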
| StarcoderdataPython |
65189 | <reponame>dpetrovykh/DPy
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 10:00:13 2020
@author: dpetrovykh
"""
import math
class Circle:
def __init__(self, radius= 1):
self.radius = float(radius)
@classmethod
def fromDiameter(cls, diameter):
return cls(radius = diameter/2)
@property
def area(self):
return math.pi*self.radius**2
@property
def perimeter(self):
return math.pi*self.radius*2
@property
def diameter(self):
return self.radius*2
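# A brief usage sketch of the class above:
if __name__ == "__main__":
    c = Circle.fromDiameter(4)
    print(c.radius)     # 2.0
    print(c.area)       # pi * 2**2 ~= 12.566
    print(c.perimeter)  # 2 * pi * 2 ~= 12.566
    print(c.diameter)   # 4.0
| StarcoderdataPython |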
187275 | <filename>openai_ros/src/openai_ros/task_envs/turtlebot3/turtlebot3_world.py<gh_stars>0
#!/usr/bin/env python3
import rospy
from openai_ros.robot_envs import turtlebot3_env
from gym import spaces
import numpy as np
class TurtleBot3WorldEnv(turtlebot3_env.TurtleBot3Env):
"""
TurtleBot3WorldEnv implements a general TurtleBot3 task environment
"""
def __init__(self, name_space: str = 'turtlebot3'):
"""
Initialize TurtleBot3WorldEnv class
Parameters
----------
name_space: str
string used to uniquely identify the ros node and related parameters
(refer to turtlebot3_params.yaml)
"""
super(TurtleBot3WorldEnv, self).__init__()
self._num_actions = rospy.get_param('/' + name_space + '/n_actions')
self._skip_beam_interval = rospy.get_param('/' + name_space + '/skip_beam_interval')
self._min_laser_value = rospy.get_param('/' + name_space + '/min_laser_value')
self._max_laser_value = rospy.get_param('/' + name_space + '/max_laser_value')
self._init_linear_speed = rospy.get_param('/' + name_space + '/init_linear_speed')
self._init_angular_speed = rospy.get_param('/' + name_space + '/init_angular_speed')
self._linear_forward_speed = rospy.get_param('/' + name_space + '/linear_forward_speed')
self._linear_turn_speed = rospy.get_param('/' + name_space + '/linear_turn_speed')
self._angular_speed = rospy.get_param('/' + name_space + '/angular_speed')
# construct observation space
laser_scan = self.get_laser_scan() # by this point we already executed _check_laser_scan_is_ready()
num_laser_readings = len(laser_scan.ranges) / self._skip_beam_interval
high = np.full( int(num_laser_readings), self._max_laser_value , dtype = np.float32)
low = np.full( int(num_laser_readings), self._min_laser_value , dtype = np.float32)
self._observation_space = spaces.Box(low, high)
# construct action space
self._action_space = spaces.Discrete(self._num_actions)
# construct reward range
self._reward_range = (-np.inf, np.inf)
self._episode_done = False
self._motion_error = 0.05
self._update_rate = 30
rospy.loginfo('status: TurtleBot3WorldEnv is ready')
def _set_init_pose(self):
"""
Set the initial pose of the turtlebot3
"""
self._move_base( self._init_linear_speed, self._init_angular_speed,
self._motion_error, self._update_rate )
def _init_env_variables(self):
"""
Initialize environment variables
"""
self._episode_done = False
def _set_action(self, action: int):
"""
Apply the give action to the environment
Parameters
----------
action: int
based on the action id, the corresponding linear and angular speed for the robot is set
Action List:
* 0 = MoveFoward
* 1 = TurnLeft
* 2 = TurnRight
"""
if action == 0: # move forward
linear_speed = self._linear_forward_speed
angular_speed = 0.0
elif action == 1: # turn left
linear_speed = self._linear_turn_speed
angular_speed = self._angular_speed
elif action == 2: # turn right
linear_speed = self._linear_turn_speed
angular_speed = -1 * self._angular_speed
else: # do nothing / stop
linear_speed = 0.0
angular_speed = 0.0
self._move_base( linear_speed, angular_speed,
self._motion_error, self._update_rate )
def _get_obs(self):
"""
Return the observation from the environment
"""
laser_scan = self.get_laser_scan()
# discretize the laser scan: keep every `_skip_beam_interval`-th beam so the
# observation length matches the observation space built in __init__
disc_laser_scan = []
for i, beam in enumerate(laser_scan.ranges):
if i % self._skip_beam_interval == 0:
if np.isinf(beam):
disc_laser_scan.append(self._max_laser_value)
elif np.isnan(beam):
disc_laser_scan.append(self._min_laser_value)
else:
disc_laser_scan.append(beam)
return disc_laser_scan
def _is_done(self):
"""
Indicates whether or not the episode is done
"""
# TODO
pass
def _compute_reward(self, observation, done):
"""
Calculate the reward based on the observation
"""
# TODO
return 0
| StarcoderdataPython |
12311 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Bcache(MakefilePackage):
"""Bcache is a patch for the Linux kernel to use SSDs to cache other block
devices."""
homepage = "http://bcache.evilpiepirate.org"
url = "https://github.com/g2p/bcache-tools/archive/v1.0.8.tar.gz"
version('1.0.8', sha256='d56923936f37287efc57a46315679102ef2c86cd0be5874590320acd48c1201c')
version('1.0.7', sha256='64d76d1085afba8c3d5037beb67bf9d69ee163f357016e267bf328c0b1807abd')
version('1.0.6', sha256='9677c6da3ceac4e1799d560617c4d00ea7e9d26031928f8f94b8ab327496d4e0')
version('1.0.5', sha256='1449294ef545b3dc6f715f7b063bc2c8656984ad73bcd81a0dc048cbba416ea9')
version('1.0.4', sha256='102ffc3a8389180f4b491188c3520f8a4b1a84e5a7ca26d2bd6de1821f4d913d')
depends_on('libuuid')
depends_on('util-linux')
depends_on('gettext')
depends_on('pkgconfig', type='build')
def setup_build_environment(self, env):
env.append_flags('LDFLAGS', '-lintl')
patch('func_crc64.patch', sha256='558b35cadab4f410ce8f87f0766424a429ca0611aa2fd247326ad10da115737d')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('bcache-register', prefix.bin)
install('bcache-super-show', prefix.bin)
install('make-bcache', prefix.bin)
install('probe-bcache', prefix.bin)
| StarcoderdataPython |
3286049 | <reponame>redwankarimsony/SSD-Mobilenet-People-Detection
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 8 15:45:16 2019
@author: viswanatha
"""
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv2d, Sequential, ModuleList, ReLU
import torch
from mobilenet_ssd_priors import *
from MobileNetV2 import MobileNetV2, MobileNetV2_pretrained
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class MobileNetV1(nn.Module):
def __init__(self, num_classes=1000):
super(MobileNetV1, self).__init__()
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True)
)
def conv_dw(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU(inplace=True),
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True),
)
self.model = nn.Sequential(
conv_bn(3, 32, 2),
conv_dw(32, 64, 1),
conv_dw(64, 128, 2),
conv_dw(128, 128, 1),
conv_dw(128, 256, 2),
conv_dw(256, 256, 1),
conv_dw(256, 512, 2),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 1024, 2),
conv_dw(1024, 1024, 1),
)
self.fc = nn.Linear(1024, num_classes)
def forward(self, x):
x = self.model(x)
x = F.avg_pool2d(x, 7)
x = x.view(-1, 1024)
x = self.fc(x)
return x
class PredictionConvolutions(nn.Module):
"""
Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps.
The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 8732 prior (default) boxes.
See 'cxcy_to_gcxgcy' in utils.py for the encoding definition.
The class scores represent the scores of each object class in each of the 8732 bounding boxes located.
A high score for 'background' = no object.
"""
def __init__(self, n_classes, backbone_net):
"""
:param n_classes: number of different types of objects
"""
super(PredictionConvolutions, self).__init__()
self.n_classes = n_classes
# Number of prior-boxes we are considering per position in each feature map
n_boxes = {'conv4_3': 4,
'conv7': 6,
'conv8_2': 6,
'conv9_2': 6,
'conv10_2': 4,
'conv11_2': 4}
# 4 prior-boxes implies we use 4 different aspect ratios, etc.
# Localization prediction convolutions (predict offsets w.r.t prior-boxes)
if backbone_net == 'MobileNetV2':
self.loc_conv4_3 = nn.Conv2d(96, n_boxes['conv4_3'] * 4, kernel_size=3, padding=1)
self.loc_conv7 = nn.Conv2d(1280, n_boxes['conv7'] * 4, kernel_size=3, padding=1)
self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4, kernel_size=3, padding=1)
self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4, kernel_size=3, padding=1)
self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4, kernel_size=3, padding=1)
self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4, kernel_size=3, padding=1)
# Class prediction convolutions (predict classes in localization boxes)
self.cl_conv4_3 = nn.Conv2d(96, n_boxes['conv4_3'] * n_classes, kernel_size=3, padding=1)
self.cl_conv7 = nn.Conv2d(1280, n_boxes['conv7'] * n_classes, kernel_size=3, padding=1)
self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes, kernel_size=3, padding=1)
self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes, kernel_size=3, padding=1)
self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes, kernel_size=3, padding=1)
self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes, kernel_size=3, padding=1)
# Initialize convolutions' parameters
self.init_conv2d()
elif backbone_net == 'MobileNetV1':
self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4, kernel_size=3, padding=1)
self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4, kernel_size=3, padding=1)
self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4, kernel_size=3, padding=1)
self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4, kernel_size=3, padding=1)
self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4, kernel_size=3, padding=1)
self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4, kernel_size=3, padding=1)
# Class prediction convolutions (predict classes in localization boxes)
self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes, kernel_size=3, padding=1)
self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes, kernel_size=3, padding=1)
self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes, kernel_size=3, padding=1)
self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes, kernel_size=3, padding=1)
self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes, kernel_size=3, padding=1)
self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes, kernel_size=3, padding=1)
# Initialize convolutions' parameters
self.init_conv2d()
def init_conv2d(self):
"""
Initialize convolution parameters.
"""
for c in self.children():
if isinstance(c, nn.Conv2d):
nn.init.xavier_uniform_(c.weight)
nn.init.constant_(c.bias, 0.)
def forward(self, conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats):
batch_size = conv4_3_feats.size(0)
l_conv4_3 = self.loc_conv4_3(conv4_3_feats) # (N, 16, 38, 38)
l_conv4_3 = l_conv4_3.permute(0, 2, 3,
1).contiguous() # (N, 38, 38, 16), to match prior-box order (after .view())
l_conv4_3 = l_conv4_3.view(batch_size, -1, 4) # (N, 5776, 4), there are a total 5776 boxes on this feature map
l_conv7 = self.loc_conv7(conv7_feats) # (N, 24, 19, 19)
l_conv7 = l_conv7.permute(0, 2, 3, 1).contiguous() # (N, 19, 19, 24)
l_conv7 = l_conv7.view(batch_size, -1, 4) # (N, 2166, 4), there are a total 2116 boxes on this feature map
l_conv8_2 = self.loc_conv8_2(conv8_2_feats) # (N, 24, 10, 10)
l_conv8_2 = l_conv8_2.permute(0, 2, 3, 1).contiguous() # (N, 10, 10, 24)
l_conv8_2 = l_conv8_2.view(batch_size, -1, 4) # (N, 600, 4)
l_conv9_2 = self.loc_conv9_2(conv9_2_feats) # (N, 24, 5, 5)
l_conv9_2 = l_conv9_2.permute(0, 2, 3, 1).contiguous() # (N, 5, 5, 24)
l_conv9_2 = l_conv9_2.view(batch_size, -1, 4) # (N, 150, 4)
l_conv10_2 = self.loc_conv10_2(conv10_2_feats) # (N, 16, 3, 3)
l_conv10_2 = l_conv10_2.permute(0, 2, 3, 1).contiguous() # (N, 3, 3, 16)
l_conv10_2 = l_conv10_2.view(batch_size, -1, 4) # (N, 36, 4)
l_conv11_2 = self.loc_conv11_2(conv11_2_feats) # (N, 16, 1, 1)
l_conv11_2 = l_conv11_2.permute(0, 2, 3, 1).contiguous() # (N, 1, 1, 16)
l_conv11_2 = l_conv11_2.view(batch_size, -1, 4) # (N, 4, 4)
# Predict classes in localization boxes
c_conv4_3 = self.cl_conv4_3(conv4_3_feats) # (N, 4 * n_classes, 38, 38)
c_conv4_3 = c_conv4_3.permute(0, 2, 3,
1).contiguous() # (N, 38, 38, 4 * n_classes), to match prior-box order (after .view())
c_conv4_3 = c_conv4_3.view(batch_size, -1,
self.n_classes) # (N, 5776, n_classes), there are a total 5776 boxes on this feature map
c_conv7 = self.cl_conv7(conv7_feats) # (N, 6 * n_classes, 19, 19)
c_conv7 = c_conv7.permute(0, 2, 3, 1).contiguous() # (N, 19, 19, 6 * n_classes)
c_conv7 = c_conv7.view(batch_size, -1,
self.n_classes) # (N, 2166, n_classes), there are a total 2116 boxes on this feature map
c_conv8_2 = self.cl_conv8_2(conv8_2_feats) # (N, 6 * n_classes, 10, 10)
c_conv8_2 = c_conv8_2.permute(0, 2, 3, 1).contiguous() # (N, 10, 10, 6 * n_classes)
c_conv8_2 = c_conv8_2.view(batch_size, -1, self.n_classes) # (N, 600, n_classes)
c_conv9_2 = self.cl_conv9_2(conv9_2_feats) # (N, 6 * n_classes, 5, 5)
c_conv9_2 = c_conv9_2.permute(0, 2, 3, 1).contiguous() # (N, 5, 5, 6 * n_classes)
c_conv9_2 = c_conv9_2.view(batch_size, -1, self.n_classes) # (N, 150, n_classes)
c_conv10_2 = self.cl_conv10_2(conv10_2_feats) # (N, 4 * n_classes, 3, 3)
c_conv10_2 = c_conv10_2.permute(0, 2, 3, 1).contiguous() # (N, 3, 3, 4 * n_classes)
c_conv10_2 = c_conv10_2.view(batch_size, -1, self.n_classes) # (N, 36, n_classes)
c_conv11_2 = self.cl_conv11_2(conv11_2_feats) # (N, 4 * n_classes, 1, 1)
c_conv11_2 = c_conv11_2.permute(0, 2, 3, 1).contiguous() # (N, 1, 1, 4 * n_classes)
c_conv11_2 = c_conv11_2.view(batch_size, -1, self.n_classes) # (N, 4, n_classes)
# A total of 8732 boxes
# Concatenate in this specific order (i.e. must match the order of the prior-boxes)
locs = torch.cat([l_conv4_3, l_conv7, l_conv8_2, l_conv9_2, l_conv10_2, l_conv11_2], dim=1) # (N, 8732, 4)
classes_scores = torch.cat([c_conv4_3, c_conv7, c_conv8_2, c_conv9_2, c_conv10_2, c_conv11_2],
dim=1) # (N, 8732, n_classes)
return locs, classes_scores
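# A hedged sketch of the offset encoding the docstring above refers to. This is the
# standard SSD "gcxgcy" parameterisation (variances 0.1/0.2, i.e. the /10 and *5
# factors); the project's own cxcy_to_gcxgcy in utils.py may differ in detail.
def cxcy_to_gcxgcy_sketch(cxcy, priors_cxcy):
    # boxes and priors are (n_boxes, 4) tensors in center-size (cx, cy, w, h) form
    return torch.cat([
        (cxcy[:, :2] - priors_cxcy[:, :2]) / (priors_cxcy[:, 2:] / 10),  # g_cx, g_cy
        torch.log(cxcy[:, 2:] / priors_cxcy[:, 2:]) * 5,                 # g_w, g_h
    ], 1)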
#auxiliary_conv = [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256]
class AuxillaryConvolutions(nn.Module):
def __init__(self, backbone_net):
super(AuxillaryConvolutions, self).__init__()
if backbone_net == "MobileNetV2":
self.extras = ModuleList([
Sequential(
Conv2d(in_channels=1280, out_channels=256, kernel_size=1),
ReLU(),
Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1),
ReLU()
),
Sequential(
Conv2d(in_channels=512, out_channels=128, kernel_size=1),
ReLU(),
Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1),
ReLU()
),
Sequential(
Conv2d(in_channels=256, out_channels=128, kernel_size=1),
ReLU(),
Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1),
ReLU()
),
Sequential(
Conv2d(in_channels=256, out_channels=128, kernel_size=1),
ReLU(),
Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1),
ReLU()
)
])
self.init_conv2d()
elif backbone_net=="MobileNetV1":
self.extras = ModuleList([
Sequential(
Conv2d(in_channels=1024, out_channels=256, kernel_size=1),
ReLU(),
Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1),
ReLU()
),
Sequential(
Conv2d(in_channels=512, out_channels=128, kernel_size=1),
ReLU(),
Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1),
ReLU()
),
Sequential(
Conv2d(in_channels=256, out_channels=128, kernel_size=1),
ReLU(),
Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1),
ReLU()
),
Sequential(
Conv2d(in_channels=256, out_channels=128, kernel_size=1),
ReLU(),
Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1),
ReLU()
)
])
self.init_conv2d()
def init_conv2d(self):
# walk all submodules so the Conv2d layers nested inside the Sequentials are reached
for layer in self.modules():
if isinstance(layer, nn.Conv2d):
nn.init.xavier_uniform_(layer.weight)
nn.init.constant_(layer.bias, 0.)
def forward(self, inp_features_10x10):
features = []
x = inp_features_10x10
for layer in self.extras:
x = layer(x)
features.append(x)
features_5x5 = features[0]
features_3x3 = features[1]
features_2x2 = features[2]
features_1x1 = features[3]
return features_5x5, features_3x3, features_2x2, features_1x1
class SSD(nn.Module):
def __init__(self, num_classes, backbone_network):
super(SSD, self).__init__()
self.num_classes = num_classes
self.priors = torch.FloatTensor(priors).to(device)
#self.base_net = MobileNetV1().model
self.backbone_net = backbone_network
if self.backbone_net == 'MobileNetV1':
self.base_net = MobileNetV1().model
elif self.backbone_net == 'MobileNetV2':
self.base_net = MobileNetV2_pretrained('mobilenet_v2.pth.tar').model
else:
raise ValueError('SSD cannot be created with the provided base network')
#self.base_net = MobileNetV2()
self.aux_network = AuxillaryConvolutions(self.backbone_net)
self.prediction_network = PredictionConvolutions(num_classes, self.backbone_net)
def forward(self, image):
x= image
if self.backbone_net == 'MobileNetV1':
source_layer_indexes = [
12,
14,]
start_layer_index = 0
flag = 0
x = x.to(device)
for end_layer_index in source_layer_indexes:
for layer in self.base_net[start_layer_index: end_layer_index]:
x = layer(x)
layer_output = x
start_layer_index = end_layer_index
if flag ==0:
features_19x19 = layer_output
elif flag ==1:
features_10x10 = layer_output
flag+=1
for layer in self.base_net[end_layer_index:]:
x = layer(x)
elif self.backbone_net == 'MobileNetV2':
for index, feat in enumerate(self.base_net.features):
x = feat(x)
if index==13:
features_19x19 = x
if index==18:
features_10x10 = x
layer_output = x
features_5x5, features_3x3, features_2x2, features_1x1 = self.aux_network(layer_output)
features = []
features.append(features_19x19)
features.append(features_10x10)
features.append(features_5x5)
features.append(features_3x3)
features.append(features_2x2)
features.append(features_1x1)
locs, class_scores = self.prediction_network.forward(features_19x19, features_10x10, features_5x5, features_3x3, features_2x2, features_1x1)
return locs, class_scores
'''
import numpy as np
import torch
img = np.random.rand(1, 3, 300, 300)
img = torch.Tensor(img)
model = SSD(20, 'MobileNetV1')
loc, classes = model.forward(img)
print(loc.shape, classes.shape)
'''
| StarcoderdataPython |
1662448 | <filename>examples/custom_providers_example.py
from devoutils.faker import SyslogFakeGenerator
import random
from devo.sender import Sender
def get_choices():
return ["Failed", "Success", "Totally broken", "404", "500", "What?"]
if __name__ == "__main__":
with open("./custom_providers_template.jinja2", 'r') as myfile:
template = myfile.read()
con = None
# This example needs a Sender connection
# Example
# con = Sender(config="./config.yaml")
custom = {"random": random, "choices": get_choices}
# If you remove simulation or set it to False, the data will actually be sent
f = SyslogFakeGenerator(engine=con,
template=template,
simulation=True,
probability=80,
frequency=(0.1, 3),
providers=custom,
verbose=True)
f.start()
| StarcoderdataPython |
3379984 | <gh_stars>0
#!/usr/bin/env python
from setuptools import setup
setup(
name="interpol",
version="0.1",
description="A way to interpolate data yielded from iterators",
url="https://github.com/radium226/interpol",
license="GPL",
packages=["interpol"],
zip_safe=True,
install_requires=[
"scipy"
]
)
| StarcoderdataPython |
3287854 | from talon import Context, Module, actions, ui
# ctx = Context()
mod = Module()
mod.tag("cdda", desc="Cataclysm: Dark Days Ahead")
# ctx.matches = r"""
# app: cataclysm-tiles
# """
@mod.action_class
class Actions:
def key_repeat(key: str, count: int):
"""Play key with delay"""
for i in range(0, count):
actions.key(key)
actions.sleep("10ms")
| StarcoderdataPython |
1759562 | <gh_stars>1-10
# coding: utf-8
__author__ = 'baocaixiong'
from message import ConfirmMessage, QueryMessage
from messageio import writer
def confirm_message(message_id, token):
cm = ConfirmMessage()
cm.token = token
cm.update_content({'id': message_id})
return writer(cm)
def query_message(**kwargs):
return writer(QueryMessage(**kwargs))
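# A brief usage sketch of the two helpers above (the id and token values, and the
# query keyword, are hypothetical):
# confirm_message(message_id=42, token="abc123")
# query_message(token="abc123")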
| StarcoderdataPython |
1636863 | from photons_app.errors import ApplicationCancelled, ApplicationStopped
from photons_app.errors import UserQuit
from photons_app import helpers as hp
import platform
import asyncio
import logging
import signal
import sys
log = logging.getLogger("photons_app.tasks.runner")
class Runner:
def __init__(self, task, kwargs):
self.task = task
self.kwargs = kwargs
def run_loop(self):
photons_app = self.task.photons_app
target_register = self.task.collector.configuration["target_register"]
self.Run(self.task.run(**self.kwargs), photons_app, target_register).run()
class Run:
def __init__(self, coro, photons_app, target_register):
self.coro = coro
self.photons_app = photons_app
self.target_register = target_register
self.loop = self.photons_app.loop
@property
def significant_future(self):
graceful_future = self.photons_app.graceful_final_future
if graceful_future.setup:
return graceful_future
return self.photons_app.final_future
def run(self):
self.photons_app.final_future.add_done_callback(hp.silent_reporter)
self.significant_future.add_done_callback(hp.silent_reporter)
self.register_sigterm_handler(self.significant_future)
task, waiter = self.make_waiter()
override = None
graceful = self.significant_future is self.photons_app.graceful_final_future
try:
self.loop.run_until_complete(waiter)
except KeyboardInterrupt as error:
override = self.got_keyboard_interrupt(error)
except asyncio.CancelledError as error:
override = self.got_cancelled(error)
except:
override = sys.exc_info()[1]
log.debug("CLEANING UP")
try:
self.final(task, waiter)
finally:
self.final_close()
if isinstance(override, ApplicationStopped) and graceful:
return
if override is not None:
raise override from None
def register_sigterm_handler(self, final_future):
if platform.system() != "Windows":
def stop_final_fut():
if not final_future.done():
final_future.set_exception(ApplicationStopped())
self.loop.add_signal_handler(signal.SIGTERM, stop_final_fut)
async def wait(self, task):
wait = [self.photons_app.final_future, self.significant_future, task]
await hp.wait_for_first_future(*wait, name="||run>wait[wait_for_program_exit]")
if task.done():
await task
if self.photons_app.final_future.done():
await self.photons_app.final_future
if self.significant_future.done():
await self.significant_future
def make_waiter(self):
task = self.loop.create_task(self.coro)
task.add_done_callback(hp.silent_reporter)
waiter = self.loop.create_task(self.wait(task))
waiter.add_done_callback(hp.silent_reporter)
return task, waiter
def got_keyboard_interrupt(self, error):
error = UserQuit()
if not self.significant_future.done():
try:
self.significant_future.set_exception(error)
except RuntimeError:
pass
return error
def got_cancelled(self, error):
error = ApplicationCancelled()
if not self.significant_future.done():
try:
self.significant_future.set_exception(error)
except RuntimeError:
pass
return error
def transfer_result(self, complete, pending):
if complete is None or complete.cancelled():
if not pending.done():
pending.cancel()
return
if not complete.done():
return
exc = complete.exception()
if exc:
if not pending.done():
pending.set_exception(exc)
return
complete.result()
if not pending.done():
pending.set_result(None)
def final(self, task, waiter):
self.wait_for_main_task(task)
self.wait_for_waiter(waiter)
self.ensure_finished_futures(task, waiter)
self.run_cleanup()
self.ensure_all_tasks_cancelled()
def wait_for_main_task(self, task):
log.debug("Waiting for main task to finish")
# If we exited because final future is done but graceful is not
# Then the task won't end, so let's tell graceful we're done now
if self.photons_app.final_future.done() and not self.significant_future.done():
self.transfer_result(self.photons_app.final_future, self.significant_future)
# If we're not using the graceful future then we assume the task won't stop by itself
# The graceful future is about saying the task will stop by itself when you resolve graceful
if not self.photons_app.graceful_final_future.setup:
task.cancel()
try:
self.loop.run_until_complete(
asyncio.tasks.gather(task, loop=self.loop, return_exceptions=True)
)
except KeyboardInterrupt:
pass
except:
pass
finally:
task.cancel()
def wait_for_waiter(self, waiter):
log.debug("Waiting for waiter task to finish")
waiter.cancel()
try:
self.loop.run_until_complete(
asyncio.tasks.gather(waiter, loop=self.loop, return_exceptions=True)
)
except:
pass
def run_cleanup(self):
log.debug("Running cleaners")
targets = self.target_register.used_targets
self.loop.run_until_complete(self.photons_app.cleanup(targets))
def ensure_finished_futures(self, task, waiter):
self.transfer_result(None if not task.done() else task, self.photons_app.final_future)
if not self.significant_future.done():
self.significant_future.cancel()
if self.photons_app.graceful_final_future.setup:
if self.significant_future.cancelled() or isinstance(
self.significant_future.exception(),
(UserQuit, ApplicationStopped, ApplicationCancelled),
):
self.photons_app.final_future.cancel()
self.transfer_result(self.significant_future, self.photons_app.final_future)
def ensure_all_tasks_cancelled(self):
log.debug("Cancelling tasks and async generators")
self.cancel_all_tasks()
self.loop.run_until_complete(self.shutdown_asyncgens())
def final_close(self):
self.loop.close()
del self.photons_app.loop
del self.photons_app.final_future
del self.photons_app.graceful_final_future
def cancel_all_tasks(self):
if hasattr(asyncio.tasks, "all_tasks"):
to_cancel = asyncio.tasks.all_tasks(self.loop)
else:
to_cancel = asyncio.Task.all_tasks(self.loop)
to_cancel = [t for t in to_cancel if not t.done()]
if not to_cancel:
return
for task in to_cancel:
task.cancel()
gathered = asyncio.tasks.gather(*to_cancel, loop=self.loop, return_exceptions=True)
self.loop.run_until_complete(gathered)
for task in to_cancel:
if task.cancelled():
continue
if task.exception() is not None:
self.loop.call_exception_handler(
{
"message": "unhandled exception during shutdown",
"exception": task.exception(),
"task": task,
}
)
async def shutdown_asyncgens(self):
if not len(self.loop._asyncgens):
return
closing_agens = list(self.loop._asyncgens)
self.loop._asyncgens.clear()
# I would do an asyncio.tasks.gather but it would appear that just causes
# the asyncio loop to think it's shutdown, so I have to do them one at a time
for ag in closing_agens:
try:
await hp.stop_async_generator(
ag, name="||shutdown_asyncgens[wait_for_closing_agens]"
)
except asyncio.CancelledError:
pass
except:
exc = sys.exc_info()[1]
self.loop.call_exception_handler(
{
"message": "an error occurred during closing of asynchronous generator",
"exception": exc,
"asyncgen": ag,
}
)
| StarcoderdataPython |
1791611 | <reponame>Anioko/CMS
from datetime import datetime
from app import db
class Workplace(db.Model):
"""Places of work to be listed here."""
__tablename__ = 'workplaces'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete="CASCADE"))
name = db.Column(db.String)
description = db.Column(db.String)
role = db.Column(db.String)
role_description = db.Column(db.String)
start_date = db.Column(db.DateTime, default=datetime.now, nullable=False)
end_date = db.Column(db.DateTime, nullable=False)
currently = db.Column(db.String)
city = db.Column(db.String)
state = db.Column(db.String)
country = db.Column(db.String)
created_at = db.Column(db.DateTime, default=db.func.now())
updated_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())
def __repr__(self):
return u'<{self.__class__.__name__}: {self.id}>'.format(self=self)
| StarcoderdataPython |
4833534 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import wshop.models.fields.autoslugfield
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CommunicationEventType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', wshop.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Code', editable=False, separator='_', max_length=128, help_text='Code used for looking up this event programmatically', blank=True)),
('name', models.CharField(verbose_name='Name', max_length=255, help_text='This is just used for organisational purposes')),
('category', models.CharField(default='Order related', max_length=255, verbose_name='Category', choices=[('Order related', 'Order related'), ('User related', 'User related')])),
('email_subject_template', models.CharField(verbose_name='Email Subject Template', max_length=255, blank=True, null=True)),
('email_body_template', models.TextField(blank=True, verbose_name='Email Body Template', null=True)),
('email_body_html_template', models.TextField(verbose_name='Email Body HTML Template', blank=True, help_text='HTML template', null=True)),
('sms_template', models.CharField(verbose_name='SMS Template', max_length=170, help_text='SMS template', blank=True, null=True)),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
('date_updated', models.DateTimeField(auto_now=True, verbose_name='Date Updated')),
],
options={
'verbose_name_plural': 'Communication event types',
'verbose_name': 'Communication event type',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Email',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.TextField(max_length=255, verbose_name='Subject')),
('body_text', models.TextField(verbose_name='Body Text')),
('body_html', models.TextField(verbose_name='Body HTML', blank=True)),
('date_sent', models.DateTimeField(auto_now_add=True, verbose_name='Date Sent')),
('user', models.ForeignKey(verbose_name='User', related_name='emails', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'Emails',
'verbose_name': 'Email',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=255)),
('body', models.TextField()),
('category', models.CharField(max_length=255, blank=True)),
('location', models.CharField(default='Inbox', max_length=32, choices=[('Inbox', 'Inbox'), ('Archive', 'Archive')])),
('date_sent', models.DateTimeField(auto_now_add=True)),
('date_read', models.DateTimeField(blank=True, null=True)),
('recipient', models.ForeignKey(related_name='notifications', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
('sender', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE)),
],
options={
'ordering': ('-date_sent',),
'verbose_name_plural': 'Notifications',
'verbose_name': 'Notification',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProductAlert',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=75, db_index=True, verbose_name='Email', blank=True)),
('key', models.CharField(max_length=128, db_index=True, verbose_name='Key', blank=True)),
('status', models.CharField(default='Active', max_length=20, verbose_name='Status', choices=[('Unconfirmed', 'Not yet confirmed'), ('Active', 'Active'), ('Cancelled', 'Cancelled'), ('Closed', 'Closed')])),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
('date_confirmed', models.DateTimeField(blank=True, verbose_name='Date confirmed', null=True)),
('date_cancelled', models.DateTimeField(blank=True, verbose_name='Date cancelled', null=True)),
('date_closed', models.DateTimeField(blank=True, verbose_name='Date closed', null=True)),
('product', models.ForeignKey(to='catalogue.Product', on_delete=models.CASCADE)),
('user', models.ForeignKey(null=True, verbose_name='User', related_name='alerts', to=settings.AUTH_USER_MODEL, blank=True, on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'Product alerts',
'verbose_name': 'Product alert',
'abstract': False,
},
bases=(models.Model,),
),
]
| StarcoderdataPython |
1634623 | import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
'''
run this file from root folder:
python3 datasets/process_data.py datasets/messages.csv datasets/categories.csv datasets/DisasterResponse.db
'''
def load_data(messages_filepath, categories_filepath):
"""
PARAMETER:
messages_filepath - filepath for messages
categories_filepath - filepath for categories
RETURN:
df - merged messages and categories DataFrame
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = messages.merge(categories, on='id')
return df
def clean_data(df):
'''
PARAMETER:
df (DataFrame) - dataframe to be cleaned
RETURN:
df (DataFrame) - cleaned dataframe
'''
# split the categories column into 36 separate columns
categories = df['categories'].str.split(';', expand=True)
# derive category column names (strip the trailing "-0"/"-1") and assign them
category_colnames = categories.iloc[1].apply(lambda x: x[0:-2])
categories.columns = category_colnames
#convert categories value to 0 and 1
for column in categories:
categories[column] = categories[column].str[-1]
categories[column] = pd.to_numeric(categories[column])
# merge categories df into master df
df = pd.concat([df.drop(['id','categories'], axis=1), categories], axis=1)
# remove duplicates
df = df[~df.duplicated()]
#check for non-binary result
nonbinary_cols = []
for category in df.drop(columns=['message', 'original', 'genre']).columns:
if len(list(df[category].unique())) > 2:
nonbinary_cols.append(category)
# convert any value other than 0 or 1 to 1
for category in nonbinary_cols:
df.loc[~df[category].isin([0, 1]), category] = 1
return df
def save_data(df, database_filename):
'''
PARAMETER:
df (DataFrame) - dataframe to be saved
database_filename (string) - database file name
RETURN:
None
'''
engine = create_engine('sqlite:///' + database_filename)
df.to_sql('messages_and_categories', engine, index=False)
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
| StarcoderdataPython |
131605 | <gh_stars>10-100
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_2')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='iso2022_jp_2',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| StarcoderdataPython |
90431 | # Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen, PIPE
import time
import threading
from itertools import chain
import collections
import traceback
import os
from lvmdbusd import cfg
from lvmdbusd.utils import pv_dest_ranges, log_debug, log_error, add_no_notify
from lvmdbusd.lvm_shell_proxy import LVMShellProxy
try:
import simplejson as json
except ImportError:
import json
SEP = '{|}'
total_time = 0.0
total_count = 0
# We need to prevent different threads from using the same lvm shell
# at the same time.
cmd_lock = threading.RLock()
class LvmExecutionMeta(object):
def __init__(self, start, ended, cmd, ec, stdout_txt, stderr_txt):
self.lock = threading.RLock()
self.start = start
self.ended = ended
self.cmd = cmd
self.ec = ec
self.stdout_txt = stdout_txt
self.stderr_txt = stderr_txt
def __str__(self):
with self.lock:
return "EC= %d for %s\n" \
"STARTED: %f, ENDED: %f\n" \
"STDOUT=%s\n" \
"STDERR=%s\n" % \
(self.ec, str(self.cmd), self.start, self.ended, self.stdout_txt,
self.stderr_txt)
class LvmFlightRecorder(object):
def __init__(self, size=16):
self.queue = collections.deque(maxlen=size)
def add(self, lvm_exec_meta):
self.queue.append(lvm_exec_meta)
def dump(self):
with cmd_lock:
if len(self.queue):
log_error("LVM dbus flight recorder START")
for c in self.queue:
log_error(str(c))
log_error("LVM dbus flight recorder END")
cfg.blackbox = LvmFlightRecorder()
def _debug_c(cmd, exit_code, out):
log_error('CMD= %s' % ' '.join(cmd))
log_error(("EC= %d" % exit_code))
log_error(("STDOUT=\n %s\n" % out[0]))
log_error(("STDERR=\n %s\n" % out[1]))
def call_lvm(command, debug=False):
"""
Call an executable and return a tuple of exitcode, stdout, stderr
:param command: Command to execute
:param debug: Dump debug to stdout
"""
# print 'STACK:'
# for line in traceback.format_stack():
# print line.strip()
# Prepend the full lvm executable so that we can run different versions
# in different locations on the same box
command.insert(0, cfg.LVM_CMD)
command = add_no_notify(command)
process = Popen(command, stdout=PIPE, stderr=PIPE, close_fds=True,
env=os.environ)
out = process.communicate()
stdout_text = bytes(out[0]).decode("utf-8")
stderr_text = bytes(out[1]).decode("utf-8")
if debug or process.returncode != 0:
_debug_c(command, process.returncode, (stdout_text, stderr_text))
return process.returncode, stdout_text, stderr_text
# The actual method which gets called to invoke the lvm command, can vary
# from forking a new process to using lvm shell
_t_call = call_lvm
def _shell_cfg():
global _t_call
# noinspection PyBroadException
try:
lvm_shell = LVMShellProxy()
_t_call = lvm_shell.call_lvm
cfg.SHELL_IN_USE = lvm_shell
return True
except Exception:
_t_call = call_lvm
cfg.SHELL_IN_USE = None
log_error(traceback.format_exc())
log_error("Unable to utilize lvm shell, dropping back to fork & exec")
return False
def set_execution(shell):
global _t_call
with cmd_lock:
		# If the user requested the lvm shell and we are already set up that
		# way, just return
if cfg.SHELL_IN_USE and shell:
return True
else:
if not shell and cfg.SHELL_IN_USE:
cfg.SHELL_IN_USE.exit_shell()
cfg.SHELL_IN_USE = None
_t_call = call_lvm
if shell:
if cfg.args.use_json:
return _shell_cfg()
else:
return False
return True
def time_wrapper(command, debug=False):
global total_time
global total_count
with cmd_lock:
start = time.time()
results = _t_call(command, debug)
ended = time.time()
total_time += (ended - start)
total_count += 1
cfg.blackbox.add(LvmExecutionMeta(start, ended, command, *results))
return results
call = time_wrapper
# Default cmd
# Place default arguments for every command here.
def _dc(cmd, args):
c = [cmd, '--noheading', '--separator', '%s' % SEP, '--nosuffix',
'--unbuffered', '--units', 'b']
c.extend(args)
return c
def parse(out):
rc = []
for line in out.split('\n'):
# This line includes separators, so process them
if SEP in line:
elem = line.split(SEP)
cleaned_elem = []
for e in elem:
e = e.strip()
cleaned_elem.append(e)
if len(cleaned_elem) > 1:
rc.append(cleaned_elem)
else:
t = line.strip()
if len(t) > 0:
rc.append(t)
return rc
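# Example (illustrative output): a report line such as
#   '  /dev/sda1{|}lvm2{|}a--  '
# splits on SEP into ['/dev/sda1', 'lvm2', 'a--'] after stripping each field,
# while a line without SEP is appended whole as a stripped string.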
def parse_column_names(out, column_names):
lines = parse(out)
rc = []
for i in range(0, len(lines)):
d = dict(list(zip(column_names, lines[i])))
rc.append(d)
return rc
def options_to_cli_args(options):
rc = []
for k, v in list(dict(options).items()):
if k.startswith("-"):
rc.append(k)
else:
rc.append("--%s" % k)
if v != "":
rc.append(str(v))
return rc
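# Example (illustrative): options_to_cli_args({'--config': 'global/x=1',
# 'test': ''}) yields ['--config', 'global/x=1', '--test'] -- keys already
# starting with '-' pass through, other keys gain a '--' prefix, and empty
# values are dropped so bare flags work.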
def pv_remove(device, remove_options):
cmd = ['pvremove']
cmd.extend(options_to_cli_args(remove_options))
cmd.append(device)
return call(cmd)
def _qt(tag_name):
return '@%s' % tag_name
def _tag(operation, what, add, rm, tag_options):
cmd = [operation]
cmd.extend(options_to_cli_args(tag_options))
if isinstance(what, list):
cmd.extend(what)
else:
cmd.append(what)
if add:
cmd.extend(list(chain.from_iterable(
('--addtag', _qt(x)) for x in add)))
if rm:
cmd.extend(list(chain.from_iterable(
('--deltag', _qt(x)) for x in rm)))
return call(cmd, False)
def pv_tag(pv_devices, add, rm, tag_options):
return _tag('pvchange', pv_devices, add, rm, tag_options)
def vg_tag(vg_name, add, rm, tag_options):
return _tag('vgchange', vg_name, add, rm, tag_options)
def lv_tag(lv_name, add, rm, tag_options):
return _tag('lvchange', lv_name, add, rm, tag_options)
def vg_rename(vg, new_name, rename_options):
cmd = ['vgrename']
cmd.extend(options_to_cli_args(rename_options))
cmd.extend([vg, new_name])
return call(cmd)
def vg_remove(vg_name, remove_options):
cmd = ['vgremove']
cmd.extend(options_to_cli_args(remove_options))
cmd.extend(['-f', vg_name])
return call(cmd)
def vg_lv_create(vg_name, create_options, name, size_bytes, pv_dests):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name, '--yes'])
pv_dest_ranges(cmd, pv_dests)
return call(cmd)
def vg_lv_snapshot(vg_name, snapshot_options, name, size_bytes):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(snapshot_options))
cmd.extend(["-s"])
if size_bytes != 0:
cmd.extend(['--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name])
return call(cmd)
def _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
if not thin_pool:
cmd.extend(['--size', str(size_bytes) + 'B'])
else:
cmd.extend(['--thin', '--size', str(size_bytes) + 'B'])
cmd.extend(['--yes'])
return cmd
def vg_lv_create_linear(vg_name, create_options, name, size_bytes, thin_pool):
cmd = _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool)
cmd.extend(['--name', name, vg_name])
return call(cmd)
def vg_lv_create_striped(vg_name, create_options, name, size_bytes,
num_stripes, stripe_size_kb, thin_pool):
cmd = _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool)
cmd.extend(['--stripes', str(num_stripes)])
if stripe_size_kb != 0:
cmd.extend(['--stripesize', str(stripe_size_kb)])
cmd.extend(['--name', name, vg_name])
return call(cmd)
def _vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes,
num_stripes, stripe_size_kb):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--type', raid_type])
cmd.extend(['--size', str(size_bytes) + 'B'])
if num_stripes != 0:
cmd.extend(['--stripes', str(num_stripes)])
if stripe_size_kb != 0:
cmd.extend(['--stripesize', str(stripe_size_kb)])
cmd.extend(['--name', name, vg_name, '--yes'])
return call(cmd)
def vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes,
num_stripes, stripe_size_kb):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
return _vg_lv_create_raid(vg_name, create_options, name, raid_type,
size_bytes, num_stripes, stripe_size_kb)
def vg_lv_create_mirror(
vg_name, create_options, name, size_bytes, num_copies):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--type', 'mirror'])
cmd.extend(['--mirrors', str(num_copies)])
cmd.extend(['--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name, '--yes'])
return call(cmd)
def vg_create_cache_pool(md_full_name, data_full_name, create_options):
cmd = ['lvconvert']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--type', 'cache-pool', '--force', '-y',
'--poolmetadata', md_full_name, data_full_name])
return call(cmd)
def vg_create_thin_pool(md_full_name, data_full_name, create_options):
cmd = ['lvconvert']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--type', 'thin-pool', '--force', '-y',
'--poolmetadata', md_full_name, data_full_name])
return call(cmd)
def lv_remove(lv_path, remove_options):
cmd = ['lvremove']
cmd.extend(options_to_cli_args(remove_options))
cmd.extend(['-f', lv_path])
return call(cmd)
def lv_rename(lv_path, new_name, rename_options):
cmd = ['lvrename']
cmd.extend(options_to_cli_args(rename_options))
cmd.extend([lv_path, new_name])
return call(cmd)
def lv_resize(lv_full_name, size_change, pv_dests,
resize_options):
cmd = ['lvresize', '--force']
cmd.extend(options_to_cli_args(resize_options))
if size_change < 0:
cmd.append("-L-%dB" % (-size_change))
else:
cmd.append("-L+%dB" % (size_change))
cmd.append(lv_full_name)
pv_dest_ranges(cmd, pv_dests)
return call(cmd)
def lv_lv_create(lv_full_name, create_options, name, size_bytes):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--virtualsize', str(size_bytes) + 'B', '-T'])
cmd.extend(['--name', name, lv_full_name, '--yes'])
return call(cmd)
def lv_cache_lv(cache_pool_full_name, lv_full_name, cache_options):
# lvconvert --type cache --cachepool VG/CachePoolLV VG/OriginLV
cmd = ['lvconvert']
cmd.extend(options_to_cli_args(cache_options))
cmd.extend(['-y', '--type', 'cache', '--cachepool',
cache_pool_full_name, lv_full_name])
return call(cmd)
def lv_detach_cache(lv_full_name, detach_options, destroy_cache):
cmd = ['lvconvert']
if destroy_cache:
option = '--uncache'
else:
# Currently fairly dangerous
# see: https://bugzilla.redhat.com/show_bug.cgi?id=1248972
option = '--splitcache'
cmd.extend(options_to_cli_args(detach_options))
# needed to prevent interactive questions
cmd.extend(["--yes", "--force"])
cmd.extend([option, lv_full_name])
return call(cmd)
def supports_json():
cmd = ['help']
rc, out, err = call(cmd)
if rc == 0:
if cfg.SHELL_IN_USE:
return True
else:
if 'fullreport' in err:
return True
return False
def lvm_full_report_json():
pv_columns = ['pv_name', 'pv_uuid', 'pv_fmt', 'pv_size', 'pv_free',
'pv_used', 'dev_size', 'pv_mda_size', 'pv_mda_free',
'pv_ba_start', 'pv_ba_size', 'pe_start', 'pv_pe_count',
'pv_pe_alloc_count', 'pv_attr', 'pv_tags', 'vg_name',
'vg_uuid', 'pv_missing']
pv_seg_columns = ['pvseg_start', 'pvseg_size', 'segtype',
'pv_uuid', 'lv_uuid', 'pv_name']
vg_columns = ['vg_name', 'vg_uuid', 'vg_fmt', 'vg_size', 'vg_free',
'vg_sysid', 'vg_extent_size', 'vg_extent_count',
'vg_free_count', 'vg_profile', 'max_lv', 'max_pv',
'pv_count', 'lv_count', 'snap_count', 'vg_seqno',
'vg_mda_count', 'vg_mda_free', 'vg_mda_size',
'vg_mda_used_count', 'vg_attr', 'vg_tags']
lv_columns = ['lv_uuid', 'lv_name', 'lv_path', 'lv_size',
'vg_name', 'pool_lv_uuid', 'pool_lv', 'origin_uuid',
'origin', 'data_percent',
'lv_attr', 'lv_tags', 'vg_uuid', 'lv_active', 'data_lv',
'metadata_lv', 'lv_parent', 'lv_role', 'lv_layout',
'snap_percent', 'metadata_percent', 'copy_percent',
'sync_percent', 'lv_metadata_size', 'move_pv', 'move_pv_uuid']
lv_seg_columns = ['seg_pe_ranges', 'segtype', 'lv_uuid']
cmd = _dc('fullreport', [
'-a', # Need hidden too
'--configreport', 'pv', '-o', ','.join(pv_columns),
'--configreport', 'vg', '-o', ','.join(vg_columns),
'--configreport', 'lv', '-o', ','.join(lv_columns),
'--configreport', 'seg', '-o', ','.join(lv_seg_columns),
'--configreport', 'pvseg', '-o', ','.join(pv_seg_columns),
'--reportformat', 'json'
])
rc, out, err = call(cmd)
if rc == 0:
		# With the current implementation, if we are using the shell then we
		# are using JSON, and the already-parsed JSON is handed back to us,
		# since it was parsed to figure out whether we completed OK or not
if cfg.SHELL_IN_USE:
assert(type(out) == dict)
return out
else:
return json.loads(out)
return None
def pv_retrieve_with_segs(device=None):
d = []
err = ""
out = ""
rc = 0
columns = ['pv_name', 'pv_uuid', 'pv_fmt', 'pv_size', 'pv_free',
'pv_used', 'dev_size', 'pv_mda_size', 'pv_mda_free',
'pv_ba_start', 'pv_ba_size', 'pe_start', 'pv_pe_count',
'pv_pe_alloc_count', 'pv_attr', 'pv_tags', 'vg_name',
'vg_uuid', 'pvseg_start', 'pvseg_size', 'segtype', 'pv_missing']
# Lvm has some issues where it returns failure when querying pvs when other
# operations are in process, see:
# https://bugzilla.redhat.com/show_bug.cgi?id=1274085
for i in range(0, 10):
cmd = _dc('pvs', ['-o', ','.join(columns)])
if device:
cmd.extend(device)
rc, out, err = call(cmd)
if rc == 0:
d = parse_column_names(out, columns)
break
else:
time.sleep(0.2)
log_debug("LVM Bug workaround, retrying pvs command...")
if rc != 0:
msg = "We were unable to get pvs to return without error after " \
"trying 10 times, RC=%d, STDERR=(%s), STDOUT=(%s)" % \
(rc, err, out)
log_error(msg)
raise RuntimeError(msg)
return d
def pv_resize(device, size_bytes, create_options):
cmd = ['pvresize']
cmd.extend(options_to_cli_args(create_options))
if size_bytes != 0:
cmd.extend(['--yes', '--setphysicalvolumesize', str(size_bytes) + 'B'])
cmd.extend([device])
return call(cmd)
def pv_create(create_options, devices):
cmd = ['pvcreate', '-ff']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(devices)
return call(cmd)
def pv_allocatable(device, yes, allocation_options):
yn = 'n'
if yes:
yn = 'y'
cmd = ['pvchange']
cmd.extend(options_to_cli_args(allocation_options))
cmd.extend(['-x', yn, device])
return call(cmd)
def pv_scan(activate, cache, device_paths, major_minors, scan_options):
cmd = ['pvscan']
cmd.extend(options_to_cli_args(scan_options))
if activate:
cmd.extend(['--activate', "ay"])
if cache:
cmd.append('--cache')
if len(device_paths) > 0:
for d in device_paths:
cmd.append(d)
if len(major_minors) > 0:
for mm in major_minors:
cmd.append("%s:%s" % (mm))
return call(cmd)
def vg_create(create_options, pv_devices, name):
cmd = ['vgcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.append(name)
cmd.extend(pv_devices)
return call(cmd)
def vg_change(change_options, name):
cmd = ['vgchange']
cmd.extend(options_to_cli_args(change_options))
cmd.append(name)
return call(cmd)
def vg_reduce(vg_name, missing, pv_devices, reduce_options):
cmd = ['vgreduce']
cmd.extend(options_to_cli_args(reduce_options))
if missing:
cmd.append('--removemissing')
elif len(pv_devices) == 0:
cmd.append('--all')
cmd.append(vg_name)
cmd.extend(pv_devices)
return call(cmd)
def vg_extend(vg_name, extend_devices, extend_options):
cmd = ['vgextend']
cmd.extend(options_to_cli_args(extend_options))
cmd.append(vg_name)
cmd.extend(extend_devices)
return call(cmd)
def _vg_value_set(name, arguments, options):
cmd = ['vgchange']
cmd.extend(options_to_cli_args(options))
cmd.append(name)
cmd.extend(arguments)
return call(cmd)
def vg_allocation_policy(vg_name, policy, policy_options):
return _vg_value_set(vg_name, ['--alloc', policy], policy_options)
def vg_max_pv(vg_name, number, max_options):
return _vg_value_set(vg_name, ['--maxphysicalvolumes', str(number)],
max_options)
def vg_max_lv(vg_name, number, max_options):
return _vg_value_set(vg_name, ['-l', str(number)], max_options)
def vg_uuid_gen(vg_name, ignore, options):
assert ignore is None
return _vg_value_set(vg_name, ['--uuid'], options)
def activate_deactivate(op, name, activate, control_flags, options):
cmd = [op]
cmd.extend(options_to_cli_args(options))
op = '-a'
if control_flags:
# Autoactivation
if (1 << 0) & control_flags:
op += 'a'
# Exclusive locking (Cluster)
if (1 << 1) & control_flags:
op += 'e'
# Local node activation
if (1 << 2) & control_flags:
op += 'l'
# Activation modes
if (1 << 3) & control_flags:
cmd.extend(['--activationmode', 'complete'])
elif (1 << 4) & control_flags:
cmd.extend(['--activationmode', 'partial'])
# Ignore activation skip
if (1 << 5) & control_flags:
cmd.append('--ignoreactivationskip')
if activate:
op += 'y'
else:
op += 'n'
cmd.append(op)
cmd.append(name)
return call(cmd)
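# Usage sketch (hypothetical values): exclusively activating a VG while
# ignoring activation skip ORs bit 1 and bit 5 together, producing
# 'vgchange --ignoreactivationskip -aey vg0':
#   activate_deactivate('vgchange', 'vg0', True, (1 << 1) | (1 << 5), {})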
def vg_retrieve(vg_specific):
if vg_specific:
assert isinstance(vg_specific, list)
columns = ['vg_name', 'vg_uuid', 'vg_fmt', 'vg_size', 'vg_free',
'vg_sysid', 'vg_extent_size', 'vg_extent_count',
'vg_free_count', 'vg_profile', 'max_lv', 'max_pv',
'pv_count', 'lv_count', 'snap_count', 'vg_seqno',
'vg_mda_count', 'vg_mda_free', 'vg_mda_size',
'vg_mda_used_count', 'vg_attr', 'vg_tags']
cmd = _dc('vgs', ['-o', ','.join(columns)])
if vg_specific:
cmd.extend(vg_specific)
d = []
rc, out, err = call(cmd)
if rc == 0:
d = parse_column_names(out, columns)
return d
def lv_retrieve_with_segments():
columns = ['lv_uuid', 'lv_name', 'lv_path', 'lv_size',
'vg_name', 'pool_lv_uuid', 'pool_lv', 'origin_uuid',
'origin', 'data_percent',
'lv_attr', 'lv_tags', 'vg_uuid', 'lv_active', 'data_lv',
'metadata_lv', 'seg_pe_ranges', 'segtype', 'lv_parent',
'lv_role', 'lv_layout',
'snap_percent', 'metadata_percent', 'copy_percent',
'sync_percent', 'lv_metadata_size', 'move_pv', 'move_pv_uuid']
cmd = _dc('lvs', ['-a', '-o', ','.join(columns)])
rc, out, err = call(cmd)
d = []
if rc == 0:
d = parse_column_names(out, columns)
return d
if __name__ == '__main__':
pv_data = pv_retrieve_with_segs()
for p in pv_data:
print(str(p))
| StarcoderdataPython |
3284337 | <reponame>Ivan1225/NameViz
import json
import jsonpickle
import os
import getopt, sys
import re
from analysis_name import check_outlier
class Name:
def __init__(self, name, filename, filepath, line, position, nametype, vartype, parent):
self.name = name
self.fileName = filename
self.filePath = filepath
self.line = line
self.position = position
self.type = nametype
self.variableType = vartype
self.subNames = []
self.parent = parent
if not (name and nametype):
self.isOutlier = False
self.errorMessage = ''
else:
extra_fields = check_outlier(name, nametype)
self.isOutlier = extra_fields["isOutlier"]
self.errorMessage = extra_fields["errorMessage"]
def addName(self, name):
self.subNames.append(name)
def main(argv):
directory = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"d:o:")
except getopt.GetoptError:
print ('usage is: search.py -d <inputdirectory> -o <outputfile>')
sys.exit(2)
for opt, arg in opts:
if opt == '-d':
directory = arg
elif opt == '-o':
outputfile = arg
if(directory == ''):
print ('usage is: search.py -d <inputdirectory> -o <outputfile>')
sys.exit(2)
if(outputfile == ''):
print ('usage is: search.py -d <inputdirectory> -o <outputfile>')
sys.exit(2)
if(os.path.isdir(directory) == False):
print ('error: ', directory, ' is not a valid directory')
sys.exit(2)
data = []
stack = []
for dirpath, dirs, files in os.walk(directory):
for filename in files:
fname = os.path.join(dirpath,filename)
relpath = os.path.relpath(fname, os.path.commonprefix([fname, os.getcwd()]))
if fname.endswith('.java'):
topNode = Name(None, filename, relpath, None, None, None, None, None)
currentNode = topNode
data.append(currentNode)
linecount = 0
commented = False
with open(fname) as myfile:
print('parsing: ' + filename + '...')
for line in myfile:
linecount += 1
if commented:
if '*/' in line:
line = line.split('*/')[1]
commented = False
else: continue
if '/*' in line:
aline = line.split('/*')[0]
commented = True
if '*/' in line:
bline = line.split('*/')[1]
commented = False
placeholder = ''
i = 0
while i < (line.find('*/') - line.find('/*')):
placeholder = placeholder + ' '
i += 1
line = aline + placeholder + bline
else:
line = aline
classmatch = re.search('(?<=class)(\s)+[^\s]+', line)
interfacematch = re.search('(?<=interface)(\s)+[^\s]+', line)
enummatch = re.search('(?<=enum)(\s)+[^\s]+', line)
methodmatch = re.search('[a-zA-Z]+[a-zA-Z0-9$_]*(\[\]|<[a-zA-Z]+[a-zA-Z0-9$_]*>)? +[a-zA-Z]+[a-zA-Z0-9$_]* *(?=\()', line)
varmatch = re.finditer('([a-zA-Z]+[a-zA-Z0-9$_]*(\[\]|<[a-zA-Z]+[a-zA-Z0-9$_]*>)?( +)[a-zA-Z]+[a-zA-Z0-9$_]*( *)(?=(=|;)))', line)
constantmatch = re.findall('final( +)([a-zA-Z]+[a-zA-Z0-9$_]*(\[\]|<[a-zA-Z]+[a-zA-Z0-9$_]*>)?( +)[a-zA-Z]+[a-zA-Z0-9$_]*( *)(?=(=|;)))', line)
openbracketmatch = re.findall('{', line)
closebracketmatch = re.findall('}', line)
if line.strip().startswith('//'):
continue
if classmatch != None:
newNode = Name(getClassLevelName(classmatch), filename, relpath, linecount, calculatePos(classmatch), 'ClassName', None, currentNode)
currentNode.addName(newNode)
stack.append('node')
currentNode = newNode
elif interfacematch != None:
newNode = Name(getClassLevelName(interfacematch), filename, relpath, linecount, calculatePos(interfacematch), 'InterfaceName', None, currentNode)
currentNode.addName(newNode)
stack.append('node')
currentNode = newNode
elif enummatch != None:
newNode = Name(getClassLevelName(enummatch), filename, relpath, linecount, calculatePos(enummatch), 'EnumName', None, currentNode)
currentNode.addName(newNode)
stack.append('node')
currentNode = newNode
elif methodmatch != None:
methodNameWithType = methodmatch.group(0).strip()
methodNameWithTypeArr = methodNameWithType.split(' ')
methodType = methodNameWithTypeArr[0]
if methodType not in ['new', 'else', 'throw']:
methodName = methodNameWithTypeArr[len(methodNameWithTypeArr)-1]
if methodName != currentNode.name:
newNode = Name(methodName, filename, relpath, linecount, calculatePos(methodmatch), 'MethodName', None, currentNode)
currentNode.addName(newNode)
stack.append('node')
currentNode = newNode
elif varmatch != []:
for match in varmatch:
nameWithType = match.group(0).strip()
nameTypeArr = nameWithType.split(' ')
name = nameTypeArr[len(nameTypeArr) - 1]
vartype = nameTypeArr[0]
nameType = 'VariableName'
if (vartype != 'return'):
if constantmatch != []:
for cons in constantmatch:
if (match[0] in cons):
nameType = 'ConstantName'
break
newNode = Name(name, filename, relpath, linecount, calculatePos(match), nameType, vartype, currentNode)
currentNode.addName(newNode)
if openbracketmatch != []:
for obracket in openbracketmatch:
stack.append('{')
if closebracketmatch != []:
for cbracket in closebracketmatch:
stack.pop()
if len(stack) == 0:
currentNode = topNode
elif stack[len(stack)-1] != '{':
currentNode = currentNode.parent
if currentNode == None:
currentNode = topNode
stack.pop()
print('parsing: ' + filename +' complete!')
print('----------------------')
with open(outputfile, 'w') as outfile:
outfile.write(jsonpickle.encode(data))
def calculatePos(match):
matchArr = match.group(0).split(' ')
pos = match.span()[0]
    i = 0
    while i < len(matchArr) - 1:
        # each token before the last -- including the empty tokens that
        # split(' ') produces for consecutive spaces -- is followed by exactly
        # one separator space, so advance past the token plus that space
        pos += len(matchArr[i]) + 1
        i += 1
return pos
def getClassLevelName(match):
return match.group(0).strip()
if __name__ == "__main__":
main(sys.argv[1:]) | StarcoderdataPython |
3360817 | <gh_stars>100-1000
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import join
import numpy as np
from tqdm import tqdm
from absl import app, flags
from third_party.xiuminglib import xiuminglib as xm
from data_gen.util import save_npz
from brdf.renderer import SphereRenderer
from brdf.merl.merl import MERL
flags.DEFINE_string('indir', '', "directory to downloaded MERL binary files")
flags.DEFINE_float('vali_frac', 0.01, "fraction of data used for validation")
flags.DEFINE_string(
'envmap_path', 'point', "light probe path or a special string like 'point'")
flags.DEFINE_integer('envmap_h', 16, "light probe height")
flags.DEFINE_float('envmap_inten', 40., "light probe intensity")
flags.DEFINE_float(
'slice_percentile', 80,
"clip percentile for visualizing characteristic slice")
flags.DEFINE_integer('ims', 128, "render size during visualization")
flags.DEFINE_integer('spp', 1, "samples per pixel for BRDF rendering")
flags.DEFINE_string('outdir', '', "output directory")
flags.DEFINE_boolean(
'overwrite', False, "whether to remove output folder if it already exists")
FLAGS = flags.FLAGS
def main(_):
xm.os.makedirs(FLAGS.outdir, rm_if_exists=FLAGS.overwrite)
brdf = MERL()
# ------ Testing
renderer = SphereRenderer(
FLAGS.envmap_path, FLAGS.outdir, envmap_inten=FLAGS.envmap_inten,
envmap_h=FLAGS.envmap_h, ims=FLAGS.ims, spp=FLAGS.spp)
  # The first 90x90 Rusink. coordinates are for the characteristic slice
cslice_rusink = brdf.get_characterstic_slice_rusink()
cslice_rusink = np.reshape(cslice_rusink, (-1, 3))
# Next are for rendering
render_rusink = brdf.dir2rusink(renderer.ldir, renderer.vdir)
render_rusink = render_rusink[renderer.lvis.astype(bool)]
qrusink = np.vstack((cslice_rusink, render_rusink))
data = {
'envmap_h': FLAGS.envmap_h, 'ims': FLAGS.ims, 'spp': FLAGS.spp,
'rusink': qrusink.astype(np.float32)}
out_path = join(FLAGS.outdir, 'test.npz')
save_npz(data, out_path)
# ------ Training & Validation
brdf_paths = xm.os.sortglob(FLAGS.indir)
for i, path in enumerate(tqdm(brdf_paths, desc="Training & Validation")):
brdf = MERL(path=path)
rusink = brdf.tbl[:, :3]
refl = brdf.tbl[:, 3:]
refl = xm.img.rgb2lum(refl)
refl = refl[:, None]
# Training-validation split
n = brdf.tbl.shape[0]
take_every = int(1 / FLAGS.vali_frac)
ind = np.arange(0, n)
vali_ind = np.arange(0, n, take_every, dtype=int)
train_ind = np.array([x for x in ind if x not in vali_ind])
train_rusink = rusink[train_ind, :]
train_refl = refl[train_ind, :]
vali_rusink = rusink[vali_ind, :]
vali_refl = refl[vali_ind, :]
train_data = {
'i': i, 'name': brdf.name,
'envmap_h': FLAGS.envmap_h, 'ims': FLAGS.ims, 'spp': FLAGS.spp,
'rusink': train_rusink.astype(np.float32),
'refl': train_refl.astype(np.float32)}
vali_data = {
'i': i, 'name': brdf.name,
'envmap_h': FLAGS.envmap_h, 'ims': FLAGS.ims, 'spp': FLAGS.spp,
'rusink': vali_rusink.astype(np.float32),
'refl': vali_refl.astype(np.float32)}
# Dump to disk
out_path = join(FLAGS.outdir, 'train_%s.npz' % brdf.name)
save_npz(train_data, out_path)
out_path = join(FLAGS.outdir, 'vali_%s.npz' % brdf.name)
save_npz(vali_data, out_path)
# Visualize
vis_dir = join(FLAGS.outdir, 'vis')
for achro in (False, True):
# Characteristic slice
cslice = brdf.get_characterstic_slice()
if achro:
cslice = xm.img.rgb2lum(cslice)
cslice = np.tile(cslice[:, :, None], (1, 1, 3))
cslice_img = brdf.characteristic_slice_as_img(
cslice, clip_percentile=FLAGS.slice_percentile)
folder_name = 'cslice'
if achro:
folder_name += '_achromatic'
out_png = join(vis_dir, folder_name, brdf.name + '.png')
xm.io.img.write_img(cslice_img, out_png)
# Render with this BRDF
qrusink = brdf.dir2rusink(renderer.ldir, renderer.vdir)
lvis = renderer.lvis.astype(bool)
qrusink_flat = qrusink[lvis]
rgb_flat = brdf.query(qrusink_flat)
rgb = np.zeros_like(renderer.lcontrib)
rgb[lvis] = rgb_flat
if achro:
rgb = xm.img.rgb2lum(rgb)
rgb = np.tile(rgb[:, :, :, None], (1, 1, 1, 3))
render = renderer.render(rgb)
folder_name = 'render'
if achro:
folder_name += '_achromatic'
out_png = join(vis_dir, folder_name, brdf.name + '.png')
xm.io.img.write_arr(render, out_png, clip=True)
if __name__ == '__main__':
app.run(main)
| StarcoderdataPython |
68935 | # 461. Hamming Distance
class Solution:
def hammingDistance(self, x: int, y: int) -> int:
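        # x ^ y has a 1 bit exactly where x and y differ, so counting the
        # '1' characters in its binary representation gives the Hamming
        # distance, e.g. hammingDistance(1, 4): 0b001 ^ 0b100 = 0b101 -> 2.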
return bin(x ^ y).count('1') | StarcoderdataPython |
194273 | """A collection of decorators to modify rule docstrings for Sphinx."""
from sqlfluff.core.rules.config_info import STANDARD_CONFIG_INFO_DICT
from sqlfluff.core.rules.base import rules_logger # noqa
FIX_COMPATIBLE = "``sqlfluff fix`` compatible."
def document_fix_compatible(cls):
"""Mark the rule as fixable in the documentation."""
cls.__doc__ = cls.__doc__.replace("\n", f"\n\n{FIX_COMPATIBLE}\n", 1)
return cls
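# Usage sketch (docstring abridged): applied as a class decorator over a rule
# so the generated docs advertise fixability, e.g.
#
#   @document_fix_compatible
#   class Rule_L001(BaseRule):
#       """Unnecessary trailing whitespace.
#
#       ...
#       """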
def is_fix_compatible(cls) -> bool:
"""Return whether the rule is documented as fixable."""
return FIX_COMPATIBLE in cls.__doc__
def document_configuration(cls, ruleset="std"):
"""Add a 'Configuration' section to a Rule docstring.
    Utilize the metadata in config_info to dynamically
document the configuration options for a given rule.
This is a little hacky, but it allows us to propagate configuration
options in the docs, from a single source of truth.
"""
if ruleset == "std":
config_info = STANDARD_CONFIG_INFO_DICT
else:
raise (
NotImplementedError(
"Add another config info dict for the new ruleset here!"
)
)
config_doc = "\n | **Configuration**"
try:
for keyword in cls.config_keywords:
try:
info_dict = config_info[keyword]
except KeyError:
raise KeyError(
"Config value {!r} for rule {} is not configured in `config_info`.".format(
keyword, cls.__name__
)
)
config_doc += "\n | `{0}`: {1}. Must be one of {2}.".format(
keyword, info_dict["definition"], info_dict["validation"]
)
config_doc += "\n |"
except AttributeError:
rules_logger.info("No config_keywords defined for {0}".format(cls.__name__))
return cls
# Add final blank line
config_doc += "\n"
# Add the configuration section immediately after the class description
# docstring by inserting after the first line break, or first period,
# if there is no line break.
end_of_class_description = "." if "\n" not in cls.__doc__ else "\n"
cls.__doc__ = cls.__doc__.replace(end_of_class_description, "\n" + config_doc, 1)
return cls
| StarcoderdataPython |
125783 | <reponame>alfredo-milani/ParseScript<gh_stars>0
import threading
class ParseThread(threading.Thread):
"""
"""
__lock = threading.Lock()
def __init__(self, target, target_args=(), callback=None, callback_args=(), *args, **kwargs):
super(ParseThread, self).__init__(target=self.__target_with_callback, *args, **kwargs)
self.__method = target
self.__method_args = target_args
self.__callback = callback
self.__callback_args = callback_args
def __target_with_callback(self):
self.__method(*self.__method_args)
if self.__callback is not None:
with ParseThread.__lock:
self.__callback(*self.__callback_args)
@classmethod
def get_lock(cls):
cls.__lock.acquire()
@classmethod
def release_lock(cls):
cls.__lock.release()
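# Usage sketch (target and callback names are hypothetical): run a parse on a
# worker thread, with the callback serialized by the class-wide lock.
#
#   t = ParseThread(target=parse_file, target_args=('input.txt',),
#                   callback=on_parsed, callback_args=('input.txt',))
#   t.start()
#   t.join()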
| StarcoderdataPython |
1730056 | <gh_stars>0
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import Group
from .forms import CustomUserChangeForm, CustomUserCreationForm
from .models import Address, CustomUser, Profile, TOTPRequest
# Register your models here.
admin.site.site_header = "Online Store Project - Overall Site Administration"
admin.site.unregister(Group)
class CustomUserAdmin(UserAdmin):
add_form = CustomUserCreationForm
form = CustomUserChangeForm
model = CustomUser
list_display = (
"first_name",
"last_name",
"email",
"is_salesman",
"is_staff",
"is_active",
"passed_phone_number_verification",
)
list_filter = (
"is_superuser",
"is_salesman",
"is_active",
"passed_phone_number_verification",
)
fieldsets = (
(
None,
{
"fields": (
"email",
"phone_number",
"first_name",
"last_name",
"password",
"is_<PASSWORD>",
"passed_phone_number_verification",
)
},
),
("Permissions", {"fields": ("is_staff", "is_active")}),
)
add_fieldsets = (
(
None,
{
"classes": ("wide",),
"fields": (
"email",
"phone_number",
"first_name",
"last_name",
"<PASSWORD>",
"<PASSWORD>",
"is_salesman",
"is_staff",
"is_active",
"passed_phone_number_verification",
),
},
),
)
search_fields = (
"first_name",
"last_name",
)
ordering = (
"first_name",
"last_name",
)
admin.site.register(CustomUser, CustomUserAdmin)
class ProfileAdmin(admin.ModelAdmin):
ordering = ("id",)
admin.site.register(Profile, ProfileAdmin)
class AddressAdmin(admin.ModelAdmin):
list_display = (
"city",
"address",
"user",
)
list_filter = (
"city",
"user",
)
search_fields = (
"city",
"address",
)
ordering = ("city",)
admin.site.register(Address, AddressAdmin)
class TOTPRequestAdmin(admin.ModelAdmin):
list_display = (
"request_id",
"channel",
"receiver",
"code",
"created",
)
ordering = ("-created",)
admin.site.register(TOTPRequest, TOTPRequestAdmin)
| StarcoderdataPython |
1605618 | <gh_stars>1-10
#!/usr/bin/env python
import hashlib
import os
import sys
from abc import ABC, abstractmethod
from binascii import hexlify
from getpass import getpass
from optparse import OptionParser
import sha3
from mnemonic.mnemonic import Mnemonic
from pycoin.contrib.segwit_addr import bech32_encode, convertbits
from pycoin.encoding.b58 import b2a_hashed_base58
from pycoin.encoding.bytes32 import to_bytes_32
from pycoin.symbols import btc, xtn
# mw -p TREZOR 'abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about'
# > seed c55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04
# ku H:$SEED
# > master xprv9s21ZrQH143K3h3fDYiay8mocZ3afhfULfb5GX8kCBdno77K4HiA15Tg23wpbeF1pLfs1c5SPmYHrEpTuuRhxMwvKDwqdKiGJS9XFKzUsAF
# ku -s "44'/0'/0'/0/0" H:$SEED
# > 1PEha8dk5Me5J1rZWpgqSt5F4BroTBLS5y
VISUALIZATION_PATH = "9999'/9999'"
class Coin(ABC):
def __init__(self, address_prefix, coin_derivation, deposit_path, change_path):
self.address_prefix = address_prefix
self.coin_derivation = coin_derivation
self.deposit_path = deposit_path
self.change_path = change_path
@abstractmethod
def to_address(self, subkey, purpose):
pass
@abstractmethod
def to_private(self, exponent):
pass
def path(self, i, change, purpose):
extra_path = self.change_path if change else self.deposit_path
return self.base_derivation(purpose) + "%s/%d" % (extra_path, i)
def address(self, master, i, change=False, purpose=None):
path = self.path(i, change, purpose)
subkey = next(master.subkeys(path))
private = self.to_private(subkey.secret_exponent())
address = self.to_address(subkey, purpose)
return address, private
def has_change_chain(self):
return self.change_path is not None
def can_generate_addresses(self, purpose):
return True
def xpub(self, master, purpose):
raise NotImplementedError()
def base_derivation(self, purpose):
return self.coin_derivation
class BTCCoin(Coin):
def __init__(self, *args, bech_prefix):
super().__init__(*args)
self.bech_prefix = bech_prefix
def can_generate_addresses(self, purpose):
if purpose == 'p2wsh':
return False
return True
def path(self, i, change, purpose):
base_derivation = self.base_derivation(purpose)
extra_path = self.change_path if change else self.deposit_path
return base_derivation + "%s/%d" % (extra_path, i)
def base_derivation(self, purpose):
if purpose is None or purpose == 'p2pkh':
return self.coin_derivation
elif purpose == 'p2wpkh' or purpose == 'p2wsh':
if self.address_prefix == b'\0':
return "48'/0'/0'/2'"
elif self.address_prefix == b'\x6f':
return "48'/1'/0'/2'"
raise RuntimeError('invalid coin')
else:
raise RuntimeError('invalid purpose ' + purpose)
def to_address(self, subkey, purpose):
if purpose == 'p2wpkh':
return bech32_encode(self.bech_prefix, [0] + convertbits(subkey.hash160(), 8, 5))
elif purpose == 'p2wsh':
raise RuntimeError('no addresses can be generated for p2wsh')
else:
return b2a_hashed_base58(self.address_prefix + subkey.hash160())
def to_private(self, exponent):
return b2a_hashed_base58(b'\x80' + to_bytes_32(exponent) + b'\01')
def xpub(self, master, purpose):
base_derivation = self.base_derivation(purpose)
subkey = next(master.subkeys(base_derivation))
return subkey.as_text()
def xprv(self, master, purpose):
base_derivation = self.base_derivation(purpose)
subkey = next(master.subkeys(base_derivation))
return subkey.as_text(as_private=True)
class ETHCoin(Coin):
def to_address(self, subkey, _purpose):
hasher = sha3.keccak_256()
sec = subkey.sec(False)[1:]
hasher.update(sec)
return hexlify(hasher.digest()[-20:]).decode()
def to_private(self, exponent):
return hexlify(to_bytes_32(exponent)).decode()
class XRPCoin(Coin):
def to_address(self, subkey, _purpose):
from .ripple import RippleBaseDecoder
ripple_decoder = RippleBaseDecoder()
return ripple_decoder.encode(subkey.hash160())
def to_private(self, exponent):
return hexlify(to_bytes_32(exponent)).decode()
class CosmosCoin(Coin):
def to_address(self, subkey, _purpose):
return bech32_encode(self.address_prefix.decode(), convertbits(subkey.hash160(), 8, 5))
def to_private(self, exponent):
return hexlify(to_bytes_32(exponent)).decode()
coin_map = {
"btc": BTCCoin(b'\0', "44'/0'/0'", "/0", "/1", bech_prefix='bc'),
"tbtc": BTCCoin(b'\x6f', "44'/1'/0'", "/0", "/1", bech_prefix='tb'),
"zcash": BTCCoin(b'\x1c\xb8', "44'/1893'/0'", "/0", "/1", bech_prefix=None),
"eth": ETHCoin(b'', "44'/60'/0'", "/0", None),
"rop": ETHCoin(b'', "44'/1'/0'", "/0", None),
"xrp": XRPCoin(b'', "44'/144'/0'", "/0", None),
"txrp": XRPCoin(b'', "44'/1'/0'", "/0", None),
"cosmos": CosmosCoin(b'cosmos', "44'/118'/0'", "/0", None),
}
coins = list(coin_map.keys())
coin_list = ', '.join(coins)
purposes = ['p2pkh', 'p2wpkh', 'p2wsh']
purpose_list = ', '.join(purposes)
def mnemonic_to_master(mnemonic, passphrase, netcode='BTC', check=True):
if check:
if not Mnemonic('english').check(mnemonic):
raise RuntimeError('mnemonic is non-standard, please check spelling')
seed = Mnemonic.to_seed(mnemonic, passphrase=passphrase)
if netcode == 'BTC':
master = btc.network.keys.bip32_seed(seed)
elif netcode == 'XTN':
master = xtn.network.keys.bip32_seed(seed)
else:
raise RuntimeError("unknown netcode")
return seed, master
def generate(data=None):
if data is None:
data = os.urandom(16)
return Mnemonic('english').to_mnemonic(data)
def hash_entropy(entropy_string):
ee = hashlib.sha256(entropy_string.encode('utf-8'))
return ee.digest()[0:16]
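# SHA-256 reduces the typed string to 16 bytes (128 bits), which generate()
# then expands into a 12-word BIP-39 mnemonic; no extra entropy is mixed in.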
def visual(master):
import hashprint
subkey = next(master.subkeys(VISUALIZATION_PATH))
return hashprint.pformat(list(bytearray(subkey.hash160())))
def main():
parser = OptionParser(usage="""Usage: %prog [options] [MNEMONIC_PHRASE]
Note that the mnemonic phrase is required if --generate is not supplied.
""")
parser.add_option("-p", "--passphrase", help="use an additional wallet passphrase, will prompt if not provided."
" Pass an empty string to not have a passphrase.", metavar="PASSPHRASE")
parser.add_option("-r", "--show-private", default=False, action="store_true", help="show private keys")
parser.add_option("-s", "--show-seed", default=False, action="store_true", help="show master seed")
parser.add_option("-x", "--show-xpub", default=False, action="store_true", help="show xpub and xprv if --show-private is on")
parser.add_option("-c", "--coin", default="btc", help="use COIN, one of: " + coin_list, choices=coins)
parser.add_option("-n", "--count", default=20, type="int", help="print out N addresses", metavar="N")
parser.add_option("-g", "--generate", default=False, action="store_true", help="generate a seed")
parser.add_option("-e", "--entropy", default=False, action="store_true", help="type some entropy")
parser.add_option("-v", "--visual", default=False, action="store_true", help="print visual seed")
parser.add_option("-u", "--purpose", default=None, help="one of: " + purpose_list, choices=purposes)
parser.add_option("-a", "--change", default=False, action="store_true", help="show change addresses")
parser.add_option("-q", "--quiet", default=False, action="store_true", help="be quiet")
parser.add_option("--allow-non-standard", default=False, action="store_true", help="allow non-standard mnemonic")
(options, args) = parser.parse_args()
if len(args) > 1:
sys.stderr.write('too many arguments - did you quote the mnemonic phrase?\n')
sys.exit(1)
entropy = None
if options.entropy:
if not options.quiet:
sys.stdout.write("Enter entropy string followed by a \\n. ")
sys.stdout.write("No entropy is added, make sure you provide enough.\n")
sys.stdout.write(": ")
entropy_string = input()
entropy = hash_entropy(entropy_string)
if options.generate:
print(generate(entropy))
exit()
passphrase = options.passphrase if options.passphrase is not None else getpass('Passphrase: ')
# TODO this should be in the coin map, not here
netcode = 'BTC'
if options.coin == 'tbtc':
netcode = 'XTN'
(seed, master) = mnemonic_to_master(args[0], passphrase, netcode, not options.allow_non_standard)
if options.show_seed:
print(hexlify(seed))
exit()
if options.visual:
visualization = visual(master)
if sys.stdout.isatty():
from .colorize import colorize
print(colorize(visualization))
else:
print(visualization)
coin = coin_map[options.coin]
if not options.quiet:
print("base derivation path: %s" % coin.base_derivation(options.purpose))
if options.show_xpub:
print(("%s" if options.quiet else "xpub: %s") % coin.xpub(master, options.purpose))
if options.show_private:
print(("%s" if options.quiet else "xprv: %s") % coin.xprv(master, options.purpose))
if coin.can_generate_addresses(options.purpose):
for i in range(options.count):
(address, private) = coin.address(master, i, change=options.change, purpose=options.purpose)
if options.show_private:
print("%s %s" % (address, private))
else:
print("%s" % (address,))
else:
if not options.show_xpub:
print("no addresses can be shown for %s, try --show-xpub" % options.purpose)
if __name__ == "__main__":
main()
| StarcoderdataPython |
1774701 | <reponame>matthewelse/bleep<gh_stars>10-100
# bleep: BLE Abstraction Library for Python
#
# Copyright (c) 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 2/3 compatibility
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from future.utils import bytes_to_native_str, native_str_to_bytes
from future.builtins import int, bytes
from .characteristic import GATTCharacteristic
from ..util import BLEUUID, UUIDAccessor
import logging
class GATTService(object):
"""Represents a single BLE Characteristic
Attributes:
characteristic (UUIDAccessor): Allows dictionary-like access to _unique_
characteristics.
characteristics (UUIDAccessor): Allows dictionary-like access to all characteristics.
GATTService.characteristics[UUID] always returns a list of GATTCharacteristics
"""
def __init__(self, device, uuid, start, end):
"""Creates an instance of GATTService.
Args:
device (BLEDevice): BLEDevice object of which this is an attribute
uuid (BLEUUID): The uuid representing this particular attribute
start (int): The first handle of this service
end (int): The last handle in this service
"""
self.logger = logging.getLogger('bleep.GATTService')
self.device = device
self.uuid = uuid
self.start = start
self.end = end
self._characteristics = self._discover_characteristics()
self.characteristic = UUIDAccessor(self._characteristics)
self.characteristics = UUIDAccessor(self._characteristics, True)
def _discover_characteristics(self):
characteristics = {}
self.logger.debug("Discovering Characteristics")
raw_chars = self.device.requester.discover_characteristics(self.start, self.end)
self.logger.debug("Discovered: %s", raw_chars)
for i, char in enumerate(raw_chars):
handle = char['handle']
value_handle = char['value_handle']
uuid = char['uuid']
properties = char['properties']
if i == len(raw_chars) - 1:
end_handle = self.end - 1
else:
end_handle = raw_chars[i + 1]['handle'] - 1
characteristic = GATTCharacteristic(self.device, handle, value_handle, end_handle, BLEUUID(uuid), properties)
if characteristic.uuid not in characteristics:
characteristics[characteristic.uuid] = [characteristic]
else:
characteristics[characteristic.uuid].append(characteristic)
return characteristics
def __repr__(self):
return str(self.uuid)
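# Usage sketch: per the class docstring, `service.characteristic[uuid]` is
# the dictionary-like accessor for unique matches, while
# `service.characteristics[uuid]` always returns a list of matches.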
| StarcoderdataPython |
1694642 | from aws_cdk import (
aws_lambda as _lambda,
aws_sns as sns,
aws_sns_subscriptions as subscriptions,
aws_dynamodb as dynamo_db,
core
)
class TheDynamoFlowStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, sns_topic_arn: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# DynamoDB Table
table = dynamo_db.Table(self, "Hits",
partition_key=dynamo_db.Attribute(name="path", type=dynamo_db.AttributeType.STRING)
)
dynamo_lambda = _lambda.Function(self, "DynamoLambdaHandler",
runtime=_lambda.Runtime.NODEJS_12_X,
handler="dynamo.handler",
code=_lambda.Code.from_asset("lambdas"),
tracing=_lambda.Tracing.ACTIVE,
environment={
"HITS_TABLE_NAME": table.table_name
}
)
# grant the lambda role read/write permissions to our table
table.grant_read_write_data(dynamo_lambda)
topic = sns.Topic.from_topic_arn(self, 'SNSTopic', sns_topic_arn)
topic.add_subscription(subscriptions.LambdaSubscription(dynamo_lambda))
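# Instantiation sketch (the topic ARN value is hypothetical): wired into a
# CDK app next to whichever stack exports the SNS topic.
#
#   app = core.App()
#   TheDynamoFlowStack(app, 'the-dynamo-flow',
#                      sns_topic_arn='arn:aws:sns:us-east-1:123456789012:hits')
#   app.synth()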
| StarcoderdataPython |
4827100 | from datetime import datetime
from datetime import timedelta
def date_plus_days(date, days):
if date == '':
return current_day()
my_date = datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]))
my_date = my_date + timedelta(days=days)
if my_date > datetime.today():
print(current_day())
return current_day()
print(my_date.strftime('%Y%m%d'))
return my_date.strftime('%Y%m%d')
def date_less_days(date, days):
my_date = datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]))
my_date = my_date - timedelta(days=days)
return my_date.strftime('%Y%m%d')
def day_of_year(date):
my_date = datetime.strptime(date, '%Y-%m-%d')
return my_date.timetuple().tm_yday
def current_day():
return datetime.today().strftime('%Y%m%d')
def current_day_weather():
return datetime.today().strftime('%Y-%m-%d')
def date_weather_format(date):
my_date = datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]))
return my_date.strftime('%Y-%m-%d')
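# Example (illustrative): day_of_year('2020-03-01') -> 61, since 2020 is a
# leap year (31 + 29 + 1), and current_day() formats today as 'YYYYMMDD'.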
| StarcoderdataPython |
3237441 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import json as json
from collections import defaultdict
import pickle
import ipdb as ipdb
#import apply_lexical_rule
from pprint import pprint
import segment_phrases.segment_phrases as seg
class TestLexicalRule(unittest.TestCase):
# setup -- create a new CKY instance, a new phrase table, and a new phrase segmentation
def setUp(self):
test_sen = ["Sample", "sentences", "are", "always", "really", "dumb", "."]
self.test_phrases = seg.all_phrases(test_sen, len(test_sen))
# small map
#self.phrase_table = pickle.load(open('phrase_table/test_phrase_table.db'))
# big map
self.phrase_table = pickle.load(open('phrase_table/big_phrase_table.db'))
# this is a test of the segmenter
def test_items_in_phrases(self):
self.assertListEqual(self.test_phrases[5], [ ["Sample", "sentences", "are", "always", "really", "dumb"],["sentences", "are", "always", "really", "dumb", "."] ], "The top-level (2 phrase) segmentation should be correct")
def test_phrase_mapping(self):
# we know this sentence has some matches in the phrase table
de_test_sen = ["Akteure", "haben", "Aktien"]
# print the mappings for each phrase
for i, level in enumerate(seg.all_phrases(de_test_sen,len(de_test_sen))):
#print "FOR LEVEL: %i" % i
for j, phrase in enumerate(level):
index = "[" + str(i) + ", " + str(j) + "]"
#print "PHRASE INDEX: %s" % index
#print "FOR FOREIGN PHRASE COVERAGE: %s" % phrase
key = " ".join(phrase) #TODO: hack? -- depends upon tokenization methods
# print "\tThe mappings are: %s" % str(self.phrase_table[key])
print "\tThe mappings are: %s" % str([(x["target"], x["e|f"]) for x in self.phrase_table[key]])
def test_lexical_rule_application(self):
        # segments should be put into the proper spot in the table
pass
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1684624 | <reponame>PedroHenriqueSimoes/Exercicios-Python
sal = (float(input('What is your salary? ')))
if sal <= 1250.00:
    print('The salary of R${:.2f} has increased to R${:.2f}'.format(sal, ((sal * 15) / 100) + sal))
    print('thanks to a 15 percent raise.')
else:
    print('The salary of R${:.2f} has increased to R${:.2f}'.format(sal, ((sal * 10) / 100) + sal))
    print('thanks to a 10 percent raise.')
17451 | <filename>examples/nni_data_augmentation/basenet/data.py
#!/usr/bin/env python
"""
data.py
"""
import itertools
def loopy_wrapper(gen):
while True:
for x in gen:
yield x
class ZipDataloader:
def __init__(self, dataloaders):
self.dataloaders = dataloaders
self._len = len(dataloaders[0])
def __len__(self):
return self._len
def __iter__(self):
counter = 0
iters = [loopy_wrapper(d) for d in self.dataloaders]
while counter < len(self):
yield tuple(zip(*[next(it) for it in iters]))
counter += 1
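# Usage sketch (loader names hypothetical, assuming each loader yields
# (x, y) batches): iteration stops after len(dataloaders[0]) steps, with the
# shorter loader cycled forever by loopy_wrapper.
#
#   zipped = ZipDataloader([labeled_loader, unlabeled_loader])
#   for (x_lab, x_unlab), (y_lab, y_unlab) in zipped:
#       pass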
| StarcoderdataPython |
100652 | <filename>07/solve.py
import re
import json
import ast
f = open("input.txt","r").read()
rx = re.sub(r"\n", r'",\n"', f)
rx = '[\n\"'+rx+'\"\n]'
d = ast.literal_eval(rx)
print(d)
rules = {}
colors = {}
def handle(str):
[src,content] = str.split("bags contain")
src = src[:-1]
content = content.split(",")
content = [x.split(" ") for x in content]
if src in rules:
print("UNHANDLED for:"+src)
rules.update({src:{}})
colors.update({src:0})
for x in content:
if x[1] == "no": continue
name = x[2]+" "+x[3]
count = int(x[1])
colors.update({name:0})
rules[src].update({name:count})
for x in d:
handle(x)
for (k,v) in rules.items():
print(str(k)+":\t"+str(v))
print(colors)
def dictMerge(d1,d2):
ret = {}
ret.update(d2)
for (k,v) in d1.items():
if k in ret:
ret[k] += v
else:
ret.update({k:v})
return ret
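# e.g. dictMerge({'a': 1, 'b': 2}, {'b': 3}) -> {'b': 5, 'a': 1}: counts
# from both dicts are summed key-wise.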
def getch(color, depth = 0):
print((" "*depth)+color+str(rules[color]))
ret = {color:1}
if len(rules[color]) == 0:
return {color:1}
else:
for (k,v) in rules[color].items():
child = {}
for content,nOcc in getch(k, depth+1).items():
child.update({content:nOcc*v})
ret = dictMerge(ret, child)
return ret
"""
def contains(arr, index, mustContain):
if mustContain in arr[index]:
return True
has = False
for k,v in rules[index].items():
has|=contains(arr, k, mustContain)
return has
nHasGold = 0
for c in colors:
x = contains(rules, c, "shiny gold")
if x == True:
nHasGold+=1
print({c:x})
print(nHasGold)
"""
x = getch("shiny gold")
print(x)
print(sum(x.values())-1)
| StarcoderdataPython |
1691314 | import re
from fuzzywuzzy import fuzz
major_matcher = re.compile(r'(?<={).*?(?=})')
def judge_answer(user_answer, question_answer):
"""Judge answer response as correct or not
"""
user_answer = user_answer.lower()
question_answer = question_answer.lower()
if user_answer == "":
return False
major_answers = major_matcher.findall(question_answer)
if len(major_answers) <= 0:
major_answers = [question_answer]
r = 0.8*compare_answer_tokens(user_answer, major_answers) + \
0.2*compare_answer_partial(user_answer, major_answers)
return r >= 0.7
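# Illustrative example (answer text hypothetical): for a stored answer of
# 'the {mississippi} river', the braced span is what gets graded, so a user
# answer like 'mississipi' with a single-letter typo still clears the 0.7
# threshold.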
def compare_answer_tokens(user_answer, major_answers):
"""Compare by ordered tokens"""
return max(fuzz.token_sort_ratio(user_answer, major_answer)/100 for major_answer in major_answers)
def compare_answer_partial(user_answer, major_answers):
"""Compare by partial"""
return max(fuzz.partial_ratio(user_answer, major_answer)/100 for major_answer in major_answers) | StarcoderdataPython |
4780 | <gh_stars>1000+
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment wrapper around the maze navigation environment.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from . import simple_maze
import cv2
import numpy as np
class Environment(object):
"""Wrapper around the Simple maze environment."""
def __init__(self, difficulty=None):
"""Initialize the environment with the specified difficulty."""
self.difficulty = difficulty
self._sim_env = simple_maze.navigate(difficulty=difficulty)
self.stepcount = 0
def reset(self):
"""Resets the environment."""
self.stepcount = 0
time_step = self._sim_env.reset()
return time_step
def get_goal_im(self):
"""Computes and returns the goal image."""
currp = copy.deepcopy(self._sim_env.physics.data.qpos[:])
currv = copy.deepcopy(self._sim_env.physics.data.qvel[:])
self._sim_env.task.dontreset = True
tg = copy.deepcopy(self._sim_env.physics.named.data.geom_xpos['target'][:2])
self._sim_env.physics.data.qpos[:] = tg
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
self._sim_env.physics.data.qpos[:] = tg
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
self._sim_env.physics.data.qpos[:] = currp
self._sim_env.physics.data.qvel[:] = currv
self.step([0, 0])
self._sim_env.task.dontreset = False
return gim
def get_subgoal_ims(self, numg):
"""Computes and returs the ground truth sub goal images."""
currp = copy.deepcopy(self._sim_env.physics.data.qpos[:])
currv = copy.deepcopy(self._sim_env.physics.data.qvel[:])
self._sim_env.task.dontreset = True
tg = copy.deepcopy(self._sim_env.physics.named.data.geom_xpos['target'][:2])
sg = []
if self.difficulty == 'e':
if numg == 1:
self._sim_env.physics.data.qpos[:] = currp + (tg - currp) / 2
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
elif numg == 2:
self._sim_env.physics.data.qpos[:] = currp + (tg - currp) / 3
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
self._sim_env.physics.data.qpos[:] = currp + 2 * (tg - currp) / 3
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
elif self.difficulty == 'm':
if numg == 1:
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall2A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall2A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
elif numg == 2:
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall2A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall2A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall2A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall2A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
elif self.difficulty == 'h':
if numg == 1:
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall1A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall1A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
elif numg == 2:
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall1A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall1A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall2A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall2A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
sg = np.array(sg)
self._sim_env.physics.data.qpos[:] = currp
self._sim_env.physics.data.qvel[:] = currv
self.step([0, 0])
self._sim_env.task.dontreset = False
return sg
def is_goal(self):
"""Checks if the current state is a goal state."""
return self._sim_env.task.is_goal(self._sim_env.physics)
def step(self, action=None):
"""Steps the environment."""
time_step = self._sim_env.step(action)
self._sim_env.physics.data.qvel[:] = 0
return time_step
def get_observation(self):
"""Return image observation."""
obs = self._sim_env.task.get_observation(self._sim_env.physics)
im = self._sim_env.physics.render(256, 256, camera_id='fixed')
im = cv2.resize(im, (64, 64), interpolation=cv2.INTER_LANCZOS4)
return obs, im
| StarcoderdataPython |
1746978 | <filename>models/Global-Flow-Local-Attention/data/hmubi_dataset.py
import os.path
from data.base_dataset import BaseDataset
from data.image_folder import make_dataset
import pandas as pd
from util import pose_utils
import numpy as np
import torch
from tqdm import tqdm
class HMUBIDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
parser = BaseDataset.modify_commandline_options(parser, is_train)
if is_train:
parser.set_defaults(load_size=256)
else:
parser.set_defaults(load_size=256)
parser.set_defaults(old_size=(256, 256))
parser.set_defaults(structure_nc=18)
parser.set_defaults(image_nc=3)
parser.set_defaults(display_winsize=256)
return parser
def get_paths(self, opt):
if opt.phase == 'test':
root = os.path.join(opt.dataroot, 'test')
pairLst = os.path.join(opt.dataroot, 'pairs-test.txt')
elif opt.phase == 'train':
root = os.path.join(opt.dataroot, opt.phase)
pairLst = os.path.join(opt.dataroot, 'pairs-train.txt')
phase = opt.phase
name_pairs = self.init_categories(pairLst)
image_dir = os.path.join(root, 'image')
bonesLst = os.path.join(root, 'keypoints')
maskLst = os.path.join(root, 'mask')
return image_dir, bonesLst, maskLst, name_pairs
def init_categories(self, pairLst):
import os
print(os.getcwd())
pairs_file_train = pd.read_csv(pairLst, sep=' ', header=None)
size = len(pairs_file_train)
pairs = []
print('Building data pairs ...')
for i in tqdm(range(size)):
root = os.path.join(self.opt.dataroot, self.opt.phase)
P1_name = pairs_file_train.iloc[i][0].split('/')[1].split('.')[0]
P2_name = pairs_file_train.iloc[i][1].split('/')[1].split('.')[0]
# Need target keypoints, source keypoints, source image, and source mask
target_keypoints = os.path.join(root, 'keypoints/' + P2_name + '.txt')
target_image = os.path.join(root, 'image/' + P2_name + '.jpg')
target_mask = os.path.join(root, 'mask/' + P2_name + '.png')
source_keypoints = os.path.join(root, 'keypoints/' + P1_name + '.txt')
source_image = os.path.join(root, 'image/' + P1_name + '.jpg')
source_mask = os.path.join(root, 'mask/' + P1_name + '.png')
if self.opt.phase=='train':
if not os.path.exists(target_keypoints) or not os.path.exists(target_image) or not os.path.exists(target_mask) \
or not os.path.exists(source_keypoints) or not os.path.exists(source_image) or not os.path.exists(source_mask):
continue
pair = [pairs_file_train.iloc[i][0], pairs_file_train.iloc[i][1]]
pairs.append(pair)
print('Loading data pairs finished ...')
return pairs
def name(self):
return "HMUBIDataset" | StarcoderdataPython |
1714629 | <reponame>chenwenxiao/DOI
from enum import Enum
from typing import *
import mltk
import numpy as np
from .types import *
__all__ = [
'ArrayMapper', 'ArrayMapperList',
'Identity', 'Reshape', 'Flatten', 'Transpose', 'Pad',
'ChannelTranspose', 'ChannelFirstToLast', 'ChannelLastToFirst',
'ChannelLastToDefault', 'ChannelFirstToDefault',
'Affine', 'ScaleToRange', 'ReduceToBitDepth', 'Dequantize',
'BernoulliSample',
'DownSample', 'UpSample',
'GrayscaleToRGB', 'CropImage', 'ScaleImageMode', 'ScaleImage',
]
NumberType = Union[int, float, np.ndarray]
class ArrayMapper(object):
_input_info: ArrayInfo = None
_output_info: ArrayInfo = None
@property
def fitted(self) -> bool:
return self._input_info is not None
@property
def input_info(self) -> ArrayInfo:
if self._input_info is None:
raise RuntimeError(f'`fit()` has not been called: {self!r}')
return self._input_info
@property
def output_info(self) -> ArrayInfo:
if self._output_info is None:
raise RuntimeError(f'`fit()` has not been called: {self!r}')
return self._output_info
def _fit(self, info: ArrayInfo) -> ArrayInfo:
raise NotImplementedError()
def fit(self, info: ArrayInfo) -> ArrayInfo:
if self._input_info is not None:
raise RuntimeError(f'`fit()` has already been called: {self!r}')
self._input_info = info
self._output_info = self._fit(info)
return self._output_info
def fit_dataset(self, dataset, slot: str) -> ArrayInfo:
return self.fit(dataset.slots[slot])
def transform(self, array: mltk.Array) -> mltk.Array:
raise NotImplementedError()
def inverse_transform(self,
array: mltk.Array,
strict: bool = False) -> mltk.Array:
raise NotImplementedError()
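
# Typical ArrayMapper life cycle (sketch; `in_info` is an ArrayInfo describing
# the source arrays, e.g. obtained from `dataset.slots[slot]`):
#
#     mapper = SomeMapper(...)          # hypothetical concrete subclass
#     out_info = mapper.fit(in_info)    # fit exactly once before transforming
#     y = mapper.transform(x)
#     x2 = mapper.inverse_transform(y)  # best-effort inverse; may raise if
#                                       # the mapper is not invertible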
class ArrayMapperList(ArrayMapper, Sequence[ArrayMapper]):
mappers: List[ArrayMapper]
def __init__(self, mappers: Iterable[ArrayMapper]):
mappers = list(mappers)
if mappers:
fitted = mappers[0].fitted
for mapper in mappers[1:]:
if mapper.fitted != fitted:
raise ValueError(f'The `mappers` must be all fitted or '
f'all not fitted.')
if fitted:
self._input_info = mappers[0].input_info
self._output_info = mappers[-1].output_info
self.mappers = mappers
def __getitem__(self, item: int) -> ArrayMapper:
return self.mappers[item]
def __len__(self) -> int:
return len(self.mappers)
def __iter__(self) -> Iterator[ArrayMapper]:
return iter(self.mappers)
def _fit(self, info: ArrayInfo) -> ArrayInfo:
for mapper in self.mappers:
info = mapper.fit(info)
return info
def transform(self, array: mltk.Array) -> mltk.Array:
for mapper in self.mappers:
array = mapper.transform(array)
return array
def inverse_transform(self,
array: mltk.Array,
strict: bool = False) -> mltk.Array:
for mapper in reversed(self.mappers):
array = mapper.inverse_transform(array, strict=strict)
return array
class Identity(ArrayMapper):
def _fit(self, info: ArrayInfo) -> ArrayInfo:
return info
def transform(self, array: mltk.Array) -> mltk.Array:
return array
def inverse_transform(self,
array: mltk.Array,
strict: bool = False) -> mltk.Array:
return array
class Reshape(ArrayMapper):
in_shape: List[int]
out_shape: List[int]
def __init__(self, shape: List[int]):
out_shape = []
neg_one_count = 0
for s in shape:
if s == -1:
if neg_one_count > 0:
raise ValueError(f'At most one `-1` can be present in '
f'`shape`: got {shape!r}')
neg_one_count += 1
elif s < 0:
raise ValueError(f'Not a valid shape: {shape!r}')
out_shape.append(s)
self.out_shape = out_shape
def _fit(self, info: ArrayInfo) -> ArrayInfo:
info.require_shape(deterministic=False)
self.in_shape = list(info.shape)
in_size = int(np.prod(self.in_shape))
out_size = int(np.prod(self.out_shape))
        if (out_size >= 0 and out_size != in_size) or \
                (out_size < 0 and in_size % (-out_size) != 0):
raise ValueError(f'Cannot reshape array from {self.in_shape!r} to '
f'{self.out_shape}')
if out_size < 0:
for i, s in enumerate(self.out_shape):
if s == -1:
self.out_shape[i] = in_size // (-out_size)
break
return info.copy(shape=self.out_shape)
def transform(self, array: mltk.Array) -> mltk.Array:
arr_shape = mltk.utils.get_array_shape(array)
pos = len(arr_shape) - len(self.in_shape)
return np.reshape(array, list(arr_shape[:pos]) + self.out_shape)
def inverse_transform(self,
array: mltk.Array,
strict: bool = False) -> mltk.Array:
arr_shape = mltk.utils.get_array_shape(array)
pos = len(arr_shape) - len(self.out_shape)
return np.reshape(array, list(arr_shape[:pos]) + self.in_shape)
class Flatten(Reshape):
def __init__(self):
super().__init__([-1])
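
# Example: Flatten() maps a per-sample shape (3, 32, 32) to (3072,); any
# leading batch axes are preserved by transform() / inverse_transform().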
class Transpose(ArrayMapper):
perm: List[int]
inverse_perm: List[int]
def __init__(self, perm: List[int]):
perm = list(map(int, perm))
for a in perm:
if a >= 0:
raise ValueError(f'`perm` must all be negative integers.')
reverse_axis = [0] * len(perm)
for i in range(-len(perm), 0):
reverse_axis[perm[i]] = i
self.perm = perm
self.inverse_perm = reverse_axis
def _fit(self, info: ArrayInfo) -> ArrayInfo:
info.require_shape(deterministic=False)
in_shape = info.shape
if len(in_shape) != len(self.perm):
raise ValueError(f'The input shape is required to be '
f'{len(self.perm)}d for transpose axis {self.perm!r}: '
f'got input shape {in_shape!r}')
new_shape = []
for a in self.perm:
new_shape.append(in_shape[a])
return info.copy(shape=new_shape)
def transform(self, array: mltk.Array) -> mltk.Array:
front_perm = list(range(0, len(mltk.utils.get_array_shape(array)) - len(self.perm)))
return np.transpose(array, front_perm + self.perm)
def inverse_transform(self,
array: mltk.Array,
strict: bool = False) -> mltk.Array:
front_perm = list(range(0, len(mltk.utils.get_array_shape(array)) - len(self.inverse_perm)))
return np.transpose(array, front_perm + self.inverse_perm)
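
# Example: Transpose([-1, -3, -2]) maps the trailing axes (H, W, C) to
# (C, H, W), leaving any leading batch axes untouched; axes must be given as
# negative indices so the mapper stays agnostic to extra batch dimensions.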
class Pad(ArrayMapper):
fill_value: Union[int, float]
padding: List[Tuple[int, int]]
inv_slices: List[slice]
def __init__(self,
padding: Sequence[Tuple[int, int]],
fill_value: Union[int, float] = 0):
self.padding = list(padding)
self.fill_value = fill_value
self.inv_slices = [
slice(l, -r) if r > 0 else (
slice(l, None) if l > 0 else slice(None))
for l, r in self.padding
]
def _fit(self, info: ArrayInfo) -> ArrayInfo:
if len(info.shape) < len(self.padding):
raise ValueError(
f'`info.shape` must be at least {len(self.padding)}d: '
f'got shape {info.shape}')
shape = list(info.shape)
for i, (s, (l, r)) in enumerate(
zip(reversed(info.shape), reversed(self.padding)), 1):
shape[-i] = s + l + r
return info.copy(shape=shape)
def transform(self, array: mltk.Array) -> mltk.Array:
arr_shape = mltk.utils.get_array_shape(array)
padding = ([(0, 0)] * (len(arr_shape) - len(self.padding))) + self.padding
return np.pad(array, padding, mode='constant', constant_values=self.fill_value)
def inverse_transform(self,
array: mltk.Array,
strict: bool = False) -> mltk.Array:
arr_shape = mltk.utils.get_array_shape(array)
slc = (
[slice(None)] * (len(arr_shape) - len(self.inv_slices)) +
self.inv_slices
)
        return array[tuple(slc)]
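
# Example: Pad([(1, 1), (2, 2)]) pads the last two axes, so a per-sample shape
# (4, 4) becomes (6, 8); inverse_transform slices the padding back off.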
class ChannelTranspose(ArrayMapper):
from_format: ChannelFormat
to_format: ChannelFormat
internal_mapper: ArrayMapper
def __init__(self,
from_format: Union[str, ChannelFormat],
to_format: Union[str, ChannelFormat]):
self.from_format = ChannelFormat(from_format)
self.to_format = ChannelFormat(to_format)
def _fit(self, info: ArrayInfo) -> ArrayInfo:
info.require_shape(deterministic=False)
spatial_ndims = len(info.shape) - 1
if spatial_ndims not in (1, 2, 3):
raise ValueError(f'Shape not supported by `ChannelTranspose`: '
f'shape is {info.shape!r}')
if self.from_format == self.to_format:
perm = None
elif self.from_format == 'channel_last':
perm = [-1] + list(range(-spatial_ndims - 1, -1))
else:
perm = list(range(-spatial_ndims, 0)) + [-spatial_ndims - 1]
if perm is None:
self.internal_mapper = Identity()
else:
self.internal_mapper = Transpose(perm)
return self.internal_mapper.fit(info)
def transform(self, array: mltk.Array) -> mltk.Array:
return self.internal_mapper.transform(array)
def inverse_transform(self,
array: mltk.Array,
strict: bool = False) -> mltk.Array:
return self.internal_mapper.inverse_transform(array, strict)
class ChannelLastToFirst(ChannelTranspose):
def __init__(self):
super().__init__(ChannelFormat.CHANNEL_LAST, ChannelFormat.CHANNEL_FIRST)
class ChannelFirstToLast(ChannelTranspose):
def __init__(self):
super().__init__(ChannelFormat.CHANNEL_FIRST, ChannelFormat.CHANNEL_LAST)
class ChannelLastToDefault(ChannelTranspose):
def __init__(self):
from tensorkit import tensor as T
super().__init__(
ChannelFormat.CHANNEL_LAST,
ChannelFormat.CHANNEL_LAST if T.IS_CHANNEL_LAST else ChannelFormat.CHANNEL_FIRST
)
class ChannelFirstToDefault(ChannelTranspose):
def __init__(self):
from tensorkit import tensor as T
super().__init__(
ChannelFormat.CHANNEL_FIRST,
ChannelFormat.CHANNEL_LAST if T.IS_CHANNEL_LAST else ChannelFormat.CHANNEL_FIRST
)
def maybe_apply_affine(x, scale, bias):
if x is not None:
return x * scale + bias
def maybe_clip(x, low, high):
if low is not None and high is not None:
return np.clip(x, low, high)
elif low is None and high is None:
return x
elif low is not None:
return np.maximum(x, low)
elif high is not None:
return np.minimum(x, high)
def is_integer_dtype(dtype: str) -> bool:
return dtype in ('int8', 'uint8', 'int16', 'int32', 'int64', 'int', 'long')
class _BaseAffine(ArrayMapper):
need_transform: bool = True
in_dtype: str
in_dtype_is_int: bool
out_dtype: str
out_dtype_is_int: bool
in_range: Tuple[Optional[NumberType], Optional[NumberType]]
out_range: Tuple[Optional[NumberType], Optional[NumberType]]
scale: NumberType
bias: NumberType
def _fit(self, info: ArrayInfo) -> ArrayInfo:
self.in_dtype = info.dtype
self.in_dtype_is_int = is_integer_dtype(self.in_dtype)
self.out_dtype_is_int = is_integer_dtype(self.out_dtype)
self.in_range = (info.min_val, info.max_val)
return info.copy(
dtype=self.out_dtype,
min_val=self.out_range[0],
max_val=self.out_range[1],
)
def transform(self, array: mltk.Array) -> mltk.Array:
if not self.need_transform:
return array
array = maybe_clip(array * self.scale + self.bias, *self.out_range)
if self.out_dtype_is_int:
array = np.round(array)
return array.astype(self.out_dtype)
def inverse_transform(self,
array: mltk.Array,
strict: bool = False) -> mltk.Array:
if not self.need_transform:
return array
array = maybe_clip((array - self.bias) / float(self.scale), *self.in_range)
if self.in_dtype_is_int:
array = np.round(array)
return array.astype(self.in_dtype)
class Affine(_BaseAffine):
"""Scale the input array by affine transformation `Ax+b`."""
def __init__(self, scale: NumberType, bias: NumberType,
dtype: str = FLOAT_X):
self.out_dtype = dtype
self.scale = scale
self.bias = bias
def _fit(self, info: ArrayInfo) -> ArrayInfo:
self.out_range = (
maybe_apply_affine(info.min_val, self.scale, self.bias),
maybe_apply_affine(info.max_val, self.scale, self.bias),
)
return super()._fit(info)
class ScaleToRange(_BaseAffine):
"""Scale the input array to a specific value range."""
min_val: NumberType
max_val: NumberType
def __init__(self, min_val: NumberType, max_val: NumberType,
dtype: str = FLOAT_X):
self.out_dtype = dtype
self.min_val = min_val
self.max_val = max_val
self.out_range = (self.min_val, self.max_val)
def _fit(self, info: ArrayInfo) -> ArrayInfo:
info.require_min_max_val()
self.need_transform = (self.min_val != info.min_val) or (self.max_val != info.max_val)
self.scale = (self.max_val - self.min_val) / float(info.max_val - info.min_val)
self.bias = self.min_val - info.min_val * self.scale
return super()._fit(info)
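
# Example: ScaleToRange(-1., 1.) fitted on uint8 data in [0, 255] yields
# scale = 2/255 and bias = -1, mapping 0 -> -1.0 and 255 -> 1.0.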
class ReduceToBitDepth(ArrayMapper):
"""Reduce the bit depth of a discrete array."""
bit_depth: int
bit_depth_diff: int
dtype_is_int: bool
need_transform: bool
    in_bin_size: NumberType
    out_bin_size: NumberType
    min_val: NumberType
    max_val: NumberType
def __init__(self, bit_depth: int):
self.bit_depth = bit_depth
def _fit(self, info: ArrayInfo) -> ArrayInfo:
info.require_discrete()
info.require_min_max_val()
if 2 ** info.bit_depth != info.n_discrete_vals:
raise ValueError(f'`info.n_discrete_vals != 2 ** info.bit_depth`.')
if info.bit_depth < self.bit_depth:
raise ValueError(f'Cannot enlarge bit-depth with `ReduceToBitDepth` mapper.')
new_bit_depth = min(info.bit_depth, self.bit_depth)
bit_depth_diff = info.bit_depth - new_bit_depth
new_n_vals = 2 ** new_bit_depth # the new n_discrete_vals
in_bin_size = (info.max_val - info.min_val) / (info.n_discrete_vals - 1.)
out_bin_size = in_bin_size * (2 ** bit_depth_diff)
need_transform = new_bit_depth != info.bit_depth
self.need_transform = need_transform
if not need_transform:
return info
self.bit_depth_diff = bit_depth_diff
self.dtype_is_int = is_integer_dtype(info.dtype)
self.in_bin_size = in_bin_size
self.out_bin_size = out_bin_size
self.min_val = info.min_val
self.max_val = self.min_val + out_bin_size * (new_n_vals - 1)
if self.dtype_is_int:
self.in_bin_size = int(round(self.in_bin_size))
self.out_bin_size = int(round(self.out_bin_size))
self.max_val = int(round(self.max_val))
return info.copy(
n_discrete_vals=new_n_vals,
bit_depth=new_bit_depth,
max_val=self.max_val
)
def transform(self, array: mltk.Array) -> mltk.Array:
if not self.need_transform:
return array
ret = np.round((array - self.min_val) / self.in_bin_size).astype(np.int32)
ret = ret >> self.bit_depth_diff
ret = self.min_val + ret * self.out_bin_size
if self.dtype_is_int:
ret = np.round(ret)
ret = ret.astype(array.dtype)
return ret
def inverse_transform(self,
array: mltk.Array,
strict: bool = False) -> mltk.Array:
if strict and self.need_transform:
raise RuntimeError(f'`ReduceToBitDepth` is not strictly invertible.')
return array
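
# Example: reducing 8-bit data in [0, 255] to bit_depth=5 keeps 32 levels with
# out_bin_size = 8, so transform maps pixel 137 -> (137 >> 3) * 8 = 136 and the
# new max_val becomes 31 * 8 = 248.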
class Dequantize(ArrayMapper):
"""Adds uniform noise to discrete array, making it continuous."""
in_dtype: str
out_dtype: str
bin_size: NumberType
in_min_val: NumberType
# small infinitesimal to ensure the generated noise reside in [-0.5, 0.5),
# rather than [-0.5, 0.5]
epsilon: float
def __init__(self,
dtype: str = FLOAT_X,
epsilon: float = 0.0):
self.out_dtype = dtype
self.epsilon = epsilon
def _fit(self, info: ArrayInfo) -> ArrayInfo:
info.require_discrete()
info.require_min_max_val()
self.in_dtype = info.dtype
self.in_min_val = info.min_val
self.bin_size = np.asarray(
(info.max_val - info.min_val) / (info.n_categories - 1.),
dtype=self.out_dtype
)
return info.copy(
dtype=self.out_dtype,
min_val=float(info.min_val - 0.5 * self.bin_size),
max_val=float(info.max_val + 0.5 * self.bin_size),
is_discrete=False,
)
def transform(self, array: mltk.Array) -> mltk.Array:
noise = np.random.random(size=mltk.utils.get_array_shape(array))
if self.epsilon > 0.:
noise = np.minimum(noise, 1. - self.epsilon)
noise = (noise - 0.5) * self.bin_size
array = array + noise
return array.astype(self.out_dtype)
def inverse_transform(self,
array: mltk.Array,
strict: bool = False) -> mltk.Array:
array = self.in_min_val + self.bin_size * np.asarray(
(array - self.output_info.min_val) / self.bin_size,
dtype=np.int32
)
return array.astype(self.in_dtype)
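
# Round-trip sketch for 8-bit data (min_val=0, max_val=255, 256 levels, hence
# bin_size == 1): transform draws v + U[-0.5, 0.5) for each pixel v, and
# inverse_transform re-bins the floats back onto the integer grid:
#
#     deq = Dequantize()
#     deq.fit(uint8_info)                      # hypothetical ArrayInfo
#     y = deq.transform(np.array([0, 255]))    # e.g. array([ 0.31, 254.77])
#     deq.inverse_transform(y)                 # -> array([  0, 255])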
class BernoulliSample(ArrayMapper):
in_dtype: str
out_dtype: str
def __init__(self, dtype: str = 'int32'):
self.out_dtype = dtype
def _fit(self, info: ArrayInfo) -> ArrayInfo:
if info.is_discrete or info.min_val != 0 or info.max_val != 1:
raise ValueError('The source array values are not continuous, or '
'not within the range [0, 1]. ')
self.in_dtype = info.dtype
return info.copy(dtype=self.out_dtype, is_discrete=True,
n_discrete_vals=2, bit_depth=1)
def transform(self, array: mltk.Array) -> mltk.Array:
return np.random.binomial(n=1, p=array).astype(self.out_dtype)
def inverse_transform(self,
array: mltk.Array,
strict: bool = False) -> mltk.Array:
if strict:
raise RuntimeError('`BernoulliSampler` is not strictly invertible.')
return np.asarray(array, dtype=self.in_dtype)
def down_sample(array, scale: List[int]):
shape = mltk.utils.get_array_shape(array)
n = len(shape)
k = len(scale)
if len(shape) < k:
raise ValueError(f'`array` must be at least {k}d.')
temp_shape = list(shape[:n - k])
reduce_axis = []
next_axis = n - k
for a, b in zip(shape[n-k:], scale):
if a % b != 0:
raise ValueError(
f'`array.shape` cannot be evenly divided by `scale`: '
f'shape {shape} vs scale {scale}')
temp_shape.extend((a // b, b))
reduce_axis.append(next_axis + 1)
next_axis += 2
array = np.reshape(array, temp_shape)
array = np.mean(array, axis=tuple(reduce_axis), keepdims=False)
return array
def up_sample(array, scale: List[int]):
shape = mltk.utils.get_array_shape(array)
n = len(shape)
k = len(scale)
if len(shape) < k:
raise ValueError(f'`array` must be at least {k}d.')
temp_shape = list(shape[:n - k])
out_shape = list(temp_shape)
tile_rep = [1] * (n - k)
for a, b in zip(shape[n-k:], scale):
temp_shape.extend((a, 1))
out_shape.append(a * b)
tile_rep.extend((1, b))
array = np.reshape(array, temp_shape)
array = np.tile(array, tile_rep)
array = array.reshape(out_shape)
return array
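
# Quick check for the two helpers above (plain numpy arrays, for which
# `mltk.utils.get_array_shape` is assumed to return `array.shape`):
#
#     x = np.arange(16., dtype=np.float32).reshape([1, 4, 4])
#     y = down_sample(x, [2, 2])   # shape (1, 2, 2); each entry a 2x2 mean
#     z = up_sample(y, [2, 2])     # shape (1, 4, 4); nearest-neighbour blowup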
class DownSample(ArrayMapper):
"""Down-sampling by averaging over multiple pixels."""
in_dtype: str
out_dtype: str
scale: List[int]
def __init__(self, scale: Sequence[int], dtype=FLOAT_X):
self.scale = list(scale)
self.out_dtype = dtype
def _fit(self, info: ArrayInfo) -> ArrayInfo:
info.require_shape(deterministic=False)
shape = list(info.shape)
k = len(self.scale)
if len(shape) < k:
raise ValueError(f'`info.shape` must be at least {k}d.')
for i, (size, ratio) in enumerate(
zip(reversed(shape), reversed(self.scale))):
if size is not None:
if size % ratio != 0:
raise ValueError(
f'`info.shape` cannot be evenly divided by `scale`: '
f'shape {info.shape} vs scale {self.scale}')
shape[-(i + 1)] = size // ratio
self.in_dtype = info.dtype
return info.copy(dtype=self.out_dtype, shape=shape)
def transform(self, array: mltk.Array) -> mltk.Array:
return down_sample(array, self.scale).astype(self.out_dtype)
def inverse_transform(self,
array: mltk.Array,
strict: bool = False) -> mltk.Array:
if strict:
raise RuntimeError('`DownSample` is not strictly invertible.')
return up_sample(array, self.scale).astype(self.in_dtype)
class UpSample(ArrayMapper):
in_dtype: str
out_dtype: str
scale: List[int]
def __init__(self, scale: Sequence[int], dtype=FLOAT_X):
self.scale = list(scale)
self.out_dtype = dtype
def _fit(self, info: ArrayInfo) -> ArrayInfo:
info.require_shape(deterministic=False)
shape = list(info.shape)
k = len(self.scale)
if len(shape) < k:
raise ValueError(f'`info.shape` must be at least {k}d.')
for i, (size, ratio) in enumerate(
zip(reversed(shape), reversed(self.scale))):
if size is not None:
shape[-(i + 1)] = size * ratio
self.in_dtype = info.dtype
return info.copy(dtype=self.out_dtype, shape=shape)
def transform(self, array: mltk.Array) -> mltk.Array:
return up_sample(array, self.scale).astype(self.out_dtype)
def inverse_transform(self,
array: mltk.Array,
strict: bool = False) -> mltk.Array:
return down_sample(array, self.scale).astype(self.in_dtype)
class BaseImageMapper(ArrayMapper):
channel_last: Optional[bool]
def __init__(self, channel_last: Optional[bool] = None):
self.channel_last = channel_last
def _fit_channel_axis(self, info: ArrayInfo):
info.require_shape(False)
if len(info.shape) != 3:
raise ValueError(f'Invalid shape: {info.shape}')
if self.channel_last is None:
if (info.shape[-1] in (1, 3)) == (info.shape[-3] in (1, 3)):
raise ValueError('`channel_last` cannot be determined automatically.')
self.channel_last = info.shape[-1] in (1, 3)
if (self.channel_last and info.shape[-1] not in (1, 3)) or \
(not self.channel_last and info.shape[-3] not in (1, 3)):
raise ValueError(f'Invalid shape {info.shape!r} for `channel_last` '
f'{self.channel_last!r}')
def _get_spatial_shape(self, shape):
if self.channel_last is True:
return shape[:-1]
elif self.channel_last is False:
return shape[1:]
else:
return shape
def _replace_spatial_shape(self, shape, spatial):
if self.channel_last is True:
return list(spatial) + [shape[-1]]
elif self.channel_last is False:
return [shape[0]] + list(spatial)
else:
return list(spatial)
class GrayscaleToRGB(BaseImageMapper):
def _fit(self, info: ArrayInfo) -> ArrayInfo:
self._fit_channel_axis(info)
shape = list(info.shape)
if self.channel_last:
if shape[-1] != 1:
raise ValueError(f'Invalid shape: {shape}')
shape[-1] = 3
else:
if shape[-3] != 1:
raise ValueError(f'Invalid shape: {shape}')
shape[-3] = 3
return info.copy(shape=shape)
def transform(self, array: mltk.Array) -> mltk.Array:
shape = mltk.utils.get_array_shape(array)
reps = [1] * len(shape)
reps[-1 if self.channel_last else -3] = 3
return np.tile(array, reps)
def inverse_transform(self,
array: mltk.Array,
strict: bool = False) -> mltk.Array:
if self.channel_last:
return array[..., 0:1]
else:
return array[..., 0:1, :, :]
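
# Example: with channel_last=True, GrayscaleToRGB maps a per-sample shape
# (H, W, 1) to (H, W, 3) by tiling the single channel; the inverse simply
# takes the first channel back out.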
class CropImage(BaseImageMapper):
pos: Tuple[int, int]
size: Tuple[int, int]
def __init__(self,
*,
bbox: Optional[Sequence[int]] = None, # (top, bottom, left, right)
pos: Optional[Sequence[int]] = None,
size: Optional[Sequence[int]] = None,
channel_last: Optional[bool] = None):
if not ((bbox is None and pos is not None and size is not None) or
(bbox is not None and pos is None and size is None)):
raise ValueError('Either `bbox`, or a pair of `pos` and `size` '
'should be specified, but not both.')
if bbox is not None:
if len(bbox) != 4:
raise ValueError(f'`bbox` must be a sequence of 4 integers.')
pos = (bbox[0], bbox[2])
size = (bbox[1] - bbox[0], bbox[3] - bbox[2])
else:
if len(pos) != 2 or len(size) != 2:
raise ValueError(f'`pos` and `size` must be sequences of 2 '
f'integers.')
pos = tuple(pos)
size = tuple(size)
super().__init__(channel_last)
self.pos = pos
self.size = size
def _check_shape_against_bbox(self, shape):
if (shape[0] is not None and self.size[0] > shape[0]) or \
(shape[1] is not None and self.size[1] > shape[1]):
raise ValueError(f'Spatial shape `{shape!r}` cannot be cropped: pos '
f'= {self.pos}, size = {self.size}.')
def _fit(self, info: ArrayInfo) -> ArrayInfo:
self._fit_channel_axis(info)
self._check_shape_against_bbox(self._get_spatial_shape(info.shape))
new_shape = self._replace_spatial_shape(info.shape, list(self.size))
return info.copy(shape=new_shape)
def transform(self, array: mltk.Array) -> mltk.Array:
p = self.pos
s = self.size
if self.channel_last is True:
return array[..., p[0]: p[0] + s[0], p[1]: p[1] + s[1], :]
else:
return array[..., p[0]: p[0] + s[0], p[1]: p[1] + s[1]]
def inverse_transform(self, array: mltk.Array, strict: bool = False) -> mltk.Array:
raise RuntimeError('`CropImage` is not invertible.')
class ScaleImageMode(int, Enum):
SCIPY_NO_AA = 0
"""Use no antialias method with SciPy resize kernel."""
SCIPY_GAUSSIAN_AA = 1
"""Use the default gaussian filter antialias method with SciPy resize kernel."""
CELEBA_GAUSSIAN_AA = 99
"""
The scale method from:
https://github.com/andersbll/autoencoding_beyond_pixels/blob/master/dataset/celeba.py
"""
class ScaleImage(BaseImageMapper):
mode: ScaleImageMode
resize_kernel: Callable[[np.ndarray], np.ndarray]
    value_range: Tuple[Union[int, float], Union[int, float]]
size: List[int]
def __init__(self,
size: Sequence[int], # (height, width)
mode: Union[ScaleImageMode, int] = ScaleImageMode.SCIPY_GAUSSIAN_AA,
channel_last: Optional[bool] = None):
if len(size) != 2:
raise ValueError(f'`size` must be a sequence of two integers.')
super().__init__(channel_last)
self.mode = ScaleImageMode(mode)
self.size = list(size)
self.resize_kernel = self._scipy_kernel
def _fit(self, info: ArrayInfo) -> ArrayInfo:
self._fit_channel_axis(info)
r = (info.min_val, info.max_val)
if r != (0, 255) and r != (0, 1):
raise ValueError(f'Images pixel values must within range (0, 255) '
f'or (0, 1): got {r!r}')
self.value_range = r
return info.copy(shape=self._replace_spatial_shape(info.shape, self.size))
def _scipy_kernel(self, img: np.ndarray):
from skimage import transform, filters
dtype = img.dtype
# ensure axis order: (H, W, C)
if self.channel_last is False:
img = img.transpose([-2, -1, -3])
# get the spatial shape
shape = img.shape[-3: -1] if self.channel_last is True else img.shape[-2:]
# ensure image value range is in (0, 1)
if self.value_range[1] == 255:
img = img.astype(np.float32) / 255.
# scale the image
new_size = tuple(self.size + [img.shape[-1]])
if self.mode == ScaleImageMode.SCIPY_NO_AA:
img = transform.resize(img, new_size, anti_aliasing=False)
elif self.mode == ScaleImageMode.SCIPY_GAUSSIAN_AA:
img = transform.resize(img, new_size, anti_aliasing=True)
elif self.mode == ScaleImageMode.CELEBA_GAUSSIAN_AA:
# https://github.com/andersbll/autoencoding_beyond_pixels/blob/master/dataset/celeba.py
scale = (shape[0] * 1. / self.size[0], shape[1] * 1. / self.size[1])
sigma = (np.sqrt(scale[0]) / 2., np.sqrt(scale[1]) / 2.)
img = filters.gaussian(img, sigma=sigma, multichannel=len(img.shape) > 2)
img = transform.resize(
img, tuple(self.size + [img.shape[-1]]), order=3,
# Turn off anti-aliasing, since we have done gaussian filtering.
# Note `anti_aliasing` defaults to `True` until skimage >= 0.15,
# which version is released in 2019/04, while the repo
# `andersbll/autoencoding_beyond_pixels` was released in 2015.
anti_aliasing=False,
# same reason as above
mode='constant',
)
# scale back to the original range
if self.value_range[1] == 255:
img = (img * 255)
# back to the original axis order
if self.channel_last is False:
img = img.transpose([-1, -3, -2])
# return the image
img = img.astype(dtype)
return img
def transform(self, array: mltk.Array) -> mltk.Array:
value_ndims = 3
shape = mltk.utils.get_array_shape(array)
if len(shape) < value_ndims:
raise ValueError(f'`array` must be at least {value_ndims}d: '
f'got shape {shape!r}.')
front_shape, back_shape = shape[:-value_ndims], shape[-value_ndims:]
array = np.reshape(array, [-1] + list(back_shape))
array = np.stack([self.resize_kernel(np.asarray(im)) for im in array],
axis=0)
array = np.reshape(array, front_shape + array.shape[-value_ndims:])
return array
def inverse_transform(self, array: mltk.Array, strict: bool = False) -> mltk.Array:
raise RuntimeError(f'`ScaleImage` is not invertible.')
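
# End-to-end sketch (hypothetical 8-bit image pipeline; the mapper order shown
# here is illustrative, not prescribed by this module):
#
#     mapper = ArrayMapperList([
#         ScaleImage([32, 32], channel_last=True),
#         ChannelLastToDefault(),
#         Dequantize(),
#         ScaleToRange(0., 1.),
#     ])
#     out_info = mapper.fit(in_info)
#     batch = mapper.transform(images)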
| StarcoderdataPython |
1653197 | from mathutils import MathUtils
import os
import numpy as np
import re
from glogpy.dynamics_job import dynamics_job as dj
class ParseLogAll():
def parse(txt, step_lim=None):
sections = re.split("\*{4} Time.{1,}\d{1,}\.\d{1,}.{1,}\*{4}", txt )
res = []
nsteps = 0
for s in sections[1:]:
            if step_lim is not None and nsteps == step_lim:
                break
try:
data = dj(s.strip())
data = data.parse()
res.append(data)
            except Exception as exc:
                raise Exception(f'An error occurred parsing step {nsteps}') from exc
nsteps += 1
return res, nsteps
def I_ImportLogalls(basedir, ngwps, gwp_dir_fmt='gwp{}_V1', fname='gwp{}_V1_dd_data_nm.logall',
step_lim=None, print_steps=True,
quantities=['xyz', 'ci', 'csf', 'mq', 'sd', 'an', 'am']):
# Quantities options are
# xyz = geometries
# ci = CI Coeff
# csf = CSF Coeff
# mq = Mulliken Q #TODO Split into sum/unsum?
# sd = Spin density #TODO Split into sum/unsum?
# dp = Dipole moment
# an = Atom numbers
# am = Atom masses
# fo = Forces
# maxf = max + rms force
# case = CASSCF Energy
# casde = CASSCF DE
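    # Example call (using the default layout gwpN_V1/gwpN_V1_dd_data_nm.logall;
    # 'run_dir' is a hypothetical base directory):
    #   res = I_ImportLogalls('run_dir', ngwps=4, quantities=['xyz', 'ci', 'mq'])
    #   res['geomx'].shape -> (4, steps, natoms, 3)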
steps = None
datax = []
for i in range(1, ngwps+1):
if print_steps: print(f'Parsing GWP {i}/{ngwps}')
# Plus one is to compensate for weirdness of python range()
# We want to loop over 1,2,3 ... ngwp-1, ngwp
        logallf = os.path.join(basedir, gwp_dir_fmt.format(i), fname.format(i))
        assert(os.path.exists(logallf))
        with open(logallf, 'r') as f:
data = f.read()
parsed_data, nsteps = ParseLogAll.parse(data, step_lim = step_lim)
        # Make sure all GWP logalls contain the same number of steps
        if steps is None:
steps = nsteps
else:
assert(steps == nsteps)
datax.append(parsed_data)
assert(len(datax) == ngwps) # Quick sanity check (GWP in = GWP out)
results = {}
results['steps'] = steps
# First pull constants
if 'an' in quantities:
results['atomnos'] = datax[0][0]['atomnos']
if 'am' in quantities:
results['atommasses'] = datax[0][0]['atommasses']
# Work way through scalar data to gen [GWP x Step]
if 'ci' in quantities:
results['adiabats'] = np.zeros([ngwps, steps, len(datax[0][0]['adiabats'])], dtype=complex)
if 'csf' in quantities:
results['diabats'] = np.zeros([ngwps, steps, len(datax[0][0]['diabats'])], dtype=complex)
if 'maxf' in quantities:
results['maxf'] = np.zeros([ngwps, steps])
results['rmsf'] = np.zeros([ngwps, steps])
# Vector quantities [GWP x Step x Dim]
if 'xyz' in quantities:
results['geomx'] = np.zeros([ngwps, steps, len(datax[0][0]['geom_init']), 3])
if 'fo' in quantities:
results['forces'] = np.zeros((ngwps, steps, len(datax[0][0]['forces']), 3))
if 'dp' in quantities:
results['dipolemom'] = np.zeros((ngwps, steps, 3))
if 'casde' in quantities:
results['casde'] = np.zeros((ngwps, steps))
if 'case' in quantities:
results['case'] = np.zeros((ngwps, steps))
if 'mq' in quantities:
results['mullikensum'] = np.zeros([ngwps,steps, len(datax[0][0]['mulliken_sum'])])
results['mullikenmap'] = [int(i) for i in list(datax[0][0]['mulliken_sum'].keys())]
if 'sd' in quantities:
results['spindensum'] = np.zeros([ngwps,steps, len(datax[0][0]['spinden_sum'])])
results['spindenmap'] = [int(i) for i in list(datax[0][0]['spinden_sum'].keys())]
for i, gwp in enumerate(datax):
for j, ts in enumerate(gwp):
if 'ci' in quantities:
results['adiabats'][i,j] = np.array(MathUtils.dict_to_list(ts['adiabats']))
if 'csf' in quantities:
results['diabats'][i,j] = np.array(MathUtils.dict_to_list(ts['diabats']))
if 'xyz' in quantities:
gtemp = MathUtils.dict_to_list(ts['geom_init'])
gtemp = [x[1] for x in gtemp]
results['geomx'][i,j] = np.array(gtemp)
if 'fo' in quantities:
ftemp = MathUtils.dict_to_list(ts['forces'])
results['forces'][i,j] = np.array(ftemp)
if 'dp' in quantities:
results['dipolemom'][i,j] = np.array(ts['dipole'][0])
if 'casde' in quantities:
results['casde'][i,j] = ts['casde']
if 'case' in quantities:
results['case'][i,j] = ts['case']
if 'mq' in quantities:
for atomidx, mullsum in ts['mulliken_sum'].items():
results['mullikensum'][i,j,results['mullikenmap'].index(atomidx)] = mullsum
if 'sd' in quantities:
for atomidx, sdsum in ts['spinden_sum'].items():
results['spindensum'][i,j,results['spindenmap'].index(atomidx)] = sdsum
if 'maxf' in quantities:
results['maxf'][i,j] = ts['maxforce']
results['rmsf'][i,j] = ts['rmsforce']
return results | StarcoderdataPython |
158307 | import csv
from pathlib import Path
equity_funding = [
{"Company": "CryptoVisors", "Amount": 200000, "Series": "A"},
{"Company": "Flutterwave", "Amount": 65000000, "Series": "D"},
{"Company": "nCino", "Amount": 80000000, "Series": "D"},
{"Company": "Privacy.com", "Amount": 10000000, "Series": "B"},
]
# Create an empty list called `big_raisers`
# YOUR CODE HERE!
big_raisers = []
# Iterate (loop) through each dictionary in the list of dictionaries.
for equity in equity_funding:
if equity["Amount"] >= 50000000:
big_raisers.append(equity)
# print(big_raisers)
header = ["Company", "Amount", "Series"]
csvpath = Path('large_equity_rounds.csv')
print("writting the data to csv file...")
with open(csvpath, "w") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=",")
csvwriter.writerow(header)
    for item in big_raisers:
        csvwriter.writerow(item.values())
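
# Equivalent sketch with csv.DictWriter, which writes the dict rows directly
# (shown for comparison only; not used above):
#
#     with open(csvpath, "w", newline="") as csvfile:
#         writer = csv.DictWriter(csvfile, fieldnames=header)
#         writer.writeheader()
#         writer.writerows(big_raisers)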
| StarcoderdataPython |
149529 | <gh_stars>0
import csv
import math
import random
import operator
import logging
logging.basicConfig(format='[%(asctime)s] [%(name)s:%(lineno)d] | [%(levelname)s]: %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def load_data(filename, split, trainingSet=None, testSet=None):
"""load IRIS dataset and randomly split it into test set and training set."""
if trainingSet is None:
trainingSet = []
if testSet is None:
testSet = []
with open(filename, 'rt') as csvfile:
lines = csv.reader(csvfile)
next(lines, None) # skip the headers
dataset = list(lines)
for x in range(len(dataset) - 1):
for y in range(4):
dataset[x][y] = float(dataset[x][y])
if random.random() < split:
trainingSet.append(dataset[x])
else:
testSet.append(dataset[x])
def euclidean_distance(instance1, instance2, length):
"""euclidean distance calculation."""
distance = 0
for x in range(length-1):
distance += pow((instance1[x] - instance2[x]), 2)
return math.sqrt(distance)
def get_neighbors(trainingSet, testInstance, k):
"""selecting subset with the smallest distance."""
distances = []
length = len(testInstance) - 1
for x in range(len(trainingSet)):
dist = euclidean_distance(testInstance, trainingSet[x], length)
distances.append((trainingSet[x], dist))
distances.sort(key=operator.itemgetter(1))
neighbors = []
for x in range(k):
neighbors.append(distances[x][0])
return neighbors
def get_predicted_response(neighbors):
classVotes = {}
for x in range(len(neighbors)):
response = neighbors[x][-1]
if response in classVotes:
classVotes[response] += 1
else:
classVotes[response] = 1
sortedVotes = sorted(classVotes.items(), key=operator.itemgetter(1), reverse=True)
return sortedVotes[0][0]
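
# The vote above can be written more compactly with collections.Counter
# (note: tie-breaking order may differ from the manual sort):
#
#     from collections import Counter
#     def get_predicted_response(neighbors):
#         return Counter(n[-1] for n in neighbors).most_common(1)[0][0]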
def get_accuracy(testSet, predictions):
"""Calculate accuracy."""
correct = 0
for x in range(len(testSet)):
        if testSet[x][-1] == predictions[x]:
correct = correct + 1
return correct / float(len(testSet)) * 100
def apply_knn():
# prepare data
trainingSet = []
testSet = []
split = 0.67 # 67% of input dataset is for training, rest is used for test-dataset
logger.info("--- Begin Loading DataSet --- ")
load_data('./Iris.csv', split, trainingSet, testSet)
logger.info('Training-Set SIZE: ' + repr(len(trainingSet)))
logger.info('Test-Set SIZE: ' + repr(len(testSet)))
logger.info("--- Loading DataSet Completed.--- ")
# generate predictions
predictions = []
k = 3
for x in range(len(testSet)):
neighbors = get_neighbors(trainingSet, testSet[x], k)
result = get_predicted_response(neighbors)
predictions.append(result)
logger.info('predicted=' + repr(result) + ', actual=' + repr(testSet[x][-1]))
accuracy = get_accuracy(testSet, predictions)
logger.info('Accuracy: ' + repr(accuracy) + '%')
if __name__ == "__main__":
"""Start KNN Process."""
apply_knn()
| StarcoderdataPython |
1712198 | <reponame>jonasrla/desafio_youse<filename>parte_2/Context/create_policy_context.py
from .base_context import BaseContext
from pyspark.sql import functions as f
class CreatePolicyContext(BaseContext):
def __init__(self, file_path):
self.app_name = 'Process Create Policy'
super().__init__(file_path)
def process(self):
df_policy = self.transformation()
self.append_table(df_policy, 'policies')
def transformation(self):
df_result = self.input.selectExpr('payload.policy_number as id',
'payload.order_uuid as order_id',
'payload.insurance_type as insurance_type',
'raw_timestamp as created_at')
df_result = df_result.withColumn('status', f.lit('created'))
df_result = df_result.withColumn('created_at',
f.from_unixtime(df_result.created_at))
return df_result
| StarcoderdataPython |
4839958 | <reponame>GeGao2014/fairlearn
"""Script to dynamically update the ReadMe file for a particular release
Since PyPI and GitHub have slightly different ideas about markdown, we have to update
the ReadMe file when we upload to PyPI. This script makes the necessary changes.
Most of the updates performed should be fairly robust. The one which may give trouble
is in '_update_current_version' which looks for _CURRENT_RELEASE_PATTERN in the
text in order to update both the text and the link.
The produced file assumes that a tag 'vX' (where X corresponds to the current version
of `fairlearn`) exists in the repo. Otherwise, the links won't work.
"""
import argparse
import logging
import os
import re
import sys
from _utils import _LogWrapper
_logger = logging.getLogger(__name__)
_BASE_URI_FORMAT = "https://github.com/fairlearn/fairlearn/tree/v{0}"
_CURRENT_RELEASE_PATTERN = r"\[fairlearn v(\S+)\]\(https://github.com/fairlearn/fairlearn/tree/v\1\)" # noqa: E501
_OTHER_MD_REF_PATTERN = r"\]\(\./(\w+\.md)"
_SAME_MD_REF_PATTERN = r"\]\((#.+)\)"
def _get_base_path(target_version):
return _BASE_URI_FORMAT.format(target_version)
def _update_current_version(line, target_version):
_logger.info("Starting %s", sys._getframe().f_code.co_name)
current_release_pattern = re.compile(_CURRENT_RELEASE_PATTERN)
# Extract the current version from the line
match = current_release_pattern.search(line)
result = line
if match:
_logger.info("Matched %s", match)
# Replace with the updated version
result = result.replace(match.groups()[0], target_version)
_logger.info("Updated string: %s", result.rstrip())
return result
def _update_other_markdown_references(line, target_version):
_logger.info("Starting %s", sys._getframe().f_code.co_name)
markdown_ref_pattern = re.compile(_OTHER_MD_REF_PATTERN)
result = line
match = markdown_ref_pattern.search(line)
if match:
_logger.info("Matched %s", match)
for m in match.groups():
old_str = "./{0}".format(m)
new_str = "{0}/{1}".format(_get_base_path(target_version), m)
result = result.replace(old_str, new_str)
_logger.info("Updated string: %s", result.rstrip())
return result
def _update_same_page_references(line, target_version):
_logger.info("Starting %s", sys._getframe().f_code.co_name)
same_page_ref_pattern = re.compile(_SAME_MD_REF_PATTERN)
result = line
match = same_page_ref_pattern.search(line)
if match:
_logger.info("Matched %s", match)
for m in match.groups():
old_str = m
new_str = "{0}{1}".format(_get_base_path(target_version), m)
result = result.replace(old_str, new_str)
_logger.info("Updated string: %s", result.rstrip())
return result
def _process_line(line, target_version):
_logger.info("Starting %s", sys._getframe().f_code.co_name)
result = _update_current_version(line, target_version)
result = _update_other_markdown_references(result, target_version)
result = _update_same_page_references(result, target_version)
return result
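
# Illustrative single-line rewrite (assuming target_version == '0.4.0';
# 'CHANGES.md' is a hypothetical file name):
#   in:  'See [fairlearn v0.3.0](https://github.com/fairlearn/fairlearn/tree/v0.3.0) and [changes](./CHANGES.md).'
#   out: 'See [fairlearn v0.4.0](https://github.com/fairlearn/fairlearn/tree/v0.4.0) and [changes](https://github.com/fairlearn/fairlearn/tree/v0.4.0/CHANGES.md).'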
def process_readme(input_file_name, output_file_name):
sys.path.append(os.getcwd())
import fairlearn
target_version = fairlearn.__version__
_logger.info("fairlearn version: %s", target_version)
text_lines = []
with _LogWrapper("reading file {}".format(input_file_name)):
with open(input_file_name, 'r') as input_file:
text_lines = input_file.readlines()
result_lines = [_process_line(line, target_version) for line in text_lines]
with _LogWrapper("writing file {}".format(output_file_name)):
with open(output_file_name, 'w') as output_file:
output_file.writelines(result_lines)
def build_argument_parser():
desc = "Process ReadMe file for PyPI"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("--input-file-name", help="Path to the file to be processed",
required=True)
parser.add_argument("--output-file-name", help="Path to store the processed file",
required=True)
return parser
def main(argv):
parser = build_argument_parser()
args = parser.parse_args(argv)
process_readme(args.input_file_name, args.output_file_name)
if __name__ == "__main__":
main(sys.argv[1:])
| StarcoderdataPython |
3392720 | <gh_stars>0
"""empty message
Revision ID: f82b5fe93062
Revises: <PASSWORD>
Create Date: 2020-07-15 19:04:44.063507
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('nocab',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('words_es', sa.String(length=128), nullable=True),
sa.Column('form', sa.Integer(), nullable=True),
sa.Column('def_es', sa.String(length=1000), nullable=True),
sa.Column('frequency', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.drop_table('vocab')
op.create_foreign_key(None, 'association', 'nocab', ['vocab_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'association', type_='foreignkey')
op.create_table('vocab',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('words_es', sa.VARCHAR(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.drop_table('nocab')
# ### end Alembic commands ###
| StarcoderdataPython |
3270996 | <gh_stars>100-1000
"""
Copyright 2019, ETH Zurich
This file is part of L3C-PyTorch.
L3C-PyTorch is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
L3C-PyTorch is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with L3C-PyTorch. If not, see <https://www.gnu.org/licenses/>.
--------------------------------------------------------------------------------
Very thin wrapper around DiscretizedMixLogisticLoss.cdf_step_non_shared that keeps track of targets, which are the
same for all channels of the bottleneck, as well as the current channel index.
"""
import torch
from src.l3c.logistic_mixture import CDFOut, DiscretizedMixLogisticLoss
class CodingCDFNonshared(object):
def __init__(self, l, total_C, dmll: DiscretizedMixLogisticLoss):
"""
:param l: predicted distribution, i.e., NKpHW, see DiscretizedMixLogisticLoss
:param total_C:
:param dmll:
"""
self.l = l
self.dmll = dmll
# Lp = L+1
self.targets = torch.linspace(dmll.x_min - dmll.bin_width / 2,
dmll.x_max + dmll.bin_width / 2,
dmll.L + 1, dtype=torch.float32, device=l.device)
self.total_C = total_C
self.c_cur = 0
def get_next_C(self, decoded_x) -> CDFOut:
"""
Get CDF to encode/decode next channel
:param decoded_x: NCHW
:return: C_cond_cur, NHWL'
"""
C_Cur = self.dmll.cdf_step_non_shared(
self.l, self.targets, self.c_cur, self.total_C, decoded_x)
self.c_cur += 1
return C_Cur
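    # Typical per-channel coding loop on the encoder side (sketch; the actual
    # entropy coder is outside this class):
    #
    #     coder = CodingCDFNonshared(l, total_C=C, dmll=dmll)
    #     for c in range(C):
    #         cdf = coder.get_next_C(decoded_x)  # CDF for channel c
    #         # ... encode channel c with `cdf`, then write its symbols back
    #         # into decoded_x so the next channel is properly conditioned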
| StarcoderdataPython |
1774265 | <gh_stars>1-10
#! /usr/bin/env python
#! /opt/casa/packages/RHEL7/release/current/bin/python
#
# AAP = Admit After Pipeline
#
# Example python script (and module) that for a given directory finds all ALMA pbcor.fits files
# and runs a suite of predefined ADMIT recipes on them, in a local directory named madmit_<YMD_HMS>
# It normally matches the pb.fits files, so ADMIT can work on noise flat image cubes.
#
# Notes:
# 1. this is still for old-style admit, not ADMIT3, but should port to python3 when ready
# 2. this does not encode the different tree view that is encoded in the old do_aap5 or runa1
# 3. it handles *.pb.fits as well as *.pb.fits.gz files that should mirror the *.pbcor.fits files
#
# SCRIPT usage
# aap.py -d dir1 [-c] [-n] [-r] [-s] [-v]
# -c check files to see if there are orphans we may not have encoded for ADMIT processing
# -n dry-run, prints out the commands as they would run (old style ADMIT)
# -r remove all CASA images/tables after the ADMIT run
# -s single mode, only one default run per image/cube
# -v verbose
#
# To use as a script, your shell environment must have 'casa' and CASA's 'python' in the $PATH,
# this normally takes two modifications, e.g.
# export PATH=$CASAROOT/bin:$CASAROOT/lib/casa/bin:$PATH
#
# MODULE usage
# import aap
# madmitname = aap.compute_admit(dirname)
#
# @todo ...
#
_version = "9-sep-2020 PJT"
import os, sys
import argparse as ap
import glob
import datetime
# decipher the python environment (yuck)
try:
import casa
print("Warning fake: still assuming classic ADMIT")
is_admit3 = False
except:
try:
import casatasks # pre-release now does this????
is_admit3 = True
print("Good fake news: running ADMIT3")
except:
print("Bad fake news: your python doesn't know casa or casatasks")
def version():
"""
identify yourself
"""
print("AAP Version %s" % _version)
def usage():
"""
command line helper
"""
print("Usage: %s -d DIR(s)")
print("For one or more DIR's find the pbcor.fits files that are needed for 'runa1' and 'runa2' type recipes in ADMIT")
sys.exit(0)
def splitall(path):
"""
Taken from https://www.oreilly.com/library/view/python-cookbook/0596001673/ch04s16.html
"""
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
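
# Example: splitall('a/b/c.fits') -> ['a', 'b', 'c.fits'], and absolute paths
# keep their leading '/' as the first element.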
def casa_cleanup(admitname):
"""
clean an admit directory of all CASA images
the method here needs to be certified by a CASA expert
"""
# @todo in Python3: from pathlib import Path
# this is only 3 levels deep, works for now
files = glob.glob("%s/*/table.info" % admitname) + glob.glob("%s/*/*/table.info" % admitname) + glob.glob("%s/*/*/*/table.info" % admitname)
for f in files:
dat = f.replace("table.info","")
cmd = "rm -rf %s" % dat
print("CLEANUP: %s" % cmd)
os.system(cmd)
def find_pbcor(dirname, mfs=False, cube=False, verbose=False):
"""
find the ALMA pbcor files in a directory.... since everything starts with a pbcor file.
We keep the MFS/CONT separate from CUBE, since they require different recipes.
"""
pbcor = []
if cube:
pbcor1a = glob.glob('%s/*.cube.*pbcor*fits' % dirname)
for p1 in pbcor1a:
if verbose:
print(p1)
pbcor.append(p1)
if mfs:
pbcor2a = glob.glob('%s/*.mfs.*pbcor*fits' % dirname)
pbcor2c = glob.glob('%s/*.cont.*pbcor*fits' % dirname)
for p2 in pbcor2a+pbcor2c:
if verbose:
print(p2)
pbcor.append(p2)
return pbcor
def runa1(pbcorname,pbname=None,label=None,apars=[],dryrun=False,cleanup=False):
"""
the runa1 recipe, with optional extra args
    $ADMIT/admit/test/admit1.py --pb fname.pb.fits.gz --basename x --out fname.<label>.admit --apar fname.<label>.apar fname.pbcor.fits
    e.g. runa1(fname, pbname, "native.5sigma", ["numsigma=5"])
         runa1(fname, pbname, "binned16.3sigma", ["insmooth=[-16]","numsigma=3"])
"""
r = '$ADMIT/admit/test/admit1.py'
r = r + ' --pb %s' % pbname
r = r + ' --basename x'
if len(apars) > 0:
if label == None:
aparname = pbcorname + '.apar'
else:
aparname = pbcorname.replace('.fits','') + '.%s.apar' % label
if not dryrun:
fp = open(aparname,"w")
fp.write("# written by AAP\n")
for apar in apars:
fp.write("%s\n" % apar)
fp.close()
r = r + ' --apar %s' % aparname
if label == None:
outname = pbcorname.replace('.fits','') + ".admit"
else:
outname = pbcorname.replace('.fits','') + '.%s.admit' % label
r = r + ' --out %s' % outname
r = r + ' %s' % pbcorname
r = r + ' > %s.log 2>&1' % outname
print(r)
if not dryrun:
os.system(r)
if cleanup:
casa_cleanup(outname)
def runa2(pbcorname,pbname=None,label=None,apars=[],dryrun=False,cleanup=False):
"""
the runa2 recipe, with optional extra args
$ADMIT/admit/test/admit2.py --pb fname.pb.fits.gz --basename x --out fname.<label>.admit --apar fname.<label>.apar fname.pbcor.fits
e.g. runa1(fname, "native.5sigma", ["numsigma=5"])
runa1(fname, "binned16.3sigma", ["insmooth=-16","numsigma=5"])
pbcorname = basename.pbcor.fits
outname = basename.pbcor.admit or basename.pbcor.<label>.admit
aparname = basename.pbcor.fits.apar or basename.pbcor.<label>.apar
"""
r = '$ADMIT/admit/test/admit2.py'
r = r + ' --pb %s' % pbname
r = r + ' --basename x'
if len(apars) > 0:
if label == None:
aparname = pbcorname + '.apar'
else:
aparname = pbcorname.replace('.fits','') + '.%s.apar' % label
if not dryrun:
fp = open(aparname,"w")
fp.write("# written by AAP\n")
for apar in apars:
fp.write("%s\n" % apar)
fp.close()
r = r + ' --apar %s' % aparname
if label == None:
outname = pbcorname.replace('.fits','') + ".admit"
else:
outname = pbcorname.replace('.fits','') + '.%s.admit' % label
r = r + ' --out %s' % outname
r = r + ' %s' % pbcorname
r = r + ' > %s.log 2>&1' % outname
print(r)
if not dryrun:
os.system(r)
if cleanup:
casa_cleanup(outname)
def run_admit(recipe, pbcor, madmitname, dryrun=False, verbose=False, single=False, cleanup=False):
"""
based on a full pbcor file run an ADMIT recipe
"""
idx = pbcor.find('.pbcor.fits')
pb = glob.glob(pbcor[:idx] + '.pb.fits*')
if len(pb) == 0:
print("Warning: no matching pb found for %s" % pbcor)
return
pb = pb[0]
if verbose:
print(pbcor)
print(pb)
# pbcor and pb are filenames relative to the dirname
# e.g. PID/S/G/M/product/member.uid___A001_X133f_X1a2.Tile_004_SMC_SWBar_sci.spw22.cube.I.pbcor.fits
# product/member.uid___A001_X133f_X1a2.Tile_004_SMC_SWBar_sci.spw22.cube.I.pbcor.fits
pbname = splitall(pb)[-1]
d = splitall(pbcor)
pbcorname = d[-1]
pbcorpath = os.path.abspath(pbcor)
pbpath = os.path.abspath(pb)
    adir = madmitname
if verbose:
print(adir)
# now some horrid file operations which can possible be done more efficiently if I had a better toolkit
cmd = 'mkdir -p %s' % adir
if not dryrun:
os.system(cmd)
cwd = os.getcwd()
os.chdir(adir)
os.system('ln -sf %s' % (pbcorpath))
os.system('ln -sf %s' % (pbpath))
if recipe == 'runa2':
os.system('listfitsa %s' % pbcorname)
if single:
runa2(pbcorname,pbname,dryrun=dryrun,cleanup=cleanup)
else:
# @todo add some smoothing? go from 5ppx to 10ppx ?
# @todo LGM's default is numsigma=6
runa2(pbcorname,pbname,"5sigma",["numsigma=5"],dryrun=dryrun,cleanup=cleanup)
runa2(pbcorname,pbname,"3sigma",["numsigma=3"],dryrun=dryrun,cleanup=cleanup)
elif recipe == 'runa1':
os.system('listfitsa %s' % pbcorname)
if single:
runa1(pbcorname,pbname,dryrun=dryrun,cleanup=cleanup)
else:
# @todo LineID's default is numsigma=5
#runa1(pbcorname,pbname,"native.5sigma",["numsigma=5"],dryrun=dryrun,cleanup=cleanup)
#runa1(pbcorname,pbname,"binned4.3sigma",["insmooth=[-4]","numsigma=3"],dryrun=dryrun,cleanup=cleanup)
runa1(pbcorname,pbname,"native.3sigma",["numsigma=3"],dryrun=dryrun,cleanup=cleanup)
runa1(pbcorname,pbname,"binned16.3sigma",["insmooth=[-16]","numsigma=3"],dryrun=dryrun,cleanup=cleanup)
if not dryrun:
os.chdir(cwd)
def alma_names(dirname):
"""
debugging: search and destroy what we know
"""
cwd = os.getcwd()
os.chdir(dirname)
files = glob.glob('*fits*')
pbcors = glob.glob('*.pbcor.fits')
pbcors.sort()
nfiles = len(files)
npbcors = len(pbcors)
print("Found %d pbcor in %d fits files" % (npbcors,nfiles))
for pbcor in pbcors:
pb = pbcor.replace('.pbcor.fits','.pb.fits.gz')
try:
i1=files.index(pb)
files.remove(pbcor)
files.remove(pb)
except:
print("missing %s" % pb)
mask = pb.replace('.pb.','.mask.')
try:
i1=files.index(mask)
files.remove(mask)
except:
print("missing %s" % mask)
for f in files:
print("orphan %s" % f)
if len(files)==0:
print("Hurray, no orphan files")
os.chdir(cwd)
def compute_admit(dirname, madmitname=None, verbose=False, dryrun=False, single=False, cleanup=False):
"""
do it all
"""
# @todo if dirname contains the whole P/S/G/M name, store that too
if madmitname == None:
prefix=dirname.split('/')
        # try some unique name that name-completes but also parses fast by the human eye and filebrowsers
        madmitname = os.path.abspath(prefix[-1]+"_"+prefix[-2]+"_"+datetime.datetime.now().strftime('%Y%m%d_%H%M%S.%f'))
print("MADMIT: %s" % madmitname)
# @todo only mfs and cube? what about cont ? or _ph and _pb
p1 = find_pbcor(dirname,cube=True, verbose=verbose)
print("Found %d cube pbcor fits files for ADMIT to process" % len(p1))
p2 = find_pbcor(dirname,mfs=True, verbose=verbose)
print("Found %d msf pbcor fits files for ADMIT to process" % len(p2))
if len(p1) + len(p2) == 0:
return None
# the cheap continuum maps
for p in p2:
run_admit('runa2', p, madmitname, verbose=verbose, dryrun=dryrun, single=single, cleanup=cleanup)
# the expensive cubes
for p in p1:
run_admit('runa1', p, madmitname, verbose=verbose, dryrun=dryrun, single=single, cleanup=cleanup)
return madmitname
if __name__ == "__main__":
parser = ap.ArgumentParser(description='AAP (ADMIT After Pipeline) processing - %s' % _version)
parser.add_argument('-d', '--dirname', nargs = 1, type = str, default = ['.'],
help = 'Name of the directory containing data')
parser.add_argument('-c', '--checknames', action="store_true", default = False,
help = 'Name Check on all fits files, report orphans')
parser.add_argument('-n', '--dryrun', action = "store_true", default = False,
help = 'Dryrun mode')
parser.add_argument('-r', '--cleanup', action = "store_true", default = False,
help = 'Cleanup CASA images after run')
parser.add_argument('-s', '--single', action = "store_true", default = False,
help = 'Single ADMIT mode')
parser.add_argument('-v', '--verbose', action = "store_true", default = False,
help = 'Verbose mode.')
args = vars(parser.parse_args())
if len(sys.argv) == 1:
usage()
version()
# Project ID, below which there is (at least one) Sous/Gous/Mous
dirname = args['dirname'][0]
do_names = args['checknames']
verbose = args['verbose']
dryrun = args['dryrun']
single = args['single']
cleanup = args['cleanup']
if do_names:
alma_names(dirname)
else:
madmitname = compute_admit(dirname,verbose=verbose,dryrun=dryrun,single=single,cleanup=cleanup)
# - end
| StarcoderdataPython |
3276163 | import torch
from torch.nn import functional as F
class WSDDNLossComputation(object):
"""
Computes the loss for WSDDN, which is a multi-label image-level binary cross-entropy loss
"""
def __init__(self, cfg):
self.config = cfg
self.background_weight = cfg.MODEL.ROI_BOX_HEAD.LOSS_WEIGHT_BACKGROUND
def __call__(self, class_logits, targets, num_box_per_img):
"""
Arguments:
class_logits (Tensor)
targets (Tensor): image-level multi-label target. Each row is a binary vector of lenth num_classes.
num_box_per_img (list[int])
Returns:
classification_loss (Tensor)
"""
device = class_logits.device
box_class_logits = class_logits.split(num_box_per_img, dim=0)
image_class_logits = [torch.logsumexp(l, dim=0) for l in box_class_logits]
image_class_logits = torch.stack(image_class_logits, dim=0)
negative_logits = torch.log(1.0 - torch.exp(image_class_logits) + 1e-6)
classification_loss = (- (targets * image_class_logits) -
((1 - targets) * negative_logits * self.background_weight))
classification_loss = classification_loss.mean()
return classification_loss
def make_roi_box_loss_evaluator(cfg):
loss_evaluator = WSDDNLossComputation(cfg)
return loss_evaluator
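
# Shape sanity sketch (hypothetical config): two images with 3 and 2 proposals
# over 4 classes; per-box class_logits are assumed to be log-probabilities whose
# logsumexp over boxes stays below 0, as the loss above requires.
#
#     evaluator = make_roi_box_loss_evaluator(cfg)
#     logits = torch.log_softmax(torch.randn(5, 4), dim=1) - torch.log(torch.tensor(5.))
#     targets = torch.tensor([[1., 0., 0., 1.], [0., 1., 0., 0.]])
#     loss = evaluator(logits, targets, num_box_per_img=[3, 2])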
| StarcoderdataPython |
1687080 | <reponame>mindis/rnd-reco-gym
import numpy as np
from numpy.random.mtrand import RandomState
from sklearn.linear_model import LogisticRegression
from recogym import DefaultContext, Observation
from recogym.agents import Agent
from recogym.envs.session import OrganicSessions
from recogym.agents import FeatureProvider
debug = False
def build_train_data(logs, feature_provider):
user_states, actions, rewards, proba_actions = [], [], [], []
current_user = None
for _, row in logs.iterrows():
if current_user != row['u']:
# User has changed: start a new session and reset user state.
current_user = row['u']
sessions = OrganicSessions()
feature_provider.reset()
context = DefaultContext(row['u'], row['t'])
if row['z'] == 'organic':
sessions.next(context, row['v'])
else:
# For each bandit event, generate one observation for the user state,
# the taken action the obtained reward and the used probabilities.
feature_provider.observe(Observation(context, sessions))
user_states.append(feature_provider.features(None).copy())
actions.append(row['a'])
rewards.append(row['c'])
proba_actions.append(row['ps'])
if debug:
print(f"\nLikelihoodAgent build_train_data() "
f"\nactions {actions} "
f"\nrewards {rewards}")
if debug:
print(f"\nLikelihoodAgent build_train_data() row['a'] {row['a']} row['c'] {row['c']}")
# Start a new organic session.
sessions = OrganicSessions()
return np.array(user_states), np.array(actions).astype(int), np.array(rewards), np.array(proba_actions)
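
# `logs` above is assumed to be a pandas DataFrame with one row per event,
# using the recogym column conventions read in the loop:
#   u (user id), t (time), z ('organic' or 'bandit'), v (viewed product),
#   a (recommended action), c (click / reward), ps (logged action propensity)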
class LikelihoodAgent(Agent):
def __init__(self, feature_provider, seed=43):
self.feature_provider = feature_provider
self.random_state = RandomState(seed)
self.model = None
if debug:
print(f"\nLikelihoodAgent INIT num_products {self.feature_provider.config.num_products}")
@property
def num_products(self):
# if debug:
# print(f"\nLikelihoodAgent num_products {self.feature_provider.config.num_products}")
return self.feature_provider.config.num_products
def _create_features(self, user_state, action):
# Look at the data and see how it maps into the features - which is the combination of the history
# and the actions and the label, which is clicks.
# Note that only the bandit events correspond to records in the training data.
# To make a personalization, it is necessary to cross the action and history features.
# _Why_ ? We do the simplest possible to cross an element-wise Kronecker product.
"""Create the features that are used to estimate the expected reward from the user state"""
# print(f"\nLikelihoodAgent train() features size {len(user_state) * self.num_products}")
features = np.zeros(len(user_state) * self.num_products)
# if debug:
# print(f"\nLikelihoodAgent Create the features that are used to "
# f"estimate the expected reward from the user state "
# f"\nsize {len(user_state) * self.num_products}"
# f"\nfeatures {features}")
features[action * len(user_state): (action + 1) * len(user_state)] = user_state
# if debug:
# print(f"\nLikelihoodAgent _create_features() action {action} "
# f"\nfuture index start {action * len(user_state)}"
# f"\nfuture index end {(action + 1) * len(user_state)}"
# f"\nuser_state {user_state}"
# f"\nRETURN features {features}")
return features
# The "features" are represented by matrix of organic product views where horizontal position is product id 0-9.
# The every line is organic views and vertical offset is action id / recommendation product (7 for first matrix and 2 for the next one). Looks they call it "Kronecker product".
#
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 1. 2. 1. 0. 0. 0. 1. 2. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
#
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 1. 2. 1. 0. 0. 0. 1. 2. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
#
# .... there are 59 such matrices of observations in this configuration.
#
# The "rewards" is represented by array of size 59, 1 = click at bandit offer, there were 59 bandit actions 57 failures, 2 successful, 1 = click on bandit offer:
#
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 1. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
# 1. 0. 0. 0. 0. 0. 0. 0. 0.
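# A minimal sketch of the crossing above (hypothetical numbers, num_products = 3):
# user_state = [1., 2., 0.] crossed with action = 1 gives
# features = [0., 0., 0., 1., 2., 0., 0., 0., 0.]
# i.e. the user state is copied into the slice that belongs to the chosen action:
# features = np.zeros(3 * 3); features[1 * 3:(1 + 1) * 3] = user_state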
def train(self, logs):
print(f"\nLikelihoodAgent train() num_products {self.feature_provider.config.num_products}")
user_states, actions, rewards, proba_actions = build_train_data(logs, self.feature_provider) # ---- build_train_data
features = np.vstack([
self._create_features(user_state, action)
for user_state, action in zip(user_states, actions)
])
self.model = LogisticRegression(solver='lbfgs', max_iter=5000)
if debug:
print(f"\nLikelihoodAgent train_from_logs() "
f"\nmodel.fit <- features size {len(features)}")
for feature in features:
print(f"\nmodel.fit <- feature size {len(feature)} : {feature}")
print(f"\nmodel.fit <- rewards size {len(rewards)} \n{rewards}")
self.model.fit(features, rewards) # ----- LEARN THE MODEL BY REWARDS ! X = FEATURES/VIEWS, Y = REWARDS ---> PROBABILITY OF REWARD
def _score_products(self, user_state):
all_action_features = np.array([
self._create_features(user_state, action)
for action in range(self.num_products)
])
if debug:
# print(f"\nLikelihoodAgent "
# f"\nall_action_features {all_action_features}"
# f"\n self.model.predict_proba {self.model.predict_proba(all_action_features)}")
print(f"\nLikelihoodAgent _score_products() user_state {user_state} return {self.model.predict_proba(all_action_features)[:, 1]}")
# predict_proba returns probability for 0 and 1 - [0.97692387 0.02307613]
# Using [:,1] in the code will give you the probabilities of 1 (product view)
return self.model.predict_proba(all_action_features)[:, 1]
def act(self, observation, reward, done):
"""Act method returns an action based on current observation and past history"""
if debug:
print(f"\nLikelihoodAgent ACT() reward {reward} done {done} \nobservation {observation.sessions()}")
self.feature_provider.observe(observation)
user_state = self.feature_provider.features(observation)
# prob is array of products click probability:
# _score_products returns [0.09832074 0.02307613 0.01848091 0.02051842 0.04991053 0.0237508 0.01884811 0.01946998 0.02045175 0.02017838]
prob = self._score_products(user_state)
action = np.argmax(prob) # -> 0 (returns MAX FROM prob above)
if debug:
print(f"LikelihoodAgent ACT() scored products - prob {prob} action {action}")
ps = 1.0
all_ps = np.zeros(self.num_products)
all_ps[action] = 1.0
if debug:
print(f"LikelihoodAgent ACT() user_state {user_state} prob {prob} action {action} ps {ps} ps-a {all_ps}")
return {
**super().act(observation, reward, done),
**{
'a': action,
'ps': ps,
'ps-a': all_ps,
}
}
def reset(self):
self.feature_provider.reset()
class CountFeatureProvider(FeatureProvider):
"""Feature provider as an abstract class that defines interface of setting/getting features"""
def __init__(self, config):
super(CountFeatureProvider, self).__init__(config)
self.feature_data = np.zeros((self.config.num_products))
def observe(self, observation):
"""Consider an Organic Event for a particular user"""
for session in observation.sessions():
self.feature_data[int(session['v'])] += 1
def features(self, observation):
"""Provide feature values adjusted to a particular feature set"""
return self.feature_data
def reset(self):
self.feature_data = np.zeros((self.config.num_products))
| StarcoderdataPython |
1722287 | #!/usr/bin/env python
# encoding: utf-8
class Solution(object):
def findKthLargest(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
import random  # random pivot choice keeps the expected running time linear
if not nums:
return None
if len(nums) == 1:
return nums[0]
# Quickselect: pick a random pivot and partition the remaining
# numbers into those smaller than the pivot and those greater or equal.
randnum = random.randint(0, len(nums) - 1)
pivot = nums[randnum]
del nums[randnum]
less = []
greater = []
for num in nums:
if num < pivot:
less.append(num)
else:
greater.append(num)
glen = len(greater)
if glen == k - 1:
# Exactly k-1 values are >= the pivot, so the pivot is the kth largest.
return pivot
elif glen > k - 1:
# The kth largest lies among the values greater than or equal to the pivot.
return self.findKthLargest(greater, k)
else:
# Discard the pivot and the glen larger values; adjust k accordingly.
return self.findKthLargest(less, k - 1 - glen) | StarcoderdataPython |
1893 | # type: ignore
from typing import Union, List, Dict
from urllib.parse import urlparse
import urllib3
from pymisp import ExpandedPyMISP, PyMISPError, MISPObject, MISPSighting, MISPEvent, MISPAttribute
from pymisp.tools import GenericObjectGenerator
import copy
from pymisp.tools import FileObject
from CommonServerPython import *
logging.getLogger("pymisp").setLevel(logging.CRITICAL)
def handle_connection_errors(error):
if "SSLError" in error:
return_error('Unable to connect to MISP because of an SSLCertVerificationError, '
'Please try to use the Trust any certificate option.')
if "NewConnectionError" in error:
return_error('Unable to connect to MISP because of a NewConnectionError, '
'Please make sure your MISP server url is correct.')
if "Please make sure the API key and the URL are correct" in error:
return_error('Unable to connect to MISP, '
'Please make sure the API key is correct.')
return_error(error)
def warn(*args):
"""
Do nothing with warnings
"""
pass
# Disable requests warnings
urllib3.disable_warnings()
# Disable python warnings
warnings.warn = warn
''' GLOBALS/PARAMS '''
params = demisto.params()
if not params.get('credentials') or not (MISP_API_KEY := params.get('credentials', {}).get('password')):
raise DemistoException('Missing API Key. Fill in a valid key in the integration configuration.')
MISP_URL = params.get('url')
VERIFY = not params.get('insecure')
PROXIES = handle_proxy() # type: ignore
try:
PYMISP = ExpandedPyMISP(url=MISP_URL, key=MISP_API_KEY, ssl=VERIFY, proxies=PROXIES)
except PyMISPError as e:
handle_connection_errors(e.message)
PREDEFINED_FEEDS = {
'CIRCL': {'name': 'CIRCL OSINT Feed',
'url': 'https://www.circl.lu/doc/misp/feed-osint',
'format': 'misp',
'input': 'network'},
'Botvrij.eu': {'name': 'The Botvrij.eu Data',
'url': 'http://www.botvrij.eu/data/feed-osint',
'format': 'misp',
'input': 'network'}
}
THREAT_LEVELS_TO_ID = {
'High': 1,
'Medium': 2,
'Low': 3,
'Unknown': 4
}
MISP_ENTITIES_TO_CONTEXT_DATA = {
'deleted': 'Deleted',
'category': 'Category',
'comment': 'Comment',
'uuid': 'UUID',
'sharing_group_id': 'SharingGroupID',
'timestamp': 'LastChanged',
'to_ids': 'ToIDs',
'value': 'Value',
'event_id': 'EventID',
'ShadowAttribute': 'ShadowAttribute',
'disable_correlation': 'DisableCorrelation',
'distribution': 'Distribution',
'type': 'Type',
'id': 'ID',
'date': 'CreationDate',
'info': 'Info',
'published': 'Published',
'attribute_count': 'AttributeCount',
'proposal_email_lock': 'ProposalEmailLock',
'locked': 'Locked',
'publish_timestamp': 'PublishTimestamp',
'event_creator_email': 'EventCreatorEmail',
'name': 'Name',
'analysis': 'Analysis',
'threat_level_id': 'ThreatLevelID',
'old_id': 'OldID',
'org_id': 'OrganizationID',
'Org': 'Organization',
'Orgc': 'OwnerOrganization',
'orgc_uuid': 'OwnerOrganization.UUID',
'orgc_id': 'OwnerOrganization.ID',
'orgc_name': 'OwnerOrganization.Name',
'event_uuid': 'EventUUID',
'proposal_to_delete': 'ProposalToDelete',
'description': 'Description',
'version': 'Version',
'Object': 'Object',
'object_id': 'ObjectID',
'object_relation': 'ObjectRelation',
'template_version': 'TemplateVersion',
'template_uuid': 'TemplateUUID',
'meta-category': 'MetaCategory',
'decay_score': 'DecayScore',
'first_seen': 'first_seen',
'last_seen': 'last_seen',
'provider': 'Provider',
'source_format': 'SourceFormat',
'url': 'URL',
'event_uuids': 'EventUUIDS',
}
MISP_ANALYSIS_TO_IDS = {
'initial': 0,
'ongoing': 1,
'completed': 2
}
MISP_DISTRIBUTION_TO_IDS = {
'Your_organization_only': 0,
'This_community_only': 1,
'Connected_communities': 2,
'All_communities': 3,
'Inherit_event': 5
}
SIGHTING_TYPE_NAME_TO_ID = {
'sighting': 0,
'false_positive': 1,
'expiration': 2
}
SIGHTING_TYPE_ID_TO_NAME = {
'0': 'sighting',
'1': 'false_positive',
'2': 'expiration'
}
INDICATOR_TYPE_TO_DBOT_SCORE = {
'FILE': DBotScoreType.FILE,
'URL': DBotScoreType.URL,
'DOMAIN': DBotScoreType.DOMAIN,
'IP': DBotScoreType.IP,
'EMAIL': DBotScoreType.EMAIL,
}
DOMAIN_REGEX = (
r"([a-z¡-\uffff0-9](?:[a-z¡-\uffff0-9-]{0,61}"
"[a-z¡-\uffff0-9])?(?:\\.(?!-)[a-z¡-\uffff0-9-]{1,63}(?<!-))*"
"\\.(?!-)(?!(jpg|jpeg|exif|tiff|tif|png|gif|otf|ttf|fnt|dtd|xhtml|css"
"|html)$)(?:[a-z¡-\uffff-]{2,63}|xn--[a-z0-9]{1,59})(?<!-)\\.?$"
"|localhost)"
)
MISP_SEARCH_ARGUMENTS = [
'value',
'type',
'category',
'org',
'tags',
'from',
'to',
'event_id',
'uuid',
'to_ids',
'last',
'include_decay_score',
'include_sightings',
'include_correlations',
'limit',
'page',
'enforceWarninglist',
'include_feed_correlations',
]
EVENT_FIELDS = [
'id',
'orgc_id',
'org_id',
'date',
'threat_level_id',
'info',
'published',
'uuid',
'analysis',
'attribute_count',
'timestamp',
'distribution',
'proposal_email_lock',
'locked',
'publish_timestamp',
'sharing_group_id',
'disable_correlation',
'event_creator_email',
'Org',
'Orgc',
'RelatedEvent',
'Galaxy',
'Tag',
'decay_score',
'Object',
'Feed',
]
ATTRIBUTE_FIELDS = [
'id',
'event_id',
'object_id',
'object_relation',
'category',
'type',
'to_ids',
'uuid',
'timestamp',
'distribution',
'sharing_group_id',
'comment',
'deleted',
'disable_correlation',
'first_seen',
'last_seen',
'value',
'Event',
'Object',
'Galaxy',
'Tag',
'decay_score',
'Sighting',
]
def extract_error(error: list) -> List[dict]:
"""
Extracting errors raised by PYMISP into readable response, for more information and examples
please see UT: test_extract_error.
Args:
error: list of responses from error section
Returns:
List[Dict[str, any]]: filtered response
"""
return [{
'code': err[0],
'message': err[1].get('message'),
'errors': err[1].get('errors')
} for err in error]
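# A hedged illustration (hypothetical error tuple in the shape PyMISP returns):
# extract_error([(403, {'message': 'Authentication failed.', 'errors': None})])
# -> [{'code': 403, 'message': 'Authentication failed.', 'errors': None}]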
def dict_to_generic_object_format(args: dict) -> List[dict]:
"""
Converts args dict into a list, please see GenericObjectGenerator Class in Pymisp.
Args:
args: dictionary describes MISP object
Returns:
list: list containing dicts that GenericObjectGenerator can take.
Examples:
>>> {'ip': '8.8.8.8', 'domain': 'google.com'}
[{'ip': '8.8.8.8'}, {'domain': 'google.com'}]
"""
return [{k: v} for k, v in args.items()]
def build_generic_object(template_name: str, args: List[dict]) -> GenericObjectGenerator:
"""
Args:
template_name: template name as described in https://github.com/MISP/misp-objects
args: arguments to create the generic object
Returns:
GenericObjectGenerator: object created in MISP
Example:
args should look like:
[{'analysis_submitted_at': '2018-06-15T06:40:27'},
{'threat_score': {value=95, to_ids=False}},
{'permalink': 'https://panacea.threatgrid.com/mask/samples/2e445ef5389d8b'},
{'heuristic_raw_score': 7.8385159793597}, {'heuristic_score': 96},
{'original_filename': 'juice.exe'}, {'id': '2e445ef5389d8b'}] # guardrails-disable-line
"""
misp_object = GenericObjectGenerator(template_name)
misp_object.generate_attributes(args)
return misp_object
def misp_convert_timestamp_to_date_string(timestamp: Union[str, int]) -> str:
"""
Gets a timestamp from MISP response (1546713469) and converts it to human readable format
"""
return datetime.utcfromtimestamp(int(timestamp)).strftime('%Y-%m-%dT%H:%M:%SZ') if timestamp else ""
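# e.g. misp_convert_timestamp_to_date_string(1546713469) -> '2019-01-05T18:37:49Z'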
def replace_keys_from_misp_to_context_data(obj_to_build: Union[dict, list, str]) -> Union[dict, list, str]:
"""
Replacing keys from MISP's format to Demisto's (as appear in ENTITIESDICT)
Args:
obj_to_build (Union[dict, list, str]): object to replace keys in
Returns:
Union[dict, list, str]: same object type that got in
"""
if isinstance(obj_to_build, list):
return [replace_keys_from_misp_to_context_data(item) for item in obj_to_build]
if isinstance(obj_to_build, dict):
return {
(MISP_ENTITIES_TO_CONTEXT_DATA[key] if key in MISP_ENTITIES_TO_CONTEXT_DATA else key):
replace_keys_from_misp_to_context_data(value) for key, value in obj_to_build.items()
}
return obj_to_build
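# A small illustration of the key replacement (hypothetical input):
# replace_keys_from_misp_to_context_data({'event_id': '7', 'Tag': [{'uuid': 'abc'}]})
# -> {'EventID': '7', 'Tag': [{'UUID': 'abc'}]}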
def reputation_command_to_human_readable(outputs, score, events_to_human_readable):
found_tag_id, found_tag_name = "", ""
for event in events_to_human_readable:
# removing those fields as they are shared by the events
found_tag_id = event.pop('Tag_ID')
found_tag_name = event.pop('Tag_Name')
return {
'Attribute Type': outputs[0].get('Type'),
'Dbot Score': score,
'Attribute Value': outputs[0].get('Value'),
'Attribute Category': outputs[0].get('Category'),
'Timestamp': outputs[0].get('Timestamp'),
'Events with the scored tag': events_to_human_readable,
'Scored Tag ID': found_tag_id,
'Scored Tag Name': found_tag_name,
}
def limit_tag_output_to_id_and_name(attribute_dict, is_event_level):
"""
As the tag list can contain a lot of unnecessary data, we limit this list to include only the ID and Name fields.
In addition, returns the set of the found tag ids.
Some tags have a field called inherited. When it is set to 1, the tag is an event's tag.
Otherwise (if it is set to 0 or does not exist), the tag is an attribute's tag.
If the data is an event's (is_event_level = true) we add all the tags to tag_set_ids
(the event's own tags and the event's attribute tags, as those are part of the event scope).
If the data is an attribute's (is_event_level = false) and the tag is related only to the attribute,
we add it to tag_set_ids. In any other case, we won't add the tag.
Args:
attribute_dict (dict): The dictionary that includes the tag list.
is_event_level (bool): Whether the attribute_dict was received from an event object,
meaning the tags are event's ones. Otherwise, the data is attribute's (attribute tags).
"""
output = []
tag_set_ids = set()
tags_list = attribute_dict.get('Tag', [])
for tag in tags_list:
is_event_tag = tag.get('inherited', 0) # field doesn't exist when this is an attribute level, default is '0'
tag_id = tag.get('id')
if is_event_level:
tag_set_ids.add(tag_id)
else: # attribute level
if not is_event_tag:
tag_set_ids.add(tag_id)
output.append({'ID': tag_id, 'Name': tag.get('name')})
return output, tag_set_ids
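# A hedged example at the attribute level (hypothetical tags):
# limit_tag_output_to_id_and_name(
#     {'Tag': [{'id': '5', 'name': 'tlp:red', 'inherited': 1}, {'id': '6', 'name': 'osint'}]}, False)
# -> ([{'ID': '5', 'Name': 'tlp:red'}, {'ID': '6', 'Name': 'osint'}], {'6'})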
def parse_response_reputation_command(misp_response, malicious_tag_ids, suspicious_tag_ids, attributes_limit):
"""
After getting all the attributes that match the required indicator value, this function parses the response.
It goes over all the attributes that were found (after limiting the attributes to the given amount)
and, via sub-functions, calculates the score of the indicator.
For the context data outputs, for every attribute we remove the "Related Attribute" list and limit the tags and
galaxies lists. Eventually, the outputs will be a list of attributes along with their event objects.
Note: when limiting the number of attributes, we sort the attributes list by event id, as the greater event ids
are the newer ones.
Returns:
response (dict): The parsed outputs to context data (array of attributes).
score: the indicator score
found_tag: the tag (id) which made the indicator to get that score
found_related_events (dict): contains info (name, id, threat level id) about all the events that include
the indicator
Please see an example for a response in test_data/reputation_command_response.json
Please see an example for a parsed output in test_data/reputation_command_outputs.json
"""
response = copy.deepcopy(misp_response)
attributes_list = response.get('Attribute')
if not attributes_list:
return None
attributes_list = sorted(attributes_list,
key=lambda attribute_item: attribute_item['event_id'], reverse=True)[:attributes_limit]
found_related_events, attributes_tag_ids, event_tag_ids = prepare_attributes_array_to_context_data(attributes_list)
attribute_in_event_with_bad_threat_level = found_event_with_bad_threat_level_id(found_related_events)
score, found_tag = get_score(attribute_tags_ids=attributes_tag_ids, event_tags_ids=event_tag_ids,
malicious_tag_ids=malicious_tag_ids, suspicious_tag_ids=suspicious_tag_ids,
is_attribute_in_event_with_bad_threat_level=attribute_in_event_with_bad_threat_level)
formatted_response = replace_keys_from_misp_to_context_data({'Attribute': attributes_list})
return formatted_response, score, found_tag, found_related_events
def prepare_attributes_array_to_context_data(attributes_list):
attributes_tag_ids, event_tag_ids = set(), set()
found_related_events = {}
if not attributes_list:
return None
for attribute in attributes_list:
attribute.pop("RelatedAttribute") # get rid of this useless list
event = attribute.get('Event')
convert_timestamp_to_readable(attribute, event)
found_related_events[event.get("id")] = {"Event Name": event.get("info"),
"Threat Level ID": event.get('threat_level_id'),
"Event ID": event.get("id")}
if event.get('Tag'):
limit_tag_output, tag_ids = limit_tag_output_to_id_and_name(event, True)
event['Tag'] = limit_tag_output
event_tag_ids.update(tag_ids)
if attribute.get('Tag'):
limit_tag_output, tag_ids = limit_tag_output_to_id_and_name(attribute, False)
attribute['Tag'] = limit_tag_output
attributes_tag_ids.update(tag_ids)
return found_related_events, attributes_tag_ids, event_tag_ids
def convert_timestamp_to_readable(attribute, event):
if attribute.get('timestamp'):
attribute['timestamp'] = misp_convert_timestamp_to_date_string(attribute.get('timestamp'))
if event:
if event.get('timestamp'):
attribute['Event']['timestamp'] = misp_convert_timestamp_to_date_string(event.get('timestamp'))
if event.get('publish_timestamp'):
attribute['Event']['publish_timestamp'] = misp_convert_timestamp_to_date_string(
event.get('publish_timestamp'))
def found_event_with_bad_threat_level_id(found_related_events):
bad_threat_level_ids = ["1", "2", "3"]
for event in found_related_events.values():
if event['Threat Level ID'] in bad_threat_level_ids:
return True
return False
def get_score(attribute_tags_ids, event_tags_ids, malicious_tag_ids, suspicious_tag_ids,
is_attribute_in_event_with_bad_threat_level):
"""
Calculates the indicator score by the following logic. Indicators of attributes and events that:
* have tags configured as malicious will be scored 3 (i.e. malicious).
* have tags configured as suspicious will be scored 2 (i.e. suspicious).
* don't have any tags configured as malicious or suspicious will be scored by their event's threat level id. In
such a case, the score will be BAD if the threat level id is in [1,2,3]. Otherwise, the threat level is 4 = Unknown.
note:
- In case the same tag appears in both the malicious tag ids and the suspicious tag ids lists, the indicator will
be scored as malicious.
- Attribute tags (both malicious and suspicious) are stronger than event tags.
"""
found_tag = None
is_attribute_tag_malicious = any((found_tag := tag) in attribute_tags_ids for tag in malicious_tag_ids)
if is_attribute_tag_malicious:
return Common.DBotScore.BAD, found_tag
is_attribute_tag_suspicious = any((found_tag := tag) in attribute_tags_ids for tag in suspicious_tag_ids)
if is_attribute_tag_suspicious:
return Common.DBotScore.SUSPICIOUS, found_tag
is_event_tag_malicious = any((found_tag := tag) in event_tags_ids for tag in malicious_tag_ids)
if is_event_tag_malicious:
return Common.DBotScore.BAD, found_tag
is_event_tag_suspicious = any((found_tag := tag) in event_tags_ids for tag in suspicious_tag_ids)
if is_event_tag_suspicious:
return Common.DBotScore.SUSPICIOUS, found_tag
# no tag was found
if is_attribute_in_event_with_bad_threat_level:
return Common.DBotScore.BAD, None
return Common.DBotScore.NONE, None
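# Precedence sketch (hypothetical tag ids): with malicious_tag_ids = {'10'} and
# suspicious_tag_ids = {'20'}, an attribute tagged '20' inside an event tagged '10'
# is scored SUSPICIOUS with found_tag '20', since attribute tags outrank event tags.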
def get_new_misp_event_object(args):
"""
Create a new MISP event object and set the event's details.
"""
event = MISPEvent()
event.distribution = MISP_DISTRIBUTION_TO_IDS[args.get('distribution')]
threat_level_id_arg = args.get('threat_level_id')
if threat_level_id_arg:
event.threat_level_id = THREAT_LEVELS_TO_ID[threat_level_id_arg]
analysis_arg = args.get('analysis')
event.analysis = MISP_ANALYSIS_TO_IDS.get(analysis_arg) if analysis_arg in MISP_ANALYSIS_TO_IDS else analysis_arg
event.info = args.get('info') if args.get('info') else 'Event from XSOAR'
event.date = datetime.today()
event.published = argToBoolean(args.get('published', 'False'))
return event
def create_event_command(demisto_args: dict):
"""Creating event in MISP with the given attribute args"""
new_event = get_new_misp_event_object(demisto_args)
new_event = PYMISP.add_event(new_event, True)
if isinstance(new_event, dict) and new_event.get('errors'):
raise DemistoException(new_event.get('errors'))
event_id = new_event.id
add_attribute(event_id=event_id, internal=True, new_event=new_event, demisto_args=demisto_args)
event = PYMISP.search(eventid=event_id)
human_readable = f"## MISP create event\nNew event with ID: {event_id} has been successfully created.\n"
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Event',
outputs_key_field='ID',
outputs=build_events_search_response(event),
raw_response=event
)
def add_attribute(event_id: int = None, internal: bool = False, demisto_args: dict = {}, new_event: MISPEvent = None):
"""Adding attribute to a given MISP event object
This function can be called as an independence command or as part of another command (create event for example)
Args:
event_id (int): Event ID to add attribute to
internal (bool): if set to True, will not post results to Demisto
demisto_args (dict): Demisto args
new_event (MISPEvent): When this function is called from the create event command, the attribute will be added
to that existing event.
"""
attributes_args = {
'id': demisto_args.get('event_id'), # misp event id
'type': demisto_args.get('type', 'other'),
'category': demisto_args.get('category', 'External analysis'),
'to_ids': argToBoolean(demisto_args.get('to_ids', True)),
'comment': demisto_args.get('comment'),
'value': demisto_args.get('value')
}
event_id = event_id if event_id else arg_to_number(demisto_args.get('event_id'), "event_id")
attributes_args.update({'id': event_id}) if event_id else None
distribution = demisto_args.get('distribution')
attributes_args.update({'distribution': MISP_DISTRIBUTION_TO_IDS[distribution]}) if distribution else None
if not new_event:
response = PYMISP.search(eventid=event_id, pythonify=True)
if not response:
raise DemistoException(
f"Error: An event with the given id: {event_id} was not found in MISP. please check it once again")
new_event = response[0] # response[0] is MISP event
new_event.add_attribute(**attributes_args)
PYMISP.update_event(event=new_event)
if internal:
return
value = attributes_args.get('value')
updated_event = PYMISP.search(eventid=new_event.id, controller='attributes', value=value)
human_readable = f"## MISP add attribute\nNew attribute: {value} was added to event id {new_event.id}.\n"
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Attribute',
outputs_key_field='ID',
outputs=build_attributes_search_response(updated_event),
raw_response=updated_event
)
def generic_reputation_command(demisto_args, reputation_type, dbot_type, malicious_tag_ids, suspicious_tag_ids,
reliability, attributes_limit):
reputation_value_list = argToList(demisto_args.get(reputation_type), ',')
command_results = []
for value in reputation_value_list:
command_results.append(
get_indicator_results(value, dbot_type, malicious_tag_ids, suspicious_tag_ids, reliability,
attributes_limit))
return command_results
def reputation_value_validation(value, dbot_type):
if dbot_type == 'FILE':
# hashFormat will be used only in output
hash_format = get_hash_type(value)
if hash_format == 'Unknown':
raise DemistoException('Invalid hash length, enter file hash of format MD5, SHA-1 or SHA-256')
if dbot_type == 'IP':
if not is_ip_valid(value):
raise DemistoException(f"Error: The given IP address: {value} is not valid")
if dbot_type == 'DOMAIN':
if not re.compile(DOMAIN_REGEX, regexFlags).match(value):
raise DemistoException(f"Error: The given domain: {value} is not valid")
if dbot_type == 'URL':
if not re.compile(urlRegex, regexFlags).match(value):
raise DemistoException(f"Error: The given url: {value} is not valid")
if dbot_type == 'EMAIL':
if not re.compile(emailRegex, regexFlags).match(value):
raise DemistoException(f"Error: The given email address: {value} is not valid")
def get_indicator_results(value, dbot_type, malicious_tag_ids, suspicious_tag_ids, reliability, attributes_limit):
"""
This function searches for the given attribute value in MISP and then calculates its dbot score.
The score is calculated from the tag ids (attribute tags and event tags).
Args:
value (str): The indicator value (an IP address, email address, domain, url or file hash).
dbot_type (str): Indicator type (file, url, domain, email or ip).
malicious_tag_ids (set): Tag ids that should be recognised as malicious.
suspicious_tag_ids (set): Tag ids that should be recognised as suspicious.
reliability (DBotScoreReliability): integration reliability score.
attributes_limit (int) : Limits the number of attributes that will be written to the context
Returns:
CommandResults includes all the indicator results.
"""
reputation_value_validation(value, dbot_type)
misp_response = PYMISP.search(value=value, controller='attributes', include_context=True,
include_correlations=True, include_event_tags=True, enforce_warninglist=True,
include_decay_score=True, includeSightings=True)
indicator_type = INDICATOR_TYPE_TO_DBOT_SCORE[dbot_type]
is_indicator_found = misp_response and misp_response.get('Attribute')
if is_indicator_found:
outputs, score, found_tag, found_related_events = parse_response_reputation_command(misp_response,
malicious_tag_ids,
suspicious_tag_ids,
attributes_limit)
dbot = Common.DBotScore(indicator=value, indicator_type=indicator_type,
score=score, reliability=reliability, malicious_description="Match found in MISP")
indicator = get_dbot_indicator(dbot_type, dbot, value)
all_attributes = outputs.get('Attribute')
events_to_human_readable = get_events_related_to_scored_tag(all_attributes, found_tag)
attribute_highlights = reputation_command_to_human_readable(all_attributes, score, events_to_human_readable)
readable_output = tableToMarkdown(f'Results found in MISP for value: {value}', attribute_highlights,
removeNull=True)
readable_output += tableToMarkdown('Related events', list(found_related_events.values()))
return CommandResults(indicator=indicator,
raw_response=misp_response,
outputs=all_attributes,
outputs_prefix='MISP.Attribute',
outputs_key_field='ID',
readable_output=readable_output)
else:
dbot = Common.DBotScore(indicator=value, indicator_type=indicator_type,
score=Common.DBotScore.NONE, reliability=reliability,
malicious_description="No results were found in MISP")
indicator = get_dbot_indicator(dbot_type, dbot, value)
return CommandResults(indicator=indicator,
readable_output=f"No attributes found in MISP for value: {value}")
def get_events_related_to_scored_tag(all_attributes, found_tag):
"""
This function searches for all the events that have the tag (i.e found_tag) which caused the indicator to be scored
as malicious or suspicious.
Args:
all_attributes (dict): The parsed response from the MISP search attribute request
found_tag (str): The tag that was scored as malicious or suspicious. If no tag was found, then the score is
Unknown so no events should be found.
Returns:
list of all the events that were detected as related to the tag.
"""
scored_events = []
if found_tag:
for attribute in all_attributes:
event = attribute.get('Event', {})
event_name = event.get('Info')
scored_events.extend(search_events_with_scored_tag(event, found_tag, event_name))
scored_events.extend(search_events_with_scored_tag(attribute, found_tag, event_name))
return remove_duplicated_related_events(scored_events)
def remove_duplicated_related_events(related_events):
related_events_no_duplicates = []
for i in range(len(related_events)):
if related_events[i] not in related_events[i + 1:]:
related_events_no_duplicates.append(related_events[i])
return related_events_no_duplicates
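# Note: this keeps the last occurrence of each duplicate (the events are dicts, so a set
# can't be used here), e.g. remove_duplicated_related_events([a, b, a]) -> [b, a]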
def search_events_with_scored_tag(object_data_dict, found_tag, event_name):
"""
Goes over all the tags of the given object and checks whether found_tag is one of its tags. If so, the event is
added to the related_events list.
Args:
object_data_dict (dict): Event or attribute dict which includes tags list.
found_tag (str): The tag that was scored as malicious or suspicious.
event_name (str): Name of the event
"""
related_events = []
object_tags_list = object_data_dict.get('Tag', [])
for tag in object_tags_list:
if tag.get('ID') == found_tag:
event_id = get_event_id(object_data_dict)
tag_name = tag.get('Name')
related_events.append({'Event_ID': event_id, 'Event_Name': event_name,
'Tag_Name': tag_name, 'Tag_ID': tag.get('ID')})
return related_events
def get_event_id(data_dict):
if data_dict.get('EventID'):
return data_dict.get('EventID')
elif data_dict.get('ID'):
return data_dict.get('ID')
return data_dict.get('Event', {}).get('ID')
def get_dbot_indicator(dbot_type, dbot_score, value):
if dbot_type == "FILE":
hash_type = get_hash_type(value)
if hash_type == 'md5':
return Common.File(dbot_score=dbot_score, md5=value)
if hash_type == 'sha1':
return Common.File(dbot_score=dbot_score, sha1=value)
if hash_type == 'sha256':
return Common.File(dbot_score=dbot_score, sha256=value)
if dbot_type == "IP":
return Common.IP(ip=value, dbot_score=dbot_score)
if dbot_type == "DOMAIN":
return Common.Domain(domain=value, dbot_score=dbot_score)
if dbot_type == "EMAIL":
return Common.EMAIL(address=value, dbot_score=dbot_score)
if dbot_type == "URL":
return Common.URL(url=value, dbot_score=dbot_score)
def build_misp_complex_filter(demisto_query: str):
"""
Examples are available in UT: test_build_misp_complex_filter.
For more information please see build_complex_query in pymisp/api.py
Args:
demisto_query: complex query contains saved words: 'AND:', 'OR:' and 'NOT:'
using ',' as delimiter for parameters and ';' as delimiter for operators.
using the operators is optional.
if 'demisto_query' does not contains any of the complex operators the original
input will be returned
Returns:
str: dictionary created for misp to perform complex query
or if no complex query found returns the original input
"""
regex_and = r'(AND:)([^\;]+)(;)?'
regex_or = r'(OR:)([^\;]+)(;)?'
regex_not = r'(NOT:)([^\;]+)(;)?'
misp_query_params = dict()
match_and = re.search(regex_and, demisto_query, re.MULTILINE)
match_or = re.search(regex_or, demisto_query, re.MULTILINE)
match_not = re.search(regex_not, demisto_query, re.MULTILINE)
is_complex_and_operator = is_misp_complex_search_helper(match_and, misp_query_params, 'and_parameters')
is_complex_or_operator = is_misp_complex_search_helper(match_or, misp_query_params, 'or_parameters')
is_complex_not_operator = is_misp_complex_search_helper(match_not, misp_query_params, 'not_parameters')
is_complex_search = is_complex_and_operator or is_complex_or_operator or is_complex_not_operator
if is_complex_search:
return PYMISP.build_complex_query(**misp_query_params)
return demisto_query
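# A hedged illustration of the parsing above (hypothetical tags):
# 'AND:tag1,tag2;NOT:tag3' is forwarded as
# PYMISP.build_complex_query(and_parameters=['tag1', 'tag2'], not_parameters=['tag3'])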
def is_misp_complex_search_helper(match_operator, misp_query_params, operator_key):
is_complex_search = False
if match_operator is not None:
misp_query_params[operator_key] = match_operator.group(2).split(',')
is_complex_search = True
return is_complex_search
def prepare_args_to_search(controller):
demisto_args = demisto.args()
args_to_misp_format = {arg: demisto_args[arg] for arg in MISP_SEARCH_ARGUMENTS if arg in demisto_args}
# Replacing keys and values from Demisto to Misp's keys
if 'type' in args_to_misp_format:
args_to_misp_format['type_attribute'] = args_to_misp_format.pop('type')
if 'to_ids' in args_to_misp_format:
args_to_misp_format['to_ids'] = 1 if demisto_args.get('to_ids') == 'true' else 0
if 'from' in args_to_misp_format:
args_to_misp_format['date_from'] = args_to_misp_format.pop('from')
if 'to' in args_to_misp_format:
args_to_misp_format['date_to'] = args_to_misp_format.pop('to')
if 'event_id' in args_to_misp_format:
args_to_misp_format['eventid'] = argToList(args_to_misp_format.pop('event_id'))
if 'last' in args_to_misp_format:
args_to_misp_format['publish_timestamp'] = args_to_misp_format.pop('last')
if 'include_decay_score' in args_to_misp_format:
args_to_misp_format['include_decay_score'] = 1 if demisto_args.get('include_decay_score') == 'true' else 0
if 'include_sightings' in args_to_misp_format:
args_to_misp_format['include_sightings'] = 1 if demisto_args.get('include_sightings') == 'true' else 0
if 'include_correlations' in args_to_misp_format:
args_to_misp_format['include_correlations'] = 1 if demisto_args.get('include_correlations') == 'true' else 0
if 'enforceWarninglist' in args_to_misp_format:
args_to_misp_format['enforceWarninglist'] = 1 if demisto_args.get('enforceWarninglist') == 'true' else 0
if 'include_feed_correlations' in args_to_misp_format:
args_to_misp_format['includeFeedCorrelations'] = 1 if demisto_args.get(
'include_feed_correlations') == 'true' else 0
args_to_misp_format.pop('include_feed_correlations')
if 'limit' not in args_to_misp_format:
args_to_misp_format['limit'] = '50'
if 'tags' in args_to_misp_format:
args_to_misp_format['tags'] = build_misp_complex_filter(args_to_misp_format['tags'])
args_to_misp_format['controller'] = controller
demisto.debug(f"[MISP V3]: args for {demisto.command()} command are {args_to_misp_format}")
return args_to_misp_format
def build_attributes_search_response(response: Union[dict, requests.Response],
include_correlations=False) -> dict:
"""
Convert the response of attribute search returned from MISP to the context output format.
"""
response_object = copy.deepcopy(response)
if include_correlations:
# return full related attributes only if the user wants to get them back
ATTRIBUTE_FIELDS.append('RelatedAttribute')
if isinstance(response_object, str):
response_object = json.loads(response_object)
attributes = response_object.get('Attribute')
return get_limit_attribute_search_outputs(attributes)
def get_limit_attribute_search_outputs(attributes):
for i in range(len(attributes)):
attributes[i] = {key: attributes[i].get(key) for key in ATTRIBUTE_FIELDS if key in attributes[i]}
build_galaxy_output(attributes[i])
build_tag_output(attributes[i])
build_sighting_output_from_attribute_search_response(attributes[i])
convert_timestamp_to_readable(attributes[i], None)
formatted_attributes = replace_keys_from_misp_to_context_data(attributes)
return formatted_attributes
def build_galaxy_output(given_object):
"""given_object is attribute or event, depends on the called function"""
if given_object.get('Galaxy'):
given_object['Galaxy'] = [
{
'name': star.get('name'),
'type': star.get('type'),
'description': star.get('description')
} for star in given_object['Galaxy']
]
def build_object_output(event):
if event.get('Object'):
event['Object'] = [
{
'name': event_object.get('name'),
'uuid': event_object.get('uuid'),
'description': event_object.get('description'),
'id': event_object.get('id')
} for event_object in event['Object']
]
def build_tag_output(given_object):
"""given_object is attribute or event, depends on the called function"""
if given_object.get('Tag'):
given_object['Tag'] = [
{'Name': tag.get('name'),
'is_galaxy': tag.get('is_galaxy')
} for tag in given_object.get('Tag')
]
def build_sighting_output_from_attribute_search_response(attribute):
if attribute.get('Sighting'):
attribute['Sighting'] = [
{'type': sighting.get('type')
} for sighting in attribute.get('Sighting')
]
def build_attributes_search_response_return_only_values(response_object: Union[dict, requests.Response]) -> list:
"""returns list of attributes' values that match the search query when user set the arg 'compact' to True"""
if isinstance(response_object, str):
response_object = json.loads(response_object)
attributes = response_object.get('Attribute')
return [attribute.get('value') for attribute in attributes]
def pagination_args_validation(page, limit):
if page and page < 0:
raise DemistoException("page should be zero or a positive number")
if limit and limit < 0:
raise DemistoException("limit should be zero or a positive number")
def attribute_response_to_markdown_table(response: dict):
attribute_highlights = []
for attribute in response:
event = attribute.get('Event', {})
attribute_tags = [tag.get('Name') for tag in attribute.get('Tag')] if attribute.get(
'Tag') else None
attribute_sightings = [SIGHTING_TYPE_ID_TO_NAME[sighting.get('Type')] for sighting in
attribute.get('Sighting')] if attribute.get('Sighting') else None
attribute_highlights.append({
'Attribute ID': attribute.get('ID'),
'Event ID': attribute.get('EventID'),
'Attribute Category': attribute.get('Category'),
'Attribute Type': attribute.get('Type'),
'Attribute Comment': attribute.get('Comment'),
'Attribute Value': attribute.get('Value'),
'Attribute Tags': attribute_tags,
'Attribute Sightings': attribute_sightings,
'To IDs': attribute.get('ToIDs'),
'Timestamp': attribute.get('Timestamp'),
'Event Info': event.get('Info'),
'Event Organization ID': event.get('OrganizationID'),
'Event Distribution': event.get('Distribution'),
'Event UUID': event.get('UUID')
})
return attribute_highlights
def search_attributes(demisto_args: dict) -> CommandResults:
"""Execute a MISP search over 'attributes'"""
args = prepare_args_to_search('attributes')
outputs_should_include_only_values = argToBoolean(demisto_args.get('compact', False))
include_correlations = argToBoolean(demisto_args.get('include_correlations', False))
page = arg_to_number(demisto_args.get('page', 1), "page", required=True)
limit = arg_to_number(demisto_args.get('limit', 50), "limit", required=True)
pagination_args_validation(page, limit)
response = PYMISP.search(**args)
if response:
if outputs_should_include_only_values:
response_for_context = build_attributes_search_response_return_only_values(response)
number_of_results = len(response_for_context)
md = tableToMarkdown(f"MISP search-attributes returned {number_of_results} attributes",
response_for_context[:number_of_results], ["Value"])
else:
response_for_context = build_attributes_search_response(response, include_correlations)
attribute_highlights = attribute_response_to_markdown_table(response_for_context)
pagination_message = f"Current page size: {limit}\n"
if len(response_for_context) == limit:
pagination_message += f"Showing page {page} out others that may exist"
else:
pagination_message += f"Showing page {page}"
md = tableToMarkdown(
f"MISP search-attributes returned {len(response_for_context)} attributes\n {pagination_message}",
attribute_highlights, removeNull=True)
return CommandResults(
raw_response=response,
readable_output=md,
outputs=response_for_context,
outputs_prefix="MISP.Attribute",
outputs_key_field="ID"
)
else:
return CommandResults(readable_output=f"No attributes found in MISP for the given filters: {args}")
def build_events_search_response(response: Union[dict, requests.Response]) -> dict:
"""
Convert the response of event search returned from MISP to the context output format.
please note: attributes are excluded from search-events output as the information is too big. User can use the
command search-attributes in order to get the information about the attributes.
"""
response_object = copy.deepcopy(response)
if isinstance(response_object, str):
response_object = json.loads(response_object)
events = [event.get('Event') for event in response_object]
for i in range(0, len(events)):
# Filter object from keys in event_args
events[i] = {key: events[i].get(key) for key in EVENT_FIELDS if key in events[i]}
events[i]['RelatedEvent'] = [] # there is no need in returning related event when searching for an event
build_galaxy_output(events[i])
build_tag_output(events[i])
build_object_output(events[i])
events[i]['timestamp'] = misp_convert_timestamp_to_date_string(events[i].get('timestamp'))
events[i]['publish_timestamp'] = misp_convert_timestamp_to_date_string(events[i].get('publish_timestamp'))
formatted_events = replace_keys_from_misp_to_context_data(events) # type: ignore
return formatted_events # type: ignore
def event_to_human_readable_tag_list(event):
event_tags = event.get('Tag', [])
if event_tags:
return [tag.get('Name') for tag in event_tags]
def event_to_human_readable_galaxy_list(event):
event_galaxies = event.get('Galaxy', [])
if event_galaxies:
return [galaxy.get('Name') for galaxy in event.get('Galaxy')]
def event_to_human_readable_object_list(event):
event_objects = event.get('Object', [])
if event_objects:
return [event_object.get('ID') for event_object in event.get('Object')]
def event_to_human_readable(response: dict):
event_highlights = []
for event in response:
event_tags = event_to_human_readable_tag_list(event)
event_galaxies = event_to_human_readable_galaxy_list(event)
event_objects = event_to_human_readable_object_list(event)
event_highlights.append({
'Event ID': event.get('ID'),
'Event Tags': event_tags,
'Event Galaxies': event_galaxies,
'Event Objects': event_objects,
'Publish Timestamp': event.get('PublishTimestamp'),
'Event Info': event.get('Info'),
'Event Org ID': event.get('OrganizationID'),
'Event Orgc ID': event.get('OwnerOrganization.ID'),
'Event Distribution': event.get('Distribution'),
'Event UUID': event.get('UUID'),
})
return event_highlights
def search_events(demisto_args: dict) -> CommandResults:
"""
Execute a MISP search using the 'event' controller.
"""
args = prepare_args_to_search('events')
page = arg_to_number(demisto_args.get('page', 1), "page", required=True)
limit = arg_to_number(demisto_args.get('limit', 50), "limit", required=True)
pagination_args_validation(page, limit)
response = PYMISP.search(**args)
if response:
response_for_context = build_events_search_response(response)
event_outputs_to_human_readable = event_to_human_readable(response_for_context)
pagination_message = f"Current page size: {limit}\n"
if len(response_for_context) == limit:
pagination_message += f"Showing page {page} out others that may exist"
else:
pagination_message += f"Showing page {page}"
md = tableToMarkdown(
f"MISP search-events returned {len(response_for_context)} events.\n {pagination_message}",
event_outputs_to_human_readable, removeNull=True)
return CommandResults(
raw_response=response,
readable_output=md,
outputs=response_for_context,
outputs_prefix="MISP.Event",
outputs_key_field="ID"
)
else:
return CommandResults(readable_output=f"No events found in MISP for the given filters: {args}")
def delete_event(demisto_args: dict):
"""
Gets an event id and deletes it.
"""
event_id = demisto_args.get('event_id')
response = PYMISP.delete_event(event_id)
if 'errors' in response:
raise DemistoException(f'Event ID: {event_id} was not found in MISP: \nError message: {response}')
else:
human_readable = f'Event {event_id} has been deleted'
return CommandResults(readable_output=human_readable, raw_response=response)
def add_tag(demisto_args: dict, is_attribute=False):
"""
Adds a tag to the given event or attribute UUID.
is_attribute (bool): whether the given UUID belongs to an attribute (True) or an event (False).
"""
uuid = demisto_args.get('uuid')
tag = demisto_args.get('tag')
try:
PYMISP.tag(uuid, tag) # add the tag
except PyMISPError:
raise DemistoException("Adding the required tag was failed. Please make sure the UUID exists.")
if is_attribute:
response = PYMISP.search(uuid=uuid, controller='attributes')
human_readable = f'Tag {tag} has been successfully added to attribute {uuid}'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Attribute',
outputs_key_field='ID',
outputs=build_attributes_search_response(response),
raw_response=response
)
# event's uuid
response = PYMISP.search(uuid=uuid)
human_readable = f'Tag {tag} has been successfully added to event {uuid}'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Event',
outputs_key_field='ID',
outputs=build_events_search_response(response),
raw_response=response
)
def remove_tag(demisto_args: dict, is_attribute=False):
"""
Removes a tag from the given event or attribute UUID.
is_attribute (bool): whether the given UUID belongs to an attribute (True) or an event (False).
"""
uuid = demisto_args.get('uuid')
tag = demisto_args.get('tag')
try:
response = PYMISP.untag(uuid, tag)
if response and response.get('errors'):
raise DemistoException(f'Error in `{demisto.command()}` command: {response}')
except PyMISPError:
raise DemistoException("Removing the required tag was failed. Please make sure the UUID and tag exist.")
if is_attribute:
response = PYMISP.search(uuid=uuid, controller='attributes')
human_readable = f'Tag {tag} has been successfully removed from the attribute {uuid}'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Attribute',
outputs_key_field='ID',
outputs=build_attributes_search_response(response),
raw_response=response
)
# event's uuid
response = PYMISP.search(uuid=uuid)
human_readable = f'Tag {tag} has been successfully removed from the event {uuid}'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Event',
outputs_key_field='ID',
outputs=build_events_search_response(response),
raw_response=response
)
def add_sighting(demisto_args: dict):
"""Adds sighting to MISP attribute
"""
attribute_id = demisto_args.get('id')
attribute_uuid = demisto_args.get('uuid')
sighting_type = demisto_args['type'] # mandatory arg
att_id = attribute_id or attribute_uuid
if not att_id:
raise DemistoException('ID or UUID not specified')
sighting_args = {
'id': attribute_id,
'uuid': attribute_uuid,
'type': SIGHTING_TYPE_NAME_TO_ID[sighting_type]
}
sigh_obj = MISPSighting()
sigh_obj.from_dict(**sighting_args)
response = PYMISP.add_sighting(sigh_obj, att_id)
if response.get('message'):
raise DemistoException(f"An error was occurred: {response.get('message')}")
elif response.get('Sighting'):
human_readable = f'Sighting \'{sighting_type}\' has been successfully added to attribute {att_id}'
return CommandResults(readable_output=human_readable)
raise DemistoException(f"An error was occurred: {json.dumps(response)}")
def test(malicious_tag_ids, suspicious_tag_ids, attributes_limit):
"""
Test module.
"""
is_tag_list_valid(malicious_tag_ids)
is_tag_list_valid(suspicious_tag_ids)
if attributes_limit < 0:
raise DemistoException('Attribute limit has to be a positive number.')
response = PYMISP._prepare_request('GET', 'servers/getPyMISPVersion.json')
if PYMISP._check_json_response(response):
return 'ok'
else:
raise DemistoException('Failed to connect to MISP.')
def build_feed_url(demisto_args):
url = demisto_args.get('feed')
url = url[:-1] if url.endswith('/') else url
if PREDEFINED_FEEDS.get(url):
url = PREDEFINED_FEEDS[url].get('url') # type: ignore
return url
def add_events_from_feed(demisto_args: dict, use_ssl: bool, proxies: dict):
"""Gets an OSINT feed from url and publishing them to MISP
urls with feeds for example: https://www.misp-project.org/feeds/
feed format must be MISP.
"""
headers = {'Accept': 'application/json'}
url = build_feed_url(demisto_args)
osint_url = f'{url}/manifest.json'
limit = arg_to_number(demisto_args.get('limit', 2), "limit", required=True)
try:
uri_list = requests.get(osint_url, verify=use_ssl, headers=headers, proxies=proxies).json()
events_ids = list() # type: List[Dict[str, int]]
for index, uri in enumerate(uri_list, 1):
response = requests.get(f'{url}/{uri}.json', verify=use_ssl, headers=headers, proxies=proxies).json()
misp_new_event = MISPEvent()
misp_new_event.load(response)
add_event_response = PYMISP.add_event(misp_new_event)
event_object = add_event_response.get('Event')
if event_object and 'id' in event_object:
events_ids.append({'ID': event_object['id']})
if limit == len(events_ids):
break
human_readable = tableToMarkdown(f'Total of {len(events_ids)} events was added to MISP.', events_ids)
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Event',
outputs_key_field='ID',
outputs=events_ids,
)
except ValueError as e:
raise DemistoException(f'URL [{url}] is not a valid MISP feed. error: {e}')
def add_object(event_id: str, obj: MISPObject):
"""Sending object to MISP and returning outputs
Args:
obj: object to add to MISP
event_id: ID of event
"""
response = PYMISP.add_object(event_id, misp_object=obj)
if 'errors' in response:
raise DemistoException(f'Error in `{demisto.command()}` command: {response}')
for ref in obj.ObjectReference:
# Don't overwrite `response` here; its 'Object' payload from add_object is used below.
PYMISP.add_object_reference(ref)
for attribute in response.get('Object', {}).get('Attribute', []):
convert_timestamp_to_readable(attribute, None)
response['Object']['timestamp'] = misp_convert_timestamp_to_date_string(response.get('Object', {}).get('timestamp'))
formatted_response = replace_keys_from_misp_to_context_data(response)
formatted_response.update({"ID": event_id})
human_readable = f'Object has been added to MISP event ID {event_id}'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Event',
outputs_key_field='ID',
outputs=formatted_response,
)
def add_file_object(demisto_args: dict):
entry_id = demisto_args.get('entry_id')
event_id = demisto_args.get('event_id')
file_path = demisto.getFilePath(entry_id).get('path')
obj = FileObject(file_path)
return add_object(event_id, obj)
def add_domain_object(demisto_args: dict):
"""Adds a domain object to MISP
domain-ip description: https://www.misp-project.org/objects.html#_domain_ip
"""
text = demisto_args.get('text')
event_id = demisto_args.get('event_id')
domain = demisto_args.get('name')
obj = MISPObject('domain-ip')
ips = argToList(demisto_args.get('ip'))
for ip in ips:
obj.add_attribute('ip', value=ip)
obj.add_attribute('domain', value=domain)
if text:
obj.add_attribute('text', value=text)
return add_object(event_id, obj)
def add_url_object(demisto_args: dict):
"""Building url object in MISP scheme
Scheme described https://www.misp-project.org/objects.html#_url
"""
url_args = [
'text',
'last_seen',
'first_seen'
]
event_id = demisto_args.get('event_id')
url = demisto_args.get('url')
url_parse = urlparse(url)
url_obj = [{'url': url}]
url_obj.append({'scheme': url_parse.scheme}) if url_parse.scheme else None
url_obj.append({'resource_path': url_parse.path}) if url_parse.path else None
url_obj.append({'query_string': url_parse.query}) if url_parse.query else None
url_obj.append({'domain': url_parse.netloc}) if url_parse.netloc else None
url_obj.append({'fragment': url_parse.fragment}) if url_parse.fragment else None
url_obj.append({'port': url_parse.port}) if url_parse.port else None
url_obj.append(
{'credential': (url_parse.username, url_parse.password)}) if url_parse.username and url_parse.password else None
url_obj.extend(convert_arg_to_misp_args(demisto_args, url_args))
g_object = build_generic_object('url', url_obj)
return add_object(event_id, g_object)
def add_generic_object_command(demisto_args: dict):
event_id = demisto_args.get('event_id')
template = demisto_args.get('template')
attributes = demisto_args.get('attributes').replace("'", '"')
try:
args = json.loads(attributes)
if not isinstance(args, list):
args = dict_to_generic_object_format(args)
obj = build_generic_object(template, args)
return add_object(event_id, obj)
except ValueError as e:
raise DemistoException(
f'`attributes` argument could not be decoded; it may not be valid JSON.\nattributes: {attributes}', str(e))
def convert_arg_to_misp_args(demisto_args, args_names):
return [{arg.replace('_', '-'): demisto_args.get(arg)} for arg in args_names if demisto_args.get(arg)]
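# e.g. convert_arg_to_misp_args({'dst_port': '443', 'comment': None}, ['dst_port', 'src_port'])
# -> [{'dst-port': '443'}]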
def add_ip_object(demisto_args: dict):
event_id = demisto_args.get('event_id')
ip_object_args = [
'dst_port',
'src_port',
'domain',
'hostname',
'ip_src',
'ip_dst'
]
# converting args to MISP's arguments types
misp_attributes_args = convert_arg_to_misp_args(demisto_args, ip_object_args)
ips = argToList(demisto_args.get('ip'))
for ip in ips:
misp_attributes_args.append({'ip': ip})
if misp_attributes_args:
non_req_args = [
'first_seen',
'last_seen',
]
misp_attributes_args.extend(convert_arg_to_misp_args(demisto_args, non_req_args))
misp_attributes_args.append({'text': demisto_args.get('comment')}) if demisto_args.get('comment') else None
obj = build_generic_object('ip-port', misp_attributes_args)
return add_object(event_id, obj)
else:
raise DemistoException(
f'None of required arguments presents. command {demisto.command()} requires one of {ip_object_args}')
def handle_tag_duplication_ids(malicious_tag_ids, suspicious_tag_ids):
"""
Gets 2 sets which include tag ids. If an id exists in both sets, it is removed from the
suspicious tag ids set and stays only in the malicious one (as a tag configured as malicious is
stronger than one recognised as suspicious).
"""
common_ids = set(malicious_tag_ids) & set(suspicious_tag_ids)
suspicious_tag_ids = {tag_id for tag_id in suspicious_tag_ids if tag_id not in common_ids}
return malicious_tag_ids, suspicious_tag_ids
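# e.g. handle_tag_duplication_ids(['1', '2'], ['2', '3']) -> (['1', '2'], {'3'})
# (note the suspicious ids come back as a set once filtered)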
def is_tag_list_valid(tag_ids):
"""Gets a list ot tag ids (each one is str), and verify all the tags are valid positive integers."""
for tag in tag_ids:
try:
tag = int(tag)
if tag <= 0:
raise DemistoException(f"Tag id has to be a positive integer, please change the given: '{tag}' id.")
except ValueError:
raise DemistoException(f"Tag id has to be a positive integer, please change the given: '{tag}' id.")
def create_updated_attribute_instance(demisto_args: dict, attribute_uuid: str) -> MISPAttribute:
attribute_type = demisto_args.get('type')
distribution = demisto_args.get('distribution')
category = demisto_args.get('category')
comment = demisto_args.get('comment')
value = demisto_args.get('value')
first_seen = demisto_args.get('first_seen')
last_seen = demisto_args.get('last_seen')
attribute_instance = MISPAttribute()
attribute_instance.uuid = attribute_uuid
if attribute_type:
attribute_instance.type = attribute_type
if distribution:
attribute_instance.distribution = MISP_DISTRIBUTION_TO_IDS[distribution]
if category:
attribute_instance.category = category
if value:
attribute_instance.value = value
if comment:
attribute_instance.comment = comment
if first_seen:
attribute_instance.first_seen = first_seen
if last_seen:
attribute_instance.last_seen = last_seen
return attribute_instance
def update_attribute_command(demisto_args: dict) -> CommandResults:
attribute_uuid = demisto_args.get('attribute_uuid')
attribute_instance = create_updated_attribute_instance(demisto_args, attribute_uuid)
attribute_instance_response = PYMISP.update_attribute(attribute=attribute_instance, attribute_id=attribute_uuid)
if isinstance(attribute_instance_response, dict) and attribute_instance_response.get('errors'):
raise DemistoException(attribute_instance_response.get('errors'))
human_readable = f"## MISP update attribute\nAttribute: {attribute_uuid} was updated.\n"
attribute = attribute_instance_response.get('Attribute')
convert_timestamp_to_readable(attribute, None)
parsed_attribute_data = replace_keys_from_misp_to_context_data(attribute)
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Attribute',
outputs_key_field='ID',
outputs=parsed_attribute_data,
)
def main():
params = demisto.params()
malicious_tag_ids = argToList(params.get('malicious_tag_ids'))
suspicious_tag_ids = argToList(params.get('suspicious_tag_ids'))
reliability = params.get('integrationReliability', 'B - Usually reliable')
if DBotScoreReliability.is_valid_type(reliability):
reliability = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
else:
Exception("MISP V3 error: Please provide a valid value for the Source Reliability parameter")
attributes_limit = arg_to_number(params.get('attributes_limit', 20), "attributes_limit", required=True)
command = demisto.command()
demisto.debug(f'[MISP V3]: command is {command}')
args = demisto.args()
try:
malicious_tag_ids, suspicious_tag_ids = handle_tag_duplication_ids(malicious_tag_ids, suspicious_tag_ids)
if command == 'test-module':
return_results(test(malicious_tag_ids=malicious_tag_ids, suspicious_tag_ids=suspicious_tag_ids,
attributes_limit=attributes_limit))
elif command == 'misp-create-event':
return_results(create_event_command(args))
elif command == 'misp-add-attribute':
return_results(add_attribute(demisto_args=args))
elif command == 'misp-search-events':
return_results(search_events(args))
elif command == 'misp-search-attributes':
return_results(search_attributes(args))
elif command == 'misp-delete-event':
return_results(delete_event(args))
elif command == 'misp-add-sighting':
return_results(add_sighting(args))
elif command == 'misp-add-tag-to-event':
return_results(add_tag(args))
elif command == 'misp-add-tag-to-attribute':
return_results(add_tag(demisto_args=args, is_attribute=True))
elif command == 'misp-remove-tag-from-event':
return_results(remove_tag(args))
elif command == 'misp-remove-tag-from-attribute':
return_results(remove_tag(demisto_args=args, is_attribute=True))
elif command == 'misp-add-events-from-feed':
return_results(add_events_from_feed(demisto_args=args, use_ssl=VERIFY, proxies=PROXIES))
elif command == 'file':
return_results(
generic_reputation_command(args, 'file', 'FILE', malicious_tag_ids, suspicious_tag_ids, reliability,
attributes_limit))
elif command == 'url':
return_results(
generic_reputation_command(args, 'url', 'URL', malicious_tag_ids, suspicious_tag_ids, reliability,
attributes_limit))
elif command == 'ip':
return_results(
generic_reputation_command(args, 'ip', 'IP', malicious_tag_ids, suspicious_tag_ids, reliability,
attributes_limit))
elif command == 'domain':
return_results(
generic_reputation_command(args, 'domain', 'DOMAIN', malicious_tag_ids, suspicious_tag_ids,
reliability, attributes_limit))
elif command == 'email':
return_results(generic_reputation_command(args, 'email', 'EMAIL', malicious_tag_ids, suspicious_tag_ids,
reliability, attributes_limit))
elif command == 'misp-add-file-object':
return_results(add_file_object(args))
elif command == 'misp-add-domain-object':
return_results(add_domain_object(args))
elif command == 'misp-add-url-object':
return_results(add_url_object(args))
elif command == 'misp-add-ip-object':
return_results(add_ip_object(args))
elif command == 'misp-add-object':
return_results(add_generic_object_command(args))
elif command == 'misp-update-attribute':
return_results(update_attribute_command(args))
except PyMISPError as e:
return_error(e.message)
except Exception as e:
return_error(str(e))
if __name__ in ['__main__', '__builtin__', 'builtins']:
main()
| StarcoderdataPython |
1791956 | <reponame>syqu22/django-react-blog
from posts.models import Post
from rest_framework import status
from rest_framework.test import APITestCase
from users.models import User
class TestViews(APITestCase):
def setUp(self):
self.user = User.objects.create_user(
username='test', email='<EMAIL>', password='<PASSWORD>')
self.post = Post.objects.create(title='Test Post', slug='test-post', thumbnail='https://www.test.example.com', author=self.user,
body='Test content of the post', read_time=5, is_public=True)
self.private_post = Post.objects.create(title='Private Post', slug='private-post', thumbnail='https://www.test.example.com', author=self.user,
body='Test content of the post', read_time=5, is_public=False)
def authenticate_user(self):
self.user.is_verified = True
self.client.force_login(self.user)
self.client.force_authenticate(user=self.user)
def test_comments(self):
"""
User can see comments for public post
"""
res = self.client.get('/api/comments/test-post/', data={
'body': 'test'
}, follow=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_comments_as_auth_user(self):
"""
Authenticated user can see comments for public post
"""
self.authenticate_user()
res = self.client.get('/api/comments/test-post/', data={
'body': 'test'
}, follow=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_comments_for_nonpublic_post(self):
"""
User cannot see comments for nonpublic post
"""
res = self.client.get('/api/comments/private-post/', data={
'body': 'test'
}, follow=True)
self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)
def test_comments_for_nonpublic_post_as_auth_user(self):
"""
Authenticated user cannot see comments for nonpublic post
"""
self.authenticate_user()
res = self.client.get('/api/comments/private-post/', data={
'body': 'test'
}, follow=True)
self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)
def test_send_comment(self):
"""
        Unauthenticated user cannot send comments
"""
res = self.client.post('/api/comments/test-post/send/', data={
'body': 'test'
}, follow=True)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
def test_send_comment_as_auth_user(self):
"""
Authenticated user can send comments
"""
self.authenticate_user()
res = self.client.post('/api/comments/test-post/send/', data={
'body': 'Test comment'
}, follow=True)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertEqual(Post.objects.get(
slug='test-post').comments.get(author=self.user).author, self.user)
def test_send_comment_on_nonpublic_post_as_auth_user(self):
"""
Authenticated user cannot send comments on nonpublic post
"""
self.authenticate_user()
res = self.client.post('/api/comments/private-post/send/', data={
'body': 'Test comment'
}, follow=True)
self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)
def test_send_comment_on_non_existing_post(self):
"""
        Unauthenticated user cannot send comments on a non-existing post (auth is rejected before the post lookup, hence 401)
"""
res = self.client.post('/api/comments/random-site/send/', data={
'body': 'Test comment'
}, follow=True)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
def test_send_comment_on_non_existing_post_as_auth_user(self):
"""
Authenticated user cannot send comments on non existing post
"""
self.authenticate_user()
res = self.client.post('/api/comments/random-site/send/', data={
'body': 'Test comment'
}, follow=True)
self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)
def test_send_many_comments(self):
"""
Authenticated user cannot send multiple comments in short time window
"""
self.authenticate_user()
res = self.client.post('/api/comments/test-post/send/', data={
'body': 'Test comment'
}, follow=True)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
res = self.client.post('/api/comments/test-post/send/', data={
'body': 'Another comment'
}, follow=True)
self.assertEqual(res.status_code, status.HTTP_429_TOO_MANY_REQUESTS)
| StarcoderdataPython |
4815130 | # -*- coding: utf-8 -*-
import numpy as np
from numpy import pi, sqrt, exp, sin, cos, tan, log, log10
import const
## Mass of the star
M_s = const.M_sol
## Mean molecular weight (assumed to be constant)
mu = 1
## Atmosphere loss model of <NAME> (2007) [https://ui.adsabs.harvard.edu/abs/2007P&SS...55.1426G]
dotM_ref = 1e12 ## g/s
a_ref = 0.045 * const.AU
t_age = 4.6e9 * const.yr
## This power law is applicable for `a > 0.015 AU`:
dotM = lambda t, a : dotM_ref * t_age/t * (a_ref/a)**2
dlogdotM = lambda t, a : [-1/t, -2/a]
## Keplerian orbital frequency
Omega_K = lambda r : sqrt(const.G*M_s/r**3)
## Temperature of gas
T = 1e4 ## K
## Sound velocity
cs = lambda T : sqrt((5/3)*const.RR_gas/mu*T)
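## --- Hedged usage sketch (added for illustration; not part of the original
## module). It only evaluates the expressions defined above; the numerical
## values and units depend entirely on the constants exposed by `const`.
if __name__ == '__main__':
    print('mass-loss rate at (t_age, a_ref): %.3e' % dotM(t_age, a_ref))
    print('Keplerian frequency at a_ref: %.3e' % Omega_K(a_ref))
    print('sound speed at T = 1e4 K: %.3e' % cs(T))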
| StarcoderdataPython |
1664070 | <filename>salt/tls-terminator/test_tls_terminator.py
import json
import os
from collections import OrderedDict
try:
from importlib.machinery import SourceFileLoader
def load_source(module, path):
return SourceFileLoader(module, path).load_module()
except ImportError:
# python 2
import imp
def load_source(module, path):
return imp.load_source(module, path)
import pytest
module = load_source('tls_terminator', os.path.join(os.path.dirname(__file__), 'init.sls'))
def test_is_external_backend():
def uut(backend):
return module.parse_backend(backend)[:4]
assert uut('https://example.com') == ('0/0', 443, True, 'both')
assert uut('https://example.com:8000') == ('0/0', 8000, True, 'both')
assert uut('https://10.10.10.10') == ('10.10.10.10', 443, True, 'ipv4')
assert uut('https://10.10.10.10:5000') == ('10.10.10.10', 5000, True, 'ipv4')
assert uut('http://example.com') == ('0/0', 80, True, 'both')
assert uut('http://10.10.10.10') == ('10.10.10.10', 80, True, 'ipv4')
assert uut('https://[2001:0db8:85a3:08d3:1319:8a2e:0370:7344]:8000') == \
('2001:db8:85a3:8d3:1319:8a2e:370:7344', 8000, True, 'ipv6')
assert uut('https://127.0.0.1') == ('127.0.0.1', 443, False, 'ipv4')
assert uut('http://127.0.0.1') == ('127.0.0.1', 80, False, 'ipv4')
assert uut('https://127.43.25.21') == ('127.43.25.21', 443, False, 'ipv4')
assert uut('http://[::1]') == ('::1', 80, False, 'ipv6')
assert uut('https://[::1]:5000') == ('::1', 5000, False, 'ipv6')
def test_get_port_sets():
uut = module.get_port_sets
assert uut([]) == []
assert uut([1]) == ['1']
assert uut([1, 2]) == ['1:2']
assert uut([1, 2, 4]) == ['1:2,4']
assert uut([1, 3,4,5, 7]) == ['1,3:5,7']
assert uut(range(0, 31, 2)) == ['0,2,4,6,8,10,12,14,16,18,20,22,24,26,28', '30']
def test_build_state():
state = module.build_state({
'example.com': {
'backend': 'http://127.0.0.1:5000',
}
})
backends = get_backends(state['tls-terminator-example.com-nginx-site'])
assert len(backends) == 1
assert backends['/']['upstream_identifier'].startswith('example.com-127.0.0.1_')
assert 'certbot' not in state['include']
rate_limits = merged(state['tls-terminator-rate-limit-zones']['file.managed'])
assert len(rate_limits['context']['rate_limit_zones']) == 0
def test_build_state_aliases():
short = {
'example.com': {'backend': 'http://127.0.0.1:5000'},
}
medium = {
'example.com': {
'backends': {
'/': 'http://127.0.0.1:5000',
}
}
}
full = {
'example.com': {
'backends': {
'/': {
'upstream': 'http://127.0.0.1:5000',
}
}
}
}
multiple = {
'example.com': {
'backends': {
'/': {
'upstreams': [
'http://127.0.0.1:5000',
]
}
}
}
}
uut = module.build_state
assert uut(short) == uut(medium) == uut(full) == uut(multiple)
def test_build_acme_state():
state = module.build_state({
'example.com': {
'backend': 'http://127.0.0.1:5000',
'acme': True,
}
})
assert 'certbot' in state['include']
def test_build_custom_tls_state():
state = module.build_state({
'example.com': {
'backend': 'http://127.0.0.1:5000',
'cert': 'FOOCERT',
'key': 'FOOKEY',
}
})
cert = state['tls-terminator-example.com-certs-1-cert']
key = state['tls-terminator-example.com-certs-1-key']
assert merged(cert['file.managed'])['contents'] == 'FOOCERT'
assert merged(key['file.managed'])['contents'] == 'FOOKEY'
assert 'certbot' not in state['include']
def test_build_custom_tls_pillar_state():
state = module.build_state({
'example.com': {
'backend': 'http://127.0.0.1:5000',
'cert_pillar': 'some:pillar:key',
'key_pillar': 'other:pillar:key',
}
})
cert = state['tls-terminator-example.com-certs-1-cert']
key = state['tls-terminator-example.com-certs-1-key']
assert merged(cert['file.managed'])['contents_pillar'] == 'some:pillar:key'
assert merged(key['file.managed'])['contents_pillar'] == 'other:pillar:key'
assert 'certbot' not in state['include']
def test_build_multiple_tls_pillar_state():
state = module.build_state({
'example.com': {
'backend': 'http://127.0.0.1:5000',
'certs': [{
'cert_pillar': 'pillar:rsa:cert',
'key_pillar': 'pillar:rsa:key',
}, {
'cert_pillar': 'pillar:ecdsa:cert',
'key_pillar': 'pillar:ecdsa:key',
}],
}
})
cert_1 = state['tls-terminator-example.com-certs-1-cert']
key_1 = state['tls-terminator-example.com-certs-1-key']
cert_2 = state['tls-terminator-example.com-certs-2-cert']
key_2 = state['tls-terminator-example.com-certs-2-key']
assert merged(cert_1['file.managed'])['contents_pillar'] == 'pillar:rsa:cert'
assert merged(key_1['file.managed'])['contents_pillar'] == 'pillar:rsa:key'
assert merged(cert_2['file.managed'])['contents_pillar'] == 'pillar:ecdsa:cert'
assert merged(key_2['file.managed'])['contents_pillar'] == 'pillar:ecdsa:key'
assert 'certbot' not in state['include']
def test_build_outgoing_firewall_rules():
state = module.build_state({
'example.com': {
'backend': 'http://1.1.1.1',
},
'foo.com': {
'backend': 'http://2.2.2.2:8000',
},
'bar.com': {
'backend': 'https://app.bar.com',
},
})
assert 'tls-terminator-outgoing-ipv4-port-443' not in state
example_v4 = merged(state['tls-terminator-outgoing-ipv4-to-1.1.1.1-port-80']['firewall.append'])
assert example_v4['family'] == 'ipv4'
assert example_v4['dports'] == '80'
assert example_v4['destination'] == '1.1.1.1'
foo_v4 = merged(state['tls-terminator-outgoing-ipv4-to-2.2.2.2-port-8000']['firewall.append'])
assert foo_v4['family'] == 'ipv4'
assert foo_v4['dports'] == '8000'
assert foo_v4['destination'] == '2.2.2.2'
bar_v4 = merged(state['tls-terminator-outgoing-ipv4-to-0/0-port-443']['firewall.append'])
assert bar_v4['family'] == 'ipv4'
assert bar_v4['dports'] == '443'
assert bar_v4['destination'] == '0/0'
bar_v6 = merged(state['tls-terminator-outgoing-ipv6-to-0/0-port-443']['firewall.append'])
assert bar_v6['family'] == 'ipv6'
assert bar_v6['dports'] == '443'
assert bar_v6['destination'] == '0/0'
def test_set_rate_limits():
state = module.build_state({
'example.com': {
'rate_limit': {
'zones': {
'default': {
'size': '10m',
'rate': '60r/m',
'key': '$cookie_session',
},
'sensitive': {
'rate': '10r/m',
}
},
'backends': {
'/': {
'zone': 'default',
'burst': 30,
},
'/login': {
'zone': 'sensitive',
'burst': 5,
'nodelay': False,
}
}
},
'backend': 'http://127.0.0.1:5000',
}
})
nginx_site = state['tls-terminator-example.com-nginx-site']
backends = get_backends(nginx_site)
assert len(backends) == 2
assert backends['/']['rate_limit'] == 'zone=default burst=30 nodelay'
assert backends['/login']['rate_limit'] == 'zone=sensitive burst=5'
# Should share upstream
assert backends['/login']['upstream_identifier'] == backends['/']['upstream_identifier']
assert len(merged(nginx_site['file.managed'])['context']['upstreams']) == 1
rate_limits = merged(state['tls-terminator-rate-limit-zones']['file.managed'])
assert rate_limits['context']['rate_limit_zones'] == [
'$cookie_session zone=default:10m rate=60r/m',
'$binary_remote_addr zone=sensitive:1m rate=10r/m',
]
assert 'tls-terminator-example.com-error-page-429' in state
def test_custom_error_pages():
pillar = OrderedDict({
'error_pages': {
'429': '429 loading {{ site }}',
502: {
'content_type': 'application/json',
'content': '{"error": 502, "site": "{{ site }}"}',
},
},
'test.com': {
'backend': 'http://127.0.0.1:5001',
'error_pages': {
502: '<p>Backend stumbled</p>',
},
},
})
# Add example.com later to ensure test.com is processed first to expose ordering bugs
pillar['example.com'] = {
'backend': 'http://127.0.0.1:5000',
}
state = module.build_state(pillar)
def error_page(site, error_code):
error_state = state['tls-terminator-%s-error-page-%d' % (site, error_code)]
file_state = merged(error_state['file.managed'])
return file_state
nginx_site = merged(state['tls-terminator-example.com-nginx-site']['file.managed'])
error_pages = nginx_site['context']['error_pages']
assert len(error_pages) == 3 # the two defaults plus 502
assert error_page('example.com', 429)['contents'] == '429 loading {{ site }}'
assert error_page('example.com', 502)['contents'] == '{"error": 502, "site": "{{ site }}"}'
assert error_page('example.com', 504)['contents'].startswith('<!doctype html>')
assert error_page('test.com', 429)['contents'] == '429 loading {{ site }}'
assert error_page('test.com', 502)['contents'] == '<p>Backend stumbled</p>'
def test_isolates_site_upstreams():
state = module.build_state({
'example.com': {
'backend': 'http://127.0.0.1:5000',
},
'otherexample.com': {
'backend': 'http://127.0.0.1:5001',
},
})
context = merged(state['tls-terminator-example.com-nginx-site']['file.managed'])['context']
assert len(context['upstreams']) == 1
def test_upstream_with_url():
state = module.build_state({
'example.com': {
'backend': 'http://127.0.0.1:5000/path',
}
})
context = merged(state['tls-terminator-example.com-nginx-site']['file.managed'])['context']
upstreams = context['upstreams']
assert len(upstreams) == 1
assert not any('/' in identifier for identifier in upstreams)
def test_upstream_port_only_difference():
state = module.build_state({
'example.com': {
'backends': {
'/': 'http://127.0.0.1:5000',
'/path': 'http://127.0.0.1:5001',
}
}
})
context = merged(state['tls-terminator-example.com-nginx-site']['file.managed'])['context']
upstreams = context['upstreams']
assert len(upstreams) == 2
def test_upstream_set_trust_root():
state = module.build_state({
'example.com': {
'backends': {
'/': {
'upstream': 'https://10.10.10.10',
'upstream_trust_root': 'some upstream cert',
}
}
}
})
context = merged(state['tls-terminator-example.com-nginx-site']['file.managed'])['context']
trust_root_identifier = context['backends']['/']['upstream_identifier']
expected_trust_root_path_app1 = '/etc/nginx/ssl/%s-root.pem' % trust_root_identifier
assert context['backends']['/']['upstream_trust_root'] == expected_trust_root_path_app1
assert 'tls-terminator-upstream-%s-trust-root' % trust_root_identifier in state
def test_invalid_config():
with pytest.raises(ValueError):
module.build_state({
'example.com': {}
})
with pytest.raises(ValueError):
module.build_state({
'example.com': {
'backend': 'http://127.0.0.1',
'backends': {
'/': 'http://127.0.0.1',
},
}
})
with pytest.raises(ValueError):
module.build_state({
'example.com': {
'backends': {
'/': {
'upstream': 'http://10.10.10.10',
'upstreams': ['http://10.10.10.11'],
}
}
}
})
def test_multiple_upstreams():
state = module.build_state({
'example.com': {
'backends': {
'/': {
'upstreams': [
'http://10.10.10.10:5000 weight=2',
'http://10.10.10.11:5000'
],
'upstream_keepalive': 16,
'upstream_least_conn': True,
},
'/path': {
'upstream': 'http://10.10.10.12:5001',
}
}
}
})
context = merged(state['tls-terminator-example.com-nginx-site']['file.managed'])['context']
upstreams = context['upstreams']
assert upstreams['example.com-10.10.10.10_2d957d'] == {
'identifier': 'example.com-10.10.10.10_2d957d',
'servers': [{
'hostname': '10.10.10.10',
'port': 5000,
'arguments': 'weight=2',
}, {
'hostname': '10.10.10.11',
'port': 5000,
'arguments': None,
}],
'keepalive': 16,
'least_conn': True,
'scheme': 'http',
}
def test_add_headers():
state = module.build_state({
'add_headers': {
'Expect-CT': 'max-age=60, report-uri=https://example.com/.report-uri/expect-ct',
},
'example.com': {
'backends': {
'/': {
'upstream': 'http://127.0.0.1:5000',
},
'/other': {
'upstream': 'http://127.0.0.1:5001',
'add_headers': {
'X-Frame-Options': 'sameorigin',
}
}
},
'add_headers': {
'Referrer-Policy': 'strict-origin-when-cross-origin',
'Expect-CT': '',
}
},
})
context = merged(state['tls-terminator-example.com-nginx-site']['file.managed'])['context']
default_security_headers = (
'Strict-Transport-Security',
'X-Xss-Protection',
'X-Content-Type-Options',
'X-Frame-Options',
)
for header in default_security_headers:
assert header in context['headers']
assert 'Expect-CT' in context['headers']
assert 'Referrer-Policy' in context['headers']
assert context['backends']['/other']['headers']['X-Frame-Options'] == 'sameorigin'
assert context['headers']['Expect-CT'] == ''
def get_backends(state_nginx_site):
return merged(state_nginx_site['file.managed'])['context']['backends']
def merged(dict_list):
'''Merges a salt-style list of dicts into a single dict'''
merged_dict = {}
for dictionary in dict_list:
merged_dict.update(dictionary)
return merged_dict
| StarcoderdataPython |
4832745 | <filename>mnistsvhntext/flags.py
import argparse
from utils.BaseFlags import parser as parser
# DATASET NAME
parser.add_argument('--dataset', type=str, default='SVHN_MNIST_text', help="name of the dataset")
# DATA DEPENDENT
# to be set by experiments themselves
parser.add_argument('--style_m1_dim', type=int, default=0, help="dimension of varying factor latent space")
parser.add_argument('--style_m2_dim', type=int, default=0, help="dimension of varying factor latent space")
parser.add_argument('--style_m3_dim', type=int, default=0, help="dimension of varying factor latent space")
parser.add_argument('--len_sequence', type=int, default=8, help="length of sequence")
parser.add_argument('--num_classes', type=int, default=10, help="number of classes on which the data set trained")
parser.add_argument('--dim', type=int, default=64, help="base feature dimensionality used by the networks")
parser.add_argument('--data_multiplications', type=int, default=20, help="number of pairs per sample")
parser.add_argument('--num_hidden_layers', type=int, default=1, help="number of hidden layers")
parser.add_argument('--likelihood_m1', type=str, default='laplace', help="output distribution")
parser.add_argument('--likelihood_m2', type=str, default='laplace', help="output distribution")
parser.add_argument('--likelihood_m3', type=str, default='categorical', help="output distribution")
# SAVE and LOAD
# to bet set by experiments themselves
parser.add_argument('--encoder_save_m1', type=str, default='encoderM1', help="model save for encoder")
parser.add_argument('--encoder_save_m2', type=str, default='encoderM2', help="model save for encoder")
parser.add_argument('--encoder_save_m3', type=str, default='encoderM3', help="model save for decoder")
parser.add_argument('--decoder_save_m1', type=str, default='decoderM1', help="model save for decoder")
parser.add_argument('--decoder_save_m2', type=str, default='decoderM2', help="model save for decoder")
parser.add_argument('--decoder_save_m3', type=str, default='decoderM3', help="model save for decoder")
parser.add_argument('--clf_save_m1', type=str, default='clf_m1', help="model save for clf")
parser.add_argument('--clf_save_m2', type=str, default='clf_m2', help="model save for clf")
parser.add_argument('--clf_save_m3', type=str, default='clf_m3', help="model save for clf")
# LOSS TERM WEIGHTS
parser.add_argument('--beta_m1_style', type=float, default=1.0, help="default weight divergence term style modality 1")
parser.add_argument('--beta_m2_style', type=float, default=1.0, help="default weight divergence term style modality 2")
parser.add_argument('--beta_m3_style', type=float, default=1.0, help="default weight divergence term style modality 2")
parser.add_argument('--div_weight_m1_content', type=float, default=0.25, help="default weight divergence term content modality 1")
parser.add_argument('--div_weight_m2_content', type=float, default=0.25, help="default weight divergence term content modality 2")
parser.add_argument('--div_weight_m3_content', type=float, default=0.25, help="default weight divergence term content modality 2")
parser.add_argument('--div_weight_uniform_content', type=float, default=0.25, help="default weight divergence term prior")
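# --- Hedged usage sketch (added for illustration; not part of the original
# file). The experiment scripts presumably parse these flags roughly like this:
if __name__ == '__main__':
    FLAGS = parser.parse_args()
    print(FLAGS.dataset, FLAGS.likelihood_m1, FLAGS.div_weight_uniform_content)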
| StarcoderdataPython |
4806074 | <filename>mysite/timesheets/apps.py
from django.apps import AppConfig
class TimesheetsConfig(AppConfig):
name = 'timesheets'
| StarcoderdataPython |
14321 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from moz_sql_parser import parse as parse_sql
import pyparsing
import re
from six.moves.urllib import parse
FROM_REGEX = re.compile(' from ("http.*?")', re.IGNORECASE)
def get_url(url, headers=0, gid=0, sheet=None):
parts = parse.urlparse(url)
if parts.path.endswith('/edit'):
path = parts.path[:-len('/edit')]
else:
path = parts.path
path = '/'.join((path.rstrip('/'), 'gviz/tq'))
qs = parse.parse_qs(parts.query)
if 'headers' in qs:
headers = int(qs['headers'][-1])
if 'gid' in qs:
gid = qs['gid'][-1]
if 'sheet' in qs:
sheet = qs['sheet'][-1]
if parts.fragment.startswith('gid='):
gid = parts.fragment[len('gid='):]
args = OrderedDict()
if headers > 0:
args['headers'] = headers
if sheet is not None:
args['sheet'] = sheet
else:
args['gid'] = gid
params = parse.urlencode(args)
return parse.urlunparse(
(parts.scheme, parts.netloc, path, None, params, None))
def extract_url(sql):
try:
url = parse_sql(sql)['from']
except pyparsing.ParseException:
# fallback to regex to extract from
match = FROM_REGEX.search(sql)
if match:
return match.group(1).strip('"')
return
while isinstance(url, dict):
url = url['value']['from']
return url
# Function to extract url from any sql statement
def url_from_sql(sql):
"""
Extract url from any sql statement.
:param sql:
:return:
"""
try:
parsed_sql = re.split('[( , " )]', str(sql))
for i, val in enumerate(parsed_sql):
if val.startswith('https:'):
sql_url = parsed_sql[i]
return sql_url
except Exception as e:
print("Error: {}".format(e))
| StarcoderdataPython |
4977 | <filename>Back-End/Python/timers/clock_named_tuple.py
from collections import namedtuple
MainTimer = namedtuple('MainTimer', 'new_time_joined, end_period, new_weekday, days')
def add_time(start, duration, start_weekday=None):
weekdays = [
'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
'Saturday', 'Sunday'
]
start_time, period = start.split(' ')
def process_time():
current_hour, current_minute = ([int(t) for t in start_time.split(':')])
end_hour, end_minute = ([int(d) for d in duration.split(':')])
# Adds Current time plus End Time Total
        end_hours, end_mins = (current_hour + end_hour, current_minute + end_minute)
        # Carry overflowing minutes into the hour count
        end_hours += end_mins // 60
        # Calculates total days passed
        days = end_hours // 24
        # Calculates new time (12-hour clock; a remainder of 0 maps to 12)
        new_time_array = [str(end_hours % 12 or 12), ':', str(end_mins % 60).rjust(2, '0')]
        new_time_joined = ''.join(new_time_array)
        end_period = [period]
        # Clock counts 12-hour flips, used below to toggle AM/PM
        clock = end_hours // 12
if start_weekday:
start_day_idx = weekdays.index(start_weekday.title())
new_weekday = weekdays[(start_day_idx + days % 7) % 7]
else:
new_weekday = False
# Figure out whether is AM or PM
for i in range(clock):
if end_period[-1].lower() == 'am':
end_period.append('PM')
else:
end_period.append('AM')
return MainTimer(new_time_joined, end_period, new_weekday, days)
# Triggers process time function
timed = process_time()
def process_output():
new_time = f'New Time is >>> {timed.new_time_joined} {timed.end_period[-1]}'
if timed.new_weekday:
new_time += f'- {timed.new_weekday} -'
        if timed.days == 1 and (period != timed.end_period[-1] or timed.end_period[-1] == 'AM'):
new_time += ' (new_day)'
elif timed.days > 1:
new_time += f' -Total days: {timed.days}- <<'
return new_time
new_time = process_output()
return new_time
print('---'*30)
x = add_time('10:00 AM', '54:00', 'Monday')
print(x)
print('---'*30)
| StarcoderdataPython |
1759539 | # PiFrame weather.py
# Manages weather data as well as forecast for the "Weather" Extension
# Uses Open Weather API https://openweathermap.org/api
import requests, settings, json, datetime
# Request URLS for weather
currentWeatherRequestURL = lambda zip, apiKey : ("http://api.openweathermap.org/data/2.5/weather?zip=%s&appid=%s&units=imperial" % (zip, apiKey))
forecastWeatherRequestURL = lambda zip, apiKey : ("http://api.openweathermap.org/data/2.5/forecast?zip=%s&appid=%s&units=imperial" % (zip, apiKey))
weatherIconURL = lambda iconCode : "http://openweathermap.org/img/wn/%s@2x.png" % (iconCode)
# WeatherResponse is a serializable response containing requested weather information
class WeatherResponse:
def __init__(self, location, sunrise, sunset, currentResponse, todayForecast, upcomingForecast):
self.location = location
self.sunrise = sunrise
self.sunset = sunset
self.currentResponse = currentResponse
self.todaysForecast = todayForecast
self.upcomingForecasts = upcomingForecast
def toJSON(self):
return json.dumps(self, default=lambda weather: weather.__dict__)
# WeatherResponseItem represents a single weather log
class WeatherResponseItem:
def __init__(self, iconURL, epochTime, temperature, minTemperature, maxTemperature, humidity):
self.iconURL = iconURL
self.temperature = round(temperature, 0)
self.minTemperature = round(minTemperature, 0)
self.maxTemperature = round(maxTemperature, 0)
self.humidity = humidity
self.time = epochTime
# getWeatherResponseItemFromData is used to create a WeatherResponseItem object from a dictionary of weather data
# param :data: a dictionary of information from the API call
# param :timeStamp: the datetime that the weather information corresponds to
# return :WeatherResponseItem: the response item created with data
def getWeatherResponseItemFromData(data, timeStamp):
iconURL = weatherIconURL(data["weather"][0]["icon"])
temperature = data["main"]["temp"]
maxTemp = data["main"]["temp_max"]
minTemp = data["main"]["temp_min"]
humidity = data["main"]["humidity"]
time = int(timeStamp.timestamp())
return WeatherResponseItem(iconURL, time, temperature, minTemp, maxTemp, humidity)
# getWeather queries the weather API for the client. By default, the current data is retrieved.
# param :includeForecast: a boolean value that indicates if forecast data should be included in the request
# return :WeatherResponse: the results of the weather query/parse
def getWeather(includeForecast, settings):
zip = settings.zip
apiKey = settings.apiKey
# If API key is not set, let the user know
if apiKey == None or apiKey == "":
return '{"error": "API"}'
url = currentWeatherRequestURL(zip, apiKey)
response = requests.get(url, timeout=10)
# Make sure request was completed
if response.status_code != 200:
return '{"error": "REQUEST"}'
data = response.json()
location = data["name"]
sunset = data["sys"]["sunset"]
sunrise = data["sys"]["sunrise"]
timeStamp = datetime.datetime.now()
current = getWeatherResponseItemFromData(data, timeStamp)
todayForecast = []
upcomingForecast = []
if includeForecast:
url = forecastWeatherRequestURL(zip, apiKey)
response = requests.get(url, timeout=10)
# If request wasn't completed, skip to end and return what we have
if response.status_code == 200:
data = response.json()
currentDay = timeStamp.day
entriesForCurrentDay = []
for update in data["list"]:
dt = datetime.datetime.fromtimestamp(update["dt"])
dataDay = dt.day
responseItem = getWeatherResponseItemFromData(update, dt)
# Keep a list of weather for a given day
entriesForCurrentDay.append(responseItem)
# Should record forecasts for the next 24 hours
if len(todayForecast) < 8:
todayForecast.append(responseItem)
# Once we move to a new day add the normalized information to our upcomingForecast list
# Note, only the next 4 full days are recorded, not including the current day
if currentDay != dataDay and len(upcomingForecast) < 5:
if len(entriesForCurrentDay) == 8:
entryFromDaysForecast = parseAveragesForDaysForecast(entriesForCurrentDay)
upcomingForecast.append(entryFromDaysForecast)
entriesForCurrentDay = []
currentDay = dataDay
# Return our results
returnObj = WeatherResponse(location, sunrise, sunset, current, todayForecast, upcomingForecast)
return returnObj.toJSON()
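# --- Hedged usage sketch (added for illustration; not part of the original
# module). getWeather only reads `zip` and `apiKey` from its settings object,
# so a simple namespace with placeholder values is enough to exercise it.
def _demo_get_weather():
    import types
    fake_settings = types.SimpleNamespace(zip='10001', apiKey='YOUR_API_KEY')
    # with a placeholder key the API returns non-200, so this yields the
    # '{"error": "REQUEST"}' payload rather than real weather data
    return getWeather(includeForecast=False, settings=fake_settings)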
# parseAveragesForDaysForecast goes over all 8 weather entries for a given day and creates one entry for the full day.
# This means taking the overall max and min temperatures, as well as the average temperature and humidity
# return :WeatherResponseItem: The consolidated response item
def parseAveragesForDaysForecast(entriesForCurrentDay):
temp = 0
humidity = 0
max_temp = -1000
min_temp = 1000
time = entriesForCurrentDay[0].time
for entry in entriesForCurrentDay:
temp += entry.temperature
humidity += entry.humidity
max_temp = entry.maxTemperature if entry.maxTemperature > max_temp else max_temp
min_temp = entry.minTemperature if entry.minTemperature < min_temp else min_temp
temp = temp / 8
humidity = humidity / 8
return WeatherResponseItem("", time, temp, min_temp, max_temp, humidity) | StarcoderdataPython |
1618975 | <filename>tieba/items.py
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class TiebaItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
forum = scrapy.Field()
note = scrapy.Field()
reply = scrapy.Field()
type = scrapy.Field()
    def __init__(self, *args, **kwargs):
        super(TiebaItem, self).__init__(*args, **kwargs)
        # scrapy's Item.__init__ already assigns keyword fields; only re-set
        # 'type' when it was actually provided, to avoid a KeyError
        if 'type' in kwargs:
            self['type'] = kwargs['type']
| StarcoderdataPython |
1679174 | #!/usr/bin/env python
#
# Integration with an Adafruit Arduino Motor Shield (V2) (http://www.adafruit.com/products/1438).
# Although this shield is built for the Arduino series boards, it can be modified/used in other
# applications. This tutorial will use the board connected to a Raspberry Pi 3 B+ with the following
# pin configurations ("Arduino Motor Controller Shield" referenced as "AMC" below). Note that this
# tutorial uses a DC motor:
#
# - AMC VCC screw terminal connected to *external* +5V Power Supply VCC - powers motors
# - AMC M1 (2-pin) screw terminal connected to Wire1 and Wire2 on DC Motor
# - AMC +5V pin connected to RasPi +5V (PIN2) - powers chips/circuitry on board
# - AMC SCL pin connected to RasPi SCL1 (PIN5)
# - AMC SDA pin connected to RasPi SDA1 (PIN3)
# - RasPi GND/Motor Shield GND Pin/External +5V GND all connected (Common Ground)
# - Arduino Motor Shield VIN Jumper **REMOVED**
#
# Code basis built from the following file:
# https://raw.githubusercontent.com/adafruit/Adafruit-Motor-HAT-Python-Library/master/examples/DCTest.py
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor
import time
# default I2C communication - all Arduino Motor Shields ship with
# address 0x60 as referenceable I2C address
mh = Adafruit_MotorHAT(addr=0x60)
motor1 = mh.getMotor(1)
# output some debugging information about the pins for the motor
print("Motor Details:")
print("PWM PIN: {}".format(motor1.PWMpin))
print("IN1 PIN: {}".format(motor1.IN1pin))
print("IN2 PIN: {}".format(motor1.IN2pin))
print("------------------------------------")
# main execution loop
while (True):
try:
print("Spinning Forward...")
motor1.run(Adafruit_MotorHAT.FORWARD)
print("\tSpeed up...")
for i in range(255):
motor1.setSpeed(i)
time.sleep(0.01)
print("\tSlow down...")
for i in reversed(range(255)):
motor1.setSpeed(i)
time.sleep(0.01)
print("Spinning Backward...")
motor1.run(Adafruit_MotorHAT.BACKWARD)
print("\tSpeed up...")
for i in range(255):
motor1.setSpeed(i)
time.sleep(0.01)
print("\tSlow down...")
for i in reversed(range(255)):
motor1.setSpeed(i)
time.sleep(0.01)
time.sleep(1.0)
except KeyboardInterrupt:
break
# release the motor
mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
| StarcoderdataPython |
1692349 | <reponame>xieyujia/RWOC
# ==========================================================================
#
# This file is a part of implementation for paper:
# DeepMOT: A Differentiable Framework for Training Multiple Object Trackers.
# This contribution is headed by Perception research team, INRIA.
#
# Contributor(s) : <NAME>
# INRIA contact : <EMAIL>
#
# ===========================================================================
import numpy as np
import copy
import torch
import cv2
# numpy version #
def xywh2xyxy(bbox):
"""
convert bbox from [x,y,w,h] to [x1, y1, x2, y2]
:param bbox: bbox in string [x, y, w, h], list
:return: bbox in float [x1, y1, x2, y2], list
"""
    bbox = copy.deepcopy(bbox)  # work on a copy so the caller's list is untouched
bbox[0] = float(bbox[0])
bbox[1] = float(bbox[1])
bbox[2] = float(bbox[2]) + bbox[0]
bbox[3] = float(bbox[3]) + bbox[1]
return bbox
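def _demo_xywh2xyxy():
    # hedged illustration (added; not from the original repo): string xywh
    # input becomes float [x1, y1, x2, y2] corner coordinates
    assert xywh2xyxy(['10', '20', '30', '40']) == [10.0, 20.0, 40.0, 60.0]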
def bb_fast_IOU_v1(boxA, boxB):
"""
Calculation of IOU, version numpy
:param boxA: numpy array [top left x, top left y, x2, y2]
:param boxB: numpy array of [top left x, top left y, x2, y2], shape = [num_bboxes, 4]
:return: IOU of two bounding boxes of shape [num_bboxes]
"""
if type(boxA) is type([]):
boxA = np.array(copy.deepcopy(boxA), dtype=np.float32)[-4:]
boxB = np.array(copy.deepcopy(boxB), dtype=np.float32)[:, -4:]
# determine the (x, y)-coordinates of the intersection rectangle
xA = np.maximum(boxA[0], boxB[:, 0])
yA = np.maximum(boxA[1], boxB[:, 1])
xB = np.minimum(boxA[2], boxB[:, 2])
yB = np.minimum(boxA[3], boxB[:, 3])
# compute the area of intersection rectangle
interArea = np.maximum(0.0, xB - xA + 1) * np.maximum(0.0, yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[:, 2] - boxB[:, 0] + 1) * (boxB[:, 3] - boxB[:, 1] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
    # areas - the intersection area
iou = interArea / (boxAArea + boxBArea - interArea)
return iou
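def _demo_bb_fast_iou():
    # hedged usage sketch (added for illustration; not part of the original
    # repo): one box against a batch, corners given as [x1, y1, x2, y2]
    box_a = [0.0, 0.0, 10.0, 10.0]
    boxes_b = np.array([[0.0, 0.0, 10.0, 10.0],    # identical box -> IOU 1.0
                        [5.0, 5.0, 15.0, 15.0]])   # partial overlap
    return bb_fast_IOU_v1(box_a, boxes_b)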
def warpcoordinates(coordinates, warp_matrix):
"""
camera motion compensations
    :param coordinates: np.ndarray of shape [num_bbox, 4], 4=[x,y,x,y] or [num_bbox, 2], 2=[x,y]
    :param warp_matrix: np.ndarray of shape [2,3]
    :return: warped coordinates: np.ndarray of shape [num_bbox, 4] or [num_bbox, 2]
"""
if coordinates.shape[1] == 4:
split_tl = coordinates[:, 0:2].copy()
split_br = coordinates[:, 2:4].copy()
pad_ones = np.ones((split_tl.shape[0], 1))
split_tl = np.transpose(np.hstack([split_tl, pad_ones]))
split_br = np.transpose(np.hstack([split_br, pad_ones]))
warped_tl = np.transpose(np.dot(warp_matrix, split_tl))
warped_br = np.transpose(np.dot(warp_matrix, split_br))
return np.hstack([warped_tl, warped_br])
else:
pad_ones = np.ones((coordinates.shape[0], 1))
coordinates = np.transpose(np.hstack([coordinates, pad_ones]))
return np.transpose(np.dot(warp_matrix, coordinates))
# def getWarpMatrix(im1, im2):
# """
# get warp matrix
# :param im1: curr image, numpy array, (h, w, c)
# :param im2: prev image, numpy array, (h, w, c)
# :return: affine transformation matrix, numpy array, (h, w)
# """
# im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
# im2Gray_ref = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
# warp_mode = cv2.MOTION_AFFINE
# warp_matrix = np.eye(2, 3, dtype=np.float32)
# cc, warp_matrix = cv2.findTransformECC(im2Gray_ref, im1Gray, warp_matrix, warp_mode)
# return warp_matrix
def getWarpMatrix(im1, im2):
"""
get warp matrix
:param im1: curr image, numpy array, (h, w, c)
:param im2: prev image, numpy array, (h, w, c)
:return: affine transformation matrix, numpy array, (h, w)
"""
im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
im2Gray_ref = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
warp_mode = cv2.MOTION_EUCLIDEAN
warp_matrix = np.eye(2, 3, dtype=np.float32)
number_of_iterations = 100
termination_eps = 0.00001
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TermCriteria_COUNT, number_of_iterations, termination_eps)
cc, warp_matrix = cv2.findTransformECC(im2Gray_ref, im1Gray, warp_matrix, warp_mode, criteria, None, 1)
return warp_matrix
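def _demo_camera_motion_compensation(prev_path='prev.jpg', curr_path='curr.jpg'):
    # hedged usage sketch (added for illustration): chains ECC registration
    # with bbox compensation; the frame paths and the box below are
    # placeholders, not data from the original repository
    curr_frame = cv2.imread(curr_path)
    prev_frame = cv2.imread(prev_path)
    warp_matrix = getWarpMatrix(curr_frame, prev_frame)
    boxes = np.array([[10.0, 20.0, 50.0, 80.0]])  # [x1, y1, x2, y2]
    return warpcoordinates(boxes, warp_matrix)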
# torch version #
def IOUmask_fast(boxA, boxesB):
"""
    get iou between boxA and many other boxes
    :param boxA: [top left x, top left y, bottom right x, bottom right y], float torch tensor requiring gradients, of shape (4,) !! not shape (1,4) !!
    :param boxesB: ground-truth boxes [[top left x, top left y, bottom right x, bottom right y], ...], list or numpy array of shape (num_boxes, 4)
    :return: iou, torch tensor of shape (1, num_boxes)
"""
boxesB = torch.FloatTensor(boxesB).cuda() # gt box
    # determine the (x, y)-coordinates of the intersection rectangle
xA = torch.max(boxA[0], boxesB[:, 0])
yA = torch.max(boxA[1], boxesB[:, 1])
xB = torch.min(boxA[2], boxesB[:, 2])
yB = torch.min(boxA[3], boxesB[:, 3])
# compute the area of intersection rectangle
interArea = torch.max(torch.zeros(1).cuda(), xB - xA + 1)\
* torch.max(torch.zeros(1).cuda(), yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxesBArea = (boxesB[:, 2] - boxesB[:, 0] + 1) * (boxesB[:, 3] - boxesB[:, 1] + 1)
iou = interArea / (boxAArea + boxesBArea - interArea)
return iou.unsqueeze(0)
def calculate_dist_fastV4_torch(bbox_det, bbox_gt, im_h, im_w):
"""
differentiable Version, normalized L2 distance
:param bbox_det: one detection bbox [x1, y1, x2, y2]
:param bbox_gt: list of ground truth bboxes [[x1, y1, x2, y2], ... ]
:param im_h: image height
:param im_w: image width
:return: normalized euclidean distance between detection and ground truth
"""
gt_box = torch.FloatTensor(bbox_gt).cuda() # gt box
D = (float(im_h)**2 + im_w**2)**0.5
c_gt_x, c_gt_y = 0.5 * (gt_box[:, 0] + gt_box[:, 2]), 0.5 * (gt_box[:, 1] + gt_box[:, 3])
c_det_x, c_det_y = 0.5 * (bbox_det[0] + bbox_det[2]), 0.5 * (bbox_det[1] + bbox_det[3])
# add eps=1e-12 for gradient numerical stability
return (1.0 - torch.exp(-5.0*torch.sqrt(1e-12+((c_gt_x-c_det_x)/D)**2 + ((c_gt_y-c_det_y)/D)**2))).unsqueeze(0)
def make_single_matrix_torchV2_fast(gt_bboxes, track_bboxes, img_h, img_w):
"""
    Version torch, differentiable
    :param gt_bboxes: list of ground truth bboxes !! [x1, y1, x2, y2] !!, optionally prefixed with a gt id column
    :param track_bboxes: track/hypothesis bboxes !! [x1, y1, x2, y2] !!, torch tensor Variable, [num_tracks, 4]
    :param img_h: height of the image
    :param img_w: width of the image
    :return: gt ids and distance matrix of shape [1, num_tracks, num_gt]
"""
    # number of tracks = N = height of matrix
N = track_bboxes.size(0)
gt = np.array(gt_bboxes, dtype=np.float32)
tmp = []
for i in range(N):
iou = IOUmask_fast(track_bboxes[i], gt[:, -4:])
l2_distance = calculate_dist_fastV4_torch(track_bboxes[i], gt[:, -4:], img_h, img_w)
tmp.append(0.5*(l2_distance + (1.0 - iou)))
dist_mat = torch.cat(tmp, dim=0)
if gt.shape[1] == 5:
gt_ids = gt[:, 0].astype(np.int32).tolist()
else:
gt_ids = []
return gt_ids, dist_mat.unsqueeze(0)
def mix_track_detV2(iou_mat, det, track):
"""
refine track bounding boxes by detections
:param iou_mat: iou between dets and tracks, numpy array, [num_track, num_det]
:param det: detection bbox matrix, numpy array, [num_detections, 4]
:param track: prediction from trackers, numpy array, [num_tracks, 4]
:return: refined new tracks, numpy array, [num_tracks, 4]
"""
values, idx = torch.max(iou_mat, dim=1)
mask = torch.ones_like(values)
for i in range(iou_mat.shape[0]):
# iou < 0.6, no refinement
if float(iou_mat[i, idx[i]]) <= 0.6:
mask[i] = 0.0
values = mask*values
return (1.0-values).view(-1, 1)*track + values.view(-1, 1)*torch.index_select(det, 0, idx)
| StarcoderdataPython |
3300502 | '''
====================================================================
(c) 2003-2016 <NAME>. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_svn_ui_actions.py
'''
import pathlib
import pysvn
from PyQt5 import QtWidgets
import wb_log_history_options_dialog
import wb_ui_actions
import wb_common_dialogs
import wb_svn_project
import wb_svn_info_dialog
import wb_svn_properties_dialog
import wb_svn_dialogs
import wb_svn_commit_dialog
import wb_svn_annotate
from wb_background_thread import thread_switcher
#
# Start with the main window components interface
# and add actions used by the main window
# and the commit window
#
# then derive to add tool bars and menus
# appropriate to each context
#
class SvnMainWindowActions(wb_ui_actions.WbMainWindowActions):
def __init__( self, factory ):
super().__init__( 'svn', factory )
def setupDebug( self ):
self.debugLog = self.main_window.app.debug_options.debugLogSvnUi
#--- Enablers ---------------------------------------------------------
#------------------------------------------------------------
#
# tree or table actions depending on focus
#
#------------------------------------------------------------
def enablerTreeTableSvnInfo( self ):
return self.main_window.callTreeOrTableFunction( self.enablerTreeSvnInfo, self.enablerTableSvnInfo )
def enablerTreeTableSvnProperties( self ):
return self.main_window.callTreeOrTableFunction( self.enablerTreeSvnProperties, self.enablerTableSvnProperties )
def enablerTreeTableSvnDiffBaseVsWorking( self ):
return self.main_window.callTreeOrTableFunction( self.enablerTreeSvnDiffBaseVsWorking, self.enablerTableSvnDiffBaseVsWorking )
def enablerTreeTableSvnDiffHeadVsWorking( self ):
return self.main_window.callTreeOrTableFunction( self.enablerTreeSvnDiffHeadVsWorking, self.enablerTableSvnDiffHeadVsWorking )
# ------------------------------------------------------------
def treeTableActionSvnDiffBaseVsWorking( self ):
self.main_window.callTreeOrTableFunction( self.treeActionSvnDiffBaseVsWorking, self.tableActionSvnDiffBaseVsWorking )
def treeTableActionSvnDiffHeadVsWorking( self ):
self.main_window.callTreeOrTableFunction( self.treeActionSvnDiffHeadVsWorking, self.tableActionSvnDiffHeadVsWorking )
@thread_switcher
def treeTableActionSvnInfo_Bg( self, checked=None ):
yield from self.main_window.callTreeOrTableFunction_Bg( self.treeActionSvnInfo, self.tableActionSvnInfo_Bg )
@thread_switcher
def treeTableActionSvnProperties_Bg( self, checked=None ):
yield from self.main_window.callTreeOrTableFunction_Bg( self.treeActionSvnProperties_Bg, self.tableActionSvnProperties_Bg )
#------------------------------------------------------------
def enablerTreeTableSvnLogHistory( self ):
return self.main_window.callTreeOrTableFunction( self.enablerTreeSvnLogHistory, self.enablerTableSvnLogHistory )
def enablerTreeSvnLogHistory( self ):
return self._enablerTreeSvnIsControlled()
def enablerTableSvnLogHistory( self ):
return self._enablerTableSvnIsControlled()
@thread_switcher
def tableActionSvnLogHistory_Bg( self, checked=None ):
yield from self.table_view.tableActionViewRepo_Bg( self.__actionSvnLogHistory_Bg )
@thread_switcher
def treeTableActionSvnLogHistory_Bg( self, checked=None ):
yield from self.main_window.callTreeOrTableFunction_Bg( self.treeActionSvnLogHistory_Bg, self.tableActionSvnLogHistory_Bg )
@thread_switcher
def treeActionSvnLogHistory_Bg( self, checked=None ):
tree_node = self.selectedSvnProjectTreeNode()
if tree_node is None:
return
yield from self.__actionSvnLogHistory_Bg( self.selectedSvnProject(), tree_node.relativePath() )
@thread_switcher
def __actionSvnLogHistory_Bg( self, svn_project, filename ):
self.progress.start( T_('Finding Tags') )
yield self.switchToBackground
try:
all_tag_nodes = svn_project.cmdTagsForFile( filename )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e, 'Cannot get tags for %s:%s' % (svn_project.projectName(), filename) )
all_tag_nodes = []
all_tags = [node.tag_name for node in all_tag_nodes]
yield self.switchToForeground
self.progress.end()
options = wb_log_history_options_dialog.WbLogHistoryOptions( self.app, all_tags, self.main_window )
        # delete options as soon as possible to attempt to avoid XCB errors
if not options.exec_():
return
self.setStatusAction( T_('Log for %(filename)s') %
{'filename': filename} )
self.progress.start( T_('Logs %(count)d') )
yield self.switchToBackground
try:
tag = options.getTag()
if tag is not None:
# find the tag node
for node in all_tag_nodes:
if node.tag_name == tag:
break
all_tag_nodes = [node]
rev = node.revision
else:
rev = None
all_commit_nodes = svn_project.cmdCommitLogForFile( filename, options.getLimit(), options.getSince(), options.getUntil(), rev )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e, 'Cannot get commit logs for %s:%s' % (svn_project.projectName(), filename) )
yield self.switchToForeground
return
if len(all_commit_nodes) > 0:
all_commit_nodes.extend( all_tag_nodes )
def key( node ):
return -node['revision'].number
all_commit_nodes.sort( key=key )
yield self.switchToForeground
self.progress.end()
self.setStatusAction()
log_history_view = self.factory.logHistoryView(
self.app,
T_('Commit Log for %(project)s:%(path)s') %
{'project': svn_project.projectName()
,'path': filename} )
log_history_view.showCommitLogForFile( svn_project, filename, all_commit_nodes )
log_history_view.show()
#------------------------------------------------------------
#
# tree actions
#
#------------------------------------------------------------
def selectedSvnProject( self ):
scm_project = self.table_view.selectedScmProject()
if scm_project is None:
return None
if not isinstance( scm_project, wb_svn_project.SvnProject ):
return None
return scm_project
def enablerTreeSvnDiffBaseVsWorking( self ):
return self._enablerTreeSvnIsControlled()
def enablerTreeSvnDiffHeadVsWorking( self ):
return self._enablerTreeSvnIsControlled()
def enablerTreeSvnInfo( self ):
return self._enablerTreeSvnIsControlled()
def enablerTreeSvnMkdir( self ):
return self._enablerTreeSvnIsControlled()
def enablerTreeSvnRevert( self ):
return self._enablerTreeSvnIsControlled()
def enablerTreeSvnAdd( self ):
return not self._enablerTreeSvnIsControlled()
def enablerTreeSvnProperties( self ):
return self._enablerTreeSvnIsControlled()
def _enablerTreeSvnIsControlled( self ):
tree_node = self.selectedSvnProjectTreeNode()
if tree_node is None:
return False
if not tree_node.project.hasFileState( tree_node.relativePath() ):
return False
file_state = tree_node.project.getFileState( tree_node.relativePath() )
return file_state.isControlled()
# ------------------------------------------------------------
def treeActionSvnDiffBaseVsWorking( self ):
tree_node = self.selectedSvnProjectTreeNode()
if tree_node is None:
return
try:
diff_text = tree_node.project.cmdDiffFolder( tree_node.relativePath(), head=False )
self.showDiffText( 'Diff Base vs. Working from %s' % (tree_node.relativePath(),), diff_text.split('\n') )
except wb_svn_project.ClientError as e:
tree_node.project.logClientError( e )
def treeActionSvnDiffHeadVsWorking( self ):
tree_node = self.selectedSvnProjectTreeNode()
if tree_node is None:
return
try:
diff_text = tree_node.project.cmdDiffFolder( tree_node.relativePath(), head=True )
self.showDiffText( 'Diff Head vs. Working from %s' % (tree_node.relativePath(),), diff_text.split('\n') )
except wb_svn_project.ClientError as e:
tree_node.project.logClientError( e )
@thread_switcher
def treeActionSvnAdd_Bg( self, checked=None ):
tree_node = self.selectedSvnProjectTreeNode()
if tree_node is None:
return
dialog = wb_svn_dialogs.WbAddFolderDialog( self.app, self.main_window, tree_node.relativePath() )
if dialog.exec_():
try:
tree_node.project.cmdAdd( tree_node.relativePath(), depth=dialog.getDepth(), force=dialog.getForce() )
except wb_svn_project.ClientError as e:
tree_node.project.logClientError( e )
yield from self.top_window.updateTableView_Bg()
@thread_switcher
def treeActionSvnRevert_Bg( self, checked=None ):
tree_node = self.selectedSvnProjectTreeNode()
if tree_node is None:
return
dialog = wb_svn_dialogs.WbRevertFolderDialog( self.app, self.main_window, tree_node.absolutePath() )
if dialog.exec_():
try:
tree_node.project.cmdRevert( tree_node.relativePath(), depth=dialog.getDepth() )
except wb_svn_project.ClientError as e:
tree_node.project.logClientError( e )
yield from self.top_window.updateTableView_Bg()
@thread_switcher
def treeActionSvnMkdir_Bg( self, checked=None ):
tree_node = self.selectedSvnProjectTreeNode()
if tree_node is None:
return
dialog = wb_common_dialogs.WbNewFolderDialog( self.app, self.main_window, tree_node.absolutePath() )
if dialog.exec_():
try:
tree_node.project.cmdMkdir( tree_node.relativePath() / dialog.getFolderName() )
except wb_svn_project.ClientError as e:
tree_node.project.logClientError( e )
yield from self.top_window.updateTableView_Bg()
def treeActionSvnInfo( self ):
tree_node = self.selectedSvnProjectTreeNode()
if tree_node is None:
return
try:
info = tree_node.project.cmdInfo( tree_node.relativePath() )
except wb_svn_project.ClientError as e:
tree_node.project.logClientError( e )
return
dialog = wb_svn_info_dialog.InfoDialog( self.app, self.main_window, tree_node.relativePath(), tree_node.absolutePath(), info )
dialog.exec_()
@thread_switcher
def treeActionSvnProperties_Bg( self, checked=None ):
tree_node = self.selectedSvnProjectTreeNode()
if tree_node is None:
return
svn_project = tree_node.project
filename = tree_node.relativePath()
prop_dict = svn_project.cmdPropList( filename )
dialog = wb_svn_properties_dialog.FolderPropertiesDialog( self.app, self.main_window, filename, prop_dict )
if dialog.exec_():
for is_present, name, value in dialog.getModifiedProperties():
try:
if not is_present:
# delete name
svn_project.cmdPropDel( name, filename )
else:
# add/update name value
svn_project.cmdPropSet( name, value, filename )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e )
yield from self.top_window.updateTableView_Bg()
def treeActionSvnCleanup( self, checked=None ):
tree_node = self.selectedSvnProjectTreeNode()
if tree_node is None:
return
self.top_window.setStatusAction( T_('Cleanup %s') % (tree_node.project.projectName(),) )
try:
tree_node.project.cmdCleanup()
self.log.info( 'Cleanup finished for %s' % (tree_node.project.projectName(),) )
except wb_svn_project.ClientError as e:
tree_node.project.logClientError( e )
self.top_window.setStatusAction()
@thread_switcher
def treeActionSvnUpdate_Bg( self, checked=None ):
tree_node = self.selectedSvnProjectTreeNode()
if tree_node is None:
return
path = tree_node.relativePath()
if path == pathlib.Path( '.' ):
msg = (T_('Update %(project)s') %
{'project': tree_node.project.projectName()})
else:
msg = (T_('Update %(project)s:%(filename)s') %
{'project': tree_node.project.projectName()
,'filename': path})
self.log.infoheader( msg )
self.setStatusAction( msg )
self.progress.start( T_('Updated %(count)d') )
yield self.switchToBackground
try:
svn_project = tree_node.project
filename = tree_node.relativePath()
svn_project.initNotificationOfFilesInConflictCount()
rev_list = svn_project.cmdUpdate(
filename,
svn_project.svn_rev_head,
svn_project.svn_depth_infinity )
yield self.switchToForeground
self.__updateToRevisionProcessResults( tree_node, rev_list )
except pysvn.ClientError as e:
svn_project.logClientError( e )
yield self.switchToForeground
self.progress.end()
self.setStatusAction()
yield from self.top_window.updateTableView_Bg()
def __updateToRevisionProcessResults( self, tree_node, rev_list ):
svn_project = tree_node.project
filename = tree_node.relativePath()
if rev_list is not None:
for rev in rev_list:
if rev.number > 0:
count = self.progress.getEventCount()
if count == 0:
self.log.info( T_('Updated %(project)s:%(filename)s to revision %(rev)d, no new updates') %
{'project': svn_project.projectName()
,'filename': filename
,'rev': rev.number} )
else:
self.log.info( S_('Updated %(project)s:%(filename)s to revision %(rev)d, %(count)d new update',
'Updated %(project)s:%(filename)s to revision %(rev)d, %(count)d new updates', count) %
{'project': svn_project.projectName()
,'filename': filename
,'rev': rev.number
,'count': count} )
else:
self.log.warning( T_('Already up to date') )
files_in_conflict = self.progress.getInConflictCount()
if files_in_conflict > 0:
box = QtWidgets.QMessageBox(
QtWidgets.QMessageBox.Information,
T_('Warning'),
S_("%d file is in conflict",
"%d files are in conflict",
files_in_conflict) %
(files_in_conflict,),
QtWidgets.QMessageBox.Close,
parent=self.top_window )
box.exec_()
def treeActionSvnStatus( self ):
self.log.info( 'Not implemented yet' )
#------------------------------------------------------------
#
# table actions
#
#------------------------------------------------------------
def enablerTableSvnResolveConflict( self ):
tree_node = self.selectedSvnProjectTreeNode()
if tree_node is None:
return False
if not tree_node.project.hasFileState( tree_node.relativePath() ):
return False
file_state = tree_node.project.getFileState( tree_node.relativePath() )
return file_state.isConflicted()
def enablerTableSvnLock( self ):
return self._enablerTableSvnIsControlled()
def enablerTableSvnUnlock( self ):
return self._enablerTableSvnIsControlled()
def enablerTableSvnDiffBaseVsWorking( self ):
if not self.main_window.isScmTypeActive( 'svn' ):
return False
all_file_state = self.tableSelectedAllFileStates()
if len(all_file_state) == 0:
return False
for file_state in all_file_state:
if not file_state.isModified():
return False
return True
def enablerTableSvnDiffHeadVsWorking( self ):
if not self.main_window.isScmTypeActive( 'svn' ):
return False
return True
def enablerTableSvnInfo( self ):
return self._enablerTableSvnIsControlled()
def enablerTableSvnProperties( self ):
return self._enablerTableSvnIsControlled()
def enablerTableSvnAdd( self ):
# can only add uncontrolled files
return self.__enablerTableSvnIsUncontrolled()
def enablerTableSvnRevert( self ):
# can only revert uncontrolled files
return self._enablerTableSvnIsControlled()
def enablerSvnCommitInclude( self ):
return self._enablerTableSvnIsControlled()
def __enablerTableSvnIsUncontrolled( self ):
all_file_state = self.tableSelectedAllFileStates()
if len(all_file_state) == 0:
return False
for file_state in all_file_state:
if not file_state.isUncontrolled():
return False
return True
def _enablerTableSvnIsControlled( self ):
all_file_state = self.tableSelectedAllFileStates()
if len(all_file_state) == 0:
return False
for file_state in all_file_state:
if not file_state.isControlled():
return False
return True
def enablerSvnCheckin( self ):
tree_node = self.selectedSvnProjectTreeNode()
if tree_node is None:
return False
return tree_node.project.numUncommittedFiles() > 0
# ------------------------------------------------------------
def tableActionSvnDiffBaseVsWorking( self ):
for file_state in self.tableSelectedAllFileStates():
self.diffTwoFiles(
T_('Diff Base vs. Working %s') % (file_state.relativePath(),),
file_state.getTextLinesBase(),
file_state.getTextLinesWorking(),
T_('Base %s') % (file_state.relativePath(),),
T_('Working %s') % (file_state.relativePath(),)
)
def tableActionSvnDiffHeadVsWorking( self ):
for file_state in self.tableSelectedAllFileStates():
self.diffTwoFiles(
T_('Diff HEAD vs. Working %s') % (file_state.relativePath(),),
file_state.getTextLinesBase(),
file_state.getTextLinesWorking(),
T_('HEAD %s') % (file_state.relativePath(),),
T_('Working %s') % (file_state.relativePath(),)
)
@thread_switcher
def tableActionSvnInfo_Bg( self, checked=None ):
def execute_function( svn_project, filename ):
try:
info = svn_project.cmdInfo( filename )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e )
return
dialog = wb_svn_info_dialog.InfoDialog( self.app, self.main_window, filename, svn_project.pathForSvn( filename ), info )
dialog.exec_()
yield from self._tableActionSvnCmd_Bg( execute_function )
@thread_switcher
def tableActionSvnProperties_Bg( self, checked=None ):
def execute_function( svn_project, filename ):
try:
prop_dict = svn_project.cmdPropList( filename )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e )
return
dialog = wb_svn_properties_dialog.FilePropertiesDialog( self.app, self.main_window, filename, prop_dict )
if dialog.exec_():
for is_present, name, value in dialog.getModifiedProperties():
try:
if not is_present:
# delete name
svn_project.cmdPropDel( name, filename )
else:
# add/update name value
svn_project.cmdPropSet( name, value, filename )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e )
return
yield from self._tableActionSvnCmd_Bg( execute_function )
@thread_switcher
def tableActionSvnAdd_Bg( self, checked=None ):
def execute_function( svn_project, filename ):
try:
svn_project.cmdAdd( filename )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e )
yield from self._tableActionSvnCmd_Bg( execute_function )
@thread_switcher
def tableActionSvnRevert_Bg( self, checked=None ):
def execute_function( svn_project, filename ):
try:
svn_project.cmdRevert( filename )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e )
return
def are_you_sure( all_filenames ):
return wb_common_dialogs.WbAreYouSureRevert( self.main_window, all_filenames )
yield from self._tableActionSvnCmd_Bg( execute_function, are_you_sure )
@thread_switcher
def tableActionSvnResolveConflict_Bg( self, checked=None ):
def execute_function( svn_project, filename ):
try:
svn_project.cmdResolved( filename )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e )
return
def are_you_sure( all_filenames ):
return wb_common_dialogs.WbAreYouSureResolveConflict( self.main_window, all_filenames )
yield from self._tableActionSvnCmd_Bg( execute_function, are_you_sure )
@thread_switcher
def tableActionSvnLock_Bg( self, checked=None ):
dialog = wb_svn_dialogs.WbLockFileDialog( self.app, self.main_window )
def execute_function( svn_project, filename ):
self.log.infoheader( 'Locking %s… ' % (filename,) )
try:
svn_project.cmdLock( filename, dialog.getMessage(), dialog.getForce() )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e )
def are_you_sure( all_filenames ):
dialog.setAllFilenames( all_filenames )
return dialog.exec_()
yield from self._tableActionSvnCmd_Bg( execute_function, are_you_sure )
@thread_switcher
def tableActionSvnUnlock_Bg( self, checked=None ):
dialog = wb_svn_dialogs.WbUnlockFileDialog( self.app, self.main_window )
def execute_function( svn_project, filename ):
self.log.infoheader( 'Unlocking %s… ' % (filename,) )
try:
svn_project.cmdUnlock( filename, dialog.getForce() )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e )
def are_you_sure( all_filenames ):
dialog.setAllFilenames( all_filenames )
return dialog.exec_()
yield from self._tableActionSvnCmd_Bg( execute_function, are_you_sure )
@thread_switcher
def tableActionSvnDelete_Bg( self, checked=None ):
def execute_function( svn_project, filename ):
file_state = svn_project.getFileState( filename )
if file_state.isControlled():
try:
svn_project.cmdDelete( filename )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e )
return
else:
try:
file_state.absolutePath().unlink()
except IOError as e:
self.log.error( 'Error deleting %s' % (filename,) )
self.log.error( str(e) )
def are_you_sure( all_filenames ):
return wb_common_dialogs.WbAreYouSureDelete( self.main_window, all_filenames )
yield from self._tableActionSvnCmd_Bg( execute_function, are_you_sure )
@thread_switcher
def tableActionSvnRename_Bg( self, checked=None ):
def execute_function( svn_project, filename ):
rename = wb_common_dialogs.WbRenameFilenameDialog( self.app, self.main_window )
rename.setName( filename.name )
if rename.exec_():
try:
# handles rename for controlled and uncontrolled files
svn_project.cmdRename( filename, filename.with_name( rename.getName() ) )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e )
yield from self._tableActionSvnCmd_Bg( execute_function )
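    # Common driver for the SVN table actions above: it resolves the
    # selected SVN project, delegates execute_function to the table view
    # (optionally guarded by an are-you-sure dialog), then refreshes the view.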
@thread_switcher
def _tableActionSvnCmd_Bg( self, execute_function, are_you_sure_function=None ):
svn_project = self.selectedSvnProject()
if svn_project is None:
return
yield from self.table_view.tableActionViewRepo_Bg( execute_function, are_you_sure_function )
yield from self.top_window.updateTableView_Bg()
# ------------------------------------------------------------
def selectedSvnProjectTreeNode( self ):
if not self.main_window.isScmTypeActive( 'svn' ):
return None
tree_node = self.table_view.table_model.selectedScmProjectTreeNode()
        # if the placeholder is being used return None
if not isinstance( tree_node, wb_svn_project.SvnProjectTreeNode ):
return None
return tree_node
def enablerTableSvnAnnotate( self ):
return self._enablerTableSvnIsControlled()
@thread_switcher
def tableActionSvnAnnotate_Bg( self, checked=None ):
yield from self.table_view.tableActionViewRepo_Bg( self.__actionSvnAnnotate_Bg )
@thread_switcher
def __actionSvnAnnotate_Bg( self, svn_project, filename ):
self.setStatusAction( T_('Annotate %s') % (filename,) )
self.progress.start( T_('Annotate %(count)d'), 0 )
yield self.switchToBackground
try:
all_annotation_nodes = svn_project.cmdAnnotationForFile( filename )
all_annotate_revs = set()
for node in all_annotation_nodes:
all_annotate_revs.add( node.log_id )
yield self.switchToForeground
except wb_svn_project.ClientError as e:
svn_project.logClientError( e, 'Cannot get annotations for %s:%s' % (svn_project.projectPath(), filename) )
yield self.switchToForeground
return
self.progress.end()
self.progress.start( T_('Annotate Commit Logs %(count)d'), 0 )
yield self.switchToBackground
rev_min = min( all_annotate_revs )
rev_max = max( all_annotate_revs )
try:
all_commit_logs = svn_project.cmdCommitLogForAnnotateFile( filename, rev_max, rev_min )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e, 'Cannot get commit logs for %s:%s' % (svn_project.projectPath(), filename) )
all_commit_logs = []
yield self.switchToForeground
self.setStatusAction()
self.progress.end()
annotate_view = wb_svn_annotate.WbSvnAnnotateView(
self.app,
T_('Annotation of %s') % (filename,) )
annotate_view.showAnnotationForFile( all_annotation_nodes, all_commit_logs )
annotate_view.show()
commit_key = 'svn-commit-dialog'
@thread_switcher
def treeActionSvnCheckin_Bg( self, checked ):
if self.app.hasSingleton( self.commit_key ):
commit_dialog = self.app.getSingleton( self.commit_key )
commit_dialog.raise_()
return
svn_project = self.selectedSvnProject()
yield self.switchToBackground
svn_project.updateStateForCheckin()
yield self.switchToForeground
# QQQ need to finish the work to setup all_paths_to_checkin
commit_dialog = wb_svn_commit_dialog.WbSvnCommitDialog( self.app, svn_project, [] )
commit_dialog.commitAccepted.connect( self.app.wrapWithThreadSwitcher( self.__commitAccepted_Bg ) )
commit_dialog.commitClosed.connect( self.__commitClosed )
# show to the user
commit_dialog.show()
self.app.addSingleton( self.commit_key, commit_dialog )
# enabled states may have changed
self.main_window.updateActionEnabledStates()
@thread_switcher
def __commitAccepted_Bg( self ):
svn_project = self.selectedSvnProject()
if svn_project is None:
return
commit_dialog = self.app.getSingleton( self.commit_key )
message = commit_dialog.getMessage()
all_commit_files = commit_dialog.getAllCommitIncludedFiles()
        # close will cause the commitClosed signal to be emitted
commit_dialog.close()
msg = T_('Check in %s') % (svn_project.projectName(),)
self.log.infoheader( msg )
self.setStatusAction( msg )
self.progress.start( T_('Sent %(count)d'), 0 )
yield self.switchToBackground
try:
commit_id = svn_project.cmdCommit( message, all_commit_files )
headline = message.split('\n')[0]
self.log.info( T_('Committed "%(headline)s" as %(commit_id)s') %
{'headline': headline, 'commit_id': commit_id} )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e, 'Cannot Check in %s' % (svn_project.projectName(),) )
yield self.switchToForeground
self.setStatusAction( T_('Ready') )
self.progress.end()
# take account of any changes
yield from self.top_window.updateTableView_Bg()
def __commitClosed( self ):
if self.app.hasSingleton( self.commit_key ):
self.app.popSingleton( self.commit_key )
#============================================================
#
# actions for commit dialog
#
#============================================================
@thread_switcher
def tableActionSvnAddAndInclude_Bg( self, checked=None ):
def execute_function( svn_project, filename ):
try:
svn_project.cmdAdd( filename )
self.main_window.addCommitIncludedFile( filename )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e )
yield from self._tableActionSvnCmd_Bg( execute_function )
@thread_switcher
def tableActionSvnRevertAndExclude_Bg( self, checked=None ):
def execute_function( svn_project, filename ):
try:
svn_project.cmdRevert( filename )
self.main_window.removeCommitIncludedFile( filename )
except wb_svn_project.ClientError as e:
svn_project.logClientError( e )
return
def are_you_sure( all_filenames ):
return wb_common_dialogs.WbAreYouSureRevert( self.main_window, all_filenames )
yield from self._tableActionSvnCmd_Bg( execute_function, are_you_sure )
@thread_switcher
def tableActionCommitInclude_Bg( self, checked=None ):
all_file_states = self.tableSelectedAllFileStates()
if len(all_file_states) == 0:
return
for entry in all_file_states:
if checked:
self.main_window.addCommitIncludedFile( entry.relativePath() )
else:
self.main_window.removeCommitIncludedFile( entry.relativePath() )
# take account of the changes
yield from self.top_window.updateTableView_Bg()
def checkerActionCommitInclude( self ):
all_file_states = self.tableSelectedAllFileStates()
if len(all_file_states) == 0:
return False
for entry in all_file_states:
if entry.relativePath() not in self.main_window.all_included_files:
return False
return True
#============================================================
#
# actions for log history view
#
#============================================================
def enablerTableSvnDiffLogHistory( self ):
mw = self.main_window
focus = mw.focusIsIn()
if focus == 'commits':
return len(mw.current_commit_selections) in (1,2)
elif focus == 'changes':
if len(mw.current_file_selection) == 0:
return False
node = mw.changes_model.changesNode( mw.current_file_selection[0] )
            return node.action == 'M'
else:
assert False, 'focus not as expected: %r' % (focus,)
def tableActionSvnDiffLogHistory( self ):
mw = self.main_window
focus = mw.focusIsIn()
try:
if focus == 'commits':
self.diffLogHistory()
elif focus == 'changes':
self.diffFileChanges()
else:
assert False, 'focus not as expected: %r' % (focus,)
except wb_svn_project.ClientError as e:
mw.svn_project.logClientError( e )
def enablerTableSvnAnnotateLogHistory( self ):
mw = self.main_window
focus = mw.focusIsIn()
if focus == 'commits':
return len(mw.current_commit_selections) in (1,2)
else:
return False
def diffLogHistory( self ):
mw = self.main_window
filestate = mw.svn_project.getFileState( mw.filename )
if len( mw.current_commit_selections ) == 1:
# diff working against rev
rev_new = mw.svn_project.svn_rev_working
rev_old = mw.log_model.revForRow( mw.current_commit_selections[0] )
date_old = mw.log_model.dateStringForRow( mw.current_commit_selections[0] )
title_vars = {'rev_old': rev_old.number
,'date_old': date_old}
heading_new = T_('Working')
heading_old = T_('r%(rev_old)d date %(date_old)s') % title_vars
else:
rev_new = mw.log_model.revForRow( mw.current_commit_selections[0] )
date_new = mw.log_model.dateStringForRow( mw.current_commit_selections[0] )
rev_old = mw.log_model.revForRow( mw.current_commit_selections[-1] )
date_old = mw.log_model.dateStringForRow( mw.current_commit_selections[-1] )
title_vars = {'rev_old': rev_old.number
,'date_old': date_old
,'rev_new': rev_new.number
,'date_new': date_new}
heading_new = T_('r%(rev_new)d date %(date_new)s') % title_vars
heading_old = T_('r%(rev_old)d date %(date_old)s') % title_vars
if filestate.isDir():
title = T_('Diff %s') % (mw.filename,)
text = mw.svn_project.cmdDiffRevisionVsRevision( mw.filename, rev_old, rev_new )
self.showDiffText( title, text.split('\n') )
else:
title = T_('Diff %s') % (mw.filename,)
if rev_new == mw.svn_project.svn_rev_working:
text_new = filestate.getTextLinesWorking()
else:
text_new = filestate.getTextLinesForRevision( rev_new )
text_old = filestate.getTextLinesForRevision( rev_old )
self.diffTwoFiles(
title,
text_old,
text_new,
heading_old,
heading_new
)
def diffFileChanges( self ):
mw = self.main_window
node = mw.changes_model.changesNode( mw.current_file_selection[0] )
filename = node.path
rev_new = mw.log_model.revForRow( mw.current_commit_selections[0] ).number
rev_old = rev_new - 1
heading_new = 'r%d' % (rev_new,)
heading_old = 'r%d' % (rev_old,)
title = T_('Diff %s') % (filename,)
info = mw.svn_project.cmdInfo( pathlib.Path('.') )
url = info[ 'repos_root_URL' ] + filename
text_new = mw.svn_project.getTextLinesForRevisionFromUrl( url, rev_new )
text_old = mw.svn_project.getTextLinesForRevisionFromUrl( url, rev_old )
self.diffTwoFiles(
title,
text_old,
text_new,
heading_old,
heading_new
)
def tableActionSvnAnnotateLogHistory( self ):
self.log.error( 'annotateLogHistory TBD' )
| StarcoderdataPython |
1728353 | import urllib.parse
from flask import request, abort, render_template
from flask_login import current_user
from wikked.views import add_auth_data, add_navigation_data
from wikked.web import app, get_wiki
from wikked.webimpl import url_from_viewarg
from wikked.webimpl.decorators import requires_permission
from wikked.webimpl.history import (
get_site_history, get_page_history,
read_page_rev, diff_revs, diff_page_revs)
@app.route('/read/special:/History')
@requires_permission('wikihistory')
def site_history():
wiki = get_wiki()
user = current_user.get_id()
after_rev = request.args.get('rev')
data = get_site_history(wiki, user, after_rev=after_rev)
last_rev = data['history'][-1]['rev_id']
data['first_page'] = '/special/history'
data['next_page'] = '/special/history?rev=%s' % last_rev
add_auth_data(data)
add_navigation_data(
'', data,
raw_url='/api/site-history')
return render_template('special-changes.html', **data)
@app.route('/hist/<path:url>')
@requires_permission('history')
def page_history(url):
wiki = get_wiki()
user = current_user.get_id()
url = url_from_viewarg(url)
data = get_page_history(wiki, user, url)
add_auth_data(data)
add_navigation_data(
url, data,
read=True, edit=True, inlinks=True, upload=True,
raw_url='/api/history/' + url.lstrip('/'))
return render_template('history-page.html', **data)
@app.route('/rev/<path:url>')
@requires_permission('history')
def page_rev(url):
rev = request.args.get('rev')
if rev is None:
abort(400)
raw_url_args = {'rev': rev}
wiki = get_wiki()
user = current_user.get_id()
url = url_from_viewarg(url)
data = read_page_rev(wiki, user, url, rev=rev)
add_auth_data(data)
add_navigation_data(
url, data,
read=True,
raw_url='/api/revision/%s?%s' % (
url.lstrip('/'),
urllib.parse.urlencode(raw_url_args)))
return render_template('revision-page.html', **data)
@app.route('/diff/<path:url>')
@requires_permission('history')
def diff_page(url):
rev1 = request.args.get('rev1')
rev2 = request.args.get('rev2')
raw = request.args.get('raw')
if rev1 is None:
abort(400)
raw_url_args = {'rev1': rev1}
if rev2:
raw_url_args['rev2'] = rev2
wiki = get_wiki()
user = current_user.get_id()
url = url_from_viewarg(url)
data = diff_page_revs(wiki, user, url,
rev1=rev1, rev2=rev2, raw=raw)
add_auth_data(data)
add_navigation_data(
url, data,
read=True,
raw_url='/api/diff/%s?%s' % (
url.lstrip('/'),
urllib.parse.urlencode(raw_url_args)))
return render_template('diff-page.html', **data)
@app.route('/diff_rev/<rev>')
@requires_permission('history')
def diff_revision(rev):
wiki = get_wiki()
user = current_user.get_id()
data = diff_revs(wiki, user, rev)
add_auth_data(data)
add_navigation_data(
'', data)
return render_template('diff-rev.html', **data)
| StarcoderdataPython |
3368214 | import boto3
client = boto3.client("config")
response = client.put_config_rule(
ConfigRule={
"ConfigRuleName": "ec2-stopped-instance",
"Source": {"Owner": "AWS", "SourceIdentifier": "EC2_STOPPED_INSTANCE",},
"InputParameters": '{"AllowedDays":"30"}',
}
)
print(response)
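# A hedged follow-up sketch: read the rule back to confirm it was created.
# describe_config_rules is part of the same boto3 "config" client.
rule = client.describe_config_rules(ConfigRuleNames=["ec2-stopped-instance"])
print(rule["ConfigRules"][0]["ConfigRuleState"])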
| StarcoderdataPython |
1743351 | <reponame>mpipool/mpipool<gh_stars>1-10
from errr.tree import make_tree as _t, exception as _e
_t(globals(), MPIPoolError=_e(MPIProcessError=_e()))
| StarcoderdataPython |
4828628 | import xarray as xr
import xgcm
import numpy as np
import warnings
from xnemogcm import open_nemo_and_domain_cfg
import pytest
from xbasin.operations import Grid_ops
_metrics = {
("X",): ["e1t", "e1u", "e1v", "e1f"], # X distances
("Y",): ["e2t", "e2u", "e2v", "e2f"], # Y distances
("Z",): ["e3t_0", "e3u_0", "e3v_0", "e3f_0", "e3w_0"], # Z distances
}
def _assert_same_position(grid_ops, data, position):
    check = grid_ops._matching_pos(data, position)
    if isinstance(check, list):
        assert all(check)
    else:
        assert check
def test_shift_position_to_T():
#ds = open_nemo_and_domain_cfg(datadir='data')
domcfg = xr.open_dataset("data/xnemogcm.domcfg_to.nc")
nemo_ds = xr.open_dataset("data/xnemogcm.nemo.nc")
grid = xgcm.Grid(domcfg,metrics=_metrics,periodic=False)
grid_ops = Grid_ops(grid)
u_fr = nemo_ds.uo
v_fr = nemo_ds.vo
w_fr = nemo_ds.woce
    u_3d_fr = [u_fr, v_fr, w_fr]
    # Test single variables
    u_to = grid_ops._shift_position(u_fr, output_position='T')
    v_to = grid_ops._shift_position(v_fr, output_position='T')
    w_to = grid_ops._shift_position(w_fr, output_position='T')
    u_3d_to = grid_ops._shift_position(u_3d_fr, output_position='T')
    # grid_ops._matching_pos([u_to, v_to, w_to, u_3d_to], 'T')
    _assert_same_position(grid_ops, [u_to, v_to, w_to], 'T')
if __name__ == "__main__":
pass | StarcoderdataPython |
4806250 | <filename>tensorflow/python/ops/image_ops.py<gh_stars>1-10
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Image processing and decoding ops.
See the [Images](https://tensorflow.org/api_guides/python/image) guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import linalg_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_image_ops import *
from tensorflow.python.ops.image_ops_impl import *
# pylint: enable=wildcard-import
# TODO(drpng): remove these once internal use has discontinued.
# pylint: disable=unused-import
from tensorflow.python.ops.image_ops_impl import _Check3DImage
from tensorflow.python.ops.image_ops_impl import _ImageDimensions
# pylint: enable=unused-import
_IMAGE_DTYPES = frozenset([
dtypes.uint8, dtypes.int32, dtypes.int64, dtypes.float16, dtypes.float32,
dtypes.float64
])
def flat_transforms_to_matrices(transforms):
"""Converts `tf.contrib.image` projective transforms to affine matrices.
Note that the output matrices map output coordinates to input coordinates. For
the forward transformation matrix, call `tf.linalg.inv` on the result.
Args:
transforms: Vector of length 8, or batches of transforms with shape `(N,
8)`.
Returns:
3D tensor of matrices with shape `(N, 3, 3)`. The output matrices map the
*output coordinates* (in homogeneous coordinates) of each transform to the
corresponding *input coordinates*.
Raises:
ValueError: If `transforms` have an invalid shape.
"""
with ops.name_scope("flat_transforms_to_matrices"):
transforms = ops.convert_to_tensor(transforms, name="transforms")
if transforms.shape.ndims not in (1, 2):
raise ValueError("Transforms should be 1D or 2D, got: %s" % transforms)
# Make the transform(s) 2D in case the input is a single transform.
transforms = array_ops.reshape(transforms, constant_op.constant([-1, 8]))
num_transforms = array_ops.shape(transforms)[0]
# Add a column of ones for the implicit last entry in the matrix.
return array_ops.reshape(
array_ops.concat(
[transforms, array_ops.ones([num_transforms, 1])], axis=1),
constant_op.constant([-1, 3, 3]))
def matrices_to_flat_transforms(transform_matrices):
"""Converts affine matrices to `tf.contrib.image` projective transforms.
Note that we expect matrices that map output coordinates to input coordinates.
To convert forward transformation matrices, call `tf.linalg.inv` on the
matrices and use the result here.
Args:
transform_matrices: One or more affine transformation matrices, for the
reverse transformation in homogeneous coordinates. Shape `(3, 3)` or `(N,
3, 3)`.
Returns:
2D tensor of flat transforms with shape `(N, 8)`, which may be passed into
`tf.contrib.image.transform`.
Raises:
ValueError: If `transform_matrices` have an invalid shape.
"""
with ops.name_scope("matrices_to_flat_transforms"):
transform_matrices = ops.convert_to_tensor(
transform_matrices, name="transform_matrices")
if transform_matrices.shape.ndims not in (2, 3):
raise ValueError("Matrices should be 2D or 3D, got: %s" %
transform_matrices)
# Flatten each matrix.
transforms = array_ops.reshape(transform_matrices,
constant_op.constant([-1, 9]))
# Divide each matrix by the last entry (normally 1).
transforms /= transforms[:, 8:9]
return transforms[:, :8]
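# A minimal round-trip sketch (assumed usage, not part of the original
# module): the identity transform survives flat -> matrix -> flat conversion.
#
#   flat = constant_op.constant([[1., 0., 0., 0., 1., 0., 0., 0.]])
#   mats = flat_transforms_to_matrices(flat)    # shape (1, 3, 3)
#   back = matrices_to_flat_transforms(mats)    # shape (1, 8)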
@ops.RegisterGradient("ImageProjectiveTransformV2")
def _image_projective_transform_grad(op, grad):
"""Computes the gradient for ImageProjectiveTransform."""
images = op.inputs[0]
transforms = op.inputs[1]
interpolation = op.get_attr("interpolation")
image_or_images = ops.convert_to_tensor(images, name="images")
transform_or_transforms = ops.convert_to_tensor(
transforms, name="transforms", dtype=dtypes.float32)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
if len(transform_or_transforms.get_shape()) == 1:
transforms = transform_or_transforms[None]
elif len(transform_or_transforms.get_shape()) == 2:
transforms = transform_or_transforms
else:
raise TypeError("Transforms should have rank 1 or 2.")
# Invert transformations
transforms = flat_transforms_to_matrices(transforms=transforms)
inverse = linalg_ops.matrix_inverse(transforms)
transforms = matrices_to_flat_transforms(inverse)
output = gen_image_ops.image_projective_transform_v2(
images=grad,
transforms=transforms,
output_shape=array_ops.shape(image_or_images)[1:3],
interpolation=interpolation)
return [output, None, None]
| StarcoderdataPython |
1691689 | #!/usr/bin/env python3
import csv
from io import StringIO
import scrape_common as sc
url = 'https://www.sg.ch/ueber-den-kanton-st-gallen/statistik/covid-19/_jcr_content/Par/sgch_downloadlist_729873930/DownloadListPar/sgch_download.ocFile/KantonSG_C19-Tests_download.csv'
data = sc.download(url, silent=True)
# strip the "header" / description lines
data = "\n".join(data.split("\n")[9:])
reader = csv.DictReader(StringIO(data), delimiter=';')
for row in reader:
td = sc.TestData(canton='SG', url=url)
td.start_date = row['Datum']
td.end_date = row['Datum']
td.positive_tests = row['Positiv (PCR)']
td.negative_tests = row['Negativ (PCR)']
td.total_tests = row['Total Tests']
if row['Positiv in % vom Total']:
td.positivity_rate = float(row['Positiv in % vom Total']) * 100
td.positivity_rate = round(10 * td.positivity_rate) / 10
print(td)
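# CSV columns assumed from the DictReader keys used above:
# Datum;Positiv (PCR);Negativ (PCR);Total Tests;Positiv in % vom Total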
| StarcoderdataPython |
3379731 | from nose.tools import assert_equal, assert_is_not_none, assert_almost_equal
from demagfacts import rectprism
# table for spheroid
# table = ((2.0, 0.17356),
# (3.0, 0.10871),
# (4.0, 0.075407),
# (5.0, 0.055821),
# (6.0, 0.043230),
# (7.0, 0.034609),
# (8.0, 0.028421),
# (9.0, 0.023816),
# (10.0, 0.020286),
# (11.0, 0.017515))
def test_cube():
assert_almost_equal(rectprism.dz(1., 1., 1.), 1.0/3.0)
def test_paper_table():
table = ((2.0, 0.19832),
(3.0, 0.14036),
(4.0, 0.10845),
(5.0, 0.088316),
(6.0, 0.074466),
(7.0, 0.064363),
(8.0, 0.056670),
(9.0, 0.050617),
(10.0, 0.045731),
(11.0, 0.041705))
for (p, Dz) in table:
yield assert_almost_equal, rectprism.dz(1.0, 1.0, p), Dz, 5
| StarcoderdataPython |
1608433 | <filename>pyxform/tests_v1/test_settings_auto_send_delete.py
from pyxform.tests_v1.pyxform_test_case import PyxformTestCase
class SettingsAutoSendDelete(PyxformTestCase):
def test_settings_auto_send_true(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | | |
| | type | name | label |
| | text | name | Name |
| settings | | | |
| | auto_send | | |
| | true | | |
""",
debug=True,
xml__contains=[
'<submission method="form-data-post" orx:auto-send="true"/>'
],
)
def test_settings_auto_delete_true(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | | |
| | type | name | label |
| | text | name | Name |
| settings | | | |
| | auto_delete | | |
| | true | | |
""",
debug=True,
xml__contains=[
'<submission method="form-data-post" orx:auto-delete="true"/>'
],
)
def test_settings_auto_send_delete_false(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | | |
| | type | name | label |
| | text | name | Name |
| settings | | | |
| | auto_delete | auto_send | |
| | false | false | |
""",
debug=True,
xml__contains=[
'<submission method="form-data-post" orx:auto-delete="false" orx:auto-send="false"/>'
],
) | StarcoderdataPython |
1702349 | <filename>lewis_emulators/rkndio/interfaces/stream_interface.py
from lewis.adapters.stream import StreamInterface
from lewis.utils.command_builder import CmdBuilder
from lewis.utils.replies import conditional_reply
class RkndioStreamInterface(StreamInterface):
# Commands that we expect via serial during normal operation
commands = {
CmdBuilder("get_idn").escape("*IDN?").eos().build(),
CmdBuilder("get_status").escape("STATUS").eos().build(),
CmdBuilder("get_error").escape("ERR").eos().build(),
CmdBuilder("get_d_i_state").escape("READ ").arg("2|3|4|5|6|7").eos().build(),
CmdBuilder("set_d_o_state").escape("WRITE ").arg("8|9|10|11|12|13").escape(" ").arg("FALSE|TRUE").eos().build()
}
in_terminator = "\r\n"
out_terminator = "\r\n"
def handle_error(self, request, error):
"""
Prints an error message if a command is not recognised.
Args:
request : Request.
error: The error that has occurred.
Returns:
None.
"""
self.log.error("An error occurred at request " + repr(request) + ": " + repr(error))
print("An error occurred at request {}: {}".format(request, error))
@conditional_reply("connected")
def get_idn(self):
return self._device.idn
@conditional_reply("connected")
def get_status(self):
return self._device.status
@conditional_reply("connected")
def get_error(self):
return self._device.error
@conditional_reply("connected")
def get_d_i_state(self, pin):
return self._device.get_input_state(pin)
@conditional_reply("connected")
def set_d_o_state(self, pin, state):
return self._device.set_output_state(pin, state)
| StarcoderdataPython |
1658521 | import rclpy
from rclpy.time import Time, Duration
from rclpy.node import Node
from std_msgs.msg import Header
from tf2_ros import LookupException, ExtrapolationException, ConnectivityException
from tf2_ros.buffer import Buffer
from tf2_ros.transform_broadcaster import TransformBroadcaster
from tf2_ros.transform_listener import TransformListener
from tf_transformations import euler_from_quaternion, quaternion_from_euler
from rclpy.qos import QoSPresetProfiles
from geometry_msgs.msg import TransformStamped
import numpy as np
from .filter_type import filters
class RigidBodyKalman(Node):
def __init__(self):
super().__init__('kalman_filter')
# Create tf2 broadcaster
self.pose_br = TransformBroadcaster(self)
# create a tf2 buffer and listener
self.buffer = Buffer()
TransformListener(self.buffer, self)
# Declare parameters
self.declare_parameter('verbose', 1)
self.declare_parameter('filter_type', 'const_accel')
self.declare_parameter('duration', False)
self.declare_parameter('state_indexes', '0,1,2,9,10,11')
# Get parameters
self.filter = filters[self.get_parameter('filter_type').get_parameter_value().string_value](1/60)
self.verbose = self.get_parameter('verbose').get_parameter_value().integer_value
self.duration = self.get_parameter('duration').get_parameter_value().bool_value
self.state_indexes = [int(i) for i in self.get_parameter('state_indexes').get_parameter_value().string_value.split(',')]
self.announcement = None
self.create_subscription(
Header,
'announcement',
self.set_announcement,
QoSPresetProfiles.get_from_short_key('sensor_data')
)
def set_announcement(self, msg):
self.announcement = msg
def callback(self, t):
euler = np.array(euler_from_quaternion([t.transform.rotation.x, t.transform.rotation.y, t.transform.rotation.z, t.transform.rotation.w]), dtype=np.float32)
trans = np.array([t.transform.translation.x, t.transform.translation.y, t.transform.translation.z], dtype=np.float32)
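        # Standard Kalman cycle: propagate the state forward, then correct
        # it with the measured 6-DOF pose (translation + Euler angles).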
self.filter.predict()
self.filter.update(np.concatenate((trans, euler)))
t.transform.translation.x = self.filter.x[self.state_indexes[0]]
t.transform.translation.y = self.filter.x[self.state_indexes[1]]
t.transform.translation.z = self.filter.x[self.state_indexes[2]]
euler = quaternion_from_euler(self.filter.x[self.state_indexes[3]], self.filter.x[self.state_indexes[4]], self.filter.x[self.state_indexes[5]])
t.transform.rotation.x = euler[0]
t.transform.rotation.y = euler[1]
t.transform.rotation.z = euler[2]
t.child_frame_id = (self.get_namespace() + '/filtered_estimation').lstrip('/')
self.pose_br.sendTransform(t)
def main(args=None):
rclpy.init(args=args)
node = RigidBodyKalman()
while rclpy.ok():
rclpy.spin_once(node)
if node.announcement is None: continue
try:
t = node.buffer.lookup_transform('world', (node.get_namespace() + '/estimated_pose').lstrip('/'), node.announcement.stamp)
except (LookupException, ConnectivityException, ExtrapolationException):
pass
except TypeError:
node.get_logger().info(str(node.announcement))
else:
node.callback(t)
node.announcement = None
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main() | StarcoderdataPython |
3330670 | <reponame>ValentynaGorbachenko/cd2
'''
Given two arrays, write a function to compute their intersection.
Example 1:
Input: nums1 = [1,2,2,1], nums2 = [2,2]
Output: [2]
Example 2:
Input: nums1 = [4,9,5], nums2 = [9,4,9,8,4]
Output: [9,4]
Note:
Each element in the result must be unique.
The result can be in any order.
'''
def intersect(nums1, nums2):
res = []
if len(nums1)<len(nums2):
set_num = set(nums1)
working_arr = nums2
else:
set_num = set(nums2)
working_arr = nums1
for key in set_num:
if int(key) in working_arr:
res.append(int(key))
return res | StarcoderdataPython |
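if __name__ == "__main__":
    # Sanity checks taken from the examples above (result order may differ).
    print(intersect([1, 2, 2, 1], [2, 2]))        # expected: [2]
    print(intersect([4, 9, 5], [9, 4, 9, 8, 4]))  # expected: [9, 4] in any order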
72588 | <filename>tests/djvu_tests.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Unit tests for djvutext.py script."""
#
# (C) Pywikibot team, 2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
import os
import subprocess
from tests import _data_dir
from tests.aspects import unittest, TestCase
from pywikibot.tools.djvu import DjVuFile
_djvu_dir = 'djvu'
class TestDjVuFile(TestCase):
"""Test DjVuFile class."""
net = False
file_djvu_not_existing = os.path.join(_data_dir, _djvu_dir, 'not_existing.djvu')
file_djvu = os.path.join(_data_dir, _djvu_dir, 'myfile.djvu')
file_djvu_wo_text = os.path.join(_data_dir, _djvu_dir, 'myfile_wo_text.djvu')
test_txt = 'A file with non-ASCII characters, \nlike é or ç'
@classmethod
def setUpClass(cls):
"""SetUp tests."""
super(TestDjVuFile, cls).setUpClass()
try:
subprocess.Popen(['djvudump'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
raise unittest.SkipTest('djvulibre library not installed.')
def test_file_existance(self):
"""Test file existence checks."""
djvu = DjVuFile(self.file_djvu)
self.assertEqual(self.file_djvu, djvu.file_djvu)
self.assertRaises(IOError, DjVuFile, self.file_djvu_not_existing)
def test_number_of_images(self):
"""Test page number generator."""
djvu = DjVuFile(self.file_djvu)
self.assertEqual(djvu.number_of_images(), 4)
def test_has_text(self):
"""Test if djvu file contains text."""
djvu = DjVuFile(self.file_djvu)
self.assertTrue(djvu.has_text())
djvu = DjVuFile(self.file_djvu_wo_text)
self.assertFalse(djvu.has_text())
def test_get_existing_page_number(self):
"""Test if djvu file contains text."""
djvu = DjVuFile(self.file_djvu)
self.assertTrue(djvu.has_text())
txt = djvu.get_page(1)
self.assertEqual(txt, self.test_txt)
def test_get_not_existing_page_number(self):
"""Test if djvu file contains text."""
djvu = DjVuFile(self.file_djvu)
self.assertTrue(djvu.has_text())
self.assertRaises(ValueError, djvu.get_page, 100)
def test_get_not_existing_page(self):
"""Test if djvu file contains text."""
djvu = DjVuFile(self.file_djvu_wo_text)
self.assertFalse(djvu.has_text())
self.assertRaises(ValueError, djvu.get_page, 100)
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
| StarcoderdataPython |
1632742 | <reponame>formiel/speech-translation
"""
Organize multilingual data to prepare for training
"""
import os
import re
import shutil
import json
import subprocess
import argparse
SPLITS = ['train_sp', 'dev', 'tst-COMMON', 'tst-HE']
def get_info(tgt_langs="de_es_fr_it_nl_pt_ro_ru", use_lid=True, use_joint_dict=True):
pairs = ['en-'+ l for l in tgt_langs.split('_')]
if len(pairs) > 1:
assert use_lid
prefix = 'dict1'
if not use_joint_dict:
prefix = 'dict2'
suffix = f'lgs_{tgt_langs}'
if len(pairs) == 8:
suffix = 'lgs_all8'
elif len(pairs) == 1:
suffix = f'lgs_{tgt_langs}_id_{use_lid}'
return pairs, prefix, suffix
def create_data_links_jsons(input_dir, output_dir,
tgt_langs="de_es_fr_it_nl_pt_ro_ru",
use_lid=True, use_joint_dict=True,
nbpe_src=8000, nbpe=8000):
"""
Create symbolic links to save jsons in the following structure:
output_dir/tgt_langs/use_${prefix}/src${nbpe_src}_tgt${nbpe}/${split}/${lang_pair}.json
where:
- ${split} is "train_sp", "dev", "tst-COMMON", or "tst-HE"
- ${lang_pair} is "en-de", "en-es", etc.
"""
pairs, prefix, suffix = get_info(tgt_langs=tgt_langs, use_lid=use_lid, use_joint_dict=use_joint_dict)
assert len(pairs) > 1
output_dir = os.path.join(output_dir, tgt_langs, f'use_{prefix}', f'src{nbpe_src}_tgt{nbpe}')
for s in SPLITS:
os.makedirs(os.path.join(output_dir, s), exist_ok=True)
for p in pairs:
if use_joint_dict:
fname = f'data_{prefix}_{p}_bpe{nbpe}_tc_{suffix}.json'
else:
fname = f'data_{prefix}_{p}_bpe_src{nbpe_src}lc.rm_tgt{nbpe}tc_{suffix}.json'
src = os.path.join(input_dir, '{}.{}.{}'.format(s, p, p.split('-')[-1]), "deltafalse", fname)
dst = os.path.join(output_dir, s, '{}.json'.format(p))
print('{} -> {}'.format(src, dst))
subprocess.call(["ln", "-s", src, dst])
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--tgt-langs', default='de_es_fr_it_nl_pt_ro_ru', type=str,
help='Target languages seperated by _')
parser.add_argument('--input-dir', default='./dump', type=str,
help='Path to directory where features are saved')
parser.add_argument('--output-dir', type=str,
help='Path to directory to save symlinks')
parser.add_argument('--use-lid', action='store_true',
help='Use language ID in the target sequence')
parser.add_argument('--use-joint-dict', action='store_true',
help='Use joint dictionary for source and target')
parser.add_argument('--nbpe', type=int, default=8000)
parser.add_argument('--nbpe-src', type=int, default=8000)
args = parser.parse_args()
create_data_links_jsons(args.input_dir, args.output_dir,
tgt_langs=args.tgt_langs,
use_lid=args.use_lid,
use_joint_dict=args.use_joint_dict,
nbpe=args.nbpe,
nbpe_src=args.nbpe_src)
if __name__ == "__main__":
main() | StarcoderdataPython |
150061 | <reponame>renmcc/bk-PaaS
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from common.base_validators import BaseValidator, ValidationError
from esb.bkcore.models import UserAuthToken
from esb.utils.func_ctrl import FunctionControllerClient
class UserAuthValidator(BaseValidator):
"""
validate user
"""
def __init__(self, *args, **kwargs):
super(UserAuthValidator, self).__init__(*args, **kwargs)
def validate(self, request):
kwargs = request.g.kwargs
app_code = request.g.app_code
access_token = kwargs.get('bk_access_token')
if access_token:
token_info = self.validate_access_token(app_code, access_token)
self.sync_current_username(request, token_info['username'])
return
bk_token = kwargs.get('bk_token')
if bk_token:
from components.bk.apis.bk_login.is_login import IsLogin
check_result = IsLogin().invoke(kwargs={'bk_token': bk_token}, request_id=request.g.request_id)
if not check_result['result']:
raise ValidationError('User authentication failed, please check if the bk_token is valid')
self.sync_current_username(request, check_result.get('data', {}).get('username', ''))
return
username = kwargs.get('bk_username') or kwargs.get('username')
if username and FunctionControllerClient.is_skip_user_auth(app_code):
self.sync_current_username(request, username)
return
raise ValidationError('User authentication failed, please provide a valid user identity, such as bk_token, bk_username') # noqa
def sync_current_username(self, request, username):
request.g.current_user_username = username
def validate_bk_token(self):
pass
def validate_access_token(self, app_code, access_token):
if not app_code:
raise ValidationError('APP Code [bk_app_code] cannot be empty')
if not access_token:
raise ValidationError('User TOKEN [bk_access_token] cannot be empty')
user_auth_token = UserAuthToken.objects.filter(app_code=app_code, auth_token=access_token).first()
if not user_auth_token:
raise ValidationError('The specified user TOKEN [bk_access_token] does not exist')
if user_auth_token.has_expired():
raise ValidationError('The specified user TOKEN [bk_access_token] has expired, please apply for authorization again') # noqa
return user_auth_token.get_info()
| StarcoderdataPython |
3285931 | <filename>metrics/metrics.py
from sklearn.metrics import roc_auc_score, auc, precision_recall_curve, recall_score, matthews_corrcoef, f1_score, \
average_precision_score
from lifelines.utils import concordance_index
import numpy as np
from metrics.timeroc.timeROC import timeROC
from metrics.iauc.integrateAUC import IntegrateAUC
class Calculate():
def __init__(self):
pass
## Calculate the Area Under the ROC Curve
def auc(self, predicted, actual):
return roc_auc_score(actual, predicted)
## Calculate the balanced accuracy [ i.e., ( sensitivity + specificity ) / 2 ]
def bac(self, predicted, actual):
# https://github.com/rhiever/tpot/issues/108
# https://github.com/scikit-learn/scikit-learn/issues/6747
return recall_score(actual, predicted, average='macro')
## Calculate Matthew's Correlation Coefficient
def mcc(self, predicted, actual):
return matthews_corrcoef(actual, predicted)
## Calculate F1 score
def f1(self, predicted, actual):
return f1_score(actual, predicted)
## Calculate precision recall based AUC
def prAUC(self, predicted, actual):
# sklearn.metrics.precision_recall_curve(y_true, probas_pred)
fpr, tpr, thresholds = precision_recall_curve(actual, predicted)
prAUC = auc(fpr, tpr)
# alternatively
# prAUC = average_precision_score(actual, predicted)
return prAUC
## Calculate TimeROC
def timeROC(self, predicted, D_PFS, D_PFS_FLAG, times=30.5 * np.asarray([14, 16, 18, 20, 22])):
tempAUC = timeROC(T=D_PFS, delta=D_PFS_FLAG, marker=predicted, cause=1, times=times)
return tempAUC['AUC']
## Calculate integrated AUC
def integratedAUC(self, predicted, D_PFS, D_PFS_FLAG, times=30.5 * np.asarray([14, 16, 18, 20, 22])):
tempAUC = timeROC(T=D_PFS, delta=D_PFS_FLAG, marker=predicted, cause=1, times=times)
iaucs = IntegrateAUC(tempAUC['AUC'], tempAUC['times'], tempAUC['survProb'], max(tempAUC['times']))
return iaucs
## Calculate concordance Index
def concordanceIndex(self, predicted, D_PFS, D_PFS_FLAG):
return concordance_index(event_times=D_PFS, predicted_event_times=predicted, event_observed=D_PFS_FLAG)
def weightedAverage(self, metric, N):
## function to calculate the weighted average,
# takes in array of one metric for each validation study (example auc)
# and the N for each of the studies
return sum(metric * N) / sum(N)
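    # Hypothetical example: weightedAverage(np.array([0.8, 0.6]),
    # np.array([100, 300])) -> (0.8*100 + 0.6*300) / 400 = 0.65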
## Wrapper for calculating all metrics:
# rawscore = continuous prediciton score from participant
# highrisk = binary predicted score from participant
# newProg = flag for true PFS_FLAG and PFS < 18mo
# PFStime = true PFS time
# progression = true PFS_FLAGs
def metrics(self, rawscore, highrisk, PFStime, pfs_flag):
cutoff = 18
HR1 = np.where((PFStime < cutoff * 30.5) & (pfs_flag == 1), 1, 0)
HR2 = np.where(PFStime >= cutoff * 30.5, 0, 1)
        assert np.array_equal(HR1, HR2)
        newProg = HR1
progression = pfs_flag
auc = self.auc(rawscore, newProg)
bac = self.bac(highrisk, newProg)
mcc = self.mcc(highrisk, newProg)
f1 = self.f1(highrisk, newProg)
timeROC = self.timeROC(rawscore, PFStime, progression)
iAUC = self.integratedAUC(rawscore, PFStime, progression, times=30.5 * np.arange(12, 24, .25))
# Remove null
# rawscore_nona = rawscore[! is.na(HR)]
# newProg_nona = newProg[! is.na(HR)]
# prAUC = calculate.prAUC(rawscore_nona, newProg_nona)
prAUC = 0
return [auc, bac, mcc, f1, timeROC, iAUC, prAUC]
##### simple wrapper to faciliate bootstrapping later
def weightedMetrics(self, singleSubPredMat, PFStime, pfs_flag, study=None):
"""
:param singleSubPredMat: Full prediction matrix with columns: study,patient,predictionscore,highriskflag
:param PFStime: actual time to failure
:param pfs_flag: actual observed event flag
:param study: the study str
:return:
"""
rawscore = singleSubPredMat['predictionscore']
highrisk = singleSubPredMat['highriskflag']
cutoff = 18
HR = np.where((PFStime < cutoff * 30.5) & (pfs_flag == 1), 1, 0)
# HR2 = np.where(PFStime >= cutoff * 30.5, 0, 1)
newProg = HR
progression = pfs_flag
N = study[progression == 1]
for s in singleSubPredMat['study'].unique():
inds = singleSubPredMat['study'] == s
auc = (self.auc(rawscore[inds], newProg[inds]))
bac = self.bac(highrisk[inds], newProg[inds])
mcc = self.mcc(highrisk[inds], newProg[inds])
f1 = self.f1(highrisk[inds], newProg[inds])
timeROC = self.timeROC(rawscore[inds], PFStime[inds], progression[inds])
iAUC = self.integratedAUC(rawscore[inds], PFStime[inds], progression[inds],
times=30.5 * np.arange(12, 24, .25))
# Remove nulls for prAUC
# rawscore_nona = rawscore[inds][! is.na(HR[inds])]
# newProg_nona = newProg[inds][! is.na(HR[inds])]
# prAUC = self.prAUC(rawscore_nona, newProg_nona)
prAUC = []
print("study: " + s, [auc, bac, mcc, f1, iAUC])
# auc = self.weightedAverage(auc, N)
# bac = self.weightedAverage(bac, N)
# mcc = self.weightedAverage(mcc, N)
# f1 = self.weightedAverage(f1, N)
# timeROC = self.weightedAverage(timeROC, N)
# iAUC = self.weightedAverage(iAUC, N)
# prAUC = self.weightedAverage(prAUC, N)
return [auc, bac, mcc, f1, iAUC, prAUC]
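# Minimal usage sketch with synthetic inputs (all values hypothetical):
#
#   calc = Calculate()
#   scores = np.random.rand(50)                  # continuous risk scores
#   pfs = np.random.randint(100, 1200, size=50)  # PFS times in days
#   flags = np.random.randint(0, 2, size=50)     # progression flags
#   print(calc.concordanceIndex(scores, pfs, flags))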
| StarcoderdataPython |
170677 | from openslides.utils.exceptions import OpenSlidesError
class WorkflowError(OpenSlidesError):
"""Exception raised when errors in a workflow or state accure."""
pass
| StarcoderdataPython |
1833 | """ Defines the PolygonPlot class.
"""
from __future__ import with_statement
# Major library imports
import numpy as np
# Enthought library imports.
from enable.api import LineStyle, black_color_trait, \
transparent_color_trait
from kiva.agg import points_in_polygon
from traits.api import Enum, Float, Tuple, Property, cached_property, \
on_trait_change
# Local imports.
from base_xy_plot import BaseXYPlot
class PolygonPlot(BaseXYPlot):
""" Plots a polygon in dataspace.
Assuming that the index and value mappers are linear mappers, and that
"index" corresponds to X-coordinates and "value" corresponds to
Y-coordinates, the points are arranged in a counter-clockwise fashion.
The polygon is closed automatically, so there is no need to reproduce
the first point as the last point.
Nonlinear mappers are possible, but the results may be unexpected. Only the
data-space points are mapped in a nonlinear fashion. Straight lines
connecting them in a linear screen-space become curved in a nonlinear
screen-space; however, the drawing still contains straight lines in
screen-space.
If you don't want the edge of the polygon to be drawn, set **edge_color**
to transparent; don't try to do this by setting **edge_width** to 0. In
some drawing systems, such as PostScript, a line width of 0 means to make
the line as small as possible while still putting ink on the page.
"""
# The color of the line on the edge of the polygon.
edge_color = black_color_trait
# The thickness of the edge of the polygon.
edge_width = Float(1.0)
# The line dash style for the edge of the polygon.
edge_style = LineStyle
# The color of the face of the polygon.
face_color = transparent_color_trait
# Override the hittest_type trait inherited from BaseXYPlot
hittest_type = Enum("poly", "point", "line")
# The RGBA tuple for rendering edges. It is always a tuple of length 4.
# It has the same RGB values as edge_color_, and its alpha value is the
# alpha value of self.edge_color multiplied by self.alpha.
effective_edge_color = Property(Tuple, depends_on=['edge_color', 'alpha'])
# The RGBA tuple for rendering the face. It is always a tuple of length 4.
# It has the same RGB values as face_color_, and its alpha value is the
# alpha value of self.face_color multiplied by self.alpha.
effective_face_color = Property(Tuple, depends_on=['face_color', 'alpha'])
#----------------------------------------------------------------------
# Private 'BaseXYPlot' interface
#----------------------------------------------------------------------
def _gather_points(self):
""" Collects the data points that are within the bounds of the plot and
caches them.
"""
if self._cache_valid:
return
index = self.index.get_data()
value = self.value.get_data()
if not self.index or not self.value:
return
if len(index) == 0 or len(value) == 0 or len(index) != len(value):
self._cached_data_pts = []
self._cache_valid = True
return
points = np.transpose(np.array((index,value)))
self._cached_data_pts = points
self._cache_valid = True
def _render(self, gc, points):
""" Renders an Nx2 array of screen-space points as a polygon.
"""
with gc:
gc.clip_to_rect(self.x, self.y, self.width, self.height)
gc.set_stroke_color(self.effective_edge_color)
gc.set_line_width(self.edge_width)
gc.set_line_dash(self.edge_style_)
gc.set_fill_color(self.effective_face_color)
gc.lines(points)
gc.close_path()
gc.draw_path()
def _render_icon(self, gc, x, y, width, height):
""" Renders a representation of this plot as an icon into the box
defined by the parameters.
Used by the legend.
"""
with gc:
gc.set_stroke_color(self.effective_edge_color)
gc.set_line_width(self.edge_width)
gc.set_fill_color(self.effective_face_color)
if hasattr(self, 'line_style_'):
gc.set_line_dash(self.line_style_)
gc.draw_rect((x,y,width,height))
return
def hittest(self, screen_pt, threshold=7.0, return_distance=False):
""" Performs point-in-polygon testing or point/line proximity testing.
If self.hittest_type is "line" or "point", then behaves like the
parent class BaseXYPlot.hittest().
If self.hittest_type is "poly", then returns True if the given
point is inside the polygon, and False otherwise.
"""
if self.hittest_type in ("line", "point"):
return BaseXYPlot.hittest(self, screen_pt, threshold, return_distance)
data_pt = self.map_data(screen_pt, all_values=True)
index = self.index.get_data()
value = self.value.get_data()
poly = np.vstack((index,value)).T
if points_in_polygon([data_pt], poly)[0] == 1:
return True
else:
return False
#------------------------------------------------------------------------
# Event handlers
#------------------------------------------------------------------------
@on_trait_change('edge_color, edge_width, edge_style, face_color, alpha')
def _attributes_changed(self):
self.invalidate_draw()
self.request_redraw()
#------------------------------------------------------------------------
# Property getters
#------------------------------------------------------------------------
@cached_property
def _get_effective_edge_color(self):
if len(self.edge_color_) == 4:
edge_alpha = self.edge_color_[-1]
else:
edge_alpha = 1.0
c = self.edge_color_[:3] + (edge_alpha * self.alpha,)
return c
@cached_property
def _get_effective_face_color(self):
if len(self.face_color_) == 4:
face_alpha = self.face_color_[-1]
else:
face_alpha = 1.0
c = self.face_color_[:3] + (face_alpha * self.alpha,)
return c
| StarcoderdataPython |
4833726 | <filename>priv/python2/erlport/tests/stdio_tests.py
# Copyright (c) 2009-2013, <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import unittest
from erlport import Atom
from erlport.stdio import RedirectedStdin, RedirectedStdout
class TestPort(object):
def write(self, data):
return data
class RedirectedStdinTestCase(unittest.TestCase):
def test_methods(self):
stdin = RedirectedStdin()
self.assertEqual(0, stdin.fileno())
self.assertEqual(True, stdin.isatty())
self.assertEqual(None, stdin.flush())
self.assertEqual(None, stdin.close())
self.assertRaises(ValueError, stdin.next)
self.assertRaises(ValueError, stdin.read)
self.assertRaises(ValueError, stdin.readline)
self.assertRaises(ValueError, stdin.readlines)
self.assertRaises(ValueError, stdin.xreadlines)
self.assertRaises(IOError, stdin.seek, 0)
self.assertRaises(IOError, stdin.tell)
self.assertRaises(IOError, stdin.truncate)
self.assertRaises(IOError, stdin.write, "data")
self.assertRaises(IOError, stdin.writelines, ["da", "ta"])
def test_attributes(self):
stdin = RedirectedStdin()
self.assertEqual(True, stdin.closed)
self.assertEqual("UTF-8", stdin.encoding)
self.assertEqual(None, stdin.errors)
self.assertEqual("r", stdin.mode)
self.assertEqual("<stdin>", stdin.name)
self.assertEqual(None, stdin.newlines)
self.assertEqual(False, stdin.softspace)
class RedirectedStdoutTestCase(unittest.TestCase):
def test_write(self):
stdout = RedirectedStdout(TestPort())
self.assertEqual((Atom("P"), "data"), stdout.write("data"))
self.assertRaises(TypeError, stdout.write, 1234)
def test_writelines(self):
stdout = RedirectedStdout(TestPort())
self.assertEqual((Atom("P"), "data"), stdout.writelines(["da", "ta"]))
self.assertRaises(TypeError, stdout.writelines, ["da", 1234])
def test_close(self):
stdout = RedirectedStdout(TestPort())
self.assertEqual(False, stdout.closed)
self.assertEqual(None, stdout.close())
self.assertEqual(True, stdout.closed)
self.assertEqual(None, stdout.close())
self.assertRaises(ValueError, stdout.write, "data")
self.assertRaises(ValueError, stdout.writelines, ["da", "ta"])
def test_methods(self):
stdout = RedirectedStdout(TestPort())
self.assertEqual(1, stdout.fileno())
self.assertEqual(True, stdout.isatty())
self.assertEqual(None, stdout.flush())
self.assertRaises(IOError, stdout.next)
self.assertRaises(IOError, stdout.read)
self.assertRaises(IOError, stdout.readline)
self.assertRaises(IOError, stdout.readlines)
self.assertRaises(IOError, stdout.xreadlines)
self.assertRaises(IOError, stdout.seek, 0)
self.assertRaises(IOError, stdout.tell)
self.assertRaises(IOError, stdout.truncate)
def test_attributes(self):
stdout = RedirectedStdout(TestPort())
self.assertEqual(False, stdout.closed)
self.assertEqual("UTF-8", stdout.encoding)
self.assertEqual(None, stdout.errors)
self.assertEqual("w", stdout.mode)
self.assertEqual("<stdout>", stdout.name)
self.assertEqual(None, stdout.newlines)
self.assertEqual(False, stdout.softspace)
def get_suite():
load = unittest.TestLoader().loadTestsFromTestCase
suite = unittest.TestSuite()
suite.addTests(load(RedirectedStdinTestCase))
suite.addTests(load(RedirectedStdoutTestCase))
return suite
| StarcoderdataPython |
146475 | from src.end_point import EndPoint
class Collection:
def __init__(self, collection_json):
self.end_points = [EndPoint(x) for x in collection_json["item"]]
def get_end_points(self):
return self.end_points
def remove_end_point(self, end_point):
self.end_points.remove(end_point)
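# A minimal sketch (assumes EndPoint accepts a Postman-style item dict):
#
#   collection = Collection({"item": [{"name": "ping"}]})
#   print(len(collection.get_end_points()))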
| StarcoderdataPython |
1698536 | <filename>code/image-tagging-flickr8kcn/tf_tagging/utility.py
import os
import numpy as np
def load_config(config_path):
variables = {}
exec(compile(open(config_path, "rb").read(), config_path, 'exec'), variables)
return variables['config']
def get_concept_file(collection, annotation_name, rootpath):
return os.path.join(rootpath, collection, 'Annotations', annotation_name)
def get_feat_dir(collection, feature, rootpath):
return os.path.join(rootpath, collection, 'FeatureData', feature)
def get_train_feat_dir(FLAGS):
return get_feat_dir(FLAGS.train_collection, FLAGS.vf_name, FLAGS.rootpath)
def get_val_feat_dir(FLAGS):
return get_feat_dir(FLAGS.val_collection, FLAGS.vf_name, FLAGS.rootpath)
def get_test_feat_dir(FLAGS):
return get_feat_dir(FLAGS.test_collection, FLAGS.vf_name, FLAGS.rootpath)
def get_model_dir(FLAGS):
if FLAGS.multi_task:
return os.path.join(FLAGS.rootpath, FLAGS.train_collection, 'Models', FLAGS.aux_train_collection,
FLAGS.val_collection, FLAGS.annotation_name, FLAGS.aux_annotation_name, FLAGS.model_name, FLAGS.vf_name)
else:
return os.path.join(FLAGS.rootpath, FLAGS.train_collection, 'Models', FLAGS.val_collection,
FLAGS.annotation_name, FLAGS.model_name, FLAGS.vf_name)
def get_pred_dir(FLAGS):
if FLAGS.multi_task:
return os.path.join(FLAGS.rootpath, FLAGS.test_collection, 'autotagging', FLAGS.test_collection, FLAGS.annotation_name, FLAGS.aux_annotation_name,
FLAGS.train_collection, FLAGS.aux_train_collection, FLAGS.val_collection, FLAGS.model_name, FLAGS.vf_name)
else:
return os.path.join(FLAGS.rootpath, FLAGS.test_collection, 'autotagging', FLAGS.test_collection, FLAGS.annotation_name,
FLAGS.train_collection, FLAGS.val_collection, FLAGS.model_name, FLAGS.vf_name)
'''
perf_table =
hit1 p1 recall1 f1
hit5 p5 recall5 f5
hit10 p10 recall10 f10
'''
def convert_to_one_metric(perf_table):
#hit, precision, recall = perf_so_far.mean(axis=0)
hit5, p5, r5, f5 = perf_table[1,:]
#f = 2*p5*r5/(p5+r5+1e-10)
return p5
'''
return
hit1 p1 recall1 f1
hit5 p5 recall5 f5
hit10 p10 recall10 f10
'''
def compute_hit_precision_recall_f1(pred_labels, y_true, ranks=[1, 5, 10]):
n_samples = y_true.shape[0]
relevant = y_true.sum(axis=1) + 1e-10
res = []
for r in ranks:
matched = np.zeros(n_samples, dtype=int)
for i in range(n_samples):
matched[i] = np.sum(y_true[i, pred_labels[i,:r]])
hit = np.mean([x>0 for x in matched])
_prec = matched / float(r)
precision = np.mean(_prec)
_rec = np.divide(matched, relevant)
recall = np.mean(_rec)
f_measure = np.mean( 2*_prec*_rec / (_prec+_rec+1e-10))
res.append((hit, precision, recall, f_measure))
return np.asarray(res)
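# Quick sketch (hypothetical values): two samples, three labels, and the
# top-1 prediction matching the ground truth for one sample gives
# hit@1 = 0.5 and precision@1 = 0.5.
#
#   y_true = np.array([[1, 0, 0], [0, 1, 0]])
#   pred = np.array([[0, 1, 2], [0, 1, 2]])
#   print(compute_hit_precision_recall_f1(pred, y_true, ranks=[1]))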
if __name__ == '__main__':
config_path = 'model_conf/baseline.py'
    print(load_config(config_path).keep_prob)
| StarcoderdataPython |
51282 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Constant settings for Cowbird application.
Constants defined with format ``COWBIRD_[VARIABLE_NAME]`` can be matched with corresponding
settings formatted as ``cowbird.[variable_name]`` in the ``cowbird.ini`` configuration file.
.. note::
Since the ``cowbird.ini`` file has to be loaded by the application to retrieve various configuration settings,
constant ``COWBIRD_INI_FILE_PATH`` (or any other `path variable` defined before it - see below) has to be defined
    by environment variable if the default location is not desired (i.e., if you want to provide your own configuration).
"""
import logging
import os
import re
from typing import TYPE_CHECKING
from pyramid.settings import asbool
from pyramid.threadlocal import get_current_registry
if TYPE_CHECKING:
# pylint: disable=W0611,unused-import
from typing import Optional
from cowbird.typedefs import AnySettingsContainer, SettingValue
# ===========================
# path variables
# ===========================
COWBIRD_MODULE_DIR = os.path.abspath(os.path.dirname(__file__))
COWBIRD_ROOT = os.path.dirname(COWBIRD_MODULE_DIR)
COWBIRD_CONFIG_DIR = os.getenv(
"COWBIRD_CONFIG_DIR", os.path.join(COWBIRD_ROOT, "config"))
COWBIRD_CONFIG_PATH = os.getenv("COWBIRD_CONFIG_PATH") # default None, require explicit specification
COWBIRD_INI_FILE_PATH = os.getenv(
"COWBIRD_INI_FILE_PATH", "{}/cowbird.ini".format(COWBIRD_CONFIG_DIR))
def _get_default_log_level():
"""
Get logging level from INI configuration file or fallback to default ``INFO`` if it cannot be retrieved.
"""
_default_log_lvl = "INFO"
try:
from cowbird.utils import get_settings_from_config_ini # pylint: disable=C0415 # avoid circular import error
_settings = get_settings_from_config_ini(COWBIRD_INI_FILE_PATH, section="logger_cowbird")
_default_log_lvl = _settings.get("level", _default_log_lvl)
# also considers 'ModuleNotFoundError' derived from 'ImportError', but not added to avoid Python <3.6 name error
except (AttributeError, ImportError): # noqa: W0703 # nosec: B110
pass
return _default_log_lvl
# ===========================
# variables from cowbird.env
# ===========================
# ---------------------------
# COWBIRD
# ---------------------------
COWBIRD_URL = os.getenv("COWBIRD_URL", None) # must be defined
COWBIRD_LOG_LEVEL = os.getenv("COWBIRD_LOG_LEVEL", _get_default_log_level()) # log level to apply to the loggers
COWBIRD_LOG_PRINT = asbool(os.getenv("COWBIRD_LOG_PRINT", False)) # log also forces print to the console
COWBIRD_LOG_REQUEST = asbool(os.getenv("COWBIRD_LOG_REQUEST", True)) # log detail of every incoming request
COWBIRD_LOG_EXCEPTION = asbool(os.getenv("COWBIRD_LOG_EXCEPTION", True)) # log detail of generated exceptions
COWBIRD_ADMIN_PERMISSION = "admin"
# ===========================
# constants
# ===========================
# ignore matches of settings and environment variables for following cases
COWBIRD_CONSTANTS = [
"COWBIRD_CONSTANTS",
"COWBIRD_MODULE_DIR",
"COWBIRD_ROOT",
"COWBIRD_ADMIN_PERMISSION",
# add more as needed
]
# ===========================
# utilities
# ===========================
_REGEX_ASCII_ONLY = re.compile(r"\W|^(?=\d)")
_SETTING_SECTION_PREFIXES = [
"cowbird",
]
_SETTINGS_REQUIRED = [
"COWBIRD_URL",
# FIXME: add others here as needed
]
def get_constant_setting_name(name):
"""
Find the equivalent setting name of the provided environment variable name.
Lower-case name and replace all non-ascii chars by `_`.
Then, convert known prefixes with their dotted name.
"""
name = re.sub(_REGEX_ASCII_ONLY, "_", name.strip().lower())
for prefix in _SETTING_SECTION_PREFIXES:
known_prefix = "{}_".format(prefix)
dotted_prefix = "{}.".format(prefix)
if name.startswith(known_prefix):
return name.replace(known_prefix, dotted_prefix, 1)
return name
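# For example (illustrative values, matching the prefixes above):
#   get_constant_setting_name("COWBIRD_LOG_LEVEL")  -> "cowbird.log_level"
#   get_constant_setting_name("OTHER_VARIABLE")     -> "other_variable"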
def get_constant(constant_name, # type: str
settings_container=None, # type: Optional[AnySettingsContainer]
settings_name=None, # type: Optional[str]
default_value=None, # type: Optional[SettingValue]
raise_missing=True, # type: bool
print_missing=False, # type: bool
raise_not_set=True # type: bool
): # type: (...) -> SettingValue
"""
Search in order for matched value of :paramref:`constant_name`:
1. search in :py:data:`COWBIRD_CONSTANTS`
2. search in settings if specified
3. search alternative setting names (see below)
4. search in :mod:`cowbird.constants` definitions
5. search in environment variables
Parameter :paramref:`constant_name` is expected to have the format ``COWBIRD_[VARIABLE_NAME]`` although any value
can be passed to retrieve generic settings from all above mentioned search locations.
If :paramref:`settings_name` is provided as alternative name, it is used as is to search for results if
:paramref:`constant_name` was not found. Otherwise, ``cowbird.[variable_name]`` is used for additional search when
the format ``COWBIRD_[VARIABLE_NAME]`` was used for :paramref:`constant_name`
(i.e.: ``COWBIRD_ADMIN_USER`` will also search for ``cowbird.admin_user`` and so on for corresponding constants).
:param constant_name: key to search for a value
:param settings_container: WSGI application settings container (if not provided, uses found one in current thread)
:param settings_name: alternative name for `settings` if specified
:param default_value: default value to be returned if not found anywhere, and exception raises are disabled.
:param raise_missing: raise exception if key is not found anywhere
:param print_missing: print message if key is not found anywhere, return ``None``
    :param raise_not_set: raise an exception if the key is found but its value is ``None``; intermediate ``None`` values keep the search going until the last location
:returns: found value or `default_value`
:raises ValueError: if resulting value is invalid based on options (by default raise missing/``None`` value)
:raises LookupError: if no appropriate value could be found from all search locations (according to options)
"""
from cowbird.utils import get_settings, print_log, raise_log # pylint: disable=C0415 # avoid circular import error
if constant_name in COWBIRD_CONSTANTS:
return globals()[constant_name]
missing = True
cowbird_value = None
if settings_container:
settings = get_settings(settings_container)
else:
# note: this will work only after include of cowbird will have triggered configurator setup
print_log("Using settings from local thread.", level=logging.DEBUG)
settings = get_settings(get_current_registry())
if settings and constant_name in settings: # pylint: disable=E1135
missing = False
cowbird_value = settings.get(constant_name)
if cowbird_value is not None:
print_log("Constant found in settings with: {}".format(constant_name), level=logging.DEBUG)
return cowbird_value
if not settings_name:
settings_name = get_constant_setting_name(constant_name)
print_log("Constant alternate search: {}".format(settings_name), level=logging.DEBUG)
if settings and settings_name and settings_name in settings: # pylint: disable=E1135
missing = False
cowbird_value = settings.get(settings_name)
if cowbird_value is not None:
print_log("Constant found in settings with: {}".format(settings_name), level=logging.DEBUG)
return cowbird_value
cowbird_globals = globals()
if constant_name in cowbird_globals:
missing = False
cowbird_value = cowbird_globals.get(constant_name)
if cowbird_value is not None:
print_log("Constant found in definitions with: {}".format(constant_name), level=logging.DEBUG)
return cowbird_value
if constant_name in os.environ:
missing = False
cowbird_value = os.environ.get(constant_name)
if cowbird_value is not None:
print_log("Constant found in environment with: {}".format(constant_name), level=logging.DEBUG)
return cowbird_value
if not missing and raise_not_set:
raise_log("Constant was found but was not set: {}".format(constant_name),
level=logging.ERROR, exception=ValueError)
if missing and raise_missing:
raise_log("Constant could not be found: {}".format(constant_name),
level=logging.ERROR, exception=LookupError)
if missing and print_missing:
print_log("Constant could not be found: {} (using default: {})"
.format(constant_name, default_value), level=logging.WARN)
return cowbird_value or default_value
def validate_required(container):
# type: (AnySettingsContainer) -> None
"""
Validates that some value is provided for every mandatory configuration setting.
:raises: when any of the requirements are missing a definition.
"""
for cfg in _SETTINGS_REQUIRED:
get_constant(cfg, settings_container=container, raise_missing=True, raise_not_set=True)
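# A minimal usage sketch (hypothetical settings values, for illustration only):
#   from cowbird.constants import get_constant, validate_required
#   settings = {"cowbird.url": "http://localhost:7000"}
#   url = get_constant("COWBIRD_URL", settings_container=settings)
#   validate_required(settings)  # raises if any required setting is missing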
| StarcoderdataPython |
82988 | """ bgasync.api - BGAPI classes, constants, and utility functions. """
# This file is auto-generated. Edit at your own risk!
from struct import Struct
from collections import namedtuple
from enum import Enum
from .apibase import *
class event_system_boot(Decodable):
decoded_type = namedtuple('event_system_boot_type', (
'major',
'minor',
'patch',
'build',
'll_version',
'protocol_version',
'hw',
))
decode_struct = Struct('<HHHHHBB')
class event_system_debug(Decodable):
decoded_type = namedtuple('event_system_debug_type', (
'data',
))
decode_struct = Struct('<B')
ends_with_uint8array = True
class event_system_endpoint_watermark_rx(Decodable):
decoded_type = namedtuple('event_system_endpoint_watermark_rx_type', (
'endpoint',
'data',
))
decode_struct = Struct('<BB')
class event_system_endpoint_watermark_tx(Decodable):
decoded_type = namedtuple('event_system_endpoint_watermark_tx_type', (
'endpoint',
'data',
))
decode_struct = Struct('<BB')
class event_system_script_failure(Decodable):
decoded_type = namedtuple('event_system_script_failure_type', (
'address',
'reason',
))
decode_struct = Struct('<HH')
class event_system_no_license_key(Decodable):
decoded_type = namedtuple('event_system_no_license_key_type', (
))
class command_system_reset(CommandEncoder):
__slots__ = ("boot_in_dfu",)
_id, _struct, _ends_with_uint8array = ((0, 0, 0), Struct('<B'), False)
def __init__(self, boot_in_dfu):
super(command_system_reset, self).__init__(boot_in_dfu)
class command_system_hello(CommandEncoder):
__slots__ = ()
_id, _struct, _ends_with_uint8array = ((0, 0, 1), Struct('<'), False)
def __init__(self, ):
super(command_system_hello, self).__init__()
class response_system_hello(Decodable):
decoded_type = namedtuple('response_system_hello_type', (
))
class command_system_address_get(CommandEncoder):
__slots__ = ()
_id, _struct, _ends_with_uint8array = ((0, 0, 2), Struct('<'), False)
def __init__(self, ):
super(command_system_address_get, self).__init__()
class response_system_address_get(Decodable):
decoded_type = namedtuple('response_system_address_get_type', (
'address',
))
decode_struct = Struct('<6s')
class command_system_reg_write(CommandEncoder):
__slots__ = ("address", "value",)
_id, _struct, _ends_with_uint8array = ((0, 0, 3), Struct('<HB'), False)
def __init__(self, address, value):
super(command_system_reg_write, self).__init__(address, value)
class response_system_reg_write(Decodable):
decoded_type = namedtuple('response_system_reg_write_type', (
'result',
))
decode_struct = Struct('<H')
class command_system_reg_read(CommandEncoder):
__slots__ = ("address",)
_id, _struct, _ends_with_uint8array = ((0, 0, 4), Struct('<H'), False)
def __init__(self, address):
super(command_system_reg_read, self).__init__(address)
class response_system_reg_read(Decodable):
decoded_type = namedtuple('response_system_reg_read_type', (
'address',
'value',
))
decode_struct = Struct('<HB')
class command_system_get_counters(CommandEncoder):
__slots__ = ()
_id, _struct, _ends_with_uint8array = ((0, 0, 5), Struct('<'), False)
def __init__(self, ):
super(command_system_get_counters, self).__init__()
class response_system_get_counters(Decodable):
decoded_type = namedtuple('response_system_get_counters_type', (
'txok',
'txretry',
'rxok',
'rxfail',
'mbuf',
))
decode_struct = Struct('<BBBBB')
class command_system_get_connections(CommandEncoder):
__slots__ = ()
_id, _struct, _ends_with_uint8array = ((0, 0, 6), Struct('<'), False)
def __init__(self, ):
super(command_system_get_connections, self).__init__()
class response_system_get_connections(Decodable):
decoded_type = namedtuple('response_system_get_connections_type', (
'maxconn',
))
decode_struct = Struct('<B')
class command_system_read_memory(CommandEncoder):
__slots__ = ("address", "length",)
_id, _struct, _ends_with_uint8array = ((0, 0, 7), Struct('<IB'), False)
def __init__(self, address, length):
super(command_system_read_memory, self).__init__(address, length)
class response_system_read_memory(Decodable):
decoded_type = namedtuple('response_system_read_memory_type', (
'address',
'data',
))
decode_struct = Struct('<IB')
ends_with_uint8array = True
class command_system_get_info(CommandEncoder):
__slots__ = ()
_id, _struct, _ends_with_uint8array = ((0, 0, 8), Struct('<'), False)
def __init__(self, ):
super(command_system_get_info, self).__init__()
class response_system_get_info(Decodable):
decoded_type = namedtuple('response_system_get_info_type', (
'major',
'minor',
'patch',
'build',
'll_version',
'protocol_version',
'hw',
))
decode_struct = Struct('<HHHHHBB')
class command_system_endpoint_tx(CommandEncoder):
__slots__ = ("endpoint", "data",)
_id, _struct, _ends_with_uint8array = ((0, 0, 9), Struct('<BB'), True)
def __init__(self, endpoint, data):
super(command_system_endpoint_tx, self).__init__(endpoint, data)
class response_system_endpoint_tx(Decodable):
decoded_type = namedtuple('response_system_endpoint_tx_type', (
'result',
))
decode_struct = Struct('<H')
class command_system_whitelist_append(CommandEncoder):
__slots__ = ("address", "address_type",)
_id, _struct, _ends_with_uint8array = ((0, 0, 10), Struct('<6sB'), False)
def __init__(self, address, address_type):
super(command_system_whitelist_append, self).__init__(address, address_type)
class response_system_whitelist_append(Decodable):
decoded_type = namedtuple('response_system_whitelist_append_type', (
'result',
))
decode_struct = Struct('<H')
class command_system_whitelist_remove(CommandEncoder):
__slots__ = ("address", "address_type",)
_id, _struct, _ends_with_uint8array = ((0, 0, 11), Struct('<6sB'), False)
def __init__(self, address, address_type):
super(command_system_whitelist_remove, self).__init__(address, address_type)
class response_system_whitelist_remove(Decodable):
decoded_type = namedtuple('response_system_whitelist_remove_type', (
'result',
))
decode_struct = Struct('<H')
class command_system_whitelist_clear(CommandEncoder):
__slots__ = ()
_id, _struct, _ends_with_uint8array = ((0, 0, 12), Struct('<'), False)
def __init__(self, ):
super(command_system_whitelist_clear, self).__init__()
class response_system_whitelist_clear(Decodable):
decoded_type = namedtuple('response_system_whitelist_clear_type', (
))
class command_system_endpoint_rx(CommandEncoder):
__slots__ = ("endpoint", "size",)
_id, _struct, _ends_with_uint8array = ((0, 0, 13), Struct('<BB'), False)
def __init__(self, endpoint, size):
super(command_system_endpoint_rx, self).__init__(endpoint, size)
class response_system_endpoint_rx(Decodable):
decoded_type = namedtuple('response_system_endpoint_rx_type', (
'result',
'data',
))
decode_struct = Struct('<HB')
ends_with_uint8array = True
class command_system_endpoint_set_watermarks(CommandEncoder):
__slots__ = ("endpoint", "rx", "tx",)
_id, _struct, _ends_with_uint8array = ((0, 0, 14), Struct('<BBB'), False)
def __init__(self, endpoint, rx, tx):
super(command_system_endpoint_set_watermarks, self).__init__(endpoint, rx, tx)
class response_system_endpoint_set_watermarks(Decodable):
decoded_type = namedtuple('response_system_endpoint_set_watermarks_type', (
'result',
))
decode_struct = Struct('<H')
class system_endpoints(Enum):
endpoint_api = 0
endpoint_test = 1
endpoint_script = 2
endpoint_usb = 3
endpoint_uart0 = 4
endpoint_uart1 = 5
class event_flash_ps_key(Decodable):
decoded_type = namedtuple('event_flash_ps_key_type', (
'key',
'value',
))
decode_struct = Struct('<HB')
ends_with_uint8array = True
class command_flash_ps_defrag(CommandEncoder):
__slots__ = ()
_id, _struct, _ends_with_uint8array = ((0, 1, 0), Struct('<'), False)
def __init__(self, ):
super(command_flash_ps_defrag, self).__init__()
class response_flash_ps_defrag(Decodable):
decoded_type = namedtuple('response_flash_ps_defrag_type', (
))
class command_flash_ps_dump(CommandEncoder):
__slots__ = ()
_id, _struct, _ends_with_uint8array = ((0, 1, 1), Struct('<'), False)
def __init__(self, ):
super(command_flash_ps_dump, self).__init__()
class response_flash_ps_dump(Decodable):
decoded_type = namedtuple('response_flash_ps_dump_type', (
))
class command_flash_ps_erase_all(CommandEncoder):
__slots__ = ()
_id, _struct, _ends_with_uint8array = ((0, 1, 2), Struct('<'), False)
def __init__(self, ):
super(command_flash_ps_erase_all, self).__init__()
class response_flash_ps_erase_all(Decodable):
decoded_type = namedtuple('response_flash_ps_erase_all_type', (
))
class command_flash_ps_save(CommandEncoder):
__slots__ = ("key", "value",)
_id, _struct, _ends_with_uint8array = ((0, 1, 3), Struct('<HB'), True)
def __init__(self, key, value):
super(command_flash_ps_save, self).__init__(key, value)
class response_flash_ps_save(Decodable):
decoded_type = namedtuple('response_flash_ps_save_type', (
'result',
))
decode_struct = Struct('<H')
class command_flash_ps_load(CommandEncoder):
__slots__ = ("key",)
_id, _struct, _ends_with_uint8array = ((0, 1, 4), Struct('<H'), False)
def __init__(self, key):
super(command_flash_ps_load, self).__init__(key)
class response_flash_ps_load(Decodable):
decoded_type = namedtuple('response_flash_ps_load_type', (
'result',
'value',
))
decode_struct = Struct('<HB')
ends_with_uint8array = True
class command_flash_ps_erase(CommandEncoder):
__slots__ = ("key",)
_id, _struct, _ends_with_uint8array = ((0, 1, 5), Struct('<H'), False)
def __init__(self, key):
super(command_flash_ps_erase, self).__init__(key)
class response_flash_ps_erase(Decodable):
decoded_type = namedtuple('response_flash_ps_erase_type', (
))
class command_flash_erase_page(CommandEncoder):
__slots__ = ("page",)
_id, _struct, _ends_with_uint8array = ((0, 1, 6), Struct('<B'), False)
def __init__(self, page):
super(command_flash_erase_page, self).__init__(page)
class response_flash_erase_page(Decodable):
decoded_type = namedtuple('response_flash_erase_page_type', (
'result',
))
decode_struct = Struct('<H')
class command_flash_write_words(CommandEncoder):
__slots__ = ("address", "words",)
_id, _struct, _ends_with_uint8array = ((0, 1, 7), Struct('<HB'), True)
def __init__(self, address, words):
super(command_flash_write_words, self).__init__(address, words)
class response_flash_write_words(Decodable):
decoded_type = namedtuple('response_flash_write_words_type', (
))
class event_attributes_value(Decodable):
decoded_type = namedtuple('event_attributes_value_type', (
'connection',
'reason',
'handle',
'offset',
'value',
))
decode_struct = Struct('<BBHHB')
ends_with_uint8array = True
class event_attributes_user_read_request(Decodable):
decoded_type = namedtuple('event_attributes_user_read_request_type', (
'connection',
'handle',
'offset',
'maxsize',
))
decode_struct = Struct('<BHHB')
class event_attributes_status(Decodable):
decoded_type = namedtuple('event_attributes_status_type', (
'handle',
'flags',
))
decode_struct = Struct('<HB')
class command_attributes_write(CommandEncoder):
__slots__ = ("handle", "offset", "value",)
_id, _struct, _ends_with_uint8array = ((0, 2, 0), Struct('<HBB'), True)
def __init__(self, handle, offset, value):
super(command_attributes_write, self).__init__(handle, offset, value)
class response_attributes_write(Decodable):
decoded_type = namedtuple('response_attributes_write_type', (
'result',
))
decode_struct = Struct('<H')
class command_attributes_read(CommandEncoder):
__slots__ = ("handle", "offset",)
_id, _struct, _ends_with_uint8array = ((0, 2, 1), Struct('<HH'), False)
def __init__(self, handle, offset):
super(command_attributes_read, self).__init__(handle, offset)
class response_attributes_read(Decodable):
decoded_type = namedtuple('response_attributes_read_type', (
'handle',
'offset',
'result',
'value',
))
decode_struct = Struct('<HHHB')
ends_with_uint8array = True
class command_attributes_read_type(CommandEncoder):
__slots__ = ("handle",)
_id, _struct, _ends_with_uint8array = ((0, 2, 2), Struct('<H'), False)
def __init__(self, handle):
super(command_attributes_read_type, self).__init__(handle)
class response_attributes_read_type(Decodable):
decoded_type = namedtuple('response_attributes_read_type_type', (
'handle',
'result',
'value',
))
decode_struct = Struct('<HHB')
ends_with_uint8array = True
class command_attributes_user_read_response(CommandEncoder):
__slots__ = ("connection", "att_error", "value",)
_id, _struct, _ends_with_uint8array = ((0, 2, 3), Struct('<BBB'), True)
def __init__(self, connection, att_error, value):
super(command_attributes_user_read_response, self).__init__(connection, att_error, value)
class response_attributes_user_read_response(Decodable):
decoded_type = namedtuple('response_attributes_user_read_response_type', (
))
class command_attributes_user_write_response(CommandEncoder):
__slots__ = ("connection", "att_error",)
_id, _struct, _ends_with_uint8array = ((0, 2, 4), Struct('<BB'), False)
def __init__(self, connection, att_error):
super(command_attributes_user_write_response, self).__init__(connection, att_error)
class response_attributes_user_write_response(Decodable):
decoded_type = namedtuple('response_attributes_user_write_response_type', (
))
class attributes_attribute_change_reason(Enum):
write_request = 0
write_command = 1
write_request_user = 2
class attributes_attribute_status_flag(Enum):
notify = 1
indicate = 2
class event_connection_status(Decodable):
decoded_type = namedtuple('event_connection_status_type', (
'connection',
'flags',
'address',
'address_type',
'conn_interval',
'timeout',
'latency',
'bonding',
))
decode_struct = Struct('<BB6sBHHHB')
class event_connection_version_ind(Decodable):
decoded_type = namedtuple('event_connection_version_ind_type', (
'connection',
'vers_nr',
'comp_id',
'sub_vers_nr',
))
decode_struct = Struct('<BBHH')
class event_connection_feature_ind(Decodable):
decoded_type = namedtuple('event_connection_feature_ind_type', (
'connection',
'features',
))
decode_struct = Struct('<BB')
ends_with_uint8array = True
class event_connection_raw_rx(Decodable):
decoded_type = namedtuple('event_connection_raw_rx_type', (
'connection',
'data',
))
decode_struct = Struct('<BB')
ends_with_uint8array = True
class event_connection_disconnected(Decodable):
decoded_type = namedtuple('event_connection_disconnected_type', (
'connection',
'reason',
))
decode_struct = Struct('<BH')
class command_connection_disconnect(CommandEncoder):
__slots__ = ("connection",)
_id, _struct, _ends_with_uint8array = ((0, 3, 0), Struct('<B'), False)
def __init__(self, connection):
super(command_connection_disconnect, self).__init__(connection)
class response_connection_disconnect(Decodable):
decoded_type = namedtuple('response_connection_disconnect_type', (
'connection',
'result',
))
decode_struct = Struct('<BH')
class command_connection_get_rssi(CommandEncoder):
__slots__ = ("connection",)
_id, _struct, _ends_with_uint8array = ((0, 3, 1), Struct('<B'), False)
def __init__(self, connection):
super(command_connection_get_rssi, self).__init__(connection)
class response_connection_get_rssi(Decodable):
decoded_type = namedtuple('response_connection_get_rssi_type', (
'connection',
'rssi',
))
decode_struct = Struct('<Bb')
class command_connection_update(CommandEncoder):
__slots__ = ("connection", "interval_min", "interval_max", "latency", "timeout",)
_id, _struct, _ends_with_uint8array = ((0, 3, 2), Struct('<BHHHH'), False)
def __init__(self, connection, interval_min, interval_max, latency, timeout):
super(command_connection_update, self).__init__(connection, interval_min, interval_max, latency, timeout)
class response_connection_update(Decodable):
decoded_type = namedtuple('response_connection_update_type', (
'connection',
'result',
))
decode_struct = Struct('<BH')
class command_connection_version_update(CommandEncoder):
__slots__ = ("connection",)
_id, _struct, _ends_with_uint8array = ((0, 3, 3), Struct('<B'), False)
def __init__(self, connection):
super(command_connection_version_update, self).__init__(connection)
class response_connection_version_update(Decodable):
decoded_type = namedtuple('response_connection_version_update_type', (
'connection',
'result',
))
decode_struct = Struct('<BH')
class command_connection_channel_map_get(CommandEncoder):
__slots__ = ("connection",)
_id, _struct, _ends_with_uint8array = ((0, 3, 4), Struct('<B'), False)
def __init__(self, connection):
super(command_connection_channel_map_get, self).__init__(connection)
class response_connection_channel_map_get(Decodable):
decoded_type = namedtuple('response_connection_channel_map_get_type', (
'connection',
'map',
))
decode_struct = Struct('<BB')
ends_with_uint8array = True
class command_connection_channel_map_set(CommandEncoder):
__slots__ = ("connection", "map",)
_id, _struct, _ends_with_uint8array = ((0, 3, 5), Struct('<BB'), True)
def __init__(self, connection, map):
super(command_connection_channel_map_set, self).__init__(connection, map)
class response_connection_channel_map_set(Decodable):
decoded_type = namedtuple('response_connection_channel_map_set_type', (
'connection',
'result',
))
decode_struct = Struct('<BH')
class command_connection_features_get(CommandEncoder):
__slots__ = ("connection",)
_id, _struct, _ends_with_uint8array = ((0, 3, 6), Struct('<B'), False)
def __init__(self, connection):
super(command_connection_features_get, self).__init__(connection)
class response_connection_features_get(Decodable):
decoded_type = namedtuple('response_connection_features_get_type', (
'connection',
'result',
))
decode_struct = Struct('<BH')
class command_connection_get_status(CommandEncoder):
__slots__ = ("connection",)
_id, _struct, _ends_with_uint8array = ((0, 3, 7), Struct('<B'), False)
def __init__(self, connection):
super(command_connection_get_status, self).__init__(connection)
class response_connection_get_status(Decodable):
decoded_type = namedtuple('response_connection_get_status_type', (
'connection',
))
decode_struct = Struct('<B')
class command_connection_raw_tx(CommandEncoder):
__slots__ = ("connection", "data",)
_id, _struct, _ends_with_uint8array = ((0, 3, 8), Struct('<BB'), True)
def __init__(self, connection, data):
super(command_connection_raw_tx, self).__init__(connection, data)
class response_connection_raw_tx(Decodable):
decoded_type = namedtuple('response_connection_raw_tx_type', (
'connection',
))
decode_struct = Struct('<B')
class connection_connstatus(Enum):
connected = 1
encrypted = 2
completed = 4
parameters_change = 8
class event_attclient_indicated(Decodable):
decoded_type = namedtuple('event_attclient_indicated_type', (
'connection',
'attrhandle',
))
decode_struct = Struct('<BH')
class event_attclient_procedure_completed(Decodable):
decoded_type = namedtuple('event_attclient_procedure_completed_type', (
'connection',
'result',
'chrhandle',
))
decode_struct = Struct('<BHH')
class event_attclient_group_found(Decodable):
decoded_type = namedtuple('event_attclient_group_found_type', (
'connection',
'start',
'end',
'uuid',
))
decode_struct = Struct('<BHHB')
ends_with_uint8array = True
class event_attclient_attribute_found(Decodable):
decoded_type = namedtuple('event_attclient_attribute_found_type', (
'connection',
'chrdecl',
'value',
'properties',
'uuid',
))
decode_struct = Struct('<BHHBB')
ends_with_uint8array = True
class event_attclient_find_information_found(Decodable):
decoded_type = namedtuple('event_attclient_find_information_found_type', (
'connection',
'chrhandle',
'uuid',
))
decode_struct = Struct('<BHB')
ends_with_uint8array = True
class event_attclient_attribute_value(Decodable):
decoded_type = namedtuple('event_attclient_attribute_value_type', (
'connection',
'atthandle',
'type',
'value',
))
decode_struct = Struct('<BHBB')
ends_with_uint8array = True
class event_attclient_read_multiple_response(Decodable):
decoded_type = namedtuple('event_attclient_read_multiple_response_type', (
'connection',
'handles',
))
decode_struct = Struct('<BB')
ends_with_uint8array = True
class command_attclient_find_by_type_value(CommandEncoder):
__slots__ = ("connection", "start", "end", "uuid", "value",)
_id, _struct, _ends_with_uint8array = ((0, 4, 0), Struct('<BHHHB'), True)
def __init__(self, connection, start, end, uuid, value):
super(command_attclient_find_by_type_value, self).__init__(connection, start, end, uuid, value)
class response_attclient_find_by_type_value(Decodable):
decoded_type = namedtuple('response_attclient_find_by_type_value_type', (
'connection',
'result',
))
decode_struct = Struct('<BH')
class command_attclient_read_by_group_type(CommandEncoder):
__slots__ = ("connection", "start", "end", "uuid",)
_id, _struct, _ends_with_uint8array = ((0, 4, 1), Struct('<BHHB'), True)
def __init__(self, connection, start, end, uuid):
super(command_attclient_read_by_group_type, self).__init__(connection, start, end, uuid)
class response_attclient_read_by_group_type(Decodable):
decoded_type = namedtuple('response_attclient_read_by_group_type_type', (
'connection',
'result',
))
decode_struct = Struct('<BH')
class command_attclient_read_by_type(CommandEncoder):
__slots__ = ("connection", "start", "end", "uuid",)
_id, _struct, _ends_with_uint8array = ((0, 4, 2), Struct('<BHHB'), True)
def __init__(self, connection, start, end, uuid):
super(command_attclient_read_by_type, self).__init__(connection, start, end, uuid)
class response_attclient_read_by_type(Decodable):
decoded_type = namedtuple('response_attclient_read_by_type_type', (
'connection',
'result',
))
decode_struct = Struct('<BH')
class command_attclient_find_information(CommandEncoder):
__slots__ = ("connection", "start", "end",)
_id, _struct, _ends_with_uint8array = ((0, 4, 3), Struct('<BHH'), False)
def __init__(self, connection, start, end):
super(command_attclient_find_information, self).__init__(connection, start, end)
class response_attclient_find_information(Decodable):
decoded_type = namedtuple('response_attclient_find_information_type', (
'connection',
'result',
))
decode_struct = Struct('<BH')
class command_attclient_read_by_handle(CommandEncoder):
__slots__ = ("connection", "chrhandle",)
_id, _struct, _ends_with_uint8array = ((0, 4, 4), Struct('<BH'), False)
def __init__(self, connection, chrhandle):
super(command_attclient_read_by_handle, self).__init__(connection, chrhandle)
class response_attclient_read_by_handle(Decodable):
decoded_type = namedtuple('response_attclient_read_by_handle_type', (
'connection',
'result',
))
decode_struct = Struct('<BH')
class command_attclient_attribute_write(CommandEncoder):
__slots__ = ("connection", "atthandle", "data",)
_id, _struct, _ends_with_uint8array = ((0, 4, 5), Struct('<BHB'), True)
def __init__(self, connection, atthandle, data):
super(command_attclient_attribute_write, self).__init__(connection, atthandle, data)
class response_attclient_attribute_write(Decodable):
decoded_type = namedtuple('response_attclient_attribute_write_type', (
'connection',
'result',
))
decode_struct = Struct('<BH')
class command_attclient_write_command(CommandEncoder):
__slots__ = ("connection", "atthandle", "data",)
_id, _struct, _ends_with_uint8array = ((0, 4, 6), Struct('<BHB'), True)
def __init__(self, connection, atthandle, data):
super(command_attclient_write_command, self).__init__(connection, atthandle, data)
class response_attclient_write_command(Decodable):
decoded_type = namedtuple('response_attclient_write_command_type', (
'connection',
'result',
))
decode_struct = Struct('<BH')
class command_attclient_indicate_confirm(CommandEncoder):
__slots__ = ("connection",)
_id, _struct, _ends_with_uint8array = ((0, 4, 7), Struct('<B'), False)
def __init__(self, connection):
super(command_attclient_indicate_confirm, self).__init__(connection)
class response_attclient_indicate_confirm(Decodable):
decoded_type = namedtuple('response_attclient_indicate_confirm_type', (
'result',
))
decode_struct = Struct('<H')
class command_attclient_read_long(CommandEncoder):
__slots__ = ("connection", "chrhandle",)
_id, _struct, _ends_with_uint8array = ((0, 4, 8), Struct('<BH'), False)
def __init__(self, connection, chrhandle):
super(command_attclient_read_long, self).__init__(connection, chrhandle)
class response_attclient_read_long(Decodable):
decoded_type = namedtuple('response_attclient_read_long_type', (
'connection',
'result',
))
decode_struct = Struct('<BH')
class command_attclient_prepare_write(CommandEncoder):
__slots__ = ("connection", "atthandle", "offset", "data",)
_id, _struct, _ends_with_uint8array = ((0, 4, 9), Struct('<BHHB'), True)
def __init__(self, connection, atthandle, offset, data):
super(command_attclient_prepare_write, self).__init__(connection, atthandle, offset, data)
class response_attclient_prepare_write(Decodable):
decoded_type = namedtuple('response_attclient_prepare_write_type', (
'connection',
'result',
))
decode_struct = Struct('<BH')
class command_attclient_execute_write(CommandEncoder):
__slots__ = ("connection", "commit",)
_id, _struct, _ends_with_uint8array = ((0, 4, 10), Struct('<BB'), False)
def __init__(self, connection, commit):
super(command_attclient_execute_write, self).__init__(connection, commit)
class response_attclient_execute_write(Decodable):
decoded_type = namedtuple('response_attclient_execute_write_type', (
'connection',
'result',
))
decode_struct = Struct('<BH')
class command_attclient_read_multiple(CommandEncoder):
__slots__ = ("connection", "handles",)
_id, _struct, _ends_with_uint8array = ((0, 4, 11), Struct('<BB'), True)
def __init__(self, connection, handles):
super(command_attclient_read_multiple, self).__init__(connection, handles)
class response_attclient_read_multiple(Decodable):
decoded_type = namedtuple('response_attclient_read_multiple_type', (
'connection',
'result',
))
decode_struct = Struct('<BH')
class attclient_attribute_value_types(Enum):
attribute_value_type_read = 0
attribute_value_type_notify = 1
attribute_value_type_indicate = 2
attribute_value_type_read_by_type = 3
attribute_value_type_read_blob = 4
attribute_value_type_indicate_rsp_req = 5
class event_sm_smp_data(Decodable):
decoded_type = namedtuple('event_sm_smp_data_type', (
'handle',
'packet',
'data',
))
decode_struct = Struct('<BBB')
ends_with_uint8array = True
class event_sm_bonding_fail(Decodable):
decoded_type = namedtuple('event_sm_bonding_fail_type', (
'handle',
'result',
))
decode_struct = Struct('<BH')
class event_sm_passkey_display(Decodable):
decoded_type = namedtuple('event_sm_passkey_display_type', (
'handle',
'passkey',
))
decode_struct = Struct('<BI')
class event_sm_passkey_request(Decodable):
decoded_type = namedtuple('event_sm_passkey_request_type', (
'handle',
))
decode_struct = Struct('<B')
class event_sm_bond_status(Decodable):
decoded_type = namedtuple('event_sm_bond_status_type', (
'bond',
'keysize',
'mitm',
'keys',
))
decode_struct = Struct('<BBBB')
class command_sm_encrypt_start(CommandEncoder):
__slots__ = ("handle", "bonding",)
_id, _struct, _ends_with_uint8array = ((0, 5, 0), Struct('<BB'), False)
def __init__(self, handle, bonding):
super(command_sm_encrypt_start, self).__init__(handle, bonding)
class response_sm_encrypt_start(Decodable):
decoded_type = namedtuple('response_sm_encrypt_start_type', (
'handle',
'result',
))
decode_struct = Struct('<BH')
class command_sm_set_bondable_mode(CommandEncoder):
__slots__ = ("bondable",)
_id, _struct, _ends_with_uint8array = ((0, 5, 1), Struct('<B'), False)
def __init__(self, bondable):
super(command_sm_set_bondable_mode, self).__init__(bondable)
class response_sm_set_bondable_mode(Decodable):
decoded_type = namedtuple('response_sm_set_bondable_mode_type', (
))
class command_sm_delete_bonding(CommandEncoder):
__slots__ = ("handle",)
_id, _struct, _ends_with_uint8array = ((0, 5, 2), Struct('<B'), False)
def __init__(self, handle):
super(command_sm_delete_bonding, self).__init__(handle)
class response_sm_delete_bonding(Decodable):
decoded_type = namedtuple('response_sm_delete_bonding_type', (
'result',
))
decode_struct = Struct('<H')
class command_sm_set_parameters(CommandEncoder):
__slots__ = ("mitm", "min_key_size", "io_capabilities",)
_id, _struct, _ends_with_uint8array = ((0, 5, 3), Struct('<BBB'), False)
def __init__(self, mitm, min_key_size, io_capabilities):
super(command_sm_set_parameters, self).__init__(mitm, min_key_size, io_capabilities)
class response_sm_set_parameters(Decodable):
decoded_type = namedtuple('response_sm_set_parameters_type', (
))
class command_sm_passkey_entry(CommandEncoder):
__slots__ = ("handle", "passkey",)
_id, _struct, _ends_with_uint8array = ((0, 5, 4), Struct('<BI'), False)
def __init__(self, handle, passkey):
super(command_sm_passkey_entry, self).__init__(handle, passkey)
class response_sm_passkey_entry(Decodable):
decoded_type = namedtuple('response_sm_passkey_entry_type', (
'result',
))
decode_struct = Struct('<H')
class command_sm_get_bonds(CommandEncoder):
__slots__ = ()
_id, _struct, _ends_with_uint8array = ((0, 5, 5), Struct('<'), False)
def __init__(self, ):
super(command_sm_get_bonds, self).__init__()
class response_sm_get_bonds(Decodable):
decoded_type = namedtuple('response_sm_get_bonds_type', (
'bonds',
))
decode_struct = Struct('<B')
class command_sm_set_oob_data(CommandEncoder):
__slots__ = ("oob",)
_id, _struct, _ends_with_uint8array = ((0, 5, 6), Struct('<B'), True)
def __init__(self, oob):
super(command_sm_set_oob_data, self).__init__(oob)
class response_sm_set_oob_data(Decodable):
decoded_type = namedtuple('response_sm_set_oob_data_type', (
))
class sm_bonding_key(Enum):
ltk = 0x01
addr_public = 0x02
addr_static = 0x04
irk = 0x08
edivrand = 0x10
csrk = 0x20
masterid = 0x40
class sm_io_capability(Enum):
displayonly = 0
displayyesno = 1
keyboardonly = 2
noinputnooutput = 3
keyboarddisplay = 4
class event_gap_scan_response(Decodable):
decoded_type = namedtuple('event_gap_scan_response_type', (
'rssi',
'packet_type',
'sender',
'address_type',
'bond',
'data',
))
decode_struct = Struct('<bB6sBBB')
ends_with_uint8array = True
class event_gap_mode_changed(Decodable):
decoded_type = namedtuple('event_gap_mode_changed_type', (
'discover',
'connect',
))
decode_struct = Struct('<BB')
class command_gap_set_privacy_flags(CommandEncoder):
__slots__ = ("peripheral_privacy", "central_privacy",)
_id, _struct, _ends_with_uint8array = ((0, 6, 0), Struct('<BB'), False)
def __init__(self, peripheral_privacy, central_privacy):
super(command_gap_set_privacy_flags, self).__init__(peripheral_privacy, central_privacy)
class response_gap_set_privacy_flags(Decodable):
decoded_type = namedtuple('response_gap_set_privacy_flags_type', (
))
class command_gap_set_mode(CommandEncoder):
__slots__ = ("discover", "connect",)
_id, _struct, _ends_with_uint8array = ((0, 6, 1), Struct('<BB'), False)
def __init__(self, discover, connect):
super(command_gap_set_mode, self).__init__(discover, connect)
class response_gap_set_mode(Decodable):
decoded_type = namedtuple('response_gap_set_mode_type', (
'result',
))
decode_struct = Struct('<H')
class command_gap_discover(CommandEncoder):
__slots__ = ("mode",)
_id, _struct, _ends_with_uint8array = ((0, 6, 2), Struct('<B'), False)
def __init__(self, mode):
super(command_gap_discover, self).__init__(mode)
class response_gap_discover(Decodable):
decoded_type = namedtuple('response_gap_discover_type', (
'result',
))
decode_struct = Struct('<H')
class command_gap_connect_direct(CommandEncoder):
__slots__ = ("address", "addr_type", "conn_interval_min", "conn_interval_max", "timeout", "latency",)
_id, _struct, _ends_with_uint8array = ((0, 6, 3), Struct('<6sBHHHH'), False)
def __init__(self, address, addr_type, conn_interval_min, conn_interval_max, timeout, latency):
super(command_gap_connect_direct, self).__init__(address, addr_type, conn_interval_min, conn_interval_max, timeout, latency)
class response_gap_connect_direct(Decodable):
decoded_type = namedtuple('response_gap_connect_direct_type', (
'result',
'connection_handle',
))
decode_struct = Struct('<HB')
class command_gap_end_procedure(CommandEncoder):
__slots__ = ()
_id, _struct, _ends_with_uint8array = ((0, 6, 4), Struct('<'), False)
def __init__(self, ):
super(command_gap_end_procedure, self).__init__()
class response_gap_end_procedure(Decodable):
decoded_type = namedtuple('response_gap_end_procedure_type', (
'result',
))
decode_struct = Struct('<H')
class command_gap_connect_selective(CommandEncoder):
__slots__ = ("conn_interval_min", "conn_interval_max", "timeout", "latency",)
_id, _struct, _ends_with_uint8array = ((0, 6, 5), Struct('<HHHH'), False)
def __init__(self, conn_interval_min, conn_interval_max, timeout, latency):
super(command_gap_connect_selective, self).__init__(conn_interval_min, conn_interval_max, timeout, latency)
class response_gap_connect_selective(Decodable):
decoded_type = namedtuple('response_gap_connect_selective_type', (
'result',
'connection_handle',
))
decode_struct = Struct('<HB')
class command_gap_set_filtering(CommandEncoder):
__slots__ = ("scan_policy", "adv_policy", "scan_duplicate_filtering",)
_id, _struct, _ends_with_uint8array = ((0, 6, 6), Struct('<BBB'), False)
def __init__(self, scan_policy, adv_policy, scan_duplicate_filtering):
super(command_gap_set_filtering, self).__init__(scan_policy, adv_policy, scan_duplicate_filtering)
class response_gap_set_filtering(Decodable):
decoded_type = namedtuple('response_gap_set_filtering_type', (
'result',
))
decode_struct = Struct('<H')
class command_gap_set_scan_parameters(CommandEncoder):
__slots__ = ("scan_interval", "scan_window", "active",)
_id, _struct, _ends_with_uint8array = ((0, 6, 7), Struct('<HHB'), False)
def __init__(self, scan_interval, scan_window, active):
super(command_gap_set_scan_parameters, self).__init__(scan_interval, scan_window, active)
class response_gap_set_scan_parameters(Decodable):
decoded_type = namedtuple('response_gap_set_scan_parameters_type', (
'result',
))
decode_struct = Struct('<H')
class command_gap_set_adv_parameters(CommandEncoder):
__slots__ = ("adv_interval_min", "adv_interval_max", "adv_channels",)
_id, _struct, _ends_with_uint8array = ((0, 6, 8), Struct('<HHB'), False)
def __init__(self, adv_interval_min, adv_interval_max, adv_channels):
super(command_gap_set_adv_parameters, self).__init__(adv_interval_min, adv_interval_max, adv_channels)
class response_gap_set_adv_parameters(Decodable):
decoded_type = namedtuple('response_gap_set_adv_parameters_type', (
'result',
))
decode_struct = Struct('<H')
class command_gap_set_adv_data(CommandEncoder):
__slots__ = ("set_scanrsp", "adv_data",)
_id, _struct, _ends_with_uint8array = ((0, 6, 9), Struct('<BB'), True)
def __init__(self, set_scanrsp, adv_data):
super(command_gap_set_adv_data, self).__init__(set_scanrsp, adv_data)
class response_gap_set_adv_data(Decodable):
decoded_type = namedtuple('response_gap_set_adv_data_type', (
'result',
))
decode_struct = Struct('<H')
class command_gap_set_directed_connectable_mode(CommandEncoder):
__slots__ = ("address", "addr_type",)
_id, _struct, _ends_with_uint8array = ((0, 6, 10), Struct('<6sB'), False)
def __init__(self, address, addr_type):
super(command_gap_set_directed_connectable_mode, self).__init__(address, addr_type)
class response_gap_set_directed_connectable_mode(Decodable):
decoded_type = namedtuple('response_gap_set_directed_connectable_mode_type', (
'result',
))
decode_struct = Struct('<H')
class gap_address_type(Enum):
public = 0
random = 1
class gap_discoverable_mode(Enum):
non_discoverable = 0
limited_discoverable = 1
general_discoverable = 2
broadcast = 3
user_data = 4
class gap_connectable_mode(Enum):
non_connectable = 0
directed_connectable = 1
undirected_connectable = 2
scannable_connectable = 3
class gap_discover_mode(Enum):
discover_limited = 0
discover_generic = 1
discover_observation = 2
class gap_ad_types(Enum):
ad_type_none = 0
ad_type_flags = 1
ad_type_services_16bit_more = 2
ad_type_services_16bit_all = 3
ad_type_services_32bit_more = 4
ad_type_services_32bit_all = 5
ad_type_services_128bit_more = 6
ad_type_services_128bit_all = 7
ad_type_localname_short = 8
ad_type_localname_complete = 9
ad_type_txpower = 10
class gap_advertising_policy(Enum):
adv_policy_all = 0
adv_policy_whitelist_scan = 1
adv_policy_whitelist_connect = 2
adv_policy_whitelist_all = 3
class gap_scan_policy(Enum):
all = 0
whitelist = 1
class event_hardware_io_port_status(Decodable):
decoded_type = namedtuple('event_hardware_io_port_status_type', (
'timestamp',
'port',
'irq',
'state',
))
decode_struct = Struct('<IBBB')
class event_hardware_soft_timer(Decodable):
decoded_type = namedtuple('event_hardware_soft_timer_type', (
'handle',
))
decode_struct = Struct('<B')
class event_hardware_adc_result(Decodable):
decoded_type = namedtuple('event_hardware_adc_result_type', (
'input',
'value',
))
decode_struct = Struct('<Bh')
class command_hardware_io_port_config_irq(CommandEncoder):
__slots__ = ("port", "enable_bits", "falling_edge",)
_id, _struct, _ends_with_uint8array = ((0, 7, 0), Struct('<BBB'), False)
def __init__(self, port, enable_bits, falling_edge):
super(command_hardware_io_port_config_irq, self).__init__(port, enable_bits, falling_edge)
class response_hardware_io_port_config_irq(Decodable):
decoded_type = namedtuple('response_hardware_io_port_config_irq_type', (
'result',
))
decode_struct = Struct('<H')
class command_hardware_set_soft_timer(CommandEncoder):
__slots__ = ("time", "handle", "single_shot",)
_id, _struct, _ends_with_uint8array = ((0, 7, 1), Struct('<IBB'), False)
def __init__(self, time, handle, single_shot):
super(command_hardware_set_soft_timer, self).__init__(time, handle, single_shot)
class response_hardware_set_soft_timer(Decodable):
decoded_type = namedtuple('response_hardware_set_soft_timer_type', (
'result',
))
decode_struct = Struct('<H')
class command_hardware_adc_read(CommandEncoder):
__slots__ = ("input", "decimation", "reference_selection",)
_id, _struct, _ends_with_uint8array = ((0, 7, 2), Struct('<BBB'), False)
def __init__(self, input, decimation, reference_selection):
super(command_hardware_adc_read, self).__init__(input, decimation, reference_selection)
class response_hardware_adc_read(Decodable):
decoded_type = namedtuple('response_hardware_adc_read_type', (
'result',
))
decode_struct = Struct('<H')
class command_hardware_io_port_config_direction(CommandEncoder):
__slots__ = ("port", "direction",)
_id, _struct, _ends_with_uint8array = ((0, 7, 3), Struct('<BB'), False)
def __init__(self, port, direction):
super(command_hardware_io_port_config_direction, self).__init__(port, direction)
class response_hardware_io_port_config_direction(Decodable):
decoded_type = namedtuple('response_hardware_io_port_config_direction_type', (
'result',
))
decode_struct = Struct('<H')
class command_hardware_io_port_config_function(CommandEncoder):
__slots__ = ("port", "function",)
_id, _struct, _ends_with_uint8array = ((0, 7, 4), Struct('<BB'), False)
def __init__(self, port, function):
super(command_hardware_io_port_config_function, self).__init__(port, function)
class response_hardware_io_port_config_function(Decodable):
decoded_type = namedtuple('response_hardware_io_port_config_function_type', (
'result',
))
decode_struct = Struct('<H')
class command_hardware_io_port_config_pull(CommandEncoder):
__slots__ = ("port", "tristate_mask", "pull_up",)
_id, _struct, _ends_with_uint8array = ((0, 7, 5), Struct('<BBB'), False)
def __init__(self, port, tristate_mask, pull_up):
super(command_hardware_io_port_config_pull, self).__init__(port, tristate_mask, pull_up)
class response_hardware_io_port_config_pull(Decodable):
decoded_type = namedtuple('response_hardware_io_port_config_pull_type', (
'result',
))
decode_struct = Struct('<H')
class command_hardware_io_port_write(CommandEncoder):
__slots__ = ("port", "mask", "data",)
_id, _struct, _ends_with_uint8array = ((0, 7, 6), Struct('<BBB'), False)
def __init__(self, port, mask, data):
super(command_hardware_io_port_write, self).__init__(port, mask, data)
class response_hardware_io_port_write(Decodable):
decoded_type = namedtuple('response_hardware_io_port_write_type', (
'result',
))
decode_struct = Struct('<H')
class command_hardware_io_port_read(CommandEncoder):
__slots__ = ("port", "mask",)
_id, _struct, _ends_with_uint8array = ((0, 7, 7), Struct('<BB'), False)
def __init__(self, port, mask):
super(command_hardware_io_port_read, self).__init__(port, mask)
class response_hardware_io_port_read(Decodable):
decoded_type = namedtuple('response_hardware_io_port_read_type', (
'result',
'port',
'data',
))
decode_struct = Struct('<HBB')
class command_hardware_spi_config(CommandEncoder):
__slots__ = ("channel", "polarity", "phase", "bit_order", "baud_e", "baud_m",)
_id, _struct, _ends_with_uint8array = ((0, 7, 8), Struct('<BBBBBB'), False)
def __init__(self, channel, polarity, phase, bit_order, baud_e, baud_m):
super(command_hardware_spi_config, self).__init__(channel, polarity, phase, bit_order, baud_e, baud_m)
class response_hardware_spi_config(Decodable):
decoded_type = namedtuple('response_hardware_spi_config_type', (
'result',
))
decode_struct = Struct('<H')
class command_hardware_spi_transfer(CommandEncoder):
__slots__ = ("channel", "data",)
_id, _struct, _ends_with_uint8array = ((0, 7, 9), Struct('<BB'), True)
def __init__(self, channel, data):
super(command_hardware_spi_transfer, self).__init__(channel, data)
class response_hardware_spi_transfer(Decodable):
decoded_type = namedtuple('response_hardware_spi_transfer_type', (
'result',
'channel',
'data',
))
decode_struct = Struct('<HBB')
ends_with_uint8array = True
class command_hardware_i2c_read(CommandEncoder):
__slots__ = ("address", "stop", "length",)
_id, _struct, _ends_with_uint8array = ((0, 7, 10), Struct('<BBB'), False)
def __init__(self, address, stop, length):
super(command_hardware_i2c_read, self).__init__(address, stop, length)
class response_hardware_i2c_read(Decodable):
decoded_type = namedtuple('response_hardware_i2c_read_type', (
'result',
'data',
))
decode_struct = Struct('<HB')
ends_with_uint8array = True
class command_hardware_i2c_write(CommandEncoder):
__slots__ = ("address", "stop", "data",)
_id, _struct, _ends_with_uint8array = ((0, 7, 11), Struct('<BBB'), True)
def __init__(self, address, stop, data):
super(command_hardware_i2c_write, self).__init__(address, stop, data)
class response_hardware_i2c_write(Decodable):
decoded_type = namedtuple('response_hardware_i2c_write_type', (
'written',
))
decode_struct = Struct('<B')
class command_hardware_set_txpower(CommandEncoder):
__slots__ = ("power",)
_id, _struct, _ends_with_uint8array = ((0, 7, 12), Struct('<B'), False)
def __init__(self, power):
super(command_hardware_set_txpower, self).__init__(power)
class response_hardware_set_txpower(Decodable):
decoded_type = namedtuple('response_hardware_set_txpower_type', (
))
class command_hardware_timer_comparator(CommandEncoder):
__slots__ = ("timer", "channel", "mode", "comparator_value",)
_id, _struct, _ends_with_uint8array = ((0, 7, 13), Struct('<BBBH'), False)
def __init__(self, timer, channel, mode, comparator_value):
super(command_hardware_timer_comparator, self).__init__(timer, channel, mode, comparator_value)
class response_hardware_timer_comparator(Decodable):
decoded_type = namedtuple('response_hardware_timer_comparator_type', (
'result',
))
decode_struct = Struct('<H')
class command_test_phy_tx(CommandEncoder):
__slots__ = ("channel", "length", "type",)
_id, _struct, _ends_with_uint8array = ((0, 8, 0), Struct('<BBB'), False)
def __init__(self, channel, length, type):
super(command_test_phy_tx, self).__init__(channel, length, type)
class response_test_phy_tx(Decodable):
decoded_type = namedtuple('response_test_phy_tx_type', (
))
class command_test_phy_rx(CommandEncoder):
__slots__ = ("channel",)
_id, _struct, _ends_with_uint8array = ((0, 8, 1), Struct('<B'), False)
def __init__(self, channel):
super(command_test_phy_rx, self).__init__(channel)
class response_test_phy_rx(Decodable):
decoded_type = namedtuple('response_test_phy_rx_type', (
))
class command_test_phy_end(CommandEncoder):
__slots__ = ()
_id, _struct, _ends_with_uint8array = ((0, 8, 2), Struct('<'), False)
def __init__(self, ):
super(command_test_phy_end, self).__init__()
class response_test_phy_end(Decodable):
decoded_type = namedtuple('response_test_phy_end_type', (
'counter',
))
decode_struct = Struct('<H')
class command_test_phy_reset(CommandEncoder):
__slots__ = ()
_id, _struct, _ends_with_uint8array = ((0, 8, 3), Struct('<'), False)
def __init__(self, ):
super(command_test_phy_reset, self).__init__()
class response_test_phy_reset(Decodable):
decoded_type = namedtuple('response_test_phy_reset_type', (
))
class command_test_get_channel_map(CommandEncoder):
__slots__ = ()
_id, _struct, _ends_with_uint8array = ((0, 8, 4), Struct('<'), False)
def __init__(self, ):
super(command_test_get_channel_map, self).__init__()
class response_test_get_channel_map(Decodable):
decoded_type = namedtuple('response_test_get_channel_map_type', (
'channel_map',
))
decode_struct = Struct('<B')
ends_with_uint8array = True
class command_test_debug(CommandEncoder):
__slots__ = ("input",)
_id, _struct, _ends_with_uint8array = ((0, 8, 5), Struct('<B'), True)
def __init__(self, input):
super(command_test_debug, self).__init__(input)
class response_test_debug(Decodable):
decoded_type = namedtuple('response_test_debug_type', (
'output',
))
decode_struct = Struct('<B')
ends_with_uint8array = True
CLASS_NAME_MAP = {
0: 'system',
1: 'flash',
2: 'attributes',
3: 'connection',
4: 'attclient',
5: 'sm',
6: 'gap',
7: 'hardware',
8: 'test',
}
EVENT_TYPE_MAP = {
(0, 0): event_system_boot,
(0, 1): event_system_debug,
(0, 2): event_system_endpoint_watermark_rx,
(0, 3): event_system_endpoint_watermark_tx,
(0, 4): event_system_script_failure,
(0, 5): event_system_no_license_key,
(1, 0): event_flash_ps_key,
(2, 0): event_attributes_value,
(2, 1): event_attributes_user_read_request,
(2, 2): event_attributes_status,
(3, 0): event_connection_status,
(3, 1): event_connection_version_ind,
(3, 2): event_connection_feature_ind,
(3, 3): event_connection_raw_rx,
(3, 4): event_connection_disconnected,
(4, 0): event_attclient_indicated,
(4, 1): event_attclient_procedure_completed,
(4, 2): event_attclient_group_found,
(4, 3): event_attclient_attribute_found,
(4, 4): event_attclient_find_information_found,
(4, 5): event_attclient_attribute_value,
(4, 6): event_attclient_read_multiple_response,
(5, 0): event_sm_smp_data,
(5, 1): event_sm_bonding_fail,
(5, 2): event_sm_passkey_display,
(5, 3): event_sm_passkey_request,
(5, 4): event_sm_bond_status,
(6, 0): event_gap_scan_response,
(6, 1): event_gap_mode_changed,
(7, 0): event_hardware_io_port_status,
(7, 1): event_hardware_soft_timer,
(7, 2): event_hardware_adc_result,
}
COMMAND_RETURN_TYPE_MAP = {
(0, 1): response_system_hello,
(0, 2): response_system_address_get,
(0, 3): response_system_reg_write,
(0, 4): response_system_reg_read,
(0, 5): response_system_get_counters,
(0, 6): response_system_get_connections,
(0, 7): response_system_read_memory,
(0, 8): response_system_get_info,
(0, 9): response_system_endpoint_tx,
(0, 10): response_system_whitelist_append,
(0, 11): response_system_whitelist_remove,
(0, 12): response_system_whitelist_clear,
(0, 13): response_system_endpoint_rx,
(0, 14): response_system_endpoint_set_watermarks,
(1, 0): response_flash_ps_defrag,
(1, 1): response_flash_ps_dump,
(1, 2): response_flash_ps_erase_all,
(1, 3): response_flash_ps_save,
(1, 4): response_flash_ps_load,
(1, 5): response_flash_ps_erase,
(1, 6): response_flash_erase_page,
(1, 7): response_flash_write_words,
(2, 0): response_attributes_write,
(2, 1): response_attributes_read,
(2, 2): response_attributes_read_type,
(2, 3): response_attributes_user_read_response,
(2, 4): response_attributes_user_write_response,
(3, 0): response_connection_disconnect,
(3, 1): response_connection_get_rssi,
(3, 2): response_connection_update,
(3, 3): response_connection_version_update,
(3, 4): response_connection_channel_map_get,
(3, 5): response_connection_channel_map_set,
(3, 6): response_connection_features_get,
(3, 7): response_connection_get_status,
(3, 8): response_connection_raw_tx,
(4, 0): response_attclient_find_by_type_value,
(4, 1): response_attclient_read_by_group_type,
(4, 2): response_attclient_read_by_type,
(4, 3): response_attclient_find_information,
(4, 4): response_attclient_read_by_handle,
(4, 5): response_attclient_attribute_write,
(4, 6): response_attclient_write_command,
(4, 7): response_attclient_indicate_confirm,
(4, 8): response_attclient_read_long,
(4, 9): response_attclient_prepare_write,
(4, 10): response_attclient_execute_write,
(4, 11): response_attclient_read_multiple,
(5, 0): response_sm_encrypt_start,
(5, 1): response_sm_set_bondable_mode,
(5, 2): response_sm_delete_bonding,
(5, 3): response_sm_set_parameters,
(5, 4): response_sm_passkey_entry,
(5, 5): response_sm_get_bonds,
(5, 6): response_sm_set_oob_data,
(6, 0): response_gap_set_privacy_flags,
(6, 1): response_gap_set_mode,
(6, 2): response_gap_discover,
(6, 3): response_gap_connect_direct,
(6, 4): response_gap_end_procedure,
(6, 5): response_gap_connect_selective,
(6, 6): response_gap_set_filtering,
(6, 7): response_gap_set_scan_parameters,
(6, 8): response_gap_set_adv_parameters,
(6, 9): response_gap_set_adv_data,
(6, 10): response_gap_set_directed_connectable_mode,
(7, 0): response_hardware_io_port_config_irq,
(7, 1): response_hardware_set_soft_timer,
(7, 2): response_hardware_adc_read,
(7, 3): response_hardware_io_port_config_direction,
(7, 4): response_hardware_io_port_config_function,
(7, 5): response_hardware_io_port_config_pull,
(7, 6): response_hardware_io_port_write,
(7, 7): response_hardware_io_port_read,
(7, 8): response_hardware_spi_config,
(7, 9): response_hardware_spi_transfer,
(7, 10): response_hardware_i2c_read,
(7, 11): response_hardware_i2c_write,
(7, 12): response_hardware_set_txpower,
(7, 13): response_hardware_timer_comparator,
(8, 0): response_test_phy_tx,
(8, 1): response_test_phy_rx,
(8, 2): response_test_phy_end,
(8, 3): response_test_phy_reset,
(8, 4): response_test_get_channel_map,
(8, 5): response_test_debug,
}
class EventDecoderMixin(object):
def __init__(self):
self.event_type_map = {
(0, 0): self.handle_event_system_boot,
(0, 1): self.handle_event_system_debug,
(0, 2): self.handle_event_system_endpoint_watermark_rx,
(0, 3): self.handle_event_system_endpoint_watermark_tx,
(0, 4): self.handle_event_system_script_failure,
(0, 5): self.handle_event_system_no_license_key,
(1, 0): self.handle_event_flash_ps_key,
(2, 0): self.handle_event_attributes_value,
(2, 1): self.handle_event_attributes_user_read_request,
(2, 2): self.handle_event_attributes_status,
(3, 0): self.handle_event_connection_status,
(3, 1): self.handle_event_connection_version_ind,
(3, 2): self.handle_event_connection_feature_ind,
(3, 3): self.handle_event_connection_raw_rx,
(3, 4): self.handle_event_connection_disconnected,
(4, 0): self.handle_event_attclient_indicated,
(4, 1): self.handle_event_attclient_procedure_completed,
(4, 2): self.handle_event_attclient_group_found,
(4, 3): self.handle_event_attclient_attribute_found,
(4, 4): self.handle_event_attclient_find_information_found,
(4, 5): self.handle_event_attclient_attribute_value,
(4, 6): self.handle_event_attclient_read_multiple_response,
(5, 0): self.handle_event_sm_smp_data,
(5, 1): self.handle_event_sm_bonding_fail,
(5, 2): self.handle_event_sm_passkey_display,
(5, 3): self.handle_event_sm_passkey_request,
(5, 4): self.handle_event_sm_bond_status,
(6, 0): self.handle_event_gap_scan_response,
(6, 1): self.handle_event_gap_mode_changed,
(7, 0): self.handle_event_hardware_io_port_status,
(7, 1): self.handle_event_hardware_soft_timer,
(7, 2): self.handle_event_hardware_adc_result,
}
def handle_event_system_boot(self, event):
pass
def handle_event_system_debug(self, event):
pass
def handle_event_system_endpoint_watermark_rx(self, event):
pass
def handle_event_system_endpoint_watermark_tx(self, event):
pass
def handle_event_system_script_failure(self, event):
pass
def handle_event_system_no_license_key(self, event):
pass
def handle_event_flash_ps_key(self, event):
pass
def handle_event_attributes_value(self, event):
pass
def handle_event_attributes_user_read_request(self, event):
pass
def handle_event_attributes_status(self, event):
pass
def handle_event_connection_status(self, event):
pass
def handle_event_connection_version_ind(self, event):
pass
def handle_event_connection_feature_ind(self, event):
pass
def handle_event_connection_raw_rx(self, event):
pass
def handle_event_connection_disconnected(self, event):
pass
def handle_event_attclient_indicated(self, event):
pass
def handle_event_attclient_procedure_completed(self, event):
pass
def handle_event_attclient_group_found(self, event):
pass
def handle_event_attclient_attribute_found(self, event):
pass
def handle_event_attclient_find_information_found(self, event):
pass
def handle_event_attclient_attribute_value(self, event):
pass
def handle_event_attclient_read_multiple_response(self, event):
pass
def handle_event_sm_smp_data(self, event):
pass
def handle_event_sm_bonding_fail(self, event):
pass
def handle_event_sm_passkey_display(self, event):
pass
def handle_event_sm_passkey_request(self, event):
pass
def handle_event_sm_bond_status(self, event):
pass
def handle_event_gap_scan_response(self, event):
pass
def handle_event_gap_mode_changed(self, event):
pass
def handle_event_hardware_io_port_status(self, event):
pass
def handle_event_hardware_soft_timer(self, event):
pass
def handle_event_hardware_adc_result(self, event):
pass
    def handle_event(self, event_id, event):
        method = self.event_type_map.get(event_id)
        if method is not None:
            method(event)
        # else: unsupported event -- subclasses could log it here or add a
        # handle_unsupported_event() hook. Using .get() instead of a bare
        # try/except KeyError avoids masking KeyErrors raised by handlers.
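# --- Usage sketch (not part of the original API): a hypothetical decoder
# that overrides only the handlers it needs. The event payloads are plain
# dicts here purely for illustration.
class ScanWatcher(EventDecoderMixin):
    def handle_event_gap_scan_response(self, event):
        # React to GAP scan responses; every other event id falls through
        # to the inherited no-op handlers.
        print('scan response:', event)

if __name__ == '__main__':
    watcher = ScanWatcher()
    watcher.handle_event((6, 0), {'rssi': -52})   # dispatched to the override
    watcher.handle_event((0, 0), {'version': 1})  # inherited no-op
    watcher.handle_event((9, 9), None)            # unknown id: ignored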
| StarcoderdataPython |
3346361 | <gh_stars>1-10
# Generated by Django 3.1.5 on 2021-02-09 10:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.FileField(upload_to='contents/image/%Y_%m/', verbose_name='Фото')),
('name', models.CharField(max_length=50, verbose_name='Категория')),
('description', models.TextField(verbose_name='Описание')),
('url', models.SlugField(max_length=60, unique=True, verbose_name='Ссылка')),
],
options={
'verbose_name': 'Категория',
'verbose_name_plural': 'Категории',
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.FileField(upload_to='contents/image/%Y_%m/', verbose_name='Фото')),
('title', models.CharField(max_length=50, verbose_name='Название')),
('intro', models.TextField(verbose_name='Введение')),
('price', models.PositiveIntegerField(verbose_name='Цена')),
('content', models.FileField(upload_to='contents/%Y_%m/', verbose_name='Контент')),
('url', models.SlugField(max_length=60, unique=True, verbose_name='Ссылка')),
('draft', models.BooleanField(verbose_name='Черновик')),
('created_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Дата')),
('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Имя ползователя и статус')),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='main.category', verbose_name='Категория')),
],
options={
'verbose_name': 'Пост',
'verbose_name_plural': 'Посты',
},
),
]
| StarcoderdataPython |
61965 | <filename>src/gamesystem/scene_transision.py
class SceneManager:
def __init__(self):
self.scene_list = {}
self.current_scene = None
def append_scene(self, scene_name, scene):
self.scene_list[scene_name] = scene
def set_current_scene(self, scene_name):
self.current_scene = self.scene_list[scene_name]
class Scene:
def __init__(self, scene_manager):
self.sm = scene_manager
def handle_event(self, event):
pass
def update(self):
pass
def render(self):
pass
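# --- Usage sketch (illustrative only): a minimal scene wired into the
# manager; a real game loop would call these methods every frame.
class TitleScene(Scene):
    def render(self):
        print('rendering title scene')

if __name__ == '__main__':
    manager = SceneManager()
    manager.append_scene('title', TitleScene(manager))
    manager.set_current_scene('title')
    manager.current_scene.update()
    manager.current_scene.render()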
| StarcoderdataPython |
3397119 | from savu.plugins.plugin_tools import PluginTools
class StageMotionTools(PluginTools):
"""A Plugin to calculate stage motion from motion positions.
"""
def define_parameters(self):
"""
in_datasets:
visibility: datasets
dtype: [list[],list[str]]
description: Create a list of the dataset(s)
default: ["pmean"]
out_datasets:
visibility: datasets
dtype: [list[],list[str]]
description: Create a list of the dataset(s)
default: ["qmean"]
use_min_max:
visibility: intermediate
dtype: bool
description: Also use the min and max datasets
including all combinations of min, mean and max.
default: False
extra_in_datasets:
visibility: intermediate
dtype: list
description: The extra datasets to use as input for min and max.
default: ["pmin", "pmax"]
extra_out_datasets:
visibility: intermediate
dtype: list
description: The extra datasets to use as output for min and max.
default: ["qmin", "qmax"]
"""
| StarcoderdataPython |
3306702 | <reponame>Goyatuzo/HackerRank<gh_stars>0
def reverse_words(words_string):
return " ".join(reversed(words_string.strip().split(" "))) | StarcoderdataPython |
1703217 | # A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2021 NV Access Limited
# This file may be used under the terms of the GNU General Public License, version 2 or later.
# For more details see: https://www.gnu.org/licenses/gpl-2.0.html
"""Logic for reading text using NVDA in the notepad text editor.
"""
# imported methods start with underscore (_) so they don't get imported into robot files as keywords
from SystemTestSpy import (
_getLib,
)
# Imported for type information
from NotepadLib import NotepadLib as _NotepadLib
from AssertsLib import AssertsLib as _AssertsLib
_notepad: _NotepadLib = _getLib("NotepadLib")
_asserts: _AssertsLib = _getLib("AssertsLib")
# unlike other symbols used, symbols.dic doesn't preserve quote symbols with SYMPRES_ALWAYS
_wordsToExpectedSymbolLevelAllSpeech = {
'Say': 'Say',
'(quietly)': 'left paren(quietly right paren)',
'"Hello,': 'quote Hello comma,',
'Jim".': 'Jim quote dot.',
}
_wordsToExpectedSymbolLevelDefaultSpeech = {
'Say': 'Say',
'(quietly)': '(quietly)',
'"Hello,': 'Hello,',
'Jim".': 'Jim .',
}
def test_symbolLevelWord_all():
textStr = ' '.join(_wordsToExpectedSymbolLevelAllSpeech.keys())
_notepad.prepareNotepad(f"Test: {textStr}")
for expectedWord in _wordsToExpectedSymbolLevelAllSpeech.values():
wordSpoken = _notepad.getSpeechAfterKey("numpad6") # navigate to next word
_asserts.strings_match(wordSpoken, expectedWord)
def test_symbolLevelWord_default():
textStr = ' '.join(_wordsToExpectedSymbolLevelDefaultSpeech.keys())
_notepad.prepareNotepad(f"Test: {textStr}")
for expectedWord in _wordsToExpectedSymbolLevelDefaultSpeech.values():
wordSpoken = _notepad.getSpeechAfterKey("numpad6") # navigate to next word
_asserts.strings_match(wordSpoken, expectedWord)
| StarcoderdataPython |
55880 | import json
import os
import time
def get_cache_path():
home = os.path.expanduser("~")
    return os.path.join(home, 'package_list.cdncache')
def time_has_passed(last_time, time_now):
time_is_blank = time_now is None or last_time is None
if time_is_blank:
return time_is_blank
time_difference = int(time.time()) - int(last_time)
time_has_passed = time_difference > int(time_now)
print(time_difference)
print(time_has_passed)
return time_has_passed
def get_package_list(path):
packageList = {}
with open(path, 'r') as f:
packageList = json.loads(f.read())
return packageList
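# --- Usage sketch (illustrative only): refresh the cached package list when
# it is older than max_age_seconds. fetch_package_list is a stand-in for
# whatever actually downloads the data; it is not defined in this module.
def refresh_cache_example(fetch_package_list, max_age_seconds=3600):
    path = get_cache_path()
    cached = get_package_list(path) if os.path.exists(path) else {}
    if time_has_passed(cached.get('fetched_at'), max_age_seconds):
        cached = {'fetched_at': int(time.time()),
                  'packages': fetch_package_list()}
        set_package_list(path, cached)
    return cached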
def set_package_list(path, packageList):
with open(path, 'w') as f:
f.write(json.dumps(packageList)) | StarcoderdataPython |
3344998 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListNotebookWorkspaceConnectionInfoResult',
'AwaitableListNotebookWorkspaceConnectionInfoResult',
'list_notebook_workspace_connection_info',
]
@pulumi.output_type
class ListNotebookWorkspaceConnectionInfoResult:
"""
The connection info for the given notebook workspace
"""
def __init__(__self__, auth_token=None, notebook_server_endpoint=None):
if auth_token and not isinstance(auth_token, str):
raise TypeError("Expected argument 'auth_token' to be a str")
pulumi.set(__self__, "auth_token", auth_token)
if notebook_server_endpoint and not isinstance(notebook_server_endpoint, str):
raise TypeError("Expected argument 'notebook_server_endpoint' to be a str")
pulumi.set(__self__, "notebook_server_endpoint", notebook_server_endpoint)
@property
@pulumi.getter(name="authToken")
def auth_token(self) -> str:
"""
Specifies auth token used for connecting to Notebook server (uses token-based auth).
"""
return pulumi.get(self, "auth_token")
@property
@pulumi.getter(name="notebookServerEndpoint")
def notebook_server_endpoint(self) -> str:
"""
Specifies the endpoint of Notebook server.
"""
return pulumi.get(self, "notebook_server_endpoint")
class AwaitableListNotebookWorkspaceConnectionInfoResult(ListNotebookWorkspaceConnectionInfoResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListNotebookWorkspaceConnectionInfoResult(
auth_token=self.auth_token,
notebook_server_endpoint=self.notebook_server_endpoint)
def list_notebook_workspace_connection_info(account_name: Optional[str] = None,
notebook_workspace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListNotebookWorkspaceConnectionInfoResult:
"""
The connection info for the given notebook workspace
:param str account_name: Cosmos DB database account name.
:param str notebook_workspace_name: The name of the notebook workspace resource.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['notebookWorkspaceName'] = notebook_workspace_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20210515:listNotebookWorkspaceConnectionInfo', __args__, opts=opts, typ=ListNotebookWorkspaceConnectionInfoResult).value
return AwaitableListNotebookWorkspaceConnectionInfoResult(
auth_token=__ret__.auth_token,
notebook_server_endpoint=__ret__.notebook_server_endpoint)
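# --- Usage sketch (hypothetical resource names): fetch connection info for
# an existing notebook workspace and read its endpoint.
#
#     info = list_notebook_workspace_connection_info(
#         account_name='my-cosmos-account',
#         notebook_workspace_name='default',
#         resource_group_name='my-resource-group')
#     print(info.notebook_server_endpoint)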
| StarcoderdataPython |
import pandas as pd  # used below to assemble the prediction DataFrames
from keras.models import model_from_json
from keras.optimizers import RMSprop
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("saved_models/Emotion_Voice_Detection_Model.h5")
print("Loaded model from disk")
# evaluate loaded model on test data
# NB: x_testcnn, y_test and lb (the label encoder) come from the training
# script this snippet follows; the optimizer below is an assumed stand-in
# for the one used there.
opt = RMSprop(lr=0.00001, decay=1e-6)
loaded_model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
score = loaded_model.evaluate(x_testcnn, y_test, verbose=0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1]*100))
preds = loaded_model.predict(x_testcnn,
batch_size=32,
verbose=1)
preds1=preds.argmax(axis=1)
abc = preds1.astype(int).flatten()
predictions = (lb.inverse_transform((abc)))
preddf = pd.DataFrame({'predictedvalues': predictions})
actual=y_test.argmax(axis=1)
abc123 = actual.astype(int).flatten()
actualvalues = (lb.inverse_transform((abc123)))
actualdf = pd.DataFrame({'actualvalues': actualvalues})
finaldf = actualdf.join(preddf)
finaldf.groupby('actualvalues').count()
finaldf.groupby('predictedvalues').count()
finaldf.to_csv('Predictions.csv', index=False)
| StarcoderdataPython |
4841202 | """Auto-generated file, do not edit by hand. CZ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_CZ = PhoneMetadata(id='CZ', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2,5}', possible_number_pattern='\\d{3,6}', possible_length=(3, 4, 5, 6)),
toll_free=PhoneNumberDesc(national_number_pattern='116(?:00[06]|111|123)', possible_number_pattern='\\d{6}', example_number='116000', possible_length=(6,)),
emergency=PhoneNumberDesc(national_number_pattern='1(?:12|5[058])', possible_number_pattern='\\d{3}', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:1(?:2|6\\d{3}|8\\d)|2\\d{2,3}|3\\d{3,4}|4\\d{3}|5[058]|99)', possible_number_pattern='\\d{3,6}', example_number='116123', possible_length=(3, 4, 5, 6)),
short_data=True)
| StarcoderdataPython |
1604793 | import numpy as np
import matplotlib.pyplot as plt
from multilayer_perceptron import MLP
from gradient_boosting_decision_tree import GBDT
from xgboost import XGBoost
from random_forest import RandomForest
from adaboost import AdaBoost
from factorization_machines import FactorizationMachines
from support_vector_machine import SVM
from k_nearest_neighbor import kNearestNeighbor
def gen_linear(train_num):
x = 2 * np.random.random((train_num, 2)) - 1
return x, (x.sum(axis=1) > 0) * 1
def gen_circle(train_num):
x = 2 * np.random.random((train_num, 2)) - 1
return x, (np.square(x).sum(axis=1) > 0.6) * 1
def gen_xor(train_num):
x = 2 * np.random.random((train_num, 2)) - 1
return x, np.array([(xi[0] * xi[1] > 0) for xi in x]) * 1
def gen_spiral(train_num):
r = 0.8 * np.arange(train_num) / train_num
y = np.arange(train_num) % 2
t = 1.75 * r * 2 * np.pi + y * np.pi
x = np.c_[r * np.sin(t) + np.random.random(train_num) /
10, r * np.cos(t) + np.random.random(train_num) / 10]
return x, y * 1
def gen_moon(train_num):
y = np.arange(train_num) % 2
x0 = (y - 0.5) * (.5 - np.cos(np.linspace(0, np.pi, train_num))) + \
np.random.random(train_num) / 10
x1 = (y - 0.5) * (.5 - 2 * np.sin(np.linspace(0, np.pi, train_num))
) + np.random.random(train_num) / 10
return np.c_[x0, x1], y
# visualize decision boundary change
def boundary_vis_plots(model, x, y, subplot=[1, 1, 1]):
plt.subplot(subplot[0], subplot[1], subplot[2])
xx, yy = np.meshgrid(np.linspace(-1, 1, 50), np.linspace(-1, 1, 50))
pred = model.predict(np.c_[xx.ravel(), yy.ravel()])
zz = pred.reshape(xx.shape) if len(pred.shape) == 1 or pred.shape[
1] == 1 else pred[:, 1].reshape(xx.shape)
if subplot[2] <= subplot[1]:
plt.title(type(model).__name__)
plt.contourf(xx, yy, zz, levels=np.linspace(
zz.min(), zz.max(), 40), cmap=plt.cm.RdBu)
plt.contour(xx, yy, zz, levels=[0.5], colors='darkred')
plt.scatter(x[:, 0], x[:, 1], c=np.array(
['red', 'blue'])[y], s=10, edgecolors='k')
if subplot[2] == subplot[0] * subplot[1]:
plt.show()
def main():
data_loaders = [gen_linear, gen_circle, gen_xor, gen_spiral, gen_moon]
models = [
(kNearestNeighbor, {'k': 5}),
(FactorizationMachines, {'learning_rate': 1, 'embedding_dim': 1}),
(SVM, {}),
(AdaBoost, {'esti_num': 10}),
(RandomForest, {'tree_num': 20, 'max_depth': 3}),
(XGBoost, {'tree_num': 20, 'max_depth': 3}),
(MLP, {'act_type': 'Tanh', 'opt_type': 'Adam', 'layers': [
2, 8, 7, 2], 'epochs': 200, 'learning_rate': 0.5, 'lmbda': 1e-4})
]
for i, data_loader in enumerate(data_loaders):
x, y = data_loader(256)
for j, model in enumerate(models):
clf = model[0](**model[1])
            # SVM and AdaBoost (indices 2, 3) expect labels in {-1, +1}
            clf.fit(x, y if j not in [2, 3] else 2 * y - 1)
boundary_vis_plots(clf, x, y, subplot=[len(
data_loaders), len(models), len(models) * i + 1 + j])
if __name__ == "__main__":
main()
| StarcoderdataPython |
3327519 | <reponame>Lucassardao/WebProjetoPradopolis
from flask import Flask, render_template
# create the Flask instance
app = Flask(__name__)
# create the routes
@app.route('/')
def index():
return render_template("index.html")
@app.route('/index.html')
def index_1():
return render_template("index.html")
@app.route("/registro.html")
def registro():
return render_template('registro.html') | StarcoderdataPython |
3360614 | <reponame>triplejingle/cito
function_name = "test"
def test():
print("hoi")
| StarcoderdataPython |
1739236 | from ecdsa import SigningKey
from ecdsa.keys import VerifyingKey
class Card:
def __init__(self, poke_id, name, poke_type, hp, attack, defense, speed, total, legendary):
self.poke_id = poke_id
self.name = name
self.poke_type = poke_type
self.hp = hp
self.attack = attack
self.defense = defense
self.speed = speed
self.total = total
self.legendary = legendary
def give_print_str(self, key, value):
temp_str = key + " " + str(value)
space_left = 50 - len(temp_str) - 4
print_str = "| " + temp_str + " "*space_left + " |"
return print_str
def view_card(self):
print("*******************POKEMON CARD*******************")
print(self.give_print_str("ID:", self.poke_id))
print(self.give_print_str("NAME:", self.name))
print(self.give_print_str("TYPE:", self.poke_type))
print(self.give_print_str("HP:", self.hp))
print(self.give_print_str("ATTACK:", self.attack))
print(self.give_print_str("DEFENSE:", self.defense))
print(self.give_print_str("SPEED:", self.speed))
print(self.give_print_str("TOTAL:", self.total))
print(self.give_print_str("LEGENDARY:", self.legendary))
print("*******************POKEMON CARD*******************")
| StarcoderdataPython |
1745042 | <reponame>mlockett42/eosfactory
#!/usr/bin/python3
import sys
import os
import json
import re
import eosfactory.core.config as config
import eosfactory.core.logger as logger
import eosfactory.core.interface as interface
import eosfactory.core.setup as setup
import eosfactory.core.teos as teos
import eosfactory.core.cleos as cleos
import eosfactory.core.cleos_get as cleos_get
def reboot():
logger.INFO('''
######### Reboot EOSFactory session.
''')
stop()
import eosfactory.shell.account as account
account.reboot()
def clear_testnet_cache():
''' Remove wallet files associated with the current testnet.
'''
if not setup.file_prefix():
return
logger.TRACE('''
Removing testnet cache for prefix `{}`
'''.format(setup.file_prefix()))
    kill_keosd() # otherwise the manager may protect the wallet files
dir = config.keosd_wallet_dir()
files = os.listdir(dir)
try:
for file in files:
if file.startswith(setup.file_prefix()):
os.remove(os.path.join(dir, file))
except Exception as e:
logger.ERROR('''
Cannot remove testnet cache. The error message is:
{}
'''.format(str(e)))
logger.TRACE('''
Testnet cache successfully removed.
''')
def accout_names_2_object_names(sentence, keys=False):
if not setup.is_translating:
return sentence
exceptions = ["eosio"]
map = account_map()
for name in map:
account_object_name = map[name]
if name in exceptions:
continue
sentence = sentence.replace(name, account_object_name)
if keys:
account = cleos.GetAccount(
name, is_info=False, is_verbose=False)
owner_key = account.owner()
if owner_key:
sentence = sentence.replace(
owner_key, account_object_name + "@owner")
active_key = account.active()
if active_key:
sentence = sentence.replace(
active_key, account_object_name + "@active")
return sentence
def object_names_2_accout_names(sentence):
map = account_map()
for name in map:
account_object_name = map[name]
sentence = sentence.replace(account_object_name, name)
return sentence
def stop_keosd():
cleos.WalletStop(is_verbose=False)
def kill_keosd():
os.system("pkill keosd")
class Transaction():
def __init__(self, msg):
self.transaction_id = ""
msg_keyword = "executed transaction: "
if msg_keyword in msg:
            beg = msg.find(msg_keyword) + len(msg_keyword)
            end = msg.find(" ", beg)
            self.transaction_id = msg[beg : end] if end != -1 else msg[beg:]
else:
try:
self.transaction_id = msg.json["transaction_id"]
except:
pass
def get_transaction(self):
pass
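# --- Usage sketch (illustrative cleos-style output line):
#
#     tx = Transaction("executed transaction: 4e0d5abc  128 bytes  1073 us")
#     tx.transaction_id   # -> "4e0d5abc"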
def is_local_testnet():
cleos.set_local_nodeos_address_if_none()
return setup.is_local_address
def node_start(clear=False, nodeos_stdout=None):
try:
teos.node_start(clear, nodeos_stdout)
teos.node_probe()
except:
try:
teos.node_start(clear, nodeos_stdout)
teos.node_probe()
except:
teos.on_nodeos_error(clear)
def reset(nodeos_stdout=None):
''' Start clean the local EOSIO node.
The procedure addresses problems with instabilities of EOSIO *nodeos*
executable: it happens that it blocks itself on clean restart.
The issue is patched with one subsequent restart if the first attempt
fails. However, it happens that both launches fail, rarely due to
instability of *nodeos*, sometimes because of misconfiguration.
When both launch attempts fail, an exception routine passes. At first,
the command line is printed, for *example*::
ERROR:
The local ``nodeos`` failed to start twice in sequence. Perhaps, something is
wrong with configuration of the system. See the command line issued:
/usr/bin/nodeosx
--http-server-address 127.0.0.1:8888
--data-dir /mnt/c/Workspaces/EOS/eosfactory/localnode/
--config-dir /mnt/c/Workspaces/EOS/eosfactory/localnode/
--chain-state-db-size-mb 200 --contracts-console --verbose-http-errors --enable-stale-production --producer-name eosio
--signature-provider <KEY>KEY:<KEY>
--plugin eosio::producer_plugin
--plugin eosio::chain_api_plugin
--plugin eosio::http_plugin
--plugin eosio::history_api_plugin
--genesis-json /mnt/c/Workspaces/EOS/eosfactory/localnode/genesis.json
--delete-all-blocks
Next, the command line is executed, for *example*::
Now, see the result of an execution of the command line.
/bin/sh: 1: /usr/bin/nodeosx: not found
The exemplary case is easy, it explains itself. Generally, the command
line given can be executed in a *bash* terminal separately, in order to
understand a problem.
Args:
nodeos_stdout (str): If set, a file where *stdout* stream of
the local *nodeos* is send. Note that the file can be included to
the configuration of EOSFactory, see :func:`.core.config.nodeos_stdout`.
If the file is set with the configuration, and in the same time
it is set with this argument, the argument setting prevails.
'''
if not cleos.set_local_nodeos_address_if_none():
logger.INFO('''
No local nodeos is set: {}
'''.format(setup.nodeos_address()))
import eosfactory.shell.account as account
teos.keosd_start()
account.reboot()
clear_testnet_cache()
node_start(clear=True, nodeos_stdout=nodeos_stdout)
def resume(nodeos_stdout=None):
''' Resume the local EOSIO node.
Args:
nodeos_stdout (str): If set, a file where *stdout* stream of
the local *nodeos* is send. Note that the file can be included to
the configuration of EOSFactory, see :func:`.core.config.nodeos_stdout`.
If the file is set with the configuration, and in the same time
it is set with this argument, the argument setting prevails.
'''
if not cleos.set_local_nodeos_address_if_none():
logger.INFO('''
    No local nodeos is set: {}
'''.format(setup.nodeos_address()))
node_start(nodeos_stdout=nodeos_stdout)
def stop():
''' Stops all running EOSIO nodes.
'''
teos.node_stop()
def status():
'''
Display EOS node status.
'''
logger.INFO('''
######### Node ``{}``, head block number ``{}``.
'''.format(
setup.nodeos_address(),
cleos_get.GetInfo(is_verbose=0).head_block))
def info():
'''
Display EOS node info.
'''
logger.INFO(str(cleos_get.GetInfo(is_verbose=False)))
def verify_testnet_production():
head_block_num = 0
try: # if running, json is produced
head_block_num = cleos_get.GetInfo(is_verbose=False).head_block
except:
pass
domain = "LOCAL" if is_local_testnet() else "REMOTE"
if not head_block_num:
logger.ERROR('''
{} testnet is not running or is not responding @ {}.
'''.format(domain, setup.nodeos_address()))
else:
logger.INFO('''
{} testnet is active @ {}.
'''.format(domain, setup.nodeos_address()))
return head_block_num
def account_map(logger=None):
'''Return json account map
Attempt to open the account map file named ``setup.account_map``, located
in the wallet directory ``config.keosd_wallet_dir()``, to return its json
contents. If the file does not exist, return an empty json.
If the file is corrupted, offer editing the file with the ``nano`` linux
    editor. Return ``None`` if the offer is rejected.
'''
wallet_dir_ = config.keosd_wallet_dir(raise_error=False)
if not wallet_dir_:
return {}
path = os.path.join(wallet_dir_, setup.account_map)
while True:
try: # whether the setup map file exists:
with open(path, "r") as input_file:
return json.load(input_file)
except Exception as e:
if isinstance(e, FileNotFoundError):
return {}
else:
logger.OUT('''
            The account mapping file is malformed. The error message is:
{}
Do you want to edit the file?
'''.format(str(e)))
answer = input("y/n <<< ")
if answer == "y":
edit_account_map()
continue
else:
logger.ERROR('''
Use the function 'efman.edit_account_map(text_editor="nano")'
or the corresponding method of any object of the 'eosfactory.wallet.Wallet`
class to edit the file.
''')
return None
def save_account_map(map):
save_map(map, setup.account_map)
def edit_account_map():
edit_map(setup.account_map)
def save_map(map, file_name):
map = json.dumps(map, indent=3, sort_keys=True)
with open(os.path.join(config.keosd_wallet_dir(), file_name), "w") as out:
out.write(map)
def edit_map(file_name, text_editor="nano"):
import subprocess
subprocess.run([text_editor, os.path.join(
config.keosd_wallet_dir(), file_name)])
read_map(file_name, text_editor)
def read_map(file_name, text_editor="nano"):
'''Return json account map
Attempt to open the account map file named ``setup.account_map``, located
in the wallet directory ``config.keosd_wallet_dir()``, to return its json
contents. If the file does not exist, return an empty json.
If the file is corrupted, offer editing the file with the ``nano`` linux
    editor. Return ``None`` if the offer is rejected.
'''
wallet_dir_ = config.keosd_wallet_dir()
path = os.path.join(wallet_dir_, file_name)
while True:
try: # whether the setup map file exists:
with open(path, "r") as input_file:
return json.load(input_file)
except Exception as e:
if isinstance(e, FileNotFoundError):
return {}
else:
logger.ERROR('''
The json file
{}
            is malformed. The error message is:
{}
Do you want to edit the file?
'''.format(str(path), str(e)), is_fatal=False, translate=False)
answer = input("y/n <<< ")
if answer == "y":
import subprocess
subprocess.run([text_editor, path])
continue
else:
logger.ERROR('''
Use the function 'manager.edit_account_map(text_editor="nano")' to edit the file.
''', translate=False)
return None
def data_json(data):
class Encoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, interface.Account):
return str(o)
else:
                return json.JSONEncoder.default(self, o)
if not data:
return data
data_json = data
if isinstance(data, dict) or isinstance(data, list):
data_json = json.dumps(data, cls=Encoder)
else:
if isinstance(data, str):
            data_json = re.sub(r"\s+", " ", data)  # \s already matches \n and \t
data_json = object_names_2_accout_names(data_json)
return data_json | StarcoderdataPython |
1797084 | <filename>chrF/measure.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
chrF - Reimplementation of the character-F evaluation measure for SMT
<NAME>. (2015). ChrF: character n-gram F-score
for automatic MT evaluation. EMNLP 2015, 392.
This implementation (c) <NAME> 2016.
"""
import collections
import itertools
import sys
Errors = collections.namedtuple('Errors',
['count', 'precrec', 'missing', 'hyplen', 'reflen'])
#__all__ = []
def chrf(hypothesis, references, beta=2.0, use_space=True):
"""convenience function for the most common setting:
equally weighted n-grams up to length 6"""
factor = beta ** 2
max_n = 6
nw = [1/float(max_n) for _ in range(max_n)]
    stats = evaluate_single(hypothesis, references, max_n, factor,
                            use_space=use_space)
pres, recs, fs = stats.ngram_prf(factor)
_, _, score = apply_ngram_weights(pres, recs, fs, nw)
return score
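# A minimal usage sketch (made-up strings; chrF scores fall in [0, 100]):
#
#     score = chrf("the cat is on the mat",
#                  ["the cat sat on the mat"])
#     print("chrF2: {:.2f}".format(score))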
def ngrams(line, n):
"""Yields ngrams of length exactly n."""
offsets = [line[i:] for i in range(n)]
for ngram in zip(*offsets):
yield ngram
def ngrams_up_to(line, max_n, use_space=True):
"""Yields all character n-grams of lengths from 1 to max_n.
If use_space is False, spaces are not counted as chars."""
# space appended to treat last word equally to others
# and for compatibility with original
line += ' '
if not use_space:
line = line.replace(' ', '')
use_n = min(max_n, len(line))
result = []
for n in range(1, use_n + 1):
result.append(list(ngrams(line, n)))
for _ in range(max_n - use_n):
# empty lists for too long n-grams
result.append([])
return result
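# For example, ngrams_up_to("ab", 3) operates on "ab " (trailing space
# appended) and returns one list per n-gram length:
#     [[('a',), ('b',), (' ',)], [('a', 'b'), ('b', ' ')], [('a', 'b', ' ')]]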
def errors_n(hypothesis, reference):
"""Errors for a single length of n-gram"""
errorcount = 0.0
precrec = 0.0
missing = []
ref_counts = collections.Counter(reference)
for ngram in hypothesis:
if ref_counts[ngram] > 0:
ref_counts[ngram] -= 1
else:
errorcount += 1.
missing.append(ngram)
if len(hypothesis) != 0:
precrec = 100. * errorcount / len(hypothesis)
else:
if len(reference) != 0:
precrec = 100
else:
precrec = 0.
return Errors(errorcount, precrec, missing,
len(hypothesis), len(reference))
def errors_multiref(hypothesis, references, max_n, use_space=True):
"""Yields errors in both directions,
against the best matching of multiple references,
for all ngram lengths up to max_n"""
hyp_ngrams = ngrams_up_to(hypothesis, max_n, use_space=use_space)
ref_ngrams = zip(*(ngrams_up_to(line, max_n, use_space=use_space)
for line in references))
for (i, (hyp, refs)) in enumerate(zip(hyp_ngrams, ref_ngrams)):
best_hyp_error = min((errors_n(hyp, ref) for ref in refs),
key=lambda x: x.precrec)
best_ref_error = min((errors_n(ref, hyp) for ref in refs),
key=lambda x: x.precrec)
yield (i, best_hyp_error, best_ref_error)
def print_missing_ngrams(n_sentences, side, i, missing, compatible=False):
sys.stdout.write('{}::{}-{}grams: '.format(
n_sentences, side, i + 1))
if compatible:
# output compatible with original implementation
sys.stdout.write(' '.join(
'=='.join(ngram).replace(' ', '=')
for ngram in missing))
sys.stdout.write(' ')
else:
sys.stdout.write(' '.join(
''.join(ngram)
for ngram in missing))
sys.stdout.write('\n')
class Stats(object):
def __init__(self, max_n):
self.max_n = max_n
self.hyp_err = [0. for _ in range(max_n)]
self.hyp_len = [0. for _ in range(max_n)]
self.hyp_missing = [list() for _ in range(max_n)]
self.ref_err = [0. for _ in range(max_n)]
self.ref_len = [0. for _ in range(max_n)]
self.ref_missing = [list() for _ in range(max_n)]
def __iadd__(self, other):
for i in range(self.max_n):
self.hyp_err[i] += other.hyp_err[i]
self.hyp_len[i] += other.hyp_len[i]
self.ref_err[i] += other.ref_err[i]
self.ref_len[i] += other.ref_len[i]
self.hyp_missing = None # not aggregated
self.ref_missing = None # not aggregated
return self
def ngram_prf(self, factor):
pre = [100 - (100 * (self.hyp_err[i] / self.hyp_len[i]))
if self.hyp_len[i] > 0 else 0
for i in range(self.max_n)]
rec = [100 - (100 * (self.ref_err[i] / self.ref_len[i]))
if self.ref_len[i] > 0 else 0
for i in range(self.max_n)]
divisors = [(factor * pre[i] + rec[i]) for i in range(self.max_n)]
f = [(1 + factor) * pre[i] * rec[i] / divisors[i]
if divisors[i] > 0 else 0
for i in range(self.max_n)]
return (pre, rec, f)
def apply_ngram_weights(pres, recs, fs, ngram_weights):
pre = sum(w * p for (w, p) in zip(ngram_weights, pres))
rec = sum(w * r for (w, r) in zip(ngram_weights, recs))
f = sum(w * f for (w, f) in zip(ngram_weights, fs))
return (pre, rec, f)
def evaluate_single(hypothesis, references, max_n, factor=None,
use_space=True):
stats = Stats(max_n)
errors = errors_multiref(hypothesis, references,
max_n, use_space=use_space)
for (i, hyp_error, ref_error) in errors:
# in both cases .hyplen is correct
# hyplen is a misnomer: should be "length used for normalization"
stats.hyp_err[i] = hyp_error.count
stats.hyp_len[i] = hyp_error.hyplen
stats.ref_err[i] = ref_error.count
stats.ref_len[i] = ref_error.hyplen
stats.hyp_missing[i] = hyp_error.missing
stats.ref_missing[i] = ref_error.missing
return stats
def print_single(stats, line_n, beta, ngram_weights,
print_missing, sentence_level, ngram_level, compatible):
factor = beta ** 2
if print_missing:
for i in range(stats.max_n):
print_missing_ngrams(line_n, 'ref', i, stats.ref_missing[i],
compatible)
for i in range(stats.max_n):
print_missing_ngrams(line_n, 'hyp', i, stats.hyp_missing[i],
compatible)
pres, recs, fs = stats.ngram_prf(factor)
if ngram_level:
for i in range(stats.max_n):
sys.stdout.write('{}::{}gram-{:6s}{:.4f}\n'.format(
line_n, i + 1, 'F', fs[i]))
sys.stdout.write('{}::{}gram-{:6s}{:.4f}\n'.format(
line_n, i + 1, 'Prec', pres[i]))
sys.stdout.write('{}::{}gram-{:6s}{:.4f}\n'.format(
line_n, i + 1, 'Rec', recs[i]))
if sentence_level:
pre, rec, f = apply_ngram_weights(pres, recs, fs, ngram_weights)
sys.stdout.write('{}::chr{}-{}\t{:.4f}\n'.format(
line_n, 'F', beta, f))
sys.stdout.write('{}::chr{}\t{:.4f}\n'.format(
line_n, 'Prec', pre))
sys.stdout.write('{}::chr{}\t{:.4f}\n'.format(
line_n, 'Rec', rec))
def print_summary(stats, beta, ngram_weights,
ngram_level=False, hide_precrec=False):
factor = beta ** 2
tot_pre, tot_rec, tot_f = stats.ngram_prf(factor)
if ngram_level:
for i in range(stats.max_n):
sys.stdout.write('{}gram-{:6s}{:.4f}\n'.format(
i + 1, 'F', tot_f[i]))
sys.stdout.write('{}gram-{:6s}{:.4f}\n'.format(
i + 1, 'Prec', tot_pre[i]))
sys.stdout.write('{}gram-{:6s}{:.4f}\n'.format(
i + 1, 'Rec', tot_rec[i]))
pre, rec, f = apply_ngram_weights(tot_pre, tot_rec, tot_f, ngram_weights)
sys.stdout.write('chr{}-{}\t{:.4f}\n'.format(
'F', beta, f))
if not hide_precrec:
sys.stdout.write('chr{}\t{:.4f}\n'.format(
'Prec', pre))
sys.stdout.write('chr{}\t{:.4f}\n'.format(
'Rec', rec))
def evaluate(hyp_lines,
ref_tuples,
max_n,
beta=1.0,
ngram_weights=None,
use_space=True,
summary=True,
hide_precrec=False,
print_missing=False,
sentence_level=False,
ngram_level=False,
compatible=False):
n_sentences = 0
factor = beta ** 2
tot_stats = Stats(max_n)
if ngram_weights is None:
ngram_weights = [1/float(max_n) for _ in range(max_n)]
else:
tot = sum(ngram_weights)
ngram_weights = [float(w) / tot for w in ngram_weights]
for (hyp_line, refs) in safe_zip(hyp_lines, ref_tuples):
n_sentences += 1
sent_stats = evaluate_single(
hyp_line,
refs,
max_n,
factor,
use_space=use_space)
tot_stats += sent_stats
if sentence_level:
print_single(sent_stats,
n_sentences,
beta,
ngram_weights,
print_missing=print_missing,
sentence_level=sentence_level,
ngram_level=ngram_level,
compatible=compatible)
if summary:
print_summary(tot_stats, beta, ngram_weights,
ngram_level, hide_precrec)
return tot_stats
def safe_zip(*iterables):
sentinel = object()
for (j, tpl) in enumerate(itertools.zip_longest(*iterables, fillvalue=sentinel)):
for (i, val) in enumerate(tpl):
if val is sentinel:
raise ValueError('Input {} was too short. '
'Row {} (and later) missing.'.format(i, j))
yield tpl
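# --- Usage sketch: safe_zip behaves like zip() but raises instead of
# silently truncating when the inputs have unequal lengths.
#
#     list(safe_zip('ab', 'xy'))   # [('a', 'x'), ('b', 'y')]
#     list(safe_zip('ab', 'xyz'))  # ValueError: Input 0 was too short.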
| StarcoderdataPython |
22850 | <gh_stars>100-1000
from django.conf.urls import url, patterns
from .views import tutorial_email, tutorial_message
urlpatterns = patterns("", # flake8: noqa
url(r"^mail/(?P<pk>\d+)/(?P<pks>[0-9,]+)/$", tutorial_email, name="tutorial_email"),
url(r"^message/(?P<pk>\d+)/$", tutorial_message, name="tutorial_message"),
)
| StarcoderdataPython |