id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1694763 | import logging
import sonrai.platform.aws.arn
def run(ctx):
    """Delete the IAM role identified by ctx.resource_id.

    Before a role can be deleted it must be removed from all instance
    profiles, its inline policies deleted and its managed policies detached:
    https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_manage_delete.html#roles-managingrole-deleting-api

    :param ctx: execution context supplying the AWS client factory and the
        role ARN in ctx.resource_id.
    """
    iam_client = ctx.get_client().get('iam')

    # Get the role name from the ARN, asserting it really is an IAM role.
    resource_arn = sonrai.platform.aws.arn.parse(ctx.resource_id)
    role_name = resource_arn \
        .assert_service("iam") \
        .assert_type("role") \
        .name

    # The list_* IAM APIs are paginated; follow Marker/IsTruncated so roles
    # with many attachments are fully cleaned up (the original read only the
    # first page, which could leave attachments and make delete_role fail).
    # NOTE(review): assumes a boto3-style IAM client, as the method names and
    # response keys indicate — confirm against the sonrai client wrapper.
    instance_profiles = iam_client.list_instance_profiles_for_role(RoleName=role_name)
    while True:
        for instance_profile in instance_profiles['InstanceProfiles']:
            iam_client.remove_role_from_instance_profile(
                InstanceProfileName=instance_profile['InstanceProfileName'],
                RoleName=role_name)
        if not instance_profiles.get('IsTruncated'):
            break
        instance_profiles = iam_client.list_instance_profiles_for_role(
            RoleName=role_name, Marker=instance_profiles['Marker'])

    # Delete inline policies.
    role_policies = iam_client.list_role_policies(RoleName=role_name)
    while True:
        for policy_name in role_policies['PolicyNames']:
            iam_client.delete_role_policy(RoleName=role_name, PolicyName=policy_name)
        if not role_policies.get('IsTruncated'):
            break
        role_policies = iam_client.list_role_policies(
            RoleName=role_name, Marker=role_policies['Marker'])

    # Detach managed policies.
    role_attached_policies = iam_client.list_attached_role_policies(RoleName=role_name)
    while True:
        for attached_policy in role_attached_policies['AttachedPolicies']:
            iam_client.detach_role_policy(
                RoleName=role_name, PolicyArn=attached_policy['PolicyArn'])
        if not role_attached_policies.get('IsTruncated'):
            break
        role_attached_policies = iam_client.list_attached_role_policies(
            RoleName=role_name, Marker=role_attached_policies['Marker'])

    # Lazy %-formatting defers the string work unless INFO logging is enabled.
    logging.info('deleting role: %s', ctx.resource_id)
    iam_client.delete_role(RoleName=role_name)
| StarcoderdataPython |
1640920 | import os
import sys
import json
import logging
from github import Github
from typing import Any, Dict, List, Mapping, Optional
import reconcile.openshift_base as ob
from reconcile.utils import helm
from reconcile import queries
from reconcile.status import ExitCodes
from reconcile.utils.oc import OCDeprecated, OC_Map
from reconcile.utils.semver_helper import make_semver
from reconcile.github_org import GH_BASE_URL, get_default_config
from reconcile.utils.openshift_resource import OpenshiftResource, ResourceInventory
from reconcile.utils.defer import defer
QONTRACT_INTEGRATION = "integrations-manager"
QONTRACT_INTEGRATION_VERSION = make_semver(0, 1, 0)
def construct_values_file(
    integration_specs: List[Mapping[str, Any]]
) -> Mapping[str, Any]:
    """Split integration specs into helm values.

    Specs with a truthy 'cron' field become cronjobs; everything else is a
    long-running integration.  Relative order within each bucket is kept.
    """
    cronjobs = [spec for spec in integration_specs if spec.get("cron")]
    integrations = [spec for spec in integration_specs if not spec.get("cron")]
    return {
        "integrations": integrations,
        "cronjobs": cronjobs,
    }
def get_image_tag_from_ref(ref: str) -> str:
    """Resolve a git ref of app-sre/qontract-reconcile to the truncated
    commit sha used as the container image tag."""
    settings = queries.get_app_interface_settings()
    token = get_default_config()["token"]
    gh = Github(token, base_url=GH_BASE_URL)
    repo = gh.get_repo("app-sre/qontract-reconcile")
    sha = repo.get_commit(sha=ref).sha
    # Truncate to the hash length configured in app-interface settings.
    return sha[: settings["hashLength"]]
def collect_parameters(
    template: Mapping[str, Any],
    environment: Mapping[str, Any],
    image_tag_from_ref: Optional[Mapping[str, str]],
) -> Mapping[str, Any]:
    """Assemble template parameters for one environment.

    Precedence (last wins): parameters defined on the environment, then
    process environment variables matching template parameter names, then
    an IMAGE_TAG resolved from a git ref for this environment.
    """
    parameters: Dict[str, Any] = {}

    env_params = environment.get("parameters")
    if env_params:
        parameters.update(json.loads(env_params))

    for param in template.get("parameters") or []:
        name = param["name"]
        if name in os.environ:
            parameters[name] = os.environ[name]

    if image_tag_from_ref:
        for env_name, ref in image_tag_from_ref.items():
            if environment["name"] == env_name:
                parameters["IMAGE_TAG"] = get_image_tag_from_ref(ref)

    return parameters
def construct_oc_resources(
    namespace_info: Mapping[str, Any],
    oc: OCDeprecated,
    image_tag_from_ref: Optional[Mapping[str, str]],
) -> List[OpenshiftResource]:
    """Render the helm template for one namespace and wrap each processed
    resource as an OpenshiftResource tagged with this integration."""
    values = construct_values_file(namespace_info["integration_specs"])
    template = helm.template(values)
    parameters = collect_parameters(
        template, namespace_info["environment"], image_tag_from_ref
    )
    oc_resources = []
    for resource in oc.process(template, parameters):
        # Use the resource name (if present) to make errors attributable.
        name = resource.get("metadata", {}).get("name")
        oc_resources.append(
            OpenshiftResource(
                resource,
                QONTRACT_INTEGRATION,
                QONTRACT_INTEGRATION_VERSION,
                error_details=name,
            )
        )
    return oc_resources
def fetch_desired_state(
    namespaces: List[Mapping[str, Any]],
    ri: ResourceInventory,
    oc_map: OC_Map,
    image_tag_from_ref: Optional[Mapping[str, str]],
):
    """Register the desired resources of every reachable namespace in ri."""
    for ns_info in namespaces:
        ns_name = ns_info["name"]
        cluster_name = ns_info["cluster"]["name"]
        oc_client = oc_map.get(cluster_name)
        # No client available for this cluster (reason is decided by OC_Map);
        # skip it rather than fail.
        if not oc_client:
            continue
        for resource in construct_oc_resources(ns_info, oc_client, image_tag_from_ref):
            ri.add_desired(cluster_name, ns_name, resource.kind, resource.name, resource)
def collect_namespaces(
    integrations: List[Mapping[str, Any]], environment_name: str
) -> List[Mapping[str, Any]]:
    """Collect the namespaces managed by the given integrations.

    Namespaces are de-duplicated by their 'path'; each integration's spec is
    appended to the namespace's 'integration_specs' list with a back-reference
    ('name') to the integration.  A non-empty environment_name restricts the
    result to namespaces of that environment.
    """
    unique_namespaces: Dict[str, Dict[str, Any]] = {}
    for integration in integrations:
        for managed_item in integration.get("managed") or []:
            namespace = managed_item["namespace"]
            if environment_name and namespace["environment"]["name"] != environment_name:
                continue
            # First occurrence of a path wins; later specs attach to it.
            namespace = unique_namespaces.setdefault(namespace["path"], namespace)
            spec = managed_item["spec"]
            # Create a backref from the spec to its integration.
            spec["name"] = integration["name"]
            namespace.setdefault("integration_specs", []).append(spec)
    return list(unique_namespaces.values())
@defer
def run(
    dry_run,
    environment_name,
    thread_pool_size=10,
    internal=None,
    use_jump_host=True,
    image_tag_from_ref=None,
    defer=None,
):
    """Reconcile the integration deployments for one environment."""
    namespaces = collect_namespaces(
        queries.get_integrations(managed=True), environment_name
    )
    if not namespaces:
        logging.debug("Nothing to do, exiting.")
        sys.exit(ExitCodes.SUCCESS)

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=["Deployment", "StatefulSet", "CronJob", "Service"],
        internal=internal,
        use_jump_host=use_jump_host,
    )
    # Make sure the openshift clients are cleaned up on every exit path.
    defer(oc_map.cleanup)

    fetch_desired_state(namespaces, ri, oc_map, image_tag_from_ref)
    ob.realize_data(dry_run, oc_map, ri, thread_pool_size)

    if ri.has_error_registered():
        sys.exit(ExitCodes.ERROR)
| StarcoderdataPython |
1679454 | <gh_stars>1-10
"""Collection on GDB commands useful for low-level debugging, aimed at bringing debug.exe flavor into GDB command line interface.
"""
import sys
import gdb
if sys.version_info < (3,0,0):
gdb.write("Warning: Janitor expects Python version >= 3.0.0\n");
gdb_version = gdb.VERSION.split('.')
if int(gdb_version[0]) < 7 or (int(gdb_version[0]) == 7 and int(gdb_version[1]) < 12):
gdb.write("Warning: Janitor expects GDB version >= 7.12\n");
| StarcoderdataPython |
1743346 | #41) Pandigital prime
#We shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once. For example, 2143 is a 4-digit pandigital and is also prime.
#What is the largest n-digit pandigital prime that exists?
#%% Solution
def is_pandigital(x):
    """True if x uses each of the digits 1..n exactly once, n = len(str(x))."""
    digits = str(x)
    return sorted(digits) == list('123456789')[:len(digits)]


def primes(n):
    """Return all primes below n via a sieve over the odd numbers."""
    sieve = [True] * (n // 2)
    for i in range(3, int(n ** 0.5) + 1, 2):
        if sieve[i // 2]:
            sieve[i * i // 2::i] = [False] * ((n - i * i - 1) // (2 * i) + 1)
    return [2] + [2 * i + 1 for i in range(1, n // 2) if sieve[i]]


if __name__ == "__main__":
    # Any 8- or 9-digit pandigital has digit sum 36 or 45, both divisible by
    # 3, so it cannot be prime.  The answer therefore has at most 7 digits and
    # we only need to sieve up to 7654321 — the original sieved 10**9, which
    # allocates a ~5*10**8-entry list (gigabytes of RAM, minutes of CPU).
    pandigital_primes = [p for p in primes(7654322) if is_pandigital(p)]
    print(max(pandigital_primes))
| StarcoderdataPython |
89283 | <filename>pyugrid/test/test_save_as_netcdf.py
#!/usr/bin/env python
"""
tests for saving a UGrid in netcdf format
designed to be run with pytest
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
import netCDF4
from pyugrid.ugrid import UGrid, UVar
from pyugrid.test_examples import two_triangles, twenty_one_triangles
# code to check netcdf files for stuff:
def nc_has_variable(ds, var_name):
    """
    Checks that a netcdf file has the given variable defined.

    :param ds: a netCDF4 Dataset object, or a netcdf file name
    :param var_name: name of the variable to look for
    """
    if not isinstance(ds, netCDF4.Dataset):
        ds = netCDF4.Dataset(ds)
    if var_name not in ds.variables:
        print(var_name, " is not a variable in the Dataset")
        return False
    return True
def nc_has_dimension(ds, dim_name):
    """
    Checks that a netcdf file has the given dimension defined.

    :param ds: a netCDF4 Dataset object, or a netcdf file name
    :param dim_name: name of the dimension to look for
    """
    if not isinstance(ds, netCDF4.Dataset):
        ds = netCDF4.Dataset(ds)
    if dim_name not in ds.dimensions:
        print(dim_name, " is not a dimension in the Dataset")
        return False
    return True
def nc_var_has_attr(ds, var_name, att_name):
    """
    Checks that the variable, var_name, has the attribute, att_name.
    """
    if not isinstance(ds, netCDF4.Dataset):
        ds = netCDF4.Dataset(ds)
    try:
        getattr(ds.variables[var_name], att_name)
    except AttributeError:
        print(att_name, "is not in the var:", var_name)
        return False
    return True
def nc_var_has_attr_vals(ds, var_name, att_dict):
    """
    Checks that the variable var_name has the attributes (and values)
    given in att_dict.
    """
    if not isinstance(ds, netCDF4.Dataset):
        ds = netCDF4.Dataset(ds)
    for key, expected in att_dict.items():
        try:
            actual = getattr(ds.variables[var_name], key)
        except AttributeError:
            print(key, "is not an attribute of var:", var_name)
            return False
        if expected != actual:
            print("attribute:", key)
            print("expected val:", expected)
            print("val in file:", repr(actual))
            return False
    return True
def test_simple_write():
    """Round-trip a trivial two-triangle grid and check the mesh variable."""
    fname = 'temp.nc'

    grid = two_triangles()
    grid.save_as_netcdf(fname)

    # Could be lots more checks here...
    with netCDF4.Dataset(fname) as ds:
        assert nc_has_variable(ds, 'mesh')
        expected = {'cf_role': 'mesh_topology',
                    'topology_dimension': 2,
                    'long_name': u'Topology data of 2D unstructured mesh'}
        assert nc_var_has_attr_vals(ds, 'mesh', expected)
def test_set_mesh_name():
fname = 'temp.nc'
grid = two_triangles()
grid.mesh_name = "mesh_2"
grid.save_as_netcdf(fname)
with netCDF4.Dataset(fname) as ds:
assert nc_has_variable(ds, 'mesh_2')
assert nc_var_has_attr_vals(ds, 'mesh_2', {'cf_role':'mesh_topology',
'topology_dimension' : 2,
'long_name': u'Topology data of 2D unstructured mesh'
})
assert nc_var_has_attr_vals(ds, 'mesh_2', {'cf_role':'mesh_topology',
'topology_dimension' : 2,
'long_name': u'Topology data of 2D unstructured mesh',
'node_coordinates': 'mesh_2_node_lon mesh_2_node_lat',
})
assert nc_has_variable(ds, 'mesh_2_node_lon')
assert nc_has_variable(ds, 'mesh_2_node_lat')
assert nc_has_variable(ds, 'mesh_2_face_nodes')
assert nc_has_variable(ds, 'mesh_2_edge_nodes')
assert nc_has_dimension(ds, "mesh_2_num_node")
assert nc_has_dimension(ds, "mesh_2_num_edge")
assert nc_has_dimension(ds, "mesh_2_num_face")
assert nc_has_dimension(ds, "mesh_2_num_vertices")
assert not nc_var_has_attr(ds, 'mesh_2', "face_edge_connectivity")
def test_write_with_depths():
'''
tests writing a netcdf file with depth data
'''
fname = 'temp.nc'
grid = two_triangles()
grid.mesh_name='mesh1'
# create a UVar object for the depths:
depths = UVar('depth', location='node', data=[1.0, 2.0, 3.0, 4.0])
depths.attributes['units'] = 'm'
depths.attributes["standard_name"] = "sea_floor_depth_below_geoid"
depths.attributes["positive"] = "down"
grid.add_data(depths)
grid.save_as_netcdf(fname)
with netCDF4.Dataset(fname) as ds:
assert nc_has_variable(ds, 'mesh1')
assert nc_has_variable(ds, 'depth')
assert nc_var_has_attr_vals(ds, 'depth', {"coordinates" : "mesh1_node_lon mesh1_node_lat",
"location" : "node",
"mesh": "mesh1"})
def test_write_with_velocities():
'''
tests writing a netcdf file with velocities on the faces
'''
fname = 'temp.nc'
grid = two_triangles()
grid.mesh_name = 'mesh2'
# create a UVar object for u velocity:
u_vel = UVar('u', location='face', data=[1.0, 2.0])
u_vel.attributes['units'] = 'm/s'
u_vel.attributes["standard_name"] = "eastward_sea_water_velocity"
grid.add_data(u_vel)
# create a Uvar object for v velocity:
v_vel = UVar('v', location='face', data=[3.2, 4.3])
v_vel.attributes['units'] = 'm/s'
v_vel.attributes["standard_name"] = "northward_sea_water_velocity"
grid.add_data(v_vel)
# add coordinates for face data
grid.build_face_coordinates()
grid.save_as_netcdf(fname)
with netCDF4.Dataset(fname) as ds:
assert nc_has_variable(ds, 'mesh2')
assert nc_has_variable(ds, 'u')
assert nc_has_variable(ds, 'v')
assert nc_var_has_attr_vals(ds, 'u', {
"coordinates" : "mesh2_face_lon mesh2_face_lat",
"location" : "face",
"mesh": "mesh2",
})
def test_write_with_edge_data():
'''
tests writing a netcdf file with data on the edges (fluxes, maybe?)
'''
fname = 'temp.nc'
grid = two_triangles()
grid.mesh_name = 'mesh2'
# create a UVar object for fluxes:
flux = UVar('flux', location='edge', data=[0.0, 0.0, 4.1, 0.0, 5.1, ])
flux.attributes['units'] = 'm^3/s'
flux.attributes["long_name"] = "volume flux between cells"
flux.attributes["standard_name"] = "ocean_volume_transport_across_line"
grid.add_data(flux)
#add coordinates for edges
grid.build_edge_coordinates()
grid.save_as_netcdf(fname)
with netCDF4.Dataset(fname) as ds:
assert nc_has_variable(ds, 'mesh2')
assert nc_has_variable(ds, 'flux')
assert nc_var_has_attr_vals(ds, 'flux', {
"coordinates" : "mesh2_edge_lon mesh2_edge_lat",
"location" : "edge",
'units' : 'm^3/s',
"mesh": "mesh2",
})
assert np.array_equal( ds.variables['mesh2_edge_lon'], grid.edge_coordinates[:,0] )
assert np.array_equal( ds.variables['mesh2_edge_lat'], grid.edge_coordinates[:,1] )
def test_write_with_bound_data():
'''
tests writing a netcdf file with data on the boundaries
suitable for boundary conditions, for example -- (fluxes, maybe?)
'''
fname = 'temp.nc'
grid = two_triangles() # using default mesh name
# add the boundary definitions:
grid.boundaries = [(0,1),
(0,2),
(1,3),
(2,3),
]
# create a UVar object for boundary conditions:
bnds = UVar('bnd_cond', location='boundary', data=[0, 1, 0, 0])
bnds.attributes["long_name"] = "model boundary conditions"
bnds.attributes["flag_values"] = "0 1"
bnds.attributes["flag_meanings"] = "no_flow_boundary open_boundary"
grid.add_data(bnds)
grid.save_as_netcdf(fname)
with netCDF4.Dataset(fname) as ds:
assert nc_has_variable(ds, 'mesh')
assert nc_has_variable(ds, 'bnd_cond')
assert nc_var_has_attr_vals(ds, 'mesh', {
"boundary_node_connectivity" : "mesh_boundary_nodes",
})
assert nc_var_has_attr_vals(ds, 'bnd_cond', {
"location" : "boundary",
"flag_values" : "0 1",
"flag_meanings" : "no_flow_boundary open_boundary",
"mesh": "mesh",
})
## there should be no coordinates attribute or variable for the boundaries
## as there is no boundaries_coordinates defined
assert not nc_has_variable(ds, 'mesh_boundary_lon')
assert not nc_has_variable(ds, 'mesh_boundary_lat')
assert not nc_var_has_attr(ds, 'bnd_cond', 'coordinates')
def test_write_everything():
""" An example with all features enabled, and a less trivial grid """
# use a small, but interesting grid
fname = 'full_example.nc'
grid = twenty_one_triangles() # using default mesh name
grid.build_face_face_connectivity()
grid.build_edges()
grid.build_edge_coordinates()
grid.build_face_coordinates()
grid.build_boundary_coordinates()
# depth on the nodes
depths = UVar('depth', location='node', data=np.linspace(1,10,20))
depths.attributes['units'] = 'm'
depths.attributes["standard_name"] = "sea_floor_depth_below_geoid"
depths.attributes["positive"] = "down"
grid.add_data(depths)
# velocities on the faces:
u_vel = UVar('u', location='face', data=np.sin(np.linspace(3,12,21)))
u_vel.attributes['units'] = 'm/s'
u_vel.attributes["standard_name"] = "eastward_sea_water_velocity"
grid.add_data(u_vel)
# create a UVar object for v velocity:
v_vel = UVar('v', location='face', data=np.sin(np.linspace(12,15,21)))
v_vel.attributes['units'] = 'm/s'
v_vel.attributes["standard_name"] = "northward_sea_water_velocity"
grid.add_data(v_vel)
# fluxes on the edges:
flux = UVar('flux', location='edge', data=np.linspace(1000,2000,41))
flux.attributes['units'] = 'm^3/s'
flux.attributes["long_name"] = "volume flux between cells"
flux.attributes["standard_name"] = "ocean_volume_transport_across_line"
grid.add_data(flux)
# Some boundary conditions:
bounds = np.zeros( (19,), dtype=np.uint8 )
bounds[7] = 1
bnds = UVar('bnd_cond', location='boundary', data=bounds)
bnds.attributes["long_name"] = "model boundary conditions"
bnds.attributes["flag_values"] = "0 1"
bnds.attributes["flag_meanings"] = "no_flow_boundary open_boundary"
grid.add_data(bnds)
grid.save_as_netcdf(fname)
## now the tests:
with netCDF4.Dataset(fname) as ds:
assert nc_has_variable(ds, 'mesh')
assert nc_has_variable(ds, 'depth')
assert nc_var_has_attr_vals(ds, 'depth', {"coordinates" : "mesh_node_lon mesh_node_lat",
"location" : "node"})
assert nc_has_variable(ds, 'u')
assert nc_has_variable(ds, 'v')
assert nc_var_has_attr_vals(ds, 'u', {
"coordinates" : "mesh_face_lon mesh_face_lat",
"location" : "face",
"mesh": "mesh"
})
assert nc_var_has_attr_vals(ds, 'v', {
"coordinates" : "mesh_face_lon mesh_face_lat",
"location" : "face",
"mesh": "mesh",
})
assert nc_has_variable(ds, 'flux')
assert nc_var_has_attr_vals(ds, 'flux', {
"coordinates" : "mesh_edge_lon mesh_edge_lat",
"location" : "edge",
'units' : 'm^3/s',
"mesh": "mesh",
})
assert nc_has_variable(ds, 'mesh')
assert nc_has_variable(ds, 'bnd_cond')
assert nc_var_has_attr_vals(ds, 'mesh', {
"boundary_node_connectivity" : "mesh_boundary_nodes",
})
assert nc_var_has_attr_vals(ds, 'bnd_cond', {
"location" : "boundary",
"flag_values" : "0 1",
"flag_meanings" : "no_flow_boundary open_boundary",
"mesh": "mesh",
})
# and make sure pyugrid can reload it!
grid = UGrid.from_ncfile(fname,load_data=True)
# and that some things are the same:
# note: more testing might be good here...
# maybe some grid comparison functions?
assert grid.mesh_name == 'mesh'
print("grid data:", grid.data)
assert len(grid.nodes) == 20
depth = grid.data['depth']
assert depth.attributes['units'] == 'm'
u = grid.data['u']
assert u.attributes['units'] == 'm/s'
if __name__ == "__main__":
# run the tests:
test_simple_write()
test_set_mesh_name()
test_write_with_depths()
test_write_with_velocities()
test_write_with_edge_data()
| StarcoderdataPython |
1720444 | <gh_stars>1-10
import pygame
from pygame import mixer
from moviepy.editor import *
def play_music(music_name):
    """Initialise the mixer, load the given file and start playback."""
    mixer.init()
    mixer.music.load(music_name)
    mixer.music.play()
def set_volume_start(vol):
    """Set the initial playback volume (expected range 0.0 - 1.0)."""
    mixer.music.set_volume(vol)
def set_volume(vol):
    """Fade the music volume towards vol in 0.1 steps, then pin it exactly.

    Fades down when the current volume is above the target and up when it is
    below; always finishes with an exact set_volume(vol).
    """
    curr_vol = mixer.music.get_volume()
    # print("{} currr".format(curr_vol))
    if curr_vol >= vol:
        # Fade down towards the target.
        while curr_vol > vol:
            print(curr_vol)
            mixer.music.set_volume(curr_vol)
            curr_vol -= 0.1
    else:
        # Fade up towards the target.  The original compared with '>' here,
        # which made this loop a no-op (curr_vol < vol on entry); '<' is the
        # intended condition.
        while curr_vol < vol:
            print(curr_vol)
            mixer.music.set_volume(curr_vol)
            curr_vol += 0.1
    mixer.music.set_volume(vol)
def get_busy():
    """Return the mixer's busy flag (truthy while music is playing)."""
    # print(mixer.music.get_busy())
    return mixer.music.get_busy()
def get_volume():
    """Return the current music volume as reported by the mixer."""
    return mixer.music.get_volume()
| StarcoderdataPython |
1601038 | <gh_stars>0
import numpy as np
from typing import List, Tuple
def vectorize_1_hot(word: str, vocabulary: List[str]) -> np.array:
    """Return an int vector marking the position(s) of word in vocabulary."""
    indicators = [int(entry == word) for entry in vocabulary]
    return np.array(indicators, dtype=int)
def training_matrix(word_pairs: List[Tuple[str, str]], vocabulary: List[str]) -> Tuple[np.array, np.array]:
    """Build the (input, output) training matrices for one-hot word pairs.

    Row i of the first matrix is the one-hot encoding of the input word of
    pair i; row i of the second matrix encodes the paired output word.
    """
    def one_hot(word: str) -> np.array:
        # Inlined equivalent of vectorize_1_hot.
        return np.fromiter((entry == word for entry in vocabulary), dtype=int)

    x_matrix = np.column_stack([one_hot(x) for (x, _) in word_pairs]).transpose()
    y_matrix = np.column_stack([one_hot(y) for (_, y) in word_pairs]).transpose()
    return x_matrix, y_matrix
| StarcoderdataPython |
3274041 | <reponame>yifeiren/vnpy-1.8
# encoding: UTF-8
import multiprocessing
from time import sleep
from datetime import datetime, time
from vnpy.event import EventEngine2
from vnpy.trader.vtEvent import EVENT_LOG, EVENT_ERROR
from vnpy.trader.vtEngine import MainEngine, LogEngine
#from vnpy.trader.gateway import ctpGateway
from vnpy.trader.app import dataRecorder
from vnpy.trader.gateway import okexGateway
from datetime import datetime, timedelta
from collections import OrderedDict
from itertools import product
import multiprocessing
import copy
import pymongo
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# 如果安装了seaborn则设置为白色风格
try:
import seaborn as sns
sns.set_style('whitegrid')
except ImportError:
pass
from vnpy.trader.vtGlobal import globalSetting
from vnpy.trader.vtObject import VtTickData, VtBarData
from vnpy.trader.vtConstant import *
from vnpy.trader.vtGateway import VtOrderData, VtTradeData
from matplotlib.pylab import date2num
import datetime
import matplotlib.dates as mdates
import matplotlib.font_manager as font_manager
# ----------------------------------------------------------------------
def loadHistoryData(dbName,symbol,type, forecast = 0):
"""载入历史数据"""
try:
dbClient = pymongo.MongoClient(globalSetting['mongoHost'], globalSetting['mongoPort'])
collection = dbClient[dbName][symbol]
#output(u'开始载入数据')
#flt = {'type': {'$eq': type}}
d1 = datetime.datetime.now()
d2 = d1 - datetime.timedelta(days=1)
flt = {'futureindex':{'$eq':0}}
#flt = {'buy': {'$eq': 0}}
#flt = {'thisweekvsspot':{'$gt':1}}
cx = collection.find(flt)
for data in cx:
# 获取时间戳对象
#dt = data['datetime'].time()
collection.delete_one(data)
print u'删除无效数据,时间戳:%s' % data['datetime']
if forecast ==0:
flt = {'datetime': {'$gte': d2,
'$lt': d1},'type': {'$eq': type} }
else:
flt = {'datetime': {'$gte': d2,
'$lt': d1},'type': {'$eq': type}, 'forecast': {'$gt':0} }
initCursor = collection.find(flt).sort('datetime')
#data = pd.DataFrame(list(collection.find(flt).sort('datetime')))
data = pd.DataFrame(list(collection.find(flt)))
del data['_id']
data = data[['datetime', 'buy', 'sell', 'type', 'nextweekvsthisweek', 'quartervsthisweek', 'quartervsnextweek', 'futureindex', 'forecast', 'thisweekvsspot']]
return (data)
except Exception,e:
print e
def visualize():
try:
plt.ion()
fig, (ax,ax2) = plt.subplots(2,sharex=True)
ax.xaxis_date()
#plt.show()
while True:
hist_data_type1 = loadHistoryData('VnTrader_Tick_Db', 'eos.OKEX', 1)
hist_data_type2 = loadHistoryData('VnTrader_Tick_Db', 'eos.OKEX', 2)
hist_data_type3 = loadHistoryData('VnTrader_Tick_Db', 'eos.OKEX', 3)
hist_data_type4 = loadHistoryData('VnTrader_Tick_Db', 'eos.OKEX', 3,1)
ax.clear()
ax2.clear()
ax.plot(hist_data_type1['datetime'],hist_data_type1['buy'], label='this_week')
ax.plot(hist_data_type2['datetime'],hist_data_type2['buy'], label='next_week')
ax.plot(hist_data_type3['datetime'],hist_data_type3['buy'], label='quarter')
ax.plot(hist_data_type3['datetime'],hist_data_type3['futureindex'], label='futureindex')
if hist_data_type4 != None:
ax.plot(hist_data_type4['datetime'],hist_data_type4['forecast'], label='forecast')
ax2.plot(hist_data_type3['datetime'],hist_data_type3['quartervsthisweek'], label='quarter/this week')
ax2.plot(hist_data_type3['datetime'],hist_data_type3['quartervsnextweek'], label='quarter/next week')
ax2.plot(hist_data_type1['datetime'],hist_data_type1['nextweekvsthisweek'], label='next week/this week')
ax2.plot(hist_data_type1['datetime'],hist_data_type1['thisweekvsspot'], label='this week/spot')
props = font_manager.FontProperties(size=10)
leg = ax.legend(loc='upper left', shadow=True, fancybox=True, prop=props)
leg.get_frame().set_alpha(0.5)
xfmt = mdates.DateFormatter('%y-%m-%d %H:%M')
ax.xaxis.set_major_formatter(xfmt)
props = font_manager.FontProperties(size=10)
leg = ax2.legend(loc='upper left', shadow=True, fancybox=True, prop=props)
leg.get_frame().set_alpha(0.5)
xfmt = mdates.DateFormatter('%y-%m-%d %H:%M')
ax2.xaxis.set_major_formatter(xfmt)
#plt.grid()
plt.draw()
plt.pause(300)
# plt.plot()
# plt.show()
#sleep(1)
except Exception,e:
print e
if __name__ == '__main__':
visualize()
| StarcoderdataPython |
3394286 | # -*- coding: utf-8 -*-
"""minor2.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1actLRdpDfBgRyQ0yYrgH7LWB-CoU3O3w
"""
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import cv2
import os
from PIL import Image
from keras.models import Sequential
from keras.layers import Input,Convolution2D,MaxPool2D,Flatten,Dense,Dropout,Convolution3D,MaxPool3D
from keras.utils import np_utils
import tensorflow
from keras.layers.convolutional import Conv3D
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.normalization import BatchNormalization
from keras.callbacks import ModelCheckpoint, EarlyStopping
def drawImage(img, title="image"):
    """Display img with matplotlib, titling it with title plus the array shape."""
    shape = np.array(img).shape
    plt.title(title + str(shape))
    plt.imshow(img)
    plt.show()
from google.colab import drive
drive.mount('/content/drive')
dirname = './DataSet/'
timeaccord= []
for fname in os.listdir(dirname):
timeaccord.append(fname)
timeaccord.sort()
final = []
for fname in timeaccord[:24]:
img = Image.open(os.path.join(dirname, fname))
img = img.resize((100,100))
drawImage(img,fname)
imarray = np.array(img)
final.append(imarray)
final = np.array(final)
final = final/255
print(final.shape)
x_train = []
y_train = []
for i in range(10,final.shape[0]):
x_train.append(final[i-10:i])
for i in range(10,final.shape[0]):
y_train.append(final[i-9:i+1])
X_train = np.array(x_train)
Y_train = np.array(y_train)
print(X_train.shape)
print(Y_train.shape)
seq = Sequential()
seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),input_shape=(None, 100, 100, 1), padding='same', return_sequences=True))
seq.add(Dropout(0.1))
seq.add(BatchNormalization())
seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3), padding='same', return_sequences=True))
seq.add(Dropout(0.1))
seq.add(BatchNormalization())
seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),padding='same', return_sequences=True))
seq.add(Dropout(0.1))
seq.add(BatchNormalization())
seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),padding='same', return_sequences=True))
seq.add(Dropout(0.1))
seq.add(BatchNormalization())
seq.add(Conv3D(filters=1, kernel_size=(3, 3, 3),activation='sigmoid',padding='same', data_format='channels_last'))
seq.add(Dropout(0.1))
seq.summary()
seq.compile(loss='binary_crossentropy', optimizer='adadelta')
checkpoint = ModelCheckpoint("best_model.h5", monitor='val_loss', save_best_only=True, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', patience=2)
hist = seq.fit(X_train.reshape((14,10,100,100,1)), Y_train.reshape((14,10,100,100,1)), batch_size=10,
epochs=32, validation_split=0.2,callbacks=[checkpoint,early])
#seq.load_weights("best_model.h5")
plt.figure(0)
plt.title("LOSS")
plt.plot(hist.history['loss'],'g')
plt.plot(hist.history['val_loss'],'b')
plt.show()
X_train = X_train.reshape((14,10,100,100,1))
which = 12
track = X_train[which][:10, ::, ::, ::]
for j in range(16):
new_pos = seq.predict(track[np.newaxis, ::, ::, ::, ::])
new = new_pos[::, -1, ::, ::, ::]
track = np.concatenate((track, new), axis=0)
# And then compare the predictions
# to the ground truth
track2 = X_train[which][::, ::, ::, ::]
for i in range(15):
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(121)
if i >= 10:
ax.text(1, 3, 'Predictions !', fontsize=20, color='w')
else:
ax.text(1, 3, 'Initial trajectory', fontsize=20)
toplot = track[i, ::, ::, 0]
plt.imshow(toplot)
ax = fig.add_subplot(122)
plt.text(1, 3, 'Ground truth', fontsize=20)
#toplot = track2[i, ::, ::, 0]
# if i >= 2:
# toplot = Y_train[which][i - 1,::, ::, 0]
plt.imshow(toplot)
plt.savefig('%i_animate.png' % (i + 1))
img = Image.open(os.path.join(dirname, timeaccord[25]))
img = img.resize((100,100))
plt.imshow(np.array(img)/9)
| StarcoderdataPython |
55312 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Bernoulli
from src.models.nns import Decoder
class ConvDecoder(nn.Module):
    """Bernoulli decoder over flattened 28x28 binary images, backed by the
    project's convolutional Decoder network."""

    def __init__(self, z_dim):
        super().__init__()
        self.z_dim = z_dim
        self.decoder = Decoder(128, z_dim, 28, 28, 1)

    def decode(self, z: torch.tensor) -> torch.tensor:
        """Map latent points [B, Z] to Bernoulli means [B, D].

        The sigmoid squashes the decoder output into valid probabilities.
        """
        return torch.sigmoid(self.decoder(z))

    def forward(self, x: torch.tensor, z: torch.tensor) -> torch.tensor:
        """Return the reconstruction term log p(x|z), one value per sample.

        :param x: data points, [B, C, H, W]
        :param z: latent points, [B, Z]
        :return: [B,] log-likelihoods
        """
        flat_x = x.reshape(-1, 784)
        probs = self.decode(z)
        # Sum the per-pixel Bernoulli log-probabilities over the data axis.
        # (F.binary_cross_entropy would give the same value, negated.)
        return Bernoulli(probs=probs).log_prob(flat_x).sum(-1)
class MNISTDecoder(nn.Module):
    """Two-layer MLP decoder producing Bernoulli means over flattened MNIST."""

    def __init__(self, z_dim, hidden_dim=400):
        super().__init__()
        self.z_dim = z_dim
        self.fc1 = nn.Linear(z_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, 784)

    def decode(self, z: torch.tensor) -> torch.tensor:
        """Map latent points [B, Z] to Bernoulli means [B, 784]."""
        hidden = F.relu(self.fc1(z))
        # Sigmoid squashes logits into valid Bernoulli probabilities.
        return torch.sigmoid(self.fc2(hidden))

    def forward(self, x: torch.tensor, z: torch.tensor) -> torch.tensor:
        """Return the reconstruction term log p(x|z), one value per sample.

        :param x: data points, [B, C, H, W]
        :param z: latent points, [B, Z]
        :return: [B,] log-likelihoods
        """
        flat_x = x.reshape(-1, 784)
        probs = self.decode(z)
        # Sum the per-pixel Bernoulli log-probabilities over the data axis.
        # (F.binary_cross_entropy would give the same value, negated.)
        return Bernoulli(probs=probs).log_prob(flat_x).sum(-1)
| StarcoderdataPython |
3307709 | <reponame>jkavan/highlite<filename>highlite.py
#!/usr/bin/env python
import sys
import getopt
import re
from termcolor import colored
#
# You can freely customize the colors and/or styles if you like (though
# the changes may be overwritten by the upgrade process).
#
# Available colors:
# fore back
# ---- ----
# grey on_grey
# red on_red
# green on_green
# yellow on_yellow
# blue on_blue
# magenta on_magenta
# cyan on_cyan
# white on_white
#
# Available styles:
# bold
# dark
# underline
# blink
# reverse
# concealed
colors = [
["green", ""],
["red", ""],
["yellow", ""],
["blue", ""],
["magenta", ""],
["white", ""],
["cyan", ""],
["grey", "on_white"],
["blue", "on_white"],
["yellow", "on_white"],
["white", "on_red"],
["white", "on_yellow"],
["white", "on_blue"],
["white", "on_magenta"],
]
styles = ["bold", "underline"]
# Search is case-sensitive by default. Can be overriden with `-i`
ignore_case = False
# ---
USAGE = ("An utility for highlighting command line output using one or more regular expressions.\n"
"\n"
"Usage: [COMMAND] | hl [OPTION] REGEX...\n"
"\n"
"OPTIONS:\n"
" -h, --help Print this help message\n"
" -v, --version Print version information\n"
" -i, --ignore-case Ignore case when searching\n"
)
VERSION = "highlite version 0.1.0"
def get_fore_color(index):
    """Return the foreground color for regex #index, cycling through `colors`.

    Returns None when the configured entry is the empty string.
    """
    fore = colors[index % len(colors)][0]
    return fore or None
def get_back_color(index):
    """Return the background color for regex #index, cycling through `colors`.

    Returns None when the configured entry is the empty string.
    """
    back = colors[index % len(colors)][1]
    return back or None
def colorize(text, regexes, ignore_case):
    """
    Surrounds regex matches with ANSI colors and returns the colored text.

    :param text: Text that will be colorized.
    :param regexes: A list of search terms (in regexp format). Which text matches to colorize.
    :param ignore_case: When True, match case-insensitively.
    :return: Colorized text.
    """
    flags = re.IGNORECASE if ignore_case else 0
    for index, regex in enumerate(regexes):
        def paint(match, index=index):
            # `index=index` binds the loop value as a default, avoiding the
            # late-binding closure pitfall.
            return colored(match.group(), get_fore_color(index), get_back_color(index), styles)
        text = re.sub(regex, paint, text, flags=flags)
    return text
def validate_regex(regexes):
    """Exit with status 1 (after printing a message) if any regex is invalid."""
    for regex in regexes:
        try:
            re.compile(regex)
        except re.error:
            print("Invalid regex pattern: " + regex)
            sys.exit(1)
def parse_args(args):
    """
    Parses command line arguments and sets global options.

    :param args: the argument list (typically sys.argv[1:])
    :returns: operands (list of regexes)
    """
    global ignore_case
    try:
        options, arguments = getopt.getopt(
            args,
            'vhi',
            ["version", "help", "ignore-case"])

        for opt, _val in options:
            if opt in ("-v", "--version"):
                print(VERSION)
                sys.exit()
            if opt in ("-h", "--help"):
                print(VERSION)
                print(USAGE)
                sys.exit()
            if opt in ("-i", "--ignore-case"):
                ignore_case = True

        # At least one regex operand is required.
        if not arguments:
            print(USAGE)
            sys.exit(1)

        # Save regexes (operands) to a list.
        operands = [str(arg) for arg in arguments]
    except (getopt.GetoptError, ValueError) as e:
        # str(e) works for both exception types; the original used e.msg,
        # which raises AttributeError when a ValueError is caught.
        print("Error: " + str(e))
        print(USAGE)
        sys.exit(1)
    return operands
def main():
    """Entry point: highlight regex matches read from stdin, write to stdout.

    Reads lines until EOF (or Ctrl+C), colorizing matches of the
    command-line regexes.
    """
    regexes = parse_args(sys.argv[1:])
    # NOTE(review): assumes a module-level `ignore_case` default is defined
    # earlier in the file (parse_args only assigns it when -i is given) -
    # confirm, otherwise this is a NameError when -i is absent.
    global ignore_case
    try:
        # Use command line arguments as regexes
        validate_regex(regexes)
        # Tell Python to ignore the line if it contains invalid Unicode data
        sys.stdin.reconfigure(errors='ignore')
        # Read lines from stdin
        for line in sys.stdin:
            line = colorize(line.rstrip(), regexes, ignore_case)
            sys.stdout.write(line + '\n')
    # Catch Ctrl+C and exit cleanly
    except KeyboardInterrupt:
        sys.stdout.flush()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
# media.py
import webbrowser
class Movie():
    """Container for metadata about a single movie.

    Stores the title, storyline, poster image URL and YouTube trailer
    URL, and can open the trailer in the default web browser.
    """

    # Ratings recognized by this application (class-level constant).
    VALID_RATINGS = ["G", "PG", "PG-13", "R"]

    def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube):
        """Store the supplied movie details on this instance."""
        self.title = movie_title
        self.storyline = movie_storyline
        self.poster_image_url = poster_image
        self.trailer_youtube_url = trailer_youtube

    def show_trailer(self):
        """Open this movie's YouTube trailer in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)
3292240 | #!/usr/bin/env python2.7
# twitterwin.py by <NAME> http://raspi.tv/?p=5281
import tweepy
import random
# Consumer keys and access tokens, used for OAuth
consumer_key = 'copy your consumer key here'
consumer_secret = 'copy your consumer secret here'
access_token = 'copy your access token here'
access_token_secret = 'copy your access token secret here'
# OAuth process, using the keys and tokens
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# Creation of the actual interface, using authentication
api = tweepy.API(auth)
follow2 = api.followers_ids() # gives a list of followers ids
print "you have %d followers" % len(follow2)
show_list = str(raw_input("Do you want to list the followers array?"))
if show_list == ('y' or 'yes' or 'Y' or 'Yes' or 'YES'):
print follow2
def pick_winner():
random_number = random.randint(0, len(follow2)-1)
winner = api.get_user(follow2[random_number])
print winner.screen_name, random_number
while True:
pick = raw_input("Press Enter to pick a winner, Q to quit.")
if pick == ('q' or 'Q' or 'quit' or 'QUIT' or 'Quit'):
break
pick_winner() | StarcoderdataPython |
1770922 | #!/usr/bin/env python
"""
Module to launch and control running jobs.
Contains job_controller, job, and inherited classes. A job_controller can
create and manage multiple jobs. The worker or user-side code can issue
and manage jobs using the launch, poll and kill functions. Job attributes
are queried to determine status. Functions are also provided to access
and interrogate files in the job's working directory.
"""
import os
import subprocess
import logging
import signal
import itertools
import time
import json
import math
from mpi4py import MPI
from libensemble.register import Register
from libensemble.resources import Resources
# Tag log records with the MPI role so manager and worker output can be
# told apart in a shared log.
if Resources.am_I_manager():
    wrkid = 'Manager'
else:
    wrkid = 'w' + str(Resources.get_workerID())
logger = logging.getLogger(__name__ + '(' + wrkid + ')')
#For debug messages in this module - uncomment (see libE.py to change root logging level)
#logger.setLevel(logging.DEBUG)
# Valid values for Job.state.
STATES = '''
UNKNOWN
CREATED
WAITING
RUNNING
FINISHED
USER_KILLED
FAILED'''.split()
# Kill signals accepted by JobController.set_kill_mode.
SIGNALS = '''
SIGTERM
SIGKILL'''.split()
#I may want to use a top-level abstract/base class for maximum re-use
# - else inherited controller will be reimplementing common code
class JobControllerException(Exception):
    """Raised for any job-controller configuration or usage error."""
    pass
class Job:
    '''A single launchable job: holds its configuration and run-time status.

    Instances are normally created by a job controller on launch rather
    than directly by user code.
    '''

    # Class-wide counter providing a unique id for every Job created.
    newid = itertools.count()

    def __init__(self, app=None, app_args=None, num_procs=None, num_nodes=None, ranks_per_node=None,
                 machinefile=None, hostlist=None, workdir=None, stdout=None, stderr=None, workerid=None):
        '''Create a new Job with an id, status and configuration attributes.'''
        self.id = next(Job.newid)

        # --- status attributes ---
        self.state = 'CREATED'
        self.process = None
        self.errcode = None
        self.finished = False  # True once the job has run - not whether it succeeded
        self.success = False
        self.launch_time = None
        self.runtime = None
        self.total_time = None

        # --- run configuration ---
        self.app = app
        self.app_args = app_args
        self.num_procs = num_procs
        self.num_nodes = num_nodes
        self.ranks_per_node = ranks_per_node
        self.machinefile = machinefile
        self.hostlist = hostlist
        self.workerID = workerid

        if app is None:
            raise JobControllerException("Job must be created with an app - no app found for job {}".format(self.id))

        worker_name = "_worker{}".format(self.workerID) if self.workerID else ""
        self.name = "job_{}{}_{}".format(app.name, worker_name, self.id)
        self.stdout = stdout or self.name + '.out'
        self.stderr = stderr or self.name + '.err'
        self.workdir = workdir

    def workdir_exists(self):
        '''Return True if the job's workdir exists, else a falsy value.'''
        if not self.workdir:
            return self.workdir
        return os.path.exists(self.workdir)

    def file_exists_in_workdir(self, filename):
        '''Return True if the named file exists in the job's workdir, else a falsy value.'''
        if not self.workdir:
            return self.workdir
        return os.path.exists(os.path.join(self.workdir, filename))

    def read_file_in_workdir(self, filename):
        '''Read and return the contents of the named file in the job's workdir.'''
        full_path = os.path.join(self.workdir, filename)
        if not os.path.exists(full_path):
            raise ValueError("{} not found in working directory".format(filename))
        with open(full_path) as infile:
            return infile.read()

    def stdout_exists(self):
        '''Return True if the job's stdout file exists in the workdir.'''
        return self.file_exists_in_workdir(self.stdout)

    def read_stdout(self):
        '''Read and return the job's stdout file from the workdir.'''
        return self.read_file_in_workdir(self.stdout)

    def stderr_exists(self):
        '''Return True if the job's stderr file exists in the workdir.'''
        return self.file_exists_in_workdir(self.stderr)

    def read_stderr(self):
        '''Read and return the job's stderr file from the workdir.'''
        return self.read_file_in_workdir(self.stderr)

    def calc_job_timing(self):
        """Fill in runtime/total_time once the job has finished.

        This is final-job timing only; for directly launched jobs the two
        values are identical.
        """
        if self.launch_time is None:
            logger.warning("Cannot calc job timing - launch time not set")
            return
        # runtime may already have been set when the job was killed.
        if self.runtime is None:
            self.runtime = time.time() - self.launch_time
        if self.total_time is None:
            self.total_time = self.runtime
class BalsamJob(Job):
    '''Wraps a Balsam Job from the Balsam service.

    The same attributes and query routines as Job are implemented, with
    file reads and timing delegated to the Balsam job object that is
    stored in self.process.
    '''

    def __init__(self, app=None, app_args=None, num_procs=None, num_nodes=None, ranks_per_node=None, machinefile=None, hostlist=None, workdir=None, stdout=None, stderr=None, workerid=None):
        '''Create a new BalsamJob with an id, status and configuration attributes.'''
        # BUG FIX: stderr was previously omitted from this call, so workerid
        # was bound positionally to Job's stderr parameter - the worker ID
        # was silently lost and the stderr filename was wrong.
        super().__init__(app, app_args, num_procs, num_nodes, ranks_per_node,
                         machinefile, hostlist, workdir, stdout, stderr, workerid)
        self.balsam_state = None
        # Balsam may assign its own working directory once the job starts
        # running; the default for libEnsemble is currently to run in place.
        self.workdir = workdir

    def read_file_in_workdir(self, filename):
        '''Read and return the named file from the Balsam job's working directory.'''
        return self.process.read_file_in_workdir(filename)

    def read_stdout(self):
        '''Read and return the job's stdout file via the Balsam service.'''
        return self.process.read_file_in_workdir(self.stdout)

    def read_stderr(self):
        '''Read and return the job's stderr file via the Balsam service.'''
        return self.process.read_file_in_workdir(self.stderr)

    def calc_job_timing(self):
        """Fill in runtime (reported by Balsam) and total_time for this job."""
        # Balsam tracks the true runtime of the underlying job.
        self.runtime = self.process.runtime_seconds
        if self.launch_time is None:
            logger.warning("Cannot calc job total_time - launch time not set")
            return
        if self.total_time is None:
            self.total_time = time.time() - self.launch_time
class JobController:
    ''' The job_controller can create, poll and kill runnable jobs

    **Class Attributes:**

    :cvar JobController: controller: A class attribute holding the default job_controller.

    **Object Attributes:**

    :ivar Register registry: The registry associated with this job_controller
    :ivar String manager_signal: Contains any signals received by manager ('none'|'finish'|'kill')
    :ivar String kill_signal: The kill signal to be sent to jobs
    :ivar boolean wait_and_kill: Whether running in wait_and_kill mode (If True a hard kill will be sent after a timeout period)
    :ivar int wait_time: Timeout period for hard kill, when wait_and_kill is set.
    :ivar list list_of_jobs: A list of jobs created in this job controller
    :ivar int workerID: The workerID associated with this job controller
    '''
    # Process-wide default controller instance (set by __init__).
    controller = None
@staticmethod
def job_partition(num_procs, num_nodes, ranks_per_node, machinefile=None):
""" Takes provided nprocs/nodes/ranks and outputs working configuration of procs/nodes/ranks or error """
#If machinefile is provided - ignore everything else
if machinefile is not None:
if num_procs is not None or num_nodes is not None or ranks_per_node is not None:
logger.warning('Machinefile provided - overriding procs/nodes/ranks_per_node')
num_procs = None
num_nodes = None
ranks_per_node = None
return num_procs, num_nodes, ranks_per_node
#If all set then check num_procs equals num_nodes*ranks_per_node and set values as given
if num_procs is not None and num_nodes is not None and ranks_per_node is not None:
if num_procs != num_nodes*ranks_per_node:
raise JobControllerException("num_procs does not equal num_nodes*ranks_per_node")
return num_procs, num_nodes, ranks_per_node
#If num_procs not set then need num_nodes and ranks_per_node and set num_procs
if num_procs is None:
#Note this covers case where none are set - may want to use job_controller defaults in that case - not implemented yet.
if num_nodes is None or ranks_per_node is None:
raise JobControllerException("Must set either num_procs or num_nodes/ranks_per_node or machinefile")
num_procs = num_nodes * ranks_per_node
return num_procs, num_nodes, ranks_per_node
#If num_procs is set - fill in any other values
#if num_procs is not None:
else:
if num_nodes is None:
if ranks_per_node is None:
#Currently not auto-detecting so if only num_procs - you are on 1 node
num_nodes = 1
ranks_per_node = num_procs
else:
num_nodes = num_procs//ranks_per_node
else:
ranks_per_node = num_procs//num_nodes
return num_procs, num_nodes, ranks_per_node
#def _calc_job_timing(job):
#if job.launch_time is None:
#logger.warning("Cannot calc job timing - launch time not set")
#return
##In case already been killed and set then
#if job.runtime is None:
#job.runtime = time.time() - job.launch_time
##For direct launched jobs - these should be the same.
#if job.total_time is None:
#if job.runtime is not None:
#job.total_time = job.runtime
#else:
#job.total_time = time.time() - job.launch_time
    def __init__(self, registry=None, auto_resources=True, nodelist_env_slurm=None, nodelist_env_cobalt=None):
        '''Instantiate a new JobController instance.

        A new JobController object is created with an application registry and
        configuration attributes. A registry object must have been created.

        This is typically created in the user calling script. If auto_resources
        is True, an evaluation of system resources is performed during this call.

        Parameters
        ----------
        registry: obj: Registry, optional
            A registry containing the applications to use in this job_controller (Default: Use Register.default_registry).
        auto_resources: Boolean, optional
            Auto-detect available processor resources and assign to jobs if not explicitly provided on launch.
        nodelist_env_slurm: String, optional
            The environment variable giving a node list in Slurm format (Default: Uses SLURM_NODELIST)
            Note: This is only queried if a worker_list file is not provided and auto_resources=True.
        nodelist_env_cobalt: String, optional
            The environment variable giving a node list in Cobalt format (Default: Uses COBALT_PARTNAME)
            Note: This is only queried if a worker_list file is not provided and auto_resources=True.
        '''
        self.registry = registry or Register.default_registry
        if self.registry is None:
            raise JobControllerException("Cannot find default registry")
        self.top_level_dir = os.getcwd()
        self.auto_resources = auto_resources
        self.manager_signal = 'none'
        if self.auto_resources:
            self.resources = Resources(top_level_dir=self.top_level_dir,
                                       nodelist_env_slurm=nodelist_env_slurm,
                                       nodelist_env_cobalt=nodelist_env_cobalt)
        #logger.debug("top_level_dir is {}".format(self.top_level_dir))
        #todo Configure by autodetection
        #In fact it will be a sub-object - most likely with inhertience - based on detection or specification
        #Also the construction of the run-line itself will prob. be a function of that object
        #For now though - do like this:
        # Select MPI launcher command-line option names by detected MPI flavor.
        mpi_variant = Resources.get_MPI_variant()
        if mpi_variant == 'mpich':
            self.mpi_launcher = 'mpirun'
            self.mfile = '-machinefile'
            self.nprocs = '-np'
            self.nnodes = ''
            self.ppn = '--ppn'
            self.hostlist = '-hosts'
        elif mpi_variant == 'openmpi':
            self.mpi_launcher = 'mpirun'
            self.mfile = '-machinefile'
            self.nprocs = '-np'
            self.nnodes = ''
            self.ppn = '-npernode'
            self.hostlist = '-host'
        # NOTE(review): any other MPI variant leaves the launcher attributes
        # unset, so launch() would fail with AttributeError - confirm intended.
        #self.mpi_launcher = 'srun'
        #self.mfile = '-m arbitrary'
        #self.nprocs = '--ntasks'
        #self.nnodes = '--nodes'
        #self.ppn = '--ntasks-per-node'
        #self.hostlist = '-w'
        #Job controller settings - can be set in user function.
        self.kill_signal = 'SIGTERM'
        self.wait_and_kill = True #If true - wait for wait_time after signal and then kill with SIGKILL
        self.wait_time = 60
        #list_of_jobs: Need to decide on reset... - reset for each calc?
        #and how link to libe job (or calc) class - if reset for each calc - could store this in job
        self.list_of_jobs = []
        self.workerID = None
        #self.auto_machinefile = True #Create a machinefile automatically
        # Register this instance as the process-wide default controller.
        JobController.controller = self
        #self.resources = Resources(top_level_dir = self.top_level_dir)
        #If this could share multiple launches could set default job parameters here (nodes/ranks etc...)
        # May change job_controller launch functions to use **kwargs and then init job empty - and use setattr
        #eg. To pass through args:
        #def launch(**kwargs):
        #...
        #job = Job()
        #for k,v in kwargs.items():
        #    try:
        #        getattr(job, k)
        #    except AttributeError:
        #        raise ValueError(f"Invalid field {}".format(k)) #Unless not passing through all
        #    else:
        #        setattr(job, k, v)
    def launch(self, calc_type, num_procs=None, num_nodes=None, ranks_per_node=None,
               machinefile=None, app_args=None, stdout=None, stderr=None, stage_inout=None, hyperthreads=False, test=False):
        ''' Creates a new job, and either launches or schedules to launch in the job controller

        The created job object is returned.

        Parameters
        ----------
        calc_type: String
            The calculation type: 'sim' or 'gen'
        num_procs: int, optional
            The total number of MPI tasks on which to launch the job.
        num_nodes: int, optional
            The number of nodes on which to launch the job.
        ranks_per_node: int, optional
            The ranks per node for this job.
        machinefile: string, optional
            Name of a machinefile for this job to use.
        app_args: string, optional
            A string of the application arguments to be added to job launch command line.
        stdout: string, optional
            A standard output filename.
        stderr: string, optional
            A standard error filename.
        stage_inout: string, optional
            A directory to copy files from. Default will take from current directory.
        hyperthreads: boolean, optional
            Whether to launch MPI tasks to hyperthreads
        test: boolean, optional
            Whether this is a test - No job will be launched. Instead runline is printed to logger (At INFO level).

        Returns
        -------
        job: obj: Job
            The launched job object.

        Note that if some combination of num_procs, num_nodes and ranks_per_node are provided, these will be honored if possible. If resource detection is on and these are omitted, then the available resources will be divided amongst workers.
        '''
        # Find the default sim or gen app from registry.sim_default_app OR registry.gen_default_app
        # Could take optional app arg - if they want to supply here - instead of taking from registry
        if calc_type == 'sim':
            if self.registry.sim_default_app is None:
                raise JobControllerException("Default sim app is not set")
            app = self.registry.sim_default_app
        elif calc_type == 'gen':
            if self.registry.gen_default_app is None:
                raise JobControllerException("Default gen app is not set")
            app = self.registry.gen_default_app
        else:
            raise JobControllerException("Unrecognized calculation type", calc_type)
        #-------- Up to here should be common - can go in a baseclass and make all concrete classes inherit ------#
        hostlist = None
        if machinefile is None and self.auto_resources:
            #klugging this for now - not nec machinefile if more than one node - try a hostlist
            num_procs, num_nodes, ranks_per_node = self.get_resources(num_procs=num_procs, num_nodes=num_nodes, ranks_per_node=ranks_per_node, hyperthreads=hyperthreads)
            if num_nodes > 1:
                #hostlist
                hostlist = self.get_hostlist()
            else:
                #machinefile - name it per-worker to avoid clobbering between workers
                if self.workerID is not None:
                    machinefile = 'machinefile_autogen_for_worker_' + str(self.workerID)
                else:
                    machinefile = 'machinefile_autogen'
                mfile_created, num_procs, num_nodes, ranks_per_node = self.create_machinefile(machinefile, num_procs, num_nodes, ranks_per_node, hyperthreads)
                if not mfile_created:
                    raise JobControllerException("Auto-creation of machinefile failed")
        else:
            num_procs, num_nodes, ranks_per_node = JobController.job_partition(num_procs, num_nodes, ranks_per_node, machinefile)
        default_workdir = os.getcwd() #Will be possible to override with arg when implemented
        job = Job(app, app_args, num_procs, num_nodes, ranks_per_node, machinefile, hostlist, default_workdir, stdout, stderr, self.workerID)
        #Temporary perhaps - though when create workdirs - will probably keep output in place
        if stage_inout is not None:
            logger.warning('stage_inout option ignored in this job_controller - runs in-place')
        #Construct run line - possibly subroutine
        runline = [self.mpi_launcher]
        if job.machinefile is not None:
            #os.environ['SLURM_HOSTFILE'] = job.machinefile
            runline.append(self.mfile)
            runline.append(job.machinefile)
        #Should be else - if machine file - dont need any other config
        if job.hostlist is not None:
            #os.environ['SLURM_HOSTFILE'] = job.machinefile
            runline.append(self.hostlist)
            runline.append(job.hostlist)
        if job.num_procs is not None:
            runline.append(self.nprocs)
            runline.append(str(job.num_procs))
        #Not currently setting nodes
        #- as not always supported - but should always have the other two after calling _job_partition
        #if job.num_nodes is not None:
        #    runline.append(self.nnodes)
        #    runline.append(str(job.num_nodes))
        #Currently issues - command depends on mpich/openmpi etc...
        if job.ranks_per_node is not None:
            runline.append(self.ppn)
            runline.append(str(job.ranks_per_node))
        runline.append(job.app.full_path)
        if job.app_args is not None:
            runline.extend(job.app_args.split())
        if test:
            logger.info('Test selected: Not launching job')
            logger.info('runline args are {}'.format(runline))
        else:
            logger.debug("Launching job {}: {}".format(job.name, " ".join(runline))) #One line
            #logger.debug("Launching job {}:\n{}{}".format(job.name, " "*32, " ".join(runline))) #With newline
            #not good for timing job itself as dont know when finishes - if use this prob. change to date time or
            #use for timeout. For now using for timing with approx end....
            job.launch_time = time.time()
            # preexec_fn=os.setsid puts the job in its own process group so
            # kill() can signal the whole group (see _kill_process).
            #job.process = subprocess.Popen(runline, cwd='./', stdout=open(job.stdout, 'w'), stderr=open(job.stderr, 'w'), shell=False)
            job.process = subprocess.Popen(runline, cwd='./', stdout=open(job.stdout, 'w'), stderr=open(job.stderr, 'w'), shell=False, preexec_fn=os.setsid)
            #To test when have workdir
            #job.process = subprocess.Popen(runline, cwd=job.workdir, stdout=open(job.stdout, 'w'), stderr=open(job.stderr, 'w'), shell=False, preexec_fn=os.setsid)
        self.list_of_jobs.append(job)
        #return job.id
        return job
    def poll(self, job):
        ''' Polls and updates the status attributes of the supplied job

        Parameters
        -----------
        job: obj: Job
            The job object to be polled.
        '''
        if not isinstance(job, Job):
            raise JobControllerException('Invalid job has been provided')
        # Check the jobs been launched (i.e. it has a process ID)
        if job.process is None:
            #logger.warning('Polled job has no process ID - returning stored state')
            #Prob should be recoverable and return state - but currently fatal
            raise JobControllerException('Polled job {} has no process ID - check jobs been launched'.format(job.name))
        # Do not poll if job already finished
        # Maybe should re-poll job to check (in case self.finished set in error!)???
        if job.finished:
            logger.warning('Polled job {} has already finished. Not re-polling. Status is {}'.format(job.name, job.state))
            return
        #-------- Up to here should be common - can go in a baseclass and make all concrete classes inherit ------#
        # Poll the job (non-blocking; returns None while still running)
        poll = job.process.poll()
        if poll is None:
            job.state = 'RUNNING'
        else:
            job.finished = True
            #logger.debug("Process {} Completed".format(job.process))
            job.calc_job_timing()
            if job.process.returncode == 0:
                job.success = True
                job.errcode = 0
                #logger.debug("Process {} completed successfully".format(job.process))
                logger.debug("Job {} completed successfully".format(job.name))
                job.state = 'FINISHED'
            else:
                #Need to differentiate failure from if job was user-killed !!!! What if remotely???
                #If this process killed the job it will already be set and if not re-polling will not get here.
                #But could query existing state here as backup?? - Also may add a REMOTE_KILL state???
                #Not yet remote killing so assume failed....
                job.errcode = job.process.returncode
                logger.debug("Job {} failed".format(job.name))
                job.state = 'FAILED'
        #Just updates job as provided
        #return job
    def manager_poll(self):
        ''' Polls for a manager signal

        The job controller manager_signal attribute will be updated
        ('finish' or 'kill') if a signal is waiting; otherwise it is
        left unchanged.
        '''
        from libensemble.message_numbers import STOP_TAG, MAN_SIGNAL_FINISH, MAN_SIGNAL_KILL
        from mpi4py import MPI
        # Manager Signals
        # Stop tag may be manager interupt as diff kill/stop/pause....
        comm = MPI.COMM_WORLD
        status = MPI.Status()
        # Non-blocking probe: only receive if a STOP_TAG message is pending.
        if comm.Iprobe(source=0, tag=STOP_TAG, status=status):
            logger.info('Manager probe hit true')
            man_signal = comm.recv(source=0, tag=STOP_TAG, status=status)
            if man_signal == MAN_SIGNAL_FINISH:
                self.manager_signal = 'finish'
            elif man_signal == MAN_SIGNAL_KILL:
                self.manager_signal = 'kill'
            else:
                logger.warning("Received unrecognized manager signal {} - ignoring".format(man_signal))
    @staticmethod
    def _kill_process(process, signal):
        """Send *signal* to the whole process group of *process*.

        NOTE: the `signal` parameter shadows the `signal` module within this
        function; only the numeric signal value is needed here.
        """
        time.sleep(0.1) # Without a small wait - kill signal can not work
        os.killpg(os.getpgid(process.pid), signal) # Kill using process group (see launch with preexec_fn=os.setsid)
        #process.send_signal(signal) # Kill by sending direct signal
# Just for you, python2
@staticmethod
def _time_out(process, timeout):
"""Loop to wait for process to finish after a kill"""
start_wait_time = time.time()
while time.time() - start_wait_time < timeout:
time.sleep(0.01)
poll = process.poll()
if poll is not None:
return False # process has finished - no timeout
return True # process has not finished - timeout
    def kill(self, job):
        ''' Kills or cancels the supplied job

        Parameters
        -----------
        job: obj: Job
            The job object to be killed.

        The signal used is determined by the job_controller attribute <kill_signal> and will be sent to the job,
        followed by a wait for the process to terminate. If the <wait_and_kill> attribute is True, then
        a SIGKILL will be sent if the job has not finished after <wait_time> seconds. The kill can be
        configured using the set_kill_mode function.
        '''
        if not isinstance(job, Job):
            raise JobControllerException('Invalid job has been provided')
        if job.finished:
            logger.warning('Trying to kill job that is no longer running. Job {}: Status is {}'.format(job.name, job.state))
            return
        if job.process is None:
            # Brief grace period in case the launch has not yet registered a process.
            time.sleep(0.2)
            if job.process is None:
                #logger.warning('Polled job has no process ID - returning stored state')
                #Prob should be recoverable and return state - but currently fatal
                raise JobControllerException('Attempting to kill job {} that has no process ID - check jobs been launched'.format(job.name))
        logger.debug("Killing job {}".format(job.name))
        # Issue signal
        sig = {'SIGTERM': signal.SIGTERM, 'SIGKILL': signal.SIGKILL}
        if self.kill_signal not in sig:
            raise JobControllerException('Unknown kill signal')
        try:
            JobController._kill_process(job.process, sig[self.kill_signal])
        except ProcessLookupError:
            logger.warning("Tried to kill job {}. Process {} not found. May have finished".format(job.name, job.process.pid))
        # Wait for job to be killed
        if self.wait_and_kill:
            # My python2 method works ok for py2 and py3
            if JobController._time_out(job.process, self.wait_time):
                # Soft kill timed out - escalate to SIGKILL.
                logger.warning("Kill signal {} timed out for job {}: Issuing SIGKILL".format(self.kill_signal, job.name))
                JobController._kill_process(job.process, signal.SIGKILL)
            job.process.wait()
            #Using subprocess timeout attribute where available (py3)
            #try:
            #    job.process.wait(timeout=self.wait_time)
            #    #stdout,stderr = self.process.communicate(timeout=self.wait_time) #Wait for process to finish
            #except TypeError: #eg. Python2
            #    #logger.warning("TimeoutExpired not supported in this version of Python. Issuing SIGKILL to job {}".format(job.name))
            #    if JobController._time_out(job.process, self.wait_time):
            #        logger.warning("Kill signal {} timed out for job {}: Issuing SIGKILL".format(self.kill_signal, job.name))
            #        JobController._kill_process(job.process, signal.SIGKILL)
            #        job.process.wait()
            #except subprocess.TimeoutExpired:
            #    logger.warning("Kill signal {} timed out for job {}: Issuing SIGKILL".format(self.kill_signal, job.name))
            #    JobController._kill_process(job.process, signal.SIGKILL)
            #    job.process.wait()
        else:
            job.process.wait()
        job.state = 'USER_KILLED'
        job.finished = True
        job.calc_job_timing()
        #Need to test out what to do with
        #job.errcode #Can it be discovered after killing?
        #job.success #Could set to false but should be already - only set to true on success
def set_kill_mode(self, signal=None, wait_and_kill=None, wait_time=None):
''' Configures the kill mode for the job_controller
Parameters
----------
signal: String, optional
The signal type to be sent to kill job: 'SIGTERM' or 'SIGKILL'
wait_and_kill: boolean, optional
If True, a SIGKILL will be sent after <wait_time> seconds if the process has not terminated.
wait_time: int, optional
The number of seconds to wait for the job to finish before sending a SIGKILL when wait_and_kill is set.
(Default is 60).
'''
if signal is not None:
if signal not in SIGNALS:
raise JobControllerException("Unknown signal {} supplied to set_kill_mode".format(signal))
self.kill_signal = signal
if wait_and_kill is not None:
self.wait_and_kill = wait_and_kill
if wait_time is not None:
self.wait_time = wait_time
if not wait_and_kill:
logger.warning('wait_time set but will have no effect while wait_and_kill is False')
def get_job(self, jobid):
''' Returns the job object for the supplied job ID '''
if self.list_of_jobs:
for job in self.list_of_jobs:
if job.id == jobid:
return job
logger.warning("Job {} not found in joblist".format(jobid))
return None
logger.warning("Job {} not found in joblist. Joblist is empty".format(jobid))
return None
    def set_workerID(self, workerid):
        """Sets the worker ID for this job_controller.

        Used, e.g., to name auto-generated machinefiles per worker
        (see launch).
        """
        self.workerID = workerid
    #Reformat create_machinefile to use this and also use this for non-machinefile cases when auto-detecting
    def get_resources(self, num_procs=None, num_nodes=None, ranks_per_node=None, hyperthreads=False):
        """
        Reconciles user supplied options with available Worker resources to produce run configuration.

        Detects resources available to worker, checks if an existing user supplied config is valid,
        and fills in any missing config information (ie. num_procs/num_nodes/ranks_per_node)

        User supplied config options are honoured, and an exception is raised if these are infeasible.
        """
        node_list = self.resources.local_nodelist
        if hyperthreads:
            cores_avail_per_node = self.resources.logical_cores_avail_per_node
        else:
            cores_avail_per_node = self.resources.physical_cores_avail_per_node
        num_workers = self.resources.num_workers
        local_node_count = self.resources.local_node_count
        # When workers share nodes, each worker gets an equal share of the cores.
        if num_workers > local_node_count:
            workers_per_node = self.resources.workers_per_node
            cores_avail_per_node_per_worker = cores_avail_per_node//workers_per_node
        else:
            cores_avail_per_node_per_worker = cores_avail_per_node
        if not node_list:
            raise JobControllerException("Node list is empty - aborting")
        #If no decomposition supplied - use all available cores/nodes
        if num_procs is None and num_nodes is None and ranks_per_node is None:
            num_nodes = local_node_count
            ranks_per_node = cores_avail_per_node_per_worker
            #logger
            logger.debug("No decomposition supplied - using all available resource. Nodes: {} ranks_per_node {}".format(num_nodes, ranks_per_node))
        elif num_nodes is None and ranks_per_node is None:
            #Got just num_procs
            num_nodes = local_node_count
            #Here is where really want a compact/scatter option - go for scatter (could get cores and say if less than one node - but then hyperthreads complication if no psutil installed)
        elif num_procs is None and ranks_per_node is None:
            #Who would just put num_nodes???
            ranks_per_node = cores_avail_per_node_per_worker
        elif num_procs is None and num_nodes is None:
            num_nodes = local_node_count
        #checks config is consistent and sufficient to express - does not check actual resources
        num_procs, num_nodes, ranks_per_node = JobController.job_partition(num_procs, num_nodes, ranks_per_node)
        if num_nodes > local_node_count:
            #Could just downgrade to those available with warning - for now error
            raise JobControllerException("Not enough nodes to honour arguments. Requested {}. Only {} available".format(num_nodes, local_node_count))
        elif ranks_per_node > cores_avail_per_node:
            #Could just downgrade to those available with warning - for now error
            raise JobControllerException("Not enough processors on a node to honour arguments. Requested {}. Only {} available".format(ranks_per_node, cores_avail_per_node))
        elif ranks_per_node > cores_avail_per_node_per_worker:
            #Could just downgrade to those available with warning - for now error
            raise JobControllerException("Not enough processors per worker to honour arguments. Requested {}. Only {} available".format(ranks_per_node, cores_avail_per_node_per_worker))
        elif num_procs > (cores_avail_per_node * local_node_count):
            #Could just downgrade to those available with warning - for now error
            raise JobControllerException("Not enough procs to honour arguments. Requested {}. Only {} available".format(num_procs, cores_avail_per_node*local_node_count))
        elif num_nodes < local_node_count:
            logger.warning("User constraints mean fewer nodes being used than available. {} nodes used. {} nodes available".format(num_nodes, local_node_count))
        return num_procs, num_nodes, ranks_per_node
def create_machinefile(self, machinefile=None, num_procs=None, num_nodes=None,
                       ranks_per_node=None, hyperthreads=False):
    """Create a machinefile based on user supplied config options, completed
    by detected machine resources.

    Parameters
    ----------
    machinefile: string, optional
        Name of the machinefile to write (default: 'machinefile').
    num_procs, num_nodes, ranks_per_node: int, optional
        Resource breakdown used to populate the file. NOTE(review): these are
        assumed to already be resolved (e.g. via get_resources) — the
        in-method resolution call is intentionally disabled upstream.
    hyperthreads: boolean, optional
        Kept for interface compatibility; currently unused here.

    Returns
    -------
    (built_mfile, num_procs, num_nodes, ranks_per_node):
        built_mfile is True if the machinefile was created and is non-empty.
    """
    # Maybe hyperthreads should be mpi_hyperthreads
    if machinefile is None:
        machinefile = 'machinefile'

    # Remove any stale machinefile from a previous run. Only swallow the
    # expected filesystem errors; the original bare `except: pass` also hid
    # programming errors such as KeyboardInterrupt/SystemExit.
    if os.path.isfile(machinefile):
        try:
            os.remove(machinefile)
        except OSError:
            pass

    node_list = self.resources.local_nodelist
    logger.debug("Creating machinefile with {} nodes and {} ranks per node".format(num_nodes, ranks_per_node))

    # One line per rank: each node name repeated ranks_per_node times.
    with open(machinefile, 'w') as f:
        for node in node_list[:num_nodes]:
            f.write((node + '\n') * ranks_per_node)

    # Return True only if the file was created and is not empty.
    built_mfile = os.path.isfile(machinefile) and os.path.getsize(machinefile) > 0
    # Also return (possibly updated) resource counts in case the caller wants them.
    return built_mfile, num_procs, num_nodes, ranks_per_node
#will prob want to adjust based on input
#def get_hostlist(self, machinefile=None, num_procs=None, num_nodes=None, ranks_per_node=None, hyperthreads=False):
def get_hostlist(self):
    """Return a comma-separated hostlist string for the detected local nodes."""
    nodes = self.resources.local_nodelist
    return ",".join(str(node) for node in nodes)
class BalsamJobController(JobController):
    """Inherits from JobController and wraps the Balsam job management service.

    .. note:: Job kills are currently not configurable in the Balsam
        job_controller. The set_kill_mode function will do nothing but
        print a warning.
    """

    def __init__(self, registry=None, auto_resources=True,
                 nodelist_env_slurm=None, nodelist_env_cobalt=None):
        """Instantiate a new BalsamJobController instance.

        A new BalsamJobController object is created with an application
        registry and configuration attributes.

        Parameters
        ----------
        registry: obj: Register, optional
            Application registry; defaults to Register.default_registry.
        auto_resources: boolean, optional
            Whether to auto-detect available node/core resources.
        nodelist_env_slurm, nodelist_env_cobalt: string, optional
            Environment variable names overriding nodelist detection.
        """
        self.registry = registry or Register.default_registry
        if self.registry is None:
            raise JobControllerException("Cannot find default registry")

        self.top_level_dir = os.getcwd()
        self.auto_resources = auto_resources
        self.manager_signal = 'none'

        if self.auto_resources:
            # central_mode=True: the manager runs on a dedicated node under Balsam.
            self.resources = Resources(top_level_dir=self.top_level_dir, central_mode=True,
                                       nodelist_env_slurm=nodelist_env_slurm,
                                       nodelist_env_cobalt=nodelist_env_cobalt)

        self.list_of_jobs = []
        # Register this instance as the process-wide controller.
        # NOTE(review): self.workerID is read in launch() but set elsewhere
        # (presumably by the worker setup) - confirm.
        JobController.controller = self

    def launch(self, calc_type, num_procs=None, num_nodes=None, ranks_per_node=None,
               machinefile=None, app_args=None, stdout=None, stderr=None,
               stage_inout=None, test=False, hyperthreads=False):
        """Create a new job and add it to the Balsam database for launching.

        The created job object is returned. The `test` argument is accepted
        for interface compatibility but is not used by the Balsam controller.
        """
        import balsam.launcher.dag as dag

        # Find the default sim or gen app from the registry.
        if calc_type == 'sim':
            if self.registry.sim_default_app is None:
                raise JobControllerException("Default sim app is not set")
            app = self.registry.sim_default_app
        elif calc_type == 'gen':
            # Bug fix: the None check was inverted here (it raised "not set"
            # when the gen app WAS set, and fell through to app=None when it
            # was missing). Now mirrors the 'sim' branch above and the
            # equivalent check in CuttrJobController.launch.
            if self.registry.gen_default_app is None:
                raise JobControllerException("Default gen app is not set")
            app = self.registry.gen_default_app
        else:
            raise JobControllerException("Unrecognized calculation type", calc_type)

        # Machinefiles are not supported by Balsam; a resource breakdown is required.
        if machinefile is not None:
            logger.warning("machinefile arg ignored - not supported in Balsam")
        if num_procs is None and num_nodes is None and ranks_per_node is None:
            raise JobControllerException("No procs/nodes provided - aborting")

        # Complete num_procs/num_nodes/ranks_per_node, with or without
        # resource detection (Balsam can also allocate dynamically if the
        # request is too large for a static allocation).
        if self.auto_resources:
            num_procs, num_nodes, ranks_per_node = self.get_resources(
                num_procs=num_procs, num_nodes=num_nodes,
                ranks_per_node=ranks_per_node, hyperthreads=hyperthreads)
        else:
            # Note: machinefile option is not included in job_partition.
            num_procs, num_nodes, ranks_per_node = JobController.job_partition(
                num_procs, num_nodes, ranks_per_node)

        # Temporary: Balsam does not currently accept stdout/stderr names.
        if stdout is not None or stderr is not None:
            logger.warning("Balsam does not currently accept a stdout or stderr name - ignoring")
            stdout = None
            stderr = None

        # Will be possible to override with an arg when implemented
        # (or optionally let Balsam assign the workdir).
        default_workdir = os.getcwd()
        hostlist = None
        job = BalsamJob(app, app_args, num_procs, num_nodes, ranks_per_node,
                        machinefile, hostlist, default_workdir, stdout, stderr,
                        self.workerID)

        # Not good for timing the job itself (includes queue wait) - used for
        # poll/kill estimates only.
        job.launch_time = time.time()

        add_job_args = {'name': job.name,
                        'workflow': "libe_workflow",  # could be made an argument
                        'user_workdir': default_workdir,  # could be made an argument
                        'application': app.name,
                        'args': job.app_args,
                        'num_nodes': job.num_nodes,
                        'ranks_per_node': job.ranks_per_node}

        if stage_inout is not None:
            # For now staging is hardcoded - for testing.
            add_job_args['stage_in_url'] = "local:" + stage_inout + "/*"
            add_job_args['stage_out_url'] = "local:" + stage_inout
            add_job_args['stage_out_files'] = "*.out"

        job.process = dag.add_job(**add_job_args)
        logger.debug("Added job to Balsam database {}: Worker {} nodes {} ppn {}".format(
            job.name, self.workerID, job.num_nodes, job.ranks_per_node))
        # NOTE: job.process.working_directory might not be set yet at this point.
        self.list_of_jobs.append(job)
        return job

    def poll(self, job):
        """Poll Balsam and update the status attributes of the supplied job."""
        if not isinstance(job, BalsamJob):
            raise JobControllerException('Invalid job has been provided')

        # Check the job has been launched (i.e. it has a process ID).
        if job.process is None:
            raise JobControllerException('Polled job has no process ID - check jobs been launched')

        # Do not poll if the job already finished.
        if job.finished:
            logger.warning('Polled job has already finished. Not re-polling. Status is {}'.format(job.state))
            return

        # Get the current state of the job from the Balsam database.
        job.process.refresh_from_db()
        job.balsam_state = job.process.state

        import balsam.launcher.dag as dag  # noqa: F401  # may be required before importing models
        from balsam.service import models

        if job.balsam_state in models.END_STATES:
            job.finished = True
            job.calc_job_timing()
            if job.workdir is None:
                job.workdir = job.process.working_directory
            if job.balsam_state == 'JOB_FINISHED':
                job.success = True
                job.state = 'FINISHED'
            elif job.balsam_state == 'PARENT_KILLED':  # not currently used
                job.state = 'USER_KILLED'
                # job.errcode is not currently returned by the Balsam API.
            elif job.balsam_state in STATES:
                # Balsam state matches one of this module's known states.
                job.state = job.balsam_state
            else:
                logger.warning("Job finished, but in unrecognized Balsam state {}".format(job.balsam_state))
                job.state = 'UNKNOWN'
        elif job.balsam_state in models.ACTIVE_STATES:
            job.state = 'RUNNING'
            if job.workdir is None:
                job.workdir = job.process.working_directory
        elif job.balsam_state in models.PROCESSABLE_STATES + models.RUNNABLE_STATES:
            job.state = 'WAITING'
        else:
            raise JobControllerException(
                'Job state returned from Balsam is not in known list of '
                'Balsam states. Job state is {}'.format(job.balsam_state))

    def kill(self, job):
        """Kill or cancel the supplied job."""
        if not isinstance(job, BalsamJob):
            raise JobControllerException('Invalid job has been provided')

        import balsam.launcher.dag as dag
        dag.kill(job.process)
        # Could wait here and confirm with Balsam that it is killed - not implemented yet.
        job.state = 'USER_KILLED'
        job.finished = True
        job.calc_job_timing()

    def set_kill_mode(self, signal=None, wait_and_kill=None, wait_time=None):
        """Not currently implemented for BalsamJobController. No action is taken."""
        logger.warning("set_kill_mode currently has no action with Balsam controller")
class CuttrJobController(JobController):
    """Job controller that launches each job inside a per-worker `cuttr`
    container, partitioning CPUs, NUMA nodes and (via resctrl) L3 cache
    between workers using hwloc-derived slices.
    """

    # Class-level controller handle (mirrors JobController.controller usage).
    controller = None

    def __init__(self, registry=None, auto_resources=True, nodelist_env_slurm=None, nodelist_env_cobalt=None):
        # On the MPI manager rank (rank 0), compute per-worker resource slices
        # and create one cuttr container per worker rank in a single transaction.
        # NOTE(review): requires hwloc tools and the CAT_MASKS env var to be
        # set (an unset CAT_MASKS raises AttributeError here) - confirm intended.
        if MPI.COMM_WORLD.Get_rank() == 0:
            worker_count = MPI.COMM_WORLD.Get_size() - 1
            # SLICE_COUNT overrides the number of slices; default one per worker.
            slice_count = os.environ.get('SLICE_COUNT')
            if slice_count is not None and slice_count != '':
                slice_count = int(slice_count)
            else:
                slice_count = worker_count
            # Total processing units on the machine, per hwloc.
            total_core_count = int(subprocess.check_output('hwloc-calc all -N pu', shell=True))
            # CORE_COUNT optionally restricts how many PUs are partitioned.
            core_count = os.environ.get('CORE_COUNT')
            if core_count is not None and core_count != '':
                core_count = int(core_count)
            else:
                core_count = total_core_count
            # Replicate the per-slice lists so indexing by (rank - 1) below
            # wraps around when there are more workers than slices.
            cat_masks = os.environ.get('CAT_MASKS').split(' ') * int(math.ceil(float(total_core_count) / float(core_count)))
            cpulists = subprocess.check_output('hwloc-distrib {} | hwloc-calc --li --po --pulist'.format(total_core_count // core_count), shell=True).strip().split('\n') * int(math.ceil(float(worker_count) / float(slice_count)))
            cpumask = subprocess.check_output('hwloc-distrib {}'.format(total_core_count // core_count), shell=True).strip().replace('0x', '').split('\n') * int(math.ceil(float(worker_count) / float(slice_count)))
            numanodes = subprocess.check_output('hwloc-distrib {} | hwloc-calc --li --po --pulist --intersect NUMAnode'.format(total_core_count // core_count), shell=True).strip().split('\n') * int(math.ceil(float(worker_count) / float(slice_count)))
            # Build one "create" command per worker rank, joined into a single
            # cuttr transaction payload.
            command_list = ''
            for rank in range(1, MPI.COMM_WORLD.Get_size()):
                if command_list != '':
                    command_list = command_list + ','
                # L3 cache id is looked up from the slice's first CPU so the
                # resctrl schemata targets the right cache domain.
                first_cpu = cpulists[rank - 1].split(",")[0]
                cache_id = subprocess.check_output('cat /sys/devices/system/cpu/cpu{}/cache/index3/id'.format(first_cpu), shell=True).strip()
                schemata = 'L3:{}={}'.format(cache_id, cat_masks[rank - 1])
                command_list = (command_list +
                                ('{{'
                                 '"command":"create",'
                                 '"name":"{}",'
                                 '"args":['
                                 '"cpuset",'
                                 '"perf_event",'
                                 '"resctrl"'
                                 '],'
                                 '"properties":{{'
                                 '"cpuset.cpus":"{}",'
                                 '"cpuset.mems":"{}",'
                                 '"cpus":"{}",'
                                 '"schemata":"{}\\n"'
                                 '}}'
                                 '}}').format(rank, cpulists[rank - 1], numanodes[rank - 1],
                                              cpumask[rank - 1], schemata))
            # NOTE: this local `json` shadows the imported json module within
            # __init__ (harmless here since json.dumps is not used in this scope).
            json = '{{"commands":[{}]}}'.format(command_list)
            subprocess.check_call(['cuttr', 'transaction', json])
        # Because of Python2 (explicit base-class call instead of super()).
        JobController.__init__(self, registry, auto_resources, nodelist_env_slurm, nodelist_env_cobalt)

    def launch(self, calc_type, num_procs=None, num_nodes=None, ranks_per_node=None,
               machinefile=None, app_args=None, stdout=None, stderr=None, stage_inout=None, hyperthreads=False, test=False):
        ''' Creates a new job, and either launches or schedules to launch in the job controller

        The created job object is returned.

        Parameters
        ----------

        calc_type: String
            The calculation type: 'sim' or 'gen'

        num_procs: int, optional
            The total number of MPI tasks on which to launch the job.

        num_nodes: int, optional
            The number of nodes on which to launch the job.

        ranks_per_node: int, optional
            The ranks per node for this job.

        machinefile: string, optional
            Name of a machinefile for this job to use.

        app_args: string, optional
            A string of the application arguments to be added to job launch command line.

        stdout: string, optional
            A standard output filename.

        stderr: string, optional
            A standard error filename.

        stage_inout: string, optional
            A directory to copy files from. Default will take from current directory.

        hyperthreads: boolean, optional
            Whether to launch MPI tasks to hyperthreads

        test: boolean, optional
            Whether this is a test - No job will be launched. Instead runline is printed to logger (At INFO level).

        Returns
        -------

        job: obj: Job
            The launched job object.

        Note that if some combination of num_procs, num_nodes and ranks_per_node are provided, these will be honored if possible. If resource detection is on and these are omitted, then the available resources will be divided amongst workers.
        '''
        # Find the default sim or gen app from registry.sim_default_app OR registry.gen_default_app
        # Could take optional app arg - if they want to supply here - instead of taking from registry
        if calc_type == 'sim':
            if self.registry.sim_default_app is None:
                raise JobControllerException("Default sim app is not set")
            app = self.registry.sim_default_app
        elif calc_type == 'gen':
            if self.registry.gen_default_app is None:
                raise JobControllerException("Default gen app is not set")
            app = self.registry.gen_default_app
        else:
            raise JobControllerException("Unrecognized calculation type", calc_type)

        # Resolve resources: prefer a hostlist for multi-node runs, otherwise
        # auto-generate a per-worker machinefile.
        hostlist = None
        if machinefile is None and self.auto_resources:
            # kludging this for now - not nec machinefile if more than one node - try a hostlist
            num_procs, num_nodes, ranks_per_node = self.get_resources(num_procs=num_procs, num_nodes=num_nodes, ranks_per_node=ranks_per_node, hyperthreads=hyperthreads)
            if num_nodes > 1:
                # hostlist
                hostlist = self.get_hostlist()
            else:
                # machinefile (name made unique per worker when workerID is known)
                if self.workerID is not None:
                    machinefile = 'machinefile_autogen_for_worker_' + str(self.workerID)
                else:
                    machinefile = 'machinefile_autogen'
                mfile_created, num_procs, num_nodes, ranks_per_node = self.create_machinefile(machinefile, num_procs, num_nodes, ranks_per_node, hyperthreads)
                if not mfile_created:
                    raise JobControllerException("Auto-creation of machinefile failed")
        else:
            num_procs, num_nodes, ranks_per_node = JobController.job_partition(num_procs, num_nodes, ranks_per_node, machinefile)

        default_workdir = os.getcwd()  # Will be possible to override with arg when implemented
        job = Job(app, app_args, num_procs, num_nodes, ranks_per_node, machinefile, hostlist, default_workdir, stdout, stderr, self.workerID)

        # Temporary perhaps - though when create workdirs - will probably keep output in place
        if stage_inout is not None:
            logger.warning('stage_inout option ignored in this job_controller - runs in-place')

        # Construct run line. If USE_PERF is set, the launch is wrapped in
        # perf (with {rank} substituted); otherwise the MPI launcher is the
        # command itself and only its options go into the argument list.
        runline = []
        use_perf = os.environ.get('USE_PERF')
        if use_perf is None:
            cmd = self.mpi_launcher
        else:
            cmd = 'perf'
            use_perf = use_perf.replace('{rank}', str(MPI.COMM_WORLD.Get_rank()))
            runline.extend(use_perf.split())
            runline.append(self.mpi_launcher)

        if job.machinefile is not None:
            # os.environ['SLURM_HOSTFILE'] = job.machinefile
            runline.append(self.mfile)
            runline.append(job.machinefile)

        # Should be else - if machine file - dont need any other config
        if job.hostlist is not None:
            # os.environ['SLURM_HOSTFILE'] = job.machinefile
            runline.append(self.hostlist)
            runline.append(job.hostlist)

        if job.num_procs is not None:
            runline.append(self.nprocs)
            runline.append(str(job.num_procs))

        # Not currently setting nodes
        # - as not always supported - but should always have the other two after calling _job_partition
        # if job.num_nodes is not None:
        #     runline.append(self.nnodes)
        #     runline.append(str(job.num_nodes))
        # Currently issues - command depends on mpich/openmpi etc...
        if job.ranks_per_node is not None:
            runline.append(self.ppn)
            runline.append(str(job.ranks_per_node))

        runline.append(job.app.full_path)

        if job.app_args is not None:
            runline.extend(job.app_args.split())

        # Wrap the whole launch in a cuttr fork_exec transaction so the job
        # runs inside this rank's container created in __init__.
        runline = ['cuttr', 'transaction',
                   ('{{"commands":[{{'
                    '"command":"fork_exec",'
                    '"name":"{}",'
                    '"cmd":"{}",'
                    '"cmd_args":{}'
                    '}}]}}').format(MPI.COMM_WORLD.Get_rank(), cmd, json.dumps(runline))
                   ]

        if test:
            logger.info('Test selected: Not launching job')
            logger.info('runline args are {}'.format(runline))
        else:
            logger.debug("Launching job {}: {}".format(job.name, " ".join(runline)))  # One line
            # not good for timing job itself as dont know when finishes - if use this prob. change to date time or
            # use for timeout. For now using for timing with approx end....
            job.launch_time = time.time()
            # NOTE(review): assumes job.stdout/job.stderr were defaulted by the
            # Job constructor when None was passed - confirm.
            job.process = subprocess.Popen(runline, cwd='./', stdout=open(job.stdout, 'w'), stderr=open(job.stderr, 'w'), shell=False, preexec_fn=os.setsid)
            # To test when have workdir:
            # job.process = subprocess.Popen(runline, cwd=job.workdir, stdout=open(job.stdout, 'w'), stderr=open(job.stderr, 'w'), shell=False, preexec_fn=os.setsid)

        self.list_of_jobs.append(job)
        return job
| StarcoderdataPython |
3363538 | <gh_stars>10-100
'''
Description:
Implement the following operations of a stack using queues.
push(x) -- Push element x onto stack.
pop() -- Removes the element on top of the stack.
top() -- Get the top element.
empty() -- Return whether the stack is empty.
Example:
MyStack stack = new MyStack();
stack.push(1);
stack.push(2);
stack.top(); // returns 2
stack.pop(); // returns 2
stack.empty(); // returns false
Notes:
You must use only standard operations of a queue -- which means only push to back, peek/pop from front, size, and is empty operations are valid.
Depending on your language, queue may not be supported natively. You may simulate a queue by using a list or deque (double-ended queue), as long as you use only standard operations of a queue.
You may assume that all operations are valid (for example, no pop or top operations will be called on an empty stack).
'''
class MyStack:
    """A LIFO stack implemented on top of two FIFO queues.

    Only standard queue operations are used on the underlying lists:
    push to back (append), pop from front (pop(0)), size (len).
    pop() and top() are O(n); push() and empty() are O(1).
    """

    def __init__(self):
        """Initialize your data structure here."""
        # inbox holds the stack contents in push order; buffer is scratch
        # space used while draining inbox to reach its last element.
        self.inbox = []
        self.buffer = []

    def push(self, x: int) -> None:
        """Push element x onto stack."""
        self.inbox.append(x)

    def _drain_to_last(self, keep_last: bool) -> int:
        """Dequeue every element of inbox into buffer, returning the last one.

        The last element (the stack top) is re-enqueued only when keep_last
        is True (top) and dropped otherwise (pop). The two queues are then
        swapped so inbox again holds the stack contents. Factored out because
        pop() and top() previously duplicated this loop.
        """
        while len(self.inbox) > 1:
            self.buffer.append(self.inbox.pop(0))
        top_element = self.inbox.pop(0)
        if keep_last:
            self.buffer.append(top_element)
        # swap inbox and buffer
        self.inbox, self.buffer = self.buffer, self.inbox
        return top_element

    def pop(self) -> int:
        """Remove the element on top of the stack and return it."""
        return self._drain_to_last(keep_last=False)

    def top(self) -> int:
        """Get the top element without removing it."""
        return self._drain_to_last(keep_last=True)

    def empty(self) -> bool:
        """Return whether the stack is empty."""
        # Both queues are empty exactly when the stack is empty.
        return not (self.inbox or self.buffer)
# N : the length of input elements
## Time Complexity: O( N )
#
# The overhead of push() is O( 1 )
# The overhead of top() is O( N )
# The overhead of pop() is O( N )
# The overhead of empty() is O( 1 )
## Space Complexity: O( N )
#
# The overhead in space is to maintain two queues: one is inbox, the other is buffer
# The length of inbox and buffer is at most O( N )
def test_bench():
    # expected output:
    '''
    2
    2
    False
    '''
    stack = MyStack()
    for value in (1, 2):
        stack.push(value)
    print(stack.top())
    print(stack.pop())
    print(stack.empty())
# Run the smoke test when this module is executed as a script.
if __name__ == '__main__':
    test_bench()
21877 | <filename>DominantSparseEigenAD/tests/demos/2ndderivative.py<gh_stars>10-100
"""
A small toy example demonstrating how the process of computing 1st
derivative can be added to the original computation graph to produce an enlarged
graph whose back-propagation yields the 2nd derivative.
"""
import torch
# Build a toy graph y = exp(x) * cos(x), then manually append the graph that
# computes its first derivative, so that one more backward pass through the
# enlarged graph yields the second derivative.
x = torch.randn(10, requires_grad=True)
exp = torch.exp(x)
cos = torch.cos(x)
y = exp * cos

# Adjoints of the two factors under y = exp * cos.
cosbar = exp
expbar = cos
minussin = -torch.sin(x)

# dy/dx assembled by hand: d(exp*cos)/dx = exp*(-sin) + cos*exp.
grad1 = cosbar * minussin
grad2 = expbar * exp
dydx = grad1 + grad2

# torch.autograd.grad returns a tuple of gradients - unpack the single
# tensor so it prints like the other results (previously the tuple itself
# was printed, inconsistent with y and dy/dx).
d2ydx2, = torch.autograd.grad(dydx, x, grad_outputs=torch.ones(dydx.shape[0]))

print("y: ", y, "\ngroundtruth: ", torch.exp(x) * torch.cos(x))
print("dy/dx: ", dydx, "\ngroundtruth: ", torch.exp(x) * (torch.cos(x) - torch.sin(x)))
# Consistency fix: the last label previously lacked the ": " separator.
print("d2y/dx2: ", d2ydx2, "\ngroundtruth: ", -2 * torch.exp(x) * torch.sin(x))
| StarcoderdataPython |
71852 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Google Factory Tool.
This tool is intended to be used on factory assembly lines. It
provides all of the Google required test functionality and must be run
on each device as part of the assembly process.
"""
import logging
import os
import pipes
import re
import sys
from tempfile import gettempdir
import threading
import time
import xmlrpc.client
from cros.factory.gooftool.common import ExecFactoryPar
from cros.factory.gooftool.common import Shell
from cros.factory.gooftool.core import Gooftool
from cros.factory.gooftool import crosfw
from cros.factory.gooftool import report_upload
from cros.factory.gooftool import vpd
from cros.factory.hwid.v3 import hwid_utils
from cros.factory.probe.functions import chromeos_firmware
from cros.factory.test.env import paths
from cros.factory.test import event_log
from cros.factory.test.rules import phase
from cros.factory.test.rules.privacy import FilterDict
from cros.factory.test import state
from cros.factory.test.utils.cbi_utils import CbiEepromWpStatus
from cros.factory.utils import argparse_utils
from cros.factory.utils.argparse_utils import CmdArg
from cros.factory.utils.argparse_utils import ParseCmdline
from cros.factory.utils.argparse_utils import VERBOSITY_CMD_ARG
from cros.factory.utils.debug_utils import SetupLogging
from cros.factory.utils import file_utils
from cros.factory.utils.process_utils import Spawn
from cros.factory.utils import sys_utils
from cros.factory.utils import time_utils
from cros.factory.utils.type_utils import Error
# TODO(tammo): Replace calls to sys.exit with raise Exit, and maybe
# treat that specially (as a smooth exit, as opposed to the more
# verbose output for generic Error).

# Lazily-created process-wide Gooftool singleton and the lock guarding its
# creation (see GetGooftool).
_global_gooftool = None
_gooftool_lock = threading.Lock()
# Cached result of HasFpmcu(); None until first queried.
_has_fpmcu = None
def GetGooftool(options):
  """Return the process-wide Gooftool singleton, creating it on first use.

  The `project` and `hwdb_path` attributes are read from `options` (when
  present) only on the first call; later calls return the cached instance
  regardless of the options passed.
  """
  global _global_gooftool  # pylint: disable=global-statement

  # Double-checked locking: the unlocked check is a fast path; the check is
  # repeated under the lock so concurrent first callers create only one
  # Gooftool instance.
  if _global_gooftool is None:
    with _gooftool_lock:
      if _global_gooftool is None:
        project = getattr(options, 'project', None)
        hwdb_path = getattr(options, 'hwdb_path', None)
        _global_gooftool = Gooftool(hwid_version=3, project=project,
                                    hwdb_path=hwdb_path)

  return _global_gooftool
def HasFpmcu():
  """Return whether cros_config declares a fingerprint MCU for this device.

  The answer is cached in the module-level `_has_fpmcu`. Raises Error if
  cros_config declares an FPMCU but the /dev/cros_fp device node is absent.
  """
  global _has_fpmcu  # pylint: disable=global-statement

  if _has_fpmcu is not None:
    return _has_fpmcu

  FPMCU_PATH = '/dev/cros_fp'
  cros_config_output = Shell(['cros_config', '/fingerprint', 'board'])
  declared_in_config = bool(
      cros_config_output.success and cros_config_output.stdout)

  # A declared FPMCU must come with its device node.
  if declared_in_config and not os.path.exists(FPMCU_PATH):
    raise Error('FPMCU found in cros_config but missing in %s.' % FPMCU_PATH)

  _has_fpmcu = declared_in_config
  return _has_fpmcu
def Command(cmd_name, *args, **kwargs):
  """Decorator for commands in gooftool.

  This is similar to argparse_utils.Command, but all gooftool commands
  can be waived during `gooftool finalize` or `gooftool verify` using
  --waive_list or --skip_list option.
  """
  def Decorate(fun):
    def CommandWithWaiveSkipCheck(options):
      # Waive/skip lists default to empty when the options object lacks them.
      waive_list = vars(options).get('waive_list', [])
      skip_list = vars(options).get('skip_list', [])
      # From PVT_DOGFOOD phase onward, waiving or skipping checks is forbidden.
      if phase.GetPhase() >= phase.PVT_DOGFOOD and (
          waive_list != [] or skip_list != []):
        raise Error(
            'waive_list and skip_list should be empty for phase %s' %
            phase.GetPhase())

      # A skipped command is not run at all; a waived command runs but its
      # failure is only logged instead of propagated.
      if cmd_name not in skip_list:
        try:
          fun(options)
        except Exception as e:
          if cmd_name in waive_list:
            logging.exception(e)
          else:
            raise

    return argparse_utils.Command(cmd_name, *args, **kwargs)(
        CommandWithWaiveSkipCheck)
  return Decorate
@Command('write_hwid',
         CmdArg('hwid', metavar='HWID', help='HWID string'))
def WriteHWID(options):
  """Write specified HWID value into the system BB."""
  hwid_value = options.hwid
  logging.info('writing hwid string %r', hwid_value)
  GetGooftool(options).WriteHWID(hwid_value)
  event_log.Log('write_hwid', hwid=hwid_value)
  print('Wrote HWID: %r' % hwid_value)
@Command('read_hwid')
def ReadHWID(options):
  """Read the HWID string from GBB."""
  logging.info('reading the hwid string')
  hwid_value = GetGooftool(options).ReadHWID()
  print(hwid_value)
# Shared command-line argument definitions, reused across gooftool
# sub-commands via the @Command decorator.

_project_cmd_arg = CmdArg(
    '--project', metavar='PROJECT',
    default=None, help='Project name to test.')

_hwdb_path_cmd_arg = CmdArg(
    '--hwdb_path', metavar='PATH',
    default=hwid_utils.GetDefaultDataPath(),
    help='Path to the HWID database.')

_hwid_status_list_cmd_arg = CmdArg(
    '--status', nargs='*', default=['supported'],
    help='allow only HWIDs with these status values')

# TODO(yhong): Replace this argument with `--hwid-material-file` when
# `cros.factory.hwid.v3.hwid_utils` provides methods to parse such file.
_probe_results_cmd_arg = CmdArg(
    '--probe_results', metavar='RESULTS.json',
    help=('Output from "hwid probe" (used instead of probing this system).'))

_device_info_cmd_arg = CmdArg(
    '--device_info', metavar='DEVICE_INFO.yaml', default=None,
    help='A dict of device info to use instead of fetching from shopfloor '
    'server.')

_hwid_cmd_arg = CmdArg(
    '--hwid', metavar='HWID',
    help='HWID to verify (instead of the currently set HWID of this system).')

_hwid_run_vpd_cmd_arg = CmdArg(
    '--hwid-run-vpd', action='store_true',
    help=('Specify the hwid utility to obtain the vpd data by running the '
          '`vpd` commandline tool.'))

_hwid_vpd_data_file_cmd_arg = CmdArg(
    '--hwid-vpd-data-file', metavar='FILE.json', type=str, default=None,
    help=('Specify the hwid utility to obtain the vpd data from the specified '
          'file.'))

_no_write_protect_cmd_arg = CmdArg(
    '--no_write_protect', action='store_true',
    help='Do not enable firmware write protection.')

_rma_mode_cmd_arg = CmdArg(
    '--rma_mode', action='store_true',
    help='Enable RMA mode, do not check for deprecated components.')

_mlb_mode_cmd_arg = CmdArg('--mlb_mode', action='store_true',
                           help='Enable MLB mode, only do cr50 finalize.')

_cros_core_cmd_arg = CmdArg(
    '--cros_core', action='store_true',
    help='Finalize for ChromeOS Core devices (may add or remove few test '
    'items. For example, registration codes or firmware bitmap '
    'locale settings).')

_has_ec_pubkey_cmd_arg = CmdArg(
    '--has_ec_pubkey', action='store_true', default=None,
    help='The device has EC public key for EFS and need to run VerifyECKey.')

_enforced_release_channels_cmd_arg = CmdArg(
    '--enforced_release_channels', nargs='*', default=None,
    help='Enforced release image channels.')

_ec_pubkey_path_cmd_arg = CmdArg(
    '--ec_pubkey_path',
    default=None,
    help='Path to public key in vb2 format. Verify EC key with pubkey file.')

_ec_pubkey_hash_cmd_arg = CmdArg(
    '--ec_pubkey_hash',
    default=None,
    help='A string for public key hash. Verify EC key with the given hash.')

_release_rootfs_cmd_arg = CmdArg(
    '--release_rootfs', help='Location of release image rootfs partition.')

_firmware_path_cmd_arg = CmdArg(
    '--firmware_path', help='Location of firmware image partition.')

_shopfloor_url_args_cmd_arg = CmdArg(
    '--shopfloor_url',
    help='Shopfloor server url to be informed when wiping is done. '
    'After wiping, a XML-RPC request will be sent to the '
    'given url to indicate the completion of wiping.')

_station_ip_cmd_arg = CmdArg(
    '--station_ip',
    help='IP of remote station')

_station_port_cmd_arg = CmdArg(
    '--station_port',
    help='Port on remote station')

_wipe_finish_token_cmd_arg = CmdArg(
    '--wipe_finish_token',
    help='Required token when notifying station after wipe finished')

_keep_developer_mode_flag_after_clobber_state_cmd_arg = CmdArg(
    # The argument name is super long because you should never use it by
    # yourself when using command line tools.
    '--keep_developer_mode_flag_after_clobber_state',
    action='store_true', default=None,
    help='After clobber-state, do not delete .developer_mode')

_waive_list_cmd_arg = CmdArg(
    '--waive_list', nargs='*', default=[], metavar='SUBCMD',
    help='A list of waived checks, separated by whitespace. '
    'Each item should be a sub-command of gooftool. '
    'e.g. "gooftool verify --waive_list verify_tpm clear_gbb_flags".')

_skip_list_cmd_arg = CmdArg(
    '--skip_list', nargs='*', default=[], metavar='SUBCMD',
    help='A list of skipped checks, separated by whitespace. '
    'Each item should be a sub-command of gooftool. '
    'e.g. "gooftool verify --skip_list verify_tpm clear_gbb_flags".')

_test_umount_cmd_arg = CmdArg(
    '--test_umount', action='store_true',
    help='(For testing only) Only umount rootfs and stateful partition '
    'instead of running full wiping and cutoff process.')

_rlz_embargo_end_date_offset_cmd_arg = CmdArg(
    '--embargo_offset', type=int, default=7, choices=list(range(7, 15)),
    help='Change the offset of embargo end date, cannot less than 7 days or '
    'more than 14 days.')

_no_ectool_cmd_arg = CmdArg(
    '--no_ectool', action='store_false', dest='has_ectool',
    help='There is no ectool utility so tests rely on ectool should be '
    'skipped.')

_no_generate_mfg_date_cmd_arg = CmdArg(
    '--no_generate_mfg_date', action='store_false', dest='generate_mfg_date',
    help='Do not generate manufacturing date nor write mfg_date into VPD.')

_enable_zero_touch_cmd_arg = CmdArg(
    '--enable_zero_touch', action='store_true',
    help='Set attested_device_id for zero-touch feature.')

_cbi_eeprom_wp_status_cmd_arg = CmdArg(
    '--cbi_eeprom_wp_status', type=str, default=CbiEepromWpStatus.Locked,
    choices=CbiEepromWpStatus,
    help='The expected status of CBI EEPROM after factory mode disabled.')

_use_generic_tpm2_arg = CmdArg(
    '--use_generic_tpm2', action='store_true',
    help=('Most Chromebooks are using Google security chips. If this project '
          'is using a generic TPM (e.g. infineon), set this to true. The '
          'steps in `cr50_finalize` will be adjusted. The EC will bypass the '
          'CBI EEPROM write protect check and will return general error code '
          'if failed to write.'))
@Command(
    'verify_ec_key',
    _ec_pubkey_path_cmd_arg,
    _ec_pubkey_hash_cmd_arg)
def VerifyECKey(options):
  """Verify the EC key against a pubkey file or a pubkey hash."""
  gooftool = GetGooftool(options)
  return gooftool.VerifyECKey(options.ec_pubkey_path, options.ec_pubkey_hash)
@Command('verify_fp_key')
def VerifyFpKey(options):
  """Verify the fingerprint firmware key."""
  gooftool = GetGooftool(options)
  return gooftool.VerifyFpKey()
@Command('verify_keys',
         _release_rootfs_cmd_arg,
         _firmware_path_cmd_arg)
def VerifyKeys(options):
  """Verify that keys in firmware and on the SSD match."""
  gooftool = GetGooftool(options)
  return gooftool.VerifyKeys(options.release_rootfs, options.firmware_path)
@Command('set_fw_bitmap_locale')
def SetFirmwareBitmapLocale(options):
  """Use VPD locale value to set firmware bitmap default language."""
  index, locale = GetGooftool(options).SetFirmwareBitmapLocale()
  logging.info('Firmware bitmap initial locale set to %d (%s).', index, locale)
@Command('verify_system_time',
         _release_rootfs_cmd_arg,
         _rma_mode_cmd_arg)
def VerifySystemTime(options):
  """Verify that the system time is later than the release image build time."""
  gooftool = GetGooftool(options)
  return gooftool.VerifySystemTime(options.release_rootfs,
                                   rma_mode=options.rma_mode)
@Command('verify_rootfs',
         _release_rootfs_cmd_arg)
def VerifyRootFs(options):
  """Verify that the rootfs on the SSD is valid by checking its hash."""
  gooftool = GetGooftool(options)
  return gooftool.VerifyRootFs(options.release_rootfs)
@Command('verify_tpm')
def VerifyTPM(options):
  """Verify that the TPM is cleared."""
  gooftool = GetGooftool(options)
  return gooftool.VerifyTPM()
@Command('verify_me_locked')
def VerifyManagementEngineLocked(options):
"""Verify Management Engine is locked."""
return GetGooftool(options).VerifyManagementEngineLocked()
@Command('verify_switch_wp',
_no_ectool_cmd_arg)
def VerifyWPSwitch(options):
"""Verify hardware write protection switch is enabled."""
GetGooftool(options).VerifyWPSwitch(options.has_ectool)
@Command('verify_vpd')
def VerifyVPD(options):
"""Verify that VPD values are properly set.
Check if mandatory fields are set, and deprecated fields don't exist.
"""
ro_vpd = vpd.VPDTool().GetAllData(partition=vpd.VPD_READONLY_PARTITION_NAME)
rw_vpd = vpd.VPDTool().GetAllData(partition=vpd.VPD_READWRITE_PARTITION_NAME)
event_log.Log('vpd', ro=FilterDict(ro_vpd), rw=FilterDict(rw_vpd))
return GetGooftool(options).VerifyVPD()
@Command('verify_release_channel',
         _enforced_release_channels_cmd_arg)
def VerifyReleaseChannel(options):
  """Verify that release image channel is correct.
  ChromeOS has four channels: canary, dev, beta and stable.
  The last three channels support image auto-updates, checks
  that release image channel is one of them.
  """
  return GetGooftool(options).VerifyReleaseChannel(
      options.enforced_release_channels)
@Command('verify_cros_config')
def VerifyCrosConfig(options):
  """Verify entries in cros config make sense."""
  return GetGooftool(options).VerifyCrosConfig()
@Command('verify-sn-bits',
         _enable_zero_touch_cmd_arg)
def VerifySnBits(options):
  """Verify SN bits, but only when zero-touch enrollment is enabled."""
  if options.enable_zero_touch:
    GetGooftool(options).VerifySnBits()
@Command('verify_cbi_eeprom_wp_status', _cbi_eeprom_wp_status_cmd_arg,
         _use_generic_tpm2_arg)
def VerifyCBIEEPROMWPStatus(options):
  """Verify CBI EEPROM status.
  If cbi_eeprom_wp_status is Absent, CBI EEPROM must be absent. If
  cbi_eeprom_wp_status is Locked, write protection must be on. Otherwise, write
  protection must be off.
  If use_generic_tmp2, the EEPROM write protect checks in EC will be bypassed
  and will return general error (code: 4) even if it is write protected. So we
  must handle this situation in cbi_util.
  """
  return GetGooftool(options).VerifyCBIEEPROMWPStatus(
      options.cbi_eeprom_wp_status, options.use_generic_tpm2)
@Command('write_protect')
def EnableFwWp(options):
  """Enable then verify firmware write protection.
  Protects main (AP) firmware, the FPMCU if present, and EC/PD firmware.
  """
  del options  # Unused.
  def WriteProtect(fw):
    """Calculate protection size, then invoke flashrom.
    The region (offset and size) to write protect may be different per chipset
    and firmware layout, so we have to read the WP_RO section from FMAP to
    decide that.
    """
    wp_section = 'WP_RO'
    # Main firmware needs the FMAP section requested explicitly; for other
    # targets no section filter is passed.
    fmap_image = fw.GetFirmwareImage(
        sections=(['FMAP'] if fw.target == crosfw.TARGET_MAIN else None))
    if not fmap_image.has_section(wp_section):
      raise Error('Could not find %s firmware section: %s' %
                  (fw.target.upper(), wp_section))
    # Only the first two fields of the section area (offset, size) are used.
    section_data = fw.GetFirmwareImage(
        sections=[wp_section]).get_section_area(wp_section)
    ro_offset, ro_size = section_data[0:2]
    logging.debug('write protecting %s [off=%x size=%x]', fw.target.upper(),
                  ro_offset, ro_size)
    crosfw.Flashrom(fw.target).EnableWriteProtection(ro_offset, ro_size)
  if HasFpmcu():
    # TODO(b/143991572): Implement enable_fpmcu_write_protection in gooftool.
    cmd = os.path.join(
        paths.FACTORY_DIR, 'sh', 'enable_fpmcu_write_protection.sh')
    cmd_result = Shell(cmd)
    if not cmd_result.success:
      raise Error(
          'Failed to enable FPMCU write protection, stdout=%r, stderr=%r' %
          (cmd_result.stdout, cmd_result.stderr))
  WriteProtect(crosfw.LoadMainFirmware())
  event_log.Log('wp', fw='main')
  # Some EC (mostly PD) does not support "RO_NOW". Instead they will only set
  # "RO_AT_BOOT" when you request to enable RO (These platforms consider
  # --wp-range with right range identical to --wp-enable), and requires a
  # 'ectool reboot_ec RO at-shutdown; reboot' to let the RO take effect.
  # After reboot, "flashrom -p host --wp-status" will return protected range.
  # If you don't reboot, returned range will be (0, 0), and running command
  # "ectool flashprotect" will not have RO_NOW.
  # generic_common.test_list.json provides "EnableECWriteProtect" test group
  # which can be run individually before finalization. Try that out if you're
  # having trouble enabling RO_NOW flag.
  for fw in [crosfw.LoadEcFirmware(), crosfw.LoadPDFirmware()]:
    if fw.GetChipId() is None:
      # No flash chip of this kind on the board; skip it with a warning.
      logging.warning('%s not write protected (seems there is no %s flash).',
                      fw.target.upper(), fw.target.upper())
      continue
    WriteProtect(fw)
    event_log.Log('wp', fw=fw.target)
@Command('clear_gbb_flags')
def ClearGBBFlags(options):
  """Zero out the GBB flags, in preparation for transition to release state.
  No GBB flags are set in release/shipping state, but they are useful
  for factory/development. See "futility gbb --flags" for details.
  """
  GetGooftool(options).ClearGBBFlags()
  event_log.Log('clear_gbb_flags')
@Command('clear_factory_vpd_entries')
def ClearFactoryVPDEntries(options):
  """Clears factory.* items in the RW VPD."""
  entries = GetGooftool(options).ClearFactoryVPDEntries()
  # Record which entries were removed (filtered for sensitive values).
  event_log.Log('clear_factory_vpd_entries', entries=FilterDict(entries))
@Command('generate_stable_device_secret')
def GenerateStableDeviceSecret(options):
  """Generates a fresh stable device secret and stores it in the RO VPD."""
  GetGooftool(options).GenerateStableDeviceSecret()
  event_log.Log('generate_stable_device_secret')
@Command('cr50_set_ro_hash')
def Cr50SetROHash(options):
  """Set the RO hash on the Cr50 chip and log the event."""
  GetGooftool(options).Cr50SetROHash()
  event_log.Log('cr50_set_ro_hash')
@Command('cr50_set_sn_bits_and_board_id',
         _rma_mode_cmd_arg)
def Cr50SetSnBitsAndBoardId(options):
  """Deprecated: use Cr50WriteFlashInfo instead."""
  logging.warning('This function is renamed to Cr50WriteFlashInfo')
  Cr50WriteFlashInfo(options)
@Command('cr50_write_flash_info', _rma_mode_cmd_arg, _mlb_mode_cmd_arg,
         _enable_zero_touch_cmd_arg)
def Cr50WriteFlashInfo(options):
  """Set the serial number bits, board id and flags on the Cr50 chip."""
  GetGooftool(options).Cr50WriteFlashInfo(
      enable_zero_touch=options.enable_zero_touch, rma_mode=options.rma_mode,
      mlb_mode=options.mlb_mode)
  event_log.Log('cr50_write_flash_info')
@Command('cr50_write_whitelabel_flags', _enable_zero_touch_cmd_arg,
         _rma_mode_cmd_arg)
def Cr50WriteWhitelabelFlags(options):
  """Call this function to set the cr50 fields in SMT stage.
  This is required if the MLB will leave factory after SMT stage, such as RMA
  spare boards, local OEM projects.
  """
  # Same as Cr50WriteFlashInfo, but MLB mode is forced on.
  GetGooftool(options).Cr50WriteFlashInfo(
      enable_zero_touch=options.enable_zero_touch, rma_mode=options.rma_mode,
      mlb_mode=True)
  event_log.Log('cr50_write_whitelabel_flags')
@Command('cr50_disable_factory_mode')
def Cr50DisableFactoryMode(options):
  """Reset Cr50 state back to default state after RMA."""
  return GetGooftool(options).Cr50DisableFactoryMode()
@Command('cr50_finalize', _no_write_protect_cmd_arg, _rma_mode_cmd_arg,
         _mlb_mode_cmd_arg, _enable_zero_touch_cmd_arg, _use_generic_tpm2_arg)
def Cr50Finalize(options):
  """Finalize steps for cr50."""
  # RO hash is only set in the normal flow: with write protection, not in
  # RMA/MLB mode, and only on Google security chips.
  if options.no_write_protect:
    logging.warning('SWWP is not enabled. Skip setting RO hash.')
  elif options.rma_mode:
    logging.warning('RMA mode. Skip setting RO hash.')
  elif options.mlb_mode:
    logging.warning('MLB mode. Skip setting RO hash.')
  elif options.use_generic_tpm2:
    logging.warning('Generic TPM2 device. Skip setting RO hash.')
  else:
    Cr50SetROHash(options)
  Cr50WriteFlashInfo(options)
  if options.mlb_mode:
    logging.warning('MLB mode. Skip disabling factory mode.')
  elif options.use_generic_tpm2:
    logging.warning('Generic TPM2 device. No need to disable factory mode.')
  else:
    Cr50DisableFactoryMode(options)
@Command('enable_release_partition',
         CmdArg('--release_rootfs',
                help=('path to the release rootfs device. If not specified, '
                      'the default (5th) partition will be used.')))
def EnableReleasePartition(options):
  """Enables a release image partition on the disk."""
  GetGooftool(options).EnableReleasePartition(options.release_rootfs)
@Command('wipe_in_place',
         CmdArg('--fast', action='store_true',
                help='use non-secure but faster wipe method.'),
         _shopfloor_url_args_cmd_arg,
         _station_ip_cmd_arg,
         _station_port_cmd_arg,
         _wipe_finish_token_cmd_arg,
         _test_umount_cmd_arg)
def WipeInPlace(options):
  """Start factory wipe directly without reboot."""
  GetGooftool(options).WipeInPlace(options.fast,
                                   options.shopfloor_url,
                                   options.station_ip,
                                   options.station_port,
                                   options.wipe_finish_token,
                                   options.test_umount)
@Command('wipe_init',
         CmdArg('--wipe_args', help='arguments for clobber-state'),
         CmdArg('--state_dev', help='path to stateful partition device'),
         CmdArg('--root_disk', help='path to primary device'),
         CmdArg('--old_root', help='path to old root'),
         _shopfloor_url_args_cmd_arg,
         _release_rootfs_cmd_arg,
         _station_ip_cmd_arg,
         _station_port_cmd_arg,
         _wipe_finish_token_cmd_arg,
         _keep_developer_mode_flag_after_clobber_state_cmd_arg,
         _test_umount_cmd_arg)
def WipeInit(options):
  """Initialize the factory wipe; all parameters are forwarded to Gooftool."""
  GetGooftool(options).WipeInit(
      options.wipe_args,
      options.shopfloor_url,
      options.state_dev,
      options.release_rootfs,
      options.root_disk,
      options.old_root,
      options.station_ip,
      options.station_port,
      options.wipe_finish_token,
      options.keep_developer_mode_flag_after_clobber_state,
      options.test_umount)
@Command(
    'verify',
    _hwid_status_list_cmd_arg,
    _hwdb_path_cmd_arg,
    _project_cmd_arg,
    _probe_results_cmd_arg,
    _hwid_cmd_arg,
    _hwid_run_vpd_cmd_arg,
    _hwid_vpd_data_file_cmd_arg,
    _no_write_protect_cmd_arg,
    _rma_mode_cmd_arg,
    _cros_core_cmd_arg,
    _has_ec_pubkey_cmd_arg,
    _ec_pubkey_path_cmd_arg,
    _ec_pubkey_hash_cmd_arg,
    _release_rootfs_cmd_arg,
    _firmware_path_cmd_arg,
    _enforced_release_channels_cmd_arg,
    _waive_list_cmd_arg,
    _skip_list_cmd_arg,
    _no_ectool_cmd_arg,
    _enable_zero_touch_cmd_arg,
    _cbi_eeprom_wp_status_cmd_arg,
    # Fix: this command reads options.use_generic_tpm2 below, but the argument
    # was not declared here (only on `finalize`), so running `gooftool verify`
    # standalone would fail with an AttributeError.
    _use_generic_tpm2_arg,
)
def Verify(options):
  """Verifies if whole factory process is ready for finalization.
  This routine performs all the necessary checks to make sure the
  device is ready to be finalized, but does not modify state. These
  checks include dev switch, firmware write protection switch, hwid,
  system time, keys, and root file system.
  """
  if not options.no_write_protect:
    VerifyWPSwitch(options)
    VerifyManagementEngineLocked(options)
  if not options.use_generic_tpm2:
    # Verify this after EnableFwWp for use_generic_tpm2
    VerifyCBIEEPROMWPStatus(options)
  VerifyHWID(options)
  VerifySystemTime(options)
  if options.has_ec_pubkey:
    VerifyECKey(options)
  if HasFpmcu():
    VerifyFpKey(options)
  VerifyKeys(options)
  VerifyRootFs(options)
  VerifyTPM(options)
  VerifyVPD(options)
  VerifyReleaseChannel(options)
  VerifyCrosConfig(options)
  VerifySnBits(options)
@Command('untar_stateful_files')
def UntarStatefulFiles(unused_options):
  """Untars stateful files from stateful_files.tar.xz on stateful partition.
  If that file does not exist (which should only be R30 and earlier),
  this is a no-op.
  """
  # Path to stateful partition on device.
  device_stateful_path = '/mnt/stateful_partition'
  tar_file = os.path.join(device_stateful_path, 'stateful_files.tar.xz')
  if os.path.exists(tar_file):
    Spawn(['tar', 'xf', tar_file], cwd=device_stateful_path,
          log=True, check_call=True)
  else:
    logging.warning('No stateful files at %s', tar_file)
@Command('log_source_hashes')
def LogSourceHashes(options):
  """Logs hashes of source files in the factory toolkit."""
  del options  # Unused.
  # WARNING: The following line is necessary to validate the integrity
  # of the factory software. Do not remove or modify it.
  #
  # Warning: this line validates the integrity of the factory software;
  # do not remove or modify it.
  factory_par = sys_utils.GetRunningFactoryPythonArchivePath()
  if factory_par:
    event_log.Log(
        'source_hashes',
        **file_utils.HashPythonArchive(factory_par))
  else:
    # Not running from a .par archive: hash the source tree instead.
    event_log.Log(
        'source_hashes',
        **file_utils.HashSourceTree(os.path.join(paths.FACTORY_DIR, 'py')))
@Command('log_system_details')
def LogSystemDetails(options):
  """Write miscellaneous system details to the event log."""
  event_log.Log('system_details', **GetGooftool(options).GetSystemDetails())
def CreateReportArchiveBlob(*args, **kwargs):
  """Creates a report archive and returns it as a blob.
  Args:
    See CreateReportArchive.
  Returns:
    An xmlrpc.client.Binary object containing a .tar.xz file.
  """
  report_archive = CreateReportArchive(*args, **kwargs)
  try:
    return xmlrpc.client.Binary(
        file_utils.ReadFile(report_archive, encoding=None))
  finally:
    # Always remove the temporary archive, even if reading it fails.
    os.unlink(report_archive)
def CreateReportArchive(device_sn=None, add_file=None):
  """Creates a report archive in a temporary directory.
  Args:
    device_sn: The device serial number (optional).
    add_file: A list of absolute paths of extra files to add (optional).
  Returns:
    Path to the archive.
  Raises:
    Error: if an extra file is missing or not absolute, or if tar fails.
  """
  # Flush Testlog data to DATA_TESTLOG_DIR before creating a report archive.
  result, reason = state.GetInstance().FlushTestlog(
      uplink=False, local=True, timeout=10)
  if not result:
    logging.warning('Failed to flush testlog data: %s', reason)
  def NormalizeAsFileName(token):
    # Strip any character that is unsafe inside a file name.
    return re.sub(r'\W+', '', token).strip()
  target_name = '%s%s.tar.xz' % (
      time.strftime('%Y%m%dT%H%M%SZ',
                    time.gmtime()),
      ('' if device_sn is None else
       '_' + NormalizeAsFileName(device_sn)))
  target_path = os.path.join(gettempdir(), target_name)
  # Intentionally ignoring dotfiles in EVENT_LOG_DIR.
  # Fix: quote all shell-interpolated paths. Unquoted paths would break (or
  # allow injection) if they contained spaces or shell metacharacters. The
  # glob '*' must stay unquoted so the shell expands it.
  tar_cmd = 'cd %s ; tar cJf %s * -C /' % (
      pipes.quote(event_log.EVENT_LOG_DIR), pipes.quote(target_path))
  tar_files = [paths.FACTORY_LOG_PATH, paths.DATA_TESTLOG_DIR]
  if add_file:
    tar_files = tar_files + add_file
  for f in tar_files:
    # Require absolute paths since we use -C / to change current directory to
    # root.
    if not f.startswith('/'):
      raise Error('Not an absolute path: %s' % f)
    if not os.path.exists(f):
      raise Error('File does not exist: %s' % f)
    tar_cmd += ' %s' % pipes.quote(f[1:])
  cmd_result = Shell(tar_cmd)
  if cmd_result.status == 1:
    # tar returns 1 when some files were changed during archiving,
    # but that is expected for log files so should ignore such failure
    # if the archive looks good.
    Spawn(['tar', 'tJf', target_path], check_call=True, log=True,
          ignore_stdout=True)
  elif not cmd_result.success:
    raise Error('unable to tar event logs, cmd %r failed, stderr: %r' %
                (tar_cmd, cmd_result.stderr))
  return target_path
# Command-line arguments shared by `upload_report` and `finalize`.
_upload_method_cmd_arg = CmdArg(
    '--upload_method', metavar='METHOD:PARAM',
    help=('How to perform the upload. METHOD should be one of '
          '{ftp, shopfloor, ftps, cpfe, smb}.'))
_upload_max_retry_times_arg = CmdArg(
    '--upload_max_retry_times', type=int, default=0,
    help='Number of tries to upload. 0 to retry infinitely.')
_upload_retry_interval_arg = CmdArg(
    '--upload_retry_interval', type=int, default=None,
    help='Retry interval in seconds.')
_upload_allow_fail_arg = CmdArg(
    '--upload_allow_fail', action='store_true',
    help='Continue finalize if report upload fails.')
_add_file_cmd_arg = CmdArg(
    '--add_file', metavar='FILE', action='append',
    help='Extra file to include in report (must be an absolute path)')
@Command('upload_report',
         _upload_method_cmd_arg,
         _upload_max_retry_times_arg,
         _upload_retry_interval_arg,
         _upload_allow_fail_arg,
         _add_file_cmd_arg)
def UploadReport(options):
  """Create a report containing key device details."""
  ro_vpd = vpd.VPDTool().GetAllData(partition=vpd.VPD_READONLY_PARTITION_NAME)
  device_sn = ro_vpd.get('serial_number', None)
  if device_sn is None:
    logging.warning('RO_VPD missing device serial number')
    # Fall back to a unique placeholder so the report name stays unique.
    device_sn = 'MISSING_SN_' + time_utils.TimedUUID()
  target_path = CreateReportArchive(device_sn, options.add_file)
  if options.upload_method is None or options.upload_method == 'none':
    logging.warning('REPORT UPLOAD SKIPPED (report left at %s)', target_path)
    return
  # --upload_method is 'METHOD:PARAM'. Split only on the first colon since
  # PARAM may itself contain colons (e.g. URLs).
  method, param = options.upload_method.split(':', 1)
  if options.upload_retry_interval is not None:
    retry_interval = options.upload_retry_interval
  else:
    retry_interval = report_upload.DEFAULT_RETRY_INTERVAL
  if method == 'shopfloor':
    report_upload.ShopFloorUpload(
        target_path, param,
        'GRT' if options.command_name == 'finalize' else None,
        max_retry_times=options.upload_max_retry_times,
        retry_interval=retry_interval,
        allow_fail=options.upload_allow_fail)
  elif method == 'ftp':
    report_upload.FtpUpload(target_path, 'ftp:' + param,
                            max_retry_times=options.upload_max_retry_times,
                            retry_interval=retry_interval,
                            allow_fail=options.upload_allow_fail)
  elif method == 'ftps':
    report_upload.CurlUrlUpload(target_path, '--ftp-ssl-reqd ftp:%s' % param,
                                max_retry_times=options.upload_max_retry_times,
                                retry_interval=retry_interval,
                                allow_fail=options.upload_allow_fail)
  elif method == 'cpfe':
    report_upload.CpfeUpload(target_path, pipes.quote(param),
                             max_retry_times=options.upload_max_retry_times,
                             retry_interval=retry_interval,
                             allow_fail=options.upload_allow_fail)
  elif method == 'smb':
    # param should be in form: <dest_path>.
    report_upload.SmbUpload(target_path, 'smb:' + param,
                            max_retry_times=options.upload_max_retry_times,
                            retry_interval=retry_interval,
                            allow_fail=options.upload_allow_fail)
  else:
    raise Error('unknown report upload method %r' % method)
@Command(
    'finalize',
    CmdArg('--fast', action='store_true',
           help='use non-secure but faster wipe method.'),
    _no_ectool_cmd_arg,
    _shopfloor_url_args_cmd_arg,
    _hwdb_path_cmd_arg,
    _hwid_status_list_cmd_arg,
    _upload_method_cmd_arg,
    _upload_max_retry_times_arg,
    _upload_retry_interval_arg,
    _upload_allow_fail_arg,
    _add_file_cmd_arg,
    _probe_results_cmd_arg,
    _hwid_cmd_arg,
    _hwid_run_vpd_cmd_arg,
    _hwid_vpd_data_file_cmd_arg,
    _no_write_protect_cmd_arg,
    _rma_mode_cmd_arg,
    _mlb_mode_cmd_arg,
    _cros_core_cmd_arg,
    _has_ec_pubkey_cmd_arg,
    _ec_pubkey_path_cmd_arg,
    _ec_pubkey_hash_cmd_arg,
    _release_rootfs_cmd_arg,
    _firmware_path_cmd_arg,
    _enforced_release_channels_cmd_arg,
    _station_ip_cmd_arg,
    _station_port_cmd_arg,
    _wipe_finish_token_cmd_arg,
    _rlz_embargo_end_date_offset_cmd_arg,
    _waive_list_cmd_arg,
    _skip_list_cmd_arg,
    _no_generate_mfg_date_cmd_arg,
    _enable_zero_touch_cmd_arg,
    _cbi_eeprom_wp_status_cmd_arg,
    _use_generic_tpm2_arg,
)
def Finalize(options):
  """Verify system readiness and trigger transition into release state.
  This routine does the following:
  - Verifies system state (see verify command)
  - Untars stateful_files.tar.xz, if it exists, in the stateful partition, to
    initialize files such as the CRX cache
  - Modifies firmware bitmaps to match locale
  - Clears all factory-friendly flags from the GBB
  - Removes factory-specific entries from RW_VPD (factory.*)
  - Enables firmware write protection (cannot rollback after this)
  - Initialize Fpmcu entropy
  - Uploads system logs & reports
  - Wipes the testing kernel, rootfs, and stateful partition
  """
  if options.mlb_mode:
    # MLB mode only do cr50 finalize.
    Cr50Finalize(options)
    LogSourceHashes(options)
    LogSystemDetails(options)
    UploadReport(options)
    return
  if not options.rma_mode:
    # Write VPD values related to RLZ ping into VPD.
    GetGooftool(options).WriteVPDForRLZPing(options.embargo_offset)
  if options.generate_mfg_date:
    GetGooftool(options).WriteVPDForMFGDate()
  Cr50Finalize(options)
  Verify(options)
  LogSourceHashes(options)
  UntarStatefulFiles(options)
  if options.cros_core:
    logging.info('SetFirmwareBitmapLocale is skipped for ChromeOS Core device.')
  else:
    SetFirmwareBitmapLocale(options)
  ClearFactoryVPDEntries(options)
  GenerateStableDeviceSecret(options)
  ClearGBBFlags(options)
  if options.no_write_protect:
    logging.warning('WARNING: Firmware Write Protection is SKIPPED.')
    event_log.Log('wp', fw='both', status='skipped')
  else:
    EnableFwWp(options)
    # For generic TPM2 devices the CBI EEPROM WP check is deferred until
    # after write protection is enabled (it is skipped inside Verify).
    if options.use_generic_tpm2:
      VerifyCBIEEPROMWPStatus(options)
  FpmcuInitializeEntropy(options)
  LogSystemDetails(options)
  UploadReport(options)
  event_log.Log('wipe_in_place')
  wipe_args = []
  # Forward only the wipe-related options that were actually provided.
  if options.shopfloor_url:
    wipe_args += ['--shopfloor_url', options.shopfloor_url]
  if options.fast:
    wipe_args += ['--fast']
  if options.station_ip:
    wipe_args += ['--station_ip', options.station_ip]
  if options.station_port:
    wipe_args += ['--station_port', options.station_port]
  if options.wipe_finish_token:
    wipe_args += ['--wipe_finish_token', options.wipe_finish_token]
  ExecFactoryPar('gooftool', 'wipe_in_place', *wipe_args)
@Command('verify_hwid',
         _probe_results_cmd_arg,
         _hwdb_path_cmd_arg,
         _hwid_cmd_arg,
         _hwid_run_vpd_cmd_arg,
         _hwid_vpd_data_file_cmd_arg,
         _rma_mode_cmd_arg)
def VerifyHWID(options):
  """A simple wrapper that calls out to HWID utils to verify version 3 HWID.
  This is mainly for Gooftool to verify v3 HWID during finalize. For testing
  and development purposes, please use `hwid` command.
  """
  database = GetGooftool(options).db
  # Use the HWID string from the command line if given; otherwise read the
  # one currently stored on the device.
  encoded_string = options.hwid or GetGooftool(options).ReadHWID()
  probed_results = hwid_utils.GetProbedResults(infile=options.probe_results)
  device_info = hwid_utils.GetDeviceInfo()
  vpd_data = hwid_utils.GetVPDData(run_vpd=options.hwid_run_vpd,
                                   infile=options.hwid_vpd_data_file)
  event_log.Log('probed_results', probed_results=FilterDict(probed_results))
  hwid_utils.VerifyHWID(database, encoded_string, probed_results,
                        device_info, vpd_data, options.rma_mode)
  event_log.Log('verified_hwid', hwid=encoded_string)
@Command('get_firmware_hash',
         CmdArg('--file', required=True, metavar='FILE', help='Firmware File.'))
def GetFirmwareHash(options):
  """Print the firmware hashes computed from the given firmware file."""
  # Guard clause: fail early with the same error as before if the file is
  # missing, then print one "key: value" line per computed hash.
  if not os.path.exists(options.file):
    raise Error('File does not exist: %s' % options.file)
  for key, value in chromeos_firmware.CalculateFirmwareHashes(
      options.file).items():
    print(' %s: %s' % (key, value))
@Command('fpmcu_initialize_entropy')
def FpmcuInitializeEntropy(options):
  """Initialize the FPMCU entropy when a fingerprint MCU is present."""
  # Early-return guard instead of if/else: boards without an FPS are a no-op.
  if not HasFpmcu():
    logging.info('No FPS on this board.')
    return
  GetGooftool(options).FpmcuInitializeEntropy()
def main():
  """Run sub-command specified by the command line args."""
  # ParseCmdline collects the global flags below plus whatever each
  # @Command-registered sub-command declared.
  options = ParseCmdline(
      'Perform Google required factory tests.',
      CmdArg('-l', '--log', metavar='PATH',
             help='Write logs to this file.'),
      CmdArg('--suppress-event-logs', action='store_true',
             help='Suppress event logging.'),
      CmdArg('--phase', default=None,
             help=('override phase for phase checking (defaults to the current '
                   'as returned by the "factory phase" command)')),
      VERBOSITY_CMD_ARG)
  SetupLogging(options.verbosity, options.log)
  event_log.SetGlobalLoggerDefaultPrefix('gooftool')
  event_log.GetGlobalLogger().suppress = options.suppress_event_logs
  logging.debug('gooftool options: %s', repr(options))
  phase.OverridePhase(options.phase)
  try:
    logging.debug('GOOFTOOL command %r', options.command_name)
    options.command(options)
    logging.info('GOOFTOOL command %r SUCCESS', options.command_name)
  except Error as e:
    # Expected gooftool errors: log the traceback and exit with a message.
    logging.exception(e)
    sys.exit('GOOFTOOL command %r ERROR: %s' % (options.command_name, e))
  except Exception as e:
    # Unexpected errors: still log and exit non-zero rather than crash.
    logging.exception(e)
    sys.exit('UNCAUGHT RUNTIME EXCEPTION %s' % e)
if __name__ == '__main__':
  main()
| StarcoderdataPython |
3328508 | from .base import *
from .request import *
from .response import *
| StarcoderdataPython |
3356118 | #!/usr/bin/env mayapy
#
# Copyright 2022 Animal Logic
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fixturesUtils
import imageUtils
import mayaUtils
import testUtils
from maya import cmds
import os
class testVP2RenderDelegateTextureLoading(imageUtils.ImageDiffingTestCase):
    """
    Test texture loading in sync and async mode.
    Sync vs async is toggled through the `mayaUsd_DisableAsyncTextureLoading`
    optionVar; the user's original value is saved in setUpClass and restored
    in tearDownClass.
    """
    @classmethod
    def setUpClass(cls):
        # Baseline images ship with the test fixtures; snapshots are written
        # to the current working directory and diffed against them.
        input_path = fixturesUtils.setUpClass(
            __file__, initializeStandalone=False, loadPlugin=False
        )
        cls._baseline_dir = os.path.join(
            input_path, "VP2RenderDelegateTextureLoadingTest", "baseline"
        )
        cls._test_dir = os.path.abspath(".")
        cls._optVarName = "mayaUsd_DisableAsyncTextureLoading"
        # Save optionVar preference
        cls._hasDisabledAsync = cmds.optionVar(exists=cls._optVarName)
        if cls._hasDisabledAsync:
            cls._prevDisableAsync = cmds.optionVar(q=cls._optVarName)
        mayaUtils.loadPlugin("mayaUsdPlugin")
        # Resume running idle tasks
        cmds.flushIdleQueue(resume=True)
    @classmethod
    def tearDownClass(cls):
        # Restore user optionVar
        if cls._hasDisabledAsync:
            cmds.optionVar(iv=(cls._optVarName, cls._prevDisableAsync))
        else:
            cmds.optionVar(remove=cls._optVarName)
    def assertSnapshotClose(self, imageName):
        # Capture a 768x768 viewport snapshot and compare it against the
        # checked-in baseline image of the same name.
        baseline_image = os.path.join(self._baseline_dir, imageName)
        snapshot_image = os.path.join(self._test_dir, imageName)
        imageUtils.snapshot(snapshot_image, width=768, height=768)
        return self.assertImagesClose(baseline_image, snapshot_image)
    def testTextureLoadingSync(self):
        cmds.file(force=True, new=True)
        # Make sure the sync mode is ON (disable async loading)
        cmds.optionVar(iv=(self._optVarName, 1))
        cmds.xform("persp", t=(2, 2, 5.8))
        cmds.xform("persp", ro=[0, 0, 0], ws=True)
        panel = mayaUtils.activeModelPanel()
        cmds.modelEditor(panel, edit=True, lights=False, displayLights="default")
        testFile = testUtils.getTestScene("multipleMaterialsAssignment",
                                          "MultipleMaterialsAssignment.usda")
        # Default purpose mode is "proxy"
        shapeNode, _ = mayaUtils.createProxyFromFile(testFile)
        cmds.select(cl=True)
        self.assertSnapshotClose("TextureLoading_Proxy_Sync.png")
        # Switch purpose to "render"
        cmds.setAttr("{}.drawProxyPurpose".format(shapeNode), 0)
        cmds.setAttr("{}.drawRenderPurpose".format(shapeNode), 1)
        cmds.select(cl=True)
        self.assertSnapshotClose("TextureLoading_Render_Sync.png")
    def testTextureLoadingAsync(self):
        cmds.file(force=True, new=True)
        # Make sure the async mode is ON (enable async loading)
        cmds.optionVar(iv=(self._optVarName, 0))
        cmds.xform("persp", t=(2, 2, 5.8))
        cmds.xform("persp", ro=[0, 0, 0], ws=True)
        panel = mayaUtils.activeModelPanel()
        cmds.modelEditor(panel, edit=True, lights=False, displayLights="default")
        testFile = testUtils.getTestScene("multipleMaterialsAssignment",
                                          "MultipleMaterialsAssignment.usda")
        # Default purpose mode is "proxy"
        shapeNode, _ = mayaUtils.createProxyFromFile(testFile)
        cmds.select(cl=True)
        # Force all idle tasks to finish
        cmds.flushIdleQueue()
        self.assertSnapshotClose("TextureLoading_Proxy_Async.png")
        # Switch purpose to "render"
        cmds.setAttr("{}.drawProxyPurpose".format(shapeNode), 0)
        cmds.setAttr("{}.drawRenderPurpose".format(shapeNode), 1)
        # Force all idle tasks to finish
        cmds.flushIdleQueue()
        self.assertSnapshotClose("TextureLoading_Render_Async.png")
| StarcoderdataPython |
90509 | from collections import defaultdict
from nertoolkit.geneontology.GeneOntology import GeneOntology
from synonymes.Synonym import Synonym
from synonymes.SynonymUtils import handleCommonExcludeWords
from utils.idutils import dataDir, loadExludeWords, printToFile, speciesName2TaxID
# Build a Synonym entry (term ID + primary name + all OBO synonyms) for every
# term in the neutrophils ontology, then filter out overly common words and
# write the synonym file.
# Fix: removed stray dataset-contamination text fused to the final line
# (which broke the file's syntax) and dropped unused locals.
celloObo = GeneOntology(dataDir + "miRExplore/textmine/neutrophils.obo")
vAllSyns = []
for cellID in celloObo.dTerms:
    oboNode = celloObo.dTerms[cellID]
    newSyn = Synonym(oboNode.id)
    newSyn.addSyn(oboNode.name)
    if oboNode.synonym is not None:
        for oboSyn in oboNode.synonym:
            newSyn.addSyn(oboSyn.syn)
    print(newSyn)
    vAllSyns.append(newSyn)
# Remove the most common/ambiguous words before writing the synonym file.
globalKeywordExcludes = loadExludeWords()
vPrintSyns = handleCommonExcludeWords(vAllSyns, globalKeywordExcludes, mostCommonCount=10, maxCommonCount=5)
printToFile(vPrintSyns, dataDir + "/miRExplore/textmine/synonyms/neutrophils.syn")
1733244 | <reponame>gammasky/cta-dc
"""
Make an HDU and observation index tables for the CTA 1DC dataset.
Format is described here:
http://gamma-astro-data-formats.readthedocs.io/en/latest/data_storage/index.html
"""
from collections import OrderedDict
import logging
from glob import glob
from pathlib import Path
import subprocess
from astropy.io import fits
from astropy.table import Table
from astropy.table import vstack as table_vstack
from astropy.coordinates import SkyCoord
log = logging.getLogger()
BASE_PATH = Path('1dc/1dc')
def write_fits_gz(table, path):
    """Write BinTableHDU to fits.gz in a reproducible way.
    Writing to `.fits.gz` with Astropy directly never gave
    reproducible files, probably because in the background
    some date string was inserted in the zip file.
    So this helper function first write to `.fits` and then
    calls `gzip` with options that avoid the issue.
    """
    log.info(f'Writing {path}')
    table.writeto(str(path), overwrite=True)
    # Fix: use list-form args without a shell. The previous shell=True
    # f-string was unsafe for paths containing spaces/metacharacters, and
    # subprocess.call silently ignored a non-zero gzip exit code.
    cmd = ['gzip', '-f', '-n', str(path)]
    log.info(f'Executing: {" ".join(cmd)}')
    subprocess.run(cmd, check=True)
def get_events_file_info(filename):
    """Extract one observation-index row from the EVENTS header of `filename`.
    Returns an OrderedDict whose keys follow the GADF obs-index column names,
    plus EVENTS_FILENAME / EVENT_COUNT bookkeeping fields.
    """
    # Fix: log the actual filename (was a broken '(unknown)' placeholder).
    log.debug(f'Reading {filename}')
    # Fix: close the FITS file when done (fits.open without a context manager
    # leaked the file handle).
    with fits.open(filename) as hdu_list:
        header = hdu_list['EVENTS'].header
        event_count = header['NAXIS2']
    info = OrderedDict()
    info['OBS_ID'] = header['OBS_ID']
    info['RA_PNT'] = header['RA_PNT']
    info['DEC_PNT'] = header['DEC_PNT']
    # Derive Galactic pointing coordinates from the equatorial ones.
    pos = SkyCoord(info['RA_PNT'], info['DEC_PNT'], unit='deg').galactic
    info['GLON_PNT'] = pos.l.deg
    info['GLAT_PNT'] = pos.b.deg
    info['ZEN_PNT'] = 90 - float(header['ALT_PNT'])
    info['ALT_PNT'] = header['ALT_PNT']
    info['AZ_PNT'] = header['AZ_PNT']
    info['ONTIME'] = header['ONTIME']
    info['LIVETIME'] = header['LIVETIME']
    info['DEADC'] = header['DEADC']
    info['TSTART'] = header['TSTART']
    info['TSTOP'] = header['TSTOP']
    info['DATE-OBS'] = header['DATE_OBS']
    info['TIME-OBS'] = header['TIME_OBS']
    info['DATE-END'] = header['DATE_END']
    info['TIME-END'] = header['TIME_END']
    info['N_TELS'] = header['N_TELS']
    info['OBJECT'] = header['OBJECT']
    info['CALDB'] = header['CALDB']
    info['IRF'] = header['IRF']
    # Not part of the spec, but good to know from which file the info comes
    info['EVENTS_FILENAME'] = filename
    info['EVENT_COUNT'] = event_count
    # Candidate extra columns, currently disabled:
    # info['EVENT_TIME_MIN'] = events['TIME'].min()
    # info['EVENT_TIME_MAX'] = events['TIME'].max()
    # info['EVENT_ENERGY_MIN'] = events['ENERGY'].min()
    # info['EVENT_ENERGY_MAX'] = events['ENERGY'].max()
    # gti = Table.read(filename, hdu='GTI')
    # info['GTI_START'] = gti['START'][0]
    # info['GTI_STOP'] = gti['STOP'][0]
    return info
class ObservationDefinition:
    """Helper class to create rows for the HDU index table.

    ``data`` must contain exactly the keys ``obs_id``, ``dataset`` and
    ``irf``; anything else is rejected so that typos are caught early.
    """

    # The only keys `data` may contain.
    _EXPECTED_KEYS = frozenset({'obs_id', 'dataset', 'irf'})

    def __init__(self, data):
        keys = set(data.keys())
        if keys != self._EXPECTED_KEYS:
            # Fix: report exactly what was received and what was expected
            # instead of the former opaque 'No no no...' message.
            raise ValueError(
                f'Invalid observation definition keys: got {sorted(keys)}, '
                f'expected {sorted(self._EXPECTED_KEYS)}'
            )
        self.data = data

    def make_hdu_index_rows(self):
        """Yield one HDU index row (dict) per HDU of this observation."""
        yield self.make_hdu_index_entry_events()
        yield self.make_hdu_index_entry_gti()
        yield self.make_hdu_index_entry_aeff()
        yield self.make_hdu_index_entry_edisp()
        yield self.make_hdu_index_entry_psf()
        yield self.make_hdu_index_entry_bkg()

    def _make_irf_entry(self, hdu_type, hdu_class, hdu_name):
        """Common shape of the four IRF rows (aeff/edisp/psf/bkg)."""
        return dict(
            OBS_ID=self.data['obs_id'],
            HDU_TYPE=hdu_type,
            HDU_CLASS=hdu_class,
            FILE_DIR=self.irf_dir,
            FILE_NAME=self.irf_filename,
            HDU_NAME=hdu_name,
        )

    def make_hdu_index_entry_events(self):
        return dict(
            OBS_ID=self.data['obs_id'],
            HDU_TYPE='events',
            HDU_CLASS='events',
            FILE_DIR=self.events_dir,
            FILE_NAME=self.events_filename,
            HDU_NAME='EVENTS',
        )

    def make_hdu_index_entry_gti(self):
        return dict(
            OBS_ID=self.data['obs_id'],
            HDU_TYPE='gti',
            HDU_CLASS='gti',
            FILE_DIR=self.events_dir,
            FILE_NAME=self.events_filename,
            HDU_NAME='GTI',
        )

    def make_hdu_index_entry_aeff(self):
        return self._make_irf_entry('aeff', 'aeff_2d', 'EFFECTIVE AREA')

    def make_hdu_index_entry_edisp(self):
        return self._make_irf_entry('edisp', 'edisp_2d', 'ENERGY DISPERSION')

    def make_hdu_index_entry_psf(self):
        return self._make_irf_entry('psf', 'psf_3gauss',
                                    'POINT SPREAD FUNCTION')

    def make_hdu_index_entry_bkg(self):
        return self._make_irf_entry('bkg', 'bkg_3d', 'BACKGROUND')

    @property
    def base_dir(self):
        """Base dir for all files, what Jürgen calls CTADATA.

        We use relative paths instead of relying on that env variable.
        """
        return '../..'

    @property
    def events_dir(self):
        return self.base_dir + '/data/baseline/' + self.data['dataset']

    @property
    def events_filename(self):
        # OBS_ID is zero-padded to six digits in 1DC file names.
        return '{}_baseline_{:06d}.fits'.format(
            self.data['dataset'],
            self.data['obs_id'],
        )

    @property
    def irf_dir(self):
        return self.base_dir + '/caldb/data/cta/1dc/bcf/' + self.data['irf']

    @property
    def irf_filename(self):
        return 'irf_file.fits'
def add_provenance(meta):
    """Stamp the table metadata with the data-release provenance."""
    meta.update(observer='CTA first data challenge (1DC)')
def make_observation_index_table(dataset, out_dir, max_rows=-1, progress_bar=True):
    """
    Make observation index table.
    Format: http://gamma-astro-data-formats.readthedocs.io/en/latest/data_storage/obs_index/index.html

    Parameters
    ----------
    dataset : str
        Dataset name; selects ``data/baseline/<dataset>/*.fits`` under BASE_PATH.
    out_dir : pathlib.Path
        Output directory; the table is written as ``obs-index.fits.gz``.
    max_rows : int
        If > 0, only the first ``max_rows`` event files are processed (debug aid).
    progress_bar : bool
        Show a tqdm progress bar while scanning the event files.
    """
    log.info(f'Gathering observation index info from events for: {dataset}')
    glob_pattern = str(BASE_PATH / f'data/baseline/{dataset}/*.fits')
    log.debug(f'glob pattern: {glob_pattern}')
    # Sort for deterministic row order across runs.
    filenames = sorted(glob(glob_pattern))
    log.debug(f'Number of files matching: {len(filenames)}')
    if max_rows > 0:
        log.warning(f'Selecting subset of observations: first {max_rows}')
        filenames = filenames[:max_rows]
    if progress_bar:
        # Imported lazily so tqdm is only required when a progress bar is wanted.
        from tqdm import tqdm
        filenames = tqdm(filenames)
    # One row (dict of header info) per event file.
    rows = []
    for filename in filenames:
        row = get_events_file_info(filename)
        rows.append(row)
    names = list(rows[0].keys())
    obs_table = Table(rows=rows, names=names)
    # Attach the units required by the GADF observation index spec.
    obs_table['RA_PNT'].unit = 'deg'
    obs_table['DEC_PNT'].unit = 'deg'
    obs_table['GLON_PNT'].unit = 'deg'
    obs_table['GLAT_PNT'].unit = 'deg'
    obs_table['ZEN_PNT'].unit = 'deg'
    obs_table['ALT_PNT'].unit = 'deg'
    obs_table['AZ_PNT'].unit = 'deg'
    obs_table['ONTIME'].unit = 's'
    obs_table['LIVETIME'].unit = 's'
    obs_table['TSTART'].unit = 's'
    obs_table['TSTOP'].unit = 's'
    meta = obs_table.meta
    add_provenance(meta)
    meta['dataset'] = dataset
    # Values copied from one of the EVENTS headers
    # Should be the same for all CTA files
    meta['MJDREFI'] = 51544
    meta['MJDREFF'] = 5.0000000000E-01
    meta['TIMEUNIT'] = 's'
    meta['TIMESYS'] = 'TT'
    meta['TIMEREF'] = 'LOCAL'
    # GADF index header keywords.
    meta['HDUCLASS'] = 'GADF'
    meta['HDUDOC'] = 'https://github.com/open-gamma-ray-astro/gamma-astro-data-formats'
    meta['HDUVERS'] = '0.2'
    meta['HDUCLAS1'] = 'INDEX'
    meta['HDUCLAS2'] = 'OBS'
    path = out_dir / 'obs-index.fits'
    hdu = fits.BinTableHDU(obs_table)
    hdu.name = 'OBS_INDEX'
    # write_fits_gz appends the .gz suffix when compressing.
    write_fits_gz(hdu, path)
def make_hdu_index_table(dataset, out_dir, max_rows=-1):
    """Make HDU index table.

    This is not a general solution, it has some hard-coded stuff
    and needs the observation index file (``obs-index.fits.gz``)
    to be there already.

    Parameters
    ----------
    dataset : str
        Dataset name, copied into each row and the table metadata.
    out_dir : pathlib.Path
        Directory containing the observation index; receives ``hdu-index.fits.gz``.
    max_rows : int
        If > 0, only the first ``max_rows`` observations are indexed.
    """
    filename = out_dir / 'obs-index.fits.gz'
    # Fix: the f-string had lost its placeholder and logged a literal
    # "(unknown)" instead of the file actually being read.
    log.info(f'Reading {filename}')
    obs_table = Table.read(filename)
    if max_rows > 0:
        log.warning(f'Selecting subset of observations: first {max_rows}')
        obs_table = obs_table[:max_rows]
    # Expand each observation into one index row per HDU
    # (events, gti, aeff, edisp, psf, bkg).
    rows = []
    for obs_table_row in obs_table:
        obs_def = ObservationDefinition(data=dict(
            dataset=dataset,
            obs_id=obs_table_row['OBS_ID'],
            irf=obs_table_row['IRF'],
        ))
        rows.extend(obs_def.make_hdu_index_rows())
    names = ['OBS_ID', 'HDU_TYPE', 'HDU_CLASS', 'FILE_DIR', 'FILE_NAME', 'HDU_NAME']
    hdu_table = Table(rows=rows, names=names)
    meta = hdu_table.meta
    add_provenance(meta)
    meta['dataset'] = dataset
    # GADF index header keywords.
    meta['HDUCLASS'] = 'GADF'
    meta['HDUDOC'] = 'https://github.com/open-gamma-ray-astro/gamma-astro-data-formats'
    meta['HDUVERS'] = '0.2'
    meta['HDUCLAS1'] = 'INDEX'
    meta['HDUCLAS2'] = 'HDU'
    path = out_dir / 'hdu-index.fits'
    hdu = fits.BinTableHDU(hdu_table)
    hdu.name = 'HDU_INDEX'
    write_fits_gz(hdu, path)
def make_concatenated_index_files():
    """Make index files for all observations combined.

    Stacks the per-dataset obs/hdu index tables into ``index/all/``;
    must run after the per-dataset indexes have been written.
    """
    datasets = ['agn', 'egal', 'gc', 'gps']
    (BASE_PATH / 'index/all').mkdir(exist_ok=True)
    # Concatenate the per-dataset observation indexes into one table.
    table = table_vstack([
        Table.read(BASE_PATH / 'index' / dataset / 'obs-index.fits.gz')
        for dataset in datasets
    ], metadata_conflicts='silent')
    # table.meta = OrderedDict()
    add_provenance(table.meta)
    table.meta['dataset'] = 'all'
    path = BASE_PATH / 'index/all/obs-index.fits'
    hdu = fits.BinTableHDU(table)
    hdu.name = 'OBS_INDEX'
    write_fits_gz(hdu, path)
    # Same for the per-dataset HDU indexes.
    table = table_vstack([
        Table.read(BASE_PATH / 'index' / dataset / 'hdu-index.fits.gz')
        for dataset in datasets
    ], metadata_conflicts='silent')
    # table.meta = OrderedDict()
    add_provenance(table.meta)
    table.meta['dataset'] = 'all'
    path = BASE_PATH / 'index/all/hdu-index.fits'
    hdu = fits.BinTableHDU(table)
    hdu.name = 'HDU_INDEX'
    write_fits_gz(hdu, path)
def make_tarball():
    """Make index file tarball, ready for upload to CTA server."""
    # NOTE(review): after 'cd 1dc' the archive member path '1dc/index' would
    # resolve to '1dc/1dc/index' — confirm the intended directory layout.
    cmd = 'cd 1dc; tar zcf index.tar.gz 1dc/index'
    log.info(f'Executing: {cmd}')
    subprocess.call(cmd, shell=True)
def main():
    """Build per-dataset and combined index files, then optionally tar them."""
    # Config options
    out_base_dir = BASE_PATH / 'index'
    datasets = ['agn', 'egal', 'gc', 'gps']
    max_rows = -1  # -1 means: process all observations
    progress_bar = True
    loglevel = 'INFO'
    tarball = True
    # For debugging the script, use these options:
    # out_base_dir = BASE_PATH / 'index-test'
    # datasets = ['agn', 'gps']
    # max_rows = 10
    # tarball = False
    # Execute steps
    logging.basicConfig(level=loglevel)
    for dataset in datasets:
        out_dir = out_base_dir / dataset
        if not out_dir.is_dir():
            log.info('Making directory: {}'.format(out_dir))
            out_dir.mkdir(parents=True)
        # The observation index must be written first; the HDU index reads it back.
        make_observation_index_table(dataset, out_dir, max_rows=max_rows, progress_bar=progress_bar)
        make_hdu_index_table(dataset, out_dir, max_rows=max_rows)
    make_concatenated_index_files()
    if tarball:
        make_tarball()
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1603918 | from tkinter import *
from tkinter.messagebox import *
def label(parent, text_label, x=0, y=0):
    """Create a Label with *text_label* and grid it at (x, y) in *parent*.

    Nothing is returned: ``grid()`` always returns None, so the original
    assignment of its result to an unused variable was misleading.
    """
    Label(parent, text=text_label).grid(row=x, column=y)
def text(parent, text, x=0, y=0, height=2, width=30):
    """Create a Text widget pre-filled with *text* and grid it at (x, y)."""
    widget = Text(parent, height=height, width=width)
    widget.insert(END, text)
    widget.grid(row=x, column=y)
def label_frame(parent, text_label, x=0, y=0):
    """Create a LabelFrame titled *text_label*, grid it at (x, y), return it."""
    frame = LabelFrame(parent, text=text_label, padx=5, pady=0)
    frame.grid(row=x, column=y)
    return frame
def list_box(parent, vbar=True, hbar=False, x=0, y=0, width=20, height=2,
             text_label="", selectmode="single", values=None):
    """Create a Listbox, optionally wrapped in a titled LabelFrame and with
    vertical/horizontal scrollbars, pre-filled with *values*.

    Parameters
    ----------
    vbar, hbar : bool
        Attach a vertical / horizontal scrollbar.
    text_label : str
        When non-empty, the widget set is placed inside a LabelFrame with
        this title and gridded at (x, y); the widgets themselves then use
        local coordinates (0, 0).
    values : iterable, optional
        Initial items to insert.  (Default changed from the mutable ``[]``
        anti-pattern to None; behavior for callers is unchanged.)

    Returns the Listbox widget.
    """
    local_parent = parent
    x_local = x
    y_local = y
    if text_label != "":
        label_frame = LabelFrame(parent, text=text_label, padx=5, pady=0)
        label_frame.grid(row=x, column=y)
        local_parent = label_frame
        x_local = 0
        y_local = 0
    # Idiom fix: truthiness tests instead of '== True' comparisons.
    if vbar:
        yDefilB = Scrollbar(local_parent, orient='vertical')
        yDefilB.grid(row=x_local, column=y_local + 1, sticky='ns')
    if hbar:
        xDefilB = Scrollbar(local_parent, orient='horizontal')
        xDefilB.grid(row=x_local + 1, column=y_local, sticky='ew')
    canvas = Listbox(local_parent, selectmode=selectmode, width=width, height=height)
    if vbar:
        canvas.config(yscrollcommand=yDefilB.set)
        yDefilB.config(command=canvas.yview)
    if hbar:
        canvas.config(xscrollcommand=xDefilB.set)
        xDefilB.config(command=canvas.xview)
    canvas.grid(row=x_local, column=y_local, sticky='nsew')
    if values:
        for i, value in enumerate(values):
            canvas.insert(i, value)
    return canvas
def entry(parent, title, title_value="", x=0, y=0, width_entry=30, show="", disable=False):
    """Create a labelled Entry initialised with *title_value*.

    When *title* is non-empty a "<title> :" Label is gridded at (x, y) and
    the Entry at (x, y+1); otherwise the Entry sits at (x, y).  *show*
    masks input (e.g. "*" for passwords); *disable* greys the field out.

    Returns [StringVar, Entry].
    """
    if title != "":
        Label(parent, text=title + " :").grid(row=x, column=y)
    entry_var = StringVar(parent)
    # Renamed local: the original 'entry' shadowed this function's own name.
    entry_widget = Entry(parent, textvariable=entry_var, width=width_entry)
    if show != "":
        entry_widget.config(show=show)
    if title != "":
        entry_widget.grid(row=x, column=y + 1)
    else:
        entry_widget.grid(row=x, column=y)
    entry_var.set(title_value)
    if disable:
        entry_widget.config(state='disabled')
    return [entry_var, entry_widget]
def button(parent, title, command_button, x=0, y=0, display=None):
    """Create a Button labelled *title* wired to *command_button*, grid it
    at (x, y) and return it.

    When *display* (a dict) is given, the new button is registered in it
    with value True.
    """
    # Renamed local: the original 'button' shadowed this function's own name.
    btn = Button(parent, text=title, command=command_button)
    btn.grid(row=x, column=y)
    if display is not None:  # idiom fix: identity test, not '!= None'
        display[btn] = True
    return btn
def show_hide(object, x=0, y=0, action="show"):
    """Grid *object* at (x, y) when action is "show"; remove it from the
    grid when action is "hide".  Any other action is a no-op."""
    if action == "hide":
        object.grid_forget()
    elif action == "show":
        object.grid(row=x, column=y)
def bind_evt(evt, obj, fct):
    """Bind callback *fct* to widget *obj* for a named event group.

    "click" covers button press/release plus Up/Down key presses;
    "dclick" is a left double-click.
    """
    if evt == "click":
        for sequence in ('<ButtonRelease>', '<ButtonPress>',
                         '<KeyPress-Up>', '<KeyPress-Down>'):
            obj.bind(sequence, fct)
    elif evt == "dclick":
        obj.bind('<Double-Button-1>', fct)
def check_button(parent, text_label, x=0, y=0, width=10, height=1):
    """Create a Checkbutton (on=1, off=0) gridded at (x, y).

    Returns [IntVar, Checkbutton].
    """
    state = IntVar()
    box = Checkbutton(parent, text=text_label, variable=state,
                      onvalue=1, offvalue=0, height=height, width=width)
    box.grid(row=x, column=y)
    return [state, box]
def radio_button(parent, options, text_label="", x=0, y=0, select=0):
    """Create one Radiobutton per item in *options*, laid out in a row.

    When *text_label* is non-empty the buttons are placed inside a titled
    LabelFrame gridded at (x, y).  The button whose index equals *select*
    starts selected.

    Returns [IntVar (selected index), options].
    """
    v = IntVar()
    local_parent = parent
    x_local = x
    y_local = y
    if text_label != "":
        label_frame = LabelFrame(parent, text=text_label, padx=5, pady=0)
        label_frame.grid(row=x, column=y)
        local_parent = label_frame
    # Build the widgets directly instead of assembling exec() strings,
    # which was unsafe and broke on option text containing quotes.
    for i, option in enumerate(options):
        radio = Radiobutton(local_parent, text=str(option), padx=20,
                            variable=v, value=i)
        radio.grid(row=x_local, column=y_local + i)
        if select == i:
            radio.select()
    return [v, options]
def msg(title, text, type="info", command_true=None, command_false=None):
    """Show a tkinter messagebox dialog of the given *type*.

    *type* is one of: info, warning, error, question, okcancel, yesno,
    retrycancel.  For answer-returning dialogs, when both *command_true*
    and *command_false* are supplied, the matching callable is invoked
    depending on the user's answer.

    Raises KeyError for an unknown *type*.
    """
    # Direct mapping replaces the old eval()/exec() dispatch, which was
    # unsafe with quotes in title/text, and whose range(0, len(types)-1)
    # off-by-one dropped the last entry ("retrycancel" raised KeyError).
    dialogs = {
        "info": showinfo,
        "warning": showwarning,
        "error": showerror,
        "question": askquestion,
        "okcancel": askokcancel,
        "yesno": askyesno,
        "retrycancel": askretrycancel,
    }
    show = dialogs[type]
    if command_true is not None and command_false is not None:
        # NOTE(review): askquestion returns the strings 'yes'/'no', both
        # truthy — confirm desired behaviour for type="question".
        # The original merely referenced command_true/command_false (a
        # no-op); invoking them matches the apparent intent.
        if show(title, text):
            if callable(command_true):
                command_true()
        elif callable(command_false):
            command_false()
    else:
        show(title, text)
| StarcoderdataPython |
4806177 | <gh_stars>0
# Copyright (c) 2020 <NAME> <jan.vrany (a) fit.cvut.cz>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import logging
from asyncio import sleep, CancelledError
from bricknil import attach, start
from bricknil.hub import CPlusHub
from bricknil.sensor.motor import CPlusXLMotor, CPlusLargeMotor as CPlusLMotor
from bricknil.sensor.sensor import PoweredUpHubIMUPosition, PoweredUpHubIMUAccelerometer, PoweredUpHubIMUGyro, VoltageSensor, CurrentSensor
@attach(CPlusXLMotor, name='motor_a', port=0, capabilities=[('sense_speed', 5), ('sense_load', 5), ('sense_power', 5)])
@attach(CPlusXLMotor, name='motor_b', port=1, capabilities=[('sense_speed', 5), ('sense_load', 5), ('sense_power', 5)])
@attach(CPlusLMotor, name='steering', port=2, capabilities=[('sense_pos', 5), ('sense_speed', 5), ('sense_load', 5), ('sense_power', 5)])
@attach(PoweredUpHubIMUAccelerometer, name='accel', capabilities=[('sense_grv', 5)])
@attach(PoweredUpHubIMUGyro, name='gyro', capabilities=[('sense_rot', 5)])
@attach(PoweredUpHubIMUPosition, name='position', port=99, capabilities=[('sense_pos', 5)])
@attach(VoltageSensor, name='voltage', capabilities=[('sense_l', 10)])
@attach(CurrentSensor, name='current', capabilities=[('sense_l', 10)])
class Vehicle(CPlusHub):
    """LEGO Control+ 4x4 off-roader: two XL drive motors, one L steering
    servo, plus the hub's IMU and voltage/current sensors (attached via the
    bricknil decorators above)."""

    def __init__(self, name="4x4 off-roader", query_port_info=False, ble_id=None):
        """Initialise steering/drive state; hardware setup happens in initialize()."""
        # NOTE(review): these two locals are never used — dead code or
        # intended future tuning constants?
        SteerIncrement = 10
        SpeedIncrement = 30
        super().__init__(name, query_port_info=query_port_info, ble_id=ble_id)
        # Steering servo state, in raw sensor position units.
        self.steering_angle = 0
        self.steering_target = 0
        self.steering_angle_min = 0
        self.steering_angle_max = 0
        # Guards steering_change() against reacting during calibration.
        self.steering_calibration_in_process = False
        self.motor_a_speed = 0
        self.motor_b_speed = 0

    async def get_speed(self):
        """Placeholder: always returns 0 (speed sensing not wired up yet)."""
        return 0
        # return (f_speed + r_speed) / 2

    async def set_speed(self, speed):
        """Set both drive motors to *speed* directly (no sign inversion)."""
        await self.motor_a.set_speed(speed)
        await self.motor_b.set_speed(speed)

    async def steering_change(self):
        """Sensor callback: record the latest steering servo position."""
        self.steering_angle = self.steering.sense_pos
        # steering_speed = self.steering.sense_speed
        # self.message_info(": steering pos: %s, target %s" % (self.steering_angle, self.steering_target))
        # if self.steering_calibration_in_process:
        #    return
        # diff = 0
        # if (abs(self.steering_target) - diff) <= abs(self.steering_angle) and abs(self.steering_angle) <= (abs(self.steering_target) + diff):
        #    if steering_speed != 0:
        #        await self.steering.set_speed(0)

    async def steering_calibrate(self):
        """Find the left/right mechanical stops, centre the wheels and reset
        the servo position so 0 is straight ahead."""
        async def wait_until_steering_stop():
            # Poll once per second until the reported angle stops changing.
            angle1 = 100000
            angle2 = self.steering_angle
            while angle1 != angle2:
                angle1 = angle2
                await sleep(1)
                angle2 = self.steering_angle
        await self.steering.reset_pos();
        self.steering_calibration_in_process = True
        # Drive into the right stop and record it.
        # await self.steering.set_speed(60)
        await self.steering.rotate(180, 50, 100)
        await wait_until_steering_stop()
        self.steering_angle_max = self.steering_angle
        self.message_info(": steering_calibrate - right stop: %s" % self.steering_angle_max)
        # Then into the left stop.
        # await self.steering.set_speed(-60)
        await self.steering.rotate(180,-50, 100)
        await wait_until_steering_stop()
        self.steering_angle_min = self.steering_angle
        self.message_info(": steering_calibrate - left stop: %s" % self.steering_angle_min)
        zero = int( (self.steering_angle_max + self.steering_angle_min) / 2)
        half = int( abs(self.steering_angle_max - self.steering_angle_min) / 2 )
        # Now, adjust min and max by 5% to avoid servo motor to try
        # to go beyond steering stop (which relies on hub cutting the power
        # to avoid motor damage)
        # NOTE(review): the factor is 1 (no margin), so the 5% adjustment
        # described above is currently disabled — confirm intent.
        half = int(1 * half)
        self.steering_angle_min = zero - half
        self.steering_angle_max = zero + half
        self.message_info(": steering_calibrate 1: %s (zero) %s (min) %s (max)" % (zero, self.steering_angle_min, self.steering_angle_max))
        # Centre the wheels, then make the centre the new zero reference.
        await self.steering.set_pos(zero, speed=50)
        await sleep(2)
        await self.steering.reset_pos()
        zero = 0
        self.steering_angle_min = -half
        self.steering_angle_max = +half
        self.steering_target = 0
        self.message_info(": steering_calibrate 2: %s (zero) %s (min) %s (max)" % (zero, self.steering_angle_min, self.steering_angle_max))
        await sleep(2)
        self.steering_calibration_in_process = False

    async def steer(self, pct, speed=60):
        """Steer to *pct* percent of full lock (-100..100, 0 = straight).

        Inputs below 10% are treated as dead-zone; small target changes
        (< 5 units) are ignored to avoid servo chatter.
        """
        if abs(pct) < 10:
            pct = 0
        zero = int((self.steering_angle_min + self.steering_angle_max) / 2)
        half = abs(self.steering_angle_max - zero)
        new_target = zero + int((pct / 100) * half)
        if new_target != 0 and abs(new_target - self.steering_target) < 5:
            return
        self.steering_target = zero + int((pct / 100) * half)
        #breakpoint()
        diff = abs(self.steering_target - self.steering.sense_pos)
        # NOTE(review): *speed* parameter and the proportional formula are
        # both overridden by the fixed value below — confirm intent.
        #speed = int((diff / half) * speed)
        speed = 50
        #self.message_info("steering_target = %d, speed = %d" % (self.steering_target, speed))
        print("I:steering_target = %d, speed = %d" % (self.steering_target, speed))
        await self.steering.set_pos(self.steering_target, speed=speed, max_power=100)

    async def speed(self, pct):
        """
        Set speed in percentage, 100 is full speed forward,
        -100 is full speed reversing
        """
        #await self.motor_a.set_speed(-1*pct)
        #await self.motor_b.set_speed(-1*pct)
        self.message_info(": speed = %d" % pct)
        # Dead-zone: anything under 10% stops the drive motors.
        if abs(pct) < 10:
            await self.motor_a.set_speed(0)
            await self.motor_b.set_speed(0)
        else:
            #await self.motor_a.ramp_speed2(-1*pct,500)
            #await self.motor_b.ramp_speed2(-1*pct,500)
            # Sign inverted so positive pct drives forward with this motor
            # mounting orientation.
            await self.motor_a.set_speed(-1*pct)
            await self.motor_b.set_speed(-1*pct)

    async def initialize(self):
        """Hub start-up hook: calibrate the steering before driving."""
        await self.steering_calibrate()

    async def finalize(self):
        """Hub shutdown hook: stop the motors and centre the wheels."""
        await self.speed(0)
        await self.steer(0)
| StarcoderdataPython |
165471 | <reponame>WebCampZg/conference-web
# Generated by Django 2.1.7 on 2019-03-02 14:05
from django.db import migrations
def populate_applicants(apps, schema_editor):
    """Backfill the new Talk.applicants M2M from the existing single fields.

    Each talk gets its application's applicant, plus the co-presenter when
    one is set.
    """
    Talk = apps.get_model("talks", "Talk")
    for talk in Talk.objects.all():
        applicants = talk.applicants
        applicants.add(talk.application.applicant)
        if talk.co_presenter:
            applicants.add(talk.co_presenter)
class Migration(migrations.Migration):
    # Data migration backfilling Talk.applicants; the reverse operation is
    # an explicit no-op (RunPython.noop), so it can be unapplied safely.

    dependencies = [
        ('talks', '0003_talk_applicants'),
    ]

    operations = [
        migrations.RunPython(
            populate_applicants,
            migrations.RunPython.noop
        ),
    ]
| StarcoderdataPython |
3312955 | <filename>test/unit/api/response/response_type/test_question_answering_phase_response.py
import unittest
from unittest.mock import patch, call
from src.api.response.response_tag import ResponseTag
from src.api.response.response_type.question_answering_phase_response import QuestionAnsweringPhaseResponse
class TestQuestionAnsweringPhaseResponse(unittest.TestCase):
    """Checks that print() emits the answer tag followed by the
    next-question prompt, in that order."""

    _SOME_REUBERT_OUTPUT = "some reuBERT output"

    def setUp(self):
        self.question_answering_phase_response = QuestionAnsweringPhaseResponse().with_output(self._SOME_REUBERT_OUTPUT)

    @patch('builtins.print')
    def test__when__printing__then__prints_reuBERT_output_in_appropriate_response_format(self, print_mock):
        # Expected format: answer tag filled with the reuBERT output, then
        # the "enter question" prompt, each on its own line.
        expected_response_format = "\n" + ResponseTag.ANSWER_QUESTION_TAG.__str__().format(self._SOME_REUBERT_OUTPUT) + "\n" \
                                   + ResponseTag.ENTER_QUESTION_TAG.__str__() + "\n"
        self.question_answering_phase_response.print()
        # Fix: stray dataset-separator text fused onto this line made the
        # file syntactically invalid; removed.
        print_mock.assert_has_calls([call(expected_response_format)], any_order=False)
3382307 | <filename>freeAgents.py<gh_stars>0
import time
import dataLoader
from itertools import combinations
# Load the crowdsourced projections, keyed by fielding position.
positions = dataLoader.loadData("CrowdsourcingResults.csv")
dataLoader.printPositions(positions)
# Blank separator lines (this script uses Python 2 print statements).
print ""
print ""
# Wrap a value in asterisks for emphasis in the table output.
bold = lambda val: ("*" + str(val) + "*")
def getHighestKey(positions, pos, key, usedPlayers=None):
    """Return the not-yet-used player at position *pos* with the highest
    float value for *key*, or None when no eligible player exists.

    *positions* maps position codes to lists of player dicts.  For "DH"
    every non-pitcher position is searched, since any position player can
    fill the DH slot.  *usedPlayers* lists players already taken.
    """
    # Avoid the original mutable-default-argument anti-pattern ([]).
    if usedPlayers is None:
        usedPlayers = []
    bestPlayer = None

    def doBest(candidates, best):
        # Linear scan keeping the candidate with the largest float(key).
        for player in candidates:
            if player in usedPlayers:
                continue
            if best is None or float(player[key]) > float(best[key]):
                best = player
        return best

    if pos == "DH":
        # Any player can be a DH, except pitchers.
        for position in positions:
            if position != "P":
                bestPlayer = doBest(positions[position], bestPlayer)
    else:
        bestPlayer = doBest(positions[pos], bestPlayer)
    return bestPlayer
# Report 1: best available lineup by raw expected fWAR.
dataLoader.printFields(["Positions", "Player", "Exp. 2016 fWAR", "Exp. Salary"])
dataLoader.printSeparator(4)
totalWar = 0
totalSalary = 0
usedPlayers = []
# One slot per fielding position plus a five-man pitching rotation.
positionOrder = ["CF", "LF", "RF", "1B", "2B", "3B", "SS", "C", "P", "P", "P", "P", "P"]
for pos in positionOrder:
    bestPlayer = getHighestKey(positions, pos, "Exp. 2016 fWAR", usedPlayers)
    # NOTE(review): bestPlayer is dereferenced here before the None check
    # below — this would crash if a position ran out of players; confirm.
    dataLoader.printFields([pos, bestPlayer["Player"], bestPlayer["Exp. 2016 fWAR"], bestPlayer["Expected 2016 AAV"]])
    if bestPlayer != None:
        usedPlayers += [ bestPlayer ]
        totalWar += float(bestPlayer["Exp. 2016 fWAR"])
        totalSalary += float(bestPlayer["Expected 2016 AAV"])
dataLoader.printFields([bold("Total"), "", bold(totalWar), bold(totalSalary)])
print ""
print ""
# Report 2: best available lineup by expected wins per dollar.
dataLoader.printFields(["Positions", "Player", "Exp. 2016 fWAR", "Exp. Salary", "Exp. Wins/$"])
dataLoader.printSeparator(5)
totalWar = 0
totalSalary = 0
usedPlayers = []
for pos in positionOrder:
    bestPlayer = getHighestKey(positions, pos, "Expected Wins/$", usedPlayers)
    dataLoader.printFields([pos, bestPlayer["Player"], bestPlayer["Exp. 2016 fWAR"],
                            bestPlayer["Expected 2016 AAV"], bestPlayer["Expected Wins/$"]])
    if bestPlayer != None:
        usedPlayers += [ bestPlayer ]
        totalWar += float(bestPlayer["Exp. 2016 fWAR"])
        totalSalary += float(bestPlayer["Expected 2016 AAV"])
dataLoader.printFields([bold("Total"), "", bold(totalWar), bold(totalSalary), bold(totalWar / totalSalary)])
print ""
print ""
135544 | import os
from uuid import getnode as get_mac
# Application directory layout, rooted at the package's parent directory.
APP_PATH = os.path.normpath(os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    os.pardir))
LIB_PATH = os.path.join(APP_PATH, "robot")
DATA_PATH = os.path.join(APP_PATH, "static")
TEMP_PATH = os.path.join(APP_PATH, "temp")
OUTFILES_PATH = os.path.join(APP_PATH, "outfile")
TEMPLATE_PATH = os.path.join(APP_PATH, "server", "templates")
PLUGIN_PATH = os.path.join(APP_PATH, "plugins")
DEFAULT_CONFIG_NAME = 'default.yml'
CUSTOM_CONFIG_NAME = 'config.yml'
# User-level config locations, overridable via the WUKONG_CONFIG env var.
CONTRIB_PATH = os.path.expanduser(
    os.getenv('WUKONG_CONFIG', '~/.wukong/contrib')
)
CUSTOM_PATH = os.path.expanduser(
    os.getenv('WUKONG_CONFIG', '~/.wukong/custom')
)
CONFIG_PATH = 'config.yml'
# Runtime output files (logs, response DB, audio buffers).
LOGGING_PATH = 'outfile/wukong.log'
DB_PATH ='outfile/response.db'
NEWDB_PATH ='outfile/response'
PCM_PATH='outfile/demo.pcm'
XUNFEITTS_PATH='outfile/xunfei.wav'
# Baidu text-to-speech API credentials and endpoints.
Baidu_tts_apikey = '<KEY>'
Baidu_tts_secret_key = '<KEY>'
Baidu_TTS_URL = 'http://tsn.baidu.com/text2audio'
Baidu_tts_SCOPE = 'audio_tts_post'  # this scope grants TTS capability; if absent, enable it in the web console
Baidu_tts_TOKEN_URL = 'http://openapi.baidu.com/oauth/2.0/token'
# Tuling chatbot API endpoint.
tuling_robot_URL = "http://openapi.tuling123.com/openapi/api/v2"
mac_id =str(get_mac())[:32]  # cuid, device identity identifier
LOG = 'DEBUG'  # log level (debug/info)
def getConfigPath():
    """Return the full path of the configuration file.

    :returns: absolute path of ``config.yml`` under the application root
    """
    return os.path.join(APP_PATH, CONFIG_PATH)
26114 | import threading
import traceback
import logging
import requests
from json.decoder import JSONDecodeError
from ping3 import ping
logging.basicConfig(level=logging.INFO)
GATEWAY_IP = "192.168.100.1"
STATIC_IP_MIN = 200
STATIC_IP_MAX = 254
lastDot = GATEWAY_IP.rfind(".")
ipAddressBase = GATEWAY_IP[0:lastDot+1]
threadLock = threading.Lock()
availableForStaticIp = []
dchpNeedToReconfigure = []
def registerShellyFound(outputFile, ip, mac = "", type = "", ipv4_method ="", name = ""):
    """Append one discovered Shelly to the TSV report and, when it uses
    DHCP, queue it for static-IP reconfiguration.

    Thread-safe: the shared file and list are guarded by threadLock.
    """
    # 'with' replaces the manual acquire()/try/finally release() dance.
    with threadLock:
        outputFile.write(ip + '\t' + mac + '\t' + type + '\t' + ipv4_method + '\t' + str(name) + '\n')
        if ipv4_method == "dhcp":
            dchpNeedToReconfigure.append(ip)
def detectDevice(ipLast):
    """Classify a non-Shelly host: within the static range, ping it to
    decide whether the address is free for static assignment."""
    ip = ipAddressBase + str(ipLast)
    # Fix: the original 'MIN < ipLast & ipLast < MAX' only worked by
    # accident of '&' operator precedence; write the chained comparison
    # that was intended.
    if STATIC_IP_MIN < ipLast < STATIC_IP_MAX:
        logging.debug('No Shelly on IP %s, pinging IP to check availability...', ip)
        pingResult = ping(ip)
        # ping3 reports failure as False (error) or None (timeout); the
        # original '== False' test missed the timeout case and then fed
        # None to the '%f' log format below.
        if not pingResult:
            logging.debug("No device on IP %s, registering as available static IP", ip)
            availableForStaticIp.append(ipLast)
        else:
            logging.debug('Network device detected on IP %s, ping in %f sec.', ip, pingResult)
    return
def detectShelly(ipLast, outputFile):
    """Probe host .<ipLast> for a Shelly device via its /settings HTTP API.

    A Shelly answer is logged and written to *outputFile* via
    registerShellyFound(); hosts without the endpoint are handed to
    detectDevice() for classification.
    """
    try:
        ip = ipAddressBase + str(ipLast)
        logging.debug('Checking for Shelly at IP %s...', ip)
        url = "http://" + ip + "/settings"
        response = requests.get(url, timeout=10)
        if response.status_code != 200:
            # Something answered HTTP, but not a Shelly settings endpoint.
            detectDevice(ipLast)
            return
        json = response.json()
        device = json["device"]
        cloud = json["cloud"]
        cloud_enabled = cloud["enabled"]
        name = json["name"]
        mac = device["mac"]
        type = device["type"]
        wifi_sta = json["wifi_sta"]
        ipv4_method = wifi_sta["ipv4_method"]
        logging.info("Found: ip=%s, mac=%s, type=%s, name=%s, cloud=%d, ipv4_method=%s", ip, mac, type, name, cloud_enabled, ipv4_method)
        registerShellyFound(outputFile, ip, mac, type, ipv4_method, name)
    except JSONDecodeError:
        # Answered 200 but body is not JSON: not a Shelly; ignore host.
        return
    except AttributeError:
        # NOTE(review): silently swallows attribute errors from unexpected
        # payload shapes — confirm this is intentional.
        return
    except requests.ConnectionError as error:
        # No HTTP service at all: fall back to ping-based classification.
        detectDevice(ipLast)
        return
def configureStaticIp(currentIp, newIp, gatewayIp):
    """Switch the Shelly at *currentIp* to static addressing at *newIp*
    (netmask /24) via its HTTP settings API; errors are logged, not raised."""
    try:
        logging.info("Reconfiguring Shelly with DHCP on IP %s to new IP %s with gateway %s", currentIp, newIp, gatewayIp)
        url = ("http://" + currentIp + "/settings/sta?ipv4_method=static&ip="
               + newIp + "&netmask=255.255.255.0&gateway=" + gatewayIp)
        response = requests.get(url, timeout=5)
        if response.status_code != 200:
            logging.error("Error reconfiguring %s error code %d", currentIp, response.status_code)
            return
    except Exception:
        logging.error(traceback.format_exc())
        return
def scanForShellys():
    """Scan the /24 in parallel (one thread per host) and write a TSV
    report of all Shellys found to shelly-ip-table.txt."""
    ipTableFile = open("shelly-ip-table.txt", "w", encoding="utf-8")
    threads = []
    # NOTE(review): range(2, 254) never probes host .254 although
    # STATIC_IP_MAX is 254 — confirm whether .254 should be scanned.
    for c in range(2, 254):
        t = threading.Thread(target=detectShelly, args=(c, ipTableFile))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    ipTableFile.close()
    # Deterministic ordering for the later static-IP assignment pass.
    availableForStaticIp.sort()
    dchpNeedToReconfigure.sort()
def reconfigureDhcpShellys():
    """Assign each DHCP-configured Shelly a free static IP from the pool
    collected during the scan; stops when the pool runs dry."""
    for ipToReconfigure in dchpNeedToReconfigure:
        # Fix: the original tested 'availableForStaticIp.count == 0',
        # which compares the bound list method to 0 and is always False,
        # so an empty pool crashed on pop(0) instead of stopping.
        if not availableForStaticIp:
            logging.error("No more static IP slot available for %s. Stopping.", ipToReconfigure)
            break
        staticIpLast = availableForStaticIp.pop(0)
        staticIp = ipAddressBase + str(staticIpLast)
        configureStaticIp(ipToReconfigure, staticIp, GATEWAY_IP)
# Run the scan, then move any DHCP-configured Shellys to static addresses.
scanForShellys()
reconfigureDhcpShellys()
61055 | <gh_stars>1-10
#!/usr/bin/env python
import time
def level3():
    """Innermost frame: loop forever, sleeping once per second."""
    while True:
        frame_marker = "level3"  # local marker, handy when inspecting the frame
        time.sleep(1)
def level2():
    # Adds one more frame to the call stack before blocking in level3().
    level3()
def level1():
    # Top of the three-deep call chain; never returns.
    level2()
# Script entry point: builds a three-frame stack that sleeps forever.
if __name__ == '__main__':
    level1()
| StarcoderdataPython |
4806926 | """
File: WfSchemaMap.py
A data class containing schema definitions for WF Database.
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "V0.01"
__Date__ = "April 21, 2010"
"""
class WfSchemaMap(object):
_schemaMap = {
"DEPOSITION": {
"ATTRIBUTES": {
"DEP_SET_ID": "dep_set_id",
"PDB_ID": "pdb_id",
"INITIAL_DEPOSITION_DATE": "initial_deposition_date",
"ANNOTATOR_INITIALS": "annotator_initials",
"DEPOSIT_SITE": "deposit_site",
"PROCESS_SITE": "process_site",
"STATUS_CODE": "status_code",
"AUTHOR_RELEASE_STATUS_CODE": "author_release_status_code",
"TITLE": "title",
"AUTHOR_LIST": "author_list",
"EXP_METHOD": "exp_method",
"STATUS_CODE_EXP": "status_code_exp",
"SG_CENTER": "sg_center",
"TITLE_EMDB": "title_emdb",
"AUTHOR_LIST_EMDB": "author_list_emdb",
"DEP_AUTHOR_RELEASE_STATUS_CODE_EMDB": "dep_author_release_status_code_emdb",
"STATUS_CODE_EMDB": "status_code_emdb",
"DATE_BEGIN_PROCESSING": "date_begin_processing",
"DATE_END_PROCESSING": "date_end_processing",
"DEPPW": "depPW",
"NOTIFY": "notify",
"EMAIL": "email",
"LOCKING": "locking",
"COUNTRY": "country",
"NMOLECULE": "nmolecule",
"EMDB_ID": "emdb_id",
"BMRB_ID": "bmrb_id",
"STATUS_CODE_BMRB": "status_code_bmrb",
"STATUS_CODE_OTHER": "status_code_other",
"POST_REL_STATUS": "post_rel_status",
"POST_REL_RECVD_COORD": "post_rel_recvd_coord",
"POST_REL_RECVD_COORD_DATE": "post_rel_recvd_coord_date",
},
"TABLE_NAME": "deposition",
},
"DATABASE_REF": {"ATTRIBUTES": {"DEP_SET_ID": "dep_set_id", "DATABASE_NAME": "database_name", "DATABASE_CODE": "database_code"}, "TABLE_NAME": "database_ref"},
"WF_TASK": {
"ATTRIBUTES": {
"ORDINAL": "ordinal",
"WF_TASK_ID": "wf_task_id",
"WF_INST_ID": "wf_inst_id",
"WF_CLASS_ID": "wf_class_id",
"DEP_SET_ID": "dep_set_id",
"TASK_NAME": "task_name",
"TASK_STATUS": "task_status",
"STATUS_TIMESTAMP": "status_timestamp",
"TASK_TYPE": "task_type",
},
"TABLE_NAME": "wf_task",
},
"WF_INSTANCE": {
"ATTRIBUTES": {
"ORDINAL": "ordinal",
"WF_INST_ID": "wf_inst_id",
"WF_CLASS_ID": "wf_class_id",
"DEP_SET_ID": "dep_set_id",
"OWNER": "owner",
"INST_STATUS": "inst_status",
"STATUS_TIMESTAMP": "status_timestamp",
},
"TABLE_NAME": "wf_instance",
},
"WF_REFERENCE": {
"ATTRIBUTES": {
"DEP_SET_ID": "dep_set_id",
"WF_INST_ID": "wf_inst_id",
"WF_TASK_ID": "wf_task_id",
"WF_CLASS_ID": "wf_class_id",
"HASH_ID": "hash_id",
"VALUE": "value",
},
"TABLE_NAME": "wf_reference",
},
"WF_CLASS_DICT": {
"ATTRIBUTES": {
"WF_CLASS_ID": "wf_class_id",
"WF_CLASS_NAME": "wf_class_name",
"WF_CLASS_FILE": "class_file",
"TITLE": "title",
"AUTHOR": "author",
"VERSION": "version",
},
"TABLE_NAME": "wf_class_dict",
},
"PROCESS_INFORMATION": {
"ATTRIBUTES": {"DEP_SET_ID": "dep_set_id", "SERIAL_NUMBER": "serial_number", "PROCESS_BEGIN": "process_begin", "PROCESS_END": "process_end", "REMARK": "remark"},
"TABLE_NAME": "process_information",
},
"DA_USERS": {
"ATTRIBUTES": {"USER_NAME": "user_name", "PASSWORD": "password", "GROUPNAME": "groupname", "EMAIL": "email", "INITIALS": "initials"},
"TABLE_NAME": "da_users",
},
"DATABASE_RELATED": {
"ATTRIBUTES": {"DEP_SET_ID": "dep_set_id", "DB_NAME": "db_name", "DETAILS": "details", "CONTENT_TYPE": "content_type", "DB_ID": "db_id"},
"TABLE_NAME": "database_related",
},
"DATABASE_PDB_OBS_SPR": {
"ATTRIBUTES": {"DEP_SET_ID": "dep_set_id", "ID": "id", "DATE": "date", "PDB_ID": "pdb_id", "REPLACE_PDB_ID": "replace_pdb_id"},
"TABLE_NAME": "database_PDB_obs_spr",
},
"AUTHOR_CORRECTIONS": {
"ATTRIBUTES": {"DEP_SET_ID": "dep_set_id", "CORRECTIONS": "corrections", "SENDING_DATE": "sending_date", "REMARK": "content_type"},
"TABLE_NAME": "author_corrections",
},
"DEP_WITH_PROBLEMS": {"ATTRIBUTES": {"DEP_SET_ID": "dep_set_id", "PROBLEM_TYPE": "problem_type", "PROBLEM_DETAILS": "problem_details"}, "TABLE_NAME": "dep_with_problems"},
"RELEASE_REQUEST": {
"ATTRIBUTES": {"DEP_SET_ID": "dep_set_id", "REQ_CITATION": "req_citation", "RELEASE_DATE": "release_date", "PUBMED_ID": "PubMed_id"},
"TABLE_NAME": "release_request",
},
"CONTACT_AUTHOR": {
"ATTRIBUTES": {
"DEP_SET_ID": "dep_set_id",
"CITATION": "name_salutation",
"NAME_FIRST": "name_first",
"NAME_LAST": "name_last",
"NAME_MI": "name_mi",
"ROLE": "role",
"EMAIL": "email",
"ADDRESS_1": "address_1",
"ADDRESS_2": "address_2",
"ADDRESS_3": "address_3",
"CITY": "city",
"STATE_PROVINCE": "state_province",
"POSTAL_CODE": "postal_code",
"COUNTRY": "country",
"PHONE": "phone",
"FAX": "fax",
"ORGANIZATION_TYPE": "organization_type",
},
"TABLE_NAME": "release_request",
},
"SITE": {"ATTRIBUTES": {"CODE": "code", "VERBOSE_NAME": "verbose_name"}, "TABLE_NAME": "site"},
"DA_GROUP": {"ATTRIBUTES": {"CODE": "code", "GROUPNAME": "groupname", "SITE": "site", "MAIN_PAGE": "main_page"}, "TABLE_NAME": "da_group"},
"SGCENTERS": {"ATTRIBUTES": {"CODE": "code", "VERBOSE_NAME": "verbose_name"}, "TABLE_NAME": "sgcenters"},
"COMMUNICATION": {
"ATTRIBUTES": {
"ORDINAL": "ordinal",
"SENDER": "sender",
"RECEIVER": "receiver",
"DEP_SET_ID": "dep_set_id",
"WF_CLASS_ID": "wf_class_id",
"WF_INST_ID": "wf_inst_id",
"WF_CLASS_FILE": "wf_class_file",
"COMMAND": "command",
"STATUS": "status",
"STATUS_TIMESTAMP": "status_timestamp",
"PARENT_DEP_SET_ID": "parent_dep_set_id",
"PARENT_WF_CLASS_ID": "parent_wf_class_id",
"PARENT_WF_INST_ID": "parent_wf_inst_id",
},
"TABLE_NAME": "communication",
},
"ENGINE_MONITORING": {
"ATTRIBUTES": {
"CPU_ID": "cpu_id",
"TOTAL_MEM": "total_mem",
"TOTAL_PHYSICAL_MEM": "total_physical_mem",
"TOTAL_VIRTUAL_MEM": "total_virtual_mem",
"TOTAL_MEM_USAGE": "total_mem_usage",
"PHYSICAL_MEM_USAGE": "physical_mem_usage",
"VIRTUAL_MEM_USAGE": "virtual_mem_usage",
"CPU_PROCESSES": "cpu_processes",
"IDS_SET": "ids_set",
},
"TABLE_NAME": "engine_monitoring",
},
}
# following lists are used in the WfDbApi
_columnForStatus = ["STATUS_CODE", "INST_STATUS", "TASK_STATUS"]
_objectTables = ["DEPOSITION", "WF_CLASS_DICT", "WF_INSTANCE", "WF_TASK", "WF_REFERENCE", "DA_USERS"]
_tables = [
"DEPOSITION",
"WF_CLASS_DICT",
"WF_INSTANCE",
"WF_TASK",
"WF_REFERENCE",
"DA_USERS",
"DATABASE_PDB_OBS_SPR",
"DATABASE_REF",
"DATABASE_RELATED",
"AUTHOR_CORRECTIONS",
"RELEASE_REQUEST",
"DEP_WITH_PROBLEMS",
"CONTACT_AUTHOR",
"PROCESS_INFORMATION",
"SITE",
"DA_GROUP",
"SGCENTERS",
"COMMUNICATION",
"ENGINE_MONITORING",
]
_usefulItems = [
"STATUS_CODE",
"INST_STATUS",
"TASK_STATUS",
"DATABASE_CODE",
"REPLACE_PDB_ID",
"ID",
"CONTENT_TYPE",
"DB_ID",
"DB_NAME",
"RELATIONSHIP",
"ASSOCIATED_IDS",
"ASSESSION_CODE",
"CORRECTIONS",
"REQ_CITATION",
"PROBLEM_TYPE",
"PROBLEM_DETAILS",
"PUBMED_ID",
]
_objIds = ["DEP_SET_ID", "WF_CLASS_ID", "WF_INST_ID", "WF_TASK_ID"]
_referencePairs = ["HASH_ID", "VALUE"]
_userInfo = ["USER_NAME", "PASSWORD", "GROUPNAME", "EMAIL", "INITIALS"]
_selectColumns = {
2: [
"deposition.dep_set_id",
"deposition.pdb_id",
"deposition.status_code",
"deposition.author_release_status_code",
"deposition.exp_method",
"deposition.annotator_initials",
"wf_class_dict.wf_class_id",
"wf_class_dict.wf_class_name",
"wf_class_dict.version",
"wf_instance.wf_inst_id",
"wf_instance.owner",
"wf_instance.inst_status",
"wf_instance.status_timestamp",
"wf_task.wf_task_id",
"wf_task.task_name",
"wf_task.task_status",
"wf_task.status_timestamp",
],
1: [
"DEP_SET_ID",
"EXP_METHOD",
"PDB_ID",
"STATUS_CODE",
"AUTHOR_RELEASE_STATUS_CODE",
"INITIAL_DEPOSITION_DATE",
"STATUS_CODE_EXP",
"ANNOTATOR_INITIALS",
"AUTHOR_LIST",
"SG_CENTER",
],
}
_constraintList = {
"DEP_SET_ID": "deposition.dep_set_id",
"WF_CLASS_ID": "wf_class_dict.wf_class_id",
"WF_INST_ID": "wf_instance.wf_inst_id",
"WF_TASK_ID": "wf_task.wf_task_id",
"STATUS_CODE": "deposition.status_code",
"PDB_ID": "deposition.pdb_id",
"DEPOSIT_SITE": "deposition.deposit_site",
"PROCESS_SITE": "deposition.process_site",
"ANNOTATOR_INITIALS": "deposition.annotator_initials",
"EXP_METHOD": "deposition.exp_method",
"SG_CENTER": "deposition.sg_center",
"INST_STATUS": "wf_instance.inst_status",
"INST_STATUS_TIMESTP": "wf_instance.status_timestamp",
"TASK_STATUS_TIMESTP": "wf_task.status_timestamp",
"TASK_STATUS": "wf_task.task_status",
"OWNER": "wf_instance.owner",
}
# crossing search from wf_instance,wf_class_dict,deposition and wf_task
_tableJoinSyntext = (
" FROM wf_instance left join wf_class_dict on "
+ "wf_instance.wf_class_id= wf_class_dict.wf_class_id "
+ "left join deposition on (wf_instance.wf_class_id="
+ "wf_class_dict.wf_class_id and wf_instance.dep_set_id"
+ "=deposition.dep_set_id) left join wf_task on "
+ "(wf_task.wf_inst_id=wf_instance.wf_inst_id and "
+ "wf_task.dep_set_id=deposition.dep_set_id)"
)
_orderBy = {
1: "order by dep_set_id",
2: " order by deposition.dep_set_id, wf_instance.wf_inst_id desc," + "wf_instance.ordinal desc,wf_task.ordinal desc",
3: ["author_release_status_code", "initial_deposition_date"],
}
    def __init__(self):
        """No per-instance state; all configuration lives in class attributes."""
        pass
| StarcoderdataPython |
49096 | from ..api import _v1
from pathlib import Path
from app.error import Error
import pandas as pd
from app.components._data import dataframeHandler
import numpy as np
from sklearn.impute import KNNImputer
from sklearn import preprocessing
# ** ALL CONTENT COMMENTED BETWEEN ASTERISKS MUST BE EDITED **
# ** Plugin id inside the internal api; it must be unique **
pluginId = "processorPluginExample"
# ** Plugin name; it must equal the class name, and the variable must be named pluginName **
pluginName = "ProcessorPluginExample"
# ** Plugin description **
pluginDescription = "Plugin description"
# ** Name of the plugin shown in the interface **
pluginInterfaceName = "Procesar..."
# ** List of implemented actions with their parameters. It will be rendered in the UI forms. **
# NOTE(review): both entries share the name "exampleAction"; the handler map in
# ProcessorPluginExample registers that name only once -- confirm whether the
# duplicate entry is intentional template filler.
Actions = [ _v1.Action(
                name="exampleAction",
                description="example action",
                params=[
                    _v1.Param(name="exampleSelect", kind="select", options=["option1", "option2", "option3"]),
                    _v1.Param(name="exampleNumber", kind="number"),
                    _v1.Param(name="exampleString", kind="string"),
                    _v1.Param(name="exampleFile", kind="file"),
                ]),
            _v1.Action(
                name="exampleAction",
                description="example action",
                params=[
                    _v1.Param(name="exampleSelect", kind="select", options=["option1", "option2", "option3"]),
                    _v1.Param(name="exampleNumber", kind="number"),
                    _v1.Param(name="exampleString", kind="string"),
                    _v1.Param(name="exampleFile", kind="file"),
                ])
]
class ProcessorPluginExample:
    """Example processor plugin.

    Routes an incoming request to a named action handler, then returns the
    (possibly paginated) dataframe data held in the local cache.
    """

    def __init__(self):
        # ** The actions dict must be updated when new actions are added **
        self.actions = {
            "default": self.exampleActionHandler,
            "exampleAction": self.exampleActionHandler,
        }
        # Pagination window requested by the client; None means "unbounded".
        self.pagination = {
            "startRow": None,
            "endRow": None,
        }

    def exampleActionHandler(self, request):
        """Handle the example action: read its parameters, then persist the dataframe."""
        df = dataframeHandler.getDataframe()
        column = request.form.get('column')
        axis = request.form.get('axis')
        # Obtain the remaining parameters from the request form / files.
        exampleSelect = request.form.get('exampleSelect')
        exampleNumber = request.form.get('exampleNumber')
        exampleString = request.form.get('exampleString')
        exampleFile = request.files['exampleFile']
        # Echo the received parameters.
        print("exampleSelect: ", exampleSelect)
        print("exampleNumber: ", exampleNumber)
        print("exampleString: ", exampleString)
        print("exampleFile: ", exampleFile)
        # Always save the dataframe back into the local cache.
        dataframeHandler.saveDataframe(df)

    # ** Add new handlers for additional actions and register them in self.actions **

    # Don't change this method if it is not necessary.
    def _updatePagination(self, request: any):
        """Refresh the pagination window from the request's query string."""
        raw_start = request.args.get('startRow')
        raw_end = request.args.get('endRow')
        self.pagination["startRow"] = int(raw_start) if raw_start is not None else None
        self.pagination["endRow"] = int(raw_end) if raw_end is not None else None

    # Don't change this method if it is not necessary.
    def __call__(self, request: any):
        """Dispatch *request* to the selected action and return the data."""
        print("ProcessorPluginExample called")
        self._updatePagination(request)
        action = request.args.get("action")
        if action is None:
            handler = self.actions["default"]
        else:
            handler = self.actions.get(action)
            if handler is None:
                raise Error('Accion {} desconocida'.format(action))
        handler(request)
        return dataframeHandler.getAllData(self.pagination)
# Register the plugin with the internal API.  The handler class is referenced
# directly instead of via `eval(pluginName)`: eval of a module-level string is
# an unnecessary code-execution hazard and hides the reference from static
# analysis, while the direct name is guaranteed to match pluginName above.
component = _v1.ProcessorPlugin(name=pluginName, description=pluginDescription, interfacename=pluginInterfaceName, actions=Actions, handler_class=ProcessorPluginExample)
_v1.register_processor_plugin(component)
3359431 | <reponame>aisk/ironpython3
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
#
# Copyright (c) <NAME>.
#
import os
import unittest
import zlib
from iptest import IronPythonTestCase, run_test
def create_gzip(text):
    """Gzip-compress *text*, writing it to 'test_data.gz' and returning the raw bytes."""
    import gzip
    with gzip.open('test_data.gz', 'wb') as archive:
        archive.write(text)
    with open('test_data.gz', 'rb') as archive:
        return archive.read()
class ZlibTest(IronPythonTestCase):
    """Exercise zlib decompression across deflate, zlib and gzip framings."""

    def setUp(self):
        """Build one text body plus deflate-, zlib- and gzip-compressed copies."""
        super(ZlibTest, self).setUp()
        self.text = b"""
Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Maecenas porttitor congue massa. Fusce posuere, magna sed pulvinar ultricies, purus lectus malesuada libero, sit amet commodo magna eros quis urna.
Nunc viverra imperdiet enim. Fusce est. Vivamus a tellus.
Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Proin pharetra nonummy pede. Mauris et orci.
Aenean nec lorem. In porttitor. Donec laoreet nonummy augue.
Suspendisse dui purus, scelerisque at, vulputate vitae, pretium mattis, nunc. Mauris eget neque at sem venenatis eleifend. Ut nonummy.
Fusce aliquet pede non pede. Suspendisse dapibus lorem pellentesque magna. Integer nulla.
Donec blandit feugiat ligula. Donec hendrerit, felis et imperdiet euismod, purus ipsum pretium metus, in lacinia nulla nisl eget sapien. Donec ut est in lectus consequat consequat.
Etiam eget dui. Aliquam erat volutpat. Sed at lorem in nunc porta tristique.
Proin nec augue. Quisque aliquam tempor magna. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas.
Nunc ac magna. Maecenas odio dolor, vulputate vel, auctor ac, accumsan id, felis. Pellentesque cursus sagittis felis.
Pellentesque porttitor, velit lacinia egestas auctor, diam eros tempus arcu, nec vulputate augue magna vel risus. Cras non magna vel ante adipiscing rhoncus. Vivamus a mi.
Morbi neque. Aliquam erat volutpat. Integer ultrices lobortis eros.
Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Proin semper, ante vitae sollicitudin posuere, metus quam iaculis nibh, vitae scelerisque nunc massa eget pede. Sed velit urna, interdum vel, ultricies vel, faucibus at, quam.
Donec elit est, consectetuer eget, consequat quis, tempus quis, wisi. In in nunc. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos hymenaeos.
Donec ullamcorper fringilla eros. Fusce in sapien eu purus dapibus commodo. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus.
Cras faucibus condimentum odio. Sed ac ligula. Aliquam at eros.
Etiam at ligula et tellus ullamcorper ultrices. In fermentum, lorem non cursus porttitor, diam urna accumsan lacus, sed interdum wisi nibh nec nisl. Ut tincidunt volutpat urna.
Mauris eleifend nulla eget mauris. Sed cursus quam id felis. Curabitur posuere quam vel nibh.
Cras dapibus dapibus nisl. Vestibulum quis dolor a felis congue vehicula. Maecenas pede purus, tristique ac, tempus eget, egestas quis, mauris.
Curabitur non eros. Nullam hendrerit bibendum justo. Fusce iaculis, est quis lacinia pretium, pede metus molestie lacus, at gravida wisi ante at libero.
"""
        # Raw deflate stream (negative wbits = no header) and zlib-framed stream.
        deflate_compress = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
        zlib_compress = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS)
        self.deflate_data = deflate_compress.compress(self.text) + deflate_compress.flush()
        self.zlib_data = zlib_compress.compress(self.text) + zlib_compress.flush()
        # Written to test_data.gz on disk; removed again in tearDown.
        self.gzip_data = create_gzip(self.text)

    def tearDown(self):
        """Remove the gzip fixture file produced by create_gzip()."""
        super(ZlibTest, self).tearDown()
        os.remove("test_data.gz")

    def test_gzip(self):
        """decompression with gzip header"""
        # wbits | 16 selects gzip framing.
        do = zlib.decompressobj(zlib.MAX_WBITS | 16)
        self.assertEqual(do.decompress(self.gzip_data), self.text)
        self.assertEqual(zlib.decompress(self.gzip_data, zlib.MAX_WBITS | 16), self.text)

    def test_header_auto_detect(self):
        """autodetect zlib and gzip header"""
        # wbits | 32 auto-detects zlib vs gzip framing.
        do = zlib.decompressobj(zlib.MAX_WBITS | 32)
        self.assertEqual(do.decompress(self.gzip_data), self.text)
        do = zlib.decompressobj(zlib.MAX_WBITS | 32)
        self.assertEqual(do.decompress(self.zlib_data), self.text)
        self.assertEqual(zlib.decompress(self.gzip_data, zlib.MAX_WBITS | 32), self.text)
        self.assertEqual(zlib.decompress(self.zlib_data, zlib.MAX_WBITS | 32), self.text)

    def test_deflate(self):
        """raw data, no header"""
        do = zlib.decompressobj(-zlib.MAX_WBITS)
        self.assertEqual(do.decompress(self.deflate_data), self.text)
        self.assertEqual(zlib.decompress(self.deflate_data, -zlib.MAX_WBITS), self.text)

    def test_gzip_stream(self):
        """gzip header, uncomplete header"""
        # Feed the stream in chunks of every size from 1 to 24 bytes to
        # exercise decompression across partial-header boundaries.
        for delta in range(1, 25):
            do = zlib.decompressobj(zlib.MAX_WBITS | 16)
            bufs = []
            for i in range(0, len(self.gzip_data), delta):
                bufs.append(do.decompress(self.gzip_data[i:i+delta]))
                self.assertEqual(len(do.unconsumed_tail), 0)
            bufs.append(do.flush())
            self.assertEqual(b"".join(bufs), self.text)

    def test_gzip_with_extra(self):
        """gzip header with extra field"""
        # the file was picked up from boost bug report
        with open(os.path.join(self.test_dir, 'sample.txt.gz'), "rb") as f:
            gzipped = f.read()
        self.assertEqual(zlib.decompress(gzipped, zlib.MAX_WBITS | 16), b'hello there\n')

    def test_gzip_stream_with_extra(self):
        """Chunked decompression of a gzip stream whose header has an extra field."""
        with open(os.path.join(self.test_dir, 'sample.txt.gz'), "rb") as f:
            gzipped = f.read()
        for delta in range(1, 25):
            do = zlib.decompressobj(zlib.MAX_WBITS | 16)
            bufs = []
            for i in range(0, len(gzipped), delta):
                bufs.append(do.decompress(gzipped[i:i+delta]))
                self.assertEqual(len(do.unconsumed_tail), 0)
            bufs.append(do.flush())
            self.assertEqual(b"".join(bufs), b'hello there\n')
run_test(__name__)
| StarcoderdataPython |
3219939 | from pcfg import PCFG
from collections import deque
import time
def dfs(G : PCFG):
    '''
    A generator that enumerates all programs using a DFS.

    Each program is yielded as a left-linked chain of tuples
    (primitive, parent_chain), terminated by None.
    '''
    # Re-order each non-terminal's derivations by ascending weight
    # (G.rules[S][P][1]).  The frontier below is popped LIFO, so the rule
    # appended last -- the highest-weight one -- is expanded first.
    # NOTE(review): assumes index 1 of a rule entry is its weight/probability;
    # confirm against the PCFG class.
    new_rules = {}
    for S in G.rules:
        new_rules[S] = {}
        sorted_derivation_list = sorted(
            G.rules[S], key=lambda P: G.rules[S][P][1]
        )
        for P in sorted_derivation_list:
            new_rules[S][P] = G.rules[S][P]
    G = PCFG(start = G.start,
             rules = new_rules,
             max_program_depth = G.max_program_depth)

    frontier = deque()
    initial_non_terminals = deque()
    initial_non_terminals.append(G.start)
    frontier.append((None, initial_non_terminals))
    # A frontier is a queue of pairs (partial_program, non_terminals) describing a partial program:
    # partial_program is the list of primitives and variables describing the leftmost derivation, and
    # non_terminals is the queue of non-terminals appearing from left to right
    while len(frontier) != 0:
        partial_program, non_terminals = frontier.pop()
        if len(non_terminals) == 0:
            # Nothing left to expand: the program is complete.
            yield partial_program
        else:
            S = non_terminals.pop()
            for P in G.rules[S]:
                args_P, w = G.rules[S][P]
                new_partial_program = (P, partial_program)
                new_non_terminals = non_terminals.copy()
                for arg in args_P:
                    new_non_terminals.append(arg)
                frontier.append((new_partial_program, new_non_terminals))
| StarcoderdataPython |
4828744 | from __future__ import absolute_import
# Django versions 1.6 and older don't have the "apps" package, so fall back to
# minimal stand-ins when it is unavailable.
try:
    from django.apps import AppConfig, apps
    is_installed = apps.is_installed
except ImportError:
    class AppConfig:
        # Bare stand-in so ARCUtilsConfig below can still subclass AppConfig.
        pass

    def is_installed(dotted_app_path):
        # Fallback: consult settings.INSTALLED_APPS directly.
        from django.conf import settings
        return dotted_app_path in settings.INSTALLED_APPS
from . import DEFAULT_FEATURES
class ARCUtilsConfig(AppConfig):
    """AppConfig whose ready() hook monkey-patches Django according to the
    ARCUTILS_FEATURES setting (falling back to DEFAULT_FEATURES)."""

    name = "arcutils"

    def ready(self):
        """
        Do a bunch of monkey patching based on the ARCUTILS_FEATURES setting
        """
        from django.conf import settings
        ARCUTILS_FEATURES = getattr(settings, 'ARCUTILS_FEATURES', DEFAULT_FEATURES)

        # monkey patch the PasswordResetForm so it indicates if a user does not exist
        if ARCUTILS_FEATURES.get('warn_on_invalid_email_during_password_reset'):
            from django.contrib.auth.forms import PasswordResetForm
            original_clean_email = getattr(PasswordResetForm, "clean_email", lambda self: self.cleaned_data['email'])

            def _clean_email(self):
                from django.contrib.auth import get_user_model
                # BUG FIX: `forms` was previously resolved from the enclosing
                # ready() scope, where it is only imported inside the
                # 'add_css_classes_to_forms' branch below; with that feature
                # disabled, raising ValidationError crashed with a NameError.
                # Import it locally so this patch is self-contained.
                from django import forms
                email = self.cleaned_data['email']
                UserModel = get_user_model()
                if not UserModel.objects.filter(email=email, is_active=True).exists():
                    raise forms.ValidationError("A user with that email address does not exist!")
                return original_clean_email(self)

            PasswordResetForm.clean_email = _clean_email

        # hook up the session clearer
        CLEAR_EXPIRED_SESSIONS_AFTER_N_REQUESTS = getattr(settings, 'CLEAR_EXPIRED_SESSIONS_AFTER_N_REQUESTS', 100)
        CLEAR_EXPIRED_SESSIONS_ENABLED = is_installed('django.contrib.sessions') and CLEAR_EXPIRED_SESSIONS_AFTER_N_REQUESTS is not None
        if ARCUTILS_FEATURES.get('clear_expired_sessions') and CLEAR_EXPIRED_SESSIONS_ENABLED:
            from .sessions import patch_sessions
            patch_sessions(CLEAR_EXPIRED_SESSIONS_AFTER_N_REQUESTS)

        # add all the templatetag libraries we want available by default
        if ARCUTILS_FEATURES.get('templatetags'):
            # NOTE(review): add_to_builtins was removed in Django 1.9 -- this
            # branch only works on the old Django versions this file targets.
            from django.template.base import add_to_builtins
            add_to_builtins('django.contrib.staticfiles.templatetags.staticfiles')
            # add the arc template tags to the builtin tags, and the bootstrap tag
            add_to_builtins('arcutils.templatetags.arc')
            add_to_builtins('arcutils.templatetags.bootstrap')

        # make the `required_css_class` attribute for forms and fields
        # "required" by default
        if ARCUTILS_FEATURES.get('add_css_classes_to_forms'):
            from django.forms.fields import Field
            from django import forms
            forms.Form.required_css_class = "required"
            forms.ModelForm.required_css_class = "required"
            Field.required_css_class = "required"
| StarcoderdataPython |
3309207 | <reponame>baklanovp/pystella
import numpy as np
import unittest
from scipy.optimize import curve_fit
import pylab as plt
import pystella.rf.rad_func as rf
import pystella.rf.spectrum as spectrum
__author__ = 'bakl'
class TestSpectrumFitting(unittest.TestCase):
    """Checks for color/Wien temperature estimates of a Planck spectrum."""

    def setUp(self):
        """Build a T=5500 K Planck spectrum on 100 log-spaced wavelengths (10..1e5 A)."""
        nf = 100
        start, end = 10., 1e5
        wl = np.exp(np.linspace(np.log(start), np.log(end), nf))
        nu = rf.val_to_hz(wl, inp="A")
        T = 5500
        s = spectrum.SpectrumPlanck(nu, T)
        self.sp = s

    def test_fit_t_color(self):
        """For a pure Planck spectrum the color temperature equals Tbb."""
        Tcol = self.sp.T_color
        self.assertAlmostEqual(Tcol, self.sp.T,
                               msg="For planck Tcolor [%f] should be equal Tbb [%f]." % (Tcol, self.sp.T))

    def test_t_wien(self):
        """Wien displacement temperature should match Tbb within 5%."""
        Twien = self.sp.T_wien
        self.assertAlmostEqual(Twien, self.sp.T,
                               msg="For planck Twien [%f] should be equal Tbb [%f]." % (Twien, self.sp.T)
                               , delta=Twien * 0.05)  # ACCURACY 5 %

    def plot(self):
        """Manual helper: plot the input model against the best-fit Planck curve."""
        nu = self.sp.Freq
        flux = self.sp.Flux
        Tcol = self.sp.T_color
        ybest = rf.planck(nu, Tcol)
        # plot the solution
        plt.plot(nu, flux, 'b*', nu, ybest, 'r-', label='Tinit=%f, Tcol=%f' % (self.sp.T, Tcol))
        plt.xscale('log')
        plt.yscale('log')
        plt.show()

    def test_fit_bb(self):
        """Fit a Planck function to the spectrum and compare against Tbb (1%)."""
        def func(nu, T):
            return np.pi * rf.planck(nu, T, inp="Hz", out="freq")

        self.sp.cut_flux(max(self.sp.Flux) * 1e-5)
        freq = self.sp.Freq
        flux = self.sp.Flux
        Tinit = 1.e4
        popt, pcov = curve_fit(func, freq, flux, p0=Tinit)
        # BUG FIX: float() on a 1-element ndarray is deprecated since NumPy
        # 1.25 and raises on newer releases; index the single fitted parameter.
        Tbest = float(popt[0])
        # bestT, pcov = curve_fit(rf.fit_planck(nu, inp='Hz'), nu, flux, p0=Tinit, sigma=sigma)
        sigmaT = np.sqrt(np.diag(pcov))
        print('True model values')
        print(' Tbb = %.2f K' % self.sp.T)
        print('Parameters of best-fitting model:')
        print(" T = {} +/- {} K".format(Tbest, sigmaT))
        # Tcol = self.sp.temp_color
        ybest = np.pi * rf.planck(freq, Tbest, inp="Hz", out="freq")
        # plot the solution
        plt.plot(freq, flux, 'b*', label='Spectral T: %f' % self.sp.T)
        plt.plot(freq, ybest, 'r-', label='Best Tcol: %f' % Tbest)
        plt.xscale('log')
        plt.yscale('log')
        plt.legend(loc=3)
        plt.show()
        self.assertAlmostEqual(Tbest, self.sp.T,
                               msg="For planck Tcolor [{:,f}] should be equal sp.T [{:,f}].".format(Tbest, self.sp.T),
                               delta=Tbest * 0.01)
| StarcoderdataPython |
1637799 | <reponame>Dodo33/alchemist-lib<filename>alchemist_lib/populate/bittrexpopulate.py
from .populate import PopulateBaseClass
from ..datafeed import BittrexDataFeed
from ..database.asset import Asset
from ..database.instrument import Instrument
from ..database.exchange import Exchange
from .. import utils
class BittrexPopulate(PopulateBaseClass):
    """
    Class that manages the population of the database with data from Bittrex.
    Inherits from alchemist_lib.populate.populate.PopulateBaseClass.

    Attributes:
        saver (alchemist_lib.populate.saver.Saver): Saver class instance.
    """

    def __init__(self, saver):
        """
        Costructor method.

        Args:
            saver (alchemist_lib.populate.saver.Saver): Saver class instance.
        """
        PopulateBaseClass.__init__(self, saver = saver)

    def get_exchange_instance(self):
        """
        Save all informations about Bittrex and returns an Exchange instance.

        Return:
            exchange (alchemist_lib.database.exchange.Exchange): Exchange instance.
        """
        # Ensure reference rows (instrument, timeframes, broker, data source)
        # exist before wiring them into the Exchange row.
        instrument = self.saver.instrument(kind = "cryptocurrency")
        m30 = self.saver.timeframe(id = "30M", description = "thirty minutes")
        h1 = self.saver.timeframe(id = "1H", description = "one hour")
        d1 = self.saver.timeframe(id = "1D", description = "one day")
        broker = self.saver.broker(name = "bittrex", site = "www.bittrex.com")
        datasource = self.saver.data_source(name = "bittrex", site = "www.bittrex.com", timeframes = [m30, h1, d1])
        timetable = None  # no trading timetable recorded for Bittrex
        exchange = self.saver.exchange(name = "bittrex", website = "www.bittrex.com", data_source = datasource, timetable = timetable, brokers = [broker])
        return exchange

    def populate(self):
        """
        Populate the database with all tradable assets on Bittrex.
        """
        exchange = self.get_exchange_instance()
        assets = BittrexDataFeed(session = self.saver.session).get_assets()
        for asset in assets:
            # NOTE(review): `exchanges` receives a bare Exchange here while
            # update_asset_list() below passes a list -- confirm saver.asset
            # accepts both forms.
            self.saver.asset(ticker = asset.ticker, instrument_id = asset.instrument_id, name = asset.name, exchanges = exchange)
        cryptocurrency_id = self.saver.session.query(Instrument).filter(Instrument.instrument_type == "cryptocurrency").one().instrument_id
        # BTC is not returned by the datafeed but is stored explicitly.
        self.saver.asset(ticker = "BTC", instrument_id = cryptocurrency_id, name = "Bitcoin", exchanges = exchange)

    def update_asset_list(self):
        """
        Update the list of assets traded on Bittrex.
        """
        db_assets = self.saver.session.query(Asset).join(Exchange, Asset.exchanges).filter(Exchange.exchange_name == "bittrex").all()
        bittrex_assets = BittrexDataFeed(session = self.saver.session).get_assets()
        cryptocurrency_id = self.saver.session.query(Instrument).filter(Instrument.instrument_type == "cryptocurrency").one().instrument_id
        bittrex_assets.append(Asset(ticker = "BTC", instrument_id = cryptocurrency_id, name = "Bitcoin"))
        # New listings = on Bittrex but not stored; delistings = stored but
        # no longer on Bittrex.
        assets_to_save = utils.subtract_list(first = bittrex_assets, second = db_assets)
        assets_to_remove = utils.subtract_list(first = db_assets, second = bittrex_assets)
        for asset in assets_to_save:
            exchange = self.get_exchange_instance()
            self.saver.asset(ticker = asset.ticker, instrument_id = asset.instrument_id, name = asset.name, exchanges = [exchange])
        for asset in assets_to_remove:
            self.saver.session.delete(asset)
        self.saver.session.commit()
| StarcoderdataPython |
1754050 | <filename>main.py
import os
import sys
from PySide2 import QtWidgets, QtCore, QtGui
class Window(QtWidgets.QWidget):
    """Frameless, masked splash window with a fade animation and a fake
    progress bar; launches an external program when the fade-out finishes."""

    def __init__(self):
        super().__init__()
        # Hide from the taskbar | frameless | always on top.
        self.setWindowFlags(QtCore.Qt.Tool | QtCore.Qt.X11BypassWindowManagerHint |
                            QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint)
        self.progress = 18
        self.setWindowOpacity(0)
        # Window-visible flag.
        self.isShow = False
        self.pix = QtGui.QBitmap(self.resource_path(r'res\image\mask.png'))
        self.resize(self.pix.size())
        # Apply the mask: the window takes the mask's shape.
        self.setMask(self.pix)
        # (Opacity 0 above makes the window fully transparent initially.)
        # Initialize the timer driving progress updates.
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.updateProgress)
        # Initialize the progress bar.
        self.progressBar = QtWidgets.QProgressBar(self)
        self.progressBar.setFixedWidth(200)
        self.progressBar.setFixedHeight(32)
        self.layout = QtWidgets.QVBoxLayout()
        self.layout.addWidget(self.progressBar)
        self.progressBar.move(115, 50)
        self.progressBar.setValue(18)
        self.progressBar.setFormat('安全防护启动中')
        # Load the progress-bar stylesheet; minimum circular progress is 16.
        with open(self.resource_path(r'res\progressBar.qss'), 'r', encoding='utf8') as style:
            self.setStyleSheet(style.read())
        # Fade animation on the window opacity.
        self.animation = QtCore.QPropertyAnimation(self, b"windowOpacity")
        self.animation.finished.connect(self.onAnimationEnd)
        self.animation.setDuration(1000)

    def paintEvent(self, event=None):
        """Draw the background image (slightly oversized to cover the mask)."""
        painter = QtGui.QPainter(self)
        painter.drawPixmap(-8, 0, self.pix.width() + 15, self.pix.height(),
                           QtGui.QPixmap(self.resource_path(r'res\image\bg.png')))

    def startAnimate(self):
        """Start the progress timer and fade the window in."""
        self.timer.start(450)  # tick every 450 ms
        # Start the fade-in animation.
        if not self.isShow:
            self.isShow = True
            self.animation.stop()  # stop any running animation and restart
            self.animation.setStartValue(0.0)
            self.animation.setEndValue(1.0)
            self.animation.start()

    def updateProgress(self, event=None):
        """Advance the fake progress; once it passes 100, start the fade-out."""
        # print(self.progress)
        if self.progress <= 100:
            self.progress = self.progress * 1.25  # exponential-looking growth
            if self.progress > 100:
                self.progress = 100
                self.progressBar.setValue(self.progress)
                self.progress = 101  # sentinel: next tick enters the else branch
            else:
                self.progressBar.setValue(self.progress)
        else:
            # TODO run new exe
            # Progress complete: fade the window out.
            self.isShow = False
            self.timer.stop()
            self.animation.stop()
            self.animation.setStartValue(1.0)
            self.animation.setEndValue(0.0)
            self.animation.start()

    def onAnimationEnd(self):
        """After the fade-out, launch the target program and quit."""
        # Animation finished.
        print("onAnimationEnd isShow", self.isShow)
        if not self.isShow:
            print("onAnimationEnd close()")
            # Customize the program you want to launch; see the QProcess docs.
            target = QtCore.QProcess()
            target.setProgram("msinfo32")
            target.startDetached()
            QtWidgets.QApplication.quit()

    def show(self):
        super().show()
        self.startAnimate()

    def resource_path(self, relative_path):
        """Resolve *relative_path* against the PyInstaller bundle or source dir."""
        if getattr(sys, 'frozen', False):  # running from a PyInstaller bundle?
            base_path = sys._MEIPASS
        else:
            # base_path = os.path.abspath(".")
            base_path = os.path.dirname(os.path.abspath(__file__))
        return os.path.join(base_path, relative_path)
if __name__ == "__main__":
    app = QtWidgets.QApplication([])
    form = Window()
    # Pin the splash near the bottom-right corner of the available screen area.
    screen = app.primaryScreen().availableSize()
    form.move(screen.width() - 400, screen.height() - 150)
    form.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
1772940 | <gh_stars>1-10
"""Functions to write output."""
# Copyright 2020-2022 Blue Brain Project / EPFL
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import h5py
import numpy as np
def write_responses(responses, output_dir):
    """Write each response in a file.

    Args:
        responses (dict): time and recorded value of each recording.
            Should have structure "key": {"time": time, "voltage": response}.
            Note that all responses use a "voltage" field, even if the recorded
            value was not the voltage.  A value may also be a plain scalar, or
            None (e.g. when a spike was not found), in which case it is skipped.
        output_dir (str): path to the output repository
    """
    for key, resp in responses.items():
        output_path = os.path.join(output_dir, key + ".dat")

        # Some resp are None when spike is not found.
        if resp is None:
            continue

        # BUG FIX: `np.float` was a deprecated alias of the builtin `float`
        # and was removed in NumPy 1.24, so the old isinstance check raised
        # AttributeError at call time.  `np.floating` covers all NumPy float
        # scalar types and keeps plain `float` behavior unchanged.
        if isinstance(resp, (float, np.floating)):
            np.savetxt(output_path, np.array([resp]))
        else:
            time = np.array(resp["time"])
            soma_voltage = np.array(resp["voltage"])
            np.savetxt(output_path, np.transpose(np.vstack((time, soma_voltage))))
def write_current(currents, output_dir):
    """Write currents into separate files.

    Args:
        currents (dict): time and trace of each recording, with structure
            "key": {"time": time, "current": current}
        output_dir (str): path to the output repository
    """
    for name, record in currents.items():
        destination = os.path.join(output_dir, name + ".dat")
        columns = np.vstack((record["time"], record["current"]))
        np.savetxt(destination, columns.T)
def write_synplas_output(
    responses,
    pre_spike_train,
    output_path="./output.h5",
    syn_prop_path="synapses/synapse_properties.json",
):
    """Write output as h5.

    Args:
        responses (dict): responses of the postsynaptic cell
        pre_spike_train (list): times at which the synapses fire (ms)
        output_path (str): path to the (postsynaptic data) output file
        syn_prop_path (str): path to the synapse properties file
    """
    results = {"prespikes": pre_spike_train}

    # Attach synapse properties when the sidecar file exists.
    if os.path.isfile(syn_prop_path):
        with open(syn_prop_path, "r", encoding="utf-8") as handle:
            results["synprop"] = json.load(handle)

    # Collect responses: a list of recordings becomes a column-stacked voltage
    # array, while a single recording contributes the shared "t"/"v" arrays.
    for name, resp in responses.items():
        if isinstance(resp, list):
            results[name] = np.transpose([np.array(rec["voltage"]) for rec in resp])
        else:
            results["t"] = np.array(resp["time"])
            results["v"] = np.array(resp["voltage"])

    # Store results: synapse properties go into file attributes, everything
    # else into compressed datasets.
    with h5py.File(output_path, "w") as h5file:
        for name, payload in results.items():
            if name == "synprop":
                h5file.attrs.update(results["synprop"])
            else:
                h5file.create_dataset(
                    name,
                    data=payload,
                    chunks=True,
                    compression="gzip",
                    compression_opts=9,
                )
def write_synplas_precell_output(
    responses,
    protocol_name,
    precell_output_path="./output_precell.h5",
):
    """Write precell output as h5.

    Args:
        responses (dict): responses of the presynaptic cell
        protocol_name (str): name of the presynaptic protocol
        precell_output_path (str): path to the presynaptic data output file
    """
    recording = responses[protocol_name]
    results = {
        "t": np.array(recording["time"]),
        "v": np.array(recording["voltage"]),
    }

    # Store both arrays as compressed datasets.
    with h5py.File(precell_output_path, "w") as h5file:
        for name, payload in results.items():
            h5file.create_dataset(
                name,
                data=payload,
                chunks=True,
                compression="gzip",
                compression_opts=9,
            )
| StarcoderdataPython |
1699463 | <filename>flow/target.py
# coding: utf-8
import json
from .base import Base
from log import Log
from setting import RecordsStatus, MAX_SWIPE_DOWN_COUNT
from exception import ValidationError
logger = Log.logger(__file__)
class TARGETModel(Base):
def __init__(self, driver):
super().__init__(driver)
self.target_list = []
self.init()
def exe(self):
logger.info("-" * 30 + " step 1 " + "-" * 30)
self.go_to_target()
logger.info("-" * 30 + " step 2 " + "-" * 30)
self.update_exe_on_target()
logger.info("-" * 30 + " step 3 " + "-" * 30)
self.click()
logger.info("-" * 30 + " step 4 " + "-" * 30)
self.update_app_status()
def init(self):
for i in self.rule.TARGET_SETTING:
steps = i["STEPS"]
if steps[0] != "LAUNCH":
steps = ["LAUNCH"] + steps
exe_key = " ||| ".join(steps)
if exe_key not in self.target_list:
self.target_list.append(exe_key)
self.exe_records[exe_key] = {}
self.exe_records[exe_key]["0"] = {}
self.exe_records[exe_key]["status"] = RecordsStatus.TO_DO
self.exe_records[exe_key]["roll_back_steps"] = steps
if "TARGET_ACTIVITY" in i.keys():
self.exe_records[exe_key]["target_activity"] = i["TARGET_ACTIVITY"]
if "TARGET_TEXT" in i.keys():
self.exe_records[exe_key]["target_text"] = i["TARGET_TEXT"]
self.driver.sleep(5)
self.target_activity = self.target_list[0]
current_activity_key, clickable_item_dict, current_page_source = self.get_page_structure()
self.root_activity = current_activity_key
if self.target_activity == "LAUNCH":
self.exe_records[exe_key]["0"] = clickable_item_dict
logger.info(json.dumps(self.exe_records, ensure_ascii=False))
def go_to_target(self):
logger.info("action : go to target page")
status = self.update_activity_status(self.target_activity)
logger.info("go_to_target ::: target activity status is {0}".format(status))
if status == RecordsStatus.DONE:
logger.info("should switch target activity, exe records is {0}".format(
json.dumps(self.exe_records, ensure_ascii=False)))
self.update_target()
self.exe()
self.current_activity_status()
if self.current_activity == self.target_activity:
logger.info("当前页面是target页面, target activity key is {0}".format(self.target_activity))
return
else:
logger.info("当前页面不是target页面")
if self.current_activity != self.root_activity:
self.back_to_root()
# current page is root
if self.target_activity == self.root_activity:
self.known_activity_todo(self.root_activity)
else:
self.go_to_target_by_steps() # click to target page
def go_to_target_by_steps(self):
for step in self.exe_records[self.target_activity]["roll_back_steps"][1:]:
tmp = step.split(" ||| ")
action = tmp[0]
xpath = tmp[1]
element = self.driver.find_element(xpath)
if action == "CLICK":
self.driver.click_element(element)
self.driver.sleep()
if action == "ENTER":
text = tmp[2]
element.send_keys(text)
self.driver.sleep()
if "target_activity" in self.exe_records[self.target_activity].keys():
if self.validate_target_activity(self.exe_records[self.target_activity]["target_activity"]) is False:
raise ValidationError("different target activity in config file")
if "target_text" in self.exe_records[self.target_activity].keys():
if self.validate_target_text(self.exe_records[self.target_activity]["target_text"]) is False:
raise ValidationError("different target activity in config file")
self.current_activity = self.target_activity
def validate_target_activity(self, target_activity):
current_android_activity = self.driver.current_activity()
if current_android_activity == target_activity:
return True
return False
def validate_target_text(self, target_text: list):
current_activity_key = self.get_page_structure()[0]
current_activity_key_list = current_activity_key.split(" |||| ")
for i in target_text:
if i not in current_activity_key_list:
return False
return True
def update_exe_on_target(self):
status = self.update_activity_status(self.target_activity)
if status == RecordsStatus.TO_SWIPE:
activity_swipe_count = self.latest_swipe_count(self.target_activity)
self.bounds_swipe_count = activity_swipe_count + 1
for i in range(0, self.bounds_swipe_count):
self.swipe_up()
if self.target_activity == self.root_activity:
action = "launch ||| {0}".format(self.bounds_swipe_count)
self.steps = [action]
else:
action = "click ||| {0} ||| {1}".format(self.bounds_swipe_count, self.focus_bounds)
self.steps = self.steps.append(action)
current_activity_key, clickable_item_dict, current_page_source = self.get_page_structure()
self.update_known_activity_records(current_page_source, self.target_activity,
self.bounds_swipe_count)
if status == RecordsStatus.TO_DO:
self.update_bounds()
if status == RecordsStatus.DONE:
self.exe()
def click(self):
self.update_action_count()
self.driver.click_by_coordinate(self.focus_bounds)
self.driver.sleep(1)
def update_activity_status(self, activity_key=None):
if activity_key is None:
activity_key = self.current_activity
if self.exe_records[activity_key]["status"] == RecordsStatus.DONE:
return self.exe_records[activity_key]["status"]
activity_swipe_count = self.latest_swipe_count(activity_key)
if activity_swipe_count is None:
self.exe_records[activity_key]["status"] = RecordsStatus.DONE
return self.exe_records[activity_key]["status"]
for k in self.exe_records[activity_key].keys():
if k not in ["roll_back_steps", "status", "target_activity", "target_text"]:
for kk, vv in self.exe_records[activity_key][k].items():
if vv["covered"] is False:
self.exe_records[activity_key]["status"] = RecordsStatus.TO_DO
return self.exe_records[activity_key]["status"]
if activity_swipe_count < MAX_SWIPE_DOWN_COUNT:
self.exe_records[activity_key]["status"] = RecordsStatus.TO_SWIPE
else:
self.exe_records[activity_key]["status"] = RecordsStatus.DONE
return self.exe_records[activity_key]["status"]
def current_activity_status(self):
current_activity_key, clickable_item_dict, current_page_source = self.get_page_structure()
if self.is_root_page(current_activity_key):
self.init_steps()
similar_known_activity_key = self.root_activity
self.update_current_activity(self.root_activity)
return current_activity_key, clickable_item_dict, current_page_source, similar_known_activity_key
similar_known_activity_key = self.similar_known_activity_key_in_records(current_activity_key)
if similar_known_activity_key is None:
self.update_current_activity(current_activity_key)
else:
self.update_current_activity(similar_known_activity_key)
return current_activity_key, clickable_item_dict, current_page_source, similar_known_activity_key
def is_root_page(self, current_activity_key):
logger.info("action : confirm it is root page or not")
if self.is_same_activity_key(self.root_activity, current_activity_key):
logger.info("info : current page is root page")
return True
else:
logger.info("info : current page is not root page")
return False
def back_to_root(self, back_loop_count=3):
    """Press back repeatedly until the root page is reached.

    Checks after every press whether the current activity and page source
    match the root page.  If the root page is not reached within
    ``back_loop_count`` presses the app is relaunched instead.
    """
    logger.info("action: back to root page")
    if back_loop_count <= 0:
        # Pressing back did not get us home; restart the app instead.
        logger.info("info : cannot turn back to root, try 3 times")
        self.launch()
        return
    logger.info("action : press back count is {0}".format(str(back_loop_count)))
    self.back()
    current_activity_key = self.get_page_structure()[0]
    if self.is_root_page(current_activity_key):
        self.init_steps()
        self.update_current_activity(self.root_activity)
        logger.info("info : back to root successfully")
    else:
        self.back_to_root(back_loop_count - 1)
3362173 | <reponame>nxtlo/Tsujigiri
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 - Present nxtlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Consts and stuff that we don't modify."""
from __future__ import annotations
__all__: list[str] = [
"COLOR",
"API",
"GENRES",
"iter",
"randomize",
"generate_component",
"naive_datetime",
]
import datetime
import random
import typing
import hikari
import tanjun
import yuyo
if typing.TYPE_CHECKING:
import collections.abc as collections
_T = typing.TypeVar("_T", covariant=True)
COLOR: typing.Final[dict[str, hikari.Colourish]] = {
    "invis": hikari.Colour(0x36393F),
    # NOTE(review): evaluated once at import time, so "random" is the same
    # colour for the whole process lifetime — confirm that is intended.
    "random": hikari.Colour(random.randint(0, 0xFFFFFF)),
}
"""Colors."""

API: dict[str, typing.Any] = {
    "anime": "https://api.jikan.moe/v3",
    "urban": "https://api.urbandictionary.com/v0/define",
    "git": {
        "user": "https://api.github.com/users",
        "repo": "https://api.github.com/search/repositories?q={}&page=0&per_page=11&sort=stars&order=desc",
    },
}
"""A dict that holds api endpoints."""

# Keys below are user-visible genre names mapped to Jikan genre ids.
# "Advanture" and "Saumrai" are misspelled but kept as-is: they are runtime
# lookup keys and renaming them would break callers.
GENRES: dict[str, int] = {
    "Action": 1,
    "Advanture": 2,
    "Drama": 8,
    "Daemons": 6,
    "Ecchi": 9,  # :eyes:
    "Sci-Fi": 24,
    "Shounen": 27,
    "Harem": 35,  # :eyes:
    "Seinen": 42,
    "Saumrai": 21,
    "Games": 11,
    "Psycho": 40,
    "Superpowers": 37,
    "Vampire": 32,
}
"""Anime only genres."""
def naive_datetime(datetime_: datetime.datetime) -> datetime.datetime:
    """Return *datetime_* converted to an equivalent UTC-aware datetime."""
    utc = datetime.timezone.utc
    return datetime_.astimezone(utc)
async def generate_component(
    ctx: tanjun.abc.SlashContext | tanjun.abc.MessageContext,
    iterable: (
        collections.Generator[tuple[hikari.UndefinedType, hikari.Embed], None, None]
        | collections.Iterator[tuple[hikari.UndefinedType, hikari.Embed]]
    ),
    component_client: yuyo.ComponentClient,
    timeout: datetime.timedelta | None = None,
) -> None:
    """Build a button paginator over *iterable* and reply with its first page.

    Only the invoking author may drive the paginator; defaults to a
    90-second timeout when none is given.
    """
    nav_triggers = (
        yuyo.pagination.LEFT_DOUBLE_TRIANGLE,
        yuyo.pagination.LEFT_TRIANGLE,
        yuyo.pagination.STOP_SQUARE,
        yuyo.pagination.RIGHT_TRIANGLE,
        yuyo.pagination.RIGHT_DOUBLE_TRIANGLE,
    )
    paginator = yuyo.ComponentPaginator(
        iterable,
        authors=(ctx.author,),
        triggers=nav_triggers,
        timeout=timeout or datetime.timedelta(seconds=90),
    )
    first_page = await paginator.get_next_entry()
    if first_page:
        content, embed = first_page
        message = await ctx.respond(
            content=content, embed=embed, component=paginator, ensure_result=True
        )
        component_client.set_executor(message, paginator)
def iter(map: collections.Mapping[str, typing.Any]) -> collections.Sequence[typing.Any]:
    """Return the mapping's keys as a list.

    NOTE: the public name intentionally matches the original API even though
    it shadows the ``iter`` builtin inside this module.
    """
    # Iterating a mapping yields its keys directly; no .keys() comprehension needed.
    return list(map)
def randomize(seq: collections.Sequence[_T] | None = None) -> _T | str:
    """Pick a random element of *seq*, or a random genre name when *seq* is empty/None."""
    pool = list(seq) if seq else list(GENRES.keys())
    return random.choice(pool)
| StarcoderdataPython |
3340256 | <reponame>BitMask-Technologies/route-lift-api<filename>routelift_api/message_templates/routers.py
from message_templates.views import (create_message_settings, create_message_template, delete_message_settings,
delete_message_template,
get_all_message_settings, get_all_message_templates, get_all_notification_types,
retrieve_message_settings, retrieve_message_template, update_message_settings,
update_message_templates, )
def notification_types_router(request):
    """Collection endpoint for notification types (GET only)."""
    if request.method != 'GET':
        return None  # unsupported verbs fall through, as before
    return get_all_notification_types(request)
def single_message_templates(request, message_template_id):
    """Detail endpoint for one message template, dispatched by HTTP verb."""
    handlers = {
        'PUT': update_message_templates,
        'GET': retrieve_message_template,
        'DELETE': delete_message_template,
    }
    handler = handlers.get(request.method)
    if handler is not None:
        return handler(request, message_template_id)
def all_message_templates(request):
    """POST creates a message template; every other verb lists them all."""
    creating = request.method == 'POST'
    view = create_message_template if creating else get_all_message_templates
    return view(request)
def message_settings(request):
    """Singleton message-settings resource, dispatched by HTTP verb."""
    verb_to_view = {
        'POST': create_message_settings,
        'PUT': update_message_settings,
        'GET': retrieve_message_settings,
        'DELETE': delete_message_settings,
    }
    view = verb_to_view.get(request.method)
    return view(request) if view is not None else None
def all_message_settings(request):
    # Thin pass-through: listing message settings has no per-verb branching.
    return get_all_message_settings(request)
| StarcoderdataPython |
1726759 | <filename>Cracking_the_Coding_Interview/20_1_custom_add.py
#!/usr/bin/env python
"""
Write a function that adds two numbers -- but no arithmetic operators can be
used.
"""
def sum_add(first, second):
    """Kind of cheating to use Python's sum function."""
    operands = [first, second]
    return sum(operands)
def binary_add(first, second):
    """Binary voodoo magic.

    XOR adds the bits without carries; AND shifted left by one produces
    the carries.  Repeat until either operand is exhausted.
    """
    while first != 0 and second != 0:
        first, second = first ^ second, (first & second) << 1
    return second if first == 0 else first
def array_add(first, second):
    """
    Addition with bytearrays.  ``bytearray(n)`` allocates ``n`` zero bytes,
    so concatenating two such buffers and measuring the result adds the
    two lengths without any arithmetic operator.
    """
    combined = bytearray(first)
    combined += bytearray(second)
    return len(combined)
def main():
    """Exercise each addition strategy on the same sample pairs."""
    sample_pairs = ((0, 1), (2, 3), (20, 12), (50, 49))  # expected: 1, 5, 32, 99
    for add in (sum_add, binary_add, array_add):
        for a, b in sample_pairs:
            print(add(a, b))
if __name__ == '__main__':
    # Run the demo only when executed as a script, not on import.
    main()
| StarcoderdataPython |
4831189 | import pytest
import mock
import numpy as np
import awkward as awk
from zinv.utils.AwkwardOps import (
get_nth_object,
get_nth_sorted_object_indices,
get_attr_for_min_ref,
jagged_prod,
)
# Each case: jagged input, element index to pick per row, row count, expected.
# Rows shorter than id+1 elements yield NaN in the expected output.
@pytest.mark.parametrize("array,id,size,out", ([
    awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
    0, 3,
    np.array([0, 3, 5]),
], [
    awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
    1, 3,
    np.array([1, 4, 6]),
], [
    awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
    2, 3,
    np.array([2, np.nan, 7]),
], [
    awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
    3, 3,
    np.array([np.nan, np.nan, 8]),
], [
    awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
    4, 3,
    np.array([np.nan, np.nan, np.nan]),
]))
def test_get_nth_object(array, id, size, out):
    # equal_nan=True: NaN placeholders for missing elements must match.
    assert np.allclose(get_nth_object(array, id, size), out, rtol=1e-5, equal_nan=True)
# Each case: values, per-row sort keys (descending), rank to select, row
# count, expected value per row (NaN where the row has no id-th element).
@pytest.mark.parametrize("array,ref,id,size,out", ([
    awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
    awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
    0, 3,
    np.array([0, 4, 5]),
], [
    awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
    awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
    1, 3,
    np.array([2, 3, 7]),
], [
    awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
    awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
    2, 3,
    np.array([1, np.nan, 8]),
], [
    awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
    awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
    3, 3,
    np.array([np.nan, np.nan, 6]),
], [
    awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
    awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
    4, 3,
    np.array([np.nan, np.nan, np.nan]),
]))
def test_get_nth_sorted_object_indices(array, ref, id, size, out):
    assert np.allclose(get_nth_sorted_object_indices(array, ref, id, size), out, rtol=1e-5, equal_nan=True)
# Picks, per row, the value of `array` at the position where `ref` is minimal.
@pytest.mark.parametrize("array,ref,size,out", ([
    awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]),
    awk.JaggedArray.fromiter([[3, 1, 2], [1, 2], [4, 1, 3, 2]]),
    3,
    np.array([1, 3, 6]),
],))
def test_get_attr_for_min_ref(array, ref, size, out):
    assert np.allclose(get_attr_for_min_ref(array, ref, size), out, rtol=1e-5, equal_nan=True)
# Per-row product: [0*1*2, 3*4, 5*6*7*8] == [0, 12, 1680].
@pytest.mark.parametrize("input_,output", ([
    awk.JaggedArray.fromiter([[0, 1, 2], [3, 4], [5, 6, 7, 8]]).astype(np.float32),
    np.array([0, 12, 1680]),
],))
def test_jagged_prod(input_, output):
    assert np.allclose(jagged_prod(input_), output, rtol=1e-5, equal_nan=True)
| StarcoderdataPython |
1713988 | import glob
import json
import itertools
import numpy as np
import tifffile
import zarr
from numcodecs import Blosc
import os
import tqdm
"""
Default chunk size is (64, 64, 64)
"""
class ZarrStack:
    """Builds a Neuroglancer-precomputed-style mipmap pyramid of 64^3 zarr
    chunks from either a stack of TIFF planes or an existing zarr store.

    Level 1 is full resolution; each subsequent level halves every axis by
    averaging 2x2x2 neighborhoods.
    """

    def __init__(self, src, dest, compressor=None):
        """
        :param src: glob for tiffs or a zarr store
        :param dest: the destination folder for zarr arrays
        :param compressor: numcodecs compressor to use on each chunk. Default
        is Zstd level 1 with bitshuffle
        """
        self.files = None
        self.z_arr = None
        if isinstance(src, str):  # Assume it's a glob if src is a string
            self.files = sorted(glob.glob(src))
            self.z_extent = len(self.files)
            # Shape and dtype are taken from the first plane; all planes are
            # assumed to match it.
            img0 = tifffile.imread(self.files[0])
            self.y_extent, self.x_extent = img0.shape
            self.dtype = img0.dtype
        elif isinstance(src, zarr.NestedDirectoryStore):
            self.z_arr = zarr.open(src, mode='r')
            self.z_extent, self.y_extent, self.x_extent = self.z_arr.shape
            self.dtype = self.z_arr.dtype
        else:
            raise ValueError('Unrecognized data source for ZarrStack')
        self.dest = dest
        if compressor is None:
            self.compressor = Blosc(cname='zstd', clevel=1, shuffle=Blosc.BITSHUFFLE)
        else:
            self.compressor = compressor

    def resolution(self, level):
        """The pixel resolution at a given level

        :param level: 1 to N, the mipmap level
        :returns: the number of pixels at the base level per pixel at this
        level
        """
        return 2 ** (level - 1)

    def n_x(self, level):
        """The number of blocks in the X direction at the given level

        :param level: mipmap level, starting at 1
        :return: # of blocks in the X direction
        """
        resolution = self.resolution(level)
        return (self.x_extent // resolution + 63) // 64

    def x0(self, level):
        """The starting X coordinates at a particular level

        :param level: 1 to N
        :return: an array of starting X coordinates
        """
        resolution = self.resolution(level)
        return np.arange(0, (self.x_extent + resolution - 1) // resolution, 64)

    def x1(self, level):
        """The ending X coordinates at a particular level

        :param level: the mipmap level (1 to N)
        :return: an array of ending X coordinates
        """
        resolution = self.resolution(level)
        x1 = self.x0(level) + 64
        # The last block is clipped to the (downsampled) volume extent.
        x1[-1] = (self.x_extent + resolution - 1) // resolution
        return x1

    def n_y(self, level):
        """The number of blocks in the Y direction at the given level

        :param level: mipmap level, starting at 1
        :return: # of blocks in the Y direction
        """
        resolution = self.resolution(level)
        return (self.y_extent // resolution + 63) // 64

    def y0(self, level):
        """The starting Y coordinates at a particular level

        :param level: 1 to N
        :return: an array of starting Y coordinates
        """
        resolution = self.resolution(level)
        return np.arange(0, (self.y_extent + resolution - 1) // resolution, 64)

    def y1(self, level):
        """The ending Y coordinates at a particular level

        :param level: the mipmap level (1 to N)
        :return: an array of ending Y coordinates
        """
        resolution = self.resolution(level)
        y1 = self.y0(level) + 64
        y1[-1] = (self.y_extent + resolution - 1) // resolution
        return y1

    def n_z(self, level):
        """The number of blocks in the Z direction at the given level

        :param level: mipmap level, starting at 1
        :return: # of blocks in the Z direction
        """
        resolution = self.resolution(level)
        return (self.z_extent // resolution + 63) // 64

    def z0(self, level):
        """The starting Z coordinates at a particular level

        :param level: 1 to N
        :return: an array of starting Z coordinates
        """
        resolution = self.resolution(level)
        return np.arange(0, (self.z_extent + resolution - 1) // resolution, 64)

    def z1(self, level):
        """The ending Z coordinates at a particular level

        :param level: the mipmap level (1 to N)
        :return: an array of ending Z coordinates
        """
        resolution = self.resolution(level)
        z1 = self.z0(level) + 64
        z1[-1] = (self.z_extent + resolution - 1) // resolution
        return z1

    def write_info_file(self, n_levels, voxel_size=(1800, 1800, 2000)):
        """Write the precomputed info file that defines the volume

        :param n_levels: the number of levels to be written
        :param voxel_size: base voxel size per axis (x, y, z); units are
        presumably nanometers — TODO confirm with the consumer of "info".
        """
        if not os.path.exists(self.dest):
            os.mkdir(self.dest)
        d = dict(data_type = self.dtype.name,
                 mesh="mesh",
                 num_channels=1,
                 type="image")
        scales = []
        z_extent = self.z_extent
        y_extent = self.y_extent
        x_extent = self.x_extent
        for level in range(1, n_levels + 1):
            resolution = self.resolution(level)
            scales.append(
                dict(chunk_sizes=[[64, 64, 64]],
                     encoding="raw",
                     key="%d_%d_%d" % (resolution, resolution, resolution),
                     resolution=[resolution * _ for _ in voxel_size],
                     size=[x_extent, y_extent, z_extent],
                     voxel_offset=[0, 0, 0]))
            # Each level halves each axis, rounding up.
            z_extent = (z_extent + 1) // 2
            y_extent = (y_extent + 1) // 2
            x_extent = (x_extent + 1) // 2
        d["scales"] = scales
        with open(os.path.join(self.dest, "info"), "w") as fd:
            json.dump(d, fd, indent=2, sort_keys=True)

    def write_level_1(self, silent=False):
        """Write the first mipmap level, loading from tiff planes"""
        dest_lvl1 = os.path.join(self.dest, "1_1_1")
        store = zarr.NestedDirectoryStore(dest_lvl1)
        z_arr_1 = zarr.open(store,
                            mode='w',
                            chunks=(64, 64, 64),
                            dtype=self.dtype,
                            shape=(self.z_extent, self.y_extent, self.x_extent),
                            compression=self.compressor)
        z0 = self.z0(1)
        z1 = self.z1(1)
        y0 = self.y0(1)
        y1 = self.y1(1)
        x0 = self.x0(1)
        x1 = self.x1(1)
        if self.files is not None:
            # TIFF source: read one 64-deep slab of planes at a time.
            for z0a, z1a in tqdm.tqdm(zip(z0, z1), total=len(z0), disable=silent):
                img = np.zeros((z1a-z0a, y1[-1], x1[-1]), self.dtype)
                for z in range(z0a, z1a):
                    img[z-z0a] = tifffile.imread(self.files[z])
                z_arr_1[z0a:z1a] = img
        elif self.z_arr is not None:  # need to decompress to re-chunk the original store
            for z0a, z1a in tqdm.tqdm(zip(z0, z1), total=len(z0), disable=silent):
                z_arr_1[z0a:z1a] = self.z_arr[z0a:z1a]

    def write_level_n(self, level, silent=False):
        """Write mipmap level *level* (>= 2) by 2x downsampling level-1 data.

        Each destination voxel is the integer mean of up to 8 source voxels.
        NOTE(review): the accumulator is np.uint64 and uses floor division,
        which truncates for floating-point self.dtype — confirm the volumes
        written through this class are integer-typed.
        """
        src_resolution = self.resolution(level - 1)
        dest_resolution = self.resolution(level)
        src = os.path.join(
            self.dest,
            "%d_%d_%d" % (src_resolution, src_resolution, src_resolution))
        dest = os.path.join(
            self.dest,
            "%d_%d_%d" % (dest_resolution, dest_resolution, dest_resolution))
        src_store = zarr.NestedDirectoryStore(src)
        src_zarr = zarr.open(src_store, mode='r')
        dest_store = zarr.NestedDirectoryStore(dest)
        dest_zarr = zarr.open(dest_store,
                              mode='w',
                              chunks=(64, 64, 64),
                              dtype=self.dtype,
                              shape=(self.z1(level)[-1],
                                     self.y1(level)[-1],
                                     self.x1(level)[-1]),
                              compression=self.compressor)
        z0s = self.z0(level - 1)  # source block coordinates
        z1s = self.z1(level - 1)
        y0s = self.y0(level - 1)
        y1s = self.y1(level - 1)
        x0s = self.x0(level - 1)
        x1s = self.x1(level - 1)
        z0d = self.z0(level)  # dest block coordinates
        z1d = self.z1(level)
        y0d = self.y0(level)
        y1d = self.y1(level)
        x0d = self.x0(level)
        x1d = self.x1(level)
        for xidx, yidx, zidx in tqdm.tqdm(list(itertools.product(
                range(self.n_x(level)),
                range(self.n_y(level)),
                range(self.n_z(level)))),
                disable=silent):  # looping over destination block indices (fewer blocks than source)
            block = np.zeros((z1d[zidx] - z0d[zidx],
                              y1d[yidx] - y0d[yidx],
                              x1d[xidx] - x0d[xidx]), np.uint64)
            hits = np.zeros((z1d[zidx] - z0d[zidx],
                             y1d[yidx] - y0d[yidx],
                             x1d[xidx] - x0d[xidx]), np.uint64)
            for xsi1, ysi1, zsi1 in itertools.product((0, 1), (0, 1), (0, 1)):  # looping over source blocks for this destination
                xsi = xsi1 + xidx * 2
                if xsi == self.n_x(level-1):  # Check for any source blocks that are out-of-bounds
                    continue
                ysi = ysi1 + yidx * 2
                if ysi == self.n_y(level-1):
                    continue
                zsi = zsi1 + zidx * 2
                if zsi == self.n_z(level-1):
                    continue
                src_block = src_zarr[z0s[zsi]:z1s[zsi],
                                     y0s[ysi]:y1s[ysi],
                                     x0s[xsi]:x1s[xsi]]
                # Sum the 8 interleaved sub-lattices of the source block so
                # each destination voxel accumulates its 2x2x2 neighborhood.
                for offx, offy, offz in \
                        itertools.product((0, 1), (0, 1), (0,1)):
                    dsblock = src_block[offz::2, offy::2, offx::2]
                    block[zsi1*32:zsi1*32 + dsblock.shape[0],
                          ysi1*32:ysi1*32 + dsblock.shape[1],
                          xsi1*32:xsi1*32 + dsblock.shape[2]] += \
                        dsblock.astype(block.dtype)  # 32 is half-block size of source
                    hits[zsi1*32:zsi1*32 + dsblock.shape[0],
                         ysi1*32:ysi1*32 + dsblock.shape[1],
                         xsi1*32:xsi1*32 + dsblock.shape[2]] += 1
            # Integer mean; voxels never touched (hits == 0) stay zero.
            block[hits > 0] = block[hits > 0] // hits[hits > 0]
            dest_zarr[z0d[zidx]:z1d[zidx],
                      y0d[yidx]:y1d[yidx],
                      x0d[xidx]:x1d[xidx]] = block
| StarcoderdataPython |
86499 | #!/usr/bin/env python
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import sys
from setuptools import setup, find_packages
# Path of the "src" tree next to this script.
# NOTE(review): srcdir is computed but never passed to setup() below —
# presumably a leftover; confirm before removing.
srcdir = os.path.join(os.path.dirname(sys.argv[0]), 'src')

setup(name='nova',
      version='0.3.0',
      description='None Other, Vaguely Awesome',
      author='nova-core',
      author_email='<EMAIL>',
      url='http://novacc.org/',
      packages = find_packages(),
      )
| StarcoderdataPython |
1672580 | <reponame>artemrys/poetry
from typing import Dict
from typing import List
import pytest
from poetry.core.packages.package import Package
from poetry.factory import Factory
from poetry.utils.extras import get_extra_package_names
# Shared fixtures: a small dependency graph used by the parametrized test
# below.  "bar" depends on "foo"; "baz" and "quix" depend on each other.
_PACKAGE_FOO = Package("foo", "0.1.0")
_PACKAGE_SPAM = Package("spam", "0.2.0")
_PACKAGE_BAR = Package("bar", "0.3.0")
_PACKAGE_BAR.add_dependency(Factory.create_dependency("foo", "*"))

# recursive dependency
_PACKAGE_BAZ = Package("baz", "0.4.0")
_PACKAGE_BAZ.add_dependency(Factory.create_dependency("quix", "*"))
_PACKAGE_QUIX = Package("quix", "0.5.0")
_PACKAGE_QUIX.add_dependency(Factory.create_dependency("baz", "*"))
@pytest.mark.parametrize(
    ["packages", "extras", "extra_names", "expected_extra_package_names"],
    [
        # Empty edge case
        ([], {}, [], []),
        # Selecting no extras is fine
        ([_PACKAGE_FOO], {}, [], []),
        # An empty extras group should return an empty list
        ([_PACKAGE_FOO], {"group0": []}, ["group0"], []),
        # Selecting an extras group should return the contained packages
        (
            [_PACKAGE_FOO, _PACKAGE_SPAM, _PACKAGE_BAR],
            {"group0": ["foo"]},
            ["group0"],
            ["foo"],
        ),
        # If a package has dependencies, we should also get their names
        (
            [_PACKAGE_FOO, _PACKAGE_SPAM, _PACKAGE_BAR],
            {"group0": ["bar"], "group1": ["spam"]},
            ["group0"],
            ["bar", "foo"],
        ),
        # Selecting multiple extras should get us the union of all package names
        (
            [_PACKAGE_FOO, _PACKAGE_SPAM, _PACKAGE_BAR],
            {"group0": ["bar"], "group1": ["spam"]},
            ["group0", "group1"],
            ["bar", "foo", "spam"],
        ),
        # Mutually-recursive dependencies (baz <-> quix) must not loop forever
        (
            [_PACKAGE_BAZ, _PACKAGE_QUIX],
            {"group0": ["baz"], "group1": ["quix"]},
            ["group0", "group1"],
            ["baz", "quix"],
        ),
    ],
)
def test_get_extra_package_names(
    packages: List[Package],
    extras: Dict[str, List[str]],
    extra_names: List[str],
    expected_extra_package_names: List[str],
):
    assert expected_extra_package_names == list(
        get_extra_package_names(packages, extras, extra_names)
    )
| StarcoderdataPython |
1657333 | from django.apps import AppConfig
class TodoApiConfig(AppConfig):
    """Django application configuration for the ``todo_api`` app."""

    # Use 64-bit auto-incrementing primary keys by default (Django 3.2+).
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'todo_api'
| StarcoderdataPython |
4810487 | #!/usr/bin/env python
import sys, os, cmd
class CLI(cmd.Cmd):
    """Interactive prompt that forwards messages into a writable handle.

    Commands follow the ``cmd.Cmd`` convention: ``do_*`` implements a
    command, ``help_*`` documents it.  ``s`` and ``q`` are shortcuts.
    """

    def __init__(self, fh):
        """:param fh: writable file-like object (e.g. an open FIFO)."""
        cmd.Cmd.__init__(self)
        self.fh = fh
        self.prompt = '> '

    def do_send(self, *args):
        line = ' '.join(args)
        # BUG FIX: write to the handle supplied at construction time instead
        # of relying on a module-level ``fh`` global.
        self.fh.write(line + '\n')
        self.fh.flush()
        print("Sent [{}]".format(line))

    def help_send(self):
        print("syntax: send [word] [word] [word]...")
        print("-- sends a message through the FIFO")

    def do_hello(self, arg):
        print("Hello again {}!".format(arg))

    def help_hello(self):
        print("syntax: hello [message]")
        print("-- prints a hello message")

    def do_quit(self, arg):
        # Exits the whole process; a non-empty arg becomes the exit message.
        sys.exit(arg)

    def help_quit(self):
        print("syntax: quit [returncode]")
        print("-- terminates the application")

    # shortcuts
    do_s = do_send
    do_q = do_quit
# Named pipe to write into; opening a FIFO for writing blocks until a
# reader attaches, so start the consumer first.
fifo_name = '/tmp/test.fifo'
with open(fifo_name, "w") as fh:
    cli = CLI(fh)
    cli.cmdloop()
| StarcoderdataPython |
88382 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
def raise_error(*args, **kwds):
    # Python 2 print statement: echo the arguments, then always raise.
    print args, kwds
    raise ValueError('Invalid value:' + str(args) + str(kwds))
class ExceptionTest(unittest.TestCase):
    """Demonstrates two ways of asserting that a call raises (Python 2)."""

    def testTrapLocally(self):
        # Manual try/except/else: fail only if no ValueError was raised.
        try:
            raise_error('a', b='c')
        except ValueError:
            pass
        else:
            self.fail('Did not see ValueError')

    def testFailUnlessRaises(self):
        # Same check via unittest's built-in helper.
        self.failUnlessRaises(ValueError, raise_error, 'a', b='c')
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
| StarcoderdataPython |
3213152 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 31 16:46:12 2020
@author: skyjones
"""
import os
import re
import shutil
import sys
import pandas as pd
from glob import glob
import nibabel as nib
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing
import scipy
import statsmodels.api as sm
import itertools
from matplotlib.lines import Line2D
from matplotlib.patches import Circle
from matplotlib.offsetbox import (TextArea, DrawingArea, OffsetImage,
AnnotationBbox)
from matplotlib.cbook import get_sample_data
import matplotlib
in_file = '/Users/skyjones/Documents/repositories/md_lj_lab/bin/final_lymph_clean.xlsx'
out_folder = '/Users/skyjones/Documents/repositories/md_lj_lab/figs/'
##########
def plot_ci_manual(t, s_err, n, x, x2, y2, ax=None, color='#b9cfe7'):
    r"""Return an axes of confidence bands using a simple approach.

    Notes
    -----
    .. math:: \left| \: \hat{\mu}_{y|x0} - \mu_{y|x0} \: \right| \; \leq \; T_{n-2}^{.975} \; \hat{\sigma} \; \sqrt{\frac{1}{n}+\frac{(x_0-\bar{x})^2}{\sum_{i=1}^n{(x_i-\bar{x})^2}}}

    .. math:: \hat{\sigma} = \sqrt{\sum_{i=1}^n{\frac{(y_i-\hat{y})^2}{n-2}}}

    References
    ----------
    .. [1] <NAME>. "Curve fitting," Jupyter Notebook.
       http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/CurveFitting.ipynb
    """
    if ax is None:
        ax = plt.gca()

    # Half-width of the CI at each x2: t * s_err * sqrt(1/n + leverage).
    ci = t * s_err * np.sqrt(1/n + (x2 - np.mean(x))**2 / np.sum((x - np.mean(x))**2))
    ax.fill_between(x2, y2 + ci, y2 - ci, color=color, edgecolor="", alpha=0.25)

    return ax
def plot_ci_bootstrap(xs, ys, resid, nboot=500, ax=None):
    """Return an axes of confidence bands using a bootstrap approach.

    Notes
    -----
    The bootstrap approach iteratively resampling residuals.
    It plots `nboot` number of straight lines and outlines the shape of a band.
    The density of overlapping lines indicates improved confidence.

    Returns
    -------
    ax : axes
        - Cluster of lines
        - Upper and Lower bounds (high and low) (optional) Note: sensitive to outliers

    References
    ----------
    .. [1] <NAME>. "Visualizing Confidence Intervals", Various Consequences.
       http://www.variousconsequences.com/2010/02/visualizing-confidence-intervals.html
    """
    if ax is None:
        ax = plt.gca()

    # FIX: scipy.random / scipy.polyfit / scipy.polyval were deprecated and
    # removed from SciPy; use the NumPy equivalents directly.
    bootindex = np.random.randint

    for _ in range(nboot):
        # Resample the residuals with replacement.
        resamp_resid = resid[bootindex(0, len(resid) - 1, len(resid))]
        # Make coeffs of for polys
        pc = np.polyfit(xs, ys + resamp_resid, 1)
        # Plot bootstrap cluster
        ax.plot(xs, np.polyval(pc, xs), "b-", linewidth=2, alpha=3.0 / float(nboot))

    return ax
def conf_plot(exes, whys, ax, icolor, subcolor, lab=None):
    """Scatter *whys* vs *exes* on *ax* with a linear fit, 95% confidence
    band and 95% prediction limits.  *icolor* styles the fit/bands,
    *subcolor* the scatter points, *lab* the scatter legend label."""
    ## BOOT STRAPPING. courtesy of pylang from stackoverflow
    x, y = exes, whys

    # Modeling with Numpy
    def equation(a, b):
        """Return a 1D polynomial."""
        return np.polyval(a, b)

    # Data
    ax.plot(
        x, y, "o", color="#b9cfe7", markersize=7,
        markeredgewidth=1, markeredgecolor="black", markerfacecolor="None"
    )

    p, cov = np.polyfit(x, y, 1, cov=True)                   # parameters and covariance from of the fit of 1-D polynom.
    y_model = equation(p, x)                                 # model using the fit parameters; NOTE: parameters here are coefficients

    # Statistics
    n = len(exes)                                            # number of observations
    m = p.size                                               # number of parameters
    dof = n - m                                              # degrees of freedom
    t = stats.t.ppf(0.975, n - m)                            # used for CI and PI bands

    # Estimates of Error in Data/Model
    resid = y - y_model
    chi2 = np.sum((resid / y_model)**2)                      # chi-squared; estimates error in data
    chi2_red = chi2 / dof                                    # reduced chi-squared; measures goodness of fit
    s_err = np.sqrt(np.sum(resid**2) / dof)                  # standard deviation of the error

    # Fit
    ax.plot(x, y_model, "-", color=icolor, linewidth=1.5, alpha=0.4)

    x2 = np.linspace(np.min(x), np.max(x), 100)
    y2 = equation(p, x2)

    # Confidence Interval (select one)
    plot_ci_manual(t, s_err, n, x, x2, y2, ax=ax, color=icolor)
    #plot_ci_bootstrap(x, y, resid, ax=ax)

    # Prediction Interval
    pi = t * s_err * np.sqrt(1 + 1/n + (x2 - np.mean(x))**2 / np.sum((x - np.mean(x))**2))
    ax.fill_between(x2, y2 + pi, y2 - pi, color="None", linestyle="--")
    ax.plot(x2, y2 - pi, "--", color=icolor, alpha=0.3)#, label="95% Prediction Limits")
    ax.plot(x2, y2 + pi, "--", color=icolor, alpha=0.3)

    ax.scatter(exes, whys, color=subcolor, alpha=0.5, s=40, label=lab)
def set_axis_style(ax, labels):
    """Configure *ax* for a violin plot: one tick per label, centered."""
    ax.get_xaxis().set_tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(1, len(labels) + 1))
    ax.set_xticklabels(labels)
    ax.set_xlim(0.25, len(labels) + 0.75)
    #ax.set_xlabel('Sample name')
# Load the cleaned lymphedema spreadsheets and derive convenience columns.
xls = pd.ExcelFile(in_file)

ptctrl_df = pd.read_excel(xls, 'patient_control_clean')
prog_df = pd.read_excel(xls, 'progression_clean')

ptctrl_df['Arm T1 (sec)'] = ptctrl_df['Arm T1 (ms)'] / 1000  # ms -> s

ctrl_df = ptctrl_df[ptctrl_df['Group'] == 'Control']
pt_df = ptctrl_df[ptctrl_df['Group'] == 'BCRL']

## violin plots for control vs patient

fig_names = [os.path.join(out_folder, 'ptctrl_violin_age.pdf'),
             os.path.join(out_folder, 'ptctrl_violin_bmi.pdf'),
             os.path.join(out_folder, 'ptctrl_violin_armt2.pdf'),
             os.path.join(out_folder, 'ptctrl_violin_armt1.pdf'),
             os.path.join(out_folder, 'ptctrl_violin_fwfrac.pdf'),
             ]

ylabs = ['Age (years)', 'BMI ($kg/m^2$)', 'Arm T2 (s)', 'Arm T1 (s)', 'Fat-water fraction']
colnames = ['Age', 'BMI', 'Arm T2 (sec)', 'Arm T1 (sec)', 'Fat/Wat standard slice']
yscales = [(15, 85), (15, 45), None, None, None]  # None -> autoscale

# One control-vs-BCRL violin figure per measurement column.
for fig_name, ylab, colname, yscale in zip(fig_names, ylabs, colnames, yscales):
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 8))
    #ax1.set_title('BMI')
    ax.set_ylabel(ylab)
    data = [list(ctrl_df[colname]), list(pt_df[colname])]
    x_labs = ['Control', 'BCRL']
    parts1 = ax.violinplot(data, showmeans=True, showmedians=True)
    set_axis_style(ax, x_labs)

    med_col = 'cornflowerblue'
    mean_col = 'gray'
    med_style = 'dashed'
    mean_style = 'solid'
    lwdth = 1

    custom_lines = [Line2D([0], [0], color=med_col, lw=lwdth, linestyle=med_style),
                    Line2D([0], [0], color=mean_col, lw=lwdth, linestyle=mean_style)]
    ax.legend(custom_lines, ['Median', 'Mean'])

    if yscale:
        ax.set_ylim(yscale[0], yscale[1])

    for parts in [parts1]:
        for pc in parts['bodies']:
            pc.set_facecolor('green')
            pc.set_edgecolor('black')
            pc.set_alpha(0.2)

        parts['cbars'].set_color('black')
        parts['cmaxes'].set_color('black')
        parts['cmins'].set_color('black')
        parts['cmedians'].set_color(med_col)
        parts['cmeans'].set_color(mean_col)

        parts['cmedians'].set_linestyle(med_style)
        parts['cmeans'].set_linestyle(mean_style)
        parts['cmedians'].set_linewidth(lwdth)
        parts['cmeans'].set_linewidth(lwdth)

    plt.tight_layout()
    plt.savefig(fig_name, dpi=400, format='pdf')
## violin plots for progressed vs non progressed

progressed_df = prog_df[prog_df['Progressed'] == 1]
unprogressed_df = prog_df[prog_df['Progressed'] == 0]

fig_names = [os.path.join(out_folder, 'prognonprog_violin_age.pdf'),
             os.path.join(out_folder, 'prognonprog_violin_bmi.pdf'),
             os.path.join(out_folder, 'prognonprog_violin_ln.pdf'),
             os.path.join(out_folder, 'prognonprog_violin_voldiff.pdf'),
             os.path.join(out_folder, 'prognonprog_violin_ldex.pdf'),
             os.path.join(out_folder, 'prognonprog_violin_tdc.pdf')
             ]

ylabs = ['Age (years)', 'BMI ($kg/m^2$)', 'Lymph nodes removed', 'Volume difference (%)', 'L-Dex ratio', 'TDC involved']
colnames = ['Age', 'BMI', '# LNs removed', '% Vol Diff Perometer', 'L-Dex', 'TDC - Upper Arm Avg (Involved)']
#yscales = [(15, 85), (15, 45), None, None, None]

# One progressed-vs-unprogressed violin figure per measurement column.
# NOTE(review): `yscale` below is NOT reassigned in this loop — it reuses the
# leftover value from the previous loop (None on its last iteration), so the
# `if yscale:` branch presumably never fires here.  Confirm intent.
for fig_name, ylab, colname in zip(fig_names, ylabs, colnames):
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 8))
    #ax1.set_title('BMI')
    ax.set_ylabel(ylab)
    data = [list(unprogressed_df[colname].dropna()), list(progressed_df[colname].dropna())]
    x_labs = ['Unprogressed', 'Progressed']
    parts1 = ax.violinplot(data, showmeans=True, showmedians=True)
    set_axis_style(ax, x_labs)

    med_col = 'green'
    mean_col = 'gray'
    med_style = 'dashed'
    mean_style = 'solid'
    lwdth = 1

    custom_lines = [Line2D([0], [0], color=med_col, lw=lwdth, linestyle=med_style),
                    Line2D([0], [0], color=mean_col, lw=lwdth, linestyle=mean_style)]
    ax.legend(custom_lines, ['Median', 'Mean'])

    if yscale:
        ax.set_ylim(yscale[0], yscale[1])

    for parts in [parts1]:
        for pc in parts['bodies']:
            pc.set_facecolor('cornflowerblue')
            pc.set_edgecolor('black')
            pc.set_alpha(0.2)

        parts['cbars'].set_color('black')
        parts['cmaxes'].set_color('black')
        parts['cmins'].set_color('black')
        parts['cmedians'].set_color(med_col)
        parts['cmeans'].set_color(mean_col)

        parts['cmedians'].set_linestyle(med_style)
        parts['cmeans'].set_linestyle(mean_style)
        parts['cmedians'].set_linewidth(lwdth)
        parts['cmeans'].set_linewidth(lwdth)

    plt.tight_layout()
    plt.savefig(fig_name, dpi=400, format='pdf')
# special
# Four-way fat-water-fraction violin: uninvolved vs involved arm within both
# the unprogressed and progressed groups.
# NOTE(review): `yscale` here is still the stale value from the earlier loop
# (see note above) — confirm whether a fixed y-limit was intended.
fig_name = os.path.join(out_folder, 'prognonprog_violin_fwfrac.pdf')
ylab = 'Fat-water fraction'

fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 8))
#ax1.set_title('BMI')
ax.set_ylabel(ylab)
data = [list(unprogressed_df['Fat/Water ratio (Uninvolved)'].dropna()), list(unprogressed_df['Fat/Water ratio (Involved)'].dropna()),
        list(progressed_df['Fat/Water ratio (Uninvolved)'].dropna()), list(progressed_df['Fat/Water ratio (Involved)'].dropna())]
x_labs = ['Unprogressed\n(Uninvolved)', 'Unprogressed\n(Involved)',
          'Progressed\n(Uninvolved)', 'Progressed\n(Involved)']
parts1 = ax.violinplot(data, showmeans=True, showmedians=True)
set_axis_style(ax, x_labs)

med_col = 'green'
mean_col = 'gray'
med_style = 'dashed'
mean_style = 'solid'
lwdth = 1

custom_lines = [Line2D([0], [0], color=med_col, lw=lwdth, linestyle=med_style),
                Line2D([0], [0], color=mean_col, lw=lwdth, linestyle=mean_style)]
ax.legend(custom_lines, ['Median', 'Mean'])

if yscale:
    ax.set_ylim(yscale[0], yscale[1])

for parts in [parts1]:
    for pc in parts['bodies']:
        pc.set_facecolor('cornflowerblue')
        pc.set_edgecolor('black')
        pc.set_alpha(0.2)

    parts['cbars'].set_color('black')
    parts['cmaxes'].set_color('black')
    parts['cmins'].set_color('black')
    parts['cmedians'].set_color(med_col)
    parts['cmeans'].set_color(mean_col)

    parts['cmedians'].set_linestyle(med_style)
    parts['cmeans'].set_linestyle(mean_style)
    parts['cmedians'].set_linewidth(lwdth)
    parts['cmeans'].set_linewidth(lwdth)

plt.tight_layout()
plt.savefig(fig_name, dpi=400, format='pdf')
| StarcoderdataPython |
3239867 | <reponame>huykingsofm/FileTransmitter<filename>src/sft/qsft/server.py<gh_stars>1-10
import os
from hks_pylib.logger import Display
from hks_pylib.logger.standard import StdUsers
from hks_pylib.cryptography.ciphers.hkscipher import HKSCipher
from hks_pylib.cryptography.ciphers.symmetrics import NoCipher
from hks_pylib.logger.logger_generator import InvisibleLoggerGenerator, LoggerGenerator
from sft.listener import SFTListener
from sft.qsft.definition import DEFAULT_ADDRESS
from sft.protocol.definition import SFTProtocols, SFTRoles
class QSFTServer(object):
    """One-shot SFT server: listens, accepts a single client, runs the
    transfer in the requested role, and tears everything down."""

    def __init__(self,
            address: tuple = DEFAULT_ADDRESS,
            cipher: HKSCipher = None,
            logger_generator: LoggerGenerator = None,
            display: dict = None
            ) -> None:
        # None sentinels instead of object defaults: the original evaluated
        # NoCipher()/InvisibleLoggerGenerator()/{StdUsers.DEV: Display.ALL}
        # once at def time, so every instance shared the same (mutable)
        # default objects.  The effective defaults are unchanged.
        if cipher is None:
            cipher = NoCipher()
        if logger_generator is None:
            logger_generator = InvisibleLoggerGenerator()
        if display is None:
            display = {StdUsers.DEV: Display.ALL}
        self._listener = SFTListener(
            cipher=cipher,
            address=address,
            logger_generator=logger_generator,
            display=display
        )

    def config(self, role: SFTRoles, **kwargs):
        """Forward scheme configuration options for the given SFT role."""
        self._listener.session_manager().get_session(
            SFTProtocols.SFT,
            role
        ).scheme().config(**kwargs)

    def _serve(self, role: SFTRoles):
        # Shared by send()/receive(): accept exactly one connection, run the
        # SFT protocol in `role`, return the result, and close everything.
        self._listener.listen()
        self._server = self._listener.accept()
        self._listener.close()
        result = self._server.wait_result(SFTProtocols.SFT, role)
        self._server.close()
        return result

    def send(self):
        """Serve one client as the SENDER side; return the transfer result."""
        return self._serve(SFTRoles.SENDER)

    def receive(self):
        """Serve one client as the RECEIVER side; return the transfer result."""
        return self._serve(SFTRoles.RECEIVER)
| StarcoderdataPython |
1795469 | <gh_stars>0
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import os
import platform
import re
import subprocess
import time
import traceback
"""
Create conda environment with desired python and packages
"""
def create_conda_env(conda_activate, env_name, python, packages=(), channels=''):
    """Recreate conda environment `env_name` with the given python/packages.

    Any existing environment with the same name is removed first.
    `conda_activate` is the shell prefix that activates base conda (may be '').
    """
    # Tuple default instead of the original mutable list default;
    # ' '.join accepts any iterable of strings, so behaviour is unchanged.
    packages_list = ' '.join(packages)
    format_print(f'Setup conda {env_name} environment')
    run_command(f'{conda_activate}conda remove -q -y --name {env_name} --all')
    run_command(f'{conda_activate}conda create -q -y -n {env_name} python={python} {packages_list} {channels}')
"""
Create list of packages required for build and test from conda recipe
"""
def get_sdc_env(conda_activate, sdc_src, sdc_recipe, python, numpy, channels):
    """Render the sdc conda recipe and return its build/test package lists.

    Renders `sdc_recipe` with conda-build (in a throw-away 'sdc_render'
    environment) to `sdc_recipe_render.yaml`, then extracts the resolved
    requirements into two de-duplicated lists:
    {'build': [...], 'test': [...]}.
    """
    def create_env_list(packages, exclude=''):
        # De-duplicate package specs and normalise them to 'name=version'
        # (or bare 'name' when no exact version is pinned).
        env_list = []
        env_set = set()

        for item in packages:
            package = re.search(r"[\w-]+" , item).group()
            version = ''
            # Only pin exact versions; '<='/'>=' range constraints are dropped.
            if re.search(r"\d+\.[\d\*]*\.?[\d\*]*", item) and '<=' not in item and '>=' not in item:
                version = '={}'.format(re.search(r"\d+\.[\d\*]*\.?[\d\*]*", item).group())
            # NOTE: `package not in exclude` is a substring test on the
            # exclude string, not set membership — presumably intentional
            # for the single 'vs2017_win-64' exclude; verify before reuse.
            if package not in env_set and package not in exclude:
                env_set.add(package)
                env_list.append(f'{package}{version}')
        return env_list

    # Local import: ruamel_yaml is only needed by this helper.
    from ruamel_yaml import YAML
    yaml=YAML()
    sdc_recipe_render = os.path.join(sdc_src, 'sdc_recipe_render.yaml')

    # Create environment with conda-build
    sdc_render_env = 'sdc_render'
    sdc_render_env_activate = get_activate_env_cmd(conda_activate, sdc_render_env)

    format_print('Render sdc build and test environment using conda-build')
    create_conda_env(conda_activate, sdc_render_env, python, ['conda-build'])
    run_command('{} && {}'.format(sdc_render_env_activate,
                                  ' '.join([f'conda render --python={python}',
                                            f'--numpy={numpy}',
                                            f'{channels} -f {sdc_recipe_render} {sdc_recipe}'])))

    # Parse the rendered recipe to get the fully-resolved requirement lists.
    with open(sdc_recipe_render, 'r') as recipe:
        data = yaml.load(recipe)

    build = data['requirements']['build']
    host = data['requirements']['host']
    run = data['requirements']['run']
    test = data['test']['requires']

    return {'build': create_env_list(build + host + run, 'vs2017_win-64'),
            'test': create_env_list(run + test)}
"""
Return list of conda and wheel packages in build_output folder
"""
def get_sdc_build_packages(build_output):
    """Return paths of sdc conda/wheel packages under `build_output`.

    Looks in the platform-specific subdirectory (win-64/linux-64/osx-64)
    for files named like sdc*.tar.bz2 or sdc*.whl.

    Raises:
        RuntimeError: on an unsupported platform.
    """
    if platform.system() == 'Windows':
        os_dir = 'win-64'
    elif platform.system() == 'Linux':
        os_dir = 'linux-64'
    elif platform.system() == 'Darwin':
        os_dir = 'osx-64'
    else:
        # Previously an unsupported platform fell through and crashed later
        # with UnboundLocalError on os_dir; fail early with a clear message.
        raise RuntimeError('Unsupported platform: {}'.format(platform.system()))

    sdc_packages = []
    sdc_build_dir = os.path.join(build_output, os_dir)
    for item in os.listdir(sdc_build_dir):
        item_path = os.path.join(sdc_build_dir, item)
        if os.path.isfile(item_path) and re.search(r'^sdc.*\.tar\.bz2$|^sdc.*\.whl$', item):
            sdc_packages.append(item_path)
    return sdc_packages
"""
Return platform specific activation cmd
"""
def get_activate_env_cmd(conda_activate, env_name):
    """Return the platform-specific shell command activating `env_name`."""
    activate = 'activate' if platform.system() == 'Windows' else 'source activate'
    return f'{conda_activate}{activate} {env_name}'
"""
Return platform specific conda activation cmd
"""
def get_conda_activate_cmd(conda_prefix):
    """Return the shell prefix that activates base conda.

    Empty when a conda environment is already active in this process.
    """
    if 'CONDA_PREFIX' in os.environ:
        return ''
    if platform.system() == 'Windows':
        script = os.path.join(conda_prefix, 'Scripts', 'activate.bat')
        return f'{script} && '
    script = os.path.join(conda_prefix, 'bin', 'activate')
    return f'source {script} && '
"""
Print format message with timestamp
"""
def format_print(msg, new_block=True):
    """Print `msg` with a timestamp, optionally preceded by a '=' separator."""
    if new_block:
        print('=' * 80, flush=True)
    stamp = time.strftime("%d/%m/%Y %H:%M:%S")
    print(f'{stamp}: {msg}', flush=True)
"""
Execute command
"""
def run_command(command):
    """Echo `command` with a timestamp, then execute it in a shell.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    print('=' * 80, flush=True)
    stamp = time.strftime("%d/%m/%Y %H:%M:%S")
    print(f'{stamp}: {command}', flush=True)
    print('-' * 80, flush=True)
    if platform.system() == 'Windows':
        subprocess.check_call(command, stdout=None, stderr=None, shell=True)
    else:
        subprocess.check_call(command, executable='/bin/bash', stdout=None, stderr=None, shell=True)
"""
Set environment variable
"""
def set_environment_variable(key, value):
    """Set env var `key` to `value`, appending (os.pathsep-separated) if set."""
    current = os.environ.get(key)
    if current is None:
        os.environ[key] = value
    else:
        os.environ[key] = current + os.pathsep + value
| StarcoderdataPython |
3278571 | <gh_stars>10-100
import gym
import os
import numpy as np
from gym.wrappers import Monitor
from gym import Wrapper
from gym.envs.classic_control import AcrobotEnv, PendulumEnv
class PickleableEnv(Wrapper):
    """Gym wrapper that lets (some) classic-control envs survive pickling.

    Only minimal state is pickled (`unwrapped.state`, the RNG, and an
    optional `last_u`); after unpickling, the wrapped env must be rebuilt
    with `reinitialize()`, and the next `reset()` restores the saved state.
    """

    def __init__(self, env, env_name='acrobot', **kwargs):
        super(PickleableEnv, self).__init__(env)
        self._env_name = env_name
        self._prev_saved_state = None

    def __getstate__(self):
        state = {
            'env_state': self.unwrapped.state,
            'env_np_random': self.unwrapped.np_random,
            # Tells the next reset() to restore env_state exactly once.
            'reset_state': True
        }
        if hasattr(self, 'last_u'):
            state['last_u'] = self.last_u
        return state

    def __setstate__(self, state):
        # Bug fix: __getstate__ never stores 'reset_count', so the former
        # state['reset_count'] lookup raised KeyError on every unpickle.
        self._reset_count = state.get('reset_count', 0)
        self._prev_saved_state = state

    def reinitialize(self, env, directory, video_callable=None):
        """Re-attach a freshly constructed env after unpickling."""
        assert self._prev_saved_state is not None
        super(PickleableEnv, self).__init__(env)
        self.unwrapped.np_random = self._prev_saved_state['env_np_random']

    def reset(self, **kwargs):
        """Reset; the first reset after unpickling restores the saved state."""
        if self._prev_saved_state is not None and self._prev_saved_state['reset_state']:
            init_state = self._prev_saved_state['env_state']
            self._prev_saved_state['reset_state'] = False
            # Preserve the elapsed-step counter across the restoring reset.
            prev_count = self.env._elapsed_steps
            state = super(PickleableEnv, self).reset(init_state=init_state, **kwargs)
            self.env._elapsed_steps = prev_count
            if 'last_u' in self._prev_saved_state and hasattr(self, 'last_u'):
                self.last_u = self._prev_saved_state['last_u']
            return state
        else:
            return super(PickleableEnv, self).reset(**kwargs)
class StepMonitor(Monitor):
    """gym Monitor that writes one video file prefix per reset and renames
    finished recordings so the step count appears at the start of the name.

    Also supports pickling/unpickling via the same minimal-state scheme as
    PickleableEnv (state dict + reinitialize() + state-restoring reset()).
    """

    def __init__(self, env, directory, env_name='acrobot', pickleable=True, **kwargs):
        super(StepMonitor, self).__init__(env, directory, **kwargs)
        self._env_name = env_name
        # Number of resets seen so far; used to build per-episode prefixes.
        self._reset_count = 0
        self._original_prefix = self.file_prefix
        self.file_prefix = self._original_prefix + f"_{self._reset_count}"
        self._previous_file_prefix = None
        self._prev_saved_state = None
        self._experiment_dir = directory
        # True until the first step(); prevents renaming before any episode ran.
        self._start = True
        self._pickleable = pickleable

    def reset_video_recorder(self):
        """Rotate the recorder and prefix; prepend the step count to the
        files of the episode that just finished."""
        super(StepMonitor, self).reset_video_recorder()
        steps_taken = 0
        # Different gym envs expose the step counter under different names.
        if hasattr(self.env, '_elapsed_steps'):
            steps_taken = self.env._elapsed_steps
        elif hasattr(self.env, 'step_count'):
            steps_taken = self.env.step_count

        # add num_steps to beginning of video
        if steps_taken > 0 and not self._start:
            for fname in os.listdir(self._experiment_dir):
                if fname.startswith(self._previous_file_prefix):
                    full_src_fname = os.path.join(self._experiment_dir, fname)
                    full_dst_fname = os.path.join(self._experiment_dir, f'{steps_taken}_{fname}')
                    os.rename(full_src_fname, full_dst_fname)

        self._reset_count += 1
        self._previous_file_prefix = self.file_prefix
        self.file_prefix = self._original_prefix + f"_{self._reset_count}"

    def step(self, action):
        self._start = False
        return super(StepMonitor, self).step(action)

    def __getstate__(self):
        # Pickle only minimal, reconstructible state (see PickleableEnv).
        state = {
            'reset_count': self._reset_count,
            'original_prefix': self._original_prefix,
            'env_np_random': self.unwrapped.np_random,
            'reset_state': True
        }
        if self._pickleable:
            state['env_state'] = self.unwrapped.state
        if hasattr(self, 'last_u'):
            state['last_u'] = self.last_u
        return state

    def __setstate__(self, state):
        # The full monitor is rebuilt later via reinitialize().
        self._reset_count = state['reset_count']
        self._original_prefix = state['original_prefix']
        self._prev_saved_state = state

    def reinitialize(self, env, directory, video_callable=None):
        """Re-attach a freshly constructed env/monitor after unpickling."""
        assert self._prev_saved_state is not None
        super(StepMonitor, self).__init__(env, directory, video_callable=video_callable)
        self._original_prefix = self.file_prefix
        self.file_prefix = self._original_prefix + f"_{self._reset_count}"
        self._experiment_dir = directory
        self.unwrapped.np_random = self._prev_saved_state['env_np_random']

    def reset(self, **kwargs):
        """Reset; the first reset after unpickling restores the saved state.

        NOTE(review): when pickleable=False, 'env_state' is absent from the
        saved dict, so this branch would raise KeyError — presumably such
        monitors are never state-restored; confirm against callers.
        """
        if self._prev_saved_state is not None and self._prev_saved_state['reset_state']:
            init_state = self._prev_saved_state['env_state']
            self._prev_saved_state['reset_state'] = False
            # Preserve the elapsed-step counter across the restoring reset.
            prev_count = self.env._elapsed_steps
            state = super(StepMonitor, self).reset(init_state=init_state, **kwargs)
            self.env._elapsed_steps = prev_count
            if 'last_u' in self._prev_saved_state and hasattr(self, 'last_u'):
                self.last_u = self._prev_saved_state['last_u']
            return state
        else:
            return super(StepMonitor, self).reset(**kwargs)
class ContinuablePendulumEnv(PendulumEnv):
    """PendulumEnv variant that starts near the bottom, can be reset to an
    explicit state, and terminates once the pendulum is nearly upright."""

    def __init__(self, *args, **kwargs):
        super(ContinuablePendulumEnv, self).__init__(*args, **kwargs)

    def reset(self, init_state=None):
        """Reset to `init_state` if given, else sample a hanging-down start.

        The start angle magnitude is drawn from [3*pi/4, pi] and mirrored to
        either side with probability 0.5; angular velocity from [-1, 1].
        """
        low = np.array([3 * np.pi / 4, -1])
        high = np.array([np.pi, 1])
        if init_state is None:
            self.state = self.np_random.uniform(low=low, high=high)
            sign = -1 if self.np_random.uniform() > 0.5 else 1
            self.state[0] *= sign
        else:
            self.state = init_state
        self.last_u = None
        return self._get_obs()

    def step(self, u):
        """Step the base env, then end the episode (reward 0) when
        cos(theta) > 0.75, i.e. the pendulum is close to upright."""
        obs, reward, terminal, info = super(ContinuablePendulumEnv, self).step(u)
        cos_state = np.cos(self.state[0])
        done = cos_state > 0.75
        # Removed a leftover debug print("here") that fired on every success.
        if done:
            reward = 0
        self.done = done
        terminal = done
        return obs, reward, terminal, info
class ContinuableAcrobotEnv(AcrobotEnv):
    """AcrobotEnv that can optionally be reset to a caller-supplied state."""

    def __init__(self, *args, **kwargs):
        super(ContinuableAcrobotEnv, self).__init__(*args, **kwargs)

    def reset(self, init_state=None):
        """Reset to `init_state` when provided, else to a small random state."""
        if init_state is not None:
            self.state = init_state
        else:
            self.state = self.np_random.uniform(low=-0.1, high=0.1, size=(4,))
        return self._get_ob()
| StarcoderdataPython |
3280544 | <filename>code/branch_and_bound/time_opti.py<gh_stars>0
###########################################################################
# In main directory
# Usage: '$ python code/branch_and_bound/time_opti.py'
#
# Check '$ python code/branch_and_bound/time_opti.py -h' for help
###########################################################################
# Imports
###########################################################################
# Standard library imports
import __init__
import os
import copy
import time
import argparse
import numpy as np
from numba import njit
from datetime import datetime
from queue import PriorityQueue
# Local imports
from code.data_input.input_final import get_input_loader
from code.branch_and_bound.get_input import get_input
###########################################################################
# Code
###########################################################################
# Module-level configuration for the branch-and-bound solver.
# NOTE(review): `global` at module scope is a no-op — kept for byte-compat.
global OUTPUT_DIR
OUTPUT_DIR = os.path.join(os.getcwd(), "output", "branch_and_bound")
if os.path.exists(OUTPUT_DIR) is False:
    os.mkdir(OUTPUT_DIR)
# Sentinel for forbidden edges in the cost matrix.
INF = np.infty
N = 5 # Number of exhibits
def cmd_line_parser():
    """Parse the command-line options for the branch-and-bound driver."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--ext", dest="ext", type=str, default="",
        help="Add a prefix to the plots, summary_data and summary_log "
        "before saving it",
    )
    parser.add_argument(
        "--tcn", dest="tc_number", type=int, default=1, help="Test case number"
    )
    parser.add_argument(
        "-d", dest="output_dir", type=str, default="BnB",
        help="Output folder name",
    )
    return parser.parse_args()
def make_output_dir(folder_name, OUTPUT_DIR=OUTPUT_DIR):
output_dir = os.path.join(OUTPUT_DIR, folder_name)
if os.path.exists(output_dir) is False:
os.mkdir(output_dir)
return output_dir
def function_call_counter(func):
"""
Count number of times a function was called. To be used as decorator.
"""
def wrapper(*args, **kwargs):
wrapper.calls += 1
return func(*args, **kwargs)
wrapper.calls = 0
return wrapper
def function_timer(func):
    """
    Calculates the time required for function execution.

    The duration of the most recent call is exposed as `wrapper.time_taken`.
    """
    import functools

    # functools.wraps keeps the wrapped function's metadata intact.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        begin = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        wrapper.time_taken = end - begin
        print("Total time taken in : ", func.__name__, wrapper.time_taken)
        return result
    wrapper.time_taken = 0
    return wrapper
class Node:
    """
    State Space Tree nodes (exhibits)
    """

    def __init__(self, tour, reduced_matrix, cost, Id, level):
        # Edges of the state-space tree taken so far; used to trace the
        # final path once the answer is found.
        self.tour = copy.deepcopy(tour)
        self.reduced_matrix = copy.deepcopy(reduced_matrix)
        self.cost = cost      # lower bound of any tour through this node
        self.Id = Id          # current vertex number
        self.level = level    # number of nodes visited so far

    def __gt__(self, other):
        return self.cost > other.cost

    def __lt__(self, other):
        return self.cost < other.cost

    def debug(self, with_tour=False):
        """Print a one-line summary of the node, optionally with its tour."""
        prefix = "Level = {} | Cost = {} | Node = {}".format(
            self.level, self.cost, self.Id
        )
        if with_tour:
            print(prefix + " | Tour = {}".format(self.tour))
        else:
            print(prefix)
def CreateNode(parent_matrix, tour, level, i, j):
    """
    Allocate a new search-tree node for the move `i -> j`.

    Args:
        parent_matrix (N*N matrix): penalty matrix of the parent node
        tour (list of [i, j]): edges visited up to the parent node
        level (int): total number of nodes visited so far
        i (int): vertex we come from
        j (int): vertex we go to

    Returns:
        Node
    """
    node = Node(tour, [], 0, 0, 0)
    node.reduced_matrix = copy.deepcopy(parent_matrix)
    if level != 0:  # the root node has no incoming edge
        node.tour.append([i, j])
        # Taking edge (i, j) forbids leaving i again (row i) and
        # entering j again (column j).
        for k in range(N):
            node.reduced_matrix[i][k] = INF
            node.reduced_matrix[k][j] = INF
    # Returning to the start node (0) from j is no longer allowed.
    node.reduced_matrix[j][0] = INF
    node.level = level
    node.Id = j
    return node
@function_call_counter
# @njit
def matrix_reduction(node):
    """Reduce `node.reduced_matrix` in place and store the resulting lower
    bound (the total of removed row/column minima) in `node.cost`."""
    matrix = node.reduced_matrix
    # Row reduction: find each row's minimum, then subtract it so every
    # row contains at least one zero.
    row = INF * np.ones(N)
    for i in range(N):
        for j in range(N):
            row[i] = min(row[i], matrix[i][j])
    for i in range(N):
        for j in range(N):
            if matrix[i][j] != INF and row[i] != INF:
                matrix[i][j] -= row[i]

    # Column reduction: same idea on the columns.
    col = INF * np.ones(N)
    for i in range(N):
        for j in range(N):
            col[j] = min(col[j], matrix[i][j])
    for i in range(N):
        for j in range(N):
            if matrix[i][j] != INF and col[j] != INF:
                matrix[i][j] -= col[j]

    # The lower bound is the total reduction that was applied.
    cost = 0
    for i in range(N):
        if row[i] != INF:
            cost += row[i]
        if col[i] != INF:
            cost += col[i]
    node.cost = cost
@function_call_counter
@njit
def matrix_reduction_generic(reduced_matrix):
    # Numba-jitted matrix reduction: subtracts each row's and column's
    # minimum so every row/column has a zero, and returns the lower bound
    # (sum of subtracted minima) together with the reduced matrix.
    # Kept as explicit index loops on purpose — njit compiles these well.
    # reduce each row so that there must be at least one zero in each row
    row = INF * np.ones(N)
    # `row[i]` contains minimum in row `i`
    for i in range(N):
        for j in range(N):
            if reduced_matrix[i][j] < row[i]:
                row[i] = reduced_matrix[i][j]
    # reduce the minimum value from each element in each row
    for i in range(N):
        for j in range(N):
            if reduced_matrix[i][j] != INF and row[i] != INF:
                reduced_matrix[i][j] -= row[i]

    # reduce each column so that there must be at least one zero in each column
    col = INF * np.ones(N)
    # `col[j]` contains minimum in col `j`
    for i in range(N):
        for j in range(N):
            if reduced_matrix[i][j] < col[j]:
                col[j] = reduced_matrix[i][j]
    # reduce the minimum value from each element in each column
    for i in range(N):
        for j in range(N):
            if reduced_matrix[i][j] != INF and col[j] != INF:
                reduced_matrix[i][j] -= col[j]

    # get the lower bound on the path starting at the current minimum node
    cost = 0
    for i in range(N):
        if row[i] != INF:
            cost += row[i]
        if col[i] != INF:
            cost += col[i]
    return cost, reduced_matrix
@function_timer
def solve(cost_matrix, is_tour_stored=False):
    """Branch-and-bound TSP solver (best-first search on lower bounds).

    Args:
        cost_matrix: N*N penalty matrix; INF marks forbidden edges.
        is_tour_stored: if True, also record the partial tour of every
            expanded node (for later visualisation).

    Returns:
        (final Node with the optimal tour/cost, list of recorded tours).
    """
    # Create a priority queue to store live nodes of the search tree
    live_nodes = PriorityQueue()

    tour = []
    full_tour = []
    # The TSP starts from the first node, i.e., node 0
    root = CreateNode(cost_matrix, tour, 0, -1, 0)

    # get the lower bound of the path starting at node 0
    # matrix_reduction(root)
    root.cost, root.reduced_matrix = matrix_reduction_generic(
        root.reduced_matrix
    )

    live_nodes.put((root.cost, root))  # add root to the list of live nodes

    while not live_nodes.empty():
        # a live node with the least estimated cost is selected
        minimum = live_nodes.get()[1]
        minimum.debug(with_tour=True)  # for debugging purposes
        if is_tour_stored:
            full_tour.append(copy.deepcopy(minimum.tour))
        i = minimum.Id  # `i` stores the current node number

        # if all nodes are visited; termination of loop
        if minimum.level == N - 1:
            minimum.tour.append([i, 0])  # return to starting node
            full_tour.append(minimum.tour)
            # print("Returning minimum node of type", type(minimum))
            return minimum, full_tour  # final node

        # do for each child of min
        # `(i, j)` forms an edge in a space tree
        for j in range(N):
            if minimum.reduced_matrix[i][j] != INF:
                # create a child node and calculate its cost
                branch_node = CreateNode(
                    minimum.reduced_matrix,
                    minimum.tour,
                    minimum.level + 1,
                    i,
                    j,
                )

                # calculate the cost
                # matrix_reduction(branch_node)
                (
                    branch_node.cost,
                    branch_node.reduced_matrix,
                ) = matrix_reduction_generic(branch_node.reduced_matrix)
                # Child bound = child reduction + parent bound + edge cost.
                branch_node.cost += minimum.cost + minimum.reduced_matrix[i][j]

                # # For debugging
                # print(
                #     "Branch node cost: ",
                #     branch_node.cost
                # )

                # added the child to list of live nodes
                live_nodes.put((branch_node.cost, branch_node))
        del minimum
def print_tour(node):
    """Pretty-print a complete tour; print the raw edge list otherwise."""
    if node.level != N - 1:
        # Tour is incomplete — just dump the edges collected so far.
        print(node.tour)
        return
    print("\nThe optimal tour is:")
    for i in range(N):
        print(node.tour[i][0], "-->", node.tour[i][1])
def print_summary(
    output_dir, node, full_tour=None, tc_name=None, ext="", coordi=None
):
    """
    Prints the solver summary, stores it in a `.log` file, and saves the
    run's data (timings, cost, tour, coordinates) to an `.npz` file.

    Parameters:
    -----------
    output_dir : directory for the log and npz files
    node : final Node returned by solve()
    full_tour : optional list of partial tours recorded by solve()
    tc_name / ext / coordi : test-case name, filename prefix, coordinates
    """
    if full_tour is None:
        # Avoid the original mutable default argument.
        full_tour = []
    logname = os.path.join(output_dir, "BnB_summary.log")
    if os.path.exists(logname):
        os.remove(logname)
    # `with` replaces the old try/finally and also avoids referencing an
    # unbound `outputFile` in finally when open() itself fails.
    with open(logname, "a") as outputFile:

        def printing(text):
            # Echo to stdout and mirror into the log file.
            print(text)
            if outputFile:
                outputFile.write(f"{text}\n")

        time_taken, reduction_calls = (
            solve.time_taken,
            matrix_reduction_generic.calls,
        )

        printing("\n===================================================")
        printing("Solver Summary:")
        printing("===================================================")
        dt_string = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
        # COMPUTERNAME only exists on Windows; the original raised KeyError
        # on other platforms.
        pc_name = os.environ.get("COMPUTERNAME", "unknown")
        printing(f"\nRun on: {dt_string} | PC: {pc_name}")
        if tc_name is not None:
            printing(f"Test case name: {tc_name}")
        printing(f"\nSolved in: {time_taken} s")
        printing(f"Number of reduction function calls: {reduction_calls}")
        # NOTE(review): the tour lines below use print(), not printing(),
        # so they are not mirrored into the log file — kept as-is.
        print("\nThe optimal tour is:")
        for i in range(N):
            print(node.tour[i][0], "-->", node.tour[i][1])
        print("\nTotal cost is {}".format(node.cost))
        printing("\n===================================================\n")
        print(f"Log file saved at: {logname}")
        if True:
            fname = os.path.join(output_dir, f"{ext}BnB_results.npz")
            np.savez(
                fname,
                time_taken=time_taken,
                func_calls=reduction_calls,
                opt_cost=node.cost,
                tour=node.tour,
                full_tour=np.array(full_tour, dtype="object"),
                coordi=coordi,
            )
            print(f"\nSummary data saved at: {fname}")
def main():
    """Driver: load a test case, build the cost matrix, solve, and report."""
    # Read data off of standard library for symmetric
    loader = get_input_loader("Choose_TC_Sym_NPZ.txt", False)
    print("Solving symmetric problem...")

    # # Read data off of standard library for asymmetric
    # loader = get_input_loader('Choose_TC_Asym_NPZ.txt', False)
    # print("\nSolving asymmetric problem...")

    # Parse command line arguments
    args = cmd_line_parser()
    ext = args.ext

    # Make output directory
    output_dir = make_output_dir(args.output_dir)

    tc_number = args.tc_number
    tc_name = loader.get_test_case_name(tc_number)
    coordi = None

    # NOTE(review): cost_matrix/tc_name loaded here are immediately
    # overridden by the manual-input path below — looks like leftover
    # experiment toggling; confirm which path is intended.
    cost_matrix = loader.get_input_test_case(tc_number).get_cost_matrix()
    # COST_MATRIX = cost_matrix

    tc_name = "Manual input"
    print(tc_name)
    inf = INF
    # COST_MATRIX = np.load("data/manual/2floorcostmatrix.npy")

    # `N` is the total number of total nodes on the graph
    global N
    N = 14
    COST_MATRIX, coordi = get_input(N, fetch_coordinates=True)

    # COST_MATRIX = [
    #     [INF, 10, 8, 9, 7],
    #     [10, INF, 10, 5, 6],
    #     [8, 10, INF, 8, 9],
    #     [9, 5, 8, INF, 6],
    #     [7, 6, 9, 6, INF],
    # ] # optimal cost is 34
    # COST_MATRIX = [
    #     [INF, 3, 1, 5, 8],
    #     [3, INF, 6, 7, 9],
    #     [1, 6, INF, 4, 2],
    #     [5, 7, 4, INF, 3],
    #     [8, 9, 2, 3, INF]
    # ] # optimal cost is 16
    # COST_MATRIX = [
    #     [INF, 2, 1, INF],
    #     [2, INF, 4, 3],
    #     [1, 4, INF, 2],
    #     [INF, 3, 2, INF]
    # ] # optimal cost is 8

    COST_MATRIX = np.array(COST_MATRIX)
    N = len(COST_MATRIX)
    # print(COST_MATRIX)

    # Person cannot travel from one node to the same node
    for i in range(N):
        COST_MATRIX[i][i] = INF

    # Person cannot travel on restricted edges
    for i in range(N):
        for j in range(N):
            if COST_MATRIX[i][j] == 0:
                COST_MATRIX[i][j] = INF

    print("Number of nodes are {}".format(N))
    # print(COST_MATRIX)

    final_node, full_tour = solve(COST_MATRIX, is_tour_stored=True)
    # print(type(final_node))
    optimal_cost = final_node.cost
    # print_tour(final_node)

    if True:
        print_summary(
            output_dir,
            final_node,
            full_tour=full_tour,
            tc_name=tc_name,
            ext=ext,
            coordi=coordi,
        )
    # print("Total cost is {}".format(optimal_cost))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3268946 | <filename>src/error.py
class Error(Exception):
    """Base class for exceptions in this module."""
    pass


class NoReader(Error):
    """Exception raised when no readers are plugged into the computer.

    Attributes:
        message -- explanation of the error
    """

    def __init__(self, message):
        # Pass the message to Exception so str(exc) and tracebacks show it
        # (previously Exception.__init__ was never called, so str(exc) == '').
        super().__init__(message)
        self.message = message


class NoCommunication(Error):
    """Exception raised when the communication can't be established.

    Attributes:
        message -- explanation of the error
    """

    def __init__(self, message):
        super().__init__(message)
        self.message = message


class OptionOutOfRange(Error):
    """Exception raised on access to an element not in `option.options`.

    Attributes:
        message -- explanation of the error
    """

    def __init__(self, message):
        super().__init__(message)
        self.message = message


class InstructionFailed(Error):
    """Exception raised when the instruction failed.

    Attributes:
        message -- explanation of the error
    """

    def __init__(self, message):
        super().__init__(message)
        self.message = message
| StarcoderdataPython |
3317613 | #!/usr/bin/python -Wall
# ================================================================
# <NAME>
# <EMAIL>
# 2008-02-05
# ================================================================
from __future__ import division # 1/2 = 0.5, not 0.
import sackmat_m
from math import *
# ----------------------------------------------------------------
basis = [[1, -1], [1, 2]]
v = [3, 4]
c = sackmat_m.basis_coeffs(v, basis)
print "v = ", v
print "c = ", c
w = sackmat_m.linear_combination(c, basis)
print "w = ", w
print
# ----------------------------------------------------------------
s = 1.0/sqrt(2.0)
basis = [[s, s], [s, -s]]
n = len(basis)
for i in range(0, n):
ui = basis[i]
for j in range(0, n):
uj = basis[j]
uiuj = sackmat_m.vecdot(ui, uj)
print " <u[%d],u[%d]>=%11.7f" % (i, j, uiuj),
print
print
v = [3, 4]
c = sackmat_m.basis_coeffs_on(v, basis)
print "v = ", v
print "c = ", c
w = sackmat_m.linear_combination(c, basis)
print "w = ", w
| StarcoderdataPython |
3314062 | <reponame>GBrachetta/guillermo
from django.contrib import admin
from .models import Event
# Register your models here.
class EventAdmin(admin.ModelAdmin):
    """
    Admin configuration for Event: columns shown in the change list,
    ordered by event date.
    """
    list_display = (
        "name",
        "venue",
        "programme",
        "date",
        "time",
        "event_url",
    )
    ordering = ("date",)


# Make Event editable in the Django admin with the configuration above.
admin.site.register(Event, EventAdmin)
| StarcoderdataPython |
3326969 | from __future__ import unicode_literals
import multiprocessing, time
from gensim.models import Word2Vec
from gensim.models import Word2Vec as WV_model
from gensim.models.word2vec import LineSentence
from gensim import utils
class MyCorpus(object):
    """An iterator that yields sentences (lists of str) from a corpus file."""

    def __init__(self, corpus_path='./extracted_lemmatized.txt'):
        # Parameterized path; the default preserves the original behaviour.
        self.corpus_path = corpus_path

    def __iter__(self):
        # `with` guarantees the handle is closed after every pass over the
        # corpus (the original leaked the handle returned by bare open()).
        with open(self.corpus_path) as corpus_file:
            for line in corpus_file:
                # Assume one document per line, tokens separated by whitespace.
                yield utils.simple_preprocess(line)
# Train a skip-gram word2vec model on the lemmatized corpus and save it.
# inp = "extracted.txt"
sentences = MyCorpus()
out_model = "../../../models/suhomlinskyy.lowercased.lemmatized.word2vec.500d"
size = 500 # size is the dimensionality of the feature vectors.
window = 4 # window is the maximum distance between the current and predicted word within a sentence.
sg = 1 # By default (sg=0), CBOW is used. Otherwise (sg=1), skip-gram is employed.
# cbow_mean = 1 # cbow_mean = if 0, use the sum of the context word vectors. If 1 (default), use the mean. Only applies when cbow is used.
sample = 1e-5 # sample = threshold for configuring which higher-frequency words are randomly downsampled; default is 1e-3, useful range is (0, 1e-5).
negativeSampling = 7 # negative = if > 0, negative sampling will be used, the int for negative specifies how many “noise words” should be drawn (usually between 5-20). Default is 5. If set to 0, no negative samping is used.
hs = 0 # hs = if 1, hierarchical softmax will be used for model training. If set to 0 (default), and negative is non-zero, negative sampling will be used.
# NOTE(review): `iter` shadows the built-in of the same name — harmless in
# this script, but worth renaming (e.g. epochs) if the code grows.
iter = 10
min_count = 10
workers = multiprocessing.cpu_count()
start = time.time()
# model = Word2Vec(LineSentence(inp), sg = sg, size = size, window = window, workers = workers, negative = negativeSampling, iter = iter, min_count = min_count, hs = hs, sample = sample)
model = Word2Vec(sentences = sentences, sg = sg, size = size, window = window, workers = workers, negative = negativeSampling, iter = iter, min_count = min_count, hs = hs, sample = sample)
# trim unneeded model memory = use (much) less RAM
model.init_sims(replace=True)
# Report the wall-clock training time.
print(time.time()-start)
# Sanity-check the embeddings with a few similarity queries.
sim = model.wv.similarity('сухомлинський', 'комуністичний')
print(sim)
s = model.wv.most_similar('сухомлинський')
print(s)
m = model.wv.most_similar('василь')
print(m)
lc = model.wv.most_similar(positive=['сухомлинський', 'комуністичний'])
print(lc)
model.save(out_model)
# model500 = WV_model.load('../models/honchar.lowercased.lemmatized.word2vec.FINAL.500d')
# model500.init_sims(replace=True)
# print(model500.wv.most_similar('гончар')) | StarcoderdataPython |
3313246 | <filename>freefall-archiver/freefall.py
#!/usr/bin/env python3
# pylint: disable=C0111
import re
from pathlib import Path
from urllib.parse import urljoin
from urllib.request import urlopen
# Number of comic images per generated archive page.
IMG_PER_PAGE = 10
# CSS keywords used to show/hide the Previous/Next navigation links.
CSS_VISIBLE = "visible"
CSS_HIDDEN = "hidden"
# %-format templates: (page number, first title, last title).
HTML_INDEX_ENTRY = """<a href="p%05d/index.html">%s - %s</a>"""
# (title, image file name).
HTML_PAGE_ENTRY = """%s<br/>
<img src="%s"/>"""
# (prev page no, prev visibility, separator visibility, next visibility, next page no).
HTML_PAGE_NAV = """<a href="../p%05d/index.html" style="visibility: %s">Previous</a>
<span style="visibility: %s"> - </span>
<a style="visibility: %s" href="../p%05d/index.html">Next</a>"""
# Outer page shell: (title, body paragraphs joined with </p><p>).
HTML_MAIN = u"""<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8"/>
<title>%s</title>
</head><body>
<p>%s</p>
<hr>
<a href="http://freefall.purrsia.com/grayff.htm">© http://freefall.purrsia.com</a>
</body>
</html>"""
def main():
    """Archive the whole comic and write the top-level index page."""
    with open("index.html", "w") as out_index:
        body = "</p><p>".join(write_pages(scan_freefall()))
        out_index.write(HTML_MAIN % ("Freefall Index", body))
def write_pages(freefall):
    """Download images and write one archive page per IMG_PER_PAGE comics.

    `freefall` is a list of (image name, title, image url) tuples in
    chronological order. Returns the index-page link entries.
    """
    result = []
    partitions = partition(freefall, IMG_PER_PAGE)
    for page in partitions:
        image_list = page[1]
        # von/bis (German: from/to) = titles of the first and last comic
        # on this page, used in the index entry and page title.
        von, bis = image_list[0][1], image_list[-1][1]
        result.append(HTML_INDEX_ENTRY % (page[0], von, bis))
        subdir = Path("p%05d" % page[0])
        subdir.mkdir(parents=True, exist_ok=True)
        for image in image_list:
            print(image[2], end='', flush=True)
            # Download each comic image into the page's directory.
            with urlopen(image[2]) as response:
                with open(subdir / image[0], "wb") as out_image:
                    out_image.write(response.read())
            print(' OK', flush=True)
        # Emit the page itself: nav + image entries + nav.
        with open(subdir / "index.html", "w") as out_page:
            out_page.write(HTML_MAIN % (
                "%s - %s" % (von, bis),
                "</p><p>".join(create_nav(page, len(partitions)))))
    return result
def partition(data, step):
    """Split `data` into (page_index, chunk) pairs of at most `step` items.

    Uses integer division for the page index — the original `i / step`
    produced float indices under Python 3.
    """
    return [(i // step, data[i:i + step]) for i in range(0, len(data), step)]
def create_nav(page, max_nav):
    """Build the HTML fragments for one archive page: nav, images, nav."""
    index = page[0]
    has_prev = index - 1 >= 0
    has_next = index + 1 < max_nav
    nav = HTML_PAGE_NAV % (
        index - 1,
        css_visibility(has_prev),
        # The separator is shown only when both links are visible.
        css_visibility(has_prev and has_next),
        css_visibility(has_next),
        index + 1,
    )
    entries = [HTML_PAGE_ENTRY % (entry[1], entry[0]) for entry in page[1]]
    return [nav] + entries + [nav]
def css_visibility(visible):
    """Map a boolean to the matching CSS visibility keyword."""
    if visible:
        return CSS_VISIBLE
    return CSS_HIDDEN
def scan_freefall():
    """Crawl the comic archive backwards via the 'Previous' links.

    Starts at the newest page and follows each page's Previous link until
    none is found. Returns (image name, title, image url) tuples in
    chronological (oldest-first) order.
    """
    result = []
    last_url = "http://freefall.purrsia.com"
    next_url = "grayff.htm"
    while next_url:
        last_url = urljoin(last_url, next_url)
        print(last_url, end='', flush=True)
        # Pages are served as Latin-1; decode before parsing.
        with urlopen(last_url) as response:
            image, image_path, title_old, title_new, next_url = parse_freefall_page(
                str(response.read(), encoding='ISO-8859-1'))
        image_url = urljoin(last_url, image_path)
        # Prefer the comment-embedded title; fall back to <title>.
        result.append((image,
                       title_old if title_old else title_new,
                       image_url))
        print(" => %s" % image_url, flush=True)
    # Crawl order is newest-first; reverse to chronological order.
    return [e for e in reversed(result)]
def parse_freefall_page(html):
    """Extract (image name, image path, old title, new title, prev link)."""
    patterns = (
        r'<img src=".*?(\w+\.gif)"',
        r'<img src="(.+\.gif)"',
        r'<!-+\s(.+)\s-+>',
        r'<title>(.+)</title>',
        r'<a href="(.+)">Previous</a>',
    )
    return tuple(find(pattern, html) for pattern in patterns)
def find(regexp, text):
    """Return the first capture group of *regexp* in *text*, searched
    case-insensitively, or None when there is no match."""
    match = re.search(regexp, text, re.I)
    return match.group(1) if match else None
# Only run the scraper when executed as a script, not when imported.
# (The original called main() unconditionally at import time.)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import os
import sys
from libs import config_libs
from libs import utils_libs
from libs import verify_libs
def main():
    """Run the four GBP policy-rule negative testcases in order.

    After any failing testcase its artifacts are cleaned up with the
    testcase name (which also logs the FAILED verdict); a final cleanup
    runs unconditionally before the results report is written.

    NOTE(review): sys.exit(1) is reached unconditionally, so the process
    exits with status 1 even when every testcase passes -- confirm whether
    that is intended.
    """
    # Run the Testcases:
    test = test_gbp_pr_neg()
    if test.test_gbp_pr_neg_1() == 0:
        test.cleanup(tc_name='TESTCASE_GBP_PR_NEG_1')
    if test.test_gbp_pr_neg_2() == 0:
        test.cleanup(tc_name='TESTCASE_GBP_PR_NEG_2')
    if test.test_gbp_pr_neg_3() == 0:
        test.cleanup(tc_name='TESTCASE_GBP_PR_NEG_3')
    if test.test_gbp_pr_neg_4() == 0:
        test.cleanup(tc_name='TESTCASE_GBP_PR_NEG_4')
    test.cleanup()
    utils_libs.report_results('test_gbp_pr_neg', 'test_results.txt')
    sys.exit(1)
class test_gbp_pr_neg(object):
    """Negative test-suite for GBP (Group-Based Policy) policy rules.

    Each ``test_gbp_pr_neg_N`` method returns 1 on PASS and 0 on FAIL;
    ``main()`` above drives them and triggers ``cleanup`` on failure.
    Written for Python 2 (it uses the legacy ``commands`` module).
    """

    # Initialize logging.
    # NOTE: this block runs once at class-definition time, so merely
    # importing this module deletes and re-creates the log file.
    logging.basicConfig(
        format='%(asctime)s [%(levelname)s] %(name)s - %(message)s',
        level=logging.WARNING)
    _log = logging.getLogger(__name__)
    cmd = 'rm /tmp/test_gbp_pr_neg.log'
    commands.getoutput(cmd)
    hdlr = logging.FileHandler('/tmp/test_gbp_pr_neg.log')
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    _log.addHandler(hdlr)
    _log.setLevel(logging.INFO)
    # The second call wins: the logger ends up at DEBUG level.
    _log.setLevel(logging.DEBUG)

    def __init__(self):
        """
        Init def
        """
        self._log.info("\n START OF GBP POLICY_RULE NEGATIVE TESTSUITE")
        self.gbpcfg = config_libs.Gbp_Config()
        self.gbpverify = verify_libs.Gbp_Verify()
        self.act_name = 'demo_pa'
        self.cls_name = 'demo_pc'
        self.rule_name = 'demo_pr'
        self._log.info('\n## Step 1: Create a PC needed for PR Testing ##')
        self.cls_uuid = self.gbpcfg.gbp_policy_cfg_all(
            1, 'classifier', self.cls_name)
        if self.cls_uuid == 0:
            self._log.info(
                "\nReqd Classifier Create Failed, hence GBP Policy Rule "
                "Negative Test Suite Run ABORTED\n")
            # Hard-exit: without the prerequisite objects no testcase can run.
            os._exit(1)
        self._log.info('\n## Step 1: Create a PA needed for PR Testing ##')
        self.act_uuid = self.gbpcfg.gbp_policy_cfg_all(
            1, 'action', self.act_name)
        if self.act_uuid == 0:
            self._log.info(
                "\nReqd Action Create Failed, hence GBP Policy Rule "
                "Negative Test Suite Run ABORTED\n")
            os._exit(1)

    def cleanup(self, tc_name=''):
        """Delete all rules/classifiers/actions; log FAILED when a testcase
        name is supplied."""
        if tc_name != '':
            self._log.info('Testcase %s: FAILED' % (tc_name))
        for obj in ['rule', 'classifier', 'action']:
            self.gbpcfg.gbp_del_all_anyobj(obj)

    def test_gbp_pr_neg_1(self):
        """Create a rule with an invalid classifier; expect failure+rollback."""
        self._log.info(
            "\n###################################################\n"
            "TESTCASE_GBP_PR_NEG_1: TO CREATE/VERIFY a POLICY RULE with "
            "INVALID PC\n"
            "TEST_STEP::\n"
            "Create Policy Rule Object with Invalid PC\n"
            "Verify PR creation failed and was rolled back\n"
            "###################################################\n")
        # Testcase work-flow starts
        self._log.info("\n## Step 1: Create Rule with Invalid PC##\n")
        if self.gbpcfg.gbp_policy_cfg_all(
                1, 'rule', self.rule_name, classifier="INVALID") != 0:
            self._log.info(
                "\n## Step 1: Create Policy Rule with Invalid Policy "
                "Classifier did NOT Fail")
            return 0
        self._log.info("\n## Step 1A: Verify Policy Rule has been rolled back")
        if self.gbpverify.gbp_policy_verify_all(
                1, 'rule', self.rule_name) != 0:
            self._log.info(
                "\n## Step 1B: Verify Policy Rule did NOT roll back")
            return 0
        self._log.info("\n## TESTCASE_GBP_PR_NEG_1: PASSED")
        return 1

    def test_gbp_pr_neg_2(self):
        """Create a rule with a valid classifier but invalid action; expect
        failure+rollback."""
        self._log.info(
            "\n#################################################\n"
            "TESTCASE_GBP_PR_NEG_2: TO CREATE/VERIFY/ POLICY RULE with "
            "VALIC PC but INVALID PA\n"
            "TEST_STEP::\n"
            "Create Policy Rule Object with Valid PC but Invalid PA\n"
            "Verify the Policy Rule creation fails and config is rolled back\n"
            "#################################################\n")
        # Testcase work-flow starts
        self._log.info(
            "\n## Step 1: Create Policy Rule with Valid PC & Invalid PA ##")
        if self.gbpcfg.gbp_policy_cfg_all(
                1,
                'rule',
                self.rule_name,
                classifier=self.cls_name,
                action='INVALID') != 0:
            self._log.info(
                "\n## Step 1: Create Policy Rule with Invalid PA did NOT Fail")
            return 0
        self._log.info("\n## Step 1A: Verify Policy Rule has been rolled back")
        if self.gbpverify.gbp_policy_verify_all(
                1, 'rule', self.rule_name) != 0:
            self._log.info(
                "\n## Step 1A: Verify Policy Rule did NOT roll back")
            return 0
        self._log.info("\n## TESTCASE_GBP_PR_NEG_2: PASSED")
        return 1

    def test_gbp_pr_neg_3(self):
        """Create a valid rule, then try to update it with an invalid PC/PA;
        expect the updates to fail and the original attributes to remain."""
        self._log.info(
            "\n################################################\n"
            "TESTCASE_GBP_PR_NEG_3: TO CREATE/UPDATE/VERIFY/ POLICY RULE "
            "with Invalid PC and PA ##\n"
            "TEST_STEP::\n"
            "Create Policy Rule with Valid PC and Valid PR\n"
            "Update the Policy Rule's PA by an Invalid PA\n"
            "Verify the Policy Rule's Update failed and config rolled back "
            "to original attr values\n"
            "Update the Policy Rule's PC by an Invalid PC\n"
            "Verify the Policy Rule's Update failed and config rolled back "
            "to original attr values\n"
            "#################################################\n")
        # Testcase work-flow starts
        self._log.info('\n## Step 1: Create Policy Rule with PA and PC##\n')
        rule_uuid = self.gbpcfg.gbp_policy_cfg_all(
            1, 'rule', self.rule_name, classifier=self.cls_name,
            action=self.act_name)
        if rule_uuid != 0:
            self._log.info(
                "Step 1: Create Rule Passed, UUID == %s\n" %
                (rule_uuid))
        else:
            self._log.info("# Step 1: Create Rule == Failed")
            return 0
        self._log.info(
            '\n## Step 2: Update Policy Rule with Invalid PA and Invalid '
            'PC one at a time ##\n')
        attrib_list = [{'classifier': 'INVALID'}, {'action': 'INVALID'}]
        for attr_val in attrib_list:
            if self.gbpcfg.gbp_policy_cfg_upd_all(
                    'rule', rule_uuid, attr_val) != 0:
                self._log.info(
                    "\nStep 2: Updating Policy Rule's Attribute %s with "
                    "Invalid Value did NOT Fail" %
                    (attr_val))
                return 0
        # Verify starts
        if self.gbpverify.gbp_policy_verify_all(
                1,
                'rule',
                rule_uuid,
                name=self.rule_name,
                policy_classifier_id=self.cls_uuid,
                policy_actions=self.act_uuid) == 0:
            self._log.info(
                "# Step 2B: Verify Policy Rule Updated did NOT roll back")
            return 0
        self._log.info("\n## TESTCASE_GBP_PR_NEG_3: PASSED")
        return 1

    def test_gbp_pr_neg_4(self):
        """Delete a non-existent rule; expect the delete to fail."""
        self._log.info(
            "\n###############################################\n"
            "TESTCASE_GBP_PR_NEG_4: DELETE NON-EXISTENT/INVALID POLICY RULE \n"
            "TEST_STEP::\n"
            "Delete unknown/invalid policy-rule\n"
            "##############################################\n")
        self._log.info("\n## Step 1: Delete non-existent Polic Rule ##")
        if self.gbpcfg.gbp_policy_cfg_all(0, 'rule', 'INVALID') != 0:
            self._log.info(
                "\n## Step 1: Delete Non-existent policy rule did NOT Fail")
            return 0
        self._log.info("\n## TESTCASE_GBP_PR_NEG_4: PASSED")
        return 1
# Standard script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
# Simple interactive greeting script (Portuguese).
# BUGFIX: dataset residue "3205010 | " was fused onto the first line,
# which made the whole file a SyntaxError.
print('olá mundo')
name = input('Qual é o seu nome ')
print('Seja bem vindo ', name)
| StarcoderdataPython |
2778 | <filename>jp_doodle/quantity_forest.py
from jp_doodle import doodle_files
qf_js = doodle_files.vendor_path("js/quantity_forest.js")
from jp_doodle import dual_canvas
import jp_proxy_widget
import os
from subprocess import check_output
import pprint
# Python 2/3 compatibility: Python 3 has no ``unicode`` builtin, so alias it
# to ``str`` there (``bytes != str`` only holds on Python 3).
if bytes != str:
    unicode = str
def directory_usage(directory, epsilon=0.02):
    """Measure disk usage of each entry of *directory* using ``du -s``.

    Entries whose share of the directory total is below *epsilon* are folded
    into a single "*other" pseudo-entry.  Returns None when *directory* is
    not a directory, and {} when nothing could be measured.  Each returned
    value is a dict with "name", "file_size" (du units), "percent" and "id"
    (the path, or "*" + directory for the "*other" bucket).
    """
    if not os.path.isdir(directory):
        return None
    result = {}
    total = 0.0
    for fn in os.listdir(directory):
        path = os.path.join(directory, fn)
        try:
            usage = check_output(["du", "-s", path])
        except Exception:
            # Entry vanished or is unreadable -- skip it.
            pass
        else:
            usage = unicode(usage, "utf8")  # bytes -> text (py2/3 shim above)
            # du output is "<size>\t<path>".
            [snum, sname] = usage.strip().split("\t")
            num = float(snum)
            total += num
            result[fn] = (path, num)
    final = {}
    if total == 0:
        # BUGFIX: nothing measurable -- avoid dividing by zero below.
        return final
    other = 0
    for fn in result:
        (path, num) = result[fn]
        portion = num/total
        if portion < epsilon:
            other += num
        else:
            final[fn] = {"name": fn, "file_size": num, "percent": portion*100, "id": path}
    # BUGFIX: compare the *fraction* against epsilon, consistent with the
    # per-entry filter above; the original compared the raw du size to the
    # fraction, which was effectively always true.
    if other / total > epsilon:
        final["*other"] = {"name": "*other", "file_size": other, "percent": other*100/total, "id": "*" + directory}
    return final
# Unit direction vectors used to orient the forest layout on the canvas.
RIGHT = {"x": 1, "y":0}
UP = {"x": 0, "y":1}
class FileSystemExplorer:
    """Interactive disk-usage browser rendered with the quantity_forest JS
    widget inside a jp_doodle canvas widget.

    Clicking an entry expands/collapses it; when ``enable_deletions`` is
    True the detail panel offers an ``rm -rf`` link for the clicked path.
    """

    # Seed for the deterministic color sequence (see pick_color).
    color_counter = 333
    # Class-level default; overridden per instance in __init__.
    opacity = 0.5

    def __init__(self, canvas_widget, path, width=600, enable_deletions=False,
            horizontal=False, x_vector=None, y_vector=None,
            dy=50, dh=20, epsilon=0.02, degrees=15, font="normal 10px Arial",
            background="rgba(244,230,255,0.8)", opacity=0.7,
            clearHeight=300,
        ):
        """Scan *path* and initialise the forest widget on *canvas_widget*.

        horizontal=True swaps the layout axes; x_vector/y_vector override
        them explicitly.  epsilon is forwarded to directory_usage.
        """
        self.opacity = opacity
        if y_vector is None:
            y_vector = UP
        if horizontal:
            y_vector = RIGHT
        if x_vector is None:
            x_vector = RIGHT
        if horizontal:
            x_vector = UP
        self.epsilon = epsilon
        self.enable_deletions = enable_deletions
        path = os.path.expanduser(path)
        path = os.path.abspath(path)
        self.color_cache = {}   # id -> rgba color string
        self.usage_cache = {}   # directory -> directory_usage() result
        self.id_to_data = {}    # id -> usage entry dict
        self.expanded = {}      # id -> bool (expanded in the UI?)
        self.widget = canvas_widget
        self.path = path
        members = self.directory_members(path)
        self.widget = canvas_widget
        canvas_widget.load_js_files([qf_js])
        canvas_widget.js_init("""
            var forest_config = {
                top_label: top_label,
                roots: members,
                width: width,
                dy: dy,
                dh: dh,
                id_click: id_click,
                degrees: degrees,
                background: background,
                x_vector: x_vector,
                y_vector: y_vector,
                font: font,
                clearHeight: clearHeight,
            }
            element.quantity_forest(forest_config);
            element.detail = $("<div>Initialized</div>").appendTo(element);
            element.show_detail = function(identity, info) {
                var d = element.detail
                d.html("<div/>");
                for (key in info) {
                    $("<div>" + key + " : " + info[key] + "<div>").appendTo(d);
                }
                if (!identity.startsWith("*")) {
                    var deleter = $("<a>delete " + identity + "</a>").appendTo(d);
                    deleter.on("click", function() { delete_id(identity); });
                }
            };
        """,
            width=width,
            members=members,
            dy=dy, dh=dh,
            id_click=self.id_click,
            top_label=path,
            delete_id=self.delete_id,
            degrees=degrees,
            x_vector=x_vector,
            y_vector=y_vector,
            font=font,
            background=background,
            clearHeight=clearHeight,
        )
        if enable_deletions:
            self.widget.element.detail.html("<div>DELETIONS ARE ENABLED!</div>");

    def directory_usage(self, directory):
        """Cached wrapper around the module-level directory_usage(); also
        records each entry's parent and indexes entries by id."""
        cache = self.usage_cache
        if directory in cache:
            return cache[directory]
        usage = directory_usage(directory, self.epsilon)
        cache[directory] = usage
        if not usage:
            return usage
        for u in usage.values():
            u["parent"] = directory
            self.id_to_data[u["id"]] = u
        return usage

    def get_color(self, identity):
        """Return a stable color for *identity*, allocating one on first use."""
        cache = self.color_cache
        if identity in cache:
            return cache[identity]
        result = cache[identity] = self.pick_color()
        return result

    def pick_color(self):
        """Generate the next color by bit-interleaving the shared counter
        into an RGB triple, then darkening it."""
        self.color_counter += 1
        counter = self.color_counter
        rgb = [0, 0, 0]
        for i in range(8):
            for j in range(3):
                rgb[j] = (rgb[j] << 1) | (counter & 1)
                counter = (counter >> 1)
        # darken
        for i in range(3):
            rgb[i] = (rgb[i] * 200) // 255
        return "rgba(%s,%s,%s,%s)" % (tuple(rgb) + (self.opacity,))

    def delete_id(self, identity):
        """UI entry point for deletion: show a wait cursor while working."""
        try:
            self.widget.element.css("cursor", "wait")
            self.widget.element.detail.html("<div>attempting delete...</div>")
            self.delete_id1(identity)
        finally:
            self.widget.element.css("cursor", "default")

    def delete_id1(self, identity):
        """Run ``rm -rf identity`` (when deletions are enabled) and refresh
        the widget; reports success/failure in the detail panel."""
        if self.enable_deletions:
            # for simplicity for now just clear the usage cache
            self.usage_cache = {}
            cmd = ["rm", "-rf", identity]
            self.widget.element["print"](repr(cmd))
            #w.element.css("cursor", "wait")
            try:
                #try:
                checked = check_output(cmd)
                #finally:
                #w.element.css("cursor", "default")
            except Exception as e:
                self.widget.element.detail.html("<div>delete " + repr((identity, e)) + " failed</div>");
            else:
                roots = self.directory_members(self.path)
                #pprint.pprint(roots)
                self.widget.element.reset_roots(roots)
                self.widget.element.detail.html("<div>" + repr(identity) + " deleted</div>");
        else:
            self.widget.element.detail.html("<div>delete " + repr(identity) + " disabled</div>");

    def id_click(self, identity):
        """Toggle expansion of the clicked entry, rebuild the tree and show
        the entry's details."""
        try:
            self.widget.element.css("cursor", "wait")
            self.widget.element.detail.html("<div>click...</div>")
            self.expanded[identity] = not self.expanded.get(identity, False)
            roots = self.directory_members(self.path)
            #pprint.pprint(roots)
            self.widget.element.reset_roots(roots)
            #self.widget.element.detail.html("<div>expand " + repr(identity) + "</div>");
            self.widget.element.show_detail(identity, self.id_to_data[identity])
        finally:
            self.widget.element.css("cursor", "default")

    def directory_members(self, directory):
        """Build the widget's node list for *directory*, largest entries
        first, recursing into entries marked as expanded."""
        self.expanded[directory] = True
        usage = self.directory_usage(directory)
        if not usage:
            return []
        result = []
        sorter = [(u["percent"], u["name"]) for u in usage.values()]
        for (pct, filename) in reversed(sorted(sorter)):
            u = usage[filename]
            identity = u["id"]
            expanded = self.expanded.get(identity, False)
            children = None
            if expanded:
                children = self.directory_members(identity)
            r = {
                "id": identity,
                "label": u["name"],
                "size": u["file_size"],
                "children": children,
                "expanded": expanded,
                "color": self.get_color(identity),
            }
            result.append(r)
        return result
| StarcoderdataPython |
import csv
import datetime

from pymongo import MongoClient

# Load bank transactions from trx.csv into the local MongoDB
# ``accounts.transactions`` collection, printing each inserted id.
# BUGFIX: dataset residue "3357439 | " was fused onto ``import csv``,
# making the whole file a SyntaxError.
client = MongoClient('mongodb://localhost:27017/')
db = client['accounts']
collection = db['transactions']
transactions = db.transactions
with open('trx.csv', 'r') as csvfile:
    spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
    for row in spamreader:
        # NOTE(review): column meanings inferred only from the keys below --
        # confirm against the actual trx.csv layout (row[0] is unused).
        transaction = {'date': row[1],
                       'description': row[2],
                       'cheque': row[3],
                       'debit': row[4],
                       'credit': row[5],
                       'balance': row[6],
                       'category': row[7],
                       'remarks': row[8],
                       'account': "HDFC Bank"}
        transactionId = transactions.insert_one(transaction).inserted_id
        print(transactionId)
client.close()
| StarcoderdataPython |
# Exercise 3.14
# Author: <NAME>
from math import sqrt, exp, pi
def gauss(x, m=0, s=1):
    """Probability density of the normal distribution N(m, s**2) at x."""
    coefficient = 1 / (sqrt(2 * pi) * s)
    exponent = -0.5 * ((x - m) / s) ** 2
    return coefficient * exp(exponent)
# Print a table of x values from -5 to 5 and the standard-normal density at
# each one.  These are Python 2 "print" statements; the trailing commas keep
# each row on a single line.
print '%8s' % 'x',
for x in range(-5, 6):
    print '%9d' % x,
print "\nGaussian",
for x in range(-5, 6):
    print '%.7f' % gauss(x),
| StarcoderdataPython |
# -*- coding: utf-8 -*-
from django.db import models
from datetime import datetime
# Create your models here.
class Clarification(models.Model):
    """A question asked during a contest together with its reply."""
    cid = models.IntegerField()      # contest id this clarification belongs to
    asker = models.TextField()
    question = models.TextField()
    reply = models.TextField()
    # BUGFIX: the original passed default=datetime.now() (evaluated ONCE at
    # import time) together with auto_now_add=True; auto_now_add, default
    # and editable are mutually exclusive on DateTimeField.  auto_now_add
    # alone gives the intended creation timestamp.
    time = models.DateTimeField(auto_now_add=True)
    def __unicode__(self):
        return 'question: ' + self.question + ' | reply: ' + self.reply + ' @' + str(self.time.date())
class Contest(models.Model):
    """A programming contest with links to its external resources."""
    cid = models.IntegerField(unique=True)   # public contest identifier
    problem_url = models.TextField()
    scoreboard_url = models.TextField()
    solution_url = models.TextField()
    signup_url = models.TextField()
    # NOTE(review): date is free-form text, not a DateField -- confirm the
    # expected format with the templates that render it.
    date = models.TextField()
    title = models.TextField()
    content = models.TextField()
    # Lifecycle choices for the status field.
    STATUS = (
        ('incoming', 'incoming'),
        ('running', 'running'),
        ('ended', 'ended'),
    )
    status = models.TextField(choices=STATUS)
    def __unicode__(self):
        return 'cid: ' + str(self.cid) + '| date: ' + self.date
class SignUp(models.Model):
    """A contest sign-up request submitted by an online-judge user."""
    nthu_oj_id = models.TextField()
    name = models.TextField()
    email = models.TextField()
    message = models.TextField()
    # BUGFIX: dropped default=datetime.now() (evaluated once at import time)
    # and editable=True -- they conflict with auto_now_add, which already
    # stamps the creation time.
    time = models.DateTimeField(auto_now_add=True)
    cid = models.IntegerField()   # contest being signed up for
    def __unicode__(self):
        return 'oj_id: ' + str(self.nthu_oj_id) + '| time: ' + str(self.time)
class Feedback(models.Model):
    """Free-form feedback left for a contest."""
    name = models.TextField()
    email = models.TextField()
    message = models.TextField()
    # BUGFIX: dropped default=datetime.now() (evaluated once at import time)
    # and editable=True -- they conflict with auto_now_add, which already
    # stamps the creation time.
    time = models.DateTimeField(auto_now_add=True)
    cid = models.IntegerField()   # contest the feedback refers to
    def __unicode__(self):
        return 'cid ' + str(self.cid) + '| time: ' + str(self.time)
# Dictionary Helper Models
class Dictionary(models.Model):
    """A model that represents a dictionary. This model implements most of the dictionary interface,
    allowing it to be used like a python dictionary.

    Every operation issues at least one database query against the related
    KeyValuePair rows.
    """
    name = models.CharField(max_length=255)

    @staticmethod
    def getDict(name):
        """Get the Dictionary of the given name.

        Raises Dictionary.DoesNotExist when no such dictionary exists.
        """
        df = Dictionary.objects.select_related().get(name=name)
        return df

    def __getitem__(self, key):
        """Returns the value of the selected key.

        Raises KeyError when the key is missing.
        """
        # BUGFIX: translate the ORM's DoesNotExist into KeyError so that the
        # class actually honours the dict protocol -- get() below catches
        # KeyError and __delitem__ already performs the same translation;
        # previously a missing key leaked KeyValuePair.DoesNotExist and
        # get() never returned its default.
        try:
            return self.keyvaluepair_set.get(key=key).value
        except KeyValuePair.DoesNotExist:
            raise KeyError(key)

    def __setitem__(self, key, value):
        """Sets the value of the given key in the Dictionary.
        """
        try:
            kvp = self.keyvaluepair_set.get(key=key)
        except KeyValuePair.DoesNotExist:
            KeyValuePair.objects.create(container=self, key=key, value=value)
        else:
            kvp.value = value
            kvp.save()

    def __delitem__(self, key):
        """Removed the given key from the Dictionary.
        """
        try:
            kvp = self.keyvaluepair_set.get(key=key)
        except KeyValuePair.DoesNotExist:
            raise KeyError
        else:
            kvp.delete()

    def __len__(self):
        """Returns the length of this Dictionary.
        """
        return self.keyvaluepair_set.count()

    def iterkeys(self):
        """Returns an iterator for the keys of this Dictionary.
        """
        return iter(kvp.key for kvp in self.keyvaluepair_set.all())

    def itervalues(self):
        """Returns an iterator for the values of this Dictionary.
        """
        return iter(kvp.value for kvp in self.keyvaluepair_set.all())

    __iter__ = iterkeys

    def iteritems(self):
        """Returns an iterator over the tuples of this Dictionary.
        """
        return iter((kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all())

    def keys(self):
        """Returns all keys in this Dictionary as a list.
        """
        return [kvp.key for kvp in self.keyvaluepair_set.all()]

    def values(self):
        """Returns all values in this Dictionary as a list.
        """
        return [kvp.value for kvp in self.keyvaluepair_set.all()]

    def items(self):
        """Get a list of tuples of key, value for the items in this Dictionary.
        This is modeled after dict.items().
        """
        return [(kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all()]

    def get(self, key, default=None):
        """Gets the given key from the Dictionary. If the key does not exist, it
        returns default.
        """
        try:
            return self[key]
        except KeyError:
            return default

    def has_key(self, key):
        """Returns true if the Dictionary has the given key, false if not.
        """
        return self.contains(key)

    def contains(self, key):
        """Returns true if the Dictionary has the given key, false if not.
        """
        try:
            self.keyvaluepair_set.get(key=key)
            return True
        except KeyValuePair.DoesNotExist:
            return False

    def clear(self):
        """Deletes all keys in the Dictionary.
        """
        self.keyvaluepair_set.all().delete()

    def __unicode__(self):
        """Returns a unicode representation of the Dictionary.
        """
        return unicode(self.asPyDict())

    def asPyDict(self):
        """Get a python dictionary that represents this Dictionary object.
        This object is read-only.
        """
        fieldDict = dict()
        for kvp in self.keyvaluepair_set.all():
            fieldDict[kvp.key] = kvp.value
        return fieldDict
class KeyValuePair(models.Model):
    """A Key-Value pair with a pointer to the Dictionary that owns it.
    """
    # BUGFIX: dataset residue " | StarcoderdataPython |" was fused onto the
    # last line of this class, which made the file a SyntaxError.
    # NOTE(review): Django >= 2.0 requires an on_delete argument on
    # ForeignKey; this model targets an older Django -- confirm before
    # upgrading.
    container = models.ForeignKey(Dictionary, db_index=True)
    key = models.CharField(max_length=240, db_index=True)
    value = models.CharField(max_length=65536, db_index=True)
38061 | <gh_stars>0
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from unittest.mock import patch
from datetime import datetime, date
from dateutil.relativedelta import relativedelta
from odoo import fields
from odoo.tests.common import SavepointCase, new_test_user
from odoo.addons.mail.tests.common import MailCase
class TestEventNotifications(SavepointCase, MailCase):
    """Checks which calendar-event changes notify attendees and which are
    deliberately silent (inactive events, past dates, self-invites)."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # mail_notrack keeps field-tracking messages out of the assertions.
        cls.event = cls.env['calendar.event'].create({
            'name': "<NAME>",
            'start': datetime(2019, 10, 25, 8, 0),
            'stop': datetime(2019, 10, 27, 18, 0),
        }).with_context(mail_notrack=True)
        cls.user = new_test_user(cls.env, 'xav', email='<EMAIL>', notification_type='inbox')
        cls.partner = cls.user.partner_id

    def test_message_invite(self):
        # Inviting a partner produces exactly one inbox notification.
        with self.assertSinglePostNotifications([{'partner': self.partner, 'type': 'inbox'}], {
            'message_type': 'user_notification',
            'subtype': 'mail.mt_note',
        }):
            self.event.partner_ids = self.partner

    def test_message_invite_self(self):
        # A user adding themselves must not be notified.
        with self.assertNoNotifications():
            self.event.with_user(self.user).partner_ids = self.partner

    def test_message_inactive_invite(self):
        # Invites to archived events are silent.
        self.event.active = False
        with self.assertNoNotifications():
            self.event.partner_ids = self.partner

    def test_message_set_inactive_invite(self):
        # Inviting while (re)writing active=False is also silent.
        self.event.active = False
        with self.assertNoNotifications():
            self.event.write({
                'partner_ids': [(4, self.partner.id)],
                'active': False,
            })

    def test_message_datetime_changed(self):
        self.event.partner_ids = self.partner
        # NOTE(review): the next line is a bare string expression (a no-op),
        # apparently a leftover of an intended assertion on the subject.
        "Invitation to Presentation of the new Calendar"
        with self.assertSinglePostNotifications([{'partner': self.partner, 'type': 'inbox'}], {
            'message_type': 'user_notification',
            'subtype': 'mail.mt_note',
        }):
            self.event.start = fields.Datetime.now() + relativedelta(days=1)

    def test_message_date_changed(self):
        # Moving a future all-day event notifies attendees.
        self.event.write({
            'allday': True,
            'start_date': fields.Date.today() + relativedelta(days=7),
            'stop_date': fields.Date.today() + relativedelta(days=8),
        })
        self.event.partner_ids = self.partner
        with self.assertSinglePostNotifications([{'partner': self.partner, 'type': 'inbox'}], {
            'message_type': 'user_notification',
            'subtype': 'mail.mt_note',
        }):
            self.event.start_date += relativedelta(days=-1)

    def test_message_date_changed_past(self):
        # Moving an event into the past must not notify.
        self.event.write({
            'allday': True,
            'start_date': fields.Date.today(),
            'stop_date': fields.Date.today() + relativedelta(days=1),
        })
        self.event.partner_ids = self.partner
        with self.assertNoNotifications():
            self.event.write({'start': date(2019, 1, 1)})

    def test_message_set_inactive_date_changed(self):
        # Date change combined with archiving is silent.
        self.event.write({
            'allday': True,
            'start_date': date(2019, 10, 15),
            'stop_date': date(2019, 10, 15),
        })
        self.event.partner_ids = self.partner
        with self.assertNoNotifications():
            self.event.write({
                'start_date': self.event.start_date - relativedelta(days=1),
                'active': False,
            })

    def test_message_inactive_date_changed(self):
        # Date changes on an already-archived event are silent.
        self.event.write({
            'allday': True,
            'start_date': date(2019, 10, 15),
            'stop_date': date(2019, 10, 15),
            'active': False,
        })
        self.event.partner_ids = self.partner
        with self.assertNoNotifications():
            self.event.start_date += relativedelta(days=-1)

    def test_message_add_and_date_changed(self):
        # Adding an attendee in the same write as a date change still yields
        # a single notification.
        self.event.partner_ids -= self.partner
        with self.assertSinglePostNotifications([{'partner': self.partner, 'type': 'inbox'}], {
            'message_type': 'user_notification',
            'subtype': 'mail.mt_note',
        }):
            self.event.write({
                'start': self.event.start - relativedelta(days=1),
                'partner_ids': [(4, self.partner.id)],
            })

    def test_bus_notif(self):
        alarm = self.env['calendar.alarm'].create({
            'name': 'Alarm',
            'alarm_type': 'notification',
            'interval': 'minutes',
            'duration': 30,
        })
        now = fields.Datetime.now()
        # Freeze Datetime.now so the expected notify_at/timer are stable.
        with patch.object(fields.Datetime, 'now', lambda: now):
            with self.assertBus([(self.env.cr.dbname, 'calendar.alarm', self.partner.id)]):
                self.event.with_context(no_mail_to_attendees=True).write({
                    'start': now + relativedelta(minutes=50),
                    'stop': now + relativedelta(minutes=55),
                    'partner_ids': [(4, self.partner.id)],
                    'alarm_ids': [(4, alarm.id)]
                })
        # 30-minute alarm before a start 50 minutes away -> fires in 20 min.
        bus_message = [{
            "alarm_id": alarm.id,
            "event_id": self.event.id,
            "title": "Doom's day",
            "message": self.event.display_time,
            "timer": 20*60,
            "notify_at": fields.Datetime.to_string(now + relativedelta(minutes=20)),
        }]
        notif = self.env['calendar.alarm_manager'].with_user(self.user).get_next_notif()
        self.assertEqual(notif, bus_message)

    def test_email_alarm(self):
        alarm = self.env['calendar.alarm'].create({
            'name': 'Alarm',
            'alarm_type': 'email',
            'interval': 'minutes',
            'duration': 20,
        })
        now = fields.Datetime.now()
        self.event.write({
            'start': now + relativedelta(minutes=15),
            'stop': now + relativedelta(minutes=18),
            'partner_ids': [(4, self.partner.id)],
            'alarm_ids': [(4, alarm.id)],
        })
        with patch.object(fields.Datetime, 'now', lambda: now):
            with self.assertSinglePostNotifications([{'partner': self.partner, 'type': 'inbox'}], {
                'message_type': 'user_notification',
                'subtype': 'mail.mt_note',
            }):
                self.env['calendar.alarm_manager'].with_context(lastcall=now - relativedelta(minutes=15))._get_partner_next_mail(self.partner)
| StarcoderdataPython |
1761032 | <gh_stars>1-10
from injector import inject
from .default_execution_initializer import DefaultExecutionInitializer
from .execution_initializer import ExecutionInitializer
from .....dependency import IScoped
from .....dependency.provider import ServiceProvider
class ExecutionInitializerFactory(IScoped):
    """Resolves which ExecutionInitializer implementation to use.

    Prefers a custom subclass over DefaultExecutionInitializer when more
    than one subclass is registered.  Returns None when ExecutionInitializer
    has no subclasses at all.
    """

    @inject
    def __init__(
            self,
            service_provider: ServiceProvider
    ):
        self.service_provider = service_provider

    def get(self) -> ExecutionInitializer:
        subclasses = ExecutionInitializer.__subclasses__()
        if subclasses is not None and len(subclasses) > 0:
            if len(subclasses) > 1:
                # BUGFIX: __subclasses__() yields class objects, so compare
                # classes directly.  The original used
                # isinstance(subclass, DefaultExecutionInitializer), which is
                # always False for a class object and therefore never
                # filtered the default implementation out.
                initializer_classes = [subclass for subclass in subclasses
                                       if subclass is not DefaultExecutionInitializer]
                initializer_class = initializer_classes[0]
            else:
                initializer_class = subclasses[0]
            initializer = self.service_provider.get(initializer_class)
            return initializer
| StarcoderdataPython |
4836025 | <filename>Encryption/main.py
from Utils import mtk
from random import randint
import math
def getKey():
    """Generate a prime-based key encoded as a reversed letter string.

    Draws random primes until one has at least 7 digits and contains no
    zero digit, then maps each digit d to chr(d + 67) and reverses the
    letter sequence.
    """
    key = '0'
    while '0' in str(key) or len(str(key)) < 7:
        upper_bound = mtk.getRandPrime() * mtk.getRandPrime() * mtk.getRandPrime()
        key = mtk.getRandPrime(0, upper_bound)
    encoded = [chr(int(digit) + 67) for digit in str(key)]
    return "".join(encoded[::-1])
def encrypt(msg, key):
    """Encrypt *msg* with a letter-encoded prime key (as produced by getKey).

    The key is decoded by mapping each letter back to a digit (ord - 67),
    reversing, and joining into an integer ``k``.  ``k`` must be prime and
    the key at least 7 letters long, otherwise "Kunci Invalid!" is printed
    and None returned; decoding errors return None silently.  Each character
    of *msg* is multiplied, as a code point, by the matching key digit; when
    the key has no more digits than the message, the result is wrapped in
    '=' markers (decrypt strips them).
    """
    err = False
    k = []
    for c in key:
        try:
            k.append(str(ord(c) - 67))
        except ValueError:
            err = True
            break
    try:
        k = int("".join(k[::-1]))
    except ValueError:
        # Key contained characters below 'C' (negative "digits").
        err = True
    try:
        mtk.isPrime(k)
    except TypeError:
        err = True
    if not err:
        if mtk.isPrime(k) and len(key) >= 7:
            x = len(str(k))   # number of key digits
            y = len(msg)
            if x <= y:
                # Repeat the key digits cyclically until they cover msg.
                z = y - x
                key = str(k)
                for i in range(math.ceil(z / x)):
                    z = len(msg) - len(key)
                    key = key + str(k)[0:z]
                s = ["="]
                for i, c in enumerate(msg):
                    s.append(chr(ord(c) * int(key[i])))
                s.append("=")
                return "".join(s)
            else:
                # More key digits than message characters: one digit per
                # character.  BUGFIX: the original truncated to a fixed 7
                # digits (str(k)[:7]), which raised IndexError for messages
                # of 8..x-1 characters.
                key = str(k)[:y]
                s = []
                for i, c in enumerate(msg):
                    s.append(chr(ord(c) * int(key[i])))
                return "".join(s).encode().decode('utf-8')
        print("Kunci Invalid!")
def decrypt(msg, key):
    """Inverse of encrypt: divide each code point by the matching key digit.

    Strips any '=' wrapper from *msg* first.  Prints "Kunci Invalid!" when
    the key does not decode to a prime of at least 7 letters; decoding
    errors return None silently.  (BUGFIX: dataset residue fused onto the
    original last line has been removed.)
    """
    err = False
    k = []
    for c in key:
        try:
            k.append(str(ord(c) - 67))
        except ValueError:
            err = True
            break
    try:
        k = int("".join(k[::-1]))
    except ValueError:
        err = True
    try:
        mtk.isPrime(k)
    except TypeError:
        err = True
    if not err:
        if mtk.isPrime(k) and len(key) >= 7:
            x = len(str(k))
            # NOTE: y is measured before the '=' markers are stripped,
            # matching the original behaviour (and encrypt's framing).
            y = len(msg)
            msg = msg.replace('=', '').encode().decode('utf-8')
            if x <= y:
                z = y - x
                key = str(k)
                for i in range(math.ceil(z / x)):
                    z = len(msg) - len(key)
                    key = key + str(k)[0:z]
                s = []
                for i, c in enumerate(msg):
                    try:
                        s.append(chr(int(ord(c) / int(key[i]))))
                    except Exception:
                        # Narrowed from a bare except so Ctrl-C still works.
                        print("Kunci Invalid!")
                        break
                return "".join(s)
            else:
                # BUGFIX: slice to the message length instead of a fixed 7
                # digits; the fixed slice raised IndexError for messages of
                # more than 7 characters.
                key = str(k)[:y]
                s = []
                for i, c in enumerate(msg):
                    s.append(chr(int(ord(c) / int(key[i]))))
                return "".join(s)
        print("Kunci Invalid!")
3324792 | <gh_stars>1-10
# %%
# Jupyter/VS Code notebook cells ("# %%" markers).  The "!"-prefixed lines
# are IPython shell escapes, not plain Python, so this file only runs inside
# an IPython kernel.  Each measurement includes interpreter start-up cost.
import time
#%%
# Time the single-process script.
start1 = time.time()
!python3 practice.py
end1 = time.time()
# %%
# Time the multi-core variant.
start2 = time.time()
!python3 practice_m.py
end2 = time.time()
# %%
print(f'single-core execution time {end1 - start1}')
print(f'multi-core execution time {end2 - start2}')
# %%
| StarcoderdataPython |
1700796 | <reponame>rgharris/libcloud<filename>docs/examples/compute/profitbricks/create_lan.py
import os
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
# Example: create a public LAN in an existing ProfitBricks datacenter.
cls = get_driver(Provider.PROFIT_BRICKS)
# Get ProfitBricks credentials from environment variables
# (os.environ.get returns None when a variable is unset).
pb_username = os.environ.get("PROFITBRICKS_USERNAME")
pb_password = os.environ.get("PROFITBRICKS_PASSWORD")
driver = cls(pb_username, pb_password)
datacenters = driver.list_datacenters()
# Looks for existing data centers named 'demo-dc'
# (NOTE: [0] raises IndexError when no datacenter has that name).
datacenter = [dc for dc in datacenters if dc.name == "demo-dc"][0]
# Create a public LAN
lan = driver.ex_create_lan(datacenter, is_public=True)
print(lan)
| StarcoderdataPython |
1624494 | <gh_stars>0
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from types import SimpleNamespace
class Perceptron:
    """Binary perceptron classifier for labels in {0, 1}.

    Two training modes:
      * "batch":  each pass adds eta * (sum of misclassified samples) and
                  stops once the update magnitude drops below theta.
      * "pocket": runs `theta` single-sample iterations, keeping the weight
                  vector with the fewest training errors seen so far.
    """

    def __init__(self, algorithm="batch"):
        self._coef = []        # weight vector, bias component first (set by fit)
        self._n_feats = 0      # number of input features (set by fit)
        self.algorithm = algorithm

    def fit(self, X, Y, theta, eta=None):
        """Train on X of shape (n_samples, n_feats) with 0/1 labels Y.

        theta: stopping threshold (batch) or iteration count (pocket).
        eta:   learning rate; required for the batch algorithm.
        Returns self.
        """
        self._n_feats = X.shape[1]
        y = np.where(Y == 0, -1, Y)       # work internally with -1/+1 labels
        x = np.insert(X, 0, 1, axis=1)    # prepend the bias column
        self._coef = np.zeros((self._n_feats + 1, ))
        if self.algorithm == "batch":
            while True:
                f = self.predict(x, False)     # already signed predictions
                miss = np.zeros(self._n_feats + 1)
                for i in range(X.shape[0]):
                    if f[i] != y[i]:
                        miss += y[i] * x[i, :]
                self._coef = self._coef + eta * miss
                # BUGFIX: compare the *magnitude* of each update component
                # to theta; the original compared signed values, so any
                # negative component counted as "converged" prematurely.
                if all(abs(v) < theta for v in eta * miss):
                    break
        elif self.algorithm == "pocket":
            if isinstance(theta, float):
                raise ValueError(
                    "theta should be integer in pocket algorithm!")
            for t in range(theta):
                i = t % X.shape[0]
                pred = np.sign(np.dot(self._coef.T, x[i, :]))
                if pred != y[i]:
                    w = self._coef + y[i] * x[i, :]
                    # Adopt the candidate only if it makes fewer training
                    # errors.  BUGFIX: the original compared the current
                    # error count against the *sum of signs* of the
                    # candidate's scores (the "!= y" was missing), which is
                    # not an error count at all.
                    cur_errors = np.sum(self.predict(x, False) != y)
                    cand_errors = np.sum(
                        np.sign(np.matmul(w.T, x.T).reshape((x.shape[0],))) != y)
                    if cur_errors > cand_errors:
                        self._coef = w
        else:
            raise ValueError("algorithm can either be batch or pocket!")
        return self

    def predict(self, x, user=True):
        """Predict labels for x, given with or without the bias column.

        user=True maps the internal -1 predictions back to 0.
        """
        if x.shape[1] == self._n_feats + 1:
            pred = np.sign(np.matmul(self._coef.T, x.T).reshape((x.shape[0],)))
        else:
            pred = np.sign(np.matmul(self._coef.T, np.insert(
                x, 0, 1, axis=1).T).reshape((x.shape[0],)))
        return np.where(pred == -1, 0, pred) if user else pred

    def score(self, x, y_true):
        """Accuracy on (x, y_true) for 0/1 labels (1 - mean squared error)."""
        pred = self.predict(x)
        return 1 - np.sum((pred - y_true) ** 2) / len(y_true)
def load_data(train_file, test_file):
    """Load train/test CSV files whose last column is the label.

    Returns a dict of numpy arrays keyed by x_train, y_train, x_test and
    y_test.
    """
    datasets = {}
    for prefix, path in (("train", train_file), ("test", test_file)):
        frame = pd.read_csv(path)
        datasets["x_" + prefix] = frame.iloc[:, :-1].values
        datasets["y_" + prefix] = frame.iloc[:, -1].values
    return datasets
def plot(X, Y, clf=None, h=0.01, fname=None):
    """Scatter-plot 2-D points X colored by labels Y; when *clf* is given,
    shade its decision regions on a mesh with step *h*.

    Saves the figure to *fname* when provided, otherwise shows it.
    """
    # Pad the plotting window one unit beyond the data range.
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    fig, ax = plt.subplots()
    if clf is not None:
        # Classify every mesh point to draw the decision regions.
        Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    ax.axis('off')
    ax.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
    if fname is not None:
        plt.savefig(fname)
    else:
        plt.show()
def __main__():
    """Train and evaluate both perceptron variants on train.csv/test.csv,
    saving decision-boundary plots as PNG files.

    NOTE: ``__main__`` is just an ordinary function name here; the call
    below runs unconditionally, even when this module is imported.
    """
    data = SimpleNamespace(**load_data("train.csv", "test.csv"))
    # plot(data.x_train, data.y_train)
    # plot(data.x_test, data.y_test)
    print("BATCH PERCEPTRON:")
    clf = Perceptron()  # batch perceptron
    clf.fit(data.x_train, data.y_train, 0.01, 0.001)
    plot(data.x_train, data.y_train, clf, fname="batch-train.png")
    print("TRAIN ACCURACY =", clf.score(data.x_train, data.y_train))
    # train_pred = clf.predict(data.x_train)
    # print(train_pred)
    plot(data.x_test, data.y_test, clf, fname="batch-test.png")
    print("TEST ACCURACY =", clf.score(data.x_test, data.y_test))
    # test_pred = clf.predict(data.x_test)
    # print(test_pred)
    print("POCKET ALGORITHM:")
    clf = Perceptron(algorithm="pocket")  # pocket perceptron, 10000 iterations
    clf.fit(data.x_train, data.y_train, 10000)
    plot(data.x_train, data.y_train, clf, fname="pocket-train.png")
    print("TRAIN ACCURACY =", clf.score(data.x_train, data.y_train))
    # train_pred = clf.predict(data.x_train)
    # print(train_pred)
    plot(data.x_test, data.y_test, clf, fname="pocket-test.png")
    print("TEST ACCURACY =", clf.score(data.x_test, data.y_test))
    # test_pred = clf.predict(data.x_test)
    # print(test_pred)
__main__()
| StarcoderdataPython |
import time
import urllib.request

from googlesearch import search

# Google-search a term and report which of the top-20 result pages contain a
# given phrase (a web-scale "ctrl-f").  Sites that cannot be fetched even
# after retries are counted.
# BUGFIX: dataset residue "110197 | " was fused onto ``import time``,
# making the file a SyntaxError.
searchterm = input("Search: ")
ctrlf = input("Find on Page: ")
botnolikey = 0  # number of sites that failed every fetch attempt

print("when google searching "+searchterm+", these websites contain the phrase "+ctrlf+" somewhere on the page")
for url in search(searchterm, stop=20):
    try:  # if the website hates robots
        f = urllib.request.urlopen(url)
        webdata = f.read()
        if ctrlf.encode("utf-8") in webdata:  # ctrl-f the page
            print(url)
    except Exception:
        # Retry up to 3 times with a pause (helps with rate limiting).
        fetched = False
        for x in range(0, 3):
            try:
                time.sleep(3)  # attempting to fix most "too many requests" errors
                f = urllib.request.urlopen(url)
                webdata = f.read()
                if ctrlf.encode("utf-8") in webdata:  # ctrl-f the page
                    print(url)
                fetched = True
                break
            except Exception:
                continue  # keep trying if errors
        if not fetched:
            # BUGFIX: only count sites that failed every attempt; the
            # original incremented even when a retry succeeded.
            botnolikey = botnolikey + 1
        continue
print("This many websites didn't like bots:")
print(botnolikey)
| StarcoderdataPython |
97319 | <reponame>torcolvin/rbtools
"""Unit tests for rbtools.utils.aliases."""
from __future__ import unicode_literals
from rbtools.utils.aliases import replace_arguments
from rbtools.utils.testbase import RBTestBase
class AliasTests(RBTestBase):
    """Tests for rbtools.utils.aliases.replace_arguments."""

    def test_replace_arguments_basic(self):
        """Testing replace_arguments with variables and arguments"""
        self.assertEqual(replace_arguments('$1', ['HEAD'], posix=True),
                         ['HEAD'])

    def test_replace_arguments_multiple(self):
        """Testing replace_arguments with multiple variables and arguments"""
        self.assertEqual(replace_arguments('$1..$2', ['a', 'b'], posix=True),
                         ['a..b'])

    def test_replace_arguments_blank(self):
        """Testing replace_arguments with variables and a missing argument"""
        self.assertEqual(replace_arguments('rbt post $1', [], posix=True),
                         ['rbt', 'post'])

    def test_replace_arguments_append(self):
        """Testing replace_arguments appends arguments when the command has
        no variables
        """
        self.assertEqual(
            replace_arguments('echo', ['a', 'b', 'c'], posix=True),
            ['echo', 'a', 'b', 'c'])

    def test_replace_arguments_unrecognized_variables(self):
        """Testing replace_arguments with an unrecognized variable name"""
        self.assertEqual(replace_arguments('$1 $test', ['f'], posix=True),
                         ['f', '$test'])

    def test_replace_arguments_star(self):
        """Testing replace_arguments with the special $* variable"""
        self.assertEqual(replace_arguments('$*', ['a', 'b', 'c'], posix=True),
                         ['a', 'b', 'c'])

    def test_replace_arguments_star_whitespace(self):
        """Testing replace_arguments with the special $* variable with
        whitespace-containing arguments
        """
        self.assertEqual(
            replace_arguments('$*', ['a', 'b', 'c d e'], posix=True),
            ['a', 'b', 'c d e'])

    def test_replace_arguments_unescaped_non_posix(self):
        """Testing replace_arguments in non-POSIX mode does not evaluate escape
        sequences
        """
        self.assertEqual(replace_arguments(r'"$1 \\"', ['a'], posix=False),
                         [r'"a \\"'])

    def test_replace_arguments_invalid_quote(self):
        """Testing replace_arguments with invalid quotes in POSIX and non-POSIX
        mode raises an error
        """
        with self.assertRaises(ValueError):
            replace_arguments('"foo', [], posix=True)

        with self.assertRaises(ValueError):
            replace_arguments('"foo', [], posix=False)

    def test_replace_arguments_invalid_quote_posix(self):
        """Testing replace_arguments with escaped ending quote in non-POSIX
        mode does not escape the quote
        """
        # NOTE(review): the method name says "posix" but the call exercises
        # posix=False; this name and its sibling below appear swapped.
        # Renaming affects test selection, so flagging instead of renaming.
        self.assertEqual(replace_arguments('"\\"', [], posix=False),
                         ['"\\"'])

    def test_replace_arguments_invalid_quote_non_posix(self):
        """Testing replace_arguments with escaped ending quote in POSIX mode
        raises an error
        """
        # NOTE(review): see the naming note on the sibling test above.
        with self.assertRaises(ValueError):
            replace_arguments('"\\"', [], posix=True)
| StarcoderdataPython |
3210591 | <reponame>minikdo/travelarchive2
# Generated by Django 2.1.4 on 2019-01-09 13:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; applied migrations should not
    # be edited by hand.
    dependencies = [
        ('travels', '0011_auto_20190109_1422'),
    ]
    operations = [
        # Make Currency.country optional and clear the FK (SET_NULL) instead
        # of cascading when the referenced Country is deleted.
        migrations.AlterField(
            model_name='currency',
            name='country',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='travels.Country'),
        ),
    ]
| StarcoderdataPython |
1779223 | #!/usr/bin/python2.7
import os, sys, shutil, platform, time, json
import install
# Shared-library suffix and toolchain defaults; adjusted per-platform below.
SUFFIX = ".so"
COMPILER = "gcc"
INCLUDE = [ ]
LINK = [ "aria" ]
DEFINE = [ ]
# BUG FIX: a missing comma made Python concatenate the two adjacent string
# literals into the single bogus flag "-fno-strict-aliasing--std=c99".
# NOTE(review): gcc normally spells the dialect flag "-std=c99"; the
# double-dash form is kept as-is here -- confirm it is intended.
CFLAGS = [ "-Wall", "-Wextra", "-c", "-fPIC", "-fno-strict-aliasing", "--std=c99", "-pedantic", "-O3" ]
LFLAGS = [ "-shared", "-fPIC" ]
EXTRA = [ ]
if platform.system() == "Windows":
    SUFFIX = ".dll"
    LINK += [ "mingw32" ]
# if platform.system() == "Linux":
def fmt(fmt, dic):
    """Substitute each "{key}" placeholder in *fmt* with its value from *dic*.

    List values are flattened into one space-separated string; every other
    value is rendered with str().
    """
    result = fmt
    for key, value in dic.items():
        if type(value) is list:
            value = " ".join(value)
        result = result.replace("{" + key + "}", str(value))
    return result
def clearup(file):
    """Delete the build artifact "<file><SUFFIX>" left over from a prior run."""
    target = file + SUFFIX
    if os.path.isfile(target):
        os.remove(target)
def main():
    """Build every module described by a <dir>/module.json in the CWD (Python 2)."""
    SEARCH = [ d + "/module.json" for d in os.listdir(".") if os.path.isdir(d) and d[0] != '.' ]
    # NOTE(review): starttime is recorded but never reported.
    starttime = time.time()
    # Handle args
    verbose = "verbose" in sys.argv
    for directory in SEARCH:
        config = json.load(open(directory))
        print "building %s..." % (config['name'] + SUFFIX)
        # Build: collect source paths relative to the module directory
        src = []
        for file in config['src']:
            src += [ "%s/%s" % (config['name'], file) ]
        L = []
        # Linux uses -soname, macOS uses -install_name for the shared library id.
        if platform.system() != "Darwin":
            config['lflags'] += [ "-Wl,-soname,%s" % config['name'] ]
        else:
            config['lflags'] += [ "-Wl,-install_name,%s" % config['name'] ]
        cmd = fmt("%s -o {name}%s {flags} {src} {include} {link} {define} {extra}",
        {
            "name" : config['name'],
            "src" : " ".join(src),
            "include" : " ".join(map(lambda x:"-I" + x, config['include'] + INCLUDE)),
            "link" : " ".join(map(lambda x:"-l" + x, config['link'] + LINK)),
            "define" : " ".join(map(lambda x:"-D" + x, config['define'] + DEFINE)),
            "lflags" : " ".join(config['lflags'] + L),
            "cflags" : " ".join(config['cflags'] + CFLAGS),
            "extra" : " ".join(config['extra'] + EXTRA),
        })
        cmd %= (COMPILER, SUFFIX)
        if verbose:
            print cmd
        clearup(config['name'])
        # NOTE(review): the actual compile/strip steps are commented out, so
        # this currently only prints the command and removes old artifacts.
        # os.system(cmd)
        # if os.path.isfile(config['name'] + SUFFIX):
        #     os.system("strip %s" % (config['name'] + SUFFIX))
    # install.process(SEARCH)
if __name__ == "__main__":
    main()
1638486 | # Import modules and libraries
import torch
from torch.utils.data import DataLoader
import csv
import pickle
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import glob
from skimage.io import imread
import time
import argparse
from DeepSTORM3D.data_utils import generate_batch, complex_to_tensor, ExpDataset, sort_names_tif
from DeepSTORM3D.cnn_utils import LocalizationCNN
from DeepSTORM3D.vis_utils import ShowMaskPSF, ShowRecovery3D, ShowLossJaccardAtEndOfEpoch
from DeepSTORM3D.vis_utils import PhysicalLayerVisualization, ShowRecNetInput
from DeepSTORM3D.physics_utils import EmittersToPhases
from DeepSTORM3D.postprocess_utils import Postprocess
from DeepSTORM3D.assessment_utils import calc_jaccard_rmse
from DeepSTORM3D.helper_utils import normalize_01, xyz_to_nm
def test_model(path_results, postprocess_params, exp_imgs_path=None, seed=66):
    """Evaluate a trained DeepSTORM3D localization CNN.

    Depending on ``exp_imgs_path`` this either (a) simulates a random test
    image and compares recovered emitters to ground truth, (b) processes a
    single experimental tif, or (c) batch-processes a folder of tifs and
    writes ``localizations.csv`` next to them.

    Args:
        path_results: folder holding ``setup_params.pickle``,
            ``learning_results.pickle`` and ``weights_best_loss.pkl``.
        postprocess_params: dict with keys ``'thresh'`` and ``'radius'``.
        exp_imgs_path: optional folder of experimental ``.tif`` images.
        seed: RNG seed used when simulating the test example.

    Returns:
        Tuple ``(xyz_rec, conf_rec)`` of recovered 3D positions and net
        confidences (for the last processed frame in the batch case).
    """
    # close all existing plots
    plt.close("all")
    # load assumed setup parameters
    path_params_pickle = path_results + 'setup_params.pickle'
    with open(path_params_pickle, 'rb') as handle:
        setup_params = pickle.load(handle)
    # run on GPU if available
    device = setup_params['device']
    torch.backends.cudnn.benchmark = True
    # phase term for PSF visualization
    vis_term, zvis = setup_params['vis_term'], setup_params['zvis']
    # phase mask for visualization
    mask_param = torch.from_numpy(setup_params['mask_init']).to(device)
    # plot used mask and PSF
    plt.figure(figsize=(10,5))
    ShowMaskPSF(mask_param, vis_term, zvis)
    # load learning results
    path_learning_pickle = path_results + 'learning_results.pickle'
    with open(path_learning_pickle, 'rb') as handle:
        learning_results = pickle.load(handle)
    # plot metrics evolution in training for debugging
    plt.figure()
    ShowLossJaccardAtEndOfEpoch(learning_results, learning_results['epoch_converged'])
    # build model and convert all the weight tensors to GPU is available
    cnn = LocalizationCNN(setup_params)
    cnn.to(device)
    # load learned weights
    cnn.load_state_dict(torch.load(path_results + 'weights_best_loss.pkl'))
    # post-processing module on CPU/GPU
    thresh, radius = postprocess_params['thresh'], postprocess_params['radius']
    postprocessing_module = Postprocess(thresh, radius, setup_params)
    # if no experimental imgs are supplied then sample a random example
    if exp_imgs_path is None:
        # visualization module to visualize the 3D positions recovered by the net as images
        psf_module_vis = PhysicalLayerVisualization(setup_params, 0, 0, 1)
        # ==============================================================================================================
        # generate a simulated test image
        # ==============================================================================================================
        # set random number generators given the seed
        torch.manual_seed(seed)
        np.random.seed(seed)
        # sample a single piece of data
        xyz_gt, nphotons_gt = generate_batch(1, setup_params)
        # calculate phases and cast them to device
        phases_np = EmittersToPhases(xyz_gt, setup_params)
        phases_emitter_gt = complex_to_tensor(phases_np).to(device)
        # initialize the physical layer that encodes xyz into noisy PSFs
        psf_module_net = PhysicalLayerVisualization(setup_params, 1, 1, 0)
        # pass xyz and N through the physical layer to get the simulated image
        nphotons_gt = torch.from_numpy(nphotons_gt).type(torch.FloatTensor).to(device)
        test_input_im = psf_module_net(mask_param, phases_emitter_gt, nphotons_gt)
        # normalize image according to the training setting
        if setup_params['project_01'] is True:
            test_input_im = normalize_01(test_input_im)
        else:
            test_input_im = (test_input_im - setup_params['global_factors'][0]) / setup_params['global_factors'][1]
        # alter the mean and std to match the training set
        if setup_params['project_01'] is True:
            test_input_im = (test_input_im - test_input_im.mean())/test_input_im.std()
            test_input_im = test_input_im*setup_params['train_stats'][1] + setup_params['train_stats'][0]
        # ==============================================================================================================
        # predict the positions by post-processing the net's output
        # ==============================================================================================================
        # prediction using model
        cnn.eval()
        with torch.set_grad_enabled(False):
            pred_volume = cnn(test_input_im)
        # post-process predicted volume
        tpost_start = time.time()
        xyz_rec, conf_rec = postprocessing_module(pred_volume)
        tpost_elapsed = time.time() - tpost_start
        print('Post-processing complete in {:.6f}s'.format(tpost_elapsed))
        # time prediction using model after first forward pass which is slow
        cnn.eval()
        tinf_start = time.time()
        with torch.set_grad_enabled(False):
            pred_volume = cnn(test_input_im)
        tinf_elapsed = time.time() - tinf_start
        print('Inference complete in {:.6f}s'.format(tinf_elapsed))
        # take out dim emitters from GT
        if setup_params['nsig_unif'] is False:
            nemitters = xyz_gt.shape[1]
            if np.not_equal(nemitters, 1):
                nphotons_gt = np.squeeze(nphotons_gt, 0)
                xyz_gt = xyz_gt[:, nphotons_gt > setup_params['nsig_thresh'], :]
        # plot recovered 3D positions compared to GT
        plt.figure()
        xyz_gt = np.squeeze(xyz_gt, 0)
        ShowRecovery3D(xyz_gt, xyz_rec)
        # report the number of found emitters
        print('Found {:d} emitters out of {:d}'.format(xyz_rec.shape[0], xyz_gt.shape[0]))
        # calculate quantitative metrics assuming a matching radius of 100 nm
        jaccard_index, RMSE_xy, RMSE_z, _ = calc_jaccard_rmse(xyz_gt, xyz_rec, 0.1)
        # report quantitative metrics
        print('Jaccard Index = {:.2f}%, Lateral RMSE = {:.2f} nm, Axial RMSE = {:.2f}'.format(
            jaccard_index*100, RMSE_xy*1e3, RMSE_z*1e3))
        # ==============================================================================================================
        # compare the network positions to the input image
        # ==============================================================================================================
        # turn recovered positions into phases
        xyz_rec = np.expand_dims(xyz_rec, 0)
        phases_np = EmittersToPhases(xyz_rec, setup_params)
        phases_emitter_rec = complex_to_tensor(phases_np).to(device)
        # use a uniform number of photons for recovery visualization
        nphotons_rec = 5000 * torch.ones((1, xyz_rec.shape[1])).to(device)
        # generate the recovered image by the net
        test_pred_im = psf_module_vis(mask_param, phases_emitter_rec, nphotons_rec)
        # compare the recovered image to the input
        ShowRecNetInput(test_input_im, 'Simulated Input to Localization Net')
        ShowRecNetInput(test_pred_im, 'Recovered Input Matching Net Localizations')
        # return recovered locations and net confidence
        return xyz_rec, conf_rec
    else:
        # read all imgs in the experimental data directory assuming ".tif" extension
        img_names = glob.glob(exp_imgs_path + '*.tif')
        img_names = sort_names_tif(img_names)
        # if given only 1 image then show xyz in 3D and recovered image
        if len(img_names) == 1:
            # ==========================================================================================================
            # read experimental image and normalize it
            # ==========================================================================================================
            # read exp image in uint16
            exp_im = imread(img_names[0])
            exp_img = exp_im.astype("float32")
            # normalize image according to the training setting
            if setup_params['project_01'] is True:
                exp_img = normalize_01(exp_img)
            else:
                exp_img = (exp_img - setup_params['global_factors'][0]) / setup_params['global_factors'][1]
            # alter the mean and std to match the training set
            if setup_params['project_01'] is True:
                exp_img = (exp_img - exp_img.mean()) / exp_img.std()
                exp_img = exp_img * setup_params['train_stats'][1] + setup_params['train_stats'][0]
            # turn image into torch tensor with 1 channel on GPU
            exp_img = np.expand_dims(exp_img, 0)
            exp_img = np.expand_dims(exp_img, 0)
            exp_tensor = torch.FloatTensor(exp_img).to(device)
            # ==========================================================================================================
            # predict the positions by post-processing the net's output
            # ==========================================================================================================
            # prediction using model
            cnn.eval()
            with torch.set_grad_enabled(False):
                pred_volume = cnn(exp_tensor)
            # post-process predicted volume
            tpost_start = time.time()
            xyz_rec, conf_rec = postprocessing_module(pred_volume)
            tpost_elapsed = time.time() - tpost_start
            print('Post-processing complete in {:.6f}s'.format(tpost_elapsed))
            # time prediction using model after first forward pass which is slow
            cnn.eval()
            tinf_start = time.time()
            with torch.set_grad_enabled(False):
                pred_volume = cnn(exp_tensor)
            tinf_elapsed = time.time() - tinf_start
            print('Inference complete in {:.6f}s'.format(tinf_elapsed))
            # plot recovered 3D positions compared to GT
            plt.figure()
            ax = plt.axes(projection='3d')
            ax.scatter(xyz_rec[:, 0], xyz_rec[:, 1], xyz_rec[:, 2], c='r', marker='^', label='DL', depthshade=False)
            ax.set_xlabel('X [um]')
            ax.set_ylabel('Y [um]')
            ax.set_zlabel('Z [um]')
            plt.title('3D Recovered Positions')
            # report the number of found emitters
            print('Found {:d} emitters'.format(xyz_rec.shape[0]))
            # ==========================================================================================================
            # compare the network positions to the input image
            # ==========================================================================================================
            # visualization module to visualize the 3D positions recovered by the net as images
            H, W = exp_im.shape
            setup_params['H'], setup_params['W'] = H, W
            psf_module_vis = PhysicalLayerVisualization(setup_params, 0, 0, 1)
            # turn recovered positions into phases
            xyz_rec = np.expand_dims(xyz_rec, 0)
            phases_np = EmittersToPhases(xyz_rec, setup_params)
            phases_emitter_rec = complex_to_tensor(phases_np).to(device)
            # use a uniform number of photons for recovery visualization
            nphotons_rec = 5000 * torch.ones((1, xyz_rec.shape[1])).to(device)
            # generate the recovered image by the net
            exp_pred_im = psf_module_vis(mask_param, phases_emitter_rec, nphotons_rec)
            # compare the recovered image to the input
            ShowRecNetInput(exp_tensor, 'Experimental Input to Localization Net')
            ShowRecNetInput(exp_pred_im, 'Recovered Input Matching Net Localizations')
            # return recovered locations and net confidence
            return xyz_rec, conf_rec
        else:
            # ==========================================================================================================
            # create a data generator to efficiently load imgs for temporal acquisitions
            # ==========================================================================================================
            # instantiate the data class and create a data loader for testing
            num_imgs = len(img_names)
            exp_test_set = ExpDataset(img_names, setup_params)
            exp_generator = DataLoader(exp_test_set, batch_size=1, shuffle=False)
            # time the entire dataset analysis
            tall_start = time.time()
            # needed pixel-size for plotting if only few images are in the folder
            visualize_flag, pixel_size_FOV = num_imgs < 100, setup_params['pixel_size_FOV']
            # needed recovery pixel size and minimal axial height for turning ums to nms
            psize_rec_xy, zmin = setup_params['pixel_size_rec'], setup_params['zmin']
            # process all experimental images
            cnn.eval()
            results = np.array(['frame', 'x [nm]', 'y [nm]', 'z [nm]', 'intensity [au]'])
            with torch.set_grad_enabled(False):
                for im_ind, exp_im_tensor in enumerate(exp_generator):
                    # print current image number
                    print('Processing Image [%d/%d]' % (im_ind + 1, num_imgs))
                    # time each frame
                    tfrm_start = time.time()
                    # transfer normalized image to device (CPU/GPU)
                    exp_im_tensor = exp_im_tensor.to(device)
                    # predicted volume using model
                    pred_volume = cnn(exp_im_tensor)
                    # post-process result to get the xyz coordinates and their confidence
                    xyz_rec, conf_rec = postprocessing_module(pred_volume)
                    # time it takes to analyze a single frame
                    tfrm_end = time.time() - tfrm_start
                    # if this is the first image, get the dimensions and the relevant center for plotting
                    if im_ind == 0:
                        N, C, H, W = exp_im_tensor.size()
                        ch, cw = np.floor(H / 2), np.floor(W / 2)
                    # if prediction is empty then set number fo found emitters to 0
                    # otherwise generate the frame column and append results for saving
                    if xyz_rec is None:
                        nemitters = 0
                    else:
                        nemitters = xyz_rec.shape[0]
                        frm_rec = (im_ind + 1)*np.ones(nemitters)
                        xyz_save = xyz_to_nm(xyz_rec, H*2, W*2, psize_rec_xy, zmin)
                        results = np.vstack((results, np.column_stack((frm_rec, xyz_save, conf_rec))))
                    # if the number of imgs is small then plot each image in the loop with localizations
                    if visualize_flag:
                        # show input image
                        fig100 = plt.figure(100)
                        im_np = np.squeeze(exp_im_tensor.cpu().numpy())
                        imfig = plt.imshow(im_np, cmap='gray')
                        plt.plot(xyz_rec[:, 0] / pixel_size_FOV + cw, xyz_rec[:, 1] / pixel_size_FOV + ch, 'r+')
                        plt.title('Single frame complete in {:.2f}s, found {:d} emitters'.format(tfrm_end, nemitters))
                        fig100.colorbar(imfig)
                        plt.draw()
                        plt.pause(0.05)
                        plt.clf()
                    else:
                        # print status
                        print('Single frame complete in {:.6f}s, found {:d} emitters'.format(tfrm_end, nemitters))
            # print the time it took for the entire analysis
            tall_end = time.time() - tall_start
            print('=' * 50)
            print('Analysis complete in {:.0f}h {:.0f}m {:.0f}s'.format(
                tall_end // 3600, np.floor((tall_end / 3600 - tall_end // 3600) * 60), tall_end % 60))
            print('=' * 50)
            # write the results to a csv file named "localizations.csv" under the exp img folder
            row_list = results.tolist()
            with open(exp_imgs_path + 'localizations.csv', 'w', newline='') as file:
                writer = csv.writer(file)
                writer.writerows(row_list)
            # return the localization results for the last image
            return xyz_rec, conf_rec
if __name__ == '__main__':
    # start a parser
    parser = argparse.ArgumentParser()
    # previously trained model
    parser.add_argument('--path_results', help='path to the results folder for the pre-trained model', required=True)
    # post-processing parameters
    # NOTE(review): test_model expects a dict with 'thresh'/'radius' keys, but
    # argparse delivers a raw string here -- presumably this should be parsed
    # (e.g. json.loads); confirm the intended format before changing it.
    parser.add_argument('--postprocessing_params', help='post-processing dictionary parameters', required=True)
    # path to the experimental images
    parser.add_argument('--exp_imgs_path', default=None, help='path to the experimental test images')
    # seed to run model
    # BUG FIX: without type=int a user-supplied --seed arrived as a string,
    # which breaks np.random.seed(); the default stays 66.
    parser.add_argument('--seed', default=66, type=int, help='seed for random test data generation')
    # parse the input arguments
    args = parser.parse_args()
    # run the data generation process
    xyz_rec, conf_rec = test_model(args.path_results, args.postprocessing_params, args.exp_imgs_path, args.seed)
| StarcoderdataPython |
31200 | <gh_stars>0
from django.db import models
from django.utils import timezone
from django.core.validators import MinLengthValidator
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
    """Custom user with a unique email, a display nickname and an avatar URL."""

    # Email doubles as a unique account identifier.
    email = models.EmailField(unique=True)

    # Nickname is the display name shown in the UI.
    nickname = models.CharField(
        max_length=20, validators=[MinLengthValidator(1)])

    # Avatar is the URL of the user's icon image (optional).
    avatar = models.URLField(
        max_length=200, null=True, validators=[MinLengthValidator(1)])

    def __str__(self):
        return f"@{self.username}"
class Article(models.Model):
    """A text article written by a :class:`User`."""

    # The author of this article. This field can be referenced by `article.author`
    author = models.ForeignKey(
        User,
        related_name="author",
        on_delete=models.CASCADE
    )
    # The title of this article
    title = models.CharField(
        validators=[MinLengthValidator(1)], max_length=100)
    # Actual content of this article
    content = models.TextField()
    # Date when data were created/updated
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        # BUG FIX: User defines no ``profile`` attribute, so the original
        # ``self.author.profile`` raised AttributeError whenever an article
        # was rendered. Render the author directly (uses User.__str__).
        return "\"%s\" by %s" % (self.title, self.author)
| StarcoderdataPython |
172374 | from rest_framework.exceptions import status, APIException
class ConflictError(APIException):
    """Raised when a request conflicts with the current state of the target
    resource (HTTP 409).

    Follows the DRF ``APIException`` contract: ``status_code`` and
    ``default_detail`` describe the response; a custom ``detail`` message may
    be passed to the constructor.
    """
    status_code = status.HTTP_409_CONFLICT
    default_detail = u'A database conflict occurred.'
    def __init__(self, detail=None):
        # Any falsy detail (None or '') falls back to the class default.
        # NOTE(review): this override skips APIException.__init__, so DRF's
        # detail coercion/codes are bypassed -- confirm that is intended.
        self.detail = detail if detail else self.default_detail
    def __str__(self):
        return self.detail
| StarcoderdataPython |
1687578 | <filename>icekit_events/management/commands/create_event_occurrences.py
from django.core.management.base import NoArgsCommand
from ...models import EventBase
class Command(NoArgsCommand):
    # NOTE(review): NoArgsCommand was removed in Django 1.10; presumably this
    # project pins an older Django -- confirm before porting to BaseCommand.
    help = 'Create missing repeat event occurrences'
    def handle_noargs(self, *args, **options):
        """Extend occurrences for every event that has repeat generators."""
        verbosity = int(options.get('verbosity'))
        # Get all events with generators
        events = EventBase.objects.exclude(repeat_generators=None)
        count = 0
        for event in events:
            created = event.extend_occurrences()
            # Log every event at verbosity >= 2, or only productive events at
            # verbosity 1; binds as ``>= 2 or (verbosity and created)``.
            if verbosity >= 2 or verbosity and created:
                self.stdout.write(
                    u'Created %s occurrences for: %s' % (created, event))
            count += created
        if verbosity >= 2 or verbosity and count:
            self.stdout.write('Created %s repeat events.' % count)
| StarcoderdataPython |
3251576 | #!/usr/bin/python3
from collections import namedtuple
# Sample values used below to demonstrate f-string formatting.
variable = 42
Point = namedtuple('Point', ['long', 'lat'])
point = Point(long=3, lat=4)
def function(number):
    """Return *number* squared."""
    return number * number
# Demonstrate f-strings with a plain name, attribute access and a call.
print(f'{variable}')
print(f'Longitude: {point.long}, Latitude: {point.lat}')
print(f'{function(variable)}')
| StarcoderdataPython |
3385460 | <reponame>rpm1995/LeetCode
class Solution:
    def canJump(self, nums: List[int]) -> bool:
        """Return True if the last index of ``nums`` is reachable from index 0.

        ``nums[i]`` is the maximum jump length from position ``i``.  Greedy
        backward scan: track the leftmost index known to reach the end; index
        ``i`` reaches the end iff ``i + nums[i]`` covers that index.  Same
        results as the original DP, but O(1) extra space instead of the O(n)
        ``can_reach`` list, and no redundant ``>= len(nums)`` test.
        """
        leftmost = len(nums) - 1
        for i in range(len(nums) - 2, -1, -1):
            if i + nums[i] >= leftmost:
                leftmost = i
        return leftmost == 0
| StarcoderdataPython |
3289854 | <reponame>Acidburn0zzz/dfvfs<filename>dfvfs/lib/fvde.py
# -*- coding: utf-8 -*-
"""Helper function for FileVault Drive Encryption (FVDE) support."""
from __future__ import unicode_literals
def FVDEVolumeOpen(fvde_volume, path_spec, file_object, key_chain):
  """Opens the FVDE volume using the path specification.

  Args:
    fvde_volume (pyfvde.volume): FVDE volume.
    path_spec (PathSpec): path specification.
    file_object (FileIO): file-like object.
    key_chain (KeyChain): key chain.
  """
  # Apply whichever credentials the key chain holds, then open the volume.
  root_plist_data = key_chain.GetCredential(path_spec, 'encrypted_root_plist')
  if root_plist_data:
    fvde_volume.read_encrypted_root_plist(root_plist_data)

  volume_password = key_chain.GetCredential(path_spec, 'password')
  if volume_password:
    fvde_volume.set_password(volume_password)

  volume_recovery_password = key_chain.GetCredential(
      path_spec, 'recovery_password')
  if volume_recovery_password:
    fvde_volume.set_recovery_password(volume_recovery_password)

  fvde_volume.open_file_object(file_object)
| StarcoderdataPython |
3261480 | # terrascript/provider/hashicorp/consul.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:14:36 UTC)
import terrascript
class consul(terrascript.Provider):
    """terraform-provider-consul"""
    # Auto-generated provider metadata (see the makecode.py header above);
    # regenerate instead of hand-editing these fields.
    __description__ = "terraform-provider-consul"
    __namespace__ = "hashicorp"
    __name__ = "consul"
    __source__ = "https://github.com/hashicorp/terraform-provider-consul"
    __version__ = "2.13.0"
    __published__ = "2021-08-19T21:38:52Z"
    __tier__ = "official"
__all__ = ["consul"]
| StarcoderdataPython |
3349970 | import os
class Wordlist:
    """An ordered, de-duplicated list of words backed by a file on disk."""

    # Kept for backward compatibility with code reading ``Wordlist.words``;
    # instances always get their own list in __init__ (see bug note there).
    words = []

    def __init__(self, path, words=None):
        """Bind the wordlist to *path*; optionally seed it with *words*.

        Raises:
            FileNotFoundError: if *path* does not name an existing file.
        """
        if os.path.exists(path) and os.path.isfile(path):
            self.path = path
        else:
            raise FileNotFoundError(f"[!] - {path} cannot be found")
        # BUG FIX: ``words=[]`` was a mutable default and, when empty, the
        # instance silently shared the class-level list with every other
        # instance; each instance now owns a private copy.
        self.words = list(words) if isinstance(words, list) and words else []

    def read(self, append=False):
        """Load unique words from the file (UTF-8, falling back to Latin-1).

        When *append* is False the in-memory list is replaced first.
        Returns the in-memory word list.
        """
        if not append:
            self.words = []
        try:
            with open(self.path, 'rb') as words_file:
                for line in words_file.readlines():
                    if not line:
                        continue
                    word = None
                    try:
                        word = line.decode('UTF-8').strip()
                    except UnicodeDecodeError:
                        try:
                            word = line.decode('Latin-1').strip()
                        except UnicodeDecodeError:
                            continue
                    # Skip blank lines after stripping; keep first occurrence
                    # of every word (order-preserving de-duplication).
                    if word and word not in self.words:
                        self.words.append(word)
            print(f"Read in {len(self.words)} words from {self.path}")
        except IOError:
            raise IOError(f"File {self.path} is not readable...")
        return self.words

    def write(self):
        """Write the in-memory words to the backing file, one per line."""
        if not self.words:
            print("No words to write")
            return
        try:
            # BUG FIX: the original did ``print(word.encode('utf-8'), file=f)``,
            # which wrote the *repr* of a bytes object (e.g. b'word') into the
            # file; write the text itself, explicitly UTF-8 encoded.
            with open(self.path, 'w', encoding='utf-8') as f:
                print(f"Writing {len(self.words)} words to {self.path} ")
                for word in self.words:
                    print(word, file=f)
        except IOError:
            raise IOError(f"File {self.path} cannot be written to")

    def clean(self):
        """Re-read the file (deduplicating as we go) and write it back."""
        self.read()
        self.write()
        print(f"Successfully Wrote {len(self.words)} to {self.path}")

    # TODO: extend() -- planned password-list extension (previously a
    # commented-out stub).
| StarcoderdataPython |
3231946 | <reponame>sUeharaE4/mlcomp
from typing import List
import os
import ast
from glob import glob
import pathspec
import pkg_resources
from mlcomp.db.core import Session
from mlcomp.utils.logging import create_logger
from mlcomp.utils.io import read_lines
# Import-name -> PyPI distribution name, for packages whose importable module
# differs from the name used to install them.
_mapping = {
    'cv2': 'opencv-python',
    'sklearn': 'scikit-learn',
    'migrate': 'sqlalchemy-migrate'
}
def find_imports(
    path: str,
    files: List[str] = None,
    exclude_patterns: List[str] = None,
    encoding='utf-8'
):
    """Collect (distribution_name, version) pairs for every package imported
    by the Python files under *path*.

    Files matching any gitignore-style pattern in *exclude_patterns* are
    skipped; imports whose distribution is not installed are silently
    ignored (best effort).
    """
    res = []
    raw_imports = []
    files = files if files is not None \
        else glob(os.path.join(path, '**', '*.py'), recursive=True)
    exclude_patterns = exclude_patterns \
        if exclude_patterns is not None else []
    spec = pathspec.PathSpec.from_lines(
        pathspec.patterns.GitWildMatchPattern, exclude_patterns
    )
    for file in files:
        if not file.endswith('.py'):
            continue
        file_rel = os.path.relpath(file, path)
        if spec.match_file(file_rel):
            continue
        with open(file, 'r', encoding=encoding) as f:
            content = f.read()
        try:
            # Walk the AST so nested/conditional imports are found too.
            tree = ast.parse(content)
            for node in ast.walk(tree):
                if isinstance(node, ast.Import):
                    for subnode in node.names:
                        raw_imports.append((subnode.name, file_rel))
                elif isinstance(node, ast.ImportFrom):
                    raw_imports.append((node.module, file_rel))
        except Exception as exc:
            logger = create_logger(Session.create_session(), __name__)
            logger.error('Failed on file: %s' % file_rel)
            raise exc
    for lib, file in raw_imports:
        # Top-level package name; translate known import->PyPI name mismatches.
        name = lib.split('.')[0]
        try:
            if name in _mapping:
                name = _mapping[name]
            version = pkg_resources.get_distribution(name).version
            res.append((name, version))
        except Exception:
            # Deliberate best effort: stdlib/uninstalled modules are skipped.
            pass
    return res
def _read_requirements(file: str):
    """Parse a requirements file into [name, relation, version] triples.

    ``relation`` is '>=' or '==' when the line pins a version; otherwise it
    and ``version`` are None and the name is the first whitespace-separated
    token of the line (str.split(None) semantics).
    """
    requirements = []
    for line in read_lines(file):
        if line == '':
            continue
        relation = None
        for candidate in ('>=', '=='):
            if candidate in line:
                relation = candidate
                break
        pkg_name = line.split(relation)[0].strip()
        version = line.split(relation)[1].strip() if relation else None
        requirements.append([pkg_name, relation, version])
    return requirements
def _write_requirements(file: str, reqs: List):
    """Serialize [name, relation, version] triples back to a requirements file."""
    lines = []
    for name, rel, ver in reqs:
        lines.append(f'{name}{rel}{ver}' if rel else name)
    with open(file, 'w') as f:
        f.write('\n'.join(lines))
def control_requirements(
    path: str, files: List[str] = None, exclude_patterns: List[str] = None
):
    """Sync requirements.txt under *path* with the project's actual imports.

    Creates requirements.txt / requirements.ignore.txt if missing, pins every
    discovered import to its installed version with '==' (overwriting any
    existing '>=' pin), and returns the resulting triples.
    """
    req_file = os.path.join(path, 'requirements.txt')
    if not os.path.exists(req_file):
        with open(req_file, 'w') as f:
            f.write('')
    req_ignore_file = os.path.join(path, 'requirements.ignore.txt')
    if not os.path.exists(req_ignore_file):
        with open(req_ignore_file, 'w') as f:
            f.write('')
    libs = find_imports(path, files=files, exclude_patterns=exclude_patterns)
    # Ignore list = user-provided ignores + bundled stdlib module names.
    module_folder = os.path.dirname(__file__)
    stdlib_file = os.path.join(module_folder, 'req_stdlib')
    ignore_libs = set(read_lines(req_ignore_file) + read_lines(stdlib_file))
    reqs = _read_requirements(req_file)
    for lib, version in libs:
        if lib in ignore_libs:
            continue
        # Update an existing entry in place, else append a new '==' pin.
        found = False
        for i in range(len(reqs)):
            if reqs[i][0] == lib:
                found = True
                reqs[i][1] = '=='
                reqs[i][2] = version
                break
        if not found:
            reqs.append([lib, '==', version])
    _write_requirements(req_file, reqs)
    return reqs
# NOTE(review): hard-coded developer path; handy for local runs only.
if __name__ == '__main__':
    folder = '/home/light/projects/mlcomp/'
    # for l, v in find_imports(folder):
    #     print(f'{l}={v}')
    control_requirements(folder, exclude_patterns=['mlcomp/server/front'])
| StarcoderdataPython |
34167 | <filename>hw_asr/augmentations/wave_augmentations/__init__.py
from hw_asr.augmentations.wave_augmentations.Gain import Gain
from hw_asr.augmentations.wave_augmentations.ImpulseResponse import ImpulseResponse
from hw_asr.augmentations.wave_augmentations.Noise import GaussianNoise
from hw_asr.augmentations.wave_augmentations.TimeStretch import TimeStretch

__all__ = [
    "Gain",
    "ImpulseResponse",
    "GaussianNoise",
    # FIX: TimeStretch was imported above but missing from __all__, so
    # ``from ... import *`` silently dropped it.
    "TimeStretch",
]
| StarcoderdataPython |
quote = """
Alright, but apart from the Sanitation, the Medicine, Education, Wine,
Public Order, Irrigation, Roads, the Fresh-Water System,
and Public Health, what have the Romans ever done for us?
"""

# Walk the quote one character at a time and print only the capital letters.
for character in quote:
    if not character.isalpha():
        continue
    if character.isupper():
        print(character)
| StarcoderdataPython |
138775 | <filename>sql/tests/setup/dataloader/make_sqlite.py
import os
import subprocess
import pandas as pd
# Repository root, resolved once at import time.
# NOTE(review): this shells out to git on *import*; importing the module
# outside a git checkout will fail -- confirm that is acceptable.
root_url = subprocess.check_output("git rev-parse --show-toplevel".split(" ")).decode("utf-8").strip()
from sqlalchemy import create_engine, text
from sqlalchemy import Column, Date, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
from .create_pums_dbs import *
def make_sqlite():
    """Create the four PUMS SQLite databases under <repo>/datasets if missing.

    Each database file is only built when it does not already exist; after
    building, a row count of its ``pums`` table is printed as a sanity check.
    The four copy-pasted stanzas of the original were folded into one helper.
    """
    targets = [
        ("pums.db", "PUMS", create_pums),
        ("pums_pid.db", "PUMS_pid", create_pums_pid),
        ("pums_dup.db", "PUMS_dup", create_pums_dup),
        ("pums_null.db", "PUMS_null", create_pums_null),
    ]
    for filename, label, builder in targets:
        _ensure_pums_db(filename, label, builder)


def _ensure_pums_db(filename, label, builder):
    """Build one SQLite database via *builder(engine)* if absent, then print
    the ``pums`` row count for verification."""
    db_path = os.path.join(root_url, "datasets", filename)
    if os.path.exists(db_path):
        return
    print('Creating {} in SQLite'.format(label))
    sqlite_file_path = f'sqlite:///{db_path}'
    engine = create_engine(sqlite_file_path, echo=False)
    builder(engine)
    with engine.begin() as conn:
        res = conn.execute(text('SELECT COUNT(*) FROM pums'))
        print(list(res))
| StarcoderdataPython |
1736880 | import logging
from io import StringIO
class Progress:
    """Accumulates log output for a long-running job and pushes PROGRESS
    state updates to a Celery-style task.

    IMPORTANT! Progress initialization is required to change job state into
    PROGRESS, so that its execution will be shown in '/process' app handler.
    """

    def __init__(self, total):
        self.stream = StringIO()
        self.handler = logging.StreamHandler(self.stream)
        fmt = logging.Formatter('[%(asctime)s] %(message)s', "%Y-%m-%d %H:%M:%S")
        self.handler.setFormatter(fmt)
        self.logger = logging.getLogger(__name__)
        self.logger.addHandler(self.handler)
        self.total = total

    def info(self, message, current, task=None):
        """Log *message* (if non-empty) and, when *task* is given, report
        *current*/*total* progress plus the captured log to the task."""
        if message:
            self.logger.info(message)
        if not task:
            return
        self.handler.flush()
        meta = {'current': current, 'total': self.total, 'log': self.stream.getvalue()}
        task.update_state(state='PROGRESS', meta=meta)

    def done(self, message='Done', task=None):
        """Report completion: log *message* at 100% progress."""
        self.info(message, self.total, task)

    def remove_handler(self):
        """Detach this instance's handler from the shared module logger."""
        self.logger.removeHandler(self.handler)

    def log(self):
        """Return everything captured so far as one string."""
        return self.stream.getvalue()
| StarcoderdataPython |
1766230 | from pathlib import Path
import os
#import dj_database_url
# Project root: two directories above this settings module.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# NOTE(review): DEBUG enabled and LAN IPs whitelisted — local/dev settings only.
DEBUG = True
ALLOWED_HOSTS = ['192.168.100.198','192.168.1.21','192.168.1.7','127.0.0.1','localhost']
# MySQL credentials are kept out of source control in a .cnf file.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'OPTIONS': {
            'read_default_file': os.path.join(os.path.join(BASE_DIR, 'phoenix'),'secret_local_my_sql.cnf'),
        },
    }
}
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(BASE_DIR, 'db_local_2.sqlite3'),
#     }
# }
# DATABASES['default'] = dj_database_url.config(conn_max_age=600, ssl_require=True)
# Custom flag (presumably read elsewhere in the project to branch on backend) — TODO confirm.
MYSQL=True
TIME_ZONE = 'Asia/Tehran'
# All URL prefixes below are derived from SITE_URL so the app can be re-rooted.
SITE_URL='/'
ADMIN_URL=SITE_URL+'admin/'
STATIC_URL = SITE_URL+'static/'
STATIC_ROOT = os.path.join(BASE_DIR,'staticfiles')
MEDIA_URL = SITE_URL+'media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
STATICFILES_DIRS=[os.path.join(BASE_DIR,'static')]
# Project-specific feature flags — semantics defined by consuming code.
PUSHER_IS_ENABLE=True
REMOTE_MEDIA=False
COMING_SOON=False
DOWNLOAD_ROOT=os.path.join(BASE_DIR,'download')
SITE_DOMAIN='http://127.0.0.1:8080/'
| StarcoderdataPython |
4819444 | # -*- coding: utf-8 -*-
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2008-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file connection.py
# @author <NAME>
# @author <NAME>
# @author <NAME>
# @author <NAME>
# @author <NAME>
# @date 2008-10-09
from __future__ import print_function
from __future__ import absolute_import
import socket
import struct
import sys
import warnings
import abc
from . import constants as tc
from .exceptions import TraCIException, FatalTraCIError
from .domain import _defaultDomains
from .storage import Storage
_RESULTS = {0x00: "OK", 0x01: "Not implemented", 0xFF: "Error"}
class Connection:
    """Contains the socket, the composed message string
    together with a list of TraCI commands which are inside.
    """

    def __init__(self, host, port, process):
        # `process` is the handle of the launched SUMO subprocess; presumably
        # None when attaching to an externally started server — TODO confirm.
        if sys.platform.startswith('java'):
            # working around jython 2.7.0 bug #2273
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        else:
            self._socket = socket.socket()
        # TraCI exchanges many small messages; disable Nagle's algorithm.
        self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self._socket.connect((host, port))
        self._process = process
        self._string = bytes()  # outgoing message buffer, flushed by _sendExact()
        self._queue = []  # command ids whose answers are expected in the next reply
        self._subscriptionMapping = {}
        self._stepListeners = {}
        self._nextStepListenerID = 0
        for domain in _defaultDomains:
            domain._register(self, self._subscriptionMapping)

    def _recvExact(self):
        """Receive one complete length-prefixed TraCI message.

        Returns a Storage holding the payload (4-byte length prefix stripped),
        or None if the connection was closed or a socket error occurred.
        """
        try:
            result = bytes()
            while len(result) < 4:
                t = self._socket.recv(4 - len(result))
                if not t:
                    return None
                result += t
            length = struct.unpack("!i", result)[0] - 4
            result = bytes()
            while len(result) < length:
                t = self._socket.recv(length - len(result))
                if not t:
                    return None
                result += t
            return Storage(result)
        except socket.error:
            return None

    def _sendExact(self):
        """Flush the outgoing buffer and read the status answers for every
        queued command.

        Raises TraCIException on a command-level error (queue and buffer are
        reset) and FatalTraCIError when the connection breaks or an answer
        does not correspond to its command.
        """
        length = struct.pack("!i", len(self._string) + 4)
        # print("python_sendExact: '%s'" % ' '.join(map(lambda x : "%X" % ord(x), self._string)))
        self._socket.send(length + self._string)
        result = self._recvExact()
        if not result:
            self._socket.close()
            del self._socket
            raise FatalTraCIError("connection closed by SUMO")
        for command in self._queue:
            prefix = result.read("!BBB")
            err = result.readString()
            if prefix[2] or err:
                self._string = bytes()
                self._queue = []
                raise TraCIException(err, prefix[1], _RESULTS[prefix[2]])
            elif prefix[1] != command:
                raise FatalTraCIError("Received answer %s for command %s." % (prefix[1], command))
            elif prefix[1] == tc.CMD_STOP:
                # skip the remainder of the stop answer
                length = result.read("!B")[0] - 1
                result.read("!%sx" % length)
        self._string = bytes()
        self._queue = []
        return result

    def _pack(self, format, *values):
        """Serialize *values* into the TraCI wire format.

        Each character of *format* selects the encoding of the corresponding
        value: i=int, d=double, b/B=signed/unsigned byte, s=string, p=polygon,
        t=compound header, c=color, l=string list, f=double list, o/O/g/G=
        2D/3D cartesian / lon-lat positions, r=roadmap position; capital
        I and D are raw values written without a leading type byte.
        """
        packed = bytes()
        for f, v in zip(format, values):
            if f == "i":
                packed += struct.pack("!Bi", tc.TYPE_INTEGER, int(v))
            elif f == "I":  # raw int for setOrder
                packed += struct.pack("!i", int(v))
            elif f == "d":
                packed += struct.pack("!Bd", tc.TYPE_DOUBLE, float(v))
            elif f == "D":  # raw double for some base commands like simstep
                packed += struct.pack("!d", float(v))
            elif f == "b":
                packed += struct.pack("!Bb", tc.TYPE_BYTE, int(v))
            elif f == "B":
                packed += struct.pack("!BB", tc.TYPE_UBYTE, int(v))
            elif f == "u":  # raw unsigned byte needed for distance command and subscribe
                packed += struct.pack("!B", int(v))
            elif f == "s":
                v = str(v)
                packed += struct.pack("!Bi", tc.TYPE_STRING, len(v)) + v.encode("latin1")
            elif f == "p":  # polygon
                if len(v) <= 255:
                    packed += struct.pack("!BB", tc.TYPE_POLYGON, len(v))
                else:
                    # extended polygon: length 0 marker followed by 32 bit count
                    packed += struct.pack("!BBi", tc.TYPE_POLYGON, 0, len(v))
                for p in v:
                    packed += struct.pack("!dd", *p)
            elif f == "t":  # tuple aka compound
                packed += struct.pack("!Bi", tc.TYPE_COMPOUND, v)
            elif f == "c":  # color
                packed += struct.pack("!BBBBB", tc.TYPE_COLOR, int(v[0]), int(v[1]), int(v[2]),
                                      int(v[3]) if len(v) > 3 else 255)
            elif f == "l":  # string list
                packed += struct.pack("!Bi", tc.TYPE_STRINGLIST, len(v))
                for s in v:
                    packed += struct.pack("!i", len(s)) + s.encode("latin1")
            elif f == "f":  # float list
                packed += struct.pack("!Bi", tc.TYPE_DOUBLELIST, len(v))
                for x in v:
                    packed += struct.pack("!d", x)
            elif f == "o":
                packed += struct.pack("!Bdd", tc.POSITION_2D, *v)
            elif f == "O":
                packed += struct.pack("!Bddd", tc.POSITION_3D, *v)
            elif f == "g":
                packed += struct.pack("!Bdd", tc.POSITION_LON_LAT, *v)
            elif f == "G":
                packed += struct.pack("!Bddd", tc.POSITION_LON_LAT_ALT, *v)
            elif f == "r":
                packed += struct.pack("!Bi", tc.POSITION_ROADMAP, len(v[0])) + v[0].encode("latin1")
                packed += struct.pack("!dB", v[1], v[2])
        return packed

    def _sendCmd(self, cmdID, varID, objID, format="", *values):
        """Compose one command message, send it and return the server's reply.

        *varID* may be None (plain command), an int (get/set variable) or a
        (begin, end) tuple (subscription time range).
        """
        self._queue.append(cmdID)
        packed = self._pack(format, *values)
        length = len(packed) + 1 + 1  # length and command
        if varID is not None:
            if isinstance(varID, tuple):  # begin and end of a subscription
                length += 8 + 8 + 4 + len(objID)
            else:
                length += 1 + 4 + len(objID)
        if length <= 255:
            self._string += struct.pack("!BB", length, cmdID)
        else:
            # extended message: leading 0 byte then a 32 bit length field
            self._string += struct.pack("!BiB", 0, length + 4, cmdID)
        if varID is not None:
            if isinstance(varID, tuple):
                self._string += struct.pack("!dd", *varID)
            else:
                self._string += struct.pack("!B", varID)
            self._string += struct.pack("!i", len(objID)) + objID.encode("latin1")
        self._string += packed
        return self._sendExact()

    def _readSubscription(self, result):
        """Parse one subscription answer block from *result* and distribute
        the values into the matching domain's subscription storage.

        Returns (objectID, response code). Raises FatalTraCIError for a
        response code that no registered domain can handle.
        """
        # to enable this you also need to set _DEBUG to True in storage.py
        # result.printDebug()
        result.readLength()
        response = result.read("!B")[0]
        isVariableSubscription = (response >= tc.RESPONSE_SUBSCRIBE_INDUCTIONLOOP_VARIABLE and
                                  response <= tc.RESPONSE_SUBSCRIBE_PERSON_VARIABLE)
        objectID = result.readString()
        if not isVariableSubscription:
            domain = result.read("!B")[0]
        numVars = result.read("!B")[0]
        if isVariableSubscription:
            while numVars > 0:
                varID, status = result.read("!BB")
                if status:
                    print("Error!", result.readTypedString())
                elif response in self._subscriptionMapping:
                    self._subscriptionMapping[response].add(objectID, varID, result)
                else:
                    raise FatalTraCIError(
                        "Cannot handle subscription response %02x for %s." % (response, objectID))
                numVars -= 1
        else:
            # context subscription: values are reported per neighboring object
            objectNo = result.read("!i")[0]
            for _ in range(objectNo):
                oid = result.readString()
                if numVars == 0:
                    self._subscriptionMapping[response].addContext(
                        objectID, self._subscriptionMapping[domain], oid)
                for __ in range(numVars):
                    varID, status = result.read("!BB")
                    if status:
                        print("Error!", result.readTypedString())
                    elif response in self._subscriptionMapping:
                        self._subscriptionMapping[response].addContext(
                            objectID, self._subscriptionMapping[domain], oid, varID, result)
                    else:
                        raise FatalTraCIError(
                            "Cannot handle subscription response %02x for %s." % (response, objectID))
        return objectID, response

    def _subscribe(self, cmdID, begin, end, objID, varIDs, parameters=None):
        """Send a variable subscription for *objID* covering sim time
        [begin, end] and verify the server's acknowledgement."""
        format = "u"
        args = [len(varIDs)]
        for v in varIDs:
            format += "u"
            args.append(v)
            if parameters is not None and v in parameters:
                f, a = parameters[v]
                format += f
                args.append(a)
        result = self._sendCmd(cmdID, (begin, end), objID, format, *args)
        if varIDs:
            objectID, response = self._readSubscription(result)
            # response code of a successful subscription is cmdID + 0x10
            if response - cmdID != 16 or objectID != objID:
                raise FatalTraCIError("Received answer %02x,%s for subscription command %02x,%s." % (
                    response, objectID, cmdID, objID))

    def _getSubscriptionResults(self, cmdID):
        """Return the stored subscription results object for *cmdID*."""
        return self._subscriptionMapping[cmdID]

    def _subscribeContext(self, cmdID, begin, end, objID, domain, dist, varIDs):
        """Send a context subscription (values of *domain* objects within
        *dist* around *objID*) and verify the acknowledgement."""
        result = self._sendCmd(cmdID, (begin, end), objID, "uDu" + (len(varIDs) * "u"),
                               domain, dist, len(varIDs), *varIDs)
        if varIDs:
            objectID, response = self._readSubscription(result)
            if response - cmdID != 16 or objectID != objID:
                raise FatalTraCIError("Received answer %02x,%s for context subscription command %02x,%s." % (
                    response, objectID, cmdID, objID))

    def _addSubscriptionFilter(self, filterType, params=None):
        """Attach a filter to the last issued context subscription.

        The expected *params* shape depends on *filterType*: none, a float,
        a list of strings, or a list of lane offsets (bytes).
        """
        if filterType in (tc.FILTER_TYPE_NONE, tc.FILTER_TYPE_NOOPPOSITE,
                          tc.FILTER_TYPE_TURN, tc.FILTER_TYPE_LEAD_FOLLOW):
            # filter without parameter
            assert params is None
            self._sendCmd(tc.CMD_ADD_SUBSCRIPTION_FILTER, None, None, "u", filterType)
        elif filterType in (tc.FILTER_TYPE_DOWNSTREAM_DIST, tc.FILTER_TYPE_UPSTREAM_DIST,
                            tc.FILTER_TYPE_FIELD_OF_VISION, tc.FILTER_TYPE_LATERAL_DIST):
            # filter with float parameter
            self._sendCmd(tc.CMD_ADD_SUBSCRIPTION_FILTER, None, None, "ud", filterType, params)
        elif filterType in (tc.FILTER_TYPE_VCLASS, tc.FILTER_TYPE_VTYPE):
            # filter with list(string) parameter
            self._sendCmd(tc.CMD_ADD_SUBSCRIPTION_FILTER, None, None, "ul", filterType, params)
        elif filterType == tc.FILTER_TYPE_LANES:
            # filter with list(byte) parameter
            # check uniqueness of given lanes in list
            lanes = set()
            for i in params:
                lane = int(i)
                if lane < 0:
                    # wrap negative lane offsets into an unsigned byte
                    lane += 256
                lanes.add(lane)
            if len(lanes) < len(list(params)):
                warnings.warn("Ignoring duplicate lane specification for subscription filter.")
            self._sendCmd(tc.CMD_ADD_SUBSCRIPTION_FILTER, None, None,
                          (len(lanes) + 2) * "u", filterType, len(lanes), *lanes)

    def load(self, args):
        """
        Load a simulation from the given arguments.
        """
        self._sendCmd(tc.CMD_LOAD, None, None, "l", args)

    def simulationStep(self, step=0.):
        """
        Make a simulation step and simulate up to the given second in sim time.
        If the given value is 0 or absent, exactly one step is performed.
        Values smaller than or equal to the current sim time result in no action.
        """
        if type(step) is int and step >= 1000:
            warnings.warn("API change now handles step as floating point seconds", stacklevel=2)
        result = self._sendCmd(tc.CMD_SIMSTEP, None, None, "D", step)
        for subscriptionResults in self._subscriptionMapping.values():
            subscriptionResults.reset()
        numSubs = result.readInt()
        responses = []
        while numSubs > 0:
            responses.append(self._readSubscription(result))
            numSubs -= 1
        self._manageStepListeners(step)
        return responses

    def _manageStepListeners(self, step):
        """Invoke every registered step listener; remove those whose step()
        returned a falsy value."""
        listenersToRemove = []
        for (listenerID, listener) in self._stepListeners.items():
            keep = listener.step(step)
            if not keep:
                listenersToRemove.append(listenerID)
        for listenerID in listenersToRemove:
            self.removeStepListener(listenerID)

    def addStepListener(self, listener):
        """addStepListener(traci.StepListener) -> int

        Append the step listener (its step function is called at the end of every call to traci.simulationStep())
        Returns the ID assigned to the listener if it was added successfully, None otherwise.
        """
        if issubclass(type(listener), StepListener):
            listener.setID(self._nextStepListenerID)
            self._stepListeners[self._nextStepListenerID] = listener
            self._nextStepListenerID += 1
            # print ("traci: Added stepListener %s\nlisteners: %s"%(_nextStepListenerID - 1, _stepListeners))
            return self._nextStepListenerID - 1
        warnings.warn(
            "Proposed listener's type must inherit from traci.StepListener. Not adding object of type '%s'" %
            type(listener))
        return None

    def removeStepListener(self, listenerID):
        """removeStepListener(traci.StepListener) -> bool

        Remove the step listener from traci's step listener container.
        Returns True if the listener was removed successfully, False if it wasn't registered.
        """
        # print ("traci: removeStepListener %s\nlisteners: %s"%(listenerID, _stepListeners))
        if listenerID in self._stepListeners:
            self._stepListeners[listenerID].cleanUp()
            del self._stepListeners[listenerID]
            # print ("traci: Removed stepListener %s"%(listenerID))
            return True
        warnings.warn("Cannot remove unknown listener %s.\nlisteners:%s" % (listenerID, self._stepListeners))
        return False

    def getVersion(self):
        """Return the (apiVersion, softwareVersion) pair reported by the server."""
        command = tc.CMD_GETVERSION
        result = self._sendCmd(command, None, None)
        result.readLength()
        response = result.read("!B")[0]
        if response != command:
            raise FatalTraCIError("Received answer %s for command %s." % (response, command))
        return result.readInt(), result.readString()

    def setOrder(self, order):
        """Set this client's execution order for multi-client scenarios."""
        self._sendCmd(tc.CMD_SETORDER, None, None, "I", order)

    def close(self, wait=True):
        """Remove all step listeners, tell the server to close the connection
        and, if *wait* is True and a subprocess is attached, wait for it."""
        for listenerID in list(self._stepListeners.keys()):
            self.removeStepListener(listenerID)
        if hasattr(self, "_socket"):
            self._sendCmd(tc.CMD_CLOSE, None, None)
            self._socket.close()
            del self._socket
        if wait and self._process is not None:
            self._process.wait()
class StepListener(object):
    """Abstract base class for objects notified after each simulationStep()."""
    # NOTE(review): `__metaclass__` is the Python 2 spelling; under Python 3 it
    # has no effect, so @abstractmethod is not actually enforced there.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def step(self, t=0):
        """step(int) -> bool

        After adding a StepListener 'listener' with traci.addStepListener(listener),
        TraCI will call listener.step(t) after each call to traci.simulationStep(t)
        The return value indicates whether the stepListener wants to stay active.
        """
        return True

    def cleanUp(self):
        """cleanUp() -> None

        This method is called at removal of the stepListener, allowing to schedule some final actions
        """
        pass

    def setID(self, ID):
        # ID is assigned by Connection.addStepListener.
        self._ID = ID

    def getID(self):
        # Return the ID previously assigned via setID.
        return self._ID
| StarcoderdataPython |
1639797 | <filename>cvat/apps/tf_annotation/views.py
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import ast
import datetime
import threading
import time
from zipfile import ZipFile
from django.http import HttpResponse, JsonResponse, HttpResponseBadRequest, QueryDict
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render
from rest_framework.decorators import api_view
from rules.contrib.views import permission_required, objectgetter
from cvat.apps.authentication.decorators import login_required
from cvat.apps.auto_annotation.models import AnnotationModel
from cvat.apps.engine.models import Task as TaskModel
from cvat.apps.engine.frame_provider import FrameProvider
from cvat.apps.engine.data_manager import TrackManager
from cvat.apps.engine.models import (Job, TrackedShape)
from cvat.apps.engine.serializers import (TrackedShapeSerializer)
from cvat.apps.engine import annotation, task
from cvat.apps.engine.serializers import LabeledDataSerializer
from cvat.apps.engine.annotation import put_task_data,patch_task_data
from tensorflow.python.client import device_lib
import django_rq
import fnmatch
import logging
import copy
import json
import os
import rq
import tensorflow as tf
import numpy as np
from PIL import Image
from cvat.apps.engine.log import slogger
from cvat.settings.base import DATA_ROOT
def load_image_into_numpy(image):
    """Convert a PIL RGB image into an (H, W, 3) uint8 numpy array."""
    width, height = image.size
    flat = np.array(image.getdata())
    return flat.reshape((height, width, 3)).astype(np.uint8)
def run_tensorflow_annotation(frame_provider, labels_mapping, threshold, model_path):
    """Run a frozen TF object-detection graph over every frame of a task.

    :param frame_provider: FrameProvider yielding the task's original images
    :param labels_mapping: {tf class id: task label id}; only these classes are kept
    :param threshold: minimum detection score for a box to be accepted
    :param model_path: path to a frozen inference graph; '.pb' is appended for
        the bundled default ("inference") model
    :return: {label_id: [[frame, xmin, ymin, xmax, ymax], ...]} in original
        image pixel coordinates, or None when the job was cancelled by the user
    """
    def _normalize_box(box, w, h):
        # TF reports boxes as [ymin, xmin, ymax, xmax] normalized to [0, 1];
        # scale them back to pixel coordinates of the (w, h) image.
        xmin = int(box[1] * w)
        ymin = int(box[0] * h)
        xmax = int(box[3] * w)
        ymax = int(box[2] * h)
        return xmin, ymin, xmax, ymax

    result = {}
    #use model path provided by user
    # model_path = os.environ.get('TF_ANNOTATION_MODEL_PATH')
    if model_path is None:
        raise OSError('Model path env not found in the system.')
    job = rq.get_current_job()

    #add .pb if default model selected
    if "inference" in model_path and not model_path.endswith('pb'):
        model_path += ".pb"

    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(model_path , 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        try:
            config = tf.ConfigProto()
            config.gpu_options.allow_growth=True
            sess = tf.Session(graph=detection_graph, config=config)
            frames = frame_provider.get_frames(frame_provider.Quality.ORIGINAL)
            for image_num, (image, _) in enumerate(frames):

                # Bail out cooperatively if the user cancelled the RQ job.
                job.refresh()
                if 'cancel' in job.meta:
                    del job.meta['cancel']
                    job.save()
                    return None
                job.meta['progress'] = image_num * 100 / len(frame_provider)
                job.save_meta()

                image = Image.open(image)
                width, height = image.size
                # Downscale very large frames for speed; boxes are normalized,
                # so rescaling below against the ORIGINAL width/height is correct.
                if width > 1920 or height > 1080:
                    image = image.resize((width // 2, height // 2), Image.ANTIALIAS)
                image_np = load_image_into_numpy(image)
                image_np_expanded = np.expand_dims(image_np, axis=0)

                image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
                boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                scores = detection_graph.get_tensor_by_name('detection_scores:0')
                classes = detection_graph.get_tensor_by_name('detection_classes:0')
                num_detections = detection_graph.get_tensor_by_name('num_detections:0')

                (boxes, scores, classes, num_detections) = sess.run([boxes, scores, classes, num_detections], feed_dict={image_tensor: image_np_expanded})

                for i in range(len(classes[0])):
                    if classes[0][i] in labels_mapping.keys():
                        if scores[0][i] >= threshold:
                            xmin, ymin, xmax, ymax = _normalize_box(boxes[0][i], width, height)
                            label = labels_mapping[classes[0][i]]
                            if label not in result:
                                result[label] = []
                            result[label].append([image_num, xmin, ymin, xmax, ymax])
        finally:
            sess.close()
            del sess

    return result
def convert_to_cvat_format(data):
    """Convert raw detections into the CVAT annotation payload.

    *data* maps label_id -> list of [frame, xmin, ymin, xmax, ymax] boxes;
    each box becomes a rectangle shape in the returned structure.
    """
    shapes = [
        {
            "type": "rectangle",
            "label_id": label,
            "frame": box[0],
            "points": [box[1], box[2], box[3], box[4]],
            "z_order": 0,
            "group": None,
            "occluded": False,
            "attributes": [],
        }
        for label, boxes in data.items()
        for box in boxes
    ]
    return {
        "tracks": [],
        "shapes": shapes,
        "tags": [],
        "version": 0,
    }
def create_thread(tid, labels_mapping, user, tf_annotation_model_path, reset):
    """RQ worker entry point: annotate task *tid* with a TF model and save.

    :param tid: task id to annotate
    :param labels_mapping: {tf class id: task label id}
    :param user: user on whose behalf annotations are written
    :param tf_annotation_model_path: path to the frozen inference graph
    :param reset: True replaces existing annotations, False appends to them
    """
    try:
        THRESHOLD = 0.5
        # Init rq job
        job = rq.get_current_job()
        job.meta['progress'] = 0
        job.save_meta()
        # Get job indexes and segment length
        db_task = TaskModel.objects.get(pk=tid)
        # Get image list
        image_list = FrameProvider(db_task.data)

        # Run auto annotation by tf
        result = None
        slogger.glob.info("tf annotation with tensorflow framework for task {}".format(tid))
        result = run_tensorflow_annotation(image_list, labels_mapping, THRESHOLD, tf_annotation_model_path)

        if result is None:
            # None signals the user cancelled the job mid-run.
            slogger.glob.info('tf annotation for task {} canceled by user'.format(tid))
            return

        # Modify data format and save
        result = convert_to_cvat_format(result)
        serializer = LabeledDataSerializer(data = result)
        if serializer.is_valid(raise_exception=True):
            if reset:
                put_task_data(tid, user, result)
            else:
                patch_task_data(tid, user, result, "create")
        slogger.glob.info('tf annotation for task {} done'.format(tid))
    except Exception as ex:
        try:
            slogger.task[tid].exception('exception was occured during tf annotation of the task', exc_info=True)
        except:
            # the per-task logger itself may be unavailable; fall back to the global one
            slogger.glob.exception('exception was occured during tf annotation of the task {}'.format(tid), exc_info=True)
        raise ex
@api_view(['POST'])
@login_required
def get_meta_info(request):
    """Report TF-annotation job status for a list of task ids.

    Request body: JSON list of task ids. Response: {tid: {"active": bool,
    "success": bool}} with entries only for tasks that have a known job.
    """
    try:
        queue = django_rq.get_queue('low')
        tids = request.data
        result = {}
        for tid in tids:
            job = queue.fetch_job('tf_annotation.create/{}'.format(tid))
            if job is not None:
                result[tid] = {
                    "active": job.is_queued or job.is_started,
                    "success": not job.is_failed
                }

        return JsonResponse(result)
    except Exception as ex:
        # BUG FIX: was `exc_into=True` — an unknown kwarg that made the
        # logging call itself raise TypeError inside the exception handler.
        slogger.glob.exception('exception was occured during tf meta request', exc_info=True)
        return HttpResponseBadRequest(str(ex))
@permission_required(perm=['engine.task.change'],
                     fn=objectgetter(TaskModel, 'tid'), raise_exception=True)
def create(request, tid, mid):
    """Enqueue a TF auto-annotation RQ job for task *tid* using model *mid*.

    Request body: JSON with "labels" ({tf class name: task label name}) and
    "reset" (replace vs. append annotations). mid == 989898 selects the
    bundled COCO model configured via TF_ANNOTATION_MODEL_PATH; any other mid
    loads an uploaded AnnotationModel and its labelmap file.
    NOTE(review): unlike check/cancel below, this view has no @login_required —
    confirm whether that is intentional.
    """
    slogger.glob.info('tf annotation create request for task {}'.format(tid))
    try:
        data = json.loads(request.body.decode('utf-8'))
        user_label_mapping = data["labels"]
        should_reset = data['reset']
        db_task = TaskModel.objects.get(pk=tid)
        queue = django_rq.get_queue('low')
        # Refuse to start a second job for the same task.
        job = queue.fetch_job('tf_annotation.create/{}'.format(tid))
        if job is not None and (job.is_started or job.is_queued):
            raise Exception("The process is already running")
        db_labels = db_task.label_set.prefetch_related('attributespec_set').all()
        db_labels = {db_label.id:db_label.name for db_label in db_labels}

        if int(mid) == 989898:
            # Sentinel id for the built-in COCO-trained model.
            should_reset = True
            tf_model_file_path = os.getenv('TF_ANNOTATION_MODEL_PATH')
            # COCO class name -> COCO class id.
            tf_annotation_labels = {
                "person": 1, "bicycle": 2, "car": 3, "motorcycle": 4, "airplane": 5,
                "bus": 6, "train": 7, "truck": 8, "boat": 9, "traffic_light": 10,
                "fire_hydrant": 11, "stop_sign": 13, "parking_meter": 14, "bench": 15,
                "bird": 16, "cat": 17, "dog": 18, "horse": 19, "sheep": 20, "cow": 21,
                "elephant": 22, "bear": 23, "zebra": 24, "giraffe": 25, "backpack": 27,
                "umbrella": 28, "handbag": 31, "tie": 32, "suitcase": 33, "frisbee": 34,
                "skis": 35, "snowboard": 36, "sports_ball": 37, "kite": 38, "baseball_bat": 39,
                "baseball_glove": 40, "skateboard": 41, "surfboard": 42, "tennis_racket": 43,
                "bottle": 44, "wine_glass": 46, "cup": 47, "fork": 48, "knife": 49, "spoon": 50,
                "bowl": 51, "banana": 52, "apple": 53, "sandwich": 54, "orange": 55, "broccoli": 56,
                "carrot": 57, "hot_dog": 58, "pizza": 59, "donut": 60, "cake": 61, "chair": 62,
                "couch": 63, "potted_plant": 64, "bed": 65, "dining_table": 67, "toilet": 70,
                "tv": 72, "laptop": 73, "mouse": 74, "remote": 75, "keyboard": 76, "cell_phone": 77,
                "microwave": 78, "oven": 79, "toaster": 80, "sink": 81, "refrigerator": 83,
                "book": 84, "clock": 85, "vase": 86, "scissors": 87, "teddy_bear": 88, "hair_drier": 89,
                "toothbrush": 90
                }
            # Map COCO class ids to task label ids for labels with matching names.
            labels_mapping = {}
            for key, labels in db_labels.items():
                if labels in tf_annotation_labels.keys():
                    labels_mapping[tf_annotation_labels[labels]] = key
        else:
            # User-uploaded model: read its class list from the labelmap file.
            dl_model = AnnotationModel.objects.get(pk=mid)
            classes_file_path = dl_model.labelmap_file.name
            tf_model_file_path = dl_model.model_file.name

            # Load and generate the tf annotation labels
            tf_annotation_labels = {}
            with open(classes_file_path, "r") as f:
                f.readline()  # First line is header
                line = f.readline().rstrip()
                cnt = 1
                while line:
                    tf_annotation_labels[line] = cnt
                    line = f.readline().rstrip()
                    cnt += 1

            if len(tf_annotation_labels) == 0:
                raise Exception("No classes found in classes file.")

            # Resolve the user-provided class-name -> label-name mapping into ids.
            labels_mapping = {}
            for tf_class_label, mapped_task_label in user_label_mapping.items():
                for task_label_id, task_label_name in db_labels.items():
                    if task_label_name == mapped_task_label:
                        if tf_class_label in tf_annotation_labels.keys():
                            labels_mapping[tf_annotation_labels[tf_class_label]] = task_label_id

        if not len(labels_mapping.values()):
            raise Exception('No labels found for tf annotation')

        # Run tf annotation job
        queue.enqueue_call(func=create_thread,
            args=(tid, labels_mapping, request.user, tf_model_file_path, should_reset),
            job_id='tf_annotation.create/{}'.format(tid),
            timeout=604800)  # 7 days

        slogger.task[tid].info('tensorflow annotation job enqueued with labels {}'.format(labels_mapping))

    except Exception as ex:
        try:
            slogger.task[tid].exception("exception was occured during tensorflow annotation request", exc_info=True)
        except:
            pass
        return HttpResponseBadRequest(str(ex))

    return HttpResponse()
@login_required
@permission_required(perm=['engine.task.access'],
                     fn=objectgetter(TaskModel, 'tid'), raise_exception=True)
def check(request, tid):
    """Report the state of the TF-annotation RQ job for task *tid*.

    Returns JSON {"status": one of unknown/queued/started/finished/failed},
    plus "progress" while started and "stderr" on failure.
    """
    # BUG FIX: `data` was initialized inside the try block, so an exception in
    # get_queue()/fetch_job() made the except handler raise NameError.
    data = {}
    try:
        queue = django_rq.get_queue('low')
        job = queue.fetch_job('tf_annotation.create/{}'.format(tid))
        if job is not None and 'cancel' in job.meta:
            # A cancellation was requested; report the job as finished.
            return JsonResponse({'status':'finished'})
        if job is None:
            data['status'] = 'unknown'
        elif job.is_queued:
            data['status'] = 'queued'
        elif job.is_started:
            data['status'] = 'started'
            data['progress'] = job.meta['progress']
        elif job.is_finished:
            data['status'] = 'finished'
            job.delete()
        else:
            data['status'] = 'failed'
            data['stderr'] = job.exc_info
            job.delete()
    except Exception:
        data['status'] = 'unknown'

    return JsonResponse(data)
@login_required
@permission_required(perm=['engine.task.change'],
                     fn=objectgetter(TaskModel, 'tid'), raise_exception=True)
def cancel(request, tid):
    """Request cancellation of the running TF-annotation job for task *tid*.

    Cancellation is cooperative: a 'cancel' flag is written into the job's
    meta and the worker checks it between frames.
    """
    try:
        queue = django_rq.get_queue('low')
        job = queue.fetch_job('tf_annotation.create/{}'.format(tid))
        if job is None or job.is_finished or job.is_failed:
            raise Exception('Task is not being annotated currently')
        elif 'cancel' not in job.meta:
            job.meta['cancel'] = True
            job.save()

    except Exception as ex:
        try:
            slogger.task[tid].exception("cannot cancel tensorflow annotation for task #{}".format(tid), exc_info=True)
        except:
            pass
        return HttpResponseBadRequest(str(ex))

    return HttpResponse()
1640748 | #!/usr/bin/python
import subprocess
from subprocess import PIPE
def test_subprocess():
    """Demo of two ways to run 'adb devices' and inspect its output."""
    command = 'adb devices'
    # Low-level API: Popen + communicate() captures stdout/stderr as bytes.
    proc = subprocess.Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    print(out.decode('UTF-8'))
    print(type(out.decode('utf-8')))
    # Higher-level API: subprocess.run() returns a CompletedProcess.
    completed = subprocess.run('adb devices', shell=True, stdout=PIPE)
    if completed.returncode != 0:
        print("no device found")
    else:
        listing = completed.stdout.decode('utf-8')
        print("device " + listing + " is found")
        print(listing.split())
        print(len(listing.split('\n')))
if __name__ == "__main__":
    # Run the adb demo when executed as a script.
    test_subprocess()
| StarcoderdataPython |
133762 | <gh_stars>0
#!/usr/bin/python
###############################################
# This script computes char co-occurence within
# alexa's top 1-million web domains.
# The co-occurence counts, probability and
# log(probability) and stored in a json doc.
###############################################
import json
import math
file = 'alexa-top-1m.csv'
def bigrams(input_list):
    """Return consecutive (current, next) pairs of *input_list* as a zip object."""
    return zip(input_list[:-1], input_list[1:])
# Count how often each ordered pair of adjacent characters appears across
# all base domain names; `pairs` maps "ab" -> count (later -> stats dict).
pairs = {}
# NOTE(review): line_count and ranking are computed but never used.
line_count = 0
with open(file, 'r') as fi:
    for line in fi:
        # keep track of progress
        line_count += 1
        # extract ranking
        arr = line.split(',')
        ranking = arr[0]
        # extract basedomain
        basedomain = arr[1].split('.')[0]
        # sometimes basedomain is made up of 2 or more words with hyphens between, we split them on the hyphen to obtain 'words'
        # e.g. my-malicious-domain.com, x-site.com etc.
        words = basedomain.split('-')
        # for each domain 'word' greater than 1 character
        # obtain character pairs
        for word in words:
            if len(word) > 1:
                b = bigrams(word)
                #combine bigram char pairs and maintain count
                for char_tuple in b:
                    pair = char_tuple[0] + char_tuple[1]
                    if pair in pairs:
                        pairs[pair] += 1
                    else:
                        pairs[pair] = 1
# now that we have pairs, compute sum of all pair combinations
# for example:
#if pairs['go'] = 4, pairs['im'] = 5, pairs['to'] = 9 then total_pairs = 18.
total_pairs = sum(pairs.values())
print (total_pairs)
# calculate probability for each pair of letters update the dict value with a dict of following format
# we use logarithm base 10 because later we would need to multiple pairwise to obtain an overall probability
# however, instead of multiplying small floats we add their logarithms as
# log(xy) = log(x) + log(y)
#
# key = { count: 2000,
#        probability: 0.00008580898447093862,
#        log: -4.605170185988091
#       }
for k,v in pairs.items():
    # probability = val / total_pairs
    probability = float(v) / total_pairs
    pairs[k] = {
        'count': v,
        'probability': probability,
        'log': round(math.log10(probability), 5)
    }
# finally add total_pairs to dict. useful to calculate pair counts later
# NOTE(review): this entry's value is an int while all others are dicts —
# consumers of the JSON must special-case the 'total_pairs' key.
pairs['total_pairs'] = total_pairs
j = json.dumps(pairs)
with open('character_pair_probabilities.json', 'w') as fo:
    fo.write(j)
exit()
| StarcoderdataPython |
3311495 | <filename>Borda_Guevara_Tissera_TP2/resolutions/knapsack.py
from search import Problem
import random
class KnapsackState:
    """Immutable snapshot of the knapsack during local search.

    Items are (weight, value) tuples; ``objects_in`` is a boolean mask over
    the problem's item list marking which items are currently packed.
    add_item/remove_item return a NEW state and never mutate ``self``;
    they return None when the requested move is not applicable.
    """

    def __init__(self, capacity, weight, value, obj_in):
        self.capacity = capacity
        self.weight = weight
        self.value = value
        self.objects_in = obj_in

    def add_item(self, item, object_place):
        """Return a new state with *item* packed at *object_place*,
        or None if that slot is already occupied."""
        if self.objects_in[object_place]:
            return None
        mask = list(self.objects_in)
        mask[object_place] = True
        return KnapsackState(self.capacity, self.weight + item[0], self.value + item[1], mask)

    def remove_item(self, item, position):
        """Return a new state with the item at *position* taken out,
        or None if that slot is empty."""
        if not self.objects_in[position]:
            return None
        mask = list(self.objects_in)
        mask[position] = False
        return KnapsackState(self.capacity, self.weight - item[0], self.value - item[1], mask)

    def __eq__(self, other):
        return (self.capacity, self.weight, self.value, self.objects_in) == \
               (other.capacity, other.weight, other.value, other.objects_in)
class KnapsackProblem(Problem):
    """0/1 knapsack phrased as a local-search Problem.

    Actions are (flag, item, position) triples where flag 1 means "add the
    item at *position*" and -1 means "remove it"; *item* is (weight, value).
    """

    def __init__(self, capacity, objects):
        self.initial = KnapsackState(capacity, 0, 0, [False] * len(objects))
        self.knapsack_objects = objects

    def actions(self, state):
        """Enumerate all legal add moves (item fits and is not packed)
        followed by all remove moves (item currently packed)."""
        moves = []
        for pos, item in enumerate(self.knapsack_objects):
            if not state.objects_in[pos] and item[0] + state.weight <= state.capacity:
                moves.append((1, item, pos))
        for pos, packed in enumerate(state.objects_in):
            if packed:
                moves.append((-1, self.knapsack_objects[pos], pos))
        return moves

    def result(self, state, action):
        """Apply an (flag, item, position) action and return the new state."""
        flag, item, pos = action
        if flag > 0:
            return state.add_item(item, pos)
        return state.remove_item(item, pos)

    def value(self, state):
        """Objective to maximize: total value packed."""
        return state.value

    def objects(self, state):
        """Return the (weight, value) tuples currently in the knapsack."""
        return [obj for obj, packed in zip(self.knapsack_objects, state.objects_in) if packed]

    def random_restart(self):
        """Build a fresh random state: fill an empty knapsack with random
        items until a random target weight is reached, or until 10
        consecutive placement attempts fail."""
        state = KnapsackState(self.initial.capacity, 0, 0, [False] * len(self.knapsack_objects))
        target_weight = random.randint(0, self.initial.capacity)
        indexed_items = list(enumerate(self.knapsack_objects))
        misses = 0
        while state.weight < target_weight and misses < 10:
            pos, item = random.choice(indexed_items)
            if item[0] + state.weight <= state.capacity and not state.objects_in[pos]:
                state = state.add_item(item, pos)
                misses = 0
            else:
                misses += 1
        return state
| StarcoderdataPython |
30694 | # Generated by Django 2.2.10 on 2020-05-02 05:53
from django.db import migrations
class Migration(migrations.Migration):
    # Renames ItemPurchase.supplier_price to ItemPurchase.price in the
    # purchases app. Schema-only column rename; no data transformation.

    dependencies = [
        ('purchases', '0008_auto_20200430_1617'),
    ]

    operations = [
        migrations.RenameField(
            model_name='itempurchase',
            old_name='supplier_price',
            new_name='price',
        ),
    ]
| StarcoderdataPython |
1632681 | from CommonServerPython import *
""" IMPORTS """
import requests
import ast
from datetime import datetime
# disable insecure warnings
# (suppresses urllib3 InsecureRequestWarning noise when requests are made
# with verify=False, i.e. when the "unsecure" instance parameter is set)
requests.packages.urllib3.disable_warnings()
# remove proxy if not set to true in params
if not demisto.params().get("proxy"):
    # `del os.environ[...]` raises KeyError when a variable is not set,
    # crashing the integration on hosts without proxy env vars;
    # pop(..., None) makes the cleanup a no-op for unset variables.
    for proxy_env_var in ("HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy"):
        os.environ.pop(proxy_env_var, None)
""" GLOBAL VARS """
CONTEXT = demisto.getIntegrationContext()
USE_SSL = not demisto.params().get("unsecure", False)
DEMISTOBOT = "https://demistobot.demisto.com/azuresc-token"
SUBSCRIPTION_ID = CONTEXT.get("subscription_id")
SUBSCRIPTION_URL = "/subscriptions/{}".format(SUBSCRIPTION_ID)
TOKEN = demisto.params().get("token")
TENANT_ID = demisto.params().get("tenant_id")
BASE_URL = demisto.params().get("server_url")
RESOURCE = "https://management.azure.com/"
AUTH_GRANT_TYPE = "client_credentials"
# API Versions
ALERT_API_VERSION = "2015-06-01-preview"
LOCATION_API_VERSION = "2015-06-01-preview"
ATP_API_VERSION = "2017-08-01-preview"
APS_API_VERSION = "2017-08-01-preview"
IPP_API_VERSION = "2017-08-01-preview"
JIT_API_VERSION = "2015-06-01-preview"
STORAGE_API_VERSION = "2018-07-01"
""" HELPER FUNCTIONS """
def set_subscription_id():
    """
    Fetch a fresh token and subscription ID from demistobot, cache both in
    the integration context, and return the subscription ID.
    """
    headers = {"Authorization": TOKEN, "Accept": "application/json"}
    params = {"tenant": TENANT_ID, "product": "AzureSecurityCenter"}
    r = requests.get(DEMISTOBOT, headers=headers, params=params, verify=USE_SSL)
    try:
        # r.json() raises ValueError when the body is not valid JSON
        data = r.json()
        if r.status_code != requests.codes.ok:
            return_error(
                "Error in API call to Azure Security Center [{}] - {}".format(
                    r.status_code, r.text
                )
            )
        sub_id = data.get("subscription_id")
        # cache token + subscription so subsequent calls can reuse them
        demisto.setIntegrationContext(
            {
                "token": data.get("token"),
                "stored": epoch_seconds(),
                "subscription_id": sub_id,
            }
        )
        return sub_id
    except ValueError:
        return_error("There was problem with your request: {}".format(r.content))
def epoch_seconds(d=None):
    """Return the number of whole seconds between *d* and the Unix epoch.

    Falls back to the current UTC time when *d* is falsy (e.g. None).
    """
    moment = d if d else datetime.utcnow()
    delta = moment - datetime.utcfromtimestamp(0)
    return int(delta.total_seconds())
def get_token():
    """
    Return a cached token if still fresh; otherwise fetch a new one from
    demistobot and cache it (with the subscription ID) in the context.
    """
    token = CONTEXT.get("token")
    stored = CONTEXT.get("stored")
    if token and stored:
        # tokens are treated as valid for ~1 hour; refresh 30s before expiry
        if epoch_seconds() - stored < 60 * 60 - 30:
            return token
    headers = {"Authorization": TOKEN, "Accept": "application/json"}
    r = requests.get(
        DEMISTOBOT,
        headers=headers,
        params={"tenant": TENANT_ID, "product": "AzureSecurityCenter"},
        verify=USE_SSL,
    )
    data = r.json()
    if r.status_code != requests.codes.ok:
        return_error(
            "Error in API call to Azure Security Center [{}] - {}".format(
                r.status_code, r.text
            )
        )
    demisto.setIntegrationContext(
        {
            "token": data.get("token"),
            "stored": epoch_seconds(),
            "subscription_id": data.get("subscription_id"),
        }
    )
    return data.get("token")
def http_request(method, url_suffix, body=None, params=None, add_subscription=True):
    """
    Send an authenticated request to the Azure management API.

    Args:
        method (str): HTTP verb (GET/POST/PUT/...).
        url_suffix (str): Path appended after the base (and optional subscription) URL.
        body (dict): JSON request body.
        params (dict): Query-string parameters.
        add_subscription (bool): Scope the URL to the configured subscription.

    Returns:
        dict: Parsed JSON response body (empty dict when the body is not JSON).
    """
    token = get_token()
    headers = {
        "Authorization": "Bearer " + token,
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
    if add_subscription:
        url = BASE_URL + SUBSCRIPTION_URL + url_suffix
    else:
        url = BASE_URL + url_suffix
    # verify=USE_SSL keeps this call consistent with the other requests in this
    # integration; previously the "unsecure" instance parameter was ignored here.
    r = requests.request(
        method, url, json=body, params=params, headers=headers, verify=USE_SSL
    )
    if r.status_code not in {200, 201, 202, 204}:
        return_error(
            "Error in API call to Azure Security Center [{}] - {}".format(
                r.status_code, r.text
            )
        )
    try:
        r = r.json()
        return r
    except ValueError:
        # e.g. 204 No Content responses have an empty body
        return dict()
# Format ports in JIT access policy rule to (portNum, protocol, allowedAddress, maxDuration)
def format_jit_port_rule(ports):
    """Render each policy port as a "('num', 'protocol', 'source', 'duration')"
    tuple string; a "*" source or protocol is shown as "any"."""
    rendered = []
    for entry in ports:
        number = str(entry.get("number"))
        source = entry.get("allowedSourceAddressPrefix")
        source = "any" if source == "*" else str(source)
        protocol = entry.get("protocol")
        protocol = "any" if protocol == "*" else str(protocol)
        duration = str(entry.get("maxRequestAccessDuration"))
        rendered.append(str((number, protocol, source, duration)))
    return ", ".join(rendered)
# Format ports in JIT access request to (portNum, allowedAddress, endTime, status)
def format_jit_port_request(ports):
    """Render each requested port as a "('num', 'source', 'end_time', 'status')"
    tuple string; a "*" source is shown as "any"."""
    rendered = []
    for entry in ports:
        number = str(entry.get("number"))
        source = entry.get("allowedSourceAddressPrefix")
        source = "any" if source == "*" else str(source)
        status = str(entry.get("status"))
        end_time = str(entry.get("endTimeUtc"))
        rendered.append(str((number, source, end_time, status)))
    return ", ".join(rendered)
def normalize_context_key(string):
    """Normalize a context key: upper-case the first character and remove
    every space.

    Args:
        string (str): raw key

    Returns:
        str: normalized key
    """
    head, tail = string[:1], string[1:]
    return (head.upper() + tail).replace(" ", "")
""" FUNCTIONS """
""" Alert Start """
def get_alert_command(args):
    """Getting specified alert from API

    Builds up to three war-room entries (basic properties, extended
    properties, entities) and returns them via demisto.results().

    Args
        args (dict): dictionary containing commands args
    """
    resource_group_name = args.get("resource_group_name")
    asc_location = args.get("asc_location")
    alert_id = args.get("alert_id")
    alert = get_alert(resource_group_name, asc_location, alert_id)
    final_output = list()

    # Basic Property Table
    properties = alert.get("properties")
    if properties:
        basic_table_output = [
            {
                "DisplayName": properties.get("alertDisplayName"),
                "CompromisedEntity": properties.get("compromisedEntity"),
                "Description": properties.get("description"),
                "DetectedTime": properties.get("detectedTimeUtc"),
                "ReportedTime": properties.get("reportedTimeUtc"),
                "ReportedSeverity": properties.get("reportedSeverity"),
                # note: missing scores fall back to the *string* "None"
                "ConfidenceScore": properties.get("confidenceScore", "None"),
                "State": properties.get("state"),
                "ActionTaken": properties.get("actionTaken"),
                "CanBeInvestigated": properties.get("canBeInvestigated"),
                "RemediationSteps": properties.get("remediationSteps"),
                "VendorName": properties.get("vendorName"),
                "AssociatedResource": properties.get("associatedResource"),
                "AlertName": properties.get("alertName"),
                "InstanceID": properties.get("instanceId", "None"),
                "ID": alert.get("name"),
                "ExtendedProperties": properties.get("extendedProperties"),
                "Entities": properties.get("entities"),
                "SubscriptionID": properties.get("subscriptionId"),
            }
        ]
        md = tableToMarkdown(
            "Azure Security Center - Get Alert - Basic Property",
            basic_table_output,
            [
                "DisplayName",
                "CompromisedEntity",
                "Description",
                "DetectedTime",
                "ReportedTime",
                "ReportedSeverity",
                "ConfidenceScore",
                "State",
                "ActionTaken",
                "CanBeInvestigated",
                "RemediationSteps",
                "VendorName",
                "AssociatedResource",
                "AlertName",
                "InstanceID",
                "ID",
            ],
            removeNull=True,
        )
        ec = {
            "AzureSecurityCenter.Alert(val.ID && val.ID === obj.ID)": basic_table_output
        }
        basic_table_entry = {
            "Type": entryTypes["note"],
            "Contents": alert,
            "ContentsFormat": formats["json"],
            "ReadableContentsFormat": formats["markdown"],
            "HumanReadable": md,
            "EntryContext": ec,
        }
        final_output.append(basic_table_entry)
    # Extended Properties Table
    # NOTE(review): the first two conditions below are duplicates
    if (
        alert.get("properties")
        and alert.get("properties")
        and alert.get("properties").get("extendedProperties")
    ):
        extended_properties = dict()
        properties = alert.get("properties")
        if isinstance(properties.get("extendedProperties"), dict):
            for key, value in alert["properties"]["extendedProperties"].items():
                extended_properties[normalize_context_key(key)] = value
        extended_table_entry = {
            "Type": entryTypes["note"],
            "Contents": alert["properties"]["extendedProperties"],
            "ContentsFormat": formats["json"],
            "ReadableContentsFormat": formats["markdown"],
            "HumanReadable": tableToMarkdown(
                "Azure Security Center - Get Alert - Extended Property",
                extended_properties,
                removeNull=True,
            ),
        }
        final_output.append(extended_table_entry)
    # Entities Table
    # NOTE(review): iterating a dict here yields its *keys*, and
    # entity["type"] would then fail on string keys; this branch looks like
    # it expects a list of entity dicts -- confirm the API payload shape.
    entities = properties.get("entities")
    if entities:
        if isinstance(entities, dict):
            entities_table_output = list()
            for entity in entities:
                entities_table_output.append(
                    {
                        "Content": ast.literal_eval(str(entity)),
                        "Type": entity["type"],
                    }
                )
            md = tableToMarkdown(
                "Azure Security Center - Get Alert - Entity",
                entities_table_output,
                removeNull=True,
            )
            entities_table_entry = {
                "Type": entryTypes["note"],
                "Contents": alert.get("properties").get("entities"),
                "ContentsFormat": formats["json"],
                "ReadableContentsFormat": formats["markdown"],
                "HumanReadable": md,
            }
            final_output.append(entities_table_entry)
    demisto.results(final_output)
def get_alert(resource_group_name, asc_location, alert_id):
    """Fetch a single alert from the API.

    Args:
        resource_group_name (str): ResourceGroupName (optional scope)
        asc_location (str): Azure Security Center location
        alert_id (str): Alert ID

    Returns:
        dict: response body
    """
    url_parts = []
    if resource_group_name:
        url_parts.append("/resourceGroups/{}".format(resource_group_name))
    url_parts.append(
        "/providers/Microsoft.Security/locations/{}/alerts/{}?api-version={}".format(
            asc_location, alert_id, ALERT_API_VERSION
        )
    )
    return http_request("GET", "".join(url_parts))
def list_alerts_command(args):
    """Getting all alerts

    Lists alerts (optionally scoped/filtered) and writes a markdown table
    plus AzureSecurityCenter.Alert context entries.

    Args:
        args (dict): usually demisto.args()
    """
    resource_group_name = args.get("resource_group_name")
    asc_location = args.get("asc_location")
    filter_query = args.get("filter")
    select_query = args.get("select")
    expand_query = args.get("expand")
    alerts = list_alerts(
        resource_group_name, asc_location, filter_query, select_query, expand_query
    ).get("value")
    outputs = list()
    for alert in alerts:
        # alerts without a properties payload are skipped entirely
        properties = alert.get("properties")
        if properties:
            outputs.append(
                {
                    "DisplayName": properties.get("alertDisplayName"),
                    "CompromisedEntity": properties.get("compromisedEntity"),
                    "DetectedTime": properties.get("detectedTimeUtc"),
                    "ReportedSeverity": properties.get("reportedSeverity"),
                    "State": properties.get("state"),
                    "ActionTaken": properties.get("actionTaken"),
                    "Description": properties.get("description"),
                    "ID": alert.get("name"),
                }
            )
    md = tableToMarkdown(
        "Azure Security Center - List Alerts",
        outputs,
        [
            "DisplayName",
            "CompromisedEntity",
            "DetectedTime",
            "ReportedSeverity",
            "State",
            "ActionTaken",
            "Description",
            "ID",
        ],
        removeNull=True,
    )
    ec = {"AzureSecurityCenter.Alert(val.ID && val.ID === obj.ID)": outputs}
    entry = {
        "Type": entryTypes["note"],
        "Contents": alerts,
        "ContentsFormat": formats["json"],
        "ReadableContentsFormat": formats["markdown"],
        "HumanReadable": md,
        "EntryContext": ec,
    }
    demisto.results(entry)
def get_alerts(
    resource_group_name, asc_location, filter_query, select_query, expand_query
):
    """Build and send the alerts listing request.

    Args:
        resource_group_name (str): ResourceGroupName (optional scope)
        asc_location (str): Azure Security Center location
        filter_query (str): OData $filter expression
        select_query (str): OData $select expression
        expand_query (str): OData $expand expression

    Returns:
        dict: response body
    """
    if resource_group_name:
        cmd_url = "/resourceGroups/{}/providers/Microsoft.Security".format(
            resource_group_name
        )
        # ascLocation may only be specified together with resourceGroupName
        if asc_location:
            cmd_url += "/locations/{}".format(asc_location)
    else:
        cmd_url = "/providers/Microsoft.Security"
    cmd_url += "/alerts?api-version={}".format(ALERT_API_VERSION)
    for option, value in (
        ("$filter", filter_query),
        ("$select", select_query),
        ("$expand", expand_query),
    ):
        if value:
            cmd_url += "&{}={}".format(option, value)
    return http_request("GET", cmd_url)
def list_alerts(
    resource_group_name, asc_location, filter_query, select_query, expand_query
):
    """List alerts for the subscription (or a resource-group scope).

    Args:
        resource_group_name (str): ResourceGroupName (optional scope)
        asc_location (str): Azure Security Center location
        filter_query (str): OData $filter expression
        select_query (str): OData $select expression
        expand_query (str): OData $expand expression

    Returns:
        dict: response body
    """
    if resource_group_name:
        scope = "/resourceGroups/{}/providers/Microsoft.Security".format(
            resource_group_name
        )
        # ascLocation must be used together with resourceGroupName
        if asc_location:
            scope += "/locations/{}".format(asc_location)
    else:
        scope = "/providers/Microsoft.Security"
    query = "/alerts?api-version={}".format(ALERT_API_VERSION)
    odata = {"$filter": filter_query, "$select": select_query, "$expand": expand_query}
    for option in ("$filter", "$select", "$expand"):
        if odata[option]:
            query += "&{}={}".format(option, odata[option])
    return http_request("GET", scope + query)
def update_alert_command(args):
    """Update given alert

    Applies an action (e.g. dismiss/activate) to an alert and records the
    action taken in context.

    Args:
        args (dict): usually demisto.args()
    """
    resource_group_name = args.get("resource_group_name")
    asc_location = args.get("asc_location")
    alert_id = args.get("alert_id")
    alert_update_action_type = args.get("alert_update_action_type")
    response = update_alert(
        resource_group_name, asc_location, alert_id, alert_update_action_type
    )
    # NOTE(review): http_request returns {} when the body is not JSON (e.g.
    # 204 No Content), in which case "ID" will be None here -- confirm.
    outputs = {"ID": response.get("id"), "ActionTaken": alert_update_action_type}
    ec = {"AzureSecurityCenter.Alert(val.ID && val.ID === obj.ID)": outputs}
    demisto.results(
        {
            "Type": entryTypes["note"],
            "Contents": "Alert - {} has been set to {}.".format(
                alert_id, alert_update_action_type
            ),
            "ContentsFormat": formats["text"],
            "EntryContext": ec,
        }
    )
def update_alert(resource_group_name, asc_location, alert_id, alert_update_action_type):
    """POST an alert state-change action.

    Args:
        resource_group_name (str): Resource Name Group (optional scope)
        asc_location (str): Azure Security Center Location
        alert_id (str): Alert ID
        alert_update_action_type (str): action to apply (e.g. dismiss)

    Returns:
        dict: response body
    """
    prefix = (
        "/resourceGroups/{}".format(resource_group_name) if resource_group_name else ""
    )
    suffix = "/providers/Microsoft.Security/locations/{}/alerts/{}/{}?api-version={}".format(
        asc_location, alert_id, alert_update_action_type, ALERT_API_VERSION
    )
    return http_request("POST", prefix + suffix)
""" Alert End """
""" Location Start """
def list_locations_command():
    """Getting all locations

    Lists Azure Security Center locations and writes them as a markdown
    table plus AzureSecurityCenter.Location context entries.
    """
    locations = list_locations().get("value")
    outputs = list()
    if locations:
        for location in locations:
            if location.get("properties") and location.get("properties").get(
                "homeRegionName"
            ):
                home_region_name = location.get("properties").get("homeRegionName")
            else:
                home_region_name = None
            outputs.append(
                {
                    "HomeRegionName": home_region_name,
                    "Name": location.get("name"),
                    "ID": location.get("id"),
                }
            )
        md = tableToMarkdown(
            "Azure Security Center - List Locations",
            outputs,
            ["HomeRegionName", "Name", "ID"],
            removeNull=True,
        )
        ec = {"AzureSecurityCenter.Location(val.ID && val.ID === obj.ID)": outputs}
        entry = {
            "Type": entryTypes["note"],
            "Contents": locations,
            "ContentsFormat": formats["json"],
            "ReadableContentsFormat": formats["markdown"],
            "HumanReadable": md,
            "EntryContext": ec,
        }
        demisto.results(entry)
    else:
        demisto.results("No locations found")
def list_locations():
    """GET all Azure Security Center locations for the subscription.

    Returns:
        dict: response body
    """
    return http_request(
        "GET",
        "/providers/Microsoft.Security/locations?api-version={}".format(
            LOCATION_API_VERSION
        ),
    )
""" Location End """
""" Advanced Threat Protection Start """
def update_atp_command(args):
    """Enable/disable Advanced Threat Protection on a storage account and
    report the resulting setting.

    Args:
        args (dict): usually demisto.args()
    """
    resource_group_name = args.get("resource_group_name")
    setting_name = args.get("setting_name")
    is_enabled = args.get("is_enabled")
    storage_account = args.get("storage_account")
    response = update_atp(
        resource_group_name, storage_account, setting_name, is_enabled
    )
    # guard against a missing properties payload instead of crashing on None
    properties = response.get("properties") or {}
    outputs = {
        "ID": response.get("id"),
        "Name": response.get("name"),
        # BUGFIX: the API returns the camelCase "isEnabled" (as read in
        # get_atp_command); the old snake_case "is_enabled" lookup was
        # always None.
        "IsEnabled": properties.get("isEnabled"),
    }
    md = tableToMarkdown(
        "Azure Security Center - Update Advanced Threat Detection Setting",
        outputs,
        ["ID", "Name", "IsEnabled"],
        removeNull=True,
    )
    ec = {
        "AzureSecurityCenter.AdvancedThreatProtection(val.ID && val.ID === obj.ID)": outputs
    }
    demisto.results(
        {
            "Type": entryTypes["note"],
            "Contents": response,
            "ContentsFormat": formats["json"],
            "ReadableContentsFormat": formats["markdown"],
            "HumanReadable": md,
            "EntryContext": ec,
        }
    )
def update_atp(resource_group_name, storage_account, setting_name, is_enabled):
    """PUT the Advanced Threat Protection setting of a storage account.

    Args:
        resource_group_name (str): Resource Group Name
        storage_account (str): Storage Account
        setting_name (str): Setting Name
        is_enabled (str): true/false

    Returns:
        dict: response body
    """
    cmd_url = (
        "/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}"
        "/providers/Microsoft.Security/advancedThreatProtectionSettings/{}?api-version={}".format(
            resource_group_name, storage_account, setting_name, ATP_API_VERSION
        )
    )
    data = {
        "id": "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage"
        "/storageAccounts/{}/providers/Microsoft.Security/advancedThreatProtectionSettings/{}".format(
            SUBSCRIPTION_ID, resource_group_name, storage_account, setting_name
        ),
        "name": setting_name,
        "type": "Microsoft.Security/advancedThreatProtectionSettings",
        # BUGFIX: the Azure schema property is camelCase "isEnabled" (the same
        # name the GET endpoint returns); the snake_case key was not recognized.
        "properties": {"isEnabled": is_enabled},
    }
    response = http_request("PUT", cmd_url, body=data)
    return response
def get_atp_command(args):
    """Get given Advanced Threat Protection settings

    Args:
        args (dict): usually demisto.args()
    """
    resource_group_name = args.get("resource_group_name")
    setting_name = args.get("setting_name")
    storage_account = args.get("storage_account")
    response = get_atp(resource_group_name, storage_account, setting_name)
    outputs = {
        "ID": response.get("id"),
        "Name": response.get("name"),
        # note: because the guard tests truthiness, an isEnabled of False
        # is also reported as None here
        "IsEnabled": response["properties"]["isEnabled"]
        if response.get("properties") and response.get("properties").get("isEnabled")
        else None,
    }
    md = tableToMarkdown(
        "Azure Security Center - Get Advanced Threat Detection Setting",
        outputs,
        ["ID", "Name", "IsEnabled"],
        removeNull=True,
    )
    ec = {
        "AzureSecurityCenter.AdvancedThreatProtection(val.ID && val.ID === obj.ID)": outputs
    }
    demisto.results(
        {
            "Type": entryTypes["note"],
            "Contents": response,
            "ContentsFormat": formats["json"],
            "ReadableContentsFormat": formats["markdown"],
            "HumanReadable": md,
            "EntryContext": ec,
        }
    )
def get_atp(resource_group_name, storage_account, setting_name):
    """GET the Advanced Threat Protection setting of a storage account.

    Args:
        resource_group_name (str): Resource Group Name
        storage_account (str): Storage Account
        setting_name (str): Setting Name

    Returns:
        dict: response body
    """
    cmd_url = (
        "/resourceGroups/{rg}/providers/Microsoft.Storage/storageAccounts"
        "/{account}/providers/Microsoft.Security/advancedThreatProtectionSettings"
        "/{setting}?api-version={version}"
    ).format(
        rg=resource_group_name,
        account=storage_account,
        setting=setting_name,
        version=ATP_API_VERSION,
    )
    return http_request("GET", cmd_url)
""" Advanced Threat Protection End """
""" Auto Provisioning Settings Start """
def update_aps_command(args):
    """Update an auto-provisioning setting and report the result.

    Args:
        args (dict): usually demisto.args()
    """
    setting_name = args.get("setting_name")
    auto_provision = args.get("auto_provision")
    setting = update_aps(setting_name, auto_provision)
    # guard against a missing properties payload instead of crashing on None
    properties = setting.get("properties") or {}
    outputs = [
        {
            "Name": setting.get("name"),
            # BUGFIX: the API returns camelCase "autoProvision" (as read in
            # list_aps_command / get_aps_command); the old snake_case
            # "auto_provision" lookup was always None.
            "AutoProvision": properties.get("autoProvision"),
            "ID": setting.get("id"),
        }
    ]
    md = tableToMarkdown(
        "Azure Security Center - Update Auto Provisioning Setting",
        outputs,
        ["Name", "AutoProvision", "ID"],
        removeNull=True,
    )
    ec = {
        "AzureSecurityCenter.AutoProvisioningSetting(val.ID && val.ID === obj.ID)": outputs
    }
    entry = {
        "Type": entryTypes["note"],
        "Contents": setting,
        "ContentsFormat": formats["json"],
        "ReadableContentsFormat": formats["markdown"],
        "HumanReadable": md,
        "EntryContext": ec,
    }
    demisto.results(entry)
def update_aps(setting_name, auto_provision):
    """PUT an auto-provisioning setting.

    Args:
        setting_name (str): Setting name
        auto_provision (str): Auto provision setting (On/Off)

    Returns:
        dict: response body
    """
    cmd_url = "/providers/Microsoft.Security/autoProvisioningSettings/{}?api-version={}".format(
        setting_name, APS_API_VERSION
    )
    # BUGFIX: Azure's schema property is camelCase "autoProvision" (the same
    # name the GET endpoints return); the snake_case key was not recognized.
    data = {"properties": {"autoProvision": auto_provision}}
    response = http_request("PUT", cmd_url, body=data)
    return response
def list_aps_command():
    """List all auto-provisioning settings of the subscription and write
    them as a markdown table plus context entries.
    """
    settings = list_aps().get("value")
    outputs = []
    for setting in settings:
        outputs.append(
            {
                "Name": setting.get("name"),
                "AutoProvision": setting.get("properties").get("autoProvision")
                if setting.get("properties")
                and setting.get("properties").get("autoProvision")
                else None,
                "ID": setting.get("id"),
            }
        )
    md = tableToMarkdown(
        "Azure Security Center - List Auto Provisioning Settings",
        outputs,
        ["Name", "AutoProvision", "ID"],
        removeNull=True,
    )
    ec = {
        "AzureSecurityCenter.AutoProvisioningSetting(val.ID && val.ID === obj.ID)": outputs
    }
    entry = {
        "Type": entryTypes["note"],
        "Contents": settings,
        "ContentsFormat": formats["json"],
        "ReadableContentsFormat": formats["markdown"],
        "HumanReadable": md,
        "EntryContext": ec,
    }
    demisto.results(entry)
def list_aps():
    """GET every auto-provisioning setting in the subscription.

    Returns:
        dict: response body
    """
    endpoint = "/providers/Microsoft.Security/autoProvisioningSettings"
    return http_request("GET", "{}?api-version={}".format(endpoint, APS_API_VERSION))
def get_aps_command(args):
    """Get a single auto-provisioning setting and write it as a markdown
    table plus context entry.

    Args:
        args (dict): usually demisto.args()
    """
    setting_name = args.get("setting_name")
    setting = get_aps(setting_name)
    outputs = [
        {
            "Name": setting.get("name"),
            "AutoProvision": setting.get("properties").get("autoProvision")
            if setting.get("properties")
            and setting.get("properties").get("autoProvision")
            else None,
            "ID": setting["id"],
        }
    ]
    md = tableToMarkdown(
        "Azure Security Center - Get Auto Provisioning Setting",
        outputs,
        ["Name", "AutoProvision", "ID"],
        removeNull=True,
    )
    ec = {
        "AzureSecurityCenter.AutoProvisioningSetting(val.ID && val.ID === obj.ID)": outputs
    }
    entry = {
        "Type": entryTypes["note"],
        "Contents": setting,
        "ContentsFormat": formats["json"],
        "ReadableContentsFormat": formats["markdown"],
        "HumanReadable": md,
        "EntryContext": ec,
    }
    demisto.results(entry)
def get_aps(setting_name):
    """GET a single auto-provisioning setting by name.

    Args:
        setting_name (str): Setting name

    Returns:
        dict: response body
    """
    endpoint = "/providers/Microsoft.Security/autoProvisioningSettings/{}".format(
        setting_name
    )
    return http_request("GET", "{}?api-version={}".format(endpoint, APS_API_VERSION))
""" Auto Provisioning Settings End """
""" Information Protection Policies Start """
def list_ipp_command(args):
    """List information protection policies (subscription or management-group
    scope) and write them as a markdown table plus context entries.

    Args:
        args (dict): usually demisto.args()
    """
    management_group = args.get("management_group")
    policies = list_ipp(management_group).get("value")
    outputs = list()
    if policies:
        for policy in policies:
            properties = policy.get("properties") or {}
            if properties.get("labels"):
                label_names = ", ".join(
                    [
                        label.get("displayName")
                        for label in properties["labels"].values()
                    ]
                )
                # guard against a policy that has labels but no
                # informationTypes payload
                information_type_names = ", ".join(
                    [
                        it["displayName"]
                        for it in (properties.get("informationTypes") or {}).values()
                    ]
                )
            else:
                label_names, information_type_names = None, None
            outputs.append(
                {
                    "Name": policy.get("name"),
                    "Labels": label_names,
                    "InformationTypeNames": information_type_names,
                    "InformationTypes": properties.get("informationTypes"),
                    "ID": policy["id"],
                }
            )
        md = tableToMarkdown(
            "Azure Security Center - List Information Protection Policies",
            outputs,
            ["Name", "Labels", "InformationTypeNames", "ID"],
            removeNull=True,
        )
        ec = {
            "AzureSecurityCenter.InformationProtectionPolicy(val.ID && val.ID === obj.ID)": outputs
        }
        entry = {
            "Type": entryTypes["note"],
            "Contents": policies,
            "ContentsFormat": formats["json"],
            "ReadableContentsFormat": formats["markdown"],
            "HumanReadable": md,
            "EntryContext": ec,
        }
        demisto.results(entry)
    else:
        # BUGFIX: the previous message was the truncated string "no "
        demisto.results("No information protection policies found")
def list_ipp(management_group=None):
    """GET information protection policies, scoped to either the
    subscription or a management group.

    Args:
        management_group (str): optional management group to query

    Returns:
        dict: response body
    """
    prefix = ""
    scope_is_subscription = not management_group
    if management_group:
        prefix = "/providers/Microsoft.Management/managementGroups/{}".format(
            management_group
        )
    suffix = "/providers/Microsoft.Security/informationProtectionPolicies?api-version={}".format(
        IPP_API_VERSION
    )
    return http_request("GET", prefix + suffix, add_subscription=scope_is_subscription)
def get_ipp_command(args):
    """Show one information protection policy: a basic-property entry and
    an information-types entry.

    Args:
        args (dict): usually demisto.args()
    """
    policy_name = args.get("policy_name")
    management_group = args.get("management_group")
    policy = get_ipp(policy_name, management_group)
    properties = policy.get("properties")
    # BUGFIX: properties was dereferenced before the None check below
    labels = properties.get("labels") if properties else None
    if properties and isinstance(labels, dict):
        # Basic Property table
        labels = ", ".join(
            [
                (str(label.get("displayName")) + str(label.get("enabled")))
                for label in labels.values()
            ]
        )
        basic_table_output = [
            {"Name": policy.get("name"), "Labels": labels, "ID": policy.get("id")}
        ]
        md = tableToMarkdown(
            "Azure Security Center - Get Information Protection Policy - Basic Property",
            basic_table_output,
            ["Name", "Labels", "ID"],
            removeNull=True,
        )
        ec = {
            "AzureSecurityCenter.InformationProtectionPolicy(val.ID && val.ID === obj.ID)": basic_table_output
        }
        basic_table_entry = {
            "Type": entryTypes["note"],
            "Contents": policy,
            "ContentsFormat": formats["json"],
            "ReadableContentsFormat": formats["markdown"],
            "HumanReadable": md,
            "EntryContext": ec,
        }
        # Information Type table
        info_type_table_output = list()
        for information_type_data in properties.get("informationTypes").values():
            keywords = ", ".join(
                [
                    (
                        str(keyword.get("displayName"))
                        + str(keyword.get("custom"))
                        + str(keyword.get("canBeNumeric"))
                    )
                    for keyword in information_type_data.get("keywords")
                ]
            )
            info_type_table_output.append(
                {
                    # BUGFIX: four of these values were previously read by
                    # *calling* the dict (e.g. information_type_data("enabled")),
                    # which raises TypeError at runtime; they are .get() lookups.
                    # "displayname" was also a typo for the API's camelCase
                    # "displayName" key (cf. the keywords loop above).
                    "DisplayName": information_type_data.get("displayName"),
                    "Enabled": information_type_data.get("enabled"),
                    "Custom": information_type_data.get("custom"),
                    "Keywords": keywords,
                    "RecommendedLabelID": information_type_data.get(
                        "recommendedLabelId"
                    ),
                }
            )
        md = tableToMarkdown(
            "Azure Security Center - Get Information Protection Policy - Information Types",
            info_type_table_output,
            ["DisplayName", "Enabled", "Custom", "Keywords", "RecommendedLabelID"],
            removeNull=True,
        )
        info_type_table_entry = {
            "Type": entryTypes["note"],
            "Contents": properties.get("informationTypes"),
            "ContentsFormat": formats["json"],
            "ReadableContentsFormat": formats["markdown"],
            "HumanReadable": md,
        }
        demisto.results([basic_table_entry, info_type_table_entry])
    else:
        demisto.results("No properties found in {}".format(management_group))
def get_ipp(policy_name, management_group):
    """GET a single information protection policy by name.

    Args:
        policy_name (str): Policy name
        management_group (str): optional management-group scope

    Returns:
        dict: response body
    """
    prefix = ""
    use_subscription_scope = not management_group
    if management_group:
        prefix = "/providers/Microsoft.Management/managementGroups/{}".format(
            management_group
        )
    suffix = "/providers/Microsoft.Security/informationProtectionPolicies/{}?api-version={}".format(
        policy_name, IPP_API_VERSION
    )
    return http_request("GET", prefix + suffix, add_subscription=use_subscription_scope)
""" Information Protection Policies End """
""" Jit Network Access Policies Start """
def list_jit_command(args):
    """Lists all Just-in-time Virtual Machines

    Writes the JIT access policies as a markdown table plus
    AzureSecurityCenter.JITPolicy context entries.

    Args:
        args (dict): usually demisto.args()
    """
    asc_location = args.get("asc_location")
    resource_group_name = args.get("resource_group_name")
    policies = list_jit(asc_location, resource_group_name)["value"]
    outputs = []
    for policy in policies:
        # summarize rules in (VMName: allowPort,...) format
        # policies without virtualMachines rules are skipped entirely
        if policy.get("properties") and policy.get("properties").get("virtualMachines"):
            rules_data = policy["properties"]["virtualMachines"]
            rules_summary_array = []
            for rule in rules_data:
                ID = rule.get("id")
                if isinstance(ID, str):
                    # VM name is the last segment of its resource ID
                    vm_name = ID.split("/")[-1]
                else:
                    vm_name = None  # type: ignore
                vm_ports = [str(port.get("number")) for port in rule.get("ports")]
                rules_summary_array.append(
                    "({}: {})".format(vm_name, ", ".join(vm_ports))
                )
            rules = ", ".join(rules_summary_array)
            outputs.append(
                {
                    "Name": policy.get("name"),
                    "Rules": rules,
                    "Location": policy.get("location"),
                    "Kind": policy.get("kind"),
                    "ID": policy.get("id"),
                }
            )
    md = tableToMarkdown(
        "Azure Security Center - List JIT Access Policies",
        outputs,
        ["Name", "Rules", "Location", "Kind"],
        removeNull=True,
    )
    ec = {"AzureSecurityCenter.JITPolicy(val.ID && val.ID === obj.ID)": outputs}
    entry = {
        "Type": entryTypes["note"],
        "Contents": policies,
        "ContentsFormat": formats["json"],
        "ReadableContentsFormat": formats["markdown"],
        "HumanReadable": md,
        "EntryContext": ec,
    }
    demisto.results(entry)
def list_jit(asc_location, resource_group_name):
    """GET JIT network access policies, optionally narrowed to a resource
    group and/or location.

    Args:
        asc_location (str): Machine location
        resource_group_name (str): Resource group name

    Returns:
        dict: response body
    """
    scope = ""
    if resource_group_name:
        scope += "/resourceGroups/{}".format(resource_group_name)
    if asc_location:
        scope += "/providers/Microsoft.Security/locations/{}".format(asc_location)
    query = "/providers/Microsoft.Security/jitNetworkAccessPolicies?api-version={}".format(
        JIT_API_VERSION
    )
    return http_request("GET", scope + query)
def get_jit_command(args):
    """Fetch one Just-in-time access policy and print three war-room tables:
    policy properties, per-VM rules and outstanding access requests.

    Args:
        args (dict): usually demisto.args()
    """
    policy_name = args.get("policy_name")
    asc_location = args.get("asc_location")
    resource_group_name = args.get("resource_group_name")
    policy = get_jit(policy_name, asc_location, resource_group_name)
    # "properties" may be absent from the response; normalize to an empty dict
    # so lookups below cannot raise AttributeError on None (previously the
    # rules and requests tables crashed for a policy without properties).
    properties = policy.get("properties") or {}

    # Property table
    property_table_output = [
        {
            "Name": policy.get("name"),
            "Kind": policy.get("kind"),
            "ProvisioningState": properties.get("provisioningState"),
            "Location": policy.get("location"),
            "Rules": properties.get("virtualMachines"),
            "Requests": properties.get("requests"),
            "ID": policy.get("id"),
        }
    ]
    md = tableToMarkdown(
        "Azure Security Center - Get JIT Access Policy - Properties",
        property_table_output,
        ["Name", "Kind", "ProvisioningState", "Location", "ID"],
        removeNull=True,
    )
    ec = {
        "AzureSecurityCenter.JITPolicy(val.ID && val.ID === obj.ID)": property_table_output
    }
    property_table_entry = {
        "Type": entryTypes["note"],
        "Contents": policy,
        "ContentsFormat": formats["json"],
        "ReadableContentsFormat": formats["markdown"],
        "HumanReadable": md,
        "EntryContext": ec,
    }

    # Rules table
    rules_table_output = list()
    for rule in properties.get("virtualMachines") or []:
        rules_table_output.append(
            {
                "VmID": rule.get("id"),
                "Ports": format_jit_port_rule(rule.get("ports")),
            }
        )
    md = tableToMarkdown(
        "Azure Security Center - Get JIT Access Policy - Rules",
        rules_table_output,
        ["VmID", "Ports"],
        removeNull=True,
    )
    rules_table_entry = {
        "Type": entryTypes["note"],
        "Contents": properties.get("virtualMachines"),
        "ContentsFormat": formats["json"],
        "ReadableContentsFormat": formats["markdown"],
        "HumanReadable": md,
    }

    # Requests table
    requests_table_output = list()
    for requestData in properties.get("requests") or []:
        vms = list()
        for vm in requestData.get("virtualMachines"):
            # The VM name is the last segment of the ARM resource ID.
            vm_name = vm["id"].split("/")[-1]
            vm_ports = format_jit_port_request(vm.get("ports"))
            vms.append("[{}: {}]".format(vm_name, vm_ports))
        requests_table_output.append(
            {
                "VirtualMachines": ", ".join(vms),
                # Requests without a requestor were issued by Azure itself.
                "Requestor": requestData.get("requestor")
                if requestData.get("requestor")
                else "service-account",
                "StartTimeUtc": requestData.get("startTimeUtc"),
            }
        )
    md = tableToMarkdown(
        "Azure Security Center - Get JIT Access Policy - Requests",
        requests_table_output,
        ["VirtualMachines", "Requestor", "StartTimeUtc"],
        removeNull=True,
    )
    requests_table_entry = {
        "Type": entryTypes["note"],
        "Contents": properties.get("requests"),
        "ContentsFormat": formats["json"],
        "ReadableContentsFormat": formats["markdown"],
        "HumanReadable": md,
    }

    demisto.results([property_table_entry, rules_table_entry, requests_table_entry])
def get_jit(policy_name, asc_location, resource_group_name):
    """Build and execute the request for a single JIT policy.

    Args:
        policy_name: Policy name
        asc_location: Machine location
        resource_group_name: Resource group name

    Returns:
        dict: response body
    """
    cmd_url = (
        "/resourceGroups/{rg}/providers/Microsoft.Security/locations/{loc}"
        "/jitNetworkAccessPolicies/{name}?api-version={ver}".format(
            rg=resource_group_name,
            loc=asc_location,
            name=policy_name,
            ver=JIT_API_VERSION,
        )
    )
    return http_request("GET", cmd_url)
def initiate_jit_command(args):
    """Request temporary JIT access to a VM port and print the result.

    Args:
        args (dict): usually demisto.args()
    """
    resource_group_name = args.get("resource_group_name")
    asc_location = args.get("asc_location")
    policy_name = args.get("policy_name")
    vm_id = args.get("vmID")
    port = args.get("port")
    source_address = args.get("source_address")
    duration = args.get("duration")
    response = initiate_jit(
        resource_group_name,
        asc_location,
        policy_name,
        vm_id,
        port,
        source_address,
        duration,
    )
    # Reconstruct the ARM ID of the policy the request was issued against.
    policy_id = (
        "/subscriptions/{}/resourceGroups/{}/providers/"
        "Microsoft.Security/locations/{}/jitNetworkAccessPolicies/{}".format(
            SUBSCRIPTION_ID, resource_group_name, asc_location, policy_name
        )
    )
    virtual_machines = response.get("virtualMachines")
    if virtual_machines and len(virtual_machines) > 0:
        # initiate_jit only supports one VM and one port per request, so the
        # first entry of the response is the access we just initiated.
        machine = virtual_machines[0]
        port = machine.get("ports")[0]
        outputs = {
            "VmID": machine.get("id"),
            "PortNum": port.get("number"),
            "AllowedSourceAddress": port.get("allowedSourceAddressPrefix"),
            "EndTimeUtc": port.get("endTimeUtc"),
            "Status": port.get("status"),
            "Requestor": response.get("requestor"),
            "PolicyID": policy_id,
        }
        md = tableToMarkdown(
            "Azure Security Center - Initiate JIT Access Request",
            outputs,
            [
                "VmID",
                "PortNum",
                "AllowedSourceAddress",
                "EndTimeUtc",
                "Status",
                "Requestor",
            ],
            removeNull=True,
        )
        ec = {
            "AzureSecurityCenter.JITPolicy(val.ID && val.ID ="
            "== obj.{}).Initiate(val.endTimeUtc === obj.EndTimeUtc)".format(
                policy_id
            ): outputs
        }
        demisto.results(
            {
                "Type": entryTypes["note"],
                "Contents": response,
                "ContentsFormat": formats["json"],
                "ReadableContentsFormat": formats["markdown"],
                "HumanReadable": md,
                "EntryContext": ec,
            }
        )
    else:
        # Previously an empty response produced no war-room output at all;
        # surface an explicit error entry instead.
        demisto.results(
            {
                "Type": entryTypes["error"],
                "Contents": "No virtual machines were returned for the initiate request on policy {}.".format(
                    policy_name
                ),
                "ContentsFormat": formats["text"],
            }
        )
def initiate_jit(
    resource_group_name,
    asc_location,
    policy_name,
    vm_id,
    port,
    source_address,
    duration,
):
    """Start a new Just-in-time access request.

    Args:
        resource_group_name: Resource group name
        asc_location: Machine location
        policy_name: Policy name
        vm_id: Virtual Machine ARM resource ID
        port: port to open
        source_address: allowed source address prefix
        duration: requested access duration (ISO 8601)

    Returns:
        dict: response body
    """
    cmd_url = (
        "/resourceGroups/{}/providers/Microsoft.Security/"
        "locations/{}/jitNetworkAccessPolicies/{}/initiate?api-version={}".format(
            resource_group_name, asc_location, policy_name, JIT_API_VERSION
        )
    )
    # Only supports initiating access for one VM and one port per request.
    data = {
        "virtualMachines": [
            {
                # The JIT initiate request schema documents the lowercase "id"
                # property; the previous "ID" key relied on the service
                # parsing property names case-insensitively.
                "id": vm_id,
                "ports": [
                    {
                        "number": port,
                        "duration": duration,
                        "allowedSourceAddressPrefix": source_address,
                    }
                ],
            }
        ]
    }
    response = http_request("POST", cmd_url, body=data)
    return response
def delete_jit_command(args):
    """Delete a Just-in-time access policy and confirm in the war room.

    Args:
        args (dict): usually demisto.args()
    """
    asc_location = args.get("asc_location")
    resource_group_name = args.get("resource_group_name")
    policy_name = args.get("policy_name")
    delete_jit(asc_location, resource_group_name, policy_name)
    # Reconstruct the ARM ID of the deleted policy for the entry context.
    policy_id = (
        "/subscriptions/{}/resourceGroups/"
        "{}/providers/Microsoft.Security/locations/{}/jitNetworkAccessPolicies/{}".format(
            SUBSCRIPTION_ID, resource_group_name, asc_location, policy_name
        )
    )
    # Mark the policy as deleted so playbooks can key on the Action field.
    outputs = {"ID": policy_id, "Action": "deleted"}
    ec = {"AzureSecurityCenter.JITPolicy(val.ID && val.ID === obj.ID)": outputs}
    demisto.results(
        {
            "Type": entryTypes["note"],
            # Fixed typo in the user-facing message ("sucessfully").
            "Contents": "Policy - {} has been deleted successfully.".format(policy_name),
            "ContentsFormat": formats["text"],
            "EntryContext": ec,
        }
    )
def delete_jit(asc_location, resource_group_name, policy_name):
    """Build and execute the JIT policy deletion request.

    Args:
        asc_location: Machine location
        resource_group_name: Resource group name
        policy_name: Policy name
    """
    cmd_url = (
        "/resourceGroups/{rg}/providers/Microsoft.Security/"
        "locations/{loc}/jitNetworkAccessPolicies/{name}?api-version={ver}".format(
            rg=resource_group_name,
            loc=asc_location,
            name=policy_name,
            ver=JIT_API_VERSION,
        )
    )
    http_request("DELETE", cmd_url)
""" Jit Network Access Policies End """
""" Storage Start """
# Add this command to security center integration because ATP-related command requires storage account info
def list_sc_storage_command():
    """List the storage accounts visible to Security Center and print them
    to the war room (needed because ATP-related commands require storage
    account information).
    """
    accounts = list_sc_storage().get("value")
    outputs = list()
    for account in accounts:
        # The resource group name is the path segment right after
        # "resourceGroups" in the ARM resource ID.
        # NOTE(review): assumes every account ID contains a "resourceGroups"
        # segment; a malformed ID would raise ValueError here — confirm.
        id_segments = account.get("id", str()).split("/")
        resource_group_name = id_segments[id_segments.index("resourceGroups") + 1]
        outputs.append(
            {
                "Name": account.get("name"),
                "ResourceGroupName": resource_group_name,
                "Location": account.get("location"),
                "ID": account.get("id"),
            }
        )
    md = tableToMarkdown(
        "Azure Security Center - List Storage Accounts",
        outputs,
        ["Name", "ResourceGroupName", "Location"],
        removeNull=True,
    )
    demisto.results(
        {
            "Type": entryTypes["note"],
            "Contents": accounts,
            "ContentsFormat": formats["json"],
            "ReadableContentsFormat": formats["markdown"],
            "HumanReadable": md,
            "EntryContext": {
                "AzureSecurityCenter.Storage(val.ID && val.ID === obj.ID)": outputs
            },
        }
    )
def list_sc_storage():
    """Build and execute the storage-accounts list request.

    Returns:
        dict: response body
    """
    cmd_url = "/providers/Microsoft.Storage/storageAccounts?api-version={}".format(
        STORAGE_API_VERSION
    )
    return http_request("GET", cmd_url)
""" Storage End """
""" Functions start """
# Resolve the subscription lazily when the integration parameter was left
# empty (set_subscription_id is defined earlier in this integration).
if not SUBSCRIPTION_ID:
    SUBSCRIPTION_ID = set_subscription_id()
SUBSCRIPTION_URL = "/subscriptions/{}".format(SUBSCRIPTION_ID)
# Command dispatch: route the invoked integration command to its handler.
# NOTE: this module uses Python 2 syntax (`except Exception, e`) and
# therefore must run on a Python 2 interpreter.
try:
    if demisto.command() == "test-module":
        # If the command will fail, error will be thrown from the request itself
        list_locations()
        demisto.results("ok")
    elif demisto.command() == "azure-sc-get-alert":
        get_alert_command(demisto.args())
    elif demisto.command() == "azure-sc-list-alert":
        list_alerts_command(demisto.args())
    elif demisto.command() == "azure-sc-update-alert":
        update_alert_command(demisto.args())
    elif demisto.command() == "azure-sc-list-location":
        list_locations_command()
    elif demisto.command() == "azure-sc-update-atp":
        update_atp_command(demisto.args())
    elif demisto.command() == "azure-sc-get-atp":
        get_atp_command(demisto.args())
    elif demisto.command() == "azure-sc-update-aps":
        update_aps_command(demisto.args())
    elif demisto.command() == "azure-sc-list-aps":
        list_aps_command()
    elif demisto.command() == "azure-sc-get-aps":
        get_aps_command(demisto.args())
    elif demisto.command() == "azure-sc-list-ipp":
        list_ipp_command(demisto.args())
    elif demisto.command() == "azure-sc-get-ipp":
        get_ipp_command(demisto.args())
    elif demisto.command() == "azure-sc-list-jit":
        list_jit_command(demisto.args())
    elif demisto.command() == "azure-sc-get-jit":
        get_jit_command(demisto.args())
    elif demisto.command() == "azure-sc-initiate-jit":
        initiate_jit_command(demisto.args())
    elif demisto.command() == "azure-sc-delete-jit":
        delete_jit_command(demisto.args())
    elif demisto.command() == "azure-sc-list-storage":
        list_sc_storage_command()
except Exception, e:
    # Log the failure to the integration log before re-raising so the error
    # also surfaces in the war room.
    LOG(e.message)
    LOG.print_log()
    raise
| StarcoderdataPython |
146731 | <gh_stars>0
import json
import numpy as np
class AbstractBenchmark:
    """
    Abstract template for benchmark classes.

    Holds a (possibly file-backed) configuration dict and provides helpers
    to serialize it to/from JSON, plus setters for seed and spaces.
    """

    def __init__(self, config_path=None):
        """
        Initialize benchmark class

        Parameters
        -------
        config_path : str
            Path to load configuration from (if read from file)
        """
        if config_path:
            self.config_path = config_path
            self.read_config_file(self.config_path)
        else:
            # No file given: subclasses are expected to populate self.config.
            self.config = None

    def get_config(self):
        """
        Return current configuration

        Returns
        -------
        dict
            Current config
        """
        return self.config

    def save_config(self, path):
        """
        Save configuration to .json

        Parameters
        ----------
        path : str
            File to save config to
        """
        conf = self.config.copy()
        if "observation_space_type" in self.config:
            # Types are not JSON serializable; store their repr string
            # (read_config_file parses it back into a numpy dtype).
            conf["observation_space_type"] = f"{self.config['observation_space_type']}"
        for k in self.config.keys():
            if isinstance(self.config[k], np.ndarray) or isinstance(
                self.config[k], list
            ):
                # Guard against empty sequences; indexing [0] on an empty
                # value previously raised IndexError.
                if len(self.config[k]) > 0 and type(self.config[k][0]) == np.ndarray:
                    # Nested arrays become nested lists; integer-valued rows
                    # without +/-inf are cast back to int for compactness.
                    conf[k] = list(map(list, conf[k]))
                    for i in range(len(conf[k])):
                        if (
                            not type(conf[k][i][0]) == float
                            and np.inf not in conf[k][i]
                            and -np.inf not in conf[k][i]
                        ):
                            conf[k][i] = list(map(int, conf[k][i]))
        with open(path, "w") as fp:
            json.dump(conf, fp)

    def read_config_file(self, path):
        """
        Read configuration from file

        Parameters
        ----------
        path : str
            Path to config file
        """
        with open(path, "r") as fp:
            self.config = objdict(json.load(fp))
        if "observation_space_type" in self.config:
            # Types have to be numpy dtype (for gym spaces); the stored repr
            # looks like "<class 'numpy.float32'>" and is parsed back here.
            if type(self.config["observation_space_type"]) == str:
                typestring = self.config["observation_space_type"].split(" ")[1][:-2]
                typestring = typestring.split(".")[1]
                self.config["observation_space_type"] = getattr(np, typestring)
        for k in self.config.keys():
            if type(self.config[k]) == list:
                # np.array handles nested lists directly; a former
                # `map(np.array, ...)` call here was a lazily-evaluated no-op
                # in Python 3 and has been removed.
                self.config[k] = np.array(self.config[k])

    def get_environment(self):
        """
        Make benchmark environment

        Returns
        -------
        env : gym.Env
            Benchmark environment
        """
        raise NotImplementedError

    def set_seed(self, seed):
        """
        Set environment seed

        Parameters
        ----------
        seed : int
            New seed
        """
        self.config["seed"] = seed

    def set_action_space(self, kind, args):
        """
        Change action space

        Parameters
        ----------
        kind : str
            Name of action space class
        args: list
            List of arguments to pass to action space class
        """
        self.config["action_space"] = kind
        self.config["action_space_args"] = args

    def set_observation_space(self, kind, args, data_type):
        """
        Change observation_space

        Parameters
        ----------
        kind : str
            Name of observation space class
        args : list
            List of arguments to pass to observation space class
        data_type : type
            Data type of observation space
        """
        self.config["observation_space"] = kind
        self.config["observation_space_args"] = args
        self.config["observation_space_type"] = data_type
# This code is taken from https://goodcode.io/articles/python-dict-object/
class objdict(dict):
    """
    Dict subclass whose keys are also reachable as attributes,
    making config changes more flexible.
    """

    def __getattr__(self, name):
        if name not in self:
            raise AttributeError("No such attribute: " + name)
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        if name not in self:
            raise AttributeError("No such attribute: " + name)
        del self[name]
| StarcoderdataPython |
1629335 | <gh_stars>0
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..augmentations.crop_and_pad_augmentations import center_crop, pad_nd_image_and_seg, random_crop
from ..transforms.abstract_transforms import AbstractTransform
import numpy as np
class CenterCropTransform(AbstractTransform):
    """Crops data and seg (if available) in the center.

    Args:
        crop_size (int or tuple of int): Output patch size
    """

    def __init__(self, crop_size, data_key="data", label_key="seg"):
        self.data_key = data_key
        self.label_key = label_key
        self.crop_size = crop_size

    def __call__(self, **data_dict):
        cropped_data, cropped_seg = center_crop(
            data_dict.get(self.data_key), self.crop_size, data_dict.get(self.label_key)
        )
        data_dict[self.data_key] = cropped_data
        if cropped_seg is not None:
            data_dict[self.label_key] = cropped_seg
        return data_dict
class CenterCropSegTransform(AbstractTransform):
    """Crops only seg in the center (required when using unpadded convolutions
    in a segmentation network). Leaves data untouched.

    Args:
        output_size (int or tuple of int): Output patch size
    """

    def __init__(self, output_size, data_key="data", label_key="seg"):
        self.data_key = data_key
        self.label_key = label_key
        self.output_size = output_size

    def __call__(self, **data_dict):
        seg = data_dict.get(self.label_key)
        if seg is None:
            from warnings import warn

            warn("You shall not pass data_dict without seg: Used CenterCropSegTransform, but there is no seg", Warning)
        else:
            data_dict[self.label_key] = center_crop(seg, self.output_size, None)[0]
        return data_dict
class RandomCropTransform(AbstractTransform):
    """Randomly crops data and seg (if available).

    Args:
        crop_size (int or tuple of int): Output patch size
        margins (tuple of int): minimum distance of the patch border to the
            image border (bilaterally)
    """

    def __init__(self, crop_size=128, margins=(0, 0, 0), data_key="data", label_key="seg"):
        self.data_key = data_key
        self.label_key = label_key
        self.margins = margins
        self.crop_size = crop_size

    def __call__(self, **data_dict):
        cropped_data, cropped_seg = random_crop(
            data_dict.get(self.data_key),
            data_dict.get(self.label_key),
            self.crop_size,
            self.margins,
        )
        data_dict[self.data_key] = cropped_data
        if cropped_seg is not None:
            data_dict[self.label_key] = cropped_seg
        return data_dict
class PadTransform(AbstractTransform):
    def __init__(self, new_size, pad_mode_data='constant', pad_mode_seg='constant',
                 np_pad_kwargs_data=None, np_pad_kwargs_seg=None,
                 data_key="data", label_key="seg"):
        """
        Pads data and seg to new_size. Only supports numpy arrays for data and seg.
        :param new_size: (x, y(, z)) — spatial target size
        :param pad_mode_data: np.pad mode used for data
        :param pad_mode_seg: np.pad mode used for seg
        :param np_pad_kwargs_data: extra kwargs forwarded to np.pad for data
        :param np_pad_kwargs_seg: extra kwargs forwarded to np.pad for seg
        :param data_key:
        :param label_key:
        """
        self.data_key = data_key
        self.label_key = label_key
        self.new_size = new_size
        self.pad_mode_data = pad_mode_data
        self.pad_mode_seg = pad_mode_seg
        self.np_pad_kwargs_data = {} if np_pad_kwargs_data is None else np_pad_kwargs_data
        self.np_pad_kwargs_seg = {} if np_pad_kwargs_seg is None else np_pad_kwargs_seg
        assert isinstance(self.new_size, (tuple, list, np.ndarray)), "new_size must be tuple, list or np.ndarray"

    def __call__(self, **data_dict):
        data = data_dict.get(self.data_key)
        seg = data_dict.get(self.label_key)
        # new_size covers only the spatial axes; data is (b, c, x, y(, z)).
        assert len(self.new_size) + 2 == len(data.shape), "new size must be a tuple/list/np.ndarray with shape " \
                                                          "(x, y(, z))"
        padded_data, padded_seg = pad_nd_image_and_seg(
            data, seg, self.new_size, None,
            np_pad_kwargs_data=self.np_pad_kwargs_data,
            np_pad_kwargs_seg=self.np_pad_kwargs_seg,
            pad_mode_data=self.pad_mode_data,
            pad_mode_seg=self.pad_mode_seg,
        )
        data_dict[self.data_key] = padded_data
        if padded_seg is not None:
            data_dict[self.label_key] = padded_seg
        return data_dict
class RandomShiftTransform(AbstractTransform):
    def __init__(self, shift_mu, shift_sigma, p_per_sample=1, p_per_channel=0.5, border_value=0, apply_to_keys=('data',)):
        """
        Randomly shifts the data by some amount. Equivalent to pad -> random crop but with (probably) less
        computational requirements.

        The shift per spatial dimension is drawn from a Gaussian distribution with mean shift_mu and standard
        deviation shift_sigma. shift_mu and shift_sigma can either be float values OR tuples of float values;
        if tuples, they are interpreted as separate mean and std for each dimension.

        Only 2D and 3D inputs of shape (b, c, x, y(, z)) are supported.

        TODO separate per channel or not?
        :param shift_mu: mean of the shift (scalar or per-dimension tuple)
        :param shift_sigma: std of the shift (scalar or per-dimension tuple)
        :param p_per_sample: probability of shifting a sample at all
        :param p_per_channel: probability of shifting each channel of a selected sample
        :param border_value: fill value for the region uncovered by the shift
        :param apply_to_keys: keys of data_dict the shift is applied to
        """
        self.apply_to_keys = apply_to_keys
        self.p_per_channel = p_per_channel
        self.p_per_sample = p_per_sample
        self.shift_sigma = shift_sigma
        self.shift_mu = shift_mu
        self.border_value = border_value

    def __call__(self, **data_dict):
        for k in self.apply_to_keys:
            workon = data_dict[k]
            for b in range(workon.shape[0]):
                if np.random.uniform(0, 1) < self.p_per_sample:
                    for c in range(workon.shape[1]):
                        if np.random.uniform(0, 1) < self.p_per_channel:
                            # Draw one integer shift per spatial dimension.
                            shift_here = []
                            for d in range(len(workon.shape) - 2):
                                mu = self.shift_mu[d] if isinstance(self.shift_mu, (list, tuple)) else self.shift_mu
                                sigma = self.shift_sigma[d] if isinstance(self.shift_sigma, (list, tuple)) else self.shift_sigma
                                shift_here.append(int(np.round(np.random.normal(mu, sigma, size=1))))
                            # Start from a border_value canvas and paste the
                            # shifted crop of the source channel into it.
                            data_copy = np.ones_like(workon[b, c]) * self.border_value
                            lb_x = max(shift_here[0], 0)
                            ub_x = max(0, min(workon.shape[2], workon.shape[2] + shift_here[0]))
                            lb_y = max(shift_here[1], 0)
                            ub_y = max(0, min(workon.shape[3], workon.shape[3] + shift_here[1]))
                            t_lb_x = max(-shift_here[0], 0)
                            t_ub_x = max(0, min(workon.shape[2], workon.shape[2] - shift_here[0]))
                            t_lb_y = max(-shift_here[1], 0)
                            t_ub_y = max(0, min(workon.shape[3], workon.shape[3] - shift_here[1]))
                            if len(shift_here) == 2:
                                data_copy[t_lb_x:t_ub_x, t_lb_y:t_ub_y] = workon[b, c, lb_x:ub_x, lb_y:ub_y]
                            elif len(shift_here) == 3:
                                lb_z = max(shift_here[2], 0)
                                ub_z = max(0, min(workon.shape[4], workon.shape[4] + shift_here[2]))
                                t_lb_z = max(-shift_here[2], 0)
                                # Bug fix: the target z upper bound was
                                # previously clipped with workon.shape[2]
                                # (the x extent) instead of workon.shape[4],
                                # producing a shape mismatch for anisotropic
                                # 3D volumes.
                                t_ub_z = max(0, min(workon.shape[4], workon.shape[4] - shift_here[2]))
                                data_copy[t_lb_x:t_ub_x, t_lb_y:t_ub_y, t_lb_z:t_ub_z] = workon[b, c, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z]
                            data_dict[k][b, c] = data_copy
        return data_dict
53651 | import datetime
import os
import uuid
from os.path import join as opjoin
from pathlib import Path
import numpy as np
import requests
import yaml
from celery.result import AsyncResult
from django.db.models import Q
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import mixins, status, views, viewsets
from rest_framework.response import Response
from backend import celery_app, settings
from backend_app import mixins as BAMixins, models, serializers, swagger
from backend_app import utils
from deeplearning.tasks import classification, segmentation
from deeplearning.utils import nn_settings
class AllowedPropViewSet(BAMixins.ParamListModelMixin,
                         mixins.CreateModelMixin,
                         viewsets.GenericViewSet):
    """List and create AllowedProperty rows (per-model overrides of a
    property's default/allowed values)."""
    queryset = models.AllowedProperty.objects.all()
    serializer_class = serializers.AllowedPropertySerializer
    # Query parameters required for list(); presumably enforced by
    # BAMixins.ParamListModelMixin — TODO confirm against the mixin.
    params = ['model_id', 'property_id']

    def get_queryset(self):
        # Both parameters are read from the query string; missing values
        # become None and simply yield an empty filter result.
        model_id = self.request.query_params.get('model_id')
        property_id = self.request.query_params.get('property_id')
        self.queryset = models.AllowedProperty.objects.filter(model_id=model_id, property_id=property_id)
        return self.queryset

    @swagger_auto_schema(
        manual_parameters=[openapi.Parameter('model_id', openapi.IN_QUERY, "Integer representing a model",
                                             required=True, type=openapi.TYPE_INTEGER),
                           openapi.Parameter('property_id', openapi.IN_QUERY, "Integer representing a property",
                                             required=True, type=openapi.TYPE_INTEGER)]
    )
    def list(self, request, *args, **kwargs):
        """Return the allowed and default values of a property

        This method returns the values that a property can assume depending on the model employed. \
        It provides a default value and a comma separated list of values to choose from.

        When this api returns an empty list, the property allowed values and default should be retrieved \
        using the `/properties/{id}` API.
        """
        return super().list(request, *args, **kwargs)

    def create(self, request, *args, **kwargs):
        """Create a new AllowedProperty

        This method create a new AllowedProperty
        """
        return super().create(request, *args, **kwargs)
class DatasetViewSet(mixins.ListModelMixin,
                     mixins.RetrieveModelMixin,
                     mixins.CreateModelMixin,
                     viewsets.GenericViewSet):
    """List, retrieve and upload datasets (YAML descriptors stored on disk)."""
    # Single-image datasets are auto-generated (see InferenceSingleViewSet)
    # and are hidden from the public listing.
    queryset = models.Dataset.objects.filter(is_single_image=False)
    serializer_class = serializers.DatasetSerializer

    def get_queryset(self):
        # Optionally narrow the listing to a single task.
        task_id = self.request.query_params.get('task_id')
        if task_id:
            self.queryset = models.Dataset.objects.filter(task_id=task_id, is_single_image=False)
            # self.queryset = models.Dataset.objects.filter(task_id=task_id)
        return self.queryset

    @swagger_auto_schema(
        manual_parameters=[openapi.Parameter('task_id', openapi.IN_QUERY, type=openapi.TYPE_INTEGER, required=False)]
    )
    def list(self, request, *args, **kwargs):
        """Get the list datasets to use for training or finetuning

        This method returns all the datasets in the backend.
        """
        return super().list(request, *args, **kwargs)

    def retrieve(self, request, *args, **kwargs):
        """Retrieve a single dataset

        This method returns the `{id}` dataset.
        """
        return super().retrieve(request, *args, **kwargs)

    @swagger_auto_schema(responses=swagger.DatasetViewSet_create_response)
    def create(self, request, *args, **kwargs):
        """Upload a new dataset downloading it from a URL

        This API uploads a dataset YAML file and stores it in the backend.
        The `path` field must contain the URL of a dataset, e.g. \
        [`dropbox.com/s/ul1yc8owj0hxpu6/isic_segmentation.yml`](https://www.dropbox.com/s/ul1yc8owj0hxpu6/isic_segmentation.yml?dl=1).
        """
        serializer = self.get_serializer(data=request.data)
        if not serializer.is_valid():
            return Response({'error': 'Validation error. Request data is malformed.'},
                            status=status.HTTP_400_BAD_REQUEST)
        # Download the yml file in url
        url = serializer.validated_data['path']
        dataset_name = serializer.validated_data['name']
        dataset_out_path = f'{settings.DATASETS_DIR}/{dataset_name}.yml'
        # Dataset names map 1:1 to files on disk, so reject duplicates.
        if Path(f'{settings.DATASETS_DIR}/{dataset_name}.yml').exists():
            return Response({'error': f'The dataset `{dataset_name}` already exists'},
                            status=status.HTTP_400_BAD_REQUEST)
        try:
            # NOTE(review): no timeout is passed to requests.get, so a stalled
            # download blocks this worker indefinitely — consider timeout=.
            r = requests.get(url, allow_redirects=True)
            if r.status_code == 200:
                # Re-serialize through yaml to normalize the file layout.
                yaml_content = yaml.load(r.content, Loader=yaml.FullLoader)
                with open(f'{settings.DATASETS_DIR}/{dataset_name}.yml', 'w') as f:
                    yaml.dump(yaml_content, f, Dumper=utils.MyDumper, sort_keys=False)
                # Update the path
                serializer.save(path=dataset_out_path)
                headers = self.get_success_headers(serializer.data)
                return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
        except requests.exceptions.RequestException:
            # URL malformed
            return Response({'error': 'URL malformed'}, status=status.HTTP_400_BAD_REQUEST)
        # Reached when the server answered with a non-200 status code.
        # NOTE(review): the message is misleading in that case — the URL was
        # syntactically valid but the download failed.
        return Response({'error': 'URL malformed'}, status=status.HTTP_400_BAD_REQUEST)
class InferenceViewSet(views.APIView):
    @swagger_auto_schema(request_body=serializers.InferenceSerializer,
                         responses=swagger.inferences_post_responses)
    def post(self, request):
        """Start an inference process using a pre-trained model on a dataset

        This is the main entry point to start the inference. \
        It is mandatory to specify a pre-trained model and a dataset.
        """
        # Guard clause: reject invalid payloads, then delegate to the
        # shared inference launcher.
        serializer = serializers.InferenceSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        return utils.do_inference(serializer)
class InferenceSingleViewSet(views.APIView):
    @swagger_auto_schema(request_body=serializers.InferenceSingleSerializer,
                         responses=swagger.inferences_post_responses)
    def post(self, request):
        """Starts the inference providing an image URL

        This API allows the inference of a single image.
        It is mandatory to specify the same fields of `/inference` API, but for dataset_id which is replaced by \
        the url of the image to process.
        """
        serializer = serializers.InferenceSingleSerializer(data=request.data)
        if serializer.is_valid():
            image_url = serializer.validated_data['image_url']
            project_id = serializer.validated_data['project_id']
            task_id = models.Project.objects.get(id=project_id).task_id

            # Create a dataset with the single image to process.
            # The YAML declares one image and puts it in the test split.
            dummy_dataset = f'name: "{image_url}"\n' \
                            f'description: "{image_url} auto-generated dataset"\n' \
                            f'images: ["{image_url}"]\n' \
                            f'split:\n' \
                            f'  test: [0]'
            # Save dataset and get id. is_single_image=True keeps it out of
            # the public dataset listing (see DatasetViewSet.queryset).
            d = models.Dataset(name=f'single-image-dataset', task_id=task_id, path='', is_single_image=True)
            d.save()
            try:
                yaml_content = yaml.load(dummy_dataset, Loader=yaml.FullLoader)
            except yaml.YAMLError as e:
                # Roll back the row created above before reporting the error.
                d.delete()
                print(e)
                return Response({'error': 'Error in YAML parsing'}, status=status.HTTP_400_BAD_REQUEST)
            with open(f'{settings.DATASETS_DIR}/single_image_dataset_{d.id}.yml', 'w') as f:
                yaml.dump(yaml_content, f, Dumper=utils.MyDumper, sort_keys=False)
            # Update the path (the file name depends on the row id, which is
            # only known after the first save).
            d.path = f'{settings.DATASETS_DIR}/single_image_dataset_{d.id}.yml'
            d.save()
            serializer.validated_data['dataset_id'] = d
            return utils.do_inference(serializer)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ModelViewSet(mixins.ListModelMixin,
                   viewsets.GenericViewSet):
    queryset = models.Model.objects.all()
    serializer_class = serializers.ModelSerializer

    def get_queryset(self):
        # Narrow the queryset to one task when task_id is provided.
        task_id = self.request.query_params.get('task_id')
        if not task_id:
            return self.queryset
        self.queryset = models.Model.objects.filter(task_id=task_id)
        return self.queryset

    @swagger_auto_schema(
        manual_parameters=[openapi.Parameter('task_id', openapi.IN_QUERY,
                                             "Integer for filtering the models based on task.",
                                             type=openapi.TYPE_INTEGER, required=False)]
    )
    def list(self, request):
        """Returns the available Neural Network models

        This API allows the client to know which Neural Network models are available in the system in order to allow \
        their selection.

        The optional `task_id` parameter is used to filter them based on the task the models are used for.
        """
        return super().list(request)
class ModelWeightsViewSet(BAMixins.ParamListModelMixin,
                          mixins.RetrieveModelMixin,
                          mixins.UpdateModelMixin,
                          viewsets.GenericViewSet):
    """List, retrieve and rename pre-trained model weights."""
    queryset = models.ModelWeights.objects.all()
    serializer_class = serializers.ModelWeightsSerializer
    # Query parameter required for list(); presumably enforced by
    # BAMixins.ParamListModelMixin — TODO confirm against the mixin.
    params = ['model_id']

    def get_queryset(self):
        if self.action == 'list':
            # list() always filters on the model_id query parameter.
            model_id = self.request.query_params.get('model_id')
            self.queryset = models.ModelWeights.objects.filter(model_id=model_id)
            return self.queryset
        else:
            # retrieve/update operate on the unfiltered queryset.
            return super(ModelWeightsViewSet, self).get_queryset()

    @swagger_auto_schema(
        manual_parameters=[openapi.Parameter('model_id', openapi.IN_QUERY,
                                             "Return the modelweights obtained on `model_id` model.",
                                             type=openapi.TYPE_INTEGER, required=False)]
    )
    def list(self, request):
        """Returns the available Neural Network models

        When 'use pre-trained' is selected, it is possible to query the backend passing a `model_id` to obtain a list
        of dataset on which it was pretrained.
        """
        return super().list(request)

    def retrieve(self, request, *args, **kwargs):
        """Retrieve a single modelweight

        This API returns the modelweight with the requested`{id}`.
        """
        return super().retrieve(request, *args, **kwargs)

    def get_obj(self, id):
        # Helper: fetch a weight row or None instead of raising.
        try:
            return models.ModelWeights.objects.get(id=id)
        except models.ModelWeights.DoesNotExist:
            return None

    def put(self, request, *args, **kwargs):
        """Update an existing weight

        This method updates an existing model weight (e.g. change the name).
        """
        # NOTE(review): custom PUT handler that bypasses DRF's UpdateModelMixin
        # and, unlike update(), responds with the full list of weights sharing
        # the same model_id — confirm clients rely on this shape.
        weight = self.get_obj(request.data['id'])
        if not weight:
            error = {"Error": f"Weight {request.data['id']} does not exist"}
            return Response(data=error, status=status.HTTP_400_BAD_REQUEST)

        serializer = self.serializer_class(weight, data=request.data)
        if serializer.is_valid():
            serializer.save()
            # Returns all the elements with model_id in request
            queryset = models.ModelWeights.objects.filter(model_id=weight.model_id)
            serializer = self.get_serializer(queryset, many=True)
            # serializer = self.serializer_class(queryset, many=True)
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def update(self, request, *args, **kwargs):
        """Update an existing weight

        This method updates an existing model weight (e.g. change the name).
        """
        return super().update(request, *args, **kwargs)

    # Hidden from the generated swagger documentation.
    @swagger_auto_schema(auto_schema=None)
    def partial_update(self, request, *args, **kwargs):
        return super().partial_update(request, *args, **kwargs)
class OutputViewSet(views.APIView):
    @staticmethod
    def trunc(values, decs=0):
        """Truncate `values` to `decs` decimal places (no rounding)."""
        return np.trunc(values * 10 ** decs) / (10 ** decs)

    @swagger_auto_schema(
        manual_parameters=[openapi.Parameter('process_id', openapi.IN_QUERY,
                                             "Pass a required UUID representing a finished process.",
                                             type=openapi.TYPE_STRING, format=openapi.FORMAT_UUID, required=False)],
        responses=swagger.OutputViewSet_get_responses
    )
    def get(self, request, *args, **kwargs):
        """Retrieve results about an inference process

        This API provides information about an `inference` process.In classification task it returns the list \
        of images and an array composed of the classes prediction scores.
        In segmentation task it returns the URLs of the segmented images.
        """
        if not self.request.query_params.get('process_id'):
            # Plain string: the former f-string contained no placeholders.
            error = {'Error': 'Missing required parameter `process_id`'}
            return Response(data=error, status=status.HTTP_400_BAD_REQUEST)
        process_id = self.request.query_params.get('process_id')
        infer = models.Inference.objects.filter(celery_id=process_id)
        if not infer:
            # already deleted weight/training or inference
            return Response({"result": "Process stopped before finishing or non existing."},
                            status=status.HTTP_404_NOT_FOUND)

        if AsyncResult(process_id).status == 'PENDING':
            return Response({"result": "Process in execution. Try later for output results."},
                            status=status.HTTP_200_OK)

        infer = infer.first()
        if not os.path.exists(opjoin(settings.OUTPUTS_DIR, infer.outputfile)):
            return Response({"result": "Output file not found"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # Context manager so the output file handle is always closed
        # (it previously leaked on every request).
        with open(opjoin(settings.OUTPUTS_DIR, infer.outputfile), 'r') as outputs:
            # Differentiate classification and segmentation
            if infer.modelweights_id.model_id.task_id.name.lower() == 'classification':
                # Each line is "<image>;<score>;..." — split into fields.
                lines = outputs.read().splitlines()
                lines = [line.split(';') for line in lines]
                # preds = self.trunc(preds, decs=8)
            else:
                # Segmentation: the output file contains paths of result files;
                # rewrite them as URLs served from MEDIA_URL.
                uri = request.build_absolute_uri(settings.MEDIA_URL)
                lines = outputs.read().splitlines()
                lines = [l.replace(settings.OUTPUTS_DIR, uri) for l in lines]

        response = {'outputs': lines}
        return Response(response, status=status.HTTP_200_OK)
class ProjectViewSet(mixins.ListModelMixin,
                     mixins.RetrieveModelMixin,
                     mixins.CreateModelMixin,
                     mixins.UpdateModelMixin,
                     viewsets.GenericViewSet):
    """List / retrieve / create / update endpoints for Project (no delete)."""
    queryset = models.Project.objects.all()
    serializer_class = serializers.ProjectSerializer

    def get_obj(self, id):
        """Return the Project with primary key `id`, or None when it does not exist."""
        try:
            return models.Project.objects.get(id=id)
        except models.Project.DoesNotExist:
            return None

    def list(self, request, *args, **kwargs):
        """Loads all the projects

        This method lists all the available projects.
        """
        return super().list(request, *args, **kwargs)

    def retrieve(self, request, *args, **kwargs):
        """Retrieve a single project

        Returns a project by `{id}`.
        """
        return super().retrieve(request, *args, **kwargs)

    @swagger_auto_schema(responses=swagger.ProjectViewSet_create_response)
    def create(self, request, *args, **kwargs):
        """Create a new project

        Create a new project.
        """
        return super().create(request, *args, **kwargs)

    def put(self, request, *args, **kwargs):
        """Replace an existing project; on success return the full project list."""
        # Use .get() so a body without an 'id' field answers 400 instead of
        # raising KeyError (which surfaced as an HTTP 500 in the old code).
        project_id = request.data.get('id')
        project = self.get_obj(project_id) if project_id is not None else None
        if not project:
            error = {"Error": f"Project {project_id} does not exist"}
            return Response(data=error, status=status.HTTP_400_BAD_REQUEST)
        serializer = serializers.ProjectSerializer(project, data=request.data)
        if serializer.is_valid():
            serializer.save()
            # Returns all the elements
            return self.list(request)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def update(self, request, *args, **kwargs):
        """Update an existing project

        Update a project instance by providing its `{id}`.
        """
        return super().update(request, *args, **kwargs)

    @swagger_auto_schema(auto_schema=None)
    def partial_update(self, request, *args, **kwargs):
        # Hidden from the API schema; delegates to DRF's default PATCH handling.
        return super().partial_update(request, *args, **kwargs)
class PropertyViewSet(mixins.ListModelMixin,
                      mixins.RetrieveModelMixin,
                      viewsets.GenericViewSet):
    """Read-only endpoints exposing the properties known to the backend."""
    queryset = models.Property.objects.all()
    serializer_class = serializers.PropertyListSerializer

    def get_queryset(self):
        """Optionally narrow the queryset by a case-insensitive `name` parameter.

        Underscores in the supplied name are also matched as spaces, so
        `learning_rate` finds a property stored as "learning rate".
        """
        name = self.request.query_params.get('name')
        if name:
            spaced = name.replace('_', ' ')
            query = Q(name__icontains=name) | Q(name__icontains=spaced)
            self.queryset = models.Property.objects.filter(query)
        return self.queryset

    def list(self, request, *args, **kwargs):
        """Return the Properties supported by backend

        This API allows the client to know which properties are "globally" supported by the backend.
        A model can have different default value and allowed values if the `/allowedProperties` return an entry.
        """
        return super().list(request, *args, **kwargs)

    def retrieve(self, request, *args, **kwargs):
        """Retrieve a single property

        Return a property by `{id}`.
        """
        return super().retrieve(request, *args, **kwargs)
class StatusView(views.APIView):
    """Report the live status of a training or inference process."""

    @swagger_auto_schema(manual_parameters=[openapi.Parameter('process_id', openapi.IN_QUERY,
                                                              "UUID representing a process",
                                                              required=True, type=openapi.TYPE_STRING,
                                                              format=openapi.FORMAT_UUID)],
                         responses=swagger.StatusView_get_response
                         )
    def get(self, request):
        """Return the status of an training or inference process

        This API allows the frontend to query the status of a training or inference, identified by a `process_id` \
        (which is returned by `/train` or `/inference` APIs).
        """
        process_id = self.request.query_params.get('process_id')
        if not process_id:
            error = {'Error': 'Missing required parameter `process_id`'}
            return Response(data=error, status=status.HTTP_400_BAD_REQUEST)
        # A celery id belongs either to a training (ModelWeights) or an inference
        if models.ModelWeights.objects.filter(celery_id=process_id).exists():
            process_type = 'training'
            process = models.ModelWeights.objects.filter(celery_id=process_id).first()
        elif models.Inference.objects.filter(celery_id=process_id).exists():
            process_type = 'inference'
            process = models.Inference.objects.filter(celery_id=process_id).first()
        else:
            res = {
                "result": "error",
                "error": "Process not found."
            }
            return Response(data=res, status=status.HTTP_404_NOT_FOUND)
        try:
            with open(process.logfile, 'r') as f:
                lines = f.read().splitlines()
            last_line = lines[-1]
        except (OSError, IndexError):
            # OSError: log file missing/unreadable; IndexError: log still empty.
            # Narrowed from the previous bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            res = {
                "result": "error",
                "error": "Log file not found"
            }
            return Response(data=res, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        # The worker appends '<done>' as the final line when it finishes; in
        # that case the line before it carries the last real progress data.
        if last_line == '<done>':
            process_status = 'finished'
            last_line = lines[-2]
        else:
            process_status = 'running'
        res = {
            'result': 'ok',
            'status': {
                'process_type': process_type,
                'process_status': process_status,
                'process_data': last_line,
            }
        }
        return Response(data=res, status=status.HTTP_200_OK)
class StopProcessViewSet(views.APIView):
    """Abort a running training or inference Celery task and delete its record."""

    @swagger_auto_schema(request_body=serializers.StopProcessSerializer,
                         responses=swagger.StopProcessViewSet_post_response
                         )
    def post(self, request):
        """Kill a training or inference process

        Stop a training process specifying a `process_id` (which is returned by `/train` or `/inference` APIs).
        """
        serializer = serializers.StopProcessSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        process_id = serializer.data['process_id']
        weights = models.ModelWeights.objects.filter(celery_id=process_id)
        infer = models.Inference.objects.filter(celery_id=process_id)
        if not weights.exists() and not infer.exists():
            # already deleted weight/training or inference
            return Response({"result": "Process already stopped or non existing"}, status=status.HTTP_404_NOT_FOUND)
        if weights:
            weights = weights.first()
            # SIGUSR1 gives the worker a chance to clean up before terminating
            celery_app.control.revoke(weights.celery_id, terminate=True, signal='SIGUSR1')
            response = {"result": "Training stopped"}
            # delete the ModelWeights entry from db
            # also delete ModelWeights fk in project
            weights.delete()
        else:
            infer = infer.first()
            celery_app.control.revoke(infer.celery_id, terminate=True, signal='SIGUSR1')
            response = {"result": "Inference stopped"}
            # delete the Inference entry from db
            infer.delete()
        # todo delete log file? delete weight file?
        return Response(response, status=status.HTTP_200_OK)
class TaskViewSet(mixins.ListModelMixin,
                  viewsets.GenericViewSet):
    """Read-only listing of the deep-learning tasks available on this platform."""
    queryset = models.Task.objects.all()
    serializer_class = serializers.TaskSerializer

    def list(self, request, *args, **kwargs):
        """Return the tasks supported by backend

        This API allows the client to know which task this platform supports. e.g. classification or segmentation tasks.
        """
        # Pure passthrough: DRF's ListModelMixin does the pagination/serialization.
        return super().list(request, *args, **kwargs)
class TrainViewSet(views.APIView):
    # Orchestrates a full training launch: validates the request, creates the
    # ModelWeights row and its TrainingSettings, builds the config and hands
    # it to the task-specific Celery worker.
    @swagger_auto_schema(request_body=serializers.TrainSerializer,
                         responses=swagger.TrainViewSet_post_response
                         )
    def post(self, request):
        """Starts the training of a (possibly pre-trained) model on a dataset
        This is the main entry point to start the training of a model on a dataset. \
        It is mandatory to specify a model to be trained and a dataset.
        When providing a `weights_id`, the training starts from the pre-trained model.
        """
        serializer = serializers.TrainSerializer(data=request.data)
        if serializer.is_valid():
            # Create a new modelweights and start training
            weight = models.ModelWeights()
            weight.dataset_id_id = serializer.data['dataset_id']
            weight.model_id_id = serializer.data['model_id']
            # Validate all referenced rows before saving anything.
            # Single-image datasets are inference-only, hence excluded here.
            if not models.Dataset.objects.filter(id=weight.dataset_id_id, is_single_image=False).exists():
                error = {"Error": f"Dataset with id `{weight.dataset_id_id}` does not exist"}
                return Response(error, status=status.HTTP_400_BAD_REQUEST)
            if not models.Model.objects.filter(id=weight.model_id_id).exists():
                error = {"Error": f"Model with id `{weight.model_id_id}` does not exist"}
                return Response(error, status=status.HTTP_400_BAD_REQUEST)
            if not models.Project.objects.filter(id=serializer.data['project_id']).exists():
                error = {"Error": f"Project with id `{serializer.data['project_id']}` does not exist"}
                return Response(error, status=status.HTTP_400_BAD_REQUEST)
            # Check if dataset and model are both for same task
            if weight.model_id.task_id != weight.dataset_id.task_id:
                error = {"Error": f"Model and dataset must belong to the same task"}
                return Response(error, status=status.HTTP_400_BAD_REQUEST)
            project = models.Project.objects.get(id=serializer.data['project_id'])
            task_name = project.task_id.name.lower()
            weight.task_id = project.task_id
            # Human-readable name: <model>_<dataset>_<timestamp>
            weight.name = f'{weight.model_id.name}_{weight.dataset_id.name}_' \
                          f'{datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")}'
            if serializer.data['weights_id']:
                # Optional fine-tuning: start from an existing weight
                weight.pretrained_on_id = serializer.data['weights_id']
                if not models.ModelWeights.objects.filter(id=weight.pretrained_on_id).exists():
                    error = {"Error": f"Model weight with id `{weight.pretrained_on_id}` does not exist"}
                    return Response(error, status=status.HTTP_400_BAD_REQUEST)
            weight.save()  # Generate an id for the weight
            # The checkpoint file name is derived from the freshly generated id
            ckpts_dir = opjoin(settings.TRAINING_DIR, 'ckpts')
            weight.location = Path(opjoin(ckpts_dir, f'{weight.id}.bin')).absolute()
            # Create a logfile
            weight.logfile = models.generate_file_path(f'{uuid.uuid4().hex}.log', settings.TRAINING_DIR, 'logs')
            weight.save()
            # Hyperparameter resolution, lowest to highest priority:
            # model-specific AllowedProperty defaults -> global Property
            # defaults -> values supplied in the request body.
            hyperparams = {}
            # Check if current model has some custom properties and load them
            props_allowed = models.AllowedProperty.objects.filter(model_id=weight.model_id_id)
            if props_allowed:
                for p in props_allowed:
                    hyperparams[p.property_id.name] = p.default_value
            # Load default values for those properties not in props_allowed
            props_general = models.Property.objects.all()
            for p in props_general:
                if hyperparams.get(p.name) is None:
                    hyperparams[p.name] = p.default
            # Overwrite hyperparams with ones provided by the user
            props = serializer.data['properties']
            for p in props:
                ts = models.TrainingSetting()
                # Get the property by name
                # (underscores are also matched as spaces, e.g. learning_rate)
                name = p['name']
                name = [name, name.replace('_', ' ')]
                queryset = models.Property.objects.filter(Q(name__icontains=name[0]) | Q(name__icontains=name[1]))
                if len(queryset) == 0:
                    # Property does not exist, delete the weight and its associated properties (cascade)
                    weight.delete()
                    error = {"Error": f"Property `{p['name']}` does not exist"}
                    return Response(error, status=status.HTTP_400_BAD_REQUEST)
                property = queryset[0]
                # Persist the user-chosen value for this training run
                ts.property_id = property
                ts.modelweights_id = weight
                ts.value = str(p['value'])
                ts.save()
                hyperparams[property.name] = ts.value
            # Build the backend config for the neural-network job
            config = nn_settings(modelweight=weight, hyperparams=hyperparams)
            if not config:
                return Response({"Error": "Properties error"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            # Differentiate the task and start training
            if task_name == 'classification':
                celery_id = classification.classificate.delay(config)
                # celery_id = classification.classificate(config)
            elif task_name == 'segmentation':
                celery_id = segmentation.segment.delay(config)
                # celery_id = segmentation.segment(config)
            else:
                return Response({'error': 'error on task'}, status=status.HTTP_400_BAD_REQUEST)
            # Re-fetch and store the Celery task id so /status and /stopProcess
            # can find this training later.
            weight = models.ModelWeights.objects.get(id=weight.id)
            weight.celery_id = celery_id.id
            weight.save()
            # todo what if project already has a modelweight?
            # Training started, store the training in project
            project.modelweights_id = weight
            project.save()
            response = {
                "result": "ok",
                "process_id": celery_id.id,
                "weight_id": weight.id
            }
            return Response(response, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class TrainingSettingViewSet(BAMixins.ParamListModelMixin,
                             viewsets.GenericViewSet):
    """Expose the value a Property took in a specific training run."""
    queryset = models.TrainingSetting.objects.all()
    serializer_class = serializers.TrainingSettingSerializer
    params = ['modelweights_id', 'property_id']

    def get_queryset(self):
        """Filter settings to the (modelweights_id, property_id) pair from the query string."""
        filters = {
            'modelweights_id': self.request.query_params.get('modelweights_id'),
            'property_id': self.request.query_params.get('property_id'),
        }
        self.queryset = models.TrainingSetting.objects.filter(**filters)
        return self.queryset

    @swagger_auto_schema(
        manual_parameters=[openapi.Parameter('modelweights_id', openapi.IN_QUERY, "Integer representing a ModelWeights",
                                             required=True, type=openapi.TYPE_INTEGER),
                           openapi.Parameter('property_id', openapi.IN_QUERY, "Integer representing a Property",
                                             required=True, type=openapi.TYPE_INTEGER)]
    )
    def list(self, request, *args, **kwargs):
        """Returns settings used for a training

        This API returns the value used for a property in a specific training (a modelweights).
        It requires a `modelweights_id`, indicating a training process, and a `property_id`.
        """
        return super().list(request, *args, **kwargs)
| StarcoderdataPython |
# NOTE(review): the three lines below are web-page scraping residue, not code.
# Converted to comments to keep the module syntactically valid; confirm they
# can be removed entirely.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.